xref: /netbsd-src/sys/dev/pci/if_wm.c (revision e6c7e151de239c49d2e38720a061ed9d1fa99309)
1 /*	$NetBSD: if_wm.c,v 1.670 2020/03/21 16:47:05 thorpej Exp $	*/
2 
3 /*
4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*******************************************************************************
39 
40   Copyright (c) 2001-2005, Intel Corporation
41   All rights reserved.
42 
43   Redistribution and use in source and binary forms, with or without
44   modification, are permitted provided that the following conditions are met:
45 
46    1. Redistributions of source code must retain the above copyright notice,
47       this list of conditions and the following disclaimer.
48 
49    2. Redistributions in binary form must reproduce the above copyright
50       notice, this list of conditions and the following disclaimer in the
51       documentation and/or other materials provided with the distribution.
52 
53    3. Neither the name of the Intel Corporation nor the names of its
54       contributors may be used to endorse or promote products derived from
55       this software without specific prior written permission.
56 
57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67   POSSIBILITY OF SUCH DAMAGE.
68 
69 *******************************************************************************/
70 /*
71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72  *
73  * TODO (in order of importance):
74  *
75  *	- Check XXX'ed comments
76  *	- TX Multi queue improvement (refine queue selection logic)
77  *	- Split header buffer for newer descriptors
78  *	- EEE (Energy Efficiency Ethernet) for I354
79  *	- Virtual Function
80  *	- Set LED correctly (based on contents in EEPROM)
81  *	- Rework how parameters are loaded from the EEPROM.
82  */
83 
84 #include <sys/cdefs.h>
85 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.670 2020/03/21 16:47:05 thorpej Exp $");
86 
87 #ifdef _KERNEL_OPT
88 #include "opt_net_mpsafe.h"
89 #include "opt_if_wm.h"
90 #endif
91 
92 #include <sys/param.h>
93 #include <sys/systm.h>
94 #include <sys/callout.h>
95 #include <sys/mbuf.h>
96 #include <sys/malloc.h>
97 #include <sys/kmem.h>
98 #include <sys/kernel.h>
99 #include <sys/socket.h>
100 #include <sys/ioctl.h>
101 #include <sys/errno.h>
102 #include <sys/device.h>
103 #include <sys/queue.h>
104 #include <sys/syslog.h>
105 #include <sys/interrupt.h>
106 #include <sys/cpu.h>
107 #include <sys/pcq.h>
108 #include <sys/sysctl.h>
109 #include <sys/workqueue.h>
110 
111 #include <sys/rndsource.h>
112 
113 #include <net/if.h>
114 #include <net/if_dl.h>
115 #include <net/if_media.h>
116 #include <net/if_ether.h>
117 
118 #include <net/bpf.h>
119 
120 #include <net/rss_config.h>
121 
122 #include <netinet/in.h>			/* XXX for struct ip */
123 #include <netinet/in_systm.h>		/* XXX for struct ip */
124 #include <netinet/ip.h>			/* XXX for struct ip */
125 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
126 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
127 
128 #include <sys/bus.h>
129 #include <sys/intr.h>
130 #include <machine/endian.h>
131 
132 #include <dev/mii/mii.h>
133 #include <dev/mii/mdio.h>
134 #include <dev/mii/miivar.h>
135 #include <dev/mii/miidevs.h>
136 #include <dev/mii/mii_bitbang.h>
137 #include <dev/mii/ikphyreg.h>
138 #include <dev/mii/igphyreg.h>
139 #include <dev/mii/igphyvar.h>
140 #include <dev/mii/inbmphyreg.h>
141 #include <dev/mii/ihphyreg.h>
142 
143 #include <dev/pci/pcireg.h>
144 #include <dev/pci/pcivar.h>
145 #include <dev/pci/pcidevs.h>
146 
147 #include <dev/pci/if_wmreg.h>
148 #include <dev/pci/if_wmvar.h>
149 
150 #ifdef WM_DEBUG
151 #define	WM_DEBUG_LINK		__BIT(0)
152 #define	WM_DEBUG_TX		__BIT(1)
153 #define	WM_DEBUG_RX		__BIT(2)
154 #define	WM_DEBUG_GMII		__BIT(3)
155 #define	WM_DEBUG_MANAGE		__BIT(4)
156 #define	WM_DEBUG_NVM		__BIT(5)
157 #define	WM_DEBUG_INIT		__BIT(6)
158 #define	WM_DEBUG_LOCK		__BIT(7)
159 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
160     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
161 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
162 #else
163 #define	DPRINTF(x, y)	__nothing
164 #endif /* WM_DEBUG */
165 
166 #ifdef NET_MPSAFE
167 #define WM_MPSAFE	1
168 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
169 #define WM_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
170 #else
171 #define CALLOUT_FLAGS	0
172 #define WM_WORKQUEUE_FLAGS	WQ_PERCPU
173 #endif
174 
175 #define WM_WORKQUEUE_PRI PRI_SOFTNET
176 
177 /*
178  * This device driver's max interrupt numbers.
179  */
180 #define WM_MAX_NQUEUEINTR	16
181 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
182 
183 #ifndef WM_DISABLE_MSI
184 #define	WM_DISABLE_MSI 0
185 #endif
186 #ifndef WM_DISABLE_MSIX
187 #define	WM_DISABLE_MSIX 0
188 #endif
189 
190 int wm_disable_msi = WM_DISABLE_MSI;
191 int wm_disable_msix = WM_DISABLE_MSIX;
192 
193 #ifndef WM_WATCHDOG_TIMEOUT
194 #define WM_WATCHDOG_TIMEOUT 5
195 #endif
196 static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
197 
198 /*
199  * Transmit descriptor list size.  Due to errata, we can only have
200  * 256 hardware descriptors in the ring on < 82544, but we use 4096
201  * on >= 82544. We tell the upper layers that they can queue a lot
202  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
203  * of them at a time.
204  *
205  * We allow up to 64 DMA segments per packet.  Pathological packet
206  * chains containing many small mbufs have been observed in zero-copy
207  * situations with jumbo frames. If a mbuf chain has more than 64 DMA segments,
208  * m_defrag() is called to reduce it.
209  */
210 #define	WM_NTXSEGS		64
211 #define	WM_IFQUEUELEN		256
212 #define	WM_TXQUEUELEN_MAX	64
213 #define	WM_TXQUEUELEN_MAX_82547	16
214 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
215 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
216 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
217 #define	WM_NTXDESC_82542	256
218 #define	WM_NTXDESC_82544	4096
219 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
220 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
221 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
222 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
223 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
224 
225 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
226 
227 #define	WM_TXINTERQSIZE		256
228 
229 #ifndef WM_TX_PROCESS_LIMIT_DEFAULT
230 #define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
231 #endif
232 #ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
233 #define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
234 #endif
235 
236 /*
237  * Receive descriptor list size.  We have one Rx buffer for normal
238  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
239  * packet.  We allocate 256 receive descriptors, each with a 2k
240  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
241  */
242 #define	WM_NRXDESC		256U
243 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
244 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
245 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
246 
247 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
248 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
249 #endif
250 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
251 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
252 #endif
253 
254 typedef union txdescs {
255 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
256 	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
257 } txdescs_t;
258 
259 typedef union rxdescs {
260 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
261 	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
262 	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
263 } rxdescs_t;
264 
265 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
266 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
267 
268 /*
269  * Software state for transmit jobs.
270  */
271 struct wm_txsoft {
272 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
273 	bus_dmamap_t txs_dmamap;	/* our DMA map */
274 	int txs_firstdesc;		/* first descriptor in packet */
275 	int txs_lastdesc;		/* last descriptor in packet */
276 	int txs_ndesc;			/* # of descriptors used */
277 };
278 
279 /*
280  * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
281  * buffer and a DMA map. For packets which fill more than one buffer, we chain
282  * them together.
283  */
284 struct wm_rxsoft {
285 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
286 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
287 };
288 
289 #define WM_LINKUP_TIMEOUT	50
290 
291 static uint16_t swfwphysem[] = {
292 	SWFW_PHY0_SM,
293 	SWFW_PHY1_SM,
294 	SWFW_PHY2_SM,
295 	SWFW_PHY3_SM
296 };
297 
298 static const uint32_t wm_82580_rxpbs_table[] = {
299 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
300 };
301 
302 struct wm_softc;
303 
304 #ifdef WM_EVENT_COUNTERS
305 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
306 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
307 	struct evcnt qname##_ev_##evname;
308 
309 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
310 	do {								\
311 		snprintf((q)->qname##_##evname##_evcnt_name,		\
312 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
313 		    "%s%02d%s", #qname, (qnum), #evname);		\
314 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
315 		    (evtype), NULL, (xname),				\
316 		    (q)->qname##_##evname##_evcnt_name);		\
317 	} while (0)
318 
319 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
320 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
321 
322 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
323 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
324 
325 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
326 	evcnt_detach(&(q)->qname##_ev_##evname);
327 #endif /* WM_EVENT_COUNTERS */
328 
329 struct wm_txqueue {
330 	kmutex_t *txq_lock;		/* lock for tx operations */
331 
332 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
333 
334 	/* Software state for the transmit descriptors. */
335 	int txq_num;			/* must be a power of two */
336 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
337 
338 	/* TX control data structures. */
339 	int txq_ndesc;			/* must be a power of two */
340 	size_t txq_descsize;		/* a tx descriptor size */
341 	txdescs_t *txq_descs_u;
342 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
343 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
344 	int txq_desc_rseg;		/* real number of control segment */
345 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
346 #define	txq_descs	txq_descs_u->sctxu_txdescs
347 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
348 
349 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
350 
351 	int txq_free;			/* number of free Tx descriptors */
352 	int txq_next;			/* next ready Tx descriptor */
353 
354 	int txq_sfree;			/* number of free Tx jobs */
355 	int txq_snext;			/* next free Tx job */
356 	int txq_sdirty;			/* dirty Tx jobs */
357 
358 	/* These 4 variables are used only on the 82547. */
359 	int txq_fifo_size;		/* Tx FIFO size */
360 	int txq_fifo_head;		/* current head of FIFO */
361 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
362 	int txq_fifo_stall;		/* Tx FIFO is stalled */
363 
364 	/*
365 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
366 	 * CPUs. This queue intermediate them without block.
367 	 */
368 	pcq_t *txq_interq;
369 
370 	/*
371 	 * NEWQUEUE devices must use not ifp->if_flags but txq->txq_flags
372 	 * to manage Tx H/W queue's busy flag.
373 	 */
374 	int txq_flags;			/* flags for H/W queue, see below */
375 #define	WM_TXQ_NO_SPACE	0x1
376 
377 	bool txq_stopping;
378 
379 	bool txq_sending;
380 	time_t txq_lastsent;
381 
382 	uint32_t txq_packets;		/* for AIM */
383 	uint32_t txq_bytes;		/* for AIM */
384 #ifdef WM_EVENT_COUNTERS
385 	/* TX event counters */
386 	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
387 	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
388 	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
389 	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
390 	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
391 					    /* XXX not used? */
392 
393 	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
394 	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
395 	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
396 	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
397 	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
398 	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
399 	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
400 	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
401 					    /* other than toomanyseg */
402 
403 	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped(toomany DMA segs) */
404 	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
405 	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */
406 
407 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
408 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
409 #endif /* WM_EVENT_COUNTERS */
410 };
411 
412 struct wm_rxqueue {
413 	kmutex_t *rxq_lock;		/* lock for rx operations */
414 
415 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
416 
417 	/* Software state for the receive descriptors. */
418 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
419 
420 	/* RX control data structures. */
421 	int rxq_ndesc;			/* must be a power of two */
422 	size_t rxq_descsize;		/* a rx descriptor size */
423 	rxdescs_t *rxq_descs_u;
424 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
425 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
426 	int rxq_desc_rseg;		/* real number of control segment */
427 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
428 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
429 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
430 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
431 
432 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
433 
434 	int rxq_ptr;			/* next ready Rx desc/queue ent */
435 	int rxq_discard;
436 	int rxq_len;
437 	struct mbuf *rxq_head;
438 	struct mbuf *rxq_tail;
439 	struct mbuf **rxq_tailp;
440 
441 	bool rxq_stopping;
442 
443 	uint32_t rxq_packets;		/* for AIM */
444 	uint32_t rxq_bytes;		/* for AIM */
445 #ifdef WM_EVENT_COUNTERS
446 	/* RX event counters */
447 	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
448 	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */
449 
450 	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
451 	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
452 #endif
453 };
454 
455 struct wm_queue {
456 	int wmq_id;			/* index of TX/RX queues */
457 	int wmq_intr_idx;		/* index of MSI-X tables */
458 
459 	uint32_t wmq_itr;		/* interrupt interval per queue. */
460 	bool wmq_set_itr;
461 
462 	struct wm_txqueue wmq_txq;
463 	struct wm_rxqueue wmq_rxq;
464 
465 	bool wmq_txrx_use_workqueue;
466 	struct work wmq_cookie;
467 	void *wmq_si;
468 	krndsource_t rnd_source;	/* random source */
469 };
470 
471 struct wm_phyop {
472 	int (*acquire)(struct wm_softc *);
473 	void (*release)(struct wm_softc *);
474 	int (*readreg_locked)(device_t, int, int, uint16_t *);
475 	int (*writereg_locked)(device_t, int, int, uint16_t);
476 	int reset_delay_us;
477 	bool no_errprint;
478 };
479 
480 struct wm_nvmop {
481 	int (*acquire)(struct wm_softc *);
482 	void (*release)(struct wm_softc *);
483 	int (*read)(struct wm_softc *, int, int, uint16_t *);
484 };
485 
486 /*
487  * Software state per device.
488  */
489 struct wm_softc {
490 	device_t sc_dev;		/* generic device information */
491 	bus_space_tag_t sc_st;		/* bus space tag */
492 	bus_space_handle_t sc_sh;	/* bus space handle */
493 	bus_size_t sc_ss;		/* bus space size */
494 	bus_space_tag_t sc_iot;		/* I/O space tag */
495 	bus_space_handle_t sc_ioh;	/* I/O space handle */
496 	bus_size_t sc_ios;		/* I/O space size */
497 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
498 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
499 	bus_size_t sc_flashs;		/* flash registers space size */
500 	off_t sc_flashreg_offset;	/*
501 					 * offset to flash registers from
502 					 * start of BAR
503 					 */
504 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
505 
506 	struct ethercom sc_ethercom;	/* ethernet common data */
507 	struct mii_data sc_mii;		/* MII/media information */
508 
509 	pci_chipset_tag_t sc_pc;
510 	pcitag_t sc_pcitag;
511 	int sc_bus_speed;		/* PCI/PCIX bus speed */
512 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
513 
514 	uint16_t sc_pcidevid;		/* PCI device ID */
515 	wm_chip_type sc_type;		/* MAC type */
516 	int sc_rev;			/* MAC revision */
517 	wm_phy_type sc_phytype;		/* PHY type */
518 	uint8_t sc_sfptype;		/* SFP type */
519 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
520 #define	WM_MEDIATYPE_UNKNOWN		0x00
521 #define	WM_MEDIATYPE_FIBER		0x01
522 #define	WM_MEDIATYPE_COPPER		0x02
523 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
524 	int sc_funcid;			/* unit number of the chip (0 to 3) */
525 	int sc_flags;			/* flags; see below */
526 	u_short sc_if_flags;		/* last if_flags */
527 	int sc_ec_capenable;		/* last ec_capenable */
528 	int sc_flowflags;		/* 802.3x flow control flags */
529 	uint16_t eee_lp_ability;	/* EEE link partner's ability */
530 	int sc_align_tweak;
531 
532 	void *sc_ihs[WM_MAX_NINTR];	/*
533 					 * interrupt cookie.
534 					 * - legacy and msi use sc_ihs[0] only
535 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
536 					 */
537 	pci_intr_handle_t *sc_intrs;	/*
538 					 * legacy and msi use sc_intrs[0] only
539 					 * msix use sc_intrs[0] to sc_ihs[nintrs-1]
540 					 */
541 	int sc_nintrs;			/* number of interrupts */
542 
543 	int sc_link_intr_idx;		/* index of MSI-X tables */
544 
545 	callout_t sc_tick_ch;		/* tick callout */
546 	bool sc_core_stopping;
547 
548 	int sc_nvm_ver_major;
549 	int sc_nvm_ver_minor;
550 	int sc_nvm_ver_build;
551 	int sc_nvm_addrbits;		/* NVM address bits */
552 	unsigned int sc_nvm_wordsize;	/* NVM word size */
553 	int sc_ich8_flash_base;
554 	int sc_ich8_flash_bank_size;
555 	int sc_nvm_k1_enabled;
556 
557 	int sc_nqueues;
558 	struct wm_queue *sc_queue;
559 	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
560 	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
561 	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
562 	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
563 	struct workqueue *sc_queue_wq;
564 	bool sc_txrx_use_workqueue;
565 
566 	int sc_affinity_offset;
567 
568 #ifdef WM_EVENT_COUNTERS
569 	/* Event counters. */
570 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
571 
572 	/* WM_T_82542_2_1 only */
573 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
574 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
575 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
576 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
577 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
578 #endif /* WM_EVENT_COUNTERS */
579 
580 	struct sysctllog *sc_sysctllog;
581 
582 	/* This variable are used only on the 82547. */
583 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
584 
585 	uint32_t sc_ctrl;		/* prototype CTRL register */
586 #if 0
587 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
588 #endif
589 	uint32_t sc_icr;		/* prototype interrupt bits */
590 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
591 	uint32_t sc_tctl;		/* prototype TCTL register */
592 	uint32_t sc_rctl;		/* prototype RCTL register */
593 	uint32_t sc_txcw;		/* prototype TXCW register */
594 	uint32_t sc_tipg;		/* prototype TIPG register */
595 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
596 	uint32_t sc_pba;		/* prototype PBA register */
597 
598 	int sc_tbi_linkup;		/* TBI link status */
599 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
600 	int sc_tbi_serdes_ticks;	/* tbi ticks */
601 
602 	int sc_mchash_type;		/* multicast filter offset */
603 
604 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
605 
606 	kmutex_t *sc_core_lock;		/* lock for softc operations */
607 	kmutex_t *sc_ich_phymtx;	/*
608 					 * 82574/82583/ICH/PCH specific PHY
609 					 * mutex. For 82574/82583, the mutex
610 					 * is used for both PHY and NVM.
611 					 */
612 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
613 
614 	struct wm_phyop phy;
615 	struct wm_nvmop nvm;
616 };
617 
618 #define WM_CORE_LOCK(_sc)						\
619 	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
620 #define WM_CORE_UNLOCK(_sc)						\
621 	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
622 #define WM_CORE_LOCKED(_sc)						\
623 	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
624 
625 #define	WM_RXCHAIN_RESET(rxq)						\
626 do {									\
627 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
628 	*(rxq)->rxq_tailp = NULL;					\
629 	(rxq)->rxq_len = 0;						\
630 } while (/*CONSTCOND*/0)
631 
632 #define	WM_RXCHAIN_LINK(rxq, m)						\
633 do {									\
634 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
635 	(rxq)->rxq_tailp = &(m)->m_next;				\
636 } while (/*CONSTCOND*/0)
637 
638 #ifdef WM_EVENT_COUNTERS
639 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
640 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
641 
642 #define WM_Q_EVCNT_INCR(qname, evname)			\
643 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
644 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
645 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
646 #else /* !WM_EVENT_COUNTERS */
647 #define	WM_EVCNT_INCR(ev)	/* nothing */
648 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
649 
650 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
651 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
652 #endif /* !WM_EVENT_COUNTERS */
653 
654 #define	CSR_READ(sc, reg)						\
655 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
656 #define	CSR_WRITE(sc, reg, val)						\
657 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
658 #define	CSR_WRITE_FLUSH(sc)						\
659 	(void)CSR_READ((sc), WMREG_STATUS)
660 
661 #define ICH8_FLASH_READ32(sc, reg)					\
662 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
663 	    (reg) + sc->sc_flashreg_offset)
664 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
665 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
666 	    (reg) + sc->sc_flashreg_offset, (data))
667 
668 #define ICH8_FLASH_READ16(sc, reg)					\
669 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
670 	    (reg) + sc->sc_flashreg_offset)
671 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
672 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
673 	    (reg) + sc->sc_flashreg_offset, (data))
674 
675 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
676 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
677 
678 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
679 #define	WM_CDTXADDR_HI(txq, x)						\
680 	(sizeof(bus_addr_t) == 8 ?					\
681 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
682 
683 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
684 #define	WM_CDRXADDR_HI(rxq, x)						\
685 	(sizeof(bus_addr_t) == 8 ?					\
686 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
687 
688 /*
689  * Register read/write functions.
690  * Other than CSR_{READ|WRITE}().
691  */
692 #if 0
693 static inline uint32_t wm_io_read(struct wm_softc *, int);
694 #endif
695 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
696 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
697     uint32_t, uint32_t);
698 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
699 
700 /*
701  * Descriptor sync/init functions.
702  */
703 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
704 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
705 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
706 
707 /*
708  * Device driver interface functions and commonly used functions.
709  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
710  */
711 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
712 static int	wm_match(device_t, cfdata_t, void *);
713 static void	wm_attach(device_t, device_t, void *);
714 static int	wm_detach(device_t, int);
715 static bool	wm_suspend(device_t, const pmf_qual_t *);
716 static bool	wm_resume(device_t, const pmf_qual_t *);
717 static void	wm_watchdog(struct ifnet *);
718 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
719     uint16_t *);
720 static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
721     uint16_t *);
722 static void	wm_tick(void *);
723 static int	wm_ifflags_cb(struct ethercom *);
724 static int	wm_ioctl(struct ifnet *, u_long, void *);
725 /* MAC address related */
726 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
727 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
728 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
729 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
730 static int	wm_rar_count(struct wm_softc *);
731 static void	wm_set_filter(struct wm_softc *);
732 /* Reset and init related */
733 static void	wm_set_vlan(struct wm_softc *);
734 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
735 static void	wm_get_auto_rd_done(struct wm_softc *);
736 static void	wm_lan_init_done(struct wm_softc *);
737 static void	wm_get_cfg_done(struct wm_softc *);
738 static int	wm_phy_post_reset(struct wm_softc *);
739 static int	wm_write_smbus_addr(struct wm_softc *);
740 static int	wm_init_lcd_from_nvm(struct wm_softc *);
741 static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
742 static void	wm_initialize_hardware_bits(struct wm_softc *);
743 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
744 static int	wm_reset_phy(struct wm_softc *);
745 static void	wm_flush_desc_rings(struct wm_softc *);
746 static void	wm_reset(struct wm_softc *);
747 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
748 static void	wm_rxdrain(struct wm_rxqueue *);
749 static void	wm_init_rss(struct wm_softc *);
750 static void	wm_adjust_qnum(struct wm_softc *, int);
751 static inline bool	wm_is_using_msix(struct wm_softc *);
752 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
753 static int	wm_softint_establish(struct wm_softc *, int, int);
754 static int	wm_setup_legacy(struct wm_softc *);
755 static int	wm_setup_msix(struct wm_softc *);
756 static int	wm_init(struct ifnet *);
757 static int	wm_init_locked(struct ifnet *);
758 static void	wm_init_sysctls(struct wm_softc *);
759 static void	wm_unset_stopping_flags(struct wm_softc *);
760 static void	wm_set_stopping_flags(struct wm_softc *);
761 static void	wm_stop(struct ifnet *, int);
762 static void	wm_stop_locked(struct ifnet *, bool, bool);
763 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
764 static void	wm_82547_txfifo_stall(void *);
765 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
766 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
767 /* DMA related */
768 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
769 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
770 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
771 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
772     struct wm_txqueue *);
773 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
774 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
775 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
776     struct wm_rxqueue *);
777 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
778 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
779 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
780 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
781 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
782 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
783 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
784     struct wm_txqueue *);
785 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
786     struct wm_rxqueue *);
787 static int	wm_alloc_txrx_queues(struct wm_softc *);
788 static void	wm_free_txrx_queues(struct wm_softc *);
789 static int	wm_init_txrx_queues(struct wm_softc *);
790 /* Start */
791 static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
792     struct wm_txsoft *, uint32_t *, uint8_t *);
793 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
794 static void	wm_start(struct ifnet *);
795 static void	wm_start_locked(struct ifnet *);
796 static int	wm_transmit(struct ifnet *, struct mbuf *);
797 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
798 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
799 		    bool);
800 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
801     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
802 static void	wm_nq_start(struct ifnet *);
803 static void	wm_nq_start_locked(struct ifnet *);
804 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
805 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
806 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
807 		    bool);
808 static void	wm_deferred_start_locked(struct wm_txqueue *);
809 static void	wm_handle_queue(void *);
810 static void	wm_handle_queue_work(struct work *, void *);
811 /* Interrupt */
812 static bool	wm_txeof(struct wm_txqueue *, u_int);
813 static bool	wm_rxeof(struct wm_rxqueue *, u_int);
814 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
815 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
816 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
817 static void	wm_linkintr(struct wm_softc *, uint32_t);
818 static int	wm_intr_legacy(void *);
819 static inline void	wm_txrxintr_disable(struct wm_queue *);
820 static inline void	wm_txrxintr_enable(struct wm_queue *);
821 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
822 static int	wm_txrxintr_msix(void *);
823 static int	wm_linkintr_msix(void *);
824 
825 /*
826  * Media related.
827  * GMII, SGMII, TBI, SERDES and SFP.
828  */
829 /* Common */
830 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
831 /* GMII related */
832 static void	wm_gmii_reset(struct wm_softc *);
833 static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
834 static int	wm_get_phy_id_82575(struct wm_softc *);
835 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
836 static int	wm_gmii_mediachange(struct ifnet *);
837 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
838 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
839 static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
840 static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
841 static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
842 static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
843 static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
844 static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
845 static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
846 static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
847 static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
848 static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
849 static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
850 static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
851 static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
852 static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
853 static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
854 static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
855 	bool);
856 static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
857 static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
858 static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
859 static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
860 static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
861 static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
862 static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
863 static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
864 static void	wm_gmii_statchg(struct ifnet *);
865 /*
866  * kumeran related (80003, ICH* and PCH*).
867  * These functions are not for accessing MII registers but for accessing
868  * kumeran specific registers.
869  */
870 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
871 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
872 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
873 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
874 /* EMI register related */
875 static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
876 static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
877 static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
878 /* SGMII */
879 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
880 static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
881 static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
882 static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
883 static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
884 /* TBI related */
885 static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
886 static void	wm_tbi_mediainit(struct wm_softc *);
887 static int	wm_tbi_mediachange(struct ifnet *);
888 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
889 static int	wm_check_for_link(struct wm_softc *);
890 static void	wm_tbi_tick(struct wm_softc *);
891 /* SERDES related */
892 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
893 static int	wm_serdes_mediachange(struct ifnet *);
894 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
895 static void	wm_serdes_tick(struct wm_softc *);
896 /* SFP related */
897 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
898 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
899 
900 /*
901  * NVM related.
902  * Microwire, SPI (w/wo EERD) and Flash.
903  */
904 /* Misc functions */
905 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
906 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
907 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
908 /* Microwire */
909 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
910 /* SPI */
911 static int	wm_nvm_ready_spi(struct wm_softc *);
912 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
913 /* Using with EERD */
914 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
915 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
916 /* Flash */
917 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
918     unsigned int *);
919 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
920 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
921 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
922     uint32_t *);
923 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
924 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
925 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
926 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
927 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
928 /* iNVM */
929 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
930 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
931 /* Lock, detecting NVM type, validate checksum and read */
932 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
933 static int	wm_nvm_flash_presence_i210(struct wm_softc *);
934 static int	wm_nvm_validate_checksum(struct wm_softc *);
935 static void	wm_nvm_version_invm(struct wm_softc *);
936 static void	wm_nvm_version(struct wm_softc *);
937 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
938 
939 /*
940  * Hardware semaphores.
941  * Very complexed...
942  */
943 static int	wm_get_null(struct wm_softc *);
944 static void	wm_put_null(struct wm_softc *);
945 static int	wm_get_eecd(struct wm_softc *);
946 static void	wm_put_eecd(struct wm_softc *);
947 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
948 static void	wm_put_swsm_semaphore(struct wm_softc *);
949 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
950 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
951 static int	wm_get_nvm_80003(struct wm_softc *);
952 static void	wm_put_nvm_80003(struct wm_softc *);
953 static int	wm_get_nvm_82571(struct wm_softc *);
954 static void	wm_put_nvm_82571(struct wm_softc *);
955 static int	wm_get_phy_82575(struct wm_softc *);
956 static void	wm_put_phy_82575(struct wm_softc *);
957 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
958 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
959 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
960 static void	wm_put_swflag_ich8lan(struct wm_softc *);
961 static int	wm_get_nvm_ich8lan(struct wm_softc *);
962 static void	wm_put_nvm_ich8lan(struct wm_softc *);
963 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
964 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
965 
966 /*
967  * Management mode and power management related subroutines.
968  * BMC, AMT, suspend/resume and EEE.
969  */
970 #if 0
971 static int	wm_check_mng_mode(struct wm_softc *);
972 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
973 static int	wm_check_mng_mode_82574(struct wm_softc *);
974 static int	wm_check_mng_mode_generic(struct wm_softc *);
975 #endif
976 static int	wm_enable_mng_pass_thru(struct wm_softc *);
977 static bool	wm_phy_resetisblocked(struct wm_softc *);
978 static void	wm_get_hw_control(struct wm_softc *);
979 static void	wm_release_hw_control(struct wm_softc *);
980 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
981 static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
982 static void	wm_init_manageability(struct wm_softc *);
983 static void	wm_release_manageability(struct wm_softc *);
984 static void	wm_get_wakeup(struct wm_softc *);
985 static int	wm_ulp_disable(struct wm_softc *);
986 static int	wm_enable_phy_wakeup(struct wm_softc *);
987 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
988 static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
989 static int	wm_resume_workarounds_pchlan(struct wm_softc *);
990 static void	wm_enable_wakeup(struct wm_softc *);
991 static void	wm_disable_aspm(struct wm_softc *);
992 /* LPLU (Low Power Link Up) */
993 static void	wm_lplu_d0_disable(struct wm_softc *);
994 /* EEE */
995 static int	wm_set_eee_i350(struct wm_softc *);
996 static int	wm_set_eee_pchlan(struct wm_softc *);
997 static int	wm_set_eee(struct wm_softc *);
998 
999 /*
1000  * Workarounds (mainly PHY related).
1001  * Basically, PHY's workarounds are in the PHY drivers.
1002  */
1003 static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
1004 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
1005 static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
1006 static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
1007 static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
1008 static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
1009 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
1010 static int	wm_k1_workaround_lv(struct wm_softc *);
1011 static int	wm_link_stall_workaround_hv(struct wm_softc *);
1012 static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
1013 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
1014 static void	wm_reset_init_script_82575(struct wm_softc *);
1015 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
1016 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
1017 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
1018 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
1019 static int	wm_pll_workaround_i210(struct wm_softc *);
1020 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
1021 
1022 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
1023     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
1024 
1025 /*
1026  * Devices supported by this driver.
1027  */
1028 static const struct wm_product {
1029 	pci_vendor_id_t		wmp_vendor;
1030 	pci_product_id_t	wmp_product;
1031 	const char		*wmp_name;
1032 	wm_chip_type		wmp_type;
1033 	uint32_t		wmp_flags;
1034 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
1035 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
1036 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
1037 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
1038 #define WMP_MEDIATYPE(x)	((x) & 0x03)
1039 } wm_products[] = {
1040 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
1041 	  "Intel i82542 1000BASE-X Ethernet",
1042 	  WM_T_82542_2_1,	WMP_F_FIBER },
1043 
1044 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
1045 	  "Intel i82543GC 1000BASE-X Ethernet",
1046 	  WM_T_82543,		WMP_F_FIBER },
1047 
1048 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
1049 	  "Intel i82543GC 1000BASE-T Ethernet",
1050 	  WM_T_82543,		WMP_F_COPPER },
1051 
1052 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
1053 	  "Intel i82544EI 1000BASE-T Ethernet",
1054 	  WM_T_82544,		WMP_F_COPPER },
1055 
1056 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
1057 	  "Intel i82544EI 1000BASE-X Ethernet",
1058 	  WM_T_82544,		WMP_F_FIBER },
1059 
1060 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
1061 	  "Intel i82544GC 1000BASE-T Ethernet",
1062 	  WM_T_82544,		WMP_F_COPPER },
1063 
1064 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
1065 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
1066 	  WM_T_82544,		WMP_F_COPPER },
1067 
1068 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
1069 	  "Intel i82540EM 1000BASE-T Ethernet",
1070 	  WM_T_82540,		WMP_F_COPPER },
1071 
1072 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
1073 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
1074 	  WM_T_82540,		WMP_F_COPPER },
1075 
1076 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
1077 	  "Intel i82540EP 1000BASE-T Ethernet",
1078 	  WM_T_82540,		WMP_F_COPPER },
1079 
1080 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
1081 	  "Intel i82540EP 1000BASE-T Ethernet",
1082 	  WM_T_82540,		WMP_F_COPPER },
1083 
1084 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
1085 	  "Intel i82540EP 1000BASE-T Ethernet",
1086 	  WM_T_82540,		WMP_F_COPPER },
1087 
1088 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
1089 	  "Intel i82545EM 1000BASE-T Ethernet",
1090 	  WM_T_82545,		WMP_F_COPPER },
1091 
1092 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
1093 	  "Intel i82545GM 1000BASE-T Ethernet",
1094 	  WM_T_82545_3,		WMP_F_COPPER },
1095 
1096 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
1097 	  "Intel i82545GM 1000BASE-X Ethernet",
1098 	  WM_T_82545_3,		WMP_F_FIBER },
1099 
1100 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
1101 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
1102 	  WM_T_82545_3,		WMP_F_SERDES },
1103 
1104 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
1105 	  "Intel i82546EB 1000BASE-T Ethernet",
1106 	  WM_T_82546,		WMP_F_COPPER },
1107 
1108 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
1109 	  "Intel i82546EB 1000BASE-T Ethernet",
1110 	  WM_T_82546,		WMP_F_COPPER },
1111 
1112 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
1113 	  "Intel i82545EM 1000BASE-X Ethernet",
1114 	  WM_T_82545,		WMP_F_FIBER },
1115 
1116 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
1117 	  "Intel i82546EB 1000BASE-X Ethernet",
1118 	  WM_T_82546,		WMP_F_FIBER },
1119 
1120 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
1121 	  "Intel i82546GB 1000BASE-T Ethernet",
1122 	  WM_T_82546_3,		WMP_F_COPPER },
1123 
1124 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
1125 	  "Intel i82546GB 1000BASE-X Ethernet",
1126 	  WM_T_82546_3,		WMP_F_FIBER },
1127 
1128 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
1129 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
1130 	  WM_T_82546_3,		WMP_F_SERDES },
1131 
1132 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
1133 	  "i82546GB quad-port Gigabit Ethernet",
1134 	  WM_T_82546_3,		WMP_F_COPPER },
1135 
1136 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
1137 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
1138 	  WM_T_82546_3,		WMP_F_COPPER },
1139 
1140 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
1141 	  "Intel PRO/1000MT (82546GB)",
1142 	  WM_T_82546_3,		WMP_F_COPPER },
1143 
1144 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
1145 	  "Intel i82541EI 1000BASE-T Ethernet",
1146 	  WM_T_82541,		WMP_F_COPPER },
1147 
1148 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
1149 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
1150 	  WM_T_82541,		WMP_F_COPPER },
1151 
1152 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
1153 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
1154 	  WM_T_82541,		WMP_F_COPPER },
1155 
1156 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
1157 	  "Intel i82541ER 1000BASE-T Ethernet",
1158 	  WM_T_82541_2,		WMP_F_COPPER },
1159 
1160 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
1161 	  "Intel i82541GI 1000BASE-T Ethernet",
1162 	  WM_T_82541_2,		WMP_F_COPPER },
1163 
1164 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
1165 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
1166 	  WM_T_82541_2,		WMP_F_COPPER },
1167 
1168 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
1169 	  "Intel i82541PI 1000BASE-T Ethernet",
1170 	  WM_T_82541_2,		WMP_F_COPPER },
1171 
1172 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
1173 	  "Intel i82547EI 1000BASE-T Ethernet",
1174 	  WM_T_82547,		WMP_F_COPPER },
1175 
1176 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
1177 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
1178 	  WM_T_82547,		WMP_F_COPPER },
1179 
1180 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
1181 	  "Intel i82547GI 1000BASE-T Ethernet",
1182 	  WM_T_82547_2,		WMP_F_COPPER },
1183 
1184 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
1185 	  "Intel PRO/1000 PT (82571EB)",
1186 	  WM_T_82571,		WMP_F_COPPER },
1187 
1188 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
1189 	  "Intel PRO/1000 PF (82571EB)",
1190 	  WM_T_82571,		WMP_F_FIBER },
1191 
1192 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
1193 	  "Intel PRO/1000 PB (82571EB)",
1194 	  WM_T_82571,		WMP_F_SERDES },
1195 
1196 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
1197 	  "Intel PRO/1000 QT (82571EB)",
1198 	  WM_T_82571,		WMP_F_COPPER },
1199 
1200 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
1201 	  "Intel PRO/1000 PT Quad Port Server Adapter",
1202 	  WM_T_82571,		WMP_F_COPPER },
1203 
1204 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
1205 	  "Intel Gigabit PT Quad Port Server ExpressModule",
1206 	  WM_T_82571,		WMP_F_COPPER },
1207 
1208 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
1209 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
1210 	  WM_T_82571,		WMP_F_SERDES },
1211 
1212 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
1213 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
1214 	  WM_T_82571,		WMP_F_SERDES },
1215 
1216 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
1217 	  "Intel 82571EB Quad 1000baseX Ethernet",
1218 	  WM_T_82571,		WMP_F_FIBER },
1219 
1220 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
1221 	  "Intel i82572EI 1000baseT Ethernet",
1222 	  WM_T_82572,		WMP_F_COPPER },
1223 
1224 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
1225 	  "Intel i82572EI 1000baseX Ethernet",
1226 	  WM_T_82572,		WMP_F_FIBER },
1227 
1228 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
1229 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
1230 	  WM_T_82572,		WMP_F_SERDES },
1231 
1232 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
1233 	  "Intel i82572EI 1000baseT Ethernet",
1234 	  WM_T_82572,		WMP_F_COPPER },
1235 
1236 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
1237 	  "Intel i82573E",
1238 	  WM_T_82573,		WMP_F_COPPER },
1239 
1240 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
1241 	  "Intel i82573E IAMT",
1242 	  WM_T_82573,		WMP_F_COPPER },
1243 
1244 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
1245 	  "Intel i82573L Gigabit Ethernet",
1246 	  WM_T_82573,		WMP_F_COPPER },
1247 
1248 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
1249 	  "Intel i82574L",
1250 	  WM_T_82574,		WMP_F_COPPER },
1251 
1252 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
1253 	  "Intel i82574L",
1254 	  WM_T_82574,		WMP_F_COPPER },
1255 
1256 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
1257 	  "Intel i82583V",
1258 	  WM_T_82583,		WMP_F_COPPER },
1259 
1260 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
1261 	  "i80003 dual 1000baseT Ethernet",
1262 	  WM_T_80003,		WMP_F_COPPER },
1263 
1264 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
1265 	  "i80003 dual 1000baseX Ethernet",
1266 	  WM_T_80003,		WMP_F_COPPER },
1267 
1268 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
1269 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
1270 	  WM_T_80003,		WMP_F_SERDES },
1271 
1272 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
1273 	  "Intel i80003 1000baseT Ethernet",
1274 	  WM_T_80003,		WMP_F_COPPER },
1275 
1276 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
1277 	  "Intel i80003 Gigabit Ethernet (SERDES)",
1278 	  WM_T_80003,		WMP_F_SERDES },
1279 
1280 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
1281 	  "Intel i82801H (M_AMT) LAN Controller",
1282 	  WM_T_ICH8,		WMP_F_COPPER },
1283 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
1284 	  "Intel i82801H (AMT) LAN Controller",
1285 	  WM_T_ICH8,		WMP_F_COPPER },
1286 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
1287 	  "Intel i82801H LAN Controller",
1288 	  WM_T_ICH8,		WMP_F_COPPER },
1289 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
1290 	  "Intel i82801H (IFE) 10/100 LAN Controller",
1291 	  WM_T_ICH8,		WMP_F_COPPER },
1292 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
1293 	  "Intel i82801H (M) LAN Controller",
1294 	  WM_T_ICH8,		WMP_F_COPPER },
1295 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
1296 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
1297 	  WM_T_ICH8,		WMP_F_COPPER },
1298 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
1299 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
1300 	  WM_T_ICH8,		WMP_F_COPPER },
1301 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
1302 	  "82567V-3 LAN Controller",
1303 	  WM_T_ICH8,		WMP_F_COPPER },
1304 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
1305 	  "82801I (AMT) LAN Controller",
1306 	  WM_T_ICH9,		WMP_F_COPPER },
1307 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
1308 	  "82801I 10/100 LAN Controller",
1309 	  WM_T_ICH9,		WMP_F_COPPER },
1310 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
1311 	  "82801I (G) 10/100 LAN Controller",
1312 	  WM_T_ICH9,		WMP_F_COPPER },
1313 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
1314 	  "82801I (GT) 10/100 LAN Controller",
1315 	  WM_T_ICH9,		WMP_F_COPPER },
1316 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
1317 	  "82801I (C) LAN Controller",
1318 	  WM_T_ICH9,		WMP_F_COPPER },
1319 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
1320 	  "82801I mobile LAN Controller",
1321 	  WM_T_ICH9,		WMP_F_COPPER },
1322 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
1323 	  "82801I mobile (V) LAN Controller",
1324 	  WM_T_ICH9,		WMP_F_COPPER },
1325 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
1326 	  "82801I mobile (AMT) LAN Controller",
1327 	  WM_T_ICH9,		WMP_F_COPPER },
1328 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
1329 	  "82567LM-4 LAN Controller",
1330 	  WM_T_ICH9,		WMP_F_COPPER },
1331 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
1332 	  "82567LM-2 LAN Controller",
1333 	  WM_T_ICH10,		WMP_F_COPPER },
1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
1335 	  "82567LF-2 LAN Controller",
1336 	  WM_T_ICH10,		WMP_F_COPPER },
1337 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
1338 	  "82567LM-3 LAN Controller",
1339 	  WM_T_ICH10,		WMP_F_COPPER },
1340 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
1341 	  "82567LF-3 LAN Controller",
1342 	  WM_T_ICH10,		WMP_F_COPPER },
1343 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
1344 	  "82567V-2 LAN Controller",
1345 	  WM_T_ICH10,		WMP_F_COPPER },
1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
1347 	  "82567V-3? LAN Controller",
1348 	  WM_T_ICH10,		WMP_F_COPPER },
1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
1350 	  "HANKSVILLE LAN Controller",
1351 	  WM_T_ICH10,		WMP_F_COPPER },
1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
1353 	  "PCH LAN (82577LM) Controller",
1354 	  WM_T_PCH,		WMP_F_COPPER },
1355 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
1356 	  "PCH LAN (82577LC) Controller",
1357 	  WM_T_PCH,		WMP_F_COPPER },
1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
1359 	  "PCH LAN (82578DM) Controller",
1360 	  WM_T_PCH,		WMP_F_COPPER },
1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
1362 	  "PCH LAN (82578DC) Controller",
1363 	  WM_T_PCH,		WMP_F_COPPER },
1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
1365 	  "PCH2 LAN (82579LM) Controller",
1366 	  WM_T_PCH2,		WMP_F_COPPER },
1367 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
1368 	  "PCH2 LAN (82579V) Controller",
1369 	  WM_T_PCH2,		WMP_F_COPPER },
1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
1371 	  "82575EB dual-1000baseT Ethernet",
1372 	  WM_T_82575,		WMP_F_COPPER },
1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
1374 	  "82575EB dual-1000baseX Ethernet (SERDES)",
1375 	  WM_T_82575,		WMP_F_SERDES },
1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
1377 	  "82575GB quad-1000baseT Ethernet",
1378 	  WM_T_82575,		WMP_F_COPPER },
1379 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
1380 	  "82575GB quad-1000baseT Ethernet (PM)",
1381 	  WM_T_82575,		WMP_F_COPPER },
1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
1383 	  "82576 1000BaseT Ethernet",
1384 	  WM_T_82576,		WMP_F_COPPER },
1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
1386 	  "82576 1000BaseX Ethernet",
1387 	  WM_T_82576,		WMP_F_FIBER },
1388 
1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
1390 	  "82576 gigabit Ethernet (SERDES)",
1391 	  WM_T_82576,		WMP_F_SERDES },
1392 
1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
1394 	  "82576 quad-1000BaseT Ethernet",
1395 	  WM_T_82576,		WMP_F_COPPER },
1396 
1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
1398 	  "82576 Gigabit ET2 Quad Port Server Adapter",
1399 	  WM_T_82576,		WMP_F_COPPER },
1400 
1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
1402 	  "82576 gigabit Ethernet",
1403 	  WM_T_82576,		WMP_F_COPPER },
1404 
1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
1406 	  "82576 gigabit Ethernet (SERDES)",
1407 	  WM_T_82576,		WMP_F_SERDES },
1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
1409 	  "82576 quad-gigabit Ethernet (SERDES)",
1410 	  WM_T_82576,		WMP_F_SERDES },
1411 
1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
1413 	  "82580 1000BaseT Ethernet",
1414 	  WM_T_82580,		WMP_F_COPPER },
1415 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
1416 	  "82580 1000BaseX Ethernet",
1417 	  WM_T_82580,		WMP_F_FIBER },
1418 
1419 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
1420 	  "82580 1000BaseT Ethernet (SERDES)",
1421 	  WM_T_82580,		WMP_F_SERDES },
1422 
1423 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
1424 	  "82580 gigabit Ethernet (SGMII)",
1425 	  WM_T_82580,		WMP_F_COPPER },
1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
1427 	  "82580 dual-1000BaseT Ethernet",
1428 	  WM_T_82580,		WMP_F_COPPER },
1429 
1430 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1431 	  "82580 quad-1000BaseX Ethernet",
1432 	  WM_T_82580,		WMP_F_FIBER },
1433 
1434 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
1435 	  "DH89XXCC Gigabit Ethernet (SGMII)",
1436 	  WM_T_82580,		WMP_F_COPPER },
1437 
1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
1439 	  "DH89XXCC Gigabit Ethernet (SERDES)",
1440 	  WM_T_82580,		WMP_F_SERDES },
1441 
1442 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
1443 	  "DH89XXCC 1000BASE-KX Ethernet",
1444 	  WM_T_82580,		WMP_F_SERDES },
1445 
1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
1447 	  "DH89XXCC Gigabit Ethernet (SFP)",
1448 	  WM_T_82580,		WMP_F_SERDES },
1449 
1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
1451 	  "I350 Gigabit Network Connection",
1452 	  WM_T_I350,		WMP_F_COPPER },
1453 
1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
1455 	  "I350 Gigabit Fiber Network Connection",
1456 	  WM_T_I350,		WMP_F_FIBER },
1457 
1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
1459 	  "I350 Gigabit Backplane Connection",
1460 	  WM_T_I350,		WMP_F_SERDES },
1461 
1462 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
1463 	  "I350 Quad Port Gigabit Ethernet",
1464 	  WM_T_I350,		WMP_F_SERDES },
1465 
1466 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
1467 	  "I350 Gigabit Connection",
1468 	  WM_T_I350,		WMP_F_COPPER },
1469 
1470 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
1471 	  "I354 Gigabit Ethernet (KX)",
1472 	  WM_T_I354,		WMP_F_SERDES },
1473 
1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
1475 	  "I354 Gigabit Ethernet (SGMII)",
1476 	  WM_T_I354,		WMP_F_COPPER },
1477 
1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
1479 	  "I354 Gigabit Ethernet (2.5G)",
1480 	  WM_T_I354,		WMP_F_COPPER },
1481 
1482 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
1483 	  "I210-T1 Ethernet Server Adapter",
1484 	  WM_T_I210,		WMP_F_COPPER },
1485 
1486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
1487 	  "I210 Ethernet (Copper OEM)",
1488 	  WM_T_I210,		WMP_F_COPPER },
1489 
1490 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
1491 	  "I210 Ethernet (Copper IT)",
1492 	  WM_T_I210,		WMP_F_COPPER },
1493 
1494 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
1495 	  "I210 Ethernet (Copper, FLASH less)",
1496 	  WM_T_I210,		WMP_F_COPPER },
1497 
1498 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
1499 	  "I210 Gigabit Ethernet (Fiber)",
1500 	  WM_T_I210,		WMP_F_FIBER },
1501 
1502 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
1503 	  "I210 Gigabit Ethernet (SERDES)",
1504 	  WM_T_I210,		WMP_F_SERDES },
1505 
1506 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
1507 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
1508 	  WM_T_I210,		WMP_F_SERDES },
1509 
1510 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
1511 	  "I210 Gigabit Ethernet (SGMII)",
1512 	  WM_T_I210,		WMP_F_COPPER },
1513 
1514 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
1515 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
1516 	  WM_T_I210,		WMP_F_COPPER },
1517 
1518 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
1519 	  "I211 Ethernet (COPPER)",
1520 	  WM_T_I211,		WMP_F_COPPER },
1521 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
1522 	  "I217 V Ethernet Connection",
1523 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1524 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
1525 	  "I217 LM Ethernet Connection",
1526 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1527 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
1528 	  "I218 V Ethernet Connection",
1529 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1530 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
1531 	  "I218 V Ethernet Connection",
1532 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1533 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
1534 	  "I218 V Ethernet Connection",
1535 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1536 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
1537 	  "I218 LM Ethernet Connection",
1538 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1539 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
1540 	  "I218 LM Ethernet Connection",
1541 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1542 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
1543 	  "I218 LM Ethernet Connection",
1544 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1545 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
1546 	  "I219 LM Ethernet Connection",
1547 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1548 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
1549 	  "I219 LM Ethernet Connection",
1550 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1551 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
1552 	  "I219 LM Ethernet Connection",
1553 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1554 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
1555 	  "I219 LM Ethernet Connection",
1556 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1557 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
1558 	  "I219 LM Ethernet Connection",
1559 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1560 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
1561 	  "I219 LM Ethernet Connection",
1562 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1563 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
1564 	  "I219 LM Ethernet Connection",
1565 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1566 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
1567 	  "I219 LM Ethernet Connection",
1568 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1569 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
1570 	  "I219 LM Ethernet Connection",
1571 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1572 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
1573 	  "I219 LM Ethernet Connection",
1574 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1575 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
1576 	  "I219 LM Ethernet Connection",
1577 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1578 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
1579 	  "I219 LM Ethernet Connection",
1580 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1581 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
1582 	  "I219 LM Ethernet Connection",
1583 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1584 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
1585 	  "I219 LM Ethernet Connection",
1586 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1587 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
1588 	  "I219 LM Ethernet Connection",
1589 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1590 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
1591 	  "I219 V Ethernet Connection",
1592 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1593 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
1594 	  "I219 V Ethernet Connection",
1595 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1596 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
1597 	  "I219 V Ethernet Connection",
1598 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1599 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
1600 	  "I219 V Ethernet Connection",
1601 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1602 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
1603 	  "I219 V Ethernet Connection",
1604 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1605 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
1606 	  "I219 V Ethernet Connection",
1607 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1608 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
1609 	  "I219 V Ethernet Connection",
1610 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1611 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
1612 	  "I219 V Ethernet Connection",
1613 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1614 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
1615 	  "I219 V Ethernet Connection",
1616 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1617 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
1618 	  "I219 V Ethernet Connection",
1619 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1620 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
1621 	  "I219 V Ethernet Connection",
1622 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1623 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
1624 	  "I219 V Ethernet Connection",
1625 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1626 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
1627 	  "I219 V Ethernet Connection",
1628 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1629 	{ 0,			0,
1630 	  NULL,
1631 	  0,			0 },
1632 };
1633 
1634 /*
1635  * Register read/write functions.
1636  * Other than CSR_{READ|WRITE}().
1637  */
1638 
1639 #if 0 /* Not currently used */
1640 static inline uint32_t
1641 wm_io_read(struct wm_softc *sc, int reg)
1642 {
1643 
1644 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1645 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1646 }
1647 #endif
1648 
1649 static inline void
1650 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1651 {
1652 
1653 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1654 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1655 }
1656 
1657 static inline void
1658 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1659     uint32_t data)
1660 {
1661 	uint32_t regval;
1662 	int i;
1663 
1664 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1665 
1666 	CSR_WRITE(sc, reg, regval);
1667 
1668 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1669 		delay(5);
1670 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1671 			break;
1672 	}
1673 	if (i == SCTL_CTL_POLL_TIMEOUT) {
1674 		aprint_error("%s: WARNING:"
1675 		    " i82575 reg 0x%08x setup did not indicate ready\n",
1676 		    device_xname(sc->sc_dev), reg);
1677 	}
1678 }
1679 
1680 static inline void
1681 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1682 {
1683 	wa->wa_low = htole32(v & 0xffffffffU);
1684 	if (sizeof(bus_addr_t) == 8)
1685 		wa->wa_high = htole32((uint64_t) v >> 32);
1686 	else
1687 		wa->wa_high = 0;
1688 }
1689 
1690 /*
1691  * Descriptor sync/init functions.
1692  */
1693 static inline void
1694 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1695 {
1696 	struct wm_softc *sc = txq->txq_sc;
1697 
1698 	/* If it will wrap around, sync to the end of the ring. */
1699 	if ((start + num) > WM_NTXDESC(txq)) {
1700 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1701 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
1702 		    (WM_NTXDESC(txq) - start), ops);
1703 		num -= (WM_NTXDESC(txq) - start);
1704 		start = 0;
1705 	}
1706 
1707 	/* Now sync whatever is left. */
1708 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1709 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
1710 }
1711 
1712 static inline void
1713 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1714 {
1715 	struct wm_softc *sc = rxq->rxq_sc;
1716 
1717 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1718 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
1719 }
1720 
1721 static inline void
1722 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1723 {
1724 	struct wm_softc *sc = rxq->rxq_sc;
1725 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1726 	struct mbuf *m = rxs->rxs_mbuf;
1727 
1728 	/*
1729 	 * Note: We scoot the packet forward 2 bytes in the buffer
1730 	 * so that the payload after the Ethernet header is aligned
1731 	 * to a 4-byte boundary.
1732 
1733 	 * XXX BRAINDAMAGE ALERT!
1734 	 * The stupid chip uses the same size for every buffer, which
1735 	 * is set in the Receive Control register.  We are using the 2K
1736 	 * size option, but what we REALLY want is (2K - 2)!  For this
1737 	 * reason, we can't "scoot" packets longer than the standard
1738 	 * Ethernet MTU.  On strict-alignment platforms, if the total
1739 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
1740 	 * the upper layer copy the headers.
1741 	 */
1742 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
1743 
1744 	if (sc->sc_type == WM_T_82574) {
1745 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
1746 		rxd->erx_data.erxd_addr =
1747 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1748 		rxd->erx_data.erxd_dd = 0;
1749 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
1750 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
1751 
1752 		rxd->nqrx_data.nrxd_paddr =
1753 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1754 		/* Currently, split header is not supported. */
1755 		rxd->nqrx_data.nrxd_haddr = 0;
1756 	} else {
1757 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1758 
1759 		wm_set_dma_addr(&rxd->wrx_addr,
1760 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1761 		rxd->wrx_len = 0;
1762 		rxd->wrx_cksum = 0;
1763 		rxd->wrx_status = 0;
1764 		rxd->wrx_errors = 0;
1765 		rxd->wrx_special = 0;
1766 	}
1767 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1768 
1769 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
1770 }
1771 
1772 /*
1773  * Device driver interface functions and commonly used functions.
1774  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1775  */
1776 
1777 /* Lookup supported device table */
1778 static const struct wm_product *
1779 wm_lookup(const struct pci_attach_args *pa)
1780 {
1781 	const struct wm_product *wmp;
1782 
1783 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1784 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1785 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1786 			return wmp;
1787 	}
1788 	return NULL;
1789 }
1790 
1791 /* The match function (ca_match) */
1792 static int
1793 wm_match(device_t parent, cfdata_t cf, void *aux)
1794 {
1795 	struct pci_attach_args *pa = aux;
1796 
1797 	if (wm_lookup(pa) != NULL)
1798 		return 1;
1799 
1800 	return 0;
1801 }
1802 
1803 /* The attach function (ca_attach) */
1804 static void
1805 wm_attach(device_t parent, device_t self, void *aux)
1806 {
1807 	struct wm_softc *sc = device_private(self);
1808 	struct pci_attach_args *pa = aux;
1809 	prop_dictionary_t dict;
1810 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1811 	pci_chipset_tag_t pc = pa->pa_pc;
1812 	int counts[PCI_INTR_TYPE_SIZE];
1813 	pci_intr_type_t max_type;
1814 	const char *eetype, *xname;
1815 	bus_space_tag_t memt;
1816 	bus_space_handle_t memh;
1817 	bus_size_t memsize;
1818 	int memh_valid;
1819 	int i, error;
1820 	const struct wm_product *wmp;
1821 	prop_data_t ea;
1822 	prop_number_t pn;
1823 	uint8_t enaddr[ETHER_ADDR_LEN];
1824 	char buf[256];
1825 	char wqname[MAXCOMLEN];
1826 	uint16_t cfg1, cfg2, swdpin, nvmword;
1827 	pcireg_t preg, memtype;
1828 	uint16_t eeprom_data, apme_mask;
1829 	bool force_clear_smbi;
1830 	uint32_t link_mode;
1831 	uint32_t reg;
1832 
1833 	sc->sc_dev = self;
1834 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
1835 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
1836 	sc->sc_core_stopping = false;
1837 
1838 	wmp = wm_lookup(pa);
1839 #ifdef DIAGNOSTIC
1840 	if (wmp == NULL) {
1841 		printf("\n");
1842 		panic("wm_attach: impossible");
1843 	}
1844 #endif
1845 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
1846 
1847 	sc->sc_pc = pa->pa_pc;
1848 	sc->sc_pcitag = pa->pa_tag;
1849 
1850 	if (pci_dma64_available(pa))
1851 		sc->sc_dmat = pa->pa_dmat64;
1852 	else
1853 		sc->sc_dmat = pa->pa_dmat;
1854 
1855 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
1856 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
1857 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1858 
1859 	sc->sc_type = wmp->wmp_type;
1860 
1861 	/* Set default function pointers */
1862 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
1863 	sc->phy.release = sc->nvm.release = wm_put_null;
1864 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
1865 
1866 	if (sc->sc_type < WM_T_82543) {
1867 		if (sc->sc_rev < 2) {
1868 			aprint_error_dev(sc->sc_dev,
1869 			    "i82542 must be at least rev. 2\n");
1870 			return;
1871 		}
1872 		if (sc->sc_rev < 3)
1873 			sc->sc_type = WM_T_82542_2_0;
1874 	}
1875 
1876 	/*
1877 	 * Disable MSI for Errata:
1878 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
1879 	 *
1880 	 *  82544: Errata 25
1881 	 *  82540: Errata  6 (easy to reproduce device timeout)
1882 	 *  82545: Errata  4 (easy to reproduce device timeout)
1883 	 *  82546: Errata 26 (easy to reproduce device timeout)
1884 	 *  82541: Errata  7 (easy to reproduce device timeout)
1885 	 *
1886 	 * "Byte Enables 2 and 3 are not set on MSI writes"
1887 	 *
1888 	 *  82571 & 82572: Errata 63
1889 	 */
1890 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
1891 	    || (sc->sc_type == WM_T_82572))
1892 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
1893 
1894 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1895 	    || (sc->sc_type == WM_T_82580)
1896 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
1897 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
1898 		sc->sc_flags |= WM_F_NEWQUEUE;
1899 
1900 	/* Set device properties (mactype) */
1901 	dict = device_properties(sc->sc_dev);
1902 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1903 
1904 	/*
1905 	 * Map the device.  All devices support memory-mapped acccess,
1906 	 * and it is really required for normal operation.
1907 	 */
1908 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1909 	switch (memtype) {
1910 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1911 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1912 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1913 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1914 		break;
1915 	default:
1916 		memh_valid = 0;
1917 		break;
1918 	}
1919 
1920 	if (memh_valid) {
1921 		sc->sc_st = memt;
1922 		sc->sc_sh = memh;
1923 		sc->sc_ss = memsize;
1924 	} else {
1925 		aprint_error_dev(sc->sc_dev,
1926 		    "unable to map device registers\n");
1927 		return;
1928 	}
1929 
1930 	/*
1931 	 * In addition, i82544 and later support I/O mapped indirect
1932 	 * register access.  It is not desirable (nor supported in
1933 	 * this driver) to use it for normal operation, though it is
1934 	 * required to work around bugs in some chip versions.
1935 	 */
1936 	if (sc->sc_type >= WM_T_82544) {
1937 		/* First we have to find the I/O BAR. */
1938 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1939 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1940 			if (memtype == PCI_MAPREG_TYPE_IO)
1941 				break;
1942 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
1943 			    PCI_MAPREG_MEM_TYPE_64BIT)
1944 				i += 4;	/* skip high bits, too */
1945 		}
1946 		if (i < PCI_MAPREG_END) {
1947 			/*
1948 			 * We found PCI_MAPREG_TYPE_IO. Note that 82580
1949 			 * (and newer?) chip has no PCI_MAPREG_TYPE_IO.
1950 			 * It's no problem because newer chips has no this
1951 			 * bug.
1952 			 *
1953 			 * The i8254x doesn't apparently respond when the
1954 			 * I/O BAR is 0, which looks somewhat like it's not
1955 			 * been configured.
1956 			 */
1957 			preg = pci_conf_read(pc, pa->pa_tag, i);
1958 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1959 				aprint_error_dev(sc->sc_dev,
1960 				    "WARNING: I/O BAR at zero.\n");
1961 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1962 					0, &sc->sc_iot, &sc->sc_ioh,
1963 					NULL, &sc->sc_ios) == 0) {
1964 				sc->sc_flags |= WM_F_IOH_VALID;
1965 			} else
1966 				aprint_error_dev(sc->sc_dev,
1967 				    "WARNING: unable to map I/O space\n");
1968 		}
1969 
1970 	}
1971 
1972 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
1973 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1974 	preg |= PCI_COMMAND_MASTER_ENABLE;
1975 	if (sc->sc_type < WM_T_82542_2_1)
1976 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1977 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1978 
1979 	/* Power up chip */
1980 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
1981 	    && error != EOPNOTSUPP) {
1982 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1983 		return;
1984 	}
1985 
1986 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
1987 	/*
1988 	 *  Don't use MSI-X if we can use only one queue to save interrupt
1989 	 * resource.
1990 	 */
1991 	if (sc->sc_nqueues > 1) {
1992 		max_type = PCI_INTR_TYPE_MSIX;
1993 		/*
1994 		 *  82583 has a MSI-X capability in the PCI configuration space
1995 		 * but it doesn't support it. At least the document doesn't
1996 		 * say anything about MSI-X.
1997 		 */
1998 		counts[PCI_INTR_TYPE_MSIX]
1999 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
2000 	} else {
2001 		max_type = PCI_INTR_TYPE_MSI;
2002 		counts[PCI_INTR_TYPE_MSIX] = 0;
2003 	}
2004 
2005 	/* Allocation settings */
2006 	counts[PCI_INTR_TYPE_MSI] = 1;
2007 	counts[PCI_INTR_TYPE_INTX] = 1;
2008 	/* overridden by disable flags */
2009 	if (wm_disable_msi != 0) {
2010 		counts[PCI_INTR_TYPE_MSI] = 0;
2011 		if (wm_disable_msix != 0) {
2012 			max_type = PCI_INTR_TYPE_INTX;
2013 			counts[PCI_INTR_TYPE_MSIX] = 0;
2014 		}
2015 	} else if (wm_disable_msix != 0) {
2016 		max_type = PCI_INTR_TYPE_MSI;
2017 		counts[PCI_INTR_TYPE_MSIX] = 0;
2018 	}
2019 
2020 alloc_retry:
2021 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
2022 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
2023 		return;
2024 	}
2025 
2026 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
2027 		error = wm_setup_msix(sc);
2028 		if (error) {
2029 			pci_intr_release(pc, sc->sc_intrs,
2030 			    counts[PCI_INTR_TYPE_MSIX]);
2031 
2032 			/* Setup for MSI: Disable MSI-X */
2033 			max_type = PCI_INTR_TYPE_MSI;
2034 			counts[PCI_INTR_TYPE_MSI] = 1;
2035 			counts[PCI_INTR_TYPE_INTX] = 1;
2036 			goto alloc_retry;
2037 		}
2038 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
2039 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
2040 		error = wm_setup_legacy(sc);
2041 		if (error) {
2042 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
2043 			    counts[PCI_INTR_TYPE_MSI]);
2044 
2045 			/* The next try is for INTx: Disable MSI */
2046 			max_type = PCI_INTR_TYPE_INTX;
2047 			counts[PCI_INTR_TYPE_INTX] = 1;
2048 			goto alloc_retry;
2049 		}
2050 	} else {
2051 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
2052 		error = wm_setup_legacy(sc);
2053 		if (error) {
2054 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
2055 			    counts[PCI_INTR_TYPE_INTX]);
2056 			return;
2057 		}
2058 	}
2059 
2060 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
2061 	error = workqueue_create(&sc->sc_queue_wq, wqname,
2062 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
2063 	    WM_WORKQUEUE_FLAGS);
2064 	if (error) {
2065 		aprint_error_dev(sc->sc_dev,
2066 		    "unable to create workqueue\n");
2067 		goto out;
2068 	}
2069 
2070 	/*
2071 	 * Check the function ID (unit number of the chip).
2072 	 */
2073 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
2074 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
2075 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2076 	    || (sc->sc_type == WM_T_82580)
2077 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2078 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
2079 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
2080 	else
2081 		sc->sc_funcid = 0;
2082 
2083 	/*
2084 	 * Determine a few things about the bus we're connected to.
2085 	 */
2086 	if (sc->sc_type < WM_T_82543) {
2087 		/* We don't really know the bus characteristics here. */
2088 		sc->sc_bus_speed = 33;
2089 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
2090 		/*
2091 		 * CSA (Communication Streaming Architecture) is about as fast
2092 		 * a 32-bit 66MHz PCI Bus.
2093 		 */
2094 		sc->sc_flags |= WM_F_CSA;
2095 		sc->sc_bus_speed = 66;
2096 		aprint_verbose_dev(sc->sc_dev,
2097 		    "Communication Streaming Architecture\n");
2098 		if (sc->sc_type == WM_T_82547) {
2099 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
2100 			callout_setfunc(&sc->sc_txfifo_ch,
2101 			    wm_82547_txfifo_stall, sc);
2102 			aprint_verbose_dev(sc->sc_dev,
2103 			    "using 82547 Tx FIFO stall work-around\n");
2104 		}
2105 	} else if (sc->sc_type >= WM_T_82571) {
2106 		sc->sc_flags |= WM_F_PCIE;
2107 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
2108 		    && (sc->sc_type != WM_T_ICH10)
2109 		    && (sc->sc_type != WM_T_PCH)
2110 		    && (sc->sc_type != WM_T_PCH2)
2111 		    && (sc->sc_type != WM_T_PCH_LPT)
2112 		    && (sc->sc_type != WM_T_PCH_SPT)
2113 		    && (sc->sc_type != WM_T_PCH_CNP)) {
2114 			/* ICH* and PCH* have no PCIe capability registers */
2115 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2116 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
2117 				NULL) == 0)
2118 				aprint_error_dev(sc->sc_dev,
2119 				    "unable to find PCIe capability\n");
2120 		}
2121 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
2122 	} else {
2123 		reg = CSR_READ(sc, WMREG_STATUS);
2124 		if (reg & STATUS_BUS64)
2125 			sc->sc_flags |= WM_F_BUS64;
2126 		if ((reg & STATUS_PCIX_MODE) != 0) {
2127 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
2128 
2129 			sc->sc_flags |= WM_F_PCIX;
2130 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2131 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
2132 				aprint_error_dev(sc->sc_dev,
2133 				    "unable to find PCIX capability\n");
2134 			else if (sc->sc_type != WM_T_82545_3 &&
2135 				 sc->sc_type != WM_T_82546_3) {
2136 				/*
2137 				 * Work around a problem caused by the BIOS
2138 				 * setting the max memory read byte count
2139 				 * incorrectly.
2140 				 */
2141 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
2142 				    sc->sc_pcixe_capoff + PCIX_CMD);
2143 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
2144 				    sc->sc_pcixe_capoff + PCIX_STATUS);
2145 
2146 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
2147 				    PCIX_CMD_BYTECNT_SHIFT;
2148 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
2149 				    PCIX_STATUS_MAXB_SHIFT;
2150 				if (bytecnt > maxb) {
2151 					aprint_verbose_dev(sc->sc_dev,
2152 					    "resetting PCI-X MMRBC: %d -> %d\n",
2153 					    512 << bytecnt, 512 << maxb);
2154 					pcix_cmd = (pcix_cmd &
2155 					    ~PCIX_CMD_BYTECNT_MASK) |
2156 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
2157 					pci_conf_write(pa->pa_pc, pa->pa_tag,
2158 					    sc->sc_pcixe_capoff + PCIX_CMD,
2159 					    pcix_cmd);
2160 				}
2161 			}
2162 		}
2163 		/*
2164 		 * The quad port adapter is special; it has a PCIX-PCIX
2165 		 * bridge on the board, and can run the secondary bus at
2166 		 * a higher speed.
2167 		 */
2168 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
2169 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
2170 								      : 66;
2171 		} else if (sc->sc_flags & WM_F_PCIX) {
2172 			switch (reg & STATUS_PCIXSPD_MASK) {
2173 			case STATUS_PCIXSPD_50_66:
2174 				sc->sc_bus_speed = 66;
2175 				break;
2176 			case STATUS_PCIXSPD_66_100:
2177 				sc->sc_bus_speed = 100;
2178 				break;
2179 			case STATUS_PCIXSPD_100_133:
2180 				sc->sc_bus_speed = 133;
2181 				break;
2182 			default:
2183 				aprint_error_dev(sc->sc_dev,
2184 				    "unknown PCIXSPD %d; assuming 66MHz\n",
2185 				    reg & STATUS_PCIXSPD_MASK);
2186 				sc->sc_bus_speed = 66;
2187 				break;
2188 			}
2189 		} else
2190 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
2191 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
2192 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
2193 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
2194 	}
2195 
2196 	/* clear interesting stat counters */
2197 	CSR_READ(sc, WMREG_COLC);
2198 	CSR_READ(sc, WMREG_RXERRC);
2199 
2200 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
2201 	    || (sc->sc_type >= WM_T_ICH8))
2202 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2203 	if (sc->sc_type >= WM_T_ICH8)
2204 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2205 
2206 	/* Set PHY, NVM mutex related stuff */
2207 	switch (sc->sc_type) {
2208 	case WM_T_82542_2_0:
2209 	case WM_T_82542_2_1:
2210 	case WM_T_82543:
2211 	case WM_T_82544:
2212 		/* Microwire */
2213 		sc->nvm.read = wm_nvm_read_uwire;
2214 		sc->sc_nvm_wordsize = 64;
2215 		sc->sc_nvm_addrbits = 6;
2216 		break;
2217 	case WM_T_82540:
2218 	case WM_T_82545:
2219 	case WM_T_82545_3:
2220 	case WM_T_82546:
2221 	case WM_T_82546_3:
2222 		/* Microwire */
2223 		sc->nvm.read = wm_nvm_read_uwire;
2224 		reg = CSR_READ(sc, WMREG_EECD);
2225 		if (reg & EECD_EE_SIZE) {
2226 			sc->sc_nvm_wordsize = 256;
2227 			sc->sc_nvm_addrbits = 8;
2228 		} else {
2229 			sc->sc_nvm_wordsize = 64;
2230 			sc->sc_nvm_addrbits = 6;
2231 		}
2232 		sc->sc_flags |= WM_F_LOCK_EECD;
2233 		sc->nvm.acquire = wm_get_eecd;
2234 		sc->nvm.release = wm_put_eecd;
2235 		break;
2236 	case WM_T_82541:
2237 	case WM_T_82541_2:
2238 	case WM_T_82547:
2239 	case WM_T_82547_2:
2240 		reg = CSR_READ(sc, WMREG_EECD);
2241 		/*
2242 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI in it only
2243 		 * on 8254[17], so set flags and functios before calling it.
2244 		 */
2245 		sc->sc_flags |= WM_F_LOCK_EECD;
2246 		sc->nvm.acquire = wm_get_eecd;
2247 		sc->nvm.release = wm_put_eecd;
2248 		if (reg & EECD_EE_TYPE) {
2249 			/* SPI */
2250 			sc->nvm.read = wm_nvm_read_spi;
2251 			sc->sc_flags |= WM_F_EEPROM_SPI;
2252 			wm_nvm_set_addrbits_size_eecd(sc);
2253 		} else {
2254 			/* Microwire */
2255 			sc->nvm.read = wm_nvm_read_uwire;
2256 			if ((reg & EECD_EE_ABITS) != 0) {
2257 				sc->sc_nvm_wordsize = 256;
2258 				sc->sc_nvm_addrbits = 8;
2259 			} else {
2260 				sc->sc_nvm_wordsize = 64;
2261 				sc->sc_nvm_addrbits = 6;
2262 			}
2263 		}
2264 		break;
2265 	case WM_T_82571:
2266 	case WM_T_82572:
2267 		/* SPI */
2268 		sc->nvm.read = wm_nvm_read_eerd;
2269 		/* Not use WM_F_LOCK_EECD because we use EERD */
2270 		sc->sc_flags |= WM_F_EEPROM_SPI;
2271 		wm_nvm_set_addrbits_size_eecd(sc);
2272 		sc->phy.acquire = wm_get_swsm_semaphore;
2273 		sc->phy.release = wm_put_swsm_semaphore;
2274 		sc->nvm.acquire = wm_get_nvm_82571;
2275 		sc->nvm.release = wm_put_nvm_82571;
2276 		break;
2277 	case WM_T_82573:
2278 	case WM_T_82574:
2279 	case WM_T_82583:
2280 		sc->nvm.read = wm_nvm_read_eerd;
2281 		/* Not use WM_F_LOCK_EECD because we use EERD */
2282 		if (sc->sc_type == WM_T_82573) {
2283 			sc->phy.acquire = wm_get_swsm_semaphore;
2284 			sc->phy.release = wm_put_swsm_semaphore;
2285 			sc->nvm.acquire = wm_get_nvm_82571;
2286 			sc->nvm.release = wm_put_nvm_82571;
2287 		} else {
2288 			/* Both PHY and NVM use the same semaphore. */
2289 			sc->phy.acquire = sc->nvm.acquire
2290 			    = wm_get_swfwhw_semaphore;
2291 			sc->phy.release = sc->nvm.release
2292 			    = wm_put_swfwhw_semaphore;
2293 		}
2294 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
2295 			sc->sc_flags |= WM_F_EEPROM_FLASH;
2296 			sc->sc_nvm_wordsize = 2048;
2297 		} else {
2298 			/* SPI */
2299 			sc->sc_flags |= WM_F_EEPROM_SPI;
2300 			wm_nvm_set_addrbits_size_eecd(sc);
2301 		}
2302 		break;
2303 	case WM_T_82575:
2304 	case WM_T_82576:
2305 	case WM_T_82580:
2306 	case WM_T_I350:
2307 	case WM_T_I354:
2308 	case WM_T_80003:
2309 		/* SPI */
2310 		sc->sc_flags |= WM_F_EEPROM_SPI;
2311 		wm_nvm_set_addrbits_size_eecd(sc);
2312 		if ((sc->sc_type == WM_T_80003)
2313 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
2314 			sc->nvm.read = wm_nvm_read_eerd;
2315 			/* Don't use WM_F_LOCK_EECD because we use EERD */
2316 		} else {
2317 			sc->nvm.read = wm_nvm_read_spi;
2318 			sc->sc_flags |= WM_F_LOCK_EECD;
2319 		}
2320 		sc->phy.acquire = wm_get_phy_82575;
2321 		sc->phy.release = wm_put_phy_82575;
2322 		sc->nvm.acquire = wm_get_nvm_80003;
2323 		sc->nvm.release = wm_put_nvm_80003;
2324 		break;
2325 	case WM_T_ICH8:
2326 	case WM_T_ICH9:
2327 	case WM_T_ICH10:
2328 	case WM_T_PCH:
2329 	case WM_T_PCH2:
2330 	case WM_T_PCH_LPT:
2331 		sc->nvm.read = wm_nvm_read_ich8;
2332 		/* FLASH */
2333 		sc->sc_flags |= WM_F_EEPROM_FLASH;
2334 		sc->sc_nvm_wordsize = 2048;
2335 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
2336 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2337 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2338 			aprint_error_dev(sc->sc_dev,
2339 			    "can't map FLASH registers\n");
2340 			goto out;
2341 		}
2342 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2343 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2344 		    ICH_FLASH_SECTOR_SIZE;
2345 		sc->sc_ich8_flash_bank_size =
2346 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2347 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
2348 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2349 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
2350 		sc->sc_flashreg_offset = 0;
2351 		sc->phy.acquire = wm_get_swflag_ich8lan;
2352 		sc->phy.release = wm_put_swflag_ich8lan;
2353 		sc->nvm.acquire = wm_get_nvm_ich8lan;
2354 		sc->nvm.release = wm_put_nvm_ich8lan;
2355 		break;
2356 	case WM_T_PCH_SPT:
2357 	case WM_T_PCH_CNP:
2358 		sc->nvm.read = wm_nvm_read_spt;
2359 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
2360 		sc->sc_flags |= WM_F_EEPROM_FLASH;
2361 		sc->sc_flasht = sc->sc_st;
2362 		sc->sc_flashh = sc->sc_sh;
2363 		sc->sc_ich8_flash_base = 0;
2364 		sc->sc_nvm_wordsize =
2365 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
2366 		    * NVM_SIZE_MULTIPLIER;
2367 		/* It is size in bytes, we want words */
2368 		sc->sc_nvm_wordsize /= 2;
2369 		/* Assume 2 banks */
2370 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
2371 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
2372 		sc->phy.acquire = wm_get_swflag_ich8lan;
2373 		sc->phy.release = wm_put_swflag_ich8lan;
2374 		sc->nvm.acquire = wm_get_nvm_ich8lan;
2375 		sc->nvm.release = wm_put_nvm_ich8lan;
2376 		break;
2377 	case WM_T_I210:
2378 	case WM_T_I211:
2379 		/* Allow a single clear of the SW semaphore on I210 and newer*/
2380 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
2381 		if (wm_nvm_flash_presence_i210(sc)) {
2382 			sc->nvm.read = wm_nvm_read_eerd;
2383 			/* Don't use WM_F_LOCK_EECD because we use EERD */
2384 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2385 			wm_nvm_set_addrbits_size_eecd(sc);
2386 		} else {
2387 			sc->nvm.read = wm_nvm_read_invm;
2388 			sc->sc_flags |= WM_F_EEPROM_INVM;
2389 			sc->sc_nvm_wordsize = INVM_SIZE;
2390 		}
2391 		sc->phy.acquire = wm_get_phy_82575;
2392 		sc->phy.release = wm_put_phy_82575;
2393 		sc->nvm.acquire = wm_get_nvm_80003;
2394 		sc->nvm.release = wm_put_nvm_80003;
2395 		break;
2396 	default:
2397 		break;
2398 	}
2399 
2400 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
2401 	switch (sc->sc_type) {
2402 	case WM_T_82571:
2403 	case WM_T_82572:
2404 		reg = CSR_READ(sc, WMREG_SWSM2);
2405 		if ((reg & SWSM2_LOCK) == 0) {
2406 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2407 			force_clear_smbi = true;
2408 		} else
2409 			force_clear_smbi = false;
2410 		break;
2411 	case WM_T_82573:
2412 	case WM_T_82574:
2413 	case WM_T_82583:
2414 		force_clear_smbi = true;
2415 		break;
2416 	default:
2417 		force_clear_smbi = false;
2418 		break;
2419 	}
2420 	if (force_clear_smbi) {
2421 		reg = CSR_READ(sc, WMREG_SWSM);
2422 		if ((reg & SWSM_SMBI) != 0)
2423 			aprint_error_dev(sc->sc_dev,
2424 			    "Please update the Bootagent\n");
2425 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2426 	}
2427 
2428 	/*
2429 	 * Defer printing the EEPROM type until after verifying the checksum
2430 	 * This allows the EEPROM type to be printed correctly in the case
2431 	 * that no EEPROM is attached.
2432 	 */
2433 	/*
2434 	 * Validate the EEPROM checksum. If the checksum fails, flag
2435 	 * this for later, so we can fail future reads from the EEPROM.
2436 	 */
2437 	if (wm_nvm_validate_checksum(sc)) {
2438 		/*
2439 		 * Read twice again because some PCI-e parts fail the
2440 		 * first check due to the link being in sleep state.
2441 		 */
2442 		if (wm_nvm_validate_checksum(sc))
2443 			sc->sc_flags |= WM_F_EEPROM_INVALID;
2444 	}
2445 
2446 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
2447 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2448 	else {
2449 		aprint_verbose_dev(sc->sc_dev, "%u words ",
2450 		    sc->sc_nvm_wordsize);
2451 		if (sc->sc_flags & WM_F_EEPROM_INVM)
2452 			aprint_verbose("iNVM");
2453 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2454 			aprint_verbose("FLASH(HW)");
2455 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2456 			aprint_verbose("FLASH");
2457 		else {
2458 			if (sc->sc_flags & WM_F_EEPROM_SPI)
2459 				eetype = "SPI";
2460 			else
2461 				eetype = "MicroWire";
2462 			aprint_verbose("(%d address bits) %s EEPROM",
2463 			    sc->sc_nvm_addrbits, eetype);
2464 		}
2465 	}
2466 	wm_nvm_version(sc);
2467 	aprint_verbose("\n");
2468 
2469 	/*
2470 	 * XXX The first call of wm_gmii_setup_phytype. The result might be
2471 	 * incorrect.
2472 	 */
2473 	wm_gmii_setup_phytype(sc, 0, 0);
2474 
2475 	/* Check for WM_F_WOL on some chips before wm_reset() */
2476 	switch (sc->sc_type) {
2477 	case WM_T_ICH8:
2478 	case WM_T_ICH9:
2479 	case WM_T_ICH10:
2480 	case WM_T_PCH:
2481 	case WM_T_PCH2:
2482 	case WM_T_PCH_LPT:
2483 	case WM_T_PCH_SPT:
2484 	case WM_T_PCH_CNP:
2485 		apme_mask = WUC_APME;
2486 		eeprom_data = CSR_READ(sc, WMREG_WUC);
2487 		if ((eeprom_data & apme_mask) != 0)
2488 			sc->sc_flags |= WM_F_WOL;
2489 		break;
2490 	default:
2491 		break;
2492 	}
2493 
2494 	/* Reset the chip to a known state. */
2495 	wm_reset(sc);
2496 
2497 	/*
2498 	 * Check for I21[01] PLL workaround.
2499 	 *
2500 	 * Three cases:
2501 	 * a) Chip is I211.
2502 	 * b) Chip is I210 and it uses INVM (not FLASH).
2503 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
2504 	 */
2505 	if (sc->sc_type == WM_T_I211)
2506 		sc->sc_flags |= WM_F_PLL_WA_I210;
2507 	if (sc->sc_type == WM_T_I210) {
2508 		if (!wm_nvm_flash_presence_i210(sc))
2509 			sc->sc_flags |= WM_F_PLL_WA_I210;
2510 		else if ((sc->sc_nvm_ver_major < 3)
2511 		    || ((sc->sc_nvm_ver_major == 3)
2512 			&& (sc->sc_nvm_ver_minor < 25))) {
2513 			aprint_verbose_dev(sc->sc_dev,
2514 			    "ROM image version %d.%d is older than 3.25\n",
2515 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2516 			sc->sc_flags |= WM_F_PLL_WA_I210;
2517 		}
2518 	}
2519 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2520 		wm_pll_workaround_i210(sc);
2521 
2522 	wm_get_wakeup(sc);
2523 
2524 	/* Non-AMT based hardware can now take control from firmware */
2525 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2526 		wm_get_hw_control(sc);
2527 
2528 	/*
2529 	 * Read the Ethernet address from the EEPROM, if not first found
2530 	 * in device properties.
2531 	 */
2532 	ea = prop_dictionary_get(dict, "mac-address");
2533 	if (ea != NULL) {
2534 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2535 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2536 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
2537 	} else {
2538 		if (wm_read_mac_addr(sc, enaddr) != 0) {
2539 			aprint_error_dev(sc->sc_dev,
2540 			    "unable to read Ethernet address\n");
2541 			goto out;
2542 		}
2543 	}
2544 
2545 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2546 	    ether_sprintf(enaddr));
2547 
2548 	/*
2549 	 * Read the config info from the EEPROM, and set up various
2550 	 * bits in the control registers based on their contents.
2551 	 */
2552 	pn = prop_dictionary_get(dict, "i82543-cfg1");
2553 	if (pn != NULL) {
2554 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2555 		cfg1 = (uint16_t) prop_number_integer_value(pn);
2556 	} else {
2557 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2558 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2559 			goto out;
2560 		}
2561 	}
2562 
2563 	pn = prop_dictionary_get(dict, "i82543-cfg2");
2564 	if (pn != NULL) {
2565 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2566 		cfg2 = (uint16_t) prop_number_integer_value(pn);
2567 	} else {
2568 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2569 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2570 			goto out;
2571 		}
2572 	}
2573 
2574 	/* check for WM_F_WOL */
2575 	switch (sc->sc_type) {
2576 	case WM_T_82542_2_0:
2577 	case WM_T_82542_2_1:
2578 	case WM_T_82543:
2579 		/* dummy? */
2580 		eeprom_data = 0;
2581 		apme_mask = NVM_CFG3_APME;
2582 		break;
2583 	case WM_T_82544:
2584 		apme_mask = NVM_CFG2_82544_APM_EN;
2585 		eeprom_data = cfg2;
2586 		break;
2587 	case WM_T_82546:
2588 	case WM_T_82546_3:
2589 	case WM_T_82571:
2590 	case WM_T_82572:
2591 	case WM_T_82573:
2592 	case WM_T_82574:
2593 	case WM_T_82583:
2594 	case WM_T_80003:
2595 	case WM_T_82575:
2596 	case WM_T_82576:
2597 		apme_mask = NVM_CFG3_APME;
2598 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2599 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2600 		break;
2601 	case WM_T_82580:
2602 	case WM_T_I350:
2603 	case WM_T_I354:
2604 	case WM_T_I210:
2605 	case WM_T_I211:
2606 		apme_mask = NVM_CFG3_APME;
2607 		wm_nvm_read(sc,
2608 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2609 		    1, &eeprom_data);
2610 		break;
2611 	case WM_T_ICH8:
2612 	case WM_T_ICH9:
2613 	case WM_T_ICH10:
2614 	case WM_T_PCH:
2615 	case WM_T_PCH2:
2616 	case WM_T_PCH_LPT:
2617 	case WM_T_PCH_SPT:
2618 	case WM_T_PCH_CNP:
2619 		/* Already checked before wm_reset () */
2620 		apme_mask = eeprom_data = 0;
2621 		break;
2622 	default: /* XXX 82540 */
2623 		apme_mask = NVM_CFG3_APME;
2624 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2625 		break;
2626 	}
2627 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2628 	if ((eeprom_data & apme_mask) != 0)
2629 		sc->sc_flags |= WM_F_WOL;
2630 
2631 	/*
2632 	 * We have the eeprom settings, now apply the special cases
2633 	 * where the eeprom may be wrong or the board won't support
2634 	 * wake on lan on a particular port
2635 	 */
2636 	switch (sc->sc_pcidevid) {
2637 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
2638 		sc->sc_flags &= ~WM_F_WOL;
2639 		break;
2640 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
2641 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
2642 		/* Wake events only supported on port A for dual fiber
2643 		 * regardless of eeprom setting */
2644 		if (sc->sc_funcid == 1)
2645 			sc->sc_flags &= ~WM_F_WOL;
2646 		break;
2647 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
2648 		/* If quad port adapter, disable WoL on all but port A */
2649 		if (sc->sc_funcid != 0)
2650 			sc->sc_flags &= ~WM_F_WOL;
2651 		break;
2652 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
2653 		/* Wake events only supported on port A for dual fiber
2654 		 * regardless of eeprom setting */
2655 		if (sc->sc_funcid == 1)
2656 			sc->sc_flags &= ~WM_F_WOL;
2657 		break;
2658 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
2659 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
2660 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
2661 		/* If quad port adapter, disable WoL on all but port A */
2662 		if (sc->sc_funcid != 0)
2663 			sc->sc_flags &= ~WM_F_WOL;
2664 		break;
2665 	}
2666 
2667 	if (sc->sc_type >= WM_T_82575) {
2668 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2669 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
2670 			    nvmword);
2671 			if ((sc->sc_type == WM_T_82575) ||
2672 			    (sc->sc_type == WM_T_82576)) {
2673 				/* Check NVM for autonegotiation */
2674 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
2675 				    != 0)
2676 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2677 			}
2678 			if ((sc->sc_type == WM_T_82575) ||
2679 			    (sc->sc_type == WM_T_I350)) {
2680 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
2681 					sc->sc_flags |= WM_F_MAS;
2682 			}
2683 		}
2684 	}
2685 
2686 	/*
2687 	 * XXX need special handling for some multiple port cards
2688 	 * to disable a paticular port.
2689 	 */
2690 
2691 	if (sc->sc_type >= WM_T_82544) {
2692 		pn = prop_dictionary_get(dict, "i82543-swdpin");
2693 		if (pn != NULL) {
2694 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2695 			swdpin = (uint16_t) prop_number_integer_value(pn);
2696 		} else {
2697 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2698 				aprint_error_dev(sc->sc_dev,
2699 				    "unable to read SWDPIN\n");
2700 				goto out;
2701 			}
2702 		}
2703 	}
2704 
2705 	if (cfg1 & NVM_CFG1_ILOS)
2706 		sc->sc_ctrl |= CTRL_ILOS;
2707 
2708 	/*
2709 	 * XXX
2710 	 * This code isn't correct because pin 2 and 3 are located
2711 	 * in different position on newer chips. Check all datasheet.
2712 	 *
2713 	 * Until resolve this problem, check if a chip < 82580
2714 	 */
2715 	if (sc->sc_type <= WM_T_82580) {
2716 		if (sc->sc_type >= WM_T_82544) {
2717 			sc->sc_ctrl |=
2718 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2719 			    CTRL_SWDPIO_SHIFT;
2720 			sc->sc_ctrl |=
2721 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2722 			    CTRL_SWDPINS_SHIFT;
2723 		} else {
2724 			sc->sc_ctrl |=
2725 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2726 			    CTRL_SWDPIO_SHIFT;
2727 		}
2728 	}
2729 
2730 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
2731 		wm_nvm_read(sc,
2732 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2733 		    1, &nvmword);
2734 		if (nvmword & NVM_CFG3_ILOS)
2735 			sc->sc_ctrl |= CTRL_ILOS;
2736 	}
2737 
2738 #if 0
2739 	if (sc->sc_type >= WM_T_82544) {
2740 		if (cfg1 & NVM_CFG1_IPS0)
2741 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2742 		if (cfg1 & NVM_CFG1_IPS1)
2743 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2744 		sc->sc_ctrl_ext |=
2745 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2746 		    CTRL_EXT_SWDPIO_SHIFT;
2747 		sc->sc_ctrl_ext |=
2748 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2749 		    CTRL_EXT_SWDPINS_SHIFT;
2750 	} else {
2751 		sc->sc_ctrl_ext |=
2752 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2753 		    CTRL_EXT_SWDPIO_SHIFT;
2754 	}
2755 #endif
2756 
2757 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2758 #if 0
2759 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2760 #endif
2761 
2762 	if (sc->sc_type == WM_T_PCH) {
2763 		uint16_t val;
2764 
2765 		/* Save the NVM K1 bit setting */
2766 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2767 
2768 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2769 			sc->sc_nvm_k1_enabled = 1;
2770 		else
2771 			sc->sc_nvm_k1_enabled = 0;
2772 	}
2773 
2774 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
2775 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2776 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2777 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2778 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
2779 	    || sc->sc_type == WM_T_82573
2780 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2781 		/* Copper only */
2782 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2783 	    || (sc->sc_type ==WM_T_82580) || (sc->sc_type ==WM_T_I350)
2784 	    || (sc->sc_type ==WM_T_I354) || (sc->sc_type ==WM_T_I210)
2785 	    || (sc->sc_type ==WM_T_I211)) {
2786 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
2787 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2788 		switch (link_mode) {
2789 		case CTRL_EXT_LINK_MODE_1000KX:
2790 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
2791 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2792 			break;
2793 		case CTRL_EXT_LINK_MODE_SGMII:
2794 			if (wm_sgmii_uses_mdio(sc)) {
2795 				aprint_normal_dev(sc->sc_dev,
2796 				    "SGMII(MDIO)\n");
2797 				sc->sc_flags |= WM_F_SGMII;
2798 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2799 				break;
2800 			}
2801 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2802 			/*FALLTHROUGH*/
2803 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2804 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
2805 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2806 				if (link_mode
2807 				    == CTRL_EXT_LINK_MODE_SGMII) {
2808 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2809 					sc->sc_flags |= WM_F_SGMII;
2810 					aprint_verbose_dev(sc->sc_dev,
2811 					    "SGMII\n");
2812 				} else {
2813 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2814 					aprint_verbose_dev(sc->sc_dev,
2815 					    "SERDES\n");
2816 				}
2817 				break;
2818 			}
2819 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2820 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
2821 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2822 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
2823 				sc->sc_flags |= WM_F_SGMII;
2824 			}
2825 			/* Do not change link mode for 100BaseFX */
2826 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
2827 				break;
2828 
2829 			/* Change current link mode setting */
2830 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
2831 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2832 				reg |= CTRL_EXT_LINK_MODE_SGMII;
2833 			else
2834 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2835 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2836 			break;
2837 		case CTRL_EXT_LINK_MODE_GMII:
2838 		default:
2839 			aprint_normal_dev(sc->sc_dev, "Copper\n");
2840 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2841 			break;
2842 		}
2843 
2844 		reg &= ~CTRL_EXT_I2C_ENA;
2845 		if ((sc->sc_flags & WM_F_SGMII) != 0)
2846 			reg |= CTRL_EXT_I2C_ENA;
2847 		else
2848 			reg &= ~CTRL_EXT_I2C_ENA;
2849 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2850 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
2851 			wm_gmii_setup_phytype(sc, 0, 0);
2852 			wm_reset_mdicnfg_82580(sc);
2853 		}
2854 	} else if (sc->sc_type < WM_T_82543 ||
2855 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2856 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2857 			aprint_error_dev(sc->sc_dev,
2858 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
2859 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2860 		}
2861 	} else {
2862 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
2863 			aprint_error_dev(sc->sc_dev,
2864 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2865 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2866 		}
2867 	}
2868 
2869 	if (sc->sc_type >= WM_T_PCH2)
2870 		sc->sc_flags |= WM_F_EEE;
2871 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
2872 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
2873 		/* XXX: Need special handling for I354. (not yet) */
2874 		if (sc->sc_type != WM_T_I354)
2875 			sc->sc_flags |= WM_F_EEE;
2876 	}
2877 
2878 	/* Set device properties (macflags) */
2879 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
2880 
2881 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
2882 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
2883 
2884 #ifdef WM_MPSAFE
2885 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2886 #else
2887 	sc->sc_core_lock = NULL;
2888 #endif
2889 
2890 	/* Initialize the media structures accordingly. */
2891 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2892 		wm_gmii_mediainit(sc, wmp->wmp_product);
2893 	else
2894 		wm_tbi_mediainit(sc); /* All others */
2895 
2896 	ifp = &sc->sc_ethercom.ec_if;
2897 	xname = device_xname(sc->sc_dev);
2898 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2899 	ifp->if_softc = sc;
2900 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2901 #ifdef WM_MPSAFE
2902 	ifp->if_extflags = IFEF_MPSAFE;
2903 #endif
2904 	ifp->if_ioctl = wm_ioctl;
2905 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
2906 		ifp->if_start = wm_nq_start;
2907 		/*
2908 		 * When the number of CPUs is one and the controller can use
2909 		 * MSI-X, wm(4) use MSI-X but *does not* use multiqueue.
2910 		 * That is, wm(4) use two interrupts, one is used for Tx/Rx
2911 		 * and the other is used for link status changing.
2912 		 * In this situation, wm_nq_transmit() is disadvantageous
2913 		 * because of wm_select_txqueue() and pcq(9) overhead.
2914 		 */
2915 		if (wm_is_using_multiqueue(sc))
2916 			ifp->if_transmit = wm_nq_transmit;
2917 	} else {
2918 		ifp->if_start = wm_start;
2919 		/*
2920 		 * wm_transmit() has the same disadvantage as wm_transmit().
2921 		 */
2922 		if (wm_is_using_multiqueue(sc))
2923 			ifp->if_transmit = wm_transmit;
2924 	}
2925 	/* wm(4) doest not use ifp->if_watchdog, use wm_tick as watchdog. */
2926 	ifp->if_init = wm_init;
2927 	ifp->if_stop = wm_stop;
2928 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
2929 	IFQ_SET_READY(&ifp->if_snd);
2930 
2931 	/* Check for jumbo frame */
2932 	switch (sc->sc_type) {
2933 	case WM_T_82573:
2934 		/* XXX limited to 9234 if ASPM is disabled */
2935 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2936 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2937 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2938 		break;
2939 	case WM_T_82571:
2940 	case WM_T_82572:
2941 	case WM_T_82574:
2942 	case WM_T_82583:
2943 	case WM_T_82575:
2944 	case WM_T_82576:
2945 	case WM_T_82580:
2946 	case WM_T_I350:
2947 	case WM_T_I354:
2948 	case WM_T_I210:
2949 	case WM_T_I211:
2950 	case WM_T_80003:
2951 	case WM_T_ICH9:
2952 	case WM_T_ICH10:
2953 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
2954 	case WM_T_PCH_LPT:
2955 	case WM_T_PCH_SPT:
2956 	case WM_T_PCH_CNP:
2957 		/* XXX limited to 9234 */
2958 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2959 		break;
2960 	case WM_T_PCH:
2961 		/* XXX limited to 4096 */
2962 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2963 		break;
2964 	case WM_T_82542_2_0:
2965 	case WM_T_82542_2_1:
2966 	case WM_T_ICH8:
2967 		/* No support for jumbo frame */
2968 		break;
2969 	default:
2970 		/* ETHER_MAX_LEN_JUMBO */
2971 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2972 		break;
2973 	}
2974 
2975 	/* If we're a i82543 or greater, we can support VLANs. */
2976 	if (sc->sc_type >= WM_T_82543) {
2977 		sc->sc_ethercom.ec_capabilities |=
2978 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2979 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
2980 	}
2981 
2982 	if ((sc->sc_flags & WM_F_EEE) != 0)
2983 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
2984 
2985 	/*
2986 	 * We can perform TCPv4 and UDPv4 checkums in-bound.  Only
2987 	 * on i82543 and later.
2988 	 */
2989 	if (sc->sc_type >= WM_T_82543) {
2990 		ifp->if_capabilities |=
2991 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2992 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2993 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2994 		    IFCAP_CSUM_TCPv6_Tx |
2995 		    IFCAP_CSUM_UDPv6_Tx;
2996 	}
2997 
2998 	/*
2999 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
3000 	 *
3001 	 *	82541GI (8086:1076) ... no
3002 	 *	82572EI (8086:10b9) ... yes
3003 	 */
3004 	if (sc->sc_type >= WM_T_82571) {
3005 		ifp->if_capabilities |=
3006 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
3007 	}
3008 
3009 	/*
3010 	 * If we're a i82544 or greater (except i82547), we can do
3011 	 * TCP segmentation offload.
3012 	 */
3013 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
3014 		ifp->if_capabilities |= IFCAP_TSOv4;
3015 	}
3016 
3017 	if (sc->sc_type >= WM_T_82571) {
3018 		ifp->if_capabilities |= IFCAP_TSOv6;
3019 	}
3020 
3021 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
3022 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
3023 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
3024 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
3025 
3026 	/* Attach the interface. */
3027 	error = if_initialize(ifp);
3028 	if (error != 0) {
3029 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
3030 		    error);
3031 		return; /* Error */
3032 	}
3033 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
3034 	ether_ifattach(ifp, enaddr);
3035 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
3036 	if_register(ifp);
3037 
3038 #ifdef WM_EVENT_COUNTERS
3039 	/* Attach event counters. */
3040 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
3041 	    NULL, xname, "linkintr");
3042 
3043 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
3044 	    NULL, xname, "tx_xoff");
3045 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
3046 	    NULL, xname, "tx_xon");
3047 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
3048 	    NULL, xname, "rx_xoff");
3049 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
3050 	    NULL, xname, "rx_xon");
3051 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
3052 	    NULL, xname, "rx_macctl");
3053 #endif /* WM_EVENT_COUNTERS */
3054 
3055 	sc->sc_txrx_use_workqueue = false;
3056 
3057 	wm_init_sysctls(sc);
3058 
3059 	if (pmf_device_register(self, wm_suspend, wm_resume))
3060 		pmf_class_network_register(self, ifp);
3061 	else
3062 		aprint_error_dev(self, "couldn't establish power handler\n");
3063 
3064 	sc->sc_flags |= WM_F_ATTACHED;
3065 out:
3066 	return;
3067 }
3068 
3069 /* The detach function (ca_detach) */
3070 static int
3071 wm_detach(device_t self, int flags __unused)
3072 {
3073 	struct wm_softc *sc = device_private(self);
3074 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3075 	int i;
3076 
3077 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
3078 		return 0;
3079 
3080 	/* Stop the interface. Callouts are stopped in it. */
3081 	wm_stop(ifp, 1);
3082 
3083 	pmf_device_deregister(self);
3084 
3085 	sysctl_teardown(&sc->sc_sysctllog);
3086 
3087 #ifdef WM_EVENT_COUNTERS
3088 	evcnt_detach(&sc->sc_ev_linkintr);
3089 
3090 	evcnt_detach(&sc->sc_ev_tx_xoff);
3091 	evcnt_detach(&sc->sc_ev_tx_xon);
3092 	evcnt_detach(&sc->sc_ev_rx_xoff);
3093 	evcnt_detach(&sc->sc_ev_rx_xon);
3094 	evcnt_detach(&sc->sc_ev_rx_macctl);
3095 #endif /* WM_EVENT_COUNTERS */
3096 
3097 	/* Tell the firmware about the release */
3098 	WM_CORE_LOCK(sc);
3099 	wm_release_manageability(sc);
3100 	wm_release_hw_control(sc);
3101 	wm_enable_wakeup(sc);
3102 	WM_CORE_UNLOCK(sc);
3103 
3104 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
3105 
3106 	ether_ifdetach(ifp);
3107 	if_detach(ifp);
3108 	if_percpuq_destroy(sc->sc_ipq);
3109 
3110 	/* Delete all remaining media. */
3111 	ifmedia_fini(&sc->sc_mii.mii_media);
3112 
3113 	/* Unload RX dmamaps and free mbufs */
3114 	for (i = 0; i < sc->sc_nqueues; i++) {
3115 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
3116 		mutex_enter(rxq->rxq_lock);
3117 		wm_rxdrain(rxq);
3118 		mutex_exit(rxq->rxq_lock);
3119 	}
3120 	/* Must unlock here */
3121 
3122 	/* Disestablish the interrupt handler */
3123 	for (i = 0; i < sc->sc_nintrs; i++) {
3124 		if (sc->sc_ihs[i] != NULL) {
3125 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
3126 			sc->sc_ihs[i] = NULL;
3127 		}
3128 	}
3129 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
3130 
3131 	/* wm_stop() ensure workqueue is stopped. */
3132 	workqueue_destroy(sc->sc_queue_wq);
3133 
3134 	for (i = 0; i < sc->sc_nqueues; i++)
3135 		softint_disestablish(sc->sc_queue[i].wmq_si);
3136 
3137 	wm_free_txrx_queues(sc);
3138 
3139 	/* Unmap the registers */
3140 	if (sc->sc_ss) {
3141 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
3142 		sc->sc_ss = 0;
3143 	}
3144 	if (sc->sc_ios) {
3145 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
3146 		sc->sc_ios = 0;
3147 	}
3148 	if (sc->sc_flashs) {
3149 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
3150 		sc->sc_flashs = 0;
3151 	}
3152 
3153 	if (sc->sc_core_lock)
3154 		mutex_obj_free(sc->sc_core_lock);
3155 	if (sc->sc_ich_phymtx)
3156 		mutex_obj_free(sc->sc_ich_phymtx);
3157 	if (sc->sc_ich_nvmmtx)
3158 		mutex_obj_free(sc->sc_ich_nvmmtx);
3159 
3160 	return 0;
3161 }
3162 
3163 static bool
3164 wm_suspend(device_t self, const pmf_qual_t *qual)
3165 {
3166 	struct wm_softc *sc = device_private(self);
3167 
3168 	wm_release_manageability(sc);
3169 	wm_release_hw_control(sc);
3170 	wm_enable_wakeup(sc);
3171 
3172 	return true;
3173 }
3174 
3175 static bool
3176 wm_resume(device_t self, const pmf_qual_t *qual)
3177 {
3178 	struct wm_softc *sc = device_private(self);
3179 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3180 	pcireg_t reg;
3181 	char buf[256];
3182 
3183 	reg = CSR_READ(sc, WMREG_WUS);
3184 	if (reg != 0) {
3185 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
3186 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
3187 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
3188 	}
3189 
3190 	if (sc->sc_type >= WM_T_PCH2)
3191 		wm_resume_workarounds_pchlan(sc);
3192 	if ((ifp->if_flags & IFF_UP) == 0) {
3193 		wm_reset(sc);
3194 		/* Non-AMT based hardware can now take control from firmware */
3195 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
3196 			wm_get_hw_control(sc);
3197 		wm_init_manageability(sc);
3198 	} else {
3199 		/*
3200 		 * We called pmf_class_network_register(), so if_init() is
3201 		 * automatically called when IFF_UP. wm_reset(),
3202 		 * wm_get_hw_control() and wm_init_manageability() are called
3203 		 * via wm_init().
3204 		 */
3205 	}
3206 
3207 	return true;
3208 }
3209 
3210 /*
3211  * wm_watchdog:		[ifnet interface function]
3212  *
3213  *	Watchdog timer handler.
3214  */
3215 static void
3216 wm_watchdog(struct ifnet *ifp)
3217 {
3218 	int qid;
3219 	struct wm_softc *sc = ifp->if_softc;
3220 	uint16_t hang_queue = 0; /* Max queue number of wm(4) is 82576's 16. */
3221 
3222 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
3223 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
3224 
3225 		wm_watchdog_txq(ifp, txq, &hang_queue);
3226 	}
3227 
3228 	/* IF any of queues hanged up, reset the interface. */
3229 	if (hang_queue != 0) {
3230 		(void)wm_init(ifp);
3231 
3232 		/*
3233 		 * There are still some upper layer processing which call
3234 		 * ifp->if_start(). e.g. ALTQ or one CPU system
3235 		 */
3236 		/* Try to get more packets going. */
3237 		ifp->if_start(ifp);
3238 	}
3239 }
3240 
3241 
3242 static void
3243 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
3244 {
3245 
3246 	mutex_enter(txq->txq_lock);
3247 	if (txq->txq_sending &&
3248 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
3249 		wm_watchdog_txq_locked(ifp, txq, hang);
3250 
3251 	mutex_exit(txq->txq_lock);
3252 }
3253 
3254 static void
3255 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
3256     uint16_t *hang)
3257 {
3258 	struct wm_softc *sc = ifp->if_softc;
3259 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
3260 
3261 	KASSERT(mutex_owned(txq->txq_lock));
3262 
3263 	/*
3264 	 * Since we're using delayed interrupts, sweep up
3265 	 * before we report an error.
3266 	 */
3267 	wm_txeof(txq, UINT_MAX);
3268 
3269 	if (txq->txq_sending)
3270 		*hang |= __BIT(wmq->wmq_id);
3271 
3272 	if (txq->txq_free == WM_NTXDESC(txq)) {
3273 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
3274 		    device_xname(sc->sc_dev));
3275 	} else {
3276 #ifdef WM_DEBUG
3277 		int i, j;
3278 		struct wm_txsoft *txs;
3279 #endif
3280 		log(LOG_ERR,
3281 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3282 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
3283 		    txq->txq_next);
3284 		if_statinc(ifp, if_oerrors);
3285 #ifdef WM_DEBUG
3286 		for (i = txq->txq_sdirty; i != txq->txq_snext;
3287 		    i = WM_NEXTTXS(txq, i)) {
3288 			txs = &txq->txq_soft[i];
3289 			printf("txs %d tx %d -> %d\n",
3290 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
3291 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
3292 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3293 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3294 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
3295 					printf("\t %#08x%08x\n",
3296 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
3297 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
3298 				} else {
3299 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3300 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
3301 					    txq->txq_descs[j].wtx_addr.wa_low);
3302 					printf("\t %#04x%02x%02x%08x\n",
3303 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
3304 					    txq->txq_descs[j].wtx_fields.wtxu_options,
3305 					    txq->txq_descs[j].wtx_fields.wtxu_status,
3306 					    txq->txq_descs[j].wtx_cmdlen);
3307 				}
3308 				if (j == txs->txs_lastdesc)
3309 					break;
3310 			}
3311 		}
3312 #endif
3313 	}
3314 }
3315 
3316 /*
3317  * wm_tick:
3318  *
3319  *	One second timer, used to check link status, sweep up
3320  *	completed transmit jobs, etc.
3321  */
3322 static void
3323 wm_tick(void *arg)
3324 {
3325 	struct wm_softc *sc = arg;
3326 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3327 #ifndef WM_MPSAFE
3328 	int s = splnet();
3329 #endif
3330 
3331 	WM_CORE_LOCK(sc);
3332 
3333 	if (sc->sc_core_stopping) {
3334 		WM_CORE_UNLOCK(sc);
3335 #ifndef WM_MPSAFE
3336 		splx(s);
3337 #endif
3338 		return;
3339 	}
3340 
3341 	if (sc->sc_type >= WM_T_82542_2_1) {
3342 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3343 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3344 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3345 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3346 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3347 	}
3348 
3349 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
3350 	if_statadd_ref(nsr, if_collisions, CSR_READ(sc, WMREG_COLC));
3351 	if_statadd_ref(nsr, if_ierrors, 0ULL /* ensure quad_t */
3352 	    + CSR_READ(sc, WMREG_CRCERRS)
3353 	    + CSR_READ(sc, WMREG_ALGNERRC)
3354 	    + CSR_READ(sc, WMREG_SYMERRC)
3355 	    + CSR_READ(sc, WMREG_RXERRC)
3356 	    + CSR_READ(sc, WMREG_SEC)
3357 	    + CSR_READ(sc, WMREG_CEXTERR)
3358 	    + CSR_READ(sc, WMREG_RLEC));
3359 	/*
3360 	 * WMREG_RNBC is incremented when there is no available buffers in host
3361 	 * memory. It does not mean the number of dropped packet. Because
3362 	 * ethernet controller can receive packets in such case if there is
3363 	 * space in phy's FIFO.
3364 	 *
3365 	 * If you want to know the nubmer of WMREG_RMBC, you should use such as
3366 	 * own EVCNT instead of if_iqdrops.
3367 	 */
3368 	if_statadd_ref(nsr, if_iqdrops, CSR_READ(sc, WMREG_MPC));
3369 	IF_STAT_PUTREF(ifp);
3370 
3371 	if (sc->sc_flags & WM_F_HAS_MII)
3372 		mii_tick(&sc->sc_mii);
3373 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
3374 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3375 		wm_serdes_tick(sc);
3376 	else
3377 		wm_tbi_tick(sc);
3378 
3379 	WM_CORE_UNLOCK(sc);
3380 
3381 	wm_watchdog(ifp);
3382 
3383 	callout_schedule(&sc->sc_tick_ch, hz);
3384 }
3385 
3386 static int
3387 wm_ifflags_cb(struct ethercom *ec)
3388 {
3389 	struct ifnet *ifp = &ec->ec_if;
3390 	struct wm_softc *sc = ifp->if_softc;
3391 	u_short iffchange;
3392 	int ecchange;
3393 	bool needreset = false;
3394 	int rc = 0;
3395 
3396 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3397 		device_xname(sc->sc_dev), __func__));
3398 
3399 	WM_CORE_LOCK(sc);
3400 
3401 	/*
3402 	 * Check for if_flags.
3403 	 * Main usage is to prevent linkdown when opening bpf.
3404 	 */
3405 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
3406 	sc->sc_if_flags = ifp->if_flags;
3407 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
3408 		needreset = true;
3409 		goto ec;
3410 	}
3411 
3412 	/* iff related updates */
3413 	if ((iffchange & IFF_PROMISC) != 0)
3414 		wm_set_filter(sc);
3415 
3416 	wm_set_vlan(sc);
3417 
3418 ec:
3419 	/* Check for ec_capenable. */
3420 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
3421 	sc->sc_ec_capenable = ec->ec_capenable;
3422 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
3423 		needreset = true;
3424 		goto out;
3425 	}
3426 
3427 	/* ec related updates */
3428 	wm_set_eee(sc);
3429 
3430 out:
3431 	if (needreset)
3432 		rc = ENETRESET;
3433 	WM_CORE_UNLOCK(sc);
3434 
3435 	return rc;
3436 }
3437 
3438 /*
3439  * wm_ioctl:		[ifnet interface function]
3440  *
3441  *	Handle control requests from the operator.
3442  */
3443 static int
3444 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3445 {
3446 	struct wm_softc *sc = ifp->if_softc;
3447 	struct ifreq *ifr = (struct ifreq *)data;
3448 	struct ifaddr *ifa = (struct ifaddr *)data;
3449 	struct sockaddr_dl *sdl;
3450 	int s, error;
3451 
3452 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3453 		device_xname(sc->sc_dev), __func__));
3454 
3455 #ifndef WM_MPSAFE
3456 	s = splnet();
3457 #endif
3458 	switch (cmd) {
3459 	case SIOCSIFMEDIA:
3460 		WM_CORE_LOCK(sc);
3461 		/* Flow control requires full-duplex mode. */
3462 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3463 		    (ifr->ifr_media & IFM_FDX) == 0)
3464 			ifr->ifr_media &= ~IFM_ETH_FMASK;
3465 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3466 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3467 				/* We can do both TXPAUSE and RXPAUSE. */
3468 				ifr->ifr_media |=
3469 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3470 			}
3471 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3472 		}
3473 		WM_CORE_UNLOCK(sc);
3474 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
3475 		break;
3476 	case SIOCINITIFADDR:
3477 		WM_CORE_LOCK(sc);
3478 		if (ifa->ifa_addr->sa_family == AF_LINK) {
3479 			sdl = satosdl(ifp->if_dl->ifa_addr);
3480 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3481 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3482 			/* Unicast address is the first multicast entry */
3483 			wm_set_filter(sc);
3484 			error = 0;
3485 			WM_CORE_UNLOCK(sc);
3486 			break;
3487 		}
3488 		WM_CORE_UNLOCK(sc);
3489 		/*FALLTHROUGH*/
3490 	default:
3491 #ifdef WM_MPSAFE
3492 		s = splnet();
3493 #endif
3494 		/* It may call wm_start, so unlock here */
3495 		error = ether_ioctl(ifp, cmd, data);
3496 #ifdef WM_MPSAFE
3497 		splx(s);
3498 #endif
3499 		if (error != ENETRESET)
3500 			break;
3501 
3502 		error = 0;
3503 
3504 		if (cmd == SIOCSIFCAP)
3505 			error = (*ifp->if_init)(ifp);
3506 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3507 			;
3508 		else if (ifp->if_flags & IFF_RUNNING) {
3509 			/*
3510 			 * Multicast list has changed; set the hardware filter
3511 			 * accordingly.
3512 			 */
3513 			WM_CORE_LOCK(sc);
3514 			wm_set_filter(sc);
3515 			WM_CORE_UNLOCK(sc);
3516 		}
3517 		break;
3518 	}
3519 
3520 #ifndef WM_MPSAFE
3521 	splx(s);
3522 #endif
3523 	return error;
3524 }
3525 
3526 /* MAC address related */
3527 
3528 /*
3529  * Get the offset of MAC address and return it.
3530  * If error occured, use offset 0.
3531  */
3532 static uint16_t
3533 wm_check_alt_mac_addr(struct wm_softc *sc)
3534 {
3535 	uint16_t myea[ETHER_ADDR_LEN / 2];
3536 	uint16_t offset = NVM_OFF_MACADDR;
3537 
3538 	/* Try to read alternative MAC address pointer */
3539 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
3540 		return 0;
3541 
3542 	/* Check pointer if it's valid or not. */
3543 	if ((offset == 0x0000) || (offset == 0xffff))
3544 		return 0;
3545 
3546 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
3547 	/*
3548 	 * Check whether alternative MAC address is valid or not.
3549 	 * Some cards have non 0xffff pointer but those don't use
3550 	 * alternative MAC address in reality.
3551 	 *
3552 	 * Check whether the broadcast bit is set or not.
3553 	 */
3554 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
3555 		if (((myea[0] & 0xff) & 0x01) == 0)
3556 			return offset; /* Found */
3557 
3558 	/* Not found */
3559 	return 0;
3560 }
3561 
3562 static int
3563 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
3564 {
3565 	uint16_t myea[ETHER_ADDR_LEN / 2];
3566 	uint16_t offset = NVM_OFF_MACADDR;
3567 	int do_invert = 0;
3568 
3569 	switch (sc->sc_type) {
3570 	case WM_T_82580:
3571 	case WM_T_I350:
3572 	case WM_T_I354:
3573 		/* EEPROM Top Level Partitioning */
3574 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
3575 		break;
3576 	case WM_T_82571:
3577 	case WM_T_82575:
3578 	case WM_T_82576:
3579 	case WM_T_80003:
3580 	case WM_T_I210:
3581 	case WM_T_I211:
3582 		offset = wm_check_alt_mac_addr(sc);
3583 		if (offset == 0)
3584 			if ((sc->sc_funcid & 0x01) == 1)
3585 				do_invert = 1;
3586 		break;
3587 	default:
3588 		if ((sc->sc_funcid & 0x01) == 1)
3589 			do_invert = 1;
3590 		break;
3591 	}
3592 
3593 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
3594 		goto bad;
3595 
3596 	enaddr[0] = myea[0] & 0xff;
3597 	enaddr[1] = myea[0] >> 8;
3598 	enaddr[2] = myea[1] & 0xff;
3599 	enaddr[3] = myea[1] >> 8;
3600 	enaddr[4] = myea[2] & 0xff;
3601 	enaddr[5] = myea[2] >> 8;
3602 
3603 	/*
3604 	 * Toggle the LSB of the MAC address on the second port
3605 	 * of some dual port cards.
3606 	 */
3607 	if (do_invert != 0)
3608 		enaddr[5] ^= 1;
3609 
3610 	return 0;
3611 
3612  bad:
3613 	return -1;
3614 }
3615 
3616 /*
3617  * wm_set_ral:
3618  *
3619  *	Set an entery in the receive address list.
3620  */
3621 static void
3622 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3623 {
3624 	uint32_t ral_lo, ral_hi, addrl, addrh;
3625 	uint32_t wlock_mac;
3626 	int rv;
3627 
3628 	if (enaddr != NULL) {
3629 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
3630 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
3631 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
3632 		ral_hi |= RAL_AV;
3633 	} else {
3634 		ral_lo = 0;
3635 		ral_hi = 0;
3636 	}
3637 
3638 	switch (sc->sc_type) {
3639 	case WM_T_82542_2_0:
3640 	case WM_T_82542_2_1:
3641 	case WM_T_82543:
3642 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
3643 		CSR_WRITE_FLUSH(sc);
3644 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
3645 		CSR_WRITE_FLUSH(sc);
3646 		break;
3647 	case WM_T_PCH2:
3648 	case WM_T_PCH_LPT:
3649 	case WM_T_PCH_SPT:
3650 	case WM_T_PCH_CNP:
3651 		if (idx == 0) {
3652 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
3653 			CSR_WRITE_FLUSH(sc);
3654 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
3655 			CSR_WRITE_FLUSH(sc);
3656 			return;
3657 		}
3658 		if (sc->sc_type != WM_T_PCH2) {
3659 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
3660 			    FWSM_WLOCK_MAC);
3661 			addrl = WMREG_SHRAL(idx - 1);
3662 			addrh = WMREG_SHRAH(idx - 1);
3663 		} else {
3664 			wlock_mac = 0;
3665 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
3666 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
3667 		}
3668 
3669 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
3670 			rv = wm_get_swflag_ich8lan(sc);
3671 			if (rv != 0)
3672 				return;
3673 			CSR_WRITE(sc, addrl, ral_lo);
3674 			CSR_WRITE_FLUSH(sc);
3675 			CSR_WRITE(sc, addrh, ral_hi);
3676 			CSR_WRITE_FLUSH(sc);
3677 			wm_put_swflag_ich8lan(sc);
3678 		}
3679 
3680 		break;
3681 	default:
3682 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
3683 		CSR_WRITE_FLUSH(sc);
3684 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
3685 		CSR_WRITE_FLUSH(sc);
3686 		break;
3687 	}
3688 }
3689 
3690 /*
3691  * wm_mchash:
3692  *
3693  *	Compute the hash of the multicast address for the 4096-bit
3694  *	multicast filter.
3695  */
3696 static uint32_t
3697 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3698 {
3699 	static const int lo_shift[4] = { 4, 3, 2, 0 };
3700 	static const int hi_shift[4] = { 4, 5, 6, 8 };
3701 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3702 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3703 	uint32_t hash;
3704 
3705 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3706 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3707 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3708 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
3709 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3710 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3711 		return (hash & 0x3ff);
3712 	}
3713 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3714 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3715 
3716 	return (hash & 0xfff);
3717 }
3718 
3719 /*
3720  *
3721  *
3722  */
3723 static int
3724 wm_rar_count(struct wm_softc *sc)
3725 {
3726 	int size;
3727 
3728 	switch (sc->sc_type) {
3729 	case WM_T_ICH8:
3730 		size = WM_RAL_TABSIZE_ICH8 -1;
3731 		break;
3732 	case WM_T_ICH9:
3733 	case WM_T_ICH10:
3734 	case WM_T_PCH:
3735 		size = WM_RAL_TABSIZE_ICH8;
3736 		break;
3737 	case WM_T_PCH2:
3738 		size = WM_RAL_TABSIZE_PCH2;
3739 		break;
3740 	case WM_T_PCH_LPT:
3741 	case WM_T_PCH_SPT:
3742 	case WM_T_PCH_CNP:
3743 		size = WM_RAL_TABSIZE_PCH_LPT;
3744 		break;
3745 	case WM_T_82575:
3746 	case WM_T_I210:
3747 	case WM_T_I211:
3748 		size = WM_RAL_TABSIZE_82575;
3749 		break;
3750 	case WM_T_82576:
3751 	case WM_T_82580:
3752 		size = WM_RAL_TABSIZE_82576;
3753 		break;
3754 	case WM_T_I350:
3755 	case WM_T_I354:
3756 		size = WM_RAL_TABSIZE_I350;
3757 		break;
3758 	default:
3759 		size = WM_RAL_TABSIZE;
3760 	}
3761 
3762 	return size;
3763 }
3764 
3765 /*
3766  * wm_set_filter:
3767  *
3768  *	Set up the receive filter.
3769  */
3770 static void
3771 wm_set_filter(struct wm_softc *sc)
3772 {
3773 	struct ethercom *ec = &sc->sc_ethercom;
3774 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3775 	struct ether_multi *enm;
3776 	struct ether_multistep step;
3777 	bus_addr_t mta_reg;
3778 	uint32_t hash, reg, bit;
3779 	int i, size, ralmax;
3780 
3781 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3782 		device_xname(sc->sc_dev), __func__));
3783 
3784 	if (sc->sc_type >= WM_T_82544)
3785 		mta_reg = WMREG_CORDOVA_MTA;
3786 	else
3787 		mta_reg = WMREG_MTA;
3788 
3789 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3790 
3791 	if (ifp->if_flags & IFF_BROADCAST)
3792 		sc->sc_rctl |= RCTL_BAM;
3793 	if (ifp->if_flags & IFF_PROMISC) {
3794 		sc->sc_rctl |= RCTL_UPE;
3795 		ETHER_LOCK(ec);
3796 		ec->ec_flags |= ETHER_F_ALLMULTI;
3797 		ETHER_UNLOCK(ec);
3798 		goto allmulti;
3799 	}
3800 
3801 	/*
3802 	 * Set the station address in the first RAL slot, and
3803 	 * clear the remaining slots.
3804 	 */
3805 	size = wm_rar_count(sc);
3806 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3807 
3808 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
3809 	    || (sc->sc_type == WM_T_PCH_CNP)) {
3810 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
3811 		switch (i) {
3812 		case 0:
3813 			/* We can use all entries */
3814 			ralmax = size;
3815 			break;
3816 		case 1:
3817 			/* Only RAR[0] */
3818 			ralmax = 1;
3819 			break;
3820 		default:
3821 			/* Available SHRA + RAR[0] */
3822 			ralmax = i + 1;
3823 		}
3824 	} else
3825 		ralmax = size;
3826 	for (i = 1; i < size; i++) {
3827 		if (i < ralmax)
3828 			wm_set_ral(sc, NULL, i);
3829 	}
3830 
3831 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3832 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3833 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3834 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
3835 		size = WM_ICH8_MC_TABSIZE;
3836 	else
3837 		size = WM_MC_TABSIZE;
3838 	/* Clear out the multicast table. */
3839 	for (i = 0; i < size; i++) {
3840 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
3841 		CSR_WRITE_FLUSH(sc);
3842 	}
3843 
3844 	ETHER_LOCK(ec);
3845 	ETHER_FIRST_MULTI(step, ec, enm);
3846 	while (enm != NULL) {
3847 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3848 			ec->ec_flags |= ETHER_F_ALLMULTI;
3849 			ETHER_UNLOCK(ec);
3850 			/*
3851 			 * We must listen to a range of multicast addresses.
3852 			 * For now, just accept all multicasts, rather than
3853 			 * trying to set only those filter bits needed to match
3854 			 * the range.  (At this time, the only use of address
3855 			 * ranges is for IP multicast routing, for which the
3856 			 * range is big enough to require all bits set.)
3857 			 */
3858 			goto allmulti;
3859 		}
3860 
3861 		hash = wm_mchash(sc, enm->enm_addrlo);
3862 
3863 		reg = (hash >> 5);
3864 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3865 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3866 		    || (sc->sc_type == WM_T_PCH2)
3867 		    || (sc->sc_type == WM_T_PCH_LPT)
3868 		    || (sc->sc_type == WM_T_PCH_SPT)
3869 		    || (sc->sc_type == WM_T_PCH_CNP))
3870 			reg &= 0x1f;
3871 		else
3872 			reg &= 0x7f;
3873 		bit = hash & 0x1f;
3874 
3875 		hash = CSR_READ(sc, mta_reg + (reg << 2));
3876 		hash |= 1U << bit;
3877 
3878 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
3879 			/*
3880 			 * 82544 Errata 9: Certain register cannot be written
3881 			 * with particular alignments in PCI-X bus operation
3882 			 * (FCAH, MTA and VFTA).
3883 			 */
3884 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3885 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3886 			CSR_WRITE_FLUSH(sc);
3887 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3888 			CSR_WRITE_FLUSH(sc);
3889 		} else {
3890 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3891 			CSR_WRITE_FLUSH(sc);
3892 		}
3893 
3894 		ETHER_NEXT_MULTI(step, enm);
3895 	}
3896 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
3897 	ETHER_UNLOCK(ec);
3898 
3899 	goto setit;
3900 
3901  allmulti:
3902 	sc->sc_rctl |= RCTL_MPE;
3903 
3904  setit:
3905 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3906 }
3907 
3908 /* Reset and init related */
3909 
3910 static void
3911 wm_set_vlan(struct wm_softc *sc)
3912 {
3913 
3914 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3915 		device_xname(sc->sc_dev), __func__));
3916 
3917 	/* Deal with VLAN enables. */
3918 	if (VLAN_ATTACHED(&sc->sc_ethercom))
3919 		sc->sc_ctrl |= CTRL_VME;
3920 	else
3921 		sc->sc_ctrl &= ~CTRL_VME;
3922 
3923 	/* Write the control registers. */
3924 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3925 }
3926 
3927 static void
3928 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3929 {
3930 	uint32_t gcr;
3931 	pcireg_t ctrl2;
3932 
3933 	gcr = CSR_READ(sc, WMREG_GCR);
3934 
3935 	/* Only take action if timeout value is defaulted to 0 */
3936 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3937 		goto out;
3938 
3939 	if ((gcr & GCR_CAP_VER2) == 0) {
3940 		gcr |= GCR_CMPL_TMOUT_10MS;
3941 		goto out;
3942 	}
3943 
3944 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3945 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
3946 	ctrl2 |= WM_PCIE_DCSR2_16MS;
3947 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3948 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3949 
3950 out:
3951 	/* Disable completion timeout resend */
3952 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
3953 
3954 	CSR_WRITE(sc, WMREG_GCR, gcr);
3955 }
3956 
3957 void
3958 wm_get_auto_rd_done(struct wm_softc *sc)
3959 {
3960 	int i;
3961 
3962 	/* wait for eeprom to reload */
3963 	switch (sc->sc_type) {
3964 	case WM_T_82571:
3965 	case WM_T_82572:
3966 	case WM_T_82573:
3967 	case WM_T_82574:
3968 	case WM_T_82583:
3969 	case WM_T_82575:
3970 	case WM_T_82576:
3971 	case WM_T_82580:
3972 	case WM_T_I350:
3973 	case WM_T_I354:
3974 	case WM_T_I210:
3975 	case WM_T_I211:
3976 	case WM_T_80003:
3977 	case WM_T_ICH8:
3978 	case WM_T_ICH9:
3979 		for (i = 0; i < 10; i++) {
3980 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3981 				break;
3982 			delay(1000);
3983 		}
3984 		if (i == 10) {
3985 			log(LOG_ERR, "%s: auto read from eeprom failed to "
3986 			    "complete\n", device_xname(sc->sc_dev));
3987 		}
3988 		break;
3989 	default:
3990 		break;
3991 	}
3992 }
3993 
3994 void
3995 wm_lan_init_done(struct wm_softc *sc)
3996 {
3997 	uint32_t reg = 0;
3998 	int i;
3999 
4000 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4001 		device_xname(sc->sc_dev), __func__));
4002 
4003 	/* Wait for eeprom to reload */
4004 	switch (sc->sc_type) {
4005 	case WM_T_ICH10:
4006 	case WM_T_PCH:
4007 	case WM_T_PCH2:
4008 	case WM_T_PCH_LPT:
4009 	case WM_T_PCH_SPT:
4010 	case WM_T_PCH_CNP:
4011 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4012 			reg = CSR_READ(sc, WMREG_STATUS);
4013 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
4014 				break;
4015 			delay(100);
4016 		}
4017 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4018 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
4019 			    "complete\n", device_xname(sc->sc_dev), __func__);
4020 		}
4021 		break;
4022 	default:
4023 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4024 		    __func__);
4025 		break;
4026 	}
4027 
4028 	reg &= ~STATUS_LAN_INIT_DONE;
4029 	CSR_WRITE(sc, WMREG_STATUS, reg);
4030 }
4031 
4032 void
4033 wm_get_cfg_done(struct wm_softc *sc)
4034 {
4035 	int mask;
4036 	uint32_t reg;
4037 	int i;
4038 
4039 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4040 		device_xname(sc->sc_dev), __func__));
4041 
4042 	/* Wait for eeprom to reload */
4043 	switch (sc->sc_type) {
4044 	case WM_T_82542_2_0:
4045 	case WM_T_82542_2_1:
4046 		/* null */
4047 		break;
4048 	case WM_T_82543:
4049 	case WM_T_82544:
4050 	case WM_T_82540:
4051 	case WM_T_82545:
4052 	case WM_T_82545_3:
4053 	case WM_T_82546:
4054 	case WM_T_82546_3:
4055 	case WM_T_82541:
4056 	case WM_T_82541_2:
4057 	case WM_T_82547:
4058 	case WM_T_82547_2:
4059 	case WM_T_82573:
4060 	case WM_T_82574:
4061 	case WM_T_82583:
4062 		/* generic */
4063 		delay(10*1000);
4064 		break;
4065 	case WM_T_80003:
4066 	case WM_T_82571:
4067 	case WM_T_82572:
4068 	case WM_T_82575:
4069 	case WM_T_82576:
4070 	case WM_T_82580:
4071 	case WM_T_I350:
4072 	case WM_T_I354:
4073 	case WM_T_I210:
4074 	case WM_T_I211:
4075 		if (sc->sc_type == WM_T_82571) {
4076 			/* Only 82571 shares port 0 */
4077 			mask = EEMNGCTL_CFGDONE_0;
4078 		} else
4079 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
4080 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
4081 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
4082 				break;
4083 			delay(1000);
4084 		}
4085 		if (i >= WM_PHY_CFG_TIMEOUT)
4086 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
4087 				device_xname(sc->sc_dev), __func__));
4088 		break;
4089 	case WM_T_ICH8:
4090 	case WM_T_ICH9:
4091 	case WM_T_ICH10:
4092 	case WM_T_PCH:
4093 	case WM_T_PCH2:
4094 	case WM_T_PCH_LPT:
4095 	case WM_T_PCH_SPT:
4096 	case WM_T_PCH_CNP:
4097 		delay(10*1000);
4098 		if (sc->sc_type >= WM_T_ICH10)
4099 			wm_lan_init_done(sc);
4100 		else
4101 			wm_get_auto_rd_done(sc);
4102 
4103 		/* Clear PHY Reset Asserted bit */
4104 		reg = CSR_READ(sc, WMREG_STATUS);
4105 		if ((reg & STATUS_PHYRA) != 0)
4106 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
4107 		break;
4108 	default:
4109 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4110 		    __func__);
4111 		break;
4112 	}
4113 }
4114 
4115 int
4116 wm_phy_post_reset(struct wm_softc *sc)
4117 {
4118 	device_t dev = sc->sc_dev;
4119 	uint16_t reg;
4120 	int rv = 0;
4121 
4122 	/* This function is only for ICH8 and newer. */
4123 	if (sc->sc_type < WM_T_ICH8)
4124 		return 0;
4125 
4126 	if (wm_phy_resetisblocked(sc)) {
4127 		/* XXX */
4128 		device_printf(dev, "PHY is blocked\n");
4129 		return -1;
4130 	}
4131 
4132 	/* Allow time for h/w to get to quiescent state after reset */
4133 	delay(10*1000);
4134 
4135 	/* Perform any necessary post-reset workarounds */
4136 	if (sc->sc_type == WM_T_PCH)
4137 		rv = wm_hv_phy_workarounds_ich8lan(sc);
4138 	else if (sc->sc_type == WM_T_PCH2)
4139 		rv = wm_lv_phy_workarounds_ich8lan(sc);
4140 	if (rv != 0)
4141 		return rv;
4142 
4143 	/* Clear the host wakeup bit after lcd reset */
4144 	if (sc->sc_type >= WM_T_PCH) {
4145 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
4146 		reg &= ~BM_WUC_HOST_WU_BIT;
4147 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
4148 	}
4149 
4150 	/* Configure the LCD with the extended configuration region in NVM */
4151 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
4152 		return rv;
4153 
4154 	/* Configure the LCD with the OEM bits in NVM */
4155 	rv = wm_oem_bits_config_ich8lan(sc, true);
4156 
4157 	if (sc->sc_type == WM_T_PCH2) {
4158 		/* Ungate automatic PHY configuration on non-managed 82579 */
4159 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
4160 			delay(10 * 1000);
4161 			wm_gate_hw_phy_config_ich8lan(sc, false);
4162 		}
4163 		/* Set EEE LPI Update Timer to 200usec */
4164 		rv = sc->phy.acquire(sc);
4165 		if (rv)
4166 			return rv;
4167 		rv = wm_write_emi_reg_locked(dev,
4168 		    I82579_LPI_UPDATE_TIMER, 0x1387);
4169 		sc->phy.release(sc);
4170 	}
4171 
4172 	return rv;
4173 }
4174 
4175 /* Only for PCH and newer */
4176 static int
4177 wm_write_smbus_addr(struct wm_softc *sc)
4178 {
4179 	uint32_t strap, freq;
4180 	uint16_t phy_data;
4181 	int rv;
4182 
4183 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4184 		device_xname(sc->sc_dev), __func__));
4185 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
4186 
4187 	strap = CSR_READ(sc, WMREG_STRAP);
4188 	freq = __SHIFTOUT(strap, STRAP_FREQ);
4189 
4190 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
4191 	if (rv != 0)
4192 		return -1;
4193 
4194 	phy_data &= ~HV_SMB_ADDR_ADDR;
4195 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
4196 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
4197 
4198 	if (sc->sc_phytype == WMPHY_I217) {
4199 		/* Restore SMBus frequency */
4200 		if (freq --) {
4201 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
4202 			    | HV_SMB_ADDR_FREQ_HIGH);
4203 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
4204 			    HV_SMB_ADDR_FREQ_LOW);
4205 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
4206 			    HV_SMB_ADDR_FREQ_HIGH);
4207 		} else
4208 			DPRINTF(WM_DEBUG_INIT,
4209 			    ("%s: %s Unsupported SMB frequency in PHY\n",
4210 				device_xname(sc->sc_dev), __func__));
4211 	}
4212 
4213 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
4214 	    phy_data);
4215 }
4216 
4217 static int
4218 wm_init_lcd_from_nvm(struct wm_softc *sc)
4219 {
4220 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
4221 	uint16_t phy_page = 0;
4222 	int rv = 0;
4223 
4224 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4225 		device_xname(sc->sc_dev), __func__));
4226 
4227 	switch (sc->sc_type) {
4228 	case WM_T_ICH8:
4229 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
4230 		    || (sc->sc_phytype != WMPHY_IGP_3))
4231 			return 0;
4232 
4233 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
4234 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
4235 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
4236 			break;
4237 		}
4238 		/* FALLTHROUGH */
4239 	case WM_T_PCH:
4240 	case WM_T_PCH2:
4241 	case WM_T_PCH_LPT:
4242 	case WM_T_PCH_SPT:
4243 	case WM_T_PCH_CNP:
4244 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
4245 		break;
4246 	default:
4247 		return 0;
4248 	}
4249 
4250 	if ((rv = sc->phy.acquire(sc)) != 0)
4251 		return rv;
4252 
4253 	reg = CSR_READ(sc, WMREG_FEXTNVM);
4254 	if ((reg & sw_cfg_mask) == 0)
4255 		goto release;
4256 
4257 	/*
4258 	 * Make sure HW does not configure LCD from PHY extended configuration
4259 	 * before SW configuration
4260 	 */
4261 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
4262 	if ((sc->sc_type < WM_T_PCH2)
4263 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
4264 		goto release;
4265 
4266 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
4267 		device_xname(sc->sc_dev), __func__));
4268 	/* word_addr is in DWORD */
4269 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
4270 
4271 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
4272 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
4273 	if (cnf_size == 0)
4274 		goto release;
4275 
4276 	if (((sc->sc_type == WM_T_PCH)
4277 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
4278 	    || (sc->sc_type > WM_T_PCH)) {
4279 		/*
4280 		 * HW configures the SMBus address and LEDs when the OEM and
4281 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
4282 		 * are cleared, SW will configure them instead.
4283 		 */
4284 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
4285 			device_xname(sc->sc_dev), __func__));
4286 		if ((rv = wm_write_smbus_addr(sc)) != 0)
4287 			goto release;
4288 
4289 		reg = CSR_READ(sc, WMREG_LEDCTL);
4290 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
4291 		    (uint16_t)reg);
4292 		if (rv != 0)
4293 			goto release;
4294 	}
4295 
4296 	/* Configure LCD from extended configuration region. */
4297 	for (i = 0; i < cnf_size; i++) {
4298 		uint16_t reg_data, reg_addr;
4299 
4300 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
4301 			goto release;
4302 
4303 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
4304 			goto release;
4305 
4306 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
4307 			phy_page = reg_data;
4308 
4309 		reg_addr &= IGPHY_MAXREGADDR;
4310 		reg_addr |= phy_page;
4311 
4312 		KASSERT(sc->phy.writereg_locked != NULL);
4313 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
4314 		    reg_data);
4315 	}
4316 
4317 release:
4318 	sc->phy.release(sc);
4319 	return rv;
4320 }
4321 
4322 /*
4323  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
4324  *  @sc:       pointer to the HW structure
4325  *  @d0_state: boolean if entering d0 or d3 device state
4326  *
4327  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
4328  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
4329  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
4330  */
4331 int
4332 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
4333 {
4334 	uint32_t mac_reg;
4335 	uint16_t oem_reg;
4336 	int rv;
4337 
4338 	if (sc->sc_type < WM_T_PCH)
4339 		return 0;
4340 
4341 	rv = sc->phy.acquire(sc);
4342 	if (rv != 0)
4343 		return rv;
4344 
4345 	if (sc->sc_type == WM_T_PCH) {
4346 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
4347 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
4348 			goto release;
4349 	}
4350 
4351 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
4352 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
4353 		goto release;
4354 
4355 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
4356 
4357 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
4358 	if (rv != 0)
4359 		goto release;
4360 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
4361 
4362 	if (d0_state) {
4363 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
4364 			oem_reg |= HV_OEM_BITS_A1KDIS;
4365 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
4366 			oem_reg |= HV_OEM_BITS_LPLU;
4367 	} else {
4368 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
4369 		    != 0)
4370 			oem_reg |= HV_OEM_BITS_A1KDIS;
4371 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
4372 		    != 0)
4373 			oem_reg |= HV_OEM_BITS_LPLU;
4374 	}
4375 
4376 	/* Set Restart auto-neg to activate the bits */
4377 	if ((d0_state || (sc->sc_type != WM_T_PCH))
4378 	    && (wm_phy_resetisblocked(sc) == false))
4379 		oem_reg |= HV_OEM_BITS_ANEGNOW;
4380 
4381 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
4382 
4383 release:
4384 	sc->phy.release(sc);
4385 
4386 	return rv;
4387 }
4388 
4389 /* Init hardware bits */
4390 void
4391 wm_initialize_hardware_bits(struct wm_softc *sc)
4392 {
4393 	uint32_t tarc0, tarc1, reg;
4394 
4395 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4396 		device_xname(sc->sc_dev), __func__));
4397 
4398 	/* For 82571 variant, 80003 and ICHs */
4399 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
4400 	    || (sc->sc_type >= WM_T_80003)) {
4401 
4402 		/* Transmit Descriptor Control 0 */
4403 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
4404 		reg |= TXDCTL_COUNT_DESC;
4405 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
4406 
4407 		/* Transmit Descriptor Control 1 */
4408 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
4409 		reg |= TXDCTL_COUNT_DESC;
4410 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
4411 
4412 		/* TARC0 */
4413 		tarc0 = CSR_READ(sc, WMREG_TARC0);
4414 		switch (sc->sc_type) {
4415 		case WM_T_82571:
4416 		case WM_T_82572:
4417 		case WM_T_82573:
4418 		case WM_T_82574:
4419 		case WM_T_82583:
4420 		case WM_T_80003:
4421 			/* Clear bits 30..27 */
4422 			tarc0 &= ~__BITS(30, 27);
4423 			break;
4424 		default:
4425 			break;
4426 		}
4427 
4428 		switch (sc->sc_type) {
4429 		case WM_T_82571:
4430 		case WM_T_82572:
4431 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
4432 
4433 			tarc1 = CSR_READ(sc, WMREG_TARC1);
4434 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
4435 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
4436 			/* 8257[12] Errata No.7 */
4437 			tarc1 |= __BIT(22); /* TARC1 bits 22 */
4438 
4439 			/* TARC1 bit 28 */
4440 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
4441 				tarc1 &= ~__BIT(28);
4442 			else
4443 				tarc1 |= __BIT(28);
4444 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
4445 
4446 			/*
4447 			 * 8257[12] Errata No.13
4448 			 * Disable Dyamic Clock Gating.
4449 			 */
4450 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4451 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
4452 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4453 			break;
4454 		case WM_T_82573:
4455 		case WM_T_82574:
4456 		case WM_T_82583:
4457 			if ((sc->sc_type == WM_T_82574)
4458 			    || (sc->sc_type == WM_T_82583))
4459 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
4460 
4461 			/* Extended Device Control */
4462 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4463 			reg &= ~__BIT(23);	/* Clear bit 23 */
4464 			reg |= __BIT(22);	/* Set bit 22 */
4465 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4466 
4467 			/* Device Control */
4468 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
4469 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4470 
4471 			/* PCIe Control Register */
4472 			/*
4473 			 * 82573 Errata (unknown).
4474 			 *
4475 			 * 82574 Errata 25 and 82583 Errata 12
4476 			 * "Dropped Rx Packets":
4477 			 *   NVM Image Version 2.1.4 and newer has no this bug.
4478 			 */
4479 			reg = CSR_READ(sc, WMREG_GCR);
4480 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
4481 			CSR_WRITE(sc, WMREG_GCR, reg);
4482 
4483 			if ((sc->sc_type == WM_T_82574)
4484 			    || (sc->sc_type == WM_T_82583)) {
4485 				/*
4486 				 * Document says this bit must be set for
4487 				 * proper operation.
4488 				 */
4489 				reg = CSR_READ(sc, WMREG_GCR);
4490 				reg |= __BIT(22);
4491 				CSR_WRITE(sc, WMREG_GCR, reg);
4492 
4493 				/*
4494 				 * Apply workaround for hardware errata
4495 				 * documented in errata docs Fixes issue where
4496 				 * some error prone or unreliable PCIe
4497 				 * completions are occurring, particularly
4498 				 * with ASPM enabled. Without fix, issue can
4499 				 * cause Tx timeouts.
4500 				 */
4501 				reg = CSR_READ(sc, WMREG_GCR2);
4502 				reg |= __BIT(0);
4503 				CSR_WRITE(sc, WMREG_GCR2, reg);
4504 			}
4505 			break;
4506 		case WM_T_80003:
4507 			/* TARC0 */
4508 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
4509 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
4510 				tarc0 &= ~__BIT(20); /* Clear bits 20 */
4511 
4512 			/* TARC1 bit 28 */
4513 			tarc1 = CSR_READ(sc, WMREG_TARC1);
4514 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
4515 				tarc1 &= ~__BIT(28);
4516 			else
4517 				tarc1 |= __BIT(28);
4518 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
4519 			break;
4520 		case WM_T_ICH8:
4521 		case WM_T_ICH9:
4522 		case WM_T_ICH10:
4523 		case WM_T_PCH:
4524 		case WM_T_PCH2:
4525 		case WM_T_PCH_LPT:
4526 		case WM_T_PCH_SPT:
4527 		case WM_T_PCH_CNP:
4528 			/* TARC0 */
4529 			if (sc->sc_type == WM_T_ICH8) {
4530 				/* Set TARC0 bits 29 and 28 */
4531 				tarc0 |= __BITS(29, 28);
4532 			} else if (sc->sc_type == WM_T_PCH_SPT) {
4533 				tarc0 |= __BIT(29);
4534 				/*
4535 				 *  Drop bit 28. From Linux.
4536 				 * See I218/I219 spec update
4537 				 * "5. Buffer Overrun While the I219 is
4538 				 * Processing DMA Transactions"
4539 				 */
4540 				tarc0 &= ~__BIT(28);
4541 			}
4542 			/* Set TARC0 bits 23,24,26,27 */
4543 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
4544 
4545 			/* CTRL_EXT */
4546 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4547 			reg |= __BIT(22);	/* Set bit 22 */
4548 			/*
4549 			 * Enable PHY low-power state when MAC is at D3
4550 			 * w/o WoL
4551 			 */
4552 			if (sc->sc_type >= WM_T_PCH)
4553 				reg |= CTRL_EXT_PHYPDEN;
4554 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4555 
4556 			/* TARC1 */
4557 			tarc1 = CSR_READ(sc, WMREG_TARC1);
4558 			/* bit 28 */
4559 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
4560 				tarc1 &= ~__BIT(28);
4561 			else
4562 				tarc1 |= __BIT(28);
4563 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
4564 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
4565 
4566 			/* Device Status */
4567 			if (sc->sc_type == WM_T_ICH8) {
4568 				reg = CSR_READ(sc, WMREG_STATUS);
4569 				reg &= ~__BIT(31);
4570 				CSR_WRITE(sc, WMREG_STATUS, reg);
4571 
4572 			}
4573 
4574 			/* IOSFPC */
4575 			if (sc->sc_type == WM_T_PCH_SPT) {
4576 				reg = CSR_READ(sc, WMREG_IOSFPC);
4577 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
4578 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
4579 			}
4580 			/*
4581 			 * Work-around descriptor data corruption issue during
4582 			 * NFS v2 UDP traffic, just disable the NFS filtering
4583 			 * capability.
4584 			 */
4585 			reg = CSR_READ(sc, WMREG_RFCTL);
4586 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
4587 			CSR_WRITE(sc, WMREG_RFCTL, reg);
4588 			break;
4589 		default:
4590 			break;
4591 		}
4592 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
4593 
4594 		switch (sc->sc_type) {
4595 		/*
4596 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
4597 		 * Avoid RSS Hash Value bug.
4598 		 */
4599 		case WM_T_82571:
4600 		case WM_T_82572:
4601 		case WM_T_82573:
4602 		case WM_T_80003:
4603 		case WM_T_ICH8:
4604 			reg = CSR_READ(sc, WMREG_RFCTL);
4605 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
4606 			CSR_WRITE(sc, WMREG_RFCTL, reg);
4607 			break;
4608 		case WM_T_82574:
4609 			/* Use extened Rx descriptor. */
4610 			reg = CSR_READ(sc, WMREG_RFCTL);
4611 			reg |= WMREG_RFCTL_EXSTEN;
4612 			CSR_WRITE(sc, WMREG_RFCTL, reg);
4613 			break;
4614 		default:
4615 			break;
4616 		}
4617 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
4618 		/*
4619 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
4620 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
4621 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
4622 		 * Correctly by the Device"
4623 		 *
4624 		 * I354(C2000) Errata AVR53:
4625 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
4626 		 * Hang"
4627 		 */
4628 		reg = CSR_READ(sc, WMREG_RFCTL);
4629 		reg |= WMREG_RFCTL_IPV6EXDIS;
4630 		CSR_WRITE(sc, WMREG_RFCTL, reg);
4631 	}
4632 }
4633 
4634 static uint32_t
4635 wm_rxpbs_adjust_82580(uint32_t val)
4636 {
4637 	uint32_t rv = 0;
4638 
4639 	if (val < __arraycount(wm_82580_rxpbs_table))
4640 		rv = wm_82580_rxpbs_table[val];
4641 
4642 	return rv;
4643 }
4644 
4645 /*
4646  * wm_reset_phy:
4647  *
4648  *	generic PHY reset function.
4649  *	Same as e1000_phy_hw_reset_generic()
4650  */
4651 static int
4652 wm_reset_phy(struct wm_softc *sc)
4653 {
4654 	uint32_t reg;
4655 
4656 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4657 		device_xname(sc->sc_dev), __func__));
4658 	if (wm_phy_resetisblocked(sc))
4659 		return -1;
4660 
4661 	sc->phy.acquire(sc);
4662 
4663 	reg = CSR_READ(sc, WMREG_CTRL);
4664 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
4665 	CSR_WRITE_FLUSH(sc);
4666 
4667 	delay(sc->phy.reset_delay_us);
4668 
4669 	CSR_WRITE(sc, WMREG_CTRL, reg);
4670 	CSR_WRITE_FLUSH(sc);
4671 
4672 	delay(150);
4673 
4674 	sc->phy.release(sc);
4675 
4676 	wm_get_cfg_done(sc);
4677 	wm_phy_post_reset(sc);
4678 
4679 	return 0;
4680 }
4681 
4682 /*
4683  * Only used by WM_T_PCH_SPT which does not use multiqueue,
4684  * so it is enough to check sc->sc_queue[0] only.
4685  */
4686 static void
4687 wm_flush_desc_rings(struct wm_softc *sc)
4688 {
4689 	pcireg_t preg;
4690 	uint32_t reg;
4691 	struct wm_txqueue *txq;
4692 	wiseman_txdesc_t *txd;
4693 	int nexttx;
4694 	uint32_t rctl;
4695 
4696 	/* First, disable MULR fix in FEXTNVM11 */
4697 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
4698 	reg |= FEXTNVM11_DIS_MULRFIX;
4699 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
4700 
4701 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
4702 	reg = CSR_READ(sc, WMREG_TDLEN(0));
4703 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
4704 		return;
4705 
4706 	/* TX */
4707 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x, len = %u)\n",
4708 	    preg, reg);
4709 	reg = CSR_READ(sc, WMREG_TCTL);
4710 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
4711 
4712 	txq = &sc->sc_queue[0].wmq_txq;
4713 	nexttx = txq->txq_next;
4714 	txd = &txq->txq_descs[nexttx];
4715 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
4716 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
4717 	txd->wtx_fields.wtxu_status = 0;
4718 	txd->wtx_fields.wtxu_options = 0;
4719 	txd->wtx_fields.wtxu_vlan = 0;
4720 
4721 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
4722 	    BUS_SPACE_BARRIER_WRITE);
4723 
4724 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
4725 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
4726 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
4727 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
4728 	delay(250);
4729 
4730 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
4731 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
4732 		return;
4733 
4734 	/* RX */
4735 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
4736 	rctl = CSR_READ(sc, WMREG_RCTL);
4737 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
4738 	CSR_WRITE_FLUSH(sc);
4739 	delay(150);
4740 
4741 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
4742 	/* Zero the lower 14 bits (prefetch and host thresholds) */
4743 	reg &= 0xffffc000;
4744 	/*
4745 	 * Update thresholds: prefetch threshold to 31, host threshold
4746 	 * to 1 and make sure the granularity is "descriptors" and not
4747 	 * "cache lines"
4748 	 */
4749 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
4750 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
4751 
4752 	/* Momentarily enable the RX ring for the changes to take effect */
4753 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
4754 	CSR_WRITE_FLUSH(sc);
4755 	delay(150);
4756 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
4757 }
4758 
4759 /*
4760  * wm_reset:
4761  *
4762  *	Reset the i82542 chip.
4763  */
4764 static void
4765 wm_reset(struct wm_softc *sc)
4766 {
4767 	int phy_reset = 0;
4768 	int i, error = 0;
4769 	uint32_t reg;
4770 	uint16_t kmreg;
4771 	int rv;
4772 
4773 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4774 		device_xname(sc->sc_dev), __func__));
4775 	KASSERT(sc->sc_type != 0);
4776 
4777 	/*
4778 	 * Allocate on-chip memory according to the MTU size.
4779 	 * The Packet Buffer Allocation register must be written
4780 	 * before the chip is reset.
4781 	 */
4782 	switch (sc->sc_type) {
4783 	case WM_T_82547:
4784 	case WM_T_82547_2:
4785 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4786 		    PBA_22K : PBA_30K;
4787 		for (i = 0; i < sc->sc_nqueues; i++) {
4788 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
4789 			txq->txq_fifo_head = 0;
4790 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
4791 			txq->txq_fifo_size =
4792 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
4793 			txq->txq_fifo_stall = 0;
4794 		}
4795 		break;
4796 	case WM_T_82571:
4797 	case WM_T_82572:
4798 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
4799 	case WM_T_80003:
4800 		sc->sc_pba = PBA_32K;
4801 		break;
4802 	case WM_T_82573:
4803 		sc->sc_pba = PBA_12K;
4804 		break;
4805 	case WM_T_82574:
4806 	case WM_T_82583:
4807 		sc->sc_pba = PBA_20K;
4808 		break;
4809 	case WM_T_82576:
4810 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
4811 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
4812 		break;
4813 	case WM_T_82580:
4814 	case WM_T_I350:
4815 	case WM_T_I354:
4816 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
4817 		break;
4818 	case WM_T_I210:
4819 	case WM_T_I211:
4820 		sc->sc_pba = PBA_34K;
4821 		break;
4822 	case WM_T_ICH8:
4823 		/* Workaround for a bit corruption issue in FIFO memory */
4824 		sc->sc_pba = PBA_8K;
4825 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
4826 		break;
4827 	case WM_T_ICH9:
4828 	case WM_T_ICH10:
4829 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
4830 		    PBA_14K : PBA_10K;
4831 		break;
4832 	case WM_T_PCH:
4833 	case WM_T_PCH2:	/* XXX 14K? */
4834 	case WM_T_PCH_LPT:
4835 	case WM_T_PCH_SPT:
4836 	case WM_T_PCH_CNP:
4837 		sc->sc_pba = PBA_26K;
4838 		break;
4839 	default:
4840 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4841 		    PBA_40K : PBA_48K;
4842 		break;
4843 	}
4844 	/*
4845 	 * Only old or non-multiqueue devices have the PBA register
4846 	 * XXX Need special handling for 82575.
4847 	 */
4848 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4849 	    || (sc->sc_type == WM_T_82575))
4850 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
4851 
4852 	/* Prevent the PCI-E bus from sticking */
4853 	if (sc->sc_flags & WM_F_PCIE) {
4854 		int timeout = 800;
4855 
4856 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
4857 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4858 
4859 		while (timeout--) {
4860 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
4861 			    == 0)
4862 				break;
4863 			delay(100);
4864 		}
4865 		if (timeout == 0)
4866 			device_printf(sc->sc_dev,
4867 			    "failed to disable busmastering\n");
4868 	}
4869 
4870 	/* Set the completion timeout for interface */
4871 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
4872 	    || (sc->sc_type == WM_T_82580)
4873 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4874 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
4875 		wm_set_pcie_completion_timeout(sc);
4876 
4877 	/* Clear interrupt */
4878 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4879 	if (wm_is_using_msix(sc)) {
4880 		if (sc->sc_type != WM_T_82574) {
4881 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4882 			CSR_WRITE(sc, WMREG_EIAC, 0);
4883 		} else
4884 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4885 	}
4886 
4887 	/* Stop the transmit and receive processes. */
4888 	CSR_WRITE(sc, WMREG_RCTL, 0);
4889 	sc->sc_rctl &= ~RCTL_EN;
4890 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
4891 	CSR_WRITE_FLUSH(sc);
4892 
4893 	/* XXX set_tbi_sbp_82543() */
4894 
4895 	delay(10*1000);
4896 
4897 	/* Must acquire the MDIO ownership before MAC reset */
4898 	switch (sc->sc_type) {
4899 	case WM_T_82573:
4900 	case WM_T_82574:
4901 	case WM_T_82583:
4902 		error = wm_get_hw_semaphore_82573(sc);
4903 		break;
4904 	default:
4905 		break;
4906 	}
4907 
4908 	/*
4909 	 * 82541 Errata 29? & 82547 Errata 28?
4910 	 * See also the description about PHY_RST bit in CTRL register
4911 	 * in 8254x_GBe_SDM.pdf.
4912 	 */
4913 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
4914 		CSR_WRITE(sc, WMREG_CTRL,
4915 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
4916 		CSR_WRITE_FLUSH(sc);
4917 		delay(5000);
4918 	}
4919 
4920 	switch (sc->sc_type) {
4921 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
4922 	case WM_T_82541:
4923 	case WM_T_82541_2:
4924 	case WM_T_82547:
4925 	case WM_T_82547_2:
4926 		/*
4927 		 * On some chipsets, a reset through a memory-mapped write
4928 		 * cycle can cause the chip to reset before completing the
4929 		 * write cycle. This causes major headache that can be avoided
4930 		 * by issuing the reset via indirect register writes through
4931 		 * I/O space.
4932 		 *
4933 		 * So, if we successfully mapped the I/O BAR at attach time,
4934 		 * use that. Otherwise, try our luck with a memory-mapped
4935 		 * reset.
4936 		 */
4937 		if (sc->sc_flags & WM_F_IOH_VALID)
4938 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
4939 		else
4940 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
4941 		break;
4942 	case WM_T_82545_3:
4943 	case WM_T_82546_3:
4944 		/* Use the shadow control register on these chips. */
4945 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
4946 		break;
4947 	case WM_T_80003:
4948 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4949 		sc->phy.acquire(sc);
4950 		CSR_WRITE(sc, WMREG_CTRL, reg);
4951 		sc->phy.release(sc);
4952 		break;
4953 	case WM_T_ICH8:
4954 	case WM_T_ICH9:
4955 	case WM_T_ICH10:
4956 	case WM_T_PCH:
4957 	case WM_T_PCH2:
4958 	case WM_T_PCH_LPT:
4959 	case WM_T_PCH_SPT:
4960 	case WM_T_PCH_CNP:
4961 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4962 		if (wm_phy_resetisblocked(sc) == false) {
4963 			/*
4964 			 * Gate automatic PHY configuration by hardware on
4965 			 * non-managed 82579
4966 			 */
4967 			if ((sc->sc_type == WM_T_PCH2)
4968 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
4969 				== 0))
4970 				wm_gate_hw_phy_config_ich8lan(sc, true);
4971 
4972 			reg |= CTRL_PHY_RESET;
4973 			phy_reset = 1;
4974 		} else
4975 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
4976 		sc->phy.acquire(sc);
4977 		CSR_WRITE(sc, WMREG_CTRL, reg);
4978 		/* Don't insert a completion barrier when reset */
4979 		delay(20*1000);
4980 		mutex_exit(sc->sc_ich_phymtx);
4981 		break;
4982 	case WM_T_82580:
4983 	case WM_T_I350:
4984 	case WM_T_I354:
4985 	case WM_T_I210:
4986 	case WM_T_I211:
4987 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4988 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
4989 			CSR_WRITE_FLUSH(sc);
4990 		delay(5000);
4991 		break;
4992 	case WM_T_82542_2_0:
4993 	case WM_T_82542_2_1:
4994 	case WM_T_82543:
4995 	case WM_T_82540:
4996 	case WM_T_82545:
4997 	case WM_T_82546:
4998 	case WM_T_82571:
4999 	case WM_T_82572:
5000 	case WM_T_82573:
5001 	case WM_T_82574:
5002 	case WM_T_82575:
5003 	case WM_T_82576:
5004 	case WM_T_82583:
5005 	default:
5006 		/* Everything else can safely use the documented method. */
5007 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
5008 		break;
5009 	}
5010 
5011 	/* Must release the MDIO ownership after MAC reset */
5012 	switch (sc->sc_type) {
5013 	case WM_T_82573:
5014 	case WM_T_82574:
5015 	case WM_T_82583:
5016 		if (error == 0)
5017 			wm_put_hw_semaphore_82573(sc);
5018 		break;
5019 	default:
5020 		break;
5021 	}
5022 
5023 	/* Set Phy Config Counter to 50msec */
5024 	if (sc->sc_type == WM_T_PCH2) {
5025 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
5026 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
5027 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
5028 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
5029 	}
5030 
5031 	if (phy_reset != 0)
5032 		wm_get_cfg_done(sc);
5033 
5034 	/* Reload EEPROM */
5035 	switch (sc->sc_type) {
5036 	case WM_T_82542_2_0:
5037 	case WM_T_82542_2_1:
5038 	case WM_T_82543:
5039 	case WM_T_82544:
5040 		delay(10);
5041 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5042 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5043 		CSR_WRITE_FLUSH(sc);
5044 		delay(2000);
5045 		break;
5046 	case WM_T_82540:
5047 	case WM_T_82545:
5048 	case WM_T_82545_3:
5049 	case WM_T_82546:
5050 	case WM_T_82546_3:
5051 		delay(5*1000);
5052 		/* XXX Disable HW ARPs on ASF enabled adapters */
5053 		break;
5054 	case WM_T_82541:
5055 	case WM_T_82541_2:
5056 	case WM_T_82547:
5057 	case WM_T_82547_2:
5058 		delay(20000);
5059 		/* XXX Disable HW ARPs on ASF enabled adapters */
5060 		break;
5061 	case WM_T_82571:
5062 	case WM_T_82572:
5063 	case WM_T_82573:
5064 	case WM_T_82574:
5065 	case WM_T_82583:
5066 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
5067 			delay(10);
5068 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5069 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5070 			CSR_WRITE_FLUSH(sc);
5071 		}
5072 		/* check EECD_EE_AUTORD */
5073 		wm_get_auto_rd_done(sc);
5074 		/*
5075 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
5076 		 * is set.
5077 		 */
5078 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
5079 		    || (sc->sc_type == WM_T_82583))
5080 			delay(25*1000);
5081 		break;
5082 	case WM_T_82575:
5083 	case WM_T_82576:
5084 	case WM_T_82580:
5085 	case WM_T_I350:
5086 	case WM_T_I354:
5087 	case WM_T_I210:
5088 	case WM_T_I211:
5089 	case WM_T_80003:
5090 		/* check EECD_EE_AUTORD */
5091 		wm_get_auto_rd_done(sc);
5092 		break;
5093 	case WM_T_ICH8:
5094 	case WM_T_ICH9:
5095 	case WM_T_ICH10:
5096 	case WM_T_PCH:
5097 	case WM_T_PCH2:
5098 	case WM_T_PCH_LPT:
5099 	case WM_T_PCH_SPT:
5100 	case WM_T_PCH_CNP:
5101 		break;
5102 	default:
5103 		panic("%s: unknown type\n", __func__);
5104 	}
5105 
5106 	/* Check whether EEPROM is present or not */
5107 	switch (sc->sc_type) {
5108 	case WM_T_82575:
5109 	case WM_T_82576:
5110 	case WM_T_82580:
5111 	case WM_T_I350:
5112 	case WM_T_I354:
5113 	case WM_T_ICH8:
5114 	case WM_T_ICH9:
5115 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
5116 			/* Not found */
5117 			sc->sc_flags |= WM_F_EEPROM_INVALID;
5118 			if (sc->sc_type == WM_T_82575)
5119 				wm_reset_init_script_82575(sc);
5120 		}
5121 		break;
5122 	default:
5123 		break;
5124 	}
5125 
5126 	if (phy_reset != 0)
5127 		wm_phy_post_reset(sc);
5128 
5129 	if ((sc->sc_type == WM_T_82580)
5130 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
5131 		/* Clear global device reset status bit */
5132 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
5133 	}
5134 
5135 	/* Clear any pending interrupt events. */
5136 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5137 	reg = CSR_READ(sc, WMREG_ICR);
5138 	if (wm_is_using_msix(sc)) {
5139 		if (sc->sc_type != WM_T_82574) {
5140 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5141 			CSR_WRITE(sc, WMREG_EIAC, 0);
5142 		} else
5143 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5144 	}
5145 
5146 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5147 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5148 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
5149 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
5150 		reg = CSR_READ(sc, WMREG_KABGTXD);
5151 		reg |= KABGTXD_BGSQLBIAS;
5152 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
5153 	}
5154 
5155 	/* Reload sc_ctrl */
5156 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5157 
5158 	wm_set_eee(sc);
5159 
5160 	/*
5161 	 * For PCH, this write will make sure that any noise will be detected
5162 	 * as a CRC error and be dropped rather than show up as a bad packet
5163 	 * to the DMA engine
5164 	 */
5165 	if (sc->sc_type == WM_T_PCH)
5166 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
5167 
5168 	if (sc->sc_type >= WM_T_82544)
5169 		CSR_WRITE(sc, WMREG_WUC, 0);
5170 
5171 	if (sc->sc_type < WM_T_82575)
5172 		wm_disable_aspm(sc); /* Workaround for some chips */
5173 
5174 	wm_reset_mdicnfg_82580(sc);
5175 
5176 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
5177 		wm_pll_workaround_i210(sc);
5178 
5179 	if (sc->sc_type == WM_T_80003) {
5180 		/* Default to TRUE to enable the MDIC W/A */
5181 		sc->sc_flags |= WM_F_80003_MDIC_WA;
5182 
5183 		rv = wm_kmrn_readreg(sc,
5184 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
5185 		if (rv == 0) {
5186 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
5187 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
5188 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
5189 			else
5190 				sc->sc_flags |= WM_F_80003_MDIC_WA;
5191 		}
5192 	}
5193 }
5194 
5195 /*
5196  * wm_add_rxbuf:
5197  *
5198  *	Add a receive buffer to the indiciated descriptor.
5199  */
5200 static int
5201 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
5202 {
5203 	struct wm_softc *sc = rxq->rxq_sc;
5204 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
5205 	struct mbuf *m;
5206 	int error;
5207 
5208 	KASSERT(mutex_owned(rxq->rxq_lock));
5209 
5210 	MGETHDR(m, M_DONTWAIT, MT_DATA);
5211 	if (m == NULL)
5212 		return ENOBUFS;
5213 
5214 	MCLGET(m, M_DONTWAIT);
5215 	if ((m->m_flags & M_EXT) == 0) {
5216 		m_freem(m);
5217 		return ENOBUFS;
5218 	}
5219 
5220 	if (rxs->rxs_mbuf != NULL)
5221 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5222 
5223 	rxs->rxs_mbuf = m;
5224 
5225 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5226 	/*
5227 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
5228 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
5229 	 */
5230 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
5231 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
5232 	if (error) {
5233 		/* XXX XXX XXX */
5234 		aprint_error_dev(sc->sc_dev,
5235 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
5236 		panic("wm_add_rxbuf");
5237 	}
5238 
5239 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5240 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5241 
5242 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5243 		if ((sc->sc_rctl & RCTL_EN) != 0)
5244 			wm_init_rxdesc(rxq, idx);
5245 	} else
5246 		wm_init_rxdesc(rxq, idx);
5247 
5248 	return 0;
5249 }
5250 
5251 /*
5252  * wm_rxdrain:
5253  *
5254  *	Drain the receive queue.
5255  */
5256 static void
5257 wm_rxdrain(struct wm_rxqueue *rxq)
5258 {
5259 	struct wm_softc *sc = rxq->rxq_sc;
5260 	struct wm_rxsoft *rxs;
5261 	int i;
5262 
5263 	KASSERT(mutex_owned(rxq->rxq_lock));
5264 
5265 	for (i = 0; i < WM_NRXDESC; i++) {
5266 		rxs = &rxq->rxq_soft[i];
5267 		if (rxs->rxs_mbuf != NULL) {
5268 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5269 			m_freem(rxs->rxs_mbuf);
5270 			rxs->rxs_mbuf = NULL;
5271 		}
5272 	}
5273 }
5274 
5275 /*
5276  * Setup registers for RSS.
5277  *
5278  * XXX not yet VMDq support
5279  */
5280 static void
5281 wm_init_rss(struct wm_softc *sc)
5282 {
5283 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
5284 	int i;
5285 
5286 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
5287 
5288 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
5289 		unsigned int qid, reta_ent;
5290 
5291 		qid  = i % sc->sc_nqueues;
5292 		switch (sc->sc_type) {
5293 		case WM_T_82574:
5294 			reta_ent = __SHIFTIN(qid,
5295 			    RETA_ENT_QINDEX_MASK_82574);
5296 			break;
5297 		case WM_T_82575:
5298 			reta_ent = __SHIFTIN(qid,
5299 			    RETA_ENT_QINDEX1_MASK_82575);
5300 			break;
5301 		default:
5302 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
5303 			break;
5304 		}
5305 
5306 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
5307 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
5308 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
5309 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
5310 	}
5311 
5312 	rss_getkey((uint8_t *)rss_key);
5313 	for (i = 0; i < RSSRK_NUM_REGS; i++)
5314 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
5315 
5316 	if (sc->sc_type == WM_T_82574)
5317 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
5318 	else
5319 		mrqc = MRQC_ENABLE_RSS_MQ;
5320 
5321 	/*
5322 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
5323 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
5324 	 */
5325 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
5326 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
5327 #if 0
5328 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
5329 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
5330 #endif
5331 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
5332 
5333 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
5334 }
5335 
5336 /*
5337  * Adjust TX and RX queue numbers which the system actulally uses.
5338  *
5339  * The numbers are affected by below parameters.
5340  *     - The nubmer of hardware queues
5341  *     - The number of MSI-X vectors (= "nvectors" argument)
5342  *     - ncpu
5343  */
5344 static void
5345 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
5346 {
5347 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
5348 
5349 	if (nvectors < 2) {
5350 		sc->sc_nqueues = 1;
5351 		return;
5352 	}
5353 
5354 	switch (sc->sc_type) {
5355 	case WM_T_82572:
5356 		hw_ntxqueues = 2;
5357 		hw_nrxqueues = 2;
5358 		break;
5359 	case WM_T_82574:
5360 		hw_ntxqueues = 2;
5361 		hw_nrxqueues = 2;
5362 		break;
5363 	case WM_T_82575:
5364 		hw_ntxqueues = 4;
5365 		hw_nrxqueues = 4;
5366 		break;
5367 	case WM_T_82576:
5368 		hw_ntxqueues = 16;
5369 		hw_nrxqueues = 16;
5370 		break;
5371 	case WM_T_82580:
5372 	case WM_T_I350:
5373 	case WM_T_I354:
5374 		hw_ntxqueues = 8;
5375 		hw_nrxqueues = 8;
5376 		break;
5377 	case WM_T_I210:
5378 		hw_ntxqueues = 4;
5379 		hw_nrxqueues = 4;
5380 		break;
5381 	case WM_T_I211:
5382 		hw_ntxqueues = 2;
5383 		hw_nrxqueues = 2;
5384 		break;
5385 		/*
5386 		 * As below ethernet controllers does not support MSI-X,
5387 		 * this driver let them not use multiqueue.
5388 		 *     - WM_T_80003
5389 		 *     - WM_T_ICH8
5390 		 *     - WM_T_ICH9
5391 		 *     - WM_T_ICH10
5392 		 *     - WM_T_PCH
5393 		 *     - WM_T_PCH2
5394 		 *     - WM_T_PCH_LPT
5395 		 */
5396 	default:
5397 		hw_ntxqueues = 1;
5398 		hw_nrxqueues = 1;
5399 		break;
5400 	}
5401 
5402 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
5403 
5404 	/*
5405 	 * As queues more than MSI-X vectors cannot improve scaling, we limit
5406 	 * the number of queues used actually.
5407 	 */
5408 	if (nvectors < hw_nqueues + 1)
5409 		sc->sc_nqueues = nvectors - 1;
5410 	else
5411 		sc->sc_nqueues = hw_nqueues;
5412 
5413 	/*
5414 	 * As queues more then cpus cannot improve scaling, we limit
5415 	 * the number of queues used actually.
5416 	 */
5417 	if (ncpu < sc->sc_nqueues)
5418 		sc->sc_nqueues = ncpu;
5419 }
5420 
5421 static inline bool
5422 wm_is_using_msix(struct wm_softc *sc)
5423 {
5424 
5425 	return (sc->sc_nintrs > 1);
5426 }
5427 
5428 static inline bool
5429 wm_is_using_multiqueue(struct wm_softc *sc)
5430 {
5431 
5432 	return (sc->sc_nqueues > 1);
5433 }
5434 
5435 static int
5436 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
5437 {
5438 	struct wm_queue *wmq = &sc->sc_queue[qidx];
5439 
5440 	wmq->wmq_id = qidx;
5441 	wmq->wmq_intr_idx = intr_idx;
5442 	wmq->wmq_si = softint_establish(SOFTINT_NET
5443 #ifdef WM_MPSAFE
5444 	    | SOFTINT_MPSAFE
5445 #endif
5446 	    , wm_handle_queue, wmq);
5447 	if (wmq->wmq_si != NULL)
5448 		return 0;
5449 
5450 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
5451 	    wmq->wmq_id);
5452 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
5453 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
5454 	return ENOMEM;
5455 }
5456 
5457 /*
5458  * Both single interrupt MSI and INTx can use this function.
5459  */
5460 static int
5461 wm_setup_legacy(struct wm_softc *sc)
5462 {
5463 	pci_chipset_tag_t pc = sc->sc_pc;
5464 	const char *intrstr = NULL;
5465 	char intrbuf[PCI_INTRSTR_LEN];
5466 	int error;
5467 
5468 	error = wm_alloc_txrx_queues(sc);
5469 	if (error) {
5470 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
5471 		    error);
5472 		return ENOMEM;
5473 	}
5474 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
5475 	    sizeof(intrbuf));
5476 #ifdef WM_MPSAFE
5477 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
5478 #endif
5479 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
5480 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
5481 	if (sc->sc_ihs[0] == NULL) {
5482 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
5483 		    (pci_intr_type(pc, sc->sc_intrs[0])
5484 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
5485 		return ENOMEM;
5486 	}
5487 
5488 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
5489 	sc->sc_nintrs = 1;
5490 
5491 	return wm_softint_establish(sc, 0, 0);
5492 }
5493 
5494 static int
5495 wm_setup_msix(struct wm_softc *sc)
5496 {
5497 	void *vih;
5498 	kcpuset_t *affinity;
5499 	int qidx, error, intr_idx, txrx_established;
5500 	pci_chipset_tag_t pc = sc->sc_pc;
5501 	const char *intrstr = NULL;
5502 	char intrbuf[PCI_INTRSTR_LEN];
5503 	char intr_xname[INTRDEVNAMEBUF];
5504 
5505 	if (sc->sc_nqueues < ncpu) {
5506 		/*
5507 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
5508 		 * interrupts start from CPU#1.
5509 		 */
5510 		sc->sc_affinity_offset = 1;
5511 	} else {
5512 		/*
5513 		 * In this case, this device use all CPUs. So, we unify
5514 		 * affinitied cpu_index to msix vector number for readability.
5515 		 */
5516 		sc->sc_affinity_offset = 0;
5517 	}
5518 
5519 	error = wm_alloc_txrx_queues(sc);
5520 	if (error) {
5521 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
5522 		    error);
5523 		return ENOMEM;
5524 	}
5525 
5526 	kcpuset_create(&affinity, false);
5527 	intr_idx = 0;
5528 
5529 	/*
5530 	 * TX and RX
5531 	 */
5532 	txrx_established = 0;
5533 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5534 		struct wm_queue *wmq = &sc->sc_queue[qidx];
5535 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
5536 
5537 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
5538 		    sizeof(intrbuf));
5539 #ifdef WM_MPSAFE
5540 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
5541 		    PCI_INTR_MPSAFE, true);
5542 #endif
5543 		memset(intr_xname, 0, sizeof(intr_xname));
5544 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
5545 		    device_xname(sc->sc_dev), qidx);
5546 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
5547 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
5548 		if (vih == NULL) {
5549 			aprint_error_dev(sc->sc_dev,
5550 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
5551 			    intrstr ? " at " : "",
5552 			    intrstr ? intrstr : "");
5553 
5554 			goto fail;
5555 		}
5556 		kcpuset_zero(affinity);
5557 		/* Round-robin affinity */
5558 		kcpuset_set(affinity, affinity_to);
5559 		error = interrupt_distribute(vih, affinity, NULL);
5560 		if (error == 0) {
5561 			aprint_normal_dev(sc->sc_dev,
5562 			    "for TX and RX interrupting at %s affinity to %u\n",
5563 			    intrstr, affinity_to);
5564 		} else {
5565 			aprint_normal_dev(sc->sc_dev,
5566 			    "for TX and RX interrupting at %s\n", intrstr);
5567 		}
5568 		sc->sc_ihs[intr_idx] = vih;
5569 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
5570 			goto fail;
5571 		txrx_established++;
5572 		intr_idx++;
5573 	}
5574 
5575 	/* LINK */
5576 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
5577 	    sizeof(intrbuf));
5578 #ifdef WM_MPSAFE
5579 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
5580 #endif
5581 	memset(intr_xname, 0, sizeof(intr_xname));
5582 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
5583 	    device_xname(sc->sc_dev));
5584 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
5585 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
5586 	if (vih == NULL) {
5587 		aprint_error_dev(sc->sc_dev,
5588 		    "unable to establish MSI-X(for LINK)%s%s\n",
5589 		    intrstr ? " at " : "",
5590 		    intrstr ? intrstr : "");
5591 
5592 		goto fail;
5593 	}
5594 	/* Keep default affinity to LINK interrupt */
5595 	aprint_normal_dev(sc->sc_dev,
5596 	    "for LINK interrupting at %s\n", intrstr);
5597 	sc->sc_ihs[intr_idx] = vih;
5598 	sc->sc_link_intr_idx = intr_idx;
5599 
5600 	sc->sc_nintrs = sc->sc_nqueues + 1;
5601 	kcpuset_destroy(affinity);
5602 	return 0;
5603 
5604  fail:
5605 	for (qidx = 0; qidx < txrx_established; qidx++) {
5606 		struct wm_queue *wmq = &sc->sc_queue[qidx];
5607 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
5608 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
5609 	}
5610 
5611 	kcpuset_destroy(affinity);
5612 	return ENOMEM;
5613 }
5614 
5615 static void
5616 wm_unset_stopping_flags(struct wm_softc *sc)
5617 {
5618 	int i;
5619 
5620 	KASSERT(WM_CORE_LOCKED(sc));
5621 
5622 	/* Must unset stopping flags in ascending order. */
5623 	for (i = 0; i < sc->sc_nqueues; i++) {
5624 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5625 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5626 
5627 		mutex_enter(txq->txq_lock);
5628 		txq->txq_stopping = false;
5629 		mutex_exit(txq->txq_lock);
5630 
5631 		mutex_enter(rxq->rxq_lock);
5632 		rxq->rxq_stopping = false;
5633 		mutex_exit(rxq->rxq_lock);
5634 	}
5635 
5636 	sc->sc_core_stopping = false;
5637 }
5638 
5639 static void
5640 wm_set_stopping_flags(struct wm_softc *sc)
5641 {
5642 	int i;
5643 
5644 	KASSERT(WM_CORE_LOCKED(sc));
5645 
5646 	sc->sc_core_stopping = true;
5647 
5648 	/* Must set stopping flags in ascending order. */
5649 	for (i = 0; i < sc->sc_nqueues; i++) {
5650 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5651 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5652 
5653 		mutex_enter(rxq->rxq_lock);
5654 		rxq->rxq_stopping = true;
5655 		mutex_exit(rxq->rxq_lock);
5656 
5657 		mutex_enter(txq->txq_lock);
5658 		txq->txq_stopping = true;
5659 		mutex_exit(txq->txq_lock);
5660 	}
5661 }
5662 
5663 /*
5664  * Write interrupt interval value to ITR or EITR
5665  */
5666 static void
5667 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
5668 {
5669 
5670 	if (!wmq->wmq_set_itr)
5671 		return;
5672 
5673 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5674 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
5675 
5676 		/*
5677 		 * 82575 doesn't have CNT_INGR field.
5678 		 * So, overwrite counter field by software.
5679 		 */
5680 		if (sc->sc_type == WM_T_82575)
5681 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
5682 		else
5683 			eitr |= EITR_CNT_INGR;
5684 
5685 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
5686 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
5687 		/*
5688 		 * 82574 has both ITR and EITR. SET EITR when we use
5689 		 * the multi queue function with MSI-X.
5690 		 */
5691 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
5692 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
5693 	} else {
5694 		KASSERT(wmq->wmq_id == 0);
5695 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
5696 	}
5697 
5698 	wmq->wmq_set_itr = false;
5699 }
5700 
5701 /*
5702  * TODO
5703  * Below dynamic calculation of itr is almost the same as linux igb,
5704  * however it does not fit to wm(4). So, we will have been disable AIM
5705  * until we will find appropriate calculation of itr.
5706  */
5707 /*
5708  * calculate interrupt interval value to be going to write register in
5709  * wm_itrs_writereg(). This function does not write ITR/EITR register.
5710  */
5711 static void
5712 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
5713 {
5714 #ifdef NOTYET
5715 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
5716 	struct wm_txqueue *txq = &wmq->wmq_txq;
5717 	uint32_t avg_size = 0;
5718 	uint32_t new_itr;
5719 
5720 	if (rxq->rxq_packets)
5721 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
5722 	if (txq->txq_packets)
5723 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
5724 
5725 	if (avg_size == 0) {
5726 		new_itr = 450; /* restore default value */
5727 		goto out;
5728 	}
5729 
5730 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
5731 	avg_size += 24;
5732 
5733 	/* Don't starve jumbo frames */
5734 	avg_size = uimin(avg_size, 3000);
5735 
5736 	/* Give a little boost to mid-size frames */
5737 	if ((avg_size > 300) && (avg_size < 1200))
5738 		new_itr = avg_size / 3;
5739 	else
5740 		new_itr = avg_size / 2;
5741 
5742 out:
5743 	/*
5744 	 * The usage of 82574 and 82575 EITR is different from otther NEWQUEUE
5745 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
5746 	 */
5747 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
5748 		new_itr *= 4;
5749 
5750 	if (new_itr != wmq->wmq_itr) {
5751 		wmq->wmq_itr = new_itr;
5752 		wmq->wmq_set_itr = true;
5753 	} else
5754 		wmq->wmq_set_itr = false;
5755 
5756 	rxq->rxq_packets = 0;
5757 	rxq->rxq_bytes = 0;
5758 	txq->txq_packets = 0;
5759 	txq->txq_bytes = 0;
5760 #endif
5761 }
5762 
5763 static void
5764 wm_init_sysctls(struct wm_softc *sc)
5765 {
5766 	struct sysctllog **log;
5767 	const struct sysctlnode *rnode, *cnode;
5768 	int rv;
5769 	const char *dvname;
5770 
5771 	log = &sc->sc_sysctllog;
5772 	dvname = device_xname(sc->sc_dev);
5773 
5774 	rv = sysctl_createv(log, 0, NULL, &rnode,
5775 	    0, CTLTYPE_NODE, dvname,
5776 	    SYSCTL_DESCR("wm information and settings"),
5777 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
5778 	if (rv != 0)
5779 		goto err;
5780 
5781 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
5782 	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
5783 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
5784 	if (rv != 0)
5785 		goto teardown;
5786 
5787 	return;
5788 
5789 teardown:
5790 	sysctl_teardown(log);
5791 err:
5792 	sc->sc_sysctllog = NULL;
5793 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
5794 	    __func__, rv);
5795 }
5796 
5797 /*
5798  * wm_init:		[ifnet interface function]
5799  *
5800  *	Initialize the interface.
5801  */
5802 static int
5803 wm_init(struct ifnet *ifp)
5804 {
5805 	struct wm_softc *sc = ifp->if_softc;
5806 	int ret;
5807 
5808 	WM_CORE_LOCK(sc);
5809 	ret = wm_init_locked(ifp);
5810 	WM_CORE_UNLOCK(sc);
5811 
5812 	return ret;
5813 }
5814 
5815 static int
5816 wm_init_locked(struct ifnet *ifp)
5817 {
5818 	struct wm_softc *sc = ifp->if_softc;
5819 	struct ethercom *ec = &sc->sc_ethercom;
5820 	int i, j, trynum, error = 0;
5821 	uint32_t reg, sfp_mask = 0;
5822 
5823 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5824 		device_xname(sc->sc_dev), __func__));
5825 	KASSERT(WM_CORE_LOCKED(sc));
5826 
5827 	/*
5828 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGMENT is set.
5829 	 * There is a small but measurable benefit to avoiding the adjusment
5830 	 * of the descriptor so that the headers are aligned, for normal mtu,
5831 	 * on such platforms.  One possibility is that the DMA itself is
5832 	 * slightly more efficient if the front of the entire packet (instead
5833 	 * of the front of the headers) is aligned.
5834 	 *
5835 	 * Note we must always set align_tweak to 0 if we are using
5836 	 * jumbo frames.
5837 	 */
5838 #ifdef __NO_STRICT_ALIGNMENT
5839 	sc->sc_align_tweak = 0;
5840 #else
5841 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
5842 		sc->sc_align_tweak = 0;
5843 	else
5844 		sc->sc_align_tweak = 2;
5845 #endif /* __NO_STRICT_ALIGNMENT */
5846 
5847 	/* Cancel any pending I/O. */
5848 	wm_stop_locked(ifp, false, false);
5849 
5850 	/* Update statistics before reset */
5851 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
5852 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
5853 
5854 	/* PCH_SPT hardware workaround */
5855 	if (sc->sc_type == WM_T_PCH_SPT)
5856 		wm_flush_desc_rings(sc);
5857 
5858 	/* Reset the chip to a known state. */
5859 	wm_reset(sc);
5860 
5861 	/*
5862 	 * AMT based hardware can now take control from firmware
5863 	 * Do this after reset.
5864 	 */
5865 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
5866 		wm_get_hw_control(sc);
5867 
5868 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
5869 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
5870 		wm_legacy_irq_quirk_spt(sc);
5871 
5872 	/* Init hardware bits */
5873 	wm_initialize_hardware_bits(sc);
5874 
5875 	/* Reset the PHY. */
5876 	if (sc->sc_flags & WM_F_HAS_MII)
5877 		wm_gmii_reset(sc);
5878 
5879 	if (sc->sc_type >= WM_T_ICH8) {
5880 		reg = CSR_READ(sc, WMREG_GCR);
5881 		/*
5882 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
5883 		 * default after reset.
5884 		 */
5885 		if (sc->sc_type == WM_T_ICH8)
5886 			reg |= GCR_NO_SNOOP_ALL;
5887 		else
5888 			reg &= ~GCR_NO_SNOOP_ALL;
5889 		CSR_WRITE(sc, WMREG_GCR, reg);
5890 	}
5891 	if ((sc->sc_type >= WM_T_ICH8)
5892 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
5893 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
5894 
5895 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
5896 		reg |= CTRL_EXT_RO_DIS;
5897 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5898 	}
5899 
5900 	/* Calculate (E)ITR value */
5901 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
5902 		/*
5903 		 * For NEWQUEUE's EITR (except for 82575).
5904 		 * 82575's EITR should be set same throttling value as other
5905 		 * old controllers' ITR because the interrupt/sec calculation
5906 		 * is the same, that is, 1,000,000,000 / (N * 256).
5907 		 *
5908 		 * 82574's EITR should be set same throttling value as ITR.
5909 		 *
5910 		 * For N interrupts/sec, set this value to:
5911 		 * 1,000,000 / N in contrast to ITR throttoling value.
5912 		 */
5913 		sc->sc_itr_init = 450;
5914 	} else if (sc->sc_type >= WM_T_82543) {
5915 		/*
5916 		 * Set up the interrupt throttling register (units of 256ns)
5917 		 * Note that a footnote in Intel's documentation says this
5918 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
5919 		 * or 10Mbit mode.  Empirically, it appears to be the case
5920 		 * that that is also true for the 1024ns units of the other
5921 		 * interrupt-related timer registers -- so, really, we ought
5922 		 * to divide this value by 4 when the link speed is low.
5923 		 *
5924 		 * XXX implement this division at link speed change!
5925 		 */
5926 
5927 		/*
5928 		 * For N interrupts/sec, set this value to:
5929 		 * 1,000,000,000 / (N * 256).  Note that we set the
5930 		 * absolute and packet timer values to this value
5931 		 * divided by 4 to get "simple timer" behavior.
5932 		 */
5933 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
5934 	}
5935 
5936 	error = wm_init_txrx_queues(sc);
5937 	if (error)
5938 		goto out;
5939 
5940 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
5941 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
5942 	    (sc->sc_type >= WM_T_82575))
5943 		wm_serdes_power_up_link_82575(sc);
5944 
5945 	/* Clear out the VLAN table -- we don't use it (yet). */
5946 	CSR_WRITE(sc, WMREG_VET, 0);
5947 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
5948 		trynum = 10; /* Due to hw errata */
5949 	else
5950 		trynum = 1;
5951 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
5952 		for (j = 0; j < trynum; j++)
5953 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
5954 
5955 	/*
5956 	 * Set up flow-control parameters.
5957 	 *
5958 	 * XXX Values could probably stand some tuning.
5959 	 */
5960 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
5961 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
5962 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
5963 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
5964 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
5965 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
5966 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
5967 	}
5968 
5969 	sc->sc_fcrtl = FCRTL_DFLT;
5970 	if (sc->sc_type < WM_T_82543) {
5971 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
5972 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
5973 	} else {
5974 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
5975 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
5976 	}
5977 
5978 	if (sc->sc_type == WM_T_80003)
5979 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
5980 	else
5981 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
5982 
5983 	/* Writes the control register. */
5984 	wm_set_vlan(sc);
5985 
5986 	if (sc->sc_flags & WM_F_HAS_MII) {
5987 		uint16_t kmreg;
5988 
5989 		switch (sc->sc_type) {
5990 		case WM_T_80003:
5991 		case WM_T_ICH8:
5992 		case WM_T_ICH9:
5993 		case WM_T_ICH10:
5994 		case WM_T_PCH:
5995 		case WM_T_PCH2:
5996 		case WM_T_PCH_LPT:
5997 		case WM_T_PCH_SPT:
5998 		case WM_T_PCH_CNP:
5999 			/*
6000 			 * Set the mac to wait the maximum time between each
6001 			 * iteration and increase the max iterations when
6002 			 * polling the phy; this fixes erroneous timeouts at
6003 			 * 10Mbps.
6004 			 */
6005 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
6006 			    0xFFFF);
6007 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
6008 			    &kmreg);
6009 			kmreg |= 0x3F;
6010 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
6011 			    kmreg);
6012 			break;
6013 		default:
6014 			break;
6015 		}
6016 
6017 		if (sc->sc_type == WM_T_80003) {
6018 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
6019 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
6020 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6021 
6022 			/* Bypass RX and TX FIFO's */
6023 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
6024 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
6025 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
6026 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
6027 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
6028 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
6029 		}
6030 	}
6031 #if 0
6032 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
6033 #endif
6034 
6035 	/* Set up checksum offload parameters. */
6036 	reg = CSR_READ(sc, WMREG_RXCSUM);
6037 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
6038 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
6039 		reg |= RXCSUM_IPOFL;
6040 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
6041 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
6042 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
6043 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
6044 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
6045 
6046 	/* Set registers about MSI-X */
6047 	if (wm_is_using_msix(sc)) {
6048 		uint32_t ivar, qintr_idx;
6049 		struct wm_queue *wmq;
6050 		unsigned int qid;
6051 
6052 		if (sc->sc_type == WM_T_82575) {
6053 			/* Interrupt control */
6054 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
6055 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
6056 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6057 
6058 			/* TX and RX */
6059 			for (i = 0; i < sc->sc_nqueues; i++) {
6060 				wmq = &sc->sc_queue[i];
6061 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
6062 				    EITR_TX_QUEUE(wmq->wmq_id)
6063 				    | EITR_RX_QUEUE(wmq->wmq_id));
6064 			}
6065 			/* Link status */
6066 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
6067 			    EITR_OTHER);
6068 		} else if (sc->sc_type == WM_T_82574) {
6069 			/* Interrupt control */
6070 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
6071 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
6072 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6073 
6074 			/*
6075 			 * Workaround issue with spurious interrupts
6076 			 * in MSI-X mode.
6077 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
6078 			 * initialized yet. So re-initialize WMREG_RFCTL here.
6079 			 */
6080 			reg = CSR_READ(sc, WMREG_RFCTL);
6081 			reg |= WMREG_RFCTL_ACKDIS;
6082 			CSR_WRITE(sc, WMREG_RFCTL, reg);
6083 
6084 			ivar = 0;
6085 			/* TX and RX */
6086 			for (i = 0; i < sc->sc_nqueues; i++) {
6087 				wmq = &sc->sc_queue[i];
6088 				qid = wmq->wmq_id;
6089 				qintr_idx = wmq->wmq_intr_idx;
6090 
6091 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
6092 				    IVAR_TX_MASK_Q_82574(qid));
6093 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
6094 				    IVAR_RX_MASK_Q_82574(qid));
6095 			}
6096 			/* Link status */
6097 			ivar |= __SHIFTIN((IVAR_VALID_82574
6098 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
6099 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
6100 		} else {
6101 			/* Interrupt control */
6102 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
6103 			    | GPIE_EIAME | GPIE_PBA);
6104 
6105 			switch (sc->sc_type) {
6106 			case WM_T_82580:
6107 			case WM_T_I350:
6108 			case WM_T_I354:
6109 			case WM_T_I210:
6110 			case WM_T_I211:
6111 				/* TX and RX */
6112 				for (i = 0; i < sc->sc_nqueues; i++) {
6113 					wmq = &sc->sc_queue[i];
6114 					qid = wmq->wmq_id;
6115 					qintr_idx = wmq->wmq_intr_idx;
6116 
6117 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
6118 					ivar &= ~IVAR_TX_MASK_Q(qid);
6119 					ivar |= __SHIFTIN((qintr_idx
6120 						| IVAR_VALID),
6121 					    IVAR_TX_MASK_Q(qid));
6122 					ivar &= ~IVAR_RX_MASK_Q(qid);
6123 					ivar |= __SHIFTIN((qintr_idx
6124 						| IVAR_VALID),
6125 					    IVAR_RX_MASK_Q(qid));
6126 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
6127 				}
6128 				break;
6129 			case WM_T_82576:
6130 				/* TX and RX */
6131 				for (i = 0; i < sc->sc_nqueues; i++) {
6132 					wmq = &sc->sc_queue[i];
6133 					qid = wmq->wmq_id;
6134 					qintr_idx = wmq->wmq_intr_idx;
6135 
6136 					ivar = CSR_READ(sc,
6137 					    WMREG_IVAR_Q_82576(qid));
6138 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
6139 					ivar |= __SHIFTIN((qintr_idx
6140 						| IVAR_VALID),
6141 					    IVAR_TX_MASK_Q_82576(qid));
6142 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
6143 					ivar |= __SHIFTIN((qintr_idx
6144 						| IVAR_VALID),
6145 					    IVAR_RX_MASK_Q_82576(qid));
6146 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
6147 					    ivar);
6148 				}
6149 				break;
6150 			default:
6151 				break;
6152 			}
6153 
6154 			/* Link status */
6155 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
6156 			    IVAR_MISC_OTHER);
6157 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
6158 		}
6159 
6160 		if (wm_is_using_multiqueue(sc)) {
6161 			wm_init_rss(sc);
6162 
6163 			/*
6164 			** NOTE: Receive Full-Packet Checksum Offload
6165 			** is mutually exclusive with Multiqueue. However
6166 			** this is not the same as TCP/IP checksums which
6167 			** still work.
6168 			*/
6169 			reg = CSR_READ(sc, WMREG_RXCSUM);
6170 			reg |= RXCSUM_PCSD;
6171 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
6172 		}
6173 	}
6174 
6175 	/* Set up the interrupt registers. */
6176 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
6177 
6178 	/* Enable SFP module insertion interrupt if it's required */
6179 	if ((sc->sc_flags & WM_F_SFP) != 0) {
6180 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
6181 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6182 		sfp_mask = ICR_GPI(0);
6183 	}
6184 
6185 	if (wm_is_using_msix(sc)) {
6186 		uint32_t mask;
6187 		struct wm_queue *wmq;
6188 
6189 		switch (sc->sc_type) {
6190 		case WM_T_82574:
6191 			mask = 0;
6192 			for (i = 0; i < sc->sc_nqueues; i++) {
6193 				wmq = &sc->sc_queue[i];
6194 				mask |= ICR_TXQ(wmq->wmq_id);
6195 				mask |= ICR_RXQ(wmq->wmq_id);
6196 			}
6197 			mask |= ICR_OTHER;
6198 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
6199 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
6200 			break;
6201 		default:
6202 			if (sc->sc_type == WM_T_82575) {
6203 				mask = 0;
6204 				for (i = 0; i < sc->sc_nqueues; i++) {
6205 					wmq = &sc->sc_queue[i];
6206 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
6207 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
6208 				}
6209 				mask |= EITR_OTHER;
6210 			} else {
6211 				mask = 0;
6212 				for (i = 0; i < sc->sc_nqueues; i++) {
6213 					wmq = &sc->sc_queue[i];
6214 					mask |= 1 << wmq->wmq_intr_idx;
6215 				}
6216 				mask |= 1 << sc->sc_link_intr_idx;
6217 			}
6218 			CSR_WRITE(sc, WMREG_EIAC, mask);
6219 			CSR_WRITE(sc, WMREG_EIAM, mask);
6220 			CSR_WRITE(sc, WMREG_EIMS, mask);
6221 
6222 			/* For other interrupts */
6223 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
6224 			break;
6225 		}
6226 	} else {
6227 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
6228 		    ICR_RXO | ICR_RXT0 | sfp_mask;
6229 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
6230 	}
6231 
6232 	/* Set up the inter-packet gap. */
6233 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
6234 
6235 	if (sc->sc_type >= WM_T_82543) {
6236 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6237 			struct wm_queue *wmq = &sc->sc_queue[qidx];
6238 			wm_itrs_writereg(sc, wmq);
6239 		}
6240 		/*
6241 		 * Link interrupts occur much less than TX
6242 		 * interrupts and RX interrupts. So, we don't
6243 		 * tune EINTR(WM_MSIX_LINKINTR_IDX) value like
6244 		 * FreeBSD's if_igb.
6245 		 */
6246 	}
6247 
6248 	/* Set the VLAN ethernetype. */
6249 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
6250 
6251 	/*
6252 	 * Set up the transmit control register; we start out with
6253 	 * a collision distance suitable for FDX, but update it whe
6254 	 * we resolve the media type.
6255 	 */
6256 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
6257 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
6258 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6259 	if (sc->sc_type >= WM_T_82571)
6260 		sc->sc_tctl |= TCTL_MULR;
6261 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6262 
6263 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6264 		/* Write TDT after TCTL.EN is set. See the document. */
6265 		CSR_WRITE(sc, WMREG_TDT(0), 0);
6266 	}
6267 
6268 	if (sc->sc_type == WM_T_80003) {
6269 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
6270 		reg &= ~TCTL_EXT_GCEX_MASK;
6271 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
6272 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
6273 	}
6274 
6275 	/* Set the media. */
6276 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
6277 		goto out;
6278 
6279 	/* Configure for OS presence */
6280 	wm_init_manageability(sc);
6281 
6282 	/*
6283 	 * Set up the receive control register; we actually program the
6284 	 * register when we set the receive filter. Use multicast address
6285 	 * offset type 0.
6286 	 *
6287 	 * Only the i82544 has the ability to strip the incoming CRC, so we
6288 	 * don't enable that feature.
6289 	 */
6290 	sc->sc_mchash_type = 0;
6291 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
6292 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
6293 
6294 	/* 82574 use one buffer extended Rx descriptor. */
6295 	if (sc->sc_type == WM_T_82574)
6296 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
6297 
6298 	/*
6299 	 * The I350 has a bug where it always strips the CRC whether
6300 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
6301 	 */
6302 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
6303 	    || (sc->sc_type == WM_T_I210))
6304 		sc->sc_rctl |= RCTL_SECRC;
6305 
6306 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
6307 	    && (ifp->if_mtu > ETHERMTU)) {
6308 		sc->sc_rctl |= RCTL_LPE;
6309 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6310 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
6311 	}
6312 
6313 	if (MCLBYTES == 2048)
6314 		sc->sc_rctl |= RCTL_2k;
6315 	else {
6316 		if (sc->sc_type >= WM_T_82543) {
6317 			switch (MCLBYTES) {
6318 			case 4096:
6319 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
6320 				break;
6321 			case 8192:
6322 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
6323 				break;
6324 			case 16384:
6325 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
6326 				break;
6327 			default:
6328 				panic("wm_init: MCLBYTES %d unsupported",
6329 				    MCLBYTES);
6330 				break;
6331 			}
6332 		} else
6333 			panic("wm_init: i82542 requires MCLBYTES = 2048");
6334 	}
6335 
6336 	/* Enable ECC */
6337 	switch (sc->sc_type) {
6338 	case WM_T_82571:
6339 		reg = CSR_READ(sc, WMREG_PBA_ECC);
6340 		reg |= PBA_ECC_CORR_EN;
6341 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
6342 		break;
6343 	case WM_T_PCH_LPT:
6344 	case WM_T_PCH_SPT:
6345 	case WM_T_PCH_CNP:
6346 		reg = CSR_READ(sc, WMREG_PBECCSTS);
6347 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
6348 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
6349 
6350 		sc->sc_ctrl |= CTRL_MEHE;
6351 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6352 		break;
6353 	default:
6354 		break;
6355 	}
6356 
6357 	/*
6358 	 * Set the receive filter.
6359 	 *
6360 	 * For 82575 and 82576, the RX descriptors must be initialized after
6361 	 * the setting of RCTL.EN in wm_set_filter()
6362 	 */
6363 	wm_set_filter(sc);
6364 
6365 	/* On 575 and later set RDT only if RX enabled */
6366 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6367 		int qidx;
6368 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6369 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
6370 			for (i = 0; i < WM_NRXDESC; i++) {
6371 				mutex_enter(rxq->rxq_lock);
6372 				wm_init_rxdesc(rxq, i);
6373 				mutex_exit(rxq->rxq_lock);
6374 
6375 			}
6376 		}
6377 	}
6378 
6379 	wm_unset_stopping_flags(sc);
6380 
6381 	/* Start the one second link check clock. */
6382 	callout_schedule(&sc->sc_tick_ch, hz);
6383 
6384 	/* ...all done! */
6385 	ifp->if_flags |= IFF_RUNNING;
6386 
6387  out:
6388 	/* Save last flags for the callback */
6389 	sc->sc_if_flags = ifp->if_flags;
6390 	sc->sc_ec_capenable = ec->ec_capenable;
6391 	if (error)
6392 		log(LOG_ERR, "%s: interface not running\n",
6393 		    device_xname(sc->sc_dev));
6394 	return error;
6395 }
6396 
6397 /*
6398  * wm_stop:		[ifnet interface function]
6399  *
6400  *	Stop transmission on the interface.
6401  */
6402 static void
6403 wm_stop(struct ifnet *ifp, int disable)
6404 {
6405 	struct wm_softc *sc = ifp->if_softc;
6406 
6407 	ASSERT_SLEEPABLE();
6408 
6409 	WM_CORE_LOCK(sc);
6410 	wm_stop_locked(ifp, disable ? true : false, true);
6411 	WM_CORE_UNLOCK(sc);
6412 
6413 	/*
6414 	 * After wm_set_stopping_flags(), it is guaranteed
6415 	 * wm_handle_queue_work() does not call workqueue_enqueue().
6416 	 * However, workqueue_wait() cannot call in wm_stop_locked()
6417 	 * because it can sleep...
6418 	 * so, call workqueue_wait() here.
6419 	 */
6420 	for (int i = 0; i < sc->sc_nqueues; i++)
6421 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
6422 }
6423 
6424 static void
6425 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
6426 {
6427 	struct wm_softc *sc = ifp->if_softc;
6428 	struct wm_txsoft *txs;
6429 	int i, qidx;
6430 
6431 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
6432 		device_xname(sc->sc_dev), __func__));
6433 	KASSERT(WM_CORE_LOCKED(sc));
6434 
6435 	wm_set_stopping_flags(sc);
6436 
6437 	if (sc->sc_flags & WM_F_HAS_MII) {
6438 		/* Down the MII. */
6439 		mii_down(&sc->sc_mii);
6440 	} else {
6441 #if 0
6442 		/* Should we clear PHY's status properly? */
6443 		wm_reset(sc);
6444 #endif
6445 	}
6446 
6447 	/* Stop the transmit and receive processes. */
6448 	CSR_WRITE(sc, WMREG_TCTL, 0);
6449 	CSR_WRITE(sc, WMREG_RCTL, 0);
6450 	sc->sc_rctl &= ~RCTL_EN;
6451 
6452 	/*
6453 	 * Clear the interrupt mask to ensure the device cannot assert its
6454 	 * interrupt line.
6455 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
6456 	 * service any currently pending or shared interrupt.
6457 	 */
6458 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
6459 	sc->sc_icr = 0;
6460 	if (wm_is_using_msix(sc)) {
6461 		if (sc->sc_type != WM_T_82574) {
6462 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
6463 			CSR_WRITE(sc, WMREG_EIAC, 0);
6464 		} else
6465 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
6466 	}
6467 
6468 	/*
6469 	 * Stop callouts after interrupts are disabled; if we have
6470 	 * to wait for them, we will be releasing the CORE_LOCK
6471 	 * briefly, which will unblock interrupts on the current CPU.
6472 	 */
6473 
6474 	/* Stop the one second clock. */
6475 	if (wait)
6476 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
6477 	else
6478 		callout_stop(&sc->sc_tick_ch);
6479 
6480 	/* Stop the 82547 Tx FIFO stall check timer. */
6481 	if (sc->sc_type == WM_T_82547) {
6482 		if (wait)
6483 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
6484 		else
6485 			callout_stop(&sc->sc_txfifo_ch);
6486 	}
6487 
6488 	/* Release any queued transmit buffers. */
6489 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6490 		struct wm_queue *wmq = &sc->sc_queue[qidx];
6491 		struct wm_txqueue *txq = &wmq->wmq_txq;
6492 		mutex_enter(txq->txq_lock);
6493 		txq->txq_sending = false; /* Ensure watchdog disabled */
6494 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6495 			txs = &txq->txq_soft[i];
6496 			if (txs->txs_mbuf != NULL) {
6497 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
6498 				m_freem(txs->txs_mbuf);
6499 				txs->txs_mbuf = NULL;
6500 			}
6501 		}
6502 		mutex_exit(txq->txq_lock);
6503 	}
6504 
6505 	/* Mark the interface as down and cancel the watchdog timer. */
6506 	ifp->if_flags &= ~IFF_RUNNING;
6507 
6508 	if (disable) {
6509 		for (i = 0; i < sc->sc_nqueues; i++) {
6510 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6511 			mutex_enter(rxq->rxq_lock);
6512 			wm_rxdrain(rxq);
6513 			mutex_exit(rxq->rxq_lock);
6514 		}
6515 	}
6516 
6517 #if 0 /* notyet */
6518 	if (sc->sc_type >= WM_T_82544)
6519 		CSR_WRITE(sc, WMREG_WUC, 0);
6520 #endif
6521 }
6522 
6523 static void
6524 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
6525 {
6526 	struct mbuf *m;
6527 	int i;
6528 
6529 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
6530 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
6531 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
6532 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
6533 		    m->m_data, m->m_len, m->m_flags);
6534 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
6535 	    i, i == 1 ? "" : "s");
6536 }
6537 
6538 /*
6539  * wm_82547_txfifo_stall:
6540  *
6541  *	Callout used to wait for the 82547 Tx FIFO to drain,
6542  *	reset the FIFO pointers, and restart packet transmission.
6543  */
6544 static void
6545 wm_82547_txfifo_stall(void *arg)
6546 {
6547 	struct wm_softc *sc = arg;
6548 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6549 
6550 	mutex_enter(txq->txq_lock);
6551 
6552 	if (txq->txq_stopping)
6553 		goto out;
6554 
6555 	if (txq->txq_fifo_stall) {
6556 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
6557 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
6558 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
6559 			/*
6560 			 * Packets have drained.  Stop transmitter, reset
6561 			 * FIFO pointers, restart transmitter, and kick
6562 			 * the packet queue.
6563 			 */
6564 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
6565 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
6566 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
6567 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
6568 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
6569 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
6570 			CSR_WRITE(sc, WMREG_TCTL, tctl);
6571 			CSR_WRITE_FLUSH(sc);
6572 
6573 			txq->txq_fifo_head = 0;
6574 			txq->txq_fifo_stall = 0;
6575 			wm_start_locked(&sc->sc_ethercom.ec_if);
6576 		} else {
6577 			/*
6578 			 * Still waiting for packets to drain; try again in
6579 			 * another tick.
6580 			 */
6581 			callout_schedule(&sc->sc_txfifo_ch, 1);
6582 		}
6583 	}
6584 
6585 out:
6586 	mutex_exit(txq->txq_lock);
6587 }
6588 
6589 /*
6590  * wm_82547_txfifo_bugchk:
6591  *
6592  *	Check for bug condition in the 82547 Tx FIFO.  We need to
6593  *	prevent enqueueing a packet that would wrap around the end
6594  *	if the Tx FIFO ring buffer, otherwise the chip will croak.
6595  *
6596  *	We do this by checking the amount of space before the end
6597  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
6598  *	the Tx FIFO, wait for all remaining packets to drain, reset
6599  *	the internal FIFO pointers to the beginning, and restart
6600  *	transmission on the interface.
6601  */
6602 #define	WM_FIFO_HDR		0x10
6603 #define	WM_82547_PAD_LEN	0x3e0
6604 static int
6605 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
6606 {
6607 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6608 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
6609 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
6610 
6611 	/* Just return if already stalled. */
6612 	if (txq->txq_fifo_stall)
6613 		return 1;
6614 
6615 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
6616 		/* Stall only occurs in half-duplex mode. */
6617 		goto send_packet;
6618 	}
6619 
6620 	if (len >= WM_82547_PAD_LEN + space) {
6621 		txq->txq_fifo_stall = 1;
6622 		callout_schedule(&sc->sc_txfifo_ch, 1);
6623 		return 1;
6624 	}
6625 
6626  send_packet:
6627 	txq->txq_fifo_head += len;
6628 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
6629 		txq->txq_fifo_head -= txq->txq_fifo_size;
6630 
6631 	return 0;
6632 }
6633 
6634 static int
6635 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
6636 {
6637 	int error;
6638 
6639 	/*
6640 	 * Allocate the control data structures, and create and load the
6641 	 * DMA map for it.
6642 	 *
6643 	 * NOTE: All Tx descriptors must be in the same 4G segment of
6644 	 * memory.  So must Rx descriptors.  We simplify by allocating
6645 	 * both sets within the same 4G segment.
6646 	 */
6647 	if (sc->sc_type < WM_T_82544)
6648 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
6649 	else
6650 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
6651 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6652 		txq->txq_descsize = sizeof(nq_txdesc_t);
6653 	else
6654 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
6655 
6656 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
6657 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
6658 		    1, &txq->txq_desc_rseg, 0)) != 0) {
6659 		aprint_error_dev(sc->sc_dev,
6660 		    "unable to allocate TX control data, error = %d\n",
6661 		    error);
6662 		goto fail_0;
6663 	}
6664 
6665 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
6666 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
6667 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
6668 		aprint_error_dev(sc->sc_dev,
6669 		    "unable to map TX control data, error = %d\n", error);
6670 		goto fail_1;
6671 	}
6672 
6673 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
6674 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
6675 		aprint_error_dev(sc->sc_dev,
6676 		    "unable to create TX control data DMA map, error = %d\n",
6677 		    error);
6678 		goto fail_2;
6679 	}
6680 
6681 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
6682 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
6683 		aprint_error_dev(sc->sc_dev,
6684 		    "unable to load TX control data DMA map, error = %d\n",
6685 		    error);
6686 		goto fail_3;
6687 	}
6688 
6689 	return 0;
6690 
6691  fail_3:
6692 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
6693  fail_2:
6694 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
6695 	    WM_TXDESCS_SIZE(txq));
6696  fail_1:
6697 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
6698  fail_0:
6699 	return error;
6700 }
6701 
6702 static void
6703 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
6704 {
6705 
6706 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
6707 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
6708 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
6709 	    WM_TXDESCS_SIZE(txq));
6710 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
6711 }
6712 
6713 static int
6714 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
6715 {
6716 	int error;
6717 	size_t rxq_descs_size;
6718 
6719 	/*
6720 	 * Allocate the control data structures, and create and load the
6721 	 * DMA map for it.
6722 	 *
6723 	 * NOTE: All Tx descriptors must be in the same 4G segment of
6724 	 * memory.  So must Rx descriptors.  We simplify by allocating
6725 	 * both sets within the same 4G segment.
6726 	 */
6727 	rxq->rxq_ndesc = WM_NRXDESC;
6728 	if (sc->sc_type == WM_T_82574)
6729 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
6730 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6731 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
6732 	else
6733 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
6734 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
6735 
6736 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
6737 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
6738 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
6739 		aprint_error_dev(sc->sc_dev,
6740 		    "unable to allocate RX control data, error = %d\n",
6741 		    error);
6742 		goto fail_0;
6743 	}
6744 
6745 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
6746 		    rxq->rxq_desc_rseg, rxq_descs_size,
6747 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
6748 		aprint_error_dev(sc->sc_dev,
6749 		    "unable to map RX control data, error = %d\n", error);
6750 		goto fail_1;
6751 	}
6752 
6753 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
6754 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
6755 		aprint_error_dev(sc->sc_dev,
6756 		    "unable to create RX control data DMA map, error = %d\n",
6757 		    error);
6758 		goto fail_2;
6759 	}
6760 
6761 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
6762 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
6763 		aprint_error_dev(sc->sc_dev,
6764 		    "unable to load RX control data DMA map, error = %d\n",
6765 		    error);
6766 		goto fail_3;
6767 	}
6768 
6769 	return 0;
6770 
6771  fail_3:
6772 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
6773  fail_2:
6774 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
6775 	    rxq_descs_size);
6776  fail_1:
6777 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
6778  fail_0:
6779 	return error;
6780 }
6781 
6782 static void
6783 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
6784 {
6785 
6786 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
6787 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
6788 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
6789 	    rxq->rxq_descsize * rxq->rxq_ndesc);
6790 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
6791 }
6792 
6793 
6794 static int
6795 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
6796 {
6797 	int i, error;
6798 
6799 	/* Create the transmit buffer DMA maps. */
6800 	WM_TXQUEUELEN(txq) =
6801 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
6802 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
6803 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6804 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
6805 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
6806 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
6807 			aprint_error_dev(sc->sc_dev,
6808 			    "unable to create Tx DMA map %d, error = %d\n",
6809 			    i, error);
6810 			goto fail;
6811 		}
6812 	}
6813 
6814 	return 0;
6815 
6816  fail:
6817 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6818 		if (txq->txq_soft[i].txs_dmamap != NULL)
6819 			bus_dmamap_destroy(sc->sc_dmat,
6820 			    txq->txq_soft[i].txs_dmamap);
6821 	}
6822 	return error;
6823 }
6824 
6825 static void
6826 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
6827 {
6828 	int i;
6829 
6830 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6831 		if (txq->txq_soft[i].txs_dmamap != NULL)
6832 			bus_dmamap_destroy(sc->sc_dmat,
6833 			    txq->txq_soft[i].txs_dmamap);
6834 	}
6835 }
6836 
6837 static int
6838 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
6839 {
6840 	int i, error;
6841 
6842 	/* Create the receive buffer DMA maps. */
6843 	for (i = 0; i < rxq->rxq_ndesc; i++) {
6844 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
6845 			    MCLBYTES, 0, 0,
6846 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
6847 			aprint_error_dev(sc->sc_dev,
6848 			    "unable to create Rx DMA map %d error = %d\n",
6849 			    i, error);
6850 			goto fail;
6851 		}
6852 		rxq->rxq_soft[i].rxs_mbuf = NULL;
6853 	}
6854 
6855 	return 0;
6856 
6857  fail:
6858 	for (i = 0; i < rxq->rxq_ndesc; i++) {
6859 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
6860 			bus_dmamap_destroy(sc->sc_dmat,
6861 			    rxq->rxq_soft[i].rxs_dmamap);
6862 	}
6863 	return error;
6864 }
6865 
6866 static void
6867 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
6868 {
6869 	int i;
6870 
6871 	for (i = 0; i < rxq->rxq_ndesc; i++) {
6872 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
6873 			bus_dmamap_destroy(sc->sc_dmat,
6874 			    rxq->rxq_soft[i].rxs_dmamap);
6875 	}
6876 }
6877 
6878 /*
6879  * wm_alloc_quques:
6880  *	Allocate {tx,rx}descs and {tx,rx} buffers
6881  */
6882 static int
6883 wm_alloc_txrx_queues(struct wm_softc *sc)
6884 {
6885 	int i, error, tx_done, rx_done;
6886 
6887 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
6888 	    KM_SLEEP);
6889 	if (sc->sc_queue == NULL) {
6890 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
6891 		error = ENOMEM;
6892 		goto fail_0;
6893 	}
6894 
6895 	/* For transmission */
6896 	error = 0;
6897 	tx_done = 0;
6898 	for (i = 0; i < sc->sc_nqueues; i++) {
6899 #ifdef WM_EVENT_COUNTERS
6900 		int j;
6901 		const char *xname;
6902 #endif
6903 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6904 		txq->txq_sc = sc;
6905 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
6906 
6907 		error = wm_alloc_tx_descs(sc, txq);
6908 		if (error)
6909 			break;
6910 		error = wm_alloc_tx_buffer(sc, txq);
6911 		if (error) {
6912 			wm_free_tx_descs(sc, txq);
6913 			break;
6914 		}
6915 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
6916 		if (txq->txq_interq == NULL) {
6917 			wm_free_tx_descs(sc, txq);
6918 			wm_free_tx_buffer(sc, txq);
6919 			error = ENOMEM;
6920 			break;
6921 		}
6922 
6923 #ifdef WM_EVENT_COUNTERS
6924 		xname = device_xname(sc->sc_dev);
6925 
6926 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
6927 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
6928 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
6929 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
6930 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
6931 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
6932 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
6933 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
6934 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
6935 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
6936 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
6937 
6938 		for (j = 0; j < WM_NTXSEGS; j++) {
6939 			snprintf(txq->txq_txseg_evcnt_names[j],
6940 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
6941 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
6942 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
6943 		}
6944 
6945 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
6946 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
6947 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
6948 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
6949 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
6950 #endif /* WM_EVENT_COUNTERS */
6951 
6952 		tx_done++;
6953 	}
6954 	if (error)
6955 		goto fail_1;
6956 
6957 	/* For receive */
6958 	error = 0;
6959 	rx_done = 0;
6960 	for (i = 0; i < sc->sc_nqueues; i++) {
6961 #ifdef WM_EVENT_COUNTERS
6962 		const char *xname;
6963 #endif
6964 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6965 		rxq->rxq_sc = sc;
6966 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
6967 
6968 		error = wm_alloc_rx_descs(sc, rxq);
6969 		if (error)
6970 			break;
6971 
6972 		error = wm_alloc_rx_buffer(sc, rxq);
6973 		if (error) {
6974 			wm_free_rx_descs(sc, rxq);
6975 			break;
6976 		}
6977 
6978 #ifdef WM_EVENT_COUNTERS
6979 		xname = device_xname(sc->sc_dev);
6980 
6981 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
6982 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
6983 
6984 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
6985 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
6986 #endif /* WM_EVENT_COUNTERS */
6987 
6988 		rx_done++;
6989 	}
6990 	if (error)
6991 		goto fail_2;
6992 
6993 	for (i = 0; i < sc->sc_nqueues; i++) {
6994 		char rndname[16];
6995 
6996 		snprintf(rndname, sizeof(rndname), "%sTXRX%d",
6997 		    device_xname(sc->sc_dev), i);
6998 		rnd_attach_source(&sc->sc_queue[i].rnd_source, rndname,
6999 		    RND_TYPE_NET, RND_FLAG_DEFAULT);
7000 	}
7001 
7002 	return 0;
7003 
7004  fail_2:
7005 	for (i = 0; i < rx_done; i++) {
7006 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7007 		wm_free_rx_buffer(sc, rxq);
7008 		wm_free_rx_descs(sc, rxq);
7009 		if (rxq->rxq_lock)
7010 			mutex_obj_free(rxq->rxq_lock);
7011 	}
7012  fail_1:
7013 	for (i = 0; i < tx_done; i++) {
7014 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7015 		pcq_destroy(txq->txq_interq);
7016 		wm_free_tx_buffer(sc, txq);
7017 		wm_free_tx_descs(sc, txq);
7018 		if (txq->txq_lock)
7019 			mutex_obj_free(txq->txq_lock);
7020 	}
7021 
7022 	kmem_free(sc->sc_queue,
7023 	    sizeof(struct wm_queue) * sc->sc_nqueues);
7024  fail_0:
7025 	return error;
7026 }
7027 
7028 /*
7029  * wm_free_quques:
7030  *	Free {tx,rx}descs and {tx,rx} buffers
7031  */
7032 static void
7033 wm_free_txrx_queues(struct wm_softc *sc)
7034 {
7035 	int i;
7036 
7037 	for (i = 0; i < sc->sc_nqueues; i++)
7038 		rnd_detach_source(&sc->sc_queue[i].rnd_source);
7039 
7040 	for (i = 0; i < sc->sc_nqueues; i++) {
7041 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7042 
7043 #ifdef WM_EVENT_COUNTERS
7044 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
7045 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
7046 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
7047 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
7048 #endif /* WM_EVENT_COUNTERS */
7049 
7050 		wm_free_rx_buffer(sc, rxq);
7051 		wm_free_rx_descs(sc, rxq);
7052 		if (rxq->rxq_lock)
7053 			mutex_obj_free(rxq->rxq_lock);
7054 	}
7055 
7056 	for (i = 0; i < sc->sc_nqueues; i++) {
7057 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7058 		struct mbuf *m;
7059 #ifdef WM_EVENT_COUNTERS
7060 		int j;
7061 
7062 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
7063 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
7064 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
7065 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
7066 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
7067 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
7068 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
7069 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
7070 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
7071 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
7072 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
7073 
7074 		for (j = 0; j < WM_NTXSEGS; j++)
7075 			evcnt_detach(&txq->txq_ev_txseg[j]);
7076 
7077 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
7078 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
7079 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
7080 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
7081 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
7082 #endif /* WM_EVENT_COUNTERS */
7083 
7084 		/* Drain txq_interq */
7085 		while ((m = pcq_get(txq->txq_interq)) != NULL)
7086 			m_freem(m);
7087 		pcq_destroy(txq->txq_interq);
7088 
7089 		wm_free_tx_buffer(sc, txq);
7090 		wm_free_tx_descs(sc, txq);
7091 		if (txq->txq_lock)
7092 			mutex_obj_free(txq->txq_lock);
7093 	}
7094 
7095 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
7096 }
7097 
7098 static void
7099 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
7100 {
7101 
7102 	KASSERT(mutex_owned(txq->txq_lock));
7103 
7104 	/* Initialize the transmit descriptor ring. */
7105 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
7106 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
7107 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
7108 	txq->txq_free = WM_NTXDESC(txq);
7109 	txq->txq_next = 0;
7110 }
7111 
7112 static void
7113 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
7114     struct wm_txqueue *txq)
7115 {
7116 
7117 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
7118 		device_xname(sc->sc_dev), __func__));
7119 	KASSERT(mutex_owned(txq->txq_lock));
7120 
7121 	if (sc->sc_type < WM_T_82543) {
7122 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
7123 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
7124 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
7125 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
7126 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
7127 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
7128 	} else {
7129 		int qid = wmq->wmq_id;
7130 
7131 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
7132 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
7133 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
7134 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
7135 
7136 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7137 			/*
7138 			 * Don't write TDT before TCTL.EN is set.
7139 			 * See the document.
7140 			 */
7141 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
7142 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
7143 			    | TXDCTL_WTHRESH(0));
7144 		else {
7145 			/* XXX should update with AIM? */
7146 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
7147 			if (sc->sc_type >= WM_T_82540) {
7148 				/* Should be the same */
7149 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
7150 			}
7151 
7152 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
7153 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
7154 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
7155 		}
7156 	}
7157 }
7158 
7159 static void
7160 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
7161 {
7162 	int i;
7163 
7164 	KASSERT(mutex_owned(txq->txq_lock));
7165 
7166 	/* Initialize the transmit job descriptors. */
7167 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
7168 		txq->txq_soft[i].txs_mbuf = NULL;
7169 	txq->txq_sfree = WM_TXQUEUELEN(txq);
7170 	txq->txq_snext = 0;
7171 	txq->txq_sdirty = 0;
7172 }
7173 
7174 static void
7175 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
7176     struct wm_txqueue *txq)
7177 {
7178 
7179 	KASSERT(mutex_owned(txq->txq_lock));
7180 
7181 	/*
7182 	 * Set up some register offsets that are different between
7183 	 * the i82542 and the i82543 and later chips.
7184 	 */
7185 	if (sc->sc_type < WM_T_82543)
7186 		txq->txq_tdt_reg = WMREG_OLD_TDT;
7187 	else
7188 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
7189 
7190 	wm_init_tx_descs(sc, txq);
7191 	wm_init_tx_regs(sc, wmq, txq);
7192 	wm_init_tx_buffer(sc, txq);
7193 
7194 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
7195 	txq->txq_sending = false;
7196 }
7197 
7198 static void
7199 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
7200     struct wm_rxqueue *rxq)
7201 {
7202 
7203 	KASSERT(mutex_owned(rxq->rxq_lock));
7204 
7205 	/*
7206 	 * Initialize the receive descriptor and receive job
7207 	 * descriptor rings.
7208 	 */
7209 	if (sc->sc_type < WM_T_82543) {
7210 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
7211 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
7212 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
7213 		    rxq->rxq_descsize * rxq->rxq_ndesc);
7214 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
7215 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
7216 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
7217 
7218 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
7219 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
7220 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
7221 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
7222 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
7223 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
7224 	} else {
7225 		int qid = wmq->wmq_id;
7226 
7227 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
7228 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
7229 		CSR_WRITE(sc, WMREG_RDLEN(qid),
7230 		    rxq->rxq_descsize * rxq->rxq_ndesc);
7231 
7232 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7233 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
7234 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
7235 
7236 			/* Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF only. */
7237 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
7238 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
7239 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
7240 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
7241 			    | RXDCTL_WTHRESH(1));
7242 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
7243 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
7244 		} else {
7245 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
7246 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
7247 			/* XXX should update with AIM? */
7248 			CSR_WRITE(sc, WMREG_RDTR,
7249 			    (wmq->wmq_itr / 4) | RDTR_FPD);
7250 			/* MUST be same */
7251 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
7252 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
7253 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
7254 		}
7255 	}
7256 }
7257 
7258 static int
7259 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7260 {
7261 	struct wm_rxsoft *rxs;
7262 	int error, i;
7263 
7264 	KASSERT(mutex_owned(rxq->rxq_lock));
7265 
7266 	for (i = 0; i < rxq->rxq_ndesc; i++) {
7267 		rxs = &rxq->rxq_soft[i];
7268 		if (rxs->rxs_mbuf == NULL) {
7269 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
7270 				log(LOG_ERR, "%s: unable to allocate or map "
7271 				    "rx buffer %d, error = %d\n",
7272 				    device_xname(sc->sc_dev), i, error);
7273 				/*
7274 				 * XXX Should attempt to run with fewer receive
7275 				 * XXX buffers instead of just failing.
7276 				 */
7277 				wm_rxdrain(rxq);
7278 				return ENOMEM;
7279 			}
7280 		} else {
7281 			/*
7282 			 * For 82575 and 82576, the RX descriptors must be
7283 			 * initialized after the setting of RCTL.EN in
7284 			 * wm_set_filter()
7285 			 */
7286 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
7287 				wm_init_rxdesc(rxq, i);
7288 		}
7289 	}
7290 	rxq->rxq_ptr = 0;
7291 	rxq->rxq_discard = 0;
7292 	WM_RXCHAIN_RESET(rxq);
7293 
7294 	return 0;
7295 }
7296 
7297 static int
7298 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
7299     struct wm_rxqueue *rxq)
7300 {
7301 
7302 	KASSERT(mutex_owned(rxq->rxq_lock));
7303 
7304 	/*
7305 	 * Set up some register offsets that are different between
7306 	 * the i82542 and the i82543 and later chips.
7307 	 */
7308 	if (sc->sc_type < WM_T_82543)
7309 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
7310 	else
7311 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
7312 
7313 	wm_init_rx_regs(sc, wmq, rxq);
7314 	return wm_init_rx_buffer(sc, rxq);
7315 }
7316 
7317 /*
7318  * wm_init_quques:
7319  *	Initialize {tx,rx}descs and {tx,rx} buffers
7320  */
7321 static int
7322 wm_init_txrx_queues(struct wm_softc *sc)
7323 {
7324 	int i, error = 0;
7325 
7326 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
7327 		device_xname(sc->sc_dev), __func__));
7328 
7329 	for (i = 0; i < sc->sc_nqueues; i++) {
7330 		struct wm_queue *wmq = &sc->sc_queue[i];
7331 		struct wm_txqueue *txq = &wmq->wmq_txq;
7332 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
7333 
7334 		/*
7335 		 * TODO
7336 		 * Currently, use constant variable instead of AIM.
7337 		 * Furthermore, the interrupt interval of multiqueue which use
7338 		 * polling mode is less than default value.
7339 		 * More tuning and AIM are required.
7340 		 */
7341 		if (wm_is_using_multiqueue(sc))
7342 			wmq->wmq_itr = 50;
7343 		else
7344 			wmq->wmq_itr = sc->sc_itr_init;
7345 		wmq->wmq_set_itr = true;
7346 
7347 		mutex_enter(txq->txq_lock);
7348 		wm_init_tx_queue(sc, wmq, txq);
7349 		mutex_exit(txq->txq_lock);
7350 
7351 		mutex_enter(rxq->rxq_lock);
7352 		error = wm_init_rx_queue(sc, wmq, rxq);
7353 		mutex_exit(rxq->rxq_lock);
7354 		if (error)
7355 			break;
7356 	}
7357 
7358 	return error;
7359 }
7360 
7361 /*
7362  * wm_tx_offload:
7363  *
7364  *	Set up TCP/IP checksumming parameters for the
7365  *	specified packet.
7366  */
7367 static int
7368 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
7369     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
7370 {
7371 	struct mbuf *m0 = txs->txs_mbuf;
7372 	struct livengood_tcpip_ctxdesc *t;
7373 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
7374 	uint32_t ipcse;
7375 	struct ether_header *eh;
7376 	int offset, iphl;
7377 	uint8_t fields;
7378 
7379 	/*
7380 	 * XXX It would be nice if the mbuf pkthdr had offset
7381 	 * fields for the protocol headers.
7382 	 */
7383 
7384 	eh = mtod(m0, struct ether_header *);
7385 	switch (htons(eh->ether_type)) {
7386 	case ETHERTYPE_IP:
7387 	case ETHERTYPE_IPV6:
7388 		offset = ETHER_HDR_LEN;
7389 		break;
7390 
7391 	case ETHERTYPE_VLAN:
7392 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
7393 		break;
7394 
7395 	default:
7396 		/* Don't support this protocol or encapsulation. */
7397 		*fieldsp = 0;
7398 		*cmdp = 0;
7399 		return 0;
7400 	}
7401 
7402 	if ((m0->m_pkthdr.csum_flags &
7403 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
7404 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
7405 	} else
7406 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
7407 
7408 	ipcse = offset + iphl - 1;
7409 
7410 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
7411 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
7412 	seg = 0;
7413 	fields = 0;
7414 
7415 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
7416 		int hlen = offset + iphl;
7417 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
7418 
7419 		if (__predict_false(m0->m_len <
7420 				    (hlen + sizeof(struct tcphdr)))) {
7421 			/*
7422 			 * TCP/IP headers are not in the first mbuf; we need
7423 			 * to do this the slow and painful way. Let's just
7424 			 * hope this doesn't happen very often.
7425 			 */
7426 			struct tcphdr th;
7427 
7428 			WM_Q_EVCNT_INCR(txq, tsopain);
7429 
7430 			m_copydata(m0, hlen, sizeof(th), &th);
7431 			if (v4) {
7432 				struct ip ip;
7433 
7434 				m_copydata(m0, offset, sizeof(ip), &ip);
7435 				ip.ip_len = 0;
7436 				m_copyback(m0,
7437 				    offset + offsetof(struct ip, ip_len),
7438 				    sizeof(ip.ip_len), &ip.ip_len);
7439 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
7440 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
7441 			} else {
7442 				struct ip6_hdr ip6;
7443 
7444 				m_copydata(m0, offset, sizeof(ip6), &ip6);
7445 				ip6.ip6_plen = 0;
7446 				m_copyback(m0,
7447 				    offset + offsetof(struct ip6_hdr, ip6_plen),
7448 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
7449 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
7450 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
7451 			}
7452 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
7453 			    sizeof(th.th_sum), &th.th_sum);
7454 
7455 			hlen += th.th_off << 2;
7456 		} else {
7457 			/*
7458 			 * TCP/IP headers are in the first mbuf; we can do
7459 			 * this the easy way.
7460 			 */
7461 			struct tcphdr *th;
7462 
7463 			if (v4) {
7464 				struct ip *ip =
7465 				    (void *)(mtod(m0, char *) + offset);
7466 				th = (void *)(mtod(m0, char *) + hlen);
7467 
7468 				ip->ip_len = 0;
7469 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
7470 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
7471 			} else {
7472 				struct ip6_hdr *ip6 =
7473 				    (void *)(mtod(m0, char *) + offset);
7474 				th = (void *)(mtod(m0, char *) + hlen);
7475 
7476 				ip6->ip6_plen = 0;
7477 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
7478 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
7479 			}
7480 			hlen += th->th_off << 2;
7481 		}
7482 
7483 		if (v4) {
7484 			WM_Q_EVCNT_INCR(txq, tso);
7485 			cmdlen |= WTX_TCPIP_CMD_IP;
7486 		} else {
7487 			WM_Q_EVCNT_INCR(txq, tso6);
7488 			ipcse = 0;
7489 		}
7490 		cmd |= WTX_TCPIP_CMD_TSE;
7491 		cmdlen |= WTX_TCPIP_CMD_TSE |
7492 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
7493 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
7494 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
7495 	}
7496 
7497 	/*
7498 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
7499 	 * offload feature, if we load the context descriptor, we
7500 	 * MUST provide valid values for IPCSS and TUCSS fields.
7501 	 */
7502 
7503 	ipcs = WTX_TCPIP_IPCSS(offset) |
7504 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
7505 	    WTX_TCPIP_IPCSE(ipcse);
7506 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
7507 		WM_Q_EVCNT_INCR(txq, ipsum);
7508 		fields |= WTX_IXSM;
7509 	}
7510 
7511 	offset += iphl;
7512 
7513 	if (m0->m_pkthdr.csum_flags &
7514 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
7515 		WM_Q_EVCNT_INCR(txq, tusum);
7516 		fields |= WTX_TXSM;
7517 		tucs = WTX_TCPIP_TUCSS(offset) |
7518 		    WTX_TCPIP_TUCSO(offset +
7519 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
7520 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
7521 	} else if ((m0->m_pkthdr.csum_flags &
7522 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
7523 		WM_Q_EVCNT_INCR(txq, tusum6);
7524 		fields |= WTX_TXSM;
7525 		tucs = WTX_TCPIP_TUCSS(offset) |
7526 		    WTX_TCPIP_TUCSO(offset +
7527 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
7528 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
7529 	} else {
7530 		/* Just initialize it to a valid TCP context. */
7531 		tucs = WTX_TCPIP_TUCSS(offset) |
7532 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
7533 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
7534 	}
7535 
7536 	/*
7537 	 * We don't have to write context descriptor for every packet
7538 	 * except for 82574. For 82574, we must write context descriptor
7539 	 * for every packet when we use two descriptor queues.
7540 	 * It would be overhead to write context descriptor for every packet,
7541 	 * however it does not cause problems.
7542 	 */
7543 	/* Fill in the context descriptor. */
7544 	t = (struct livengood_tcpip_ctxdesc *)
7545 	    &txq->txq_descs[txq->txq_next];
7546 	t->tcpip_ipcs = htole32(ipcs);
7547 	t->tcpip_tucs = htole32(tucs);
7548 	t->tcpip_cmdlen = htole32(cmdlen);
7549 	t->tcpip_seg = htole32(seg);
7550 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
7551 
7552 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
7553 	txs->txs_ndesc++;
7554 
7555 	*cmdp = cmd;
7556 	*fieldsp = fields;
7557 
7558 	return 0;
7559 }
7560 
7561 static inline int
7562 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
7563 {
7564 	struct wm_softc *sc = ifp->if_softc;
7565 	u_int cpuid = cpu_index(curcpu());
7566 
7567 	/*
7568 	 * Currently, simple distribute strategy.
7569 	 * TODO:
7570 	 * distribute by flowid(RSS has value).
7571 	 */
7572 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
7573 }
7574 
7575 /*
7576  * wm_start:		[ifnet interface function]
7577  *
7578  *	Start packet transmission on the interface.
7579  */
7580 static void
7581 wm_start(struct ifnet *ifp)
7582 {
7583 	struct wm_softc *sc = ifp->if_softc;
7584 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7585 
7586 #ifdef WM_MPSAFE
7587 	KASSERT(if_is_mpsafe(ifp));
7588 #endif
7589 	/*
7590 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
7591 	 */
7592 
7593 	mutex_enter(txq->txq_lock);
7594 	if (!txq->txq_stopping)
7595 		wm_start_locked(ifp);
7596 	mutex_exit(txq->txq_lock);
7597 }
7598 
7599 static void
7600 wm_start_locked(struct ifnet *ifp)
7601 {
7602 	struct wm_softc *sc = ifp->if_softc;
7603 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7604 
7605 	wm_send_common_locked(ifp, txq, false);
7606 }
7607 
7608 static int
7609 wm_transmit(struct ifnet *ifp, struct mbuf *m)
7610 {
7611 	int qid;
7612 	struct wm_softc *sc = ifp->if_softc;
7613 	struct wm_txqueue *txq;
7614 
7615 	qid = wm_select_txqueue(ifp, m);
7616 	txq = &sc->sc_queue[qid].wmq_txq;
7617 
7618 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
7619 		m_freem(m);
7620 		WM_Q_EVCNT_INCR(txq, pcqdrop);
7621 		return ENOBUFS;
7622 	}
7623 
7624 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
7625 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
7626 	if (m->m_flags & M_MCAST)
7627 		if_statinc_ref(nsr, if_omcasts);
7628 	IF_STAT_PUTREF(ifp);
7629 
7630 	if (mutex_tryenter(txq->txq_lock)) {
7631 		if (!txq->txq_stopping)
7632 			wm_transmit_locked(ifp, txq);
7633 		mutex_exit(txq->txq_lock);
7634 	}
7635 
7636 	return 0;
7637 }
7638 
7639 static void
7640 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
7641 {
7642 
7643 	wm_send_common_locked(ifp, txq, true);
7644 }
7645 
7646 static void
7647 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
7648     bool is_transmit)
7649 {
7650 	struct wm_softc *sc = ifp->if_softc;
7651 	struct mbuf *m0;
7652 	struct wm_txsoft *txs;
7653 	bus_dmamap_t dmamap;
7654 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
7655 	bus_addr_t curaddr;
7656 	bus_size_t seglen, curlen;
7657 	uint32_t cksumcmd;
7658 	uint8_t cksumfields;
7659 	bool remap = true;
7660 
7661 	KASSERT(mutex_owned(txq->txq_lock));
7662 
7663 	if ((ifp->if_flags & IFF_RUNNING) == 0)
7664 		return;
7665 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
7666 		return;
7667 
7668 	/* Remember the previous number of free descriptors. */
7669 	ofree = txq->txq_free;
7670 
7671 	/*
7672 	 * Loop through the send queue, setting up transmit descriptors
7673 	 * until we drain the queue, or use up all available transmit
7674 	 * descriptors.
7675 	 */
7676 	for (;;) {
7677 		m0 = NULL;
7678 
7679 		/* Get a work queue entry. */
7680 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
7681 			wm_txeof(txq, UINT_MAX);
7682 			if (txq->txq_sfree == 0) {
7683 				DPRINTF(WM_DEBUG_TX,
7684 				    ("%s: TX: no free job descriptors\n",
7685 					device_xname(sc->sc_dev)));
7686 				WM_Q_EVCNT_INCR(txq, txsstall);
7687 				break;
7688 			}
7689 		}
7690 
7691 		/* Grab a packet off the queue. */
7692 		if (is_transmit)
7693 			m0 = pcq_get(txq->txq_interq);
7694 		else
7695 			IFQ_DEQUEUE(&ifp->if_snd, m0);
7696 		if (m0 == NULL)
7697 			break;
7698 
7699 		DPRINTF(WM_DEBUG_TX,
7700 		    ("%s: TX: have packet to transmit: %p\n",
7701 			device_xname(sc->sc_dev), m0));
7702 
7703 		txs = &txq->txq_soft[txq->txq_snext];
7704 		dmamap = txs->txs_dmamap;
7705 
7706 		use_tso = (m0->m_pkthdr.csum_flags &
7707 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
7708 
7709 		/*
7710 		 * So says the Linux driver:
7711 		 * The controller does a simple calculation to make sure
7712 		 * there is enough room in the FIFO before initiating the
7713 		 * DMA for each buffer. The calc is:
7714 		 *	4 = ceil(buffer len / MSS)
7715 		 * To make sure we don't overrun the FIFO, adjust the max
7716 		 * buffer len if the MSS drops.
7717 		 */
7718 		dmamap->dm_maxsegsz =
7719 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
7720 		    ? m0->m_pkthdr.segsz << 2
7721 		    : WTX_MAX_LEN;
7722 
7723 		/*
7724 		 * Load the DMA map.  If this fails, the packet either
7725 		 * didn't fit in the allotted number of segments, or we
7726 		 * were short on resources.  For the too-many-segments
7727 		 * case, we simply report an error and drop the packet,
7728 		 * since we can't sanely copy a jumbo packet to a single
7729 		 * buffer.
7730 		 */
7731 retry:
7732 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
7733 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
7734 		if (__predict_false(error)) {
7735 			if (error == EFBIG) {
7736 				if (remap == true) {
7737 					struct mbuf *m;
7738 
7739 					remap = false;
7740 					m = m_defrag(m0, M_NOWAIT);
7741 					if (m != NULL) {
7742 						WM_Q_EVCNT_INCR(txq, defrag);
7743 						m0 = m;
7744 						goto retry;
7745 					}
7746 				}
7747 				WM_Q_EVCNT_INCR(txq, toomanyseg);
7748 				log(LOG_ERR, "%s: Tx packet consumes too many "
7749 				    "DMA segments, dropping...\n",
7750 				    device_xname(sc->sc_dev));
7751 				wm_dump_mbuf_chain(sc, m0);
7752 				m_freem(m0);
7753 				continue;
7754 			}
7755 			/* Short on resources, just stop for now. */
7756 			DPRINTF(WM_DEBUG_TX,
7757 			    ("%s: TX: dmamap load failed: %d\n",
7758 				device_xname(sc->sc_dev), error));
7759 			break;
7760 		}
7761 
7762 		segs_needed = dmamap->dm_nsegs;
7763 		if (use_tso) {
7764 			/* For sentinel descriptor; see below. */
7765 			segs_needed++;
7766 		}
7767 
7768 		/*
7769 		 * Ensure we have enough descriptors free to describe
7770 		 * the packet. Note, we always reserve one descriptor
7771 		 * at the end of the ring due to the semantics of the
7772 		 * TDT register, plus one more in the event we need
7773 		 * to load offload context.
7774 		 */
7775 		if (segs_needed > txq->txq_free - 2) {
7776 			/*
7777 			 * Not enough free descriptors to transmit this
7778 			 * packet.  We haven't committed anything yet,
7779 			 * so just unload the DMA map, put the packet
7780 			 * pack on the queue, and punt. Notify the upper
7781 			 * layer that there are no more slots left.
7782 			 */
7783 			DPRINTF(WM_DEBUG_TX,
7784 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
7785 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
7786 				segs_needed, txq->txq_free - 1));
7787 			txq->txq_flags |= WM_TXQ_NO_SPACE;
7788 			bus_dmamap_unload(sc->sc_dmat, dmamap);
7789 			WM_Q_EVCNT_INCR(txq, txdstall);
7790 			break;
7791 		}
7792 
7793 		/*
7794 		 * Check for 82547 Tx FIFO bug. We need to do this
7795 		 * once we know we can transmit the packet, since we
7796 		 * do some internal FIFO space accounting here.
7797 		 */
7798 		if (sc->sc_type == WM_T_82547 &&
7799 		    wm_82547_txfifo_bugchk(sc, m0)) {
7800 			DPRINTF(WM_DEBUG_TX,
7801 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
7802 				device_xname(sc->sc_dev)));
7803 			txq->txq_flags |= WM_TXQ_NO_SPACE;
7804 			bus_dmamap_unload(sc->sc_dmat, dmamap);
7805 			WM_Q_EVCNT_INCR(txq, fifo_stall);
7806 			break;
7807 		}
7808 
7809 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
7810 
7811 		DPRINTF(WM_DEBUG_TX,
7812 		    ("%s: TX: packet has %d (%d) DMA segments\n",
7813 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
7814 
7815 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
7816 
7817 		/*
7818 		 * Store a pointer to the packet so that we can free it
7819 		 * later.
7820 		 *
7821 		 * Initially, we consider the number of descriptors the
7822 		 * packet uses the number of DMA segments.  This may be
7823 		 * incremented by 1 if we do checksum offload (a descriptor
7824 		 * is used to set the checksum context).
7825 		 */
7826 		txs->txs_mbuf = m0;
7827 		txs->txs_firstdesc = txq->txq_next;
7828 		txs->txs_ndesc = segs_needed;
7829 
7830 		/* Set up offload parameters for this packet. */
7831 		if (m0->m_pkthdr.csum_flags &
7832 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
7833 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7834 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
7835 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
7836 					  &cksumfields) != 0) {
7837 				/* Error message already displayed. */
7838 				bus_dmamap_unload(sc->sc_dmat, dmamap);
7839 				continue;
7840 			}
7841 		} else {
7842 			cksumcmd = 0;
7843 			cksumfields = 0;
7844 		}
7845 
7846 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
7847 
7848 		/* Sync the DMA map. */
7849 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
7850 		    BUS_DMASYNC_PREWRITE);
7851 
7852 		/* Initialize the transmit descriptor. */
7853 		for (nexttx = txq->txq_next, seg = 0;
7854 		     seg < dmamap->dm_nsegs; seg++) {
7855 			for (seglen = dmamap->dm_segs[seg].ds_len,
7856 			     curaddr = dmamap->dm_segs[seg].ds_addr;
7857 			     seglen != 0;
7858 			     curaddr += curlen, seglen -= curlen,
7859 			     nexttx = WM_NEXTTX(txq, nexttx)) {
7860 				curlen = seglen;
7861 
7862 				/*
7863 				 * So says the Linux driver:
7864 				 * Work around for premature descriptor
7865 				 * write-backs in TSO mode.  Append a
7866 				 * 4-byte sentinel descriptor.
7867 				 */
7868 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
7869 				    curlen > 8)
7870 					curlen -= 4;
7871 
7872 				wm_set_dma_addr(
7873 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
7874 				txq->txq_descs[nexttx].wtx_cmdlen
7875 				    = htole32(cksumcmd | curlen);
7876 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
7877 				    = 0;
7878 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
7879 				    = cksumfields;
7880 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
7881 				lasttx = nexttx;
7882 
7883 				DPRINTF(WM_DEBUG_TX,
7884 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
7885 					"len %#04zx\n",
7886 					device_xname(sc->sc_dev), nexttx,
7887 					(uint64_t)curaddr, curlen));
7888 			}
7889 		}
7890 
7891 		KASSERT(lasttx != -1);
7892 
7893 		/*
7894 		 * Set up the command byte on the last descriptor of
7895 		 * the packet. If we're in the interrupt delay window,
7896 		 * delay the interrupt.
7897 		 */
7898 		txq->txq_descs[lasttx].wtx_cmdlen |=
7899 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
7900 
7901 		/*
7902 		 * If VLANs are enabled and the packet has a VLAN tag, set
7903 		 * up the descriptor to encapsulate the packet for us.
7904 		 *
7905 		 * This is only valid on the last descriptor of the packet.
7906 		 */
7907 		if (vlan_has_tag(m0)) {
7908 			txq->txq_descs[lasttx].wtx_cmdlen |=
7909 			    htole32(WTX_CMD_VLE);
7910 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
7911 			    = htole16(vlan_get_tag(m0));
7912 		}
7913 
7914 		txs->txs_lastdesc = lasttx;
7915 
7916 		DPRINTF(WM_DEBUG_TX,
7917 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
7918 			device_xname(sc->sc_dev),
7919 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
7920 
7921 		/* Sync the descriptors we're using. */
7922 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
7923 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
7924 
7925 		/* Give the packet to the chip. */
7926 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
7927 
7928 		DPRINTF(WM_DEBUG_TX,
7929 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
7930 
7931 		DPRINTF(WM_DEBUG_TX,
7932 		    ("%s: TX: finished transmitting packet, job %d\n",
7933 			device_xname(sc->sc_dev), txq->txq_snext));
7934 
7935 		/* Advance the tx pointer. */
7936 		txq->txq_free -= txs->txs_ndesc;
7937 		txq->txq_next = nexttx;
7938 
7939 		txq->txq_sfree--;
7940 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
7941 
7942 		/* Pass the packet to any BPF listeners. */
7943 		bpf_mtap(ifp, m0, BPF_D_OUT);
7944 	}
7945 
7946 	if (m0 != NULL) {
7947 		txq->txq_flags |= WM_TXQ_NO_SPACE;
7948 		WM_Q_EVCNT_INCR(txq, descdrop);
7949 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
7950 			__func__));
7951 		m_freem(m0);
7952 	}
7953 
7954 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
7955 		/* No more slots; notify upper layer. */
7956 		txq->txq_flags |= WM_TXQ_NO_SPACE;
7957 	}
7958 
7959 	if (txq->txq_free != ofree) {
7960 		/* Set a watchdog timer in case the chip flakes out. */
7961 		txq->txq_lastsent = time_uptime;
7962 		txq->txq_sending = true;
7963 	}
7964 }
7965 
7966 /*
7967  * wm_nq_tx_offload:
7968  *
7969  *	Set up TCP/IP checksumming parameters for the
7970  *	specified packet, for NEWQUEUE devices
7971  */
7972 static int
7973 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
7974     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
7975 {
7976 	struct mbuf *m0 = txs->txs_mbuf;
7977 	uint32_t vl_len, mssidx, cmdc;
7978 	struct ether_header *eh;
7979 	int offset, iphl;
7980 
7981 	/*
7982 	 * XXX It would be nice if the mbuf pkthdr had offset
7983 	 * fields for the protocol headers.
7984 	 */
7985 	*cmdlenp = 0;
7986 	*fieldsp = 0;
7987 
7988 	eh = mtod(m0, struct ether_header *);
7989 	switch (htons(eh->ether_type)) {
7990 	case ETHERTYPE_IP:
7991 	case ETHERTYPE_IPV6:
7992 		offset = ETHER_HDR_LEN;
7993 		break;
7994 
7995 	case ETHERTYPE_VLAN:
7996 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
7997 		break;
7998 
7999 	default:
8000 		/* Don't support this protocol or encapsulation. */
8001 		*do_csum = false;
8002 		return 0;
8003 	}
8004 	*do_csum = true;
8005 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
8006 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
8007 
8008 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
8009 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
8010 
8011 	if ((m0->m_pkthdr.csum_flags &
8012 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
8013 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
8014 	} else {
8015 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
8016 	}
8017 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
8018 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
8019 
8020 	if (vlan_has_tag(m0)) {
8021 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
8022 		    << NQTXC_VLLEN_VLAN_SHIFT);
8023 		*cmdlenp |= NQTX_CMD_VLE;
8024 	}
8025 
8026 	mssidx = 0;
8027 
8028 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
8029 		int hlen = offset + iphl;
8030 		int tcp_hlen;
8031 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
8032 
8033 		if (__predict_false(m0->m_len <
8034 				    (hlen + sizeof(struct tcphdr)))) {
8035 			/*
8036 			 * TCP/IP headers are not in the first mbuf; we need
8037 			 * to do this the slow and painful way. Let's just
8038 			 * hope this doesn't happen very often.
8039 			 */
8040 			struct tcphdr th;
8041 
8042 			WM_Q_EVCNT_INCR(txq, tsopain);
8043 
8044 			m_copydata(m0, hlen, sizeof(th), &th);
8045 			if (v4) {
8046 				struct ip ip;
8047 
8048 				m_copydata(m0, offset, sizeof(ip), &ip);
8049 				ip.ip_len = 0;
8050 				m_copyback(m0,
8051 				    offset + offsetof(struct ip, ip_len),
8052 				    sizeof(ip.ip_len), &ip.ip_len);
8053 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
8054 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
8055 			} else {
8056 				struct ip6_hdr ip6;
8057 
8058 				m_copydata(m0, offset, sizeof(ip6), &ip6);
8059 				ip6.ip6_plen = 0;
8060 				m_copyback(m0,
8061 				    offset + offsetof(struct ip6_hdr, ip6_plen),
8062 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
8063 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
8064 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
8065 			}
8066 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
8067 			    sizeof(th.th_sum), &th.th_sum);
8068 
8069 			tcp_hlen = th.th_off << 2;
8070 		} else {
8071 			/*
8072 			 * TCP/IP headers are in the first mbuf; we can do
8073 			 * this the easy way.
8074 			 */
8075 			struct tcphdr *th;
8076 
8077 			if (v4) {
8078 				struct ip *ip =
8079 				    (void *)(mtod(m0, char *) + offset);
8080 				th = (void *)(mtod(m0, char *) + hlen);
8081 
8082 				ip->ip_len = 0;
8083 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
8084 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
8085 			} else {
8086 				struct ip6_hdr *ip6 =
8087 				    (void *)(mtod(m0, char *) + offset);
8088 				th = (void *)(mtod(m0, char *) + hlen);
8089 
8090 				ip6->ip6_plen = 0;
8091 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
8092 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
8093 			}
8094 			tcp_hlen = th->th_off << 2;
8095 		}
8096 		hlen += tcp_hlen;
8097 		*cmdlenp |= NQTX_CMD_TSE;
8098 
8099 		if (v4) {
8100 			WM_Q_EVCNT_INCR(txq, tso);
8101 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
8102 		} else {
8103 			WM_Q_EVCNT_INCR(txq, tso6);
8104 			*fieldsp |= NQTXD_FIELDS_TUXSM;
8105 		}
8106 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
8107 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
8108 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
8109 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
8110 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
8111 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
8112 	} else {
8113 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
8114 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
8115 	}
8116 
8117 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
8118 		*fieldsp |= NQTXD_FIELDS_IXSM;
8119 		cmdc |= NQTXC_CMD_IP4;
8120 	}
8121 
8122 	if (m0->m_pkthdr.csum_flags &
8123 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
8124 		WM_Q_EVCNT_INCR(txq, tusum);
8125 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
8126 			cmdc |= NQTXC_CMD_TCP;
8127 		else
8128 			cmdc |= NQTXC_CMD_UDP;
8129 
8130 		cmdc |= NQTXC_CMD_IP4;
8131 		*fieldsp |= NQTXD_FIELDS_TUXSM;
8132 	}
8133 	if (m0->m_pkthdr.csum_flags &
8134 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
8135 		WM_Q_EVCNT_INCR(txq, tusum6);
8136 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
8137 			cmdc |= NQTXC_CMD_TCP;
8138 		else
8139 			cmdc |= NQTXC_CMD_UDP;
8140 
8141 		cmdc |= NQTXC_CMD_IP6;
8142 		*fieldsp |= NQTXD_FIELDS_TUXSM;
8143 	}
8144 
8145 	/*
8146 	 * We don't have to write context descriptor for every packet to
8147 	 * NEWQUEUE controllers, that is 82575, 82576, 82580, I350, I354,
8148 	 * I210 and I211. It is enough to write once per a Tx queue for these
8149 	 * controllers.
8150 	 * It would be overhead to write context descriptor for every packet,
8151 	 * however it does not cause problems.
8152 	 */
8153 	/* Fill in the context descriptor. */
8154 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
8155 	    htole32(vl_len);
8156 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
8157 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
8158 	    htole32(cmdc);
8159 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
8160 	    htole32(mssidx);
8161 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
8162 	DPRINTF(WM_DEBUG_TX,
8163 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
8164 		txq->txq_next, 0, vl_len));
8165 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
8166 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
8167 	txs->txs_ndesc++;
8168 	return 0;
8169 }
8170 
8171 /*
8172  * wm_nq_start:		[ifnet interface function]
8173  *
8174  *	Start packet transmission on the interface for NEWQUEUE devices
8175  */
8176 static void
8177 wm_nq_start(struct ifnet *ifp)
8178 {
8179 	struct wm_softc *sc = ifp->if_softc;
8180 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8181 
8182 #ifdef WM_MPSAFE
8183 	KASSERT(if_is_mpsafe(ifp));
8184 #endif
8185 	/*
8186 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
8187 	 */
8188 
8189 	mutex_enter(txq->txq_lock);
8190 	if (!txq->txq_stopping)
8191 		wm_nq_start_locked(ifp);
8192 	mutex_exit(txq->txq_lock);
8193 }
8194 
8195 static void
8196 wm_nq_start_locked(struct ifnet *ifp)
8197 {
8198 	struct wm_softc *sc = ifp->if_softc;
8199 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8200 
8201 	wm_nq_send_common_locked(ifp, txq, false);
8202 }
8203 
8204 static int
8205 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
8206 {
8207 	int qid;
8208 	struct wm_softc *sc = ifp->if_softc;
8209 	struct wm_txqueue *txq;
8210 
8211 	qid = wm_select_txqueue(ifp, m);
8212 	txq = &sc->sc_queue[qid].wmq_txq;
8213 
8214 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
8215 		m_freem(m);
8216 		WM_Q_EVCNT_INCR(txq, pcqdrop);
8217 		return ENOBUFS;
8218 	}
8219 
8220 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
8221 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
8222 	if (m->m_flags & M_MCAST)
8223 		if_statinc_ref(nsr, if_omcasts);
8224 	IF_STAT_PUTREF(ifp);
8225 
8226 	/*
8227 	 * The situations which this mutex_tryenter() fails at running time
8228 	 * are below two patterns.
8229 	 *     (1) contention with interrupt handler(wm_txrxintr_msix())
8230 	 *     (2) contention with deferred if_start softint(wm_handle_queue())
8231 	 * In the case of (1), the last packet enqueued to txq->txq_interq is
8232 	 * dequeued by wm_deferred_start_locked(). So, it does not get stuck.
8233 	 * In the case of (2), the last packet enqueued to txq->txq_interq is
8234 	 * also dequeued by wm_deferred_start_locked(). So, it does not get
8235 	 * stuck, either.
8236 	 */
8237 	if (mutex_tryenter(txq->txq_lock)) {
8238 		if (!txq->txq_stopping)
8239 			wm_nq_transmit_locked(ifp, txq);
8240 		mutex_exit(txq->txq_lock);
8241 	}
8242 
8243 	return 0;
8244 }
8245 
8246 static void
8247 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
8248 {
8249 
8250 	wm_nq_send_common_locked(ifp, txq, true);
8251 }
8252 
8253 static void
8254 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
8255     bool is_transmit)
8256 {
8257 	struct wm_softc *sc = ifp->if_softc;
8258 	struct mbuf *m0;
8259 	struct wm_txsoft *txs;
8260 	bus_dmamap_t dmamap;
8261 	int error, nexttx, lasttx = -1, seg, segs_needed;
8262 	bool do_csum, sent;
8263 	bool remap = true;
8264 
8265 	KASSERT(mutex_owned(txq->txq_lock));
8266 
8267 	if ((ifp->if_flags & IFF_RUNNING) == 0)
8268 		return;
8269 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
8270 		return;
8271 
8272 	sent = false;
8273 
8274 	/*
8275 	 * Loop through the send queue, setting up transmit descriptors
8276 	 * until we drain the queue, or use up all available transmit
8277 	 * descriptors.
8278 	 */
8279 	for (;;) {
8280 		m0 = NULL;
8281 
8282 		/* Get a work queue entry. */
8283 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
8284 			wm_txeof(txq, UINT_MAX);
8285 			if (txq->txq_sfree == 0) {
8286 				DPRINTF(WM_DEBUG_TX,
8287 				    ("%s: TX: no free job descriptors\n",
8288 					device_xname(sc->sc_dev)));
8289 				WM_Q_EVCNT_INCR(txq, txsstall);
8290 				break;
8291 			}
8292 		}
8293 
8294 		/* Grab a packet off the queue. */
8295 		if (is_transmit)
8296 			m0 = pcq_get(txq->txq_interq);
8297 		else
8298 			IFQ_DEQUEUE(&ifp->if_snd, m0);
8299 		if (m0 == NULL)
8300 			break;
8301 
8302 		DPRINTF(WM_DEBUG_TX,
8303 		    ("%s: TX: have packet to transmit: %p\n",
8304 		    device_xname(sc->sc_dev), m0));
8305 
8306 		txs = &txq->txq_soft[txq->txq_snext];
8307 		dmamap = txs->txs_dmamap;
8308 
8309 		/*
8310 		 * Load the DMA map.  If this fails, the packet either
8311 		 * didn't fit in the allotted number of segments, or we
8312 		 * were short on resources.  For the too-many-segments
8313 		 * case, we simply report an error and drop the packet,
8314 		 * since we can't sanely copy a jumbo packet to a single
8315 		 * buffer.
8316 		 */
8317 retry:
8318 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
8319 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
8320 		if (__predict_false(error)) {
8321 			if (error == EFBIG) {
8322 				if (remap == true) {
8323 					struct mbuf *m;
8324 
8325 					remap = false;
8326 					m = m_defrag(m0, M_NOWAIT);
8327 					if (m != NULL) {
8328 						WM_Q_EVCNT_INCR(txq, defrag);
8329 						m0 = m;
8330 						goto retry;
8331 					}
8332 				}
8333 				WM_Q_EVCNT_INCR(txq, toomanyseg);
8334 				log(LOG_ERR, "%s: Tx packet consumes too many "
8335 				    "DMA segments, dropping...\n",
8336 				    device_xname(sc->sc_dev));
8337 				wm_dump_mbuf_chain(sc, m0);
8338 				m_freem(m0);
8339 				continue;
8340 			}
8341 			/* Short on resources, just stop for now. */
8342 			DPRINTF(WM_DEBUG_TX,
8343 			    ("%s: TX: dmamap load failed: %d\n",
8344 				device_xname(sc->sc_dev), error));
8345 			break;
8346 		}
8347 
8348 		segs_needed = dmamap->dm_nsegs;
8349 
8350 		/*
8351 		 * Ensure we have enough descriptors free to describe
8352 		 * the packet. Note, we always reserve one descriptor
8353 		 * at the end of the ring due to the semantics of the
8354 		 * TDT register, plus one more in the event we need
8355 		 * to load offload context.
8356 		 */
8357 		if (segs_needed > txq->txq_free - 2) {
8358 			/*
8359 			 * Not enough free descriptors to transmit this
8360 			 * packet.  We haven't committed anything yet,
8361 			 * so just unload the DMA map, put the packet
8362 			 * pack on the queue, and punt. Notify the upper
8363 			 * layer that there are no more slots left.
8364 			 */
8365 			DPRINTF(WM_DEBUG_TX,
8366 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
8367 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
8368 				segs_needed, txq->txq_free - 1));
8369 			txq->txq_flags |= WM_TXQ_NO_SPACE;
8370 			bus_dmamap_unload(sc->sc_dmat, dmamap);
8371 			WM_Q_EVCNT_INCR(txq, txdstall);
8372 			break;
8373 		}
8374 
8375 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
8376 
8377 		DPRINTF(WM_DEBUG_TX,
8378 		    ("%s: TX: packet has %d (%d) DMA segments\n",
8379 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
8380 
8381 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
8382 
8383 		/*
8384 		 * Store a pointer to the packet so that we can free it
8385 		 * later.
8386 		 *
8387 		 * Initially, we consider the number of descriptors the
8388 		 * packet uses the number of DMA segments.  This may be
8389 		 * incremented by 1 if we do checksum offload (a descriptor
8390 		 * is used to set the checksum context).
8391 		 */
8392 		txs->txs_mbuf = m0;
8393 		txs->txs_firstdesc = txq->txq_next;
8394 		txs->txs_ndesc = segs_needed;
8395 
8396 		/* Set up offload parameters for this packet. */
8397 		uint32_t cmdlen, fields, dcmdlen;
8398 		if (m0->m_pkthdr.csum_flags &
8399 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
8400 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
8401 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
8402 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
8403 			    &do_csum) != 0) {
8404 				/* Error message already displayed. */
8405 				bus_dmamap_unload(sc->sc_dmat, dmamap);
8406 				continue;
8407 			}
8408 		} else {
8409 			do_csum = false;
8410 			cmdlen = 0;
8411 			fields = 0;
8412 		}
8413 
8414 		/* Sync the DMA map. */
8415 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
8416 		    BUS_DMASYNC_PREWRITE);
8417 
8418 		/* Initialize the first transmit descriptor. */
8419 		nexttx = txq->txq_next;
8420 		if (!do_csum) {
8421 			/* Setup a legacy descriptor */
8422 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
8423 			    dmamap->dm_segs[0].ds_addr);
8424 			txq->txq_descs[nexttx].wtx_cmdlen =
8425 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
8426 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
8427 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
8428 			if (vlan_has_tag(m0)) {
8429 				txq->txq_descs[nexttx].wtx_cmdlen |=
8430 				    htole32(WTX_CMD_VLE);
8431 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
8432 				    htole16(vlan_get_tag(m0));
8433 			} else
8434 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
8435 
8436 			dcmdlen = 0;
8437 		} else {
8438 			/* Setup an advanced data descriptor */
8439 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
8440 			    htole64(dmamap->dm_segs[0].ds_addr);
8441 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
8442 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
8443 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
8444 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
8445 			    htole32(fields);
8446 			DPRINTF(WM_DEBUG_TX,
8447 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
8448 				device_xname(sc->sc_dev), nexttx,
8449 				(uint64_t)dmamap->dm_segs[0].ds_addr));
8450 			DPRINTF(WM_DEBUG_TX,
8451 			    ("\t 0x%08x%08x\n", fields,
8452 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
8453 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
8454 		}
8455 
8456 		lasttx = nexttx;
8457 		nexttx = WM_NEXTTX(txq, nexttx);
8458 		/*
8459 		 * Fill in the next descriptors. legacy or advanced format
8460 		 * is the same here
8461 		 */
8462 		for (seg = 1; seg < dmamap->dm_nsegs;
8463 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
8464 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
8465 			    htole64(dmamap->dm_segs[seg].ds_addr);
8466 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
8467 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
8468 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
8469 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
8470 			lasttx = nexttx;
8471 
8472 			DPRINTF(WM_DEBUG_TX,
8473 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
8474 				device_xname(sc->sc_dev), nexttx,
8475 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
8476 				dmamap->dm_segs[seg].ds_len));
8477 		}
8478 
8479 		KASSERT(lasttx != -1);
8480 
8481 		/*
8482 		 * Set up the command byte on the last descriptor of
8483 		 * the packet. If we're in the interrupt delay window,
8484 		 * delay the interrupt.
8485 		 */
8486 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
8487 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
8488 		txq->txq_descs[lasttx].wtx_cmdlen |=
8489 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
8490 
8491 		txs->txs_lastdesc = lasttx;
8492 
8493 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
8494 		    device_xname(sc->sc_dev),
8495 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
8496 
8497 		/* Sync the descriptors we're using. */
8498 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
8499 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
8500 
8501 		/* Give the packet to the chip. */
8502 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
8503 		sent = true;
8504 
8505 		DPRINTF(WM_DEBUG_TX,
8506 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
8507 
8508 		DPRINTF(WM_DEBUG_TX,
8509 		    ("%s: TX: finished transmitting packet, job %d\n",
8510 			device_xname(sc->sc_dev), txq->txq_snext));
8511 
8512 		/* Advance the tx pointer. */
8513 		txq->txq_free -= txs->txs_ndesc;
8514 		txq->txq_next = nexttx;
8515 
8516 		txq->txq_sfree--;
8517 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
8518 
8519 		/* Pass the packet to any BPF listeners. */
8520 		bpf_mtap(ifp, m0, BPF_D_OUT);
8521 	}
8522 
8523 	if (m0 != NULL) {
8524 		txq->txq_flags |= WM_TXQ_NO_SPACE;
8525 		WM_Q_EVCNT_INCR(txq, descdrop);
8526 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
8527 			__func__));
8528 		m_freem(m0);
8529 	}
8530 
8531 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
8532 		/* No more slots; notify upper layer. */
8533 		txq->txq_flags |= WM_TXQ_NO_SPACE;
8534 	}
8535 
8536 	if (sent) {
8537 		/* Set a watchdog timer in case the chip flakes out. */
8538 		txq->txq_lastsent = time_uptime;
8539 		txq->txq_sending = true;
8540 	}
8541 }
8542 
8543 static void
8544 wm_deferred_start_locked(struct wm_txqueue *txq)
8545 {
8546 	struct wm_softc *sc = txq->txq_sc;
8547 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8548 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
8549 	int qid = wmq->wmq_id;
8550 
8551 	KASSERT(mutex_owned(txq->txq_lock));
8552 
8553 	if (txq->txq_stopping) {
8554 		mutex_exit(txq->txq_lock);
8555 		return;
8556 	}
8557 
8558 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
8559 		/* XXX need for ALTQ or one CPU system */
8560 		if (qid == 0)
8561 			wm_nq_start_locked(ifp);
8562 		wm_nq_transmit_locked(ifp, txq);
8563 	} else {
8564 		/* XXX need for ALTQ or one CPU system */
8565 		if (qid == 0)
8566 			wm_start_locked(ifp);
8567 		wm_transmit_locked(ifp, txq);
8568 	}
8569 }
8570 
8571 /* Interrupt */
8572 
8573 /*
8574  * wm_txeof:
8575  *
8576  *	Helper; handle transmit interrupts.
8577  */
8578 static bool
8579 wm_txeof(struct wm_txqueue *txq, u_int limit)
8580 {
8581 	struct wm_softc *sc = txq->txq_sc;
8582 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8583 	struct wm_txsoft *txs;
8584 	int count = 0;
8585 	int i;
8586 	uint8_t status;
8587 	bool more = false;
8588 
8589 	KASSERT(mutex_owned(txq->txq_lock));
8590 
8591 	if (txq->txq_stopping)
8592 		return false;
8593 
8594 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
8595 
8596 	/*
8597 	 * Go through the Tx list and free mbufs for those
8598 	 * frames which have been transmitted.
8599 	 */
8600 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
8601 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
8602 		if (limit-- == 0) {
8603 			more = true;
8604 			DPRINTF(WM_DEBUG_TX,
8605 			    ("%s: TX: loop limited, job %d is not processed\n",
8606 				device_xname(sc->sc_dev), i));
8607 			break;
8608 		}
8609 
8610 		txs = &txq->txq_soft[i];
8611 
8612 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
8613 			device_xname(sc->sc_dev), i));
8614 
8615 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
8616 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
8617 
8618 		status =
8619 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
8620 		if ((status & WTX_ST_DD) == 0) {
8621 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
8622 			    BUS_DMASYNC_PREREAD);
8623 			break;
8624 		}
8625 
8626 		count++;
8627 		DPRINTF(WM_DEBUG_TX,
8628 		    ("%s: TX: job %d done: descs %d..%d\n",
8629 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
8630 		    txs->txs_lastdesc));
8631 
8632 		/*
8633 		 * XXX We should probably be using the statistics
8634 		 * XXX registers, but I don't know if they exist
8635 		 * XXX on chips before the i82544.
8636 		 */
8637 
8638 #ifdef WM_EVENT_COUNTERS
8639 		if (status & WTX_ST_TU)
8640 			WM_Q_EVCNT_INCR(txq, underrun);
8641 #endif /* WM_EVENT_COUNTERS */
8642 
8643 		/*
8644 		 * 82574 and newer's document says the status field has neither
8645 		 * EC (Excessive Collision) bit nor LC (Late Collision) bit
8646 		 * (reserved). Refer "PCIe GbE Controller Open Source Software
8647 		 * Developer's Manual", 82574 datasheet and newer.
8648 		 *
8649 		 * XXX I saw the LC bit was set on I218 even though the media
8650 		 * was full duplex, so the bit might be used for other
8651 		 * meaning ...(I have no document).
8652 		 */
8653 
8654 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
8655 		    && ((sc->sc_type < WM_T_82574)
8656 			|| (sc->sc_type == WM_T_80003))) {
8657 			if_statinc(ifp, if_oerrors);
8658 			if (status & WTX_ST_LC)
8659 				log(LOG_WARNING, "%s: late collision\n",
8660 				    device_xname(sc->sc_dev));
8661 			else if (status & WTX_ST_EC) {
8662 				if_statadd(ifp, if_collisions,
8663 				    TX_COLLISION_THRESHOLD + 1);
8664 				log(LOG_WARNING, "%s: excessive collisions\n",
8665 				    device_xname(sc->sc_dev));
8666 			}
8667 		} else
8668 			if_statinc(ifp, if_opackets);
8669 
8670 		txq->txq_packets++;
8671 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
8672 
8673 		txq->txq_free += txs->txs_ndesc;
8674 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
8675 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
8676 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
8677 		m_freem(txs->txs_mbuf);
8678 		txs->txs_mbuf = NULL;
8679 	}
8680 
8681 	/* Update the dirty transmit buffer pointer. */
8682 	txq->txq_sdirty = i;
8683 	DPRINTF(WM_DEBUG_TX,
8684 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
8685 
8686 	/*
8687 	 * If there are no more pending transmissions, cancel the watchdog
8688 	 * timer.
8689 	 */
8690 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
8691 		txq->txq_sending = false;
8692 
8693 	return more;
8694 }
8695 
8696 static inline uint32_t
8697 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
8698 {
8699 	struct wm_softc *sc = rxq->rxq_sc;
8700 
8701 	if (sc->sc_type == WM_T_82574)
8702 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
8703 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8704 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
8705 	else
8706 		return rxq->rxq_descs[idx].wrx_status;
8707 }
8708 
8709 static inline uint32_t
8710 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
8711 {
8712 	struct wm_softc *sc = rxq->rxq_sc;
8713 
8714 	if (sc->sc_type == WM_T_82574)
8715 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
8716 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8717 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
8718 	else
8719 		return rxq->rxq_descs[idx].wrx_errors;
8720 }
8721 
8722 static inline uint16_t
8723 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
8724 {
8725 	struct wm_softc *sc = rxq->rxq_sc;
8726 
8727 	if (sc->sc_type == WM_T_82574)
8728 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
8729 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8730 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
8731 	else
8732 		return rxq->rxq_descs[idx].wrx_special;
8733 }
8734 
8735 static inline int
8736 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
8737 {
8738 	struct wm_softc *sc = rxq->rxq_sc;
8739 
8740 	if (sc->sc_type == WM_T_82574)
8741 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
8742 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8743 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
8744 	else
8745 		return rxq->rxq_descs[idx].wrx_len;
8746 }
8747 
8748 #ifdef WM_DEBUG
8749 static inline uint32_t
8750 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
8751 {
8752 	struct wm_softc *sc = rxq->rxq_sc;
8753 
8754 	if (sc->sc_type == WM_T_82574)
8755 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
8756 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8757 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
8758 	else
8759 		return 0;
8760 }
8761 
8762 static inline uint8_t
8763 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
8764 {
8765 	struct wm_softc *sc = rxq->rxq_sc;
8766 
8767 	if (sc->sc_type == WM_T_82574)
8768 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
8769 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8770 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
8771 	else
8772 		return 0;
8773 }
8774 #endif /* WM_DEBUG */
8775 
8776 static inline bool
8777 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
8778     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
8779 {
8780 
8781 	if (sc->sc_type == WM_T_82574)
8782 		return (status & ext_bit) != 0;
8783 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8784 		return (status & nq_bit) != 0;
8785 	else
8786 		return (status & legacy_bit) != 0;
8787 }
8788 
8789 static inline bool
8790 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
8791     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
8792 {
8793 
8794 	if (sc->sc_type == WM_T_82574)
8795 		return (error & ext_bit) != 0;
8796 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8797 		return (error & nq_bit) != 0;
8798 	else
8799 		return (error & legacy_bit) != 0;
8800 }
8801 
8802 static inline bool
8803 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
8804 {
8805 
8806 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
8807 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
8808 		return true;
8809 	else
8810 		return false;
8811 }
8812 
8813 static inline bool
8814 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
8815 {
8816 	struct wm_softc *sc = rxq->rxq_sc;
8817 
8818 	/* XXX missing error bit for newqueue? */
8819 	if (wm_rxdesc_is_set_error(sc, errors,
8820 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
8821 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
8822 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
8823 		NQRXC_ERROR_RXE)) {
8824 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
8825 		    EXTRXC_ERROR_SE, 0))
8826 			log(LOG_WARNING, "%s: symbol error\n",
8827 			    device_xname(sc->sc_dev));
8828 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
8829 		    EXTRXC_ERROR_SEQ, 0))
8830 			log(LOG_WARNING, "%s: receive sequence error\n",
8831 			    device_xname(sc->sc_dev));
8832 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
8833 		    EXTRXC_ERROR_CE, 0))
8834 			log(LOG_WARNING, "%s: CRC error\n",
8835 			    device_xname(sc->sc_dev));
8836 		return true;
8837 	}
8838 
8839 	return false;
8840 }
8841 
8842 static inline bool
8843 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
8844 {
8845 	struct wm_softc *sc = rxq->rxq_sc;
8846 
8847 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
8848 		NQRXC_STATUS_DD)) {
8849 		/* We have processed all of the receive descriptors. */
8850 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
8851 		return false;
8852 	}
8853 
8854 	return true;
8855 }
8856 
8857 static inline bool
8858 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
8859     uint16_t vlantag, struct mbuf *m)
8860 {
8861 
8862 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
8863 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
8864 		vlan_set_tag(m, le16toh(vlantag));
8865 	}
8866 
8867 	return true;
8868 }
8869 
8870 static inline void
8871 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
8872     uint32_t errors, struct mbuf *m)
8873 {
8874 	struct wm_softc *sc = rxq->rxq_sc;
8875 
8876 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
8877 		if (wm_rxdesc_is_set_status(sc, status,
8878 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
8879 			WM_Q_EVCNT_INCR(rxq, ipsum);
8880 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
8881 			if (wm_rxdesc_is_set_error(sc, errors,
8882 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
8883 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
8884 		}
8885 		if (wm_rxdesc_is_set_status(sc, status,
8886 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
8887 			/*
8888 			 * Note: we don't know if this was TCP or UDP,
8889 			 * so we just set both bits, and expect the
8890 			 * upper layers to deal.
8891 			 */
8892 			WM_Q_EVCNT_INCR(rxq, tusum);
8893 			m->m_pkthdr.csum_flags |=
8894 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
8895 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
8896 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
8897 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
8898 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
8899 		}
8900 	}
8901 }
8902 
8903 /*
8904  * wm_rxeof:
8905  *
8906  *	Helper; handle receive interrupts.
8907  */
8908 static bool
8909 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
8910 {
8911 	struct wm_softc *sc = rxq->rxq_sc;
8912 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8913 	struct wm_rxsoft *rxs;
8914 	struct mbuf *m;
8915 	int i, len;
8916 	int count = 0;
8917 	uint32_t status, errors;
8918 	uint16_t vlantag;
8919 	bool more = false;
8920 
8921 	KASSERT(mutex_owned(rxq->rxq_lock));
8922 
8923 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
8924 		if (limit-- == 0) {
8925 			rxq->rxq_ptr = i;
8926 			more = true;
8927 			DPRINTF(WM_DEBUG_RX,
8928 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
8929 				device_xname(sc->sc_dev), i));
8930 			break;
8931 		}
8932 
8933 		rxs = &rxq->rxq_soft[i];
8934 
8935 		DPRINTF(WM_DEBUG_RX,
8936 		    ("%s: RX: checking descriptor %d\n",
8937 			device_xname(sc->sc_dev), i));
8938 		wm_cdrxsync(rxq, i,
8939 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
8940 
8941 		status = wm_rxdesc_get_status(rxq, i);
8942 		errors = wm_rxdesc_get_errors(rxq, i);
8943 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
8944 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
8945 #ifdef WM_DEBUG
8946 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
8947 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
8948 #endif
8949 
8950 		if (!wm_rxdesc_dd(rxq, i, status)) {
8951 			/*
8952 			 * Update the receive pointer holding rxq_lock
8953 			 * consistent with increment counter.
8954 			 */
8955 			rxq->rxq_ptr = i;
8956 			break;
8957 		}
8958 
8959 		count++;
8960 		if (__predict_false(rxq->rxq_discard)) {
8961 			DPRINTF(WM_DEBUG_RX,
8962 			    ("%s: RX: discarding contents of descriptor %d\n",
8963 				device_xname(sc->sc_dev), i));
8964 			wm_init_rxdesc(rxq, i);
8965 			if (wm_rxdesc_is_eop(rxq, status)) {
8966 				/* Reset our state. */
8967 				DPRINTF(WM_DEBUG_RX,
8968 				    ("%s: RX: resetting rxdiscard -> 0\n",
8969 					device_xname(sc->sc_dev)));
8970 				rxq->rxq_discard = 0;
8971 			}
8972 			continue;
8973 		}
8974 
8975 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
8976 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
8977 
8978 		m = rxs->rxs_mbuf;
8979 
8980 		/*
8981 		 * Add a new receive buffer to the ring, unless of
8982 		 * course the length is zero. Treat the latter as a
8983 		 * failed mapping.
8984 		 */
8985 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
8986 			/*
8987 			 * Failed, throw away what we've done so
8988 			 * far, and discard the rest of the packet.
8989 			 */
8990 			if_statinc(ifp, if_ierrors);
8991 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
8992 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
8993 			wm_init_rxdesc(rxq, i);
8994 			if (!wm_rxdesc_is_eop(rxq, status))
8995 				rxq->rxq_discard = 1;
8996 			if (rxq->rxq_head != NULL)
8997 				m_freem(rxq->rxq_head);
8998 			WM_RXCHAIN_RESET(rxq);
8999 			DPRINTF(WM_DEBUG_RX,
9000 			    ("%s: RX: Rx buffer allocation failed, "
9001 			    "dropping packet%s\n", device_xname(sc->sc_dev),
9002 				rxq->rxq_discard ? " (discard)" : ""));
9003 			continue;
9004 		}
9005 
9006 		m->m_len = len;
9007 		rxq->rxq_len += len;
9008 		DPRINTF(WM_DEBUG_RX,
9009 		    ("%s: RX: buffer at %p len %d\n",
9010 			device_xname(sc->sc_dev), m->m_data, len));
9011 
9012 		/* If this is not the end of the packet, keep looking. */
9013 		if (!wm_rxdesc_is_eop(rxq, status)) {
9014 			WM_RXCHAIN_LINK(rxq, m);
9015 			DPRINTF(WM_DEBUG_RX,
9016 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
9017 				device_xname(sc->sc_dev), rxq->rxq_len));
9018 			continue;
9019 		}
9020 
9021 		/*
9022 		 * Okay, we have the entire packet now. The chip is
9023 		 * configured to include the FCS except I350 and I21[01]
9024 		 * (not all chips can be configured to strip it),
9025 		 * so we need to trim it.
9026 		 * May need to adjust length of previous mbuf in the
9027 		 * chain if the current mbuf is too short.
9028 		 * For an eratta, the RCTL_SECRC bit in RCTL register
9029 		 * is always set in I350, so we don't trim it.
9030 		 */
9031 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
9032 		    && (sc->sc_type != WM_T_I210)
9033 		    && (sc->sc_type != WM_T_I211)) {
9034 			if (m->m_len < ETHER_CRC_LEN) {
9035 				rxq->rxq_tail->m_len
9036 				    -= (ETHER_CRC_LEN - m->m_len);
9037 				m->m_len = 0;
9038 			} else
9039 				m->m_len -= ETHER_CRC_LEN;
9040 			len = rxq->rxq_len - ETHER_CRC_LEN;
9041 		} else
9042 			len = rxq->rxq_len;
9043 
9044 		WM_RXCHAIN_LINK(rxq, m);
9045 
9046 		*rxq->rxq_tailp = NULL;
9047 		m = rxq->rxq_head;
9048 
9049 		WM_RXCHAIN_RESET(rxq);
9050 
9051 		DPRINTF(WM_DEBUG_RX,
9052 		    ("%s: RX: have entire packet, len -> %d\n",
9053 			device_xname(sc->sc_dev), len));
9054 
9055 		/* If an error occurred, update stats and drop the packet. */
9056 		if (wm_rxdesc_has_errors(rxq, errors)) {
9057 			m_freem(m);
9058 			continue;
9059 		}
9060 
9061 		/* No errors.  Receive the packet. */
9062 		m_set_rcvif(m, ifp);
9063 		m->m_pkthdr.len = len;
9064 		/*
9065 		 * TODO
9066 		 * should be save rsshash and rsstype to this mbuf.
9067 		 */
9068 		DPRINTF(WM_DEBUG_RX,
9069 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
9070 			device_xname(sc->sc_dev), rsstype, rsshash));
9071 
9072 		/*
9073 		 * If VLANs are enabled, VLAN packets have been unwrapped
9074 		 * for us.  Associate the tag with the packet.
9075 		 */
9076 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
9077 			continue;
9078 
9079 		/* Set up checksum info for this packet. */
9080 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
9081 		/*
9082 		 * Update the receive pointer holding rxq_lock consistent with
9083 		 * increment counter.
9084 		 */
9085 		rxq->rxq_ptr = i;
9086 		rxq->rxq_packets++;
9087 		rxq->rxq_bytes += len;
9088 		mutex_exit(rxq->rxq_lock);
9089 
9090 		/* Pass it on. */
9091 		if_percpuq_enqueue(sc->sc_ipq, m);
9092 
9093 		mutex_enter(rxq->rxq_lock);
9094 
9095 		if (rxq->rxq_stopping)
9096 			break;
9097 	}
9098 
9099 	DPRINTF(WM_DEBUG_RX,
9100 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
9101 
9102 	return more;
9103 }
9104 
9105 /*
9106  * wm_linkintr_gmii:
9107  *
9108  *	Helper; handle link interrupts for GMII.
9109  */
9110 static void
9111 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
9112 {
9113 	device_t dev = sc->sc_dev;
9114 	uint32_t status, reg;
9115 	bool link;
9116 	int rv;
9117 
9118 	KASSERT(WM_CORE_LOCKED(sc));
9119 
9120 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
9121 		__func__));
9122 
9123 	if ((icr & ICR_LSC) == 0) {
9124 		if (icr & ICR_RXSEQ)
9125 			DPRINTF(WM_DEBUG_LINK,
9126 			    ("%s: LINK Receive sequence error\n",
9127 				device_xname(dev)));
9128 		return;
9129 	}
9130 
9131 	/* Link status changed */
9132 	status = CSR_READ(sc, WMREG_STATUS);
9133 	link = status & STATUS_LU;
9134 	if (link) {
9135 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
9136 			device_xname(dev),
9137 			(status & STATUS_FD) ? "FDX" : "HDX"));
9138 	} else {
9139 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
9140 			device_xname(dev)));
9141 	}
9142 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
9143 		wm_gig_downshift_workaround_ich8lan(sc);
9144 
9145 	if ((sc->sc_type == WM_T_ICH8)
9146 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
9147 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
9148 	}
9149 	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
9150 		device_xname(dev)));
9151 	mii_pollstat(&sc->sc_mii);
9152 	if (sc->sc_type == WM_T_82543) {
9153 		int miistatus, active;
9154 
9155 		/*
9156 		 * With 82543, we need to force speed and
9157 		 * duplex on the MAC equal to what the PHY
9158 		 * speed and duplex configuration is.
9159 		 */
9160 		miistatus = sc->sc_mii.mii_media_status;
9161 
9162 		if (miistatus & IFM_ACTIVE) {
9163 			active = sc->sc_mii.mii_media_active;
9164 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
9165 			switch (IFM_SUBTYPE(active)) {
9166 			case IFM_10_T:
9167 				sc->sc_ctrl |= CTRL_SPEED_10;
9168 				break;
9169 			case IFM_100_TX:
9170 				sc->sc_ctrl |= CTRL_SPEED_100;
9171 				break;
9172 			case IFM_1000_T:
9173 				sc->sc_ctrl |= CTRL_SPEED_1000;
9174 				break;
9175 			default:
9176 				/*
9177 				 * Fiber?
9178 				 * Shoud not enter here.
9179 				 */
9180 				device_printf(dev, "unknown media (%x)\n",
9181 				    active);
9182 				break;
9183 			}
9184 			if (active & IFM_FDX)
9185 				sc->sc_ctrl |= CTRL_FD;
9186 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9187 		}
9188 	} else if (sc->sc_type == WM_T_PCH) {
9189 		wm_k1_gig_workaround_hv(sc,
9190 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
9191 	}
9192 
9193 	/*
9194 	 * When connected at 10Mbps half-duplex, some parts are excessively
9195 	 * aggressive resulting in many collisions. To avoid this, increase
9196 	 * the IPG and reduce Rx latency in the PHY.
9197 	 */
9198 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
9199 	    && link) {
9200 		uint32_t tipg_reg;
9201 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
9202 		bool fdx;
9203 		uint16_t emi_addr, emi_val;
9204 
9205 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
9206 		tipg_reg &= ~TIPG_IPGT_MASK;
9207 		fdx = status & STATUS_FD;
9208 
9209 		if (!fdx && (speed == STATUS_SPEED_10)) {
9210 			tipg_reg |= 0xff;
9211 			/* Reduce Rx latency in analog PHY */
9212 			emi_val = 0;
9213 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
9214 		    fdx && speed != STATUS_SPEED_1000) {
9215 			tipg_reg |= 0xc;
9216 			emi_val = 1;
9217 		} else {
9218 			/* Roll back the default values */
9219 			tipg_reg |= 0x08;
9220 			emi_val = 1;
9221 		}
9222 
9223 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
9224 
9225 		rv = sc->phy.acquire(sc);
9226 		if (rv)
9227 			return;
9228 
9229 		if (sc->sc_type == WM_T_PCH2)
9230 			emi_addr = I82579_RX_CONFIG;
9231 		else
9232 			emi_addr = I217_RX_CONFIG;
9233 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
9234 
9235 		if (sc->sc_type >= WM_T_PCH_LPT) {
9236 			uint16_t phy_reg;
9237 
9238 			sc->phy.readreg_locked(dev, 2,
9239 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
9240 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
9241 			if (speed == STATUS_SPEED_100
9242 			    || speed == STATUS_SPEED_10)
9243 				phy_reg |= 0x3e8;
9244 			else
9245 				phy_reg |= 0xfa;
9246 			sc->phy.writereg_locked(dev, 2,
9247 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
9248 
9249 			if (speed == STATUS_SPEED_1000) {
9250 				sc->phy.readreg_locked(dev, 2,
9251 				    HV_PM_CTRL, &phy_reg);
9252 
9253 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
9254 
9255 				sc->phy.writereg_locked(dev, 2,
9256 				    HV_PM_CTRL, phy_reg);
9257 			}
9258 		}
9259 		sc->phy.release(sc);
9260 
9261 		if (rv)
9262 			return;
9263 
9264 		if (sc->sc_type >= WM_T_PCH_SPT) {
9265 			uint16_t data, ptr_gap;
9266 
9267 			if (speed == STATUS_SPEED_1000) {
9268 				rv = sc->phy.acquire(sc);
9269 				if (rv)
9270 					return;
9271 
9272 				rv = sc->phy.readreg_locked(dev, 2,
9273 				    I219_UNKNOWN1, &data);
9274 				if (rv) {
9275 					sc->phy.release(sc);
9276 					return;
9277 				}
9278 
9279 				ptr_gap = (data & (0x3ff << 2)) >> 2;
9280 				if (ptr_gap < 0x18) {
9281 					data &= ~(0x3ff << 2);
9282 					data |= (0x18 << 2);
9283 					rv = sc->phy.writereg_locked(dev,
9284 					    2, I219_UNKNOWN1, data);
9285 				}
9286 				sc->phy.release(sc);
9287 				if (rv)
9288 					return;
9289 			} else {
9290 				rv = sc->phy.acquire(sc);
9291 				if (rv)
9292 					return;
9293 
9294 				rv = sc->phy.writereg_locked(dev, 2,
9295 				    I219_UNKNOWN1, 0xc023);
9296 				sc->phy.release(sc);
9297 				if (rv)
9298 					return;
9299 
9300 			}
9301 		}
9302 	}
9303 
9304 	/*
9305 	 * I217 Packet Loss issue:
9306 	 * ensure that FEXTNVM4 Beacon Duration is set correctly
9307 	 * on power up.
9308 	 * Set the Beacon Duration for I217 to 8 usec
9309 	 */
9310 	if (sc->sc_type >= WM_T_PCH_LPT) {
9311 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
9312 		reg &= ~FEXTNVM4_BEACON_DURATION;
9313 		reg |= FEXTNVM4_BEACON_DURATION_8US;
9314 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
9315 	}
9316 
9317 	/* Work-around I218 hang issue */
9318 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
9319 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
9320 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
9321 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
9322 		wm_k1_workaround_lpt_lp(sc, link);
9323 
9324 	if (sc->sc_type >= WM_T_PCH_LPT) {
9325 		/*
9326 		 * Set platform power management values for Latency
9327 		 * Tolerance Reporting (LTR)
9328 		 */
9329 		wm_platform_pm_pch_lpt(sc,
9330 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
9331 	}
9332 
9333 	/* Clear link partner's EEE ability */
9334 	sc->eee_lp_ability = 0;
9335 
9336 	/* FEXTNVM6 K1-off workaround */
9337 	if (sc->sc_type == WM_T_PCH_SPT) {
9338 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
9339 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
9340 			reg |= FEXTNVM6_K1_OFF_ENABLE;
9341 		else
9342 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
9343 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
9344 	}
9345 
9346 	if (!link)
9347 		return;
9348 
9349 	switch (sc->sc_type) {
9350 	case WM_T_PCH2:
9351 		wm_k1_workaround_lv(sc);
9352 		/* FALLTHROUGH */
9353 	case WM_T_PCH:
9354 		if (sc->sc_phytype == WMPHY_82578)
9355 			wm_link_stall_workaround_hv(sc);
9356 		break;
9357 	default:
9358 		break;
9359 	}
9360 
9361 	/* Enable/Disable EEE after link up */
9362 	if (sc->sc_phytype > WMPHY_82579)
9363 		wm_set_eee_pchlan(sc);
9364 }
9365 
9366 /*
9367  * wm_linkintr_tbi:
9368  *
9369  *	Helper; handle link interrupts for TBI mode.
9370  */
9371 static void
9372 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
9373 {
9374 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9375 	uint32_t status;
9376 
9377 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
9378 		__func__));
9379 
9380 	status = CSR_READ(sc, WMREG_STATUS);
9381 	if (icr & ICR_LSC) {
9382 		wm_check_for_link(sc);
9383 		if (status & STATUS_LU) {
9384 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
9385 				device_xname(sc->sc_dev),
9386 				(status & STATUS_FD) ? "FDX" : "HDX"));
9387 			/*
9388 			 * NOTE: CTRL will update TFCE and RFCE automatically,
9389 			 * so we should update sc->sc_ctrl
9390 			 */
9391 
9392 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
9393 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
9394 			sc->sc_fcrtl &= ~FCRTL_XONE;
9395 			if (status & STATUS_FD)
9396 				sc->sc_tctl |=
9397 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
9398 			else
9399 				sc->sc_tctl |=
9400 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
9401 			if (sc->sc_ctrl & CTRL_TFCE)
9402 				sc->sc_fcrtl |= FCRTL_XONE;
9403 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
9404 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
9405 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
9406 			sc->sc_tbi_linkup = 1;
9407 			if_link_state_change(ifp, LINK_STATE_UP);
9408 		} else {
9409 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
9410 				device_xname(sc->sc_dev)));
9411 			sc->sc_tbi_linkup = 0;
9412 			if_link_state_change(ifp, LINK_STATE_DOWN);
9413 		}
9414 		/* Update LED */
9415 		wm_tbi_serdes_set_linkled(sc);
9416 	} else if (icr & ICR_RXSEQ)
9417 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
9418 			device_xname(sc->sc_dev)));
9419 }
9420 
9421 /*
9422  * wm_linkintr_serdes:
9423  *
9424  *	Helper; handle link interrupts for TBI mode.
9425  */
9426 static void
9427 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
9428 {
9429 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9430 	struct mii_data *mii = &sc->sc_mii;
9431 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9432 	uint32_t pcs_adv, pcs_lpab, reg;
9433 
9434 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
9435 		__func__));
9436 
9437 	if (icr & ICR_LSC) {
9438 		/* Check PCS */
9439 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
9440 		if ((reg & PCS_LSTS_LINKOK) != 0) {
9441 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
9442 				device_xname(sc->sc_dev)));
9443 			mii->mii_media_status |= IFM_ACTIVE;
9444 			sc->sc_tbi_linkup = 1;
9445 			if_link_state_change(ifp, LINK_STATE_UP);
9446 		} else {
9447 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
9448 				device_xname(sc->sc_dev)));
9449 			mii->mii_media_status |= IFM_NONE;
9450 			sc->sc_tbi_linkup = 0;
9451 			if_link_state_change(ifp, LINK_STATE_DOWN);
9452 			wm_tbi_serdes_set_linkled(sc);
9453 			return;
9454 		}
9455 		mii->mii_media_active |= IFM_1000_SX;
9456 		if ((reg & PCS_LSTS_FDX) != 0)
9457 			mii->mii_media_active |= IFM_FDX;
9458 		else
9459 			mii->mii_media_active |= IFM_HDX;
9460 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
9461 			/* Check flow */
9462 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
9463 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
9464 				DPRINTF(WM_DEBUG_LINK,
9465 				    ("XXX LINKOK but not ACOMP\n"));
9466 				return;
9467 			}
9468 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
9469 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
9470 			DPRINTF(WM_DEBUG_LINK,
9471 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
9472 			if ((pcs_adv & TXCW_SYM_PAUSE)
9473 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
9474 				mii->mii_media_active |= IFM_FLOW
9475 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
9476 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
9477 			    && (pcs_adv & TXCW_ASYM_PAUSE)
9478 			    && (pcs_lpab & TXCW_SYM_PAUSE)
9479 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
9480 				mii->mii_media_active |= IFM_FLOW
9481 				    | IFM_ETH_TXPAUSE;
9482 			else if ((pcs_adv & TXCW_SYM_PAUSE)
9483 			    && (pcs_adv & TXCW_ASYM_PAUSE)
9484 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
9485 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
9486 				mii->mii_media_active |= IFM_FLOW
9487 				    | IFM_ETH_RXPAUSE;
9488 		}
9489 		/* Update LED */
9490 		wm_tbi_serdes_set_linkled(sc);
9491 	} else
9492 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
9493 		    device_xname(sc->sc_dev)));
9494 }
9495 
9496 /*
9497  * wm_linkintr:
9498  *
9499  *	Helper; handle link interrupts.
9500  */
9501 static void
9502 wm_linkintr(struct wm_softc *sc, uint32_t icr)
9503 {
9504 
9505 	KASSERT(WM_CORE_LOCKED(sc));
9506 
9507 	if (sc->sc_flags & WM_F_HAS_MII)
9508 		wm_linkintr_gmii(sc, icr);
9509 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
9510 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
9511 		wm_linkintr_serdes(sc, icr);
9512 	else
9513 		wm_linkintr_tbi(sc, icr);
9514 }
9515 
9516 
9517 static inline void
9518 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
9519 {
9520 
9521 	if (wmq->wmq_txrx_use_workqueue)
9522 		workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
9523 	else
9524 		softint_schedule(wmq->wmq_si);
9525 }
9526 
9527 /*
9528  * wm_intr_legacy:
9529  *
9530  *	Interrupt service routine for INTx and MSI.
9531  */
9532 static int
9533 wm_intr_legacy(void *arg)
9534 {
9535 	struct wm_softc *sc = arg;
9536 	struct wm_queue *wmq = &sc->sc_queue[0];
9537 	struct wm_txqueue *txq = &wmq->wmq_txq;
9538 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
9539 	uint32_t icr, rndval = 0;
9540 	int handled = 0;
9541 
9542 	while (1 /* CONSTCOND */) {
9543 		icr = CSR_READ(sc, WMREG_ICR);
9544 		if ((icr & sc->sc_icr) == 0)
9545 			break;
9546 		if (handled == 0)
9547 			DPRINTF(WM_DEBUG_TX,
9548 			    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
9549 		if (rndval == 0)
9550 			rndval = icr;
9551 
9552 		mutex_enter(rxq->rxq_lock);
9553 
9554 		if (rxq->rxq_stopping) {
9555 			mutex_exit(rxq->rxq_lock);
9556 			break;
9557 		}
9558 
9559 		handled = 1;
9560 
9561 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
9562 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
9563 			DPRINTF(WM_DEBUG_RX,
9564 			    ("%s: RX: got Rx intr 0x%08x\n",
9565 				device_xname(sc->sc_dev),
9566 				icr & (ICR_RXDMT0 | ICR_RXT0)));
9567 			WM_Q_EVCNT_INCR(rxq, intr);
9568 		}
9569 #endif
9570 		/*
9571 		 * wm_rxeof() does *not* call upper layer functions directly,
9572 		 * as if_percpuq_enqueue() just call softint_schedule().
9573 		 * So, we can call wm_rxeof() in interrupt context.
9574 		 */
9575 		wm_rxeof(rxq, UINT_MAX);
9576 		/* Fill lower bits with RX index. See below for the upper. */
9577 		rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK;
9578 
9579 		mutex_exit(rxq->rxq_lock);
9580 		mutex_enter(txq->txq_lock);
9581 
9582 		if (txq->txq_stopping) {
9583 			mutex_exit(txq->txq_lock);
9584 			break;
9585 		}
9586 
9587 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
9588 		if (icr & ICR_TXDW) {
9589 			DPRINTF(WM_DEBUG_TX,
9590 			    ("%s: TX: got TXDW interrupt\n",
9591 				device_xname(sc->sc_dev)));
9592 			WM_Q_EVCNT_INCR(txq, txdw);
9593 		}
9594 #endif
9595 		wm_txeof(txq, UINT_MAX);
9596 		/* Fill upper bits with TX index. See above for the lower. */
9597 		rndval = txq->txq_next * WM_NRXDESC;
9598 
9599 		mutex_exit(txq->txq_lock);
9600 		WM_CORE_LOCK(sc);
9601 
9602 		if (sc->sc_core_stopping) {
9603 			WM_CORE_UNLOCK(sc);
9604 			break;
9605 		}
9606 
9607 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
9608 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
9609 			wm_linkintr(sc, icr);
9610 		}
9611 		if ((icr & ICR_GPI(0)) != 0)
9612 			device_printf(sc->sc_dev, "got module interrupt\n");
9613 
9614 		WM_CORE_UNLOCK(sc);
9615 
9616 		if (icr & ICR_RXO) {
9617 #if defined(WM_DEBUG)
9618 			log(LOG_WARNING, "%s: Receive overrun\n",
9619 			    device_xname(sc->sc_dev));
9620 #endif /* defined(WM_DEBUG) */
9621 		}
9622 	}
9623 
9624 	rnd_add_uint32(&sc->sc_queue[0].rnd_source, rndval);
9625 
9626 	if (handled) {
9627 		/* Try to get more packets going. */
9628 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
9629 		wm_sched_handle_queue(sc, wmq);
9630 	}
9631 
9632 	return handled;
9633 }
9634 
9635 static inline void
9636 wm_txrxintr_disable(struct wm_queue *wmq)
9637 {
9638 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
9639 
9640 	if (sc->sc_type == WM_T_82574)
9641 		CSR_WRITE(sc, WMREG_IMC,
9642 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
9643 	else if (sc->sc_type == WM_T_82575)
9644 		CSR_WRITE(sc, WMREG_EIMC,
9645 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
9646 	else
9647 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
9648 }
9649 
9650 static inline void
9651 wm_txrxintr_enable(struct wm_queue *wmq)
9652 {
9653 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
9654 
9655 	wm_itrs_calculate(sc, wmq);
9656 
9657 	/*
9658 	 * ICR_OTHER which is disabled in wm_linkintr_msix() is enabled here.
9659 	 * There is no need to care about which of RXQ(0) and RXQ(1) enable
9660 	 * ICR_OTHER in first, because each RXQ/TXQ interrupt is disabled
9661 	 * while each wm_handle_queue(wmq) is runnig.
9662 	 */
9663 	if (sc->sc_type == WM_T_82574)
9664 		CSR_WRITE(sc, WMREG_IMS,
9665 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
9666 	else if (sc->sc_type == WM_T_82575)
9667 		CSR_WRITE(sc, WMREG_EIMS,
9668 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
9669 	else
9670 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
9671 }
9672 
9673 static int
9674 wm_txrxintr_msix(void *arg)
9675 {
9676 	struct wm_queue *wmq = arg;
9677 	struct wm_txqueue *txq = &wmq->wmq_txq;
9678 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
9679 	struct wm_softc *sc = txq->txq_sc;
9680 	u_int txlimit = sc->sc_tx_intr_process_limit;
9681 	u_int rxlimit = sc->sc_rx_intr_process_limit;
9682 	uint32_t rndval = 0;
9683 	bool txmore;
9684 	bool rxmore;
9685 
9686 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
9687 
9688 	DPRINTF(WM_DEBUG_TX,
9689 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
9690 
9691 	wm_txrxintr_disable(wmq);
9692 
9693 	mutex_enter(txq->txq_lock);
9694 
9695 	if (txq->txq_stopping) {
9696 		mutex_exit(txq->txq_lock);
9697 		return 0;
9698 	}
9699 
9700 	WM_Q_EVCNT_INCR(txq, txdw);
9701 	txmore = wm_txeof(txq, txlimit);
9702 	/* Fill upper bits with TX index. See below for the lower. */
9703 	rndval = txq->txq_next * WM_NRXDESC;
9704 	/* wm_deferred start() is done in wm_handle_queue(). */
9705 	mutex_exit(txq->txq_lock);
9706 
9707 	DPRINTF(WM_DEBUG_RX,
9708 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
9709 	mutex_enter(rxq->rxq_lock);
9710 
9711 	if (rxq->rxq_stopping) {
9712 		mutex_exit(rxq->rxq_lock);
9713 		return 0;
9714 	}
9715 
9716 	WM_Q_EVCNT_INCR(rxq, intr);
9717 	rxmore = wm_rxeof(rxq, rxlimit);
9718 
9719 	/* Fill lower bits with RX index. See above for the upper. */
9720 	rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK;
9721 	mutex_exit(rxq->rxq_lock);
9722 
9723 	wm_itrs_writereg(sc, wmq);
9724 
9725 	/*
9726 	 * This function is called in the hardware interrupt context and
9727 	 * per-CPU, so it's not required to take a lock.
9728 	 */
9729 	if (rndval != 0)
9730 		rnd_add_uint32(&sc->sc_queue[wmq->wmq_id].rnd_source, rndval);
9731 
9732 	if (txmore || rxmore) {
9733 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
9734 		wm_sched_handle_queue(sc, wmq);
9735 	} else
9736 		wm_txrxintr_enable(wmq);
9737 
9738 	return 1;
9739 }
9740 
9741 static void
9742 wm_handle_queue(void *arg)
9743 {
9744 	struct wm_queue *wmq = arg;
9745 	struct wm_txqueue *txq = &wmq->wmq_txq;
9746 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
9747 	struct wm_softc *sc = txq->txq_sc;
9748 	u_int txlimit = sc->sc_tx_process_limit;
9749 	u_int rxlimit = sc->sc_rx_process_limit;
9750 	bool txmore;
9751 	bool rxmore;
9752 
9753 	mutex_enter(txq->txq_lock);
9754 	if (txq->txq_stopping) {
9755 		mutex_exit(txq->txq_lock);
9756 		return;
9757 	}
9758 	txmore = wm_txeof(txq, txlimit);
9759 	wm_deferred_start_locked(txq);
9760 	mutex_exit(txq->txq_lock);
9761 
9762 	mutex_enter(rxq->rxq_lock);
9763 	if (rxq->rxq_stopping) {
9764 		mutex_exit(rxq->rxq_lock);
9765 		return;
9766 	}
9767 	WM_Q_EVCNT_INCR(rxq, defer);
9768 	rxmore = wm_rxeof(rxq, rxlimit);
9769 	mutex_exit(rxq->rxq_lock);
9770 
9771 	if (txmore || rxmore) {
9772 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
9773 		wm_sched_handle_queue(sc, wmq);
9774 	} else
9775 		wm_txrxintr_enable(wmq);
9776 }
9777 
9778 static void
9779 wm_handle_queue_work(struct work *wk, void *context)
9780 {
9781 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
9782 
9783 	/*
9784 	 * "enqueued flag" is not required here.
9785 	 */
9786 	wm_handle_queue(wmq);
9787 }
9788 
9789 /*
9790  * wm_linkintr_msix:
9791  *
9792  *	Interrupt service routine for link status change for MSI-X.
9793  */
9794 static int
9795 wm_linkintr_msix(void *arg)
9796 {
9797 	struct wm_softc *sc = arg;
9798 	uint32_t reg;
9799 	bool has_rxo;
9800 
9801 	reg = CSR_READ(sc, WMREG_ICR);
9802 	WM_CORE_LOCK(sc);
9803 	DPRINTF(WM_DEBUG_LINK,
9804 	    ("%s: LINK: got link intr. ICR = %08x\n",
9805 		device_xname(sc->sc_dev), reg));
9806 
9807 	if (sc->sc_core_stopping)
9808 		goto out;
9809 
9810 	if ((reg & ICR_LSC) != 0) {
9811 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
9812 		wm_linkintr(sc, ICR_LSC);
9813 	}
9814 	if ((reg & ICR_GPI(0)) != 0)
9815 		device_printf(sc->sc_dev, "got module interrupt\n");
9816 
9817 	/*
9818 	 * XXX 82574 MSI-X mode workaround
9819 	 *
9820 	 * 82574 MSI-X mode causes receive overrun(RXO) interrupt as ICR_OTHER
9821 	 * MSI-X vector, furthermore it does not cause neigher ICR_RXQ(0) nor
9822 	 * ICR_RXQ(1) vector. So, we generate ICR_RXQ(0) and ICR_RXQ(1)
9823 	 * interrupts by writing WMREG_ICS to process receive packets.
9824 	 */
9825 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
9826 #if defined(WM_DEBUG)
9827 		log(LOG_WARNING, "%s: Receive overrun\n",
9828 		    device_xname(sc->sc_dev));
9829 #endif /* defined(WM_DEBUG) */
9830 
9831 		has_rxo = true;
9832 		/*
9833 		 * The RXO interrupt is very high rate when receive traffic is
9834 		 * high rate. We use polling mode for ICR_OTHER like Tx/Rx
9835 		 * interrupts. ICR_OTHER will be enabled at the end of
9836 		 * wm_txrxintr_msix() which is kicked by both ICR_RXQ(0) and
9837 		 * ICR_RXQ(1) interrupts.
9838 		 */
9839 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
9840 
9841 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
9842 	}
9843 
9844 
9845 
9846 out:
9847 	WM_CORE_UNLOCK(sc);
9848 
9849 	if (sc->sc_type == WM_T_82574) {
9850 		if (!has_rxo)
9851 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
9852 		else
9853 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
9854 	} else if (sc->sc_type == WM_T_82575)
9855 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
9856 	else
9857 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
9858 
9859 	return 1;
9860 }
9861 
9862 /*
9863  * Media related.
9864  * GMII, SGMII, TBI (and SERDES)
9865  */
9866 
9867 /* Common */
9868 
9869 /*
9870  * wm_tbi_serdes_set_linkled:
9871  *
9872  *	Update the link LED on TBI and SERDES devices.
9873  */
9874 static void
9875 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
9876 {
9877 
9878 	if (sc->sc_tbi_linkup)
9879 		sc->sc_ctrl |= CTRL_SWDPIN(0);
9880 	else
9881 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
9882 
9883 	/* 82540 or newer devices are active low */
9884 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
9885 
9886 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9887 }
9888 
9889 /* GMII related */
9890 
9891 /*
9892  * wm_gmii_reset:
9893  *
9894  *	Reset the PHY.
9895  */
9896 static void
9897 wm_gmii_reset(struct wm_softc *sc)
9898 {
9899 	uint32_t reg;
9900 	int rv;
9901 
9902 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
9903 		device_xname(sc->sc_dev), __func__));
9904 
9905 	rv = sc->phy.acquire(sc);
9906 	if (rv != 0) {
9907 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9908 		    __func__);
9909 		return;
9910 	}
9911 
9912 	switch (sc->sc_type) {
9913 	case WM_T_82542_2_0:
9914 	case WM_T_82542_2_1:
9915 		/* null */
9916 		break;
9917 	case WM_T_82543:
9918 		/*
9919 		 * With 82543, we need to force speed and duplex on the MAC
9920 		 * equal to what the PHY speed and duplex configuration is.
9921 		 * In addition, we need to perform a hardware reset on the PHY
9922 		 * to take it out of reset.
9923 		 */
9924 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
9925 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9926 
9927 		/* The PHY reset pin is active-low. */
9928 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
9929 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
9930 		    CTRL_EXT_SWDPIN(4));
9931 		reg |= CTRL_EXT_SWDPIO(4);
9932 
9933 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9934 		CSR_WRITE_FLUSH(sc);
9935 		delay(10*1000);
9936 
9937 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
9938 		CSR_WRITE_FLUSH(sc);
9939 		delay(150);
9940 #if 0
9941 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
9942 #endif
9943 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
9944 		break;
9945 	case WM_T_82544:	/* Reset 10000us */
9946 	case WM_T_82540:
9947 	case WM_T_82545:
9948 	case WM_T_82545_3:
9949 	case WM_T_82546:
9950 	case WM_T_82546_3:
9951 	case WM_T_82541:
9952 	case WM_T_82541_2:
9953 	case WM_T_82547:
9954 	case WM_T_82547_2:
9955 	case WM_T_82571:	/* Reset 100us */
9956 	case WM_T_82572:
9957 	case WM_T_82573:
9958 	case WM_T_82574:
9959 	case WM_T_82575:
9960 	case WM_T_82576:
9961 	case WM_T_82580:
9962 	case WM_T_I350:
9963 	case WM_T_I354:
9964 	case WM_T_I210:
9965 	case WM_T_I211:
9966 	case WM_T_82583:
9967 	case WM_T_80003:
9968 		/* Generic reset */
9969 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
9970 		CSR_WRITE_FLUSH(sc);
9971 		delay(20000);
9972 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9973 		CSR_WRITE_FLUSH(sc);
9974 		delay(20000);
9975 
9976 		if ((sc->sc_type == WM_T_82541)
9977 		    || (sc->sc_type == WM_T_82541_2)
9978 		    || (sc->sc_type == WM_T_82547)
9979 		    || (sc->sc_type == WM_T_82547_2)) {
9980 			/* Workaround for igp are done in igp_reset() */
9981 			/* XXX add code to set LED after phy reset */
9982 		}
9983 		break;
9984 	case WM_T_ICH8:
9985 	case WM_T_ICH9:
9986 	case WM_T_ICH10:
9987 	case WM_T_PCH:
9988 	case WM_T_PCH2:
9989 	case WM_T_PCH_LPT:
9990 	case WM_T_PCH_SPT:
9991 	case WM_T_PCH_CNP:
9992 		/* Generic reset */
9993 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
9994 		CSR_WRITE_FLUSH(sc);
9995 		delay(100);
9996 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9997 		CSR_WRITE_FLUSH(sc);
9998 		delay(150);
9999 		break;
10000 	default:
10001 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
10002 		    __func__);
10003 		break;
10004 	}
10005 
10006 	sc->phy.release(sc);
10007 
10008 	/* get_cfg_done */
10009 	wm_get_cfg_done(sc);
10010 
10011 	/* Extra setup */
10012 	switch (sc->sc_type) {
10013 	case WM_T_82542_2_0:
10014 	case WM_T_82542_2_1:
10015 	case WM_T_82543:
10016 	case WM_T_82544:
10017 	case WM_T_82540:
10018 	case WM_T_82545:
10019 	case WM_T_82545_3:
10020 	case WM_T_82546:
10021 	case WM_T_82546_3:
10022 	case WM_T_82541_2:
10023 	case WM_T_82547_2:
10024 	case WM_T_82571:
10025 	case WM_T_82572:
10026 	case WM_T_82573:
10027 	case WM_T_82574:
10028 	case WM_T_82583:
10029 	case WM_T_82575:
10030 	case WM_T_82576:
10031 	case WM_T_82580:
10032 	case WM_T_I350:
10033 	case WM_T_I354:
10034 	case WM_T_I210:
10035 	case WM_T_I211:
10036 	case WM_T_80003:
10037 		/* Null */
10038 		break;
10039 	case WM_T_82541:
10040 	case WM_T_82547:
10041 		/* XXX Configure actively LED after PHY reset */
10042 		break;
10043 	case WM_T_ICH8:
10044 	case WM_T_ICH9:
10045 	case WM_T_ICH10:
10046 	case WM_T_PCH:
10047 	case WM_T_PCH2:
10048 	case WM_T_PCH_LPT:
10049 	case WM_T_PCH_SPT:
10050 	case WM_T_PCH_CNP:
10051 		wm_phy_post_reset(sc);
10052 		break;
10053 	default:
10054 		panic("%s: unknown type\n", __func__);
10055 		break;
10056 	}
10057 }
10058 
10059 /*
10060  * Setup sc_phytype and mii_{read|write}reg.
10061  *
10062  *  To identify PHY type, correct read/write function should be selected.
10063  * To select correct read/write function, PCI ID or MAC type are required
10064  * without accessing PHY registers.
10065  *
10066  *  On the first call of this function, PHY ID is not known yet. Check
10067  * PCI ID or MAC type. The list of the PCI ID may not be perfect, so the
10068  * result might be incorrect.
10069  *
10070  *  In the second call, PHY OUI and model is used to identify PHY type.
10071  * It might not be perfect because of the lack of compared entry, but it
10072  * would be better than the first call.
10073  *
10074  *  If the detected new result and previous assumption is different,
10075  * diagnous message will be printed.
10076  */
10077 static void
10078 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
10079     uint16_t phy_model)
10080 {
10081 	device_t dev = sc->sc_dev;
10082 	struct mii_data *mii = &sc->sc_mii;
10083 	uint16_t new_phytype = WMPHY_UNKNOWN;
10084 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
10085 	mii_readreg_t new_readreg;
10086 	mii_writereg_t new_writereg;
10087 	bool dodiag = true;
10088 
10089 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
10090 		device_xname(sc->sc_dev), __func__));
10091 
10092 	/*
10093 	 * 1000BASE-T SFP uses SGMII and the first asumed PHY type is always
10094 	 * incorrect. So don't print diag output when it's 2nd call.
10095 	 */
10096 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
10097 		dodiag = false;
10098 
10099 	if (mii->mii_readreg == NULL) {
10100 		/*
10101 		 *  This is the first call of this function. For ICH and PCH
10102 		 * variants, it's difficult to determine the PHY access method
10103 		 * by sc_type, so use the PCI product ID for some devices.
10104 		 */
10105 
10106 		switch (sc->sc_pcidevid) {
10107 		case PCI_PRODUCT_INTEL_PCH_M_LM:
10108 		case PCI_PRODUCT_INTEL_PCH_M_LC:
10109 			/* 82577 */
10110 			new_phytype = WMPHY_82577;
10111 			break;
10112 		case PCI_PRODUCT_INTEL_PCH_D_DM:
10113 		case PCI_PRODUCT_INTEL_PCH_D_DC:
10114 			/* 82578 */
10115 			new_phytype = WMPHY_82578;
10116 			break;
10117 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
10118 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
10119 			/* 82579 */
10120 			new_phytype = WMPHY_82579;
10121 			break;
10122 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
10123 		case PCI_PRODUCT_INTEL_82801I_BM:
10124 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
10125 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
10126 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
10127 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
10128 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
10129 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
10130 			/* ICH8, 9, 10 with 82567 */
10131 			new_phytype = WMPHY_BM;
10132 			break;
10133 		default:
10134 			break;
10135 		}
10136 	} else {
10137 		/* It's not the first call. Use PHY OUI and model */
10138 		switch (phy_oui) {
10139 		case MII_OUI_ATTANSIC: /* XXX ??? */
10140 			switch (phy_model) {
10141 			case 0x0004: /* XXX */
10142 				new_phytype = WMPHY_82578;
10143 				break;
10144 			default:
10145 				break;
10146 			}
10147 			break;
10148 		case MII_OUI_xxMARVELL:
10149 			switch (phy_model) {
10150 			case MII_MODEL_xxMARVELL_I210:
10151 				new_phytype = WMPHY_I210;
10152 				break;
10153 			case MII_MODEL_xxMARVELL_E1011:
10154 			case MII_MODEL_xxMARVELL_E1000_3:
10155 			case MII_MODEL_xxMARVELL_E1000_5:
10156 			case MII_MODEL_xxMARVELL_E1112:
10157 				new_phytype = WMPHY_M88;
10158 				break;
10159 			case MII_MODEL_xxMARVELL_E1149:
10160 				new_phytype = WMPHY_BM;
10161 				break;
10162 			case MII_MODEL_xxMARVELL_E1111:
10163 			case MII_MODEL_xxMARVELL_I347:
10164 			case MII_MODEL_xxMARVELL_E1512:
10165 			case MII_MODEL_xxMARVELL_E1340M:
10166 			case MII_MODEL_xxMARVELL_E1543:
10167 				new_phytype = WMPHY_M88;
10168 				break;
10169 			case MII_MODEL_xxMARVELL_I82563:
10170 				new_phytype = WMPHY_GG82563;
10171 				break;
10172 			default:
10173 				break;
10174 			}
10175 			break;
10176 		case MII_OUI_INTEL:
10177 			switch (phy_model) {
10178 			case MII_MODEL_INTEL_I82577:
10179 				new_phytype = WMPHY_82577;
10180 				break;
10181 			case MII_MODEL_INTEL_I82579:
10182 				new_phytype = WMPHY_82579;
10183 				break;
10184 			case MII_MODEL_INTEL_I217:
10185 				new_phytype = WMPHY_I217;
10186 				break;
10187 			case MII_MODEL_INTEL_I82580:
10188 			case MII_MODEL_INTEL_I350:
10189 				new_phytype = WMPHY_82580;
10190 				break;
10191 			default:
10192 				break;
10193 			}
10194 			break;
10195 		case MII_OUI_yyINTEL:
10196 			switch (phy_model) {
10197 			case MII_MODEL_yyINTEL_I82562G:
10198 			case MII_MODEL_yyINTEL_I82562EM:
10199 			case MII_MODEL_yyINTEL_I82562ET:
10200 				new_phytype = WMPHY_IFE;
10201 				break;
10202 			case MII_MODEL_yyINTEL_IGP01E1000:
10203 				new_phytype = WMPHY_IGP;
10204 				break;
10205 			case MII_MODEL_yyINTEL_I82566:
10206 				new_phytype = WMPHY_IGP_3;
10207 				break;
10208 			default:
10209 				break;
10210 			}
10211 			break;
10212 		default:
10213 			break;
10214 		}
10215 
10216 		if (dodiag) {
10217 			if (new_phytype == WMPHY_UNKNOWN)
10218 				aprint_verbose_dev(dev,
10219 				    "%s: Unknown PHY model. OUI=%06x, "
10220 				    "model=%04x\n", __func__, phy_oui,
10221 				    phy_model);
10222 
10223 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
10224 			    && (sc->sc_phytype != new_phytype)) {
10225 				aprint_error_dev(dev, "Previously assumed PHY "
10226 				    "type(%u) was incorrect. PHY type from PHY"
10227 				    "ID = %u\n", sc->sc_phytype, new_phytype);
10228 			}
10229 		}
10230 	}
10231 
10232 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
10233 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
10234 		/* SGMII */
10235 		new_readreg = wm_sgmii_readreg;
10236 		new_writereg = wm_sgmii_writereg;
10237 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
10238 		/* BM2 (phyaddr == 1) */
10239 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
10240 		    && (new_phytype != WMPHY_BM)
10241 		    && (new_phytype != WMPHY_UNKNOWN))
10242 			doubt_phytype = new_phytype;
10243 		new_phytype = WMPHY_BM;
10244 		new_readreg = wm_gmii_bm_readreg;
10245 		new_writereg = wm_gmii_bm_writereg;
10246 	} else if (sc->sc_type >= WM_T_PCH) {
10247 		/* All PCH* use _hv_ */
10248 		new_readreg = wm_gmii_hv_readreg;
10249 		new_writereg = wm_gmii_hv_writereg;
10250 	} else if (sc->sc_type >= WM_T_ICH8) {
10251 		/* non-82567 ICH8, 9 and 10 */
10252 		new_readreg = wm_gmii_i82544_readreg;
10253 		new_writereg = wm_gmii_i82544_writereg;
10254 	} else if (sc->sc_type >= WM_T_80003) {
10255 		/* 80003 */
10256 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
10257 		    && (new_phytype != WMPHY_GG82563)
10258 		    && (new_phytype != WMPHY_UNKNOWN))
10259 			doubt_phytype = new_phytype;
10260 		new_phytype = WMPHY_GG82563;
10261 		new_readreg = wm_gmii_i80003_readreg;
10262 		new_writereg = wm_gmii_i80003_writereg;
10263 	} else if (sc->sc_type >= WM_T_I210) {
10264 		/* I210 and I211 */
10265 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
10266 		    && (new_phytype != WMPHY_I210)
10267 		    && (new_phytype != WMPHY_UNKNOWN))
10268 			doubt_phytype = new_phytype;
10269 		new_phytype = WMPHY_I210;
10270 		new_readreg = wm_gmii_gs40g_readreg;
10271 		new_writereg = wm_gmii_gs40g_writereg;
10272 	} else if (sc->sc_type >= WM_T_82580) {
10273 		/* 82580, I350 and I354 */
10274 		new_readreg = wm_gmii_82580_readreg;
10275 		new_writereg = wm_gmii_82580_writereg;
10276 	} else if (sc->sc_type >= WM_T_82544) {
10277 		/* 82544, 0, [56], [17], 8257[1234] and 82583 */
10278 		new_readreg = wm_gmii_i82544_readreg;
10279 		new_writereg = wm_gmii_i82544_writereg;
10280 	} else {
10281 		new_readreg = wm_gmii_i82543_readreg;
10282 		new_writereg = wm_gmii_i82543_writereg;
10283 	}
10284 
10285 	if (new_phytype == WMPHY_BM) {
10286 		/* All BM use _bm_ */
10287 		new_readreg = wm_gmii_bm_readreg;
10288 		new_writereg = wm_gmii_bm_writereg;
10289 	}
10290 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
10291 		/* All PCH* use _hv_ */
10292 		new_readreg = wm_gmii_hv_readreg;
10293 		new_writereg = wm_gmii_hv_writereg;
10294 	}
10295 
10296 	/* Diag output */
10297 	if (dodiag) {
10298 		if (doubt_phytype != WMPHY_UNKNOWN)
10299 			aprint_error_dev(dev, "Assumed new PHY type was "
10300 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
10301 			    new_phytype);
10302 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
10303 		    && (sc->sc_phytype != new_phytype))
10304 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
10305 			    "was incorrect. New PHY type = %u\n",
10306 			    sc->sc_phytype, new_phytype);
10307 
10308 		if ((mii->mii_readreg != NULL) &&
10309 		    (new_phytype == WMPHY_UNKNOWN))
10310 			aprint_error_dev(dev, "PHY type is still unknown.\n");
10311 
10312 		if ((mii->mii_readreg != NULL) &&
10313 		    (mii->mii_readreg != new_readreg))
10314 			aprint_error_dev(dev, "Previously assumed PHY "
10315 			    "read/write function was incorrect.\n");
10316 	}
10317 
10318 	/* Update now */
10319 	sc->sc_phytype = new_phytype;
10320 	mii->mii_readreg = new_readreg;
10321 	mii->mii_writereg = new_writereg;
10322 	if (new_readreg == wm_gmii_hv_readreg) {
10323 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
10324 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
10325 	} else if (new_readreg == wm_sgmii_readreg) {
10326 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
10327 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
10328 	} else if (new_readreg == wm_gmii_i82544_readreg) {
10329 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
10330 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
10331 	}
10332 }
10333 
10334 /*
10335  * wm_get_phy_id_82575:
10336  *
10337  * Return PHY ID. Return -1 if it failed.
10338  */
10339 static int
10340 wm_get_phy_id_82575(struct wm_softc *sc)
10341 {
10342 	uint32_t reg;
10343 	int phyid = -1;
10344 
10345 	/* XXX */
10346 	if ((sc->sc_flags & WM_F_SGMII) == 0)
10347 		return -1;
10348 
10349 	if (wm_sgmii_uses_mdio(sc)) {
10350 		switch (sc->sc_type) {
10351 		case WM_T_82575:
10352 		case WM_T_82576:
10353 			reg = CSR_READ(sc, WMREG_MDIC);
10354 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
10355 			break;
10356 		case WM_T_82580:
10357 		case WM_T_I350:
10358 		case WM_T_I354:
10359 		case WM_T_I210:
10360 		case WM_T_I211:
10361 			reg = CSR_READ(sc, WMREG_MDICNFG);
10362 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
10363 			break;
10364 		default:
10365 			return -1;
10366 		}
10367 	}
10368 
10369 	return phyid;
10370 }
10371 
10372 
10373 /*
10374  * wm_gmii_mediainit:
10375  *
10376  *	Initialize media for use on 1000BASE-T devices.
10377  */
10378 static void
10379 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
10380 {
10381 	device_t dev = sc->sc_dev;
10382 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10383 	struct mii_data *mii = &sc->sc_mii;
10384 
10385 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
10386 		device_xname(sc->sc_dev), __func__));
10387 
10388 	/* We have GMII. */
10389 	sc->sc_flags |= WM_F_HAS_MII;
10390 
10391 	if (sc->sc_type == WM_T_80003)
10392 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
10393 	else
10394 		sc->sc_tipg = TIPG_1000T_DFLT;
10395 
10396 	/*
10397 	 * Let the chip set speed/duplex on its own based on
10398 	 * signals from the PHY.
10399 	 * XXXbouyer - I'm not sure this is right for the 80003,
10400 	 * the em driver only sets CTRL_SLU here - but it seems to work.
10401 	 */
10402 	sc->sc_ctrl |= CTRL_SLU;
10403 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10404 
10405 	/* Initialize our media structures and probe the GMII. */
10406 	mii->mii_ifp = ifp;
10407 
10408 	mii->mii_statchg = wm_gmii_statchg;
10409 
10410 	/* get PHY control from SMBus to PCIe */
10411 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
10412 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
10413 	    || (sc->sc_type == WM_T_PCH_CNP))
10414 		wm_init_phy_workarounds_pchlan(sc);
10415 
10416 	wm_gmii_reset(sc);
10417 
10418 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
10419 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
10420 	    wm_gmii_mediastatus, sc->sc_core_lock);
10421 
10422 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
10423 	    || (sc->sc_type == WM_T_82580)
10424 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
10425 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
10426 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
10427 			/* Attach only one port */
10428 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
10429 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
10430 		} else {
10431 			int i, id;
10432 			uint32_t ctrl_ext;
10433 
10434 			id = wm_get_phy_id_82575(sc);
10435 			if (id != -1) {
10436 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
10437 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
10438 			}
10439 			if ((id == -1)
10440 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
10441 				/* Power on sgmii phy if it is disabled */
10442 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
10443 				CSR_WRITE(sc, WMREG_CTRL_EXT,
10444 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
10445 				CSR_WRITE_FLUSH(sc);
10446 				delay(300*1000); /* XXX too long */
10447 
10448 				/*
10449 				 * From 1 to 8.
10450 				 *
10451 				 * I2C access fails with I2C register's ERROR
10452 				 * bit set, so prevent error message while
10453 				 * scanning.
10454 				 */
10455 				sc->phy.no_errprint = true;
10456 				for (i = 1; i < 8; i++)
10457 					mii_attach(sc->sc_dev, &sc->sc_mii,
10458 					    0xffffffff, i, MII_OFFSET_ANY,
10459 					    MIIF_DOPAUSE);
10460 				sc->phy.no_errprint = false;
10461 
10462 				/* Restore previous sfp cage power state */
10463 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
10464 			}
10465 		}
10466 	} else
10467 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
10468 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
10469 
10470 	/*
10471 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
10472 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
10473 	 */
10474 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
10475 		|| (sc->sc_type == WM_T_PCH_SPT)
10476 		|| (sc->sc_type == WM_T_PCH_CNP))
10477 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
10478 		wm_set_mdio_slow_mode_hv(sc);
10479 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
10480 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
10481 	}
10482 
10483 	/*
10484 	 * (For ICH8 variants)
10485 	 * If PHY detection failed, use BM's r/w function and retry.
10486 	 */
10487 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
10488 		/* if failed, retry with *_bm_* */
10489 		aprint_verbose_dev(dev, "Assumed PHY access function "
10490 		    "(type = %d) might be incorrect. Use BM and retry.\n",
10491 		    sc->sc_phytype);
10492 		sc->sc_phytype = WMPHY_BM;
10493 		mii->mii_readreg = wm_gmii_bm_readreg;
10494 		mii->mii_writereg = wm_gmii_bm_writereg;
10495 
10496 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
10497 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
10498 	}
10499 
10500 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
10501 		/* Any PHY wasn't find */
10502 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
10503 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
10504 		sc->sc_phytype = WMPHY_NONE;
10505 	} else {
10506 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
10507 
10508 		/*
10509 		 * PHY Found! Check PHY type again by the second call of
10510 		 * wm_gmii_setup_phytype.
10511 		 */
10512 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
10513 		    child->mii_mpd_model);
10514 
10515 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
10516 	}
10517 }
10518 
10519 /*
10520  * wm_gmii_mediachange:	[ifmedia interface function]
10521  *
10522  *	Set hardware to newly-selected media on a 1000BASE-T device.
10523  */
10524 static int
10525 wm_gmii_mediachange(struct ifnet *ifp)
10526 {
10527 	struct wm_softc *sc = ifp->if_softc;
10528 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
10529 	uint32_t reg;
10530 	int rc;
10531 
10532 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
10533 		device_xname(sc->sc_dev), __func__));
10534 	if ((ifp->if_flags & IFF_UP) == 0)
10535 		return 0;
10536 
10537 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
10538 	if ((sc->sc_type == WM_T_82580)
10539 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
10540 	    || (sc->sc_type == WM_T_I211)) {
10541 		reg = CSR_READ(sc, WMREG_PHPM);
10542 		reg &= ~PHPM_GO_LINK_D;
10543 		CSR_WRITE(sc, WMREG_PHPM, reg);
10544 	}
10545 
10546 	/* Disable D0 LPLU. */
10547 	wm_lplu_d0_disable(sc);
10548 
10549 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
10550 	sc->sc_ctrl |= CTRL_SLU;
10551 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
10552 	    || (sc->sc_type > WM_T_82543)) {
10553 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
10554 	} else {
10555 		sc->sc_ctrl &= ~CTRL_ASDE;
10556 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
10557 		if (ife->ifm_media & IFM_FDX)
10558 			sc->sc_ctrl |= CTRL_FD;
10559 		switch (IFM_SUBTYPE(ife->ifm_media)) {
10560 		case IFM_10_T:
10561 			sc->sc_ctrl |= CTRL_SPEED_10;
10562 			break;
10563 		case IFM_100_TX:
10564 			sc->sc_ctrl |= CTRL_SPEED_100;
10565 			break;
10566 		case IFM_1000_T:
10567 			sc->sc_ctrl |= CTRL_SPEED_1000;
10568 			break;
10569 		case IFM_NONE:
10570 			/* There is no specific setting for IFM_NONE */
10571 			break;
10572 		default:
10573 			panic("wm_gmii_mediachange: bad media 0x%x",
10574 			    ife->ifm_media);
10575 		}
10576 	}
10577 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10578 	CSR_WRITE_FLUSH(sc);
10579 
10580 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
10581 		wm_serdes_mediachange(ifp);
10582 
10583 	if (sc->sc_type <= WM_T_82543)
10584 		wm_gmii_reset(sc);
10585 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
10586 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
10587 		/* allow time for SFP cage time to power up phy */
10588 		delay(300 * 1000);
10589 		wm_gmii_reset(sc);
10590 	}
10591 
10592 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
10593 		return 0;
10594 	return rc;
10595 }
10596 
10597 /*
10598  * wm_gmii_mediastatus:	[ifmedia interface function]
10599  *
10600  *	Get the current interface media status on a 1000BASE-T device.
10601  */
10602 static void
10603 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
10604 {
10605 	struct wm_softc *sc = ifp->if_softc;
10606 
10607 	ether_mediastatus(ifp, ifmr);
10608 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
10609 	    | sc->sc_flowflags;
10610 }
10611 
10612 #define	MDI_IO		CTRL_SWDPIN(2)
10613 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
10614 #define	MDI_CLK		CTRL_SWDPIN(3)
10615 
10616 static void
10617 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
10618 {
10619 	uint32_t i, v;
10620 
10621 	v = CSR_READ(sc, WMREG_CTRL);
10622 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
10623 	v |= MDI_DIR | CTRL_SWDPIO(3);
10624 
10625 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
10626 		if (data & i)
10627 			v |= MDI_IO;
10628 		else
10629 			v &= ~MDI_IO;
10630 		CSR_WRITE(sc, WMREG_CTRL, v);
10631 		CSR_WRITE_FLUSH(sc);
10632 		delay(10);
10633 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
10634 		CSR_WRITE_FLUSH(sc);
10635 		delay(10);
10636 		CSR_WRITE(sc, WMREG_CTRL, v);
10637 		CSR_WRITE_FLUSH(sc);
10638 		delay(10);
10639 	}
10640 }
10641 
10642 static uint16_t
10643 wm_i82543_mii_recvbits(struct wm_softc *sc)
10644 {
10645 	uint32_t v, i;
10646 	uint16_t data = 0;
10647 
10648 	v = CSR_READ(sc, WMREG_CTRL);
10649 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
10650 	v |= CTRL_SWDPIO(3);
10651 
10652 	CSR_WRITE(sc, WMREG_CTRL, v);
10653 	CSR_WRITE_FLUSH(sc);
10654 	delay(10);
10655 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
10656 	CSR_WRITE_FLUSH(sc);
10657 	delay(10);
10658 	CSR_WRITE(sc, WMREG_CTRL, v);
10659 	CSR_WRITE_FLUSH(sc);
10660 	delay(10);
10661 
10662 	for (i = 0; i < 16; i++) {
10663 		data <<= 1;
10664 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
10665 		CSR_WRITE_FLUSH(sc);
10666 		delay(10);
10667 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
10668 			data |= 1;
10669 		CSR_WRITE(sc, WMREG_CTRL, v);
10670 		CSR_WRITE_FLUSH(sc);
10671 		delay(10);
10672 	}
10673 
10674 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
10675 	CSR_WRITE_FLUSH(sc);
10676 	delay(10);
10677 	CSR_WRITE(sc, WMREG_CTRL, v);
10678 	CSR_WRITE_FLUSH(sc);
10679 	delay(10);
10680 
10681 	return data;
10682 }
10683 
10684 #undef MDI_IO
10685 #undef MDI_DIR
10686 #undef MDI_CLK
10687 
10688 /*
10689  * wm_gmii_i82543_readreg:	[mii interface function]
10690  *
10691  *	Read a PHY register on the GMII (i82543 version).
10692  */
10693 static int
10694 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
10695 {
10696 	struct wm_softc *sc = device_private(dev);
10697 
10698 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
10699 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
10700 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
10701 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
10702 
10703 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
10704 		device_xname(dev), phy, reg, *val));
10705 
10706 	return 0;
10707 }
10708 
10709 /*
10710  * wm_gmii_i82543_writereg:	[mii interface function]
10711  *
10712  *	Write a PHY register on the GMII (i82543 version).
10713  */
10714 static int
10715 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
10716 {
10717 	struct wm_softc *sc = device_private(dev);
10718 
10719 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
10720 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
10721 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
10722 	    (MII_COMMAND_START << 30), 32);
10723 
10724 	return 0;
10725 }
10726 
10727 /*
10728  * wm_gmii_mdic_readreg:	[mii interface function]
10729  *
10730  *	Read a PHY register on the GMII.
10731  */
10732 static int
10733 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
10734 {
10735 	struct wm_softc *sc = device_private(dev);
10736 	uint32_t mdic = 0;
10737 	int i;
10738 
10739 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
10740 	    && (reg > MII_ADDRMASK)) {
10741 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
10742 		    __func__, sc->sc_phytype, reg);
10743 		reg &= MII_ADDRMASK;
10744 	}
10745 
10746 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
10747 	    MDIC_REGADD(reg));
10748 
10749 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
10750 		delay(50);
10751 		mdic = CSR_READ(sc, WMREG_MDIC);
10752 		if (mdic & MDIC_READY)
10753 			break;
10754 	}
10755 
10756 	if ((mdic & MDIC_READY) == 0) {
10757 		DPRINTF(WM_DEBUG_GMII,
10758 		    ("%s: MDIC read timed out: phy %d reg %d\n",
10759 			device_xname(dev), phy, reg));
10760 		return ETIMEDOUT;
10761 	} else if (mdic & MDIC_E) {
10762 		/* This is normal if no PHY is present. */
10763 		DPRINTF(WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
10764 			device_xname(sc->sc_dev), phy, reg));
10765 		return -1;
10766 	} else
10767 		*val = MDIC_DATA(mdic);
10768 
10769 	/*
10770 	 * Allow some time after each MDIC transaction to avoid
10771 	 * reading duplicate data in the next MDIC transaction.
10772 	 */
10773 	if (sc->sc_type == WM_T_PCH2)
10774 		delay(100);
10775 
10776 	return 0;
10777 }
10778 
10779 /*
10780  * wm_gmii_mdic_writereg:	[mii interface function]
10781  *
10782  *	Write a PHY register on the GMII.
10783  */
10784 static int
10785 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
10786 {
10787 	struct wm_softc *sc = device_private(dev);
10788 	uint32_t mdic = 0;
10789 	int i;
10790 
10791 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
10792 	    && (reg > MII_ADDRMASK)) {
10793 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
10794 		    __func__, sc->sc_phytype, reg);
10795 		reg &= MII_ADDRMASK;
10796 	}
10797 
10798 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
10799 	    MDIC_REGADD(reg) | MDIC_DATA(val));
10800 
10801 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
10802 		delay(50);
10803 		mdic = CSR_READ(sc, WMREG_MDIC);
10804 		if (mdic & MDIC_READY)
10805 			break;
10806 	}
10807 
10808 	if ((mdic & MDIC_READY) == 0) {
10809 		DPRINTF(WM_DEBUG_GMII,
10810 		    ("%s: MDIC write timed out: phy %d reg %d\n",
10811 			device_xname(dev), phy, reg));
10812 		return ETIMEDOUT;
10813 	} else if (mdic & MDIC_E) {
10814 		DPRINTF(WM_DEBUG_GMII,
10815 		    ("%s: MDIC write error: phy %d reg %d\n",
10816 			device_xname(dev), phy, reg));
10817 		return -1;
10818 	}
10819 
10820 	/*
10821 	 * Allow some time after each MDIC transaction to avoid
10822 	 * reading duplicate data in the next MDIC transaction.
10823 	 */
10824 	if (sc->sc_type == WM_T_PCH2)
10825 		delay(100);
10826 
10827 	return 0;
10828 }
10829 
10830 /*
10831  * wm_gmii_i82544_readreg:	[mii interface function]
10832  *
10833  *	Read a PHY register on the GMII.
10834  */
10835 static int
10836 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
10837 {
10838 	struct wm_softc *sc = device_private(dev);
10839 	int rv;
10840 
10841 	if (sc->phy.acquire(sc)) {
10842 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10843 		return -1;
10844 	}
10845 
10846 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
10847 
10848 	sc->phy.release(sc);
10849 
10850 	return rv;
10851 }
10852 
10853 static int
10854 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
10855 {
10856 	struct wm_softc *sc = device_private(dev);
10857 	int rv;
10858 
10859 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
10860 		switch (sc->sc_phytype) {
10861 		case WMPHY_IGP:
10862 		case WMPHY_IGP_2:
10863 		case WMPHY_IGP_3:
10864 			rv = wm_gmii_mdic_writereg(dev, phy,
10865 			    MII_IGPHY_PAGE_SELECT, reg);
10866 			if (rv != 0)
10867 				return rv;
10868 			break;
10869 		default:
10870 #ifdef WM_DEBUG
10871 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
10872 			    __func__, sc->sc_phytype, reg);
10873 #endif
10874 			break;
10875 		}
10876 	}
10877 
10878 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
10879 }
10880 
10881 /*
10882  * wm_gmii_i82544_writereg:	[mii interface function]
10883  *
10884  *	Write a PHY register on the GMII.
10885  */
10886 static int
10887 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
10888 {
10889 	struct wm_softc *sc = device_private(dev);
10890 	int rv;
10891 
10892 	if (sc->phy.acquire(sc)) {
10893 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10894 		return -1;
10895 	}
10896 
10897 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
10898 	sc->phy.release(sc);
10899 
10900 	return rv;
10901 }
10902 
10903 static int
10904 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
10905 {
10906 	struct wm_softc *sc = device_private(dev);
10907 	int rv;
10908 
10909 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
10910 		switch (sc->sc_phytype) {
10911 		case WMPHY_IGP:
10912 		case WMPHY_IGP_2:
10913 		case WMPHY_IGP_3:
10914 			rv = wm_gmii_mdic_writereg(dev, phy,
10915 			    MII_IGPHY_PAGE_SELECT, reg);
10916 			if (rv != 0)
10917 				return rv;
10918 			break;
10919 		default:
10920 #ifdef WM_DEBUG
10921 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
10922 			    __func__, sc->sc_phytype, reg);
10923 #endif
10924 			break;
10925 		}
10926 	}
10927 
10928 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
10929 }
10930 
10931 /*
10932  * wm_gmii_i80003_readreg:	[mii interface function]
10933  *
10934  *	Read a PHY register on the kumeran
10935  * This could be handled by the PHY layer if we didn't have to lock the
10936  * ressource ...
10937  */
10938 static int
10939 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
10940 {
10941 	struct wm_softc *sc = device_private(dev);
10942 	int page_select;
10943 	uint16_t temp, temp2;
10944 	int rv = 0;
10945 
10946 	if (phy != 1) /* Only one PHY on kumeran bus */
10947 		return -1;
10948 
10949 	if (sc->phy.acquire(sc)) {
10950 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10951 		return -1;
10952 	}
10953 
10954 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
10955 		page_select = GG82563_PHY_PAGE_SELECT;
10956 	else {
10957 		/*
10958 		 * Use Alternative Page Select register to access registers
10959 		 * 30 and 31.
10960 		 */
10961 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
10962 	}
10963 	temp = reg >> GG82563_PAGE_SHIFT;
10964 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
10965 		goto out;
10966 
10967 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
10968 		/*
10969 		 * Wait more 200us for a bug of the ready bit in the MDIC
10970 		 * register.
10971 		 */
10972 		delay(200);
10973 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
10974 		if ((rv != 0) || (temp2 != temp)) {
10975 			device_printf(dev, "%s failed\n", __func__);
10976 			rv = -1;
10977 			goto out;
10978 		}
10979 		delay(200);
10980 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
10981 		delay(200);
10982 	} else
10983 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
10984 
10985 out:
10986 	sc->phy.release(sc);
10987 	return rv;
10988 }
10989 
10990 /*
10991  * wm_gmii_i80003_writereg:	[mii interface function]
10992  *
10993  *	Write a PHY register on the kumeran.
10994  * This could be handled by the PHY layer if we didn't have to lock the
10995  * ressource ...
10996  */
10997 static int
10998 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
10999 {
11000 	struct wm_softc *sc = device_private(dev);
11001 	int page_select, rv;
11002 	uint16_t temp, temp2;
11003 
11004 	if (phy != 1) /* Only one PHY on kumeran bus */
11005 		return -1;
11006 
11007 	if (sc->phy.acquire(sc)) {
11008 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11009 		return -1;
11010 	}
11011 
11012 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
11013 		page_select = GG82563_PHY_PAGE_SELECT;
11014 	else {
11015 		/*
11016 		 * Use Alternative Page Select register to access registers
11017 		 * 30 and 31.
11018 		 */
11019 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
11020 	}
11021 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
11022 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
11023 		goto out;
11024 
11025 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
11026 		/*
11027 		 * Wait more 200us for a bug of the ready bit in the MDIC
11028 		 * register.
11029 		 */
11030 		delay(200);
11031 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
11032 		if ((rv != 0) || (temp2 != temp)) {
11033 			device_printf(dev, "%s failed\n", __func__);
11034 			rv = -1;
11035 			goto out;
11036 		}
11037 		delay(200);
11038 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11039 		delay(200);
11040 	} else
11041 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11042 
11043 out:
11044 	sc->phy.release(sc);
11045 	return rv;
11046 }
11047 
11048 /*
11049  * wm_gmii_bm_readreg:	[mii interface function]
11050  *
11051  *	Read a PHY register on the kumeran
11052  * This could be handled by the PHY layer if we didn't have to lock the
11053  * ressource ...
11054  */
11055 static int
11056 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
11057 {
11058 	struct wm_softc *sc = device_private(dev);
11059 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
11060 	int rv;
11061 
11062 	if (sc->phy.acquire(sc)) {
11063 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11064 		return -1;
11065 	}
11066 
11067 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
11068 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
11069 		    || (reg == 31)) ? 1 : phy;
11070 	/* Page 800 works differently than the rest so it has its own func */
11071 	if (page == BM_WUC_PAGE) {
11072 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
11073 		goto release;
11074 	}
11075 
11076 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11077 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
11078 		    && (sc->sc_type != WM_T_82583))
11079 			rv = wm_gmii_mdic_writereg(dev, phy,
11080 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
11081 		else
11082 			rv = wm_gmii_mdic_writereg(dev, phy,
11083 			    BME1000_PHY_PAGE_SELECT, page);
11084 		if (rv != 0)
11085 			goto release;
11086 	}
11087 
11088 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11089 
11090 release:
11091 	sc->phy.release(sc);
11092 	return rv;
11093 }
11094 
11095 /*
11096  * wm_gmii_bm_writereg:	[mii interface function]
11097  *
11098  *	Write a PHY register on the kumeran.
11099  * This could be handled by the PHY layer if we didn't have to lock the
11100  * ressource ...
11101  */
11102 static int
11103 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
11104 {
11105 	struct wm_softc *sc = device_private(dev);
11106 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
11107 	int rv;
11108 
11109 	if (sc->phy.acquire(sc)) {
11110 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11111 		return -1;
11112 	}
11113 
11114 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
11115 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
11116 		    || (reg == 31)) ? 1 : phy;
11117 	/* Page 800 works differently than the rest so it has its own func */
11118 	if (page == BM_WUC_PAGE) {
11119 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
11120 		goto release;
11121 	}
11122 
11123 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11124 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
11125 		    && (sc->sc_type != WM_T_82583))
11126 			rv = wm_gmii_mdic_writereg(dev, phy,
11127 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
11128 		else
11129 			rv = wm_gmii_mdic_writereg(dev, phy,
11130 			    BME1000_PHY_PAGE_SELECT, page);
11131 		if (rv != 0)
11132 			goto release;
11133 	}
11134 
11135 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11136 
11137 release:
11138 	sc->phy.release(sc);
11139 	return rv;
11140 }
11141 
11142 /*
11143  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
11144  *  @dev: pointer to the HW structure
11145  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
11146  *
11147  *  Assumes semaphore already acquired and phy_reg points to a valid memory
11148  *  address to store contents of the BM_WUC_ENABLE_REG register.
11149  */
11150 static int
11151 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
11152 {
11153 	uint16_t temp;
11154 	int rv;
11155 
11156 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
11157 		device_xname(dev), __func__));
11158 
11159 	if (!phy_regp)
11160 		return -1;
11161 
11162 	/* All page select, port ctrl and wakeup registers use phy address 1 */
11163 
11164 	/* Select Port Control Registers page */
11165 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
11166 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
11167 	if (rv != 0)
11168 		return rv;
11169 
11170 	/* Read WUCE and save it */
11171 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
11172 	if (rv != 0)
11173 		return rv;
11174 
11175 	/* Enable both PHY wakeup mode and Wakeup register page writes.
11176 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
11177 	 */
11178 	temp = *phy_regp;
11179 	temp |= BM_WUC_ENABLE_BIT;
11180 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
11181 
11182 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
11183 		return rv;
11184 
11185 	/* Select Host Wakeup Registers page - caller now able to write
11186 	 * registers on the Wakeup registers page
11187 	 */
11188 	return wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
11189 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
11190 }
11191 
11192 /*
11193  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
11194  *  @dev: pointer to the HW structure
11195  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
11196  *
11197  *  Restore BM_WUC_ENABLE_REG to its original value.
11198  *
11199  *  Assumes semaphore already acquired and *phy_reg is the contents of the
11200  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
11201  *  caller.
11202  */
11203 static int
11204 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
11205 {
11206 
11207 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
11208 		device_xname(dev), __func__));
11209 
11210 	if (!phy_regp)
11211 		return -1;
11212 
11213 	/* Select Port Control Registers page */
11214 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
11215 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
11216 
11217 	/* Restore 769.17 to its original value */
11218 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
11219 
11220 	return 0;
11221 }
11222 
11223 /*
11224  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
11225  *  @sc: pointer to the HW structure
11226  *  @offset: register offset to be read or written
11227  *  @val: pointer to the data to read or write
11228  *  @rd: determines if operation is read or write
11229  *  @page_set: BM_WUC_PAGE already set and access enabled
11230  *
11231  *  Read the PHY register at offset and store the retrieved information in
11232  *  data, or write data to PHY register at offset.  Note the procedure to
11233  *  access the PHY wakeup registers is different than reading the other PHY
11234  *  registers. It works as such:
11235  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
11236  *  2) Set page to 800 for host (801 if we were manageability)
11237  *  3) Write the address using the address opcode (0x11)
11238  *  4) Read or write the data using the data opcode (0x12)
11239  *  5) Restore 769.17.2 to its original value
11240  *
11241  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
11242  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
11243  *
11244  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
11245  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
11246  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
11247  */
11248 static int
11249 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd,
11250 	bool page_set)
11251 {
11252 	struct wm_softc *sc = device_private(dev);
11253 	uint16_t regnum = BM_PHY_REG_NUM(offset);
11254 	uint16_t page = BM_PHY_REG_PAGE(offset);
11255 	uint16_t wuce;
11256 	int rv = 0;
11257 
11258 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
11259 		device_xname(dev), __func__));
11260 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
11261 	if ((sc->sc_type == WM_T_PCH)
11262 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
11263 		device_printf(dev,
11264 		    "Attempting to access page %d while gig enabled.\n", page);
11265 	}
11266 
11267 	if (!page_set) {
11268 		/* Enable access to PHY wakeup registers */
11269 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
11270 		if (rv != 0) {
11271 			device_printf(dev,
11272 			    "%s: Could not enable PHY wakeup reg access\n",
11273 			    __func__);
11274 			return rv;
11275 		}
11276 	}
11277 	DPRINTF(WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
11278 		device_xname(sc->sc_dev), __func__, page, regnum));
11279 
11280 	/*
11281 	 * 2) Access PHY wakeup register.
11282 	 * See wm_access_phy_wakeup_reg_bm.
11283 	 */
11284 
11285 	/* Write the Wakeup register page offset value using opcode 0x11 */
11286 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
11287 	if (rv != 0)
11288 		return rv;
11289 
11290 	if (rd) {
11291 		/* Read the Wakeup register page value using opcode 0x12 */
11292 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
11293 	} else {
11294 		/* Write the Wakeup register page value using opcode 0x12 */
11295 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
11296 	}
11297 	if (rv != 0)
11298 		return rv;
11299 
11300 	if (!page_set)
11301 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
11302 
11303 	return rv;
11304 }
11305 
11306 /*
11307  * wm_gmii_hv_readreg:	[mii interface function]
11308  *
11309  *	Read a PHY register on the kumeran
11310  * This could be handled by the PHY layer if we didn't have to lock the
11311  * ressource ...
11312  */
11313 static int
11314 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
11315 {
11316 	struct wm_softc *sc = device_private(dev);
11317 	int rv;
11318 
11319 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
11320 		device_xname(dev), __func__));
11321 	if (sc->phy.acquire(sc)) {
11322 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11323 		return -1;
11324 	}
11325 
11326 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
11327 	sc->phy.release(sc);
11328 	return rv;
11329 }
11330 
11331 static int
11332 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
11333 {
11334 	uint16_t page = BM_PHY_REG_PAGE(reg);
11335 	uint16_t regnum = BM_PHY_REG_NUM(reg);
11336 	int rv;
11337 
11338 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
11339 
11340 	/* Page 800 works differently than the rest so it has its own func */
11341 	if (page == BM_WUC_PAGE)
11342 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
11343 
11344 	/*
11345 	 * Lower than page 768 works differently than the rest so it has its
11346 	 * own func
11347 	 */
11348 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
11349 		device_printf(dev, "gmii_hv_readreg!!!\n");
11350 		return -1;
11351 	}
11352 
11353 	/*
11354 	 * XXX I21[789] documents say that the SMBus Address register is at
11355 	 * PHY address 01, Page 0 (not 768), Register 26.
11356 	 */
11357 	if (page == HV_INTC_FC_PAGE_START)
11358 		page = 0;
11359 
11360 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
11361 		rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
11362 		    page << BME1000_PAGE_SHIFT);
11363 		if (rv != 0)
11364 			return rv;
11365 	}
11366 
11367 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
11368 }
11369 
11370 /*
11371  * wm_gmii_hv_writereg:	[mii interface function]
11372  *
11373  *	Write a PHY register on the kumeran.
11374  * This could be handled by the PHY layer if we didn't have to lock the
11375  * ressource ...
11376  */
11377 static int
11378 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
11379 {
11380 	struct wm_softc *sc = device_private(dev);
11381 	int rv;
11382 
11383 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
11384 		device_xname(dev), __func__));
11385 
11386 	if (sc->phy.acquire(sc)) {
11387 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11388 		return -1;
11389 	}
11390 
11391 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
11392 	sc->phy.release(sc);
11393 
11394 	return rv;
11395 }
11396 
11397 static int
11398 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
11399 {
11400 	struct wm_softc *sc = device_private(dev);
11401 	uint16_t page = BM_PHY_REG_PAGE(reg);
11402 	uint16_t regnum = BM_PHY_REG_NUM(reg);
11403 	int rv;
11404 
11405 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
11406 
11407 	/* Page 800 works differently than the rest so it has its own func */
11408 	if (page == BM_WUC_PAGE)
11409 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
11410 		    false);
11411 
11412 	/*
11413 	 * Lower than page 768 works differently than the rest so it has its
11414 	 * own func
11415 	 */
11416 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
11417 		device_printf(dev, "gmii_hv_writereg!!!\n");
11418 		return -1;
11419 	}
11420 
11421 	{
11422 		/*
11423 		 * XXX I21[789] documents say that the SMBus Address register
11424 		 * is at PHY address 01, Page 0 (not 768), Register 26.
11425 		 */
11426 		if (page == HV_INTC_FC_PAGE_START)
11427 			page = 0;
11428 
11429 		/*
11430 		 * XXX Workaround MDIO accesses being disabled after entering
11431 		 * IEEE Power Down (whenever bit 11 of the PHY control
11432 		 * register is set)
11433 		 */
11434 		if (sc->sc_phytype == WMPHY_82578) {
11435 			struct mii_softc *child;
11436 
11437 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
11438 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
11439 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
11440 			    && ((val & (1 << 11)) != 0)) {
11441 				device_printf(dev, "XXX need workaround\n");
11442 			}
11443 		}
11444 
11445 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
11446 			rv = wm_gmii_mdic_writereg(dev, 1,
11447 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
11448 			if (rv != 0)
11449 				return rv;
11450 		}
11451 	}
11452 
11453 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
11454 }
11455 
11456 /*
11457  * wm_gmii_82580_readreg:	[mii interface function]
11458  *
11459  *	Read a PHY register on the 82580 and I350.
11460  * This could be handled by the PHY layer if we didn't have to lock the
11461  * ressource ...
11462  */
11463 static int
11464 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
11465 {
11466 	struct wm_softc *sc = device_private(dev);
11467 	int rv;
11468 
11469 	if (sc->phy.acquire(sc) != 0) {
11470 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11471 		return -1;
11472 	}
11473 
11474 #ifdef DIAGNOSTIC
11475 	if (reg > MII_ADDRMASK) {
11476 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11477 		    __func__, sc->sc_phytype, reg);
11478 		reg &= MII_ADDRMASK;
11479 	}
11480 #endif
11481 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
11482 
11483 	sc->phy.release(sc);
11484 	return rv;
11485 }
11486 
11487 /*
11488  * wm_gmii_82580_writereg:	[mii interface function]
11489  *
11490  *	Write a PHY register on the 82580 and I350.
11491  * This could be handled by the PHY layer if we didn't have to lock the
11492  * ressource ...
11493  */
11494 static int
11495 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
11496 {
11497 	struct wm_softc *sc = device_private(dev);
11498 	int rv;
11499 
11500 	if (sc->phy.acquire(sc) != 0) {
11501 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11502 		return -1;
11503 	}
11504 
11505 #ifdef DIAGNOSTIC
11506 	if (reg > MII_ADDRMASK) {
11507 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11508 		    __func__, sc->sc_phytype, reg);
11509 		reg &= MII_ADDRMASK;
11510 	}
11511 #endif
11512 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
11513 
11514 	sc->phy.release(sc);
11515 	return rv;
11516 }
11517 
11518 /*
11519  * wm_gmii_gs40g_readreg:	[mii interface function]
11520  *
11521  *	Read a PHY register on the I2100 and I211.
11522  * This could be handled by the PHY layer if we didn't have to lock the
11523  * ressource ...
11524  */
11525 static int
11526 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
11527 {
11528 	struct wm_softc *sc = device_private(dev);
11529 	int page, offset;
11530 	int rv;
11531 
11532 	/* Acquire semaphore */
11533 	if (sc->phy.acquire(sc)) {
11534 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11535 		return -1;
11536 	}
11537 
11538 	/* Page select */
11539 	page = reg >> GS40G_PAGE_SHIFT;
11540 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
11541 	if (rv != 0)
11542 		goto release;
11543 
11544 	/* Read reg */
11545 	offset = reg & GS40G_OFFSET_MASK;
11546 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
11547 
11548 release:
11549 	sc->phy.release(sc);
11550 	return rv;
11551 }
11552 
11553 /*
11554  * wm_gmii_gs40g_writereg:	[mii interface function]
11555  *
11556  *	Write a PHY register on the I210 and I211.
11557  * This could be handled by the PHY layer if we didn't have to lock the
11558  * ressource ...
11559  */
11560 static int
11561 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
11562 {
11563 	struct wm_softc *sc = device_private(dev);
11564 	uint16_t page;
11565 	int offset, rv;
11566 
11567 	/* Acquire semaphore */
11568 	if (sc->phy.acquire(sc)) {
11569 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11570 		return -1;
11571 	}
11572 
11573 	/* Page select */
11574 	page = reg >> GS40G_PAGE_SHIFT;
11575 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
11576 	if (rv != 0)
11577 		goto release;
11578 
11579 	/* Write reg */
11580 	offset = reg & GS40G_OFFSET_MASK;
11581 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
11582 
11583 release:
11584 	/* Release semaphore */
11585 	sc->phy.release(sc);
11586 	return rv;
11587 }
11588 
11589 /*
11590  * wm_gmii_statchg:	[mii interface function]
11591  *
11592  *	Callback from MII layer when media changes.
11593  */
11594 static void
11595 wm_gmii_statchg(struct ifnet *ifp)
11596 {
11597 	struct wm_softc *sc = ifp->if_softc;
11598 	struct mii_data *mii = &sc->sc_mii;
11599 
11600 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
11601 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
11602 	sc->sc_fcrtl &= ~FCRTL_XONE;
11603 
11604 	/* Get flow control negotiation result. */
11605 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
11606 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
11607 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
11608 		mii->mii_media_active &= ~IFM_ETH_FMASK;
11609 	}
11610 
11611 	if (sc->sc_flowflags & IFM_FLOW) {
11612 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
11613 			sc->sc_ctrl |= CTRL_TFCE;
11614 			sc->sc_fcrtl |= FCRTL_XONE;
11615 		}
11616 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
11617 			sc->sc_ctrl |= CTRL_RFCE;
11618 	}
11619 
11620 	if (mii->mii_media_active & IFM_FDX) {
11621 		DPRINTF(WM_DEBUG_LINK,
11622 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
11623 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
11624 	} else {
11625 		DPRINTF(WM_DEBUG_LINK,
11626 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
11627 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
11628 	}
11629 
11630 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11631 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
11632 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
11633 						 : WMREG_FCRTL, sc->sc_fcrtl);
11634 	if (sc->sc_type == WM_T_80003) {
11635 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
11636 		case IFM_1000_T:
11637 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
11638 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
11639 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
11640 			break;
11641 		default:
11642 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
11643 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
11644 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
11645 			break;
11646 		}
11647 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
11648 	}
11649 }
11650 
11651 /* kumeran related (80003, ICH* and PCH*) */
11652 
11653 /*
11654  * wm_kmrn_readreg:
11655  *
11656  *	Read a kumeran register
11657  */
11658 static int
11659 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
11660 {
11661 	int rv;
11662 
11663 	if (sc->sc_type == WM_T_80003)
11664 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
11665 	else
11666 		rv = sc->phy.acquire(sc);
11667 	if (rv != 0) {
11668 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
11669 		    __func__);
11670 		return rv;
11671 	}
11672 
11673 	rv = wm_kmrn_readreg_locked(sc, reg, val);
11674 
11675 	if (sc->sc_type == WM_T_80003)
11676 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
11677 	else
11678 		sc->phy.release(sc);
11679 
11680 	return rv;
11681 }
11682 
11683 static int
11684 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
11685 {
11686 
11687 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
11688 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
11689 	    KUMCTRLSTA_REN);
11690 	CSR_WRITE_FLUSH(sc);
11691 	delay(2);
11692 
11693 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
11694 
11695 	return 0;
11696 }
11697 
11698 /*
11699  * wm_kmrn_writereg:
11700  *
11701  *	Write a kumeran register
11702  */
11703 static int
11704 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
11705 {
11706 	int rv;
11707 
11708 	if (sc->sc_type == WM_T_80003)
11709 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
11710 	else
11711 		rv = sc->phy.acquire(sc);
11712 	if (rv != 0) {
11713 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
11714 		    __func__);
11715 		return rv;
11716 	}
11717 
11718 	rv = wm_kmrn_writereg_locked(sc, reg, val);
11719 
11720 	if (sc->sc_type == WM_T_80003)
11721 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
11722 	else
11723 		sc->phy.release(sc);
11724 
11725 	return rv;
11726 }
11727 
11728 static int
11729 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
11730 {
11731 
11732 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
11733 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
11734 
11735 	return 0;
11736 }
11737 
11738 /*
11739  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
11740  * This access method is different from IEEE MMD.
11741  */
11742 static int
11743 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
11744 {
11745 	struct wm_softc *sc = device_private(dev);
11746 	int rv;
11747 
11748 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
11749 	if (rv != 0)
11750 		return rv;
11751 
11752 	if (rd)
11753 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
11754 	else
11755 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
11756 	return rv;
11757 }
11758 
11759 static int
11760 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
11761 {
11762 
11763 	return wm_access_emi_reg_locked(dev, reg, val, true);
11764 }
11765 
11766 static int
11767 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
11768 {
11769 
11770 	return wm_access_emi_reg_locked(dev, reg, &val, false);
11771 }
11772 
11773 /* SGMII related */
11774 
11775 /*
11776  * wm_sgmii_uses_mdio
11777  *
11778  * Check whether the transaction is to the internal PHY or the external
11779  * MDIO interface. Return true if it's MDIO.
11780  */
11781 static bool
11782 wm_sgmii_uses_mdio(struct wm_softc *sc)
11783 {
11784 	uint32_t reg;
11785 	bool ismdio = false;
11786 
11787 	switch (sc->sc_type) {
11788 	case WM_T_82575:
11789 	case WM_T_82576:
11790 		reg = CSR_READ(sc, WMREG_MDIC);
11791 		ismdio = ((reg & MDIC_DEST) != 0);
11792 		break;
11793 	case WM_T_82580:
11794 	case WM_T_I350:
11795 	case WM_T_I354:
11796 	case WM_T_I210:
11797 	case WM_T_I211:
11798 		reg = CSR_READ(sc, WMREG_MDICNFG);
11799 		ismdio = ((reg & MDICNFG_DEST) != 0);
11800 		break;
11801 	default:
11802 		break;
11803 	}
11804 
11805 	return ismdio;
11806 }
11807 
11808 /*
11809  * wm_sgmii_readreg:	[mii interface function]
11810  *
11811  *	Read a PHY register on the SGMII
11812  * This could be handled by the PHY layer if we didn't have to lock the
11813  * ressource ...
11814  */
11815 static int
11816 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
11817 {
11818 	struct wm_softc *sc = device_private(dev);
11819 	int rv;
11820 
11821 	if (sc->phy.acquire(sc)) {
11822 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11823 		return -1;
11824 	}
11825 
11826 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
11827 
11828 	sc->phy.release(sc);
11829 	return rv;
11830 }
11831 
11832 static int
11833 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
11834 {
11835 	struct wm_softc *sc = device_private(dev);
11836 	uint32_t i2ccmd;
11837 	int i, rv = 0;
11838 
11839 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
11840 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
11841 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
11842 
11843 	/* Poll the ready bit */
11844 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
11845 		delay(50);
11846 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
11847 		if (i2ccmd & I2CCMD_READY)
11848 			break;
11849 	}
11850 	if ((i2ccmd & I2CCMD_READY) == 0) {
11851 		device_printf(dev, "I2CCMD Read did not complete\n");
11852 		rv = ETIMEDOUT;
11853 	}
11854 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
11855 		if (!sc->phy.no_errprint)
11856 			device_printf(dev, "I2CCMD Error bit set\n");
11857 		rv = EIO;
11858 	}
11859 
11860 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
11861 
11862 	return rv;
11863 }
11864 
11865 /*
11866  * wm_sgmii_writereg:	[mii interface function]
11867  *
11868  *	Write a PHY register on the SGMII.
11869  * This could be handled by the PHY layer if we didn't have to lock the
11870  * ressource ...
11871  */
11872 static int
11873 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
11874 {
11875 	struct wm_softc *sc = device_private(dev);
11876 	int rv;
11877 
11878 	if (sc->phy.acquire(sc) != 0) {
11879 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11880 		return -1;
11881 	}
11882 
11883 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
11884 
11885 	sc->phy.release(sc);
11886 
11887 	return rv;
11888 }
11889 
11890 static int
11891 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
11892 {
11893 	struct wm_softc *sc = device_private(dev);
11894 	uint32_t i2ccmd;
11895 	uint16_t swapdata;
11896 	int rv = 0;
11897 	int i;
11898 
11899 	/* Swap the data bytes for the I2C interface */
11900 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
11901 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
11902 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
11903 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
11904 
11905 	/* Poll the ready bit */
11906 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
11907 		delay(50);
11908 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
11909 		if (i2ccmd & I2CCMD_READY)
11910 			break;
11911 	}
11912 	if ((i2ccmd & I2CCMD_READY) == 0) {
11913 		device_printf(dev, "I2CCMD Write did not complete\n");
11914 		rv = ETIMEDOUT;
11915 	}
11916 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
11917 		device_printf(dev, "I2CCMD Error bit set\n");
11918 		rv = EIO;
11919 	}
11920 
11921 	return rv;
11922 }
11923 
11924 /* TBI related */
11925 
11926 static bool
11927 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
11928 {
11929 	bool sig;
11930 
11931 	sig = ctrl & CTRL_SWDPIN(1);
11932 
11933 	/*
11934 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
11935 	 * detect a signal, 1 if they don't.
11936 	 */
11937 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
11938 		sig = !sig;
11939 
11940 	return sig;
11941 }
11942 
11943 /*
11944  * wm_tbi_mediainit:
11945  *
11946  *	Initialize media for use on 1000BASE-X devices.
11947  */
11948 static void
11949 wm_tbi_mediainit(struct wm_softc *sc)
11950 {
11951 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
11952 	const char *sep = "";
11953 
11954 	if (sc->sc_type < WM_T_82543)
11955 		sc->sc_tipg = TIPG_WM_DFLT;
11956 	else
11957 		sc->sc_tipg = TIPG_LG_DFLT;
11958 
11959 	sc->sc_tbi_serdes_anegticks = 5;
11960 
11961 	/* Initialize our media structures */
11962 	sc->sc_mii.mii_ifp = ifp;
11963 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
11964 
11965 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
11966 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
11967 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
11968 		    wm_serdes_mediachange, wm_serdes_mediastatus,
11969 		    sc->sc_core_lock);
11970 	} else {
11971 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
11972 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
11973 	}
11974 
11975 	/*
11976 	 * SWD Pins:
11977 	 *
11978 	 *	0 = Link LED (output)
11979 	 *	1 = Loss Of Signal (input)
11980 	 */
11981 	sc->sc_ctrl |= CTRL_SWDPIO(0);
11982 
11983 	/* XXX Perhaps this is only for TBI */
11984 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
11985 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
11986 
11987 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
11988 		sc->sc_ctrl &= ~CTRL_LRST;
11989 
11990 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11991 
11992 #define	ADD(ss, mm, dd)							\
11993 do {									\
11994 	aprint_normal("%s%s", sep, ss);					\
11995 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
11996 	sep = ", ";							\
11997 } while (/*CONSTCOND*/0)
11998 
11999 	aprint_normal_dev(sc->sc_dev, "");
12000 
12001 	if (sc->sc_type == WM_T_I354) {
12002 		uint32_t status;
12003 
12004 		status = CSR_READ(sc, WMREG_STATUS);
12005 		if (((status & STATUS_2P5_SKU) != 0)
12006 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
12007 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
12008 		} else
12009 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
12010 	} else if (sc->sc_type == WM_T_82545) {
12011 		/* Only 82545 is LX (XXX except SFP) */
12012 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
12013 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
12014 	} else if (sc->sc_sfptype != 0) {
12015 		/* XXX wm(4) fiber/serdes don't use ifm_data */
12016 		switch (sc->sc_sfptype) {
12017 		default:
12018 		case SFF_SFP_ETH_FLAGS_1000SX:
12019 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
12020 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
12021 			break;
12022 		case SFF_SFP_ETH_FLAGS_1000LX:
12023 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
12024 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
12025 			break;
12026 		case SFF_SFP_ETH_FLAGS_1000CX:
12027 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
12028 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
12029 			break;
12030 		case SFF_SFP_ETH_FLAGS_1000T:
12031 			ADD("1000baseT", IFM_1000_T, 0);
12032 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
12033 			break;
12034 		case SFF_SFP_ETH_FLAGS_100FX:
12035 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
12036 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
12037 			break;
12038 		}
12039 	} else {
12040 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
12041 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
12042 	}
12043 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
12044 	aprint_normal("\n");
12045 
12046 #undef ADD
12047 
12048 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
12049 }
12050 
12051 /*
12052  * wm_tbi_mediachange:	[ifmedia interface function]
12053  *
12054  *	Set hardware to newly-selected media on a 1000BASE-X device.
12055  */
12056 static int
12057 wm_tbi_mediachange(struct ifnet *ifp)
12058 {
12059 	struct wm_softc *sc = ifp->if_softc;
12060 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
12061 	uint32_t status, ctrl;
12062 	bool signal;
12063 	int i;
12064 
12065 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
12066 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
12067 		/* XXX need some work for >= 82571 and < 82575 */
12068 		if (sc->sc_type < WM_T_82575)
12069 			return 0;
12070 	}
12071 
12072 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
12073 	    || (sc->sc_type >= WM_T_82575))
12074 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
12075 
12076 	sc->sc_ctrl &= ~CTRL_LRST;
12077 	sc->sc_txcw = TXCW_ANE;
12078 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
12079 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
12080 	else if (ife->ifm_media & IFM_FDX)
12081 		sc->sc_txcw |= TXCW_FD;
12082 	else
12083 		sc->sc_txcw |= TXCW_HD;
12084 
12085 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
12086 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
12087 
12088 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
12089 		device_xname(sc->sc_dev), sc->sc_txcw));
12090 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
12091 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12092 	CSR_WRITE_FLUSH(sc);
12093 	delay(1000);
12094 
12095 	ctrl = CSR_READ(sc, WMREG_CTRL);
12096 	signal = wm_tbi_havesignal(sc, ctrl);
12097 
12098 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
12099 		signal));
12100 
12101 	if (signal) {
12102 		/* Have signal; wait for the link to come up. */
12103 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
12104 			delay(10000);
12105 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
12106 				break;
12107 		}
12108 
12109 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
12110 			device_xname(sc->sc_dev), i));
12111 
12112 		status = CSR_READ(sc, WMREG_STATUS);
12113 		DPRINTF(WM_DEBUG_LINK,
12114 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
12115 			device_xname(sc->sc_dev), status, STATUS_LU));
12116 		if (status & STATUS_LU) {
12117 			/* Link is up. */
12118 			DPRINTF(WM_DEBUG_LINK,
12119 			    ("%s: LINK: set media -> link up %s\n",
12120 				device_xname(sc->sc_dev),
12121 				(status & STATUS_FD) ? "FDX" : "HDX"));
12122 
12123 			/*
12124 			 * NOTE: CTRL will update TFCE and RFCE automatically,
12125 			 * so we should update sc->sc_ctrl
12126 			 */
12127 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
12128 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
12129 			sc->sc_fcrtl &= ~FCRTL_XONE;
12130 			if (status & STATUS_FD)
12131 				sc->sc_tctl |=
12132 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
12133 			else
12134 				sc->sc_tctl |=
12135 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
12136 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
12137 				sc->sc_fcrtl |= FCRTL_XONE;
12138 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
12139 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
12140 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
12141 			sc->sc_tbi_linkup = 1;
12142 		} else {
12143 			if (i == WM_LINKUP_TIMEOUT)
12144 				wm_check_for_link(sc);
12145 			/* Link is down. */
12146 			DPRINTF(WM_DEBUG_LINK,
12147 			    ("%s: LINK: set media -> link down\n",
12148 				device_xname(sc->sc_dev)));
12149 			sc->sc_tbi_linkup = 0;
12150 		}
12151 	} else {
12152 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
12153 			device_xname(sc->sc_dev)));
12154 		sc->sc_tbi_linkup = 0;
12155 	}
12156 
12157 	wm_tbi_serdes_set_linkled(sc);
12158 
12159 	return 0;
12160 }
12161 
12162 /*
12163  * wm_tbi_mediastatus:	[ifmedia interface function]
12164  *
12165  *	Get the current interface media status on a 1000BASE-X device.
12166  */
12167 static void
12168 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
12169 {
12170 	struct wm_softc *sc = ifp->if_softc;
12171 	uint32_t ctrl, status;
12172 
12173 	ifmr->ifm_status = IFM_AVALID;
12174 	ifmr->ifm_active = IFM_ETHER;
12175 
12176 	status = CSR_READ(sc, WMREG_STATUS);
12177 	if ((status & STATUS_LU) == 0) {
12178 		ifmr->ifm_active |= IFM_NONE;
12179 		return;
12180 	}
12181 
12182 	ifmr->ifm_status |= IFM_ACTIVE;
12183 	/* Only 82545 is LX */
12184 	if (sc->sc_type == WM_T_82545)
12185 		ifmr->ifm_active |= IFM_1000_LX;
12186 	else
12187 		ifmr->ifm_active |= IFM_1000_SX;
12188 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
12189 		ifmr->ifm_active |= IFM_FDX;
12190 	else
12191 		ifmr->ifm_active |= IFM_HDX;
12192 	ctrl = CSR_READ(sc, WMREG_CTRL);
12193 	if (ctrl & CTRL_RFCE)
12194 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
12195 	if (ctrl & CTRL_TFCE)
12196 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
12197 }
12198 
12199 /* XXX TBI only */
12200 static int
12201 wm_check_for_link(struct wm_softc *sc)
12202 {
12203 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
12204 	uint32_t rxcw;
12205 	uint32_t ctrl;
12206 	uint32_t status;
12207 	bool signal;
12208 
12209 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
12210 		device_xname(sc->sc_dev), __func__));
12211 
12212 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
12213 		/* XXX need some work for >= 82571 */
12214 		if (sc->sc_type >= WM_T_82571) {
12215 			sc->sc_tbi_linkup = 1;
12216 			return 0;
12217 		}
12218 	}
12219 
12220 	rxcw = CSR_READ(sc, WMREG_RXCW);
12221 	ctrl = CSR_READ(sc, WMREG_CTRL);
12222 	status = CSR_READ(sc, WMREG_STATUS);
12223 	signal = wm_tbi_havesignal(sc, ctrl);
12224 
12225 	DPRINTF(WM_DEBUG_LINK,
12226 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
12227 		device_xname(sc->sc_dev), __func__, signal,
12228 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
12229 
12230 	/*
12231 	 * SWDPIN   LU RXCW
12232 	 *	0    0	  0
12233 	 *	0    0	  1	(should not happen)
12234 	 *	0    1	  0	(should not happen)
12235 	 *	0    1	  1	(should not happen)
12236 	 *	1    0	  0	Disable autonego and force linkup
12237 	 *	1    0	  1	got /C/ but not linkup yet
12238 	 *	1    1	  0	(linkup)
12239 	 *	1    1	  1	If IFM_AUTO, back to autonego
12240 	 *
12241 	 */
12242 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
12243 		DPRINTF(WM_DEBUG_LINK,
12244 		    ("%s: %s: force linkup and fullduplex\n",
12245 			device_xname(sc->sc_dev), __func__));
12246 		sc->sc_tbi_linkup = 0;
12247 		/* Disable auto-negotiation in the TXCW register */
12248 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
12249 
12250 		/*
12251 		 * Force link-up and also force full-duplex.
12252 		 *
12253 		 * NOTE: CTRL was updated TFCE and RFCE automatically,
12254 		 * so we should update sc->sc_ctrl
12255 		 */
12256 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
12257 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12258 	} else if (((status & STATUS_LU) != 0)
12259 	    && ((rxcw & RXCW_C) != 0)
12260 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
12261 		sc->sc_tbi_linkup = 1;
12262 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
12263 			device_xname(sc->sc_dev),
12264 			__func__));
12265 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
12266 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
12267 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
12268 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/",
12269 			device_xname(sc->sc_dev), __func__));
12270 	} else {
12271 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
12272 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
12273 			status));
12274 	}
12275 
12276 	return 0;
12277 }
12278 
12279 /*
12280  * wm_tbi_tick:
12281  *
12282  *	Check the link on TBI devices.
12283  *	This function acts as mii_tick().
12284  */
12285 static void
12286 wm_tbi_tick(struct wm_softc *sc)
12287 {
12288 	struct mii_data *mii = &sc->sc_mii;
12289 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
12290 	uint32_t status;
12291 
12292 	KASSERT(WM_CORE_LOCKED(sc));
12293 
12294 	status = CSR_READ(sc, WMREG_STATUS);
12295 
12296 	/* XXX is this needed? */
12297 	(void)CSR_READ(sc, WMREG_RXCW);
12298 	(void)CSR_READ(sc, WMREG_CTRL);
12299 
12300 	/* set link status */
12301 	if ((status & STATUS_LU) == 0) {
12302 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
12303 			device_xname(sc->sc_dev)));
12304 		sc->sc_tbi_linkup = 0;
12305 	} else if (sc->sc_tbi_linkup == 0) {
12306 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
12307 			device_xname(sc->sc_dev),
12308 			(status & STATUS_FD) ? "FDX" : "HDX"));
12309 		sc->sc_tbi_linkup = 1;
12310 		sc->sc_tbi_serdes_ticks = 0;
12311 	}
12312 
12313 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
12314 		goto setled;
12315 
12316 	if ((status & STATUS_LU) == 0) {
12317 		sc->sc_tbi_linkup = 0;
12318 		/* If the timer expired, retry autonegotiation */
12319 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
12320 		    && (++sc->sc_tbi_serdes_ticks
12321 			>= sc->sc_tbi_serdes_anegticks)) {
12322 			DPRINTF(WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
12323 				device_xname(sc->sc_dev), __func__));
12324 			sc->sc_tbi_serdes_ticks = 0;
12325 			/*
12326 			 * Reset the link, and let autonegotiation do
12327 			 * its thing
12328 			 */
12329 			sc->sc_ctrl |= CTRL_LRST;
12330 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12331 			CSR_WRITE_FLUSH(sc);
12332 			delay(1000);
12333 			sc->sc_ctrl &= ~CTRL_LRST;
12334 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12335 			CSR_WRITE_FLUSH(sc);
12336 			delay(1000);
12337 			CSR_WRITE(sc, WMREG_TXCW,
12338 			    sc->sc_txcw & ~TXCW_ANE);
12339 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
12340 		}
12341 	}
12342 
12343 setled:
12344 	wm_tbi_serdes_set_linkled(sc);
12345 }
12346 
12347 /* SERDES related */
12348 static void
12349 wm_serdes_power_up_link_82575(struct wm_softc *sc)
12350 {
12351 	uint32_t reg;
12352 
12353 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
12354 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
12355 		return;
12356 
12357 	/* Enable PCS to turn on link */
12358 	reg = CSR_READ(sc, WMREG_PCS_CFG);
12359 	reg |= PCS_CFG_PCS_EN;
12360 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
12361 
12362 	/* Power up the laser */
12363 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
12364 	reg &= ~CTRL_EXT_SWDPIN(3);
12365 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12366 
12367 	/* Flush the write to verify completion */
12368 	CSR_WRITE_FLUSH(sc);
12369 	delay(1000);
12370 }
12371 
12372 static int
12373 wm_serdes_mediachange(struct ifnet *ifp)
12374 {
12375 	struct wm_softc *sc = ifp->if_softc;
12376 	bool pcs_autoneg = true; /* XXX */
12377 	uint32_t ctrl_ext, pcs_lctl, reg;
12378 
12379 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
12380 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
12381 		return 0;
12382 
12383 	/* XXX Currently, this function is not called on 8257[12] */
12384 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
12385 	    || (sc->sc_type >= WM_T_82575))
12386 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
12387 
12388 	/* Power on the sfp cage if present */
12389 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
12390 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
12391 	ctrl_ext |= CTRL_EXT_I2C_ENA;
12392 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
12393 
12394 	sc->sc_ctrl |= CTRL_SLU;
12395 
12396 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
12397 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
12398 
12399 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
12400 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
12401 	case CTRL_EXT_LINK_MODE_SGMII:
12402 		/* SGMII mode lets the phy handle forcing speed/duplex */
12403 		pcs_autoneg = true;
12404 		/* Autoneg time out should be disabled for SGMII mode */
12405 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
12406 		break;
12407 	case CTRL_EXT_LINK_MODE_1000KX:
12408 		pcs_autoneg = false;
12409 		/* FALLTHROUGH */
12410 	default:
12411 		if ((sc->sc_type == WM_T_82575)
12412 		    || (sc->sc_type == WM_T_82576)) {
12413 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
12414 				pcs_autoneg = false;
12415 		}
12416 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
12417 		    | CTRL_FRCFDX;
12418 
12419 		/* Set speed of 1000/Full if speed/duplex is forced */
12420 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
12421 	}
12422 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12423 
12424 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
12425 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
12426 
12427 	if (pcs_autoneg) {
12428 		/* Set PCS register for autoneg */
12429 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
12430 
12431 		/* Disable force flow control for autoneg */
12432 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
12433 
12434 		/* Configure flow control advertisement for autoneg */
12435 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
12436 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
12437 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
12438 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
12439 	} else
12440 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
12441 
12442 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
12443 
12444 	return 0;
12445 }
12446 
12447 static void
12448 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
12449 {
12450 	struct wm_softc *sc = ifp->if_softc;
12451 	struct mii_data *mii = &sc->sc_mii;
12452 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
12453 	uint32_t pcs_adv, pcs_lpab, reg;
12454 
12455 	ifmr->ifm_status = IFM_AVALID;
12456 	ifmr->ifm_active = IFM_ETHER;
12457 
12458 	/* Check PCS */
12459 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
12460 	if ((reg & PCS_LSTS_LINKOK) == 0) {
12461 		ifmr->ifm_active |= IFM_NONE;
12462 		sc->sc_tbi_linkup = 0;
12463 		goto setled;
12464 	}
12465 
12466 	sc->sc_tbi_linkup = 1;
12467 	ifmr->ifm_status |= IFM_ACTIVE;
12468 	if (sc->sc_type == WM_T_I354) {
12469 		uint32_t status;
12470 
12471 		status = CSR_READ(sc, WMREG_STATUS);
12472 		if (((status & STATUS_2P5_SKU) != 0)
12473 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
12474 			ifmr->ifm_active |= IFM_2500_KX;
12475 		} else
12476 			ifmr->ifm_active |= IFM_1000_KX;
12477 	} else {
12478 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
12479 		case PCS_LSTS_SPEED_10:
12480 			ifmr->ifm_active |= IFM_10_T; /* XXX */
12481 			break;
12482 		case PCS_LSTS_SPEED_100:
12483 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
12484 			break;
12485 		case PCS_LSTS_SPEED_1000:
12486 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
12487 			break;
12488 		default:
12489 			device_printf(sc->sc_dev, "Unknown speed\n");
12490 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
12491 			break;
12492 		}
12493 	}
12494 	if ((reg & PCS_LSTS_FDX) != 0)
12495 		ifmr->ifm_active |= IFM_FDX;
12496 	else
12497 		ifmr->ifm_active |= IFM_HDX;
12498 	mii->mii_media_active &= ~IFM_ETH_FMASK;
12499 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
12500 		/* Check flow */
12501 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
12502 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
12503 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
12504 			goto setled;
12505 		}
12506 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
12507 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
12508 		DPRINTF(WM_DEBUG_LINK,
12509 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
12510 		if ((pcs_adv & TXCW_SYM_PAUSE)
12511 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
12512 			mii->mii_media_active |= IFM_FLOW
12513 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
12514 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
12515 		    && (pcs_adv & TXCW_ASYM_PAUSE)
12516 		    && (pcs_lpab & TXCW_SYM_PAUSE)
12517 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
12518 			mii->mii_media_active |= IFM_FLOW
12519 			    | IFM_ETH_TXPAUSE;
12520 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
12521 		    && (pcs_adv & TXCW_ASYM_PAUSE)
12522 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
12523 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
12524 			mii->mii_media_active |= IFM_FLOW
12525 			    | IFM_ETH_RXPAUSE;
12526 		}
12527 	}
12528 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
12529 	    | (mii->mii_media_active & IFM_ETH_FMASK);
12530 setled:
12531 	wm_tbi_serdes_set_linkled(sc);
12532 }
12533 
12534 /*
12535  * wm_serdes_tick:
12536  *
12537  *	Check the link on serdes devices.
12538  */
12539 static void
12540 wm_serdes_tick(struct wm_softc *sc)
12541 {
12542 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
12543 	struct mii_data *mii = &sc->sc_mii;
12544 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
12545 	uint32_t reg;
12546 
12547 	KASSERT(WM_CORE_LOCKED(sc));
12548 
12549 	mii->mii_media_status = IFM_AVALID;
12550 	mii->mii_media_active = IFM_ETHER;
12551 
12552 	/* Check PCS */
12553 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
12554 	if ((reg & PCS_LSTS_LINKOK) != 0) {
12555 		mii->mii_media_status |= IFM_ACTIVE;
12556 		sc->sc_tbi_linkup = 1;
12557 		sc->sc_tbi_serdes_ticks = 0;
12558 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
12559 		if ((reg & PCS_LSTS_FDX) != 0)
12560 			mii->mii_media_active |= IFM_FDX;
12561 		else
12562 			mii->mii_media_active |= IFM_HDX;
12563 	} else {
12564 		mii->mii_media_status |= IFM_NONE;
12565 		sc->sc_tbi_linkup = 0;
12566 		/* If the timer expired, retry autonegotiation */
12567 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
12568 		    && (++sc->sc_tbi_serdes_ticks
12569 			>= sc->sc_tbi_serdes_anegticks)) {
12570 			DPRINTF(WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
12571 				device_xname(sc->sc_dev), __func__));
12572 			sc->sc_tbi_serdes_ticks = 0;
12573 			/* XXX */
12574 			wm_serdes_mediachange(ifp);
12575 		}
12576 	}
12577 
12578 	wm_tbi_serdes_set_linkled(sc);
12579 }
12580 
12581 /* SFP related */
12582 
12583 static int
12584 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
12585 {
12586 	uint32_t i2ccmd;
12587 	int i;
12588 
12589 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
12590 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
12591 
12592 	/* Poll the ready bit */
12593 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
12594 		delay(50);
12595 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
12596 		if (i2ccmd & I2CCMD_READY)
12597 			break;
12598 	}
12599 	if ((i2ccmd & I2CCMD_READY) == 0)
12600 		return -1;
12601 	if ((i2ccmd & I2CCMD_ERROR) != 0)
12602 		return -1;
12603 
12604 	*data = i2ccmd & 0x00ff;
12605 
12606 	return 0;
12607 }
12608 
12609 static uint32_t
12610 wm_sfp_get_media_type(struct wm_softc *sc)
12611 {
12612 	uint32_t ctrl_ext;
12613 	uint8_t val = 0;
12614 	int timeout = 3;
12615 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
12616 	int rv = -1;
12617 
12618 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
12619 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
12620 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
12621 	CSR_WRITE_FLUSH(sc);
12622 
12623 	/* Read SFP module data */
12624 	while (timeout) {
12625 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
12626 		if (rv == 0)
12627 			break;
12628 		delay(100*1000); /* XXX too big */
12629 		timeout--;
12630 	}
12631 	if (rv != 0)
12632 		goto out;
12633 
12634 	switch (val) {
12635 	case SFF_SFP_ID_SFF:
12636 		aprint_normal_dev(sc->sc_dev,
12637 		    "Module/Connector soldered to board\n");
12638 		break;
12639 	case SFF_SFP_ID_SFP:
12640 		sc->sc_flags |= WM_F_SFP;
12641 		break;
12642 	case SFF_SFP_ID_UNKNOWN:
12643 		goto out;
12644 	default:
12645 		break;
12646 	}
12647 
12648 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
12649 	if (rv != 0)
12650 		goto out;
12651 
12652 	sc->sc_sfptype = val;
12653 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
12654 		mediatype = WM_MEDIATYPE_SERDES;
12655 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
12656 		sc->sc_flags |= WM_F_SGMII;
12657 		mediatype = WM_MEDIATYPE_COPPER;
12658 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
12659 		sc->sc_flags |= WM_F_SGMII;
12660 		mediatype = WM_MEDIATYPE_SERDES;
12661 	} else {
12662 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
12663 		    __func__, sc->sc_sfptype);
12664 		sc->sc_sfptype = 0; /* XXX unknown */
12665 	}
12666 
12667 out:
12668 	/* Restore I2C interface setting */
12669 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
12670 
12671 	return mediatype;
12672 }
12673 
12674 /*
12675  * NVM related.
12676  * Microwire, SPI (w/wo EERD) and Flash.
12677  */
12678 
12679 /* Both spi and uwire */
12680 
12681 /*
12682  * wm_eeprom_sendbits:
12683  *
12684  *	Send a series of bits to the EEPROM.
12685  */
12686 static void
12687 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
12688 {
12689 	uint32_t reg;
12690 	int x;
12691 
12692 	reg = CSR_READ(sc, WMREG_EECD);
12693 
12694 	for (x = nbits; x > 0; x--) {
12695 		if (bits & (1U << (x - 1)))
12696 			reg |= EECD_DI;
12697 		else
12698 			reg &= ~EECD_DI;
12699 		CSR_WRITE(sc, WMREG_EECD, reg);
12700 		CSR_WRITE_FLUSH(sc);
12701 		delay(2);
12702 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
12703 		CSR_WRITE_FLUSH(sc);
12704 		delay(2);
12705 		CSR_WRITE(sc, WMREG_EECD, reg);
12706 		CSR_WRITE_FLUSH(sc);
12707 		delay(2);
12708 	}
12709 }
12710 
12711 /*
12712  * wm_eeprom_recvbits:
12713  *
12714  *	Receive a series of bits from the EEPROM.
12715  */
12716 static void
12717 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
12718 {
12719 	uint32_t reg, val;
12720 	int x;
12721 
12722 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
12723 
12724 	val = 0;
12725 	for (x = nbits; x > 0; x--) {
12726 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
12727 		CSR_WRITE_FLUSH(sc);
12728 		delay(2);
12729 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
12730 			val |= (1U << (x - 1));
12731 		CSR_WRITE(sc, WMREG_EECD, reg);
12732 		CSR_WRITE_FLUSH(sc);
12733 		delay(2);
12734 	}
12735 	*valp = val;
12736 }
12737 
12738 /* Microwire */
12739 
12740 /*
12741  * wm_nvm_read_uwire:
12742  *
12743  *	Read a word from the EEPROM using the MicroWire protocol.
12744  */
12745 static int
12746 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
12747 {
12748 	uint32_t reg, val;
12749 	int i;
12750 
12751 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
12752 		device_xname(sc->sc_dev), __func__));
12753 
12754 	if (sc->nvm.acquire(sc) != 0)
12755 		return -1;
12756 
12757 	for (i = 0; i < wordcnt; i++) {
12758 		/* Clear SK and DI. */
12759 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
12760 		CSR_WRITE(sc, WMREG_EECD, reg);
12761 
12762 		/*
12763 		 * XXX: workaround for a bug in qemu-0.12.x and prior
12764 		 * and Xen.
12765 		 *
12766 		 * We use this workaround only for 82540 because qemu's
12767 		 * e1000 act as 82540.
12768 		 */
12769 		if (sc->sc_type == WM_T_82540) {
12770 			reg |= EECD_SK;
12771 			CSR_WRITE(sc, WMREG_EECD, reg);
12772 			reg &= ~EECD_SK;
12773 			CSR_WRITE(sc, WMREG_EECD, reg);
12774 			CSR_WRITE_FLUSH(sc);
12775 			delay(2);
12776 		}
12777 		/* XXX: end of workaround */
12778 
12779 		/* Set CHIP SELECT. */
12780 		reg |= EECD_CS;
12781 		CSR_WRITE(sc, WMREG_EECD, reg);
12782 		CSR_WRITE_FLUSH(sc);
12783 		delay(2);
12784 
12785 		/* Shift in the READ command. */
12786 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
12787 
12788 		/* Shift in address. */
12789 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
12790 
12791 		/* Shift out the data. */
12792 		wm_eeprom_recvbits(sc, &val, 16);
12793 		data[i] = val & 0xffff;
12794 
12795 		/* Clear CHIP SELECT. */
12796 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
12797 		CSR_WRITE(sc, WMREG_EECD, reg);
12798 		CSR_WRITE_FLUSH(sc);
12799 		delay(2);
12800 	}
12801 
12802 	sc->nvm.release(sc);
12803 	return 0;
12804 }
12805 
12806 /* SPI */
12807 
12808 /*
12809  * Set SPI and FLASH related information from the EECD register.
12810  * For 82541 and 82547, the word size is taken from EEPROM.
12811  */
12812 static int
12813 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
12814 {
12815 	int size;
12816 	uint32_t reg;
12817 	uint16_t data;
12818 
12819 	reg = CSR_READ(sc, WMREG_EECD);
12820 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
12821 
12822 	/* Read the size of NVM from EECD by default */
12823 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
12824 	switch (sc->sc_type) {
12825 	case WM_T_82541:
12826 	case WM_T_82541_2:
12827 	case WM_T_82547:
12828 	case WM_T_82547_2:
12829 		/* Set dummy value to access EEPROM */
12830 		sc->sc_nvm_wordsize = 64;
12831 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
12832 			aprint_error_dev(sc->sc_dev,
12833 			    "%s: failed to read EEPROM size\n", __func__);
12834 		}
12835 		reg = data;
12836 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
12837 		if (size == 0)
12838 			size = 6; /* 64 word size */
12839 		else
12840 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
12841 		break;
12842 	case WM_T_80003:
12843 	case WM_T_82571:
12844 	case WM_T_82572:
12845 	case WM_T_82573: /* SPI case */
12846 	case WM_T_82574: /* SPI case */
12847 	case WM_T_82583: /* SPI case */
12848 		size += NVM_WORD_SIZE_BASE_SHIFT;
12849 		if (size > 14)
12850 			size = 14;
12851 		break;
12852 	case WM_T_82575:
12853 	case WM_T_82576:
12854 	case WM_T_82580:
12855 	case WM_T_I350:
12856 	case WM_T_I354:
12857 	case WM_T_I210:
12858 	case WM_T_I211:
12859 		size += NVM_WORD_SIZE_BASE_SHIFT;
12860 		if (size > 15)
12861 			size = 15;
12862 		break;
12863 	default:
12864 		aprint_error_dev(sc->sc_dev,
12865 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
12866 		return -1;
12867 		break;
12868 	}
12869 
12870 	sc->sc_nvm_wordsize = 1 << size;
12871 
12872 	return 0;
12873 }
12874 
12875 /*
12876  * wm_nvm_ready_spi:
12877  *
12878  *	Wait for a SPI EEPROM to be ready for commands.
12879  */
12880 static int
12881 wm_nvm_ready_spi(struct wm_softc *sc)
12882 {
12883 	uint32_t val;
12884 	int usec;
12885 
12886 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
12887 		device_xname(sc->sc_dev), __func__));
12888 
12889 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
12890 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
12891 		wm_eeprom_recvbits(sc, &val, 8);
12892 		if ((val & SPI_SR_RDY) == 0)
12893 			break;
12894 	}
12895 	if (usec >= SPI_MAX_RETRIES) {
12896 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
12897 		return -1;
12898 	}
12899 	return 0;
12900 }
12901 
12902 /*
12903  * wm_nvm_read_spi:
12904  *
12905  *	Read a work from the EEPROM using the SPI protocol.
12906  */
12907 static int
12908 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
12909 {
12910 	uint32_t reg, val;
12911 	int i;
12912 	uint8_t opc;
12913 	int rv = 0;
12914 
12915 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
12916 		device_xname(sc->sc_dev), __func__));
12917 
12918 	if (sc->nvm.acquire(sc) != 0)
12919 		return -1;
12920 
12921 	/* Clear SK and CS. */
12922 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
12923 	CSR_WRITE(sc, WMREG_EECD, reg);
12924 	CSR_WRITE_FLUSH(sc);
12925 	delay(2);
12926 
12927 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
12928 		goto out;
12929 
12930 	/* Toggle CS to flush commands. */
12931 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
12932 	CSR_WRITE_FLUSH(sc);
12933 	delay(2);
12934 	CSR_WRITE(sc, WMREG_EECD, reg);
12935 	CSR_WRITE_FLUSH(sc);
12936 	delay(2);
12937 
12938 	opc = SPI_OPC_READ;
12939 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
12940 		opc |= SPI_OPC_A8;
12941 
12942 	wm_eeprom_sendbits(sc, opc, 8);
12943 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
12944 
12945 	for (i = 0; i < wordcnt; i++) {
12946 		wm_eeprom_recvbits(sc, &val, 16);
12947 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
12948 	}
12949 
12950 	/* Raise CS and clear SK. */
12951 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
12952 	CSR_WRITE(sc, WMREG_EECD, reg);
12953 	CSR_WRITE_FLUSH(sc);
12954 	delay(2);
12955 
12956 out:
12957 	sc->nvm.release(sc);
12958 	return rv;
12959 }
12960 
12961 /* Using with EERD */
12962 
12963 static int
12964 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
12965 {
12966 	uint32_t attempts = 100000;
12967 	uint32_t i, reg = 0;
12968 	int32_t done = -1;
12969 
12970 	for (i = 0; i < attempts; i++) {
12971 		reg = CSR_READ(sc, rw);
12972 
12973 		if (reg & EERD_DONE) {
12974 			done = 0;
12975 			break;
12976 		}
12977 		delay(5);
12978 	}
12979 
12980 	return done;
12981 }
12982 
12983 static int
12984 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
12985 {
12986 	int i, eerd = 0;
12987 	int rv = 0;
12988 
12989 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
12990 		device_xname(sc->sc_dev), __func__));
12991 
12992 	if (sc->nvm.acquire(sc) != 0)
12993 		return -1;
12994 
12995 	for (i = 0; i < wordcnt; i++) {
12996 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
12997 		CSR_WRITE(sc, WMREG_EERD, eerd);
12998 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
12999 		if (rv != 0) {
13000 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
13001 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
13002 			break;
13003 		}
13004 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
13005 	}
13006 
13007 	sc->nvm.release(sc);
13008 	return rv;
13009 }
13010 
13011 /* Flash */
13012 
13013 static int
13014 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
13015 {
13016 	uint32_t eecd;
13017 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
13018 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
13019 	uint32_t nvm_dword = 0;
13020 	uint8_t sig_byte = 0;
13021 	int rv;
13022 
13023 	switch (sc->sc_type) {
13024 	case WM_T_PCH_SPT:
13025 	case WM_T_PCH_CNP:
13026 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
13027 		act_offset = ICH_NVM_SIG_WORD * 2;
13028 
13029 		/* Set bank to 0 in case flash read fails. */
13030 		*bank = 0;
13031 
13032 		/* Check bank 0 */
13033 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
13034 		if (rv != 0)
13035 			return rv;
13036 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
13037 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13038 			*bank = 0;
13039 			return 0;
13040 		}
13041 
13042 		/* Check bank 1 */
13043 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
13044 		    &nvm_dword);
13045 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
13046 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13047 			*bank = 1;
13048 			return 0;
13049 		}
13050 		aprint_error_dev(sc->sc_dev,
13051 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
13052 		return -1;
13053 	case WM_T_ICH8:
13054 	case WM_T_ICH9:
13055 		eecd = CSR_READ(sc, WMREG_EECD);
13056 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
13057 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
13058 			return 0;
13059 		}
13060 		/* FALLTHROUGH */
13061 	default:
13062 		/* Default to 0 */
13063 		*bank = 0;
13064 
13065 		/* Check bank 0 */
13066 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
13067 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13068 			*bank = 0;
13069 			return 0;
13070 		}
13071 
13072 		/* Check bank 1 */
13073 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
13074 		    &sig_byte);
13075 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13076 			*bank = 1;
13077 			return 0;
13078 		}
13079 	}
13080 
13081 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
13082 		device_xname(sc->sc_dev)));
13083 	return -1;
13084 }
13085 
13086 /******************************************************************************
13087  * This function does initial flash setup so that a new read/write/erase cycle
13088  * can be started.
13089  *
13090  * sc - The pointer to the hw structure
13091  ****************************************************************************/
13092 static int32_t
13093 wm_ich8_cycle_init(struct wm_softc *sc)
13094 {
13095 	uint16_t hsfsts;
13096 	int32_t error = 1;
13097 	int32_t i     = 0;
13098 
13099 	if (sc->sc_type >= WM_T_PCH_SPT)
13100 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
13101 	else
13102 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
13103 
13104 	/* May be check the Flash Des Valid bit in Hw status */
13105 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
13106 		return error;
13107 
13108 	/* Clear FCERR in Hw status by writing 1 */
13109 	/* Clear DAEL in Hw status by writing a 1 */
13110 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
13111 
13112 	if (sc->sc_type >= WM_T_PCH_SPT)
13113 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
13114 	else
13115 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
13116 
13117 	/*
13118 	 * Either we should have a hardware SPI cycle in progress bit to check
13119 	 * against, in order to start a new cycle or FDONE bit should be
13120 	 * changed in the hardware so that it is 1 after hardware reset, which
13121 	 * can then be used as an indication whether a cycle is in progress or
13122 	 * has been completed .. we should also have some software semaphore
13123 	 * mechanism to guard FDONE or the cycle in progress bit so that two
13124 	 * threads access to those bits can be sequentiallized or a way so that
13125 	 * 2 threads don't start the cycle at the same time
13126 	 */
13127 
13128 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
13129 		/*
13130 		 * There is no cycle running at present, so we can start a
13131 		 * cycle
13132 		 */
13133 
13134 		/* Begin by setting Flash Cycle Done. */
13135 		hsfsts |= HSFSTS_DONE;
13136 		if (sc->sc_type >= WM_T_PCH_SPT)
13137 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
13138 			    hsfsts & 0xffffUL);
13139 		else
13140 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
13141 		error = 0;
13142 	} else {
13143 		/*
13144 		 * Otherwise poll for sometime so the current cycle has a
13145 		 * chance to end before giving up.
13146 		 */
13147 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
13148 			if (sc->sc_type >= WM_T_PCH_SPT)
13149 				hsfsts = ICH8_FLASH_READ32(sc,
13150 				    ICH_FLASH_HSFSTS) & 0xffffUL;
13151 			else
13152 				hsfsts = ICH8_FLASH_READ16(sc,
13153 				    ICH_FLASH_HSFSTS);
13154 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
13155 				error = 0;
13156 				break;
13157 			}
13158 			delay(1);
13159 		}
13160 		if (error == 0) {
13161 			/*
13162 			 * Successful in waiting for previous cycle to timeout,
13163 			 * now set the Flash Cycle Done.
13164 			 */
13165 			hsfsts |= HSFSTS_DONE;
13166 			if (sc->sc_type >= WM_T_PCH_SPT)
13167 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
13168 				    hsfsts & 0xffffUL);
13169 			else
13170 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
13171 				    hsfsts);
13172 		}
13173 	}
13174 	return error;
13175 }
13176 
13177 /******************************************************************************
13178  * This function starts a flash cycle and waits for its completion
13179  *
13180  * sc - The pointer to the hw structure
13181  ****************************************************************************/
13182 static int32_t
13183 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
13184 {
13185 	uint16_t hsflctl;
13186 	uint16_t hsfsts;
13187 	int32_t error = 1;
13188 	uint32_t i = 0;
13189 
13190 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
13191 	if (sc->sc_type >= WM_T_PCH_SPT)
13192 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
13193 	else
13194 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
13195 	hsflctl |= HSFCTL_GO;
13196 	if (sc->sc_type >= WM_T_PCH_SPT)
13197 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
13198 		    (uint32_t)hsflctl << 16);
13199 	else
13200 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
13201 
13202 	/* Wait till FDONE bit is set to 1 */
13203 	do {
13204 		if (sc->sc_type >= WM_T_PCH_SPT)
13205 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
13206 			    & 0xffffUL;
13207 		else
13208 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
13209 		if (hsfsts & HSFSTS_DONE)
13210 			break;
13211 		delay(1);
13212 		i++;
13213 	} while (i < timeout);
13214 	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
13215 		error = 0;
13216 
13217 	return error;
13218 }
13219 
13220 /******************************************************************************
13221  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
13222  *
13223  * sc - The pointer to the hw structure
13224  * index - The index of the byte or word to read.
13225  * size - Size of data to read, 1=byte 2=word, 4=dword
13226  * data - Pointer to the word to store the value read.
13227  *****************************************************************************/
13228 static int32_t
13229 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
13230     uint32_t size, uint32_t *data)
13231 {
13232 	uint16_t hsfsts;
13233 	uint16_t hsflctl;
13234 	uint32_t flash_linear_address;
13235 	uint32_t flash_data = 0;
13236 	int32_t error = 1;
13237 	int32_t count = 0;
13238 
13239 	if (size < 1  || size > 4 || data == 0x0 ||
13240 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
13241 		return error;
13242 
13243 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
13244 	    sc->sc_ich8_flash_base;
13245 
13246 	do {
13247 		delay(1);
13248 		/* Steps */
13249 		error = wm_ich8_cycle_init(sc);
13250 		if (error)
13251 			break;
13252 
13253 		if (sc->sc_type >= WM_T_PCH_SPT)
13254 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
13255 			    >> 16;
13256 		else
13257 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
13258 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
13259 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
13260 		    & HSFCTL_BCOUNT_MASK;
13261 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
13262 		if (sc->sc_type >= WM_T_PCH_SPT) {
13263 			/*
13264 			 * In SPT, This register is in Lan memory space, not
13265 			 * flash. Therefore, only 32 bit access is supported.
13266 			 */
13267 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
13268 			    (uint32_t)hsflctl << 16);
13269 		} else
13270 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
13271 
13272 		/*
13273 		 * Write the last 24 bits of index into Flash Linear address
13274 		 * field in Flash Address
13275 		 */
13276 		/* TODO: TBD maybe check the index against the size of flash */
13277 
13278 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
13279 
13280 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
13281 
13282 		/*
13283 		 * Check if FCERR is set to 1, if set to 1, clear it and try
13284 		 * the whole sequence a few more times, else read in (shift in)
13285 		 * the Flash Data0, the order is least significant byte first
13286 		 * msb to lsb
13287 		 */
13288 		if (error == 0) {
13289 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
13290 			if (size == 1)
13291 				*data = (uint8_t)(flash_data & 0x000000FF);
13292 			else if (size == 2)
13293 				*data = (uint16_t)(flash_data & 0x0000FFFF);
13294 			else if (size == 4)
13295 				*data = (uint32_t)flash_data;
13296 			break;
13297 		} else {
13298 			/*
13299 			 * If we've gotten here, then things are probably
13300 			 * completely hosed, but if the error condition is
13301 			 * detected, it won't hurt to give it another try...
13302 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
13303 			 */
13304 			if (sc->sc_type >= WM_T_PCH_SPT)
13305 				hsfsts = ICH8_FLASH_READ32(sc,
13306 				    ICH_FLASH_HSFSTS) & 0xffffUL;
13307 			else
13308 				hsfsts = ICH8_FLASH_READ16(sc,
13309 				    ICH_FLASH_HSFSTS);
13310 
13311 			if (hsfsts & HSFSTS_ERR) {
13312 				/* Repeat for some time before giving up. */
13313 				continue;
13314 			} else if ((hsfsts & HSFSTS_DONE) == 0)
13315 				break;
13316 		}
13317 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
13318 
13319 	return error;
13320 }
13321 
13322 /******************************************************************************
13323  * Reads a single byte from the NVM using the ICH8 flash access registers.
13324  *
13325  * sc - pointer to wm_hw structure
13326  * index - The index of the byte to read.
13327  * data - Pointer to a byte to store the value read.
13328  *****************************************************************************/
13329 static int32_t
13330 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
13331 {
13332 	int32_t status;
13333 	uint32_t word = 0;
13334 
13335 	status = wm_read_ich8_data(sc, index, 1, &word);
13336 	if (status == 0)
13337 		*data = (uint8_t)word;
13338 	else
13339 		*data = 0;
13340 
13341 	return status;
13342 }
13343 
13344 /******************************************************************************
13345  * Reads a word from the NVM using the ICH8 flash access registers.
13346  *
13347  * sc - pointer to wm_hw structure
13348  * index - The starting byte index of the word to read.
13349  * data - Pointer to a word to store the value read.
13350  *****************************************************************************/
13351 static int32_t
13352 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
13353 {
13354 	int32_t status;
13355 	uint32_t word = 0;
13356 
13357 	status = wm_read_ich8_data(sc, index, 2, &word);
13358 	if (status == 0)
13359 		*data = (uint16_t)word;
13360 	else
13361 		*data = 0;
13362 
13363 	return status;
13364 }
13365 
13366 /******************************************************************************
13367  * Reads a dword from the NVM using the ICH8 flash access registers.
13368  *
13369  * sc - pointer to wm_hw structure
13370  * index - The starting byte index of the word to read.
13371  * data - Pointer to a word to store the value read.
13372  *****************************************************************************/
13373 static int32_t
13374 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
13375 {
13376 	int32_t status;
13377 
13378 	status = wm_read_ich8_data(sc, index, 4, data);
13379 	return status;
13380 }
13381 
13382 /******************************************************************************
13383  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
13384  * register.
13385  *
13386  * sc - Struct containing variables accessed by shared code
13387  * offset - offset of word in the EEPROM to read
13388  * data - word read from the EEPROM
13389  * words - number of words to read
13390  *****************************************************************************/
13391 static int
13392 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
13393 {
13394 	int32_t	 rv = 0;
13395 	uint32_t flash_bank = 0;
13396 	uint32_t act_offset = 0;
13397 	uint32_t bank_offset = 0;
13398 	uint16_t word = 0;
13399 	uint16_t i = 0;
13400 
13401 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
13402 		device_xname(sc->sc_dev), __func__));
13403 
13404 	if (sc->nvm.acquire(sc) != 0)
13405 		return -1;
13406 
13407 	/*
13408 	 * We need to know which is the valid flash bank.  In the event
13409 	 * that we didn't allocate eeprom_shadow_ram, we may not be
13410 	 * managing flash_bank. So it cannot be trusted and needs
13411 	 * to be updated with each read.
13412 	 */
13413 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
13414 	if (rv) {
13415 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
13416 			device_xname(sc->sc_dev)));
13417 		flash_bank = 0;
13418 	}
13419 
13420 	/*
13421 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
13422 	 * size
13423 	 */
13424 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
13425 
13426 	for (i = 0; i < words; i++) {
13427 		/* The NVM part needs a byte offset, hence * 2 */
13428 		act_offset = bank_offset + ((offset + i) * 2);
13429 		rv = wm_read_ich8_word(sc, act_offset, &word);
13430 		if (rv) {
13431 			aprint_error_dev(sc->sc_dev,
13432 			    "%s: failed to read NVM\n", __func__);
13433 			break;
13434 		}
13435 		data[i] = word;
13436 	}
13437 
13438 	sc->nvm.release(sc);
13439 	return rv;
13440 }
13441 
13442 /******************************************************************************
13443  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
13444  * register.
13445  *
13446  * sc - Struct containing variables accessed by shared code
13447  * offset - offset of word in the EEPROM to read
13448  * data - word read from the EEPROM
13449  * words - number of words to read
13450  *****************************************************************************/
13451 static int
13452 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
13453 {
13454 	int32_t	 rv = 0;
13455 	uint32_t flash_bank = 0;
13456 	uint32_t act_offset = 0;
13457 	uint32_t bank_offset = 0;
13458 	uint32_t dword = 0;
13459 	uint16_t i = 0;
13460 
13461 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
13462 		device_xname(sc->sc_dev), __func__));
13463 
13464 	if (sc->nvm.acquire(sc) != 0)
13465 		return -1;
13466 
13467 	/*
13468 	 * We need to know which is the valid flash bank.  In the event
13469 	 * that we didn't allocate eeprom_shadow_ram, we may not be
13470 	 * managing flash_bank. So it cannot be trusted and needs
13471 	 * to be updated with each read.
13472 	 */
13473 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
13474 	if (rv) {
13475 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
13476 			device_xname(sc->sc_dev)));
13477 		flash_bank = 0;
13478 	}
13479 
13480 	/*
13481 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
13482 	 * size
13483 	 */
13484 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
13485 
13486 	for (i = 0; i < words; i++) {
13487 		/* The NVM part needs a byte offset, hence * 2 */
13488 		act_offset = bank_offset + ((offset + i) * 2);
13489 		/* but we must read dword aligned, so mask ... */
13490 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
13491 		if (rv) {
13492 			aprint_error_dev(sc->sc_dev,
13493 			    "%s: failed to read NVM\n", __func__);
13494 			break;
13495 		}
13496 		/* ... and pick out low or high word */
13497 		if ((act_offset & 0x2) == 0)
13498 			data[i] = (uint16_t)(dword & 0xFFFF);
13499 		else
13500 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
13501 	}
13502 
13503 	sc->nvm.release(sc);
13504 	return rv;
13505 }
13506 
13507 /* iNVM */
13508 
13509 static int
13510 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
13511 {
13512 	int32_t	 rv = 0;
13513 	uint32_t invm_dword;
13514 	uint16_t i;
13515 	uint8_t record_type, word_address;
13516 
13517 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
13518 		device_xname(sc->sc_dev), __func__));
13519 
13520 	for (i = 0; i < INVM_SIZE; i++) {
13521 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
13522 		/* Get record type */
13523 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
13524 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
13525 			break;
13526 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
13527 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
13528 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
13529 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
13530 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
13531 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
13532 			if (word_address == address) {
13533 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
13534 				rv = 0;
13535 				break;
13536 			}
13537 		}
13538 	}
13539 
13540 	return rv;
13541 }
13542 
13543 static int
13544 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
13545 {
13546 	int rv = 0;
13547 	int i;
13548 
13549 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
13550 		device_xname(sc->sc_dev), __func__));
13551 
13552 	if (sc->nvm.acquire(sc) != 0)
13553 		return -1;
13554 
13555 	for (i = 0; i < words; i++) {
13556 		switch (offset + i) {
13557 		case NVM_OFF_MACADDR:
13558 		case NVM_OFF_MACADDR1:
13559 		case NVM_OFF_MACADDR2:
13560 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
13561 			if (rv != 0) {
13562 				data[i] = 0xffff;
13563 				rv = -1;
13564 			}
13565 			break;
13566 		case NVM_OFF_CFG2:
13567 			rv = wm_nvm_read_word_invm(sc, offset, data);
13568 			if (rv != 0) {
13569 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
13570 				rv = 0;
13571 			}
13572 			break;
13573 		case NVM_OFF_CFG4:
13574 			rv = wm_nvm_read_word_invm(sc, offset, data);
13575 			if (rv != 0) {
13576 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
13577 				rv = 0;
13578 			}
13579 			break;
13580 		case NVM_OFF_LED_1_CFG:
13581 			rv = wm_nvm_read_word_invm(sc, offset, data);
13582 			if (rv != 0) {
13583 				*data = NVM_LED_1_CFG_DEFAULT_I211;
13584 				rv = 0;
13585 			}
13586 			break;
13587 		case NVM_OFF_LED_0_2_CFG:
13588 			rv = wm_nvm_read_word_invm(sc, offset, data);
13589 			if (rv != 0) {
13590 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
13591 				rv = 0;
13592 			}
13593 			break;
13594 		case NVM_OFF_ID_LED_SETTINGS:
13595 			rv = wm_nvm_read_word_invm(sc, offset, data);
13596 			if (rv != 0) {
13597 				*data = ID_LED_RESERVED_FFFF;
13598 				rv = 0;
13599 			}
13600 			break;
13601 		default:
13602 			DPRINTF(WM_DEBUG_NVM,
13603 			    ("NVM word 0x%02x is not mapped.\n", offset));
13604 			*data = NVM_RESERVED_WORD;
13605 			break;
13606 		}
13607 	}
13608 
13609 	sc->nvm.release(sc);
13610 	return rv;
13611 }
13612 
13613 /* Lock, detecting NVM type, validate checksum, version and read */
13614 
13615 static int
13616 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
13617 {
13618 	uint32_t eecd = 0;
13619 
13620 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
13621 	    || sc->sc_type == WM_T_82583) {
13622 		eecd = CSR_READ(sc, WMREG_EECD);
13623 
13624 		/* Isolate bits 15 & 16 */
13625 		eecd = ((eecd >> 15) & 0x03);
13626 
13627 		/* If both bits are set, device is Flash type */
13628 		if (eecd == 0x03)
13629 			return 0;
13630 	}
13631 	return 1;
13632 }
13633 
13634 static int
13635 wm_nvm_flash_presence_i210(struct wm_softc *sc)
13636 {
13637 	uint32_t eec;
13638 
13639 	eec = CSR_READ(sc, WMREG_EEC);
13640 	if ((eec & EEC_FLASH_DETECTED) != 0)
13641 		return 1;
13642 
13643 	return 0;
13644 }
13645 
13646 /*
13647  * wm_nvm_validate_checksum
13648  *
13649  * The checksum is defined as the sum of the first 64 (16 bit) words.
13650  */
13651 static int
13652 wm_nvm_validate_checksum(struct wm_softc *sc)
13653 {
13654 	uint16_t checksum;
13655 	uint16_t eeprom_data;
13656 #ifdef WM_DEBUG
13657 	uint16_t csum_wordaddr, valid_checksum;
13658 #endif
13659 	int i;
13660 
13661 	checksum = 0;
13662 
13663 	/* Don't check for I211 */
13664 	if (sc->sc_type == WM_T_I211)
13665 		return 0;
13666 
13667 #ifdef WM_DEBUG
13668 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
13669 	    || (sc->sc_type == WM_T_PCH_CNP)) {
13670 		csum_wordaddr = NVM_OFF_COMPAT;
13671 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
13672 	} else {
13673 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
13674 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
13675 	}
13676 
13677 	/* Dump EEPROM image for debug */
13678 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
13679 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
13680 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
13681 		/* XXX PCH_SPT? */
13682 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
13683 		if ((eeprom_data & valid_checksum) == 0)
13684 			DPRINTF(WM_DEBUG_NVM,
13685 			    ("%s: NVM need to be updated (%04x != %04x)\n",
13686 				device_xname(sc->sc_dev), eeprom_data,
13687 				    valid_checksum));
13688 	}
13689 
13690 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
13691 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
13692 		for (i = 0; i < NVM_SIZE; i++) {
13693 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
13694 				printf("XXXX ");
13695 			else
13696 				printf("%04hx ", eeprom_data);
13697 			if (i % 8 == 7)
13698 				printf("\n");
13699 		}
13700 	}
13701 
13702 #endif /* WM_DEBUG */
13703 
13704 	for (i = 0; i < NVM_SIZE; i++) {
13705 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
13706 			return 1;
13707 		checksum += eeprom_data;
13708 	}
13709 
13710 	if (checksum != (uint16_t) NVM_CHECKSUM) {
13711 #ifdef WM_DEBUG
13712 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
13713 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
13714 #endif
13715 	}
13716 
13717 	return 0;
13718 }
13719 
13720 static void
13721 wm_nvm_version_invm(struct wm_softc *sc)
13722 {
13723 	uint32_t dword;
13724 
13725 	/*
13726 	 * Linux's code to decode version is very strange, so we don't
13727 	 * obey that algorithm and just use word 61 as the document.
13728 	 * Perhaps it's not perfect though...
13729 	 *
13730 	 * Example:
13731 	 *
13732 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
13733 	 */
13734 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
13735 	dword = __SHIFTOUT(dword, INVM_VER_1);
13736 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
13737 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
13738 }
13739 
13740 static void
13741 wm_nvm_version(struct wm_softc *sc)
13742 {
13743 	uint16_t major, minor, build, patch;
13744 	uint16_t uid0, uid1;
13745 	uint16_t nvm_data;
13746 	uint16_t off;
13747 	bool check_version = false;
13748 	bool check_optionrom = false;
13749 	bool have_build = false;
13750 	bool have_uid = true;
13751 
13752 	/*
13753 	 * Version format:
13754 	 *
13755 	 * XYYZ
13756 	 * X0YZ
13757 	 * X0YY
13758 	 *
13759 	 * Example:
13760 	 *
13761 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
13762 	 *	82571	0x50a6	5.10.6?
13763 	 *	82572	0x506a	5.6.10?
13764 	 *	82572EI	0x5069	5.6.9?
13765 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
13766 	 *		0x2013	2.1.3?
13767 	 *	82583	0x10a0	1.10.0? (document says it's default value)
13768 	 * ICH8+82567	0x0040	0.4.0?
13769 	 * ICH9+82566	0x1040	1.4.0?
13770 	 *ICH10+82567	0x0043	0.4.3?
13771 	 *  PCH+82577	0x00c1	0.12.1?
13772 	 * PCH2+82579	0x00d3	0.13.3?
13773 	 *		0x00d4	0.13.4?
13774 	 *  LPT+I218	0x0023	0.2.3?
13775 	 *  SPT+I219	0x0084	0.8.4?
13776 	 *  CNP+I219	0x0054	0.5.4?
13777 	 */
13778 
13779 	/*
13780 	 * XXX
13781 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
13782 	 * I've never seen on real 82574 hardware with such small SPI ROM.
13783 	 */
13784 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
13785 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
13786 		have_uid = false;
13787 
13788 	switch (sc->sc_type) {
13789 	case WM_T_82571:
13790 	case WM_T_82572:
13791 	case WM_T_82574:
13792 	case WM_T_82583:
13793 		check_version = true;
13794 		check_optionrom = true;
13795 		have_build = true;
13796 		break;
13797 	case WM_T_ICH8:
13798 	case WM_T_ICH9:
13799 	case WM_T_ICH10:
13800 	case WM_T_PCH:
13801 	case WM_T_PCH2:
13802 	case WM_T_PCH_LPT:
13803 	case WM_T_PCH_SPT:
13804 	case WM_T_PCH_CNP:
13805 		check_version = true;
13806 		have_build = true;
13807 		have_uid = false;
13808 		break;
13809 	case WM_T_82575:
13810 	case WM_T_82576:
13811 	case WM_T_82580:
13812 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
13813 			check_version = true;
13814 		break;
13815 	case WM_T_I211:
13816 		wm_nvm_version_invm(sc);
13817 		have_uid = false;
13818 		goto printver;
13819 	case WM_T_I210:
13820 		if (!wm_nvm_flash_presence_i210(sc)) {
13821 			wm_nvm_version_invm(sc);
13822 			have_uid = false;
13823 			goto printver;
13824 		}
13825 		/* FALLTHROUGH */
13826 	case WM_T_I350:
13827 	case WM_T_I354:
13828 		check_version = true;
13829 		check_optionrom = true;
13830 		break;
13831 	default:
13832 		return;
13833 	}
13834 	if (check_version
13835 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
13836 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
13837 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
13838 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
13839 			build = nvm_data & NVM_BUILD_MASK;
13840 			have_build = true;
13841 		} else
13842 			minor = nvm_data & 0x00ff;
13843 
13844 		/* Decimal */
13845 		minor = (minor / 16) * 10 + (minor % 16);
13846 		sc->sc_nvm_ver_major = major;
13847 		sc->sc_nvm_ver_minor = minor;
13848 
13849 printver:
13850 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
13851 		    sc->sc_nvm_ver_minor);
13852 		if (have_build) {
13853 			sc->sc_nvm_ver_build = build;
13854 			aprint_verbose(".%d", build);
13855 		}
13856 	}
13857 
13858 	/* Assume the Option ROM area is at avove NVM_SIZE */
13859 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
13860 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
13861 		/* Option ROM Version */
13862 		if ((off != 0x0000) && (off != 0xffff)) {
13863 			int rv;
13864 
13865 			off += NVM_COMBO_VER_OFF;
13866 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
13867 			rv |= wm_nvm_read(sc, off, 1, &uid0);
13868 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
13869 			    && (uid1 != 0) && (uid1 != 0xffff)) {
13870 				/* 16bits */
13871 				major = uid0 >> 8;
13872 				build = (uid0 << 8) | (uid1 >> 8);
13873 				patch = uid1 & 0x00ff;
13874 				aprint_verbose(", option ROM Version %d.%d.%d",
13875 				    major, build, patch);
13876 			}
13877 		}
13878 	}
13879 
13880 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
13881 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
13882 }
13883 
13884 /*
13885  * wm_nvm_read:
13886  *
13887  *	Read data from the serial EEPROM.
13888  */
13889 static int
13890 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
13891 {
13892 	int rv;
13893 
13894 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
13895 		device_xname(sc->sc_dev), __func__));
13896 
13897 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
13898 		return -1;
13899 
13900 	rv = sc->nvm.read(sc, word, wordcnt, data);
13901 
13902 	return rv;
13903 }
13904 
13905 /*
13906  * Hardware semaphores.
13907  * Very complexed...
13908  */
13909 
13910 static int
13911 wm_get_null(struct wm_softc *sc)
13912 {
13913 
13914 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13915 		device_xname(sc->sc_dev), __func__));
13916 	return 0;
13917 }
13918 
13919 static void
13920 wm_put_null(struct wm_softc *sc)
13921 {
13922 
13923 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13924 		device_xname(sc->sc_dev), __func__));
13925 	return;
13926 }
13927 
13928 static int
13929 wm_get_eecd(struct wm_softc *sc)
13930 {
13931 	uint32_t reg;
13932 	int x;
13933 
13934 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
13935 		device_xname(sc->sc_dev), __func__));
13936 
13937 	reg = CSR_READ(sc, WMREG_EECD);
13938 
13939 	/* Request EEPROM access. */
13940 	reg |= EECD_EE_REQ;
13941 	CSR_WRITE(sc, WMREG_EECD, reg);
13942 
13943 	/* ..and wait for it to be granted. */
13944 	for (x = 0; x < 1000; x++) {
13945 		reg = CSR_READ(sc, WMREG_EECD);
13946 		if (reg & EECD_EE_GNT)
13947 			break;
13948 		delay(5);
13949 	}
13950 	if ((reg & EECD_EE_GNT) == 0) {
13951 		aprint_error_dev(sc->sc_dev,
13952 		    "could not acquire EEPROM GNT\n");
13953 		reg &= ~EECD_EE_REQ;
13954 		CSR_WRITE(sc, WMREG_EECD, reg);
13955 		return -1;
13956 	}
13957 
13958 	return 0;
13959 }
13960 
13961 static void
13962 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
13963 {
13964 
13965 	*eecd |= EECD_SK;
13966 	CSR_WRITE(sc, WMREG_EECD, *eecd);
13967 	CSR_WRITE_FLUSH(sc);
13968 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
13969 		delay(1);
13970 	else
13971 		delay(50);
13972 }
13973 
13974 static void
13975 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
13976 {
13977 
13978 	*eecd &= ~EECD_SK;
13979 	CSR_WRITE(sc, WMREG_EECD, *eecd);
13980 	CSR_WRITE_FLUSH(sc);
13981 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
13982 		delay(1);
13983 	else
13984 		delay(50);
13985 }
13986 
13987 static void
13988 wm_put_eecd(struct wm_softc *sc)
13989 {
13990 	uint32_t reg;
13991 
13992 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13993 		device_xname(sc->sc_dev), __func__));
13994 
13995 	/* Stop nvm */
13996 	reg = CSR_READ(sc, WMREG_EECD);
13997 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
13998 		/* Pull CS high */
13999 		reg |= EECD_CS;
14000 		wm_nvm_eec_clock_lower(sc, &reg);
14001 	} else {
14002 		/* CS on Microwire is active-high */
14003 		reg &= ~(EECD_CS | EECD_DI);
14004 		CSR_WRITE(sc, WMREG_EECD, reg);
14005 		wm_nvm_eec_clock_raise(sc, &reg);
14006 		wm_nvm_eec_clock_lower(sc, &reg);
14007 	}
14008 
14009 	reg = CSR_READ(sc, WMREG_EECD);
14010 	reg &= ~EECD_EE_REQ;
14011 	CSR_WRITE(sc, WMREG_EECD, reg);
14012 
14013 	return;
14014 }
14015 
14016 /*
14017  * Get hardware semaphore.
14018  * Same as e1000_get_hw_semaphore_generic()
14019  */
14020 static int
14021 wm_get_swsm_semaphore(struct wm_softc *sc)
14022 {
14023 	int32_t timeout;
14024 	uint32_t swsm;
14025 
14026 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14027 		device_xname(sc->sc_dev), __func__));
14028 	KASSERT(sc->sc_nvm_wordsize > 0);
14029 
14030 retry:
14031 	/* Get the SW semaphore. */
14032 	timeout = sc->sc_nvm_wordsize + 1;
14033 	while (timeout) {
14034 		swsm = CSR_READ(sc, WMREG_SWSM);
14035 
14036 		if ((swsm & SWSM_SMBI) == 0)
14037 			break;
14038 
14039 		delay(50);
14040 		timeout--;
14041 	}
14042 
14043 	if (timeout == 0) {
14044 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
14045 			/*
14046 			 * In rare circumstances, the SW semaphore may already
14047 			 * be held unintentionally. Clear the semaphore once
14048 			 * before giving up.
14049 			 */
14050 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
14051 			wm_put_swsm_semaphore(sc);
14052 			goto retry;
14053 		}
14054 		aprint_error_dev(sc->sc_dev,
14055 		    "could not acquire SWSM SMBI\n");
14056 		return 1;
14057 	}
14058 
14059 	/* Get the FW semaphore. */
14060 	timeout = sc->sc_nvm_wordsize + 1;
14061 	while (timeout) {
14062 		swsm = CSR_READ(sc, WMREG_SWSM);
14063 		swsm |= SWSM_SWESMBI;
14064 		CSR_WRITE(sc, WMREG_SWSM, swsm);
14065 		/* If we managed to set the bit we got the semaphore. */
14066 		swsm = CSR_READ(sc, WMREG_SWSM);
14067 		if (swsm & SWSM_SWESMBI)
14068 			break;
14069 
14070 		delay(50);
14071 		timeout--;
14072 	}
14073 
14074 	if (timeout == 0) {
14075 		aprint_error_dev(sc->sc_dev,
14076 		    "could not acquire SWSM SWESMBI\n");
14077 		/* Release semaphores */
14078 		wm_put_swsm_semaphore(sc);
14079 		return 1;
14080 	}
14081 	return 0;
14082 }
14083 
14084 /*
14085  * Put hardware semaphore.
14086  * Same as e1000_put_hw_semaphore_generic()
14087  */
14088 static void
14089 wm_put_swsm_semaphore(struct wm_softc *sc)
14090 {
14091 	uint32_t swsm;
14092 
14093 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14094 		device_xname(sc->sc_dev), __func__));
14095 
14096 	swsm = CSR_READ(sc, WMREG_SWSM);
14097 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
14098 	CSR_WRITE(sc, WMREG_SWSM, swsm);
14099 }
14100 
14101 /*
14102  * Get SW/FW semaphore.
14103  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
14104  */
14105 static int
14106 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
14107 {
14108 	uint32_t swfw_sync;
14109 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
14110 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
14111 	int timeout;
14112 
14113 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14114 		device_xname(sc->sc_dev), __func__));
14115 
14116 	if (sc->sc_type == WM_T_80003)
14117 		timeout = 50;
14118 	else
14119 		timeout = 200;
14120 
14121 	while (timeout) {
14122 		if (wm_get_swsm_semaphore(sc)) {
14123 			aprint_error_dev(sc->sc_dev,
14124 			    "%s: failed to get semaphore\n",
14125 			    __func__);
14126 			return 1;
14127 		}
14128 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
14129 		if ((swfw_sync & (swmask | fwmask)) == 0) {
14130 			swfw_sync |= swmask;
14131 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
14132 			wm_put_swsm_semaphore(sc);
14133 			return 0;
14134 		}
14135 		wm_put_swsm_semaphore(sc);
14136 		delay(5000);
14137 		timeout--;
14138 	}
14139 	device_printf(sc->sc_dev,
14140 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
14141 	    mask, swfw_sync);
14142 	return 1;
14143 }
14144 
14145 static void
14146 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
14147 {
14148 	uint32_t swfw_sync;
14149 
14150 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14151 		device_xname(sc->sc_dev), __func__));
14152 
14153 	while (wm_get_swsm_semaphore(sc) != 0)
14154 		continue;
14155 
14156 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
14157 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
14158 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
14159 
14160 	wm_put_swsm_semaphore(sc);
14161 }
14162 
14163 static int
14164 wm_get_nvm_80003(struct wm_softc *sc)
14165 {
14166 	int rv;
14167 
14168 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
14169 		device_xname(sc->sc_dev), __func__));
14170 
14171 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
14172 		aprint_error_dev(sc->sc_dev,
14173 		    "%s: failed to get semaphore(SWFW)\n", __func__);
14174 		return rv;
14175 	}
14176 
14177 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
14178 	    && (rv = wm_get_eecd(sc)) != 0) {
14179 		aprint_error_dev(sc->sc_dev,
14180 		    "%s: failed to get semaphore(EECD)\n", __func__);
14181 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
14182 		return rv;
14183 	}
14184 
14185 	return 0;
14186 }
14187 
14188 static void
14189 wm_put_nvm_80003(struct wm_softc *sc)
14190 {
14191 
14192 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14193 		device_xname(sc->sc_dev), __func__));
14194 
14195 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
14196 		wm_put_eecd(sc);
14197 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
14198 }
14199 
14200 static int
14201 wm_get_nvm_82571(struct wm_softc *sc)
14202 {
14203 	int rv;
14204 
14205 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14206 		device_xname(sc->sc_dev), __func__));
14207 
14208 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
14209 		return rv;
14210 
14211 	switch (sc->sc_type) {
14212 	case WM_T_82573:
14213 		break;
14214 	default:
14215 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
14216 			rv = wm_get_eecd(sc);
14217 		break;
14218 	}
14219 
14220 	if (rv != 0) {
14221 		aprint_error_dev(sc->sc_dev,
14222 		    "%s: failed to get semaphore\n",
14223 		    __func__);
14224 		wm_put_swsm_semaphore(sc);
14225 	}
14226 
14227 	return rv;
14228 }
14229 
14230 static void
14231 wm_put_nvm_82571(struct wm_softc *sc)
14232 {
14233 
14234 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14235 		device_xname(sc->sc_dev), __func__));
14236 
14237 	switch (sc->sc_type) {
14238 	case WM_T_82573:
14239 		break;
14240 	default:
14241 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
14242 			wm_put_eecd(sc);
14243 		break;
14244 	}
14245 
14246 	wm_put_swsm_semaphore(sc);
14247 }
14248 
14249 static int
14250 wm_get_phy_82575(struct wm_softc *sc)
14251 {
14252 
14253 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14254 		device_xname(sc->sc_dev), __func__));
14255 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
14256 }
14257 
14258 static void
14259 wm_put_phy_82575(struct wm_softc *sc)
14260 {
14261 
14262 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14263 		device_xname(sc->sc_dev), __func__));
14264 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
14265 }
14266 
14267 static int
14268 wm_get_swfwhw_semaphore(struct wm_softc *sc)
14269 {
14270 	uint32_t ext_ctrl;
14271 	int timeout = 200;
14272 
14273 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14274 		device_xname(sc->sc_dev), __func__));
14275 
14276 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
14277 	for (timeout = 0; timeout < 200; timeout++) {
14278 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14279 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
14280 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14281 
14282 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14283 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
14284 			return 0;
14285 		delay(5000);
14286 	}
14287 	device_printf(sc->sc_dev,
14288 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
14289 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
14290 	return 1;
14291 }
14292 
14293 static void
14294 wm_put_swfwhw_semaphore(struct wm_softc *sc)
14295 {
14296 	uint32_t ext_ctrl;
14297 
14298 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14299 		device_xname(sc->sc_dev), __func__));
14300 
14301 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14302 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
14303 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14304 
14305 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
14306 }
14307 
14308 static int
14309 wm_get_swflag_ich8lan(struct wm_softc *sc)
14310 {
14311 	uint32_t ext_ctrl;
14312 	int timeout;
14313 
14314 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14315 		device_xname(sc->sc_dev), __func__));
14316 	mutex_enter(sc->sc_ich_phymtx);
14317 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
14318 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14319 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
14320 			break;
14321 		delay(1000);
14322 	}
14323 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
14324 		device_printf(sc->sc_dev,
14325 		    "SW has already locked the resource\n");
14326 		goto out;
14327 	}
14328 
14329 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
14330 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14331 	for (timeout = 0; timeout < 1000; timeout++) {
14332 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14333 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
14334 			break;
14335 		delay(1000);
14336 	}
14337 	if (timeout >= 1000) {
14338 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
14339 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
14340 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14341 		goto out;
14342 	}
14343 	return 0;
14344 
14345 out:
14346 	mutex_exit(sc->sc_ich_phymtx);
14347 	return 1;
14348 }
14349 
14350 static void
14351 wm_put_swflag_ich8lan(struct wm_softc *sc)
14352 {
14353 	uint32_t ext_ctrl;
14354 
14355 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14356 		device_xname(sc->sc_dev), __func__));
14357 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14358 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
14359 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
14360 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14361 	} else {
14362 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
14363 	}
14364 
14365 	mutex_exit(sc->sc_ich_phymtx);
14366 }
14367 
14368 static int
14369 wm_get_nvm_ich8lan(struct wm_softc *sc)
14370 {
14371 
14372 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14373 		device_xname(sc->sc_dev), __func__));
14374 	mutex_enter(sc->sc_ich_nvmmtx);
14375 
14376 	return 0;
14377 }
14378 
14379 static void
14380 wm_put_nvm_ich8lan(struct wm_softc *sc)
14381 {
14382 
14383 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14384 		device_xname(sc->sc_dev), __func__));
14385 	mutex_exit(sc->sc_ich_nvmmtx);
14386 }
14387 
14388 static int
14389 wm_get_hw_semaphore_82573(struct wm_softc *sc)
14390 {
14391 	int i = 0;
14392 	uint32_t reg;
14393 
14394 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14395 		device_xname(sc->sc_dev), __func__));
14396 
14397 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
14398 	do {
14399 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
14400 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
14401 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
14402 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
14403 			break;
14404 		delay(2*1000);
14405 		i++;
14406 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
14407 
14408 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
14409 		wm_put_hw_semaphore_82573(sc);
14410 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
14411 		    device_xname(sc->sc_dev));
14412 		return -1;
14413 	}
14414 
14415 	return 0;
14416 }
14417 
14418 static void
14419 wm_put_hw_semaphore_82573(struct wm_softc *sc)
14420 {
14421 	uint32_t reg;
14422 
14423 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14424 		device_xname(sc->sc_dev), __func__));
14425 
14426 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
14427 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
14428 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
14429 }
14430 
14431 /*
14432  * Management mode and power management related subroutines.
14433  * BMC, AMT, suspend/resume and EEE.
14434  */
14435 
14436 #ifdef WM_WOL
14437 static int
14438 wm_check_mng_mode(struct wm_softc *sc)
14439 {
14440 	int rv;
14441 
14442 	switch (sc->sc_type) {
14443 	case WM_T_ICH8:
14444 	case WM_T_ICH9:
14445 	case WM_T_ICH10:
14446 	case WM_T_PCH:
14447 	case WM_T_PCH2:
14448 	case WM_T_PCH_LPT:
14449 	case WM_T_PCH_SPT:
14450 	case WM_T_PCH_CNP:
14451 		rv = wm_check_mng_mode_ich8lan(sc);
14452 		break;
14453 	case WM_T_82574:
14454 	case WM_T_82583:
14455 		rv = wm_check_mng_mode_82574(sc);
14456 		break;
14457 	case WM_T_82571:
14458 	case WM_T_82572:
14459 	case WM_T_82573:
14460 	case WM_T_80003:
14461 		rv = wm_check_mng_mode_generic(sc);
14462 		break;
14463 	default:
14464 		/* Noting to do */
14465 		rv = 0;
14466 		break;
14467 	}
14468 
14469 	return rv;
14470 }
14471 
14472 static int
14473 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
14474 {
14475 	uint32_t fwsm;
14476 
14477 	fwsm = CSR_READ(sc, WMREG_FWSM);
14478 
14479 	if (((fwsm & FWSM_FW_VALID) != 0)
14480 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
14481 		return 1;
14482 
14483 	return 0;
14484 }
14485 
14486 static int
14487 wm_check_mng_mode_82574(struct wm_softc *sc)
14488 {
14489 	uint16_t data;
14490 
14491 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
14492 
14493 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
14494 		return 1;
14495 
14496 	return 0;
14497 }
14498 
14499 static int
14500 wm_check_mng_mode_generic(struct wm_softc *sc)
14501 {
14502 	uint32_t fwsm;
14503 
14504 	fwsm = CSR_READ(sc, WMREG_FWSM);
14505 
14506 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
14507 		return 1;
14508 
14509 	return 0;
14510 }
14511 #endif /* WM_WOL */
14512 
14513 static int
14514 wm_enable_mng_pass_thru(struct wm_softc *sc)
14515 {
14516 	uint32_t manc, fwsm, factps;
14517 
14518 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
14519 		return 0;
14520 
14521 	manc = CSR_READ(sc, WMREG_MANC);
14522 
14523 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
14524 		device_xname(sc->sc_dev), manc));
14525 	if ((manc & MANC_RECV_TCO_EN) == 0)
14526 		return 0;
14527 
14528 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
14529 		fwsm = CSR_READ(sc, WMREG_FWSM);
14530 		factps = CSR_READ(sc, WMREG_FACTPS);
14531 		if (((factps & FACTPS_MNGCG) == 0)
14532 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
14533 			return 1;
14534 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
14535 		uint16_t data;
14536 
14537 		factps = CSR_READ(sc, WMREG_FACTPS);
14538 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
14539 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
14540 			device_xname(sc->sc_dev), factps, data));
14541 		if (((factps & FACTPS_MNGCG) == 0)
14542 		    && ((data & NVM_CFG2_MNGM_MASK)
14543 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
14544 			return 1;
14545 	} else if (((manc & MANC_SMBUS_EN) != 0)
14546 	    && ((manc & MANC_ASF_EN) == 0))
14547 		return 1;
14548 
14549 	return 0;
14550 }
14551 
14552 static bool
14553 wm_phy_resetisblocked(struct wm_softc *sc)
14554 {
14555 	bool blocked = false;
14556 	uint32_t reg;
14557 	int i = 0;
14558 
14559 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14560 		device_xname(sc->sc_dev), __func__));
14561 
14562 	switch (sc->sc_type) {
14563 	case WM_T_ICH8:
14564 	case WM_T_ICH9:
14565 	case WM_T_ICH10:
14566 	case WM_T_PCH:
14567 	case WM_T_PCH2:
14568 	case WM_T_PCH_LPT:
14569 	case WM_T_PCH_SPT:
14570 	case WM_T_PCH_CNP:
14571 		do {
14572 			reg = CSR_READ(sc, WMREG_FWSM);
14573 			if ((reg & FWSM_RSPCIPHY) == 0) {
14574 				blocked = true;
14575 				delay(10*1000);
14576 				continue;
14577 			}
14578 			blocked = false;
14579 		} while (blocked && (i++ < 30));
14580 		return blocked;
14581 		break;
14582 	case WM_T_82571:
14583 	case WM_T_82572:
14584 	case WM_T_82573:
14585 	case WM_T_82574:
14586 	case WM_T_82583:
14587 	case WM_T_80003:
14588 		reg = CSR_READ(sc, WMREG_MANC);
14589 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
14590 			return true;
14591 		else
14592 			return false;
14593 		break;
14594 	default:
14595 		/* No problem */
14596 		break;
14597 	}
14598 
14599 	return false;
14600 }
14601 
14602 static void
14603 wm_get_hw_control(struct wm_softc *sc)
14604 {
14605 	uint32_t reg;
14606 
14607 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14608 		device_xname(sc->sc_dev), __func__));
14609 
14610 	if (sc->sc_type == WM_T_82573) {
14611 		reg = CSR_READ(sc, WMREG_SWSM);
14612 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
14613 	} else if (sc->sc_type >= WM_T_82571) {
14614 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
14615 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
14616 	}
14617 }
14618 
14619 static void
14620 wm_release_hw_control(struct wm_softc *sc)
14621 {
14622 	uint32_t reg;
14623 
14624 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14625 		device_xname(sc->sc_dev), __func__));
14626 
14627 	if (sc->sc_type == WM_T_82573) {
14628 		reg = CSR_READ(sc, WMREG_SWSM);
14629 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
14630 	} else if (sc->sc_type >= WM_T_82571) {
14631 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
14632 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
14633 	}
14634 }
14635 
14636 static void
14637 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
14638 {
14639 	uint32_t reg;
14640 
14641 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14642 		device_xname(sc->sc_dev), __func__));
14643 
14644 	if (sc->sc_type < WM_T_PCH2)
14645 		return;
14646 
14647 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
14648 
14649 	if (gate)
14650 		reg |= EXTCNFCTR_GATE_PHY_CFG;
14651 	else
14652 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
14653 
14654 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
14655 }
14656 
14657 static int
14658 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
14659 {
14660 	uint32_t fwsm, reg;
14661 	int rv = 0;
14662 
14663 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14664 		device_xname(sc->sc_dev), __func__));
14665 
14666 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
14667 	wm_gate_hw_phy_config_ich8lan(sc, true);
14668 
14669 	/* Disable ULP */
14670 	wm_ulp_disable(sc);
14671 
14672 	/* Acquire PHY semaphore */
14673 	rv = sc->phy.acquire(sc);
14674 	if (rv != 0) {
14675 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
14676 		device_xname(sc->sc_dev), __func__));
14677 		return -1;
14678 	}
14679 
14680 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
14681 	 * inaccessible and resetting the PHY is not blocked, toggle the
14682 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
14683 	 */
14684 	fwsm = CSR_READ(sc, WMREG_FWSM);
14685 	switch (sc->sc_type) {
14686 	case WM_T_PCH_LPT:
14687 	case WM_T_PCH_SPT:
14688 	case WM_T_PCH_CNP:
14689 		if (wm_phy_is_accessible_pchlan(sc))
14690 			break;
14691 
14692 		/* Before toggling LANPHYPC, see if PHY is accessible by
14693 		 * forcing MAC to SMBus mode first.
14694 		 */
14695 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
14696 		reg |= CTRL_EXT_FORCE_SMBUS;
14697 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
14698 #if 0
14699 		/* XXX Isn't this required??? */
14700 		CSR_WRITE_FLUSH(sc);
14701 #endif
14702 		/* Wait 50 milliseconds for MAC to finish any retries
14703 		 * that it might be trying to perform from previous
14704 		 * attempts to acknowledge any phy read requests.
14705 		 */
14706 		delay(50 * 1000);
14707 		/* FALLTHROUGH */
14708 	case WM_T_PCH2:
14709 		if (wm_phy_is_accessible_pchlan(sc) == true)
14710 			break;
14711 		/* FALLTHROUGH */
14712 	case WM_T_PCH:
14713 		if (sc->sc_type == WM_T_PCH)
14714 			if ((fwsm & FWSM_FW_VALID) != 0)
14715 				break;
14716 
14717 		if (wm_phy_resetisblocked(sc) == true) {
14718 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
14719 			break;
14720 		}
14721 
14722 		/* Toggle LANPHYPC Value bit */
14723 		wm_toggle_lanphypc_pch_lpt(sc);
14724 
14725 		if (sc->sc_type >= WM_T_PCH_LPT) {
14726 			if (wm_phy_is_accessible_pchlan(sc) == true)
14727 				break;
14728 
14729 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
14730 			 * so ensure that the MAC is also out of SMBus mode
14731 			 */
14732 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
14733 			reg &= ~CTRL_EXT_FORCE_SMBUS;
14734 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
14735 
14736 			if (wm_phy_is_accessible_pchlan(sc) == true)
14737 				break;
14738 			rv = -1;
14739 		}
14740 		break;
14741 	default:
14742 		break;
14743 	}
14744 
14745 	/* Release semaphore */
14746 	sc->phy.release(sc);
14747 
14748 	if (rv == 0) {
14749 		/* Check to see if able to reset PHY.  Print error if not */
14750 		if (wm_phy_resetisblocked(sc)) {
14751 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
14752 			goto out;
14753 		}
14754 
14755 		/* Reset the PHY before any access to it.  Doing so, ensures
14756 		 * that the PHY is in a known good state before we read/write
14757 		 * PHY registers.  The generic reset is sufficient here,
14758 		 * because we haven't determined the PHY type yet.
14759 		 */
14760 		if (wm_reset_phy(sc) != 0)
14761 			goto out;
14762 
14763 		/* On a successful reset, possibly need to wait for the PHY
14764 		 * to quiesce to an accessible state before returning control
14765 		 * to the calling function.  If the PHY does not quiesce, then
14766 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
14767 		 *  the PHY is in.
14768 		 */
14769 		if (wm_phy_resetisblocked(sc))
14770 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
14771 	}
14772 
14773 out:
14774 	/* Ungate automatic PHY configuration on non-managed 82579 */
14775 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
14776 		delay(10*1000);
14777 		wm_gate_hw_phy_config_ich8lan(sc, false);
14778 	}
14779 
14780 	return 0;
14781 }
14782 
14783 static void
14784 wm_init_manageability(struct wm_softc *sc)
14785 {
14786 
14787 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14788 		device_xname(sc->sc_dev), __func__));
14789 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
14790 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
14791 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
14792 
14793 		/* Disable hardware interception of ARP */
14794 		manc &= ~MANC_ARP_EN;
14795 
14796 		/* Enable receiving management packets to the host */
14797 		if (sc->sc_type >= WM_T_82571) {
14798 			manc |= MANC_EN_MNG2HOST;
14799 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
14800 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
14801 		}
14802 
14803 		CSR_WRITE(sc, WMREG_MANC, manc);
14804 	}
14805 }
14806 
14807 static void
14808 wm_release_manageability(struct wm_softc *sc)
14809 {
14810 
14811 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
14812 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
14813 
14814 		manc |= MANC_ARP_EN;
14815 		if (sc->sc_type >= WM_T_82571)
14816 			manc &= ~MANC_EN_MNG2HOST;
14817 
14818 		CSR_WRITE(sc, WMREG_MANC, manc);
14819 	}
14820 }
14821 
14822 static void
14823 wm_get_wakeup(struct wm_softc *sc)
14824 {
14825 
14826 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
14827 	switch (sc->sc_type) {
14828 	case WM_T_82573:
14829 	case WM_T_82583:
14830 		sc->sc_flags |= WM_F_HAS_AMT;
14831 		/* FALLTHROUGH */
14832 	case WM_T_80003:
14833 	case WM_T_82575:
14834 	case WM_T_82576:
14835 	case WM_T_82580:
14836 	case WM_T_I350:
14837 	case WM_T_I354:
14838 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
14839 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
14840 		/* FALLTHROUGH */
14841 	case WM_T_82541:
14842 	case WM_T_82541_2:
14843 	case WM_T_82547:
14844 	case WM_T_82547_2:
14845 	case WM_T_82571:
14846 	case WM_T_82572:
14847 	case WM_T_82574:
14848 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
14849 		break;
14850 	case WM_T_ICH8:
14851 	case WM_T_ICH9:
14852 	case WM_T_ICH10:
14853 	case WM_T_PCH:
14854 	case WM_T_PCH2:
14855 	case WM_T_PCH_LPT:
14856 	case WM_T_PCH_SPT:
14857 	case WM_T_PCH_CNP:
14858 		sc->sc_flags |= WM_F_HAS_AMT;
14859 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
14860 		break;
14861 	default:
14862 		break;
14863 	}
14864 
14865 	/* 1: HAS_MANAGE */
14866 	if (wm_enable_mng_pass_thru(sc) != 0)
14867 		sc->sc_flags |= WM_F_HAS_MANAGE;
14868 
14869 	/*
14870 	 * Note that the WOL flags is set after the resetting of the eeprom
14871 	 * stuff
14872 	 */
14873 }
14874 
14875 /*
14876  * Unconfigure Ultra Low Power mode.
14877  * Only for I217 and newer (see below).
14878  */
14879 static int
14880 wm_ulp_disable(struct wm_softc *sc)
14881 {
14882 	uint32_t reg;
14883 	uint16_t phyreg;
14884 	int i = 0, rv = 0;
14885 
14886 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14887 		device_xname(sc->sc_dev), __func__));
14888 	/* Exclude old devices */
14889 	if ((sc->sc_type < WM_T_PCH_LPT)
14890 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
14891 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
14892 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
14893 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
14894 		return 0;
14895 
14896 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
14897 		/* Request ME un-configure ULP mode in the PHY */
14898 		reg = CSR_READ(sc, WMREG_H2ME);
14899 		reg &= ~H2ME_ULP;
14900 		reg |= H2ME_ENFORCE_SETTINGS;
14901 		CSR_WRITE(sc, WMREG_H2ME, reg);
14902 
14903 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
14904 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
14905 			if (i++ == 30) {
14906 				device_printf(sc->sc_dev, "%s timed out\n",
14907 				    __func__);
14908 				return -1;
14909 			}
14910 			delay(10 * 1000);
14911 		}
14912 		reg = CSR_READ(sc, WMREG_H2ME);
14913 		reg &= ~H2ME_ENFORCE_SETTINGS;
14914 		CSR_WRITE(sc, WMREG_H2ME, reg);
14915 
14916 		return 0;
14917 	}
14918 
14919 	/* Acquire semaphore */
14920 	rv = sc->phy.acquire(sc);
14921 	if (rv != 0) {
14922 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
14923 		device_xname(sc->sc_dev), __func__));
14924 		return -1;
14925 	}
14926 
14927 	/* Toggle LANPHYPC */
14928 	wm_toggle_lanphypc_pch_lpt(sc);
14929 
14930 	/* Unforce SMBus mode in PHY */
14931 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
14932 	if (rv != 0) {
14933 		uint32_t reg2;
14934 
14935 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
14936 			__func__);
14937 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
14938 		reg2 |= CTRL_EXT_FORCE_SMBUS;
14939 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
14940 		delay(50 * 1000);
14941 
14942 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
14943 		    &phyreg);
14944 		if (rv != 0)
14945 			goto release;
14946 	}
14947 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
14948 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
14949 
14950 	/* Unforce SMBus mode in MAC */
14951 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
14952 	reg &= ~CTRL_EXT_FORCE_SMBUS;
14953 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
14954 
14955 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
14956 	if (rv != 0)
14957 		goto release;
14958 	phyreg |= HV_PM_CTRL_K1_ENA;
14959 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
14960 
14961 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
14962 		&phyreg);
14963 	if (rv != 0)
14964 		goto release;
14965 	phyreg &= ~(I218_ULP_CONFIG1_IND
14966 	    | I218_ULP_CONFIG1_STICKY_ULP
14967 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
14968 	    | I218_ULP_CONFIG1_WOL_HOST
14969 	    | I218_ULP_CONFIG1_INBAND_EXIT
14970 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
14971 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
14972 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
14973 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
14974 	phyreg |= I218_ULP_CONFIG1_START;
14975 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
14976 
14977 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
14978 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
14979 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
14980 
14981 release:
14982 	/* Release semaphore */
14983 	sc->phy.release(sc);
14984 	wm_gmii_reset(sc);
14985 	delay(50 * 1000);
14986 
14987 	return rv;
14988 }
14989 
14990 /* WOL in the newer chipset interfaces (pchlan) */
14991 static int
14992 wm_enable_phy_wakeup(struct wm_softc *sc)
14993 {
14994 	device_t dev = sc->sc_dev;
14995 	uint32_t mreg, moff;
14996 	uint16_t wuce, wuc, wufc, preg;
14997 	int i, rv;
14998 
14999 	KASSERT(sc->sc_type >= WM_T_PCH);
15000 
15001 	/* Copy MAC RARs to PHY RARs */
15002 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
15003 
15004 	/* Activate PHY wakeup */
15005 	rv = sc->phy.acquire(sc);
15006 	if (rv != 0) {
15007 		device_printf(dev, "%s: failed to acquire semaphore\n",
15008 		    __func__);
15009 		return rv;
15010 	}
15011 
15012 	/*
15013 	 * Enable access to PHY wakeup registers.
15014 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
15015 	 */
15016 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
15017 	if (rv != 0) {
15018 		device_printf(dev,
15019 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
15020 		goto release;
15021 	}
15022 
15023 	/* Copy MAC MTA to PHY MTA */
15024 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
15025 		uint16_t lo, hi;
15026 
15027 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
15028 		lo = (uint16_t)(mreg & 0xffff);
15029 		hi = (uint16_t)((mreg >> 16) & 0xffff);
15030 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
15031 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
15032 	}
15033 
15034 	/* Configure PHY Rx Control register */
15035 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
15036 	mreg = CSR_READ(sc, WMREG_RCTL);
15037 	if (mreg & RCTL_UPE)
15038 		preg |= BM_RCTL_UPE;
15039 	if (mreg & RCTL_MPE)
15040 		preg |= BM_RCTL_MPE;
15041 	preg &= ~(BM_RCTL_MO_MASK);
15042 	moff = __SHIFTOUT(mreg, RCTL_MO);
15043 	if (moff != 0)
15044 		preg |= moff << BM_RCTL_MO_SHIFT;
15045 	if (mreg & RCTL_BAM)
15046 		preg |= BM_RCTL_BAM;
15047 	if (mreg & RCTL_PMCF)
15048 		preg |= BM_RCTL_PMCF;
15049 	mreg = CSR_READ(sc, WMREG_CTRL);
15050 	if (mreg & CTRL_RFCE)
15051 		preg |= BM_RCTL_RFCE;
15052 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
15053 
15054 	wuc = WUC_APME | WUC_PME_EN;
15055 	wufc = WUFC_MAG;
15056 	/* Enable PHY wakeup in MAC register */
15057 	CSR_WRITE(sc, WMREG_WUC,
15058 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
15059 	CSR_WRITE(sc, WMREG_WUFC, wufc);
15060 
15061 	/* Configure and enable PHY wakeup in PHY registers */
15062 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
15063 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
15064 
15065 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
15066 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
15067 
15068 release:
15069 	sc->phy.release(sc);
15070 
15071 	return 0;
15072 }
15073 
15074 /* Power down workaround on D3 */
15075 static void
15076 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
15077 {
15078 	uint32_t reg;
15079 	uint16_t phyreg;
15080 	int i;
15081 
15082 	for (i = 0; i < 2; i++) {
15083 		/* Disable link */
15084 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
15085 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
15086 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
15087 
15088 		/*
15089 		 * Call gig speed drop workaround on Gig disable before
15090 		 * accessing any PHY registers
15091 		 */
15092 		if (sc->sc_type == WM_T_ICH8)
15093 			wm_gig_downshift_workaround_ich8lan(sc);
15094 
15095 		/* Write VR power-down enable */
15096 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
15097 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
15098 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
15099 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
15100 
15101 		/* Read it back and test */
15102 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
15103 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
15104 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
15105 			break;
15106 
15107 		/* Issue PHY reset and repeat at most one more time */
15108 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
15109 	}
15110 }
15111 
15112 /*
15113  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
15114  *  @sc: pointer to the HW structure
15115  *
15116  *  During S0 to Sx transition, it is possible the link remains at gig
15117  *  instead of negotiating to a lower speed.  Before going to Sx, set
15118  *  'Gig Disable' to force link speed negotiation to a lower speed based on
15119  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
15120  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
15121  *  needs to be written.
15122  *  Parts that support (and are linked to a partner which support) EEE in
15123  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
15124  *  than 10Mbps w/o EEE.
15125  */
15126 static void
15127 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
15128 {
15129 	device_t dev = sc->sc_dev;
15130 	struct ethercom *ec = &sc->sc_ethercom;
15131 	uint32_t phy_ctrl;
15132 	int rv;
15133 
15134 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
15135 	phy_ctrl |= PHY_CTRL_GBE_DIS;
15136 
15137 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
15138 
15139 	if (sc->sc_phytype == WMPHY_I217) {
15140 		uint16_t devid = sc->sc_pcidevid;
15141 
15142 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
15143 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
15144 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
15145 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
15146 		    (sc->sc_type >= WM_T_PCH_SPT))
15147 			CSR_WRITE(sc, WMREG_FEXTNVM6,
15148 			    CSR_READ(sc, WMREG_FEXTNVM6)
15149 			    & ~FEXTNVM6_REQ_PLL_CLK);
15150 
15151 		if (sc->phy.acquire(sc) != 0)
15152 			goto out;
15153 
15154 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
15155 			uint16_t eee_advert;
15156 
15157 			rv = wm_read_emi_reg_locked(dev,
15158 			    I217_EEE_ADVERTISEMENT, &eee_advert);
15159 			if (rv)
15160 				goto release;
15161 
15162 			/*
15163 			 * Disable LPLU if both link partners support 100BaseT
15164 			 * EEE and 100Full is advertised on both ends of the
15165 			 * link, and enable Auto Enable LPI since there will
15166 			 * be no driver to enable LPI while in Sx.
15167 			 */
15168 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
15169 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
15170 				uint16_t anar, phy_reg;
15171 
15172 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
15173 				    &anar);
15174 				if (anar & ANAR_TX_FD) {
15175 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
15176 					    PHY_CTRL_NOND0A_LPLU);
15177 
15178 					/* Set Auto Enable LPI after link up */
15179 					sc->phy.readreg_locked(dev, 2,
15180 					    I217_LPI_GPIO_CTRL, &phy_reg);
15181 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
15182 					sc->phy.writereg_locked(dev, 2,
15183 					    I217_LPI_GPIO_CTRL, phy_reg);
15184 				}
15185 			}
15186 		}
15187 
15188 		/*
15189 		 * For i217 Intel Rapid Start Technology support,
15190 		 * when the system is going into Sx and no manageability engine
15191 		 * is present, the driver must configure proxy to reset only on
15192 		 * power good.	LPI (Low Power Idle) state must also reset only
15193 		 * on power good, as well as the MTA (Multicast table array).
15194 		 * The SMBus release must also be disabled on LCD reset.
15195 		 */
15196 
15197 		/*
15198 		 * Enable MTA to reset for Intel Rapid Start Technology
15199 		 * Support
15200 		 */
15201 
15202 release:
15203 		sc->phy.release(sc);
15204 	}
15205 out:
15206 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
15207 
15208 	if (sc->sc_type == WM_T_ICH8)
15209 		wm_gig_downshift_workaround_ich8lan(sc);
15210 
15211 	if (sc->sc_type >= WM_T_PCH) {
15212 		wm_oem_bits_config_ich8lan(sc, false);
15213 
15214 		/* Reset PHY to activate OEM bits on 82577/8 */
15215 		if (sc->sc_type == WM_T_PCH)
15216 			wm_reset_phy(sc);
15217 
15218 		if (sc->phy.acquire(sc) != 0)
15219 			return;
15220 		wm_write_smbus_addr(sc);
15221 		sc->phy.release(sc);
15222 	}
15223 }
15224 
15225 /*
15226  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
15227  *  @sc: pointer to the HW structure
15228  *
15229  *  During Sx to S0 transitions on non-managed devices or managed devices
15230  *  on which PHY resets are not blocked, if the PHY registers cannot be
15231  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
15232  *  the PHY.
15233  *  On i217, setup Intel Rapid Start Technology.
15234  */
15235 static int
15236 wm_resume_workarounds_pchlan(struct wm_softc *sc)
15237 {
15238 	device_t dev = sc->sc_dev;
15239 	int rv;
15240 
15241 	if (sc->sc_type < WM_T_PCH2)
15242 		return 0;
15243 
15244 	rv = wm_init_phy_workarounds_pchlan(sc);
15245 	if (rv != 0)
15246 		return -1;
15247 
15248 	/* For i217 Intel Rapid Start Technology support when the system
15249 	 * is transitioning from Sx and no manageability engine is present
15250 	 * configure SMBus to restore on reset, disable proxy, and enable
15251 	 * the reset on MTA (Multicast table array).
15252 	 */
15253 	if (sc->sc_phytype == WMPHY_I217) {
15254 		uint16_t phy_reg;
15255 
15256 		if (sc->phy.acquire(sc) != 0)
15257 			return -1;
15258 
15259 		/* Clear Auto Enable LPI after link up */
15260 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
15261 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
15262 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
15263 
15264 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
15265 			/* Restore clear on SMB if no manageability engine
15266 			 * is present
15267 			 */
15268 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
15269 			    &phy_reg);
15270 			if (rv != 0)
15271 				goto release;
15272 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
15273 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
15274 
15275 			/* Disable Proxy */
15276 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
15277 		}
15278 		/* Enable reset on MTA */
15279 		sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
15280 		if (rv != 0)
15281 			goto release;
15282 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
15283 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
15284 
15285 release:
15286 		sc->phy.release(sc);
15287 		return rv;
15288 	}
15289 
15290 	return 0;
15291 }
15292 
15293 static void
15294 wm_enable_wakeup(struct wm_softc *sc)
15295 {
15296 	uint32_t reg, pmreg;
15297 	pcireg_t pmode;
15298 	int rv = 0;
15299 
15300 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
15301 		device_xname(sc->sc_dev), __func__));
15302 
15303 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
15304 	    &pmreg, NULL) == 0)
15305 		return;
15306 
15307 	if ((sc->sc_flags & WM_F_WOL) == 0)
15308 		goto pme;
15309 
15310 	/* Advertise the wakeup capability */
15311 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
15312 	    | CTRL_SWDPIN(3));
15313 
15314 	/* Keep the laser running on fiber adapters */
15315 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
15316 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
15317 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
15318 		reg |= CTRL_EXT_SWDPIN(3);
15319 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15320 	}
15321 
15322 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
15323 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
15324 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
15325 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
15326 		wm_suspend_workarounds_ich8lan(sc);
15327 
15328 #if 0	/* For the multicast packet */
15329 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
15330 	reg |= WUFC_MC;
15331 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
15332 #endif
15333 
15334 	if (sc->sc_type >= WM_T_PCH) {
15335 		rv = wm_enable_phy_wakeup(sc);
15336 		if (rv != 0)
15337 			goto pme;
15338 	} else {
15339 		/* Enable wakeup by the MAC */
15340 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
15341 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
15342 	}
15343 
15344 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
15345 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
15346 		|| (sc->sc_type == WM_T_PCH2))
15347 	    && (sc->sc_phytype == WMPHY_IGP_3))
15348 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
15349 
15350 pme:
15351 	/* Request PME */
15352 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
15353 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
15354 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
15355 		/* For WOL */
15356 		pmode |= PCI_PMCSR_PME_EN;
15357 	} else {
15358 		/* Disable WOL */
15359 		pmode &= ~PCI_PMCSR_PME_EN;
15360 	}
15361 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
15362 }
15363 
15364 /* Disable ASPM L0s and/or L1 for workaround */
15365 static void
15366 wm_disable_aspm(struct wm_softc *sc)
15367 {
15368 	pcireg_t reg, mask = 0;
15369 	unsigned const char *str = "";
15370 
15371 	/*
15372 	 *  Only for PCIe device which has PCIe capability in the PCI config
15373 	 * space.
15374 	 */
15375 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
15376 		return;
15377 
15378 	switch (sc->sc_type) {
15379 	case WM_T_82571:
15380 	case WM_T_82572:
15381 		/*
15382 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
15383 		 * State Power management L1 State (ASPM L1).
15384 		 */
15385 		mask = PCIE_LCSR_ASPM_L1;
15386 		str = "L1 is";
15387 		break;
15388 	case WM_T_82573:
15389 	case WM_T_82574:
15390 	case WM_T_82583:
15391 		/*
15392 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
15393 		 *
15394 		 * The 82574 and 82583 does not support PCIe ASPM L0s with
15395 		 * some chipset.  The document of 82574 and 82583 says that
15396 		 * disabling L0s with some specific chipset is sufficient,
15397 		 * but we follow as of the Intel em driver does.
15398 		 *
15399 		 * References:
15400 		 * Errata 8 of the Specification Update of i82573.
15401 		 * Errata 20 of the Specification Update of i82574.
15402 		 * Errata 9 of the Specification Update of i82583.
15403 		 */
15404 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
15405 		str = "L0s and L1 are";
15406 		break;
15407 	default:
15408 		return;
15409 	}
15410 
15411 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
15412 	    sc->sc_pcixe_capoff + PCIE_LCSR);
15413 	reg &= ~mask;
15414 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
15415 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
15416 
15417 	/* Print only in wm_attach() */
15418 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
15419 		aprint_verbose_dev(sc->sc_dev,
15420 		    "ASPM %s disabled to workaround the errata.\n", str);
15421 }
15422 
15423 /* LPLU */
15424 
15425 static void
15426 wm_lplu_d0_disable(struct wm_softc *sc)
15427 {
15428 	struct mii_data *mii = &sc->sc_mii;
15429 	uint32_t reg;
15430 	uint16_t phyval;
15431 
15432 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
15433 		device_xname(sc->sc_dev), __func__));
15434 
15435 	if (sc->sc_phytype == WMPHY_IFE)
15436 		return;
15437 
15438 	switch (sc->sc_type) {
15439 	case WM_T_82571:
15440 	case WM_T_82572:
15441 	case WM_T_82573:
15442 	case WM_T_82575:
15443 	case WM_T_82576:
15444 		mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, &phyval);
15445 		phyval &= ~PMR_D0_LPLU;
15446 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, phyval);
15447 		break;
15448 	case WM_T_82580:
15449 	case WM_T_I350:
15450 	case WM_T_I210:
15451 	case WM_T_I211:
15452 		reg = CSR_READ(sc, WMREG_PHPM);
15453 		reg &= ~PHPM_D0A_LPLU;
15454 		CSR_WRITE(sc, WMREG_PHPM, reg);
15455 		break;
15456 	case WM_T_82574:
15457 	case WM_T_82583:
15458 	case WM_T_ICH8:
15459 	case WM_T_ICH9:
15460 	case WM_T_ICH10:
15461 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
15462 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
15463 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
15464 		CSR_WRITE_FLUSH(sc);
15465 		break;
15466 	case WM_T_PCH:
15467 	case WM_T_PCH2:
15468 	case WM_T_PCH_LPT:
15469 	case WM_T_PCH_SPT:
15470 	case WM_T_PCH_CNP:
15471 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
15472 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
15473 		if (wm_phy_resetisblocked(sc) == false)
15474 			phyval |= HV_OEM_BITS_ANEGNOW;
15475 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
15476 		break;
15477 	default:
15478 		break;
15479 	}
15480 }
15481 
15482 /* EEE */
15483 
15484 static int
15485 wm_set_eee_i350(struct wm_softc *sc)
15486 {
15487 	struct ethercom *ec = &sc->sc_ethercom;
15488 	uint32_t ipcnfg, eeer;
15489 	uint32_t ipcnfg_mask
15490 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
15491 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
15492 
15493 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
15494 
15495 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
15496 	eeer = CSR_READ(sc, WMREG_EEER);
15497 
15498 	/* Enable or disable per user setting */
15499 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
15500 		ipcnfg |= ipcnfg_mask;
15501 		eeer |= eeer_mask;
15502 	} else {
15503 		ipcnfg &= ~ipcnfg_mask;
15504 		eeer &= ~eeer_mask;
15505 	}
15506 
15507 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
15508 	CSR_WRITE(sc, WMREG_EEER, eeer);
15509 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
15510 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
15511 
15512 	return 0;
15513 }
15514 
15515 static int
15516 wm_set_eee_pchlan(struct wm_softc *sc)
15517 {
15518 	device_t dev = sc->sc_dev;
15519 	struct ethercom *ec = &sc->sc_ethercom;
15520 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
15521 	int rv = 0;
15522 
15523 	switch (sc->sc_phytype) {
15524 	case WMPHY_82579:
15525 		lpa = I82579_EEE_LP_ABILITY;
15526 		pcs_status = I82579_EEE_PCS_STATUS;
15527 		adv_addr = I82579_EEE_ADVERTISEMENT;
15528 		break;
15529 	case WMPHY_I217:
15530 		lpa = I217_EEE_LP_ABILITY;
15531 		pcs_status = I217_EEE_PCS_STATUS;
15532 		adv_addr = I217_EEE_ADVERTISEMENT;
15533 		break;
15534 	default:
15535 		return 0;
15536 	}
15537 
15538 	if (sc->phy.acquire(sc)) {
15539 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
15540 		return 0;
15541 	}
15542 
15543 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
15544 	if (rv != 0)
15545 		goto release;
15546 
15547 	/* Clear bits that enable EEE in various speeds */
15548 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
15549 
15550 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
15551 		/* Save off link partner's EEE ability */
15552 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
15553 		if (rv != 0)
15554 			goto release;
15555 
15556 		/* Read EEE advertisement */
15557 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
15558 			goto release;
15559 
15560 		/*
15561 		 * Enable EEE only for speeds in which the link partner is
15562 		 * EEE capable and for which we advertise EEE.
15563 		 */
15564 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
15565 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
15566 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
15567 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
15568 			if ((data & ANLPAR_TX_FD) != 0)
15569 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
15570 			else {
15571 				/*
15572 				 * EEE is not supported in 100Half, so ignore
15573 				 * partner's EEE in 100 ability if full-duplex
15574 				 * is not advertised.
15575 				 */
15576 				sc->eee_lp_ability
15577 				    &= ~AN_EEEADVERT_100_TX;
15578 			}
15579 		}
15580 	}
15581 
15582 	if (sc->sc_phytype == WMPHY_82579) {
15583 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
15584 		if (rv != 0)
15585 			goto release;
15586 
15587 		data &= ~I82579_LPI_PLL_SHUT_100;
15588 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
15589 	}
15590 
15591 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
15592 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
15593 		goto release;
15594 
15595 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
15596 release:
15597 	sc->phy.release(sc);
15598 
15599 	return rv;
15600 }
15601 
15602 static int
15603 wm_set_eee(struct wm_softc *sc)
15604 {
15605 	struct ethercom *ec = &sc->sc_ethercom;
15606 
15607 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
15608 		return 0;
15609 
15610 	if (sc->sc_type == WM_T_I354) {
15611 		/* I354 uses an external PHY */
15612 		return 0; /* not yet */
15613 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
15614 		return wm_set_eee_i350(sc);
15615 	else if (sc->sc_type >= WM_T_PCH2)
15616 		return wm_set_eee_pchlan(sc);
15617 
15618 	return 0;
15619 }
15620 
15621 /*
15622  * Workarounds (mainly PHY related).
15623  * Basically, PHY's workarounds are in the PHY drivers.
15624  */
15625 
15626 /* Work-around for 82566 Kumeran PCS lock loss */
15627 static int
15628 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
15629 {
15630 	struct mii_data *mii = &sc->sc_mii;
15631 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
15632 	int i, reg, rv;
15633 	uint16_t phyreg;
15634 
15635 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
15636 		device_xname(sc->sc_dev), __func__));
15637 
15638 	/* If the link is not up, do nothing */
15639 	if ((status & STATUS_LU) == 0)
15640 		return 0;
15641 
15642 	/* Nothing to do if the link is other than 1Gbps */
15643 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
15644 		return 0;
15645 
15646 	for (i = 0; i < 10; i++) {
15647 		/* read twice */
15648 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
15649 		if (rv != 0)
15650 			return rv;
15651 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
15652 		if (rv != 0)
15653 			return rv;
15654 
15655 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
15656 			goto out;	/* GOOD! */
15657 
15658 		/* Reset the PHY */
15659 		wm_reset_phy(sc);
15660 		delay(5*1000);
15661 	}
15662 
15663 	/* Disable GigE link negotiation */
15664 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
15665 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
15666 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
15667 
15668 	/*
15669 	 * Call gig speed drop workaround on Gig disable before accessing
15670 	 * any PHY registers.
15671 	 */
15672 	wm_gig_downshift_workaround_ich8lan(sc);
15673 
15674 out:
15675 	return 0;
15676 }
15677 
15678 /*
15679  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
15680  *  @sc: pointer to the HW structure
15681  *
15682  *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
15683  *  LPLU, Gig disable, MDIC PHY reset):
15684  *    1) Set Kumeran Near-end loopback
15685  *    2) Clear Kumeran Near-end loopback
15686  *  Should only be called for ICH8[m] devices with any 1G Phy.
15687  */
15688 static void
15689 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
15690 {
15691 	uint16_t kmreg;
15692 
15693 	/* Only for igp3 */
15694 	if (sc->sc_phytype == WMPHY_IGP_3) {
15695 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
15696 			return;
15697 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
15698 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
15699 			return;
15700 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
15701 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
15702 	}
15703 }
15704 
15705 /*
15706  * Workaround for pch's PHYs
15707  * XXX should be moved to new PHY driver?
15708  */
15709 static int
15710 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
15711 {
15712 	device_t dev = sc->sc_dev;
15713 	struct mii_data *mii = &sc->sc_mii;
15714 	struct mii_softc *child;
15715 	uint16_t phy_data, phyrev = 0;
15716 	int phytype = sc->sc_phytype;
15717 	int rv;
15718 
15719 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
15720 		device_xname(dev), __func__));
15721 	KASSERT(sc->sc_type == WM_T_PCH);
15722 
15723 	/* Set MDIO slow mode before any other MDIO access */
15724 	if (phytype == WMPHY_82577)
15725 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
15726 			return rv;
15727 
15728 	child = LIST_FIRST(&mii->mii_phys);
15729 	if (child != NULL)
15730 		phyrev = child->mii_mpd_rev;
15731 
15732 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
15733 	if ((child != NULL) &&
15734 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
15735 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
15736 		/* Disable generation of early preamble (0x4431) */
15737 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
15738 		    &phy_data);
15739 		if (rv != 0)
15740 			return rv;
15741 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
15742 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
15743 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
15744 		    phy_data);
15745 		if (rv != 0)
15746 			return rv;
15747 
15748 		/* Preamble tuning for SSC */
15749 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
15750 		if (rv != 0)
15751 			return rv;
15752 	}
15753 
15754 	/* 82578 */
15755 	if (phytype == WMPHY_82578) {
15756 		/*
15757 		 * Return registers to default by doing a soft reset then
15758 		 * writing 0x3140 to the control register
15759 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
15760 		 */
15761 		if ((child != NULL) && (phyrev < 2)) {
15762 			PHY_RESET(child);
15763 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
15764 			if (rv != 0)
15765 				return rv;
15766 		}
15767 	}
15768 
15769 	/* Select page 0 */
15770 	if ((rv = sc->phy.acquire(sc)) != 0)
15771 		return rv;
15772 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT, 0);
15773 	sc->phy.release(sc);
15774 	if (rv != 0)
15775 		return rv;
15776 
15777 	/*
15778 	 * Configure the K1 Si workaround during phy reset assuming there is
15779 	 * link so that it disables K1 if link is in 1Gbps.
15780 	 */
15781 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
15782 		return rv;
15783 
15784 	/* Workaround for link disconnects on a busy hub in half duplex */
15785 	rv = sc->phy.acquire(sc);
15786 	if (rv)
15787 		return rv;
15788 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
15789 	if (rv)
15790 		goto release;
15791 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
15792 	    phy_data & 0x00ff);
15793 	if (rv)
15794 		goto release;
15795 
15796 	/* Set MSE higher to enable link to stay up when noise is high */
15797 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
15798 release:
15799 	sc->phy.release(sc);
15800 
15801 	return rv;
15802 }
15803 
15804 /*
15805  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
15806  *  @sc:   pointer to the HW structure
15807  */
15808 static void
15809 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
15810 {
15811 	device_t dev = sc->sc_dev;
15812 	uint32_t mac_reg;
15813 	uint16_t i, wuce;
15814 	int count;
15815 
15816 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
15817 		device_xname(sc->sc_dev), __func__));
15818 
15819 	if (sc->phy.acquire(sc) != 0)
15820 		return;
15821 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
15822 		goto release;
15823 
15824 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
15825 	count = wm_rar_count(sc);
15826 	for (i = 0; i < count; i++) {
15827 		uint16_t lo, hi;
15828 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
15829 		lo = (uint16_t)(mac_reg & 0xffff);
15830 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
15831 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
15832 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
15833 
15834 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
15835 		lo = (uint16_t)(mac_reg & 0xffff);
15836 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
15837 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
15838 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
15839 	}
15840 
15841 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
15842 
15843 release:
15844 	sc->phy.release(sc);
15845 }
15846 
15847 /*
15848  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
15849  *  done after every PHY reset.
15850  */
15851 static int
15852 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
15853 {
15854 	device_t dev = sc->sc_dev;
15855 	int rv;
15856 
15857 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
15858 		device_xname(dev), __func__));
15859 	KASSERT(sc->sc_type == WM_T_PCH2);
15860 
15861 	/* Set MDIO slow mode before any other MDIO access */
15862 	rv = wm_set_mdio_slow_mode_hv(sc);
15863 	if (rv != 0)
15864 		return rv;
15865 
15866 	rv = sc->phy.acquire(sc);
15867 	if (rv != 0)
15868 		return rv;
15869 	/* Set MSE higher to enable link to stay up when noise is high */
15870 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
15871 	if (rv != 0)
15872 		goto release;
15873 	/* Drop link after 5 times MSE threshold was reached */
15874 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
15875 release:
15876 	sc->phy.release(sc);
15877 
15878 	return rv;
15879 }
15880 
15881 /**
15882  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
15883  *  @link: link up bool flag
15884  *
15885  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
15886  *  preventing further DMA write requests.  Workaround the issue by disabling
15887  *  the de-assertion of the clock request when in 1Gpbs mode.
15888  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
15889  *  speeds in order to avoid Tx hangs.
15890  **/
15891 static int
15892 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
15893 {
15894 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
15895 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
15896 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
15897 	uint16_t phyreg;
15898 
15899 	if (link && (speed == STATUS_SPEED_1000)) {
15900 		sc->phy.acquire(sc);
15901 		int rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
15902 		    &phyreg);
15903 		if (rv != 0)
15904 			goto release;
15905 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
15906 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
15907 		if (rv != 0)
15908 			goto release;
15909 		delay(20);
15910 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
15911 
15912 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
15913 		    &phyreg);
15914 release:
15915 		sc->phy.release(sc);
15916 		return rv;
15917 	}
15918 
15919 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
15920 
15921 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
15922 	if (((child != NULL) && (child->mii_mpd_rev > 5))
15923 	    || !link
15924 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
15925 		goto update_fextnvm6;
15926 
15927 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
15928 
15929 	/* Clear link status transmit timeout */
15930 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
15931 	if (speed == STATUS_SPEED_100) {
15932 		/* Set inband Tx timeout to 5x10us for 100Half */
15933 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
15934 
15935 		/* Do not extend the K1 entry latency for 100Half */
15936 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
15937 	} else {
15938 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
15939 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
15940 
15941 		/* Extend the K1 entry latency for 10 Mbps */
15942 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
15943 	}
15944 
15945 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
15946 
15947 update_fextnvm6:
15948 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
15949 	return 0;
15950 }
15951 
15952 /*
15953  *  wm_k1_gig_workaround_hv - K1 Si workaround
15954  *  @sc:   pointer to the HW structure
15955  *  @link: link up bool flag
15956  *
15957  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
15958  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig
15959  *  If link is down, the function will restore the default K1 setting located
15960  *  in the NVM.
15961  */
15962 static int
15963 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
15964 {
15965 	int k1_enable = sc->sc_nvm_k1_enabled;
15966 
15967 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
15968 		device_xname(sc->sc_dev), __func__));
15969 
15970 	if (sc->phy.acquire(sc) != 0)
15971 		return -1;
15972 
15973 	if (link) {
15974 		k1_enable = 0;
15975 
15976 		/* Link stall fix for link up */
15977 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
15978 		    0x0100);
15979 	} else {
15980 		/* Link stall fix for link down */
15981 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
15982 		    0x4100);
15983 	}
15984 
15985 	wm_configure_k1_ich8lan(sc, k1_enable);
15986 	sc->phy.release(sc);
15987 
15988 	return 0;
15989 }
15990 
15991 /*
15992  *  wm_k1_workaround_lv - K1 Si workaround
15993  *  @sc:   pointer to the HW structure
15994  *
15995  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
15996  *  Disable K1 for 1000 and 100 speeds
15997  */
15998 static int
15999 wm_k1_workaround_lv(struct wm_softc *sc)
16000 {
16001 	uint32_t reg;
16002 	uint16_t phyreg;
16003 	int rv;
16004 
16005 	if (sc->sc_type != WM_T_PCH2)
16006 		return 0;
16007 
16008 	/* Set K1 beacon duration based on 10Mbps speed */
16009 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
16010 	if (rv != 0)
16011 		return rv;
16012 
16013 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
16014 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
16015 		if (phyreg &
16016 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
16017 			/* LV 1G/100 Packet drop issue wa  */
16018 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
16019 			    &phyreg);
16020 			if (rv != 0)
16021 				return rv;
16022 			phyreg &= ~HV_PM_CTRL_K1_ENA;
16023 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
16024 			    phyreg);
16025 			if (rv != 0)
16026 				return rv;
16027 		} else {
16028 			/* For 10Mbps */
16029 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
16030 			reg &= ~FEXTNVM4_BEACON_DURATION;
16031 			reg |= FEXTNVM4_BEACON_DURATION_16US;
16032 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
16033 		}
16034 	}
16035 
16036 	return 0;
16037 }
16038 
16039 /*
16040  *  wm_link_stall_workaround_hv - Si workaround
16041  *  @sc: pointer to the HW structure
16042  *
16043  *  This function works around a Si bug where the link partner can get
16044  *  a link up indication before the PHY does. If small packets are sent
16045  *  by the link partner they can be placed in the packet buffer without
16046  *  being properly accounted for by the PHY and will stall preventing
16047  *  further packets from being received.  The workaround is to clear the
16048  *  packet buffer after the PHY detects link up.
16049  */
16050 static int
16051 wm_link_stall_workaround_hv(struct wm_softc *sc)
16052 {
16053 	uint16_t phyreg;
16054 
16055 	if (sc->sc_phytype != WMPHY_82578)
16056 		return 0;
16057 
16058 	/* Do not apply workaround if in PHY loopback bit 14 set */
16059 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
16060 	if ((phyreg & BMCR_LOOP) != 0)
16061 		return 0;
16062 
16063 	/* Check if link is up and at 1Gbps */
16064 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
16065 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
16066 	    | BM_CS_STATUS_SPEED_MASK;
16067 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
16068 		| BM_CS_STATUS_SPEED_1000))
16069 		return 0;
16070 
16071 	delay(200 * 1000);	/* XXX too big */
16072 
16073 	/* Flush the packets in the fifo buffer */
16074 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
16075 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
16076 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
16077 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
16078 
16079 	return 0;
16080 }
16081 
16082 static int
16083 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
16084 {
16085 	int rv;
16086 	uint16_t reg;
16087 
16088 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
16089 	if (rv != 0)
16090 		return rv;
16091 
16092 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
16093 	    reg | HV_KMRN_MDIO_SLOW);
16094 }
16095 
16096 /*
16097  *  wm_configure_k1_ich8lan - Configure K1 power state
16098  *  @sc: pointer to the HW structure
16099  *  @enable: K1 state to configure
16100  *
16101  *  Configure the K1 power state based on the provided parameter.
16102  *  Assumes semaphore already acquired.
16103  */
16104 static void
16105 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
16106 {
16107 	uint32_t ctrl, ctrl_ext, tmp;
16108 	uint16_t kmreg;
16109 	int rv;
16110 
16111 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
16112 
16113 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
16114 	if (rv != 0)
16115 		return;
16116 
16117 	if (k1_enable)
16118 		kmreg |= KUMCTRLSTA_K1_ENABLE;
16119 	else
16120 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
16121 
16122 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
16123 	if (rv != 0)
16124 		return;
16125 
16126 	delay(20);
16127 
16128 	ctrl = CSR_READ(sc, WMREG_CTRL);
16129 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
16130 
16131 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
16132 	tmp |= CTRL_FRCSPD;
16133 
16134 	CSR_WRITE(sc, WMREG_CTRL, tmp);
16135 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
16136 	CSR_WRITE_FLUSH(sc);
16137 	delay(20);
16138 
16139 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
16140 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
16141 	CSR_WRITE_FLUSH(sc);
16142 	delay(20);
16143 
16144 	return;
16145 }
16146 
16147 /* special case - for 82575 - need to do manual init ... */
16148 static void
16149 wm_reset_init_script_82575(struct wm_softc *sc)
16150 {
16151 	/*
16152 	 * Remark: this is untested code - we have no board without EEPROM
16153 	 *  same setup as mentioned int the FreeBSD driver for the i82575
16154 	 */
16155 
16156 	/* SerDes configuration via SERDESCTRL */
16157 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
16158 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
16159 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
16160 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
16161 
16162 	/* CCM configuration via CCMCTL register */
16163 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
16164 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
16165 
16166 	/* PCIe lanes configuration */
16167 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
16168 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
16169 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
16170 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
16171 
16172 	/* PCIe PLL Configuration */
16173 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
16174 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
16175 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
16176 }
16177 
16178 static void
16179 wm_reset_mdicnfg_82580(struct wm_softc *sc)
16180 {
16181 	uint32_t reg;
16182 	uint16_t nvmword;
16183 	int rv;
16184 
16185 	if (sc->sc_type != WM_T_82580)
16186 		return;
16187 	if ((sc->sc_flags & WM_F_SGMII) == 0)
16188 		return;
16189 
16190 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
16191 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
16192 	if (rv != 0) {
16193 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
16194 		    __func__);
16195 		return;
16196 	}
16197 
16198 	reg = CSR_READ(sc, WMREG_MDICNFG);
16199 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
16200 		reg |= MDICNFG_DEST;
16201 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
16202 		reg |= MDICNFG_COM_MDIO;
16203 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
16204 }
16205 
16206 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
16207 
16208 static bool
16209 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
16210 {
16211 	uint32_t reg;
16212 	uint16_t id1, id2;
16213 	int i, rv;
16214 
16215 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
16216 		device_xname(sc->sc_dev), __func__));
16217 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
16218 
16219 	id1 = id2 = 0xffff;
16220 	for (i = 0; i < 2; i++) {
16221 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
16222 		    &id1);
16223 		if ((rv != 0) || MII_INVALIDID(id1))
16224 			continue;
16225 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
16226 		    &id2);
16227 		if ((rv != 0) || MII_INVALIDID(id2))
16228 			continue;
16229 		break;
16230 	}
16231 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
16232 		goto out;
16233 
16234 	/*
16235 	 * In case the PHY needs to be in mdio slow mode,
16236 	 * set slow mode and try to get the PHY id again.
16237 	 */
16238 	rv = 0;
16239 	if (sc->sc_type < WM_T_PCH_LPT) {
16240 		sc->phy.release(sc);
16241 		wm_set_mdio_slow_mode_hv(sc);
16242 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
16243 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
16244 		sc->phy.acquire(sc);
16245 	}
16246 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
16247 		device_printf(sc->sc_dev, "XXX return with false\n");
16248 		return false;
16249 	}
16250 out:
16251 	if (sc->sc_type >= WM_T_PCH_LPT) {
16252 		/* Only unforce SMBus if ME is not active */
16253 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
16254 			uint16_t phyreg;
16255 
16256 			/* Unforce SMBus mode in PHY */
16257 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
16258 			    CV_SMB_CTRL, &phyreg);
16259 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
16260 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
16261 			    CV_SMB_CTRL, phyreg);
16262 
16263 			/* Unforce SMBus mode in MAC */
16264 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
16265 			reg &= ~CTRL_EXT_FORCE_SMBUS;
16266 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
16267 		}
16268 	}
16269 	return true;
16270 }
16271 
16272 static void
16273 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
16274 {
16275 	uint32_t reg;
16276 	int i;
16277 
16278 	/* Set PHY Config Counter to 50msec */
16279 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
16280 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
16281 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
16282 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
16283 
16284 	/* Toggle LANPHYPC */
16285 	reg = CSR_READ(sc, WMREG_CTRL);
16286 	reg |= CTRL_LANPHYPC_OVERRIDE;
16287 	reg &= ~CTRL_LANPHYPC_VALUE;
16288 	CSR_WRITE(sc, WMREG_CTRL, reg);
16289 	CSR_WRITE_FLUSH(sc);
16290 	delay(1000);
16291 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
16292 	CSR_WRITE(sc, WMREG_CTRL, reg);
16293 	CSR_WRITE_FLUSH(sc);
16294 
16295 	if (sc->sc_type < WM_T_PCH_LPT)
16296 		delay(50 * 1000);
16297 	else {
16298 		i = 20;
16299 
16300 		do {
16301 			delay(5 * 1000);
16302 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
16303 		    && i--);
16304 
16305 		delay(30 * 1000);
16306 	}
16307 }
16308 
16309 static int
16310 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
16311 {
16312 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
16313 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
16314 	uint32_t rxa;
16315 	uint16_t scale = 0, lat_enc = 0;
16316 	int32_t obff_hwm = 0;
16317 	int64_t lat_ns, value;
16318 
16319 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
16320 		device_xname(sc->sc_dev), __func__));
16321 
16322 	if (link) {
16323 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
16324 		uint32_t status;
16325 		uint16_t speed;
16326 		pcireg_t preg;
16327 
16328 		status = CSR_READ(sc, WMREG_STATUS);
16329 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
16330 		case STATUS_SPEED_10:
16331 			speed = 10;
16332 			break;
16333 		case STATUS_SPEED_100:
16334 			speed = 100;
16335 			break;
16336 		case STATUS_SPEED_1000:
16337 			speed = 1000;
16338 			break;
16339 		default:
16340 			device_printf(sc->sc_dev, "Unknown speed "
16341 			    "(status = %08x)\n", status);
16342 			return -1;
16343 		}
16344 
16345 		/* Rx Packet Buffer Allocation size (KB) */
16346 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
16347 
16348 		/*
16349 		 * Determine the maximum latency tolerated by the device.
16350 		 *
16351 		 * Per the PCIe spec, the tolerated latencies are encoded as
16352 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
16353 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
16354 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
16355 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
16356 		 */
16357 		lat_ns = ((int64_t)rxa * 1024 -
16358 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
16359 			+ ETHER_HDR_LEN))) * 8 * 1000;
16360 		if (lat_ns < 0)
16361 			lat_ns = 0;
16362 		else
16363 			lat_ns /= speed;
16364 		value = lat_ns;
16365 
16366 		while (value > LTRV_VALUE) {
16367 			scale ++;
16368 			value = howmany(value, __BIT(5));
16369 		}
16370 		if (scale > LTRV_SCALE_MAX) {
16371 			device_printf(sc->sc_dev,
16372 			    "Invalid LTR latency scale %d\n", scale);
16373 			return -1;
16374 		}
16375 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
16376 
16377 		/* Determine the maximum latency tolerated by the platform */
16378 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
16379 		    WM_PCI_LTR_CAP_LPT);
16380 		max_snoop = preg & 0xffff;
16381 		max_nosnoop = preg >> 16;
16382 
16383 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
16384 
16385 		if (lat_enc > max_ltr_enc) {
16386 			lat_enc = max_ltr_enc;
16387 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
16388 			    * PCI_LTR_SCALETONS(
16389 				    __SHIFTOUT(lat_enc,
16390 					PCI_LTR_MAXSNOOPLAT_SCALE));
16391 		}
16392 
16393 		if (lat_ns) {
16394 			lat_ns *= speed * 1000;
16395 			lat_ns /= 8;
16396 			lat_ns /= 1000000000;
16397 			obff_hwm = (int32_t)(rxa - lat_ns);
16398 		}
16399 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
16400 			device_printf(sc->sc_dev, "Invalid high water mark %d"
16401 			    "(rxa = %d, lat_ns = %d)\n",
16402 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
16403 			return -1;
16404 		}
16405 	}
16406 	/* Snoop and No-Snoop latencies the same */
16407 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
16408 	CSR_WRITE(sc, WMREG_LTRV, reg);
16409 
16410 	/* Set OBFF high water mark */
16411 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
16412 	reg |= obff_hwm;
16413 	CSR_WRITE(sc, WMREG_SVT, reg);
16414 
16415 	/* Enable OBFF */
16416 	reg = CSR_READ(sc, WMREG_SVCR);
16417 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
16418 	CSR_WRITE(sc, WMREG_SVCR, reg);
16419 
16420 	return 0;
16421 }
16422 
16423 /*
16424  * I210 Errata 25 and I211 Errata 10
16425  * Slow System Clock.
16426  */
16427 static int
16428 wm_pll_workaround_i210(struct wm_softc *sc)
16429 {
16430 	uint32_t mdicnfg, wuc;
16431 	uint32_t reg;
16432 	pcireg_t pcireg;
16433 	uint32_t pmreg;
16434 	uint16_t nvmword, tmp_nvmword;
16435 	uint16_t phyval;
16436 	bool wa_done = false;
16437 	int i, rv = 0;
16438 
16439 	/* Get Power Management cap offset */
16440 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
16441 	    &pmreg, NULL) == 0)
16442 		return -1;
16443 
16444 	/* Save WUC and MDICNFG registers */
16445 	wuc = CSR_READ(sc, WMREG_WUC);
16446 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
16447 
16448 	reg = mdicnfg & ~MDICNFG_DEST;
16449 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
16450 
16451 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
16452 		nvmword = INVM_DEFAULT_AL;
16453 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
16454 
16455 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
16456 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
16457 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
16458 
16459 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
16460 			rv = 0;
16461 			break; /* OK */
16462 		} else
16463 			rv = -1;
16464 
16465 		wa_done = true;
16466 		/* Directly reset the internal PHY */
16467 		reg = CSR_READ(sc, WMREG_CTRL);
16468 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
16469 
16470 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
16471 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
16472 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
16473 
16474 		CSR_WRITE(sc, WMREG_WUC, 0);
16475 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
16476 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
16477 
16478 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
16479 		    pmreg + PCI_PMCSR);
16480 		pcireg |= PCI_PMCSR_STATE_D3;
16481 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
16482 		    pmreg + PCI_PMCSR, pcireg);
16483 		delay(1000);
16484 		pcireg &= ~PCI_PMCSR_STATE_D3;
16485 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
16486 		    pmreg + PCI_PMCSR, pcireg);
16487 
16488 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
16489 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
16490 
16491 		/* Restore WUC register */
16492 		CSR_WRITE(sc, WMREG_WUC, wuc);
16493 	}
16494 
16495 	/* Restore MDICNFG setting */
16496 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
16497 	if (wa_done)
16498 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
16499 	return rv;
16500 }
16501 
16502 static void
16503 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
16504 {
16505 	uint32_t reg;
16506 
16507 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
16508 		device_xname(sc->sc_dev), __func__));
16509 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
16510 	    || (sc->sc_type == WM_T_PCH_CNP));
16511 
16512 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
16513 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
16514 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
16515 
16516 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
16517 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
16518 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
16519 }
16520