/*	$NetBSD: if_wm.c,v 1.767 2022/12/08 08:14:28 knakahara Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- Tx multi-queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.767 2022/12/08 08:14:28 knakahara Exp $");

#ifdef _KERNEL_OPT
#include "opt_if_wm.h"
#endif

#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/interrupt.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/pcq.h>
#include <sys/queue.h>
#include <sys/rndsource.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/workqueue.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>
#include <dev/mii/makphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)

#if 0
#define WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
	WM_DEBUG_LOCK
#endif

#define	DPRINTF(sc, x, y)			  \
	do {					  \
		if ((sc)->sc_debug & (x))	  \
			printf y;		  \
	} while (0)
#else
#define	DPRINTF(sc, x, y)	__nothing
#endif /* WM_DEBUG */
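
/*
 * Illustrative DPRINTF usage (a sketch, not compiled): output is gated
 * per-softc by the debug bits above, so a message prints only when the
 * matching bit is set in sc->sc_debug.  Note the extra parentheses
 * around the printf(9) argument list, which the macro requires:
 *
 *	DPRINTF(sc, WM_DEBUG_LINK,
 *	    ("%s: link state changed\n", device_xname(sc->sc_dev)));
 */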

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * Maximum number of interrupts this driver supports.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
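
/*
 * A minimal sketch of the ring arithmetic above: because WM_NTXDESC()
 * and WM_TXQUEUELEN() are powers of two, masking with (size - 1)
 * implements the ring wrap without a modulo.  E.g. with
 * txq->txq_ndesc == 4096:
 *
 *	WM_NEXTTX(txq, 4094) == 4095
 *	WM_NEXTTX(txq, 4095) == 0	(wraps to the start of the ring)
 */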

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal-sized
 * packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
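
/*
 * Worked example for the sizing comment above, assuming a 9018-byte
 * jumbo frame and MCLBYTES == 2048 (both typical, but configuration
 * dependent): one such frame spans howmany(9018, 2048) == 5 Rx
 * buffers, so 256 descriptors hold 256 / 5 == 51 in-flight jumbo
 * frames, roughly the "50 jumbo packets" figure cited above.
 */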

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
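
/*
 * Sketch of how the descriptor geometry ties together.  txq_descsize
 * is chosen at attach time as either sizeof(wiseman_txdesc_t) or
 * sizeof(nq_txdesc_t) depending on the MAC generation (an assumption
 * of this example; the allocation code below makes the actual choice),
 * and WM_CDTXOFF() then yields the byte offset of descriptor x within
 * the DMA'd control block:
 *
 *	bus_size_t off = WM_CDTXOFF(txq, 10);
 *	    == (txq)->txq_descsize * 10
 */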

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map.  For packets which fill more than one buffer, we
 * chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
#if !defined(WM_EVENT_COUNTERS)
#define WM_EVENT_COUNTERS 1
#endif
#endif

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				 \
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname)
#endif /* WM_EVENT_COUNTERS */
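
/*
 * For reference, a sketch of what the event-counter macros above expand
 * to; token pasting builds both the name buffer and the evcnt member:
 *
 *	WM_Q_EVCNT_DEFINE(txq, tso)
 *	    -> char txq_tso_evcnt_name[...];
 *	       struct evcnt txq_ev_tso;
 *
 *	WM_Q_MISC_EVCNT_ATTACH(txq, tso, q, 0, xname)
 *	    -> snprintf((q)->txq_tso_evcnt_name, ..., "txq00tso");
 *	       evcnt_attach_dynamic(&(q)->txq_ev_tso, EVCNT_TYPE_MISC,
 *	           NULL, xname, (q)->txq_tso_evcnt_name);
 */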

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE		0x1
#define	WM_TXQ_LINKDOWN_DISCARD	0x2

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t	txq_last_hw_cmd;
	uint8_t		txq_last_hw_fields;
	uint16_t	txq_last_hw_ipcs;
	uint16_t	txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall);   /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall);   /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall); /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw);	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe);	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum);	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum);	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6);	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso);	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6);	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain);    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop);    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop);   /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg); /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag);	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun);   /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext); /* Tx skip wrong cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
	char sysctlname[32];		/* name for sysctl */

	bool wmq_txrx_use_workqueue;
	bool wmq_wq_enqueued;
	struct work wmq_cookie;
	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* Ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * Interrupt cookies.
					 * - legacy and MSI use sc_ihs[0] only
					 * - MSI-X uses sc_ihs[0] to
					 *   sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and MSI use sc_intrs[0] only;
					 * MSI-X uses sc_intrs[0] to
					 *   sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* >= WM_T_82542_2_1 */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */

	struct evcnt sc_ev_crcerrs;	/* CRC Error */
	struct evcnt sc_ev_algnerrc;	/* Alignment Error */
	struct evcnt sc_ev_symerrc;	/* Symbol Error */
	struct evcnt sc_ev_rxerrc;	/* Receive Error */
	struct evcnt sc_ev_mpc;		/* Missed Packets */
	struct evcnt sc_ev_colc;	/* Collision */
	struct evcnt sc_ev_sec;		/* Sequence Error */
	struct evcnt sc_ev_cexterr;	/* Carrier Extension Error */
	struct evcnt sc_ev_rlec;	/* Receive Length Error */
	struct evcnt sc_ev_scc;		/* Single Collision */
	struct evcnt sc_ev_ecol;	/* Excessive Collision */
	struct evcnt sc_ev_mcc;		/* Multiple Collision */
	struct evcnt sc_ev_latecol;	/* Late Collision */
	struct evcnt sc_ev_dc;		/* Defer */
	struct evcnt sc_ev_gprc;	/* Good Packets Rx */
	struct evcnt sc_ev_bprc;	/* Broadcast Packets Rx */
	struct evcnt sc_ev_mprc;	/* Multicast Packets Rx */
	struct evcnt sc_ev_gptc;	/* Good Packets Tx */
	struct evcnt sc_ev_gorc;	/* Good Octets Rx */
	struct evcnt sc_ev_gotc;	/* Good Octets Tx */
	struct evcnt sc_ev_rnbc;	/* Rx No Buffers */
	struct evcnt sc_ev_ruc;		/* Rx Undersize */
	struct evcnt sc_ev_rfc;		/* Rx Fragment */
	struct evcnt sc_ev_roc;		/* Rx Oversize */
	struct evcnt sc_ev_rjc;		/* Rx Jabber */
	struct evcnt sc_ev_tor;		/* Total Octets Rx */
	struct evcnt sc_ev_tot;		/* Total Octets Tx */
	struct evcnt sc_ev_tpr;		/* Total Packets Rx */
	struct evcnt sc_ev_tpt;		/* Total Packets Tx */
	struct evcnt sc_ev_mptc;	/* Multicast Packets Tx */
	struct evcnt sc_ev_bptc;	/* Broadcast Packets Tx Count */
	struct evcnt sc_ev_prc64;	/* Packets Rx (64 bytes) */
	struct evcnt sc_ev_prc127;	/* Packets Rx (65-127 bytes) */
	struct evcnt sc_ev_prc255;	/* Packets Rx (128-255 bytes) */
	struct evcnt sc_ev_prc511;	/* Packets Rx (256-511 bytes) */
	struct evcnt sc_ev_prc1023;	/* Packets Rx (512-1023 bytes) */
	struct evcnt sc_ev_prc1522;	/* Packets Rx (1024-1522 bytes) */
	struct evcnt sc_ev_ptc64;	/* Packets Tx (64 bytes) */
	struct evcnt sc_ev_ptc127;	/* Packets Tx (65-127 bytes) */
	struct evcnt sc_ev_ptc255;	/* Packets Tx (128-255 bytes) */
	struct evcnt sc_ev_ptc511;	/* Packets Tx (256-511 bytes) */
	struct evcnt sc_ev_ptc1023;	/* Packets Tx (512-1023 bytes) */
	struct evcnt sc_ev_ptc1522;	/* Packets Tx (1024-1522 bytes) */
	struct evcnt sc_ev_iac;		/* Interrupt Assertion */
	struct evcnt sc_ev_icrxptc;	/* Intr. Cause Rx Pkt Timer Expire */
	struct evcnt sc_ev_icrxatc;	/* Intr. Cause Rx Abs Timer Expire */
	struct evcnt sc_ev_ictxptc;	/* Intr. Cause Tx Pkt Timer Expire */
	struct evcnt sc_ev_ictxact;	/* Intr. Cause Tx Abs Timer Expire */
	struct evcnt sc_ev_ictxqec;	/* Intr. Cause Tx Queue Empty */
	struct evcnt sc_ev_ictxqmtc;	/* Intr. Cause Tx Queue Min Thresh */
	struct evcnt sc_ev_icrxdmtc;	/* Intr. Cause Rx Desc Min Thresh */
	struct evcnt sc_ev_icrxoc;	/* Intr. Cause Receiver Overrun */
	struct evcnt sc_ev_tncrs;	/* Tx-No CRS */
	struct evcnt sc_ev_tsctc;	/* TCP Segmentation Context Tx */
	struct evcnt sc_ev_tsctfc;	/* TCP Segmentation Context Tx Fail */
	struct evcnt sc_ev_mgtprc;	/* Management Packets RX */
	struct evcnt sc_ev_mgtpdc;	/* Management Packets Dropped */
	struct evcnt sc_ev_mgtptc;	/* Management Packets TX */
	struct evcnt sc_ev_b2ogprc;	/* BMC2OS pkts received by host */
	struct evcnt sc_ev_o2bspc;	/* OS2BMC pkts transmitted by host */
	struct evcnt sc_ev_b2ospc;	/* BMC2OS pkts sent by BMC */
	struct evcnt sc_ev_o2bgptc;	/* OS2BMC pkts received by BMC */

#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex.  For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;

	struct workqueue *sc_reset_wq;
	struct work sc_reset_work;
	volatile unsigned sc_reset_pending;

	bool sc_dying;

#ifdef WM_DEBUG
	uint32_t sc_debug;
	bool sc_trigger_reset;
#endif
};

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
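
/*
 * Sketch of the Rx chaining above: a packet spanning three 2k buffers
 * is reassembled by linking each freshly received mbuf at the tail
 * pointer, which always points at the next m_next slot to fill:
 *
 *	WM_RXCHAIN_RESET(rxq);		/+ rxq_head == NULL, rxq_len == 0 +/
 *	WM_RXCHAIN_LINK(rxq, m0);	/+ rxq_head == m0 +/
 *	WM_RXCHAIN_LINK(rxq, m1);	/+ m0->m_next == m1 +/
 *	WM_RXCHAIN_LINK(rxq, m2);	/+ m1->m_next == m2, rxq_tail == m2 +/
 *
 * ("/+ +/" stands in for nested comment markers.)
 */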

#ifdef WM_EVENT_COUNTERS
#ifdef __HAVE_ATOMIC64_LOADSTORE
#define	WM_EVCNT_INCR(ev)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + 1)
#define	WM_EVCNT_ADD(ev, val)						\
	atomic_store_relaxed(&((ev)->ev_count),				\
	    atomic_load_relaxed(&(ev)->ev_count) + (val))
#else
#define	WM_EVCNT_INCR(ev)						\
	((ev)->ev_count)++
#define	WM_EVCNT_ADD(ev, val)						\
	(ev)->ev_count += (val)
#endif

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */
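
/*
 * Sketch of the counter expansion above.  On platforms with 64-bit
 * atomic load/store, a relaxed load/store pair keeps readers from
 * seeing torn 64-bit values (assuming a single updater per counter):
 *
 *	WM_Q_EVCNT_INCR(txq, tso)
 *	    -> WM_EVCNT_INCR(&(txq)->txq_ev_tso)
 *	    -> atomic_store_relaxed(&ev->ev_count,
 *	           atomic_load_relaxed(&ev->ev_count) + 1)
 */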

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)
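
/*
 * Typical register access pattern, as a sketch (CTRL_SLU is
 * illustrative; see if_wmreg.h for the real bit definitions).  Reading
 * WMREG_STATUS back in CSR_WRITE_FLUSH() forces posted PCI writes out
 * to the device before the driver proceeds:
 *
 *	uint32_t ctrl = CSR_READ(sc, WMREG_CTRL);
 *	CSR_WRITE(sc, WMREG_CTRL, ctrl | CTRL_SLU);
 *	CSR_WRITE_FLUSH(sc);
 */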

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
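
/*
 * Sketch of how the HI/LO macros are meant to be used when programming
 * the 64-bit descriptor base address registers (register names here are
 * illustrative; see if_wmreg.h for the real ones):
 *
 *	CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(txq, 0));
 *	CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(txq, 0));
 *
 * On systems where bus_addr_t is 32 bits, the HI half is constant 0.
 */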

/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions:
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static bool	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish_queue(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, bool, bool);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
static void	wm_handle_reset_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds live in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv_locked(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
static void	wm_set_linkdown_discard(struct wm_softc *);
static void	wm_clear_linkdown_discard(struct wm_softc *);

static int	wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int	wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
#ifdef WM_DEBUG
static int	wm_sysctl_debug(SYSCTLFN_PROTO);
#endif

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
1431 	  WM_T_ICH9,		WMP_F_COPPER },
1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
1433 	  "82801I mobile (V) LAN Controller",
1434 	  WM_T_ICH9,		WMP_F_COPPER },
1435 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
1436 	  "82801I mobile (AMT) LAN Controller",
1437 	  WM_T_ICH9,		WMP_F_COPPER },
1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
1439 	  "82567LM-4 LAN Controller",
1440 	  WM_T_ICH9,		WMP_F_COPPER },
1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
1442 	  "82567LM-2 LAN Controller",
1443 	  WM_T_ICH10,		WMP_F_COPPER },
1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
1445 	  "82567LF-2 LAN Controller",
1446 	  WM_T_ICH10,		WMP_F_COPPER },
1447 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
1448 	  "82567LM-3 LAN Controller",
1449 	  WM_T_ICH10,		WMP_F_COPPER },
1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
1451 	  "82567LF-3 LAN Controller",
1452 	  WM_T_ICH10,		WMP_F_COPPER },
1453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
1454 	  "82567V-2 LAN Controller",
1455 	  WM_T_ICH10,		WMP_F_COPPER },
1456 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
1457 	  "82567V-3? LAN Controller",
1458 	  WM_T_ICH10,		WMP_F_COPPER },
1459 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
1460 	  "HANKSVILLE LAN Controller",
1461 	  WM_T_ICH10,		WMP_F_COPPER },
1462 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
1463 	  "PCH LAN (82577LM) Controller",
1464 	  WM_T_PCH,		WMP_F_COPPER },
1465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
1466 	  "PCH LAN (82577LC) Controller",
1467 	  WM_T_PCH,		WMP_F_COPPER },
1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
1469 	  "PCH LAN (82578DM) Controller",
1470 	  WM_T_PCH,		WMP_F_COPPER },
1471 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
1472 	  "PCH LAN (82578DC) Controller",
1473 	  WM_T_PCH,		WMP_F_COPPER },
1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
1475 	  "PCH2 LAN (82579LM) Controller",
1476 	  WM_T_PCH2,		WMP_F_COPPER },
1477 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
1478 	  "PCH2 LAN (82579V) Controller",
1479 	  WM_T_PCH2,		WMP_F_COPPER },
1480 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
1481 	  "82575EB dual-1000baseT Ethernet",
1482 	  WM_T_82575,		WMP_F_COPPER },
1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
1484 	  "82575EB dual-1000baseX Ethernet (SERDES)",
1485 	  WM_T_82575,		WMP_F_SERDES },
1486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
1487 	  "82575GB quad-1000baseT Ethernet",
1488 	  WM_T_82575,		WMP_F_COPPER },
1489 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
1490 	  "82575GB quad-1000baseT Ethernet (PM)",
1491 	  WM_T_82575,		WMP_F_COPPER },
1492 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
1493 	  "82576 1000BaseT Ethernet",
1494 	  WM_T_82576,		WMP_F_COPPER },
1495 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
1496 	  "82576 1000BaseX Ethernet",
1497 	  WM_T_82576,		WMP_F_FIBER },
1498 
1499 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
1500 	  "82576 gigabit Ethernet (SERDES)",
1501 	  WM_T_82576,		WMP_F_SERDES },
1502 
1503 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
1504 	  "82576 quad-1000BaseT Ethernet",
1505 	  WM_T_82576,		WMP_F_COPPER },
1506 
1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
1508 	  "82576 Gigabit ET2 Quad Port Server Adapter",
1509 	  WM_T_82576,		WMP_F_COPPER },
1510 
1511 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
1512 	  "82576 gigabit Ethernet",
1513 	  WM_T_82576,		WMP_F_COPPER },
1514 
1515 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
1516 	  "82576 gigabit Ethernet (SERDES)",
1517 	  WM_T_82576,		WMP_F_SERDES },
1518 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
1519 	  "82576 quad-gigabit Ethernet (SERDES)",
1520 	  WM_T_82576,		WMP_F_SERDES },
1521 
1522 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
1523 	  "82580 1000BaseT Ethernet",
1524 	  WM_T_82580,		WMP_F_COPPER },
1525 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
1526 	  "82580 1000BaseX Ethernet",
1527 	  WM_T_82580,		WMP_F_FIBER },
1528 
1529 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
1530 	  "82580 1000BaseT Ethernet (SERDES)",
1531 	  WM_T_82580,		WMP_F_SERDES },
1532 
1533 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
1534 	  "82580 gigabit Ethernet (SGMII)",
1535 	  WM_T_82580,		WMP_F_COPPER },
1536 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
1537 	  "82580 dual-1000BaseT Ethernet",
1538 	  WM_T_82580,		WMP_F_COPPER },
1539 
1540 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1541 	  "82580 quad-1000BaseX Ethernet",
1542 	  WM_T_82580,		WMP_F_FIBER },
1543 
1544 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
1545 	  "DH89XXCC Gigabit Ethernet (SGMII)",
1546 	  WM_T_82580,		WMP_F_COPPER },
1547 
1548 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
1549 	  "DH89XXCC Gigabit Ethernet (SERDES)",
1550 	  WM_T_82580,		WMP_F_SERDES },
1551 
1552 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
1553 	  "DH89XXCC 1000BASE-KX Ethernet",
1554 	  WM_T_82580,		WMP_F_SERDES },
1555 
1556 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
1557 	  "DH89XXCC Gigabit Ethernet (SFP)",
1558 	  WM_T_82580,		WMP_F_SERDES },
1559 
1560 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
1561 	  "I350 Gigabit Network Connection",
1562 	  WM_T_I350,		WMP_F_COPPER },
1563 
1564 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
1565 	  "I350 Gigabit Fiber Network Connection",
1566 	  WM_T_I350,		WMP_F_FIBER },
1567 
1568 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
1569 	  "I350 Gigabit Backplane Connection",
1570 	  WM_T_I350,		WMP_F_SERDES },
1571 
1572 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
1573 	  "I350 Quad Port Gigabit Ethernet",
1574 	  WM_T_I350,		WMP_F_SERDES },
1575 
1576 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
1577 	  "I350 Gigabit Connection",
1578 	  WM_T_I350,		WMP_F_COPPER },
1579 
1580 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
1581 	  "I354 Gigabit Ethernet (KX)",
1582 	  WM_T_I354,		WMP_F_SERDES },
1583 
1584 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
1585 	  "I354 Gigabit Ethernet (SGMII)",
1586 	  WM_T_I354,		WMP_F_COPPER },
1587 
1588 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
1589 	  "I354 Gigabit Ethernet (2.5G)",
1590 	  WM_T_I354,		WMP_F_COPPER },
1591 
1592 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
1593 	  "I210-T1 Ethernet Server Adapter",
1594 	  WM_T_I210,		WMP_F_COPPER },
1595 
1596 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
1597 	  "I210 Ethernet (Copper OEM)",
1598 	  WM_T_I210,		WMP_F_COPPER },
1599 
1600 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
1601 	  "I210 Ethernet (Copper IT)",
1602 	  WM_T_I210,		WMP_F_COPPER },
1603 
1604 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
1605 	  "I210 Ethernet (Copper, FLASH less)",
1606 	  WM_T_I210,		WMP_F_COPPER },
1607 
1608 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
1609 	  "I210 Gigabit Ethernet (Fiber)",
1610 	  WM_T_I210,		WMP_F_FIBER },
1611 
1612 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
1613 	  "I210 Gigabit Ethernet (SERDES)",
1614 	  WM_T_I210,		WMP_F_SERDES },
1615 
1616 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
1617 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
1618 	  WM_T_I210,		WMP_F_SERDES },
1619 
1620 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
1621 	  "I210 Gigabit Ethernet (SGMII)",
1622 	  WM_T_I210,		WMP_F_COPPER },
1623 
1624 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
1625 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
1626 	  WM_T_I210,		WMP_F_COPPER },
1627 
1628 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
1629 	  "I211 Ethernet (COPPER)",
1630 	  WM_T_I211,		WMP_F_COPPER },
1631 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
1632 	  "I217 V Ethernet Connection",
1633 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1634 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
1635 	  "I217 LM Ethernet Connection",
1636 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1637 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
1638 	  "I218 V Ethernet Connection",
1639 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1640 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
1641 	  "I218 V Ethernet Connection",
1642 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1643 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
1644 	  "I218 V Ethernet Connection",
1645 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1646 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
1647 	  "I218 LM Ethernet Connection",
1648 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1649 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
1650 	  "I218 LM Ethernet Connection",
1651 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1652 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
1653 	  "I218 LM Ethernet Connection",
1654 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1655 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
1656 	  "I219 LM Ethernet Connection",
1657 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1658 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
1659 	  "I219 LM (2) Ethernet Connection",
1660 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1661 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
1662 	  "I219 LM (3) Ethernet Connection",
1663 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1664 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
1665 	  "I219 LM (4) Ethernet Connection",
1666 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1667 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
1668 	  "I219 LM (5) Ethernet Connection",
1669 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1670 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
1671 	  "I219 LM (6) Ethernet Connection",
1672 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1673 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
1674 	  "I219 LM (7) Ethernet Connection",
1675 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1676 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
1677 	  "I219 LM (8) Ethernet Connection",
1678 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1679 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
1680 	  "I219 LM (9) Ethernet Connection",
1681 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1682 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
1683 	  "I219 LM (10) Ethernet Connection",
1684 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1685 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
1686 	  "I219 LM (11) Ethernet Connection",
1687 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1688 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
1689 	  "I219 LM (12) Ethernet Connection",
1690 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1691 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
1692 	  "I219 LM (13) Ethernet Connection",
1693 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1694 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
1695 	  "I219 LM (14) Ethernet Connection",
1696 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1697 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
1698 	  "I219 LM (15) Ethernet Connection",
1699 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1700 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM16,
1701 	  "I219 LM (16) Ethernet Connection",
1702 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1703 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM17,
1704 	  "I219 LM (17) Ethernet Connection",
1705 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1706 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM18,
1707 	  "I219 LM (18) Ethernet Connection",
1708 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1709 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM19,
1710 	  "I219 LM (19) Ethernet Connection",
1711 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1712 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
1713 	  "I219 V Ethernet Connection",
1714 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1715 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
1716 	  "I219 V (2) Ethernet Connection",
1717 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1718 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
1719 	  "I219 V (4) Ethernet Connection",
1720 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1721 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
1722 	  "I219 V (5) Ethernet Connection",
1723 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1724 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
1725 	  "I219 V (6) Ethernet Connection",
1726 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1727 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
1728 	  "I219 V (7) Ethernet Connection",
1729 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1730 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
1731 	  "I219 V (8) Ethernet Connection",
1732 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1733 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
1734 	  "I219 V (9) Ethernet Connection",
1735 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1736 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
1737 	  "I219 V (10) Ethernet Connection",
1738 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1739 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
1740 	  "I219 V (11) Ethernet Connection",
1741 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1742 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
1743 	  "I219 V (12) Ethernet Connection",
1744 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1745 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
1746 	  "I219 V (13) Ethernet Connection",
1747 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1748 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
1749 	  "I219 V (14) Ethernet Connection",
1750 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1751 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V15,
1752 	  "I219 V (15) Ethernet Connection",
1753 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1754 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V16,
1755 	  "I219 V (16) Ethernet Connection",
1756 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1757 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V17,
1758 	  "I219 V (17) Ethernet Connection",
1759 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1760 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V18,
1761 	  "I219 V (18) Ethernet Connection",
1762 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1763 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V19,
1764 	  "I219 V (19) Ethernet Connection",
1765 	  WM_T_PCH_CNP,		WMP_F_COPPER },
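	/*
	 * Sentinel entry: wm_lookup() walks this table until it hits
	 * the entry with a NULL name, so it must stay last.
	 */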
1766 	{ 0,			0,
1767 	  NULL,
1768 	  0,			0 },
1769 };
1770 
1771 /*
1772  * Register read/write functions
1773  * other than CSR_{READ|WRITE}().
1774  */
1775 
1776 #if 0 /* Not currently used */
1777 static inline uint32_t
1778 wm_io_read(struct wm_softc *sc, int reg)
1779 {
1780 
1781 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1782 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1783 }
1784 #endif
1785 
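/*
 * The I/O BAR is an indirect window: the target register offset is
 * first written to I/O offset 0, and the data is then transferred
 * through I/O offset 4.  As noted in wm_attach(), this access method
 * is only used to work around bugs in some chip versions.
 */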
1786 static inline void
1787 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1788 {
1789 
1790 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1791 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1792 }
1793 
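/*
 * Write an 8-bit value into one of the 82575's 8-bit controller
 * registers: the data and the sub-register address are packed into a
 * single CSR write, then the READY bit is polled (5us per iteration,
 * up to SCTL_CTL_POLL_TIMEOUT tries) before giving up with a warning.
 */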
1794 static inline void
1795 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1796     uint32_t data)
1797 {
1798 	uint32_t regval;
1799 	int i;
1800 
1801 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1802 
1803 	CSR_WRITE(sc, reg, regval);
1804 
1805 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1806 		delay(5);
1807 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1808 			break;
1809 	}
1810 	if (i == SCTL_CTL_POLL_TIMEOUT) {
1811 		aprint_error("%s: WARNING:"
1812 		    " i82575 reg 0x%08x setup did not indicate ready\n",
1813 		    device_xname(sc->sc_dev), reg);
1814 	}
1815 }
1816 
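/*
 * Load a DMA address into the two little-endian 32-bit halves of a
 * legacy ("wiseman") descriptor address field; the high half is zero
 * when only 32-bit DMA addresses are in use.
 */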
1817 static inline void
1818 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1819 {
1820 	wa->wa_low = htole32(BUS_ADDR_LO32(v));
1821 	wa->wa_high = htole32(BUS_ADDR_HI32(v));
1822 }
1823 
1824 /*
1825  * Descriptor sync/init functions.
1826  */
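/*
 * Sync [start, start + num) TX descriptors with the device.  The
 * descriptor ring is circular but the DMA map is linear, so a range
 * that wraps past the end of the ring must be synced as two pieces:
 * first from `start' to the end of the ring, then from slot 0 onward.
 */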
1827 static inline void
1828 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1829 {
1830 	struct wm_softc *sc = txq->txq_sc;
1831 
1832 	/* If it will wrap around, sync to the end of the ring. */
1833 	if ((start + num) > WM_NTXDESC(txq)) {
1834 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1835 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
1836 		    (WM_NTXDESC(txq) - start), ops);
1837 		num -= (WM_NTXDESC(txq) - start);
1838 		start = 0;
1839 	}
1840 
1841 	/* Now sync whatever is left. */
1842 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1843 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
1844 }
1845 
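/* Sync a single RX descriptor with the device. */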
1846 static inline void
1847 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1848 {
1849 	struct wm_softc *sc = rxq->rxq_sc;
1850 
1851 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1852 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
1853 }
1854 
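/*
 * (Re)initialize the RX descriptor at `start'.  The driver supports
 * three descriptor layouts (the 82574 extended format, the 82575+
 * "new queue" advanced format, and the legacy wiseman format); after
 * filling in the right one, the slot is published to the hardware by
 * writing the RX Descriptor Tail register.
 */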
1855 static inline void
1856 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1857 {
1858 	struct wm_softc *sc = rxq->rxq_sc;
1859 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1860 	struct mbuf *m = rxs->rxs_mbuf;
1861 
1862 	/*
1863 	 * Note: We scoot the packet forward 2 bytes in the buffer
1864 	 * so that the payload after the Ethernet header is aligned
1865 	 * to a 4-byte boundary.
1866 	 *
1867 	 * XXX BRAINDAMAGE ALERT!
1868 	 * The stupid chip uses the same size for every buffer, which
1869 	 * is set in the Receive Control register.  We are using the 2K
1870 	 * size option, but what we REALLY want is (2K - 2)!  For this
1871 	 * reason, we can't "scoot" packets longer than the standard
1872 	 * Ethernet MTU.  On strict-alignment platforms, if the total
1873 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
1874 	 * the upper layer copy the headers.
1875 	 */
1876 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
1877 
1878 	if (sc->sc_type == WM_T_82574) {
1879 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
1880 		rxd->erx_data.erxd_addr =
1881 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1882 		rxd->erx_data.erxd_dd = 0;
1883 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
1884 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
1885 
1886 		rxd->nqrx_data.nrxd_paddr =
1887 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1888 		/* Currently, split header is not supported. */
1889 		rxd->nqrx_data.nrxd_haddr = 0;
1890 	} else {
1891 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1892 
1893 		wm_set_dma_addr(&rxd->wrx_addr,
1894 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1895 		rxd->wrx_len = 0;
1896 		rxd->wrx_cksum = 0;
1897 		rxd->wrx_status = 0;
1898 		rxd->wrx_errors = 0;
1899 		rxd->wrx_special = 0;
1900 	}
1901 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1902 
1903 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
1904 }
1905 
1906 /*
1907  * Device driver interface functions and commonly used functions.
1908  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1909  */
1910 
1911 /* Lookup supported device table */
1912 static const struct wm_product *
1913 wm_lookup(const struct pci_attach_args *pa)
1914 {
1915 	const struct wm_product *wmp;
1916 
1917 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1918 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1919 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1920 			return wmp;
1921 	}
1922 	return NULL;
1923 }
1924 
1925 /* The match function (ca_match) */
1926 static int
1927 wm_match(device_t parent, cfdata_t cf, void *aux)
1928 {
1929 	struct pci_attach_args *pa = aux;
1930 
1931 	if (wm_lookup(pa) != NULL)
1932 		return 1;
1933 
1934 	return 0;
1935 }
1936 
1937 /* The attach function (ca_attach) */
1938 static void
1939 wm_attach(device_t parent, device_t self, void *aux)
1940 {
1941 	struct wm_softc *sc = device_private(self);
1942 	struct pci_attach_args *pa = aux;
1943 	prop_dictionary_t dict;
1944 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1945 	pci_chipset_tag_t pc = pa->pa_pc;
1946 	int counts[PCI_INTR_TYPE_SIZE];
1947 	pci_intr_type_t max_type;
1948 	const char *eetype, *xname;
1949 	bus_space_tag_t memt;
1950 	bus_space_handle_t memh;
1951 	bus_size_t memsize;
1952 	int memh_valid;
1953 	int i, error;
1954 	const struct wm_product *wmp;
1955 	prop_data_t ea;
1956 	prop_number_t pn;
1957 	uint8_t enaddr[ETHER_ADDR_LEN];
1958 	char buf[256];
1959 	char wqname[MAXCOMLEN];
1960 	uint16_t cfg1, cfg2, swdpin, nvmword;
1961 	pcireg_t preg, memtype;
1962 	uint16_t eeprom_data, apme_mask;
1963 	bool force_clear_smbi;
1964 	uint32_t link_mode;
1965 	uint32_t reg;
1966 
1967 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
1968 	sc->sc_debug = WM_DEBUG_DEFAULT;
1969 #endif
1970 	sc->sc_dev = self;
1971 	callout_init(&sc->sc_tick_ch, CALLOUT_MPSAFE);
1972 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
1973 	sc->sc_core_stopping = false;
1974 
1975 	wmp = wm_lookup(pa);
1976 #ifdef DIAGNOSTIC
1977 	if (wmp == NULL) {
1978 		printf("\n");
1979 		panic("wm_attach: impossible");
1980 	}
1981 #endif
1982 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
1983 
1984 	sc->sc_pc = pa->pa_pc;
1985 	sc->sc_pcitag = pa->pa_tag;
1986 
1987 	if (pci_dma64_available(pa)) {
1988 		aprint_verbose(", 64-bit DMA");
1989 		sc->sc_dmat = pa->pa_dmat64;
1990 	} else {
1991 		aprint_verbose(", 32-bit DMA");
1992 		sc->sc_dmat = pa->pa_dmat;
1993 	}
1994 
1995 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
1996 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
1997 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1998 
1999 	sc->sc_type = wmp->wmp_type;
2000 
2001 	/* Set default function pointers */
2002 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
2003 	sc->phy.release = sc->nvm.release = wm_put_null;
2004 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
2005 
2006 	if (sc->sc_type < WM_T_82543) {
2007 		if (sc->sc_rev < 2) {
2008 			aprint_error_dev(sc->sc_dev,
2009 			    "i82542 must be at least rev. 2\n");
2010 			return;
2011 		}
2012 		if (sc->sc_rev < 3)
2013 			sc->sc_type = WM_T_82542_2_0;
2014 	}
2015 
2016 	/*
2017 	 * Disable MSI for Errata:
2018 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
2019 	 *
2020 	 *  82544: Errata 25
2021 	 *  82540: Errata  6 (easy to reproduce device timeout)
2022 	 *  82545: Errata  4 (easy to reproduce device timeout)
2023 	 *  82546: Errata 26 (easy to reproduce device timeout)
2024 	 *  82541: Errata  7 (easy to reproduce device timeout)
2025 	 *
2026 	 * "Byte Enables 2 and 3 are not set on MSI writes"
2027 	 *
2028 	 *  82571 & 82572: Errata 63
2029 	 */
2030 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
2031 	    || (sc->sc_type == WM_T_82572))
2032 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
2033 
2034 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2035 	    || (sc->sc_type == WM_T_82580)
2036 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
2037 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
2038 		sc->sc_flags |= WM_F_NEWQUEUE;
2039 
2040 	/* Set device properties (mactype) */
2041 	dict = device_properties(sc->sc_dev);
2042 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
2043 
2044 	/*
2045 	 * Map the device.  All devices support memory-mapped access,
2046 	 * and it is really required for normal operation.
2047 	 */
2048 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
2049 	switch (memtype) {
2050 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
2051 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
2052 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
2053 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
2054 		break;
2055 	default:
2056 		memh_valid = 0;
2057 		break;
2058 	}
2059 
2060 	if (memh_valid) {
2061 		sc->sc_st = memt;
2062 		sc->sc_sh = memh;
2063 		sc->sc_ss = memsize;
2064 	} else {
2065 		aprint_error_dev(sc->sc_dev,
2066 		    "unable to map device registers\n");
2067 		return;
2068 	}
2069 
2070 	/*
2071 	 * In addition, i82544 and later support I/O mapped indirect
2072 	 * register access.  It is not desirable (nor supported in
2073 	 * this driver) to use it for normal operation, though it is
2074 	 * required to work around bugs in some chip versions.
2075 	 */
2076 	switch (sc->sc_type) {
2077 	case WM_T_82544:
2078 	case WM_T_82541:
2079 	case WM_T_82541_2:
2080 	case WM_T_82547:
2081 	case WM_T_82547_2:
2082 		/* First we have to find the I/O BAR. */
2083 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
2084 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
2085 			if (memtype == PCI_MAPREG_TYPE_IO)
2086 				break;
2087 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
2088 			    PCI_MAPREG_MEM_TYPE_64BIT)
2089 				i += 4;	/* skip high bits, too */
2090 		}
2091 		if (i < PCI_MAPREG_END) {
2092 			/*
2093 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
2094 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
2095 			 * That's no problem, because newer chips don't have
2096 			 * this bug.
2097 			 *
2098 			 * The i8254x apparently doesn't respond when the
2099 			 * I/O BAR is 0, which looks somewhat like it hasn't
2100 			 * been configured.
2101 			 */
2102 			preg = pci_conf_read(pc, pa->pa_tag, i);
2103 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
2104 				aprint_error_dev(sc->sc_dev,
2105 				    "WARNING: I/O BAR at zero.\n");
2106 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
2107 			    0, &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)
2108 			    == 0) {
2109 				sc->sc_flags |= WM_F_IOH_VALID;
2110 			} else
2111 				aprint_error_dev(sc->sc_dev,
2112 				    "WARNING: unable to map I/O space\n");
2113 		}
2114 		break;
2115 	default:
2116 		break;
2117 	}
2118 
2119 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
2120 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
2121 	preg |= PCI_COMMAND_MASTER_ENABLE;
2122 	if (sc->sc_type < WM_T_82542_2_1)
2123 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
2124 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
2125 
2126 	/* Power up chip */
2127 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
2128 	    && error != EOPNOTSUPP) {
2129 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
2130 		return;
2131 	}
2132 
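	/*
	 * Adjust the number of TX/RX queue pairs to the number of MSI-X
	 * vectors the device advertises.
	 */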
2133 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
2134 	/*
2135 	 * Don't use MSI-X if we can use only one queue, in order to save
2136 	 * interrupt resources.
2137 	 */
2138 	if (sc->sc_nqueues > 1) {
2139 		max_type = PCI_INTR_TYPE_MSIX;
2140 		/*
2141 		 * The 82583 has an MSI-X capability in its PCI configuration
2142 		 * space, but it doesn't actually support it. At least the
2143 		 * documentation doesn't say anything about MSI-X.
2144 		 */
2145 		counts[PCI_INTR_TYPE_MSIX]
2146 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
2147 	} else {
2148 		max_type = PCI_INTR_TYPE_MSI;
2149 		counts[PCI_INTR_TYPE_MSIX] = 0;
2150 	}
2151 
2152 	/* Allocation settings */
2153 	counts[PCI_INTR_TYPE_MSI] = 1;
2154 	counts[PCI_INTR_TYPE_INTX] = 1;
2155 	/* overridden by disable flags */
2156 	if (wm_disable_msi != 0) {
2157 		counts[PCI_INTR_TYPE_MSI] = 0;
2158 		if (wm_disable_msix != 0) {
2159 			max_type = PCI_INTR_TYPE_INTX;
2160 			counts[PCI_INTR_TYPE_MSIX] = 0;
2161 		}
2162 	} else if (wm_disable_msix != 0) {
2163 		max_type = PCI_INTR_TYPE_MSI;
2164 		counts[PCI_INTR_TYPE_MSIX] = 0;
2165 	}
2166 
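	/*
	 * Interrupt allocation fallback chain: try MSI-X first (when
	 * multiple queues are usable), then MSI, then INTx.  On each
	 * failure the previous allocation is released and we jump back
	 * here with a more conservative max_type.
	 */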
2167 alloc_retry:
2168 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
2169 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
2170 		return;
2171 	}
2172 
2173 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
2174 		error = wm_setup_msix(sc);
2175 		if (error) {
2176 			pci_intr_release(pc, sc->sc_intrs,
2177 			    counts[PCI_INTR_TYPE_MSIX]);
2178 
2179 			/* Setup for MSI: Disable MSI-X */
2180 			max_type = PCI_INTR_TYPE_MSI;
2181 			counts[PCI_INTR_TYPE_MSI] = 1;
2182 			counts[PCI_INTR_TYPE_INTX] = 1;
2183 			goto alloc_retry;
2184 		}
2185 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
2186 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
2187 		error = wm_setup_legacy(sc);
2188 		if (error) {
2189 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
2190 			    counts[PCI_INTR_TYPE_MSI]);
2191 
2192 			/* The next try is for INTx: Disable MSI */
2193 			max_type = PCI_INTR_TYPE_INTX;
2194 			counts[PCI_INTR_TYPE_INTX] = 1;
2195 			goto alloc_retry;
2196 		}
2197 	} else {
2198 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
2199 		error = wm_setup_legacy(sc);
2200 		if (error) {
2201 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
2202 			    counts[PCI_INTR_TYPE_INTX]);
2203 			return;
2204 		}
2205 	}
2206 
2207 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
2208 	error = workqueue_create(&sc->sc_queue_wq, wqname,
2209 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
2210 	    WQ_PERCPU | WQ_MPSAFE);
2211 	if (error) {
2212 		aprint_error_dev(sc->sc_dev,
2213 		    "unable to create TxRx workqueue\n");
2214 		goto out;
2215 	}
2216 
2217 	snprintf(wqname, sizeof(wqname), "%sReset", device_xname(sc->sc_dev));
2218 	error = workqueue_create(&sc->sc_reset_wq, wqname,
2219 	    wm_handle_reset_work, sc, WM_WORKQUEUE_PRI, IPL_SOFTCLOCK,
2220 	    WQ_MPSAFE);
2221 	if (error) {
2222 		workqueue_destroy(sc->sc_queue_wq);
2223 		aprint_error_dev(sc->sc_dev,
2224 		    "unable to create reset workqueue\n");
2225 		goto out;
2226 	}
2227 
2228 	/*
2229 	 * Check the function ID (unit number of the chip).
2230 	 */
2231 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
2232 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
2233 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2234 	    || (sc->sc_type == WM_T_82580)
2235 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2236 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
2237 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
2238 	else
2239 		sc->sc_funcid = 0;
2240 
2241 	/*
2242 	 * Determine a few things about the bus we're connected to.
2243 	 */
2244 	if (sc->sc_type < WM_T_82543) {
2245 		/* We don't really know the bus characteristics here. */
2246 		sc->sc_bus_speed = 33;
2247 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
2248 		/*
2249 		 * CSA (Communication Streaming Architecture) is about as fast
2250 		 * as a 32-bit, 66MHz PCI bus.
2251 		 */
2252 		sc->sc_flags |= WM_F_CSA;
2253 		sc->sc_bus_speed = 66;
2254 		aprint_verbose_dev(sc->sc_dev,
2255 		    "Communication Streaming Architecture\n");
2256 		if (sc->sc_type == WM_T_82547) {
2257 			callout_init(&sc->sc_txfifo_ch, CALLOUT_MPSAFE);
2258 			callout_setfunc(&sc->sc_txfifo_ch,
2259 			    wm_82547_txfifo_stall, sc);
2260 			aprint_verbose_dev(sc->sc_dev,
2261 			    "using 82547 Tx FIFO stall work-around\n");
2262 		}
2263 	} else if (sc->sc_type >= WM_T_82571) {
2264 		sc->sc_flags |= WM_F_PCIE;
2265 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
2266 		    && (sc->sc_type != WM_T_ICH10)
2267 		    && (sc->sc_type != WM_T_PCH)
2268 		    && (sc->sc_type != WM_T_PCH2)
2269 		    && (sc->sc_type != WM_T_PCH_LPT)
2270 		    && (sc->sc_type != WM_T_PCH_SPT)
2271 		    && (sc->sc_type != WM_T_PCH_CNP)) {
2272 			/* ICH* and PCH* have no PCIe capability registers */
2273 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2274 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
2275 				NULL) == 0)
2276 				aprint_error_dev(sc->sc_dev,
2277 				    "unable to find PCIe capability\n");
2278 		}
2279 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
2280 	} else {
2281 		reg = CSR_READ(sc, WMREG_STATUS);
2282 		if (reg & STATUS_BUS64)
2283 			sc->sc_flags |= WM_F_BUS64;
2284 		if ((reg & STATUS_PCIX_MODE) != 0) {
2285 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
2286 
2287 			sc->sc_flags |= WM_F_PCIX;
2288 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2289 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
2290 				aprint_error_dev(sc->sc_dev,
2291 				    "unable to find PCIX capability\n");
2292 			else if (sc->sc_type != WM_T_82545_3 &&
2293 			    sc->sc_type != WM_T_82546_3) {
2294 				/*
2295 				 * Work around a problem caused by the BIOS
2296 				 * setting the max memory read byte count
2297 				 * incorrectly.
2298 				 */
2299 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
2300 				    sc->sc_pcixe_capoff + PCIX_CMD);
2301 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
2302 				    sc->sc_pcixe_capoff + PCIX_STATUS);
2303 
2304 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
2305 				    PCIX_CMD_BYTECNT_SHIFT;
2306 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
2307 				    PCIX_STATUS_MAXB_SHIFT;
2308 				if (bytecnt > maxb) {
2309 					aprint_verbose_dev(sc->sc_dev,
2310 					    "resetting PCI-X MMRBC: %d -> %d\n",
2311 					    512 << bytecnt, 512 << maxb);
2312 					pcix_cmd = (pcix_cmd &
2313 					    ~PCIX_CMD_BYTECNT_MASK) |
2314 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
2315 					pci_conf_write(pa->pa_pc, pa->pa_tag,
2316 					    sc->sc_pcixe_capoff + PCIX_CMD,
2317 					    pcix_cmd);
2318 				}
2319 			}
2320 		}
2321 		/*
2322 		 * The quad port adapter is special; it has a PCIX-PCIX
2323 		 * bridge on the board, and can run the secondary bus at
2324 		 * a higher speed.
2325 		 */
2326 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
2327 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
2328 								      : 66;
2329 		} else if (sc->sc_flags & WM_F_PCIX) {
2330 			switch (reg & STATUS_PCIXSPD_MASK) {
2331 			case STATUS_PCIXSPD_50_66:
2332 				sc->sc_bus_speed = 66;
2333 				break;
2334 			case STATUS_PCIXSPD_66_100:
2335 				sc->sc_bus_speed = 100;
2336 				break;
2337 			case STATUS_PCIXSPD_100_133:
2338 				sc->sc_bus_speed = 133;
2339 				break;
2340 			default:
2341 				aprint_error_dev(sc->sc_dev,
2342 				    "unknown PCIXSPD %d; assuming 66MHz\n",
2343 				    reg & STATUS_PCIXSPD_MASK);
2344 				sc->sc_bus_speed = 66;
2345 				break;
2346 			}
2347 		} else
2348 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
2349 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
2350 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
2351 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
2352 	}
2353 
2354 	/* Clear interesting stat counters */
2355 	CSR_READ(sc, WMREG_COLC);
2356 	CSR_READ(sc, WMREG_RXERRC);
2357 
2358 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
2359 	    || (sc->sc_type >= WM_T_ICH8))
2360 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2361 	if (sc->sc_type >= WM_T_ICH8)
2362 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2363 
2364 	/* Set PHY, NVM mutex related stuff */
2365 	switch (sc->sc_type) {
2366 	case WM_T_82542_2_0:
2367 	case WM_T_82542_2_1:
2368 	case WM_T_82543:
2369 	case WM_T_82544:
2370 		/* Microwire */
2371 		sc->nvm.read = wm_nvm_read_uwire;
2372 		sc->sc_nvm_wordsize = 64;
2373 		sc->sc_nvm_addrbits = 6;
2374 		break;
2375 	case WM_T_82540:
2376 	case WM_T_82545:
2377 	case WM_T_82545_3:
2378 	case WM_T_82546:
2379 	case WM_T_82546_3:
2380 		/* Microwire */
2381 		sc->nvm.read = wm_nvm_read_uwire;
2382 		reg = CSR_READ(sc, WMREG_EECD);
2383 		if (reg & EECD_EE_SIZE) {
2384 			sc->sc_nvm_wordsize = 256;
2385 			sc->sc_nvm_addrbits = 8;
2386 		} else {
2387 			sc->sc_nvm_wordsize = 64;
2388 			sc->sc_nvm_addrbits = 6;
2389 		}
2390 		sc->sc_flags |= WM_F_LOCK_EECD;
2391 		sc->nvm.acquire = wm_get_eecd;
2392 		sc->nvm.release = wm_put_eecd;
2393 		break;
2394 	case WM_T_82541:
2395 	case WM_T_82541_2:
2396 	case WM_T_82547:
2397 	case WM_T_82547_2:
2398 		reg = CSR_READ(sc, WMREG_EECD);
2399 		/*
2400 		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI only on
2401 		 * the 8254[17], so set the flags and functions before calling it.
2402 		 */
2403 		sc->sc_flags |= WM_F_LOCK_EECD;
2404 		sc->nvm.acquire = wm_get_eecd;
2405 		sc->nvm.release = wm_put_eecd;
2406 		if (reg & EECD_EE_TYPE) {
2407 			/* SPI */
2408 			sc->nvm.read = wm_nvm_read_spi;
2409 			sc->sc_flags |= WM_F_EEPROM_SPI;
2410 			wm_nvm_set_addrbits_size_eecd(sc);
2411 		} else {
2412 			/* Microwire */
2413 			sc->nvm.read = wm_nvm_read_uwire;
2414 			if ((reg & EECD_EE_ABITS) != 0) {
2415 				sc->sc_nvm_wordsize = 256;
2416 				sc->sc_nvm_addrbits = 8;
2417 			} else {
2418 				sc->sc_nvm_wordsize = 64;
2419 				sc->sc_nvm_addrbits = 6;
2420 			}
2421 		}
2422 		break;
2423 	case WM_T_82571:
2424 	case WM_T_82572:
2425 		/* SPI */
2426 		sc->nvm.read = wm_nvm_read_eerd;
2427 		/* Don't use WM_F_LOCK_EECD because we use EERD */
2428 		sc->sc_flags |= WM_F_EEPROM_SPI;
2429 		wm_nvm_set_addrbits_size_eecd(sc);
2430 		sc->phy.acquire = wm_get_swsm_semaphore;
2431 		sc->phy.release = wm_put_swsm_semaphore;
2432 		sc->nvm.acquire = wm_get_nvm_82571;
2433 		sc->nvm.release = wm_put_nvm_82571;
2434 		break;
2435 	case WM_T_82573:
2436 	case WM_T_82574:
2437 	case WM_T_82583:
2438 		sc->nvm.read = wm_nvm_read_eerd;
2439 		/* Don't use WM_F_LOCK_EECD because we use EERD */
2440 		if (sc->sc_type == WM_T_82573) {
2441 			sc->phy.acquire = wm_get_swsm_semaphore;
2442 			sc->phy.release = wm_put_swsm_semaphore;
2443 			sc->nvm.acquire = wm_get_nvm_82571;
2444 			sc->nvm.release = wm_put_nvm_82571;
2445 		} else {
2446 			/* Both PHY and NVM use the same semaphore. */
2447 			sc->phy.acquire = sc->nvm.acquire
2448 			    = wm_get_swfwhw_semaphore;
2449 			sc->phy.release = sc->nvm.release
2450 			    = wm_put_swfwhw_semaphore;
2451 		}
2452 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
2453 			sc->sc_flags |= WM_F_EEPROM_FLASH;
2454 			sc->sc_nvm_wordsize = 2048;
2455 		} else {
2456 			/* SPI */
2457 			sc->sc_flags |= WM_F_EEPROM_SPI;
2458 			wm_nvm_set_addrbits_size_eecd(sc);
2459 		}
2460 		break;
2461 	case WM_T_82575:
2462 	case WM_T_82576:
2463 	case WM_T_82580:
2464 	case WM_T_I350:
2465 	case WM_T_I354:
2466 	case WM_T_80003:
2467 		/* SPI */
2468 		sc->sc_flags |= WM_F_EEPROM_SPI;
2469 		wm_nvm_set_addrbits_size_eecd(sc);
2470 		if ((sc->sc_type == WM_T_80003)
2471 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
2472 			sc->nvm.read = wm_nvm_read_eerd;
2473 			/* Don't use WM_F_LOCK_EECD because we use EERD */
2474 		} else {
2475 			sc->nvm.read = wm_nvm_read_spi;
2476 			sc->sc_flags |= WM_F_LOCK_EECD;
2477 		}
2478 		sc->phy.acquire = wm_get_phy_82575;
2479 		sc->phy.release = wm_put_phy_82575;
2480 		sc->nvm.acquire = wm_get_nvm_80003;
2481 		sc->nvm.release = wm_put_nvm_80003;
2482 		break;
2483 	case WM_T_ICH8:
2484 	case WM_T_ICH9:
2485 	case WM_T_ICH10:
2486 	case WM_T_PCH:
2487 	case WM_T_PCH2:
2488 	case WM_T_PCH_LPT:
2489 		sc->nvm.read = wm_nvm_read_ich8;
2490 		/* FLASH */
2491 		sc->sc_flags |= WM_F_EEPROM_FLASH;
2492 		sc->sc_nvm_wordsize = 2048;
2493 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
2494 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2495 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2496 			aprint_error_dev(sc->sc_dev,
2497 			    "can't map FLASH registers\n");
2498 			goto out;
2499 		}
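		/*
		 * Decode GFPREG: the low field holds the first sector of
		 * the flash region and the field at bit 16 holds the last
		 * sector.  Convert the region to a per-bank size in 16-bit
		 * words; the region holds two NVM banks.
		 */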
2500 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2501 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2502 		    ICH_FLASH_SECTOR_SIZE;
2503 		sc->sc_ich8_flash_bank_size =
2504 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2505 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
2506 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2507 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
2508 		sc->sc_flashreg_offset = 0;
2509 		sc->phy.acquire = wm_get_swflag_ich8lan;
2510 		sc->phy.release = wm_put_swflag_ich8lan;
2511 		sc->nvm.acquire = wm_get_nvm_ich8lan;
2512 		sc->nvm.release = wm_put_nvm_ich8lan;
2513 		break;
2514 	case WM_T_PCH_SPT:
2515 	case WM_T_PCH_CNP:
2516 		sc->nvm.read = wm_nvm_read_spt;
2517 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
2518 		sc->sc_flags |= WM_F_EEPROM_FLASH;
2519 		sc->sc_flasht = sc->sc_st;
2520 		sc->sc_flashh = sc->sc_sh;
2521 		sc->sc_ich8_flash_base = 0;
2522 		sc->sc_nvm_wordsize =
2523 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
2524 		    * NVM_SIZE_MULTIPLIER;
2525 		/* That's the size in bytes; we want words */
2526 		sc->sc_nvm_wordsize /= 2;
2527 		/* Assume 2 banks */
2528 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
2529 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
2530 		sc->phy.acquire = wm_get_swflag_ich8lan;
2531 		sc->phy.release = wm_put_swflag_ich8lan;
2532 		sc->nvm.acquire = wm_get_nvm_ich8lan;
2533 		sc->nvm.release = wm_put_nvm_ich8lan;
2534 		break;
2535 	case WM_T_I210:
2536 	case WM_T_I211:
2537 		/* Allow a single clear of the SW semaphore on I210 and newer */
2538 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
2539 		if (wm_nvm_flash_presence_i210(sc)) {
2540 			sc->nvm.read = wm_nvm_read_eerd;
2541 			/* Don't use WM_F_LOCK_EECD because we use EERD */
2542 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2543 			wm_nvm_set_addrbits_size_eecd(sc);
2544 		} else {
2545 			sc->nvm.read = wm_nvm_read_invm;
2546 			sc->sc_flags |= WM_F_EEPROM_INVM;
2547 			sc->sc_nvm_wordsize = INVM_SIZE;
2548 		}
2549 		sc->phy.acquire = wm_get_phy_82575;
2550 		sc->phy.release = wm_put_phy_82575;
2551 		sc->nvm.acquire = wm_get_nvm_80003;
2552 		sc->nvm.release = wm_put_nvm_80003;
2553 		break;
2554 	default:
2555 		break;
2556 	}
2557 
2558 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
2559 	switch (sc->sc_type) {
2560 	case WM_T_82571:
2561 	case WM_T_82572:
2562 		reg = CSR_READ(sc, WMREG_SWSM2);
2563 		if ((reg & SWSM2_LOCK) == 0) {
2564 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2565 			force_clear_smbi = true;
2566 		} else
2567 			force_clear_smbi = false;
2568 		break;
2569 	case WM_T_82573:
2570 	case WM_T_82574:
2571 	case WM_T_82583:
2572 		force_clear_smbi = true;
2573 		break;
2574 	default:
2575 		force_clear_smbi = false;
2576 		break;
2577 	}
2578 	if (force_clear_smbi) {
2579 		reg = CSR_READ(sc, WMREG_SWSM);
2580 		if ((reg & SWSM_SMBI) != 0)
2581 			aprint_error_dev(sc->sc_dev,
2582 			    "Please update the Bootagent\n");
2583 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2584 	}
2585 
2586 	/*
2587 	 * Defer printing the EEPROM type until after verifying the checksum.
2588 	 * This allows the EEPROM type to be printed correctly in the case
2589 	 * that no EEPROM is attached.
2590 	 */
2591 	/*
2592 	 * Validate the EEPROM checksum. If the checksum fails, flag
2593 	 * this for later, so we can fail future reads from the EEPROM.
2594 	 */
2595 	if (wm_nvm_validate_checksum(sc)) {
2596 		/*
2597 		 * Read it again because some PCI-e parts fail the
2598 		 * first check due to the link being in a sleep state.
2599 		 */
2600 		if (wm_nvm_validate_checksum(sc))
2601 			sc->sc_flags |= WM_F_EEPROM_INVALID;
2602 	}
2603 
2604 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
2605 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2606 	else {
2607 		aprint_verbose_dev(sc->sc_dev, "%u words ",
2608 		    sc->sc_nvm_wordsize);
2609 		if (sc->sc_flags & WM_F_EEPROM_INVM)
2610 			aprint_verbose("iNVM");
2611 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2612 			aprint_verbose("FLASH(HW)");
2613 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2614 			aprint_verbose("FLASH");
2615 		else {
2616 			if (sc->sc_flags & WM_F_EEPROM_SPI)
2617 				eetype = "SPI";
2618 			else
2619 				eetype = "MicroWire";
2620 			aprint_verbose("(%d address bits) %s EEPROM",
2621 			    sc->sc_nvm_addrbits, eetype);
2622 		}
2623 	}
2624 	wm_nvm_version(sc);
2625 	aprint_verbose("\n");
2626 
2627 	/*
2628 	 * XXX This is the first call of wm_gmii_setup_phytype(); the
2629 	 * result might be incorrect.
2630 	 */
2631 	wm_gmii_setup_phytype(sc, 0, 0);
2632 
2633 	/* Check for WM_F_WOL on some chips before wm_reset() */
2634 	switch (sc->sc_type) {
2635 	case WM_T_ICH8:
2636 	case WM_T_ICH9:
2637 	case WM_T_ICH10:
2638 	case WM_T_PCH:
2639 	case WM_T_PCH2:
2640 	case WM_T_PCH_LPT:
2641 	case WM_T_PCH_SPT:
2642 	case WM_T_PCH_CNP:
2643 		apme_mask = WUC_APME;
2644 		eeprom_data = CSR_READ(sc, WMREG_WUC);
2645 		if ((eeprom_data & apme_mask) != 0)
2646 			sc->sc_flags |= WM_F_WOL;
2647 		break;
2648 	default:
2649 		break;
2650 	}
2651 
2652 	/* Reset the chip to a known state. */
2653 	wm_reset(sc);
2654 
2655 	/*
2656 	 * Check for I21[01] PLL workaround.
2657 	 *
2658 	 * Three cases:
2659 	 * a) Chip is I211.
2660 	 * b) Chip is I210 and it uses INVM (not FLASH).
2661 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
2662 	 */
2663 	if (sc->sc_type == WM_T_I211)
2664 		sc->sc_flags |= WM_F_PLL_WA_I210;
2665 	if (sc->sc_type == WM_T_I210) {
2666 		if (!wm_nvm_flash_presence_i210(sc))
2667 			sc->sc_flags |= WM_F_PLL_WA_I210;
2668 		else if ((sc->sc_nvm_ver_major < 3)
2669 		    || ((sc->sc_nvm_ver_major == 3)
2670 			&& (sc->sc_nvm_ver_minor < 25))) {
2671 			aprint_verbose_dev(sc->sc_dev,
2672 			    "ROM image version %d.%d is older than 3.25\n",
2673 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2674 			sc->sc_flags |= WM_F_PLL_WA_I210;
2675 		}
2676 	}
2677 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2678 		wm_pll_workaround_i210(sc);
2679 
2680 	wm_get_wakeup(sc);
2681 
2682 	/* Non-AMT based hardware can now take control from firmware */
2683 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2684 		wm_get_hw_control(sc);
2685 
2686 	/*
2687 	 * Read the Ethernet address from the EEPROM, unless it was
2688 	 * already found in the device properties.
2689 	 */
2690 	ea = prop_dictionary_get(dict, "mac-address");
2691 	if (ea != NULL) {
2692 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2693 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2694 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
2695 	} else {
2696 		if (wm_read_mac_addr(sc, enaddr) != 0) {
2697 			aprint_error_dev(sc->sc_dev,
2698 			    "unable to read Ethernet address\n");
2699 			goto out;
2700 		}
2701 	}
2702 
2703 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2704 	    ether_sprintf(enaddr));
2705 
2706 	/*
2707 	 * Read the config info from the EEPROM, and set up various
2708 	 * bits in the control registers based on their contents.
2709 	 */
2710 	pn = prop_dictionary_get(dict, "i82543-cfg1");
2711 	if (pn != NULL) {
2712 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2713 		cfg1 = (uint16_t) prop_number_signed_value(pn);
2714 	} else {
2715 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2716 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2717 			goto out;
2718 		}
2719 	}
2720 
2721 	pn = prop_dictionary_get(dict, "i82543-cfg2");
2722 	if (pn != NULL) {
2723 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2724 		cfg2 = (uint16_t) prop_number_signed_value(pn);
2725 	} else {
2726 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2727 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2728 			goto out;
2729 		}
2730 	}
2731 
2732 	/* Check for WM_F_WOL */
2733 	switch (sc->sc_type) {
2734 	case WM_T_82542_2_0:
2735 	case WM_T_82542_2_1:
2736 	case WM_T_82543:
2737 		/* dummy? */
2738 		eeprom_data = 0;
2739 		apme_mask = NVM_CFG3_APME;
2740 		break;
2741 	case WM_T_82544:
2742 		apme_mask = NVM_CFG2_82544_APM_EN;
2743 		eeprom_data = cfg2;
2744 		break;
2745 	case WM_T_82546:
2746 	case WM_T_82546_3:
2747 	case WM_T_82571:
2748 	case WM_T_82572:
2749 	case WM_T_82573:
2750 	case WM_T_82574:
2751 	case WM_T_82583:
2752 	case WM_T_80003:
2753 	case WM_T_82575:
2754 	case WM_T_82576:
2755 		apme_mask = NVM_CFG3_APME;
2756 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2757 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2758 		break;
2759 	case WM_T_82580:
2760 	case WM_T_I350:
2761 	case WM_T_I354:
2762 	case WM_T_I210:
2763 	case WM_T_I211:
2764 		apme_mask = NVM_CFG3_APME;
2765 		wm_nvm_read(sc,
2766 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2767 		    1, &eeprom_data);
2768 		break;
2769 	case WM_T_ICH8:
2770 	case WM_T_ICH9:
2771 	case WM_T_ICH10:
2772 	case WM_T_PCH:
2773 	case WM_T_PCH2:
2774 	case WM_T_PCH_LPT:
2775 	case WM_T_PCH_SPT:
2776 	case WM_T_PCH_CNP:
2777 		/* Already checked before wm_reset() */
2778 		apme_mask = eeprom_data = 0;
2779 		break;
2780 	default: /* XXX 82540 */
2781 		apme_mask = NVM_CFG3_APME;
2782 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2783 		break;
2784 	}
2785 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2786 	if ((eeprom_data & apme_mask) != 0)
2787 		sc->sc_flags |= WM_F_WOL;
2788 
2789 	/*
2790 	 * We have the EEPROM settings; now apply the special cases
2791 	 * where the EEPROM may be wrong or the board doesn't support
2792 	 * wake-on-LAN on a particular port.
2793 	 */
2794 	switch (sc->sc_pcidevid) {
2795 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
2796 		sc->sc_flags &= ~WM_F_WOL;
2797 		break;
2798 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
2799 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
2800 		/* Wake events are only supported on port A of dual-fiber
2801 		 * adapters, regardless of the EEPROM setting. */
2802 		if (sc->sc_funcid == 1)
2803 			sc->sc_flags &= ~WM_F_WOL;
2804 		break;
2805 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
2806 		/* If quad port adapter, disable WoL on all but port A */
2807 		if (sc->sc_funcid != 0)
2808 			sc->sc_flags &= ~WM_F_WOL;
2809 		break;
2810 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
2811 		/* Wake events are only supported on port A of dual-fiber
2812 		 * adapters, regardless of the EEPROM setting. */
2813 		if (sc->sc_funcid == 1)
2814 			sc->sc_flags &= ~WM_F_WOL;
2815 		break;
2816 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
2817 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
2818 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
2819 		/* If quad port adapter, disable WoL on all but port A */
2820 		if (sc->sc_funcid != 0)
2821 			sc->sc_flags &= ~WM_F_WOL;
2822 		break;
2823 	}
2824 
2825 	if (sc->sc_type >= WM_T_82575) {
2826 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2827 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
2828 			    nvmword);
2829 			if ((sc->sc_type == WM_T_82575) ||
2830 			    (sc->sc_type == WM_T_82576)) {
2831 				/* Check NVM for autonegotiation */
2832 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
2833 				    != 0)
2834 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2835 			}
2836 			if ((sc->sc_type == WM_T_82575) ||
2837 			    (sc->sc_type == WM_T_I350)) {
2838 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
2839 					sc->sc_flags |= WM_F_MAS;
2840 			}
2841 		}
2842 	}
2843 
2844 	/*
2845 	 * XXX need special handling for some multiple-port cards
2846 	 * to disable a particular port.
2847 	 */
2848 
2849 	if (sc->sc_type >= WM_T_82544) {
2850 		pn = prop_dictionary_get(dict, "i82543-swdpin");
2851 		if (pn != NULL) {
2852 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2853 			swdpin = (uint16_t) prop_number_signed_value(pn);
2854 		} else {
2855 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2856 				aprint_error_dev(sc->sc_dev,
2857 				    "unable to read SWDPIN\n");
2858 				goto out;
2859 			}
2860 		}
2861 	}
2862 
2863 	if (cfg1 & NVM_CFG1_ILOS)
2864 		sc->sc_ctrl |= CTRL_ILOS;
2865 
2866 	/*
2867 	 * XXX
2868 	 * This code isn't correct because pins 2 and 3 are located
2869 	 * at different positions on newer chips. Check all the datasheets.
2870 	 *
2871 	 * Until this problem is resolved, only do this on chips <= 82580.
2872 	 */
2873 	if (sc->sc_type <= WM_T_82580) {
2874 		if (sc->sc_type >= WM_T_82544) {
2875 			sc->sc_ctrl |=
2876 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2877 			    CTRL_SWDPIO_SHIFT;
2878 			sc->sc_ctrl |=
2879 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2880 			    CTRL_SWDPINS_SHIFT;
2881 		} else {
2882 			sc->sc_ctrl |=
2883 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2884 			    CTRL_SWDPIO_SHIFT;
2885 		}
2886 	}
2887 
2888 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
2889 		wm_nvm_read(sc,
2890 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2891 		    1, &nvmword);
2892 		if (nvmword & NVM_CFG3_ILOS)
2893 			sc->sc_ctrl |= CTRL_ILOS;
2894 	}
2895 
2896 #if 0
2897 	if (sc->sc_type >= WM_T_82544) {
2898 		if (cfg1 & NVM_CFG1_IPS0)
2899 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2900 		if (cfg1 & NVM_CFG1_IPS1)
2901 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2902 		sc->sc_ctrl_ext |=
2903 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2904 		    CTRL_EXT_SWDPIO_SHIFT;
2905 		sc->sc_ctrl_ext |=
2906 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2907 		    CTRL_EXT_SWDPINS_SHIFT;
2908 	} else {
2909 		sc->sc_ctrl_ext |=
2910 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2911 		    CTRL_EXT_SWDPIO_SHIFT;
2912 	}
2913 #endif
2914 
2915 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2916 #if 0
2917 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2918 #endif
2919 
2920 	if (sc->sc_type == WM_T_PCH) {
2921 		uint16_t val;
2922 
2923 		/* Save the NVM K1 bit setting */
2924 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2925 
2926 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2927 			sc->sc_nvm_k1_enabled = 1;
2928 		else
2929 			sc->sc_nvm_k1_enabled = 0;
2930 	}
2931 
2932 	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
2933 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2934 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2935 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2936 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
2937 	    || sc->sc_type == WM_T_82573
2938 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2939 		/* Copper only */
2940 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
	    || (sc->sc_type == WM_T_I211)) {
2944 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
2945 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
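		/*
		 * CTRL_EXT's link mode field selects among internal
		 * GMII (copper), 1000BASE-KX, SGMII and PCIe SerDes
		 * operation; the cases below map it to wm(4)'s media type.
		 */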
2946 		switch (link_mode) {
2947 		case CTRL_EXT_LINK_MODE_1000KX:
2948 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
2949 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2950 			break;
2951 		case CTRL_EXT_LINK_MODE_SGMII:
2952 			if (wm_sgmii_uses_mdio(sc)) {
2953 				aprint_normal_dev(sc->sc_dev,
2954 				    "SGMII(MDIO)\n");
2955 				sc->sc_flags |= WM_F_SGMII;
2956 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2957 				break;
2958 			}
2959 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2960 			/*FALLTHROUGH*/
2961 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2962 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
2963 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2964 				if (link_mode
2965 				    == CTRL_EXT_LINK_MODE_SGMII) {
2966 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2967 					sc->sc_flags |= WM_F_SGMII;
2968 					aprint_verbose_dev(sc->sc_dev,
2969 					    "SGMII\n");
2970 				} else {
2971 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2972 					aprint_verbose_dev(sc->sc_dev,
2973 					    "SERDES\n");
2974 				}
2975 				break;
2976 			}
2977 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2978 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
2979 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2980 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
2981 				sc->sc_flags |= WM_F_SGMII;
2982 			}
2983 			/* Do not change link mode for 100BaseFX */
2984 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
2985 				break;
2986 
2987 			/* Change current link mode setting */
2988 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
2989 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2990 				reg |= CTRL_EXT_LINK_MODE_SGMII;
2991 			else
2992 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2993 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2994 			break;
2995 		case CTRL_EXT_LINK_MODE_GMII:
2996 		default:
2997 			aprint_normal_dev(sc->sc_dev, "Copper\n");
2998 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2999 			break;
3000 		}
3001 
		reg &= ~CTRL_EXT_I2C_ENA;
		if ((sc->sc_flags & WM_F_SGMII) != 0)
			reg |= CTRL_EXT_I2C_ENA;
3007 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3008 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
3009 			if (!wm_sgmii_uses_mdio(sc))
3010 				wm_gmii_setup_phytype(sc, 0, 0);
3011 			wm_reset_mdicnfg_82580(sc);
3012 		}
3013 	} else if (sc->sc_type < WM_T_82543 ||
3014 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
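		/* The STATUS_TBIMODE strap distinguishes fiber from copper. */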
3015 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
3016 			aprint_error_dev(sc->sc_dev,
3017 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
3018 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
3019 		}
3020 	} else {
3021 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
3022 			aprint_error_dev(sc->sc_dev,
3023 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
3024 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
3025 		}
3026 	}
3027 
3028 	if (sc->sc_type >= WM_T_PCH2)
3029 		sc->sc_flags |= WM_F_EEE;
3030 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
3031 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
3032 		/* XXX: Need special handling for I354. (not yet) */
3033 		if (sc->sc_type != WM_T_I354)
3034 			sc->sc_flags |= WM_F_EEE;
3035 	}
3036 
3037 	/*
3038 	 * The I350 has a bug where it always strips the CRC whether
3039 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
3040 	 */
3041 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3042 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3043 		sc->sc_flags |= WM_F_CRC_STRIP;
3044 
3045 	/* Set device properties (macflags) */
3046 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
3047 
3048 	if (sc->sc_flags != 0) {
3049 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
3050 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
3051 	}
3052 
3053 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
3054 
3055 	/* Initialize the media structures accordingly. */
3056 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
3057 		wm_gmii_mediainit(sc, wmp->wmp_product);
3058 	else
3059 		wm_tbi_mediainit(sc); /* All others */
3060 
3061 	ifp = &sc->sc_ethercom.ec_if;
3062 	xname = device_xname(sc->sc_dev);
3063 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
3064 	ifp->if_softc = sc;
3065 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3066 	ifp->if_extflags = IFEF_MPSAFE;
3067 	ifp->if_ioctl = wm_ioctl;
3068 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3069 		ifp->if_start = wm_nq_start;
3070 		/*
		 * When there is only one CPU and the controller can use
		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
		 * the other for link status changes.
		 * In this situation, wm_nq_transmit() is disadvantageous
		 * because of the wm_select_txqueue() and pcq(9) overhead.
3077 		 */
3078 		if (wm_is_using_multiqueue(sc))
3079 			ifp->if_transmit = wm_nq_transmit;
3080 	} else {
3081 		ifp->if_start = wm_start;
3082 		/*
3083 		 * wm_transmit() has the same disadvantages as wm_nq_transmit()
3084 		 * described above.
3085 		 */
3086 		if (wm_is_using_multiqueue(sc))
3087 			ifp->if_transmit = wm_transmit;
3088 	}
	/* wm(4) does not use ifp->if_watchdog; wm_tick serves as the watchdog. */
3090 	ifp->if_init = wm_init;
3091 	ifp->if_stop = wm_stop;
3092 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
3093 	IFQ_SET_READY(&ifp->if_snd);
3094 
3095 	/* Check for jumbo frame */
3096 	switch (sc->sc_type) {
3097 	case WM_T_82573:
3098 		/* XXX limited to 9234 if ASPM is disabled */
3099 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
3100 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
3101 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3102 		break;
3103 	case WM_T_82571:
3104 	case WM_T_82572:
3105 	case WM_T_82574:
3106 	case WM_T_82583:
3107 	case WM_T_82575:
3108 	case WM_T_82576:
3109 	case WM_T_82580:
3110 	case WM_T_I350:
3111 	case WM_T_I354:
3112 	case WM_T_I210:
3113 	case WM_T_I211:
3114 	case WM_T_80003:
3115 	case WM_T_ICH9:
3116 	case WM_T_ICH10:
3117 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
3118 	case WM_T_PCH_LPT:
3119 	case WM_T_PCH_SPT:
3120 	case WM_T_PCH_CNP:
3121 		/* XXX limited to 9234 */
3122 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3123 		break;
3124 	case WM_T_PCH:
3125 		/* XXX limited to 4096 */
3126 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3127 		break;
3128 	case WM_T_82542_2_0:
3129 	case WM_T_82542_2_1:
3130 	case WM_T_ICH8:
3131 		/* No support for jumbo frame */
3132 		break;
3133 	default:
3134 		/* ETHER_MAX_LEN_JUMBO */
3135 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3136 		break;
3137 	}
3138 
	/* If we're an i82543 or greater, we can support VLANs. */
3140 	if (sc->sc_type >= WM_T_82543) {
3141 		sc->sc_ethercom.ec_capabilities |=
3142 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
3143 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
3144 	}
3145 
3146 	if ((sc->sc_flags & WM_F_EEE) != 0)
3147 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
3148 
3149 	/*
	 * We can perform IPv4, TCPv4 and UDPv4 checksums in hardware,
	 * but only on i82543 and later.
3152 	 */
3153 	if (sc->sc_type >= WM_T_82543) {
3154 		ifp->if_capabilities |=
3155 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
3156 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
3157 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
3158 		    IFCAP_CSUM_TCPv6_Tx |
3159 		    IFCAP_CSUM_UDPv6_Tx;
3160 	}
3161 
3162 	/*
	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
3164 	 *
3165 	 *	82541GI (8086:1076) ... no
3166 	 *	82572EI (8086:10b9) ... yes
3167 	 */
3168 	if (sc->sc_type >= WM_T_82571) {
3169 		ifp->if_capabilities |=
3170 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
3171 	}
3172 
3173 	/*
	 * If we're an i82544 or greater (except the i82547), we can do
3175 	 * TCP segmentation offload.
3176 	 */
3177 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547)
3178 		ifp->if_capabilities |= IFCAP_TSOv4;
3179 
3180 	if (sc->sc_type >= WM_T_82571)
3181 		ifp->if_capabilities |= IFCAP_TSOv6;
3182 
3183 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
3184 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
3185 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
3186 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
3187 
3188 	/* Attach the interface. */
3189 	if_initialize(ifp);
3190 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
3191 	ether_ifattach(ifp, enaddr);
3192 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
3193 	if_register(ifp);
3194 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
3195 	    RND_FLAG_DEFAULT);
3196 
3197 #ifdef WM_EVENT_COUNTERS
3198 	/* Attach event counters. */
3199 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
3200 	    NULL, xname, "linkintr");
3201 
3202 	if (sc->sc_type >= WM_T_82542_2_1) {
3203 		evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
3204 		    NULL, xname, "tx_xoff");
3205 		evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
3206 		    NULL, xname, "tx_xon");
3207 		evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
3208 		    NULL, xname, "rx_xoff");
3209 		evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
3210 		    NULL, xname, "rx_xon");
3211 		evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
3212 		    NULL, xname, "rx_macctl");
3213 	}
3214 
3215 	evcnt_attach_dynamic(&sc->sc_ev_crcerrs, EVCNT_TYPE_MISC,
3216 	    NULL, xname, "CRC Error");
3217 	evcnt_attach_dynamic(&sc->sc_ev_symerrc, EVCNT_TYPE_MISC,
3218 	    NULL, xname, "Symbol Error");
3219 
3220 	if (sc->sc_type >= WM_T_82543) {
3221 		evcnt_attach_dynamic(&sc->sc_ev_algnerrc, EVCNT_TYPE_MISC,
3222 		    NULL, xname, "Alignment Error");
3223 		evcnt_attach_dynamic(&sc->sc_ev_rxerrc, EVCNT_TYPE_MISC,
3224 		    NULL, xname, "Receive Error");
3225 		evcnt_attach_dynamic(&sc->sc_ev_cexterr, EVCNT_TYPE_MISC,
3226 		    NULL, xname, "Carrier Extension Error");
3227 	}
3228 
3229 	evcnt_attach_dynamic(&sc->sc_ev_mpc, EVCNT_TYPE_MISC,
3230 	    NULL, xname, "Missed Packets");
3231 	evcnt_attach_dynamic(&sc->sc_ev_colc, EVCNT_TYPE_MISC,
3232 	    NULL, xname, "Collision");
3233 	evcnt_attach_dynamic(&sc->sc_ev_sec, EVCNT_TYPE_MISC,
3234 	    NULL, xname, "Sequence Error");
3235 	evcnt_attach_dynamic(&sc->sc_ev_rlec, EVCNT_TYPE_MISC,
3236 	    NULL, xname, "Receive Length Error");
3237 	evcnt_attach_dynamic(&sc->sc_ev_scc, EVCNT_TYPE_MISC,
3238 	    NULL, xname, "Single Collision");
3239 	evcnt_attach_dynamic(&sc->sc_ev_ecol, EVCNT_TYPE_MISC,
3240 	    NULL, xname, "Excessive Collisions");
3241 	evcnt_attach_dynamic(&sc->sc_ev_mcc, EVCNT_TYPE_MISC,
3242 	    NULL, xname, "Multiple Collision");
3243 	evcnt_attach_dynamic(&sc->sc_ev_latecol, EVCNT_TYPE_MISC,
3244 	    NULL, xname, "Late Collisions");
3245 	evcnt_attach_dynamic(&sc->sc_ev_dc, EVCNT_TYPE_MISC,
3246 	    NULL, xname, "Defer");
3247 	evcnt_attach_dynamic(&sc->sc_ev_gprc, EVCNT_TYPE_MISC,
3248 	    NULL, xname, "Good Packets Rx");
3249 	evcnt_attach_dynamic(&sc->sc_ev_bprc, EVCNT_TYPE_MISC,
3250 	    NULL, xname, "Broadcast Packets Rx");
3251 	evcnt_attach_dynamic(&sc->sc_ev_mprc, EVCNT_TYPE_MISC,
3252 	    NULL, xname, "Multicast Packets Rx");
3253 	evcnt_attach_dynamic(&sc->sc_ev_gptc, EVCNT_TYPE_MISC,
3254 	    NULL, xname, "Good Packets Tx");
3255 	evcnt_attach_dynamic(&sc->sc_ev_gorc, EVCNT_TYPE_MISC,
3256 	    NULL, xname, "Good Octets Rx");
3257 	evcnt_attach_dynamic(&sc->sc_ev_gotc, EVCNT_TYPE_MISC,
3258 	    NULL, xname, "Good Octets Tx");
3259 	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
3260 	    NULL, xname, "Rx No Buffers");
3261 	evcnt_attach_dynamic(&sc->sc_ev_ruc, EVCNT_TYPE_MISC,
3262 	    NULL, xname, "Rx Undersize");
3263 	evcnt_attach_dynamic(&sc->sc_ev_rfc, EVCNT_TYPE_MISC,
3264 	    NULL, xname, "Rx Fragment");
3265 	evcnt_attach_dynamic(&sc->sc_ev_roc, EVCNT_TYPE_MISC,
3266 	    NULL, xname, "Rx Oversize");
3267 	evcnt_attach_dynamic(&sc->sc_ev_rjc, EVCNT_TYPE_MISC,
3268 	    NULL, xname, "Rx Jabber");
3269 	evcnt_attach_dynamic(&sc->sc_ev_tor, EVCNT_TYPE_MISC,
3270 	    NULL, xname, "Total Octets Rx");
3271 	evcnt_attach_dynamic(&sc->sc_ev_tot, EVCNT_TYPE_MISC,
3272 	    NULL, xname, "Total Octets Tx");
3273 	evcnt_attach_dynamic(&sc->sc_ev_tpr, EVCNT_TYPE_MISC,
3274 	    NULL, xname, "Total Packets Rx");
3275 	evcnt_attach_dynamic(&sc->sc_ev_tpt, EVCNT_TYPE_MISC,
3276 	    NULL, xname, "Total Packets Tx");
3277 	evcnt_attach_dynamic(&sc->sc_ev_mptc, EVCNT_TYPE_MISC,
3278 	    NULL, xname, "Multicast Packets Tx");
3279 	evcnt_attach_dynamic(&sc->sc_ev_bptc, EVCNT_TYPE_MISC,
3280 	    NULL, xname, "Broadcast Packets Tx Count");
3281 	evcnt_attach_dynamic(&sc->sc_ev_prc64, EVCNT_TYPE_MISC,
3282 	    NULL, xname, "Packets Rx (64 bytes)");
3283 	evcnt_attach_dynamic(&sc->sc_ev_prc127, EVCNT_TYPE_MISC,
3284 	    NULL, xname, "Packets Rx (65-127 bytes)");
3285 	evcnt_attach_dynamic(&sc->sc_ev_prc255, EVCNT_TYPE_MISC,
3286 	    NULL, xname, "Packets Rx (128-255 bytes)");
3287 	evcnt_attach_dynamic(&sc->sc_ev_prc511, EVCNT_TYPE_MISC,
	    NULL, xname, "Packets Rx (256-511 bytes)");
3289 	evcnt_attach_dynamic(&sc->sc_ev_prc1023, EVCNT_TYPE_MISC,
3290 	    NULL, xname, "Packets Rx (512-1023 bytes)");
3291 	evcnt_attach_dynamic(&sc->sc_ev_prc1522, EVCNT_TYPE_MISC,
3292 	    NULL, xname, "Packets Rx (1024-1522 bytes)");
3293 	evcnt_attach_dynamic(&sc->sc_ev_ptc64, EVCNT_TYPE_MISC,
3294 	    NULL, xname, "Packets Tx (64 bytes)");
3295 	evcnt_attach_dynamic(&sc->sc_ev_ptc127, EVCNT_TYPE_MISC,
3296 	    NULL, xname, "Packets Tx (65-127 bytes)");
3297 	evcnt_attach_dynamic(&sc->sc_ev_ptc255, EVCNT_TYPE_MISC,
3298 	    NULL, xname, "Packets Tx (128-255 bytes)");
3299 	evcnt_attach_dynamic(&sc->sc_ev_ptc511, EVCNT_TYPE_MISC,
3300 	    NULL, xname, "Packets Tx (256-511 bytes)");
3301 	evcnt_attach_dynamic(&sc->sc_ev_ptc1023, EVCNT_TYPE_MISC,
3302 	    NULL, xname, "Packets Tx (512-1023 bytes)");
3303 	evcnt_attach_dynamic(&sc->sc_ev_ptc1522, EVCNT_TYPE_MISC,
	    NULL, xname, "Packets Tx (1024-1522 bytes)");
3305 	evcnt_attach_dynamic(&sc->sc_ev_iac, EVCNT_TYPE_MISC,
3306 	    NULL, xname, "Interrupt Assertion");
3307 	evcnt_attach_dynamic(&sc->sc_ev_icrxptc, EVCNT_TYPE_MISC,
3308 	    NULL, xname, "Intr. Cause Rx Pkt Timer Expire");
3309 	evcnt_attach_dynamic(&sc->sc_ev_icrxatc, EVCNT_TYPE_MISC,
3310 	    NULL, xname, "Intr. Cause Rx Abs Timer Expire");
3311 	evcnt_attach_dynamic(&sc->sc_ev_ictxptc, EVCNT_TYPE_MISC,
3312 	    NULL, xname, "Intr. Cause Tx Pkt Timer Expire");
3313 	evcnt_attach_dynamic(&sc->sc_ev_ictxact, EVCNT_TYPE_MISC,
3314 	    NULL, xname, "Intr. Cause Tx Abs Timer Expire");
3315 	evcnt_attach_dynamic(&sc->sc_ev_ictxqec, EVCNT_TYPE_MISC,
3316 	    NULL, xname, "Intr. Cause Tx Queue Empty");
3317 	evcnt_attach_dynamic(&sc->sc_ev_ictxqmtc, EVCNT_TYPE_MISC,
3318 	    NULL, xname, "Intr. Cause Tx Queue Min Thresh");
3319 	evcnt_attach_dynamic(&sc->sc_ev_icrxdmtc, EVCNT_TYPE_MISC,
3320 	    NULL, xname, "Intr. Cause Rx Desc Min Thresh");
3321 	evcnt_attach_dynamic(&sc->sc_ev_icrxoc, EVCNT_TYPE_MISC,
3322 	    NULL, xname, "Interrupt Cause Receiver Overrun");
3323 	if (sc->sc_type >= WM_T_82543) {
3324 		evcnt_attach_dynamic(&sc->sc_ev_tncrs, EVCNT_TYPE_MISC,
3325 		    NULL, xname, "Tx with No CRS");
3326 		evcnt_attach_dynamic(&sc->sc_ev_tsctc, EVCNT_TYPE_MISC,
3327 		    NULL, xname, "TCP Segmentation Context Tx");
3328 		evcnt_attach_dynamic(&sc->sc_ev_tsctfc, EVCNT_TYPE_MISC,
3329 		    NULL, xname, "TCP Segmentation Context Tx Fail");
3330 	}
3331 	if (sc->sc_type >= WM_T_82540) {
3332 		evcnt_attach_dynamic(&sc->sc_ev_mgtprc, EVCNT_TYPE_MISC,
3333 		    NULL, xname, "Management Packets RX");
3334 		evcnt_attach_dynamic(&sc->sc_ev_mgtpdc, EVCNT_TYPE_MISC,
3335 		    NULL, xname, "Management Packets Dropped");
3336 		evcnt_attach_dynamic(&sc->sc_ev_mgtptc, EVCNT_TYPE_MISC,
3337 		    NULL, xname, "Management Packets TX");
3338 	}
3339 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003)) {
3340 		evcnt_attach_dynamic(&sc->sc_ev_b2ogprc, EVCNT_TYPE_MISC,
3341 		    NULL, xname, "BMC2OS Packets received by host");
3342 		evcnt_attach_dynamic(&sc->sc_ev_o2bspc, EVCNT_TYPE_MISC,
3343 		    NULL, xname, "OS2BMC Packets transmitted by host");
3344 		evcnt_attach_dynamic(&sc->sc_ev_b2ospc, EVCNT_TYPE_MISC,
3345 		    NULL, xname, "BMC2OS Packets sent by BMC");
3346 		evcnt_attach_dynamic(&sc->sc_ev_o2bgptc, EVCNT_TYPE_MISC,
3347 		    NULL, xname, "OS2BMC Packets received by BMC");
3348 	}
3349 #endif /* WM_EVENT_COUNTERS */
3350 
3351 	sc->sc_txrx_use_workqueue = false;
3352 
3353 	if (wm_phy_need_linkdown_discard(sc)) {
3354 		DPRINTF(sc, WM_DEBUG_LINK,
3355 		    ("%s: %s: Set linkdown discard flag\n",
3356 			device_xname(sc->sc_dev), __func__));
3357 		wm_set_linkdown_discard(sc);
3358 	}
3359 
3360 	wm_init_sysctls(sc);
3361 
3362 	if (pmf_device_register(self, wm_suspend, wm_resume))
3363 		pmf_class_network_register(self, ifp);
3364 	else
3365 		aprint_error_dev(self, "couldn't establish power handler\n");
3366 
3367 	sc->sc_flags |= WM_F_ATTACHED;
3368 out:
3369 	return;
3370 }
3371 
3372 /* The detach function (ca_detach) */
3373 static int
3374 wm_detach(device_t self, int flags __unused)
3375 {
3376 	struct wm_softc *sc = device_private(self);
3377 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3378 	int i;
3379 
3380 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
3381 		return 0;
3382 
3383 	/* Stop the interface. Callouts are stopped in it. */
3384 	IFNET_LOCK(ifp);
3385 	sc->sc_dying = true;
3386 	wm_stop(ifp, 1);
3387 	IFNET_UNLOCK(ifp);
3388 
3389 	pmf_device_deregister(self);
3390 
3391 	sysctl_teardown(&sc->sc_sysctllog);
3392 
3393 #ifdef WM_EVENT_COUNTERS
3394 	evcnt_detach(&sc->sc_ev_linkintr);
3395 
3396 	if (sc->sc_type >= WM_T_82542_2_1) {
3397 		evcnt_detach(&sc->sc_ev_tx_xoff);
3398 		evcnt_detach(&sc->sc_ev_tx_xon);
3399 		evcnt_detach(&sc->sc_ev_rx_xoff);
3400 		evcnt_detach(&sc->sc_ev_rx_xon);
3401 		evcnt_detach(&sc->sc_ev_rx_macctl);
3402 	}
3403 
3404 	evcnt_detach(&sc->sc_ev_crcerrs);
3405 	evcnt_detach(&sc->sc_ev_symerrc);
3406 
3407 	if (sc->sc_type >= WM_T_82543) {
3408 		evcnt_detach(&sc->sc_ev_algnerrc);
3409 		evcnt_detach(&sc->sc_ev_rxerrc);
3410 		evcnt_detach(&sc->sc_ev_cexterr);
3411 	}
3412 	evcnt_detach(&sc->sc_ev_mpc);
3413 	evcnt_detach(&sc->sc_ev_colc);
3414 	evcnt_detach(&sc->sc_ev_sec);
3415 	evcnt_detach(&sc->sc_ev_rlec);
3416 	evcnt_detach(&sc->sc_ev_scc);
3417 	evcnt_detach(&sc->sc_ev_ecol);
3418 	evcnt_detach(&sc->sc_ev_mcc);
3419 	evcnt_detach(&sc->sc_ev_latecol);
3420 	evcnt_detach(&sc->sc_ev_dc);
3421 	evcnt_detach(&sc->sc_ev_gprc);
3422 	evcnt_detach(&sc->sc_ev_bprc);
3423 	evcnt_detach(&sc->sc_ev_mprc);
3424 	evcnt_detach(&sc->sc_ev_gptc);
3425 	evcnt_detach(&sc->sc_ev_gorc);
3426 	evcnt_detach(&sc->sc_ev_gotc);
3427 	evcnt_detach(&sc->sc_ev_rnbc);
3428 	evcnt_detach(&sc->sc_ev_ruc);
3429 	evcnt_detach(&sc->sc_ev_rfc);
3430 	evcnt_detach(&sc->sc_ev_roc);
3431 	evcnt_detach(&sc->sc_ev_rjc);
3432 	evcnt_detach(&sc->sc_ev_tor);
3433 	evcnt_detach(&sc->sc_ev_tot);
3434 	evcnt_detach(&sc->sc_ev_tpr);
3435 	evcnt_detach(&sc->sc_ev_tpt);
3436 	evcnt_detach(&sc->sc_ev_mptc);
3437 	evcnt_detach(&sc->sc_ev_bptc);
3438 	evcnt_detach(&sc->sc_ev_prc64);
3439 	evcnt_detach(&sc->sc_ev_prc127);
3440 	evcnt_detach(&sc->sc_ev_prc255);
3441 	evcnt_detach(&sc->sc_ev_prc511);
3442 	evcnt_detach(&sc->sc_ev_prc1023);
3443 	evcnt_detach(&sc->sc_ev_prc1522);
3444 	evcnt_detach(&sc->sc_ev_ptc64);
3445 	evcnt_detach(&sc->sc_ev_ptc127);
3446 	evcnt_detach(&sc->sc_ev_ptc255);
3447 	evcnt_detach(&sc->sc_ev_ptc511);
3448 	evcnt_detach(&sc->sc_ev_ptc1023);
3449 	evcnt_detach(&sc->sc_ev_ptc1522);
3450 	evcnt_detach(&sc->sc_ev_iac);
3451 	evcnt_detach(&sc->sc_ev_icrxptc);
3452 	evcnt_detach(&sc->sc_ev_icrxatc);
3453 	evcnt_detach(&sc->sc_ev_ictxptc);
3454 	evcnt_detach(&sc->sc_ev_ictxact);
3455 	evcnt_detach(&sc->sc_ev_ictxqec);
3456 	evcnt_detach(&sc->sc_ev_ictxqmtc);
3457 	evcnt_detach(&sc->sc_ev_icrxdmtc);
3458 	evcnt_detach(&sc->sc_ev_icrxoc);
3459 	if (sc->sc_type >= WM_T_82543) {
3460 		evcnt_detach(&sc->sc_ev_tncrs);
3461 		evcnt_detach(&sc->sc_ev_tsctc);
3462 		evcnt_detach(&sc->sc_ev_tsctfc);
3463 	}
3464 	if (sc->sc_type >= WM_T_82540) {
3465 		evcnt_detach(&sc->sc_ev_mgtprc);
3466 		evcnt_detach(&sc->sc_ev_mgtpdc);
3467 		evcnt_detach(&sc->sc_ev_mgtptc);
3468 	}
3469 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003)) {
3470 		evcnt_detach(&sc->sc_ev_b2ogprc);
3471 		evcnt_detach(&sc->sc_ev_o2bspc);
3472 		evcnt_detach(&sc->sc_ev_b2ospc);
3473 		evcnt_detach(&sc->sc_ev_o2bgptc);
3474 	}
3475 #endif /* WM_EVENT_COUNTERS */
3476 
3477 	rnd_detach_source(&sc->rnd_source);
3478 
3479 	/* Tell the firmware about the release */
3480 	mutex_enter(sc->sc_core_lock);
3481 	wm_release_manageability(sc);
3482 	wm_release_hw_control(sc);
3483 	wm_enable_wakeup(sc);
3484 	mutex_exit(sc->sc_core_lock);
3485 
3486 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
3487 
3488 	ether_ifdetach(ifp);
3489 	if_detach(ifp);
3490 	if_percpuq_destroy(sc->sc_ipq);
3491 
3492 	/* Delete all remaining media. */
3493 	ifmedia_fini(&sc->sc_mii.mii_media);
3494 
3495 	/* Unload RX dmamaps and free mbufs */
3496 	for (i = 0; i < sc->sc_nqueues; i++) {
3497 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
3498 		mutex_enter(rxq->rxq_lock);
3499 		wm_rxdrain(rxq);
3500 		mutex_exit(rxq->rxq_lock);
3501 	}
3502 	/* Must unlock here */
3503 
3504 	/* Disestablish the interrupt handler */
3505 	for (i = 0; i < sc->sc_nintrs; i++) {
3506 		if (sc->sc_ihs[i] != NULL) {
3507 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
3508 			sc->sc_ihs[i] = NULL;
3509 		}
3510 	}
3511 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
3512 
3513 	/* wm_stop() ensured that the workqueues are stopped. */
3514 	workqueue_destroy(sc->sc_queue_wq);
3515 	workqueue_destroy(sc->sc_reset_wq);
3516 
3517 	for (i = 0; i < sc->sc_nqueues; i++)
3518 		softint_disestablish(sc->sc_queue[i].wmq_si);
3519 
3520 	wm_free_txrx_queues(sc);
3521 
3522 	/* Unmap the registers */
3523 	if (sc->sc_ss) {
3524 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
3525 		sc->sc_ss = 0;
3526 	}
3527 	if (sc->sc_ios) {
3528 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
3529 		sc->sc_ios = 0;
3530 	}
3531 	if (sc->sc_flashs) {
3532 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
3533 		sc->sc_flashs = 0;
3534 	}
3535 
3536 	if (sc->sc_core_lock)
3537 		mutex_obj_free(sc->sc_core_lock);
3538 	if (sc->sc_ich_phymtx)
3539 		mutex_obj_free(sc->sc_ich_phymtx);
3540 	if (sc->sc_ich_nvmmtx)
3541 		mutex_obj_free(sc->sc_ich_nvmmtx);
3542 
3543 	return 0;
3544 }
3545 
3546 static bool
3547 wm_suspend(device_t self, const pmf_qual_t *qual)
3548 {
3549 	struct wm_softc *sc = device_private(self);
3550 
3551 	wm_release_manageability(sc);
3552 	wm_release_hw_control(sc);
3553 	wm_enable_wakeup(sc);
3554 
3555 	return true;
3556 }
3557 
3558 static bool
3559 wm_resume(device_t self, const pmf_qual_t *qual)
3560 {
3561 	struct wm_softc *sc = device_private(self);
3562 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3563 	pcireg_t reg;
3564 	char buf[256];
3565 
3566 	reg = CSR_READ(sc, WMREG_WUS);
3567 	if (reg != 0) {
3568 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
3569 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
3570 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
3571 	}
3572 
3573 	if (sc->sc_type >= WM_T_PCH2)
3574 		wm_resume_workarounds_pchlan(sc);
3575 	IFNET_LOCK(ifp);
3576 	if ((ifp->if_flags & IFF_UP) == 0) {
3577 		/* >= PCH_SPT hardware workaround before reset. */
3578 		if (sc->sc_type >= WM_T_PCH_SPT)
3579 			wm_flush_desc_rings(sc);
3580 
3581 		wm_reset(sc);
3582 		/* Non-AMT based hardware can now take control from firmware */
3583 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
3584 			wm_get_hw_control(sc);
3585 		wm_init_manageability(sc);
3586 	} else {
3587 		/*
		 * We called pmf_class_network_register(), so if_init() is
		 * called automatically when IFF_UP is set. wm_reset(),
3590 		 * wm_get_hw_control() and wm_init_manageability() are called
3591 		 * via wm_init().
3592 		 */
3593 	}
3594 	IFNET_UNLOCK(ifp);
3595 
3596 	return true;
3597 }
3598 
3599 /*
3600  * wm_watchdog:
3601  *
3602  *	Watchdog checker.
3603  */
3604 static bool
3605 wm_watchdog(struct ifnet *ifp)
3606 {
3607 	int qid;
3608 	struct wm_softc *sc = ifp->if_softc;
	uint16_t hang_queue = 0; /* Bitmask of hung queues (max 16, on 82576) */
3610 
3611 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
3612 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
3613 
3614 		wm_watchdog_txq(ifp, txq, &hang_queue);
3615 	}
3616 
3617 #ifdef WM_DEBUG
3618 	if (sc->sc_trigger_reset) {
3619 		/* debug operation, no need for atomicity or reliability */
3620 		sc->sc_trigger_reset = 0;
3621 		hang_queue++;
3622 	}
3623 #endif
3624 
3625 	if (hang_queue == 0)
3626 		return true;
3627 
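	/*
	 * Only the first caller to see the hang queues the reset work;
	 * wm_handle_reset_work() clears sc_reset_pending when done.
	 */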
3628 	if (atomic_swap_uint(&sc->sc_reset_pending, 1) == 0)
3629 		workqueue_enqueue(sc->sc_reset_wq, &sc->sc_reset_work, NULL);
3630 
3631 	return false;
3632 }
3633 
3634 /*
3635  * Perform an interface watchdog reset.
3636  */
3637 static void
3638 wm_handle_reset_work(struct work *work, void *arg)
3639 {
3640 	struct wm_softc * const sc = arg;
3641 	struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
3642 
3643 	/* Don't want ioctl operations to happen */
3644 	IFNET_LOCK(ifp);
3645 
	/* Reset the interface. */
3647 	wm_init(ifp);
3648 
3649 	IFNET_UNLOCK(ifp);
3650 
3651 	/*
	 * Some upper layer processing, e.g. ALTQ or a single-CPU
	 * system, still calls ifp->if_start() directly.
3654 	 */
3655 	/* Try to get more packets going. */
3656 	ifp->if_start(ifp);
3657 
3658 	atomic_store_relaxed(&sc->sc_reset_pending, 0);
3659 }
3660 
3661 
3662 static void
3663 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
3664 {
3665 
3666 	mutex_enter(txq->txq_lock);
3667 	if (txq->txq_sending &&
3668 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
3669 		wm_watchdog_txq_locked(ifp, txq, hang);
3670 
3671 	mutex_exit(txq->txq_lock);
3672 }
3673 
3674 static void
3675 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
3676     uint16_t *hang)
3677 {
3678 	struct wm_softc *sc = ifp->if_softc;
3679 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
3680 
3681 	KASSERT(mutex_owned(txq->txq_lock));
3682 
3683 	/*
3684 	 * Since we're using delayed interrupts, sweep up
3685 	 * before we report an error.
3686 	 */
3687 	wm_txeof(txq, UINT_MAX);
3688 
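	/* Still marked as sending after the sweep: the queue is hung. */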
3689 	if (txq->txq_sending)
3690 		*hang |= __BIT(wmq->wmq_id);
3691 
3692 	if (txq->txq_free == WM_NTXDESC(txq)) {
3693 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
3694 		    device_xname(sc->sc_dev));
3695 	} else {
3696 #ifdef WM_DEBUG
3697 		int i, j;
3698 		struct wm_txsoft *txs;
3699 #endif
3700 		log(LOG_ERR,
3701 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3702 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
3703 		    txq->txq_next);
3704 		if_statinc(ifp, if_oerrors);
3705 #ifdef WM_DEBUG
3706 		for (i = txq->txq_sdirty; i != txq->txq_snext;
3707 		     i = WM_NEXTTXS(txq, i)) {
3708 			txs = &txq->txq_soft[i];
3709 			printf("txs %d tx %d -> %d\n",
3710 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
3711 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
3712 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3713 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3714 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
3715 					printf("\t %#08x%08x\n",
3716 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
3717 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
3718 				} else {
3719 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3720 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
3721 					    txq->txq_descs[j].wtx_addr.wa_low);
3722 					printf("\t %#04x%02x%02x%08x\n",
3723 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
3724 					    txq->txq_descs[j].wtx_fields.wtxu_options,
3725 					    txq->txq_descs[j].wtx_fields.wtxu_status,
3726 					    txq->txq_descs[j].wtx_cmdlen);
3727 				}
3728 				if (j == txs->txs_lastdesc)
3729 					break;
3730 			}
3731 		}
3732 #endif
3733 	}
3734 }
3735 
3736 /*
3737  * wm_tick:
3738  *
3739  *	One second timer, used to check link status, sweep up
3740  *	completed transmit jobs, etc.
3741  */
3742 static void
3743 wm_tick(void *arg)
3744 {
3745 	struct wm_softc *sc = arg;
3746 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint64_t crcerrs, algnerrc, symerrc, mpc, colc, sec, rlec, rxerrc,
3748 	    cexterr;
3749 
3750 	mutex_enter(sc->sc_core_lock);
3751 
3752 	if (sc->sc_core_stopping) {
3753 		mutex_exit(sc->sc_core_lock);
3754 		return;
3755 	}
3756 
3757 	crcerrs = CSR_READ(sc, WMREG_CRCERRS);
3758 	symerrc = CSR_READ(sc, WMREG_SYMERRC);
3759 	mpc = CSR_READ(sc, WMREG_MPC);
3760 	colc = CSR_READ(sc, WMREG_COLC);
3761 	sec = CSR_READ(sc, WMREG_SEC);
3762 	rlec = CSR_READ(sc, WMREG_RLEC);
3763 
3764 	WM_EVCNT_ADD(&sc->sc_ev_crcerrs, crcerrs);
3765 	WM_EVCNT_ADD(&sc->sc_ev_symerrc, symerrc);
3766 	WM_EVCNT_ADD(&sc->sc_ev_mpc, mpc);
3767 	WM_EVCNT_ADD(&sc->sc_ev_colc, colc);
3768 	WM_EVCNT_ADD(&sc->sc_ev_sec, sec);
3769 	WM_EVCNT_ADD(&sc->sc_ev_rlec, rlec);
3770 
3771 	if (sc->sc_type >= WM_T_82542_2_1) {
3772 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3773 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3774 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3775 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3776 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3777 	}
3778 	WM_EVCNT_ADD(&sc->sc_ev_scc, CSR_READ(sc, WMREG_SCC));
3779 	WM_EVCNT_ADD(&sc->sc_ev_ecol, CSR_READ(sc, WMREG_ECOL));
3780 	WM_EVCNT_ADD(&sc->sc_ev_mcc, CSR_READ(sc, WMREG_MCC));
3781 	WM_EVCNT_ADD(&sc->sc_ev_latecol, CSR_READ(sc, WMREG_LATECOL));
3782 	WM_EVCNT_ADD(&sc->sc_ev_dc, CSR_READ(sc, WMREG_DC));
3783 	WM_EVCNT_ADD(&sc->sc_ev_gprc, CSR_READ(sc, WMREG_GPRC));
3784 	WM_EVCNT_ADD(&sc->sc_ev_bprc, CSR_READ(sc, WMREG_BPRC));
3785 	WM_EVCNT_ADD(&sc->sc_ev_mprc, CSR_READ(sc, WMREG_MPRC));
3786 	WM_EVCNT_ADD(&sc->sc_ev_gptc, CSR_READ(sc, WMREG_GPTC));
3787 
3788 	WM_EVCNT_ADD(&sc->sc_ev_gorc,
3789 	    CSR_READ(sc, WMREG_GORCL) + CSR_READ(sc, WMREG_GORCH));
3790 	WM_EVCNT_ADD(&sc->sc_ev_gotc,
3791 	    CSR_READ(sc, WMREG_GOTCL) + CSR_READ(sc, WMREG_GOTCH));
3792 
3793 	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
3794 	WM_EVCNT_ADD(&sc->sc_ev_ruc, CSR_READ(sc, WMREG_RUC));
3795 	WM_EVCNT_ADD(&sc->sc_ev_rfc, CSR_READ(sc, WMREG_RFC));
3796 	WM_EVCNT_ADD(&sc->sc_ev_roc, CSR_READ(sc, WMREG_ROC));
3797 	WM_EVCNT_ADD(&sc->sc_ev_rjc, CSR_READ(sc, WMREG_RJC));
3798 
3799 	WM_EVCNT_ADD(&sc->sc_ev_tor,
3800 	    CSR_READ(sc, WMREG_TORL) + CSR_READ(sc, WMREG_TORH));
3801 	WM_EVCNT_ADD(&sc->sc_ev_tot,
3802 	    CSR_READ(sc, WMREG_TOTL) + CSR_READ(sc, WMREG_TOTH));
3803 
3804 	WM_EVCNT_ADD(&sc->sc_ev_tpr, CSR_READ(sc, WMREG_TPR));
3805 	WM_EVCNT_ADD(&sc->sc_ev_tpt, CSR_READ(sc, WMREG_TPT));
3806 	WM_EVCNT_ADD(&sc->sc_ev_mptc, CSR_READ(sc, WMREG_MPTC));
3807 	WM_EVCNT_ADD(&sc->sc_ev_bptc, CSR_READ(sc, WMREG_BPTC));
3808 	WM_EVCNT_ADD(&sc->sc_ev_prc64, CSR_READ(sc, WMREG_PRC64));
3809 	WM_EVCNT_ADD(&sc->sc_ev_prc127, CSR_READ(sc, WMREG_PRC127));
3810 	WM_EVCNT_ADD(&sc->sc_ev_prc255, CSR_READ(sc, WMREG_PRC255));
3811 	WM_EVCNT_ADD(&sc->sc_ev_prc511, CSR_READ(sc, WMREG_PRC511));
3812 	WM_EVCNT_ADD(&sc->sc_ev_prc1023, CSR_READ(sc, WMREG_PRC1023));
3813 	WM_EVCNT_ADD(&sc->sc_ev_prc1522, CSR_READ(sc, WMREG_PRC1522));
3814 	WM_EVCNT_ADD(&sc->sc_ev_ptc64, CSR_READ(sc, WMREG_PTC64));
3815 	WM_EVCNT_ADD(&sc->sc_ev_ptc127, CSR_READ(sc, WMREG_PTC127));
3816 	WM_EVCNT_ADD(&sc->sc_ev_ptc255, CSR_READ(sc, WMREG_PTC255));
3817 	WM_EVCNT_ADD(&sc->sc_ev_ptc511, CSR_READ(sc, WMREG_PTC511));
3818 	WM_EVCNT_ADD(&sc->sc_ev_ptc1023, CSR_READ(sc, WMREG_PTC1023));
3819 	WM_EVCNT_ADD(&sc->sc_ev_ptc1522, CSR_READ(sc, WMREG_PTC1522));
3820 	WM_EVCNT_ADD(&sc->sc_ev_iac, CSR_READ(sc, WMREG_IAC));
3821 	WM_EVCNT_ADD(&sc->sc_ev_icrxptc, CSR_READ(sc, WMREG_ICRXPTC));
3822 	WM_EVCNT_ADD(&sc->sc_ev_icrxatc, CSR_READ(sc, WMREG_ICRXATC));
3823 	WM_EVCNT_ADD(&sc->sc_ev_ictxptc, CSR_READ(sc, WMREG_ICTXPTC));
3824 	WM_EVCNT_ADD(&sc->sc_ev_ictxact, CSR_READ(sc, WMREG_ICTXATC));
3825 	WM_EVCNT_ADD(&sc->sc_ev_ictxqec, CSR_READ(sc, WMREG_ICTXQEC));
3826 	WM_EVCNT_ADD(&sc->sc_ev_ictxqmtc, CSR_READ(sc, WMREG_ICTXQMTC));
3827 	WM_EVCNT_ADD(&sc->sc_ev_icrxdmtc, CSR_READ(sc, WMREG_ICRXDMTC));
3828 	WM_EVCNT_ADD(&sc->sc_ev_icrxoc, CSR_READ(sc, WMREG_ICRXOC));
3829 
3830 	if (sc->sc_type >= WM_T_82543) {
3831 		algnerrc = CSR_READ(sc, WMREG_ALGNERRC);
3832 		rxerrc = CSR_READ(sc, WMREG_RXERRC);
3833 		cexterr = CSR_READ(sc, WMREG_CEXTERR);
3834 		WM_EVCNT_ADD(&sc->sc_ev_algnerrc, algnerrc);
3835 		WM_EVCNT_ADD(&sc->sc_ev_rxerrc, rxerrc);
3836 		WM_EVCNT_ADD(&sc->sc_ev_cexterr, cexterr);
3837 
3838 		WM_EVCNT_ADD(&sc->sc_ev_tncrs, CSR_READ(sc, WMREG_TNCRS));
3839 		WM_EVCNT_ADD(&sc->sc_ev_tsctc, CSR_READ(sc, WMREG_TSCTC));
3840 		WM_EVCNT_ADD(&sc->sc_ev_tsctfc, CSR_READ(sc, WMREG_TSCTFC));
3841 	} else
3842 		algnerrc = rxerrc = cexterr = 0;
3843 
3844 	if (sc->sc_type >= WM_T_82540) {
3845 		WM_EVCNT_ADD(&sc->sc_ev_mgtprc, CSR_READ(sc, WMREG_MGTPRC));
3846 		WM_EVCNT_ADD(&sc->sc_ev_mgtpdc, CSR_READ(sc, WMREG_MGTPDC));
3847 		WM_EVCNT_ADD(&sc->sc_ev_mgtptc, CSR_READ(sc, WMREG_MGTPTC));
3848 	}
3849 	if (((sc->sc_type >= WM_T_I350) && (sc->sc_type < WM_T_80003))
3850 	    && ((CSR_READ(sc, WMREG_MANC) & MANC_EN_BMC2OS) != 0)) {
3851 		WM_EVCNT_ADD(&sc->sc_ev_b2ogprc, CSR_READ(sc, WMREG_B2OGPRC));
3852 		WM_EVCNT_ADD(&sc->sc_ev_o2bspc, CSR_READ(sc, WMREG_O2BSPC));
3853 		WM_EVCNT_ADD(&sc->sc_ev_b2ospc, CSR_READ(sc, WMREG_B2OSPC));
3854 		WM_EVCNT_ADD(&sc->sc_ev_o2bgptc, CSR_READ(sc, WMREG_O2BGPTC));
3855 	}
3856 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
3857 	if_statadd_ref(nsr, if_collisions, colc);
3858 	if_statadd_ref(nsr, if_ierrors,
3859 	    crcerrs + algnerrc + symerrc + rxerrc + sec + cexterr + rlec);
3860 	/*
	 * WMREG_RNBC is incremented when there are no available buffers in
	 * host memory. It is not a count of dropped packets, because the
	 * controller can still receive packets in that case as long as
	 * there is space in the PHY's FIFO.
	 *
	 * To track WMREG_RNBC itself, use a dedicated EVCNT rather than
	 * if_iqdrops.
3868 	 */
3869 	if_statadd_ref(nsr, if_iqdrops, mpc);
3870 	IF_STAT_PUTREF(ifp);
3871 
3872 	if (sc->sc_flags & WM_F_HAS_MII)
3873 		mii_tick(&sc->sc_mii);
3874 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
3875 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3876 		wm_serdes_tick(sc);
3877 	else
3878 		wm_tbi_tick(sc);
3879 
3880 	mutex_exit(sc->sc_core_lock);
3881 
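	/* Reschedule the tick only if no watchdog reset was queued. */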
3882 	if (wm_watchdog(ifp))
3883 		callout_schedule(&sc->sc_tick_ch, hz);
3884 }
3885 
3886 static int
3887 wm_ifflags_cb(struct ethercom *ec)
3888 {
3889 	struct ifnet *ifp = &ec->ec_if;
3890 	struct wm_softc *sc = ifp->if_softc;
3891 	u_short iffchange;
3892 	int ecchange;
3893 	bool needreset = false;
3894 	int rc = 0;
3895 
3896 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
3897 		device_xname(sc->sc_dev), __func__));
3898 
3899 	KASSERT(IFNET_LOCKED(ifp));
3900 
3901 	mutex_enter(sc->sc_core_lock);
3902 
3903 	/*
	 * Check for if_flags changes.
	 * The main purpose is to avoid a link down/up cycle when bpf is
	 * opened: a bare IFF_PROMISC change is handled without a reset.
3906 	 */
3907 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
3908 	sc->sc_if_flags = ifp->if_flags;
3909 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
3910 		needreset = true;
3911 		goto ec;
3912 	}
3913 
3914 	/* iff related updates */
3915 	if ((iffchange & IFF_PROMISC) != 0)
3916 		wm_set_filter(sc);
3917 
3918 	wm_set_vlan(sc);
3919 
3920 ec:
3921 	/* Check for ec_capenable. */
3922 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
3923 	sc->sc_ec_capenable = ec->ec_capenable;
3924 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
3925 		needreset = true;
3926 		goto out;
3927 	}
3928 
3929 	/* ec related updates */
3930 	wm_set_eee(sc);
3931 
3932 out:
3933 	if (needreset)
3934 		rc = ENETRESET;
3935 	mutex_exit(sc->sc_core_lock);
3936 
3937 	return rc;
3938 }
3939 
3940 static bool
3941 wm_phy_need_linkdown_discard(struct wm_softc *sc)
3942 {
3943 
3944 	switch (sc->sc_phytype) {
3945 	case WMPHY_82577: /* ihphy */
3946 	case WMPHY_82578: /* atphy */
3947 	case WMPHY_82579: /* ihphy */
3948 	case WMPHY_I217: /* ihphy */
3949 	case WMPHY_82580: /* ihphy */
3950 	case WMPHY_I350: /* ihphy */
3951 		return true;
3952 	default:
3953 		return false;
3954 	}
3955 }
3956 
3957 static void
3958 wm_set_linkdown_discard(struct wm_softc *sc)
3959 {
3960 
3961 	for (int i = 0; i < sc->sc_nqueues; i++) {
3962 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
3963 
3964 		mutex_enter(txq->txq_lock);
3965 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
3966 		mutex_exit(txq->txq_lock);
3967 	}
3968 }
3969 
3970 static void
3971 wm_clear_linkdown_discard(struct wm_softc *sc)
3972 {
3973 
3974 	for (int i = 0; i < sc->sc_nqueues; i++) {
3975 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
3976 
3977 		mutex_enter(txq->txq_lock);
3978 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
3979 		mutex_exit(txq->txq_lock);
3980 	}
3981 }
3982 
3983 /*
3984  * wm_ioctl:		[ifnet interface function]
3985  *
3986  *	Handle control requests from the operator.
3987  */
3988 static int
3989 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3990 {
3991 	struct wm_softc *sc = ifp->if_softc;
3992 	struct ifreq *ifr = (struct ifreq *)data;
3993 	struct ifaddr *ifa = (struct ifaddr *)data;
3994 	struct sockaddr_dl *sdl;
3995 	int error;
3996 
3997 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
3998 		device_xname(sc->sc_dev), __func__));
3999 
4000 	switch (cmd) {
4001 	case SIOCADDMULTI:
4002 	case SIOCDELMULTI:
4003 		break;
4004 	default:
4005 		KASSERT(IFNET_LOCKED(ifp));
4006 	}
4007 
4008 	switch (cmd) {
4009 	case SIOCSIFMEDIA:
4010 		mutex_enter(sc->sc_core_lock);
4011 		/* Flow control requires full-duplex mode. */
4012 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
4013 		    (ifr->ifr_media & IFM_FDX) == 0)
4014 			ifr->ifr_media &= ~IFM_ETH_FMASK;
4015 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
4016 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
4017 				/* We can do both TXPAUSE and RXPAUSE. */
4018 				ifr->ifr_media |=
4019 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
4020 			}
4021 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
4022 		}
4023 		mutex_exit(sc->sc_core_lock);
4024 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
4025 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
4026 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE) {
4027 				DPRINTF(sc, WM_DEBUG_LINK,
4028 				    ("%s: %s: Set linkdown discard flag\n",
4029 					device_xname(sc->sc_dev), __func__));
4030 				wm_set_linkdown_discard(sc);
4031 			}
4032 		}
4033 		break;
4034 	case SIOCINITIFADDR:
4035 		mutex_enter(sc->sc_core_lock);
4036 		if (ifa->ifa_addr->sa_family == AF_LINK) {
4037 			sdl = satosdl(ifp->if_dl->ifa_addr);
4038 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
4039 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
4040 			/* Unicast address is the first multicast entry */
4041 			wm_set_filter(sc);
4042 			error = 0;
4043 			mutex_exit(sc->sc_core_lock);
4044 			break;
4045 		}
4046 		mutex_exit(sc->sc_core_lock);
4047 		/*FALLTHROUGH*/
4048 	default:
4049 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
4050 			if (((ifp->if_flags & IFF_UP) != 0) &&
4051 			    ((ifr->ifr_flags & IFF_UP) == 0)) {
4052 				DPRINTF(sc, WM_DEBUG_LINK,
4053 				    ("%s: %s: Set linkdown discard flag\n",
4054 					device_xname(sc->sc_dev), __func__));
4055 				wm_set_linkdown_discard(sc);
4056 			}
4057 		}
4058 		const int s = splnet();
4059 		/* It may call wm_start, so unlock here */
4060 		error = ether_ioctl(ifp, cmd, data);
4061 		splx(s);
4062 		if (error != ENETRESET)
4063 			break;
4064 
4065 		error = 0;
4066 
4067 		if (cmd == SIOCSIFCAP)
4068 			error = if_init(ifp);
4069 		else if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
4070 			mutex_enter(sc->sc_core_lock);
4071 			if (sc->sc_if_flags & IFF_RUNNING) {
4072 				/*
4073 				 * Multicast list has changed; set the
4074 				 * hardware filter accordingly.
4075 				 */
4076 				wm_set_filter(sc);
4077 			}
4078 			mutex_exit(sc->sc_core_lock);
4079 		}
4080 		break;
4081 	}
4082 
4083 	return error;
4084 }
4085 
4086 /* MAC address related */
4087 
4088 /*
 * Get the offset of the alternative MAC address and return it.
 * Return 0 if it is absent or an error occurs.
4091  */
4092 static uint16_t
4093 wm_check_alt_mac_addr(struct wm_softc *sc)
4094 {
4095 	uint16_t myea[ETHER_ADDR_LEN / 2];
4096 	uint16_t offset = NVM_OFF_MACADDR;
4097 
4098 	/* Try to read alternative MAC address pointer */
4099 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
4100 		return 0;
4101 
	/* Check whether the pointer is valid. */
4103 	if ((offset == 0x0000) || (offset == 0xffff))
4104 		return 0;
4105 
4106 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
4107 	/*
	 * Check whether the alternative MAC address is valid.
	 * Some cards have a non-0xffff pointer but don't actually use
	 * an alternative MAC address.
	 *
	 * A valid unicast address must have the multicast bit clear.
4113 	 */
4114 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
		if ((myea[0] & 0x01) == 0)
4116 			return offset; /* Found */
4117 
4118 	/* Not found */
4119 	return 0;
4120 }
4121 
4122 static int
4123 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
4124 {
4125 	uint16_t myea[ETHER_ADDR_LEN / 2];
4126 	uint16_t offset = NVM_OFF_MACADDR;
4127 	int do_invert = 0;
4128 
4129 	switch (sc->sc_type) {
4130 	case WM_T_82580:
4131 	case WM_T_I350:
4132 	case WM_T_I354:
4133 		/* EEPROM Top Level Partitioning */
4134 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
4135 		break;
4136 	case WM_T_82571:
4137 	case WM_T_82575:
4138 	case WM_T_82576:
4139 	case WM_T_80003:
4140 	case WM_T_I210:
4141 	case WM_T_I211:
4142 		offset = wm_check_alt_mac_addr(sc);
4143 		if (offset == 0)
4144 			if ((sc->sc_funcid & 0x01) == 1)
4145 				do_invert = 1;
4146 		break;
4147 	default:
4148 		if ((sc->sc_funcid & 0x01) == 1)
4149 			do_invert = 1;
4150 		break;
4151 	}
4152 
4153 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
4154 		goto bad;
4155 
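	/* Each 16-bit NVM word stores two MAC octets, low byte first. */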
4156 	enaddr[0] = myea[0] & 0xff;
4157 	enaddr[1] = myea[0] >> 8;
4158 	enaddr[2] = myea[1] & 0xff;
4159 	enaddr[3] = myea[1] >> 8;
4160 	enaddr[4] = myea[2] & 0xff;
4161 	enaddr[5] = myea[2] >> 8;
4162 
4163 	/*
4164 	 * Toggle the LSB of the MAC address on the second port
4165 	 * of some dual port cards.
4166 	 */
4167 	if (do_invert != 0)
4168 		enaddr[5] ^= 1;
4169 
4170 	return 0;
4171 
4172 bad:
4173 	return -1;
4174 }
4175 
4176 /*
4177  * wm_set_ral:
4178  *
 *	Set an entry in the receive address list.
4180  */
4181 static void
4182 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
4183 {
4184 	uint32_t ral_lo, ral_hi, addrl, addrh;
4185 	uint32_t wlock_mac;
4186 	int rv;
4187 
4188 	if (enaddr != NULL) {
4189 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
4190 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
4191 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
4192 		ral_hi |= RAL_AV;
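		/* Mark the entry as valid. */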
4193 	} else {
4194 		ral_lo = 0;
4195 		ral_hi = 0;
4196 	}
4197 
4198 	switch (sc->sc_type) {
4199 	case WM_T_82542_2_0:
4200 	case WM_T_82542_2_1:
4201 	case WM_T_82543:
4202 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
4203 		CSR_WRITE_FLUSH(sc);
4204 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
4205 		CSR_WRITE_FLUSH(sc);
4206 		break;
4207 	case WM_T_PCH2:
4208 	case WM_T_PCH_LPT:
4209 	case WM_T_PCH_SPT:
4210 	case WM_T_PCH_CNP:
4211 		if (idx == 0) {
4212 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
4213 			CSR_WRITE_FLUSH(sc);
4214 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
4215 			CSR_WRITE_FLUSH(sc);
4216 			return;
4217 		}
4218 		if (sc->sc_type != WM_T_PCH2) {
4219 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
4220 			    FWSM_WLOCK_MAC);
4221 			addrl = WMREG_SHRAL(idx - 1);
4222 			addrh = WMREG_SHRAH(idx - 1);
4223 		} else {
4224 			wlock_mac = 0;
4225 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
4226 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
4227 		}
4228 
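		/*
		 * SHRA entries above the FWSM WLOCK_MAC limit are
		 * firmware-locked, so skip them.
		 */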
4229 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
4230 			rv = wm_get_swflag_ich8lan(sc);
4231 			if (rv != 0)
4232 				return;
4233 			CSR_WRITE(sc, addrl, ral_lo);
4234 			CSR_WRITE_FLUSH(sc);
4235 			CSR_WRITE(sc, addrh, ral_hi);
4236 			CSR_WRITE_FLUSH(sc);
4237 			wm_put_swflag_ich8lan(sc);
4238 		}
4239 
4240 		break;
4241 	default:
4242 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
4243 		CSR_WRITE_FLUSH(sc);
4244 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
4245 		CSR_WRITE_FLUSH(sc);
4246 		break;
4247 	}
4248 }
4249 
4250 /*
4251  * wm_mchash:
4252  *
 *	Compute the hash of the multicast address for the 4096-bit
 *	multicast filter (1024-bit on ICH/PCH variants).
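 *
 *	For example, with sc_mchash_type == 0, the address
 *	01:00:5e:00:00:01 hashes to (0x00 >> 4) | (0x01 << 4) = 0x10,
 *	which wm_set_filter() maps to bit 16 of MTA[0].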
4255  */
4256 static uint32_t
4257 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
4258 {
4259 	static const int lo_shift[4] = { 4, 3, 2, 0 };
4260 	static const int hi_shift[4] = { 4, 5, 6, 8 };
4261 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
4262 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
4263 	uint32_t hash;
4264 
4265 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4266 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4267 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
4268 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
4269 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
4270 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
4271 		return (hash & 0x3ff);
4272 	}
4273 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
4274 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
4275 
4276 	return (hash & 0xfff);
4277 }
4278 
4279 /*
 * wm_rar_count:
 *
 *	Return the number of receive address register (RAR) entries
 *	provided by this chip.
4282  */
4283 static int
4284 wm_rar_count(struct wm_softc *sc)
4285 {
4286 	int size;
4287 
4288 	switch (sc->sc_type) {
4289 	case WM_T_ICH8:
		size = WM_RAL_TABSIZE_ICH8 - 1;
4291 		break;
4292 	case WM_T_ICH9:
4293 	case WM_T_ICH10:
4294 	case WM_T_PCH:
4295 		size = WM_RAL_TABSIZE_ICH8;
4296 		break;
4297 	case WM_T_PCH2:
4298 		size = WM_RAL_TABSIZE_PCH2;
4299 		break;
4300 	case WM_T_PCH_LPT:
4301 	case WM_T_PCH_SPT:
4302 	case WM_T_PCH_CNP:
4303 		size = WM_RAL_TABSIZE_PCH_LPT;
4304 		break;
4305 	case WM_T_82575:
4306 	case WM_T_I210:
4307 	case WM_T_I211:
4308 		size = WM_RAL_TABSIZE_82575;
4309 		break;
4310 	case WM_T_82576:
4311 	case WM_T_82580:
4312 		size = WM_RAL_TABSIZE_82576;
4313 		break;
4314 	case WM_T_I350:
4315 	case WM_T_I354:
4316 		size = WM_RAL_TABSIZE_I350;
4317 		break;
4318 	default:
4319 		size = WM_RAL_TABSIZE;
4320 	}
4321 
4322 	return size;
4323 }
4324 
4325 /*
4326  * wm_set_filter:
4327  *
4328  *	Set up the receive filter.
4329  */
4330 static void
4331 wm_set_filter(struct wm_softc *sc)
4332 {
4333 	struct ethercom *ec = &sc->sc_ethercom;
4334 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4335 	struct ether_multi *enm;
4336 	struct ether_multistep step;
4337 	bus_addr_t mta_reg;
4338 	uint32_t hash, reg, bit;
4339 	int i, size, ralmax, rv;
4340 
4341 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4342 		device_xname(sc->sc_dev), __func__));
4343 	KASSERT(mutex_owned(sc->sc_core_lock));
4344 
4345 	if (sc->sc_type >= WM_T_82544)
4346 		mta_reg = WMREG_CORDOVA_MTA;
4347 	else
4348 		mta_reg = WMREG_MTA;
4349 
4350 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
4351 
4352 	if (sc->sc_if_flags & IFF_BROADCAST)
4353 		sc->sc_rctl |= RCTL_BAM;
4354 	if (sc->sc_if_flags & IFF_PROMISC) {
4355 		sc->sc_rctl |= RCTL_UPE;
4356 		ETHER_LOCK(ec);
4357 		ec->ec_flags |= ETHER_F_ALLMULTI;
4358 		ETHER_UNLOCK(ec);
4359 		goto allmulti;
4360 	}
4361 
4362 	/*
4363 	 * Set the station address in the first RAL slot, and
4364 	 * clear the remaining slots.
4365 	 */
4366 	size = wm_rar_count(sc);
4367 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
4368 
4369 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
4370 	    || (sc->sc_type == WM_T_PCH_CNP)) {
4371 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
4372 		switch (i) {
4373 		case 0:
4374 			/* We can use all entries */
4375 			ralmax = size;
4376 			break;
4377 		case 1:
4378 			/* Only RAR[0] */
4379 			ralmax = 1;
4380 			break;
4381 		default:
4382 			/* Available SHRA + RAR[0] */
4383 			ralmax = i + 1;
4384 		}
4385 	} else
4386 		ralmax = size;
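	/* Clear only the entries we are allowed to write. */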
4387 	for (i = 1; i < size; i++) {
4388 		if (i < ralmax)
4389 			wm_set_ral(sc, NULL, i);
4390 	}
4391 
4392 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4393 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4394 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
4395 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
4396 		size = WM_ICH8_MC_TABSIZE;
4397 	else
4398 		size = WM_MC_TABSIZE;
4399 	/* Clear out the multicast table. */
4400 	for (i = 0; i < size; i++) {
4401 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
4402 		CSR_WRITE_FLUSH(sc);
4403 	}
4404 
4405 	ETHER_LOCK(ec);
4406 	ETHER_FIRST_MULTI(step, ec, enm);
4407 	while (enm != NULL) {
4408 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
4409 			ec->ec_flags |= ETHER_F_ALLMULTI;
4410 			ETHER_UNLOCK(ec);
4411 			/*
4412 			 * We must listen to a range of multicast addresses.
4413 			 * For now, just accept all multicasts, rather than
4414 			 * trying to set only those filter bits needed to match
4415 			 * the range.  (At this time, the only use of address
4416 			 * ranges is for IP multicast routing, for which the
4417 			 * range is big enough to require all bits set.)
4418 			 */
4419 			goto allmulti;
4420 		}
4421 
4422 		hash = wm_mchash(sc, enm->enm_addrlo);
4423 
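		/*
		 * The upper hash bits select a 32-bit MTA word and the
		 * low five bits select the bit within that word.
		 */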
4424 		reg = (hash >> 5);
4425 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4426 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4427 		    || (sc->sc_type == WM_T_PCH2)
4428 		    || (sc->sc_type == WM_T_PCH_LPT)
4429 		    || (sc->sc_type == WM_T_PCH_SPT)
4430 		    || (sc->sc_type == WM_T_PCH_CNP))
4431 			reg &= 0x1f;
4432 		else
4433 			reg &= 0x7f;
4434 		bit = hash & 0x1f;
4435 
4436 		hash = CSR_READ(sc, mta_reg + (reg << 2));
4437 		hash |= 1U << bit;
4438 
4439 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
4440 			/*
4441 			 * 82544 Errata 9: Certain register cannot be written
4442 			 * with particular alignments in PCI-X bus operation
4443 			 * (FCAH, MTA and VFTA).
4444 			 */
4445 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
4446 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4447 			CSR_WRITE_FLUSH(sc);
4448 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
4449 			CSR_WRITE_FLUSH(sc);
4450 		} else {
4451 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4452 			CSR_WRITE_FLUSH(sc);
4453 		}
4454 
4455 		ETHER_NEXT_MULTI(step, enm);
4456 	}
4457 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
4458 	ETHER_UNLOCK(ec);
4459 
4460 	goto setit;
4461 
4462 allmulti:
4463 	sc->sc_rctl |= RCTL_MPE;
4464 
4465 setit:
4466 	if (sc->sc_type >= WM_T_PCH2) {
4467 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4468 		    && (ifp->if_mtu > ETHERMTU))
4469 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
4470 		else
4471 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
4472 		if (rv != 0)
4473 			device_printf(sc->sc_dev,
4474 			    "Failed to do workaround for jumbo frame.\n");
4475 	}
4476 
4477 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
4478 }
4479 
4480 /* Reset and init related */
4481 
4482 static void
4483 wm_set_vlan(struct wm_softc *sc)
4484 {
4485 
4486 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4487 		device_xname(sc->sc_dev), __func__));
4488 
4489 	/* Deal with VLAN enables. */
4490 	if (VLAN_ATTACHED(&sc->sc_ethercom))
4491 		sc->sc_ctrl |= CTRL_VME;
4492 	else
4493 		sc->sc_ctrl &= ~CTRL_VME;
4494 
4495 	/* Write the control registers. */
4496 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4497 }
4498 
4499 static void
4500 wm_set_pcie_completion_timeout(struct wm_softc *sc)
4501 {
4502 	uint32_t gcr;
4503 	pcireg_t ctrl2;
4504 
4505 	gcr = CSR_READ(sc, WMREG_GCR);
4506 
4507 	/* Only take action if timeout value is defaulted to 0 */
4508 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
4509 		goto out;
4510 
4511 	if ((gcr & GCR_CAP_VER2) == 0) {
4512 		gcr |= GCR_CMPL_TMOUT_10MS;
4513 		goto out;
4514 	}
4515 
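	/* Capability version 2: set the timeout via the PCIe DCSR2 register. */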
4516 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
4517 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
4518 	ctrl2 |= WM_PCIE_DCSR2_16MS;
4519 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
4520 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
4521 
4522 out:
4523 	/* Disable completion timeout resend */
4524 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
4525 
4526 	CSR_WRITE(sc, WMREG_GCR, gcr);
4527 }
4528 
4529 void
4530 wm_get_auto_rd_done(struct wm_softc *sc)
4531 {
4532 	int i;
4533 
	/* Wait for eeprom to reload */
4535 	switch (sc->sc_type) {
4536 	case WM_T_82571:
4537 	case WM_T_82572:
4538 	case WM_T_82573:
4539 	case WM_T_82574:
4540 	case WM_T_82583:
4541 	case WM_T_82575:
4542 	case WM_T_82576:
4543 	case WM_T_82580:
4544 	case WM_T_I350:
4545 	case WM_T_I354:
4546 	case WM_T_I210:
4547 	case WM_T_I211:
4548 	case WM_T_80003:
4549 	case WM_T_ICH8:
4550 	case WM_T_ICH9:
4551 		for (i = 0; i < 10; i++) {
4552 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4553 				break;
4554 			delay(1000);
4555 		}
4556 		if (i == 10) {
4557 			log(LOG_ERR, "%s: auto read from eeprom failed to "
4558 			    "complete\n", device_xname(sc->sc_dev));
4559 		}
4560 		break;
4561 	default:
4562 		break;
4563 	}
4564 }
4565 
4566 void
4567 wm_lan_init_done(struct wm_softc *sc)
4568 {
4569 	uint32_t reg = 0;
4570 	int i;
4571 
4572 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4573 		device_xname(sc->sc_dev), __func__));
4574 
4575 	/* Wait for eeprom to reload */
4576 	switch (sc->sc_type) {
4577 	case WM_T_ICH10:
4578 	case WM_T_PCH:
4579 	case WM_T_PCH2:
4580 	case WM_T_PCH_LPT:
4581 	case WM_T_PCH_SPT:
4582 	case WM_T_PCH_CNP:
4583 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4584 			reg = CSR_READ(sc, WMREG_STATUS);
4585 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
4586 				break;
4587 			delay(100);
4588 		}
4589 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4590 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
4591 			    "complete\n", device_xname(sc->sc_dev), __func__);
4592 		}
4593 		break;
4594 	default:
4595 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4596 		    __func__);
4597 		break;
4598 	}
4599 
4600 	reg &= ~STATUS_LAN_INIT_DONE;
4601 	CSR_WRITE(sc, WMREG_STATUS, reg);
4602 }
4603 
4604 void
4605 wm_get_cfg_done(struct wm_softc *sc)
4606 {
4607 	int mask;
4608 	uint32_t reg;
4609 	int i;
4610 
4611 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4612 		device_xname(sc->sc_dev), __func__));
4613 
4614 	/* Wait for eeprom to reload */
4615 	switch (sc->sc_type) {
4616 	case WM_T_82542_2_0:
4617 	case WM_T_82542_2_1:
4618 		/* null */
4619 		break;
4620 	case WM_T_82543:
4621 	case WM_T_82544:
4622 	case WM_T_82540:
4623 	case WM_T_82545:
4624 	case WM_T_82545_3:
4625 	case WM_T_82546:
4626 	case WM_T_82546_3:
4627 	case WM_T_82541:
4628 	case WM_T_82541_2:
4629 	case WM_T_82547:
4630 	case WM_T_82547_2:
4631 	case WM_T_82573:
4632 	case WM_T_82574:
4633 	case WM_T_82583:
4634 		/* generic */
4635 		delay(10*1000);
4636 		break;
4637 	case WM_T_80003:
4638 	case WM_T_82571:
4639 	case WM_T_82572:
4640 	case WM_T_82575:
4641 	case WM_T_82576:
4642 	case WM_T_82580:
4643 	case WM_T_I350:
4644 	case WM_T_I354:
4645 	case WM_T_I210:
4646 	case WM_T_I211:
4647 		if (sc->sc_type == WM_T_82571) {
			/* All functions of the 82571 share CFGDONE_0 */
4649 			mask = EEMNGCTL_CFGDONE_0;
4650 		} else
4651 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
4652 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
4653 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
4654 				break;
4655 			delay(1000);
4656 		}
4657 		if (i >= WM_PHY_CFG_TIMEOUT)
4658 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
4659 				device_xname(sc->sc_dev), __func__));
4660 		break;
4661 	case WM_T_ICH8:
4662 	case WM_T_ICH9:
4663 	case WM_T_ICH10:
4664 	case WM_T_PCH:
4665 	case WM_T_PCH2:
4666 	case WM_T_PCH_LPT:
4667 	case WM_T_PCH_SPT:
4668 	case WM_T_PCH_CNP:
4669 		delay(10*1000);
4670 		if (sc->sc_type >= WM_T_ICH10)
4671 			wm_lan_init_done(sc);
4672 		else
4673 			wm_get_auto_rd_done(sc);
4674 
4675 		/* Clear PHY Reset Asserted bit */
4676 		reg = CSR_READ(sc, WMREG_STATUS);
4677 		if ((reg & STATUS_PHYRA) != 0)
4678 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
4679 		break;
4680 	default:
4681 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4682 		    __func__);
4683 		break;
4684 	}
4685 }
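
/*
 * The EEMNGCTL test above selects a per-port "configuration done"
 * bit: the CFGDONE bits of the LAN functions sit at consecutive bit
 * positions, so the function-0 bit shifted left by sc_funcid names
 * the caller's port (the 82571 being the exception noted above).
 * A minimal sketch of the mapping, assuming that bit layout:
 */
#if 0
	/* funcid 0 tests EEMNGCTL_CFGDONE_0, funcid 1 the next bit up... */
	mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
	done = (CSR_READ(sc, WMREG_EEMNGCTL) & mask) != 0;
#endif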
4686 
4687 int
4688 wm_phy_post_reset(struct wm_softc *sc)
4689 {
4690 	device_t dev = sc->sc_dev;
4691 	uint16_t reg;
4692 	int rv = 0;
4693 
4694 	/* This function is only for ICH8 and newer. */
4695 	if (sc->sc_type < WM_T_ICH8)
4696 		return 0;
4697 
4698 	if (wm_phy_resetisblocked(sc)) {
4699 		/* XXX */
4700 		device_printf(dev, "PHY is blocked\n");
4701 		return -1;
4702 	}
4703 
4704 	/* Allow time for h/w to get to a quiescent state after reset */
4705 	delay(10*1000);
4706 
4707 	/* Perform any necessary post-reset workarounds */
4708 	if (sc->sc_type == WM_T_PCH)
4709 		rv = wm_hv_phy_workarounds_ich8lan(sc);
4710 	else if (sc->sc_type == WM_T_PCH2)
4711 		rv = wm_lv_phy_workarounds_ich8lan(sc);
4712 	if (rv != 0)
4713 		return rv;
4714 
4715 	/* Clear the host wakeup bit after lcd reset */
4716 	if (sc->sc_type >= WM_T_PCH) {
4717 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
4718 		reg &= ~BM_WUC_HOST_WU_BIT;
4719 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
4720 	}
4721 
4722 	/* Configure the LCD with the extended configuration region in NVM */
4723 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
4724 		return rv;
4725 
4726 	/* Configure the LCD with the OEM bits in NVM */
4727 	rv = wm_oem_bits_config_ich8lan(sc, true);
4728 
4729 	if (sc->sc_type == WM_T_PCH2) {
4730 		/* Ungate automatic PHY configuration on non-managed 82579 */
4731 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
4732 			delay(10 * 1000);
4733 			wm_gate_hw_phy_config_ich8lan(sc, false);
4734 		}
4735 		/* Set EEE LPI Update Timer to 200usec */
4736 		rv = sc->phy.acquire(sc);
4737 		if (rv)
4738 			return rv;
4739 		rv = wm_write_emi_reg_locked(dev,
4740 		    I82579_LPI_UPDATE_TIMER, 0x1387);
4741 		sc->phy.release(sc);
4742 	}
4743 
4744 	return rv;
4745 }
4746 
4747 /* Only for PCH and newer */
4748 static int
4749 wm_write_smbus_addr(struct wm_softc *sc)
4750 {
4751 	uint32_t strap, freq;
4752 	uint16_t phy_data;
4753 	int rv;
4754 
4755 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4756 		device_xname(sc->sc_dev), __func__));
4757 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
4758 
4759 	strap = CSR_READ(sc, WMREG_STRAP);
4760 	freq = __SHIFTOUT(strap, STRAP_FREQ);
4761 
4762 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
4763 	if (rv != 0)
4764 		return rv;
4765 
4766 	phy_data &= ~HV_SMB_ADDR_ADDR;
4767 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
4768 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
4769 
4770 	if (sc->sc_phytype == WMPHY_I217) {
4771 		/* Restore SMBus frequency */
4772 		if (freq--) {
4773 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
4774 			    | HV_SMB_ADDR_FREQ_HIGH);
4775 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
4776 			    HV_SMB_ADDR_FREQ_LOW);
4777 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
4778 			    HV_SMB_ADDR_FREQ_HIGH);
4779 		} else
4780 			DPRINTF(sc, WM_DEBUG_INIT,
4781 			    ("%s: %s Unsupported SMB frequency in PHY\n",
4782 				device_xname(sc->sc_dev), __func__));
4783 	}
4784 
4785 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
4786 	    phy_data);
4787 }
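
/*
 * wm_write_smbus_addr() above leans on NetBSD's __SHIFTOUT() and
 * __SHIFTIN() macros: __SHIFTOUT(val, mask) extracts the bit-field
 * selected by a contiguous mask and right-justifies it, and
 * __SHIFTIN(val, mask) shifts a value back into that field.  A
 * minimal sketch with made-up values (the mask and value here are
 * invented, not the real STRAP_SMBUSADDR definition):
 */
#if 0
	uint32_t strap = 0x00005a00;			/* Invented value */
	uint32_t field = __BITS(14, 8);			/* Invented mask */
	uint32_t addr = __SHIFTOUT(strap, field);	/* -> 0x5a */
	uint32_t back = __SHIFTIN(addr, field);		/* -> 0x5a00 */
#endif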
4788 
4789 static int
4790 wm_init_lcd_from_nvm(struct wm_softc *sc)
4791 {
4792 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
4793 	uint16_t phy_page = 0;
4794 	int rv = 0;
4795 
4796 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4797 		device_xname(sc->sc_dev), __func__));
4798 
4799 	switch (sc->sc_type) {
4800 	case WM_T_ICH8:
4801 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
4802 		    || (sc->sc_phytype != WMPHY_IGP_3))
4803 			return 0;
4804 
4805 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
4806 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
4807 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
4808 			break;
4809 		}
4810 		/* FALLTHROUGH */
4811 	case WM_T_PCH:
4812 	case WM_T_PCH2:
4813 	case WM_T_PCH_LPT:
4814 	case WM_T_PCH_SPT:
4815 	case WM_T_PCH_CNP:
4816 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
4817 		break;
4818 	default:
4819 		return 0;
4820 	}
4821 
4822 	if ((rv = sc->phy.acquire(sc)) != 0)
4823 		return rv;
4824 
4825 	reg = CSR_READ(sc, WMREG_FEXTNVM);
4826 	if ((reg & sw_cfg_mask) == 0)
4827 		goto release;
4828 
4829 	/*
4830 	 * Make sure HW does not configure LCD from PHY extended configuration
4831 	 * before SW configuration
4832 	 */
4833 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
4834 	if ((sc->sc_type < WM_T_PCH2)
4835 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
4836 		goto release;
4837 
4838 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
4839 		device_xname(sc->sc_dev), __func__));
4840 	/* word_addr is in DWORD */
4841 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
4842 
4843 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
4844 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
4845 	if (cnf_size == 0)
4846 		goto release;
4847 
4848 	if (((sc->sc_type == WM_T_PCH)
4849 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
4850 	    || (sc->sc_type > WM_T_PCH)) {
4851 		/*
4852 		 * HW configures the SMBus address and LEDs when the OEM and
4853 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
4854 		 * are cleared, SW will configure them instead.
4855 		 */
4856 		DPRINTF(sc, WM_DEBUG_INIT,
4857 		    ("%s: %s: Configure SMBus and LED\n",
4858 			device_xname(sc->sc_dev), __func__));
4859 		if ((rv = wm_write_smbus_addr(sc)) != 0)
4860 			goto release;
4861 
4862 		reg = CSR_READ(sc, WMREG_LEDCTL);
4863 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
4864 		    (uint16_t)reg);
4865 		if (rv != 0)
4866 			goto release;
4867 	}
4868 
4869 	/* Configure LCD from extended configuration region. */
4870 	for (i = 0; i < cnf_size; i++) {
4871 		uint16_t reg_data, reg_addr;
4872 
4873 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
4874 			goto release;
4875 
4876 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
4877 			goto release;
4878 
4879 		if (reg_addr == IGPHY_PAGE_SELECT)
4880 			phy_page = reg_data;
4881 
4882 		reg_addr &= IGPHY_MAXREGADDR;
4883 		reg_addr |= phy_page;
4884 
4885 		KASSERT(sc->phy.writereg_locked != NULL);
4886 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
4887 		    reg_data);
4888 	}
4889 
4890 release:
4891 	sc->phy.release(sc);
4892 	return rv;
4893 }
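
/*
 * Layout assumed by the copy loop in wm_init_lcd_from_nvm() above:
 * the extended configuration region in NVM is cnf_size pairs of
 * 16-bit words, (data, PHY register address), starting at word_addr
 * (EXT_CNF_POINTER counts DWORDs, hence the "<< 1"):
 *
 *	word_addr + 0:	data 0		word_addr + 1:	reg addr 0
 *	word_addr + 2:	data 1		word_addr + 3:	reg addr 1
 *	...
 *
 * Page-select writes are remembered in phy_page so that subsequent
 * addresses can be rebuilt as (phy_page | (addr & IGPHY_MAXREGADDR)).
 */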
4894 
4895 /*
4896  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
4897  *  @sc:       pointer to the HW structure
4898  *  @d0_state: true if entering D0, false if entering D3
4899  *
4900  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits
4901  *  are collectively called OEM bits.  The OEM Write Enable bit and SW Config
4902  *  bit in NVM determine whether HW should configure LPLU and Gbe Disable.
4903  */
4904 int
4905 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
4906 {
4907 	uint32_t mac_reg;
4908 	uint16_t oem_reg;
4909 	int rv;
4910 
4911 	if (sc->sc_type < WM_T_PCH)
4912 		return 0;
4913 
4914 	rv = sc->phy.acquire(sc);
4915 	if (rv != 0)
4916 		return rv;
4917 
4918 	if (sc->sc_type == WM_T_PCH) {
4919 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
4920 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
4921 			goto release;
4922 	}
4923 
4924 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
4925 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
4926 		goto release;
4927 
4928 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
4929 
4930 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
4931 	if (rv != 0)
4932 		goto release;
4933 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
4934 
4935 	if (d0_state) {
4936 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
4937 			oem_reg |= HV_OEM_BITS_A1KDIS;
4938 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
4939 			oem_reg |= HV_OEM_BITS_LPLU;
4940 	} else {
4941 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
4942 		    != 0)
4943 			oem_reg |= HV_OEM_BITS_A1KDIS;
4944 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
4945 		    != 0)
4946 			oem_reg |= HV_OEM_BITS_LPLU;
4947 	}
4948 
4949 	/* Set Restart auto-neg to activate the bits */
4950 	if ((d0_state || (sc->sc_type != WM_T_PCH))
4951 	    && (wm_phy_resetisblocked(sc) == false))
4952 		oem_reg |= HV_OEM_BITS_ANEGNOW;
4953 
4954 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
4955 
4956 release:
4957 	sc->phy.release(sc);
4958 
4959 	return rv;
4960 }
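
/*
 * Summary of the PHY_CTRL -> HV_OEM_BITS mapping implemented above:
 *
 *	d0_state	PHY_CTRL bit(s) tested		OEM bit set
 *	true		GBE_DIS				A1KDIS
 *	true		D0A_LPLU			LPLU
 *	false		GBE_DIS | NOND0A_GBE_DIS	A1KDIS
 *	false		D0A_LPLU | NOND0A_LPLU		LPLU
 *
 * ANEGNOW is additionally set (restarting auto-negotiation) unless
 * the device is a PCH entering D3 or the PHY reset is blocked.
 */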
4961 
4962 /* Init hardware bits */
4963 void
4964 wm_initialize_hardware_bits(struct wm_softc *sc)
4965 {
4966 	uint32_t tarc0, tarc1, reg;
4967 
4968 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4969 		device_xname(sc->sc_dev), __func__));
4970 
4971 	/* For 82571 variant, 80003 and ICHs */
4972 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
4973 	    || (sc->sc_type >= WM_T_80003)) {
4974 
4975 		/* Transmit Descriptor Control 0 */
4976 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
4977 		reg |= TXDCTL_COUNT_DESC;
4978 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
4979 
4980 		/* Transmit Descriptor Control 1 */
4981 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
4982 		reg |= TXDCTL_COUNT_DESC;
4983 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
4984 
4985 		/* TARC0 */
4986 		tarc0 = CSR_READ(sc, WMREG_TARC0);
4987 		switch (sc->sc_type) {
4988 		case WM_T_82571:
4989 		case WM_T_82572:
4990 		case WM_T_82573:
4991 		case WM_T_82574:
4992 		case WM_T_82583:
4993 		case WM_T_80003:
4994 			/* Clear bits 30..27 */
4995 			tarc0 &= ~__BITS(30, 27);
4996 			break;
4997 		default:
4998 			break;
4999 		}
5000 
5001 		switch (sc->sc_type) {
5002 		case WM_T_82571:
5003 		case WM_T_82572:
5004 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
5005 
5006 			tarc1 = CSR_READ(sc, WMREG_TARC1);
5007 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
5008 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
5009 			/* 8257[12] Errata No.7 */
5010 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
5011 
5012 			/* TARC1 bit 28 */
5013 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
5014 				tarc1 &= ~__BIT(28);
5015 			else
5016 				tarc1 |= __BIT(28);
5017 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
5018 
5019 			/*
5020 			 * 8257[12] Errata No.13
5021 			 * Disable Dynamic Clock Gating.
5022 			 */
5023 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
5024 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
5025 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5026 			break;
5027 		case WM_T_82573:
5028 		case WM_T_82574:
5029 		case WM_T_82583:
5030 			if ((sc->sc_type == WM_T_82574)
5031 			    || (sc->sc_type == WM_T_82583))
5032 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
5033 
5034 			/* Extended Device Control */
5035 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
5036 			reg &= ~__BIT(23);	/* Clear bit 23 */
5037 			reg |= __BIT(22);	/* Set bit 22 */
5038 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5039 
5040 			/* Device Control */
5041 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
5042 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5043 
5044 			/* PCIe Control Register */
5045 			/*
5046 			 * 82573 Errata (unknown).
5047 			 *
5048 			 * 82574 Errata 25 and 82583 Errata 12
5049 			 * "Dropped Rx Packets":
5050 			 *   NVM image version 2.1.4 and newer does not have this bug.
5051 			 */
5052 			reg = CSR_READ(sc, WMREG_GCR);
5053 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
5054 			CSR_WRITE(sc, WMREG_GCR, reg);
5055 
5056 			if ((sc->sc_type == WM_T_82574)
5057 			    || (sc->sc_type == WM_T_82583)) {
5058 				/*
5059 				 * Document says this bit must be set for
5060 				 * proper operation.
5061 				 */
5062 				reg = CSR_READ(sc, WMREG_GCR);
5063 				reg |= __BIT(22);
5064 				CSR_WRITE(sc, WMREG_GCR, reg);
5065 
5066 				/*
5067 				 * Apply a workaround for a hardware erratum
5068 				 * documented in the errata sheets: some
5069 				 * error-prone or unreliable PCIe completions
5070 				 * can occur, particularly with ASPM enabled.
5071 				 * Without this fix, the issue can cause Tx
5072 				 * timeouts.
5073 				 */
5074 				reg = CSR_READ(sc, WMREG_GCR2);
5075 				reg |= __BIT(0);
5076 				CSR_WRITE(sc, WMREG_GCR2, reg);
5077 			}
5078 			break;
5079 		case WM_T_80003:
5080 			/* TARC0 */
5081 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
5082 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
5083 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
5084 
5085 			/* TARC1 bit 28 */
5086 			tarc1 = CSR_READ(sc, WMREG_TARC1);
5087 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
5088 				tarc1 &= ~__BIT(28);
5089 			else
5090 				tarc1 |= __BIT(28);
5091 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
5092 			break;
5093 		case WM_T_ICH8:
5094 		case WM_T_ICH9:
5095 		case WM_T_ICH10:
5096 		case WM_T_PCH:
5097 		case WM_T_PCH2:
5098 		case WM_T_PCH_LPT:
5099 		case WM_T_PCH_SPT:
5100 		case WM_T_PCH_CNP:
5101 			/* TARC0 */
5102 			if (sc->sc_type == WM_T_ICH8) {
5103 				/* Set TARC0 bits 29 and 28 */
5104 				tarc0 |= __BITS(29, 28);
5105 			} else if (sc->sc_type == WM_T_PCH_SPT) {
5106 				tarc0 |= __BIT(29);
5107 				/*
5108 				 * Drop bit 28, following Linux.  See the
5109 				 * I218/I219 spec update, "5. Buffer Overrun
5110 				 * While the I219 is Processing DMA
5111 				 * Transactions".
5112 				 */
5113 				tarc0 &= ~__BIT(28);
5114 			}
5115 			/* Set TARC0 bits 23,24,26,27 */
5116 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
5117 
5118 			/* CTRL_EXT */
5119 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
5120 			reg |= __BIT(22);	/* Set bit 22 */
5121 			/*
5122 			 * Enable PHY low-power state when MAC is at D3
5123 			 * w/o WoL
5124 			 */
5125 			if (sc->sc_type >= WM_T_PCH)
5126 				reg |= CTRL_EXT_PHYPDEN;
5127 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5128 
5129 			/* TARC1 */
5130 			tarc1 = CSR_READ(sc, WMREG_TARC1);
5131 			/* bit 28 */
5132 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
5133 				tarc1 &= ~__BIT(28);
5134 			else
5135 				tarc1 |= __BIT(28);
5136 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
5137 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
5138 
5139 			/* Device Status */
5140 			if (sc->sc_type == WM_T_ICH8) {
5141 				reg = CSR_READ(sc, WMREG_STATUS);
5142 				reg &= ~__BIT(31);
5143 				CSR_WRITE(sc, WMREG_STATUS, reg);
5144 
5145 			}
5146 
5147 			/* IOSFPC */
5148 			if (sc->sc_type == WM_T_PCH_SPT) {
5149 				reg = CSR_READ(sc, WMREG_IOSFPC);
5150 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
5151 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
5152 			}
5153 			/*
5154 			 * To work around a descriptor data corruption issue
5155 			 * seen with NFS v2 UDP traffic, simply disable the
5156 			 * NFS filtering capability.
5157 			 */
5158 			reg = CSR_READ(sc, WMREG_RFCTL);
5159 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
5160 			CSR_WRITE(sc, WMREG_RFCTL, reg);
5161 			break;
5162 		default:
5163 			break;
5164 		}
5165 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
5166 
5167 		switch (sc->sc_type) {
5168 		case WM_T_82571:
5169 		case WM_T_82572:
5170 		case WM_T_82573:
5171 		case WM_T_80003:
5172 		case WM_T_ICH8:
5173 			/*
5174 			 * 8257[12] Errata No.52, 82573 Errata No.43 and some
5175 			 * others: set these to avoid the RSS Hash Value bug.
5176 			 */
5177 			reg = CSR_READ(sc, WMREG_RFCTL);
5178 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
5179 			CSR_WRITE(sc, WMREG_RFCTL, reg);
5180 			break;
5181 		case WM_T_82574:
5182 			/* Use extended Rx descriptors. */
5183 			reg = CSR_READ(sc, WMREG_RFCTL);
5184 			reg |= WMREG_RFCTL_EXSTEN;
5185 			CSR_WRITE(sc, WMREG_RFCTL, reg);
5186 			break;
5187 		default:
5188 			break;
5189 		}
5190 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
5191 		/*
5192 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
5193 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
5194 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
5195 		 * Correctly by the Device"
5196 		 *
5197 		 * I354(C2000) Errata AVR53:
5198 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
5199 		 * Hang"
5200 		 */
5201 		reg = CSR_READ(sc, WMREG_RFCTL);
5202 		reg |= WMREG_RFCTL_IPV6EXDIS;
5203 		CSR_WRITE(sc, WMREG_RFCTL, reg);
5204 	}
5205 }
5206 
5207 static uint32_t
5208 wm_rxpbs_adjust_82580(uint32_t val)
5209 {
5210 	uint32_t rv = 0;
5211 
5212 	if (val < __arraycount(wm_82580_rxpbs_table))
5213 		rv = wm_82580_rxpbs_table[val];
5214 
5215 	return rv;
5216 }
5217 
5218 /*
5219  * wm_reset_phy:
5220  *
5221  *	generic PHY reset function.
5222  *	Same as e1000_phy_hw_reset_generic()
5223  */
5224 static int
5225 wm_reset_phy(struct wm_softc *sc)
5226 {
5227 	uint32_t reg;
5228 	int rv;
5229 
5230 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
5231 		device_xname(sc->sc_dev), __func__));
5232 	if (wm_phy_resetisblocked(sc))
5233 		return -1;
5234 
5235 	rv = sc->phy.acquire(sc);
5236 	if (rv) {
5237 		device_printf(sc->sc_dev, "%s: failed to acquire phy: %d\n",
5238 		    __func__, rv);
5239 		return rv;
5240 	}
5241 
5242 	reg = CSR_READ(sc, WMREG_CTRL);
5243 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
5244 	CSR_WRITE_FLUSH(sc);
5245 
5246 	delay(sc->phy.reset_delay_us);
5247 
5248 	CSR_WRITE(sc, WMREG_CTRL, reg);
5249 	CSR_WRITE_FLUSH(sc);
5250 
5251 	delay(150);
5252 
5253 	sc->phy.release(sc);
5254 
5255 	wm_get_cfg_done(sc);
5256 	wm_phy_post_reset(sc);
5257 
5258 	return 0;
5259 }
5260 
5261 /*
5262  * wm_flush_desc_rings - remove all descriptors from the descriptor rings.
5263  *
5264  * On the I219, the descriptor rings must be emptied before resetting the HW
5265  * or before changing the device state to D3 during runtime (runtime PM).
5266  *
5267  * Failure to do this will cause the HW to enter a unit hang state which can
5268  * only be released by a PCI reset of the device.
5269  *
5270  * I219 does not use multiqueue, so it is enough to check sc->sc_queue[0] only.
5271  */
5272 static void
5273 wm_flush_desc_rings(struct wm_softc *sc)
5274 {
5275 	pcireg_t preg;
5276 	uint32_t reg;
5277 	struct wm_txqueue *txq;
5278 	wiseman_txdesc_t *txd;
5279 	int nexttx;
5280 	uint32_t rctl;
5281 
5282 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
5283 
5284 	/* First, disable MULR fix in FEXTNVM11 */
5285 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
5286 	reg |= FEXTNVM11_DIS_MULRFIX;
5287 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
5288 
5289 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
5290 	reg = CSR_READ(sc, WMREG_TDLEN(0));
5291 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
5292 		return;
5293 
5294 	/*
5295 	 * Remove all descriptors from the tx_ring.
5296 	 *
5297 	 * We want to clear all pending descriptors from the TX ring. Zeroing
5298 	 * happens when the HW reads the regs. We assign the ring itself as
5299 	 * the data of the next descriptor.  We don't care about the data
5300 	 * since we are about to reset the HW.
5301 	 */
5302 #ifdef WM_DEBUG
5303 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
5304 #endif
5305 	reg = CSR_READ(sc, WMREG_TCTL);
5306 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
5307 
5308 	txq = &sc->sc_queue[0].wmq_txq;
5309 	nexttx = txq->txq_next;
5310 	txd = &txq->txq_descs[nexttx];
5311 	wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
5312 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
5313 	txd->wtx_fields.wtxu_status = 0;
5314 	txd->wtx_fields.wtxu_options = 0;
5315 	txd->wtx_fields.wtxu_vlan = 0;
5316 
5317 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
5318 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5319 
5320 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
5321 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
5322 	CSR_WRITE_FLUSH(sc);
5323 	delay(250);
5324 
5325 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
5326 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
5327 		return;
5328 
5329 	/*
5330 	 * Mark all descriptors in the RX ring as consumed and disable the
5331 	 * rx ring.
5332 	 */
5333 #ifdef WM_DEBUG
5334 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
5335 #endif
5336 	rctl = CSR_READ(sc, WMREG_RCTL);
5337 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
5338 	CSR_WRITE_FLUSH(sc);
5339 	delay(150);
5340 
5341 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
5342 	/* Zero the lower 14 bits (prefetch and host thresholds) */
5343 	reg &= 0xffffc000;
5344 	/*
5345 	 * Update thresholds: prefetch threshold to 31, host threshold
5346 	 * to 1 and make sure the granularity is "descriptors" and not
5347 	 * "cache lines"
5348 	 */
5349 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
5350 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
5351 
5352 	/* Momentarily enable the RX ring for the changes to take effect */
5353 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
5354 	CSR_WRITE_FLUSH(sc);
5355 	delay(150);
5356 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
5357 }
5358 
5359 /*
5360  * wm_reset:
5361  *
5362  *	Reset the i82542 chip.
5363  */
5364 static void
5365 wm_reset(struct wm_softc *sc)
5366 {
5367 	int phy_reset = 0;
5368 	int i, error = 0;
5369 	uint32_t reg;
5370 	uint16_t kmreg;
5371 	int rv;
5372 
5373 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
5374 		device_xname(sc->sc_dev), __func__));
5375 	KASSERT(sc->sc_type != 0);
5376 
5377 	/*
5378 	 * Allocate on-chip memory according to the MTU size.
5379 	 * The Packet Buffer Allocation register must be written
5380 	 * before the chip is reset.
5381 	 */
5382 	switch (sc->sc_type) {
5383 	case WM_T_82547:
5384 	case WM_T_82547_2:
5385 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
5386 		    PBA_22K : PBA_30K;
5387 		for (i = 0; i < sc->sc_nqueues; i++) {
5388 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5389 			txq->txq_fifo_head = 0;
5390 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
5391 			txq->txq_fifo_size =
5392 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
5393 			txq->txq_fifo_stall = 0;
5394 		}
5395 		break;
5396 	case WM_T_82571:
5397 	case WM_T_82572:
5398 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
5399 	case WM_T_80003:
5400 		sc->sc_pba = PBA_32K;
5401 		break;
5402 	case WM_T_82573:
5403 		sc->sc_pba = PBA_12K;
5404 		break;
5405 	case WM_T_82574:
5406 	case WM_T_82583:
5407 		sc->sc_pba = PBA_20K;
5408 		break;
5409 	case WM_T_82576:
5410 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
5411 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
5412 		break;
5413 	case WM_T_82580:
5414 	case WM_T_I350:
5415 	case WM_T_I354:
5416 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
5417 		break;
5418 	case WM_T_I210:
5419 	case WM_T_I211:
5420 		sc->sc_pba = PBA_34K;
5421 		break;
5422 	case WM_T_ICH8:
5423 		/* Workaround for a bit corruption issue in FIFO memory */
5424 		sc->sc_pba = PBA_8K;
5425 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
5426 		break;
5427 	case WM_T_ICH9:
5428 	case WM_T_ICH10:
5429 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
5430 		    PBA_14K : PBA_10K;
5431 		break;
5432 	case WM_T_PCH:
5433 	case WM_T_PCH2:	/* XXX 14K? */
5434 	case WM_T_PCH_LPT:
5435 	case WM_T_PCH_SPT:
5436 	case WM_T_PCH_CNP:
5437 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
5438 		    PBA_12K : PBA_26K;
5439 		break;
5440 	default:
5441 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
5442 		    PBA_40K : PBA_48K;
5443 		break;
5444 	}
5445 	/*
5446 	 * Only old or non-multiqueue devices have the PBA register
5447 	 * XXX Need special handling for 82575.
5448 	 */
5449 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
5450 	    || (sc->sc_type == WM_T_82575))
5451 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
5452 
5453 	/* Prevent the PCI-E bus from sticking */
5454 	if (sc->sc_flags & WM_F_PCIE) {
5455 		int timeout = 800;
5456 
5457 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
5458 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5459 
5460 		while (timeout--) {
5461 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
5462 			    == 0)
5463 				break;
5464 			delay(100);
5465 		}
5466 		if (timeout == 0)
5467 			device_printf(sc->sc_dev,
5468 			    "failed to disable bus mastering\n");
5469 	}
5470 
5471 	/* Set the completion timeout for interface */
5472 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
5473 	    || (sc->sc_type == WM_T_82580)
5474 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
5475 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
5476 		wm_set_pcie_completion_timeout(sc);
5477 
5478 	/* Clear interrupt */
5479 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5480 	if (wm_is_using_msix(sc)) {
5481 		if (sc->sc_type != WM_T_82574) {
5482 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5483 			CSR_WRITE(sc, WMREG_EIAC, 0);
5484 		} else
5485 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5486 	}
5487 
5488 	/* Stop the transmit and receive processes. */
5489 	CSR_WRITE(sc, WMREG_RCTL, 0);
5490 	sc->sc_rctl &= ~RCTL_EN;
5491 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
5492 	CSR_WRITE_FLUSH(sc);
5493 
5494 	/* XXX set_tbi_sbp_82543() */
5495 
5496 	delay(10*1000);
5497 
5498 	/* Must acquire the MDIO ownership before MAC reset */
5499 	switch (sc->sc_type) {
5500 	case WM_T_82573:
5501 	case WM_T_82574:
5502 	case WM_T_82583:
5503 		error = wm_get_hw_semaphore_82573(sc);
5504 		break;
5505 	default:
5506 		break;
5507 	}
5508 
5509 	/*
5510 	 * 82541 Errata 29? & 82547 Errata 28?
5511 	 * See also the description about PHY_RST bit in CTRL register
5512 	 * in 8254x_GBe_SDM.pdf.
5513 	 */
5514 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
5515 		CSR_WRITE(sc, WMREG_CTRL,
5516 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
5517 		CSR_WRITE_FLUSH(sc);
5518 		delay(5000);
5519 	}
5520 
5521 	switch (sc->sc_type) {
5522 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
5523 	case WM_T_82541:
5524 	case WM_T_82541_2:
5525 	case WM_T_82547:
5526 	case WM_T_82547_2:
5527 		/*
5528 		 * On some chipsets, a reset through a memory-mapped write
5529 		 * cycle can cause the chip to reset before completing the
5530 		 * write cycle.  This causes major headaches that can be avoided
5531 		 * by issuing the reset via indirect register writes through
5532 		 * I/O space.
5533 		 *
5534 		 * So, if we successfully mapped the I/O BAR at attach time,
5535 		 * use that. Otherwise, try our luck with a memory-mapped
5536 		 * reset.
5537 		 */
5538 		if (sc->sc_flags & WM_F_IOH_VALID)
5539 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
5540 		else
5541 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
5542 		break;
5543 	case WM_T_82545_3:
5544 	case WM_T_82546_3:
5545 		/* Use the shadow control register on these chips. */
5546 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
5547 		break;
5548 	case WM_T_80003:
5549 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
5550 		if (sc->phy.acquire(sc) != 0)
5551 			break;
5552 		CSR_WRITE(sc, WMREG_CTRL, reg);
5553 		sc->phy.release(sc);
5554 		break;
5555 	case WM_T_ICH8:
5556 	case WM_T_ICH9:
5557 	case WM_T_ICH10:
5558 	case WM_T_PCH:
5559 	case WM_T_PCH2:
5560 	case WM_T_PCH_LPT:
5561 	case WM_T_PCH_SPT:
5562 	case WM_T_PCH_CNP:
5563 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
5564 		if (wm_phy_resetisblocked(sc) == false) {
5565 			/*
5566 			 * Gate automatic PHY configuration by hardware on
5567 			 * non-managed 82579
5568 			 */
5569 			if ((sc->sc_type == WM_T_PCH2)
5570 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
5571 				== 0))
5572 				wm_gate_hw_phy_config_ich8lan(sc, true);
5573 
5574 			reg |= CTRL_PHY_RESET;
5575 			phy_reset = 1;
5576 		} else
5577 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
5578 		if (sc->phy.acquire(sc) != 0)
5579 			break;
5580 		CSR_WRITE(sc, WMREG_CTRL, reg);
5581 		/* Don't insert a completion barrier during reset */
5582 		delay(20*1000);
5583 		/*
5584 		 * The EXTCNFCTR_MDIO_SW_OWNERSHIP bit is cleared by the reset,
5585 		 * so don't use sc->phy.release(sc). Release sc_ich_phymtx
5586 		 * only. See also wm_get_swflag_ich8lan().
5587 		 */
5588 		mutex_exit(sc->sc_ich_phymtx);
5589 		break;
5590 	case WM_T_82580:
5591 	case WM_T_I350:
5592 	case WM_T_I354:
5593 	case WM_T_I210:
5594 	case WM_T_I211:
5595 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
5596 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
5597 			CSR_WRITE_FLUSH(sc);
5598 		delay(5000);
5599 		break;
5600 	case WM_T_82542_2_0:
5601 	case WM_T_82542_2_1:
5602 	case WM_T_82543:
5603 	case WM_T_82540:
5604 	case WM_T_82545:
5605 	case WM_T_82546:
5606 	case WM_T_82571:
5607 	case WM_T_82572:
5608 	case WM_T_82573:
5609 	case WM_T_82574:
5610 	case WM_T_82575:
5611 	case WM_T_82576:
5612 	case WM_T_82583:
5613 	default:
5614 		/* Everything else can safely use the documented method. */
5615 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
5616 		break;
5617 	}
5618 
5619 	/* Must release the MDIO ownership after MAC reset */
5620 	switch (sc->sc_type) {
5621 	case WM_T_82573:
5622 	case WM_T_82574:
5623 	case WM_T_82583:
5624 		if (error == 0)
5625 			wm_put_hw_semaphore_82573(sc);
5626 		break;
5627 	default:
5628 		break;
5629 	}
5630 
5631 	/* Set Phy Config Counter to 50msec */
5632 	if (sc->sc_type == WM_T_PCH2) {
5633 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
5634 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
5635 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
5636 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
5637 	}
5638 
5639 	if (phy_reset != 0)
5640 		wm_get_cfg_done(sc);
5641 
5642 	/* Reload EEPROM */
5643 	switch (sc->sc_type) {
5644 	case WM_T_82542_2_0:
5645 	case WM_T_82542_2_1:
5646 	case WM_T_82543:
5647 	case WM_T_82544:
5648 		delay(10);
5649 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5650 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5651 		CSR_WRITE_FLUSH(sc);
5652 		delay(2000);
5653 		break;
5654 	case WM_T_82540:
5655 	case WM_T_82545:
5656 	case WM_T_82545_3:
5657 	case WM_T_82546:
5658 	case WM_T_82546_3:
5659 		delay(5*1000);
5660 		/* XXX Disable HW ARPs on ASF enabled adapters */
5661 		break;
5662 	case WM_T_82541:
5663 	case WM_T_82541_2:
5664 	case WM_T_82547:
5665 	case WM_T_82547_2:
5666 		delay(20000);
5667 		/* XXX Disable HW ARPs on ASF enabled adapters */
5668 		break;
5669 	case WM_T_82571:
5670 	case WM_T_82572:
5671 	case WM_T_82573:
5672 	case WM_T_82574:
5673 	case WM_T_82583:
5674 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
5675 			delay(10);
5676 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5677 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5678 			CSR_WRITE_FLUSH(sc);
5679 		}
5680 		/* check EECD_EE_AUTORD */
5681 		wm_get_auto_rd_done(sc);
5682 		/*
5683 		 * PHY configuration from the NVM starts just after EECD_AUTO_RD
5684 		 * is set.
5685 		 */
5686 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
5687 		    || (sc->sc_type == WM_T_82583))
5688 			delay(25*1000);
5689 		break;
5690 	case WM_T_82575:
5691 	case WM_T_82576:
5692 	case WM_T_82580:
5693 	case WM_T_I350:
5694 	case WM_T_I354:
5695 	case WM_T_I210:
5696 	case WM_T_I211:
5697 	case WM_T_80003:
5698 		/* check EECD_EE_AUTORD */
5699 		wm_get_auto_rd_done(sc);
5700 		break;
5701 	case WM_T_ICH8:
5702 	case WM_T_ICH9:
5703 	case WM_T_ICH10:
5704 	case WM_T_PCH:
5705 	case WM_T_PCH2:
5706 	case WM_T_PCH_LPT:
5707 	case WM_T_PCH_SPT:
5708 	case WM_T_PCH_CNP:
5709 		break;
5710 	default:
5711 		panic("%s: unknown type\n", __func__);
5712 	}
5713 
5714 	/* Check whether EEPROM is present or not */
5715 	switch (sc->sc_type) {
5716 	case WM_T_82575:
5717 	case WM_T_82576:
5718 	case WM_T_82580:
5719 	case WM_T_I350:
5720 	case WM_T_I354:
5721 	case WM_T_ICH8:
5722 	case WM_T_ICH9:
5723 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
5724 			/* Not found */
5725 			sc->sc_flags |= WM_F_EEPROM_INVALID;
5726 			if (sc->sc_type == WM_T_82575)
5727 				wm_reset_init_script_82575(sc);
5728 		}
5729 		break;
5730 	default:
5731 		break;
5732 	}
5733 
5734 	if (phy_reset != 0)
5735 		wm_phy_post_reset(sc);
5736 
5737 	if ((sc->sc_type == WM_T_82580)
5738 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
5739 		/* Clear global device reset status bit */
5740 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
5741 	}
5742 
5743 	/* Clear any pending interrupt events. */
5744 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5745 	reg = CSR_READ(sc, WMREG_ICR);
5746 	if (wm_is_using_msix(sc)) {
5747 		if (sc->sc_type != WM_T_82574) {
5748 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5749 			CSR_WRITE(sc, WMREG_EIAC, 0);
5750 		} else
5751 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5752 	}
5753 
5754 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5755 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5756 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
5757 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
5758 		reg = CSR_READ(sc, WMREG_KABGTXD);
5759 		reg |= KABGTXD_BGSQLBIAS;
5760 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
5761 	}
5762 
5763 	/* Reload sc_ctrl */
5764 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5765 
5766 	wm_set_eee(sc);
5767 
5768 	/*
5769 	 * For PCH, this write will make sure that any noise will be detected
5770 	 * as a CRC error and be dropped rather than show up as a bad packet
5771 	 * to the DMA engine
5772 	 */
5773 	if (sc->sc_type == WM_T_PCH)
5774 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
5775 
5776 	if (sc->sc_type >= WM_T_82544)
5777 		CSR_WRITE(sc, WMREG_WUC, 0);
5778 
5779 	if (sc->sc_type < WM_T_82575)
5780 		wm_disable_aspm(sc); /* Workaround for some chips */
5781 
5782 	wm_reset_mdicnfg_82580(sc);
5783 
5784 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
5785 		wm_pll_workaround_i210(sc);
5786 
5787 	if (sc->sc_type == WM_T_80003) {
5788 		/* Default to TRUE to enable the MDIC W/A */
5789 		sc->sc_flags |= WM_F_80003_MDIC_WA;
5790 
5791 		rv = wm_kmrn_readreg(sc,
5792 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
5793 		if (rv == 0) {
5794 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
5795 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
5796 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
5797 			else
5798 				sc->sc_flags |= WM_F_80003_MDIC_WA;
5799 		}
5800 	}
5801 }
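
/*
 * Recap of the reset ordering above: size the packet buffer (PBA
 * must be written before reset), quiesce bus mastering, interrupts
 * and the TX/RX units, take MDIO ownership where required, assert
 * CTRL_RST through the chip-appropriate path (I/O space, the shadow
 * control register, or MMIO), wait for the EEPROM/NVM auto-read,
 * and finally re-apply the post-reset PHY and workaround state.
 */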
5802 
5803 /*
5804  * wm_add_rxbuf:
5805  *
5806  *	Add a receive buffer to the indicated descriptor.
5807  */
5808 static int
5809 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
5810 {
5811 	struct wm_softc *sc = rxq->rxq_sc;
5812 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
5813 	struct mbuf *m;
5814 	int error;
5815 
5816 	KASSERT(mutex_owned(rxq->rxq_lock));
5817 
5818 	MGETHDR(m, M_DONTWAIT, MT_DATA);
5819 	if (m == NULL)
5820 		return ENOBUFS;
5821 
5822 	MCLGET(m, M_DONTWAIT);
5823 	if ((m->m_flags & M_EXT) == 0) {
5824 		m_freem(m);
5825 		return ENOBUFS;
5826 	}
5827 
5828 	if (rxs->rxs_mbuf != NULL)
5829 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5830 
5831 	rxs->rxs_mbuf = m;
5832 
5833 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5834 	/*
5835 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
5836 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
5837 	 */
5838 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
5839 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
5840 	if (error) {
5841 		/* XXX XXX XXX */
5842 		aprint_error_dev(sc->sc_dev,
5843 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
5844 		panic("wm_add_rxbuf");
5845 	}
5846 
5847 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5848 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5849 
5850 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5851 		if ((sc->sc_rctl & RCTL_EN) != 0)
5852 			wm_init_rxdesc(rxq, idx);
5853 	} else
5854 		wm_init_rxdesc(rxq, idx);
5855 
5856 	return 0;
5857 }
5858 
5859 /*
5860  * wm_rxdrain:
5861  *
5862  *	Drain the receive queue.
5863  */
5864 static void
5865 wm_rxdrain(struct wm_rxqueue *rxq)
5866 {
5867 	struct wm_softc *sc = rxq->rxq_sc;
5868 	struct wm_rxsoft *rxs;
5869 	int i;
5870 
5871 	KASSERT(mutex_owned(rxq->rxq_lock));
5872 
5873 	for (i = 0; i < WM_NRXDESC; i++) {
5874 		rxs = &rxq->rxq_soft[i];
5875 		if (rxs->rxs_mbuf != NULL) {
5876 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5877 			m_freem(rxs->rxs_mbuf);
5878 			rxs->rxs_mbuf = NULL;
5879 		}
5880 	}
5881 }
5882 
5883 /*
5884  * Set up registers for RSS.
5885  *
5886  * XXX No VMDq support yet.
5887  */
5888 static void
5889 wm_init_rss(struct wm_softc *sc)
5890 {
5891 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
5892 	int i;
5893 
5894 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
5895 
5896 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
5897 		unsigned int qid, reta_ent;
5898 
5899 		qid  = i % sc->sc_nqueues;
5900 		switch (sc->sc_type) {
5901 		case WM_T_82574:
5902 			reta_ent = __SHIFTIN(qid,
5903 			    RETA_ENT_QINDEX_MASK_82574);
5904 			break;
5905 		case WM_T_82575:
5906 			reta_ent = __SHIFTIN(qid,
5907 			    RETA_ENT_QINDEX1_MASK_82575);
5908 			break;
5909 		default:
5910 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
5911 			break;
5912 		}
5913 
5914 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
5915 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
5916 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
5917 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
5918 	}
5919 
5920 	rss_getkey((uint8_t *)rss_key);
5921 	for (i = 0; i < RSSRK_NUM_REGS; i++)
5922 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
5923 
5924 	if (sc->sc_type == WM_T_82574)
5925 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
5926 	else
5927 		mrqc = MRQC_ENABLE_RSS_MQ;
5928 
5929 	/*
5930 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
5931 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
5932 	 */
5933 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
5934 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
5935 #if 0
5936 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
5937 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
5938 #endif
5939 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
5940 
5941 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
5942 }
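
/*
 * With RETA, RSSRK and MRQC programmed as above, receive queue
 * selection conceptually works as sketched below.  This is only an
 * illustration of what the hardware does; toeplitz_hash() and
 * reta_table[] are invented names, not driver symbols:
 */
#if 0
	/* Hash the flow tuple with the key written to the RSSRK regs. */
	uint32_t hash = toeplitz_hash(rss_key, flow_tuple);
	/* The hash LSBs index the redirection table... */
	unsigned int ent = hash % RETA_NUM_ENTRIES;
	/* ...whose entry names the RX queue (i % sc_nqueues above). */
	unsigned int rxq = reta_table[ent];
#endif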
5943 
5944 /*
5945  * Adjust the TX and RX queue numbers which the system actually uses.
5946  *
5947  * The numbers are affected by the following parameters:
5948  *     - The number of hardware queues
5949  *     - The number of MSI-X vectors (= "nvectors" argument)
5950  *     - ncpu
5951  */
5952 static void
5953 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
5954 {
5955 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
5956 
5957 	if (nvectors < 2) {
5958 		sc->sc_nqueues = 1;
5959 		return;
5960 	}
5961 
5962 	switch (sc->sc_type) {
5963 	case WM_T_82572:
5964 		hw_ntxqueues = 2;
5965 		hw_nrxqueues = 2;
5966 		break;
5967 	case WM_T_82574:
5968 		hw_ntxqueues = 2;
5969 		hw_nrxqueues = 2;
5970 		break;
5971 	case WM_T_82575:
5972 		hw_ntxqueues = 4;
5973 		hw_nrxqueues = 4;
5974 		break;
5975 	case WM_T_82576:
5976 		hw_ntxqueues = 16;
5977 		hw_nrxqueues = 16;
5978 		break;
5979 	case WM_T_82580:
5980 	case WM_T_I350:
5981 	case WM_T_I354:
5982 		hw_ntxqueues = 8;
5983 		hw_nrxqueues = 8;
5984 		break;
5985 	case WM_T_I210:
5986 		hw_ntxqueues = 4;
5987 		hw_nrxqueues = 4;
5988 		break;
5989 	case WM_T_I211:
5990 		hw_ntxqueues = 2;
5991 		hw_nrxqueues = 2;
5992 		break;
5993 		/*
5994 		 * The Ethernet controllers below do not support MSI-X,
5995 		 * so this driver doesn't let them use multiqueue:
5996 		 *     - WM_T_80003
5997 		 *     - WM_T_ICH8
5998 		 *     - WM_T_ICH9
5999 		 *     - WM_T_ICH10
6000 		 *     - WM_T_PCH
6001 		 *     - WM_T_PCH2
6002 		 *     - WM_T_PCH_LPT
6003 		 */
6004 	default:
6005 		hw_ntxqueues = 1;
6006 		hw_nrxqueues = 1;
6007 		break;
6008 	}
6009 
6010 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
6011 
6012 	/*
6013 	 * Since more queues than MSI-X vectors cannot improve scaling, we
6014 	 * limit the number of queues actually used.
6015 	 */
6016 	if (nvectors < hw_nqueues + 1)
6017 		sc->sc_nqueues = nvectors - 1;
6018 	else
6019 		sc->sc_nqueues = hw_nqueues;
6020 
6021 	/*
6022 	 * Since more queues than CPUs cannot improve scaling, we limit
6023 	 * the number of queues actually used.
6024 	 */
6025 	if (ncpu < sc->sc_nqueues)
6026 		sc->sc_nqueues = ncpu;
6027 }
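
/*
 * A worked example of the clamping above: an 82576 (16 hardware TX
 * and RX queues) probed with nvectors = 5 and ncpu = 8 gives
 * hw_nqueues = min(16, 16) = 16; since 5 < 16 + 1, sc_nqueues is
 * first cut to nvectors - 1 = 4 (one vector is kept for the link
 * interrupt), and 4 <= ncpu leaves the final sc_nqueues at 4.
 */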
6028 
6029 static inline bool
6030 wm_is_using_msix(struct wm_softc *sc)
6031 {
6032 
6033 	return (sc->sc_nintrs > 1);
6034 }
6035 
6036 static inline bool
6037 wm_is_using_multiqueue(struct wm_softc *sc)
6038 {
6039 
6040 	return (sc->sc_nqueues > 1);
6041 }
6042 
6043 static int
6044 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
6045 {
6046 	struct wm_queue *wmq = &sc->sc_queue[qidx];
6047 
6048 	wmq->wmq_id = qidx;
6049 	wmq->wmq_intr_idx = intr_idx;
6050 	wmq->wmq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
6051 	    wm_handle_queue, wmq);
6052 	if (wmq->wmq_si != NULL)
6053 		return 0;
6054 
6055 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
6056 	    wmq->wmq_id);
6057 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
6058 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
6059 	return ENOMEM;
6060 }
6061 
6062 /*
6063  * Both single interrupt MSI and INTx can use this function.
6064  */
6065 static int
6066 wm_setup_legacy(struct wm_softc *sc)
6067 {
6068 	pci_chipset_tag_t pc = sc->sc_pc;
6069 	const char *intrstr = NULL;
6070 	char intrbuf[PCI_INTRSTR_LEN];
6071 	int error;
6072 
6073 	error = wm_alloc_txrx_queues(sc);
6074 	if (error) {
6075 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
6076 		    error);
6077 		return ENOMEM;
6078 	}
6079 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
6080 	    sizeof(intrbuf));
6081 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
6082 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
6083 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
6084 	if (sc->sc_ihs[0] == NULL) {
6085 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
6086 		    (pci_intr_type(pc, sc->sc_intrs[0])
6087 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6088 		return ENOMEM;
6089 	}
6090 
6091 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
6092 	sc->sc_nintrs = 1;
6093 
6094 	return wm_softint_establish_queue(sc, 0, 0);
6095 }
6096 
6097 static int
6098 wm_setup_msix(struct wm_softc *sc)
6099 {
6100 	void *vih;
6101 	kcpuset_t *affinity;
6102 	int qidx, error, intr_idx, txrx_established;
6103 	pci_chipset_tag_t pc = sc->sc_pc;
6104 	const char *intrstr = NULL;
6105 	char intrbuf[PCI_INTRSTR_LEN];
6106 	char intr_xname[INTRDEVNAMEBUF];
6107 
6108 	if (sc->sc_nqueues < ncpu) {
6109 		/*
6110 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
6111 		 * interrupts starts from CPU#1.
6112 		 */
6113 		sc->sc_affinity_offset = 1;
6114 	} else {
6115 		/*
6116 		 * In this case, this device uses all CPUs, so for readability
6117 		 * we match the affinity cpu_index to the MSI-X vector number.
6118 		 */
6119 		sc->sc_affinity_offset = 0;
6120 	}
6121 
6122 	error = wm_alloc_txrx_queues(sc);
6123 	if (error) {
6124 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
6125 		    error);
6126 		return ENOMEM;
6127 	}
6128 
6129 	kcpuset_create(&affinity, false);
6130 	intr_idx = 0;
6131 
6132 	/*
6133 	 * TX and RX
6134 	 */
6135 	txrx_established = 0;
6136 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6137 		struct wm_queue *wmq = &sc->sc_queue[qidx];
6138 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
6139 
6140 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
6141 		    sizeof(intrbuf));
6142 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
6143 		    PCI_INTR_MPSAFE, true);
6144 		memset(intr_xname, 0, sizeof(intr_xname));
6145 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
6146 		    device_xname(sc->sc_dev), qidx);
6147 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
6148 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
6149 		if (vih == NULL) {
6150 			aprint_error_dev(sc->sc_dev,
6151 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
6152 			    intrstr ? " at " : "",
6153 			    intrstr ? intrstr : "");
6154 
6155 			goto fail;
6156 		}
6157 		kcpuset_zero(affinity);
6158 		/* Round-robin affinity */
6159 		kcpuset_set(affinity, affinity_to);
6160 		error = interrupt_distribute(vih, affinity, NULL);
6161 		if (error == 0) {
6162 			aprint_normal_dev(sc->sc_dev,
6163 			    "for TX and RX interrupting at %s affinity to %u\n",
6164 			    intrstr, affinity_to);
6165 		} else {
6166 			aprint_normal_dev(sc->sc_dev,
6167 			    "for TX and RX interrupting at %s\n", intrstr);
6168 		}
6169 		sc->sc_ihs[intr_idx] = vih;
6170 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
6171 			goto fail;
6172 		txrx_established++;
6173 		intr_idx++;
6174 	}
6175 
6176 	/* LINK */
6177 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
6178 	    sizeof(intrbuf));
6179 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
6180 	memset(intr_xname, 0, sizeof(intr_xname));
6181 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
6182 	    device_xname(sc->sc_dev));
6183 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
6184 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
6185 	if (vih == NULL) {
6186 		aprint_error_dev(sc->sc_dev,
6187 		    "unable to establish MSI-X(for LINK)%s%s\n",
6188 		    intrstr ? " at " : "",
6189 		    intrstr ? intrstr : "");
6190 
6191 		goto fail;
6192 	}
6193 	/* Keep default affinity to LINK interrupt */
6194 	aprint_normal_dev(sc->sc_dev,
6195 	    "for LINK interrupting at %s\n", intrstr);
6196 	sc->sc_ihs[intr_idx] = vih;
6197 	sc->sc_link_intr_idx = intr_idx;
6198 
6199 	sc->sc_nintrs = sc->sc_nqueues + 1;
6200 	kcpuset_destroy(affinity);
6201 	return 0;
6202 
6203 fail:
6204 	for (qidx = 0; qidx < txrx_established; qidx++) {
6205 		struct wm_queue *wmq = &sc->sc_queue[qidx];
6206 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
6207 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
6208 	}
6209 
6210 	kcpuset_destroy(affinity);
6211 	return ENOMEM;
6212 }
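
/*
 * Resulting vector layout, e.g. for sc_nqueues = 4: vectors 0..3 are
 * the per-queue TXRX handlers (CPU affinity assigned round-robin
 * starting at sc_affinity_offset), vector 4 is the LINK handler with
 * default affinity, and sc_nintrs = sc_nqueues + 1 = 5.
 */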
6213 
6214 static void
6215 wm_unset_stopping_flags(struct wm_softc *sc)
6216 {
6217 	int i;
6218 
6219 	KASSERT(mutex_owned(sc->sc_core_lock));
6220 
6221 	/* Must unset stopping flags in ascending order. */
6222 	for (i = 0; i < sc->sc_nqueues; i++) {
6223 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6224 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6225 
6226 		mutex_enter(txq->txq_lock);
6227 		txq->txq_stopping = false;
6228 		mutex_exit(txq->txq_lock);
6229 
6230 		mutex_enter(rxq->rxq_lock);
6231 		rxq->rxq_stopping = false;
6232 		mutex_exit(rxq->rxq_lock);
6233 	}
6234 
6235 	sc->sc_core_stopping = false;
6236 }
6237 
6238 static void
6239 wm_set_stopping_flags(struct wm_softc *sc)
6240 {
6241 	int i;
6242 
6243 	KASSERT(mutex_owned(sc->sc_core_lock));
6244 
6245 	sc->sc_core_stopping = true;
6246 
6247 	/* Must set stopping flags in ascending order. */
6248 	for (i = 0; i < sc->sc_nqueues; i++) {
6249 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6250 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6251 
6252 		mutex_enter(rxq->rxq_lock);
6253 		rxq->rxq_stopping = true;
6254 		mutex_exit(rxq->rxq_lock);
6255 
6256 		mutex_enter(txq->txq_lock);
6257 		txq->txq_stopping = true;
6258 		mutex_exit(txq->txq_lock);
6259 	}
6260 }
6261 
6262 /*
6263  * Write interrupt interval value to ITR or EITR
6264  */
6265 static void
6266 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
6267 {
6268 
6269 	if (!wmq->wmq_set_itr)
6270 		return;
6271 
6272 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6273 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
6274 
6275 		/*
6276 		 * The 82575 doesn't have the CNT_INGR field,
6277 		 * so overwrite the counter field in software.
6278 		 */
6279 		if (sc->sc_type == WM_T_82575)
6280 			eitr |= __SHIFTIN(wmq->wmq_itr,
6281 			    EITR_COUNTER_MASK_82575);
6282 		else
6283 			eitr |= EITR_CNT_INGR;
6284 
6285 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
6286 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
6287 		/*
6288 		 * The 82574 has both ITR and EITR.  Set EITR when we use
6289 		 * the multiqueue function with MSI-X.
6290 		 */
6291 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
6292 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
6293 	} else {
6294 		KASSERT(wmq->wmq_id == 0);
6295 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
6296 	}
6297 
6298 	wmq->wmq_set_itr = false;
6299 }
6300 
6301 /*
6302  * TODO
6303  * The dynamic ITR calculation below is almost the same as Linux igb,
6304  * but it does not fit wm(4), so AIM has been disabled until we find
6305  * an appropriate ITR calculation.
6306  */
6307 /*
6308  * Calculate the interrupt interval value that wm_itrs_writereg() will
6309  * write to the register.  This function does not write ITR/EITR itself.
6310  */
6311 static void
6312 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
6313 {
6314 #ifdef NOTYET
6315 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
6316 	struct wm_txqueue *txq = &wmq->wmq_txq;
6317 	uint32_t avg_size = 0;
6318 	uint32_t new_itr;
6319 
6320 	if (rxq->rxq_packets)
6321 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
6322 	if (txq->txq_packets)
6323 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
6324 
6325 	if (avg_size == 0) {
6326 		new_itr = 450; /* restore default value */
6327 		goto out;
6328 	}
6329 
6330 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
6331 	avg_size += 24;
6332 
6333 	/* Don't starve jumbo frames */
6334 	avg_size = uimin(avg_size, 3000);
6335 
6336 	/* Give a little boost to mid-size frames */
6337 	if ((avg_size > 300) && (avg_size < 1200))
6338 		new_itr = avg_size / 3;
6339 	else
6340 		new_itr = avg_size / 2;
6341 
6342 out:
6343 	/*
6344 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
6345 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
6346 	 */
6347 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
6348 		new_itr *= 4;
6349 
6350 	if (new_itr != wmq->wmq_itr) {
6351 		wmq->wmq_itr = new_itr;
6352 		wmq->wmq_set_itr = true;
6353 	} else
6354 		wmq->wmq_set_itr = false;
6355 
6356 	rxq->rxq_packets = 0;
6357 	rxq->rxq_bytes = 0;
6358 	txq->txq_packets = 0;
6359 	txq->txq_bytes = 0;
6360 #endif
6361 }
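
/*
 * A worked example of the (disabled) heuristic above: with an
 * average frame of 900 bytes, avg_size becomes 924 after the 24-byte
 * CRC/preamble/gap allowance; 300 < 924 < 1200 selects the mid-size
 * boost, so new_itr = 924 / 3 = 308, which is then scaled by 4 to
 * 1232 on every controller except the 82575 (see the comment at the
 * end of the function).
 */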
6362 
6363 static void
6364 wm_init_sysctls(struct wm_softc *sc)
6365 {
6366 	struct sysctllog **log;
6367 	const struct sysctlnode *rnode, *qnode, *cnode;
6368 	int i, rv;
6369 	const char *dvname;
6370 
6371 	log = &sc->sc_sysctllog;
6372 	dvname = device_xname(sc->sc_dev);
6373 
6374 	rv = sysctl_createv(log, 0, NULL, &rnode,
6375 	    0, CTLTYPE_NODE, dvname,
6376 	    SYSCTL_DESCR("wm information and settings"),
6377 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
6378 	if (rv != 0)
6379 		goto err;
6380 
6381 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
6382 	    CTLTYPE_BOOL, "txrx_workqueue",
6383 	    SYSCTL_DESCR("Use workqueue for packet processing"),
6384 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
6385 	if (rv != 0)
6386 		goto teardown;
6387 
6388 	for (i = 0; i < sc->sc_nqueues; i++) {
6389 		struct wm_queue *wmq = &sc->sc_queue[i];
6390 		struct wm_txqueue *txq = &wmq->wmq_txq;
6391 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
6392 
6393 		snprintf(sc->sc_queue[i].sysctlname,
6394 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
6395 
6396 		if (sysctl_createv(log, 0, &rnode, &qnode,
6397 		    0, CTLTYPE_NODE,
6398 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
6399 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
6400 			break;
6401 
6402 		if (sysctl_createv(log, 0, &qnode, &cnode,
6403 		    CTLFLAG_READONLY, CTLTYPE_INT,
6404 		    "txq_free", SYSCTL_DESCR("TX queue free"),
6405 		    NULL, 0, &txq->txq_free,
6406 		    0, CTL_CREATE, CTL_EOL) != 0)
6407 			break;
6408 		if (sysctl_createv(log, 0, &qnode, &cnode,
6409 		    CTLFLAG_READONLY, CTLTYPE_INT,
6410 		    "txd_head", SYSCTL_DESCR("TX descriptor head"),
6411 		    wm_sysctl_tdh_handler, 0, (void *)txq,
6412 		    0, CTL_CREATE, CTL_EOL) != 0)
6413 			break;
6414 		if (sysctl_createv(log, 0, &qnode, &cnode,
6415 		    CTLFLAG_READONLY, CTLTYPE_INT,
6416 		    "txd_tail", SYSCTL_DESCR("TX descriptor tail"),
6417 		    wm_sysctl_tdt_handler, 0, (void *)txq,
6418 		    0, CTL_CREATE, CTL_EOL) != 0)
6419 			break;
6420 		if (sysctl_createv(log, 0, &qnode, &cnode,
6421 		    CTLFLAG_READONLY, CTLTYPE_INT,
6422 		    "txq_next", SYSCTL_DESCR("TX queue next"),
6423 		    NULL, 0, &txq->txq_next,
6424 		    0, CTL_CREATE, CTL_EOL) != 0)
6425 			break;
6426 		if (sysctl_createv(log, 0, &qnode, &cnode,
6427 		    CTLFLAG_READONLY, CTLTYPE_INT,
6428 		    "txq_sfree", SYSCTL_DESCR("TX queue sfree"),
6429 		    NULL, 0, &txq->txq_sfree,
6430 		    0, CTL_CREATE, CTL_EOL) != 0)
6431 			break;
6432 		if (sysctl_createv(log, 0, &qnode, &cnode,
6433 		    CTLFLAG_READONLY, CTLTYPE_INT,
6434 		    "txq_snext", SYSCTL_DESCR("TX queue snext"),
6435 		    NULL, 0, &txq->txq_snext,
6436 		    0, CTL_CREATE, CTL_EOL) != 0)
6437 			break;
6438 		if (sysctl_createv(log, 0, &qnode, &cnode,
6439 		    CTLFLAG_READONLY, CTLTYPE_INT,
6440 		    "txq_sdirty", SYSCTL_DESCR("TX queue sdirty"),
6441 		    NULL, 0, &txq->txq_sdirty,
6442 		    0, CTL_CREATE, CTL_EOL) != 0)
6443 			break;
6444 		if (sysctl_createv(log, 0, &qnode, &cnode,
6445 		    CTLFLAG_READONLY, CTLTYPE_INT,
6446 		    "txq_flags", SYSCTL_DESCR("TX queue flags"),
6447 		    NULL, 0, &txq->txq_flags,
6448 		    0, CTL_CREATE, CTL_EOL) != 0)
6449 			break;
6450 		if (sysctl_createv(log, 0, &qnode, &cnode,
6451 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
6452 		    "txq_stopping", SYSCTL_DESCR("TX queue stopping"),
6453 		    NULL, 0, &txq->txq_stopping,
6454 		    0, CTL_CREATE, CTL_EOL) != 0)
6455 			break;
6456 		if (sysctl_createv(log, 0, &qnode, &cnode,
6457 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
6458 		    "txq_sending", SYSCTL_DESCR("TX queue sending"),
6459 		    NULL, 0, &txq->txq_sending,
6460 		    0, CTL_CREATE, CTL_EOL) != 0)
6461 			break;
6462 
6463 		if (sysctl_createv(log, 0, &qnode, &cnode,
6464 		    CTLFLAG_READONLY, CTLTYPE_INT,
6465 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
6466 		    NULL, 0, &rxq->rxq_ptr,
6467 		    0, CTL_CREATE, CTL_EOL) != 0)
6468 			break;
6469 	}
6470 
6471 #ifdef WM_DEBUG
6472 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
6473 	    CTLTYPE_INT, "debug_flags",
6474 	    SYSCTL_DESCR(
6475 		    "Debug flags:\n"	\
6476 		    "\t0x01 LINK\n"	\
6477 		    "\t0x02 TX\n"	\
6478 		    "\t0x04 RX\n"	\
6479 		    "\t0x08 GMII\n"	\
6480 		    "\t0x10 MANAGE\n"	\
6481 		    "\t0x20 NVM\n"	\
6482 		    "\t0x40 INIT\n"	\
6483 		    "\t0x80 LOCK"),
6484 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
6485 	if (rv != 0)
6486 		goto teardown;
6487 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
6488 	    CTLTYPE_BOOL, "trigger_reset",
6489 	    SYSCTL_DESCR("Trigger an interface reset"),
6490 	    NULL, 0, &sc->sc_trigger_reset, 0, CTL_CREATE, CTL_EOL);
6491 	if (rv != 0)
6492 		goto teardown;
6493 #endif
6494 
6495 	return;
6496 
6497 teardown:
6498 	sysctl_teardown(log);
6499 err:
6500 	sc->sc_sysctllog = NULL;
6501 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
6502 	    __func__, rv);
6503 }
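
/*
 * The nodes created above land under hw.<ifname> and can be
 * inspected from userland; illustrative shell usage, assuming a
 * first queue on wm0:
 *
 *	sysctl hw.wm0.q0.txq_free
 *	sysctl -w hw.wm0.txrx_workqueue=1
 */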
6504 
6505 /*
6506  * wm_init:		[ifnet interface function]
6507  *
6508  *	Initialize the interface.
6509  */
6510 static int
6511 wm_init(struct ifnet *ifp)
6512 {
6513 	struct wm_softc *sc = ifp->if_softc;
6514 	int ret;
6515 
6516 	KASSERT(IFNET_LOCKED(ifp));
6517 
6518 	if (sc->sc_dying)
6519 		return ENXIO;
6520 
6521 	mutex_enter(sc->sc_core_lock);
6522 	ret = wm_init_locked(ifp);
6523 	mutex_exit(sc->sc_core_lock);
6524 
6525 	return ret;
6526 }
6527 
6528 static int
6529 wm_init_locked(struct ifnet *ifp)
6530 {
6531 	struct wm_softc *sc = ifp->if_softc;
6532 	struct ethercom *ec = &sc->sc_ethercom;
6533 	int i, j, trynum, error = 0;
6534 	uint32_t reg, sfp_mask = 0;
6535 
6536 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
6537 		device_xname(sc->sc_dev), __func__));
6538 	KASSERT(IFNET_LOCKED(ifp));
6539 	KASSERT(mutex_owned(sc->sc_core_lock));
6540 
6541 	/*
6542 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
6543 	 * There is a small but measurable benefit to avoiding the adjustment
6544 	 * of the descriptor so that the headers are aligned, for normal mtu,
6545 	 * on such platforms.  One possibility is that the DMA itself is
6546 	 * slightly more efficient if the front of the entire packet (instead
6547 	 * of the front of the headers) is aligned.
6548 	 *
6549 	 * Note we must always set align_tweak to 0 if we are using
6550 	 * jumbo frames.
6551 	 */
6552 #ifdef __NO_STRICT_ALIGNMENT
6553 	sc->sc_align_tweak = 0;
6554 #else
6555 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
6556 		sc->sc_align_tweak = 0;
6557 	else
6558 		sc->sc_align_tweak = 2;
6559 #endif /* __NO_STRICT_ALIGNMENT */
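	/*
	 * Illustrative arithmetic (not from the original comment), assuming
	 * 2 KB mbuf clusters: with the default MTU of 1500,
	 * 1500 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4) = 1518 <=
	 * MCLBYTES - 2 = 2046, so sc_align_tweak is 2.  Shifting the buffer
	 * start by two bytes places the IP header, which follows the
	 * 14-byte Ethernet header, on a 4-byte boundary.
	 */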
6560 
6561 	/* Cancel any pending I/O. */
6562 	wm_stop_locked(ifp, false, false);
6563 
6564 	/* Update statistics before reset */
6565 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
6566 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
6567 
6568 	/* >= PCH_SPT hardware workaround before reset. */
6569 	if (sc->sc_type >= WM_T_PCH_SPT)
6570 		wm_flush_desc_rings(sc);
6571 
6572 	/* Reset the chip to a known state. */
6573 	wm_reset(sc);
6574 
6575 	/*
6576 	 * AMT-based hardware can now take control from firmware.
6577 	 * Do this after reset.
6578 	 */
6579 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
6580 		wm_get_hw_control(sc);
6581 
6582 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
6583 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
6584 		wm_legacy_irq_quirk_spt(sc);
6585 
6586 	/* Init hardware bits */
6587 	wm_initialize_hardware_bits(sc);
6588 
6589 	/* Reset the PHY. */
6590 	if (sc->sc_flags & WM_F_HAS_MII)
6591 		wm_gmii_reset(sc);
6592 
6593 	if (sc->sc_type >= WM_T_ICH8) {
6594 		reg = CSR_READ(sc, WMREG_GCR);
6595 		/*
6596 		 * The ICH8's no-snoop bits have the opposite polarity.
6597 		 * Set them to snoop by default after reset.
6598 		 */
6599 		if (sc->sc_type == WM_T_ICH8)
6600 			reg |= GCR_NO_SNOOP_ALL;
6601 		else
6602 			reg &= ~GCR_NO_SNOOP_ALL;
6603 		CSR_WRITE(sc, WMREG_GCR, reg);
6604 	}
6605 
6606 	if ((sc->sc_type >= WM_T_ICH8)
6607 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
6608 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
6609 
6610 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
6611 		reg |= CTRL_EXT_RO_DIS;
6612 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6613 	}
6614 
6615 	/* Calculate (E)ITR value */
6616 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
6617 		/*
6618 		 * For NEWQUEUE's EITR (except for 82575).
6619 		 * The 82575's EITR should be set to the same throttling
6620 		 * value as other old controllers' ITR because the
6621 		 * interrupts/sec calculation is the same, that is,
6622 		 * 1,000,000,000 / (N * 256).
6623 		 *
6624 		 * The 82574's EITR should be set to the same value as its ITR.
6625 		 *
6626 		 * For N interrupts/sec, set this to 1,000,000 / N (unlike ITR).
6627 		 */
6628 		sc->sc_itr_init = 450;
6629 	} else if (sc->sc_type >= WM_T_82543) {
6630 		/*
6631 		 * Set up the interrupt throttling register (units of 256ns)
6632 		 * Note that a footnote in Intel's documentation says this
6633 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
6634 		 * or 10Mbit mode.  Empirically, this also appears to be
6635 		 * true for the 1024ns units of the other
6636 		 * interrupt-related timer registers -- so, really, we ought
6637 		 * to divide this value by 4 when the link speed is low.
6638 		 *
6639 		 * XXX implement this division at link speed change!
6640 		 */
6641 
6642 		/*
6643 		 * For N interrupts/sec, set this value to:
6644 		 * 1,000,000,000 / (N * 256).  Note that we set the
6645 		 * absolute and packet timer values to this value
6646 		 * divided by 4 to get "simple timer" behavior.
6647 		 */
6648 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
6649 	}
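	/*
	 * Worked example of the formulas above (illustrative only): with
	 * sc_itr_init = 1500 and 256ns units,
	 * 1,000,000,000 / (1500 * 256) ~= 2604 interrupts/sec, matching
	 * the "2604 ints/sec" note.  For the NEWQUEUE EITR case,
	 * 1,000,000 / 450 ~= 2222 interrupts/sec.
	 */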
6650 
6651 	error = wm_init_txrx_queues(sc);
6652 	if (error)
6653 		goto out;
6654 
6655 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
6656 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
6657 	    (sc->sc_type >= WM_T_82575))
6658 		wm_serdes_power_up_link_82575(sc);
6659 
6660 	/* Clear out the VLAN table -- we don't use it (yet). */
6661 	CSR_WRITE(sc, WMREG_VET, 0);
6662 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
6663 		trynum = 10; /* Due to hw errata */
6664 	else
6665 		trynum = 1;
6666 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
6667 		for (j = 0; j < trynum; j++)
6668 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
6669 
6670 	/*
6671 	 * Set up flow-control parameters.
6672 	 *
6673 	 * XXX Values could probably stand some tuning.
6674 	 */
6675 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
6676 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
6677 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
6678 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
6679 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
6680 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
6681 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
6682 	}
6683 
6684 	sc->sc_fcrtl = FCRTL_DFLT;
6685 	if (sc->sc_type < WM_T_82543) {
6686 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
6687 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
6688 	} else {
6689 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
6690 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
6691 	}
6692 
6693 	if (sc->sc_type == WM_T_80003)
6694 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
6695 	else
6696 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
6697 
6698 	/* Writes the control register. */
6699 	wm_set_vlan(sc);
6700 
6701 	if (sc->sc_flags & WM_F_HAS_MII) {
6702 		uint16_t kmreg;
6703 
6704 		switch (sc->sc_type) {
6705 		case WM_T_80003:
6706 		case WM_T_ICH8:
6707 		case WM_T_ICH9:
6708 		case WM_T_ICH10:
6709 		case WM_T_PCH:
6710 		case WM_T_PCH2:
6711 		case WM_T_PCH_LPT:
6712 		case WM_T_PCH_SPT:
6713 		case WM_T_PCH_CNP:
6714 			/*
6715 			 * Set the mac to wait the maximum time between each
6716 			 * iteration and increase the max iterations when
6717 			 * polling the phy; this fixes erroneous timeouts at
6718 			 * 10Mbps.
6719 			 */
6720 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
6721 			    0xFFFF);
6722 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
6723 			    &kmreg);
6724 			kmreg |= 0x3F;
6725 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
6726 			    kmreg);
6727 			break;
6728 		default:
6729 			break;
6730 		}
6731 
6732 		if (sc->sc_type == WM_T_80003) {
6733 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
6734 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
6735 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6736 
6737 			/* Bypass RX and TX FIFOs */
6738 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
6739 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
6740 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
6741 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
6742 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
6743 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
6744 		}
6745 	}
6746 #if 0
6747 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
6748 #endif
6749 
6750 	/* Set up checksum offload parameters. */
6751 	reg = CSR_READ(sc, WMREG_RXCSUM);
6752 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
6753 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
6754 		reg |= RXCSUM_IPOFL;
6755 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
6756 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
6757 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
6758 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
6759 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
6760 
6761 	/* Set registers about MSI-X */
6762 	if (wm_is_using_msix(sc)) {
6763 		uint32_t ivar, qintr_idx;
6764 		struct wm_queue *wmq;
6765 		unsigned int qid;
6766 
6767 		if (sc->sc_type == WM_T_82575) {
6768 			/* Interrupt control */
6769 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
6770 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
6771 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6772 
6773 			/* TX and RX */
6774 			for (i = 0; i < sc->sc_nqueues; i++) {
6775 				wmq = &sc->sc_queue[i];
6776 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
6777 				    EITR_TX_QUEUE(wmq->wmq_id)
6778 				    | EITR_RX_QUEUE(wmq->wmq_id));
6779 			}
6780 			/* Link status */
6781 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
6782 			    EITR_OTHER);
6783 		} else if (sc->sc_type == WM_T_82574) {
6784 			/* Interrupt control */
6785 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
6786 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
6787 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6788 
6789 			/*
6790 			 * Work around issue with spurious interrupts
6791 			 * in MSI-X mode.
6792 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
6793 			 * been initialized, so re-initialize WMREG_RFCTL here.
6794 			 */
6795 			reg = CSR_READ(sc, WMREG_RFCTL);
6796 			reg |= WMREG_RFCTL_ACKDIS;
6797 			CSR_WRITE(sc, WMREG_RFCTL, reg);
6798 
6799 			ivar = 0;
6800 			/* TX and RX */
6801 			for (i = 0; i < sc->sc_nqueues; i++) {
6802 				wmq = &sc->sc_queue[i];
6803 				qid = wmq->wmq_id;
6804 				qintr_idx = wmq->wmq_intr_idx;
6805 
6806 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
6807 				    IVAR_TX_MASK_Q_82574(qid));
6808 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
6809 				    IVAR_RX_MASK_Q_82574(qid));
6810 			}
6811 			/* Link status */
6812 			ivar |= __SHIFTIN((IVAR_VALID_82574
6813 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
6814 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
6815 		} else {
6816 			/* Interrupt control */
6817 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
6818 			    | GPIE_EIAME | GPIE_PBA);
6819 
6820 			switch (sc->sc_type) {
6821 			case WM_T_82580:
6822 			case WM_T_I350:
6823 			case WM_T_I354:
6824 			case WM_T_I210:
6825 			case WM_T_I211:
6826 				/* TX and RX */
6827 				for (i = 0; i < sc->sc_nqueues; i++) {
6828 					wmq = &sc->sc_queue[i];
6829 					qid = wmq->wmq_id;
6830 					qintr_idx = wmq->wmq_intr_idx;
6831 
6832 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
6833 					ivar &= ~IVAR_TX_MASK_Q(qid);
6834 					ivar |= __SHIFTIN((qintr_idx
6835 						| IVAR_VALID),
6836 					    IVAR_TX_MASK_Q(qid));
6837 					ivar &= ~IVAR_RX_MASK_Q(qid);
6838 					ivar |= __SHIFTIN((qintr_idx
6839 						| IVAR_VALID),
6840 					    IVAR_RX_MASK_Q(qid));
6841 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
6842 				}
6843 				break;
6844 			case WM_T_82576:
6845 				/* TX and RX */
6846 				for (i = 0; i < sc->sc_nqueues; i++) {
6847 					wmq = &sc->sc_queue[i];
6848 					qid = wmq->wmq_id;
6849 					qintr_idx = wmq->wmq_intr_idx;
6850 
6851 					ivar = CSR_READ(sc,
6852 					    WMREG_IVAR_Q_82576(qid));
6853 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
6854 					ivar |= __SHIFTIN((qintr_idx
6855 						| IVAR_VALID),
6856 					    IVAR_TX_MASK_Q_82576(qid));
6857 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
6858 					ivar |= __SHIFTIN((qintr_idx
6859 						| IVAR_VALID),
6860 					    IVAR_RX_MASK_Q_82576(qid));
6861 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
6862 					    ivar);
6863 				}
6864 				break;
6865 			default:
6866 				break;
6867 			}
6868 
6869 			/* Link status */
6870 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
6871 			    IVAR_MISC_OTHER);
6872 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
6873 		}
6874 
6875 		if (wm_is_using_multiqueue(sc)) {
6876 			wm_init_rss(sc);
6877 
6878 			/*
6879 			 * NOTE: Receive Full-Packet Checksum Offload is
6880 			 * mutually exclusive with Multiqueue.  However,
6881 			 * this is not the same as the TCP/IP checksums,
6882 			 * which still work.
6883 			 */
6884 			reg = CSR_READ(sc, WMREG_RXCSUM);
6885 			reg |= RXCSUM_PCSD;
6886 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
6887 		}
6888 	}
6889 
6890 	/* Set up the interrupt registers. */
6891 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
6892 
6893 	/* Enable SFP module insertion interrupt if it's required */
6894 	if ((sc->sc_flags & WM_F_SFP) != 0) {
6895 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
6896 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6897 		sfp_mask = ICR_GPI(0);
6898 	}
6899 
6900 	if (wm_is_using_msix(sc)) {
6901 		uint32_t mask;
6902 		struct wm_queue *wmq;
6903 
6904 		switch (sc->sc_type) {
6905 		case WM_T_82574:
6906 			mask = 0;
6907 			for (i = 0; i < sc->sc_nqueues; i++) {
6908 				wmq = &sc->sc_queue[i];
6909 				mask |= ICR_TXQ(wmq->wmq_id);
6910 				mask |= ICR_RXQ(wmq->wmq_id);
6911 			}
6912 			mask |= ICR_OTHER;
6913 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
6914 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
6915 			break;
6916 		default:
6917 			if (sc->sc_type == WM_T_82575) {
6918 				mask = 0;
6919 				for (i = 0; i < sc->sc_nqueues; i++) {
6920 					wmq = &sc->sc_queue[i];
6921 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
6922 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
6923 				}
6924 				mask |= EITR_OTHER;
6925 			} else {
6926 				mask = 0;
6927 				for (i = 0; i < sc->sc_nqueues; i++) {
6928 					wmq = &sc->sc_queue[i];
6929 					mask |= 1 << wmq->wmq_intr_idx;
6930 				}
6931 				mask |= 1 << sc->sc_link_intr_idx;
6932 			}
6933 			CSR_WRITE(sc, WMREG_EIAC, mask);
6934 			CSR_WRITE(sc, WMREG_EIAM, mask);
6935 			CSR_WRITE(sc, WMREG_EIMS, mask);
6936 
6937 			/* For other interrupts */
6938 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
6939 			break;
6940 		}
6941 	} else {
6942 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
6943 		    ICR_RXO | ICR_RXT0 | sfp_mask;
6944 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
6945 	}
6946 
6947 	/* Set up the inter-packet gap. */
6948 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
6949 
6950 	if (sc->sc_type >= WM_T_82543) {
6951 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6952 			struct wm_queue *wmq = &sc->sc_queue[qidx];
6953 			wm_itrs_writereg(sc, wmq);
6954 		}
6955 		/*
6956 		 * Link interrupts occur much less frequently than TX
6957 		 * and RX interrupts, so we don't
6958 		 * tune EINTR(WM_MSIX_LINKINTR_IDX) value like
6959 		 * FreeBSD's if_igb.
6960 		 */
6961 	}
6962 
6963 	/* Set the VLAN EtherType. */
6964 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
6965 
6966 	/*
6967 	 * Set up the transmit control register; we start out with
6968 	 * a collision distance suitable for FDX, but update it when
6969 	 * we resolve the media type.
6970 	 */
6971 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
6972 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
6973 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6974 	if (sc->sc_type >= WM_T_82571)
6975 		sc->sc_tctl |= TCTL_MULR;
6976 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6977 
6978 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6979 		/* Write TDT after TCTL.EN is set.  See the documentation. */
6980 		CSR_WRITE(sc, WMREG_TDT(0), 0);
6981 	}
6982 
6983 	if (sc->sc_type == WM_T_80003) {
6984 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
6985 		reg &= ~TCTL_EXT_GCEX_MASK;
6986 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
6987 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
6988 	}
6989 
6990 	/* Set the media. */
6991 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
6992 		goto out;
6993 
6994 	/* Configure for OS presence */
6995 	wm_init_manageability(sc);
6996 
6997 	/*
6998 	 * Set up the receive control register; we actually program the
6999 	 * register when we set the receive filter. Use multicast address
7000 	 * offset type 0.
7001 	 *
7002 	 * Only the i82544 has the ability to strip the incoming CRC, so we
7003 	 * don't enable that feature.
7004 	 */
7005 	sc->sc_mchash_type = 0;
7006 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
7007 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
7008 
7009 	/* The 82574 uses the one-buffer extended Rx descriptor. */
7010 	if (sc->sc_type == WM_T_82574)
7011 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
7012 
7013 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
7014 		sc->sc_rctl |= RCTL_SECRC;
7015 
7016 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
7017 	    && (ifp->if_mtu > ETHERMTU)) {
7018 		sc->sc_rctl |= RCTL_LPE;
7019 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7020 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
7021 	}
7022 
7023 	if (MCLBYTES == 2048)
7024 		sc->sc_rctl |= RCTL_2k;
7025 	else {
7026 		if (sc->sc_type >= WM_T_82543) {
7027 			switch (MCLBYTES) {
7028 			case 4096:
7029 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
7030 				break;
7031 			case 8192:
7032 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
7033 				break;
7034 			case 16384:
7035 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
7036 				break;
7037 			default:
7038 				panic("wm_init: MCLBYTES %d unsupported",
7039 				    MCLBYTES);
7040 				break;
7041 			}
7042 		} else
7043 			panic("wm_init: i82542 requires MCLBYTES = 2048");
7044 	}
7045 
7046 	/* Enable ECC */
7047 	switch (sc->sc_type) {
7048 	case WM_T_82571:
7049 		reg = CSR_READ(sc, WMREG_PBA_ECC);
7050 		reg |= PBA_ECC_CORR_EN;
7051 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
7052 		break;
7053 	case WM_T_PCH_LPT:
7054 	case WM_T_PCH_SPT:
7055 	case WM_T_PCH_CNP:
7056 		reg = CSR_READ(sc, WMREG_PBECCSTS);
7057 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
7058 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
7059 
7060 		sc->sc_ctrl |= CTRL_MEHE;
7061 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7062 		break;
7063 	default:
7064 		break;
7065 	}
7066 
7067 	/*
7068 	 * Set the receive filter.
7069 	 *
7070 	 * For 82575 and 82576, the RX descriptors must be initialized after
7071 	 * the setting of RCTL.EN in wm_set_filter()
7072 	 */
7073 	wm_set_filter(sc);
7074 
7075 	/* On 82575 and later, set RDT only if RX is enabled */
7076 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7077 		int qidx;
7078 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
7079 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
7080 			for (i = 0; i < WM_NRXDESC; i++) {
7081 				mutex_enter(rxq->rxq_lock);
7082 				wm_init_rxdesc(rxq, i);
7083 				mutex_exit(rxq->rxq_lock);
7084 
7085 			}
7086 		}
7087 	}
7088 
7089 	wm_unset_stopping_flags(sc);
7090 
7091 	/* Start the one second link check clock. */
7092 	callout_schedule(&sc->sc_tick_ch, hz);
7093 
7094 	/*
7095 	 * ...all done! (IFNET_LOCKED asserted above.)
7096 	 */
7097 	ifp->if_flags |= IFF_RUNNING;
7098 
7099 out:
7100 	/* Save last flags for the callback */
7101 	sc->sc_if_flags = ifp->if_flags;
7102 	sc->sc_ec_capenable = ec->ec_capenable;
7103 	if (error)
7104 		log(LOG_ERR, "%s: interface not running\n",
7105 		    device_xname(sc->sc_dev));
7106 	return error;
7107 }
7108 
7109 /*
7110  * wm_stop:		[ifnet interface function]
7111  *
7112  *	Stop transmission on the interface.
7113  */
7114 static void
7115 wm_stop(struct ifnet *ifp, int disable)
7116 {
7117 	struct wm_softc *sc = ifp->if_softc;
7118 
7119 	ASSERT_SLEEPABLE();
7120 	KASSERT(IFNET_LOCKED(ifp));
7121 
7122 	mutex_enter(sc->sc_core_lock);
7123 	wm_stop_locked(ifp, disable ? true : false, true);
7124 	mutex_exit(sc->sc_core_lock);
7125 
7126 	/*
7127 	 * After wm_set_stopping_flags(), it is guaranteed that
7128 	 * wm_handle_queue_work() does not call workqueue_enqueue().
7129 	 * However, workqueue_wait() cannot be called in
7130 	 * wm_stop_locked() because it can sleep,
7131 	 * so call workqueue_wait() here.
7132 	 */
7133 	for (int i = 0; i < sc->sc_nqueues; i++)
7134 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
7135 	workqueue_wait(sc->sc_reset_wq, &sc->sc_reset_work);
7136 }
7137 
7138 static void
7139 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
7140 {
7141 	struct wm_softc *sc = ifp->if_softc;
7142 	struct wm_txsoft *txs;
7143 	int i, qidx;
7144 
7145 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
7146 		device_xname(sc->sc_dev), __func__));
7147 	KASSERT(IFNET_LOCKED(ifp));
7148 	KASSERT(mutex_owned(sc->sc_core_lock));
7149 
7150 	wm_set_stopping_flags(sc);
7151 
7152 	if (sc->sc_flags & WM_F_HAS_MII) {
7153 		/* Down the MII. */
7154 		mii_down(&sc->sc_mii);
7155 	} else {
7156 #if 0
7157 		/* Should we clear PHY's status properly? */
7158 		wm_reset(sc);
7159 #endif
7160 	}
7161 
7162 	/* Stop the transmit and receive processes. */
7163 	CSR_WRITE(sc, WMREG_TCTL, 0);
7164 	CSR_WRITE(sc, WMREG_RCTL, 0);
7165 	sc->sc_rctl &= ~RCTL_EN;
7166 
7167 	/*
7168 	 * Clear the interrupt mask to ensure the device cannot assert its
7169 	 * interrupt line.
7170 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
7171 	 * service any currently pending or shared interrupt.
7172 	 */
7173 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
7174 	sc->sc_icr = 0;
7175 	if (wm_is_using_msix(sc)) {
7176 		if (sc->sc_type != WM_T_82574) {
7177 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
7178 			CSR_WRITE(sc, WMREG_EIAC, 0);
7179 		} else
7180 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
7181 	}
7182 
7183 	/*
7184 	 * Stop callouts after interrupts are disabled; if we have
7185 	 * to wait for them, we will be releasing the CORE_LOCK
7186 	 * briefly, which will unblock interrupts on the current CPU.
7187 	 */
7188 
7189 	/* Stop the one second clock. */
7190 	if (wait)
7191 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
7192 	else
7193 		callout_stop(&sc->sc_tick_ch);
7194 
7195 	/* Stop the 82547 Tx FIFO stall check timer. */
7196 	if (sc->sc_type == WM_T_82547) {
7197 		if (wait)
7198 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
7199 		else
7200 			callout_stop(&sc->sc_txfifo_ch);
7201 	}
7202 
7203 	/* Release any queued transmit buffers. */
7204 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
7205 		struct wm_queue *wmq = &sc->sc_queue[qidx];
7206 		struct wm_txqueue *txq = &wmq->wmq_txq;
7207 		struct mbuf *m;
7208 
7209 		mutex_enter(txq->txq_lock);
7210 		txq->txq_sending = false; /* Ensure watchdog disabled */
7211 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7212 			txs = &txq->txq_soft[i];
7213 			if (txs->txs_mbuf != NULL) {
7214 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
7215 				m_freem(txs->txs_mbuf);
7216 				txs->txs_mbuf = NULL;
7217 			}
7218 		}
7219 		/* Drain txq_interq */
7220 		while ((m = pcq_get(txq->txq_interq)) != NULL)
7221 			m_freem(m);
7222 		mutex_exit(txq->txq_lock);
7223 	}
7224 
7225 	/* Mark the interface as down and cancel the watchdog timer. */
7226 	ifp->if_flags &= ~IFF_RUNNING;
7227 	sc->sc_if_flags = ifp->if_flags;
7228 
7229 	if (disable) {
7230 		for (i = 0; i < sc->sc_nqueues; i++) {
7231 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7232 			mutex_enter(rxq->rxq_lock);
7233 			wm_rxdrain(rxq);
7234 			mutex_exit(rxq->rxq_lock);
7235 		}
7236 	}
7237 
7238 #if 0 /* notyet */
7239 	if (sc->sc_type >= WM_T_82544)
7240 		CSR_WRITE(sc, WMREG_WUC, 0);
7241 #endif
7242 }
7243 
7244 static void
7245 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
7246 {
7247 	struct mbuf *m;
7248 	int i;
7249 
7250 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
7251 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
7252 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
7253 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
7254 		    m->m_data, m->m_len, m->m_flags);
7255 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
7256 	    i, i == 1 ? "" : "s");
7257 }
7258 
7259 /*
7260  * wm_82547_txfifo_stall:
7261  *
7262  *	Callout used to wait for the 82547 Tx FIFO to drain,
7263  *	reset the FIFO pointers, and restart packet transmission.
7264  */
7265 static void
7266 wm_82547_txfifo_stall(void *arg)
7267 {
7268 	struct wm_softc *sc = arg;
7269 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7270 
7271 	mutex_enter(txq->txq_lock);
7272 
7273 	if (txq->txq_stopping)
7274 		goto out;
7275 
7276 	if (txq->txq_fifo_stall) {
7277 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
7278 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
7279 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
7280 			/*
7281 			 * Packets have drained.  Stop transmitter, reset
7282 			 * FIFO pointers, restart transmitter, and kick
7283 			 * the packet queue.
7284 			 */
7285 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
7286 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
7287 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
7288 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
7289 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
7290 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
7291 			CSR_WRITE(sc, WMREG_TCTL, tctl);
7292 			CSR_WRITE_FLUSH(sc);
7293 
7294 			txq->txq_fifo_head = 0;
7295 			txq->txq_fifo_stall = 0;
7296 			wm_start_locked(&sc->sc_ethercom.ec_if);
7297 		} else {
7298 			/*
7299 			 * Still waiting for packets to drain; try again in
7300 			 * another tick.
7301 			 */
7302 			callout_schedule(&sc->sc_txfifo_ch, 1);
7303 		}
7304 	}
7305 
7306 out:
7307 	mutex_exit(txq->txq_lock);
7308 }
7309 
7310 /*
7311  * wm_82547_txfifo_bugchk:
7312  *
7313  *	Check for bug condition in the 82547 Tx FIFO.  We need to
7314  *	prevent enqueueing a packet that would wrap around the end
7315  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
7316  *
7317  *	We do this by checking the amount of space before the end
7318  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
7319  *	the Tx FIFO, wait for all remaining packets to drain, reset
7320  *	the internal FIFO pointers to the beginning, and restart
7321  *	transmission on the interface.
7322  */
7323 #define	WM_FIFO_HDR		0x10
7324 #define	WM_82547_PAD_LEN	0x3e0
7325 static int
7326 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
7327 {
7328 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7329 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
7330 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
7331 
7332 	/* Just return if already stalled. */
7333 	if (txq->txq_fifo_stall)
7334 		return 1;
7335 
7336 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
7337 		/* Stall only occurs in half-duplex mode. */
7338 		goto send_packet;
7339 	}
7340 
7341 	if (len >= WM_82547_PAD_LEN + space) {
7342 		txq->txq_fifo_stall = 1;
7343 		callout_schedule(&sc->sc_txfifo_ch, 1);
7344 		return 1;
7345 	}
7346 
7347 send_packet:
7348 	txq->txq_fifo_head += len;
7349 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
7350 		txq->txq_fifo_head -= txq->txq_fifo_size;
7351 
7352 	return 0;
7353 }
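/*
 * Illustrative example of the check above (not from the original sources):
 * a 1514-byte frame is charged roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) =
 * 1536 bytes of FIFO space.  In half-duplex mode it stalls the queue when
 * len >= WM_82547_PAD_LEN + space, i.e. when 1536 - 0x3e0 = 544 bytes or
 * less remain before the end of the FIFO.
 */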
7354 
7355 static int
7356 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
7357 {
7358 	int error;
7359 
7360 	/*
7361 	 * Allocate the control data structures, and create and load the
7362 	 * DMA map for it.
7363 	 *
7364 	 * NOTE: All Tx descriptors must be in the same 4G segment of
7365 	 * memory.  So must Rx descriptors.  We simplify by allocating
7366 	 * both sets within the same 4G segment.
7367 	 */
7368 	if (sc->sc_type < WM_T_82544)
7369 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
7370 	else
7371 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
7372 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7373 		txq->txq_descsize = sizeof(nq_txdesc_t);
7374 	else
7375 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
7376 
7377 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
7378 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
7379 		    1, &txq->txq_desc_rseg, 0)) != 0) {
7380 		aprint_error_dev(sc->sc_dev,
7381 		    "unable to allocate TX control data, error = %d\n",
7382 		    error);
7383 		goto fail_0;
7384 	}
7385 
7386 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
7387 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
7388 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
7389 		aprint_error_dev(sc->sc_dev,
7390 		    "unable to map TX control data, error = %d\n", error);
7391 		goto fail_1;
7392 	}
7393 
7394 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
7395 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
7396 		aprint_error_dev(sc->sc_dev,
7397 		    "unable to create TX control data DMA map, error = %d\n",
7398 		    error);
7399 		goto fail_2;
7400 	}
7401 
7402 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
7403 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
7404 		aprint_error_dev(sc->sc_dev,
7405 		    "unable to load TX control data DMA map, error = %d\n",
7406 		    error);
7407 		goto fail_3;
7408 	}
7409 
7410 	return 0;
7411 
7412 fail_3:
7413 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
7414 fail_2:
7415 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
7416 	    WM_TXDESCS_SIZE(txq));
7417 fail_1:
7418 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
7419 fail_0:
7420 	return error;
7421 }
7422 
7423 static void
7424 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
7425 {
7426 
7427 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
7428 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
7429 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
7430 	    WM_TXDESCS_SIZE(txq));
7431 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
7432 }
7433 
7434 static int
7435 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
7436 {
7437 	int error;
7438 	size_t rxq_descs_size;
7439 
7440 	/*
7441 	 * Allocate the control data structures, and create and load the
7442 	 * DMA map for it.
7443 	 *
7444 	 * NOTE: All Tx descriptors must be in the same 4G segment of
7445 	 * memory.  So must Rx descriptors.  We simplify by allocating
7446 	 * both sets within the same 4G segment.
7447 	 */
7448 	rxq->rxq_ndesc = WM_NRXDESC;
7449 	if (sc->sc_type == WM_T_82574)
7450 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
7451 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7452 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
7453 	else
7454 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
7455 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
7456 
7457 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
7458 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
7459 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
7460 		aprint_error_dev(sc->sc_dev,
7461 		    "unable to allocate RX control data, error = %d\n",
7462 		    error);
7463 		goto fail_0;
7464 	}
7465 
7466 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
7467 		    rxq->rxq_desc_rseg, rxq_descs_size,
7468 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
7469 		aprint_error_dev(sc->sc_dev,
7470 		    "unable to map RX control data, error = %d\n", error);
7471 		goto fail_1;
7472 	}
7473 
7474 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
7475 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
7476 		aprint_error_dev(sc->sc_dev,
7477 		    "unable to create RX control data DMA map, error = %d\n",
7478 		    error);
7479 		goto fail_2;
7480 	}
7481 
7482 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
7483 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
7484 		aprint_error_dev(sc->sc_dev,
7485 		    "unable to load RX control data DMA map, error = %d\n",
7486 		    error);
7487 		goto fail_3;
7488 	}
7489 
7490 	return 0;
7491 
7492  fail_3:
7493 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
7494  fail_2:
7495 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
7496 	    rxq_descs_size);
7497  fail_1:
7498 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
7499  fail_0:
7500 	return error;
7501 }
7502 
7503 static void
7504 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
7505 {
7506 
7507 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
7508 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
7509 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
7510 	    rxq->rxq_descsize * rxq->rxq_ndesc);
7511 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
7512 }
7513 
7514 
7515 static int
7516 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
7517 {
7518 	int i, error;
7519 
7520 	/* Create the transmit buffer DMA maps. */
7521 	WM_TXQUEUELEN(txq) =
7522 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
7523 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
7524 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7525 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
7526 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
7527 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
7528 			aprint_error_dev(sc->sc_dev,
7529 			    "unable to create Tx DMA map %d, error = %d\n",
7530 			    i, error);
7531 			goto fail;
7532 		}
7533 	}
7534 
7535 	return 0;
7536 
7537 fail:
7538 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7539 		if (txq->txq_soft[i].txs_dmamap != NULL)
7540 			bus_dmamap_destroy(sc->sc_dmat,
7541 			    txq->txq_soft[i].txs_dmamap);
7542 	}
7543 	return error;
7544 }
7545 
7546 static void
7547 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
7548 {
7549 	int i;
7550 
7551 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7552 		if (txq->txq_soft[i].txs_dmamap != NULL)
7553 			bus_dmamap_destroy(sc->sc_dmat,
7554 			    txq->txq_soft[i].txs_dmamap);
7555 	}
7556 }
7557 
7558 static int
7559 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7560 {
7561 	int i, error;
7562 
7563 	/* Create the receive buffer DMA maps. */
7564 	for (i = 0; i < rxq->rxq_ndesc; i++) {
7565 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
7566 			    MCLBYTES, 0, 0,
7567 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
7568 			aprint_error_dev(sc->sc_dev,
7569 			    "unable to create Rx DMA map %d, error = %d\n",
7570 			    i, error);
7571 			goto fail;
7572 		}
7573 		rxq->rxq_soft[i].rxs_mbuf = NULL;
7574 	}
7575 
7576 	return 0;
7577 
7578  fail:
7579 	for (i = 0; i < rxq->rxq_ndesc; i++) {
7580 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
7581 			bus_dmamap_destroy(sc->sc_dmat,
7582 			    rxq->rxq_soft[i].rxs_dmamap);
7583 	}
7584 	return error;
7585 }
7586 
7587 static void
7588 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7589 {
7590 	int i;
7591 
7592 	for (i = 0; i < rxq->rxq_ndesc; i++) {
7593 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
7594 			bus_dmamap_destroy(sc->sc_dmat,
7595 			    rxq->rxq_soft[i].rxs_dmamap);
7596 	}
7597 }
7598 
7599 /*
7600  * wm_alloc_txrx_queues:
7601  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
7602  */
7603 static int
7604 wm_alloc_txrx_queues(struct wm_softc *sc)
7605 {
7606 	int i, error, tx_done, rx_done;
7607 
7608 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
7609 	    KM_SLEEP);
7610 	if (sc->sc_queue == NULL) {
7611 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
7612 		error = ENOMEM;
7613 		goto fail_0;
7614 	}
7615 
7616 	/* For transmission */
7617 	error = 0;
7618 	tx_done = 0;
7619 	for (i = 0; i < sc->sc_nqueues; i++) {
7620 #ifdef WM_EVENT_COUNTERS
7621 		int j;
7622 		const char *xname;
7623 #endif
7624 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7625 		txq->txq_sc = sc;
7626 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
7627 
7628 		error = wm_alloc_tx_descs(sc, txq);
7629 		if (error)
7630 			break;
7631 		error = wm_alloc_tx_buffer(sc, txq);
7632 		if (error) {
7633 			wm_free_tx_descs(sc, txq);
7634 			break;
7635 		}
7636 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
7637 		if (txq->txq_interq == NULL) {
7638 			wm_free_tx_descs(sc, txq);
7639 			wm_free_tx_buffer(sc, txq);
7640 			error = ENOMEM;
7641 			break;
7642 		}
7643 
7644 #ifdef WM_EVENT_COUNTERS
7645 		xname = device_xname(sc->sc_dev);
7646 
7647 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
7648 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
7649 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
7650 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
7651 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
7652 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
7653 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
7654 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
7655 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
7656 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
7657 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
7658 
7659 		for (j = 0; j < WM_NTXSEGS; j++) {
7660 			snprintf(txq->txq_txseg_evcnt_names[j],
7661 			    sizeof(txq->txq_txseg_evcnt_names[j]),
7662 			    "txq%02dtxseg%d", i, j);
7663 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
7664 			    EVCNT_TYPE_MISC,
7665 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
7666 		}
7667 
7668 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
7669 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
7670 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
7671 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
7672 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
7673 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
7674 #endif /* WM_EVENT_COUNTERS */
7675 
7676 		tx_done++;
7677 	}
7678 	if (error)
7679 		goto fail_1;
7680 
7681 	/* For receive */
7682 	error = 0;
7683 	rx_done = 0;
7684 	for (i = 0; i < sc->sc_nqueues; i++) {
7685 #ifdef WM_EVENT_COUNTERS
7686 		const char *xname;
7687 #endif
7688 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7689 		rxq->rxq_sc = sc;
7690 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
7691 
7692 		error = wm_alloc_rx_descs(sc, rxq);
7693 		if (error)
7694 			break;
7695 
7696 		error = wm_alloc_rx_buffer(sc, rxq);
7697 		if (error) {
7698 			wm_free_rx_descs(sc, rxq);
7699 			break;
7700 		}
7701 
7702 #ifdef WM_EVENT_COUNTERS
7703 		xname = device_xname(sc->sc_dev);
7704 
7705 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
7706 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
7707 
7708 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
7709 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
7710 #endif /* WM_EVENT_COUNTERS */
7711 
7712 		rx_done++;
7713 	}
7714 	if (error)
7715 		goto fail_2;
7716 
7717 	return 0;
7718 
7719 fail_2:
7720 	for (i = 0; i < rx_done; i++) {
7721 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7722 		wm_free_rx_buffer(sc, rxq);
7723 		wm_free_rx_descs(sc, rxq);
7724 		if (rxq->rxq_lock)
7725 			mutex_obj_free(rxq->rxq_lock);
7726 	}
7727 fail_1:
7728 	for (i = 0; i < tx_done; i++) {
7729 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7730 		pcq_destroy(txq->txq_interq);
7731 		wm_free_tx_buffer(sc, txq);
7732 		wm_free_tx_descs(sc, txq);
7733 		if (txq->txq_lock)
7734 			mutex_obj_free(txq->txq_lock);
7735 	}
7736 
7737 	kmem_free(sc->sc_queue,
7738 	    sizeof(struct wm_queue) * sc->sc_nqueues);
7739 fail_0:
7740 	return error;
7741 }
7742 
7743 /*
7744  * wm_free_txrx_queues:
7745  *	Free {tx,rx} descriptors and {tx,rx} buffers.
7746  */
7747 static void
7748 wm_free_txrx_queues(struct wm_softc *sc)
7749 {
7750 	int i;
7751 
7752 	for (i = 0; i < sc->sc_nqueues; i++) {
7753 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7754 
7755 #ifdef WM_EVENT_COUNTERS
7756 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
7757 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
7758 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
7759 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
7760 #endif /* WM_EVENT_COUNTERS */
7761 
7762 		wm_free_rx_buffer(sc, rxq);
7763 		wm_free_rx_descs(sc, rxq);
7764 		if (rxq->rxq_lock)
7765 			mutex_obj_free(rxq->rxq_lock);
7766 	}
7767 
7768 	for (i = 0; i < sc->sc_nqueues; i++) {
7769 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7770 		struct mbuf *m;
7771 #ifdef WM_EVENT_COUNTERS
7772 		int j;
7773 
7774 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
7775 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
7776 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
7777 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
7778 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
7779 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
7780 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
7781 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
7782 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
7783 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
7784 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
7785 
7786 		for (j = 0; j < WM_NTXSEGS; j++)
7787 			evcnt_detach(&txq->txq_ev_txseg[j]);
7788 
7789 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
7790 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
7791 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
7792 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
7793 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
7794 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
7795 #endif /* WM_EVENT_COUNTERS */
7796 
7797 		/* Drain txq_interq */
7798 		while ((m = pcq_get(txq->txq_interq)) != NULL)
7799 			m_freem(m);
7800 		pcq_destroy(txq->txq_interq);
7801 
7802 		wm_free_tx_buffer(sc, txq);
7803 		wm_free_tx_descs(sc, txq);
7804 		if (txq->txq_lock)
7805 			mutex_obj_free(txq->txq_lock);
7806 	}
7807 
7808 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
7809 }
7810 
7811 static void
7812 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
7813 {
7814 
7815 	KASSERT(mutex_owned(txq->txq_lock));
7816 
7817 	/* Initialize the transmit descriptor ring. */
7818 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
7819 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
7820 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
7821 	txq->txq_free = WM_NTXDESC(txq);
7822 	txq->txq_next = 0;
7823 }
7824 
7825 static void
7826 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
7827     struct wm_txqueue *txq)
7828 {
7829 
7830 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
7831 		device_xname(sc->sc_dev), __func__));
7832 	KASSERT(mutex_owned(txq->txq_lock));
7833 
7834 	if (sc->sc_type < WM_T_82543) {
7835 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
7836 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
7837 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
7838 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
7839 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
7840 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
7841 	} else {
7842 		int qid = wmq->wmq_id;
7843 
7844 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
7845 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
7846 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
7847 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
7848 
7849 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7850 			/*
7851 			 * Don't write TDT before TCTL.EN is set.
7852 			 * See the documentation.
7853 			 */
7854 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
7855 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
7856 			    | TXDCTL_WTHRESH(0));
7857 		else {
7858 			/* XXX should update with AIM? */
7859 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
7860 			if (sc->sc_type >= WM_T_82540) {
7861 				/* Should be the same */
7862 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
7863 			}
7864 
7865 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
7866 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
7867 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
7868 		}
7869 	}
7870 }
7871 
7872 static void
7873 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
7874 {
7875 	int i;
7876 
7877 	KASSERT(mutex_owned(txq->txq_lock));
7878 
7879 	/* Initialize the transmit job descriptors. */
7880 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
7881 		txq->txq_soft[i].txs_mbuf = NULL;
7882 	txq->txq_sfree = WM_TXQUEUELEN(txq);
7883 	txq->txq_snext = 0;
7884 	txq->txq_sdirty = 0;
7885 }
7886 
7887 static void
7888 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
7889     struct wm_txqueue *txq)
7890 {
7891 
7892 	KASSERT(mutex_owned(txq->txq_lock));
7893 
7894 	/*
7895 	 * Set up some register offsets that are different between
7896 	 * the i82542 and the i82543 and later chips.
7897 	 */
7898 	if (sc->sc_type < WM_T_82543)
7899 		txq->txq_tdt_reg = WMREG_OLD_TDT;
7900 	else
7901 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
7902 
7903 	wm_init_tx_descs(sc, txq);
7904 	wm_init_tx_regs(sc, wmq, txq);
7905 	wm_init_tx_buffer(sc, txq);
7906 
7907 	/* Clear all flags other than WM_TXQ_LINKDOWN_DISCARD */
7908 	txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD;
7909 
7910 	txq->txq_sending = false;
7911 }
7912 
7913 static void
7914 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
7915     struct wm_rxqueue *rxq)
7916 {
7917 
7918 	KASSERT(mutex_owned(rxq->rxq_lock));
7919 
7920 	/*
7921 	 * Initialize the receive descriptor and receive job
7922 	 * descriptor rings.
7923 	 */
7924 	if (sc->sc_type < WM_T_82543) {
7925 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
7926 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
7927 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
7928 		    rxq->rxq_descsize * rxq->rxq_ndesc);
7929 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
7930 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
7931 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
7932 
7933 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
7934 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
7935 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
7936 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
7937 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
7938 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
7939 	} else {
7940 		int qid = wmq->wmq_id;
7941 
7942 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
7943 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
7944 		CSR_WRITE(sc, WMREG_RDLEN(qid),
7945 		    rxq->rxq_descsize * rxq->rxq_ndesc);
7946 
7947 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7948 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
7949 				panic("%s: MCLBYTES %d unsupported for 82575 "
7950 				    "or higher\n", __func__, MCLBYTES);
7951 
7952 			/*
7953 			 * Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is
7954 			 * supported.
7955 			 */
7956 			CSR_WRITE(sc, WMREG_SRRCTL(qid),
7957 			    SRRCTL_DESCTYPE_ADV_ONEBUF
7958 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
7959 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
7960 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
7961 			    | RXDCTL_WTHRESH(1));
7962 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
7963 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
7964 		} else {
7965 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
7966 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
7967 			/* XXX should update with AIM? */
7968 			CSR_WRITE(sc, WMREG_RDTR,
7969 			    (wmq->wmq_itr / 4) | RDTR_FPD);
7970 			/* MUST be the same */
7971 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
7972 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
7973 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
7974 		}
7975 	}
7976 }
7977 
7978 static int
7979 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7980 {
7981 	struct wm_rxsoft *rxs;
7982 	int error, i;
7983 
7984 	KASSERT(mutex_owned(rxq->rxq_lock));
7985 
7986 	for (i = 0; i < rxq->rxq_ndesc; i++) {
7987 		rxs = &rxq->rxq_soft[i];
7988 		if (rxs->rxs_mbuf == NULL) {
7989 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
7990 				log(LOG_ERR, "%s: unable to allocate or map "
7991 				    "rx buffer %d, error = %d\n",
7992 				    device_xname(sc->sc_dev), i, error);
7993 				/*
7994 				 * XXX Should attempt to run with fewer receive
7995 				 * XXX buffers instead of just failing.
7996 				 */
7997 				wm_rxdrain(rxq);
7998 				return ENOMEM;
7999 			}
8000 		} else {
8001 			/*
8002 			 * For 82575 and 82576, the RX descriptors must be
8003 			 * initialized after the setting of RCTL.EN in
8004 			 * wm_set_filter()
8005 			 */
8006 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
8007 				wm_init_rxdesc(rxq, i);
8008 		}
8009 	}
8010 	rxq->rxq_ptr = 0;
8011 	rxq->rxq_discard = 0;
8012 	WM_RXCHAIN_RESET(rxq);
8013 
8014 	return 0;
8015 }
8016 
8017 static int
8018 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
8019     struct wm_rxqueue *rxq)
8020 {
8021 
8022 	KASSERT(mutex_owned(rxq->rxq_lock));
8023 
8024 	/*
8025 	 * Set up some register offsets that are different between
8026 	 * the i82542 and the i82543 and later chips.
8027 	 */
8028 	if (sc->sc_type < WM_T_82543)
8029 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
8030 	else
8031 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
8032 
8033 	wm_init_rx_regs(sc, wmq, rxq);
8034 	return wm_init_rx_buffer(sc, rxq);
8035 }
8036 
8037 /*
8038  * wm_init_txrx_queues:
8039  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
8040  */
8041 static int
8042 wm_init_txrx_queues(struct wm_softc *sc)
8043 {
8044 	int i, error = 0;
8045 
8046 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
8047 		device_xname(sc->sc_dev), __func__));
8048 
8049 	for (i = 0; i < sc->sc_nqueues; i++) {
8050 		struct wm_queue *wmq = &sc->sc_queue[i];
8051 		struct wm_txqueue *txq = &wmq->wmq_txq;
8052 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
8053 
8054 		/*
8055 		 * TODO
8056 		 * Currently, we use a constant value instead of AIM.
8057 		 * Furthermore, the interrupt interval of a multiqueue setup
8058 		 * that uses polling mode is less than the default value.
8059 		 * More tuning and AIM are required.
8060 		 */
8061 		if (wm_is_using_multiqueue(sc))
8062 			wmq->wmq_itr = 50;
8063 		else
8064 			wmq->wmq_itr = sc->sc_itr_init;
8065 		wmq->wmq_set_itr = true;
8066 
8067 		mutex_enter(txq->txq_lock);
8068 		wm_init_tx_queue(sc, wmq, txq);
8069 		mutex_exit(txq->txq_lock);
8070 
8071 		mutex_enter(rxq->rxq_lock);
8072 		error = wm_init_rx_queue(sc, wmq, rxq);
8073 		mutex_exit(rxq->rxq_lock);
8074 		if (error)
8075 			break;
8076 	}
8077 
8078 	return error;
8079 }
8080 
8081 /*
8082  * wm_tx_offload:
8083  *
8084  *	Set up TCP/IP checksumming parameters for the
8085  *	specified packet.
8086  */
8087 static void
8088 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
8089     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
8090 {
8091 	struct mbuf *m0 = txs->txs_mbuf;
8092 	struct livengood_tcpip_ctxdesc *t;
8093 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
8094 	uint32_t ipcse;
8095 	struct ether_header *eh;
8096 	int offset, iphl;
8097 	uint8_t fields;
8098 
8099 	/*
8100 	 * XXX It would be nice if the mbuf pkthdr had offset
8101 	 * fields for the protocol headers.
8102 	 */
8103 
8104 	eh = mtod(m0, struct ether_header *);
8105 	switch (ntohs(eh->ether_type)) {
8106 	case ETHERTYPE_IP:
8107 	case ETHERTYPE_IPV6:
8108 		offset = ETHER_HDR_LEN;
8109 		break;
8110 
8111 	case ETHERTYPE_VLAN:
8112 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
8113 		break;
8114 
8115 	default:
8116 		/* Don't support this protocol or encapsulation. */
8117 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
8118 		txq->txq_last_hw_ipcs = 0;
8119 		txq->txq_last_hw_tucs = 0;
8120 		*fieldsp = 0;
8121 		*cmdp = 0;
8122 		return;
8123 	}
8124 
8125 	if ((m0->m_pkthdr.csum_flags &
8126 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
8127 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
8128 	} else
8129 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
8130 
8131 	ipcse = offset + iphl - 1;
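	/*
	 * E.g. (illustrative): for an untagged IPv4 frame with no IP
	 * options, offset = 14 and iphl = 20, so ipcse = 33, the offset
	 * of the last byte of the IP header.
	 */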
8132 
8133 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
8134 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
8135 	seg = 0;
8136 	fields = 0;
8137 
8138 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
8139 		int hlen = offset + iphl;
8140 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
8141 
8142 		if (__predict_false(m0->m_len <
8143 				    (hlen + sizeof(struct tcphdr)))) {
8144 			/*
8145 			 * TCP/IP headers are not in the first mbuf; we need
8146 			 * to do this the slow and painful way. Let's just
8147 			 * hope this doesn't happen very often.
8148 			 */
8149 			struct tcphdr th;
8150 
8151 			WM_Q_EVCNT_INCR(txq, tsopain);
8152 
8153 			m_copydata(m0, hlen, sizeof(th), &th);
8154 			if (v4) {
8155 				struct ip ip;
8156 
8157 				m_copydata(m0, offset, sizeof(ip), &ip);
8158 				ip.ip_len = 0;
8159 				m_copyback(m0,
8160 				    offset + offsetof(struct ip, ip_len),
8161 				    sizeof(ip.ip_len), &ip.ip_len);
8162 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
8163 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
8164 			} else {
8165 				struct ip6_hdr ip6;
8166 
8167 				m_copydata(m0, offset, sizeof(ip6), &ip6);
8168 				ip6.ip6_plen = 0;
8169 				m_copyback(m0,
8170 				    offset + offsetof(struct ip6_hdr, ip6_plen),
8171 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
8172 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
8173 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
8174 			}
8175 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
8176 			    sizeof(th.th_sum), &th.th_sum);
8177 
8178 			hlen += th.th_off << 2;
8179 		} else {
8180 			/*
8181 			 * TCP/IP headers are in the first mbuf; we can do
8182 			 * this the easy way.
8183 			 */
8184 			struct tcphdr *th;
8185 
8186 			if (v4) {
8187 				struct ip *ip =
8188 				    (void *)(mtod(m0, char *) + offset);
8189 				th = (void *)(mtod(m0, char *) + hlen);
8190 
8191 				ip->ip_len = 0;
8192 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
8193 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
8194 			} else {
8195 				struct ip6_hdr *ip6 =
8196 				    (void *)(mtod(m0, char *) + offset);
8197 				th = (void *)(mtod(m0, char *) + hlen);
8198 
8199 				ip6->ip6_plen = 0;
8200 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
8201 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
8202 			}
8203 			hlen += th->th_off << 2;
8204 		}
8205 
8206 		if (v4) {
8207 			WM_Q_EVCNT_INCR(txq, tso);
8208 			cmdlen |= WTX_TCPIP_CMD_IP;
8209 		} else {
8210 			WM_Q_EVCNT_INCR(txq, tso6);
8211 			ipcse = 0;
8212 		}
8213 		cmd |= WTX_TCPIP_CMD_TSE;
8214 		cmdlen |= WTX_TCPIP_CMD_TSE |
8215 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
8216 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
8217 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
8218 	}
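	/*
	 * Illustrative TSO example (not from the original sources): for an
	 * untagged IPv4 TCP frame with no options, hlen = 14 + 20 + 20 = 54,
	 * so the context carries WTX_TCPIP_SEG_HDRLEN(54) and
	 * WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz), and cmdlen's length field
	 * covers the m0->m_pkthdr.len - 54 bytes of payload.
	 */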
8219 
8220 	/*
8221 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
8222 	 * offload feature, if we load the context descriptor, we
8223 	 * MUST provide valid values for IPCSS and TUCSS fields.
8224 	 */
8225 
8226 	ipcs = WTX_TCPIP_IPCSS(offset) |
8227 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
8228 	    WTX_TCPIP_IPCSE(ipcse);
8229 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
8230 		WM_Q_EVCNT_INCR(txq, ipsum);
8231 		fields |= WTX_IXSM;
8232 	}
8233 
8234 	offset += iphl;
8235 
8236 	if (m0->m_pkthdr.csum_flags &
8237 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
8238 		WM_Q_EVCNT_INCR(txq, tusum);
8239 		fields |= WTX_TXSM;
8240 		tucs = WTX_TCPIP_TUCSS(offset) |
8241 		    WTX_TCPIP_TUCSO(offset +
8242 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
8243 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
8244 	} else if ((m0->m_pkthdr.csum_flags &
8245 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
8246 		WM_Q_EVCNT_INCR(txq, tusum6);
8247 		fields |= WTX_TXSM;
8248 		tucs = WTX_TCPIP_TUCSS(offset) |
8249 		    WTX_TCPIP_TUCSO(offset +
8250 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
8251 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
8252 	} else {
8253 		/* Just initialize it to a valid TCP context. */
8254 		tucs = WTX_TCPIP_TUCSS(offset) |
8255 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
8256 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
8257 	}
8258 
8259 	*cmdp = cmd;
8260 	*fieldsp = fields;
8261 
8262 	/*
8263 	 * We don't have to write a context descriptor for every packet,
8264 	 * except on the 82574. For the 82574, we must write a context
8265 	 * descriptor for every packet when we use two descriptor queues.
8266 	 *
8267 	 * The 82574L can only remember the *last* context used,
8268 	 * regardless of the queue that it was used for.  We cannot reuse
8269 	 * contexts on this hardware platform and must generate a new
8270 	 * context every time.  82574L hardware spec, section 7.2.6,
8271 	 * second note.
8272 	 */
8273 	if (sc->sc_nqueues < 2) {
8274 		/*
8275 		 * Setting up a new checksum offload context for every
8276 		 * frame takes a lot of processing time for the hardware.
8277 		 * This also reduces performance a lot for small-sized
8278 		 * frames, so avoid it if the driver can use a previously
8279 		 * configured checksum offload context.
8280 		 * For TSO, in theory we could reuse the same TSO context
8281 		 * if the frame is the same type (IP/TCP) and has the same
8282 		 * MSS.  However, checking whether a frame has the same
8283 		 * IP/TCP structure is hard, so just ignore that and always
8284 		 * establish a new TSO context.
8285 		 */
8286 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
8287 		    == 0) {
8288 			if (txq->txq_last_hw_cmd == cmd &&
8289 			    txq->txq_last_hw_fields == fields &&
8290 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
8291 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
8292 				WM_Q_EVCNT_INCR(txq, skipcontext);
8293 				return;
8294 			}
8295 		}
8296 
8297 		txq->txq_last_hw_cmd = cmd;
8298 		txq->txq_last_hw_fields = fields;
8299 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
8300 		txq->txq_last_hw_tucs = (tucs & 0xffff);
8301 	}
8302 
8303 	/* Fill in the context descriptor. */
8304 	t = (struct livengood_tcpip_ctxdesc *)
8305 	    &txq->txq_descs[txq->txq_next];
8306 	t->tcpip_ipcs = htole32(ipcs);
8307 	t->tcpip_tucs = htole32(tucs);
8308 	t->tcpip_cmdlen = htole32(cmdlen);
8309 	t->tcpip_seg = htole32(seg);
8310 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
8311 
8312 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
8313 	txs->txs_ndesc++;
8314 }
8315 
8316 static inline int
8317 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
8318 {
8319 	struct wm_softc *sc = ifp->if_softc;
8320 	u_int cpuid = cpu_index(curcpu());
8321 
8322 	/*
8323 	 * Currently a simple distribution strategy.
8324 	 * TODO:
8325 	 * Distribute by flowid (RSS hash value).
8326 	 */
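	/*
	 * The first modulo maps the current CPU index, rotated by the
	 * per-device affinity offset, onto [0, ncpu); the second folds
	 * that value onto the available Tx queues.
	 */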
8327 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
8328 }
8329 
8330 static inline bool
8331 wm_linkdown_discard(struct wm_txqueue *txq)
8332 {
8333 
8334 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
8335 		return true;
8336 
8337 	return false;
8338 }
8339 
8340 /*
8341  * wm_start:		[ifnet interface function]
8342  *
8343  *	Start packet transmission on the interface.
8344  */
8345 static void
8346 wm_start(struct ifnet *ifp)
8347 {
8348 	struct wm_softc *sc = ifp->if_softc;
8349 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8350 
8351 	KASSERT(if_is_mpsafe(ifp));
8352 	/*
8353 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
8354 	 */
8355 
8356 	mutex_enter(txq->txq_lock);
8357 	if (!txq->txq_stopping)
8358 		wm_start_locked(ifp);
8359 	mutex_exit(txq->txq_lock);
8360 }
8361 
8362 static void
8363 wm_start_locked(struct ifnet *ifp)
8364 {
8365 	struct wm_softc *sc = ifp->if_softc;
8366 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8367 
8368 	wm_send_common_locked(ifp, txq, false);
8369 }
8370 
8371 static int
8372 wm_transmit(struct ifnet *ifp, struct mbuf *m)
8373 {
8374 	int qid;
8375 	struct wm_softc *sc = ifp->if_softc;
8376 	struct wm_txqueue *txq;
8377 
8378 	qid = wm_select_txqueue(ifp, m);
8379 	txq = &sc->sc_queue[qid].wmq_txq;
8380 
8381 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
8382 		m_freem(m);
8383 		WM_Q_EVCNT_INCR(txq, pcqdrop);
8384 		return ENOBUFS;
8385 	}
8386 
8387 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
8388 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
8389 	if (m->m_flags & M_MCAST)
8390 		if_statinc_ref(nsr, if_omcasts);
8391 	IF_STAT_PUTREF(ifp);
8392 
8393 	if (mutex_tryenter(txq->txq_lock)) {
8394 		if (!txq->txq_stopping)
8395 			wm_transmit_locked(ifp, txq);
8396 		mutex_exit(txq->txq_lock);
8397 	}
8398 
8399 	return 0;
8400 }
8401 
8402 static void
8403 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
8404 {
8405 
8406 	wm_send_common_locked(ifp, txq, true);
8407 }
8408 
8409 static void
8410 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
8411     bool is_transmit)
8412 {
8413 	struct wm_softc *sc = ifp->if_softc;
8414 	struct mbuf *m0;
8415 	struct wm_txsoft *txs;
8416 	bus_dmamap_t dmamap;
8417 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
8418 	bus_addr_t curaddr;
8419 	bus_size_t seglen, curlen;
8420 	uint32_t cksumcmd;
8421 	uint8_t cksumfields;
8422 	bool remap = true;
8423 
8424 	KASSERT(mutex_owned(txq->txq_lock));
8425 	KASSERT(!txq->txq_stopping);
8426 
8427 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
8428 		return;
8429 
8430 	if (__predict_false(wm_linkdown_discard(txq))) {
8431 		do {
8432 			if (is_transmit)
8433 				m0 = pcq_get(txq->txq_interq);
8434 			else
8435 				IFQ_DEQUEUE(&ifp->if_snd, m0);
8436 			/*
8437 			 * Increment the success counter even though the packet
8438 			 * is being discarded because the PHY link is down.
8439 			 */
8440 			if (m0 != NULL) {
8441 				if_statinc(ifp, if_opackets);
8442 				m_freem(m0);
8443 			}
8444 		} while (m0 != NULL);
8445 		return;
8446 	}
8447 
8448 	/* Remember the previous number of free descriptors. */
8449 	ofree = txq->txq_free;
8450 
8451 	/*
8452 	 * Loop through the send queue, setting up transmit descriptors
8453 	 * until we drain the queue, or use up all available transmit
8454 	 * descriptors.
8455 	 */
8456 	for (;;) {
8457 		m0 = NULL;
8458 
8459 		/* Get a work queue entry. */
8460 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
8461 			wm_txeof(txq, UINT_MAX);
8462 			if (txq->txq_sfree == 0) {
8463 				DPRINTF(sc, WM_DEBUG_TX,
8464 				    ("%s: TX: no free job descriptors\n",
8465 					device_xname(sc->sc_dev)));
8466 				WM_Q_EVCNT_INCR(txq, txsstall);
8467 				break;
8468 			}
8469 		}
8470 
8471 		/* Grab a packet off the queue. */
8472 		if (is_transmit)
8473 			m0 = pcq_get(txq->txq_interq);
8474 		else
8475 			IFQ_DEQUEUE(&ifp->if_snd, m0);
8476 		if (m0 == NULL)
8477 			break;
8478 
8479 		DPRINTF(sc, WM_DEBUG_TX,
8480 		    ("%s: TX: have packet to transmit: %p\n",
8481 			device_xname(sc->sc_dev), m0));
8482 
8483 		txs = &txq->txq_soft[txq->txq_snext];
8484 		dmamap = txs->txs_dmamap;
8485 
8486 		use_tso = (m0->m_pkthdr.csum_flags &
8487 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
8488 
8489 		/*
8490 		 * So says the Linux driver:
8491 		 * The controller does a simple calculation to make sure
8492 		 * there is enough room in the FIFO before initiating the
8493 		 * DMA for each buffer. The calc is:
8494 		 *	4 = ceil(buffer len / MSS)
8495 		 * To make sure we don't overrun the FIFO, adjust the max
8496 		 * buffer len if the MSS drops.
8497 		 */
8498 		dmamap->dm_maxsegsz =
8499 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
8500 		    ? m0->m_pkthdr.segsz << 2
8501 		    : WTX_MAX_LEN;
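		/*
		 * bus_dmamap_load_mbuf() honors dm_maxsegsz, so this clamp
		 * splits any larger cluster into segments of at most 4 * MSS,
		 * satisfying the FIFO check described above.
		 */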
8502 
8503 		/*
8504 		 * Load the DMA map.  If this fails, the packet either
8505 		 * didn't fit in the allotted number of segments, or we
8506 		 * were short on resources.  For the too-many-segments
8507 		 * case, we simply report an error and drop the packet,
8508 		 * since we can't sanely copy a jumbo packet to a single
8509 		 * buffer.
8510 		 */
8511 retry:
8512 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
8513 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
8514 		if (__predict_false(error)) {
8515 			if (error == EFBIG) {
8516 				if (remap == true) {
8517 					struct mbuf *m;
8518 
8519 					remap = false;
8520 					m = m_defrag(m0, M_NOWAIT);
8521 					if (m != NULL) {
8522 						WM_Q_EVCNT_INCR(txq, defrag);
8523 						m0 = m;
8524 						goto retry;
8525 					}
8526 				}
8527 				WM_Q_EVCNT_INCR(txq, toomanyseg);
8528 				log(LOG_ERR, "%s: Tx packet consumes too many "
8529 				    "DMA segments, dropping...\n",
8530 				    device_xname(sc->sc_dev));
8531 				wm_dump_mbuf_chain(sc, m0);
8532 				m_freem(m0);
8533 				continue;
8534 			}
8535 			/* Short on resources, just stop for now. */
8536 			DPRINTF(sc, WM_DEBUG_TX,
8537 			    ("%s: TX: dmamap load failed: %d\n",
8538 				device_xname(sc->sc_dev), error));
8539 			break;
8540 		}
8541 
8542 		segs_needed = dmamap->dm_nsegs;
8543 		if (use_tso) {
8544 			/* For sentinel descriptor; see below. */
8545 			segs_needed++;
8546 		}
8547 
8548 		/*
8549 		 * Ensure we have enough descriptors free to describe
8550 		 * the packet. Note, we always reserve one descriptor
8551 		 * at the end of the ring due to the semantics of the
8552 		 * TDT register, plus one more in the event we need
8553 		 * to load offload context.
8554 		 */
8555 		if (segs_needed > txq->txq_free - 2) {
8556 			/*
8557 			 * Not enough free descriptors to transmit this
8558 			 * packet.  We haven't committed anything yet,
8559 			 * so just unload the DMA map, put the packet
8560 			 * back on the queue, and punt. Notify the upper
8561 			 * layer that there are no more slots left.
8562 			 */
8563 			DPRINTF(sc, WM_DEBUG_TX,
8564 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
8565 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
8566 				segs_needed, txq->txq_free - 1));
8567 			txq->txq_flags |= WM_TXQ_NO_SPACE;
8568 			bus_dmamap_unload(sc->sc_dmat, dmamap);
8569 			WM_Q_EVCNT_INCR(txq, txdstall);
8570 			break;
8571 		}
8572 
8573 		/*
8574 		 * Check for 82547 Tx FIFO bug. We need to do this
8575 		 * once we know we can transmit the packet, since we
8576 		 * do some internal FIFO space accounting here.
8577 		 */
8578 		if (sc->sc_type == WM_T_82547 &&
8579 		    wm_82547_txfifo_bugchk(sc, m0)) {
8580 			DPRINTF(sc, WM_DEBUG_TX,
8581 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
8582 				device_xname(sc->sc_dev)));
8583 			txq->txq_flags |= WM_TXQ_NO_SPACE;
8584 			bus_dmamap_unload(sc->sc_dmat, dmamap);
8585 			WM_Q_EVCNT_INCR(txq, fifo_stall);
8586 			break;
8587 		}
8588 
8589 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
8590 
8591 		DPRINTF(sc, WM_DEBUG_TX,
8592 		    ("%s: TX: packet has %d (%d) DMA segments\n",
8593 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
8594 
8595 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
8596 
8597 		/*
8598 		 * Store a pointer to the packet so that we can free it
8599 		 * later.
8600 		 *
8601 		 * Initially, we consider the number of descriptors the
8602 		 * Initially, we consider the number of descriptors the
8603 		 * packet uses to be the number of DMA segments.  This may
8604 		 * be incremented by 1 if we do checksum offload (a
8605 		 * descriptor is used to set the checksum context).
8606 		txs->txs_mbuf = m0;
8607 		txs->txs_firstdesc = txq->txq_next;
8608 		txs->txs_ndesc = segs_needed;
8609 
8610 		/* Set up offload parameters for this packet. */
8611 		if (m0->m_pkthdr.csum_flags &
8612 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
8613 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
8614 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
8615 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
8616 		} else {
8617 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
8618 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
8619 			cksumcmd = 0;
8620 			cksumfields = 0;
8621 		}
8622 
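		/* Request interrupt delay (IDE) and FCS insertion (IFCS). */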
8623 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
8624 
8625 		/* Sync the DMA map. */
8626 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
8627 		    BUS_DMASYNC_PREWRITE);
8628 
8629 		/* Initialize the transmit descriptor. */
8630 		for (nexttx = txq->txq_next, seg = 0;
8631 		     seg < dmamap->dm_nsegs; seg++) {
8632 			for (seglen = dmamap->dm_segs[seg].ds_len,
8633 			     curaddr = dmamap->dm_segs[seg].ds_addr;
8634 			     seglen != 0;
8635 			     curaddr += curlen, seglen -= curlen,
8636 			     nexttx = WM_NEXTTX(txq, nexttx)) {
8637 				curlen = seglen;
8638 
8639 				/*
8640 				 * So says the Linux driver:
8641 				 * Work around for premature descriptor
8642 				 * write-backs in TSO mode.  Append a
8643 				 * 4-byte sentinel descriptor.
8644 				 */
8645 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
8646 				    curlen > 8)
8647 					curlen -= 4;
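				/*
				 * The 4 bytes trimmed above are emitted as a
				 * separate sentinel descriptor on the next
				 * pass of this loop; segs_needed was bumped
				 * earlier to account for it.
				 */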
8648 
8649 				wm_set_dma_addr(
8650 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
8651 				txq->txq_descs[nexttx].wtx_cmdlen
8652 				    = htole32(cksumcmd | curlen);
8653 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
8654 				    = 0;
8655 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
8656 				    = cksumfields;
8657 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
8658 				lasttx = nexttx;
8659 
8660 				DPRINTF(sc, WM_DEBUG_TX,
8661 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
8662 					"len %#04zx\n",
8663 					device_xname(sc->sc_dev), nexttx,
8664 					(uint64_t)curaddr, curlen));
8665 			}
8666 		}
8667 
8668 		KASSERT(lasttx != -1);
8669 
8670 		/*
8671 		 * Set up the command byte on the last descriptor of
8672 		 * the packet. If we're in the interrupt delay window,
8673 		 * delay the interrupt.
8674 		 */
8675 		txq->txq_descs[lasttx].wtx_cmdlen |=
8676 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
8677 
8678 		/*
8679 		 * If VLANs are enabled and the packet has a VLAN tag, set
8680 		 * up the descriptor to encapsulate the packet for us.
8681 		 *
8682 		 * This is only valid on the last descriptor of the packet.
8683 		 */
8684 		if (vlan_has_tag(m0)) {
8685 			txq->txq_descs[lasttx].wtx_cmdlen |=
8686 			    htole32(WTX_CMD_VLE);
8687 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
8688 			    = htole16(vlan_get_tag(m0));
8689 		}
8690 
8691 		txs->txs_lastdesc = lasttx;
8692 
8693 		DPRINTF(sc, WM_DEBUG_TX,
8694 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
8695 			device_xname(sc->sc_dev),
8696 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
8697 
8698 		/* Sync the descriptors we're using. */
8699 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
8700 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
8701 
8702 		/* Give the packet to the chip. */
8703 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
8704 
8705 		DPRINTF(sc, WM_DEBUG_TX,
8706 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
8707 
8708 		DPRINTF(sc, WM_DEBUG_TX,
8709 		    ("%s: TX: finished transmitting packet, job %d\n",
8710 			device_xname(sc->sc_dev), txq->txq_snext));
8711 
8712 		/* Advance the tx pointer. */
8713 		txq->txq_free -= txs->txs_ndesc;
8714 		txq->txq_next = nexttx;
8715 
8716 		txq->txq_sfree--;
8717 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
8718 
8719 		/* Pass the packet to any BPF listeners. */
8720 		bpf_mtap(ifp, m0, BPF_D_OUT);
8721 	}
8722 
8723 	if (m0 != NULL) {
8724 		txq->txq_flags |= WM_TXQ_NO_SPACE;
8725 		WM_Q_EVCNT_INCR(txq, descdrop);
8726 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
8727 			__func__));
8728 		m_freem(m0);
8729 	}
8730 
8731 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
8732 		/* No more slots; notify upper layer. */
8733 		txq->txq_flags |= WM_TXQ_NO_SPACE;
8734 	}
8735 
8736 	if (txq->txq_free != ofree) {
8737 		/* Set a watchdog timer in case the chip flakes out. */
8738 		txq->txq_lastsent = time_uptime;
8739 		txq->txq_sending = true;
8740 	}
8741 }
8742 
8743 /*
8744  * wm_nq_tx_offload:
8745  *
8746  *	Set up TCP/IP checksumming parameters for the
8747  *	specified packet, for NEWQUEUE devices
8748  */
8749 static void
8750 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
8751     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
8752 {
8753 	struct mbuf *m0 = txs->txs_mbuf;
8754 	uint32_t vl_len, mssidx, cmdc;
8755 	struct ether_header *eh;
8756 	int offset, iphl;
8757 
8758 	/*
8759 	 * XXX It would be nice if the mbuf pkthdr had offset
8760 	 * fields for the protocol headers.
8761 	 */
8762 	*cmdlenp = 0;
8763 	*fieldsp = 0;
8764 
8765 	eh = mtod(m0, struct ether_header *);
8766 	switch (htons(eh->ether_type)) {
8767 	case ETHERTYPE_IP:
8768 	case ETHERTYPE_IPV6:
8769 		offset = ETHER_HDR_LEN;
8770 		break;
8771 
8772 	case ETHERTYPE_VLAN:
8773 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
8774 		break;
8775 
8776 	default:
8777 		/* Don't support this protocol or encapsulation. */
8778 		*do_csum = false;
8779 		return;
8780 	}
8781 	*do_csum = true;
8782 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
8783 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
8784 
8785 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
8786 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
8787 
8788 	if ((m0->m_pkthdr.csum_flags &
8789 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
8790 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
8791 	} else {
8792 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
8793 	}
8794 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
8795 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
8796 
8797 	if (vlan_has_tag(m0)) {
8798 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
8799 		    << NQTXC_VLLEN_VLAN_SHIFT);
8800 		*cmdlenp |= NQTX_CMD_VLE;
8801 	}
8802 
8803 	mssidx = 0;
8804 
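	/*
	 * For TSO the controller requires ip_len/ip6_plen to be cleared
	 * and th_sum to be seeded with the pseudo-header checksum computed
	 * without the length; the hardware fills in the final values for
	 * each segment it emits.  Both branches below arrange this, either
	 * with m_copydata/m_copyback or directly in the first mbuf.
	 */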
8805 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
8806 		int hlen = offset + iphl;
8807 		int tcp_hlen;
8808 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
8809 
8810 		if (__predict_false(m0->m_len <
8811 				    (hlen + sizeof(struct tcphdr)))) {
8812 			/*
8813 			 * TCP/IP headers are not in the first mbuf; we need
8814 			 * to do this the slow and painful way. Let's just
8815 			 * hope this doesn't happen very often.
8816 			 */
8817 			struct tcphdr th;
8818 
8819 			WM_Q_EVCNT_INCR(txq, tsopain);
8820 
8821 			m_copydata(m0, hlen, sizeof(th), &th);
8822 			if (v4) {
8823 				struct ip ip;
8824 
8825 				m_copydata(m0, offset, sizeof(ip), &ip);
8826 				ip.ip_len = 0;
8827 				m_copyback(m0,
8828 				    offset + offsetof(struct ip, ip_len),
8829 				    sizeof(ip.ip_len), &ip.ip_len);
8830 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
8831 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
8832 			} else {
8833 				struct ip6_hdr ip6;
8834 
8835 				m_copydata(m0, offset, sizeof(ip6), &ip6);
8836 				ip6.ip6_plen = 0;
8837 				m_copyback(m0,
8838 				    offset + offsetof(struct ip6_hdr, ip6_plen),
8839 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
8840 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
8841 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
8842 			}
8843 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
8844 			    sizeof(th.th_sum), &th.th_sum);
8845 
8846 			tcp_hlen = th.th_off << 2;
8847 		} else {
8848 			/*
8849 			 * TCP/IP headers are in the first mbuf; we can do
8850 			 * this the easy way.
8851 			 */
8852 			struct tcphdr *th;
8853 
8854 			if (v4) {
8855 				struct ip *ip =
8856 				    (void *)(mtod(m0, char *) + offset);
8857 				th = (void *)(mtod(m0, char *) + hlen);
8858 
8859 				ip->ip_len = 0;
8860 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
8861 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
8862 			} else {
8863 				struct ip6_hdr *ip6 =
8864 				    (void *)(mtod(m0, char *) + offset);
8865 				th = (void *)(mtod(m0, char *) + hlen);
8866 
8867 				ip6->ip6_plen = 0;
8868 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
8869 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
8870 			}
8871 			tcp_hlen = th->th_off << 2;
8872 		}
8873 		hlen += tcp_hlen;
8874 		*cmdlenp |= NQTX_CMD_TSE;
8875 
8876 		if (v4) {
8877 			WM_Q_EVCNT_INCR(txq, tso);
8878 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
8879 		} else {
8880 			WM_Q_EVCNT_INCR(txq, tso6);
8881 			*fieldsp |= NQTXD_FIELDS_TUXSM;
8882 		}
8883 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
8884 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
8885 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
8886 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
8887 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
8888 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
8889 	} else {
8890 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
8891 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
8892 	}
8893 
8894 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
8895 		*fieldsp |= NQTXD_FIELDS_IXSM;
8896 		cmdc |= NQTXC_CMD_IP4;
8897 	}
8898 
8899 	if (m0->m_pkthdr.csum_flags &
8900 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
8901 		WM_Q_EVCNT_INCR(txq, tusum);
8902 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
8903 			cmdc |= NQTXC_CMD_TCP;
8904 		else
8905 			cmdc |= NQTXC_CMD_UDP;
8906 
8907 		cmdc |= NQTXC_CMD_IP4;
8908 		*fieldsp |= NQTXD_FIELDS_TUXSM;
8909 	}
8910 	if (m0->m_pkthdr.csum_flags &
8911 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
8912 		WM_Q_EVCNT_INCR(txq, tusum6);
8913 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
8914 			cmdc |= NQTXC_CMD_TCP;
8915 		else
8916 			cmdc |= NQTXC_CMD_UDP;
8917 
8918 		cmdc |= NQTXC_CMD_IP6;
8919 		*fieldsp |= NQTXD_FIELDS_TUXSM;
8920 	}
8921 
8922 	/*
8923 	 * We don't have to write a context descriptor for every packet on
8924 	 * NEWQUEUE controllers, that is, the 82575, 82576, 82580, I350,
8925 	 * I354, I210 and I211. It is enough to write one per Tx queue for
8926 	 * these controllers.
8927 	 * Writing a context descriptor for every packet adds overhead,
8928 	 * but it does not cause problems.
8929 	 */
8930 	/* Fill in the context descriptor. */
8931 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_vl_len =
8932 	    htole32(vl_len);
8933 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_sn = 0;
8934 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_cmd =
8935 	    htole32(cmdc);
8936 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_mssidx =
8937 	    htole32(mssidx);
8938 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
8939 	DPRINTF(sc, WM_DEBUG_TX,
8940 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
8941 		txq->txq_next, 0, vl_len));
8942 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
8943 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
8944 	txs->txs_ndesc++;
8945 }
8946 
8947 /*
8948  * wm_nq_start:		[ifnet interface function]
8949  *
8950  *	Start packet transmission on the interface for NEWQUEUE devices
8951  */
8952 static void
8953 wm_nq_start(struct ifnet *ifp)
8954 {
8955 	struct wm_softc *sc = ifp->if_softc;
8956 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8957 
8958 	KASSERT(if_is_mpsafe(ifp));
8959 	/*
8960 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
8961 	 */
8962 
8963 	mutex_enter(txq->txq_lock);
8964 	if (!txq->txq_stopping)
8965 		wm_nq_start_locked(ifp);
8966 	mutex_exit(txq->txq_lock);
8967 }
8968 
8969 static void
8970 wm_nq_start_locked(struct ifnet *ifp)
8971 {
8972 	struct wm_softc *sc = ifp->if_softc;
8973 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8974 
8975 	wm_nq_send_common_locked(ifp, txq, false);
8976 }
8977 
8978 static int
8979 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
8980 {
8981 	int qid;
8982 	struct wm_softc *sc = ifp->if_softc;
8983 	struct wm_txqueue *txq;
8984 
8985 	qid = wm_select_txqueue(ifp, m);
8986 	txq = &sc->sc_queue[qid].wmq_txq;
8987 
8988 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
8989 		m_freem(m);
8990 		WM_Q_EVCNT_INCR(txq, pcqdrop);
8991 		return ENOBUFS;
8992 	}
8993 
8994 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
8995 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
8996 	if (m->m_flags & M_MCAST)
8997 		if_statinc_ref(nsr, if_omcasts);
8998 	IF_STAT_PUTREF(ifp);
8999 
9000 	/*
9001 	 * There are two situations in which this mutex_tryenter() can fail
9002 	 * at run time:
9003 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
9004 	 *     (2) contention with the deferred if_start softint
9005 	 *         (wm_handle_queue())
9006 	 * In case (1), the last packet enqueued to txq->txq_interq is
9007 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
9008 	 * In case (2), the last packet is likewise dequeued by
9009 	 * wm_deferred_start_locked(), so it does not get stuck either.
9010 	 */
9011 	if (mutex_tryenter(txq->txq_lock)) {
9012 		if (!txq->txq_stopping)
9013 			wm_nq_transmit_locked(ifp, txq);
9014 		mutex_exit(txq->txq_lock);
9015 	}
9016 
9017 	return 0;
9018 }
9019 
9020 static void
9021 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
9022 {
9023 
9024 	wm_nq_send_common_locked(ifp, txq, true);
9025 }
9026 
9027 static void
9028 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
9029     bool is_transmit)
9030 {
9031 	struct wm_softc *sc = ifp->if_softc;
9032 	struct mbuf *m0;
9033 	struct wm_txsoft *txs;
9034 	bus_dmamap_t dmamap;
9035 	int error, nexttx, lasttx = -1, seg, segs_needed;
9036 	bool do_csum, sent;
9037 	bool remap = true;
9038 
9039 	KASSERT(mutex_owned(txq->txq_lock));
9040 	KASSERT(!txq->txq_stopping);
9041 
9042 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
9043 		return;
9044 
9045 	if (__predict_false(wm_linkdown_discard(txq))) {
9046 		do {
9047 			if (is_transmit)
9048 				m0 = pcq_get(txq->txq_interq);
9049 			else
9050 				IFQ_DEQUEUE(&ifp->if_snd, m0);
9051 			/*
9052 			 * Increment the success counter even though the packet
9053 			 * is being discarded because the PHY link is down.
9054 			 */
9055 			if (m0 != NULL) {
9056 				if_statinc(ifp, if_opackets);
9057 				m_freem(m0);
9058 			}
9059 		} while (m0 != NULL);
9060 		return;
9061 	}
9062 
9063 	sent = false;
9064 
9065 	/*
9066 	 * Loop through the send queue, setting up transmit descriptors
9067 	 * until we drain the queue, or use up all available transmit
9068 	 * descriptors.
9069 	 */
9070 	for (;;) {
9071 		m0 = NULL;
9072 
9073 		/* Get a work queue entry. */
9074 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
9075 			wm_txeof(txq, UINT_MAX);
9076 			if (txq->txq_sfree == 0) {
9077 				DPRINTF(sc, WM_DEBUG_TX,
9078 				    ("%s: TX: no free job descriptors\n",
9079 					device_xname(sc->sc_dev)));
9080 				WM_Q_EVCNT_INCR(txq, txsstall);
9081 				break;
9082 			}
9083 		}
9084 
9085 		/* Grab a packet off the queue. */
9086 		if (is_transmit)
9087 			m0 = pcq_get(txq->txq_interq);
9088 		else
9089 			IFQ_DEQUEUE(&ifp->if_snd, m0);
9090 		if (m0 == NULL)
9091 			break;
9092 
9093 		DPRINTF(sc, WM_DEBUG_TX,
9094 		    ("%s: TX: have packet to transmit: %p\n",
9095 			device_xname(sc->sc_dev), m0));
9096 
9097 		txs = &txq->txq_soft[txq->txq_snext];
9098 		dmamap = txs->txs_dmamap;
9099 
9100 		/*
9101 		 * Load the DMA map.  If this fails, the packet either
9102 		 * didn't fit in the allotted number of segments, or we
9103 		 * were short on resources.  For the too-many-segments
9104 		 * case, we simply report an error and drop the packet,
9105 		 * since we can't sanely copy a jumbo packet to a single
9106 		 * buffer.
9107 		 */
9108 retry:
9109 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
9110 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
9111 		if (__predict_false(error)) {
9112 			if (error == EFBIG) {
9113 				if (remap == true) {
9114 					struct mbuf *m;
9115 
9116 					remap = false;
9117 					m = m_defrag(m0, M_NOWAIT);
9118 					if (m != NULL) {
9119 						WM_Q_EVCNT_INCR(txq, defrag);
9120 						m0 = m;
9121 						goto retry;
9122 					}
9123 				}
9124 				WM_Q_EVCNT_INCR(txq, toomanyseg);
9125 				log(LOG_ERR, "%s: Tx packet consumes too many "
9126 				    "DMA segments, dropping...\n",
9127 				    device_xname(sc->sc_dev));
9128 				wm_dump_mbuf_chain(sc, m0);
9129 				m_freem(m0);
9130 				continue;
9131 			}
9132 			/* Short on resources, just stop for now. */
9133 			DPRINTF(sc, WM_DEBUG_TX,
9134 			    ("%s: TX: dmamap load failed: %d\n",
9135 				device_xname(sc->sc_dev), error));
9136 			break;
9137 		}
9138 
9139 		segs_needed = dmamap->dm_nsegs;
9140 
9141 		/*
9142 		 * Ensure we have enough descriptors free to describe
9143 		 * the packet. Note, we always reserve one descriptor
9144 		 * at the end of the ring due to the semantics of the
9145 		 * TDT register, plus one more in the event we need
9146 		 * to load offload context.
9147 		 */
9148 		if (segs_needed > txq->txq_free - 2) {
9149 			/*
9150 			 * Not enough free descriptors to transmit this
9151 			 * packet.  We haven't committed anything yet,
9152 			 * so just unload the DMA map, put the packet
9153 			 * back on the queue, and punt. Notify the upper
9154 			 * layer that there are no more slots left.
9155 			 */
9156 			DPRINTF(sc, WM_DEBUG_TX,
9157 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
9158 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
9159 				segs_needed, txq->txq_free - 1));
9160 			txq->txq_flags |= WM_TXQ_NO_SPACE;
9161 			bus_dmamap_unload(sc->sc_dmat, dmamap);
9162 			WM_Q_EVCNT_INCR(txq, txdstall);
9163 			break;
9164 		}
9165 
9166 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
9167 
9168 		DPRINTF(sc, WM_DEBUG_TX,
9169 		    ("%s: TX: packet has %d (%d) DMA segments\n",
9170 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
9171 
9172 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
9173 
9174 		/*
9175 		 * Store a pointer to the packet so that we can free it
9176 		 * later.
9177 		 *
9178 		 * Initially, we consider the number of descriptors the
9179 		 * packet uses to be the number of DMA segments.  This may
9180 		 * be incremented by 1 if we do checksum offload (a
9181 		 * descriptor is used to set the checksum context).
9182 		 */
9183 		txs->txs_mbuf = m0;
9184 		txs->txs_firstdesc = txq->txq_next;
9185 		txs->txs_ndesc = segs_needed;
9186 
9187 		/* Set up offload parameters for this packet. */
9188 		uint32_t cmdlen, fields, dcmdlen;
9189 		if (m0->m_pkthdr.csum_flags &
9190 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
9191 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
9192 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
9193 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
9194 			    &do_csum);
9195 		} else {
9196 			do_csum = false;
9197 			cmdlen = 0;
9198 			fields = 0;
9199 		}
9200 
9201 		/* Sync the DMA map. */
9202 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
9203 		    BUS_DMASYNC_PREWRITE);
9204 
9205 		/* Initialize the first transmit descriptor. */
9206 		nexttx = txq->txq_next;
9207 		if (!do_csum) {
9208 			/* Set up a legacy descriptor */
9209 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
9210 			    dmamap->dm_segs[0].ds_addr);
9211 			txq->txq_descs[nexttx].wtx_cmdlen =
9212 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
9213 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
9214 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
9215 			if (vlan_has_tag(m0)) {
9216 				txq->txq_descs[nexttx].wtx_cmdlen |=
9217 				    htole32(WTX_CMD_VLE);
9218 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
9219 				    htole16(vlan_get_tag(m0));
9220 			} else
9221 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
9222 
9223 			dcmdlen = 0;
9224 		} else {
9225 			/* Set up an advanced data descriptor */
9226 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
9227 			    htole64(dmamap->dm_segs[0].ds_addr);
9228 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
9229 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
9230 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
9231 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
9232 			    htole32(fields);
9233 			DPRINTF(sc, WM_DEBUG_TX,
9234 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
9235 				device_xname(sc->sc_dev), nexttx,
9236 				(uint64_t)dmamap->dm_segs[0].ds_addr));
9237 			DPRINTF(sc, WM_DEBUG_TX,
9238 			    ("\t 0x%08x%08x\n", fields,
9239 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
9240 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
9241 		}
9242 
9243 		lasttx = nexttx;
9244 		nexttx = WM_NEXTTX(txq, nexttx);
9245 		/*
9246 		 * Fill in the next descriptors. Legacy or advanced format
9247 		 * is the same here.
9248 		 */
9249 		for (seg = 1; seg < dmamap->dm_nsegs;
9250 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
9251 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
9252 			    htole64(dmamap->dm_segs[seg].ds_addr);
9253 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
9254 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
9255 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
9256 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
9257 			lasttx = nexttx;
9258 
9259 			DPRINTF(sc, WM_DEBUG_TX,
9260 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
9261 				device_xname(sc->sc_dev), nexttx,
9262 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
9263 				dmamap->dm_segs[seg].ds_len));
9264 		}
9265 
9266 		KASSERT(lasttx != -1);
9267 
9268 		/*
9269 		 * Set up the command byte on the last descriptor of
9270 		 * the packet. If we're in the interrupt delay window,
9271 		 * delay the interrupt.
9272 		 */
9273 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
9274 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
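		/*
		 * The legacy and advanced EOP/RS command bits coincide
		 * (asserted above), so the legacy descriptor view works
		 * for both formats here.
		 */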
9275 		txq->txq_descs[lasttx].wtx_cmdlen |=
9276 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
9277 
9278 		txs->txs_lastdesc = lasttx;
9279 
9280 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
9281 		    device_xname(sc->sc_dev),
9282 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
9283 
9284 		/* Sync the descriptors we're using. */
9285 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
9286 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
9287 
9288 		/* Give the packet to the chip. */
9289 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
9290 		sent = true;
9291 
9292 		DPRINTF(sc, WM_DEBUG_TX,
9293 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
9294 
9295 		DPRINTF(sc, WM_DEBUG_TX,
9296 		    ("%s: TX: finished transmitting packet, job %d\n",
9297 			device_xname(sc->sc_dev), txq->txq_snext));
9298 
9299 		/* Advance the tx pointer. */
9300 		txq->txq_free -= txs->txs_ndesc;
9301 		txq->txq_next = nexttx;
9302 
9303 		txq->txq_sfree--;
9304 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
9305 
9306 		/* Pass the packet to any BPF listeners. */
9307 		bpf_mtap(ifp, m0, BPF_D_OUT);
9308 	}
9309 
9310 	if (m0 != NULL) {
9311 		txq->txq_flags |= WM_TXQ_NO_SPACE;
9312 		WM_Q_EVCNT_INCR(txq, descdrop);
9313 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
9314 			__func__));
9315 		m_freem(m0);
9316 	}
9317 
9318 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
9319 		/* No more slots; notify upper layer. */
9320 		txq->txq_flags |= WM_TXQ_NO_SPACE;
9321 	}
9322 
9323 	if (sent) {
9324 		/* Set a watchdog timer in case the chip flakes out. */
9325 		txq->txq_lastsent = time_uptime;
9326 		txq->txq_sending = true;
9327 	}
9328 }
9329 
9330 static void
9331 wm_deferred_start_locked(struct wm_txqueue *txq)
9332 {
9333 	struct wm_softc *sc = txq->txq_sc;
9334 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9335 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
9336 	int qid = wmq->wmq_id;
9337 
9338 	KASSERT(mutex_owned(txq->txq_lock));
9339 	KASSERT(!txq->txq_stopping);
9340 
9341 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
9342 		/* XXX needed for ALTQ or a single-CPU system */
9343 		if (qid == 0)
9344 			wm_nq_start_locked(ifp);
9345 		wm_nq_transmit_locked(ifp, txq);
9346 	} else {
9347 		/* XXX needed for ALTQ or a single-CPU system */
9348 		if (qid == 0)
9349 			wm_start_locked(ifp);
9350 		wm_transmit_locked(ifp, txq);
9351 	}
9352 }
9353 
9354 /* Interrupt */
9355 
9356 /*
9357  * wm_txeof:
9358  *
9359  *	Helper; handle transmit interrupts.
9360  */
9361 static bool
9362 wm_txeof(struct wm_txqueue *txq, u_int limit)
9363 {
9364 	struct wm_softc *sc = txq->txq_sc;
9365 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9366 	struct wm_txsoft *txs;
9367 	int count = 0;
9368 	int i;
9369 	uint8_t status;
9370 	bool more = false;
9371 
9372 	KASSERT(mutex_owned(txq->txq_lock));
9373 
9374 	if (txq->txq_stopping)
9375 		return false;
9376 
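	/* Descriptors may be reclaimed below; let the Tx path try again. */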
9377 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
9378 
9379 	/*
9380 	 * Go through the Tx list and free mbufs for those
9381 	 * frames which have been transmitted.
9382 	 */
9383 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
9384 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
9385 		txs = &txq->txq_soft[i];
9386 
9387 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
9388 			device_xname(sc->sc_dev), i));
9389 
9390 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
9391 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
9392 
9393 		status =
9394 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
9395 		if ((status & WTX_ST_DD) == 0) {
9396 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
9397 			    BUS_DMASYNC_PREREAD);
9398 			break;
9399 		}
9400 
9401 		if (limit-- == 0) {
9402 			more = true;
9403 			DPRINTF(sc, WM_DEBUG_TX,
9404 			    ("%s: TX: loop limited, job %d is not processed\n",
9405 				device_xname(sc->sc_dev), i));
9406 			break;
9407 		}
9408 
9409 		count++;
9410 		DPRINTF(sc, WM_DEBUG_TX,
9411 		    ("%s: TX: job %d done: descs %d..%d\n",
9412 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
9413 		    txs->txs_lastdesc));
9414 
9415 		/*
9416 		 * XXX We should probably be using the statistics
9417 		 * XXX registers, but I don't know if they exist
9418 		 * XXX on chips before the i82544.
9419 		 */
9420 
9421 #ifdef WM_EVENT_COUNTERS
9422 		if (status & WTX_ST_TU)
9423 			WM_Q_EVCNT_INCR(txq, underrun);
9424 #endif /* WM_EVENT_COUNTERS */
9425 
9426 		/*
9427 		 * The 82574 and newer datasheets say the status field has
9428 		 * neither an EC (Excessive Collision) bit nor an LC (Late
9429 		 * Collision) bit (both reserved). See the "PCIe GbE Controller
9430 		 * Open Source Software Developer's Manual", the 82574 datasheet
9431 		 * and newer.
9432 		 *
9433 		 * XXX I saw the LC bit set on an I218 even though the media
9434 		 * was full duplex, so the bit might have some other
9435 		 * meaning ... (I have no documentation).
9435 		 */
9436 
9437 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
9438 		    && ((sc->sc_type < WM_T_82574)
9439 			|| (sc->sc_type == WM_T_80003))) {
9440 			if_statinc(ifp, if_oerrors);
9441 			if (status & WTX_ST_LC)
9442 				log(LOG_WARNING, "%s: late collision\n",
9443 				    device_xname(sc->sc_dev));
9444 			else if (status & WTX_ST_EC) {
9445 				if_statadd(ifp, if_collisions,
9446 				    TX_COLLISION_THRESHOLD + 1);
9447 				log(LOG_WARNING, "%s: excessive collisions\n",
9448 				    device_xname(sc->sc_dev));
9449 			}
9450 		} else
9451 			if_statinc(ifp, if_opackets);
9452 
9453 		txq->txq_packets++;
9454 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
9455 
9456 		txq->txq_free += txs->txs_ndesc;
9457 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
9458 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
9459 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
9460 		m_freem(txs->txs_mbuf);
9461 		txs->txs_mbuf = NULL;
9462 	}
9463 
9464 	/* Update the dirty transmit buffer pointer. */
9465 	txq->txq_sdirty = i;
9466 	DPRINTF(sc, WM_DEBUG_TX,
9467 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
9468 
9469 	if (count != 0)
9470 		rnd_add_uint32(&sc->rnd_source, count);
9471 
9472 	/*
9473 	 * If there are no more pending transmissions, cancel the watchdog
9474 	 * timer.
9475 	 */
9476 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
9477 		txq->txq_sending = false;
9478 
9479 	return more;
9480 }
9481 
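/*
 * The wm_rxdesc_* accessors below hide the three Rx descriptor formats:
 * extended descriptors on the 82574, advanced descriptors on NEWQUEUE
 * (82575 and newer) controllers, and legacy descriptors everywhere else.
 */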
9482 static inline uint32_t
9483 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
9484 {
9485 	struct wm_softc *sc = rxq->rxq_sc;
9486 
9487 	if (sc->sc_type == WM_T_82574)
9488 		return EXTRXC_STATUS(
9489 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
9490 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9491 		return NQRXC_STATUS(
9492 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
9493 	else
9494 		return rxq->rxq_descs[idx].wrx_status;
9495 }
9496 
9497 static inline uint32_t
9498 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
9499 {
9500 	struct wm_softc *sc = rxq->rxq_sc;
9501 
9502 	if (sc->sc_type == WM_T_82574)
9503 		return EXTRXC_ERROR(
9504 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
9505 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9506 		return NQRXC_ERROR(
9507 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
9508 	else
9509 		return rxq->rxq_descs[idx].wrx_errors;
9510 }
9511 
9512 static inline uint16_t
9513 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
9514 {
9515 	struct wm_softc *sc = rxq->rxq_sc;
9516 
9517 	if (sc->sc_type == WM_T_82574)
9518 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
9519 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9520 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
9521 	else
9522 		return rxq->rxq_descs[idx].wrx_special;
9523 }
9524 
9525 static inline int
9526 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
9527 {
9528 	struct wm_softc *sc = rxq->rxq_sc;
9529 
9530 	if (sc->sc_type == WM_T_82574)
9531 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
9532 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9533 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
9534 	else
9535 		return rxq->rxq_descs[idx].wrx_len;
9536 }
9537 
9538 #ifdef WM_DEBUG
9539 static inline uint32_t
9540 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
9541 {
9542 	struct wm_softc *sc = rxq->rxq_sc;
9543 
9544 	if (sc->sc_type == WM_T_82574)
9545 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
9546 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9547 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
9548 	else
9549 		return 0;
9550 }
9551 
9552 static inline uint8_t
9553 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
9554 {
9555 	struct wm_softc *sc = rxq->rxq_sc;
9556 
9557 	if (sc->sc_type == WM_T_82574)
9558 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
9559 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9560 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
9561 	else
9562 		return 0;
9563 }
9564 #endif /* WM_DEBUG */
9565 
9566 static inline bool
9567 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
9568     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
9569 {
9570 
9571 	if (sc->sc_type == WM_T_82574)
9572 		return (status & ext_bit) != 0;
9573 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9574 		return (status & nq_bit) != 0;
9575 	else
9576 		return (status & legacy_bit) != 0;
9577 }
9578 
9579 static inline bool
9580 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
9581     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
9582 {
9583 
9584 	if (sc->sc_type == WM_T_82574)
9585 		return (error & ext_bit) != 0;
9586 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9587 		return (error & nq_bit) != 0;
9588 	else
9589 		return (error & legacy_bit) != 0;
9590 }
9591 
9592 static inline bool
9593 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
9594 {
9595 
9596 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
9597 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
9598 		return true;
9599 	else
9600 		return false;
9601 }
9602 
9603 static inline bool
9604 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
9605 {
9606 	struct wm_softc *sc = rxq->rxq_sc;
9607 
9608 	/* XXX missing error bit for newqueue? */
9609 	if (wm_rxdesc_is_set_error(sc, errors,
9610 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
9611 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
9612 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
9613 		NQRXC_ERROR_RXE)) {
9614 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
9615 		    EXTRXC_ERROR_SE, 0))
9616 			log(LOG_WARNING, "%s: symbol error\n",
9617 			    device_xname(sc->sc_dev));
9618 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
9619 		    EXTRXC_ERROR_SEQ, 0))
9620 			log(LOG_WARNING, "%s: receive sequence error\n",
9621 			    device_xname(sc->sc_dev));
9622 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
9623 		    EXTRXC_ERROR_CE, 0))
9624 			log(LOG_WARNING, "%s: CRC error\n",
9625 			    device_xname(sc->sc_dev));
9626 		return true;
9627 	}
9628 
9629 	return false;
9630 }
9631 
9632 static inline bool
9633 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
9634 {
9635 	struct wm_softc *sc = rxq->rxq_sc;
9636 
9637 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
9638 		NQRXC_STATUS_DD)) {
9639 		/* We have processed all of the receive descriptors. */
9640 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
9641 		return false;
9642 	}
9643 
9644 	return true;
9645 }
9646 
9647 static inline bool
9648 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
9649     uint16_t vlantag, struct mbuf *m)
9650 {
9651 
9652 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
9653 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
9654 		vlan_set_tag(m, le16toh(vlantag));
9655 	}
9656 
9657 	return true;
9658 }
9659 
9660 static inline void
9661 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
9662     uint32_t errors, struct mbuf *m)
9663 {
9664 	struct wm_softc *sc = rxq->rxq_sc;
9665 
9666 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
9667 		if (wm_rxdesc_is_set_status(sc, status,
9668 		    WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
9669 			WM_Q_EVCNT_INCR(rxq, ipsum);
9670 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
9671 			if (wm_rxdesc_is_set_error(sc, errors,
9672 			    WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
9673 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
9674 		}
9675 		if (wm_rxdesc_is_set_status(sc, status,
9676 		    WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
9677 			/*
9678 			 * Note: we don't know if this was TCP or UDP,
9679 			 * so we just set both bits, and expect the
9680 			 * upper layers to deal.
9681 			 */
9682 			WM_Q_EVCNT_INCR(rxq, tusum);
9683 			m->m_pkthdr.csum_flags |=
9684 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
9685 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
9686 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
9687 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
9688 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
9689 		}
9690 	}
9691 }
9692 
9693 /*
9694  * wm_rxeof:
9695  *
9696  *	Helper; handle receive interrupts.
9697  */
9698 static bool
9699 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
9700 {
9701 	struct wm_softc *sc = rxq->rxq_sc;
9702 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9703 	struct wm_rxsoft *rxs;
9704 	struct mbuf *m;
9705 	int i, len;
9706 	int count = 0;
9707 	uint32_t status, errors;
9708 	uint16_t vlantag;
9709 	bool more = false;
9710 
9711 	KASSERT(mutex_owned(rxq->rxq_lock));
9712 
9713 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
9714 		rxs = &rxq->rxq_soft[i];
9715 
9716 		DPRINTF(sc, WM_DEBUG_RX,
9717 		    ("%s: RX: checking descriptor %d\n",
9718 			device_xname(sc->sc_dev), i));
9719 		wm_cdrxsync(rxq, i,
9720 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
9721 
9722 		status = wm_rxdesc_get_status(rxq, i);
9723 		errors = wm_rxdesc_get_errors(rxq, i);
9724 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
9725 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
9726 #ifdef WM_DEBUG
9727 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
9728 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
9729 #endif
9730 
9731 		if (!wm_rxdesc_dd(rxq, i, status))
9732 			break;
9733 
9734 		if (limit-- == 0) {
9735 			more = true;
9736 			DPRINTF(sc, WM_DEBUG_RX,
9737 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
9738 				device_xname(sc->sc_dev), i));
9739 			break;
9740 		}
9741 
9742 		count++;
9743 		if (__predict_false(rxq->rxq_discard)) {
9744 			DPRINTF(sc, WM_DEBUG_RX,
9745 			    ("%s: RX: discarding contents of descriptor %d\n",
9746 				device_xname(sc->sc_dev), i));
9747 			wm_init_rxdesc(rxq, i);
9748 			if (wm_rxdesc_is_eop(rxq, status)) {
9749 				/* Reset our state. */
9750 				DPRINTF(sc, WM_DEBUG_RX,
9751 				    ("%s: RX: resetting rxdiscard -> 0\n",
9752 					device_xname(sc->sc_dev)));
9753 				rxq->rxq_discard = 0;
9754 			}
9755 			continue;
9756 		}
9757 
9758 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
9759 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
9760 
9761 		m = rxs->rxs_mbuf;
9762 
9763 		/*
9764 		 * Add a new receive buffer to the ring, unless of
9765 		 * course the length is zero. Treat the latter as a
9766 		 * failed mapping.
9767 		 */
9768 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
9769 			/*
9770 			 * Failed, throw away what we've done so
9771 			 * far, and discard the rest of the packet.
9772 			 */
9773 			if_statinc(ifp, if_ierrors);
9774 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
9775 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
9776 			wm_init_rxdesc(rxq, i);
9777 			if (!wm_rxdesc_is_eop(rxq, status))
9778 				rxq->rxq_discard = 1;
9779 			if (rxq->rxq_head != NULL)
9780 				m_freem(rxq->rxq_head);
9781 			WM_RXCHAIN_RESET(rxq);
9782 			DPRINTF(sc, WM_DEBUG_RX,
9783 			    ("%s: RX: Rx buffer allocation failed, "
9784 			    "dropping packet%s\n", device_xname(sc->sc_dev),
9785 				rxq->rxq_discard ? " (discard)" : ""));
9786 			continue;
9787 		}
9788 
9789 		m->m_len = len;
9790 		rxq->rxq_len += len;
9791 		DPRINTF(sc, WM_DEBUG_RX,
9792 		    ("%s: RX: buffer at %p len %d\n",
9793 			device_xname(sc->sc_dev), m->m_data, len));
9794 
9795 		/* If this is not the end of the packet, keep looking. */
9796 		if (!wm_rxdesc_is_eop(rxq, status)) {
9797 			WM_RXCHAIN_LINK(rxq, m);
9798 			DPRINTF(sc, WM_DEBUG_RX,
9799 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
9800 				device_xname(sc->sc_dev), rxq->rxq_len));
9801 			continue;
9802 		}
9803 
9804 		/*
9805 		 * Okay, we have the entire packet now. The chip is
9806 		 * Okay, we have the entire packet now. The chip is
9807 		 * configured to include the FCS except on the I35[04] and
9808 		 * I21[01] (not all chips can be configured to strip it), so
9809 		 * we normally need to trim it. Those chips have an erratum:
9810 		 * the RCTL_SECRC bit in the RCTL register is always set, so
9811 		 * for them we don't trim it. PCH2 and newer chips also do
9812 		 * not include the FCS when jumbo frames are used, to work
9813 		 * around an erratum. We may need to adjust the length of the
9814 		 * previous mbuf in the chain if the current mbuf is too short.
9815 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
9816 			if (m->m_len < ETHER_CRC_LEN) {
9817 				rxq->rxq_tail->m_len
9818 				    -= (ETHER_CRC_LEN - m->m_len);
9819 				m->m_len = 0;
9820 			} else
9821 				m->m_len -= ETHER_CRC_LEN;
9822 			len = rxq->rxq_len - ETHER_CRC_LEN;
9823 		} else
9824 			len = rxq->rxq_len;
9825 
9826 		WM_RXCHAIN_LINK(rxq, m);
9827 
9828 		*rxq->rxq_tailp = NULL;
9829 		m = rxq->rxq_head;
9830 
9831 		WM_RXCHAIN_RESET(rxq);
9832 
9833 		DPRINTF(sc, WM_DEBUG_RX,
9834 		    ("%s: RX: have entire packet, len -> %d\n",
9835 			device_xname(sc->sc_dev), len));
9836 
9837 		/* If an error occurred, update stats and drop the packet. */
9838 		if (wm_rxdesc_has_errors(rxq, errors)) {
9839 			m_freem(m);
9840 			continue;
9841 		}
9842 
9843 		/* No errors.  Receive the packet. */
9844 		m_set_rcvif(m, ifp);
9845 		m->m_pkthdr.len = len;
9846 		/*
9847 		 * TODO
9848 		 * We should save the rsshash and rsstype in this mbuf.
9849 		 */
9850 		DPRINTF(sc, WM_DEBUG_RX,
9851 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
9852 			device_xname(sc->sc_dev), rsstype, rsshash));
9853 
9854 		/*
9855 		 * If VLANs are enabled, VLAN packets have been unwrapped
9856 		 * for us.  Associate the tag with the packet.
9857 		 */
9858 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
9859 			continue;
9860 
9861 		/* Set up checksum info for this packet. */
9862 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
9863 
9864 		rxq->rxq_packets++;
9865 		rxq->rxq_bytes += len;
9866 		/* Pass it on. */
9867 		if_percpuq_enqueue(sc->sc_ipq, m);
9868 
9869 		if (rxq->rxq_stopping)
9870 			break;
9871 	}
9872 	rxq->rxq_ptr = i;
9873 
9874 	if (count != 0)
9875 		rnd_add_uint32(&sc->rnd_source, count);
9876 
9877 	DPRINTF(sc, WM_DEBUG_RX,
9878 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
9879 
9880 	return more;
9881 }
9882 
9883 /*
9884  * wm_linkintr_gmii:
9885  *
9886  *	Helper; handle link interrupts for GMII.
9887  */
9888 static void
9889 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
9890 {
9891 	device_t dev = sc->sc_dev;
9892 	uint32_t status, reg;
9893 	bool link;
9894 	int rv;
9895 
9896 	KASSERT(mutex_owned(sc->sc_core_lock));
9897 
9898 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
9899 		__func__));
9900 
9901 	if ((icr & ICR_LSC) == 0) {
9902 		if (icr & ICR_RXSEQ)
9903 			DPRINTF(sc, WM_DEBUG_LINK,
9904 			    ("%s: LINK: Receive sequence error\n",
9905 				device_xname(dev)));
9906 		return;
9907 	}
9908 
9909 	/* Link status changed */
9910 	status = CSR_READ(sc, WMREG_STATUS);
9911 	link = status & STATUS_LU;
9912 	if (link) {
9913 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
9914 			device_xname(dev),
9915 			(status & STATUS_FD) ? "FDX" : "HDX"));
9916 		if (wm_phy_need_linkdown_discard(sc)) {
9917 			DPRINTF(sc, WM_DEBUG_LINK,
9918 			    ("%s: linkintr: Clear linkdown discard flag\n",
9919 				device_xname(dev)));
9920 			wm_clear_linkdown_discard(sc);
9921 		}
9922 	} else {
9923 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
9924 			device_xname(dev)));
9925 		if (wm_phy_need_linkdown_discard(sc)) {
9926 			DPRINTF(sc, WM_DEBUG_LINK,
9927 			    ("%s: linkintr: Set linkdown discard flag\n",
9928 				device_xname(dev)));
9929 			wm_set_linkdown_discard(sc);
9930 		}
9931 	}
9932 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
9933 		wm_gig_downshift_workaround_ich8lan(sc);
9934 
9935 	if ((sc->sc_type == WM_T_ICH8) && (sc->sc_phytype == WMPHY_IGP_3))
9936 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
9937 
9938 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
9939 		device_xname(dev)));
9940 	mii_pollstat(&sc->sc_mii);
9941 	if (sc->sc_type == WM_T_82543) {
9942 		int miistatus, active;
9943 
9944 		/*
9945 		 * With the 82543, we need to force the speed and
9946 		 * duplex on the MAC to match the PHY's speed and
9947 		 * duplex configuration.
9948 		 */
9949 		miistatus = sc->sc_mii.mii_media_status;
9950 
9951 		if (miistatus & IFM_ACTIVE) {
9952 			active = sc->sc_mii.mii_media_active;
9953 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
9954 			switch (IFM_SUBTYPE(active)) {
9955 			case IFM_10_T:
9956 				sc->sc_ctrl |= CTRL_SPEED_10;
9957 				break;
9958 			case IFM_100_TX:
9959 				sc->sc_ctrl |= CTRL_SPEED_100;
9960 				break;
9961 			case IFM_1000_T:
9962 				sc->sc_ctrl |= CTRL_SPEED_1000;
9963 				break;
9964 			default:
9965 				/*
9966 				 * Fiber?
9967 				 * Should not enter here.
9968 				 */
9969 				device_printf(dev, "unknown media (%x)\n",
9970 				    active);
9971 				break;
9972 			}
9973 			if (active & IFM_FDX)
9974 				sc->sc_ctrl |= CTRL_FD;
9975 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9976 		}
9977 	} else if (sc->sc_type == WM_T_PCH) {
9978 		wm_k1_gig_workaround_hv(sc,
9979 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
9980 	}
9981 
9982 	/*
9983 	 * When connected at 10Mbps half-duplex, some parts are excessively
9984 	 * aggressive resulting in many collisions. To avoid this, increase
9985 	 * the IPG and reduce Rx latency in the PHY.
9986 	 */
9987 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
9988 	    && link) {
9989 		uint32_t tipg_reg;
9990 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
9991 		bool fdx;
9992 		uint16_t emi_addr, emi_val;
9993 
9994 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
9995 		tipg_reg &= ~TIPG_IPGT_MASK;
9996 		fdx = status & STATUS_FD;
9997 
9998 		if (!fdx && (speed == STATUS_SPEED_10)) {
9999 			tipg_reg |= 0xff;
10000 			/* Reduce Rx latency in analog PHY */
10001 			emi_val = 0;
10002 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
10003 		    fdx && speed != STATUS_SPEED_1000) {
10004 			tipg_reg |= 0xc;
10005 			emi_val = 1;
10006 		} else {
10007 			/* Roll back the default values */
10008 			tipg_reg |= 0x08;
10009 			emi_val = 1;
10010 		}
10011 
10012 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
10013 
10014 		rv = sc->phy.acquire(sc);
10015 		if (rv)
10016 			return;
10017 
10018 		if (sc->sc_type == WM_T_PCH2)
10019 			emi_addr = I82579_RX_CONFIG;
10020 		else
10021 			emi_addr = I217_RX_CONFIG;
10022 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
10023 
10024 		if (sc->sc_type >= WM_T_PCH_LPT) {
10025 			uint16_t phy_reg;
10026 
10027 			sc->phy.readreg_locked(dev, 2,
10028 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
10029 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
10030 			if (speed == STATUS_SPEED_100
10031 			    || speed == STATUS_SPEED_10)
10032 				phy_reg |= 0x3e8;
10033 			else
10034 				phy_reg |= 0xfa;
10035 			sc->phy.writereg_locked(dev, 2,
10036 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
10037 
10038 			if (speed == STATUS_SPEED_1000) {
10039 				sc->phy.readreg_locked(dev, 2,
10040 				    HV_PM_CTRL, &phy_reg);
10041 
10042 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
10043 
10044 				sc->phy.writereg_locked(dev, 2,
10045 				    HV_PM_CTRL, phy_reg);
10046 			}
10047 		}
10048 		sc->phy.release(sc);
10049 
10050 		if (rv)
10051 			return;
10052 
10053 		if (sc->sc_type >= WM_T_PCH_SPT) {
10054 			uint16_t data, ptr_gap;
10055 
10056 			if (speed == STATUS_SPEED_1000) {
10057 				rv = sc->phy.acquire(sc);
10058 				if (rv)
10059 					return;
10060 
10061 				rv = sc->phy.readreg_locked(dev, 2,
10062 				    I82579_UNKNOWN1, &data);
10063 				if (rv) {
10064 					sc->phy.release(sc);
10065 					return;
10066 				}
10067 
10068 				ptr_gap = (data & (0x3ff << 2)) >> 2;
10069 				if (ptr_gap < 0x18) {
10070 					data &= ~(0x3ff << 2);
10071 					data |= (0x18 << 2);
10072 					rv = sc->phy.writereg_locked(dev,
10073 					    2, I82579_UNKNOWN1, data);
10074 				}
10075 				sc->phy.release(sc);
10076 				if (rv)
10077 					return;
10078 			} else {
10079 				rv = sc->phy.acquire(sc);
10080 				if (rv)
10081 					return;
10082 
10083 				rv = sc->phy.writereg_locked(dev, 2,
10084 				    I82579_UNKNOWN1, 0xc023);
10085 				sc->phy.release(sc);
10086 				if (rv)
10087 					return;
10088 
10089 			}
10090 		}
10091 	}
10092 
10093 	/*
10094 	 * I217 Packet Loss issue:
10095 	 * Ensure that the FEXTNVM4 Beacon Duration is set correctly
10096 	 * on power up.
10097 	 * Set the Beacon Duration for I217 to 8 usec.
10098 	 */
10099 	if (sc->sc_type >= WM_T_PCH_LPT) {
10100 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
10101 		reg &= ~FEXTNVM4_BEACON_DURATION;
10102 		reg |= FEXTNVM4_BEACON_DURATION_8US;
10103 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
10104 	}
10105 
10106 	/* Work around the I218 hang issue */
10107 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
10108 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
10109 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
10110 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
10111 		wm_k1_workaround_lpt_lp(sc, link);
10112 
10113 	if (sc->sc_type >= WM_T_PCH_LPT) {
10114 		/*
10115 		 * Set platform power management values for Latency
10116 		 * Tolerance Reporting (LTR)
10117 		 */
10118 		wm_platform_pm_pch_lpt(sc,
10119 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
10120 	}
10121 
10122 	/* Clear link partner's EEE ability */
10123 	sc->eee_lp_ability = 0;
10124 
10125 	/* FEXTNVM6 K1-off workaround */
10126 	if (sc->sc_type == WM_T_PCH_SPT) {
10127 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
10128 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
10129 			reg |= FEXTNVM6_K1_OFF_ENABLE;
10130 		else
10131 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
10132 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
10133 	}
10134 
10135 	if (!link)
10136 		return;
10137 
10138 	switch (sc->sc_type) {
10139 	case WM_T_PCH2:
10140 		wm_k1_workaround_lv(sc);
10141 		/* FALLTHROUGH */
10142 	case WM_T_PCH:
10143 		if (sc->sc_phytype == WMPHY_82578)
10144 			wm_link_stall_workaround_hv(sc);
10145 		break;
10146 	default:
10147 		break;
10148 	}
10149 
10150 	/* Enable/Disable EEE after link up */
10151 	if (sc->sc_phytype > WMPHY_82579)
10152 		wm_set_eee_pchlan(sc);
10153 }
10154 
10155 /*
10156  * wm_linkintr_tbi:
10157  *
10158  *	Helper; handle link interrupts for TBI mode.
10159  */
10160 static void
10161 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
10162 {
10163 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10164 	uint32_t status;
10165 
10166 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
10167 		__func__));
10168 
10169 	status = CSR_READ(sc, WMREG_STATUS);
10170 	if (icr & ICR_LSC) {
10171 		wm_check_for_link(sc);
10172 		if (status & STATUS_LU) {
10173 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
10174 				device_xname(sc->sc_dev),
10175 				(status & STATUS_FD) ? "FDX" : "HDX"));
10176 			/*
10177 			 * NOTE: CTRL will update TFCE and RFCE automatically,
10178 			 * so we should update sc->sc_ctrl
10179 			 */
10180 
10181 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
10182 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
10183 			sc->sc_fcrtl &= ~FCRTL_XONE;
10184 			if (status & STATUS_FD)
10185 				sc->sc_tctl |=
10186 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
10187 			else
10188 				sc->sc_tctl |=
10189 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
10190 			if (sc->sc_ctrl & CTRL_TFCE)
10191 				sc->sc_fcrtl |= FCRTL_XONE;
10192 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
10193 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
10194 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
10195 			sc->sc_tbi_linkup = 1;
10196 			if_link_state_change(ifp, LINK_STATE_UP);
10197 		} else {
10198 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
10199 				device_xname(sc->sc_dev)));
10200 			sc->sc_tbi_linkup = 0;
10201 			if_link_state_change(ifp, LINK_STATE_DOWN);
10202 		}
10203 		/* Update LED */
10204 		wm_tbi_serdes_set_linkled(sc);
10205 	} else if (icr & ICR_RXSEQ)
10206 		DPRINTF(sc, WM_DEBUG_LINK,
10207 		    ("%s: LINK: Receive sequence error\n",
10208 			device_xname(sc->sc_dev)));
10209 }
10210 
10211 /*
10212  * wm_linkintr_serdes:
10213  *
10214  *	Helper; handle link interrupts for SERDES mode.
10215  */
10216 static void
10217 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
10218 {
10219 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10220 	struct mii_data *mii = &sc->sc_mii;
10221 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
10222 	uint32_t pcs_adv, pcs_lpab, reg;
10223 
10224 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
10225 		__func__));
10226 
10227 	if (icr & ICR_LSC) {
10228 		/* Check PCS */
10229 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
10230 		if ((reg & PCS_LSTS_LINKOK) != 0) {
10231 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
10232 				device_xname(sc->sc_dev)));
10233 			mii->mii_media_status |= IFM_ACTIVE;
10234 			sc->sc_tbi_linkup = 1;
10235 			if_link_state_change(ifp, LINK_STATE_UP);
10236 		} else {
10237 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
10238 				device_xname(sc->sc_dev)));
10239 			mii->mii_media_active |= IFM_NONE;
10240 			sc->sc_tbi_linkup = 0;
10241 			if_link_state_change(ifp, LINK_STATE_DOWN);
10242 			wm_tbi_serdes_set_linkled(sc);
10243 			return;
10244 		}
10245 		mii->mii_media_active |= IFM_1000_SX;
10246 		if ((reg & PCS_LSTS_FDX) != 0)
10247 			mii->mii_media_active |= IFM_FDX;
10248 		else
10249 			mii->mii_media_active |= IFM_HDX;
10250 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
10251 			/* Check flow */
10252 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
10253 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
10254 				DPRINTF(sc, WM_DEBUG_LINK,
10255 				    ("XXX LINKOK but not ACOMP\n"));
10256 				return;
10257 			}
10258 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
10259 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
10260 			DPRINTF(sc, WM_DEBUG_LINK,
10261 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
10262 			if ((pcs_adv & TXCW_SYM_PAUSE)
10263 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
10264 				mii->mii_media_active |= IFM_FLOW
10265 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
10266 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
10267 			    && (pcs_adv & TXCW_ASYM_PAUSE)
10268 			    && (pcs_lpab & TXCW_SYM_PAUSE)
10269 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
10270 				mii->mii_media_active |= IFM_FLOW
10271 				    | IFM_ETH_TXPAUSE;
10272 			else if ((pcs_adv & TXCW_SYM_PAUSE)
10273 			    && (pcs_adv & TXCW_ASYM_PAUSE)
10274 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
10275 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
10276 				mii->mii_media_active |= IFM_FLOW
10277 				    | IFM_ETH_RXPAUSE;
10278 		}
10279 		/* Update LED */
10280 		wm_tbi_serdes_set_linkled(sc);
10281 	} else
10282 		DPRINTF(sc, WM_DEBUG_LINK,
10283 		    ("%s: LINK: Receive sequence error\n",
10284 		    device_xname(sc->sc_dev)));
10285 }
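
/*
 * Reading aid for the flow-control resolution above (derived from the
 * code; it matches the IEEE 802.3 Annex 28B pause resolution):
 *
 *	local adv	link partner	result
 *	SYM		SYM		TX and RX pause
 *	ASYM only	SYM + ASYM	TX pause only
 *	SYM + ASYM	ASYM only	RX pause only
 *	anything else			no pause
 */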
10286 
10287 /*
10288  * wm_linkintr:
10289  *
10290  *	Helper; handle link interrupts.
10291  */
10292 static void
10293 wm_linkintr(struct wm_softc *sc, uint32_t icr)
10294 {
10295 
10296 	KASSERT(mutex_owned(sc->sc_core_lock));
10297 
10298 	if (sc->sc_flags & WM_F_HAS_MII)
10299 		wm_linkintr_gmii(sc, icr);
10300 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
10301 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
10302 		wm_linkintr_serdes(sc, icr);
10303 	else
10304 		wm_linkintr_tbi(sc, icr);
10305 }
10306 
10307 
10308 static inline void
10309 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
10310 {
10311 
10312 	if (wmq->wmq_txrx_use_workqueue) {
10313 		if (!wmq->wmq_wq_enqueued) {
10314 			wmq->wmq_wq_enqueued = true;
10315 			workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie,
10316 			    curcpu());
10317 		}
10318 	} else
10319 		softint_schedule(wmq->wmq_si);
10320 }
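
/*
 * Why the wmq_wq_enqueued flag above matters (an illustration, not part of
 * the driver logic): workqueue(9) does not allow enqueueing the same
 * struct work twice, so without the flag two back-to-back interrupts
 * could do:
 *
 *	intr #1: workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, ...)
 *	intr #2: workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, ...)
 *
 * The flag is cleared in wm_handle_queue_work() before the queue is
 * serviced, so an interrupt arriving during servicing can re-enqueue.
 */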
10321 
10322 static inline void
10323 wm_legacy_intr_disable(struct wm_softc *sc)
10324 {
10325 
10326 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
10327 }
10328 
10329 static inline void
10330 wm_legacy_intr_enable(struct wm_softc *sc)
10331 {
10332 
10333 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
10334 }
10335 
10336 /*
10337  * wm_intr_legacy:
10338  *
10339  *	Interrupt service routine for INTx and MSI.
10340  */
10341 static int
10342 wm_intr_legacy(void *arg)
10343 {
10344 	struct wm_softc *sc = arg;
10345 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10346 	struct wm_queue *wmq = &sc->sc_queue[0];
10347 	struct wm_txqueue *txq = &wmq->wmq_txq;
10348 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
10349 	u_int txlimit = sc->sc_tx_intr_process_limit;
10350 	u_int rxlimit = sc->sc_rx_intr_process_limit;
10351 	uint32_t icr, rndval = 0;
10352 	bool more = false;
10353 
10354 	icr = CSR_READ(sc, WMREG_ICR);
10355 	if ((icr & sc->sc_icr) == 0)
10356 		return 0;
10357 
10358 	DPRINTF(sc, WM_DEBUG_TX,
10359 	    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
10360 	/* Sample the ICR value for entropy. */
10361 	rndval = icr;
10362 
10363 	mutex_enter(txq->txq_lock);
10364 
10365 	if (txq->txq_stopping) {
10366 		mutex_exit(txq->txq_lock);
10367 		return 1;
10368 	}
10369 
10370 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
10371 	if (icr & ICR_TXDW) {
10372 		DPRINTF(sc, WM_DEBUG_TX,
10373 		    ("%s: TX: got TXDW interrupt\n",
10374 			device_xname(sc->sc_dev)));
10375 		WM_Q_EVCNT_INCR(txq, txdw);
10376 	}
10377 #endif
10378 	if (txlimit > 0) {
10379 		more |= wm_txeof(txq, txlimit);
10380 		if (!IF_IS_EMPTY(&ifp->if_snd))
10381 			more = true;
10382 	} else
10383 		more = true;
10384 	mutex_exit(txq->txq_lock);
10385 
10386 	mutex_enter(rxq->rxq_lock);
10387 
10388 	if (rxq->rxq_stopping) {
10389 		mutex_exit(rxq->rxq_lock);
10390 		return 1;
10391 	}
10392 
10393 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
10394 	if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
10395 		DPRINTF(sc, WM_DEBUG_RX,
10396 		    ("%s: RX: got Rx intr %#" __PRIxBIT "\n",
10397 			device_xname(sc->sc_dev),
10398 			icr & (ICR_RXDMT0 | ICR_RXT0)));
10399 		WM_Q_EVCNT_INCR(rxq, intr);
10400 	}
10401 #endif
10402 	if (rxlimit > 0) {
10403 		/*
10404 		 * wm_rxeof() does *not* call upper layer functions directly,
10405 		 * as if_percpuq_enqueue() just calls softint_schedule(),
10406 		 * so we can call wm_rxeof() in interrupt context.
10407 		 */
10408 		more = wm_rxeof(rxq, rxlimit);
10409 	} else
10410 		more = true;
10411 
10412 	mutex_exit(rxq->rxq_lock);
10413 
10414 	mutex_enter(sc->sc_core_lock);
10415 
10416 	if (sc->sc_core_stopping) {
10417 		mutex_exit(sc->sc_core_lock);
10418 		return 1;
10419 	}
10420 
10421 	if (icr & (ICR_LSC | ICR_RXSEQ)) {
10422 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
10423 		wm_linkintr(sc, icr);
10424 	}
10425 	if ((icr & ICR_GPI(0)) != 0)
10426 		device_printf(sc->sc_dev, "got module interrupt\n");
10427 
10428 	mutex_exit(sc->sc_core_lock);
10429 
10430 	if (icr & ICR_RXO) {
10431 #if defined(WM_DEBUG)
10432 		log(LOG_WARNING, "%s: Receive overrun\n",
10433 		    device_xname(sc->sc_dev));
10434 #endif /* defined(WM_DEBUG) */
10435 	}
10436 
10437 	rnd_add_uint32(&sc->rnd_source, rndval);
10438 
10439 	if (more) {
10440 		/* Try to get more packets going. */
10441 		wm_legacy_intr_disable(sc);
10442 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
10443 		wm_sched_handle_queue(sc, wmq);
10444 	}
10445 
10446 	return 1;
10447 }
10448 
10449 static inline void
10450 wm_txrxintr_disable(struct wm_queue *wmq)
10451 {
10452 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
10453 
10454 	if (__predict_false(!wm_is_using_msix(sc))) {
10455 		wm_legacy_intr_disable(sc);
10456 		return;
10457 	}
10458 
10459 	if (sc->sc_type == WM_T_82574)
10460 		CSR_WRITE(sc, WMREG_IMC,
10461 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
10462 	else if (sc->sc_type == WM_T_82575)
10463 		CSR_WRITE(sc, WMREG_EIMC,
10464 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
10465 	else
10466 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
10467 }
10468 
10469 static inline void
10470 wm_txrxintr_enable(struct wm_queue *wmq)
10471 {
10472 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
10473 
10474 	wm_itrs_calculate(sc, wmq);
10475 
10476 	if (__predict_false(!wm_is_using_msix(sc))) {
10477 		wm_legacy_intr_enable(sc);
10478 		return;
10479 	}
10480 
10481 	/*
10482 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
10483 	 * here. It does not matter which of RXQ(0) and RXQ(1) enables
10484 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
10485 	 * while its wm_handle_queue(wmq) is running.
10486 	 */
10487 	if (sc->sc_type == WM_T_82574)
10488 		CSR_WRITE(sc, WMREG_IMS,
10489 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
10490 	else if (sc->sc_type == WM_T_82575)
10491 		CSR_WRITE(sc, WMREG_EIMS,
10492 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
10493 	else
10494 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
10495 }
10496 
10497 static int
10498 wm_txrxintr_msix(void *arg)
10499 {
10500 	struct wm_queue *wmq = arg;
10501 	struct wm_txqueue *txq = &wmq->wmq_txq;
10502 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
10503 	struct wm_softc *sc = txq->txq_sc;
10504 	u_int txlimit = sc->sc_tx_intr_process_limit;
10505 	u_int rxlimit = sc->sc_rx_intr_process_limit;
10506 	bool txmore;
10507 	bool rxmore;
10508 
10509 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
10510 
10511 	DPRINTF(sc, WM_DEBUG_TX,
10512 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
10513 
10514 	wm_txrxintr_disable(wmq);
10515 
10516 	mutex_enter(txq->txq_lock);
10517 
10518 	if (txq->txq_stopping) {
10519 		mutex_exit(txq->txq_lock);
10520 		return 1;
10521 	}
10522 
10523 	WM_Q_EVCNT_INCR(txq, txdw);
10524 	if (txlimit > 0) {
10525 		txmore = wm_txeof(txq, txlimit);
10526 		/* wm_deferred start() is done in wm_handle_queue(). */
10527 	} else
10528 		txmore = true;
10529 	mutex_exit(txq->txq_lock);
10530 
10531 	DPRINTF(sc, WM_DEBUG_RX,
10532 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
10533 	mutex_enter(rxq->rxq_lock);
10534 
10535 	if (rxq->rxq_stopping) {
10536 		mutex_exit(rxq->rxq_lock);
10537 		return 1;
10538 	}
10539 
10540 	WM_Q_EVCNT_INCR(rxq, intr);
10541 	if (rxlimit > 0) {
10542 		rxmore = wm_rxeof(rxq, rxlimit);
10543 	} else
10544 		rxmore = true;
10545 	mutex_exit(rxq->rxq_lock);
10546 
10547 	wm_itrs_writereg(sc, wmq);
10548 
10549 	if (txmore || rxmore) {
10550 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
10551 		wm_sched_handle_queue(sc, wmq);
10552 	} else
10553 		wm_txrxintr_enable(wmq);
10554 
10555 	return 1;
10556 }
10557 
10558 static void
10559 wm_handle_queue(void *arg)
10560 {
10561 	struct wm_queue *wmq = arg;
10562 	struct wm_txqueue *txq = &wmq->wmq_txq;
10563 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
10564 	struct wm_softc *sc = txq->txq_sc;
10565 	u_int txlimit = sc->sc_tx_process_limit;
10566 	u_int rxlimit = sc->sc_rx_process_limit;
10567 	bool txmore;
10568 	bool rxmore;
10569 
10570 	mutex_enter(txq->txq_lock);
10571 	if (txq->txq_stopping) {
10572 		mutex_exit(txq->txq_lock);
10573 		return;
10574 	}
10575 	txmore = wm_txeof(txq, txlimit);
10576 	wm_deferred_start_locked(txq);
10577 	mutex_exit(txq->txq_lock);
10578 
10579 	mutex_enter(rxq->rxq_lock);
10580 	if (rxq->rxq_stopping) {
10581 		mutex_exit(rxq->rxq_lock);
10582 		return;
10583 	}
10584 	WM_Q_EVCNT_INCR(rxq, defer);
10585 	rxmore = wm_rxeof(rxq, rxlimit);
10586 	mutex_exit(rxq->rxq_lock);
10587 
10588 	if (txmore || rxmore) {
10589 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
10590 		wm_sched_handle_queue(sc, wmq);
10591 	} else
10592 		wm_txrxintr_enable(wmq);
10593 }
10594 
10595 static void
10596 wm_handle_queue_work(struct work *wk, void *context)
10597 {
10598 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
10599 
10600 	/*
10601 	 * Workaround for some qemu environments: they don't stop
10602 	 * interrupts immediately.
10603 	 */
10604 	wmq->wmq_wq_enqueued = false;
10605 	wm_handle_queue(wmq);
10606 }
10607 
10608 /*
10609  * wm_linkintr_msix:
10610  *
10611  *	Interrupt service routine for link status change for MSI-X.
10612  */
10613 static int
10614 wm_linkintr_msix(void *arg)
10615 {
10616 	struct wm_softc *sc = arg;
10617 	uint32_t reg;
10618 	bool has_rxo;
10619 
10620 	reg = CSR_READ(sc, WMREG_ICR);
10621 	mutex_enter(sc->sc_core_lock);
10622 	DPRINTF(sc, WM_DEBUG_LINK,
10623 	    ("%s: LINK: got link intr. ICR = %08x\n",
10624 		device_xname(sc->sc_dev), reg));
10625 
10626 	if (sc->sc_core_stopping)
10627 		goto out;
10628 
10629 	if ((reg & ICR_LSC) != 0) {
10630 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
10631 		wm_linkintr(sc, ICR_LSC);
10632 	}
10633 	if ((reg & ICR_GPI(0)) != 0)
10634 		device_printf(sc->sc_dev, "got module interrupt\n");
10635 
10636 	/*
10637 	 * XXX 82574 MSI-X mode workaround
10638 	 *
10639 	 * 82574 MSI-X mode raises a receive overrun (RXO) interrupt on the
10640 	 * ICR_OTHER MSI-X vector, and raises neither the ICR_RXQ(0) nor the
10641 	 * ICR_RXQ(1) vector. So we generate ICR_RXQ(0) and ICR_RXQ(1)
10642 	 * interrupts by writing WMREG_ICS to process the received packets.
10643 	 */
10644 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
10645 #if defined(WM_DEBUG)
10646 		log(LOG_WARNING, "%s: Receive overrun\n",
10647 		    device_xname(sc->sc_dev));
10648 #endif /* defined(WM_DEBUG) */
10649 
10650 		has_rxo = true;
10651 		/*
10652 		 * The RXO interrupt fires at a very high rate when receive
10653 		 * traffic is heavy, so use polling mode for ICR_OTHER, as for
10654 		 * the Tx/Rx interrupts. ICR_OTHER is re-enabled at the end of
10655 		 * wm_txrxintr_msix(), which is kicked by both the ICR_RXQ(0)
10656 		 * and ICR_RXQ(1) interrupts.
10657 		 */
10658 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
10659 
10660 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
10661 	}
10662 
10665 out:
10666 	mutex_exit(sc->sc_core_lock);
10667 
10668 	if (sc->sc_type == WM_T_82574) {
10669 		if (!has_rxo)
10670 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
10671 		else
10672 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
10673 	} else if (sc->sc_type == WM_T_82575)
10674 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
10675 	else
10676 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
10677 
10678 	return 1;
10679 }
10680 
10681 /*
10682  * Media related.
10683  * GMII, SGMII, TBI (and SERDES)
10684  */
10685 
10686 /* Common */
10687 
10688 /*
10689  * wm_tbi_serdes_set_linkled:
10690  *
10691  *	Update the link LED on TBI and SERDES devices.
10692  */
10693 static void
10694 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
10695 {
10696 
10697 	if (sc->sc_tbi_linkup)
10698 		sc->sc_ctrl |= CTRL_SWDPIN(0);
10699 	else
10700 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
10701 
10702 	/* 82540 or newer devices are active low */
10703 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
10704 
10705 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10706 }
10707 
10708 /* GMII related */
10709 
10710 /*
10711  * wm_gmii_reset:
10712  *
10713  *	Reset the PHY.
10714  */
10715 static void
10716 wm_gmii_reset(struct wm_softc *sc)
10717 {
10718 	uint32_t reg;
10719 	int rv;
10720 
10721 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
10722 		device_xname(sc->sc_dev), __func__));
10723 
10724 	rv = sc->phy.acquire(sc);
10725 	if (rv != 0) {
10726 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10727 		    __func__);
10728 		return;
10729 	}
10730 
10731 	switch (sc->sc_type) {
10732 	case WM_T_82542_2_0:
10733 	case WM_T_82542_2_1:
10734 		/* null */
10735 		break;
10736 	case WM_T_82543:
10737 		/*
10738 		 * With 82543, we need to force speed and duplex on the MAC
10739 		 * equal to what the PHY speed and duplex configuration is.
10740 		 * In addition, we need to perform a hardware reset on the PHY
10741 		 * to take it out of reset.
10742 		 */
10743 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
10744 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10745 
10746 		/* The PHY reset pin is active-low. */
10747 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
10748 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
10749 		    CTRL_EXT_SWDPIN(4));
10750 		reg |= CTRL_EXT_SWDPIO(4);
10751 
10752 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
10753 		CSR_WRITE_FLUSH(sc);
10754 		delay(10*1000);
10755 
10756 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
10757 		CSR_WRITE_FLUSH(sc);
10758 		delay(150);
10759 #if 0
10760 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
10761 #endif
10762 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
10763 		break;
10764 	case WM_T_82544:	/* Reset 10000us */
10765 	case WM_T_82540:
10766 	case WM_T_82545:
10767 	case WM_T_82545_3:
10768 	case WM_T_82546:
10769 	case WM_T_82546_3:
10770 	case WM_T_82541:
10771 	case WM_T_82541_2:
10772 	case WM_T_82547:
10773 	case WM_T_82547_2:
10774 	case WM_T_82571:	/* Reset 100us */
10775 	case WM_T_82572:
10776 	case WM_T_82573:
10777 	case WM_T_82574:
10778 	case WM_T_82575:
10779 	case WM_T_82576:
10780 	case WM_T_82580:
10781 	case WM_T_I350:
10782 	case WM_T_I354:
10783 	case WM_T_I210:
10784 	case WM_T_I211:
10785 	case WM_T_82583:
10786 	case WM_T_80003:
10787 		/* Generic reset */
10788 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
10789 		CSR_WRITE_FLUSH(sc);
10790 		delay(20000);
10791 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10792 		CSR_WRITE_FLUSH(sc);
10793 		delay(20000);
10794 
10795 		if ((sc->sc_type == WM_T_82541)
10796 		    || (sc->sc_type == WM_T_82541_2)
10797 		    || (sc->sc_type == WM_T_82547)
10798 		    || (sc->sc_type == WM_T_82547_2)) {
10799 			/* Workaround for igp are done in igp_reset() */
10800 			/* XXX add code to set LED after phy reset */
10801 		}
10802 		break;
10803 	case WM_T_ICH8:
10804 	case WM_T_ICH9:
10805 	case WM_T_ICH10:
10806 	case WM_T_PCH:
10807 	case WM_T_PCH2:
10808 	case WM_T_PCH_LPT:
10809 	case WM_T_PCH_SPT:
10810 	case WM_T_PCH_CNP:
10811 		/* Generic reset */
10812 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
10813 		CSR_WRITE_FLUSH(sc);
10814 		delay(100);
10815 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10816 		CSR_WRITE_FLUSH(sc);
10817 		delay(150);
10818 		break;
10819 	default:
10820 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
10821 		    __func__);
10822 		break;
10823 	}
10824 
10825 	sc->phy.release(sc);
10826 
10827 	/* get_cfg_done */
10828 	wm_get_cfg_done(sc);
10829 
10830 	/* Extra setup */
10831 	switch (sc->sc_type) {
10832 	case WM_T_82542_2_0:
10833 	case WM_T_82542_2_1:
10834 	case WM_T_82543:
10835 	case WM_T_82544:
10836 	case WM_T_82540:
10837 	case WM_T_82545:
10838 	case WM_T_82545_3:
10839 	case WM_T_82546:
10840 	case WM_T_82546_3:
10841 	case WM_T_82541_2:
10842 	case WM_T_82547_2:
10843 	case WM_T_82571:
10844 	case WM_T_82572:
10845 	case WM_T_82573:
10846 	case WM_T_82574:
10847 	case WM_T_82583:
10848 	case WM_T_82575:
10849 	case WM_T_82576:
10850 	case WM_T_82580:
10851 	case WM_T_I350:
10852 	case WM_T_I354:
10853 	case WM_T_I210:
10854 	case WM_T_I211:
10855 	case WM_T_80003:
10856 		/* Null */
10857 		break;
10858 	case WM_T_82541:
10859 	case WM_T_82547:
10860 		/* XXX Configure actively LED after PHY reset */
10861 		break;
10862 	case WM_T_ICH8:
10863 	case WM_T_ICH9:
10864 	case WM_T_ICH10:
10865 	case WM_T_PCH:
10866 	case WM_T_PCH2:
10867 	case WM_T_PCH_LPT:
10868 	case WM_T_PCH_SPT:
10869 	case WM_T_PCH_CNP:
10870 		wm_phy_post_reset(sc);
10871 		break;
10872 	default:
10873 		panic("%s: unknown type\n", __func__);
10874 		break;
10875 	}
10876 }
10877 
10878 /*
10879  * Set up sc_phytype and mii_{read|write}reg.
10880  *
10881  *  To identify the PHY type, the correct read/write functions must be
10882  * selected, and to select them, the PCI ID or MAC type must be used,
10883  * since the PHY registers can't be accessed yet.
10884  *
10885  *  On the first call of this function, the PHY ID is not known yet, so
10886  * check the PCI ID or MAC type. The list of PCI IDs may not be perfect,
10887  * so the result might be incorrect.
10888  *
10889  *  On the second call, the PHY OUI and model are used to identify the
10890  * PHY type. This might still not be perfect because of missing table
10891  * entries, but it is better than the first call.
10892  *
10893  *  If the newly detected result differs from the previous assumption,
10894  * a diagnostic message is printed.
10895  */
10896 static void
10897 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
10898     uint16_t phy_model)
10899 {
10900 	device_t dev = sc->sc_dev;
10901 	struct mii_data *mii = &sc->sc_mii;
10902 	uint16_t new_phytype = WMPHY_UNKNOWN;
10903 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
10904 	mii_readreg_t new_readreg;
10905 	mii_writereg_t new_writereg;
10906 	bool dodiag = true;
10907 
10908 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
10909 		device_xname(sc->sc_dev), __func__));
10910 
10911 	/*
10912 	 * 1000BASE-T SFP uses SGMII and the first assumed PHY type is always
10913 	 * incorrect, so don't print diagnostic output on the second call.
10914 	 */
10915 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
10916 		dodiag = false;
10917 
10918 	if (mii->mii_readreg == NULL) {
10919 		/*
10920 		 *  This is the first call of this function. For ICH and PCH
10921 		 * variants, it's difficult to determine the PHY access method
10922 		 * by sc_type, so use the PCI product ID for some devices.
10923 		 */
10924 
10925 		switch (sc->sc_pcidevid) {
10926 		case PCI_PRODUCT_INTEL_PCH_M_LM:
10927 		case PCI_PRODUCT_INTEL_PCH_M_LC:
10928 			/* 82577 */
10929 			new_phytype = WMPHY_82577;
10930 			break;
10931 		case PCI_PRODUCT_INTEL_PCH_D_DM:
10932 		case PCI_PRODUCT_INTEL_PCH_D_DC:
10933 			/* 82578 */
10934 			new_phytype = WMPHY_82578;
10935 			break;
10936 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
10937 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
10938 			/* 82579 */
10939 			new_phytype = WMPHY_82579;
10940 			break;
10941 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
10942 		case PCI_PRODUCT_INTEL_82801I_BM:
10943 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
10944 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
10945 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
10946 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
10947 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
10948 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
10949 			/* ICH8, 9, 10 with 82567 */
10950 			new_phytype = WMPHY_BM;
10951 			break;
10952 		default:
10953 			break;
10954 		}
10955 	} else {
10956 		/* It's not the first call. Use PHY OUI and model */
10957 		switch (phy_oui) {
10958 		case MII_OUI_ATTANSIC: /* atphy(4) */
10959 			switch (phy_model) {
10960 			case MII_MODEL_ATTANSIC_AR8021:
10961 				new_phytype = WMPHY_82578;
10962 				break;
10963 			default:
10964 				break;
10965 			}
10966 			break;
10967 		case MII_OUI_xxMARVELL:
10968 			switch (phy_model) {
10969 			case MII_MODEL_xxMARVELL_I210:
10970 				new_phytype = WMPHY_I210;
10971 				break;
10972 			case MII_MODEL_xxMARVELL_E1011:
10973 			case MII_MODEL_xxMARVELL_E1000_3:
10974 			case MII_MODEL_xxMARVELL_E1000_5:
10975 			case MII_MODEL_xxMARVELL_E1112:
10976 				new_phytype = WMPHY_M88;
10977 				break;
10978 			case MII_MODEL_xxMARVELL_E1149:
10979 				new_phytype = WMPHY_BM;
10980 				break;
10981 			case MII_MODEL_xxMARVELL_E1111:
10982 			case MII_MODEL_xxMARVELL_I347:
10983 			case MII_MODEL_xxMARVELL_E1512:
10984 			case MII_MODEL_xxMARVELL_E1340M:
10985 			case MII_MODEL_xxMARVELL_E1543:
10986 				new_phytype = WMPHY_M88;
10987 				break;
10988 			case MII_MODEL_xxMARVELL_I82563:
10989 				new_phytype = WMPHY_GG82563;
10990 				break;
10991 			default:
10992 				break;
10993 			}
10994 			break;
10995 		case MII_OUI_INTEL:
10996 			switch (phy_model) {
10997 			case MII_MODEL_INTEL_I82577:
10998 				new_phytype = WMPHY_82577;
10999 				break;
11000 			case MII_MODEL_INTEL_I82579:
11001 				new_phytype = WMPHY_82579;
11002 				break;
11003 			case MII_MODEL_INTEL_I217:
11004 				new_phytype = WMPHY_I217;
11005 				break;
11006 			case MII_MODEL_INTEL_I82580:
11007 				new_phytype = WMPHY_82580;
11008 				break;
11009 			case MII_MODEL_INTEL_I350:
11010 				new_phytype = WMPHY_I350;
11011 				break;
11012 			default:
11013 				break;
11014 			}
11015 			break;
11016 		case MII_OUI_yyINTEL:
11017 			switch (phy_model) {
11018 			case MII_MODEL_yyINTEL_I82562G:
11019 			case MII_MODEL_yyINTEL_I82562EM:
11020 			case MII_MODEL_yyINTEL_I82562ET:
11021 				new_phytype = WMPHY_IFE;
11022 				break;
11023 			case MII_MODEL_yyINTEL_IGP01E1000:
11024 				new_phytype = WMPHY_IGP;
11025 				break;
11026 			case MII_MODEL_yyINTEL_I82566:
11027 				new_phytype = WMPHY_IGP_3;
11028 				break;
11029 			default:
11030 				break;
11031 			}
11032 			break;
11033 		default:
11034 			break;
11035 		}
11036 
11037 		if (dodiag) {
11038 			if (new_phytype == WMPHY_UNKNOWN)
11039 				aprint_verbose_dev(dev,
11040 				    "%s: Unknown PHY model. OUI=%06x, "
11041 				    "model=%04x\n", __func__, phy_oui,
11042 				    phy_model);
11043 
11044 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
11045 			    && (sc->sc_phytype != new_phytype)) {
11046 				aprint_error_dev(dev, "Previously assumed PHY "
11047 				    "type(%u) was incorrect. PHY type from PHY"
11048 				    "ID = %u\n", sc->sc_phytype, new_phytype);
11049 			}
11050 		}
11051 	}
11052 
11053 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
11054 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
11055 		/* SGMII */
11056 		new_readreg = wm_sgmii_readreg;
11057 		new_writereg = wm_sgmii_writereg;
11058 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
11059 		/* BM2 (phyaddr == 1) */
11060 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
11061 		    && (new_phytype != WMPHY_BM)
11062 		    && (new_phytype != WMPHY_UNKNOWN))
11063 			doubt_phytype = new_phytype;
11064 		new_phytype = WMPHY_BM;
11065 		new_readreg = wm_gmii_bm_readreg;
11066 		new_writereg = wm_gmii_bm_writereg;
11067 	} else if (sc->sc_type >= WM_T_PCH) {
11068 		/* All PCH* use _hv_ */
11069 		new_readreg = wm_gmii_hv_readreg;
11070 		new_writereg = wm_gmii_hv_writereg;
11071 	} else if (sc->sc_type >= WM_T_ICH8) {
11072 		/* non-82567 ICH8, 9 and 10 */
11073 		new_readreg = wm_gmii_i82544_readreg;
11074 		new_writereg = wm_gmii_i82544_writereg;
11075 	} else if (sc->sc_type >= WM_T_80003) {
11076 		/* 80003 */
11077 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
11078 		    && (new_phytype != WMPHY_GG82563)
11079 		    && (new_phytype != WMPHY_UNKNOWN))
11080 			doubt_phytype = new_phytype;
11081 		new_phytype = WMPHY_GG82563;
11082 		new_readreg = wm_gmii_i80003_readreg;
11083 		new_writereg = wm_gmii_i80003_writereg;
11084 	} else if (sc->sc_type >= WM_T_I210) {
11085 		/* I210 and I211 */
11086 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
11087 		    && (new_phytype != WMPHY_I210)
11088 		    && (new_phytype != WMPHY_UNKNOWN))
11089 			doubt_phytype = new_phytype;
11090 		new_phytype = WMPHY_I210;
11091 		new_readreg = wm_gmii_gs40g_readreg;
11092 		new_writereg = wm_gmii_gs40g_writereg;
11093 	} else if (sc->sc_type >= WM_T_82580) {
11094 		/* 82580, I350 and I354 */
11095 		new_readreg = wm_gmii_82580_readreg;
11096 		new_writereg = wm_gmii_82580_writereg;
11097 	} else if (sc->sc_type >= WM_T_82544) {
11098 		/* 82544, 8254[0], 8254[56], 8254[17], 8257[1234] and 82583 */
11099 		new_readreg = wm_gmii_i82544_readreg;
11100 		new_writereg = wm_gmii_i82544_writereg;
11101 	} else {
11102 		new_readreg = wm_gmii_i82543_readreg;
11103 		new_writereg = wm_gmii_i82543_writereg;
11104 	}
11105 
11106 	if (new_phytype == WMPHY_BM) {
11107 		/* All BM use _bm_ */
11108 		new_readreg = wm_gmii_bm_readreg;
11109 		new_writereg = wm_gmii_bm_writereg;
11110 	}
11111 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
11112 		/* All PCH* use _hv_ */
11113 		new_readreg = wm_gmii_hv_readreg;
11114 		new_writereg = wm_gmii_hv_writereg;
11115 	}
11116 
11117 	/* Diag output */
11118 	if (dodiag) {
11119 		if (doubt_phytype != WMPHY_UNKNOWN)
11120 			aprint_error_dev(dev, "Assumed new PHY type was "
11121 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
11122 			    new_phytype);
11123 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
11124 		    && (sc->sc_phytype != new_phytype))
11125 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
11126 			    "was incorrect. New PHY type = %u\n",
11127 			    sc->sc_phytype, new_phytype);
11128 
11129 		if ((mii->mii_readreg != NULL) &&
11130 		    (new_phytype == WMPHY_UNKNOWN))
11131 			aprint_error_dev(dev, "PHY type is still unknown.\n");
11132 
11133 		if ((mii->mii_readreg != NULL) &&
11134 		    (mii->mii_readreg != new_readreg))
11135 			aprint_error_dev(dev, "Previously assumed PHY "
11136 			    "read/write function was incorrect.\n");
11137 	}
11138 
11139 	/* Update now */
11140 	sc->sc_phytype = new_phytype;
11141 	mii->mii_readreg = new_readreg;
11142 	mii->mii_writereg = new_writereg;
11143 	if (new_readreg == wm_gmii_hv_readreg) {
11144 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
11145 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
11146 	} else if (new_readreg == wm_sgmii_readreg) {
11147 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
11148 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
11149 	} else if (new_readreg == wm_gmii_i82544_readreg) {
11150 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
11151 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
11152 	}
11153 }
11154 
11155 /*
11156  * wm_get_phy_id_82575:
11157  *
11158  * Return the PHY ID, or -1 on failure.
11159  */
11160 static int
11161 wm_get_phy_id_82575(struct wm_softc *sc)
11162 {
11163 	uint32_t reg;
11164 	int phyid = -1;
11165 
11166 	/* XXX */
11167 	if ((sc->sc_flags & WM_F_SGMII) == 0)
11168 		return -1;
11169 
11170 	if (wm_sgmii_uses_mdio(sc)) {
11171 		switch (sc->sc_type) {
11172 		case WM_T_82575:
11173 		case WM_T_82576:
11174 			reg = CSR_READ(sc, WMREG_MDIC);
11175 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
11176 			break;
11177 		case WM_T_82580:
11178 		case WM_T_I350:
11179 		case WM_T_I354:
11180 		case WM_T_I210:
11181 		case WM_T_I211:
11182 			reg = CSR_READ(sc, WMREG_MDICNFG);
11183 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
11184 			break;
11185 		default:
11186 			return -1;
11187 		}
11188 	}
11189 
11190 	return phyid;
11191 }
11192 
11193 /*
11194  * wm_gmii_mediainit:
11195  *
11196  *	Initialize media for use on 1000BASE-T devices.
11197  */
11198 static void
11199 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
11200 {
11201 	device_t dev = sc->sc_dev;
11202 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
11203 	struct mii_data *mii = &sc->sc_mii;
11204 
11205 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
11206 		device_xname(sc->sc_dev), __func__));
11207 
11208 	/* We have GMII. */
11209 	sc->sc_flags |= WM_F_HAS_MII;
11210 
11211 	if (sc->sc_type == WM_T_80003)
11212 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
11213 	else
11214 		sc->sc_tipg = TIPG_1000T_DFLT;
11215 
11216 	/*
11217 	 * Let the chip set speed/duplex on its own based on
11218 	 * signals from the PHY.
11219 	 * XXXbouyer - I'm not sure this is right for the 80003,
11220 	 * the em driver only sets CTRL_SLU here - but it seems to work.
11221 	 */
11222 	sc->sc_ctrl |= CTRL_SLU;
11223 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11224 
11225 	/* Initialize our media structures and probe the GMII. */
11226 	mii->mii_ifp = ifp;
11227 
11228 	mii->mii_statchg = wm_gmii_statchg;
11229 
11230 	/* Get PHY control from SMBus to PCIe */
11231 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
11232 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
11233 	    || (sc->sc_type == WM_T_PCH_CNP))
11234 		wm_init_phy_workarounds_pchlan(sc);
11235 
11236 	wm_gmii_reset(sc);
11237 
11238 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
11239 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
11240 	    wm_gmii_mediastatus, sc->sc_core_lock);
11241 
11242 	/* Setup internal SGMII PHY for SFP */
11243 	wm_sgmii_sfp_preconfig(sc);
11244 
11245 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
11246 	    || (sc->sc_type == WM_T_82580)
11247 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
11248 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
11249 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
11250 			/* Attach only one port */
11251 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
11252 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
11253 		} else {
11254 			int i, id;
11255 			uint32_t ctrl_ext;
11256 
11257 			id = wm_get_phy_id_82575(sc);
11258 			if (id != -1) {
11259 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
11260 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
11261 			}
11262 			if ((id == -1)
11263 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
11264 				/* Power on sgmii phy if it is disabled */
11265 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
11266 				CSR_WRITE(sc, WMREG_CTRL_EXT,
11267 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
11268 				CSR_WRITE_FLUSH(sc);
11269 				delay(300*1000); /* XXX too long */
11270 
11271 				/*
11272 				 * Scan PHY addresses 1 through 7.
11273 				 *
11274 				 * I2C access can fail with the I2C register's
11275 				 * ERROR bit set, so suppress error messages
11276 				 * while scanning.
11277 				 */
11278 				sc->phy.no_errprint = true;
11279 				for (i = 1; i < 8; i++)
11280 					mii_attach(sc->sc_dev, &sc->sc_mii,
11281 					    0xffffffff, i, MII_OFFSET_ANY,
11282 					    MIIF_DOPAUSE);
11283 				sc->phy.no_errprint = false;
11284 
11285 				/* Restore previous sfp cage power state */
11286 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
11287 			}
11288 		}
11289 	} else
11290 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
11291 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
11292 
11293 	/*
11294 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
11295 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
11296 	 */
11297 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
11298 		|| (sc->sc_type == WM_T_PCH_SPT)
11299 		|| (sc->sc_type == WM_T_PCH_CNP))
11300 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
11301 		wm_set_mdio_slow_mode_hv(sc);
11302 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
11303 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
11304 	}
11305 
11306 	/*
11307 	 * (For ICH8 variants)
11308 	 * If PHY detection failed, use BM's r/w function and retry.
11309 	 */
11310 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
11311 		/* if failed, retry with *_bm_* */
11312 		aprint_verbose_dev(dev, "Assumed PHY access function "
11313 		    "(type = %d) might be incorrect. Use BM and retry.\n",
11314 		    sc->sc_phytype);
11315 		sc->sc_phytype = WMPHY_BM;
11316 		mii->mii_readreg = wm_gmii_bm_readreg;
11317 		mii->mii_writereg = wm_gmii_bm_writereg;
11318 
11319 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
11320 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
11321 	}
11322 
11323 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
11324 		/* No PHY was found */
11325 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
11326 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
11327 		sc->sc_phytype = WMPHY_NONE;
11328 	} else {
11329 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
11330 
11331 		/*
11332 		 * PHY found! Check PHY type again by the second call of
11333 		 * wm_gmii_setup_phytype.
11334 		 */
11335 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
11336 		    child->mii_mpd_model);
11337 
11338 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
11339 	}
11340 }
11341 
11342 /*
11343  * wm_gmii_mediachange:	[ifmedia interface function]
11344  *
11345  *	Set hardware to newly-selected media on a 1000BASE-T device.
11346  */
11347 static int
11348 wm_gmii_mediachange(struct ifnet *ifp)
11349 {
11350 	struct wm_softc *sc = ifp->if_softc;
11351 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
11352 	uint32_t reg;
11353 	int rc;
11354 
11355 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
11356 		device_xname(sc->sc_dev), __func__));
11357 
11358 	KASSERT(mutex_owned(sc->sc_core_lock));
11359 
11360 	if ((sc->sc_if_flags & IFF_UP) == 0)
11361 		return 0;
11362 
11363 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
11364 	if ((sc->sc_type == WM_T_82580)
11365 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
11366 	    || (sc->sc_type == WM_T_I211)) {
11367 		reg = CSR_READ(sc, WMREG_PHPM);
11368 		reg &= ~PHPM_GO_LINK_D;
11369 		CSR_WRITE(sc, WMREG_PHPM, reg);
11370 	}
11371 
11372 	/* Disable D0 LPLU. */
11373 	wm_lplu_d0_disable(sc);
11374 
11375 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
11376 	sc->sc_ctrl |= CTRL_SLU;
11377 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
11378 	    || (sc->sc_type > WM_T_82543)) {
11379 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
11380 	} else {
11381 		sc->sc_ctrl &= ~CTRL_ASDE;
11382 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
11383 		if (ife->ifm_media & IFM_FDX)
11384 			sc->sc_ctrl |= CTRL_FD;
11385 		switch (IFM_SUBTYPE(ife->ifm_media)) {
11386 		case IFM_10_T:
11387 			sc->sc_ctrl |= CTRL_SPEED_10;
11388 			break;
11389 		case IFM_100_TX:
11390 			sc->sc_ctrl |= CTRL_SPEED_100;
11391 			break;
11392 		case IFM_1000_T:
11393 			sc->sc_ctrl |= CTRL_SPEED_1000;
11394 			break;
11395 		case IFM_NONE:
11396 			/* There is no specific setting for IFM_NONE */
11397 			break;
11398 		default:
11399 			panic("wm_gmii_mediachange: bad media 0x%x",
11400 			    ife->ifm_media);
11401 		}
11402 	}
11403 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11404 	CSR_WRITE_FLUSH(sc);
11405 
11406 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
11407 		wm_serdes_mediachange(ifp);
11408 
11409 	if (sc->sc_type <= WM_T_82543)
11410 		wm_gmii_reset(sc);
11411 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
11412 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
11413 		/* Allow time for the SFP cage to power up the PHY */
11414 		delay(300 * 1000);
11415 		wm_gmii_reset(sc);
11416 	}
11417 
11418 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
11419 		return 0;
11420 	return rc;
11421 }
11422 
11423 /*
11424  * wm_gmii_mediastatus:	[ifmedia interface function]
11425  *
11426  *	Get the current interface media status on a 1000BASE-T device.
11427  */
11428 static void
11429 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
11430 {
11431 	struct wm_softc *sc = ifp->if_softc;
11432 
11433 	KASSERT(mutex_owned(sc->sc_core_lock));
11434 
11435 	ether_mediastatus(ifp, ifmr);
11436 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
11437 	    | sc->sc_flowflags;
11438 }
11439 
11440 #define	MDI_IO		CTRL_SWDPIN(2)
11441 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
11442 #define	MDI_CLK		CTRL_SWDPIN(3)
11443 
11444 static void
11445 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
11446 {
11447 	uint32_t i, v;
11448 
11449 	v = CSR_READ(sc, WMREG_CTRL);
11450 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
11451 	v |= MDI_DIR | CTRL_SWDPIO(3);
11452 
11453 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
11454 		if (data & i)
11455 			v |= MDI_IO;
11456 		else
11457 			v &= ~MDI_IO;
11458 		CSR_WRITE(sc, WMREG_CTRL, v);
11459 		CSR_WRITE_FLUSH(sc);
11460 		delay(10);
11461 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11462 		CSR_WRITE_FLUSH(sc);
11463 		delay(10);
11464 		CSR_WRITE(sc, WMREG_CTRL, v);
11465 		CSR_WRITE_FLUSH(sc);
11466 		delay(10);
11467 	}
11468 }
11469 
11470 static uint16_t
11471 wm_i82543_mii_recvbits(struct wm_softc *sc)
11472 {
11473 	uint32_t v, i;
11474 	uint16_t data = 0;
11475 
11476 	v = CSR_READ(sc, WMREG_CTRL);
11477 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
11478 	v |= CTRL_SWDPIO(3);
11479 
11480 	CSR_WRITE(sc, WMREG_CTRL, v);
11481 	CSR_WRITE_FLUSH(sc);
11482 	delay(10);
11483 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11484 	CSR_WRITE_FLUSH(sc);
11485 	delay(10);
11486 	CSR_WRITE(sc, WMREG_CTRL, v);
11487 	CSR_WRITE_FLUSH(sc);
11488 	delay(10);
11489 
11490 	for (i = 0; i < 16; i++) {
11491 		data <<= 1;
11492 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11493 		CSR_WRITE_FLUSH(sc);
11494 		delay(10);
11495 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
11496 			data |= 1;
11497 		CSR_WRITE(sc, WMREG_CTRL, v);
11498 		CSR_WRITE_FLUSH(sc);
11499 		delay(10);
11500 	}
11501 
11502 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11503 	CSR_WRITE_FLUSH(sc);
11504 	delay(10);
11505 	CSR_WRITE(sc, WMREG_CTRL, v);
11506 	CSR_WRITE_FLUSH(sc);
11507 	delay(10);
11508 
11509 	return data;
11510 }
11511 
11512 #undef MDI_IO
11513 #undef MDI_DIR
11514 #undef MDI_CLK
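
/*
 * Reading aid for the bit-banged MDIO frames built by
 * wm_gmii_i82543_readreg/_writereg below (IEEE 802.3 clause 22 framing;
 * the layout follows from the shift counts in the code):
 *
 *	read:  32 x 1 (preamble), ST (01), OP (10 = read), 5-bit phy,
 *	       5-bit reg; then the PHY drives 16 data bits (recvbits).
 *	write: 32 x 1 (preamble), ST (01), OP (01 = write), 5-bit phy,
 *	       5-bit reg, TA (10), 16 data bits - sent as one 32-bit word.
 */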
11515 
11516 /*
11517  * wm_gmii_i82543_readreg:	[mii interface function]
11518  *
11519  *	Read a PHY register on the GMII (i82543 version).
11520  */
11521 static int
11522 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
11523 {
11524 	struct wm_softc *sc = device_private(dev);
11525 
11526 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
11527 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
11528 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
11529 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
11530 
11531 	DPRINTF(sc, WM_DEBUG_GMII,
11532 	    ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
11533 		device_xname(dev), phy, reg, *val));
11534 
11535 	return 0;
11536 }
11537 
11538 /*
11539  * wm_gmii_i82543_writereg:	[mii interface function]
11540  *
11541  *	Write a PHY register on the GMII (i82543 version).
11542  */
11543 static int
11544 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
11545 {
11546 	struct wm_softc *sc = device_private(dev);
11547 
11548 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
11549 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
11550 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
11551 	    (MII_COMMAND_START << 30), 32);
11552 
11553 	return 0;
11554 }
11555 
11556 /*
11557  * wm_gmii_mdic_readreg:	[mii interface function]
11558  *
11559  *	Read a PHY register on the GMII.
11560  */
11561 static int
11562 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
11563 {
11564 	struct wm_softc *sc = device_private(dev);
11565 	uint32_t mdic = 0;
11566 	int i;
11567 
11568 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
11569 	    && (reg > MII_ADDRMASK)) {
11570 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11571 		    __func__, sc->sc_phytype, reg);
11572 		reg &= MII_ADDRMASK;
11573 	}
11574 
11575 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
11576 	    MDIC_REGADD(reg));
11577 
11578 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
11579 		delay(50);
11580 		mdic = CSR_READ(sc, WMREG_MDIC);
11581 		if (mdic & MDIC_READY)
11582 			break;
11583 	}
11584 
11585 	if ((mdic & MDIC_READY) == 0) {
11586 		DPRINTF(sc, WM_DEBUG_GMII,
11587 		    ("%s: MDIC read timed out: phy %d reg %d\n",
11588 			device_xname(dev), phy, reg));
11589 		return ETIMEDOUT;
11590 	} else if (mdic & MDIC_E) {
11591 		/* This is normal if no PHY is present. */
11592 		DPRINTF(sc, WM_DEBUG_GMII,
11593 		    ("%s: MDIC read error: phy %d reg %d\n",
11594 			device_xname(sc->sc_dev), phy, reg));
11595 		return -1;
11596 	} else
11597 		*val = MDIC_DATA(mdic);
11598 
11599 	/*
11600 	 * Allow some time after each MDIC transaction to avoid
11601 	 * reading duplicate data in the next MDIC transaction.
11602 	 */
11603 	if (sc->sc_type == WM_T_PCH2)
11604 		delay(100);
11605 
11606 	return 0;
11607 }
11608 
11609 /*
11610  * wm_gmii_mdic_writereg:	[mii interface function]
11611  *
11612  *	Write a PHY register on the GMII.
11613  */
11614 static int
11615 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
11616 {
11617 	struct wm_softc *sc = device_private(dev);
11618 	uint32_t mdic = 0;
11619 	int i;
11620 
11621 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
11622 	    && (reg > MII_ADDRMASK)) {
11623 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11624 		    __func__, sc->sc_phytype, reg);
11625 		reg &= MII_ADDRMASK;
11626 	}
11627 
11628 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
11629 	    MDIC_REGADD(reg) | MDIC_DATA(val));
11630 
11631 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
11632 		delay(50);
11633 		mdic = CSR_READ(sc, WMREG_MDIC);
11634 		if (mdic & MDIC_READY)
11635 			break;
11636 	}
11637 
11638 	if ((mdic & MDIC_READY) == 0) {
11639 		DPRINTF(sc, WM_DEBUG_GMII,
11640 		    ("%s: MDIC write timed out: phy %d reg %d\n",
11641 			device_xname(dev), phy, reg));
11642 		return ETIMEDOUT;
11643 	} else if (mdic & MDIC_E) {
11644 		DPRINTF(sc, WM_DEBUG_GMII,
11645 		    ("%s: MDIC write error: phy %d reg %d\n",
11646 			device_xname(dev), phy, reg));
11647 		return -1;
11648 	}
11649 
11650 	/*
11651 	 * Allow some time after each MDIC transaction to avoid
11652 	 * reading duplicate data in the next MDIC transaction.
11653 	 */
11654 	if (sc->sc_type == WM_T_PCH2)
11655 		delay(100);
11656 
11657 	return 0;
11658 }
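
/*
 * Illustrative (hypothetical) caller of the MDIC helpers above, assuming
 * MII_BMSR from <dev/mii/mii.h>; the error handling mirrors the return
 * values of wm_gmii_mdic_readreg():
 *
 *	uint16_t bmsr;
 *	int error = wm_gmii_mdic_readreg(dev, 1, MII_BMSR, &bmsr);
 *	if (error == ETIMEDOUT)
 *		... MDIC_READY never came up ...
 *	else if (error == -1)
 *		... MDIC_E was set; normal if no PHY is present ...
 */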
11659 
11660 /*
11661  * wm_gmii_i82544_readreg:	[mii interface function]
11662  *
11663  *	Read a PHY register on the GMII.
11664  */
11665 static int
11666 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
11667 {
11668 	struct wm_softc *sc = device_private(dev);
11669 	int rv;
11670 
11671 	rv = sc->phy.acquire(sc);
11672 	if (rv != 0) {
11673 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11674 		return rv;
11675 	}
11676 
11677 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
11678 
11679 	sc->phy.release(sc);
11680 
11681 	return rv;
11682 }
11683 
11684 static int
11685 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
11686 {
11687 	struct wm_softc *sc = device_private(dev);
11688 	int rv;
11689 
11690 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11691 		switch (sc->sc_phytype) {
11692 		case WMPHY_IGP:
11693 		case WMPHY_IGP_2:
11694 		case WMPHY_IGP_3:
11695 			rv = wm_gmii_mdic_writereg(dev, phy,
11696 			    IGPHY_PAGE_SELECT, reg);
11697 			if (rv != 0)
11698 				return rv;
11699 			break;
11700 		default:
11701 #ifdef WM_DEBUG
11702 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
11703 			    __func__, sc->sc_phytype, reg);
11704 #endif
11705 			break;
11706 		}
11707 	}
11708 
11709 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11710 }
11711 
11712 /*
11713  * wm_gmii_i82544_writereg:	[mii interface function]
11714  *
11715  *	Write a PHY register on the GMII.
11716  */
11717 static int
11718 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
11719 {
11720 	struct wm_softc *sc = device_private(dev);
11721 	int rv;
11722 
11723 	rv = sc->phy.acquire(sc);
11724 	if (rv != 0) {
11725 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11726 		return rv;
11727 	}
11728 
11729 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
11730 	sc->phy.release(sc);
11731 
11732 	return rv;
11733 }
11734 
11735 static int
11736 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
11737 {
11738 	struct wm_softc *sc = device_private(dev);
11739 	int rv;
11740 
11741 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11742 		switch (sc->sc_phytype) {
11743 		case WMPHY_IGP:
11744 		case WMPHY_IGP_2:
11745 		case WMPHY_IGP_3:
11746 			rv = wm_gmii_mdic_writereg(dev, phy,
11747 			    IGPHY_PAGE_SELECT, reg);
11748 			if (rv != 0)
11749 				return rv;
11750 			break;
11751 		default:
11752 #ifdef WM_DEBUG
11753 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
11754 			    __func__, sc->sc_phytype, reg);
11755 #endif
11756 			break;
11757 		}
11758 	}
11759 
11760 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11761 }
11762 
11763 /*
11764  * wm_gmii_i80003_readreg:	[mii interface function]
11765  *
11766  *	Read a PHY register on the Kumeran interface (80003).
11767  * This could be handled by the PHY layer if we didn't have to lock the
11768  * resource ...
11769  */
11770 static int
11771 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
11772 {
11773 	struct wm_softc *sc = device_private(dev);
11774 	int page_select;
11775 	uint16_t temp, temp2;
11776 	int rv;
11777 
11778 	if (phy != 1) /* Only one PHY on kumeran bus */
11779 		return -1;
11780 
11781 	rv = sc->phy.acquire(sc);
11782 	if (rv != 0) {
11783 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11784 		return rv;
11785 	}
11786 
11787 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
11788 		page_select = GG82563_PHY_PAGE_SELECT;
11789 	else {
11790 		/*
11791 		 * Use Alternative Page Select register to access registers
11792 		 * 30 and 31.
11793 		 */
11794 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
11795 	}
11796 	temp = reg >> GG82563_PAGE_SHIFT;
11797 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
11798 		goto out;
11799 
11800 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
11801 		/*
11802 		 * Wait another 200us to work around a bug with the ready
11803 		 * bit in the MDIC register.
11804 		 */
11805 		delay(200);
11806 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
11807 		if ((rv != 0) || (temp2 != temp)) {
11808 			device_printf(dev, "%s failed\n", __func__);
11809 			rv = -1;
11810 			goto out;
11811 		}
11812 		delay(200);
11813 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11814 		delay(200);
11815 	} else
11816 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11817 
11818 out:
11819 	sc->phy.release(sc);
11820 	return rv;
11821 }
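
/*
 * A sketch of the GG82563 paging scheme used above (derived from the
 * code; GG82563_PAGE_SHIFT and GG82563_MIN_ALT_REG are the existing
 * macros):
 *
 *	page   = reg >> GG82563_PAGE_SHIFT;  written to the page select reg
 *	offset = reg & MII_ADDRMASK;	     the in-page register address
 *
 * Registers 30 and 31 (offset >= GG82563_MIN_ALT_REG) must go through the
 * alternative page select register instead of the regular one.
 */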
11822 
11823 /*
11824  * wm_gmii_i80003_writereg:	[mii interface function]
11825  *
11826  *	Write a PHY register on the Kumeran interface (80003).
11827  * This could be handled by the PHY layer if we didn't have to lock the
11828  * resource ...
11829  */
11830 static int
11831 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
11832 {
11833 	struct wm_softc *sc = device_private(dev);
11834 	int page_select, rv;
11835 	uint16_t temp, temp2;
11836 
11837 	if (phy != 1) /* Only one PHY on kumeran bus */
11838 		return -1;
11839 
11840 	rv = sc->phy.acquire(sc);
11841 	if (rv != 0) {
11842 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11843 		return rv;
11844 	}
11845 
11846 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
11847 		page_select = GG82563_PHY_PAGE_SELECT;
11848 	else {
11849 		/*
11850 		 * Use Alternative Page Select register to access registers
11851 		 * 30 and 31.
11852 		 */
11853 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
11854 	}
11855 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
11856 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
11857 		goto out;
11858 
11859 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
11860 		/*
11861 		 * Wait another 200us to work around a bug with the ready
11862 		 * bit in the MDIC register.
11863 		 */
11864 		delay(200);
11865 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
11866 		if ((rv != 0) || (temp2 != temp)) {
11867 			device_printf(dev, "%s failed\n", __func__);
11868 			rv = -1;
11869 			goto out;
11870 		}
11871 		delay(200);
11872 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11873 		delay(200);
11874 	} else
11875 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11876 
11877 out:
11878 	sc->phy.release(sc);
11879 	return rv;
11880 }
11881 
11882 /*
11883  * wm_gmii_bm_readreg:	[mii interface function]
11884  *
11885  *	Read a PHY register on the BM PHY.
11886  * This could be handled by the PHY layer if we didn't have to lock the
11887  * resource ...
11888  */
11889 static int
11890 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
11891 {
11892 	struct wm_softc *sc = device_private(dev);
11893 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
11894 	int rv;
11895 
11896 	rv = sc->phy.acquire(sc);
11897 	if (rv != 0) {
11898 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11899 		return rv;
11900 	}
11901 
11902 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
11903 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
11904 		    || (reg == 31)) ? 1 : phy;
11905 	/* Page 800 works differently than the rest so it has its own func */
11906 	if (page == BM_WUC_PAGE) {
11907 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
11908 		goto release;
11909 	}
11910 
11911 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11912 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
11913 		    && (sc->sc_type != WM_T_82583))
11914 			rv = wm_gmii_mdic_writereg(dev, phy,
11915 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
11916 		else
11917 			rv = wm_gmii_mdic_writereg(dev, phy,
11918 			    BME1000_PHY_PAGE_SELECT, page);
11919 		if (rv != 0)
11920 			goto release;
11921 	}
11922 
11923 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11924 
11925 release:
11926 	sc->phy.release(sc);
11927 	return rv;
11928 }
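
/*
 * A sketch of how a BM register argument decomposes (derived from the
 * code above):
 *
 *	page   = reg >> BME1000_PAGE_SHIFT;
 *	offset = reg & MII_ADDRMASK;
 *
 * On parts other than 82574/82583, accesses to page >= 768, to (page 0,
 * reg 25) and to reg 31 are redirected to PHY address 1; page 800
 * (BM_WUC_PAGE) takes the separate wakeup register path.
 */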
11929 
11930 /*
11931  * wm_gmii_bm_writereg:	[mii interface function]
11932  *
11933  *	Write a PHY register on the BM PHY.
11934  * This could be handled by the PHY layer if we didn't have to lock the
11935  * resource ...
11936  */
11937 static int
11938 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
11939 {
11940 	struct wm_softc *sc = device_private(dev);
11941 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
11942 	int rv;
11943 
11944 	rv = sc->phy.acquire(sc);
11945 	if (rv != 0) {
11946 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11947 		return rv;
11948 	}
11949 
11950 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
11951 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
11952 		    || (reg == 31)) ? 1 : phy;
11953 	/* Page 800 works differently than the rest so it has its own func */
11954 	if (page == BM_WUC_PAGE) {
11955 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
11956 		goto release;
11957 	}
11958 
11959 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11960 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
11961 		    && (sc->sc_type != WM_T_82583))
11962 			rv = wm_gmii_mdic_writereg(dev, phy,
11963 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
11964 		else
11965 			rv = wm_gmii_mdic_writereg(dev, phy,
11966 			    BME1000_PHY_PAGE_SELECT, page);
11967 		if (rv != 0)
11968 			goto release;
11969 	}
11970 
11971 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11972 
11973 release:
11974 	sc->phy.release(sc);
11975 	return rv;
11976 }
11977 
11978 /*
11979  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
11980  *  @dev: pointer to the HW structure
11981  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
11982  *
11983  *  Assumes semaphore already acquired and phy_reg points to a valid memory
11984  *  address to store contents of the BM_WUC_ENABLE_REG register.
11985  */
11986 static int
11987 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
11988 {
11989 #ifdef WM_DEBUG
11990 	struct wm_softc *sc = device_private(dev);
11991 #endif
11992 	uint16_t temp;
11993 	int rv;
11994 
11995 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
11996 		device_xname(dev), __func__));
11997 
11998 	if (!phy_regp)
11999 		return -1;
12000 
12001 	/* All page select, port ctrl and wakeup registers use phy address 1 */
12002 
12003 	/* Select Port Control Registers page */
12004 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12005 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
12006 	if (rv != 0)
12007 		return rv;
12008 
12009 	/* Read WUCE and save it */
12010 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
12011 	if (rv != 0)
12012 		return rv;
12013 
12014 	/* Enable both PHY wakeup mode and Wakeup register page writes.
12015 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
12016 	 */
12017 	temp = *phy_regp;
12018 	temp |= BM_WUC_ENABLE_BIT;
12019 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
12020 
12021 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
12022 		return rv;
12023 
12024 	/* Select Host Wakeup Registers page - caller now able to write
12025 	 * registers on the Wakeup registers page
12026 	 */
12027 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12028 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
12029 }
12030 
12031 /*
12032  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
12033  *  @dev: pointer to the HW structure
12034  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
12035  *
12036  *  Restore BM_WUC_ENABLE_REG to its original value.
12037  *
12038  *  Assumes semaphore already acquired and *phy_reg is the contents of the
12039  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
12040  *  caller.
12041  */
12042 static int
12043 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
12044 {
12045 #ifdef WM_DEBUG
12046 	struct wm_softc *sc = device_private(dev);
12047 #endif
12048 
12049 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
12050 		device_xname(dev), __func__));
12051 
12052 	if (!phy_regp)
12053 		return -1;
12054 
12055 	/* Select Port Control Registers page */
12056 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12057 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
12058 
12059 	/* Restore 769.17 to its original value */
12060 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
12061 
12062 	return 0;
12063 }
12064 
12065 /*
12066  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
12067  *  @sc: pointer to the HW structure
12068  *  @offset: register offset to be read or written
12069  *  @val: pointer to the data to read or write
12070  *  @rd: determines if operation is read or write
12071  *  @page_set: BM_WUC_PAGE already set and access enabled
12072  *
12073  *  Read the PHY register at offset and store the retrieved information in
12074  *  *val, or write *val to the PHY register at offset.  Note the procedure
12075  *  to access the PHY wakeup registers differs from that for the other PHY
12076  *  registers. It works as such:
12077  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
12078  *  2) Set the page to 800 for host access (801 for manageability)
12079  *  3) Write the address using the address opcode (0x11)
12080  *  4) Read or write the data using the data opcode (0x12)
12081  *  5) Restore 769.17.2 to its original value
12082  *
12083  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
12084  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
12085  *
12086  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
12087  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
12088  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_access_bm()).
12089  */
12090 static int
12091 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
12092     bool page_set)
12093 {
12094 	struct wm_softc *sc = device_private(dev);
12095 	uint16_t regnum = BM_PHY_REG_NUM(offset);
12096 	uint16_t page = BM_PHY_REG_PAGE(offset);
12097 	uint16_t wuce;
12098 	int rv = 0;
12099 
12100 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
12101 		device_xname(dev), __func__));
12102 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
12103 	if ((sc->sc_type == WM_T_PCH)
12104 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
12105 		device_printf(dev,
12106 		    "Attempting to access page %d while gig enabled.\n", page);
12107 	}
12108 
12109 	if (!page_set) {
12110 		/* Enable access to PHY wakeup registers */
12111 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
12112 		if (rv != 0) {
12113 			device_printf(dev,
12114 			    "%s: Could not enable PHY wakeup reg access\n",
12115 			    __func__);
12116 			return rv;
12117 		}
12118 	}
12119 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
12120 		device_xname(sc->sc_dev), __func__, page, regnum));
12121 
12122 	/*
12123 	 * 3) and 4) Access the PHY wakeup register using the address and
12124 	 * data opcodes (see the list of steps above).
12125 	 */
12126 
12127 	/* Write the Wakeup register page offset value using opcode 0x11 */
12128 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
12129 	if (rv != 0)
12130 		return rv;
12131 
12132 	if (rd) {
12133 		/* Read the Wakeup register page value using opcode 0x12 */
12134 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
12135 	} else {
12136 		/* Write the Wakeup register page value using opcode 0x12 */
12137 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
12138 	}
12139 	if (rv != 0)
12140 		return rv;
12141 
12142 	if (!page_set)
12143 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
12144 
12145 	return rv;
12146 }
12147 
12148 /*
12149  * wm_gmii_hv_readreg:	[mii interface function]
12150  *
12151  *	Read a PHY register on the HV (PCH and newer) PHY
12152  * This could be handled by the PHY layer if we didn't have to lock the
12153  * resource ...
12154  */
12155 static int
12156 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
12157 {
12158 	struct wm_softc *sc = device_private(dev);
12159 	int rv;
12160 
12161 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
12162 		device_xname(dev), __func__));
12163 
12164 	rv = sc->phy.acquire(sc);
12165 	if (rv != 0) {
12166 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12167 		return rv;
12168 	}
12169 
12170 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
12171 	sc->phy.release(sc);
12172 	return rv;
12173 }
12174 
12175 static int
12176 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
12177 {
12178 	uint16_t page = BM_PHY_REG_PAGE(reg);
12179 	uint16_t regnum = BM_PHY_REG_NUM(reg);
12180 	int rv;
12181 
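	/* Registers on pages 768 and above are only reachable at PHY addr 1. */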
12182 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
12183 
12184 	/* Page 800 works differently than the rest so it has its own func */
12185 	if (page == BM_WUC_PAGE)
12186 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
12187 
12188 	/*
12189 	 * Lower than page 768 works differently than the rest so it has its
12190 	 * own func
12191 	 */
12192 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
12193 		device_printf(dev, "%s: unexpected page %d\n", __func__, page);
12194 		return -1;
12195 	}
12196 
12197 	/*
12198 	 * XXX I21[789] documents say that the SMBus Address register is at
12199 	 * PHY address 01, Page 0 (not 768), Register 26.
12200 	 */
12201 	if (page == HV_INTC_FC_PAGE_START)
12202 		page = 0;
12203 
12204 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
12205 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12206 		    page << BME1000_PAGE_SHIFT);
12207 		if (rv != 0)
12208 			return rv;
12209 	}
12210 
12211 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
12212 }
12213 
12214 /*
12215  * wm_gmii_hv_writereg:	[mii interface function]
12216  *
12217  *	Write a PHY register on the HV (PCH and newer) PHY.
12218  * This could be handled by the PHY layer if we didn't have to lock the
12219  * resource ...
12220  */
12221 static int
12222 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
12223 {
12224 	struct wm_softc *sc = device_private(dev);
12225 	int rv;
12226 
12227 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
12228 		device_xname(dev), __func__));
12229 
12230 	rv = sc->phy.acquire(sc);
12231 	if (rv != 0) {
12232 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12233 		return rv;
12234 	}
12235 
12236 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
12237 	sc->phy.release(sc);
12238 
12239 	return rv;
12240 }
12241 
12242 static int
12243 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
12244 {
12245 	struct wm_softc *sc = device_private(dev);
12246 	uint16_t page = BM_PHY_REG_PAGE(reg);
12247 	uint16_t regnum = BM_PHY_REG_NUM(reg);
12248 	int rv;
12249 
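	/* Registers on pages 768 and above are only reachable at PHY addr 1. */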
12250 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
12251 
12252 	/* Page 800 works differently than the rest so it has its own func */
12253 	if (page == BM_WUC_PAGE)
12254 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
12255 		    false);
12256 
12257 	/*
12258 	 * Lower than page 768 works differently than the rest so it has its
12259 	 * own func
12260 	 */
12261 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
12262 		device_printf(dev, "%s: unexpected page %d\n", __func__, page);
12263 		return -1;
12264 	}
12265 
12266 	{
12267 		/*
12268 		 * XXX I21[789] documents say that the SMBus Address register
12269 		 * is at PHY address 01, Page 0 (not 768), Register 26.
12270 		 */
12271 		if (page == HV_INTC_FC_PAGE_START)
12272 			page = 0;
12273 
12274 		/*
12275 		 * XXX Workaround MDIO accesses being disabled after entering
12276 		 * IEEE Power Down (whenever bit 11 of the PHY control
12277 		 * register is set)
12278 		 */
12279 		if (sc->sc_phytype == WMPHY_82578) {
12280 			struct mii_softc *child;
12281 
12282 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
12283 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
12284 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
12285 			    && ((val & (1 << 11)) != 0)) {
12286 				device_printf(dev, "XXX need workaround\n");
12287 			}
12288 		}
12289 
12290 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
12291 			rv = wm_gmii_mdic_writereg(dev, 1,
12292 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
12293 			if (rv != 0)
12294 				return rv;
12295 		}
12296 	}
12297 
12298 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
12299 }
12300 
12301 /*
12302  * wm_gmii_82580_readreg:	[mii interface function]
12303  *
12304  *	Read a PHY register on the 82580 and I350.
12305  * This could be handled by the PHY layer if we didn't have to lock the
12306  * resource ...
12307  */
12308 static int
12309 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
12310 {
12311 	struct wm_softc *sc = device_private(dev);
12312 	int rv;
12313 
12314 	rv = sc->phy.acquire(sc);
12315 	if (rv != 0) {
12316 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12317 		return rv;
12318 	}
12319 
12320 #ifdef DIAGNOSTIC
12321 	if (reg > MII_ADDRMASK) {
12322 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
12323 		    __func__, sc->sc_phytype, reg);
12324 		reg &= MII_ADDRMASK;
12325 	}
12326 #endif
12327 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
12328 
12329 	sc->phy.release(sc);
12330 	return rv;
12331 }
12332 
12333 /*
12334  * wm_gmii_82580_writereg:	[mii interface function]
12335  *
12336  *	Write a PHY register on the 82580 and I350.
12337  * This could be handled by the PHY layer if we didn't have to lock the
12338  * resource ...
12339  */
12340 static int
12341 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
12342 {
12343 	struct wm_softc *sc = device_private(dev);
12344 	int rv;
12345 
12346 	rv = sc->phy.acquire(sc);
12347 	if (rv != 0) {
12348 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12349 		return rv;
12350 	}
12351 
12352 #ifdef DIAGNOSTIC
12353 	if (reg > MII_ADDRMASK) {
12354 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
12355 		    __func__, sc->sc_phytype, reg);
12356 		reg &= MII_ADDRMASK;
12357 	}
12358 #endif
12359 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
12360 
12361 	sc->phy.release(sc);
12362 	return rv;
12363 }
12364 
12365 /*
12366  * wm_gmii_gs40g_readreg:	[mii interface function]
12367  *
12368  *	Read a PHY register on the I210 and I211.
12369  * This could be handled by the PHY layer if we didn't have to lock the
12370  * resource ...
12371  */
12372 static int
12373 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
12374 {
12375 	struct wm_softc *sc = device_private(dev);
12376 	int page, offset;
12377 	int rv;
12378 
12379 	/* Acquire semaphore */
12380 	rv = sc->phy.acquire(sc);
12381 	if (rv != 0) {
12382 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12383 		return rv;
12384 	}
12385 
12386 	/* Page select */
12387 	page = reg >> GS40G_PAGE_SHIFT;
12388 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
12389 	if (rv != 0)
12390 		goto release;
12391 
12392 	/* Read reg */
12393 	offset = reg & GS40G_OFFSET_MASK;
12394 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
12395 
12396 release:
12397 	sc->phy.release(sc);
12398 	return rv;
12399 }
12400 
12401 /*
12402  * wm_gmii_gs40g_writereg:	[mii interface function]
12403  *
12404  *	Write a PHY register on the I210 and I211.
12405  * This could be handled by the PHY layer if we didn't have to lock the
12406  * resource ...
12407  */
12408 static int
12409 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
12410 {
12411 	struct wm_softc *sc = device_private(dev);
12412 	uint16_t page;
12413 	int offset, rv;
12414 
12415 	/* Acquire semaphore */
12416 	rv = sc->phy.acquire(sc);
12417 	if (rv != 0) {
12418 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12419 		return rv;
12420 	}
12421 
12422 	/* Page select */
12423 	page = reg >> GS40G_PAGE_SHIFT;
12424 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
12425 	if (rv != 0)
12426 		goto release;
12427 
12428 	/* Write reg */
12429 	offset = reg & GS40G_OFFSET_MASK;
12430 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
12431 
12432 release:
12433 	/* Release semaphore */
12434 	sc->phy.release(sc);
12435 	return rv;
12436 }
12437 
12438 /*
12439  * wm_gmii_statchg:	[mii interface function]
12440  *
12441  *	Callback from MII layer when media changes.
12442  */
12443 static void
12444 wm_gmii_statchg(struct ifnet *ifp)
12445 {
12446 	struct wm_softc *sc = ifp->if_softc;
12447 	struct mii_data *mii = &sc->sc_mii;
12448 
12449 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
12450 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
12451 	sc->sc_fcrtl &= ~FCRTL_XONE;
12452 
12453 	/* Get flow control negotiation result. */
12454 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
12455 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
12456 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
12457 		mii->mii_media_active &= ~IFM_ETH_FMASK;
12458 	}
12459 
12460 	if (sc->sc_flowflags & IFM_FLOW) {
12461 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
12462 			sc->sc_ctrl |= CTRL_TFCE;
12463 			sc->sc_fcrtl |= FCRTL_XONE;
12464 		}
12465 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
12466 			sc->sc_ctrl |= CTRL_RFCE;
12467 	}
12468 
12469 	if (mii->mii_media_active & IFM_FDX) {
12470 		DPRINTF(sc, WM_DEBUG_LINK,
12471 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
12472 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
12473 	} else {
12474 		DPRINTF(sc, WM_DEBUG_LINK,
12475 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
12476 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
12477 	}
12478 
12479 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12480 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
12481 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
12482 	    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
12483 	if (sc->sc_type == WM_T_80003) {
12484 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
12485 		case IFM_1000_T:
12486 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
12487 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
12488 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
12489 			break;
12490 		default:
12491 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
12492 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
12493 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
12494 			break;
12495 		}
12496 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
12497 	}
12498 }
12499 
12500 /* kumeran related (80003, ICH* and PCH*) */
12501 
12502 /*
12503  * wm_kmrn_readreg:
12504  *
12505  *	Read a kumeran register
12506  */
12507 static int
12508 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
12509 {
12510 	int rv;
12511 
12512 	if (sc->sc_type == WM_T_80003)
12513 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12514 	else
12515 		rv = sc->phy.acquire(sc);
12516 	if (rv != 0) {
12517 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
12518 		    __func__);
12519 		return rv;
12520 	}
12521 
12522 	rv = wm_kmrn_readreg_locked(sc, reg, val);
12523 
12524 	if (sc->sc_type == WM_T_80003)
12525 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12526 	else
12527 		sc->phy.release(sc);
12528 
12529 	return rv;
12530 }
12531 
12532 static int
12533 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
12534 {
12535 
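	/* Start the read: write the offset with the Read ENable bit set. */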
12536 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
12537 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
12538 	    KUMCTRLSTA_REN);
12539 	CSR_WRITE_FLUSH(sc);
12540 	delay(2);
12541 
12542 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
12543 
12544 	return 0;
12545 }
12546 
12547 /*
12548  * wm_kmrn_writereg:
12549  *
12550  *	Write a kumeran register
12551  */
12552 static int
12553 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
12554 {
12555 	int rv;
12556 
12557 	if (sc->sc_type == WM_T_80003)
12558 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12559 	else
12560 		rv = sc->phy.acquire(sc);
12561 	if (rv != 0) {
12562 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
12563 		    __func__);
12564 		return rv;
12565 	}
12566 
12567 	rv = wm_kmrn_writereg_locked(sc, reg, val);
12568 
12569 	if (sc->sc_type == WM_T_80003)
12570 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12571 	else
12572 		sc->phy.release(sc);
12573 
12574 	return rv;
12575 }
12576 
12577 static int
12578 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
12579 {
12580 
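	/* Writing the offset and data with REN clear performs the write. */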
12581 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
12582 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
12583 
12584 	return 0;
12585 }
12586 
12587 /*
12588  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
12589  * This access method is different from IEEE MMD.
12590  */
12591 static int
12592 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
12593 {
12594 	struct wm_softc *sc = device_private(dev);
12595 	int rv;
12596 
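	/* Latch the EMI address first, then access the EMI data register. */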
12597 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
12598 	if (rv != 0)
12599 		return rv;
12600 
12601 	if (rd)
12602 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
12603 	else
12604 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
12605 	return rv;
12606 }
12607 
12608 static int
12609 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
12610 {
12611 
12612 	return wm_access_emi_reg_locked(dev, reg, val, true);
12613 }
12614 
12615 static int
12616 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
12617 {
12618 
12619 	return wm_access_emi_reg_locked(dev, reg, &val, false);
12620 }
12621 
12622 /* SGMII related */
12623 
12624 /*
12625  * wm_sgmii_uses_mdio
12626  *
12627  * Check whether the transaction is to the internal PHY or the external
12628  * MDIO interface. Return true if it's MDIO.
12629  */
12630 static bool
12631 wm_sgmii_uses_mdio(struct wm_softc *sc)
12632 {
12633 	uint32_t reg;
12634 	bool ismdio = false;
12635 
12636 	switch (sc->sc_type) {
12637 	case WM_T_82575:
12638 	case WM_T_82576:
12639 		reg = CSR_READ(sc, WMREG_MDIC);
12640 		ismdio = ((reg & MDIC_DEST) != 0);
12641 		break;
12642 	case WM_T_82580:
12643 	case WM_T_I350:
12644 	case WM_T_I354:
12645 	case WM_T_I210:
12646 	case WM_T_I211:
12647 		reg = CSR_READ(sc, WMREG_MDICNFG);
12648 		ismdio = ((reg & MDICNFG_DEST) != 0);
12649 		break;
12650 	default:
12651 		break;
12652 	}
12653 
12654 	return ismdio;
12655 }
12656 
12657 /* Set up the internal SGMII PHY for SFP */
12658 static void
12659 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
12660 {
12661 	uint16_t id1, id2, phyreg;
12662 	int i, rv;
12663 
12664 	if (((sc->sc_flags & WM_F_SGMII) == 0)
12665 	    || ((sc->sc_flags & WM_F_SFP) == 0))
12666 		return;
12667 
12668 	for (i = 0; i < MII_NPHY; i++) {
12669 		sc->phy.no_errprint = true;
12670 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
12671 		if (rv != 0)
12672 			continue;
12673 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
12674 		if (rv != 0)
12675 			continue;
12676 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
12677 			continue;
12678 		sc->phy.no_errprint = false;
12679 
12680 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
12681 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
12682 		phyreg |= ESSR_SGMII_WOC_COPPER;
12683 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
12684 		break;
12685 	}
12687 }
12688 
12689 /*
12690  * wm_sgmii_readreg:	[mii interface function]
12691  *
12692  *	Read a PHY register on the SGMII
12693  * This could be handled by the PHY layer if we didn't have to lock the
12694  * resource ...
12695  */
12696 static int
12697 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
12698 {
12699 	struct wm_softc *sc = device_private(dev);
12700 	int rv;
12701 
12702 	rv = sc->phy.acquire(sc);
12703 	if (rv != 0) {
12704 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12705 		return rv;
12706 	}
12707 
12708 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
12709 
12710 	sc->phy.release(sc);
12711 	return rv;
12712 }
12713 
12714 static int
12715 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
12716 {
12717 	struct wm_softc *sc = device_private(dev);
12718 	uint32_t i2ccmd;
12719 	int i, rv = 0;
12720 
12721 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
12722 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
12723 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
12724 
12725 	/* Poll the ready bit */
12726 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
12727 		delay(50);
12728 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
12729 		if (i2ccmd & I2CCMD_READY)
12730 			break;
12731 	}
12732 	if ((i2ccmd & I2CCMD_READY) == 0) {
12733 		device_printf(dev, "I2CCMD Read did not complete\n");
12734 		rv = ETIMEDOUT;
12735 	}
12736 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
12737 		if (!sc->phy.no_errprint)
12738 			device_printf(dev, "I2CCMD Error bit set\n");
12739 		rv = EIO;
12740 	}
12741 
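	/* Swap the data bytes back from I2C (big endian) byte order. */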
12742 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
12743 
12744 	return rv;
12745 }
12746 
12747 /*
12748  * wm_sgmii_writereg:	[mii interface function]
12749  *
12750  *	Write a PHY register on the SGMII.
12751  * This could be handled by the PHY layer if we didn't have to lock the
12752  * resource ...
12753  */
12754 static int
12755 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
12756 {
12757 	struct wm_softc *sc = device_private(dev);
12758 	int rv;
12759 
12760 	rv = sc->phy.acquire(sc);
12761 	if (rv != 0) {
12762 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12763 		return rv;
12764 	}
12765 
12766 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
12767 
12768 	sc->phy.release(sc);
12769 
12770 	return rv;
12771 }
12772 
12773 static int
12774 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
12775 {
12776 	struct wm_softc *sc = device_private(dev);
12777 	uint32_t i2ccmd;
12778 	uint16_t swapdata;
12779 	int rv = 0;
12780 	int i;
12781 
12782 	/* Swap the data bytes for the I2C interface */
12783 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
12784 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
12785 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
12786 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
12787 
12788 	/* Poll the ready bit */
12789 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
12790 		delay(50);
12791 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
12792 		if (i2ccmd & I2CCMD_READY)
12793 			break;
12794 	}
12795 	if ((i2ccmd & I2CCMD_READY) == 0) {
12796 		device_printf(dev, "I2CCMD Write did not complete\n");
12797 		rv = ETIMEDOUT;
12798 	}
12799 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
12800 		device_printf(dev, "I2CCMD Error bit set\n");
12801 		rv = EIO;
12802 	}
12803 
12804 	return rv;
12805 }
12806 
12807 /* TBI related */
12808 
12809 static bool
12810 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
12811 {
12812 	bool sig;
12813 
12814 	sig = ctrl & CTRL_SWDPIN(1);
12815 
12816 	/*
12817 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
12818 	 * detect a signal, 1 if they don't.
12819 	 */
12820 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
12821 		sig = !sig;
12822 
12823 	return sig;
12824 }
12825 
12826 /*
12827  * wm_tbi_mediainit:
12828  *
12829  *	Initialize media for use on 1000BASE-X devices.
12830  */
12831 static void
12832 wm_tbi_mediainit(struct wm_softc *sc)
12833 {
12834 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
12835 	const char *sep = "";
12836 
12837 	if (sc->sc_type < WM_T_82543)
12838 		sc->sc_tipg = TIPG_WM_DFLT;
12839 	else
12840 		sc->sc_tipg = TIPG_LG_DFLT;
12841 
12842 	sc->sc_tbi_serdes_anegticks = 5;
12843 
12844 	/* Initialize our media structures */
12845 	sc->sc_mii.mii_ifp = ifp;
12846 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
12847 
12848 	ifp->if_baudrate = IF_Gbps(1);
12849 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
12850 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
12851 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
12852 		    wm_serdes_mediachange, wm_serdes_mediastatus,
12853 		    sc->sc_core_lock);
12854 	} else {
12855 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
12856 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
12857 	}
12858 
12859 	/*
12860 	 * SWD Pins:
12861 	 *
12862 	 *	0 = Link LED (output)
12863 	 *	1 = Loss Of Signal (input)
12864 	 */
12865 	sc->sc_ctrl |= CTRL_SWDPIO(0);
12866 
12867 	/* XXX Perhaps this is only for TBI */
12868 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
12869 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
12870 
12871 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
12872 		sc->sc_ctrl &= ~CTRL_LRST;
12873 
12874 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12875 
12876 #define	ADD(ss, mm, dd)							  \
12877 do {									  \
12878 	aprint_normal("%s%s", sep, ss);					  \
12879 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
12880 	sep = ", ";							  \
12881 } while (/*CONSTCOND*/0)
12882 
12883 	aprint_normal_dev(sc->sc_dev, "");
12884 
12885 	if (sc->sc_type == WM_T_I354) {
12886 		uint32_t status;
12887 
12888 		status = CSR_READ(sc, WMREG_STATUS);
12889 		if (((status & STATUS_2P5_SKU) != 0)
12890 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
12891 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
12892 		} else
12893 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
12894 	} else if (sc->sc_type == WM_T_82545) {
12895 		/* Only 82545 is LX (XXX except SFP) */
12896 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
12897 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
12898 	} else if (sc->sc_sfptype != 0) {
12899 		/* XXX wm(4) fiber/serdes don't use ifm_data */
12900 		switch (sc->sc_sfptype) {
12901 		default:
12902 		case SFF_SFP_ETH_FLAGS_1000SX:
12903 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
12904 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
12905 			break;
12906 		case SFF_SFP_ETH_FLAGS_1000LX:
12907 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
12908 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
12909 			break;
12910 		case SFF_SFP_ETH_FLAGS_1000CX:
12911 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
12912 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
12913 			break;
12914 		case SFF_SFP_ETH_FLAGS_1000T:
12915 			ADD("1000baseT", IFM_1000_T, 0);
12916 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
12917 			break;
12918 		case SFF_SFP_ETH_FLAGS_100FX:
12919 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
12920 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
12921 			break;
12922 		}
12923 	} else {
12924 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
12925 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
12926 	}
12927 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
12928 	aprint_normal("\n");
12929 
12930 #undef ADD
12931 
12932 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
12933 }
12934 
12935 /*
12936  * wm_tbi_mediachange:	[ifmedia interface function]
12937  *
12938  *	Set hardware to newly-selected media on a 1000BASE-X device.
12939  */
12940 static int
12941 wm_tbi_mediachange(struct ifnet *ifp)
12942 {
12943 	struct wm_softc *sc = ifp->if_softc;
12944 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
12945 	uint32_t status, ctrl;
12946 	bool signal;
12947 	int i;
12948 
12949 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
12950 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
12951 		/* XXX need some work for >= 82571 and < 82575 */
12952 		if (sc->sc_type < WM_T_82575)
12953 			return 0;
12954 	}
12955 
12956 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
12957 	    || (sc->sc_type >= WM_T_82575))
12958 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
12959 
12960 	sc->sc_ctrl &= ~CTRL_LRST;
12961 	sc->sc_txcw = TXCW_ANE;
12962 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
12963 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
12964 	else if (ife->ifm_media & IFM_FDX)
12965 		sc->sc_txcw |= TXCW_FD;
12966 	else
12967 		sc->sc_txcw |= TXCW_HD;
12968 
12969 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
12970 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
12971 
12972 	DPRINTF(sc, WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
12973 		device_xname(sc->sc_dev), sc->sc_txcw));
12974 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
12975 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12976 	CSR_WRITE_FLUSH(sc);
12977 	delay(1000);
12978 
12979 	ctrl = CSR_READ(sc, WMREG_CTRL);
12980 	signal = wm_tbi_havesignal(sc, ctrl);
12981 
12982 	DPRINTF(sc, WM_DEBUG_LINK,
12983 	    ("%s: signal = %d\n", device_xname(sc->sc_dev), signal));
12984 
12985 	if (signal) {
12986 		/* Have signal; wait for the link to come up. */
12987 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
12988 			delay(10000);
12989 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
12990 				break;
12991 		}
12992 
12993 		DPRINTF(sc, WM_DEBUG_LINK,
12994 		    ("%s: i = %d after waiting for link\n",
12995 			device_xname(sc->sc_dev), i));
12996 
12997 		status = CSR_READ(sc, WMREG_STATUS);
12998 		DPRINTF(sc, WM_DEBUG_LINK,
12999 		    ("%s: status after final read = 0x%x, STATUS_LU = %#"
13000 			__PRIxBIT "\n",
13001 			device_xname(sc->sc_dev), status, STATUS_LU));
13002 		if (status & STATUS_LU) {
13003 			/* Link is up. */
13004 			DPRINTF(sc, WM_DEBUG_LINK,
13005 			    ("%s: LINK: set media -> link up %s\n",
13006 				device_xname(sc->sc_dev),
13007 				(status & STATUS_FD) ? "FDX" : "HDX"));
13008 
13009 			/*
13010 			 * NOTE: CTRL will update TFCE and RFCE automatically,
13011 			 * so we should update sc->sc_ctrl
13012 			 */
13013 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
13014 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
13015 			sc->sc_fcrtl &= ~FCRTL_XONE;
13016 			if (status & STATUS_FD)
13017 				sc->sc_tctl |=
13018 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
13019 			else
13020 				sc->sc_tctl |=
13021 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
13022 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
13023 				sc->sc_fcrtl |= FCRTL_XONE;
13024 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
13025 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
13026 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
13027 			sc->sc_tbi_linkup = 1;
13028 		} else {
13029 			if (i == WM_LINKUP_TIMEOUT)
13030 				wm_check_for_link(sc);
13031 			/* Link is down. */
13032 			DPRINTF(sc, WM_DEBUG_LINK,
13033 			    ("%s: LINK: set media -> link down\n",
13034 				device_xname(sc->sc_dev)));
13035 			sc->sc_tbi_linkup = 0;
13036 		}
13037 	} else {
13038 		DPRINTF(sc, WM_DEBUG_LINK,
13039 		    ("%s: LINK: set media -> no signal\n",
13040 			device_xname(sc->sc_dev)));
13041 		sc->sc_tbi_linkup = 0;
13042 	}
13043 
13044 	wm_tbi_serdes_set_linkled(sc);
13045 
13046 	return 0;
13047 }
13048 
13049 /*
13050  * wm_tbi_mediastatus:	[ifmedia interface function]
13051  *
13052  *	Get the current interface media status on a 1000BASE-X device.
13053  */
13054 static void
13055 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
13056 {
13057 	struct wm_softc *sc = ifp->if_softc;
13058 	uint32_t ctrl, status;
13059 
13060 	ifmr->ifm_status = IFM_AVALID;
13061 	ifmr->ifm_active = IFM_ETHER;
13062 
13063 	status = CSR_READ(sc, WMREG_STATUS);
13064 	if ((status & STATUS_LU) == 0) {
13065 		ifmr->ifm_active |= IFM_NONE;
13066 		return;
13067 	}
13068 
13069 	ifmr->ifm_status |= IFM_ACTIVE;
13070 	/* Only 82545 is LX */
13071 	if (sc->sc_type == WM_T_82545)
13072 		ifmr->ifm_active |= IFM_1000_LX;
13073 	else
13074 		ifmr->ifm_active |= IFM_1000_SX;
13075 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
13076 		ifmr->ifm_active |= IFM_FDX;
13077 	else
13078 		ifmr->ifm_active |= IFM_HDX;
13079 	ctrl = CSR_READ(sc, WMREG_CTRL);
13080 	if (ctrl & CTRL_RFCE)
13081 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
13082 	if (ctrl & CTRL_TFCE)
13083 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
13084 }
13085 
13086 /* XXX TBI only */
13087 static int
13088 wm_check_for_link(struct wm_softc *sc)
13089 {
13090 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
13091 	uint32_t rxcw;
13092 	uint32_t ctrl;
13093 	uint32_t status;
13094 	bool signal;
13095 
13096 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
13097 		device_xname(sc->sc_dev), __func__));
13098 
13099 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
13100 		/* XXX need some work for >= 82571 */
13101 		if (sc->sc_type >= WM_T_82571) {
13102 			sc->sc_tbi_linkup = 1;
13103 			return 0;
13104 		}
13105 	}
13106 
13107 	rxcw = CSR_READ(sc, WMREG_RXCW);
13108 	ctrl = CSR_READ(sc, WMREG_CTRL);
13109 	status = CSR_READ(sc, WMREG_STATUS);
13110 	signal = wm_tbi_havesignal(sc, ctrl);
13111 
13112 	DPRINTF(sc, WM_DEBUG_LINK,
13113 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
13114 		device_xname(sc->sc_dev), __func__, signal,
13115 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
13116 
13117 	/*
13118 	 * SWDPIN   LU RXCW
13119 	 *	0    0	  0
13120 	 *	0    0	  1	(should not happen)
13121 	 *	0    1	  0	(should not happen)
13122 	 *	0    1	  1	(should not happen)
13123 	 *	1    0	  0	Disable autonego and force linkup
13124 	 *	1    0	  1	got /C/ but not linkup yet
13125 	 *	1    1	  0	(linkup)
13126 	 *	1    1	  1	If IFM_AUTO, back to autonego
13127 	 *
13128 	 */
13129 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
13130 		DPRINTF(sc, WM_DEBUG_LINK,
13131 		    ("%s: %s: force linkup and fullduplex\n",
13132 			device_xname(sc->sc_dev), __func__));
13133 		sc->sc_tbi_linkup = 0;
13134 		/* Disable auto-negotiation in the TXCW register */
13135 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
13136 
13137 		/*
13138 		 * Force link-up and also force full-duplex.
13139 		 *
13140 		 * NOTE: CTRL will update TFCE and RFCE automatically,
13141 		 * so we should update sc->sc_ctrl
13142 		 */
13143 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
13144 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13145 	} else if (((status & STATUS_LU) != 0)
13146 	    && ((rxcw & RXCW_C) != 0)
13147 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
13148 		sc->sc_tbi_linkup = 1;
13149 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
13150 			device_xname(sc->sc_dev), __func__));
13151 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
13152 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
13153 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
13154 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/\n",
13155 			device_xname(sc->sc_dev), __func__));
13156 	} else {
13157 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
13158 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
13159 			status));
13160 	}
13161 
13162 	return 0;
13163 }
13164 
13165 /*
13166  * wm_tbi_tick:
13167  *
13168  *	Check the link on TBI devices.
13169  *	This function acts as mii_tick().
13170  */
13171 static void
13172 wm_tbi_tick(struct wm_softc *sc)
13173 {
13174 	struct mii_data *mii = &sc->sc_mii;
13175 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
13176 	uint32_t status;
13177 
13178 	KASSERT(mutex_owned(sc->sc_core_lock));
13179 
13180 	status = CSR_READ(sc, WMREG_STATUS);
13181 
13182 	/* XXX is this needed? */
13183 	(void)CSR_READ(sc, WMREG_RXCW);
13184 	(void)CSR_READ(sc, WMREG_CTRL);
13185 
13186 	/* set link status */
13187 	if ((status & STATUS_LU) == 0) {
13188 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
13189 			device_xname(sc->sc_dev)));
13190 		sc->sc_tbi_linkup = 0;
13191 	} else if (sc->sc_tbi_linkup == 0) {
13192 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
13193 			device_xname(sc->sc_dev),
13194 			(status & STATUS_FD) ? "FDX" : "HDX"));
13195 		sc->sc_tbi_linkup = 1;
13196 		sc->sc_tbi_serdes_ticks = 0;
13197 	}
13198 
13199 	if ((sc->sc_if_flags & IFF_UP) == 0)
13200 		goto setled;
13201 
13202 	if ((status & STATUS_LU) == 0) {
13203 		sc->sc_tbi_linkup = 0;
13204 		/* If the timer expired, retry autonegotiation */
13205 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
13206 		    && (++sc->sc_tbi_serdes_ticks
13207 			>= sc->sc_tbi_serdes_anegticks)) {
13208 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
13209 				device_xname(sc->sc_dev), __func__));
13210 			sc->sc_tbi_serdes_ticks = 0;
13211 			/*
13212 			 * Reset the link, and let autonegotiation do
13213 			 * its thing
13214 			 */
13215 			sc->sc_ctrl |= CTRL_LRST;
13216 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13217 			CSR_WRITE_FLUSH(sc);
13218 			delay(1000);
13219 			sc->sc_ctrl &= ~CTRL_LRST;
13220 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13221 			CSR_WRITE_FLUSH(sc);
13222 			delay(1000);
13223 			CSR_WRITE(sc, WMREG_TXCW,
13224 			    sc->sc_txcw & ~TXCW_ANE);
13225 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
13226 		}
13227 	}
13228 
13229 setled:
13230 	wm_tbi_serdes_set_linkled(sc);
13231 }
13232 
13233 /* SERDES related */
13234 static void
13235 wm_serdes_power_up_link_82575(struct wm_softc *sc)
13236 {
13237 	uint32_t reg;
13238 
13239 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
13240 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
13241 		return;
13242 
13243 	/* Enable PCS to turn on link */
13244 	reg = CSR_READ(sc, WMREG_PCS_CFG);
13245 	reg |= PCS_CFG_PCS_EN;
13246 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
13247 
13248 	/* Power up the laser */
13249 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
13250 	reg &= ~CTRL_EXT_SWDPIN(3);
13251 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
13252 
13253 	/* Flush the write to verify completion */
13254 	CSR_WRITE_FLUSH(sc);
13255 	delay(1000);
13256 }
13257 
13258 static int
13259 wm_serdes_mediachange(struct ifnet *ifp)
13260 {
13261 	struct wm_softc *sc = ifp->if_softc;
13262 	bool pcs_autoneg = true; /* XXX */
13263 	uint32_t ctrl_ext, pcs_lctl, reg;
13264 
13265 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
13266 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
13267 		return 0;
13268 
13269 	/* XXX Currently, this function is not called on 8257[12] */
13270 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
13271 	    || (sc->sc_type >= WM_T_82575))
13272 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
13273 
13274 	/* Power on the sfp cage if present */
13275 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
13276 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
13277 	ctrl_ext |= CTRL_EXT_I2C_ENA;
13278 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
13279 
13280 	sc->sc_ctrl |= CTRL_SLU;
13281 
13282 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
13283 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
13284 
13285 		reg = CSR_READ(sc, WMREG_CONNSW);
13286 		reg |= CONNSW_ENRGSRC;
13287 		CSR_WRITE(sc, WMREG_CONNSW, reg);
13288 	}
13289 
13290 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
13291 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
13292 	case CTRL_EXT_LINK_MODE_SGMII:
13293 		/* SGMII mode lets the phy handle forcing speed/duplex */
13294 		pcs_autoneg = true;
13295 		/* Autoneg time out should be disabled for SGMII mode */
13296 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
13297 		break;
13298 	case CTRL_EXT_LINK_MODE_1000KX:
13299 		pcs_autoneg = false;
13300 		/* FALLTHROUGH */
13301 	default:
13302 		if ((sc->sc_type == WM_T_82575)
13303 		    || (sc->sc_type == WM_T_82576)) {
13304 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
13305 				pcs_autoneg = false;
13306 		}
13307 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
13308 		    | CTRL_FRCFDX;
13309 
13310 		/* Set speed of 1000/Full if speed/duplex is forced */
13311 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
13312 	}
13313 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13314 
13315 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
13316 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
13317 
13318 	if (pcs_autoneg) {
13319 		/* Set PCS register for autoneg */
13320 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
13321 
13322 		/* Disable force flow control for autoneg */
13323 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
13324 
13325 		/* Configure flow control advertisement for autoneg */
13326 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
13327 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
13328 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
13329 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
13330 	} else
13331 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
13332 
13333 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
13334 
13335 	return 0;
13336 }
13337 
13338 static void
13339 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
13340 {
13341 	struct wm_softc *sc = ifp->if_softc;
13342 	struct mii_data *mii = &sc->sc_mii;
13343 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
13344 	uint32_t pcs_adv, pcs_lpab, reg;
13345 
13346 	ifmr->ifm_status = IFM_AVALID;
13347 	ifmr->ifm_active = IFM_ETHER;
13348 
13349 	/* Check PCS */
13350 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
13351 	if ((reg & PCS_LSTS_LINKOK) == 0) {
13352 		ifmr->ifm_active |= IFM_NONE;
13353 		sc->sc_tbi_linkup = 0;
13354 		goto setled;
13355 	}
13356 
13357 	sc->sc_tbi_linkup = 1;
13358 	ifmr->ifm_status |= IFM_ACTIVE;
13359 	if (sc->sc_type == WM_T_I354) {
13360 		uint32_t status;
13361 
13362 		status = CSR_READ(sc, WMREG_STATUS);
13363 		if (((status & STATUS_2P5_SKU) != 0)
13364 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
13365 			ifmr->ifm_active |= IFM_2500_KX;
13366 		} else
13367 			ifmr->ifm_active |= IFM_1000_KX;
13368 	} else {
13369 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
13370 		case PCS_LSTS_SPEED_10:
13371 			ifmr->ifm_active |= IFM_10_T; /* XXX */
13372 			break;
13373 		case PCS_LSTS_SPEED_100:
13374 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
13375 			break;
13376 		case PCS_LSTS_SPEED_1000:
13377 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
13378 			break;
13379 		default:
13380 			device_printf(sc->sc_dev, "Unknown speed\n");
13381 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
13382 			break;
13383 		}
13384 	}
13385 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
13386 	if ((reg & PCS_LSTS_FDX) != 0)
13387 		ifmr->ifm_active |= IFM_FDX;
13388 	else
13389 		ifmr->ifm_active |= IFM_HDX;
13390 	mii->mii_media_active &= ~IFM_ETH_FMASK;
13391 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
13392 		/* Check flow */
13393 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
13394 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
13395 			DPRINTF(sc, WM_DEBUG_LINK,
13396 			    ("XXX LINKOK but not ACOMP\n"));
13397 			goto setled;
13398 		}
13399 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
13400 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
13401 		DPRINTF(sc, WM_DEBUG_LINK,
13402 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
13403 		if ((pcs_adv & TXCW_SYM_PAUSE)
13404 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
13405 			mii->mii_media_active |= IFM_FLOW
13406 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
13407 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
13408 		    && (pcs_adv & TXCW_ASYM_PAUSE)
13409 		    && (pcs_lpab & TXCW_SYM_PAUSE)
13410 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
13411 			mii->mii_media_active |= IFM_FLOW
13412 			    | IFM_ETH_TXPAUSE;
13413 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
13414 		    && (pcs_adv & TXCW_ASYM_PAUSE)
13415 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
13416 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
13417 			mii->mii_media_active |= IFM_FLOW
13418 			    | IFM_ETH_RXPAUSE;
13419 		}
13420 	}
13421 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
13422 	    | (mii->mii_media_active & IFM_ETH_FMASK);
13423 setled:
13424 	wm_tbi_serdes_set_linkled(sc);
13425 }
13426 
13427 /*
13428  * wm_serdes_tick:
13429  *
13430  *	Check the link on serdes devices.
13431  */
13432 static void
13433 wm_serdes_tick(struct wm_softc *sc)
13434 {
13435 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
13436 	struct mii_data *mii = &sc->sc_mii;
13437 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
13438 	uint32_t reg;
13439 
13440 	KASSERT(mutex_owned(sc->sc_core_lock));
13441 
13442 	mii->mii_media_status = IFM_AVALID;
13443 	mii->mii_media_active = IFM_ETHER;
13444 
13445 	/* Check PCS */
13446 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
13447 	if ((reg & PCS_LSTS_LINKOK) != 0) {
13448 		mii->mii_media_status |= IFM_ACTIVE;
13449 		sc->sc_tbi_linkup = 1;
13450 		sc->sc_tbi_serdes_ticks = 0;
13451 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
13452 		if ((reg & PCS_LSTS_FDX) != 0)
13453 			mii->mii_media_active |= IFM_FDX;
13454 		else
13455 			mii->mii_media_active |= IFM_HDX;
13456 	} else {
13457 		mii->mii_media_active |= IFM_NONE;
13458 		sc->sc_tbi_linkup = 0;
13459 		/* If the timer expired, retry autonegotiation */
13460 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
13461 		    && (++sc->sc_tbi_serdes_ticks
13462 			>= sc->sc_tbi_serdes_anegticks)) {
13463 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
13464 				device_xname(sc->sc_dev), __func__));
13465 			sc->sc_tbi_serdes_ticks = 0;
13466 			/* XXX */
13467 			wm_serdes_mediachange(ifp);
13468 		}
13469 	}
13470 
13471 	wm_tbi_serdes_set_linkled(sc);
13472 }
13473 
13474 /* SFP related */
13475 
13476 static int
13477 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
13478 {
13479 	uint32_t i2ccmd;
13480 	int i;
13481 
13482 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
13483 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
13484 
13485 	/* Poll the ready bit */
13486 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
13487 		delay(50);
13488 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
13489 		if (i2ccmd & I2CCMD_READY)
13490 			break;
13491 	}
13492 	if ((i2ccmd & I2CCMD_READY) == 0)
13493 		return -1;
13494 	if ((i2ccmd & I2CCMD_ERROR) != 0)
13495 		return -1;
13496 
13497 	*data = i2ccmd & 0x00ff;
13498 
13499 	return 0;
13500 }
13501 
13502 static uint32_t
13503 wm_sfp_get_media_type(struct wm_softc *sc)
13504 {
13505 	uint32_t ctrl_ext;
13506 	uint8_t val = 0;
13507 	int timeout = 3;
13508 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
13509 	int rv = -1;
13510 
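	/* Power on the SFP cage and enable the I2C interface. */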
13511 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
13512 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
13513 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
13514 	CSR_WRITE_FLUSH(sc);
13515 
13516 	/* Read SFP module data */
13517 	while (timeout) {
13518 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
13519 		if (rv == 0)
13520 			break;
13521 		delay(100*1000); /* XXX too big */
13522 		timeout--;
13523 	}
13524 	if (rv != 0)
13525 		goto out;
13526 
13527 	switch (val) {
13528 	case SFF_SFP_ID_SFF:
13529 		aprint_normal_dev(sc->sc_dev,
13530 		    "Module/Connector soldered to board\n");
13531 		break;
13532 	case SFF_SFP_ID_SFP:
13533 		sc->sc_flags |= WM_F_SFP;
13534 		break;
13535 	case SFF_SFP_ID_UNKNOWN:
13536 		goto out;
13537 	default:
13538 		break;
13539 	}
13540 
13541 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
13542 	if (rv != 0)
13543 		goto out;
13544 
13545 	sc->sc_sfptype = val;
13546 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
13547 		mediatype = WM_MEDIATYPE_SERDES;
13548 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
13549 		sc->sc_flags |= WM_F_SGMII;
13550 		mediatype = WM_MEDIATYPE_COPPER;
13551 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
13552 		sc->sc_flags |= WM_F_SGMII;
13553 		mediatype = WM_MEDIATYPE_SERDES;
13554 	} else {
13555 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
13556 		    __func__, sc->sc_sfptype);
13557 		sc->sc_sfptype = 0; /* XXX unknown */
13558 	}
13559 
13560 out:
13561 	/* Restore I2C interface setting */
13562 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
13563 
13564 	return mediatype;
13565 }
13566 
13567 /*
13568  * NVM related.
13569  * Microwire, SPI (w/wo EERD) and Flash.
13570  */
13571 
13572 /* Both spi and uwire */
13573 
13574 /*
13575  * wm_eeprom_sendbits:
13576  *
13577  *	Send a series of bits to the EEPROM.
13578  */
13579 static void
13580 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
13581 {
13582 	uint32_t reg;
13583 	int x;
13584 
13585 	reg = CSR_READ(sc, WMREG_EECD);
13586 
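	/* Clock out each bit MSB first: set DI, then pulse SK high and low. */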
13587 	for (x = nbits; x > 0; x--) {
13588 		if (bits & (1U << (x - 1)))
13589 			reg |= EECD_DI;
13590 		else
13591 			reg &= ~EECD_DI;
13592 		CSR_WRITE(sc, WMREG_EECD, reg);
13593 		CSR_WRITE_FLUSH(sc);
13594 		delay(2);
13595 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
13596 		CSR_WRITE_FLUSH(sc);
13597 		delay(2);
13598 		CSR_WRITE(sc, WMREG_EECD, reg);
13599 		CSR_WRITE_FLUSH(sc);
13600 		delay(2);
13601 	}
13602 }
13603 
13604 /*
13605  * wm_eeprom_recvbits:
13606  *
13607  *	Receive a series of bits from the EEPROM.
13608  */
13609 static void
13610 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
13611 {
13612 	uint32_t reg, val;
13613 	int x;
13614 
13615 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
13616 
13617 	val = 0;
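	/* Clock in each bit MSB first: raise SK, sample DO, then lower SK. */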
13618 	for (x = nbits; x > 0; x--) {
13619 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
13620 		CSR_WRITE_FLUSH(sc);
13621 		delay(2);
13622 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
13623 			val |= (1U << (x - 1));
13624 		CSR_WRITE(sc, WMREG_EECD, reg);
13625 		CSR_WRITE_FLUSH(sc);
13626 		delay(2);
13627 	}
13628 	*valp = val;
13629 }
13630 
13631 /* Microwire */
13632 
13633 /*
13634  * wm_nvm_read_uwire:
13635  *
13636  *	Read a word from the EEPROM using the MicroWire protocol.
13637  */
13638 static int
13639 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
13640 {
13641 	uint32_t reg, val;
13642 	int i, rv;
13643 
13644 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13645 		device_xname(sc->sc_dev), __func__));
13646 
13647 	rv = sc->nvm.acquire(sc);
13648 	if (rv != 0)
13649 		return rv;
13650 
13651 	for (i = 0; i < wordcnt; i++) {
13652 		/* Clear SK and DI. */
13653 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
13654 		CSR_WRITE(sc, WMREG_EECD, reg);
13655 
13656 		/*
13657 		 * XXX: workaround for a bug in qemu-0.12.x and prior
13658 		 * and Xen.
13659 		 *
13660 		 * We use this workaround only for 82540 because qemu's
13661 		 * e1000 act as 82540.
13662 		 */
13663 		if (sc->sc_type == WM_T_82540) {
13664 			reg |= EECD_SK;
13665 			CSR_WRITE(sc, WMREG_EECD, reg);
13666 			reg &= ~EECD_SK;
13667 			CSR_WRITE(sc, WMREG_EECD, reg);
13668 			CSR_WRITE_FLUSH(sc);
13669 			delay(2);
13670 		}
13671 		/* XXX: end of workaround */
13672 
13673 		/* Set CHIP SELECT. */
13674 		reg |= EECD_CS;
13675 		CSR_WRITE(sc, WMREG_EECD, reg);
13676 		CSR_WRITE_FLUSH(sc);
13677 		delay(2);
13678 
13679 		/* Shift in the READ command. */
13680 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
13681 
13682 		/* Shift in address. */
13683 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
13684 
13685 		/* Shift out the data. */
13686 		wm_eeprom_recvbits(sc, &val, 16);
13687 		data[i] = val & 0xffff;
13688 
13689 		/* Clear CHIP SELECT. */
13690 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
13691 		CSR_WRITE(sc, WMREG_EECD, reg);
13692 		CSR_WRITE_FLUSH(sc);
13693 		delay(2);
13694 	}
13695 
13696 	sc->nvm.release(sc);
13697 	return 0;
13698 }
13699 
13700 /* SPI */
13701 
13702 /*
13703  * Set SPI and FLASH related information from the EECD register.
13704  * For 82541 and 82547, the word size is taken from EEPROM.
13705  */
13706 static int
13707 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
13708 {
13709 	int size;
13710 	uint32_t reg;
13711 	uint16_t data;
13712 
13713 	reg = CSR_READ(sc, WMREG_EECD);
13714 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
13715 
13716 	/* Read the size of NVM from EECD by default */
13717 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
13718 	switch (sc->sc_type) {
13719 	case WM_T_82541:
13720 	case WM_T_82541_2:
13721 	case WM_T_82547:
13722 	case WM_T_82547_2:
13723 		/* Set dummy value to access EEPROM */
13724 		sc->sc_nvm_wordsize = 64;
13725 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
13726 			aprint_error_dev(sc->sc_dev,
13727 			    "%s: failed to read EEPROM size\n", __func__);
13728 		}
13729 		reg = data;
13730 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
13731 		if (size == 0)
13732 			size = 6; /* 64 word size */
13733 		else
13734 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
13735 		break;
13736 	case WM_T_80003:
13737 	case WM_T_82571:
13738 	case WM_T_82572:
13739 	case WM_T_82573: /* SPI case */
13740 	case WM_T_82574: /* SPI case */
13741 	case WM_T_82583: /* SPI case */
13742 		size += NVM_WORD_SIZE_BASE_SHIFT;
13743 		if (size > 14)
13744 			size = 14;
13745 		break;
13746 	case WM_T_82575:
13747 	case WM_T_82576:
13748 	case WM_T_82580:
13749 	case WM_T_I350:
13750 	case WM_T_I354:
13751 	case WM_T_I210:
13752 	case WM_T_I211:
13753 		size += NVM_WORD_SIZE_BASE_SHIFT;
13754 		if (size > 15)
13755 			size = 15;
13756 		break;
13757 	default:
13758 		aprint_error_dev(sc->sc_dev,
13759 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
13760 		return -1;
13762 	}
13763 
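	/* "size" is the log2 of the NVM word count. */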
13764 	sc->sc_nvm_wordsize = 1 << size;
13765 
13766 	return 0;
13767 }
13768 
13769 /*
13770  * wm_nvm_ready_spi:
13771  *
13772  *	Wait for a SPI EEPROM to be ready for commands.
13773  */
13774 static int
13775 wm_nvm_ready_spi(struct wm_softc *sc)
13776 {
13777 	uint32_t val;
13778 	int usec;
13779 
13780 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13781 		device_xname(sc->sc_dev), __func__));
13782 
13783 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
13784 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
13785 		wm_eeprom_recvbits(sc, &val, 8);
13786 		if ((val & SPI_SR_RDY) == 0)
13787 			break;
13788 	}
13789 	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
13791 		return -1;
13792 	}
13793 	return 0;
13794 }
13795 
13796 /*
13797  * wm_nvm_read_spi:
13798  *
 *	Read a word from the EEPROM using the SPI protocol.
13800  */
13801 static int
13802 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
13803 {
13804 	uint32_t reg, val;
13805 	int i;
13806 	uint8_t opc;
13807 	int rv;
13808 
13809 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13810 		device_xname(sc->sc_dev), __func__));
13811 
13812 	rv = sc->nvm.acquire(sc);
13813 	if (rv != 0)
13814 		return rv;
13815 
13816 	/* Clear SK and CS. */
13817 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
13818 	CSR_WRITE(sc, WMREG_EECD, reg);
13819 	CSR_WRITE_FLUSH(sc);
13820 	delay(2);
13821 
13822 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
13823 		goto out;
13824 
13825 	/* Toggle CS to flush commands. */
13826 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
13827 	CSR_WRITE_FLUSH(sc);
13828 	delay(2);
13829 	CSR_WRITE(sc, WMREG_EECD, reg);
13830 	CSR_WRITE_FLUSH(sc);
13831 	delay(2);
13832 
13833 	opc = SPI_OPC_READ;
13834 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
13835 		opc |= SPI_OPC_A8;
13836 
13837 	wm_eeprom_sendbits(sc, opc, 8);
13838 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
13839 
13840 	for (i = 0; i < wordcnt; i++) {
13841 		wm_eeprom_recvbits(sc, &val, 16);
13842 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
13843 	}
13844 
13845 	/* Raise CS and clear SK. */
13846 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
13847 	CSR_WRITE(sc, WMREG_EECD, reg);
13848 	CSR_WRITE_FLUSH(sc);
13849 	delay(2);
13850 
13851 out:
13852 	sc->nvm.release(sc);
13853 	return rv;
13854 }
13855 
/* Reading the NVM using the EERD register */
13857 
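/*
 * wm_poll_eerd_eewr_done:
 *
 *	Poll the DONE bit in the EERD or EEWR register (selected by "rw").
 *	Returns 0 once the bit is set, or -1 after ~500ms without completion.
 */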
13858 static int
13859 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
13860 {
13861 	uint32_t attempts = 100000;
13862 	uint32_t i, reg = 0;
13863 	int32_t done = -1;
13864 
13865 	for (i = 0; i < attempts; i++) {
13866 		reg = CSR_READ(sc, rw);
13867 
13868 		if (reg & EERD_DONE) {
13869 			done = 0;
13870 			break;
13871 		}
13872 		delay(5);
13873 	}
13874 
13875 	return done;
13876 }
13877 
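/*
 * wm_nvm_read_eerd:
 *
 *	Read words from the NVM using the EEPROM Read (EERD) register.
 */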
13878 static int
13879 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
13880 {
13881 	int i, eerd = 0;
13882 	int rv;
13883 
13884 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13885 		device_xname(sc->sc_dev), __func__));
13886 
13887 	rv = sc->nvm.acquire(sc);
13888 	if (rv != 0)
13889 		return rv;
13890 
13891 	for (i = 0; i < wordcnt; i++) {
13892 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
13893 		CSR_WRITE(sc, WMREG_EERD, eerd);
13894 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
13895 		if (rv != 0) {
13896 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
13897 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
13898 			break;
13899 		}
13900 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
13901 	}
13902 
13903 	sc->nvm.release(sc);
13904 	return rv;
13905 }
13906 
13907 /* Flash */
13908 
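/*
 * wm_nvm_valid_bank_detect_ich8lan:
 *
 *	Determine which of the two flash banks holds the valid NVM image,
 *	either from EECD_SEC1VAL (ICH8/ICH9) or from each bank's signature
 *	word.
 */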
13909 static int
13910 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
13911 {
13912 	uint32_t eecd;
13913 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
13914 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
13915 	uint32_t nvm_dword = 0;
13916 	uint8_t sig_byte = 0;
13917 	int rv;
13918 
13919 	switch (sc->sc_type) {
13920 	case WM_T_PCH_SPT:
13921 	case WM_T_PCH_CNP:
13922 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
13923 		act_offset = ICH_NVM_SIG_WORD * 2;
13924 
13925 		/* Set bank to 0 in case flash read fails. */
13926 		*bank = 0;
13927 
13928 		/* Check bank 0 */
13929 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
13930 		if (rv != 0)
13931 			return rv;
13932 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
13933 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13934 			*bank = 0;
13935 			return 0;
13936 		}
13937 
13938 		/* Check bank 1 */
		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
		    &nvm_dword);
		if (rv != 0)
			return rv;
		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
13942 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13943 			*bank = 1;
13944 			return 0;
13945 		}
13946 		aprint_error_dev(sc->sc_dev,
13947 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
13948 		return -1;
13949 	case WM_T_ICH8:
13950 	case WM_T_ICH9:
13951 		eecd = CSR_READ(sc, WMREG_EECD);
13952 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
13953 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
13954 			return 0;
13955 		}
13956 		/* FALLTHROUGH */
13957 	default:
13958 		/* Default to 0 */
13959 		*bank = 0;
13960 
13961 		/* Check bank 0 */
13962 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
13963 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13964 			*bank = 0;
13965 			return 0;
13966 		}
13967 
13968 		/* Check bank 1 */
13969 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
13970 		    &sig_byte);
13971 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13972 			*bank = 1;
13973 			return 0;
13974 		}
13975 	}
13976 
13977 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
13978 		device_xname(sc->sc_dev)));
13979 	return -1;
13980 }
13981 
13982 /******************************************************************************
13983  * This function does initial flash setup so that a new read/write/erase cycle
13984  * can be started.
13985  *
13986  * sc - The pointer to the hw structure
13987  ****************************************************************************/
13988 static int32_t
13989 wm_ich8_cycle_init(struct wm_softc *sc)
13990 {
13991 	uint16_t hsfsts;
13992 	int32_t error = 1;
13993 	int32_t i     = 0;
13994 
13995 	if (sc->sc_type >= WM_T_PCH_SPT)
13996 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
13997 	else
13998 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
13999 
	/* Check the Flash Descriptor Valid bit in the HW status register. */
14001 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
14002 		return error;
14003 
	/* Clear FCERR and DAEL in the HW status register by writing 1s. */
14006 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
14007 
14008 	if (sc->sc_type >= WM_T_PCH_SPT)
14009 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
14010 	else
14011 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
14012 
14013 	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against in order to start a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it reads as 1 after a
	 * hardware reset, which could then be used to tell whether a cycle
	 * is in progress or has completed.  We should also have a software
	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit
	 * so that two threads' accesses to those bits are serialized and
	 * two threads can't start a cycle at the same time.
14022 	 */
14023 
14024 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
14025 		/*
14026 		 * There is no cycle running at present, so we can start a
14027 		 * cycle
14028 		 */
14029 
14030 		/* Begin by setting Flash Cycle Done. */
14031 		hsfsts |= HSFSTS_DONE;
14032 		if (sc->sc_type >= WM_T_PCH_SPT)
14033 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14034 			    hsfsts & 0xffffUL);
14035 		else
14036 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
14037 		error = 0;
14038 	} else {
14039 		/*
		 * Otherwise, poll for some time so the current cycle has a
		 * chance to end before we give up.
14042 		 */
14043 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
14044 			if (sc->sc_type >= WM_T_PCH_SPT)
14045 				hsfsts = ICH8_FLASH_READ32(sc,
14046 				    ICH_FLASH_HSFSTS) & 0xffffUL;
14047 			else
14048 				hsfsts = ICH8_FLASH_READ16(sc,
14049 				    ICH_FLASH_HSFSTS);
14050 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
14051 				error = 0;
14052 				break;
14053 			}
14054 			delay(1);
14055 		}
14056 		if (error == 0) {
14057 			/*
			 * The previous cycle completed within the timeout,
			 * so now set the Flash Cycle Done bit.
14060 			 */
14061 			hsfsts |= HSFSTS_DONE;
14062 			if (sc->sc_type >= WM_T_PCH_SPT)
14063 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14064 				    hsfsts & 0xffffUL);
14065 			else
14066 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
14067 				    hsfsts);
14068 		}
14069 	}
14070 	return error;
14071 }
14072 
14073 /******************************************************************************
14074  * This function starts a flash cycle and waits for its completion
14075  *
14076  * sc - The pointer to the hw structure
14077  ****************************************************************************/
14078 static int32_t
14079 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
14080 {
14081 	uint16_t hsflctl;
14082 	uint16_t hsfsts;
14083 	int32_t error = 1;
14084 	uint32_t i = 0;
14085 
14086 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
14087 	if (sc->sc_type >= WM_T_PCH_SPT)
14088 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
14089 	else
14090 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
14091 	hsflctl |= HSFCTL_GO;
14092 	if (sc->sc_type >= WM_T_PCH_SPT)
14093 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14094 		    (uint32_t)hsflctl << 16);
14095 	else
14096 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
14097 
14098 	/* Wait till FDONE bit is set to 1 */
14099 	do {
14100 		if (sc->sc_type >= WM_T_PCH_SPT)
14101 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
14102 			    & 0xffffUL;
14103 		else
14104 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
14105 		if (hsfsts & HSFSTS_DONE)
14106 			break;
14107 		delay(1);
14108 		i++;
14109 	} while (i < timeout);
	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
14111 		error = 0;
14112 
14113 	return error;
14114 }
14115 
14116 /******************************************************************************
14117  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
14118  *
14119  * sc - The pointer to the hw structure
14120  * index - The index of the byte or word to read.
14121  * size - Size of data to read, 1=byte 2=word, 4=dword
14122  * data - Pointer to the word to store the value read.
14123  *****************************************************************************/
14124 static int32_t
14125 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
14126     uint32_t size, uint32_t *data)
14127 {
14128 	uint16_t hsfsts;
14129 	uint16_t hsflctl;
14130 	uint32_t flash_linear_address;
14131 	uint32_t flash_data = 0;
14132 	int32_t error = 1;
14133 	int32_t count = 0;
14134 
	if (size < 1 || size > 4 || data == NULL ||
14136 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
14137 		return error;
14138 
14139 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
14140 	    sc->sc_ich8_flash_base;
14141 
14142 	do {
14143 		delay(1);
14144 		/* Steps */
14145 		error = wm_ich8_cycle_init(sc);
14146 		if (error)
14147 			break;
14148 
14149 		if (sc->sc_type >= WM_T_PCH_SPT)
14150 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
14151 			    >> 16;
14152 		else
14153 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* The byte count field is size - 1 (0 = 1 byte, 3 = 4 bytes). */
14155 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
14156 		    & HSFCTL_BCOUNT_MASK;
14157 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
14158 		if (sc->sc_type >= WM_T_PCH_SPT) {
14159 			/*
			 * On SPT, this register is in LAN memory space, not
			 * flash.  Therefore, only 32-bit access is supported.
14162 			 */
14163 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14164 			    (uint32_t)hsflctl << 16);
14165 		} else
14166 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
14167 
14168 		/*
14169 		 * Write the last 24 bits of index into Flash Linear address
14170 		 * field in Flash Address
14171 		 */
14172 		/* TODO: TBD maybe check the index against the size of flash */
14173 
14174 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
14175 
14176 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
14177 
14178 		/*
		 * If FCERR is set, clear it and try the whole sequence a few
		 * more times; otherwise read the data out of the Flash Data0
		 * register, least significant byte first.
14183 		 */
14184 		if (error == 0) {
14185 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
14186 			if (size == 1)
14187 				*data = (uint8_t)(flash_data & 0x000000FF);
14188 			else if (size == 2)
14189 				*data = (uint16_t)(flash_data & 0x0000FFFF);
14190 			else if (size == 4)
14191 				*data = (uint32_t)flash_data;
14192 			break;
14193 		} else {
14194 			/*
14195 			 * If we've gotten here, then things are probably
14196 			 * completely hosed, but if the error condition is
14197 			 * detected, it won't hurt to give it another try...
14198 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
14199 			 */
14200 			if (sc->sc_type >= WM_T_PCH_SPT)
14201 				hsfsts = ICH8_FLASH_READ32(sc,
14202 				    ICH_FLASH_HSFSTS) & 0xffffUL;
14203 			else
14204 				hsfsts = ICH8_FLASH_READ16(sc,
14205 				    ICH_FLASH_HSFSTS);
14206 
14207 			if (hsfsts & HSFSTS_ERR) {
14208 				/* Repeat for some time before giving up. */
14209 				continue;
14210 			} else if ((hsfsts & HSFSTS_DONE) == 0)
14211 				break;
14212 		}
14213 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
14214 
14215 	return error;
14216 }
14217 
14218 /******************************************************************************
14219  * Reads a single byte from the NVM using the ICH8 flash access registers.
14220  *
14221  * sc - pointer to wm_hw structure
14222  * index - The index of the byte to read.
14223  * data - Pointer to a byte to store the value read.
14224  *****************************************************************************/
14225 static int32_t
14226 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
14227 {
14228 	int32_t status;
14229 	uint32_t word = 0;
14230 
14231 	status = wm_read_ich8_data(sc, index, 1, &word);
14232 	if (status == 0)
14233 		*data = (uint8_t)word;
14234 	else
14235 		*data = 0;
14236 
14237 	return status;
14238 }
14239 
14240 /******************************************************************************
14241  * Reads a word from the NVM using the ICH8 flash access registers.
14242  *
14243  * sc - pointer to wm_hw structure
14244  * index - The starting byte index of the word to read.
14245  * data - Pointer to a word to store the value read.
14246  *****************************************************************************/
14247 static int32_t
14248 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
14249 {
14250 	int32_t status;
14251 	uint32_t word = 0;
14252 
14253 	status = wm_read_ich8_data(sc, index, 2, &word);
14254 	if (status == 0)
14255 		*data = (uint16_t)word;
14256 	else
14257 		*data = 0;
14258 
14259 	return status;
14260 }
14261 
14262 /******************************************************************************
14263  * Reads a dword from the NVM using the ICH8 flash access registers.
14264  *
14265  * sc - pointer to wm_hw structure
 * index - The starting byte index of the dword to read.
 * data - Pointer to a dword to store the value read.
14268  *****************************************************************************/
14269 static int32_t
14270 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
14271 {
14272 	int32_t status;
14273 
14274 	status = wm_read_ich8_data(sc, index, 4, data);
14275 	return status;
14276 }
14277 
14278 /******************************************************************************
14279  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
14280  * register.
14281  *
14282  * sc - Struct containing variables accessed by shared code
14283  * offset - offset of word in the EEPROM to read
14284  * data - word read from the EEPROM
14285  * words - number of words to read
14286  *****************************************************************************/
14287 static int
14288 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
14289 {
14290 	int rv;
14291 	uint32_t flash_bank = 0;
14292 	uint32_t act_offset = 0;
14293 	uint32_t bank_offset = 0;
14294 	uint16_t word = 0;
14295 	uint16_t i = 0;
14296 
14297 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14298 		device_xname(sc->sc_dev), __func__));
14299 
14300 	rv = sc->nvm.acquire(sc);
14301 	if (rv != 0)
14302 		return rv;
14303 
14304 	/*
	 * We need to know which flash bank is valid.  In the event that
	 * we didn't allocate eeprom_shadow_ram, we may not be managing
	 * flash_bank, so it cannot be trusted and needs to be updated
	 * with each read.
14309 	 */
14310 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
14311 	if (rv) {
14312 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
14313 			device_xname(sc->sc_dev)));
14314 		flash_bank = 0;
14315 	}
14316 
14317 	/*
14318 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
14319 	 * size
14320 	 */
14321 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
14322 
14323 	for (i = 0; i < words; i++) {
14324 		/* The NVM part needs a byte offset, hence * 2 */
14325 		act_offset = bank_offset + ((offset + i) * 2);
14326 		rv = wm_read_ich8_word(sc, act_offset, &word);
14327 		if (rv) {
14328 			aprint_error_dev(sc->sc_dev,
14329 			    "%s: failed to read NVM\n", __func__);
14330 			break;
14331 		}
14332 		data[i] = word;
14333 	}
14334 
14335 	sc->nvm.release(sc);
14336 	return rv;
14337 }
14338 
14339 /******************************************************************************
14340  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
14341  * register.
14342  *
14343  * sc - Struct containing variables accessed by shared code
14344  * offset - offset of word in the EEPROM to read
14345  * data - word read from the EEPROM
14346  * words - number of words to read
14347  *****************************************************************************/
14348 static int
14349 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
14350 {
14351 	int	 rv;
14352 	uint32_t flash_bank = 0;
14353 	uint32_t act_offset = 0;
14354 	uint32_t bank_offset = 0;
14355 	uint32_t dword = 0;
14356 	uint16_t i = 0;
14357 
14358 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14359 		device_xname(sc->sc_dev), __func__));
14360 
14361 	rv = sc->nvm.acquire(sc);
14362 	if (rv != 0)
14363 		return rv;
14364 
14365 	/*
	 * We need to know which flash bank is valid.  In the event that
	 * we didn't allocate eeprom_shadow_ram, we may not be managing
	 * flash_bank, so it cannot be trusted and needs to be updated
	 * with each read.
14370 	 */
14371 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
14372 	if (rv) {
14373 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
14374 			device_xname(sc->sc_dev)));
14375 		flash_bank = 0;
14376 	}
14377 
14378 	/*
14379 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
14380 	 * size
14381 	 */
14382 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
14383 
14384 	for (i = 0; i < words; i++) {
14385 		/* The NVM part needs a byte offset, hence * 2 */
14386 		act_offset = bank_offset + ((offset + i) * 2);
14387 		/* but we must read dword aligned, so mask ... */
14388 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
14389 		if (rv) {
14390 			aprint_error_dev(sc->sc_dev,
14391 			    "%s: failed to read NVM\n", __func__);
14392 			break;
14393 		}
14394 		/* ... and pick out low or high word */
14395 		if ((act_offset & 0x2) == 0)
14396 			data[i] = (uint16_t)(dword & 0xFFFF);
14397 		else
14398 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
14399 	}
14400 
14401 	sc->nvm.release(sc);
14402 	return rv;
14403 }
14404 
14405 /* iNVM */
14406 
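/*
 * wm_nvm_read_word_invm:
 *
 *	Scan the iNVM (OTP) records for a word autoload entry matching
 *	"address" and return its data.
 */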
14407 static int
14408 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
14409 {
	int32_t	 rv = -1;	/* Fail unless a matching record is found. */
14411 	uint32_t invm_dword;
14412 	uint16_t i;
14413 	uint8_t record_type, word_address;
14414 
14415 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14416 		device_xname(sc->sc_dev), __func__));
14417 
14418 	for (i = 0; i < INVM_SIZE; i++) {
14419 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
14420 		/* Get record type */
14421 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
14422 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
14423 			break;
14424 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
14425 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
14426 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
14427 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
14428 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
14429 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
14430 			if (word_address == address) {
14431 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
14432 				rv = 0;
14433 				break;
14434 			}
14435 		}
14436 	}
14437 
14438 	return rv;
14439 }
14440 
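/*
 * wm_nvm_read_invm:
 *
 *	Read words from the iNVM, substituting the documented default
 *	values for configuration words that are not programmed.  Unmapped
 *	words read as NVM_RESERVED_WORD.
 */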
14441 static int
14442 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
14443 {
14444 	int i, rv;
14445 
14446 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14447 		device_xname(sc->sc_dev), __func__));
14448 
14449 	rv = sc->nvm.acquire(sc);
14450 	if (rv != 0)
14451 		return rv;
14452 
14453 	for (i = 0; i < words; i++) {
14454 		switch (offset + i) {
14455 		case NVM_OFF_MACADDR:
14456 		case NVM_OFF_MACADDR1:
14457 		case NVM_OFF_MACADDR2:
14458 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
14459 			if (rv != 0) {
14460 				data[i] = 0xffff;
14461 				rv = -1;
14462 			}
14463 			break;
14464 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
14465 			rv = wm_nvm_read_word_invm(sc, offset, data);
14466 			if (rv != 0) {
14467 				*data = INVM_DEFAULT_AL;
14468 				rv = 0;
14469 			}
14470 			break;
14471 		case NVM_OFF_CFG2:
14472 			rv = wm_nvm_read_word_invm(sc, offset, data);
14473 			if (rv != 0) {
14474 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
14475 				rv = 0;
14476 			}
14477 			break;
14478 		case NVM_OFF_CFG4:
14479 			rv = wm_nvm_read_word_invm(sc, offset, data);
14480 			if (rv != 0) {
14481 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
14482 				rv = 0;
14483 			}
14484 			break;
14485 		case NVM_OFF_LED_1_CFG:
14486 			rv = wm_nvm_read_word_invm(sc, offset, data);
14487 			if (rv != 0) {
14488 				*data = NVM_LED_1_CFG_DEFAULT_I211;
14489 				rv = 0;
14490 			}
14491 			break;
14492 		case NVM_OFF_LED_0_2_CFG:
14493 			rv = wm_nvm_read_word_invm(sc, offset, data);
14494 			if (rv != 0) {
14495 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
14496 				rv = 0;
14497 			}
14498 			break;
14499 		case NVM_OFF_ID_LED_SETTINGS:
14500 			rv = wm_nvm_read_word_invm(sc, offset, data);
14501 			if (rv != 0) {
14502 				*data = ID_LED_RESERVED_FFFF;
14503 				rv = 0;
14504 			}
14505 			break;
14506 		default:
14507 			DPRINTF(sc, WM_DEBUG_NVM,
14508 			    ("NVM word 0x%02x is not mapped.\n", offset));
14509 			*data = NVM_RESERVED_WORD;
14510 			break;
14511 		}
14512 	}
14513 
14514 	sc->nvm.release(sc);
14515 	return rv;
14516 }
14517 
/* Locking, NVM type detection, checksum validation, version check and read */
14519 
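/*
 * Return 1 if the NVM is an on-board EEPROM, or 0 if an 82573/82574/82583
 * is strapped to use an external Flash instead.
 */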
14520 static int
14521 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
14522 {
14523 	uint32_t eecd = 0;
14524 
14525 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
14526 	    || sc->sc_type == WM_T_82583) {
14527 		eecd = CSR_READ(sc, WMREG_EECD);
14528 
14529 		/* Isolate bits 15 & 16 */
14530 		eecd = ((eecd >> 15) & 0x03);
14531 
14532 		/* If both bits are set, device is Flash type */
14533 		if (eecd == 0x03)
14534 			return 0;
14535 	}
14536 	return 1;
14537 }
14538 
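/* Return 1 if the EEC register reports an attached Flash (I210/I211). */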
14539 static int
14540 wm_nvm_flash_presence_i210(struct wm_softc *sc)
14541 {
14542 	uint32_t eec;
14543 
14544 	eec = CSR_READ(sc, WMREG_EEC);
14545 	if ((eec & EEC_FLASH_DETECTED) != 0)
14546 		return 1;
14547 
14548 	return 0;
14549 }
14550 
14551 /*
14552  * wm_nvm_validate_checksum
14553  *
14554  * The checksum is defined as the sum of the first 64 (16 bit) words.
14555  */
14556 static int
14557 wm_nvm_validate_checksum(struct wm_softc *sc)
14558 {
14559 	uint16_t checksum;
14560 	uint16_t eeprom_data;
14561 #ifdef WM_DEBUG
14562 	uint16_t csum_wordaddr, valid_checksum;
14563 #endif
14564 	int i;
14565 
14566 	checksum = 0;
14567 
14568 	/* Don't check for I211 */
14569 	if (sc->sc_type == WM_T_I211)
14570 		return 0;
14571 
14572 #ifdef WM_DEBUG
14573 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
14574 	    || (sc->sc_type == WM_T_PCH_CNP)) {
14575 		csum_wordaddr = NVM_OFF_COMPAT;
14576 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
14577 	} else {
14578 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
14579 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
14580 	}
14581 
14582 	/* Dump EEPROM image for debug */
14583 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
14584 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
14585 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
14586 		/* XXX PCH_SPT? */
14587 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
14588 		if ((eeprom_data & valid_checksum) == 0)
14589 			DPRINTF(sc, WM_DEBUG_NVM,
14590 			    ("%s: NVM need to be updated (%04x != %04x)\n",
14591 				device_xname(sc->sc_dev), eeprom_data,
14592 				valid_checksum));
14593 	}
14594 
14595 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
14596 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
14597 		for (i = 0; i < NVM_SIZE; i++) {
14598 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
14599 				printf("XXXX ");
14600 			else
14601 				printf("%04hx ", eeprom_data);
14602 			if (i % 8 == 7)
14603 				printf("\n");
14604 		}
14605 	}
14606 
14607 #endif /* WM_DEBUG */
14608 
14609 	for (i = 0; i < NVM_SIZE; i++) {
14610 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
14611 			return -1;
14612 		checksum += eeprom_data;
14613 	}
14614 
14615 	if (checksum != (uint16_t) NVM_CHECKSUM) {
14616 #ifdef WM_DEBUG
14617 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
14618 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
14619 #endif
14620 	}
14621 
14622 	return 0;
14623 }
14624 
14625 static void
14626 wm_nvm_version_invm(struct wm_softc *sc)
14627 {
14628 	uint32_t dword;
14629 
14630 	/*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm and just use word 61 as the document
	 * describes.  Perhaps it's not perfect, though...
14634 	 *
14635 	 * Example:
14636 	 *
14637 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
14638 	 */
14639 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
14640 	dword = __SHIFTOUT(dword, INVM_VER_1);
14641 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
14642 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
14643 }
14644 
14645 static void
14646 wm_nvm_version(struct wm_softc *sc)
14647 {
14648 	uint16_t major, minor, build, patch;
14649 	uint16_t uid0, uid1;
14650 	uint16_t nvm_data;
14651 	uint16_t off;
14652 	bool check_version = false;
14653 	bool check_optionrom = false;
14654 	bool have_build = false;
14655 	bool have_uid = true;
14656 
14657 	/*
14658 	 * Version format:
14659 	 *
14660 	 * XYYZ
14661 	 * X0YZ
14662 	 * X0YY
14663 	 *
14664 	 * Example:
14665 	 *
14666 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
14667 	 *	82571	0x50a6	5.10.6?
14668 	 *	82572	0x506a	5.6.10?
14669 	 *	82572EI	0x5069	5.6.9?
14670 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
14671 	 *		0x2013	2.1.3?
14672 	 *	82583	0x10a0	1.10.0? (document says it's default value)
14673 	 * ICH8+82567	0x0040	0.4.0?
14674 	 * ICH9+82566	0x1040	1.4.0?
14675 	 *ICH10+82567	0x0043	0.4.3?
14676 	 *  PCH+82577	0x00c1	0.12.1?
14677 	 * PCH2+82579	0x00d3	0.13.3?
14678 	 *		0x00d4	0.13.4?
14679 	 *  LPT+I218	0x0023	0.2.3?
14680 	 *  SPT+I219	0x0084	0.8.4?
14681 	 *  CNP+I219	0x0054	0.5.4?
14682 	 */
14683 
14684 	/*
14685 	 * XXX
	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
	 * I've never seen real 82574 hardware with such a small SPI ROM.
14688 	 */
14689 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
14690 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
14691 		have_uid = false;
14692 
14693 	switch (sc->sc_type) {
14694 	case WM_T_82571:
14695 	case WM_T_82572:
14696 	case WM_T_82574:
14697 	case WM_T_82583:
14698 		check_version = true;
14699 		check_optionrom = true;
14700 		have_build = true;
14701 		break;
14702 	case WM_T_ICH8:
14703 	case WM_T_ICH9:
14704 	case WM_T_ICH10:
14705 	case WM_T_PCH:
14706 	case WM_T_PCH2:
14707 	case WM_T_PCH_LPT:
14708 	case WM_T_PCH_SPT:
14709 	case WM_T_PCH_CNP:
14710 		check_version = true;
14711 		have_build = true;
14712 		have_uid = false;
14713 		break;
14714 	case WM_T_82575:
14715 	case WM_T_82576:
14716 	case WM_T_82580:
14717 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
14718 			check_version = true;
14719 		break;
14720 	case WM_T_I211:
14721 		wm_nvm_version_invm(sc);
14722 		have_uid = false;
14723 		goto printver;
14724 	case WM_T_I210:
14725 		if (!wm_nvm_flash_presence_i210(sc)) {
14726 			wm_nvm_version_invm(sc);
14727 			have_uid = false;
14728 			goto printver;
14729 		}
14730 		/* FALLTHROUGH */
14731 	case WM_T_I350:
14732 	case WM_T_I354:
14733 		check_version = true;
14734 		check_optionrom = true;
14735 		break;
14736 	default:
14737 		return;
14738 	}
14739 	if (check_version
14740 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
14741 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
14742 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
14743 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
14744 			build = nvm_data & NVM_BUILD_MASK;
14745 			have_build = true;
14746 		} else
14747 			minor = nvm_data & 0x00ff;
14748 
14749 		/* Decimal */
14750 		minor = (minor / 16) * 10 + (minor % 16);
14751 		sc->sc_nvm_ver_major = major;
14752 		sc->sc_nvm_ver_minor = minor;
14753 
14754 printver:
14755 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
14756 		    sc->sc_nvm_ver_minor);
14757 		if (have_build) {
14758 			sc->sc_nvm_ver_build = build;
14759 			aprint_verbose(".%d", build);
14760 		}
14761 	}
14762 
	/* Assume the Option ROM area is above NVM_SIZE. */
14764 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
14765 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
14766 		/* Option ROM Version */
14767 		if ((off != 0x0000) && (off != 0xffff)) {
14768 			int rv;
14769 
14770 			off += NVM_COMBO_VER_OFF;
14771 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
14772 			rv |= wm_nvm_read(sc, off, 1, &uid0);
14773 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
14774 			    && (uid1 != 0) && (uid1 != 0xffff)) {
14775 				/* 16bits */
14776 				major = uid0 >> 8;
14777 				build = (uid0 << 8) | (uid1 >> 8);
14778 				patch = uid1 & 0x00ff;
14779 				aprint_verbose(", option ROM Version %d.%d.%d",
14780 				    major, build, patch);
14781 			}
14782 		}
14783 	}
14784 
14785 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
14786 		aprint_verbose(", Image Unique ID %08x",
14787 		    ((uint32_t)uid1 << 16) | uid0);
14788 }
14789 
14790 /*
14791  * wm_nvm_read:
14792  *
14793  *	Read data from the serial EEPROM.
14794  */
14795 static int
14796 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
14797 {
14798 	int rv;
14799 
14800 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14801 		device_xname(sc->sc_dev), __func__));
14802 
14803 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
14804 		return -1;
14805 
14806 	rv = sc->nvm.read(sc, word, wordcnt, data);
14807 
14808 	return rv;
14809 }
14810 
14811 /*
14812  * Hardware semaphores.
 * Very complex...
14814  */
14815 
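/* Null acquire/release methods for devices that need no extra locking. */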
14816 static int
14817 wm_get_null(struct wm_softc *sc)
14818 {
14819 
14820 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14821 		device_xname(sc->sc_dev), __func__));
14822 	return 0;
14823 }
14824 
14825 static void
14826 wm_put_null(struct wm_softc *sc)
14827 {
14828 
14829 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14830 		device_xname(sc->sc_dev), __func__));
14831 	return;
14832 }
14833 
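/*
 * Request direct (bit-bang) access to the EEPROM by setting EE_REQ in
 * EECD, then wait for the hardware to grant it (EE_GNT).
 */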
14834 static int
14835 wm_get_eecd(struct wm_softc *sc)
14836 {
14837 	uint32_t reg;
14838 	int x;
14839 
14840 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
14841 		device_xname(sc->sc_dev), __func__));
14842 
14843 	reg = CSR_READ(sc, WMREG_EECD);
14844 
14845 	/* Request EEPROM access. */
14846 	reg |= EECD_EE_REQ;
14847 	CSR_WRITE(sc, WMREG_EECD, reg);
14848 
	/* ... and wait for it to be granted. */
14850 	for (x = 0; x < 1000; x++) {
14851 		reg = CSR_READ(sc, WMREG_EECD);
14852 		if (reg & EECD_EE_GNT)
14853 			break;
14854 		delay(5);
14855 	}
14856 	if ((reg & EECD_EE_GNT) == 0) {
14857 		aprint_error_dev(sc->sc_dev,
14858 		    "could not acquire EEPROM GNT\n");
14859 		reg &= ~EECD_EE_REQ;
14860 		CSR_WRITE(sc, WMREG_EECD, reg);
14861 		return -1;
14862 	}
14863 
14864 	return 0;
14865 }
14866 
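/* Raise the EEPROM clock (SK).  SPI parts use a 1us delay, Microwire 50us. */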
14867 static void
14868 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
14869 {
14870 
14871 	*eecd |= EECD_SK;
14872 	CSR_WRITE(sc, WMREG_EECD, *eecd);
14873 	CSR_WRITE_FLUSH(sc);
14874 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
14875 		delay(1);
14876 	else
14877 		delay(50);
14878 }
14879 
14880 static void
14881 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
14882 {
14883 
14884 	*eecd &= ~EECD_SK;
14885 	CSR_WRITE(sc, WMREG_EECD, *eecd);
14886 	CSR_WRITE_FLUSH(sc);
14887 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
14888 		delay(1);
14889 	else
14890 		delay(50);
14891 }
14892 
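/* Terminate the NVM access and release the EE_REQ bus request. */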
14893 static void
14894 wm_put_eecd(struct wm_softc *sc)
14895 {
14896 	uint32_t reg;
14897 
14898 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14899 		device_xname(sc->sc_dev), __func__));
14900 
14901 	/* Stop nvm */
14902 	reg = CSR_READ(sc, WMREG_EECD);
14903 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
14904 		/* Pull CS high */
14905 		reg |= EECD_CS;
14906 		wm_nvm_eec_clock_lower(sc, &reg);
14907 	} else {
14908 		/* CS on Microwire is active-high */
14909 		reg &= ~(EECD_CS | EECD_DI);
14910 		CSR_WRITE(sc, WMREG_EECD, reg);
14911 		wm_nvm_eec_clock_raise(sc, &reg);
14912 		wm_nvm_eec_clock_lower(sc, &reg);
14913 	}
14914 
14915 	reg = CSR_READ(sc, WMREG_EECD);
14916 	reg &= ~EECD_EE_REQ;
14917 	CSR_WRITE(sc, WMREG_EECD, reg);
14918 
14919 	return;
14920 }
14921 
14922 /*
14923  * Get hardware semaphore.
14924  * Same as e1000_get_hw_semaphore_generic()
14925  */
14926 static int
14927 wm_get_swsm_semaphore(struct wm_softc *sc)
14928 {
14929 	int32_t timeout;
14930 	uint32_t swsm;
14931 
14932 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14933 		device_xname(sc->sc_dev), __func__));
14934 	KASSERT(sc->sc_nvm_wordsize > 0);
14935 
14936 retry:
14937 	/* Get the SW semaphore. */
14938 	timeout = sc->sc_nvm_wordsize + 1;
14939 	while (timeout) {
14940 		swsm = CSR_READ(sc, WMREG_SWSM);
14941 
14942 		if ((swsm & SWSM_SMBI) == 0)
14943 			break;
14944 
14945 		delay(50);
14946 		timeout--;
14947 	}
14948 
14949 	if (timeout == 0) {
14950 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
14951 			/*
14952 			 * In rare circumstances, the SW semaphore may already
14953 			 * be held unintentionally. Clear the semaphore once
14954 			 * before giving up.
14955 			 */
14956 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
14957 			wm_put_swsm_semaphore(sc);
14958 			goto retry;
14959 		}
14960 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
14961 		return -1;
14962 	}
14963 
14964 	/* Get the FW semaphore. */
14965 	timeout = sc->sc_nvm_wordsize + 1;
14966 	while (timeout) {
14967 		swsm = CSR_READ(sc, WMREG_SWSM);
14968 		swsm |= SWSM_SWESMBI;
14969 		CSR_WRITE(sc, WMREG_SWSM, swsm);
14970 		/* If we managed to set the bit we got the semaphore. */
14971 		swsm = CSR_READ(sc, WMREG_SWSM);
14972 		if (swsm & SWSM_SWESMBI)
14973 			break;
14974 
14975 		delay(50);
14976 		timeout--;
14977 	}
14978 
14979 	if (timeout == 0) {
14980 		aprint_error_dev(sc->sc_dev,
14981 		    "could not acquire SWSM SWESMBI\n");
14982 		/* Release semaphores */
14983 		wm_put_swsm_semaphore(sc);
14984 		return -1;
14985 	}
14986 	return 0;
14987 }
14988 
14989 /*
14990  * Put hardware semaphore.
14991  * Same as e1000_put_hw_semaphore_generic()
14992  */
14993 static void
14994 wm_put_swsm_semaphore(struct wm_softc *sc)
14995 {
14996 	uint32_t swsm;
14997 
14998 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14999 		device_xname(sc->sc_dev), __func__));
15000 
15001 	swsm = CSR_READ(sc, WMREG_SWSM);
15002 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
15003 	CSR_WRITE(sc, WMREG_SWSM, swsm);
15004 }
15005 
15006 /*
15007  * Get SW/FW semaphore.
15008  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
15009  */
15010 static int
15011 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
15012 {
15013 	uint32_t swfw_sync;
15014 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
15015 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
15016 	int timeout;
15017 
15018 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15019 		device_xname(sc->sc_dev), __func__));
15020 
15021 	if (sc->sc_type == WM_T_80003)
15022 		timeout = 50;
15023 	else
15024 		timeout = 200;
15025 
15026 	while (timeout) {
15027 		if (wm_get_swsm_semaphore(sc)) {
15028 			aprint_error_dev(sc->sc_dev,
15029 			    "%s: failed to get semaphore\n",
15030 			    __func__);
15031 			return -1;
15032 		}
15033 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
15034 		if ((swfw_sync & (swmask | fwmask)) == 0) {
15035 			swfw_sync |= swmask;
15036 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
15037 			wm_put_swsm_semaphore(sc);
15038 			return 0;
15039 		}
15040 		wm_put_swsm_semaphore(sc);
15041 		delay(5000);
15042 		timeout--;
15043 	}
15044 	device_printf(sc->sc_dev,
15045 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
15046 	    mask, swfw_sync);
15047 	return -1;
15048 }
15049 
15050 static void
15051 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
15052 {
15053 	uint32_t swfw_sync;
15054 
15055 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15056 		device_xname(sc->sc_dev), __func__));
15057 
15058 	while (wm_get_swsm_semaphore(sc) != 0)
15059 		continue;
15060 
15061 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
15062 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
15063 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
15064 
15065 	wm_put_swsm_semaphore(sc);
15066 }
15067 
15068 static int
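/* Acquire both the SW/FW EEPROM semaphore and, if used, EECD access. */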
15069 wm_get_nvm_80003(struct wm_softc *sc)
15070 {
15071 	int rv;
15072 
15073 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
15074 		device_xname(sc->sc_dev), __func__));
15075 
15076 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
15077 		aprint_error_dev(sc->sc_dev,
15078 		    "%s: failed to get semaphore(SWFW)\n", __func__);
15079 		return rv;
15080 	}
15081 
15082 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15083 	    && (rv = wm_get_eecd(sc)) != 0) {
15084 		aprint_error_dev(sc->sc_dev,
15085 		    "%s: failed to get semaphore(EECD)\n", __func__);
15086 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
15087 		return rv;
15088 	}
15089 
15090 	return 0;
15091 }
15092 
15093 static void
15094 wm_put_nvm_80003(struct wm_softc *sc)
15095 {
15096 
15097 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15098 		device_xname(sc->sc_dev), __func__));
15099 
15100 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15101 		wm_put_eecd(sc);
15102 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
15103 }
15104 
15105 static int
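/* Acquire the SWSM semaphore and, except on the 82573, EECD access too. */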
15106 wm_get_nvm_82571(struct wm_softc *sc)
15107 {
15108 	int rv;
15109 
15110 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15111 		device_xname(sc->sc_dev), __func__));
15112 
15113 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
15114 		return rv;
15115 
15116 	switch (sc->sc_type) {
15117 	case WM_T_82573:
15118 		break;
15119 	default:
15120 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15121 			rv = wm_get_eecd(sc);
15122 		break;
15123 	}
15124 
15125 	if (rv != 0) {
15126 		aprint_error_dev(sc->sc_dev,
15127 		    "%s: failed to get semaphore\n",
15128 		    __func__);
15129 		wm_put_swsm_semaphore(sc);
15130 	}
15131 
15132 	return rv;
15133 }
15134 
15135 static void
15136 wm_put_nvm_82571(struct wm_softc *sc)
15137 {
15138 
15139 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15140 		device_xname(sc->sc_dev), __func__));
15141 
15142 	switch (sc->sc_type) {
15143 	case WM_T_82573:
15144 		break;
15145 	default:
15146 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15147 			wm_put_eecd(sc);
15148 		break;
15149 	}
15150 
15151 	wm_put_swsm_semaphore(sc);
15152 }
15153 
15154 static int
15155 wm_get_phy_82575(struct wm_softc *sc)
15156 {
15157 
15158 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15159 		device_xname(sc->sc_dev), __func__));
15160 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
15161 }
15162 
15163 static void
15164 wm_put_phy_82575(struct wm_softc *sc)
15165 {
15166 
15167 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15168 		device_xname(sc->sc_dev), __func__));
15169 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
15170 }
15171 
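/*
 * Get the SW/FW/HW semaphore used on ICH8-class devices by claiming MDIO
 * software ownership in EXTCNFCTR, under the shared PHY mutex.
 */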
15172 static int
15173 wm_get_swfwhw_semaphore(struct wm_softc *sc)
15174 {
15175 	uint32_t ext_ctrl;
15176 	int timeout = 200;
15177 
15178 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15179 		device_xname(sc->sc_dev), __func__));
15180 
15181 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
15182 	for (timeout = 0; timeout < 200; timeout++) {
15183 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15184 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
15185 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15186 
15187 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15188 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
15189 			return 0;
15190 		delay(5000);
15191 	}
15192 	device_printf(sc->sc_dev,
15193 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
15194 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
15195 	return -1;
15196 }
15197 
15198 static void
15199 wm_put_swfwhw_semaphore(struct wm_softc *sc)
15200 {
15201 	uint32_t ext_ctrl;
15202 
15203 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15204 		device_xname(sc->sc_dev), __func__));
15205 
15206 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15207 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15208 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15209 
15210 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
15211 }
15212 
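/*
 * Get the software flag used on PCH-class devices: wait for any current
 * owner to release EXTCNFCTR MDIO ownership, then claim it ourselves.
 */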
15213 static int
15214 wm_get_swflag_ich8lan(struct wm_softc *sc)
15215 {
15216 	uint32_t ext_ctrl;
15217 	int timeout;
15218 
15219 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15220 		device_xname(sc->sc_dev), __func__));
15221 	mutex_enter(sc->sc_ich_phymtx);
15222 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
15223 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15224 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
15225 			break;
15226 		delay(1000);
15227 	}
15228 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
15229 		device_printf(sc->sc_dev,
15230 		    "SW has already locked the resource\n");
15231 		goto out;
15232 	}
15233 
15234 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
15235 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15236 	for (timeout = 0; timeout < 1000; timeout++) {
15237 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15238 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
15239 			break;
15240 		delay(1000);
15241 	}
15242 	if (timeout >= 1000) {
15243 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
15244 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15245 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15246 		goto out;
15247 	}
15248 	return 0;
15249 
15250 out:
15251 	mutex_exit(sc->sc_ich_phymtx);
15252 	return -1;
15253 }
15254 
15255 static void
15256 wm_put_swflag_ich8lan(struct wm_softc *sc)
15257 {
15258 	uint32_t ext_ctrl;
15259 
15260 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15261 		device_xname(sc->sc_dev), __func__));
15262 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15263 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
15264 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15265 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15266 	} else
15267 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
15268 
15269 	mutex_exit(sc->sc_ich_phymtx);
15270 }
15271 
15272 static int
15273 wm_get_nvm_ich8lan(struct wm_softc *sc)
15274 {
15275 
15276 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15277 		device_xname(sc->sc_dev), __func__));
15278 	mutex_enter(sc->sc_ich_nvmmtx);
15279 
15280 	return 0;
15281 }
15282 
15283 static void
15284 wm_put_nvm_ich8lan(struct wm_softc *sc)
15285 {
15286 
15287 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15288 		device_xname(sc->sc_dev), __func__));
15289 	mutex_exit(sc->sc_ich_nvmmtx);
15290 }
15291 
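/*
 * Get the 82573 hardware semaphore by claiming MDIO software ownership
 * in EXTCNFCTR, retrying for up to WM_MDIO_OWNERSHIP_TIMEOUT iterations.
 */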
15292 static int
15293 wm_get_hw_semaphore_82573(struct wm_softc *sc)
15294 {
15295 	int i = 0;
15296 	uint32_t reg;
15297 
15298 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15299 		device_xname(sc->sc_dev), __func__));
15300 
15301 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15302 	do {
15303 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
15304 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
15305 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15306 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
15307 			break;
15308 		delay(2*1000);
15309 		i++;
15310 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
15311 
15312 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
15313 		wm_put_hw_semaphore_82573(sc);
15314 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
15315 		    device_xname(sc->sc_dev));
15316 		return -1;
15317 	}
15318 
15319 	return 0;
15320 }
15321 
15322 static void
15323 wm_put_hw_semaphore_82573(struct wm_softc *sc)
15324 {
15325 	uint32_t reg;
15326 
15327 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15328 		device_xname(sc->sc_dev), __func__));
15329 
15330 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15331 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15332 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
15333 }
15334 
15335 /*
15336  * Management mode and power management related subroutines.
15337  * BMC, AMT, suspend/resume and EEE.
15338  */
15339 
15340 #ifdef WM_WOL
15341 static int
15342 wm_check_mng_mode(struct wm_softc *sc)
15343 {
15344 	int rv;
15345 
15346 	switch (sc->sc_type) {
15347 	case WM_T_ICH8:
15348 	case WM_T_ICH9:
15349 	case WM_T_ICH10:
15350 	case WM_T_PCH:
15351 	case WM_T_PCH2:
15352 	case WM_T_PCH_LPT:
15353 	case WM_T_PCH_SPT:
15354 	case WM_T_PCH_CNP:
15355 		rv = wm_check_mng_mode_ich8lan(sc);
15356 		break;
15357 	case WM_T_82574:
15358 	case WM_T_82583:
15359 		rv = wm_check_mng_mode_82574(sc);
15360 		break;
15361 	case WM_T_82571:
15362 	case WM_T_82572:
15363 	case WM_T_82573:
15364 	case WM_T_80003:
15365 		rv = wm_check_mng_mode_generic(sc);
15366 		break;
15367 	default:
		/* Nothing to do */
15369 		rv = 0;
15370 		break;
15371 	}
15372 
15373 	return rv;
15374 }
15375 
15376 static int
15377 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
15378 {
15379 	uint32_t fwsm;
15380 
15381 	fwsm = CSR_READ(sc, WMREG_FWSM);
15382 
15383 	if (((fwsm & FWSM_FW_VALID) != 0)
15384 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
15385 		return 1;
15386 
15387 	return 0;
15388 }
15389 
15390 static int
15391 wm_check_mng_mode_82574(struct wm_softc *sc)
15392 {
15393 	uint16_t data;
15394 
15395 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
15396 
15397 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
15398 		return 1;
15399 
15400 	return 0;
15401 }
15402 
15403 static int
15404 wm_check_mng_mode_generic(struct wm_softc *sc)
15405 {
15406 	uint32_t fwsm;
15407 
15408 	fwsm = CSR_READ(sc, WMREG_FWSM);
15409 
15410 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
15411 		return 1;
15412 
15413 	return 0;
15414 }
15415 #endif /* WM_WOL */
15416 
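/*
 * Return 1 if the firmware can pass received management packets through
 * to the host, 0 otherwise.
 */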
15417 static int
15418 wm_enable_mng_pass_thru(struct wm_softc *sc)
15419 {
15420 	uint32_t manc, fwsm, factps;
15421 
15422 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
15423 		return 0;
15424 
15425 	manc = CSR_READ(sc, WMREG_MANC);
15426 
15427 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
15428 		device_xname(sc->sc_dev), manc));
15429 	if ((manc & MANC_RECV_TCO_EN) == 0)
15430 		return 0;
15431 
15432 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
15433 		fwsm = CSR_READ(sc, WMREG_FWSM);
15434 		factps = CSR_READ(sc, WMREG_FACTPS);
15435 		if (((factps & FACTPS_MNGCG) == 0)
15436 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
15437 			return 1;
15438 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
15439 		uint16_t data;
15440 
15441 		factps = CSR_READ(sc, WMREG_FACTPS);
15442 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
15443 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
15444 			device_xname(sc->sc_dev), factps, data));
15445 		if (((factps & FACTPS_MNGCG) == 0)
15446 		    && ((data & NVM_CFG2_MNGM_MASK)
15447 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
15448 			return 1;
15449 	} else if (((manc & MANC_SMBUS_EN) != 0)
15450 	    && ((manc & MANC_ASF_EN) == 0))
15451 		return 1;
15452 
15453 	return 0;
15454 }
15455 
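/* Return true if the firmware currently blocks PHY resets. */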
15456 static bool
15457 wm_phy_resetisblocked(struct wm_softc *sc)
15458 {
15459 	bool blocked = false;
15460 	uint32_t reg;
15461 	int i = 0;
15462 
15463 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15464 		device_xname(sc->sc_dev), __func__));
15465 
15466 	switch (sc->sc_type) {
15467 	case WM_T_ICH8:
15468 	case WM_T_ICH9:
15469 	case WM_T_ICH10:
15470 	case WM_T_PCH:
15471 	case WM_T_PCH2:
15472 	case WM_T_PCH_LPT:
15473 	case WM_T_PCH_SPT:
15474 	case WM_T_PCH_CNP:
15475 		do {
15476 			reg = CSR_READ(sc, WMREG_FWSM);
15477 			if ((reg & FWSM_RSPCIPHY) == 0) {
15478 				blocked = true;
15479 				delay(10*1000);
15480 				continue;
15481 			}
15482 			blocked = false;
15483 		} while (blocked && (i++ < 30));
		return blocked;
15486 	case WM_T_82571:
15487 	case WM_T_82572:
15488 	case WM_T_82573:
15489 	case WM_T_82574:
15490 	case WM_T_82583:
15491 	case WM_T_80003:
15492 		reg = CSR_READ(sc, WMREG_MANC);
		return (reg & MANC_BLK_PHY_RST_ON_IDE) != 0;
15498 	default:
15499 		/* No problem */
15500 		break;
15501 	}
15502 
15503 	return false;
15504 }
15505 
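/*
 * Set the DRV_LOAD bit (in SWSM on the 82573, in CTRL_EXT on other 82571
 * and newer devices) to tell the firmware that the driver has taken
 * control of the hardware.
 */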
15506 static void
15507 wm_get_hw_control(struct wm_softc *sc)
15508 {
15509 	uint32_t reg;
15510 
15511 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15512 		device_xname(sc->sc_dev), __func__));
15513 
15514 	if (sc->sc_type == WM_T_82573) {
15515 		reg = CSR_READ(sc, WMREG_SWSM);
15516 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
15517 	} else if (sc->sc_type >= WM_T_82571) {
15518 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
15519 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
15520 	}
15521 }
15522 
15523 static void
15524 wm_release_hw_control(struct wm_softc *sc)
15525 {
15526 	uint32_t reg;
15527 
15528 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15529 		device_xname(sc->sc_dev), __func__));
15530 
15531 	if (sc->sc_type == WM_T_82573) {
15532 		reg = CSR_READ(sc, WMREG_SWSM);
15533 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
15534 	} else if (sc->sc_type >= WM_T_82571) {
15535 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
15536 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
15537 	}
15538 }
15539 
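/*
 * Gate or ungate automatic PHY configuration by hardware.  Only PCH2
 * (82579) and newer need this.
 */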
15540 static void
15541 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
15542 {
15543 	uint32_t reg;
15544 
15545 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15546 		device_xname(sc->sc_dev), __func__));
15547 
15548 	if (sc->sc_type < WM_T_PCH2)
15549 		return;
15550 
15551 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15552 
15553 	if (gate)
15554 		reg |= EXTCNFCTR_GATE_PHY_CFG;
15555 	else
15556 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
15557 
15558 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
15559 }
15560 
15561 static int
15562 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
15563 {
15564 	uint32_t fwsm, reg;
15565 	int rv;
15566 
15567 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15568 		device_xname(sc->sc_dev), __func__));
15569 
15570 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
15571 	wm_gate_hw_phy_config_ich8lan(sc, true);
15572 
15573 	/* Disable ULP */
15574 	wm_ulp_disable(sc);
15575 
15576 	/* Acquire PHY semaphore */
15577 	rv = sc->phy.acquire(sc);
15578 	if (rv != 0) {
15579 		DPRINTF(sc, WM_DEBUG_INIT,
15580 		    ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
15581 		return rv;
15582 	}
15583 
15584 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
15585 	 * inaccessible and resetting the PHY is not blocked, toggle the
15586 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
15587 	 */
15588 	fwsm = CSR_READ(sc, WMREG_FWSM);
15589 	switch (sc->sc_type) {
15590 	case WM_T_PCH_LPT:
15591 	case WM_T_PCH_SPT:
15592 	case WM_T_PCH_CNP:
15593 		if (wm_phy_is_accessible_pchlan(sc))
15594 			break;
15595 
15596 		/* Before toggling LANPHYPC, see if PHY is accessible by
15597 		 * forcing MAC to SMBus mode first.
15598 		 */
15599 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
15600 		reg |= CTRL_EXT_FORCE_SMBUS;
15601 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15602 #if 0
15603 		/* XXX Isn't this required??? */
15604 		CSR_WRITE_FLUSH(sc);
15605 #endif
15606 		/* Wait 50 milliseconds for MAC to finish any retries
15607 		 * that it might be trying to perform from previous
15608 		 * attempts to acknowledge any phy read requests.
15609 		 */
15610 		delay(50 * 1000);
15611 		/* FALLTHROUGH */
15612 	case WM_T_PCH2:
15613 		if (wm_phy_is_accessible_pchlan(sc) == true)
15614 			break;
15615 		/* FALLTHROUGH */
15616 	case WM_T_PCH:
15617 		if (sc->sc_type == WM_T_PCH)
15618 			if ((fwsm & FWSM_FW_VALID) != 0)
15619 				break;
15620 
15621 		if (wm_phy_resetisblocked(sc) == true) {
15622 			device_printf(sc->sc_dev, "XXX reset is blocked(2)\n");
15623 			break;
15624 		}
15625 
15626 		/* Toggle LANPHYPC Value bit */
15627 		wm_toggle_lanphypc_pch_lpt(sc);
15628 
15629 		if (sc->sc_type >= WM_T_PCH_LPT) {
15630 			if (wm_phy_is_accessible_pchlan(sc) == true)
15631 				break;
15632 
15633 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
15634 			 * so ensure that the MAC is also out of SMBus mode
15635 			 */
15636 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
15637 			reg &= ~CTRL_EXT_FORCE_SMBUS;
15638 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15639 
15640 			if (wm_phy_is_accessible_pchlan(sc) == true)
15641 				break;
15642 			rv = -1;
15643 		}
15644 		break;
15645 	default:
15646 		break;
15647 	}
15648 
15649 	/* Release semaphore */
15650 	sc->phy.release(sc);
15651 
15652 	if (rv == 0) {
15653 		/* Check to see if able to reset PHY.  Print error if not */
15654 		if (wm_phy_resetisblocked(sc)) {
15655 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
15656 			goto out;
15657 		}
15658 
15659 		/* Reset the PHY before any access to it.  Doing so, ensures
15660 		 * that the PHY is in a known good state before we read/write
15661 		 * PHY registers.  The generic reset is sufficient here,
15662 		 * because we haven't determined the PHY type yet.
15663 		 */
15664 		if (wm_reset_phy(sc) != 0)
15665 			goto out;
15666 
15667 		/* On a successful reset, possibly need to wait for the PHY
15668 		 * to quiesce to an accessible state before returning control
15669 		 * to the calling function.  If the PHY does not quiesce, then
		 * return E1000E_BLK_PHY_RESET, as this is the condition that
		 * the PHY is in.
15672 		 */
15673 		if (wm_phy_resetisblocked(sc))
15674 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
15675 	}
15676 
15677 out:
15678 	/* Ungate automatic PHY configuration on non-managed 82579 */
15679 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
15680 		delay(10*1000);
15681 		wm_gate_hw_phy_config_ich8lan(sc, false);
15682 	}
15683 
15684 	return 0;
15685 }
15686 
15687 static void
15688 wm_init_manageability(struct wm_softc *sc)
15689 {
15690 
15691 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15692 		device_xname(sc->sc_dev), __func__));
15693 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
15694 
15695 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
15696 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
15697 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
15698 
15699 		/* Disable hardware interception of ARP */
15700 		manc &= ~MANC_ARP_EN;
15701 
15702 		/* Enable receiving management packets to the host */
15703 		if (sc->sc_type >= WM_T_82571) {
15704 			manc |= MANC_EN_MNG2HOST;
15705 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
15706 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
15707 		}
15708 
15709 		CSR_WRITE(sc, WMREG_MANC, manc);
15710 	}
15711 }
15712 
15713 static void
15714 wm_release_manageability(struct wm_softc *sc)
15715 {
15716 
15717 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
15718 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
15719 
15720 		manc |= MANC_ARP_EN;
15721 		if (sc->sc_type >= WM_T_82571)
15722 			manc &= ~MANC_EN_MNG2HOST;
15723 
15724 		CSR_WRITE(sc, WMREG_MANC, manc);
15725 	}
15726 }
15727 
15728 static void
15729 wm_get_wakeup(struct wm_softc *sc)
15730 {
15731 
15732 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
15733 	switch (sc->sc_type) {
15734 	case WM_T_82573:
15735 	case WM_T_82583:
15736 		sc->sc_flags |= WM_F_HAS_AMT;
15737 		/* FALLTHROUGH */
15738 	case WM_T_80003:
15739 	case WM_T_82575:
15740 	case WM_T_82576:
15741 	case WM_T_82580:
15742 	case WM_T_I350:
15743 	case WM_T_I354:
15744 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
15745 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
15746 		/* FALLTHROUGH */
15747 	case WM_T_82541:
15748 	case WM_T_82541_2:
15749 	case WM_T_82547:
15750 	case WM_T_82547_2:
15751 	case WM_T_82571:
15752 	case WM_T_82572:
15753 	case WM_T_82574:
15754 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
15755 		break;
15756 	case WM_T_ICH8:
15757 	case WM_T_ICH9:
15758 	case WM_T_ICH10:
15759 	case WM_T_PCH:
15760 	case WM_T_PCH2:
15761 	case WM_T_PCH_LPT:
15762 	case WM_T_PCH_SPT:
15763 	case WM_T_PCH_CNP:
15764 		sc->sc_flags |= WM_F_HAS_AMT;
15765 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
15766 		break;
15767 	default:
15768 		break;
15769 	}
15770 
15771 	/* 1: HAS_MANAGE */
15772 	if (wm_enable_mng_pass_thru(sc) != 0)
15773 		sc->sc_flags |= WM_F_HAS_MANAGE;
15774 
15775 	/*
15776 	 * Note that the WOL flags is set after the resetting of the eeprom
15777 	 * stuff
15778 	 */
15779 }
15780 
15781 /*
15782  * Unconfigure Ultra Low Power mode.
15783  * Only for I217 and newer (see below).
15784  */
15785 static int
15786 wm_ulp_disable(struct wm_softc *sc)
15787 {
15788 	uint32_t reg;
15789 	uint16_t phyreg;
15790 	int i = 0, rv;
15791 
15792 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15793 		device_xname(sc->sc_dev), __func__));
15794 	/* Exclude old devices */
15795 	if ((sc->sc_type < WM_T_PCH_LPT)
15796 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
15797 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
15798 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
15799 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
15800 		return 0;
15801 
15802 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
		/* Request that the ME un-configure ULP mode in the PHY */
15804 		reg = CSR_READ(sc, WMREG_H2ME);
15805 		reg &= ~H2ME_ULP;
15806 		reg |= H2ME_ENFORCE_SETTINGS;
15807 		CSR_WRITE(sc, WMREG_H2ME, reg);
15808 
15809 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
15810 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
15811 			if (i++ == 30) {
15812 				device_printf(sc->sc_dev, "%s timed out\n",
15813 				    __func__);
15814 				return -1;
15815 			}
15816 			delay(10 * 1000);
15817 		}
15818 		reg = CSR_READ(sc, WMREG_H2ME);
15819 		reg &= ~H2ME_ENFORCE_SETTINGS;
15820 		CSR_WRITE(sc, WMREG_H2ME, reg);
15821 
15822 		return 0;
15823 	}
15824 
15825 	/* Acquire semaphore */
15826 	rv = sc->phy.acquire(sc);
15827 	if (rv != 0) {
15828 		DPRINTF(sc, WM_DEBUG_INIT,
15829 		    ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
15830 		return rv;
15831 	}
15832 
15833 	/* Toggle LANPHYPC */
15834 	wm_toggle_lanphypc_pch_lpt(sc);
15835 
15836 	/* Unforce SMBus mode in PHY */
15837 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
15838 	if (rv != 0) {
15839 		uint32_t reg2;
15840 
15841 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
15842 		    __func__);
15843 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
15844 		reg2 |= CTRL_EXT_FORCE_SMBUS;
15845 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
15846 		delay(50 * 1000);
15847 
15848 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
15849 		    &phyreg);
15850 		if (rv != 0)
15851 			goto release;
15852 	}
15853 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
15854 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
15855 
15856 	/* Unforce SMBus mode in MAC */
15857 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
15858 	reg &= ~CTRL_EXT_FORCE_SMBUS;
15859 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15860 
15861 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
15862 	if (rv != 0)
15863 		goto release;
15864 	phyreg |= HV_PM_CTRL_K1_ENA;
15865 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
15866 
15867 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
15868 	    &phyreg);
15869 	if (rv != 0)
15870 		goto release;
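	/* Clear all ULP configuration bits and write them back, then set
	 * the START bit so that the PHY applies the new configuration.
	 */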
15871 	phyreg &= ~(I218_ULP_CONFIG1_IND
15872 	    | I218_ULP_CONFIG1_STICKY_ULP
15873 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
15874 	    | I218_ULP_CONFIG1_WOL_HOST
15875 	    | I218_ULP_CONFIG1_INBAND_EXIT
15876 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
15877 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
15878 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
15879 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
15880 	phyreg |= I218_ULP_CONFIG1_START;
15881 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
15882 
15883 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
15884 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
15885 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
15886 
15887 release:
15888 	/* Release semaphore */
15889 	sc->phy.release(sc);
15890 	wm_gmii_reset(sc);
15891 	delay(50 * 1000);
15892 
15893 	return rv;
15894 }
15895 
15896 /* WOL in the newer chipset interfaces (pchlan) */
15897 static int
15898 wm_enable_phy_wakeup(struct wm_softc *sc)
15899 {
15900 	device_t dev = sc->sc_dev;
15901 	uint32_t mreg, moff;
15902 	uint16_t wuce, wuc, wufc, preg;
15903 	int i, rv;
15904 
15905 	KASSERT(sc->sc_type >= WM_T_PCH);
15906 
15907 	/* Copy MAC RARs to PHY RARs */
15908 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
15909 
15910 	/* Activate PHY wakeup */
15911 	rv = sc->phy.acquire(sc);
15912 	if (rv != 0) {
15913 		device_printf(dev, "%s: failed to acquire semaphore\n",
15914 		    __func__);
15915 		return rv;
15916 	}
15917 
15918 	/*
15919 	 * Enable access to PHY wakeup registers.
15920 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
15921 	 */
15922 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
15923 	if (rv != 0) {
15924 		device_printf(dev,
15925 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
15926 		goto release;
15927 	}
15928 
15929 	/* Copy MAC MTA to PHY MTA */
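	/* Each 32-bit MTA entry is written as two 16-bit PHY registers. */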
15930 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
15931 		uint16_t lo, hi;
15932 
15933 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
15934 		lo = (uint16_t)(mreg & 0xffff);
15935 		hi = (uint16_t)((mreg >> 16) & 0xffff);
15936 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
15937 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
15938 	}
15939 
15940 	/* Configure PHY Rx Control register */
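	/* Mirror the MAC's RCTL (UPE, MPE, MO, BAM, PMCF) and CTRL (RFCE)
	 * bits into the PHY's BM_RCTL copy.
	 */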
15941 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
15942 	mreg = CSR_READ(sc, WMREG_RCTL);
15943 	if (mreg & RCTL_UPE)
15944 		preg |= BM_RCTL_UPE;
15945 	if (mreg & RCTL_MPE)
15946 		preg |= BM_RCTL_MPE;
15947 	preg &= ~(BM_RCTL_MO_MASK);
15948 	moff = __SHIFTOUT(mreg, RCTL_MO);
15949 	if (moff != 0)
15950 		preg |= moff << BM_RCTL_MO_SHIFT;
15951 	if (mreg & RCTL_BAM)
15952 		preg |= BM_RCTL_BAM;
15953 	if (mreg & RCTL_PMCF)
15954 		preg |= BM_RCTL_PMCF;
15955 	mreg = CSR_READ(sc, WMREG_CTRL);
15956 	if (mreg & CTRL_RFCE)
15957 		preg |= BM_RCTL_RFCE;
15958 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
15959 
15960 	wuc = WUC_APME | WUC_PME_EN;
15961 	wufc = WUFC_MAG;
15962 	/* Enable PHY wakeup in MAC register */
15963 	CSR_WRITE(sc, WMREG_WUC,
15964 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
15965 	CSR_WRITE(sc, WMREG_WUFC, wufc);
15966 
15967 	/* Configure and enable PHY wakeup in PHY registers */
15968 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
15969 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
15970 
15971 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
15972 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
15973 
15974 release:
15975 	sc->phy.release(sc);
15976 
	return rv;
15978 }
15979 
15980 /* Power down workaround on D3 */
15981 static void
15982 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
15983 {
15984 	uint32_t reg;
15985 	uint16_t phyreg;
15986 	int i;
15987 
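	/* Try at most twice: if the VR power-down mode does not latch on
	 * the first pass, reset the PHY and repeat once.
	 */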
15988 	for (i = 0; i < 2; i++) {
15989 		/* Disable link */
15990 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
15991 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
15992 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
15993 
15994 		/*
15995 		 * Call gig speed drop workaround on Gig disable before
15996 		 * accessing any PHY registers
15997 		 */
15998 		if (sc->sc_type == WM_T_ICH8)
15999 			wm_gig_downshift_workaround_ich8lan(sc);
16000 
16001 		/* Write VR power-down enable */
16002 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
16003 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
16004 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
16005 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
16006 
16007 		/* Read it back and test */
16008 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
16009 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
16010 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
16011 			break;
16012 
16013 		/* Issue PHY reset and repeat at most one more time */
16014 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
16015 	}
16016 }
16017 
16018 /*
16019  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
16020  *  @sc: pointer to the HW structure
16021  *
16022  *  During S0 to Sx transition, it is possible the link remains at gig
16023  *  instead of negotiating to a lower speed.  Before going to Sx, set
16024  *  'Gig Disable' to force link speed negotiation to a lower speed based on
16025  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
16026  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
16027  *  needs to be written.
 *  Parts that support (and are linked to a partner which supports) EEE in
16029  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
16030  *  than 10Mbps w/o EEE.
16031  */
16032 static void
16033 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
16034 {
16035 	device_t dev = sc->sc_dev;
16036 	struct ethercom *ec = &sc->sc_ethercom;
16037 	uint32_t phy_ctrl;
16038 	int rv;
16039 
16040 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
16041 	phy_ctrl |= PHY_CTRL_GBE_DIS;
16042 
16043 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
16044 
16045 	if (sc->sc_phytype == WMPHY_I217) {
16046 		uint16_t devid = sc->sc_pcidevid;
16047 
16048 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
16049 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
16050 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
16051 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
16052 		    (sc->sc_type >= WM_T_PCH_SPT))
16053 			CSR_WRITE(sc, WMREG_FEXTNVM6,
16054 			    CSR_READ(sc, WMREG_FEXTNVM6)
16055 			    & ~FEXTNVM6_REQ_PLL_CLK);
16056 
16057 		if (sc->phy.acquire(sc) != 0)
16058 			goto out;
16059 
16060 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
16061 			uint16_t eee_advert;
16062 
16063 			rv = wm_read_emi_reg_locked(dev,
16064 			    I217_EEE_ADVERTISEMENT, &eee_advert);
16065 			if (rv)
16066 				goto release;
16067 
16068 			/*
16069 			 * Disable LPLU if both link partners support 100BaseT
16070 			 * EEE and 100Full is advertised on both ends of the
16071 			 * link, and enable Auto Enable LPI since there will
16072 			 * be no driver to enable LPI while in Sx.
16073 			 */
16074 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
16075 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
16076 				uint16_t anar, phy_reg;
16077 
16078 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
16079 				    &anar);
16080 				if (anar & ANAR_TX_FD) {
16081 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
16082 					    PHY_CTRL_NOND0A_LPLU);
16083 
16084 					/* Set Auto Enable LPI after link up */
16085 					sc->phy.readreg_locked(dev, 2,
16086 					    I217_LPI_GPIO_CTRL, &phy_reg);
16087 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
16088 					sc->phy.writereg_locked(dev, 2,
16089 					    I217_LPI_GPIO_CTRL, phy_reg);
16090 				}
16091 			}
16092 		}
16093 
16094 		/*
16095 		 * For i217 Intel Rapid Start Technology support,
16096 		 * when the system is going into Sx and no manageability engine
16097 		 * is present, the driver must configure proxy to reset only on
16098 		 * power good.	LPI (Low Power Idle) state must also reset only
16099 		 * on power good, as well as the MTA (Multicast table array).
16100 		 * The SMBus release must also be disabled on LCD reset.
16101 		 */
16102 
16103 		/*
16104 		 * Enable MTA to reset for Intel Rapid Start Technology
16105 		 * Support
16106 		 */
16107 
16108 release:
16109 		sc->phy.release(sc);
16110 	}
16111 out:
16112 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
16113 
16114 	if (sc->sc_type == WM_T_ICH8)
16115 		wm_gig_downshift_workaround_ich8lan(sc);
16116 
16117 	if (sc->sc_type >= WM_T_PCH) {
16118 		wm_oem_bits_config_ich8lan(sc, false);
16119 
16120 		/* Reset PHY to activate OEM bits on 82577/8 */
16121 		if (sc->sc_type == WM_T_PCH)
16122 			wm_reset_phy(sc);
16123 
16124 		if (sc->phy.acquire(sc) != 0)
16125 			return;
16126 		wm_write_smbus_addr(sc);
16127 		sc->phy.release(sc);
16128 	}
16129 }
16130 
16131 /*
16132  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
16133  *  @sc: pointer to the HW structure
16134  *
16135  *  During Sx to S0 transitions on non-managed devices or managed devices
16136  *  on which PHY resets are not blocked, if the PHY registers cannot be
 *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
16138  *  the PHY.
16139  *  On i217, setup Intel Rapid Start Technology.
16140  */
16141 static int
16142 wm_resume_workarounds_pchlan(struct wm_softc *sc)
16143 {
16144 	device_t dev = sc->sc_dev;
16145 	int rv;
16146 
16147 	if (sc->sc_type < WM_T_PCH2)
16148 		return 0;
16149 
16150 	rv = wm_init_phy_workarounds_pchlan(sc);
16151 	if (rv != 0)
16152 		return rv;
16153 
	/* For i217 Intel Rapid Start Technology support, when the system
	 * is transitioning from Sx and no manageability engine is present,
16156 	 * configure SMBus to restore on reset, disable proxy, and enable
16157 	 * the reset on MTA (Multicast table array).
16158 	 */
16159 	if (sc->sc_phytype == WMPHY_I217) {
16160 		uint16_t phy_reg;
16161 
16162 		rv = sc->phy.acquire(sc);
16163 		if (rv != 0)
16164 			return rv;
16165 
16166 		/* Clear Auto Enable LPI after link up */
16167 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
16168 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
16169 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
16170 
16171 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
16172 			/* Restore clear on SMB if no manageability engine
16173 			 * is present
16174 			 */
16175 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
16176 			    &phy_reg);
16177 			if (rv != 0)
16178 				goto release;
16179 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
16180 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
16181 
16182 			/* Disable Proxy */
16183 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
16184 		}
16185 		/* Enable reset on MTA */
		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
16187 		if (rv != 0)
16188 			goto release;
16189 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
16190 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
16191 
16192 release:
16193 		sc->phy.release(sc);
16194 		return rv;
16195 	}
16196 
16197 	return 0;
16198 }
16199 
16200 static void
16201 wm_enable_wakeup(struct wm_softc *sc)
16202 {
16203 	uint32_t reg, pmreg;
16204 	pcireg_t pmode;
16205 	int rv = 0;
16206 
16207 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16208 		device_xname(sc->sc_dev), __func__));
16209 
16210 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
16211 	    &pmreg, NULL) == 0)
16212 		return;
16213 
16214 	if ((sc->sc_flags & WM_F_WOL) == 0)
16215 		goto pme;
16216 
16217 	/* Advertise the wakeup capability */
16218 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
16219 	    | CTRL_SWDPIN(3));
16220 
16221 	/* Keep the laser running on fiber adapters */
16222 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
16223 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
16224 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
16225 		reg |= CTRL_EXT_SWDPIN(3);
16226 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
16227 	}
16228 
16229 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
16230 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
16231 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
16232 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
16233 		wm_suspend_workarounds_ich8lan(sc);
16234 
16235 #if 0	/* For the multicast packet */
16236 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
16237 	reg |= WUFC_MC;
16238 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
16239 #endif
16240 
16241 	if (sc->sc_type >= WM_T_PCH) {
16242 		rv = wm_enable_phy_wakeup(sc);
16243 		if (rv != 0)
16244 			goto pme;
16245 	} else {
16246 		/* Enable wakeup by the MAC */
16247 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
16248 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
16249 	}
16250 
16251 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
16252 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
16253 		|| (sc->sc_type == WM_T_PCH2))
16254 	    && (sc->sc_phytype == WMPHY_IGP_3))
16255 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
16256 
16257 pme:
16258 	/* Request PME */
16259 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
16260 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
16261 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
16262 		/* For WOL */
16263 		pmode |= PCI_PMCSR_PME_EN;
16264 	} else {
16265 		/* Disable WOL */
16266 		pmode &= ~PCI_PMCSR_PME_EN;
16267 	}
16268 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
16269 }
16270 
16271 /* Disable ASPM L0s and/or L1 for workaround */
16272 static void
16273 wm_disable_aspm(struct wm_softc *sc)
16274 {
16275 	pcireg_t reg, mask = 0;
	const char *str = "";
16277 
16278 	/*
	 * Only for PCIe devices which have the PCIe capability in PCI
	 * config space.
16281 	 */
16282 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
16283 		return;
16284 
16285 	switch (sc->sc_type) {
16286 	case WM_T_82571:
16287 	case WM_T_82572:
16288 		/*
16289 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
16290 		 * State Power management L1 State (ASPM L1).
16291 		 */
16292 		mask = PCIE_LCSR_ASPM_L1;
16293 		str = "L1 is";
16294 		break;
16295 	case WM_T_82573:
16296 	case WM_T_82574:
16297 	case WM_T_82583:
16298 		/*
16299 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
16300 		 *
		 * The 82574 and 82583 do not support PCIe ASPM L0s with
		 * some chipsets.  The 82574 and 82583 documents say that
		 * disabling L0s with those specific chipsets is sufficient,
		 * but we follow what the Intel em driver does and disable
		 * both L0s and L1.
16305 		 *
16306 		 * References:
16307 		 * Errata 8 of the Specification Update of i82573.
16308 		 * Errata 20 of the Specification Update of i82574.
16309 		 * Errata 9 of the Specification Update of i82583.
16310 		 */
16311 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
16312 		str = "L0s and L1 are";
16313 		break;
16314 	default:
16315 		return;
16316 	}
16317 
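	/* Clear the offending ASPM bits in the PCIe Link Control register. */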
16318 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
16319 	    sc->sc_pcixe_capoff + PCIE_LCSR);
16320 	reg &= ~mask;
16321 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
16322 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
16323 
16324 	/* Print only in wm_attach() */
16325 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
16326 		aprint_verbose_dev(sc->sc_dev,
16327 		    "ASPM %s disabled to workaround the errata.\n", str);
16328 }
16329 
16330 /* LPLU */
16331 
16332 static void
16333 wm_lplu_d0_disable(struct wm_softc *sc)
16334 {
16335 	struct mii_data *mii = &sc->sc_mii;
16336 	uint32_t reg;
16337 	uint16_t phyval;
16338 
16339 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16340 		device_xname(sc->sc_dev), __func__));
16341 
16342 	if (sc->sc_phytype == WMPHY_IFE)
16343 		return;
16344 
16345 	switch (sc->sc_type) {
16346 	case WM_T_82571:
16347 	case WM_T_82572:
16348 	case WM_T_82573:
16349 	case WM_T_82575:
16350 	case WM_T_82576:
16351 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
16352 		phyval &= ~PMR_D0_LPLU;
16353 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
16354 		break;
16355 	case WM_T_82580:
16356 	case WM_T_I350:
16357 	case WM_T_I210:
16358 	case WM_T_I211:
16359 		reg = CSR_READ(sc, WMREG_PHPM);
16360 		reg &= ~PHPM_D0A_LPLU;
16361 		CSR_WRITE(sc, WMREG_PHPM, reg);
16362 		break;
16363 	case WM_T_82574:
16364 	case WM_T_82583:
16365 	case WM_T_ICH8:
16366 	case WM_T_ICH9:
16367 	case WM_T_ICH10:
16368 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
16369 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
16370 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
16371 		CSR_WRITE_FLUSH(sc);
16372 		break;
16373 	case WM_T_PCH:
16374 	case WM_T_PCH2:
16375 	case WM_T_PCH_LPT:
16376 	case WM_T_PCH_SPT:
16377 	case WM_T_PCH_CNP:
16378 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
16379 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
16380 		if (wm_phy_resetisblocked(sc) == false)
16381 			phyval |= HV_OEM_BITS_ANEGNOW;
16382 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
16383 		break;
16384 	default:
16385 		break;
16386 	}
16387 }
16388 
16389 /* EEE */
16390 
16391 static int
16392 wm_set_eee_i350(struct wm_softc *sc)
16393 {
16394 	struct ethercom *ec = &sc->sc_ethercom;
16395 	uint32_t ipcnfg, eeer;
16396 	uint32_t ipcnfg_mask
16397 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
16398 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
16399 
16400 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
16401 
16402 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
16403 	eeer = CSR_READ(sc, WMREG_EEER);
16404 
16405 	/* Enable or disable per user setting */
16406 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
16407 		ipcnfg |= ipcnfg_mask;
16408 		eeer |= eeer_mask;
16409 	} else {
16410 		ipcnfg &= ~ipcnfg_mask;
16411 		eeer &= ~eeer_mask;
16412 	}
16413 
16414 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
16415 	CSR_WRITE(sc, WMREG_EEER, eeer);
16416 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
16417 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
16418 
16419 	return 0;
16420 }
16421 
16422 static int
16423 wm_set_eee_pchlan(struct wm_softc *sc)
16424 {
16425 	device_t dev = sc->sc_dev;
16426 	struct ethercom *ec = &sc->sc_ethercom;
16427 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
16428 	int rv;
16429 
16430 	switch (sc->sc_phytype) {
16431 	case WMPHY_82579:
16432 		lpa = I82579_EEE_LP_ABILITY;
16433 		pcs_status = I82579_EEE_PCS_STATUS;
16434 		adv_addr = I82579_EEE_ADVERTISEMENT;
16435 		break;
16436 	case WMPHY_I217:
16437 		lpa = I217_EEE_LP_ABILITY;
16438 		pcs_status = I217_EEE_PCS_STATUS;
16439 		adv_addr = I217_EEE_ADVERTISEMENT;
16440 		break;
16441 	default:
16442 		return 0;
16443 	}
16444 
16445 	rv = sc->phy.acquire(sc);
16446 	if (rv != 0) {
16447 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
16448 		return rv;
16449 	}
16450 
16451 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
16452 	if (rv != 0)
16453 		goto release;
16454 
16455 	/* Clear bits that enable EEE in various speeds */
16456 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
16457 
16458 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
16459 		/* Save off link partner's EEE ability */
16460 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
16461 		if (rv != 0)
16462 			goto release;
16463 
16464 		/* Read EEE advertisement */
16465 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
16466 			goto release;
16467 
16468 		/*
16469 		 * Enable EEE only for speeds in which the link partner is
16470 		 * EEE capable and for which we advertise EEE.
16471 		 */
16472 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
16473 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
16474 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
16475 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
16476 			if ((data & ANLPAR_TX_FD) != 0)
16477 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
16478 			else {
16479 				/*
16480 				 * EEE is not supported in 100Half, so ignore
16481 				 * partner's EEE in 100 ability if full-duplex
16482 				 * is not advertised.
16483 				 */
16484 				sc->eee_lp_ability
16485 				    &= ~AN_EEEADVERT_100_TX;
16486 			}
16487 		}
16488 	}
16489 
16490 	if (sc->sc_phytype == WMPHY_82579) {
16491 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
16492 		if (rv != 0)
16493 			goto release;
16494 
16495 		data &= ~I82579_LPI_PLL_SHUT_100;
16496 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
16497 	}
16498 
16499 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
16500 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
16501 		goto release;
16502 
16503 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
16504 release:
16505 	sc->phy.release(sc);
16506 
16507 	return rv;
16508 }
16509 
16510 static int
16511 wm_set_eee(struct wm_softc *sc)
16512 {
16513 	struct ethercom *ec = &sc->sc_ethercom;
16514 
16515 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
16516 		return 0;
16517 
16518 	if (sc->sc_type == WM_T_I354) {
16519 		/* I354 uses an external PHY */
16520 		return 0; /* not yet */
16521 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
16522 		return wm_set_eee_i350(sc);
16523 	else if (sc->sc_type >= WM_T_PCH2)
16524 		return wm_set_eee_pchlan(sc);
16525 
16526 	return 0;
16527 }
16528 
16529 /*
16530  * Workarounds (mainly PHY related).
16531  * Basically, PHY's workarounds are in the PHY drivers.
16532  */
16533 
16534 /* Workaround for 82566 Kumeran PCS lock loss */
16535 static int
16536 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
16537 {
16538 	struct mii_data *mii = &sc->sc_mii;
16539 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
16540 	int i, reg, rv;
16541 	uint16_t phyreg;
16542 
16543 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16544 		device_xname(sc->sc_dev), __func__));
16545 
16546 	/* If the link is not up, do nothing */
16547 	if ((status & STATUS_LU) == 0)
16548 		return 0;
16549 
16550 	/* Nothing to do if the link is other than 1Gbps */
16551 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
16552 		return 0;
16553 
16554 	for (i = 0; i < 10; i++) {
16555 		/* read twice */
16556 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
16557 		if (rv != 0)
16558 			return rv;
16559 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
16560 		if (rv != 0)
16561 			return rv;
16562 
16563 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
16564 			goto out;	/* GOOD! */
16565 
16566 		/* Reset the PHY */
16567 		wm_reset_phy(sc);
16568 		delay(5*1000);
16569 	}
16570 
16571 	/* Disable GigE link negotiation */
16572 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
16573 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
16574 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
16575 
16576 	/*
16577 	 * Call gig speed drop workaround on Gig disable before accessing
16578 	 * any PHY registers.
16579 	 */
16580 	wm_gig_downshift_workaround_ich8lan(sc);
16581 
16582 out:
16583 	return 0;
16584 }
16585 
16586 /*
16587  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
16588  *  @sc: pointer to the HW structure
16589  *
 *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
16591  *  LPLU, Gig disable, MDIC PHY reset):
16592  *    1) Set Kumeran Near-end loopback
16593  *    2) Clear Kumeran Near-end loopback
16594  *  Should only be called for ICH8[m] devices with any 1G Phy.
16595  */
16596 static void
16597 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
16598 {
16599 	uint16_t kmreg;
16600 
16601 	/* Only for igp3 */
16602 	if (sc->sc_phytype == WMPHY_IGP_3) {
16603 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
16604 			return;
16605 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
16606 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
16607 			return;
16608 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
16609 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
16610 	}
16611 }
16612 
16613 /*
16614  * Workaround for pch's PHYs
16615  * XXX should be moved to new PHY driver?
16616  */
16617 static int
16618 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
16619 {
16620 	device_t dev = sc->sc_dev;
16621 	struct mii_data *mii = &sc->sc_mii;
16622 	struct mii_softc *child;
16623 	uint16_t phy_data, phyrev = 0;
16624 	int phytype = sc->sc_phytype;
16625 	int rv;
16626 
16627 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16628 		device_xname(dev), __func__));
16629 	KASSERT(sc->sc_type == WM_T_PCH);
16630 
16631 	/* Set MDIO slow mode before any other MDIO access */
16632 	if (phytype == WMPHY_82577)
16633 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
16634 			return rv;
16635 
16636 	child = LIST_FIRST(&mii->mii_phys);
16637 	if (child != NULL)
16638 		phyrev = child->mii_mpd_rev;
16639 
	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
16641 	if ((child != NULL) &&
16642 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
16643 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
16644 		/* Disable generation of early preamble (0x4431) */
16645 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
16646 		    &phy_data);
16647 		if (rv != 0)
16648 			return rv;
16649 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
16650 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
16651 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
16652 		    phy_data);
16653 		if (rv != 0)
16654 			return rv;
16655 
16656 		/* Preamble tuning for SSC */
16657 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
16658 		if (rv != 0)
16659 			return rv;
16660 	}
16661 
16662 	/* 82578 */
16663 	if (phytype == WMPHY_82578) {
16664 		/*
16665 		 * Return registers to default by doing a soft reset then
16666 		 * writing 0x3140 to the control register
16667 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
16668 		 */
16669 		if ((child != NULL) && (phyrev < 2)) {
16670 			PHY_RESET(child);
16671 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
16672 			if (rv != 0)
16673 				return rv;
16674 		}
16675 	}
16676 
16677 	/* Select page 0 */
16678 	if ((rv = sc->phy.acquire(sc)) != 0)
16679 		return rv;
16680 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
16681 	sc->phy.release(sc);
16682 	if (rv != 0)
16683 		return rv;
16684 
16685 	/*
16686 	 * Configure the K1 Si workaround during phy reset assuming there is
16687 	 * link so that it disables K1 if link is in 1Gbps.
16688 	 */
16689 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
16690 		return rv;
16691 
16692 	/* Workaround for link disconnects on a busy hub in half duplex */
16693 	rv = sc->phy.acquire(sc);
16694 	if (rv)
16695 		return rv;
16696 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
16697 	if (rv)
16698 		goto release;
16699 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
16700 	    phy_data & 0x00ff);
16701 	if (rv)
16702 		goto release;
16703 
16704 	/* Set MSE higher to enable link to stay up when noise is high */
16705 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
16706 release:
16707 	sc->phy.release(sc);
16708 
16709 	return rv;
16710 }
16711 
16712 /*
16713  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
16714  *  @sc:   pointer to the HW structure
16715  */
16716 static void
16717 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
16718 {
16719 
16720 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16721 		device_xname(sc->sc_dev), __func__));
16722 
16723 	if (sc->phy.acquire(sc) != 0)
16724 		return;
16725 
16726 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
16727 
16728 	sc->phy.release(sc);
16729 }
16730 
16731 static void
16732 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
16733 {
16734 	device_t dev = sc->sc_dev;
16735 	uint32_t mac_reg;
16736 	uint16_t i, wuce;
16737 	int count;
16738 
16739 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16740 		device_xname(dev), __func__));
16741 
16742 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
16743 		return;
16744 
16745 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
16746 	count = wm_rar_count(sc);
16747 	for (i = 0; i < count; i++) {
16748 		uint16_t lo, hi;
16749 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
16750 		lo = (uint16_t)(mac_reg & 0xffff);
16751 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
16752 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
16753 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
16754 
16755 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
16756 		lo = (uint16_t)(mac_reg & 0xffff);
16757 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
16758 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
16759 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
16760 	}
16761 
16762 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
16763 }
16764 
16765 /*
16766  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
16767  *  with 82579 PHY
16768  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
16769  */
16770 static int
16771 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
16772 {
16773 	device_t dev = sc->sc_dev;
16774 	int rar_count;
16775 	int rv;
16776 	uint32_t mac_reg;
16777 	uint16_t dft_ctrl, data;
16778 	uint16_t i;
16779 
16780 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16781 		device_xname(dev), __func__));
16782 
16783 	if (sc->sc_type < WM_T_PCH2)
16784 		return 0;
16785 
16786 	/* Acquire PHY semaphore */
16787 	rv = sc->phy.acquire(sc);
16788 	if (rv != 0)
16789 		return rv;
16790 
16791 	/* Disable Rx path while enabling/disabling workaround */
16792 	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
16793 	if (rv != 0)
16794 		goto out;
16795 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
16796 	    dft_ctrl | (1 << 14));
16797 	if (rv != 0)
16798 		goto out;
16799 
16800 	if (enable) {
16801 		/* Write Rx addresses (rar_entry_count for RAL/H, and
16802 		 * SHRAL/H) and initial CRC values to the MAC
16803 		 */
16804 		rar_count = wm_rar_count(sc);
16805 		for (i = 0; i < rar_count; i++) {
16806 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
16807 			uint32_t addr_high, addr_low;
16808 
16809 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
16810 			if (!(addr_high & RAL_AV))
16811 				continue;
16812 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
16813 			mac_addr[0] = (addr_low & 0xFF);
16814 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
16815 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
16816 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
16817 			mac_addr[4] = (addr_high & 0xFF);
16818 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
16819 
16820 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
16821 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
16822 		}
16823 
16824 		/* Write Rx addresses to the PHY */
16825 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
16826 	}
16827 
16828 	/*
16829 	 * If enable ==
16830 	 *	true: Enable jumbo frame workaround in the MAC.
16831 	 *	false: Write MAC register values back to h/w defaults.
16832 	 */
16833 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
16834 	if (enable) {
16835 		mac_reg &= ~(1 << 14);
16836 		mac_reg |= (7 << 15);
16837 	} else
16838 		mac_reg &= ~(0xf << 14);
16839 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
16840 
16841 	mac_reg = CSR_READ(sc, WMREG_RCTL);
16842 	if (enable) {
16843 		mac_reg |= RCTL_SECRC;
16844 		sc->sc_rctl |= RCTL_SECRC;
16845 		sc->sc_flags |= WM_F_CRC_STRIP;
16846 	} else {
16847 		mac_reg &= ~RCTL_SECRC;
16848 		sc->sc_rctl &= ~RCTL_SECRC;
16849 		sc->sc_flags &= ~WM_F_CRC_STRIP;
16850 	}
16851 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
16852 
16853 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
16854 	if (rv != 0)
16855 		goto out;
16856 	if (enable)
16857 		data |= 1 << 0;
16858 	else
16859 		data &= ~(1 << 0);
16860 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
16861 	if (rv != 0)
16862 		goto out;
16863 
16864 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
16865 	if (rv != 0)
16866 		goto out;
16867 	/*
	 * XXX FreeBSD and Linux do the same thing here: they set the same
	 * value in both the enable case and the disable case. Is that
	 * correct?
16870 	 */
16871 	data &= ~(0xf << 8);
16872 	data |= (0xb << 8);
16873 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
16874 	if (rv != 0)
16875 		goto out;
16876 
16877 	/*
16878 	 * If enable ==
16879 	 *	true: Enable jumbo frame workaround in the PHY.
16880 	 *	false: Write PHY register values back to h/w defaults.
16881 	 */
16882 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
16883 	if (rv != 0)
16884 		goto out;
16885 	data &= ~(0x7F << 5);
16886 	if (enable)
16887 		data |= (0x37 << 5);
16888 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
16889 	if (rv != 0)
16890 		goto out;
16891 
16892 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
16893 	if (rv != 0)
16894 		goto out;
16895 	if (enable)
16896 		data &= ~(1 << 13);
16897 	else
16898 		data |= (1 << 13);
16899 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
16900 	if (rv != 0)
16901 		goto out;
16902 
16903 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
16904 	if (rv != 0)
16905 		goto out;
16906 	data &= ~(0x3FF << 2);
16907 	if (enable)
16908 		data |= (I82579_TX_PTR_GAP << 2);
16909 	else
16910 		data |= (0x8 << 2);
16911 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
16912 	if (rv != 0)
16913 		goto out;
16914 
16915 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
16916 	    enable ? 0xf100 : 0x7e00);
16917 	if (rv != 0)
16918 		goto out;
16919 
16920 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
16921 	if (rv != 0)
16922 		goto out;
16923 	if (enable)
16924 		data |= 1 << 10;
16925 	else
16926 		data &= ~(1 << 10);
16927 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
16928 	if (rv != 0)
16929 		goto out;
16930 
16931 	/* Re-enable Rx path after enabling/disabling workaround */
16932 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
16933 	    dft_ctrl & ~(1 << 14));
16934 
16935 out:
16936 	sc->phy.release(sc);
16937 
16938 	return rv;
16939 }
16940 
16941 /*
16942  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
16943  *  done after every PHY reset.
16944  */
16945 static int
16946 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
16947 {
16948 	device_t dev = sc->sc_dev;
16949 	int rv;
16950 
16951 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16952 		device_xname(dev), __func__));
16953 	KASSERT(sc->sc_type == WM_T_PCH2);
16954 
16955 	/* Set MDIO slow mode before any other MDIO access */
16956 	rv = wm_set_mdio_slow_mode_hv(sc);
16957 	if (rv != 0)
16958 		return rv;
16959 
16960 	rv = sc->phy.acquire(sc);
16961 	if (rv != 0)
16962 		return rv;
16963 	/* Set MSE higher to enable link to stay up when noise is high */
16964 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
16965 	if (rv != 0)
16966 		goto release;
16967 	/* Drop link after 5 times MSE threshold was reached */
16968 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
16969 release:
16970 	sc->phy.release(sc);
16971 
16972 	return rv;
16973 }
16974 
16975 /**
16976  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
16977  *  @link: link up bool flag
16978  *
16979  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
16980  *  preventing further DMA write requests.  Workaround the issue by disabling
 *  the de-assertion of the clock request when in 1Gbps mode.
16982  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
16983  *  speeds in order to avoid Tx hangs.
16984  **/
16985 static int
16986 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
16987 {
16988 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
16989 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
16990 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
16991 	uint16_t phyreg;
16992 
16993 	if (link && (speed == STATUS_SPEED_1000)) {
16994 		int rv;
16995 
16996 		rv = sc->phy.acquire(sc);
16997 		if (rv != 0)
16998 			return rv;
16999 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
17000 		    &phyreg);
17001 		if (rv != 0)
17002 			goto release;
17003 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
17004 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
17005 		if (rv != 0)
17006 			goto release;
17007 		delay(20);
17008 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
17009 
17010 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
17011 		    &phyreg);
17012 release:
17013 		sc->phy.release(sc);
17014 		return rv;
17015 	}
17016 
17017 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
17018 
17019 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
17020 	if (((child != NULL) && (child->mii_mpd_rev > 5))
17021 	    || !link
17022 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
17023 		goto update_fextnvm6;
17024 
17025 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
17026 
17027 	/* Clear link status transmit timeout */
17028 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
17029 	if (speed == STATUS_SPEED_100) {
17030 		/* Set inband Tx timeout to 5x10us for 100Half */
17031 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
17032 
17033 		/* Do not extend the K1 entry latency for 100Half */
17034 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
17035 	} else {
17036 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
17037 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
17038 
17039 		/* Extend the K1 entry latency for 10 Mbps */
17040 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
17041 	}
17042 
17043 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
17044 
17045 update_fextnvm6:
17046 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
17047 	return 0;
17048 }
17049 
17050 /*
17051  *  wm_k1_gig_workaround_hv - K1 Si workaround
17052  *  @sc:   pointer to the HW structure
17053  *  @link: link up bool flag
17054  *
17055  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
 *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
17057  *  If link is down, the function will restore the default K1 setting located
17058  *  in the NVM.
17059  */
17060 static int
17061 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
17062 {
17063 	int k1_enable = sc->sc_nvm_k1_enabled;
17064 	int rv;
17065 
17066 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17067 		device_xname(sc->sc_dev), __func__));
17068 
17069 	rv = sc->phy.acquire(sc);
17070 	if (rv != 0)
17071 		return rv;
17072 
17073 	if (link) {
17074 		k1_enable = 0;
17075 
17076 		/* Link stall fix for link up */
17077 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
17078 		    0x0100);
17079 	} else {
17080 		/* Link stall fix for link down */
17081 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
17082 		    0x4100);
17083 	}
17084 
17085 	wm_configure_k1_ich8lan(sc, k1_enable);
17086 	sc->phy.release(sc);
17087 
17088 	return 0;
17089 }
17090 
17091 /*
17092  *  wm_k1_workaround_lv - K1 Si workaround
17093  *  @sc:   pointer to the HW structure
17094  *
17095  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
17096  *  Disable K1 for 1000 and 100 speeds
17097  */
17098 static int
17099 wm_k1_workaround_lv(struct wm_softc *sc)
17100 {
17101 	uint32_t reg;
17102 	uint16_t phyreg;
17103 	int rv;
17104 
17105 	if (sc->sc_type != WM_T_PCH2)
17106 		return 0;
17107 
17108 	/* Set K1 beacon duration based on 10Mbps speed */
17109 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
17110 	if (rv != 0)
17111 		return rv;
17112 
17113 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
17114 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
17115 		if (phyreg &
17116 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
			/* LV 1G/100 packet drop issue workaround */
17118 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
17119 			    &phyreg);
17120 			if (rv != 0)
17121 				return rv;
17122 			phyreg &= ~HV_PM_CTRL_K1_ENA;
17123 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
17124 			    phyreg);
17125 			if (rv != 0)
17126 				return rv;
17127 		} else {
17128 			/* For 10Mbps */
17129 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
17130 			reg &= ~FEXTNVM4_BEACON_DURATION;
17131 			reg |= FEXTNVM4_BEACON_DURATION_16US;
17132 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
17133 		}
17134 	}
17135 
17136 	return 0;
17137 }
17138 
17139 /*
17140  *  wm_link_stall_workaround_hv - Si workaround
17141  *  @sc: pointer to the HW structure
17142  *
17143  *  This function works around a Si bug where the link partner can get
17144  *  a link up indication before the PHY does. If small packets are sent
17145  *  by the link partner they can be placed in the packet buffer without
17146  *  being properly accounted for by the PHY and will stall preventing
17147  *  further packets from being received.  The workaround is to clear the
17148  *  packet buffer after the PHY detects link up.
17149  */
17150 static int
17151 wm_link_stall_workaround_hv(struct wm_softc *sc)
17152 {
17153 	uint16_t phyreg;
17154 
17155 	if (sc->sc_phytype != WMPHY_82578)
17156 		return 0;
17157 
	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
17159 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
17160 	if ((phyreg & BMCR_LOOP) != 0)
17161 		return 0;
17162 
17163 	/* Check if link is up and at 1Gbps */
17164 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
17165 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
17166 	    | BM_CS_STATUS_SPEED_MASK;
17167 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
17168 		| BM_CS_STATUS_SPEED_1000))
17169 		return 0;
17170 
17171 	delay(200 * 1000);	/* XXX too big */
17172 
17173 	/* Flush the packets in the fifo buffer */
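	/* First write with the speed force asserted, then again with the
	 * speed force released, keeping the generic-to-MAC mux setting.
	 */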
17174 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
17175 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
17176 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
17177 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
17178 
17179 	return 0;
17180 }
17181 
17182 static int
17183 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
17184 {
17185 	int rv;
17186 
17187 	rv = sc->phy.acquire(sc);
17188 	if (rv != 0) {
17189 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
17190 		    __func__);
17191 		return rv;
17192 	}
17193 
17194 	rv = wm_set_mdio_slow_mode_hv_locked(sc);
17195 
17196 	sc->phy.release(sc);
17197 
17198 	return rv;
17199 }
17200 
17201 static int
17202 wm_set_mdio_slow_mode_hv_locked(struct wm_softc *sc)
17203 {
17204 	int rv;
17205 	uint16_t reg;
17206 
17207 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
17208 	if (rv != 0)
17209 		return rv;
17210 
17211 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
17212 	    reg | HV_KMRN_MDIO_SLOW);
17213 }
17214 
17215 /*
17216  *  wm_configure_k1_ich8lan - Configure K1 power state
17217  *  @sc: pointer to the HW structure
17218  *  @enable: K1 state to configure
17219  *
17220  *  Configure the K1 power state based on the provided parameter.
17221  *  Assumes semaphore already acquired.
17222  */
17223 static void
17224 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
17225 {
17226 	uint32_t ctrl, ctrl_ext, tmp;
17227 	uint16_t kmreg;
17228 	int rv;
17229 
17230 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
17231 
17232 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
17233 	if (rv != 0)
17234 		return;
17235 
17236 	if (k1_enable)
17237 		kmreg |= KUMCTRLSTA_K1_ENABLE;
17238 	else
17239 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
17240 
17241 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
17242 	if (rv != 0)
17243 		return;
17244 
17245 	delay(20);
17246 
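	/*
	 * Briefly force the MAC speed logic (FRCSPD with the speed bits
	 * cleared, SPD_BYPS set), then restore the original CTRL and
	 * CTRL_EXT values.
	 */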
17247 	ctrl = CSR_READ(sc, WMREG_CTRL);
17248 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
17249 
17250 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
17251 	tmp |= CTRL_FRCSPD;
17252 
17253 	CSR_WRITE(sc, WMREG_CTRL, tmp);
17254 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
17255 	CSR_WRITE_FLUSH(sc);
17256 	delay(20);
17257 
17258 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
17259 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
17260 	CSR_WRITE_FLUSH(sc);
17261 	delay(20);
17262 
17263 	return;
17264 }
17265 
17266 /* special case - for 82575 - need to do manual init ... */
17267 static void
17268 wm_reset_init_script_82575(struct wm_softc *sc)
17269 {
17270 	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * The setup is the same as the one mentioned in the FreeBSD driver
	 * for the i82575.
17273 	 */
17274 
17275 	/* SerDes configuration via SERDESCTRL */
17276 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
17277 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
17278 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
17279 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
17280 
17281 	/* CCM configuration via CCMCTL register */
17282 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
17283 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
17284 
17285 	/* PCIe lanes configuration */
17286 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
17287 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
17288 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
17289 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
17290 
17291 	/* PCIe PLL Configuration */
17292 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
17293 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
17294 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
17295 }
17296 
17297 static void
17298 wm_reset_mdicnfg_82580(struct wm_softc *sc)
17299 {
17300 	uint32_t reg;
17301 	uint16_t nvmword;
17302 	int rv;
17303 
17304 	if (sc->sc_type != WM_T_82580)
17305 		return;
17306 	if ((sc->sc_flags & WM_F_SGMII) == 0)
17307 		return;
17308 
17309 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
17310 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
17311 	if (rv != 0) {
17312 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
17313 		    __func__);
17314 		return;
17315 	}
17316 
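	/* Propagate the NVM's external/combined MDIO settings into MDICNFG. */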
17317 	reg = CSR_READ(sc, WMREG_MDICNFG);
17318 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
17319 		reg |= MDICNFG_DEST;
17320 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
17321 		reg |= MDICNFG_COM_MDIO;
17322 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
17323 }
17324 
17325 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
17326 
17327 static bool
17328 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
17329 {
17330 	uint32_t reg;
17331 	uint16_t id1, id2;
17332 	int i, rv;
17333 
17334 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17335 		device_xname(sc->sc_dev), __func__));
17336 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
17337 
17338 	id1 = id2 = 0xffff;
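	/* Try up to twice to read a valid (not 0x0000/0xffff) PHY ID pair. */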
17339 	for (i = 0; i < 2; i++) {
17340 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
17341 		    &id1);
17342 		if ((rv != 0) || MII_INVALIDID(id1))
17343 			continue;
17344 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
17345 		    &id2);
17346 		if ((rv != 0) || MII_INVALIDID(id2))
17347 			continue;
17348 		break;
17349 	}
17350 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
17351 		goto out;
17352 
17353 	/*
17354 	 * In case the PHY needs to be in mdio slow mode,
17355 	 * set slow mode and try to get the PHY id again.
17356 	 */
17357 	rv = 0;
17358 	if (sc->sc_type < WM_T_PCH_LPT) {
17359 		wm_set_mdio_slow_mode_hv_locked(sc);
17360 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
17361 		    &id1);
17362 		rv |= wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
17363 		    &id2);
17364 	}
17365 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
17366 		device_printf(sc->sc_dev, "XXX return with false\n");
17367 		return false;
17368 	}
17369 out:
17370 	if (sc->sc_type >= WM_T_PCH_LPT) {
17371 		/* Only unforce SMBus if ME is not active */
17372 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
17373 			uint16_t phyreg;
17374 
17375 			/* Unforce SMBus mode in PHY */
17376 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
17377 			    CV_SMB_CTRL, &phyreg);
17378 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
17379 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
17380 			    CV_SMB_CTRL, phyreg);
17381 
17382 			/* Unforce SMBus mode in MAC */
17383 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
17384 			reg &= ~CTRL_EXT_FORCE_SMBUS;
17385 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
17386 		}
17387 	}
17388 	return true;
17389 }
17390 
17391 static void
17392 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
17393 {
17394 	uint32_t reg;
17395 	int i;
17396 
17397 	/* Set PHY Config Counter to 50msec */
17398 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
17399 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
17400 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
17401 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
17402 
17403 	/* Toggle LANPHYPC */
17404 	reg = CSR_READ(sc, WMREG_CTRL);
17405 	reg |= CTRL_LANPHYPC_OVERRIDE;
17406 	reg &= ~CTRL_LANPHYPC_VALUE;
17407 	CSR_WRITE(sc, WMREG_CTRL, reg);
17408 	CSR_WRITE_FLUSH(sc);
17409 	delay(1000);
17410 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
17411 	CSR_WRITE(sc, WMREG_CTRL, reg);
17412 	CSR_WRITE_FLUSH(sc);
17413 
17414 	if (sc->sc_type < WM_T_PCH_LPT)
17415 		delay(50 * 1000);
17416 	else {
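		/*
		 * Poll up to 100ms (20 * 5ms) for CTRL_EXT_LPCD to be set,
		 * then wait a further 30ms.
		 */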
17417 		i = 20;
17418 
17419 		do {
17420 			delay(5 * 1000);
17421 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
17422 		    && i--);
17423 
17424 		delay(30 * 1000);
17425 	}
17426 }
17427 
17428 static int
17429 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
17430 {
17431 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
17432 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
17433 	uint32_t rxa;
17434 	uint16_t scale = 0, lat_enc = 0;
17435 	int32_t obff_hwm = 0;
17436 	int64_t lat_ns, value;
17437 
17438 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17439 		device_xname(sc->sc_dev), __func__));
17440 
17441 	if (link) {
17442 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
17443 		uint32_t status;
17444 		uint16_t speed;
17445 		pcireg_t preg;
17446 
17447 		status = CSR_READ(sc, WMREG_STATUS);
17448 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
17449 		case STATUS_SPEED_10:
17450 			speed = 10;
17451 			break;
17452 		case STATUS_SPEED_100:
17453 			speed = 100;
17454 			break;
17455 		case STATUS_SPEED_1000:
17456 			speed = 1000;
17457 			break;
17458 		default:
17459 			device_printf(sc->sc_dev, "Unknown speed "
17460 			    "(status = %08x)\n", status);
17461 			return -1;
17462 		}
17463 
17464 		/* Rx Packet Buffer Allocation size (KB) */
17465 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
17466 
17467 		/*
17468 		 * Determine the maximum latency tolerated by the device.
17469 		 *
17470 		 * Per the PCIe spec, the tolerated latencies are encoded as
17471 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
17472 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
17473 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
17474 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
17475 		 */
17476 		lat_ns = ((int64_t)rxa * 1024 -
17477 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
17478 			+ ETHER_HDR_LEN))) * 8 * 1000;
17479 		if (lat_ns < 0)
17480 			lat_ns = 0;
17481 		else
17482 			lat_ns /= speed;
17483 		value = lat_ns;
17484 
17485 		while (value > LTRV_VALUE) {
17486 			scale++;
17487 			value = howmany(value, __BIT(5));
17488 		}
17489 		if (scale > LTRV_SCALE_MAX) {
17490 			device_printf(sc->sc_dev,
17491 			    "Invalid LTR latency scale %d\n", scale);
17492 			return -1;
17493 		}
17494 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
17495 
17496 		/* Determine the maximum latency tolerated by the platform */
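		/*
		 * The LPT LTR capability register carries the platform's
		 * maximum snoop latency in the low 16 bits and its
		 * maximum no-snoop latency in the high 16 bits, both in
		 * the LTR encoding described above.
		 */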
17497 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
17498 		    WM_PCI_LTR_CAP_LPT);
17499 		max_snoop = preg & 0xffff;
17500 		max_nosnoop = preg >> 16;
17501 
17502 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
17503 
17504 		if (lat_enc > max_ltr_enc) {
17505 			lat_enc = max_ltr_enc;
17506 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
17507 			    * PCI_LTR_SCALETONS(
17508 				    __SHIFTOUT(lat_enc,
17509 					PCI_LTR_MAXSNOOPLAT_SCALE));
17510 		}
17511 
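		/*
		 * Convert the clipped latency back into the amount of Rx
		 * buffer (in KB) that fills while the platform responds;
		 * the space left over becomes the OBFF high water mark.
		 */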
17512 		if (lat_ns) {
17513 			lat_ns *= speed * 1000;
17514 			lat_ns /= 8;
17515 			lat_ns /= 1000000000;
17516 			obff_hwm = (int32_t)(rxa - lat_ns);
17517 		}
17518 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
17519 			device_printf(sc->sc_dev, "Invalid high water mark %d"
17520 			    " (rxa = %d, lat_ns = %d)\n",
17521 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
17522 			return -1;
17523 		}
17524 	}
17525 	/* Snoop and No-Snoop latencies are set to the same value */
17526 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
17527 	CSR_WRITE(sc, WMREG_LTRV, reg);
17528 
17529 	/* Set OBFF high water mark */
17530 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
17531 	reg |= obff_hwm;
17532 	CSR_WRITE(sc, WMREG_SVT, reg);
17533 
17534 	/* Enable OBFF */
17535 	reg = CSR_READ(sc, WMREG_SVCR);
17536 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
17537 	CSR_WRITE(sc, WMREG_SVCR, reg);
17538 
17539 	return 0;
17540 }
17541 
17542 /*
17543  * I210 Errata 25 and I211 Errata 10
17544  * Slow System Clock.
17545  *
17546  * Note that NetBSD calls this function in both the FLASH and iNVM cases.
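 *
 * If the PHY reports an unconfigured PLL, the workaround below resets
 * the internal PHY and bounces the device through D3hot while a
 * temporary autoload word with the PLL workaround bit set is in place,
 * forcing the NVM autoload to run again.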
17547  */
17548 static int
17549 wm_pll_workaround_i210(struct wm_softc *sc)
17550 {
17551 	uint32_t mdicnfg, wuc;
17552 	uint32_t reg;
17553 	pcireg_t pcireg;
17554 	int pmreg;
17555 	uint16_t nvmword, tmp_nvmword;
17556 	uint16_t phyval;
17557 	bool wa_done = false;
17558 	int i, rv = 0;
17559 
17560 	/* Get Power Management cap offset */
17561 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
17562 	    &pmreg, NULL) == 0)
17563 		return -1;
17564 
17565 	/* Save WUC and MDICNFG registers */
17566 	wuc = CSR_READ(sc, WMREG_WUC);
17567 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
17568 
17569 	reg = mdicnfg & ~MDICNFG_DEST;
17570 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
17571 
17572 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
17573 		/*
17574 		 * The default value of the Initialization Control Word 1
17575 		 * is the same for both the I210's FLASH_HW and the I21[01]'s iNVM.
17576 		 */
17577 		nvmword = INVM_DEFAULT_AL;
17578 	}
17579 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
17580 
17581 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
17582 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
17583 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
17584 
17585 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
17586 			rv = 0;
17587 			break; /* OK */
		} else {
			rv = -1;
		}
17590 
17591 		wa_done = true;
17592 		/* Directly reset the internal PHY */
17593 		reg = CSR_READ(sc, WMREG_CTRL);
17594 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
17595 
17596 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
17597 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
17598 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
17599 
17600 		CSR_WRITE(sc, WMREG_WUC, 0);
17601 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
17602 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
17603 
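		/*
		 * Bounce the device through D3hot and back to D0 so that
		 * the autoload runs again with the temporary word above.
		 */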
17604 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
17605 		    pmreg + PCI_PMCSR);
17606 		pcireg |= PCI_PMCSR_STATE_D3;
17607 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
17608 		    pmreg + PCI_PMCSR, pcireg);
17609 		delay(1000);
17610 		pcireg &= ~PCI_PMCSR_STATE_D3;
17611 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
17612 		    pmreg + PCI_PMCSR, pcireg);
17613 
17614 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
17615 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
17616 
17617 		/* Restore WUC register */
17618 		CSR_WRITE(sc, WMREG_WUC, wuc);
17619 	}
17620 
17621 	/* Restore MDICNFG setting */
17622 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
17623 	if (wa_done)
17624 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
17625 	return rv;
17626 }
17627 
17628 static void
17629 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
17630 {
17631 	uint32_t reg;
17632 
17633 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17634 		device_xname(sc->sc_dev), __func__));
17635 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
17636 	    || (sc->sc_type == WM_T_PCH_CNP));
17637 
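	/*
	 * Ungate the side clock and keep the IOSF sideband clock
	 * running so that a pending legacy (INTx) interrupt is not
	 * lost while the clocks are gated.
	 */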
17638 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
17639 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
17640 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
17641 
17642 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
17643 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
17644 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
17645 }
17646 
17647 /* Sysctl functions */
17648 static int
17649 wm_sysctl_tdh_handler(SYSCTLFN_ARGS)
17650 {
17651 	struct sysctlnode node = *rnode;
17652 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
17653 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
17654 	struct wm_softc *sc = txq->txq_sc;
17655 	uint32_t reg;
17656 
17657 	reg = CSR_READ(sc, WMREG_TDH(wmq->wmq_id));
17658 	node.sysctl_data = &reg;
17659 	return sysctl_lookup(SYSCTLFN_CALL(&node));
17660 }
17661 
17662 static int
17663 wm_sysctl_tdt_handler(SYSCTLFN_ARGS)
17664 {
17665 	struct sysctlnode node = *rnode;
17666 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
17667 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
17668 	struct wm_softc *sc = txq->txq_sc;
17669 	uint32_t reg;
17670 
17671 	reg = CSR_READ(sc, WMREG_TDT(wmq->wmq_id));
17672 	node.sysctl_data = &reg;
17673 	return sysctl_lookup(SYSCTLFN_CALL(&node));
17674 }
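
/*
 * Illustrative sketch, not part of the driver: nodes backed by the two
 * handlers above would typically be registered from the queue setup
 * path with sysctl_createv(9).  The parent node ("qnode") and the node
 * name are assumptions for the example:
 *
 *	sysctl_createv(&sc->sc_sysctllog, 0, &qnode, &cnode,
 *	    CTLFLAG_READONLY, CTLTYPE_INT, "txq_head",
 *	    SYSCTL_DESCR("TX descriptor head (TDH)"),
 *	    wm_sysctl_tdh_handler, 0, (void *)txq, 0,
 *	    CTL_CREATE, CTL_EOL);
 */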
17675 
17676 #ifdef WM_DEBUG
17677 static int
17678 wm_sysctl_debug(SYSCTLFN_ARGS)
17679 {
17680 	struct sysctlnode node = *rnode;
17681 	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
17682 	uint32_t dflags;
17683 	int error;
17684 
17685 	dflags = sc->sc_debug;
17686 	node.sysctl_data = &dflags;
17687 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
17688 
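	/*
	 * sysctl_lookup() copies the current value out and, on a write,
	 * copies the proposed value into dflags; only commit it to
	 * sc_debug once the lookup has succeeded.
	 */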
17689 	if (error || newp == NULL)
17690 		return error;
17691 
17692 	sc->sc_debug = dflags;
17693 	device_printf(sc->sc_dev, "TARC0: %08x\n", CSR_READ(sc, WMREG_TARC0));
17694 	device_printf(sc->sc_dev, "TDT0: %08x\n", CSR_READ(sc, WMREG_TDT(0)));
17695 
17696 	return 0;
17697 }
17698 #endif
17699