1 /*	$NetBSD: if_wm.c,v 1.800 2024/07/05 04:31:51 rin Exp $	*/
2 
3 /*
4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*******************************************************************************
39 
40   Copyright (c) 2001-2005, Intel Corporation
41   All rights reserved.
42 
43   Redistribution and use in source and binary forms, with or without
44   modification, are permitted provided that the following conditions are met:
45 
46    1. Redistributions of source code must retain the above copyright notice,
47       this list of conditions and the following disclaimer.
48 
49    2. Redistributions in binary form must reproduce the above copyright
50       notice, this list of conditions and the following disclaimer in the
51       documentation and/or other materials provided with the distribution.
52 
53    3. Neither the name of the Intel Corporation nor the names of its
54       contributors may be used to endorse or promote products derived from
55       this software without specific prior written permission.
56 
57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67   POSSIBILITY OF SUCH DAMAGE.
68 
69 *******************************************************************************/
70 /*
71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72  *
73  * TODO (in order of importance):
74  *
75  *	- Check XXX'ed comments
76  *	- TX Multi queue improvement (refine queue selection logic)
77  *	- Split header buffer for newer descriptors
78  *	- EEE (Energy Efficient Ethernet) for I354
79  *	- Virtual Function
80  *	- Set LED correctly (based on contents in EEPROM)
81  *	- Rework how parameters are loaded from the EEPROM.
82  */
83 
84 #include <sys/cdefs.h>
85 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.800 2024/07/05 04:31:51 rin Exp $");
86 
87 #ifdef _KERNEL_OPT
88 #include "opt_if_wm.h"
89 #endif
90 
91 #include <sys/param.h>
92 
93 #include <sys/atomic.h>
94 #include <sys/callout.h>
95 #include <sys/cpu.h>
96 #include <sys/device.h>
97 #include <sys/errno.h>
98 #include <sys/interrupt.h>
99 #include <sys/ioctl.h>
100 #include <sys/kernel.h>
101 #include <sys/kmem.h>
102 #include <sys/mbuf.h>
103 #include <sys/pcq.h>
104 #include <sys/queue.h>
105 #include <sys/rndsource.h>
106 #include <sys/socket.h>
107 #include <sys/sysctl.h>
108 #include <sys/syslog.h>
109 #include <sys/systm.h>
110 #include <sys/workqueue.h>
111 
112 #include <net/if.h>
113 #include <net/if_dl.h>
114 #include <net/if_media.h>
115 #include <net/if_ether.h>
116 
117 #include <net/bpf.h>
118 
119 #include <net/rss_config.h>
120 
121 #include <netinet/in.h>			/* XXX for struct ip */
122 #include <netinet/in_systm.h>		/* XXX for struct ip */
123 #include <netinet/ip.h>			/* XXX for struct ip */
124 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
125 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
126 
127 #include <sys/bus.h>
128 #include <sys/intr.h>
129 #include <machine/endian.h>
130 
131 #include <dev/mii/mii.h>
132 #include <dev/mii/mdio.h>
133 #include <dev/mii/miivar.h>
134 #include <dev/mii/miidevs.h>
135 #include <dev/mii/mii_bitbang.h>
136 #include <dev/mii/ikphyreg.h>
137 #include <dev/mii/igphyreg.h>
138 #include <dev/mii/igphyvar.h>
139 #include <dev/mii/inbmphyreg.h>
140 #include <dev/mii/ihphyreg.h>
141 #include <dev/mii/makphyreg.h>
142 
143 #include <dev/pci/pcireg.h>
144 #include <dev/pci/pcivar.h>
145 #include <dev/pci/pcidevs.h>
146 
147 #include <dev/pci/if_wmreg.h>
148 #include <dev/pci/if_wmvar.h>
149 
150 #ifdef WM_DEBUG
151 #define	WM_DEBUG_LINK		__BIT(0)
152 #define	WM_DEBUG_TX		__BIT(1)
153 #define	WM_DEBUG_RX		__BIT(2)
154 #define	WM_DEBUG_GMII		__BIT(3)
155 #define	WM_DEBUG_MANAGE		__BIT(4)
156 #define	WM_DEBUG_NVM		__BIT(5)
157 #define	WM_DEBUG_INIT		__BIT(6)
158 #define	WM_DEBUG_LOCK		__BIT(7)
159 
160 #if 0
161 #define WM_DEBUG_DEFAULT	(WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
162 	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
163 	WM_DEBUG_LOCK)
164 #endif
165 
166 #define	DPRINTF(sc, x, y)			  \
167 	do {					  \
168 		if ((sc)->sc_debug & (x))	  \
169 			printf y;		  \
170 	} while (0)
171 #else
172 #define	DPRINTF(sc, x, y)	__nothing
173 #endif /* WM_DEBUG */
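
/*
 * Usage sketch (editorial illustration, not driver code): the second
 * argument selects a debug class and the third is a parenthesized
 * printf() argument list, e.g.
 *
 *	DPRINTF(sc, WM_DEBUG_LINK, ("%s: link is %s\n",
 *	    device_xname(sc->sc_dev), link_up ? "up" : "down"));
 *
 * Without WM_DEBUG, the macro expands to __nothing.
 */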
174 
175 #define WM_WORKQUEUE_PRI PRI_SOFTNET
176 
177 /*
178  * The maximum number of interrupts this driver uses.
179  */
180 #define WM_MAX_NQUEUEINTR	16
181 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
182 
183 #ifndef WM_DISABLE_MSI
184 #define	WM_DISABLE_MSI 0
185 #endif
186 #ifndef WM_DISABLE_MSIX
187 #define	WM_DISABLE_MSIX 0
188 #endif
189 
190 int wm_disable_msi = WM_DISABLE_MSI;
191 int wm_disable_msix = WM_DISABLE_MSIX;
192 
193 #ifndef WM_WATCHDOG_TIMEOUT
194 #define WM_WATCHDOG_TIMEOUT 5
195 #endif
196 static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
197 
198 /*
199  * Transmit descriptor list size.  Due to errata, we can only have
200  * 256 hardware descriptors in the ring on < 82544, but we use 4096
201  * on >= 82544. We tell the upper layers that they can queue a lot
202  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
203  * of them at a time.
204  *
205  * We allow up to 64 DMA segments per packet.  Pathological packet
206  * chains containing many small mbufs have been observed in zero-copy
207  * situations with jumbo frames. If a mbuf chain has more than 64 DMA segments,
208  * m_defrag() is called to reduce it.
209  */
210 #define	WM_NTXSEGS		64
211 #define	WM_IFQUEUELEN		256
212 #define	WM_TXQUEUELEN_MAX	64
213 #define	WM_TXQUEUELEN_MAX_82547	16
214 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
215 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
216 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
217 #define	WM_NTXDESC_82542	256
218 #define	WM_NTXDESC_82544	4096
219 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
220 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
221 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
222 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
223 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
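
/*
 * Sketch (editorial, not driver code) of the usual pattern for honoring
 * the WM_NTXSEGS limit; wm_send_common_locked() below implements the
 * real retry logic, and the variable names here are hypothetical.
 */
#if 0
	error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* The chain needs more than WM_NTXSEGS segments; compact it. */
		struct mbuf *m = m_defrag(m0, M_NOWAIT);
		if (m == NULL)
			return ENOBUFS;		/* drop the packet */
		m0 = m;
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	}
#endif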
224 
225 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
226 
227 #define	WM_TXINTERQSIZE		256
228 
229 #ifndef WM_TX_PROCESS_LIMIT_DEFAULT
230 #define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
231 #endif
232 #ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
233 #define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
234 #endif
235 
236 /*
237  * Receive descriptor list size.  We have one Rx buffer for normal-
238  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
239  * packet.  We allocate 256 receive descriptors, each with a 2k
240  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
241  */
242 #define	WM_NRXDESC		256U
243 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
244 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
245 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
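
/*
 * Example: because WM_NRXDESC is a power of two, the mask turns the
 * modulo into a cheap AND and handles negative wrap as well:
 * WM_NEXTRX(255) == 0 and WM_PREVRX(0) == 255.
 */
#if 0
/* Sanity sketch (editorial): the mask trick needs a power-of-two size. */
__CTASSERT((WM_NRXDESC & (WM_NRXDESC - 1)) == 0);
#endif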
246 
247 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
248 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
249 #endif
250 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
251 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
252 #endif
253 
254 typedef union txdescs {
255 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
256 	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
257 } txdescs_t;
258 
259 typedef union rxdescs {
260 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
261 	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
262 	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
263 } rxdescs_t;
264 
265 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
266 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
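
/*
 * Example: WM_CDTXOFF() yields the byte offset of descriptor x within
 * the ring, so with the 16-byte legacy descriptors WM_CDTXOFF(txq, 4)
 * is 64.  WM_CDTXADDR() below adds this offset to the ring's DMA base
 * address.
 */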
267 
268 /*
269  * Software state for transmit jobs.
270  */
271 struct wm_txsoft {
272 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
273 	bus_dmamap_t txs_dmamap;	/* our DMA map */
274 	int txs_firstdesc;		/* first descriptor in packet */
275 	int txs_lastdesc;		/* last descriptor in packet */
276 	int txs_ndesc;			/* # of descriptors used */
277 };
278 
279 /*
280  * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
281  * buffer and a DMA map. For packets which fill more than one buffer, we chain
282  * them together.
283  */
284 struct wm_rxsoft {
285 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
286 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
287 };
288 
289 #define WM_LINKUP_TIMEOUT	50
290 
291 static uint16_t swfwphysem[] = {
292 	SWFW_PHY0_SM,
293 	SWFW_PHY1_SM,
294 	SWFW_PHY2_SM,
295 	SWFW_PHY3_SM
296 };
297 
298 static const uint32_t wm_82580_rxpbs_table[] = {
299 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
300 };
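
/*
 * Lookup sketch (editorial, mirroring wm_rxpbs_adjust_82580() declared
 * below): the RXPBS size field read from the register indexes this table.
 */
#if 0
static uint32_t
example_rxpbs_adjust_82580(uint32_t val)
{

	if (val < __arraycount(wm_82580_rxpbs_table))
		return wm_82580_rxpbs_table[val];
	return 0;
}
#endif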
301 
302 struct wm_softc;
303 
304 #if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
305 #if !defined(WM_EVENT_COUNTERS)
306 #define WM_EVENT_COUNTERS 1
307 #endif
308 #endif
309 
310 #ifdef WM_EVENT_COUNTERS
311 #define WM_Q_EVCNT_DEFINE(qname, evname)				 \
312 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
313 	struct evcnt qname##_ev_##evname
314 
315 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
316 	do {								\
317 		snprintf((q)->qname##_##evname##_evcnt_name,		\
318 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
319 		    "%s%02d%s", #qname, (qnum), #evname);		\
320 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
321 		    (evtype), NULL, (xname),				\
322 		    (q)->qname##_##evname##_evcnt_name);		\
323 	} while (0)
324 
325 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
326 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
327 
328 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
329 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
330 
331 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
332 	evcnt_detach(&(q)->qname##_ev_##evname)
333 #endif /* WM_EVENT_COUNTERS */
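
/*
 * Expansion sketch (editorial): WM_Q_EVCNT_DEFINE(txq, txdw) declares
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * (token pasting does not happen inside the string literal, so every
 * name buffer has the same size), and
 * WM_Q_INTR_EVCNT_ATTACH(txq, txdw, q, 0, xname) formats the name
 * "txq00txdw" and registers the counter with evcnt_attach_dynamic().
 */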
334 
335 struct wm_txqueue {
336 	kmutex_t *txq_lock;		/* lock for tx operations */
337 
338 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
339 
340 	/* Software state for the transmit descriptors. */
341 	int txq_num;			/* must be a power of two */
342 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
343 
344 	/* TX control data structures. */
345 	int txq_ndesc;			/* must be a power of two */
346 	size_t txq_descsize;		/* size of one Tx descriptor */
347 	txdescs_t *txq_descs_u;
348 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
349 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
350 	int txq_desc_rseg;		/* real number of control segments */
351 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
352 #define	txq_descs	txq_descs_u->sctxu_txdescs
353 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
354 
355 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
356 
357 	int txq_free;			/* number of free Tx descriptors */
358 	int txq_next;			/* next ready Tx descriptor */
359 
360 	int txq_sfree;			/* number of free Tx jobs */
361 	int txq_snext;			/* next free Tx job */
362 	int txq_sdirty;			/* dirty Tx jobs */
363 
364 	/* These 4 variables are used only on the 82547. */
365 	int txq_fifo_size;		/* Tx FIFO size */
366 	int txq_fifo_head;		/* current head of FIFO */
367 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
368 	int txq_fifo_stall;		/* Tx FIFO is stalled */
369 
370 	/*
371 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
372 	 * CPUs. This queue mediates between them without blocking.
373 	 */
374 	pcq_t *txq_interq;
375 
376 	/*
377 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
378 	 * to manage the Tx H/W queue's busy flag.
379 	 */
380 	int txq_flags;			/* flags for H/W queue, see below */
381 #define	WM_TXQ_NO_SPACE		0x1
382 #define	WM_TXQ_LINKDOWN_DISCARD	0x2
383 
384 	bool txq_stopping;
385 
386 	bool txq_sending;
387 	time_t txq_lastsent;
388 
389 	/* Checksum flags used for previous packet */
390 	uint32_t	txq_last_hw_cmd;
391 	uint8_t		txq_last_hw_fields;
392 	uint16_t	txq_last_hw_ipcs;
393 	uint16_t	txq_last_hw_tucs;
394 
395 	uint32_t txq_packets;		/* for AIM */
396 	uint32_t txq_bytes;		/* for AIM */
397 #ifdef WM_EVENT_COUNTERS
398 	/* TX event counters */
399 	WM_Q_EVCNT_DEFINE(txq, txsstall);   /* Stalled due to no txs */
400 	WM_Q_EVCNT_DEFINE(txq, txdstall);   /* Stalled due to no txd */
401 	WM_Q_EVCNT_DEFINE(txq, fifo_stall); /* FIFO stalls (82547) */
402 	WM_Q_EVCNT_DEFINE(txq, txdw);	    /* Tx descriptor interrupts */
403 	WM_Q_EVCNT_DEFINE(txq, txqe);	    /* Tx queue empty interrupts */
404 					    /* XXX not used? */
405 
406 	WM_Q_EVCNT_DEFINE(txq, ipsum);	    /* IP checksums comp. */
407 	WM_Q_EVCNT_DEFINE(txq, tusum);	    /* TCP/UDP cksums comp. */
408 	WM_Q_EVCNT_DEFINE(txq, tusum6);	    /* TCP/UDP v6 cksums comp. */
409 	WM_Q_EVCNT_DEFINE(txq, tso);	    /* TCP seg offload (IPv4) */
410 	WM_Q_EVCNT_DEFINE(txq, tso6);	    /* TCP seg offload (IPv6) */
411 	WM_Q_EVCNT_DEFINE(txq, tsopain);    /* Painful header manip. for TSO */
412 	WM_Q_EVCNT_DEFINE(txq, pcqdrop);    /* Pkt dropped in pcq */
413 	WM_Q_EVCNT_DEFINE(txq, descdrop);   /* Pkt dropped in MAC desc ring */
414 					    /* other than toomanyseg */
415 
416 	WM_Q_EVCNT_DEFINE(txq, toomanyseg); /* Pkt dropped(toomany DMA segs) */
417 	WM_Q_EVCNT_DEFINE(txq, defrag);	    /* m_defrag() */
418 	WM_Q_EVCNT_DEFINE(txq, underrun);   /* Tx underrun */
419 	WM_Q_EVCNT_DEFINE(txq, skipcontext); /* Tx skip wrong cksum context */
420 
421 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
422 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
423 #endif /* WM_EVENT_COUNTERS */
424 };
425 
426 struct wm_rxqueue {
427 	kmutex_t *rxq_lock;		/* lock for rx operations */
428 
429 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
430 
431 	/* Software state for the receive descriptors. */
432 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
433 
434 	/* RX control data structures. */
435 	int rxq_ndesc;			/* must be a power of two */
436 	size_t rxq_descsize;		/* size of one Rx descriptor */
437 	rxdescs_t *rxq_descs_u;
438 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
439 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
440 	int rxq_desc_rseg;		/* real number of control segments */
441 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
442 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
443 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
444 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
445 
446 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
447 
448 	int rxq_ptr;			/* next ready Rx desc/queue ent */
449 	int rxq_discard;
450 	int rxq_len;
451 	struct mbuf *rxq_head;
452 	struct mbuf *rxq_tail;
453 	struct mbuf **rxq_tailp;
454 
455 	bool rxq_stopping;
456 
457 	uint32_t rxq_packets;		/* for AIM */
458 	uint32_t rxq_bytes;		/* for AIM */
459 #ifdef WM_EVENT_COUNTERS
460 	/* RX event counters */
461 	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
462 	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */
463 	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
464 	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
465 	WM_Q_EVCNT_DEFINE(rxq, qdrop);	/* Rx queue drop packet */
466 #endif
467 };
468 
469 struct wm_queue {
470 	int wmq_id;			/* index of TX/RX queues */
471 	int wmq_intr_idx;		/* index of MSI-X tables */
472 
473 	uint32_t wmq_itr;		/* interrupt interval per queue. */
474 	bool wmq_set_itr;
475 
476 	struct wm_txqueue wmq_txq;
477 	struct wm_rxqueue wmq_rxq;
478 	char sysctlname[32];		/* Name for sysctl */
479 
480 	bool wmq_txrx_use_workqueue;
481 	bool wmq_wq_enqueued;
482 	struct work wmq_cookie;
483 	void *wmq_si;
484 };
485 
486 struct wm_phyop {
487 	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
488 	void (*release)(struct wm_softc *);
489 	int (*readreg_locked)(device_t, int, int, uint16_t *);
490 	int (*writereg_locked)(device_t, int, int, uint16_t);
491 	int reset_delay_us;
492 	bool no_errprint;
493 };
494 
495 struct wm_nvmop {
496 	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
497 	void (*release)(struct wm_softc *);
498 	int (*read)(struct wm_softc *, int, int, uint16_t *);
499 };
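
/*
 * Usage sketch (editorial): acquire() is marked warn_unused_result
 * because every locked accessor must be bracketed by a successful
 * acquire/release pair, e.g.
 */
#if 0
	if ((rv = sc->phy.acquire(sc)) != 0)
		return rv;		/* semaphore was not obtained */
	rv = sc->phy.readreg_locked(dev, phy, reg, &val);
	sc->phy.release(sc);
#endif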
500 
501 /*
502  * Software state per device.
503  */
504 struct wm_softc {
505 	device_t sc_dev;		/* generic device information */
506 	bus_space_tag_t sc_st;		/* bus space tag */
507 	bus_space_handle_t sc_sh;	/* bus space handle */
508 	bus_size_t sc_ss;		/* bus space size */
509 	bus_space_tag_t sc_iot;		/* I/O space tag */
510 	bus_space_handle_t sc_ioh;	/* I/O space handle */
511 	bus_size_t sc_ios;		/* I/O space size */
512 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
513 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
514 	bus_size_t sc_flashs;		/* flash registers space size */
515 	off_t sc_flashreg_offset;	/*
516 					 * offset to flash registers from
517 					 * start of BAR
518 					 */
519 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
520 
521 	struct ethercom sc_ethercom;	/* Ethernet common data */
522 	struct mii_data sc_mii;		/* MII/media information */
523 
524 	pci_chipset_tag_t sc_pc;
525 	pcitag_t sc_pcitag;
526 	int sc_bus_speed;		/* PCI/PCIX bus speed */
527 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
528 
529 	uint16_t sc_pcidevid;		/* PCI device ID */
530 	wm_chip_type sc_type;		/* MAC type */
531 	int sc_rev;			/* MAC revision */
532 	wm_phy_type sc_phytype;		/* PHY type */
533 	uint8_t sc_sfptype;		/* SFP type */
534 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
535 #define	WM_MEDIATYPE_UNKNOWN		0x00
536 #define	WM_MEDIATYPE_FIBER		0x01
537 #define	WM_MEDIATYPE_COPPER		0x02
538 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
539 	int sc_funcid;			/* unit number of the chip (0 to 3) */
540 	u_int sc_flags;			/* flags; see below */
541 	u_short sc_if_flags;		/* last if_flags */
542 	int sc_ec_capenable;		/* last ec_capenable */
543 	int sc_flowflags;		/* 802.3x flow control flags */
544 	uint16_t eee_lp_ability;	/* EEE link partner's ability */
545 	int sc_align_tweak;
546 
547 	void *sc_ihs[WM_MAX_NINTR];	/*
548 					 * interrupt cookie.
549 					 * - legacy and msi use sc_ihs[0] only
550 					 * - msix uses sc_ihs[0] to sc_ihs[nintrs-1]
551 					 */
552 	pci_intr_handle_t *sc_intrs;	/*
553 					 * legacy and msi use sc_intrs[0] only
554 					 * msix uses sc_intrs[0] to sc_intrs[nintrs-1]
555 					 */
556 	int sc_nintrs;			/* number of interrupts */
557 
558 	int sc_link_intr_idx;		/* index of MSI-X tables */
559 
560 	callout_t sc_tick_ch;		/* tick callout */
561 	bool sc_core_stopping;
562 
563 	int sc_nvm_ver_major;
564 	int sc_nvm_ver_minor;
565 	int sc_nvm_ver_build;
566 	int sc_nvm_addrbits;		/* NVM address bits */
567 	unsigned int sc_nvm_wordsize;	/* NVM word size */
568 	int sc_ich8_flash_base;
569 	int sc_ich8_flash_bank_size;
570 	int sc_nvm_k1_enabled;
571 
572 	int sc_nqueues;
573 	struct wm_queue *sc_queue;
574 	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
575 	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
576 	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
577 	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
578 	struct workqueue *sc_queue_wq;
579 	bool sc_txrx_use_workqueue;
580 
581 	int sc_affinity_offset;
582 
583 #ifdef WM_EVENT_COUNTERS
584 	/* Event counters. */
585 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
586 
587 	/* >= WM_T_82542_2_1 */
588 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
589 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
590 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
591 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
592 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
593 
594 	struct evcnt sc_ev_crcerrs;	/* CRC Error */
595 	struct evcnt sc_ev_algnerrc;	/* Alignment Error */
596 	struct evcnt sc_ev_symerrc;	/* Symbol Error */
597 	struct evcnt sc_ev_rxerrc;	/* Receive Error */
598 	struct evcnt sc_ev_mpc;		/* Missed Packets */
599 	struct evcnt sc_ev_scc;		/* Single Collision */
600 	struct evcnt sc_ev_ecol;	/* Excessive Collision */
601 	struct evcnt sc_ev_mcc;		/* Multiple Collision */
602 	struct evcnt sc_ev_latecol;	/* Late Collision */
603 	struct evcnt sc_ev_colc;	/* Collision */
604 	struct evcnt sc_ev_cbtmpc;	/* Circuit Breaker Tx Mng. Packet */
605 	struct evcnt sc_ev_dc;		/* Defer */
606 	struct evcnt sc_ev_tncrs;	/* Tx-No CRS */
607 	struct evcnt sc_ev_sec;		/* Sequence Error */
608 
609 	/* Old */
610 	struct evcnt sc_ev_cexterr;	/* Carrier Extension Error */
611 	/* New */
612 	struct evcnt sc_ev_htdpmc;	/* Host Tx Discarded Pkts by MAC */
613 
614 	struct evcnt sc_ev_rlec;	/* Receive Length Error */
615 	struct evcnt sc_ev_cbrdpc;	/* Circuit Breaker Rx Dropped Packet */
616 	struct evcnt sc_ev_prc64;	/* Packets Rx (64 bytes) */
617 	struct evcnt sc_ev_prc127;	/* Packets Rx (65-127 bytes) */
618 	struct evcnt sc_ev_prc255;	/* Packets Rx (128-255 bytes) */
619 	struct evcnt sc_ev_prc511;	/* Packets Rx (256-511 bytes) */
620 	struct evcnt sc_ev_prc1023;	/* Packets Rx (512-1023 bytes) */
621 	struct evcnt sc_ev_prc1522;	/* Packets Rx (1024-1522 bytes) */
622 	struct evcnt sc_ev_gprc;	/* Good Packets Rx */
623 	struct evcnt sc_ev_bprc;	/* Broadcast Packets Rx */
624 	struct evcnt sc_ev_mprc;	/* Multicast Packets Rx */
625 	struct evcnt sc_ev_gptc;	/* Good Packets Tx */
626 	struct evcnt sc_ev_gorc;	/* Good Octets Rx */
627 	struct evcnt sc_ev_gotc;	/* Good Octets Tx */
628 	struct evcnt sc_ev_rnbc;	/* Rx No Buffers */
629 	struct evcnt sc_ev_ruc;		/* Rx Undersize */
630 	struct evcnt sc_ev_rfc;		/* Rx Fragment */
631 	struct evcnt sc_ev_roc;		/* Rx Oversize */
632 	struct evcnt sc_ev_rjc;		/* Rx Jabber */
633 	struct evcnt sc_ev_mgtprc;	/* Management Packets RX */
634 	struct evcnt sc_ev_mgtpdc;	/* Management Packets Dropped */
635 	struct evcnt sc_ev_mgtptc;	/* Management Packets TX */
636 	struct evcnt sc_ev_tor;		/* Total Octets Rx */
637 	struct evcnt sc_ev_tot;		/* Total Octets Tx */
638 	struct evcnt sc_ev_tpr;		/* Total Packets Rx */
639 	struct evcnt sc_ev_tpt;		/* Total Packets Tx */
640 	struct evcnt sc_ev_ptc64;	/* Packets Tx (64 bytes) */
641 	struct evcnt sc_ev_ptc127;	/* Packets Tx (65-127 bytes) */
642 	struct evcnt sc_ev_ptc255;	/* Packets Tx (128-255 bytes) */
643 	struct evcnt sc_ev_ptc511;	/* Packets Tx (256-511 bytes) */
644 	struct evcnt sc_ev_ptc1023;	/* Packets Tx (512-1023 bytes) */
645 	struct evcnt sc_ev_ptc1522;	/* Packets Tx (1024-1522 Bytes) */
646 	struct evcnt sc_ev_mptc;	/* Multicast Packets Tx */
647 	struct evcnt sc_ev_bptc;	/* Broadcast Packets Tx */
648 	struct evcnt sc_ev_tsctc;	/* TCP Segmentation Context Tx */
649 
650 	/* Old */
651 	struct evcnt sc_ev_tsctfc;	/* TCP Segmentation Context Tx Fail */
652 	/* New */
653 	struct evcnt sc_ev_cbrmpc;	/* Circuit Breaker Rx Mng. Packet */
654 
655 	struct evcnt sc_ev_iac;		/* Interrupt Assertion */
656 
657 	/* Old */
658 	struct evcnt sc_ev_icrxptc;	/* Intr. Cause Rx Pkt Timer Expire */
659 	struct evcnt sc_ev_icrxatc;	/* Intr. Cause Rx Abs Timer Expire */
660 	struct evcnt sc_ev_ictxptc;	/* Intr. Cause Tx Pkt Timer Expire */
661 	struct evcnt sc_ev_ictxatc;	/* Intr. Cause Tx Abs Timer Expire */
662 	struct evcnt sc_ev_ictxqec;	/* Intr. Cause Tx Queue Empty */
663 	struct evcnt sc_ev_ictxqmtc;	/* Intr. Cause Tx Queue Min Thresh */
664 	/*
665 	 * sc_ev_rxdmtc is shared by both the "Intr. cause" and
666 	 * non-"Intr. cause" registers.
667 	 */
668 	struct evcnt sc_ev_rxdmtc;	/* (Intr. Cause) Rx Desc Min Thresh */
669 	struct evcnt sc_ev_icrxoc;	/* Intr. Cause Receiver Overrun */
670 	/* New */
671 	struct evcnt sc_ev_rpthc;	/* Rx Packets To Host */
672 	struct evcnt sc_ev_debug1;	/* Debug Counter 1 */
673 	struct evcnt sc_ev_debug2;	/* Debug Counter 2 */
674 	struct evcnt sc_ev_debug3;	/* Debug Counter 3 */
675 	struct evcnt sc_ev_hgptc;	/* Host Good Packets TX */
676 	struct evcnt sc_ev_debug4;	/* Debug Counter 4 */
677 	struct evcnt sc_ev_htcbdpc;	/* Host Tx Circuit Breaker Drp. Pkts */
678 	struct evcnt sc_ev_hgorc;	/* Host Good Octets Rx */
679 	struct evcnt sc_ev_hgotc;	/* Host Good Octets Tx */
680 	struct evcnt sc_ev_lenerrs;	/* Length Error */
681 	struct evcnt sc_ev_tlpic;	/* EEE Tx LPI */
682 	struct evcnt sc_ev_rlpic;	/* EEE Rx LPI */
683 	struct evcnt sc_ev_b2ogprc;	/* BMC2OS pkts received by host */
684 	struct evcnt sc_ev_o2bspc;	/* OS2BMC pkts transmitted by host */
685 	struct evcnt sc_ev_b2ospc;	/* BMC2OS pkts sent by BMC */
686 	struct evcnt sc_ev_o2bgptc;	/* OS2BMC pkts received by BMC */
687 	struct evcnt sc_ev_scvpc;	/* SerDes/SGMII Code Violation Pkt. */
688 	struct evcnt sc_ev_hrmpc;	/* Header Redirection Missed Packet */
689 #endif /* WM_EVENT_COUNTERS */
690 
691 	struct sysctllog *sc_sysctllog;
692 
693 	/* This variable is used only on the 82547. */
694 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
695 
696 	uint32_t sc_ctrl;		/* prototype CTRL register */
697 #if 0
698 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
699 #endif
700 	uint32_t sc_icr;		/* prototype interrupt bits */
701 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
702 	uint32_t sc_tctl;		/* prototype TCTL register */
703 	uint32_t sc_rctl;		/* prototype RCTL register */
704 	uint32_t sc_txcw;		/* prototype TXCW register */
705 	uint32_t sc_tipg;		/* prototype TIPG register */
706 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
707 	uint32_t sc_pba;		/* prototype PBA register */
708 
709 	int sc_tbi_linkup;		/* TBI link status */
710 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
711 	int sc_tbi_serdes_ticks;	/* tbi ticks */
712 	struct timeval sc_linkup_delay_time; /* delay LINK_STATE_UP */
713 
714 	int sc_mchash_type;		/* multicast filter offset */
715 
716 	krndsource_t rnd_source;	/* random source */
717 
718 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
719 
720 	kmutex_t *sc_core_lock;		/* lock for softc operations */
721 	kmutex_t *sc_ich_phymtx;	/*
722 					 * 82574/82583/ICH/PCH specific PHY
723 					 * mutex. For 82574/82583, the mutex
724 					 * is used for both PHY and NVM.
725 					 */
726 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
727 
728 	struct wm_phyop phy;
729 	struct wm_nvmop nvm;
730 
731 	struct workqueue *sc_reset_wq;
732 	struct work sc_reset_work;
733 	volatile unsigned sc_reset_pending;
734 
735 	bool sc_dying;
736 
737 #ifdef WM_DEBUG
738 	uint32_t sc_debug;
739 	bool sc_trigger_reset;
740 #endif
741 };
742 
743 #define	WM_RXCHAIN_RESET(rxq)						\
744 do {									\
745 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
746 	*(rxq)->rxq_tailp = NULL;					\
747 	(rxq)->rxq_len = 0;						\
748 } while (/*CONSTCOND*/0)
749 
750 #define	WM_RXCHAIN_LINK(rxq, m)						\
751 do {									\
752 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
753 	(rxq)->rxq_tailp = &(m)->m_next;				\
754 } while (/*CONSTCOND*/0)
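
/*
 * Usage sketch (editorial): the tail pointer makes appends O(1).
 * WM_RXCHAIN_RESET() points rxq_tailp at rxq_head, so the first
 * WM_RXCHAIN_LINK() sets the head and each later one extends the
 * previous mbuf's m_next:
 */
#if 0
	WM_RXCHAIN_RESET(rxq);
	WM_RXCHAIN_LINK(rxq, m1);	/* rxq_head == rxq_tail == m1 */
	WM_RXCHAIN_LINK(rxq, m2);	/* m1->m_next == m2, rxq_tail == m2 */
#endif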
755 
756 #ifdef WM_EVENT_COUNTERS
757 #ifdef __HAVE_ATOMIC64_LOADSTORE
758 #define	WM_EVCNT_INCR(ev)						\
759 	atomic_store_relaxed(&((ev)->ev_count),				\
760 	    atomic_load_relaxed(&(ev)->ev_count) + 1)
761 #define	WM_EVCNT_STORE(ev, val)						\
762 	atomic_store_relaxed(&((ev)->ev_count), (val))
763 #define	WM_EVCNT_ADD(ev, val)						\
764 	atomic_store_relaxed(&((ev)->ev_count),				\
765 	    atomic_load_relaxed(&(ev)->ev_count) + (val))
766 #else
767 #define	WM_EVCNT_INCR(ev)						\
768 	((ev)->ev_count)++
769 #define	WM_EVCNT_STORE(ev, val)						\
770 	((ev)->ev_count = (val))
771 #define	WM_EVCNT_ADD(ev, val)						\
772 	(ev)->ev_count += (val)
773 #endif
774 
775 #define WM_Q_EVCNT_INCR(qname, evname)			\
776 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
777 #define WM_Q_EVCNT_STORE(qname, evname, val)		\
778 	WM_EVCNT_STORE(&(qname)->qname##_ev_##evname, (val))
779 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
780 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
781 #else /* !WM_EVENT_COUNTERS */
782 #define	WM_EVCNT_INCR(ev)	__nothing
783 #define	WM_EVCNT_STORE(ev, val)	__nothing
784 #define	WM_EVCNT_ADD(ev, val)	__nothing
785 
786 #define WM_Q_EVCNT_INCR(qname, evname)		__nothing
787 #define WM_Q_EVCNT_STORE(qname, evname, val)	__nothing
788 #define WM_Q_EVCNT_ADD(qname, evname, val)	__nothing
789 #endif /* !WM_EVENT_COUNTERS */
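
/*
 * Design note (editorial): each counter normally has a single updater
 * at a time, so on __HAVE_ATOMIC64_LOADSTORE platforms a relaxed
 * load/store pair is enough to keep 64-bit reads from tearing on other
 * CPUs, without the cost of a full atomic read-modify-write.  E.g.
 * WM_Q_EVCNT_INCR(txq, txdw) expands to
 * WM_EVCNT_INCR(&(txq)->txq_ev_txdw).
 */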
790 
791 #define	CSR_READ(sc, reg)						\
792 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
793 #define	CSR_WRITE(sc, reg, val)						\
794 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
795 #define	CSR_WRITE_FLUSH(sc)						\
796 	(void)CSR_READ((sc), WMREG_STATUS)
797 
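/*
 * Usage sketch (editorial): CSR_WRITE_FLUSH() reads STATUS to push
 * preceding posted PCI writes out to the device before a delay, e.g.
 */
#if 0
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
	CSR_WRITE_FLUSH(sc);
	delay(5000);
#endif
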
798 #define ICH8_FLASH_READ32(sc, reg)					\
799 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
800 	    (reg) + sc->sc_flashreg_offset)
801 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
802 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
803 	    (reg) + sc->sc_flashreg_offset, (data))
804 
805 #define ICH8_FLASH_READ16(sc, reg)					\
806 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
807 	    (reg) + sc->sc_flashreg_offset)
808 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
809 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
810 	    (reg) + sc->sc_flashreg_offset, (data))
811 
812 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
813 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
814 
815 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
816 #define	WM_CDTXADDR_HI(txq, x)						\
817 	(sizeof(bus_addr_t) == 8 ?					\
818 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
819 
820 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
821 #define	WM_CDRXADDR_HI(rxq, x)						\
822 	(sizeof(bus_addr_t) == 8 ?					\
823 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
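
/*
 * Example: a descriptor ring DMA'd at bus address 0x123456000 splits
 * into WM_CDTXADDR_LO == 0x23456000 and WM_CDTXADDR_HI == 0x1; with a
 * 32-bit bus_addr_t the high half is always 0.
 */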
824 
825 /*
826  * Register read/write functions.
827  * Other than CSR_{READ|WRITE}().
828  */
829 #if 0
830 static inline uint32_t wm_io_read(struct wm_softc *, int);
831 #endif
832 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
833 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
834     uint32_t, uint32_t);
835 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
836 
837 /*
838  * Descriptor sync/init functions.
839  */
840 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
841 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
842 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
843 
844 /*
845  * Device driver interface functions and commonly used functions.
846  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
847  */
848 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
849 static int	wm_match(device_t, cfdata_t, void *);
850 static void	wm_attach(device_t, device_t, void *);
851 static int	wm_detach(device_t, int);
852 static bool	wm_suspend(device_t, const pmf_qual_t *);
853 static bool	wm_resume(device_t, const pmf_qual_t *);
854 static bool	wm_watchdog(struct ifnet *);
855 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
856     uint16_t *);
857 static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
858     uint16_t *);
859 static void	wm_tick(void *);
860 static int	wm_ifflags_cb(struct ethercom *);
861 static int	wm_ioctl(struct ifnet *, u_long, void *);
862 /* MAC address related */
863 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
864 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
865 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
866 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
867 static int	wm_rar_count(struct wm_softc *);
868 static void	wm_set_filter(struct wm_softc *);
869 /* Reset and init related */
870 static void	wm_set_vlan(struct wm_softc *);
871 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
872 static void	wm_get_auto_rd_done(struct wm_softc *);
873 static void	wm_lan_init_done(struct wm_softc *);
874 static void	wm_get_cfg_done(struct wm_softc *);
875 static int	wm_phy_post_reset(struct wm_softc *);
876 static int	wm_write_smbus_addr(struct wm_softc *);
877 static int	wm_init_lcd_from_nvm(struct wm_softc *);
878 static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
879 static void	wm_initialize_hardware_bits(struct wm_softc *);
880 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
881 static int	wm_reset_phy(struct wm_softc *);
882 static void	wm_flush_desc_rings(struct wm_softc *);
883 static void	wm_reset(struct wm_softc *);
884 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
885 static void	wm_rxdrain(struct wm_rxqueue *);
886 static void	wm_init_rss(struct wm_softc *);
887 static void	wm_adjust_qnum(struct wm_softc *, int);
888 static inline bool	wm_is_using_msix(struct wm_softc *);
889 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
890 static int	wm_softint_establish_queue(struct wm_softc *, int, int);
891 static int	wm_setup_legacy(struct wm_softc *);
892 static int	wm_setup_msix(struct wm_softc *);
893 static int	wm_init(struct ifnet *);
894 static int	wm_init_locked(struct ifnet *);
895 static void	wm_init_sysctls(struct wm_softc *);
896 static void	wm_update_stats(struct wm_softc *);
897 static void	wm_clear_evcnt(struct wm_softc *);
898 static void	wm_unset_stopping_flags(struct wm_softc *);
899 static void	wm_set_stopping_flags(struct wm_softc *);
900 static void	wm_stop(struct ifnet *, int);
901 static void	wm_stop_locked(struct ifnet *, bool, bool);
902 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
903 static void	wm_82547_txfifo_stall(void *);
904 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
905 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
906 /* DMA related */
907 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
908 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
909 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
910 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
911     struct wm_txqueue *);
912 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
913 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
914 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
915     struct wm_rxqueue *);
916 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
917 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
918 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
919 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
920 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
921 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
922 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
923     struct wm_txqueue *);
924 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
925     struct wm_rxqueue *);
926 static int	wm_alloc_txrx_queues(struct wm_softc *);
927 static void	wm_free_txrx_queues(struct wm_softc *);
928 static int	wm_init_txrx_queues(struct wm_softc *);
929 /* Start */
930 static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
931     struct wm_txsoft *, uint32_t *, uint8_t *);
932 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
933 static void	wm_start(struct ifnet *);
934 static void	wm_start_locked(struct ifnet *);
935 static int	wm_transmit(struct ifnet *, struct mbuf *);
936 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
937 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
938     bool);
939 static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
940     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
941 static void	wm_nq_start(struct ifnet *);
942 static void	wm_nq_start_locked(struct ifnet *);
943 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
944 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
945 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
946     bool);
947 static void	wm_deferred_start_locked(struct wm_txqueue *);
948 static void	wm_handle_queue(void *);
949 static void	wm_handle_queue_work(struct work *, void *);
950 static void	wm_handle_reset_work(struct work *, void *);
951 /* Interrupt */
952 static bool	wm_txeof(struct wm_txqueue *, u_int);
953 static bool	wm_rxeof(struct wm_rxqueue *, u_int);
954 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
955 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
956 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
957 static void	wm_linkintr(struct wm_softc *, uint32_t);
958 static int	wm_intr_legacy(void *);
959 static inline void	wm_txrxintr_disable(struct wm_queue *);
960 static inline void	wm_txrxintr_enable(struct wm_queue *);
961 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
962 static int	wm_txrxintr_msix(void *);
963 static int	wm_linkintr_msix(void *);
964 
965 /*
966  * Media related.
967  * GMII, SGMII, TBI, SERDES and SFP.
968  */
969 /* Common */
970 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
971 /* GMII related */
972 static void	wm_gmii_reset(struct wm_softc *);
973 static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
974 static int	wm_get_phy_id_82575(struct wm_softc *);
975 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
976 static int	wm_gmii_mediachange(struct ifnet *);
977 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
978 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
979 static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
980 static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
981 static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
982 static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
983 static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
984 static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
985 static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
986 static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
987 static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
988 static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
989 static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
990 static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
991 static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
992 static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
993 static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
994 static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
995 	bool);
996 static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
997 static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
998 static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
999 static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
1000 static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
1001 static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
1002 static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
1003 static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
1004 static void	wm_gmii_statchg(struct ifnet *);
1005 /*
1006  * Kumeran related (80003, ICH* and PCH*).
1007  * These functions are not for accessing MII registers but for accessing
1008  * Kumeran-specific registers.
1009  */
1010 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
1011 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
1012 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
1013 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
1014 /* EMI register related */
1015 static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
1016 static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
1017 static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
1018 /* SGMII */
1019 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
1020 static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
1021 static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
1022 static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
1023 static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
1024 static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
1025 /* TBI related */
1026 static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
1027 static void	wm_tbi_mediainit(struct wm_softc *);
1028 static int	wm_tbi_mediachange(struct ifnet *);
1029 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
1030 static int	wm_check_for_link(struct wm_softc *);
1031 static void	wm_tbi_tick(struct wm_softc *);
1032 /* SERDES related */
1033 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
1034 static int	wm_serdes_mediachange(struct ifnet *);
1035 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
1036 static void	wm_serdes_tick(struct wm_softc *);
1037 /* SFP related */
1038 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
1039 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
1040 
1041 /*
1042  * NVM related.
1043  * Microwire, SPI (w/wo EERD) and Flash.
1044  */
1045 /* Misc functions */
1046 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
1047 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
1048 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
1049 /* Microwire */
1050 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
1051 /* SPI */
1052 static int	wm_nvm_ready_spi(struct wm_softc *);
1053 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
1054 /* Using with EERD */
1055 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
1056 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
1057 /* Flash */
1058 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
1059     unsigned int *);
1060 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
1061 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
1062 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
1063     uint32_t *);
1064 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
1065 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
1066 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
1067 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
1068 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
1069 /* iNVM */
1070 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
1071 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
1072 /* Lock, detecting NVM type, validate checksum and read */
1073 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
1074 static int	wm_nvm_flash_presence_i210(struct wm_softc *);
1075 static int	wm_nvm_validate_checksum(struct wm_softc *);
1076 static void	wm_nvm_version_invm(struct wm_softc *);
1077 static void	wm_nvm_version(struct wm_softc *);
1078 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
1079 
1080 /*
1081  * Hardware semaphores.
1082  * Very complex...
1083  */
1084 static int	wm_get_null(struct wm_softc *);
1085 static void	wm_put_null(struct wm_softc *);
1086 static int	wm_get_eecd(struct wm_softc *);
1087 static void	wm_put_eecd(struct wm_softc *);
1088 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
1089 static void	wm_put_swsm_semaphore(struct wm_softc *);
1090 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
1091 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
1092 static int	wm_get_nvm_80003(struct wm_softc *);
1093 static void	wm_put_nvm_80003(struct wm_softc *);
1094 static int	wm_get_nvm_82571(struct wm_softc *);
1095 static void	wm_put_nvm_82571(struct wm_softc *);
1096 static int	wm_get_phy_82575(struct wm_softc *);
1097 static void	wm_put_phy_82575(struct wm_softc *);
1098 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
1099 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
1100 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
1101 static void	wm_put_swflag_ich8lan(struct wm_softc *);
1102 static int	wm_get_nvm_ich8lan(struct wm_softc *);
1103 static void	wm_put_nvm_ich8lan(struct wm_softc *);
1104 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
1105 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
1106 
1107 /*
1108  * Management mode and power management related subroutines.
1109  * BMC, AMT, suspend/resume and EEE.
1110  */
1111 #if 0
1112 static int	wm_check_mng_mode(struct wm_softc *);
1113 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
1114 static int	wm_check_mng_mode_82574(struct wm_softc *);
1115 static int	wm_check_mng_mode_generic(struct wm_softc *);
1116 #endif
1117 static int	wm_enable_mng_pass_thru(struct wm_softc *);
1118 static bool	wm_phy_resetisblocked(struct wm_softc *);
1119 static void	wm_get_hw_control(struct wm_softc *);
1120 static void	wm_release_hw_control(struct wm_softc *);
1121 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
1122 static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
1123 static void	wm_init_manageability(struct wm_softc *);
1124 static void	wm_release_manageability(struct wm_softc *);
1125 static void	wm_get_wakeup(struct wm_softc *);
1126 static int	wm_ulp_disable(struct wm_softc *);
1127 static int	wm_enable_phy_wakeup(struct wm_softc *);
1128 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
1129 static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
1130 static int	wm_resume_workarounds_pchlan(struct wm_softc *);
1131 static void	wm_enable_wakeup(struct wm_softc *);
1132 static void	wm_disable_aspm(struct wm_softc *);
1133 /* LPLU (Low Power Link Up) */
1134 static void	wm_lplu_d0_disable(struct wm_softc *);
1135 /* EEE */
1136 static int	wm_set_eee_i350(struct wm_softc *);
1137 static int	wm_set_eee_pchlan(struct wm_softc *);
1138 static int	wm_set_eee(struct wm_softc *);
1139 
1140 /*
1141  * Workarounds (mainly PHY related).
1142  * Basically, PHY's workarounds are in the PHY drivers.
1143  */
1144 static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
1145 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
1146 static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
1147 static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
1148 static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
1149 static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
1150 static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
1151 static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
1152 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
1153 static int	wm_k1_workaround_lv(struct wm_softc *);
1154 static int	wm_link_stall_workaround_hv(struct wm_softc *);
1155 static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
1156 static int	wm_set_mdio_slow_mode_hv_locked(struct wm_softc *);
1157 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
1158 static void	wm_reset_init_script_82575(struct wm_softc *);
1159 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
1160 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
1161 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
1162 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
1163 static int	wm_pll_workaround_i210(struct wm_softc *);
1164 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
1165 static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
1166 static void	wm_set_linkdown_discard(struct wm_softc *);
1167 static void	wm_clear_linkdown_discard(struct wm_softc *);
1168 
1169 static int	wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
1170 static int	wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
1171 #ifdef WM_DEBUG
1172 static int	wm_sysctl_debug(SYSCTLFN_PROTO);
1173 #endif
1174 
1175 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
1176     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
1177 
1178 /*
1179  * Devices supported by this driver.
1180  */
1181 static const struct wm_product {
1182 	pci_vendor_id_t		wmp_vendor;
1183 	pci_product_id_t	wmp_product;
1184 	const char		*wmp_name;
1185 	wm_chip_type		wmp_type;
1186 	uint32_t		wmp_flags;
1187 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
1188 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
1189 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
1190 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
1191 #define WMP_MEDIATYPE(x)	((x) & 0x03)
1192 } wm_products[] = {
1193 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
1194 	  "Intel i82542 1000BASE-X Ethernet",
1195 	  WM_T_82542_2_1,	WMP_F_FIBER },
1196 
1197 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
1198 	  "Intel i82543GC 1000BASE-X Ethernet",
1199 	  WM_T_82543,		WMP_F_FIBER },
1200 
1201 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
1202 	  "Intel i82543GC 1000BASE-T Ethernet",
1203 	  WM_T_82543,		WMP_F_COPPER },
1204 
1205 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
1206 	  "Intel i82544EI 1000BASE-T Ethernet",
1207 	  WM_T_82544,		WMP_F_COPPER },
1208 
1209 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
1210 	  "Intel i82544EI 1000BASE-X Ethernet",
1211 	  WM_T_82544,		WMP_F_FIBER },
1212 
1213 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
1214 	  "Intel i82544GC 1000BASE-T Ethernet",
1215 	  WM_T_82544,		WMP_F_COPPER },
1216 
1217 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
1218 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
1219 	  WM_T_82544,		WMP_F_COPPER },
1220 
1221 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
1222 	  "Intel i82540EM 1000BASE-T Ethernet",
1223 	  WM_T_82540,		WMP_F_COPPER },
1224 
1225 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
1226 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
1227 	  WM_T_82540,		WMP_F_COPPER },
1228 
1229 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
1230 	  "Intel i82540EP 1000BASE-T Ethernet",
1231 	  WM_T_82540,		WMP_F_COPPER },
1232 
1233 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
1234 	  "Intel i82540EP 1000BASE-T Ethernet",
1235 	  WM_T_82540,		WMP_F_COPPER },
1236 
1237 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
1238 	  "Intel i82540EP 1000BASE-T Ethernet",
1239 	  WM_T_82540,		WMP_F_COPPER },
1240 
1241 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
1242 	  "Intel i82545EM 1000BASE-T Ethernet",
1243 	  WM_T_82545,		WMP_F_COPPER },
1244 
1245 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
1246 	  "Intel i82545GM 1000BASE-T Ethernet",
1247 	  WM_T_82545_3,		WMP_F_COPPER },
1248 
1249 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
1250 	  "Intel i82545GM 1000BASE-X Ethernet",
1251 	  WM_T_82545_3,		WMP_F_FIBER },
1252 
1253 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
1254 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
1255 	  WM_T_82545_3,		WMP_F_SERDES },
1256 
1257 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
1258 	  "Intel i82546EB 1000BASE-T Ethernet",
1259 	  WM_T_82546,		WMP_F_COPPER },
1260 
1261 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
1262 	  "Intel i82546EB 1000BASE-T Ethernet",
1263 	  WM_T_82546,		WMP_F_COPPER },
1264 
1265 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
1266 	  "Intel i82545EM 1000BASE-X Ethernet",
1267 	  WM_T_82545,		WMP_F_FIBER },
1268 
1269 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
1270 	  "Intel i82546EB 1000BASE-X Ethernet",
1271 	  WM_T_82546,		WMP_F_FIBER },
1272 
1273 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
1274 	  "Intel i82546GB 1000BASE-T Ethernet",
1275 	  WM_T_82546_3,		WMP_F_COPPER },
1276 
1277 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
1278 	  "Intel i82546GB 1000BASE-X Ethernet",
1279 	  WM_T_82546_3,		WMP_F_FIBER },
1280 
1281 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
1282 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
1283 	  WM_T_82546_3,		WMP_F_SERDES },
1284 
1285 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
1286 	  "i82546GB quad-port Gigabit Ethernet",
1287 	  WM_T_82546_3,		WMP_F_COPPER },
1288 
1289 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
1290 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
1291 	  WM_T_82546_3,		WMP_F_COPPER },
1292 
1293 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
1294 	  "Intel PRO/1000MT (82546GB)",
1295 	  WM_T_82546_3,		WMP_F_COPPER },
1296 
1297 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
1298 	  "Intel i82541EI 1000BASE-T Ethernet",
1299 	  WM_T_82541,		WMP_F_COPPER },
1300 
1301 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
1302 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
1303 	  WM_T_82541,		WMP_F_COPPER },
1304 
1305 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
1306 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
1307 	  WM_T_82541,		WMP_F_COPPER },
1308 
1309 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
1310 	  "Intel i82541ER 1000BASE-T Ethernet",
1311 	  WM_T_82541_2,		WMP_F_COPPER },
1312 
1313 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
1314 	  "Intel i82541GI 1000BASE-T Ethernet",
1315 	  WM_T_82541_2,		WMP_F_COPPER },
1316 
1317 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
1318 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
1319 	  WM_T_82541_2,		WMP_F_COPPER },
1320 
1321 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
1322 	  "Intel i82541PI 1000BASE-T Ethernet",
1323 	  WM_T_82541_2,		WMP_F_COPPER },
1324 
1325 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
1326 	  "Intel i82547EI 1000BASE-T Ethernet",
1327 	  WM_T_82547,		WMP_F_COPPER },
1328 
1329 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
1330 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
1331 	  WM_T_82547,		WMP_F_COPPER },
1332 
1333 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
1334 	  "Intel i82547GI 1000BASE-T Ethernet",
1335 	  WM_T_82547_2,		WMP_F_COPPER },
1336 
1337 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
1338 	  "Intel PRO/1000 PT (82571EB)",
1339 	  WM_T_82571,		WMP_F_COPPER },
1340 
1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
1342 	  "Intel PRO/1000 PF (82571EB)",
1343 	  WM_T_82571,		WMP_F_FIBER },
1344 
1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
1346 	  "Intel PRO/1000 PB (82571EB)",
1347 	  WM_T_82571,		WMP_F_SERDES },
1348 
1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
1350 	  "Intel PRO/1000 QT (82571EB)",
1351 	  WM_T_82571,		WMP_F_COPPER },
1352 
1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
1354 	  "Intel PRO/1000 PT Quad Port Server Adapter",
1355 	  WM_T_82571,		WMP_F_COPPER },
1356 
1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
1358 	  "Intel Gigabit PT Quad Port Server ExpressModule",
1359 	  WM_T_82571,		WMP_F_COPPER },
1360 
1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
1362 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
1363 	  WM_T_82571,		WMP_F_SERDES },
1364 
1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
1366 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
1367 	  WM_T_82571,		WMP_F_SERDES },
1368 
1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
1370 	  "Intel 82571EB Quad 1000baseX Ethernet",
1371 	  WM_T_82571,		WMP_F_FIBER },
1372 
1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
1374 	  "Intel i82572EI 1000baseT Ethernet",
1375 	  WM_T_82572,		WMP_F_COPPER },
1376 
1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
1378 	  "Intel i82572EI 1000baseX Ethernet",
1379 	  WM_T_82572,		WMP_F_FIBER },
1380 
1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
1382 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
1383 	  WM_T_82572,		WMP_F_SERDES },
1384 
1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
1386 	  "Intel i82572EI 1000baseT Ethernet",
1387 	  WM_T_82572,		WMP_F_COPPER },
1388 
1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
1390 	  "Intel i82573E",
1391 	  WM_T_82573,		WMP_F_COPPER },
1392 
1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
1394 	  "Intel i82573E IAMT",
1395 	  WM_T_82573,		WMP_F_COPPER },
1396 
1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
1398 	  "Intel i82573L Gigabit Ethernet",
1399 	  WM_T_82573,		WMP_F_COPPER },
1400 
1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
1402 	  "Intel i82574L",
1403 	  WM_T_82574,		WMP_F_COPPER },
1404 
1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
1406 	  "Intel i82574L",
1407 	  WM_T_82574,		WMP_F_COPPER },
1408 
1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
1410 	  "Intel i82583V",
1411 	  WM_T_82583,		WMP_F_COPPER },
1412 
1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
1414 	  "i80003 dual 1000baseT Ethernet",
1415 	  WM_T_80003,		WMP_F_COPPER },
1416 
1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
1418 	  "i80003 dual 1000baseX Ethernet",
1419 	  WM_T_80003,		WMP_F_COPPER },
1420 
1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
1422 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
1423 	  WM_T_80003,		WMP_F_SERDES },
1424 
1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
1426 	  "Intel i80003 1000baseT Ethernet",
1427 	  WM_T_80003,		WMP_F_COPPER },
1428 
1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
1430 	  "Intel i80003 Gigabit Ethernet (SERDES)",
1431 	  WM_T_80003,		WMP_F_SERDES },
1432 
1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
1434 	  "Intel i82801H (M_AMT) LAN Controller",
1435 	  WM_T_ICH8,		WMP_F_COPPER },
1436 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
1437 	  "Intel i82801H (AMT) LAN Controller",
1438 	  WM_T_ICH8,		WMP_F_COPPER },
1439 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
1440 	  "Intel i82801H LAN Controller",
1441 	  WM_T_ICH8,		WMP_F_COPPER },
1442 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
1443 	  "Intel i82801H (IFE) 10/100 LAN Controller",
1444 	  WM_T_ICH8,		WMP_F_COPPER },
1445 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
1446 	  "Intel i82801H (M) LAN Controller",
1447 	  WM_T_ICH8,		WMP_F_COPPER },
1448 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
1449 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
1450 	  WM_T_ICH8,		WMP_F_COPPER },
1451 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
1452 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
1453 	  WM_T_ICH8,		WMP_F_COPPER },
1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
1455 	  "82567V-3 LAN Controller",
1456 	  WM_T_ICH8,		WMP_F_COPPER },
1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
1458 	  "82801I (AMT) LAN Controller",
1459 	  WM_T_ICH9,		WMP_F_COPPER },
1460 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
1461 	  "82801I 10/100 LAN Controller",
1462 	  WM_T_ICH9,		WMP_F_COPPER },
1463 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
1464 	  "82801I (G) 10/100 LAN Controller",
1465 	  WM_T_ICH9,		WMP_F_COPPER },
1466 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
1467 	  "82801I (GT) 10/100 LAN Controller",
1468 	  WM_T_ICH9,		WMP_F_COPPER },
1469 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
1470 	  "82801I (C) LAN Controller",
1471 	  WM_T_ICH9,		WMP_F_COPPER },
1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
1473 	  "82801I mobile LAN Controller",
1474 	  WM_T_ICH9,		WMP_F_COPPER },
1475 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
1476 	  "82801I mobile (V) LAN Controller",
1477 	  WM_T_ICH9,		WMP_F_COPPER },
1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
1479 	  "82801I mobile (AMT) LAN Controller",
1480 	  WM_T_ICH9,		WMP_F_COPPER },
1481 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
1482 	  "82567LM-4 LAN Controller",
1483 	  WM_T_ICH9,		WMP_F_COPPER },
1484 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
1485 	  "82567LM-2 LAN Controller",
1486 	  WM_T_ICH10,		WMP_F_COPPER },
1487 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
1488 	  "82567LF-2 LAN Controller",
1489 	  WM_T_ICH10,		WMP_F_COPPER },
1490 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
1491 	  "82567LM-3 LAN Controller",
1492 	  WM_T_ICH10,		WMP_F_COPPER },
1493 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
1494 	  "82567LF-3 LAN Controller",
1495 	  WM_T_ICH10,		WMP_F_COPPER },
1496 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
1497 	  "82567V-2 LAN Controller",
1498 	  WM_T_ICH10,		WMP_F_COPPER },
1499 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
1500 	  "82567V-3? LAN Controller",
1501 	  WM_T_ICH10,		WMP_F_COPPER },
1502 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
1503 	  "HANKSVILLE LAN Controller",
1504 	  WM_T_ICH10,		WMP_F_COPPER },
1505 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
1506 	  "PCH LAN (82577LM) Controller",
1507 	  WM_T_PCH,		WMP_F_COPPER },
1508 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
1509 	  "PCH LAN (82577LC) Controller",
1510 	  WM_T_PCH,		WMP_F_COPPER },
1511 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
1512 	  "PCH LAN (82578DM) Controller",
1513 	  WM_T_PCH,		WMP_F_COPPER },
1514 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
1515 	  "PCH LAN (82578DC) Controller",
1516 	  WM_T_PCH,		WMP_F_COPPER },
1517 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
1518 	  "PCH2 LAN (82579LM) Controller",
1519 	  WM_T_PCH2,		WMP_F_COPPER },
1520 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
1521 	  "PCH2 LAN (82579V) Controller",
1522 	  WM_T_PCH2,		WMP_F_COPPER },
1523 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
1524 	  "82575EB dual-1000baseT Ethernet",
1525 	  WM_T_82575,		WMP_F_COPPER },
1526 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
1527 	  "82575EB dual-1000baseX Ethernet (SERDES)",
1528 	  WM_T_82575,		WMP_F_SERDES },
1529 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
1530 	  "82575GB quad-1000baseT Ethernet",
1531 	  WM_T_82575,		WMP_F_COPPER },
1532 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
1533 	  "82575GB quad-1000baseT Ethernet (PM)",
1534 	  WM_T_82575,		WMP_F_COPPER },
1535 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
1536 	  "82576 1000BaseT Ethernet",
1537 	  WM_T_82576,		WMP_F_COPPER },
1538 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
1539 	  "82576 1000BaseX Ethernet",
1540 	  WM_T_82576,		WMP_F_FIBER },
1541 
1542 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
1543 	  "82576 gigabit Ethernet (SERDES)",
1544 	  WM_T_82576,		WMP_F_SERDES },
1545 
1546 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
1547 	  "82576 quad-1000BaseT Ethernet",
1548 	  WM_T_82576,		WMP_F_COPPER },
1549 
1550 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
1551 	  "82576 Gigabit ET2 Quad Port Server Adapter",
1552 	  WM_T_82576,		WMP_F_COPPER },
1553 
1554 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
1555 	  "82576 gigabit Ethernet",
1556 	  WM_T_82576,		WMP_F_COPPER },
1557 
1558 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
1559 	  "82576 gigabit Ethernet (SERDES)",
1560 	  WM_T_82576,		WMP_F_SERDES },
1561 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
1562 	  "82576 quad-gigabit Ethernet (SERDES)",
1563 	  WM_T_82576,		WMP_F_SERDES },
1564 
1565 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
1566 	  "82580 1000BaseT Ethernet",
1567 	  WM_T_82580,		WMP_F_COPPER },
1568 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
1569 	  "82580 1000BaseX Ethernet",
1570 	  WM_T_82580,		WMP_F_FIBER },
1571 
1572 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
1573 	  "82580 1000BaseT Ethernet (SERDES)",
1574 	  WM_T_82580,		WMP_F_SERDES },
1575 
1576 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
1577 	  "82580 gigabit Ethernet (SGMII)",
1578 	  WM_T_82580,		WMP_F_COPPER },
1579 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
1580 	  "82580 dual-1000BaseT Ethernet",
1581 	  WM_T_82580,		WMP_F_COPPER },
1582 
1583 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1584 	  "82580 quad-1000BaseX Ethernet",
1585 	  WM_T_82580,		WMP_F_FIBER },
1586 
1587 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
1588 	  "DH89XXCC Gigabit Ethernet (SGMII)",
1589 	  WM_T_82580,		WMP_F_COPPER },
1590 
1591 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
1592 	  "DH89XXCC Gigabit Ethernet (SERDES)",
1593 	  WM_T_82580,		WMP_F_SERDES },
1594 
1595 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
1596 	  "DH89XXCC 1000BASE-KX Ethernet",
1597 	  WM_T_82580,		WMP_F_SERDES },
1598 
1599 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
1600 	  "DH89XXCC Gigabit Ethernet (SFP)",
1601 	  WM_T_82580,		WMP_F_SERDES },
1602 
1603 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
1604 	  "I350 Gigabit Network Connection",
1605 	  WM_T_I350,		WMP_F_COPPER },
1606 
1607 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
1608 	  "I350 Gigabit Fiber Network Connection",
1609 	  WM_T_I350,		WMP_F_FIBER },
1610 
1611 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
1612 	  "I350 Gigabit Backplane Connection",
1613 	  WM_T_I350,		WMP_F_SERDES },
1614 
1615 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
1616 	  "I350 Quad Port Gigabit Ethernet",
1617 	  WM_T_I350,		WMP_F_SERDES },
1618 
1619 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
1620 	  "I350 Gigabit Connection",
1621 	  WM_T_I350,		WMP_F_COPPER },
1622 
1623 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
1624 	  "I354 Gigabit Ethernet (KX)",
1625 	  WM_T_I354,		WMP_F_SERDES },
1626 
1627 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
1628 	  "I354 Gigabit Ethernet (SGMII)",
1629 	  WM_T_I354,		WMP_F_COPPER },
1630 
1631 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
1632 	  "I354 Gigabit Ethernet (2.5G)",
1633 	  WM_T_I354,		WMP_F_COPPER },
1634 
1635 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
1636 	  "I210-T1 Ethernet Server Adapter",
1637 	  WM_T_I210,		WMP_F_COPPER },
1638 
1639 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
1640 	  "I210 Ethernet (Copper OEM)",
1641 	  WM_T_I210,		WMP_F_COPPER },
1642 
1643 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
1644 	  "I210 Ethernet (Copper IT)",
1645 	  WM_T_I210,		WMP_F_COPPER },
1646 
1647 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
1648 	  "I210 Ethernet (Copper, FLASH less)",
1649 	  WM_T_I210,		WMP_F_COPPER },
1650 
1651 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
1652 	  "I210 Gigabit Ethernet (Fiber)",
1653 	  WM_T_I210,		WMP_F_FIBER },
1654 
1655 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
1656 	  "I210 Gigabit Ethernet (SERDES)",
1657 	  WM_T_I210,		WMP_F_SERDES },
1658 
1659 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
1660 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
1661 	  WM_T_I210,		WMP_F_SERDES },
1662 
1663 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
1664 	  "I210 Gigabit Ethernet (SGMII)",
1665 	  WM_T_I210,		WMP_F_COPPER },
1666 
1667 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
1668 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
1669 	  WM_T_I210,		WMP_F_COPPER },
1670 
1671 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
1672 	  "I211 Ethernet (COPPER)",
1673 	  WM_T_I211,		WMP_F_COPPER },
1674 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
1675 	  "I217 V Ethernet Connection",
1676 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1677 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
1678 	  "I217 LM Ethernet Connection",
1679 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1680 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
1681 	  "I218 V Ethernet Connection",
1682 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1683 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
1684 	  "I218 V Ethernet Connection",
1685 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1686 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
1687 	  "I218 V Ethernet Connection",
1688 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1689 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
1690 	  "I218 LM Ethernet Connection",
1691 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1692 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
1693 	  "I218 LM Ethernet Connection",
1694 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1695 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
1696 	  "I218 LM Ethernet Connection",
1697 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1698 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
1699 	  "I219 LM Ethernet Connection",
1700 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1701 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
1702 	  "I219 LM (2) Ethernet Connection",
1703 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1704 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
1705 	  "I219 LM (3) Ethernet Connection",
1706 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1707 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
1708 	  "I219 LM (4) Ethernet Connection",
1709 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1710 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
1711 	  "I219 LM (5) Ethernet Connection",
1712 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1713 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
1714 	  "I219 LM (6) Ethernet Connection",
1715 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1716 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
1717 	  "I219 LM (7) Ethernet Connection",
1718 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1719 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
1720 	  "I219 LM (8) Ethernet Connection",
1721 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1722 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
1723 	  "I219 LM (9) Ethernet Connection",
1724 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1725 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
1726 	  "I219 LM (10) Ethernet Connection",
1727 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1728 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
1729 	  "I219 LM (11) Ethernet Connection",
1730 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1731 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
1732 	  "I219 LM (12) Ethernet Connection",
1733 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1734 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
1735 	  "I219 LM (13) Ethernet Connection",
1736 	  WM_T_PCH_TGP,		WMP_F_COPPER },
1737 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
1738 	  "I219 LM (14) Ethernet Connection",
1739 	  WM_T_PCH_TGP,		WMP_F_COPPER },
1740 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
1741 	  "I219 LM (15) Ethernet Connection",
1742 	  WM_T_PCH_TGP,		WMP_F_COPPER },
1743 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM16,
1744 	  "I219 LM (16) Ethernet Connection",
1745 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP */
1746 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM17,
1747 	  "I219 LM (17) Ethernet Connection",
1748 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP */
1749 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM18,
1750 	  "I219 LM (18) Ethernet Connection",
1751 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
1752 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM19,
1753 	  "I219 LM (19) Ethernet Connection",
1754 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
1755 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM20,
1756 	  "I219 LM (20) Ethernet Connection",
1757 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
1758 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM21,
1759 	  "I219 LM (21) Ethernet Connection",
1760 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
1761 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM22,
1762 	  "I219 LM (22) Ethernet Connection",
1763 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP(RPL) */
1764 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM23,
1765 	  "I219 LM (23) Ethernet Connection",
1766 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP(RPL) */
1767 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
1768 	  "I219 V Ethernet Connection",
1769 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1770 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
1771 	  "I219 V (2) Ethernet Connection",
1772 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1773 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
1774 	  "I219 V (4) Ethernet Connection",
1775 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1776 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
1777 	  "I219 V (5) Ethernet Connection",
1778 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1779 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
1780 	  "I219 V (6) Ethernet Connection",
1781 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1782 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
1783 	  "I219 V (7) Ethernet Connection",
1784 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1785 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
1786 	  "I219 V (8) Ethernet Connection",
1787 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1788 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
1789 	  "I219 V (9) Ethernet Connection",
1790 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1791 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
1792 	  "I219 V (10) Ethernet Connection",
1793 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1794 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
1795 	  "I219 V (11) Ethernet Connection",
1796 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1797 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
1798 	  "I219 V (12) Ethernet Connection",
1799 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1800 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
1801 	  "I219 V (13) Ethernet Connection",
1802 	  WM_T_PCH_TGP,		WMP_F_COPPER },
1803 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
1804 	  "I219 V (14) Ethernet Connection",
1805 	  WM_T_PCH_TGP,		WMP_F_COPPER },
1806 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V15,
1807 	  "I219 V (15) Ethernet Connection",
1808 	  WM_T_PCH_TGP,		WMP_F_COPPER },
1809 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V16,
1810 	  "I219 V (16) Ethernet Connection",
1811 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP */
1812 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V17,
1813 	  "I219 V (17) Ethernet Connection",
1814 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP */
1815 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V18,
1816 	  "I219 V (18) Ethernet Connection",
1817 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
1818 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V19,
1819 	  "I219 V (19) Ethernet Connection",
1820 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
1821 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V20,
1822 	  "I219 V (20) Ethernet Connection",
1823 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
1824 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V21,
1825 	  "I219 V (21) Ethernet Connection",
1826 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
1827 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V22,
1828 	  "I219 V (22) Ethernet Connection",
1829 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP(RPL) */
1830 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V23,
1831 	  "I219 V (23) Ethernet Connection",
1832 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP(RPL) */
1833 	{ 0,			0,
1834 	  NULL,
1835 	  0,			0 },
1836 };
1837 
1838 /*
1839  * Register read/write functions.
1840  * Other than CSR_{READ|WRITE}().
1841  */
1842 
1843 #if 0 /* Not currently used */
1844 static inline uint32_t
1845 wm_io_read(struct wm_softc *sc, int reg)
1846 {
1847 
1848 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1849 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1850 }
1851 #endif
1852 
1853 static inline void
1854 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1855 {
1856 
1857 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1858 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1859 }
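
/*
 * A hedged sketch of how the IOADDR/IODATA pair above is used:
 * offset 0 in the I/O BAR selects the register, offset 4 carries the
 * data.  wm_io_example() is hypothetical and not part of the driver;
 * it assumes the I/O BAR was mapped (WM_F_IOH_VALID is set).
 */
#if 0 /* Example only */
static void
wm_io_example(struct wm_softc *sc)
{

	if ((sc->sc_flags & WM_F_IOH_VALID) != 0) {
		/* Write CTRL through the indirect I/O window. */
		wm_io_write(sc, WMREG_CTRL, CTRL_RST);
	}
}
#endif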
1860 
1861 static inline void
1862 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1863     uint32_t data)
1864 {
1865 	uint32_t regval;
1866 	int i;
1867 
1868 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1869 
1870 	CSR_WRITE(sc, reg, regval);
1871 
1872 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1873 		delay(5);
1874 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1875 			break;
1876 	}
1877 	if (i == SCTL_CTL_POLL_TIMEOUT) {
1878 		aprint_error("%s: WARNING:"
1879 		    " i82575 reg 0x%08x setup did not indicate ready\n",
1880 		    device_xname(sc->sc_dev), reg);
1881 	}
1882 }
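
/*
 * Note that the loop above is a bounded busy-wait: it polls
 * SCTL_CTL_READY every 5us and gives up after SCTL_CTL_POLL_TIMEOUT
 * iterations with a warning, rather than stalling forever.
 */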
1883 
1884 static inline void
1885 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1886 {
1887 	wa->wa_low = htole32(BUS_ADDR_LO32(v));
1888 	wa->wa_high = htole32(BUS_ADDR_HI32(v));
1889 }
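
/*
 * Worked example for wm_set_dma_addr(): for v = 0x0000000123456780,
 * wa_low becomes htole32(0x23456780) and wa_high htole32(0x00000001),
 * i.e. the 64-bit bus address is stored as two little-endian 32-bit
 * halves regardless of host byte order.
 */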
1890 
1891 /*
1892  * Descriptor sync/init functions.
1893  */
1894 static inline void
1895 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1896 {
1897 	struct wm_softc *sc = txq->txq_sc;
1898 
1899 	/* If it will wrap around, sync to the end of the ring. */
1900 	if ((start + num) > WM_NTXDESC(txq)) {
1901 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1902 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
1903 		    (WM_NTXDESC(txq) - start), ops);
1904 		num -= (WM_NTXDESC(txq) - start);
1905 		start = 0;
1906 	}
1907 
1908 	/* Now sync whatever is left. */
1909 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1910 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
1911 }
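
/*
 * Example of the wrap handling above: on a 256-descriptor ring,
 * wm_cdtxsync(txq, 250, 10, ops) syncs descriptors 250..255 first
 * and then 0..3, so a single call covers a range that wraps past
 * the end of the ring.
 */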
1912 
1913 static inline void
1914 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1915 {
1916 	struct wm_softc *sc = rxq->rxq_sc;
1917 
1918 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1919 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
1920 }
1921 
1922 static inline void
1923 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1924 {
1925 	struct wm_softc *sc = rxq->rxq_sc;
1926 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1927 	struct mbuf *m = rxs->rxs_mbuf;
1928 
1929 	/*
1930 	 * Note: We scoot the packet forward 2 bytes in the buffer
1931 	 * so that the payload after the Ethernet header is aligned
1932 	 * to a 4-byte boundary.
1933 	 *
1934 	 * XXX BRAINDAMAGE ALERT!
1935 	 * The stupid chip uses the same size for every buffer, which
1936 	 * is set in the Receive Control register.  We are using the 2K
1937 	 * size option, but what we REALLY want is (2K - 2)!  For this
1938 	 * reason, we can't "scoot" packets longer than the standard
1939 	 * Ethernet MTU.  On strict-alignment platforms, if the total
1940 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
1941 	 * the upper layer copy the headers.
1942 	 */
1943 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
1944 
1945 	if (sc->sc_type == WM_T_82574) {
1946 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
1947 		rxd->erx_data.erxd_addr =
1948 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1949 		rxd->erx_data.erxd_dd = 0;
1950 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
1951 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
1952 
1953 		rxd->nqrx_data.nrxd_paddr =
1954 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1955 		/* Currently, split header is not supported. */
1956 		rxd->nqrx_data.nrxd_haddr = 0;
1957 	} else {
1958 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1959 
1960 		wm_set_dma_addr(&rxd->wrx_addr,
1961 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1962 		rxd->wrx_len = 0;
1963 		rxd->wrx_cksum = 0;
1964 		rxd->wrx_status = 0;
1965 		rxd->wrx_errors = 0;
1966 		rxd->wrx_special = 0;
1967 	}
1968 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1969 
1970 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
1971 }
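
/*
 * Example of the "scoot" above: with sc_align_tweak == 2, the 14-byte
 * Ethernet header starts at offset 2 and the IP header lands at
 * offset 16, a 4-byte boundary.  With sc_align_tweak == 0 the payload
 * may be misaligned and the upper layer copies the headers instead.
 */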
1972 
1973 /*
1974  * Device driver interface functions and commonly used functions.
1975  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1976  */
1977 
1978 /* Lookup supported device table */
1979 static const struct wm_product *
1980 wm_lookup(const struct pci_attach_args *pa)
1981 {
1982 	const struct wm_product *wmp;
1983 
1984 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1985 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1986 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1987 			return wmp;
1988 	}
1989 	return NULL;
1990 }
1991 
1992 /* The match function (ca_match) */
1993 static int
1994 wm_match(device_t parent, cfdata_t cf, void *aux)
1995 {
1996 	struct pci_attach_args *pa = aux;
1997 
1998 	if (wm_lookup(pa) != NULL)
1999 		return 1;
2000 
2001 	return 0;
2002 }
2003 
2004 /* The attach function (ca_attach) */
2005 static void
2006 wm_attach(device_t parent, device_t self, void *aux)
2007 {
2008 	struct wm_softc *sc = device_private(self);
2009 	struct pci_attach_args *pa = aux;
2010 	prop_dictionary_t dict;
2011 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2012 	pci_chipset_tag_t pc = pa->pa_pc;
2013 	int counts[PCI_INTR_TYPE_SIZE];
2014 	pci_intr_type_t max_type;
2015 	const char *eetype, *xname;
2016 	bus_space_tag_t memt;
2017 	bus_space_handle_t memh;
2018 	bus_size_t memsize;
2019 	int memh_valid;
2020 	int i, error;
2021 	const struct wm_product *wmp;
2022 	prop_data_t ea;
2023 	prop_number_t pn;
2024 	uint8_t enaddr[ETHER_ADDR_LEN];
2025 	char buf[256];
2026 	char wqname[MAXCOMLEN];
2027 	uint16_t cfg1, cfg2, swdpin, nvmword;
2028 	pcireg_t preg, memtype;
2029 	uint16_t eeprom_data, apme_mask;
2030 	bool force_clear_smbi;
2031 	uint32_t link_mode;
2032 	uint32_t reg;
2033 
2034 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
2035 	sc->sc_debug = WM_DEBUG_DEFAULT;
2036 #endif
2037 	sc->sc_dev = self;
2038 	callout_init(&sc->sc_tick_ch, CALLOUT_MPSAFE);
2039 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
2040 	sc->sc_core_stopping = false;
2041 
2042 	wmp = wm_lookup(pa);
2043 #ifdef DIAGNOSTIC
2044 	if (wmp == NULL) {
2045 		printf("\n");
2046 		panic("wm_attach: impossible");
2047 	}
2048 #endif
2049 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
2050 
2051 	sc->sc_pc = pa->pa_pc;
2052 	sc->sc_pcitag = pa->pa_tag;
2053 
2054 	if (pci_dma64_available(pa)) {
2055 		aprint_verbose(", 64-bit DMA");
2056 		sc->sc_dmat = pa->pa_dmat64;
2057 	} else {
2058 		aprint_verbose(", 32-bit DMA");
2059 		sc->sc_dmat = pa->pa_dmat;
2060 	}
2061 
2062 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
2063 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
2064 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
2065 
2066 	sc->sc_type = wmp->wmp_type;
2067 
2068 	/* Set default function pointers */
2069 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
2070 	sc->phy.release = sc->nvm.release = wm_put_null;
2071 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
2072 
2073 	if (sc->sc_type < WM_T_82543) {
2074 		if (sc->sc_rev < 2) {
2075 			aprint_error_dev(sc->sc_dev,
2076 			    "i82542 must be at least rev. 2\n");
2077 			return;
2078 		}
2079 		if (sc->sc_rev < 3)
2080 			sc->sc_type = WM_T_82542_2_0;
2081 	}
2082 
2083 	/*
2084 	 * Disable MSI for Errata:
2085 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
2086 	 *
2087 	 *  82544: Errata 25
2088 	 *  82540: Errata  6 (easy to reproduce device timeout)
2089 	 *  82545: Errata  4 (easy to reproduce device timeout)
2090 	 *  82546: Errata 26 (easy to reproduce device timeout)
2091 	 *  82541: Errata  7 (easy to reproduce device timeout)
2092 	 *
2093 	 * "Byte Enables 2 and 3 are not set on MSI writes"
2094 	 *
2095 	 *  82571 & 82572: Errata 63
2096 	 */
2097 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
2098 	    || (sc->sc_type == WM_T_82572))
2099 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
2100 
2101 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2102 	    || (sc->sc_type == WM_T_82580)
2103 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
2104 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
2105 		sc->sc_flags |= WM_F_NEWQUEUE;
2106 
2107 	/* Set device properties (mactype) */
2108 	dict = device_properties(sc->sc_dev);
2109 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
2110 
2111 	/*
2112 	 * Map the device.  All devices support memory-mapped access,
2113 	 * and it is really required for normal operation.
2114 	 */
2115 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
2116 	switch (memtype) {
2117 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
2118 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
2119 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
2120 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
2121 		break;
2122 	default:
2123 		memh_valid = 0;
2124 		break;
2125 	}
2126 
2127 	if (memh_valid) {
2128 		sc->sc_st = memt;
2129 		sc->sc_sh = memh;
2130 		sc->sc_ss = memsize;
2131 	} else {
2132 		aprint_error_dev(sc->sc_dev,
2133 		    "unable to map device registers\n");
2134 		return;
2135 	}
2136 
2137 	/*
2138 	 * In addition, i82544 and later support I/O mapped indirect
2139 	 * register access.  It is not desirable (nor supported in
2140 	 * this driver) to use it for normal operation, though it is
2141 	 * required to work around bugs in some chip versions.
2142 	 */
2143 	switch (sc->sc_type) {
2144 	case WM_T_82544:
2145 	case WM_T_82541:
2146 	case WM_T_82541_2:
2147 	case WM_T_82547:
2148 	case WM_T_82547_2:
2149 		/* First we have to find the I/O BAR. */
2150 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
2151 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
2152 			if (memtype == PCI_MAPREG_TYPE_IO)
2153 				break;
2154 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
2155 			    PCI_MAPREG_MEM_TYPE_64BIT)
2156 				i += 4;	/* skip high bits, too */
2157 		}
2158 		if (i < PCI_MAPREG_END) {
2159 			/*
2160 			 * We found PCI_MAPREG_TYPE_IO. Note that 82580
2161 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
2162 			 * That's not a problem, because newer chips don't
2163 			 * have this bug.
2164 			 *
2165 			 * The i8254x apparently doesn't respond when the
2166 			 * I/O BAR is 0, which looks as if it hasn't been
2167 			 * configured.
2168 			 */
2169 			preg = pci_conf_read(pc, pa->pa_tag, i);
2170 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
2171 				aprint_error_dev(sc->sc_dev,
2172 				    "WARNING: I/O BAR at zero.\n");
2173 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
2174 			    0, &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)
2175 			    == 0) {
2176 				sc->sc_flags |= WM_F_IOH_VALID;
2177 			} else
2178 				aprint_error_dev(sc->sc_dev,
2179 				    "WARNING: unable to map I/O space\n");
2180 		}
2181 		break;
2182 	default:
2183 		break;
2184 	}
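
	/*
	 * The BAR scan above walks config space in 4-byte steps and
	 * skips the extra 4 bytes occupied by the high half of a
	 * 64-bit memory BAR; e.g. with a 64-bit memory BAR at 0x10
	 * and an I/O BAR at 0x18, the I/O BAR is found on the second
	 * iteration.
	 */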
2185 
2186 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
2187 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
2188 	preg |= PCI_COMMAND_MASTER_ENABLE;
2189 	if (sc->sc_type < WM_T_82542_2_1)
2190 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
2191 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
2192 
2193 	/* Power up chip */
2194 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
2195 	    && error != EOPNOTSUPP) {
2196 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
2197 		return;
2198 	}
2199 
2200 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
2201 	/*
2202 	 * Don't use MSI-X if we can use only one queue, to save
2203 	 * interrupt resources.
2204 	 */
2205 	if (sc->sc_nqueues > 1) {
2206 		max_type = PCI_INTR_TYPE_MSIX;
2207 		/*
2208 		 * The 82583 advertises an MSI-X capability in the PCI
2209 		 * configuration space but doesn't support it.  At least
2210 		 * the documentation says nothing about MSI-X.
2211 		 */
2212 		counts[PCI_INTR_TYPE_MSIX]
2213 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
2214 	} else {
2215 		max_type = PCI_INTR_TYPE_MSI;
2216 		counts[PCI_INTR_TYPE_MSIX] = 0;
2217 	}
2218 
2219 	/* Allocation settings */
2220 	counts[PCI_INTR_TYPE_MSI] = 1;
2221 	counts[PCI_INTR_TYPE_INTX] = 1;
2222 	/* overridden by disable flags */
2223 	if (wm_disable_msi != 0) {
2224 		counts[PCI_INTR_TYPE_MSI] = 0;
2225 		if (wm_disable_msix != 0) {
2226 			max_type = PCI_INTR_TYPE_INTX;
2227 			counts[PCI_INTR_TYPE_MSIX] = 0;
2228 		}
2229 	} else if (wm_disable_msix != 0) {
2230 		max_type = PCI_INTR_TYPE_MSI;
2231 		counts[PCI_INTR_TYPE_MSIX] = 0;
2232 	}
2233 
2234 alloc_retry:
2235 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
2236 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
2237 		return;
2238 	}
2239 
2240 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
2241 		error = wm_setup_msix(sc);
2242 		if (error) {
2243 			pci_intr_release(pc, sc->sc_intrs,
2244 			    counts[PCI_INTR_TYPE_MSIX]);
2245 
2246 			/* Setup for MSI: Disable MSI-X */
2247 			max_type = PCI_INTR_TYPE_MSI;
2248 			counts[PCI_INTR_TYPE_MSI] = 1;
2249 			counts[PCI_INTR_TYPE_INTX] = 1;
2250 			goto alloc_retry;
2251 		}
2252 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
2253 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
2254 		error = wm_setup_legacy(sc);
2255 		if (error) {
2256 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
2257 			    counts[PCI_INTR_TYPE_MSI]);
2258 
2259 			/* The next try is for INTx: Disable MSI */
2260 			max_type = PCI_INTR_TYPE_INTX;
2261 			counts[PCI_INTR_TYPE_INTX] = 1;
2262 			goto alloc_retry;
2263 		}
2264 	} else {
2265 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
2266 		error = wm_setup_legacy(sc);
2267 		if (error) {
2268 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
2269 			    counts[PCI_INTR_TYPE_INTX]);
2270 			return;
2271 		}
2272 	}
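
	/*
	 * In short, interrupt allocation falls back MSI-X -> MSI ->
	 * INTx: each failed wm_setup_*() releases the vectors it was
	 * given, lowers max_type and retries from alloc_retry until
	 * plain INTx either works or the attach fails.
	 */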
2273 
2274 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
2275 	error = workqueue_create(&sc->sc_queue_wq, wqname,
2276 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
2277 	    WQ_PERCPU | WQ_MPSAFE);
2278 	if (error) {
2279 		aprint_error_dev(sc->sc_dev,
2280 		    "unable to create TxRx workqueue\n");
2281 		goto out;
2282 	}
2283 
2284 	snprintf(wqname, sizeof(wqname), "%sReset", device_xname(sc->sc_dev));
2285 	error = workqueue_create(&sc->sc_reset_wq, wqname,
2286 	    wm_handle_reset_work, sc, WM_WORKQUEUE_PRI, IPL_SOFTCLOCK,
2287 	    WQ_MPSAFE);
2288 	if (error) {
2289 		workqueue_destroy(sc->sc_queue_wq);
2290 		aprint_error_dev(sc->sc_dev,
2291 		    "unable to create reset workqueue\n");
2292 		goto out;
2293 	}
2294 
2295 	/*
2296 	 * Check the function ID (unit number of the chip).
2297 	 */
2298 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
2299 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
2300 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2301 	    || (sc->sc_type == WM_T_82580)
2302 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2303 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
2304 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
2305 	else
2306 		sc->sc_funcid = 0;
2307 
2308 	/*
2309 	 * Determine a few things about the bus we're connected to.
2310 	 */
2311 	if (sc->sc_type < WM_T_82543) {
2312 		/* We don't really know the bus characteristics here. */
2313 		sc->sc_bus_speed = 33;
2314 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
2315 		/*
2316 		 * CSA (Communication Streaming Architecture) is about as fast
2317 		 * as a 32-bit 66MHz PCI bus.
2318 		 */
2319 		sc->sc_flags |= WM_F_CSA;
2320 		sc->sc_bus_speed = 66;
2321 		aprint_verbose_dev(sc->sc_dev,
2322 		    "Communication Streaming Architecture\n");
2323 		if (sc->sc_type == WM_T_82547) {
2324 			callout_init(&sc->sc_txfifo_ch, CALLOUT_MPSAFE);
2325 			callout_setfunc(&sc->sc_txfifo_ch,
2326 			    wm_82547_txfifo_stall, sc);
2327 			aprint_verbose_dev(sc->sc_dev,
2328 			    "using 82547 Tx FIFO stall work-around\n");
2329 		}
2330 	} else if (sc->sc_type >= WM_T_82571) {
2331 		sc->sc_flags |= WM_F_PCIE;
2332 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
2333 		    && (sc->sc_type != WM_T_ICH10)
2334 		    && (sc->sc_type != WM_T_PCH)
2335 		    && (sc->sc_type != WM_T_PCH2)
2336 		    && (sc->sc_type != WM_T_PCH_LPT)
2337 		    && (sc->sc_type != WM_T_PCH_SPT)
2338 		    && (sc->sc_type != WM_T_PCH_CNP)
2339 		    && (sc->sc_type != WM_T_PCH_TGP)) {
2340 			/* ICH* and PCH* have no PCIe capability registers */
2341 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2342 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
2343 				NULL) == 0)
2344 				aprint_error_dev(sc->sc_dev,
2345 				    "unable to find PCIe capability\n");
2346 		}
2347 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
2348 	} else {
2349 		reg = CSR_READ(sc, WMREG_STATUS);
2350 		if (reg & STATUS_BUS64)
2351 			sc->sc_flags |= WM_F_BUS64;
2352 		if ((reg & STATUS_PCIX_MODE) != 0) {
2353 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
2354 
2355 			sc->sc_flags |= WM_F_PCIX;
2356 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2357 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
2358 				aprint_error_dev(sc->sc_dev,
2359 				    "unable to find PCIX capability\n");
2360 			else if (sc->sc_type != WM_T_82545_3 &&
2361 			    sc->sc_type != WM_T_82546_3) {
2362 				/*
2363 				 * Work around a problem caused by the BIOS
2364 				 * setting the max memory read byte count
2365 				 * incorrectly.
2366 				 */
2367 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
2368 				    sc->sc_pcixe_capoff + PCIX_CMD);
2369 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
2370 				    sc->sc_pcixe_capoff + PCIX_STATUS);
2371 
2372 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
2373 				    PCIX_CMD_BYTECNT_SHIFT;
2374 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
2375 				    PCIX_STATUS_MAXB_SHIFT;
2376 				if (bytecnt > maxb) {
2377 					aprint_verbose_dev(sc->sc_dev,
2378 					    "resetting PCI-X MMRBC: %d -> %d\n",
2379 					    512 << bytecnt, 512 << maxb);
2380 					pcix_cmd = (pcix_cmd &
2381 					    ~PCIX_CMD_BYTECNT_MASK) |
2382 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
2383 					pci_conf_write(pa->pa_pc, pa->pa_tag,
2384 					    sc->sc_pcixe_capoff + PCIX_CMD,
2385 					    pcix_cmd);
2386 				}
2387 			}
2388 		}
2389 		/*
2390 		 * The quad port adapter is special; it has a PCIX-PCIX
2391 		 * bridge on the board, and can run the secondary bus at
2392 		 * a higher speed.
2393 		 */
2394 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
2395 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
2396 								      : 66;
2397 		} else if (sc->sc_flags & WM_F_PCIX) {
2398 			switch (reg & STATUS_PCIXSPD_MASK) {
2399 			case STATUS_PCIXSPD_50_66:
2400 				sc->sc_bus_speed = 66;
2401 				break;
2402 			case STATUS_PCIXSPD_66_100:
2403 				sc->sc_bus_speed = 100;
2404 				break;
2405 			case STATUS_PCIXSPD_100_133:
2406 				sc->sc_bus_speed = 133;
2407 				break;
2408 			default:
2409 				aprint_error_dev(sc->sc_dev,
2410 				    "unknown PCIXSPD %d; assuming 66MHz\n",
2411 				    reg & STATUS_PCIXSPD_MASK);
2412 				sc->sc_bus_speed = 66;
2413 				break;
2414 			}
2415 		} else
2416 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
2417 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
2418 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
2419 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
2420 	}
2421 
2422 	/* clear interesting stat counters */
2423 	CSR_READ(sc, WMREG_COLC);
2424 	CSR_READ(sc, WMREG_RXERRC);
2425 
2426 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
2427 	    || (sc->sc_type >= WM_T_ICH8))
2428 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2429 	if (sc->sc_type >= WM_T_ICH8)
2430 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2431 
2432 	/* Set PHY, NVM mutex related stuff */
2433 	switch (sc->sc_type) {
2434 	case WM_T_82542_2_0:
2435 	case WM_T_82542_2_1:
2436 	case WM_T_82543:
2437 	case WM_T_82544:
2438 		/* Microwire */
2439 		sc->nvm.read = wm_nvm_read_uwire;
2440 		sc->sc_nvm_wordsize = 64;
2441 		sc->sc_nvm_addrbits = 6;
2442 		break;
2443 	case WM_T_82540:
2444 	case WM_T_82545:
2445 	case WM_T_82545_3:
2446 	case WM_T_82546:
2447 	case WM_T_82546_3:
2448 		/* Microwire */
2449 		sc->nvm.read = wm_nvm_read_uwire;
2450 		reg = CSR_READ(sc, WMREG_EECD);
2451 		if (reg & EECD_EE_SIZE) {
2452 			sc->sc_nvm_wordsize = 256;
2453 			sc->sc_nvm_addrbits = 8;
2454 		} else {
2455 			sc->sc_nvm_wordsize = 64;
2456 			sc->sc_nvm_addrbits = 6;
2457 		}
2458 		sc->sc_flags |= WM_F_LOCK_EECD;
2459 		sc->nvm.acquire = wm_get_eecd;
2460 		sc->nvm.release = wm_put_eecd;
2461 		break;
2462 	case WM_T_82541:
2463 	case WM_T_82541_2:
2464 	case WM_T_82547:
2465 	case WM_T_82547_2:
2466 		reg = CSR_READ(sc, WMREG_EECD);
2467 		/*
2468 		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI only on the
2469 		 * 8254[17], so set the flags and functions before calling it.
2470 		 */
2471 		sc->sc_flags |= WM_F_LOCK_EECD;
2472 		sc->nvm.acquire = wm_get_eecd;
2473 		sc->nvm.release = wm_put_eecd;
2474 		if (reg & EECD_EE_TYPE) {
2475 			/* SPI */
2476 			sc->nvm.read = wm_nvm_read_spi;
2477 			sc->sc_flags |= WM_F_EEPROM_SPI;
2478 			wm_nvm_set_addrbits_size_eecd(sc);
2479 		} else {
2480 			/* Microwire */
2481 			sc->nvm.read = wm_nvm_read_uwire;
2482 			if ((reg & EECD_EE_ABITS) != 0) {
2483 				sc->sc_nvm_wordsize = 256;
2484 				sc->sc_nvm_addrbits = 8;
2485 			} else {
2486 				sc->sc_nvm_wordsize = 64;
2487 				sc->sc_nvm_addrbits = 6;
2488 			}
2489 		}
2490 		break;
2491 	case WM_T_82571:
2492 	case WM_T_82572:
2493 		/* SPI */
2494 		sc->nvm.read = wm_nvm_read_eerd;
2495 		/* Don't use WM_F_LOCK_EECD because we use EERD */
2496 		sc->sc_flags |= WM_F_EEPROM_SPI;
2497 		wm_nvm_set_addrbits_size_eecd(sc);
2498 		sc->phy.acquire = wm_get_swsm_semaphore;
2499 		sc->phy.release = wm_put_swsm_semaphore;
2500 		sc->nvm.acquire = wm_get_nvm_82571;
2501 		sc->nvm.release = wm_put_nvm_82571;
2502 		break;
2503 	case WM_T_82573:
2504 	case WM_T_82574:
2505 	case WM_T_82583:
2506 		sc->nvm.read = wm_nvm_read_eerd;
2507 		/* Don't use WM_F_LOCK_EECD because we use EERD */
2508 		if (sc->sc_type == WM_T_82573) {
2509 			sc->phy.acquire = wm_get_swsm_semaphore;
2510 			sc->phy.release = wm_put_swsm_semaphore;
2511 			sc->nvm.acquire = wm_get_nvm_82571;
2512 			sc->nvm.release = wm_put_nvm_82571;
2513 		} else {
2514 			/* Both PHY and NVM use the same semaphore. */
2515 			sc->phy.acquire = sc->nvm.acquire
2516 			    = wm_get_swfwhw_semaphore;
2517 			sc->phy.release = sc->nvm.release
2518 			    = wm_put_swfwhw_semaphore;
2519 		}
2520 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
2521 			sc->sc_flags |= WM_F_EEPROM_FLASH;
2522 			sc->sc_nvm_wordsize = 2048;
2523 		} else {
2524 			/* SPI */
2525 			sc->sc_flags |= WM_F_EEPROM_SPI;
2526 			wm_nvm_set_addrbits_size_eecd(sc);
2527 		}
2528 		break;
2529 	case WM_T_82575:
2530 	case WM_T_82576:
2531 	case WM_T_82580:
2532 	case WM_T_I350:
2533 	case WM_T_I354:
2534 	case WM_T_80003:
2535 		/* SPI */
2536 		sc->sc_flags |= WM_F_EEPROM_SPI;
2537 		wm_nvm_set_addrbits_size_eecd(sc);
2538 		if ((sc->sc_type == WM_T_80003)
2539 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
2540 			sc->nvm.read = wm_nvm_read_eerd;
2541 			/* Don't use WM_F_LOCK_EECD because we use EERD */
2542 		} else {
2543 			sc->nvm.read = wm_nvm_read_spi;
2544 			sc->sc_flags |= WM_F_LOCK_EECD;
2545 		}
2546 		sc->phy.acquire = wm_get_phy_82575;
2547 		sc->phy.release = wm_put_phy_82575;
2548 		sc->nvm.acquire = wm_get_nvm_80003;
2549 		sc->nvm.release = wm_put_nvm_80003;
2550 		break;
2551 	case WM_T_ICH8:
2552 	case WM_T_ICH9:
2553 	case WM_T_ICH10:
2554 	case WM_T_PCH:
2555 	case WM_T_PCH2:
2556 	case WM_T_PCH_LPT:
2557 		sc->nvm.read = wm_nvm_read_ich8;
2558 		/* FLASH */
2559 		sc->sc_flags |= WM_F_EEPROM_FLASH;
2560 		sc->sc_nvm_wordsize = 2048;
2561 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
2562 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2563 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2564 			aprint_error_dev(sc->sc_dev,
2565 			    "can't map FLASH registers\n");
2566 			goto out;
2567 		}
2568 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2569 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2570 		    ICH_FLASH_SECTOR_SIZE;
2571 		sc->sc_ich8_flash_bank_size =
2572 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2573 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
2574 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2575 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
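		/*
		 * That is: bank size = (limit + 1 - base) flash sectors,
		 * converted to bytes, then divided by 2 * sizeof(uint16_t)
		 * because the region holds two banks of 16-bit words.
		 */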
2576 		sc->sc_flashreg_offset = 0;
2577 		sc->phy.acquire = wm_get_swflag_ich8lan;
2578 		sc->phy.release = wm_put_swflag_ich8lan;
2579 		sc->nvm.acquire = wm_get_nvm_ich8lan;
2580 		sc->nvm.release = wm_put_nvm_ich8lan;
2581 		break;
2582 	case WM_T_PCH_SPT:
2583 	case WM_T_PCH_CNP:
2584 	case WM_T_PCH_TGP:
2585 		sc->nvm.read = wm_nvm_read_spt;
2586 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
2587 		sc->sc_flags |= WM_F_EEPROM_FLASH;
2588 		sc->sc_flasht = sc->sc_st;
2589 		sc->sc_flashh = sc->sc_sh;
2590 		sc->sc_ich8_flash_base = 0;
2591 		sc->sc_nvm_wordsize =
2592 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
2593 		    * NVM_SIZE_MULTIPLIER;
2594 		/* It is the size in bytes; we want words */
2595 		sc->sc_nvm_wordsize /= 2;
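		/*
		 * Worked example (assuming NVM_SIZE_MULTIPLIER is the flash
		 * size granularity in bytes): a STRAP size field of 7 gives
		 * 8 * NVM_SIZE_MULTIPLIER bytes, i.e. 4 * NVM_SIZE_MULTIPLIER
		 * 16-bit words.
		 */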
2596 		/* Assume 2 banks */
2597 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
2598 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
2599 		sc->phy.acquire = wm_get_swflag_ich8lan;
2600 		sc->phy.release = wm_put_swflag_ich8lan;
2601 		sc->nvm.acquire = wm_get_nvm_ich8lan;
2602 		sc->nvm.release = wm_put_nvm_ich8lan;
2603 		break;
2604 	case WM_T_I210:
2605 	case WM_T_I211:
2606 		/* Allow a single clear of the SW semaphore on I210 and newer */
2607 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
2608 		if (wm_nvm_flash_presence_i210(sc)) {
2609 			sc->nvm.read = wm_nvm_read_eerd;
2610 			/* Don't use WM_F_LOCK_EECD because we use EERD */
2611 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2612 			wm_nvm_set_addrbits_size_eecd(sc);
2613 		} else {
2614 			sc->nvm.read = wm_nvm_read_invm;
2615 			sc->sc_flags |= WM_F_EEPROM_INVM;
2616 			sc->sc_nvm_wordsize = INVM_SIZE;
2617 		}
2618 		sc->phy.acquire = wm_get_phy_82575;
2619 		sc->phy.release = wm_put_phy_82575;
2620 		sc->nvm.acquire = wm_get_nvm_80003;
2621 		sc->nvm.release = wm_put_nvm_80003;
2622 		break;
2623 	default:
2624 		break;
2625 	}
2626 
2627 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
2628 	switch (sc->sc_type) {
2629 	case WM_T_82571:
2630 	case WM_T_82572:
2631 		reg = CSR_READ(sc, WMREG_SWSM2);
2632 		if ((reg & SWSM2_LOCK) == 0) {
2633 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2634 			force_clear_smbi = true;
2635 		} else
2636 			force_clear_smbi = false;
2637 		break;
2638 	case WM_T_82573:
2639 	case WM_T_82574:
2640 	case WM_T_82583:
2641 		force_clear_smbi = true;
2642 		break;
2643 	default:
2644 		force_clear_smbi = false;
2645 		break;
2646 	}
2647 	if (force_clear_smbi) {
2648 		reg = CSR_READ(sc, WMREG_SWSM);
2649 		if ((reg & SWSM_SMBI) != 0)
2650 			aprint_error_dev(sc->sc_dev,
2651 			    "Please update the Bootagent\n");
2652 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2653 	}
2654 
2655 	/*
2656 	 * Defer printing the EEPROM type until after verifying the checksum.
2657 	 * This allows the EEPROM type to be printed correctly in the case
2658 	 * that no EEPROM is attached.
2659 	 */
2660 	/*
2661 	 * Validate the EEPROM checksum. If the checksum fails, flag
2662 	 * this for later, so we can fail future reads from the EEPROM.
2663 	 */
2664 	if (wm_nvm_validate_checksum(sc)) {
2665 		/*
2666 		 * Read again, because some PCIe parts fail the first
2667 		 * check due to the link being in a sleep state.
2668 		 */
2669 		if (wm_nvm_validate_checksum(sc))
2670 			sc->sc_flags |= WM_F_EEPROM_INVALID;
2671 	}
2672 
2673 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
2674 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2675 	else {
2676 		aprint_verbose_dev(sc->sc_dev, "%u words ",
2677 		    sc->sc_nvm_wordsize);
2678 		if (sc->sc_flags & WM_F_EEPROM_INVM)
2679 			aprint_verbose("iNVM");
2680 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2681 			aprint_verbose("FLASH(HW)");
2682 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2683 			aprint_verbose("FLASH");
2684 		else {
2685 			if (sc->sc_flags & WM_F_EEPROM_SPI)
2686 				eetype = "SPI";
2687 			else
2688 				eetype = "MicroWire";
2689 			aprint_verbose("(%d address bits) %s EEPROM",
2690 			    sc->sc_nvm_addrbits, eetype);
2691 		}
2692 	}
2693 	wm_nvm_version(sc);
2694 	aprint_verbose("\n");
2695 
2696 	/*
2697 	 * XXX The first call to wm_gmii_setup_phytype().  The result
2698 	 * might be incorrect.
2699 	 */
2700 	wm_gmii_setup_phytype(sc, 0, 0);
2701 
2702 	/* Check for WM_F_WOL on some chips before wm_reset() */
2703 	switch (sc->sc_type) {
2704 	case WM_T_ICH8:
2705 	case WM_T_ICH9:
2706 	case WM_T_ICH10:
2707 	case WM_T_PCH:
2708 	case WM_T_PCH2:
2709 	case WM_T_PCH_LPT:
2710 	case WM_T_PCH_SPT:
2711 	case WM_T_PCH_CNP:
2712 	case WM_T_PCH_TGP:
2713 		apme_mask = WUC_APME;
2714 		eeprom_data = CSR_READ(sc, WMREG_WUC);
2715 		if ((eeprom_data & apme_mask) != 0)
2716 			sc->sc_flags |= WM_F_WOL;
2717 		break;
2718 	default:
2719 		break;
2720 	}
2721 
2722 	/* Reset the chip to a known state. */
2723 	wm_reset(sc);
2724 
2725 	/* sc->sc_pba is set in wm_reset(). */
2726 	aprint_verbose_dev(sc->sc_dev, "RX packet buffer size: %uKB\n",
2727 	    sc->sc_pba);
2728 
2729 	/*
2730 	 * Check for I21[01] PLL workaround.
2731 	 *
2732 	 * Three cases:
2733 	 * a) Chip is I211.
2734 	 * b) Chip is I210 and it uses INVM (not FLASH).
2735 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
2736 	 */
2737 	if (sc->sc_type == WM_T_I211)
2738 		sc->sc_flags |= WM_F_PLL_WA_I210;
2739 	if (sc->sc_type == WM_T_I210) {
2740 		if (!wm_nvm_flash_presence_i210(sc))
2741 			sc->sc_flags |= WM_F_PLL_WA_I210;
2742 		else if ((sc->sc_nvm_ver_major < 3)
2743 		    || ((sc->sc_nvm_ver_major == 3)
2744 			&& (sc->sc_nvm_ver_minor < 25))) {
2745 			aprint_verbose_dev(sc->sc_dev,
2746 			    "ROM image version %d.%d is older than 3.25\n",
2747 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2748 			sc->sc_flags |= WM_F_PLL_WA_I210;
2749 		}
2750 	}
2751 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2752 		wm_pll_workaround_i210(sc);
2753 
2754 	wm_get_wakeup(sc);
2755 
2756 	/* Non-AMT based hardware can now take control from firmware */
2757 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2758 		wm_get_hw_control(sc);
2759 
2760 	/*
2761 	 * Read the Ethernet address from the EEPROM, unless it was
2762 	 * already found in the device properties.
2763 	 */
2764 	ea = prop_dictionary_get(dict, "mac-address");
2765 	if (ea != NULL) {
2766 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2767 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2768 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
2769 	} else {
2770 		if (wm_read_mac_addr(sc, enaddr) != 0) {
2771 			aprint_error_dev(sc->sc_dev,
2772 			    "unable to read Ethernet address\n");
2773 			goto out;
2774 		}
2775 	}
2776 
2777 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2778 	    ether_sprintf(enaddr));
2779 
2780 	/*
2781 	 * Read the config info from the EEPROM, and set up various
2782 	 * bits in the control registers based on their contents.
2783 	 */
2784 	pn = prop_dictionary_get(dict, "i82543-cfg1");
2785 	if (pn != NULL) {
2786 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2787 		cfg1 = (uint16_t) prop_number_signed_value(pn);
2788 	} else {
2789 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2790 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2791 			goto out;
2792 		}
2793 	}
2794 
2795 	pn = prop_dictionary_get(dict, "i82543-cfg2");
2796 	if (pn != NULL) {
2797 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2798 		cfg2 = (uint16_t) prop_number_signed_value(pn);
2799 	} else {
2800 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2801 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2802 			goto out;
2803 		}
2804 	}
2805 
2806 	/* Check for WM_F_WOL */
2807 	switch (sc->sc_type) {
2808 	case WM_T_82542_2_0:
2809 	case WM_T_82542_2_1:
2810 	case WM_T_82543:
2811 		/* dummy? */
2812 		eeprom_data = 0;
2813 		apme_mask = NVM_CFG3_APME;
2814 		break;
2815 	case WM_T_82544:
2816 		apme_mask = NVM_CFG2_82544_APM_EN;
2817 		eeprom_data = cfg2;
2818 		break;
2819 	case WM_T_82546:
2820 	case WM_T_82546_3:
2821 	case WM_T_82571:
2822 	case WM_T_82572:
2823 	case WM_T_82573:
2824 	case WM_T_82574:
2825 	case WM_T_82583:
2826 	case WM_T_80003:
2827 	case WM_T_82575:
2828 	case WM_T_82576:
2829 		apme_mask = NVM_CFG3_APME;
2830 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2831 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2832 		break;
2833 	case WM_T_82580:
2834 	case WM_T_I350:
2835 	case WM_T_I354:
2836 	case WM_T_I210:
2837 	case WM_T_I211:
2838 		apme_mask = NVM_CFG3_APME;
2839 		wm_nvm_read(sc,
2840 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2841 		    1, &eeprom_data);
2842 		break;
2843 	case WM_T_ICH8:
2844 	case WM_T_ICH9:
2845 	case WM_T_ICH10:
2846 	case WM_T_PCH:
2847 	case WM_T_PCH2:
2848 	case WM_T_PCH_LPT:
2849 	case WM_T_PCH_SPT:
2850 	case WM_T_PCH_CNP:
2851 	case WM_T_PCH_TGP:
2852 		/* Already checked before wm_reset() */
2853 		apme_mask = eeprom_data = 0;
2854 		break;
2855 	default: /* XXX 82540 */
2856 		apme_mask = NVM_CFG3_APME;
2857 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2858 		break;
2859 	}
2860 	/* Check for the WM_F_WOL flag after reading the EEPROM settings */
2861 	if ((eeprom_data & apme_mask) != 0)
2862 		sc->sc_flags |= WM_F_WOL;
2863 
2864 	/*
2865 	 * We have the EEPROM settings; now apply the special cases
2866 	 * where the EEPROM may be wrong or the board won't support
2867 	 * wake-on-LAN on a particular port.
2868 	 */
2869 	switch (sc->sc_pcidevid) {
2870 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
2871 		sc->sc_flags &= ~WM_F_WOL;
2872 		break;
2873 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
2874 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
2875 		/* Wake events only supported on port A for dual fiber
2876 		 * regardless of eeprom setting */
2877 		if (sc->sc_funcid == 1)
2878 			sc->sc_flags &= ~WM_F_WOL;
2879 		break;
2880 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
2881 		/* If quad port adapter, disable WoL on all but port A */
2882 		if (sc->sc_funcid != 0)
2883 			sc->sc_flags &= ~WM_F_WOL;
2884 		break;
2885 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
2886 		/* Wake events only supported on port A for dual fiber
2887 		 * regardless of eeprom setting */
2888 		if (sc->sc_funcid == 1)
2889 			sc->sc_flags &= ~WM_F_WOL;
2890 		break;
2891 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
2892 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
2893 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
2894 		/* If quad port adapter, disable WoL on all but port A */
2895 		if (sc->sc_funcid != 0)
2896 			sc->sc_flags &= ~WM_F_WOL;
2897 		break;
2898 	}
2899 
2900 	if (sc->sc_type >= WM_T_82575) {
2901 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2902 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
2903 			    nvmword);
2904 			if ((sc->sc_type == WM_T_82575) ||
2905 			    (sc->sc_type == WM_T_82576)) {
2906 				/* Check NVM for autonegotiation */
2907 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
2908 				    != 0)
2909 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2910 			}
2911 			if ((sc->sc_type == WM_T_82575) ||
2912 			    (sc->sc_type == WM_T_I350)) {
2913 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
2914 					sc->sc_flags |= WM_F_MAS;
2915 			}
2916 		}
2917 	}
2918 
2919 	/*
2920 	 * XXX Need special handling for some multiple-port cards
2921 	 * to disable a particular port.
2922 	 */
2923 
2924 	if (sc->sc_type >= WM_T_82544) {
2925 		pn = prop_dictionary_get(dict, "i82543-swdpin");
2926 		if (pn != NULL) {
2927 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2928 			swdpin = (uint16_t) prop_number_signed_value(pn);
2929 		} else {
2930 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2931 				aprint_error_dev(sc->sc_dev,
2932 				    "unable to read SWDPIN\n");
2933 				goto out;
2934 			}
2935 		}
2936 	}
2937 
2938 	if (cfg1 & NVM_CFG1_ILOS)
2939 		sc->sc_ctrl |= CTRL_ILOS;
2940 
2941 	/*
2942 	 * XXX
2943 	 * This code isn't correct because pins 2 and 3 are located
2944 	 * at different positions on newer chips. Check all datasheets.
2945 	 *
2946 	 * Until this is resolved, only do it for chips <= 82580.
2947 	 */
2948 	if (sc->sc_type <= WM_T_82580) {
2949 		if (sc->sc_type >= WM_T_82544) {
2950 			sc->sc_ctrl |=
2951 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2952 			    CTRL_SWDPIO_SHIFT;
2953 			sc->sc_ctrl |=
2954 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2955 			    CTRL_SWDPINS_SHIFT;
2956 		} else {
2957 			sc->sc_ctrl |=
2958 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2959 			    CTRL_SWDPIO_SHIFT;
2960 		}
2961 	}
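	/*
	 * Each OR above moves a 4-bit field from the NVM word into the
	 * matching CTRL field.  Worked example (values hypothetical):
	 * if swdpin were 0x00f0 and NVM_SWDPIN_SWDPIO_SHIFT were 4,
	 * ((0x00f0 >> 4) & 0xf) == 0xf would be placed at
	 * CTRL_SWDPIO_SHIFT in sc_ctrl.
	 */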
2962 
2963 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
2964 		wm_nvm_read(sc,
2965 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2966 		    1, &nvmword);
2967 		if (nvmword & NVM_CFG3_ILOS)
2968 			sc->sc_ctrl |= CTRL_ILOS;
2969 	}
2970 
2971 #if 0
2972 	if (sc->sc_type >= WM_T_82544) {
2973 		if (cfg1 & NVM_CFG1_IPS0)
2974 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2975 		if (cfg1 & NVM_CFG1_IPS1)
2976 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2977 		sc->sc_ctrl_ext |=
2978 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2979 		    CTRL_EXT_SWDPIO_SHIFT;
2980 		sc->sc_ctrl_ext |=
2981 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2982 		    CTRL_EXT_SWDPINS_SHIFT;
2983 	} else {
2984 		sc->sc_ctrl_ext |=
2985 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2986 		    CTRL_EXT_SWDPIO_SHIFT;
2987 	}
2988 #endif
2989 
2990 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2991 #if 0
2992 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2993 #endif
2994 
2995 	if (sc->sc_type == WM_T_PCH) {
2996 		uint16_t val;
2997 
2998 		/* Save the NVM K1 bit setting */
2999 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
3000 
3001 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
3002 			sc->sc_nvm_k1_enabled = 1;
3003 		else
3004 			sc->sc_nvm_k1_enabled = 0;
3005 	}
3006 
3007 	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
3008 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
3009 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
3010 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
3011 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
3012 	    || sc->sc_type == WM_T_PCH_TGP
3013 	    || sc->sc_type == WM_T_82573
3014 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
3015 		/* Copper only */
3016 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3017 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
3018 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
3019 	    || (sc->sc_type == WM_T_I211)) {
3020 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
3021 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
3022 		switch (link_mode) {
3023 		case CTRL_EXT_LINK_MODE_1000KX:
3024 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
3025 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
3026 			break;
3027 		case CTRL_EXT_LINK_MODE_SGMII:
3028 			if (wm_sgmii_uses_mdio(sc)) {
3029 				aprint_normal_dev(sc->sc_dev,
3030 				    "SGMII(MDIO)\n");
3031 				sc->sc_flags |= WM_F_SGMII;
3032 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
3033 				break;
3034 			}
3035 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
3036 			/*FALLTHROUGH*/
3037 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
3038 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
3039 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
3040 				if (link_mode
3041 				    == CTRL_EXT_LINK_MODE_SGMII) {
3042 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
3043 					sc->sc_flags |= WM_F_SGMII;
3044 					aprint_verbose_dev(sc->sc_dev,
3045 					    "SGMII\n");
3046 				} else {
3047 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
3048 					aprint_verbose_dev(sc->sc_dev,
3049 					    "SERDES\n");
3050 				}
3051 				break;
3052 			}
3053 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
3054 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
3055 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
3056 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
3057 				sc->sc_flags |= WM_F_SGMII;
3058 			}
3059 			/* Do not change link mode for 100BaseFX */
3060 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
3061 				break;
3062 
3063 			/* Change current link mode setting */
3064 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
3065 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
3066 				reg |= CTRL_EXT_LINK_MODE_SGMII;
3067 			else
3068 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
3069 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3070 			break;
3071 		case CTRL_EXT_LINK_MODE_GMII:
3072 		default:
3073 			aprint_normal_dev(sc->sc_dev, "Copper\n");
3074 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
3075 			break;
3076 		}
3077 
3078 		/* Enable the I2C interface only when using SGMII. */
3079 		if ((sc->sc_flags & WM_F_SGMII) != 0)
3080 			reg |= CTRL_EXT_I2C_ENA;
3081 		else
3082 			reg &= ~CTRL_EXT_I2C_ENA;
3083 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3084 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
3085 			if (!wm_sgmii_uses_mdio(sc))
3086 				wm_gmii_setup_phytype(sc, 0, 0);
3087 			wm_reset_mdicnfg_82580(sc);
3088 		}
3089 	} else if (sc->sc_type < WM_T_82543 ||
3090 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
3091 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
3092 			aprint_error_dev(sc->sc_dev,
3093 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
3094 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
3095 		}
3096 	} else {
3097 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
3098 			aprint_error_dev(sc->sc_dev,
3099 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
3100 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
3101 		}
3102 	}
3103 
3104 	if (sc->sc_type >= WM_T_PCH2)
3105 		sc->sc_flags |= WM_F_EEE;
3106 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
3107 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
3108 		/* XXX: Need special handling for I354. (not yet) */
3109 		if (sc->sc_type != WM_T_I354)
3110 			sc->sc_flags |= WM_F_EEE;
3111 	}
3112 
3113 	/*
3114 	 * The I350 has a bug where it always strips the CRC whether
3115 	 * asked to or not, so ask for stripped CRC here and cope with it in rxeof.
3116 	 */
3117 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3118 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3119 		sc->sc_flags |= WM_F_CRC_STRIP;
3120 
3121 	/*
3122 	 * Workaround for some chips to delay sending LINK_STATE_UP.
3123 	 * Some systems can't send packets soon after link-up. See also
3124 	 * wm_linkintr_gmii(), wm_tick() and wm_gmii_mediastatus().
3125 	 */
3126 	switch (sc->sc_type) {
3127 	case WM_T_I350:
3128 	case WM_T_I354:
3129 	case WM_T_I210:
3130 	case WM_T_I211:
3131 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
3132 			sc->sc_flags |= WM_F_DELAY_LINKUP;
3133 		break;
3134 	default:
3135 		break;
3136 	}
3137 
3138 	/* Set device properties (macflags) */
3139 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
3140 
3141 	if (sc->sc_flags != 0) {
3142 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
3143 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
3144 	}
3145 
3146 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
3147 
3148 	/* Initialize the media structures accordingly. */
3149 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
3150 		wm_gmii_mediainit(sc, wmp->wmp_product);
3151 	else
3152 		wm_tbi_mediainit(sc); /* All others */
3153 
3154 	ifp = &sc->sc_ethercom.ec_if;
3155 	xname = device_xname(sc->sc_dev);
3156 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
3157 	ifp->if_softc = sc;
3158 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3159 	ifp->if_extflags = IFEF_MPSAFE;
3160 	ifp->if_ioctl = wm_ioctl;
3161 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3162 		ifp->if_start = wm_nq_start;
3163 		/*
3164 		 * When the number of CPUs is one and the controller can use
3165 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
3166 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
3167 		 * the other for link status changes.
3168 		 * In this situation, wm_nq_transmit() is disadvantageous
3169 		 * because of wm_select_txqueue() and pcq(9) overhead.
3170 		 */
3171 		if (wm_is_using_multiqueue(sc))
3172 			ifp->if_transmit = wm_nq_transmit;
3173 	} else {
3174 		ifp->if_start = wm_start;
3175 		/*
3176 		 * wm_transmit() has the same disadvantages as wm_nq_transmit()
3177 		 * described above.
3178 		 */
3179 		if (wm_is_using_multiqueue(sc))
3180 			ifp->if_transmit = wm_transmit;
3181 	}
3182 	/* wm(4) does not use ifp->if_watchdog; wm_tick serves as the watchdog. */
3183 	ifp->if_init = wm_init;
3184 	ifp->if_stop = wm_stop;
3185 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
3186 	IFQ_SET_READY(&ifp->if_snd);
3187 
3188 	/* Check for jumbo frame support */
3189 	switch (sc->sc_type) {
3190 	case WM_T_82573:
3191 		/* XXX limited to 9234 if ASPM is disabled */
3192 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
3193 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
3194 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3195 		break;
3196 	case WM_T_82571:
3197 	case WM_T_82572:
3198 	case WM_T_82574:
3199 	case WM_T_82583:
3200 	case WM_T_82575:
3201 	case WM_T_82576:
3202 	case WM_T_82580:
3203 	case WM_T_I350:
3204 	case WM_T_I354:
3205 	case WM_T_I210:
3206 	case WM_T_I211:
3207 	case WM_T_80003:
3208 	case WM_T_ICH9:
3209 	case WM_T_ICH10:
3210 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
3211 	case WM_T_PCH_LPT:
3212 	case WM_T_PCH_SPT:
3213 	case WM_T_PCH_CNP:
3214 	case WM_T_PCH_TGP:
3215 		/* XXX limited to 9234 */
3216 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3217 		break;
3218 	case WM_T_PCH:
3219 		/* XXX limited to 4096 */
3220 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3221 		break;
3222 	case WM_T_82542_2_0:
3223 	case WM_T_82542_2_1:
3224 	case WM_T_ICH8:
3225 		/* No support for jumbo frame */
3226 		break;
3227 	default:
3228 		/* ETHER_MAX_LEN_JUMBO */
3229 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3230 		break;
3231 	}
3232 
3233 	/* If we're an i82543 or greater, we can support VLANs. */
3234 	if (sc->sc_type >= WM_T_82543) {
3235 		sc->sc_ethercom.ec_capabilities |=
3236 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
3237 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
3238 	}
3239 
3240 	if ((sc->sc_flags & WM_F_EEE) != 0)
3241 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
3242 
3243 	/*
3244 	 * We can perform TCPv4/UDPv4 checksum offload in both directions
3245 	 * (plus TCPv6/UDPv6 on transmit), but only on i82543 and later.
3246 	 */
3247 	if (sc->sc_type >= WM_T_82543) {
3248 		ifp->if_capabilities |=
3249 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
3250 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
3251 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
3252 		    IFCAP_CSUM_TCPv6_Tx |
3253 		    IFCAP_CSUM_UDPv6_Tx;
3254 	}
3255 
3256 	/*
3257 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
3258 	 *
3259 	 *	82541GI (8086:1076) ... no
3260 	 *	82572EI (8086:10b9) ... yes
3261 	 */
3262 	if (sc->sc_type >= WM_T_82571) {
3263 		ifp->if_capabilities |=
3264 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
3265 	}
3266 
3267 	/*
3268 	 * If we're an i82544 or greater (except i82547), we can do
3269 	 * TCP segmentation offload.
3270 	 */
3271 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547)
3272 		ifp->if_capabilities |= IFCAP_TSOv4;
3273 
3274 	if (sc->sc_type >= WM_T_82571)
3275 		ifp->if_capabilities |= IFCAP_TSOv6;
3276 
3277 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
3278 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
3279 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
3280 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
3281 
3282 	/* Attach the interface. */
3283 	if_initialize(ifp);
3284 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
3285 	ether_ifattach(ifp, enaddr);
3286 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
3287 	if_register(ifp);
3288 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
3289 	    RND_FLAG_DEFAULT);
3290 
3291 #ifdef WM_EVENT_COUNTERS
3292 	/* Attach event counters. */
3293 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
3294 	    NULL, xname, "linkintr");
3295 
3296 	evcnt_attach_dynamic(&sc->sc_ev_crcerrs, EVCNT_TYPE_MISC,
3297 	    NULL, xname, "CRC Error");
3298 	evcnt_attach_dynamic(&sc->sc_ev_symerrc, EVCNT_TYPE_MISC,
3299 	    NULL, xname, "Symbol Error");
3300 	evcnt_attach_dynamic(&sc->sc_ev_mpc, EVCNT_TYPE_MISC,
3301 	    NULL, xname, "Missed Packets");
3302 	evcnt_attach_dynamic(&sc->sc_ev_colc, EVCNT_TYPE_MISC,
3303 	    NULL, xname, "Collision");
3304 	evcnt_attach_dynamic(&sc->sc_ev_sec, EVCNT_TYPE_MISC,
3305 	    NULL, xname, "Sequence Error");
3306 	evcnt_attach_dynamic(&sc->sc_ev_rlec, EVCNT_TYPE_MISC,
3307 	    NULL, xname, "Receive Length Error");
3308 
3309 	if (sc->sc_type >= WM_T_82543) {
3310 		evcnt_attach_dynamic(&sc->sc_ev_algnerrc, EVCNT_TYPE_MISC,
3311 		    NULL, xname, "Alignment Error");
3312 		evcnt_attach_dynamic(&sc->sc_ev_rxerrc, EVCNT_TYPE_MISC,
3313 		    NULL, xname, "Receive Error");
3314 		/* XXX Does 82575 have HTDPMC? */
3315 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
3316 			evcnt_attach_dynamic(&sc->sc_ev_cexterr,
3317 			    EVCNT_TYPE_MISC, NULL, xname,
3318 			    "Carrier Extension Error");
3319 		else
3320 			evcnt_attach_dynamic(&sc->sc_ev_htdpmc,
3321 			    EVCNT_TYPE_MISC, NULL, xname,
3322 			    "Host Transmit Discarded Packets by MAC");
3323 
3324 		evcnt_attach_dynamic(&sc->sc_ev_tncrs, EVCNT_TYPE_MISC,
3325 		    NULL, xname, "Tx with No CRS");
3326 		evcnt_attach_dynamic(&sc->sc_ev_tsctc, EVCNT_TYPE_MISC,
3327 		    NULL, xname, "TCP Segmentation Context Tx");
3328 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
3329 			evcnt_attach_dynamic(&sc->sc_ev_tsctfc,
3330 			    EVCNT_TYPE_MISC, NULL, xname,
3331 			    "TCP Segmentation Context Tx Fail");
3332 		else {
3333 			/* XXX Is the circuit breaker only for 82576? */
3334 			evcnt_attach_dynamic(&sc->sc_ev_cbrdpc,
3335 			    EVCNT_TYPE_MISC, NULL, xname,
3336 			    "Circuit Breaker Rx Dropped Packet");
3337 			evcnt_attach_dynamic(&sc->sc_ev_cbrmpc,
3338 			    EVCNT_TYPE_MISC, NULL, xname,
3339 			    "Circuit Breaker Rx Manageability Packet");
3340 		}
3341 	}
3342 
3343 	if (sc->sc_type >= WM_T_82542_2_1) {
3344 		evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
3345 		    NULL, xname, "XOFF Transmitted");
3346 		evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
3347 		    NULL, xname, "XON Transmitted");
3348 		evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
3349 		    NULL, xname, "XOFF Received");
3350 		evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
3351 		    NULL, xname, "XON Received");
3352 		evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
3353 		    NULL, xname, "FC Received Unsupported");
3354 	}
3355 
3356 	evcnt_attach_dynamic(&sc->sc_ev_scc, EVCNT_TYPE_MISC,
3357 	    NULL, xname, "Single Collision");
3358 	evcnt_attach_dynamic(&sc->sc_ev_ecol, EVCNT_TYPE_MISC,
3359 	    NULL, xname, "Excessive Collisions");
3360 	evcnt_attach_dynamic(&sc->sc_ev_mcc, EVCNT_TYPE_MISC,
3361 	    NULL, xname, "Multiple Collision");
3362 	evcnt_attach_dynamic(&sc->sc_ev_latecol, EVCNT_TYPE_MISC,
3363 	    NULL, xname, "Late Collisions");
3364 
3365 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
3366 		evcnt_attach_dynamic(&sc->sc_ev_cbtmpc, EVCNT_TYPE_MISC,
3367 		    NULL, xname, "Circuit Breaker Tx Manageability Packet");
3368 
3369 	evcnt_attach_dynamic(&sc->sc_ev_dc, EVCNT_TYPE_MISC,
3370 	    NULL, xname, "Defer");
3371 	evcnt_attach_dynamic(&sc->sc_ev_prc64, EVCNT_TYPE_MISC,
3372 	    NULL, xname, "Packets Rx (64 bytes)");
3373 	evcnt_attach_dynamic(&sc->sc_ev_prc127, EVCNT_TYPE_MISC,
3374 	    NULL, xname, "Packets Rx (65-127 bytes)");
3375 	evcnt_attach_dynamic(&sc->sc_ev_prc255, EVCNT_TYPE_MISC,
3376 	    NULL, xname, "Packets Rx (128-255 bytes)");
3377 	evcnt_attach_dynamic(&sc->sc_ev_prc511, EVCNT_TYPE_MISC,
3378 	    NULL, xname, "Packets Rx (256-511 bytes)");
3379 	evcnt_attach_dynamic(&sc->sc_ev_prc1023, EVCNT_TYPE_MISC,
3380 	    NULL, xname, "Packets Rx (512-1023 bytes)");
3381 	evcnt_attach_dynamic(&sc->sc_ev_prc1522, EVCNT_TYPE_MISC,
3382 	    NULL, xname, "Packets Rx (1024-1522 bytes)");
3383 	evcnt_attach_dynamic(&sc->sc_ev_gprc, EVCNT_TYPE_MISC,
3384 	    NULL, xname, "Good Packets Rx");
3385 	evcnt_attach_dynamic(&sc->sc_ev_bprc, EVCNT_TYPE_MISC,
3386 	    NULL, xname, "Broadcast Packets Rx");
3387 	evcnt_attach_dynamic(&sc->sc_ev_mprc, EVCNT_TYPE_MISC,
3388 	    NULL, xname, "Multicast Packets Rx");
3389 	evcnt_attach_dynamic(&sc->sc_ev_gptc, EVCNT_TYPE_MISC,
3390 	    NULL, xname, "Good Packets Tx");
3391 	evcnt_attach_dynamic(&sc->sc_ev_gorc, EVCNT_TYPE_MISC,
3392 	    NULL, xname, "Good Octets Rx");
3393 	evcnt_attach_dynamic(&sc->sc_ev_gotc, EVCNT_TYPE_MISC,
3394 	    NULL, xname, "Good Octets Tx");
3395 	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
3396 	    NULL, xname, "Rx No Buffers");
3397 	evcnt_attach_dynamic(&sc->sc_ev_ruc, EVCNT_TYPE_MISC,
3398 	    NULL, xname, "Rx Undersize (valid CRC)");
3399 	evcnt_attach_dynamic(&sc->sc_ev_rfc, EVCNT_TYPE_MISC,
3400 	    NULL, xname, "Rx Fragment (bad CRC)");
3401 	evcnt_attach_dynamic(&sc->sc_ev_roc, EVCNT_TYPE_MISC,
3402 	    NULL, xname, "Rx Oversize (valid CRC)");
3403 	evcnt_attach_dynamic(&sc->sc_ev_rjc, EVCNT_TYPE_MISC,
3404 	    NULL, xname, "Rx Jabber (bad CRC)");
3405 	if (sc->sc_type >= WM_T_82540) {
3406 		evcnt_attach_dynamic(&sc->sc_ev_mgtprc, EVCNT_TYPE_MISC,
3407 		    NULL, xname, "Management Packets Rx");
3408 		evcnt_attach_dynamic(&sc->sc_ev_mgtpdc, EVCNT_TYPE_MISC,
3409 		    NULL, xname, "Management Packets Dropped");
3410 		evcnt_attach_dynamic(&sc->sc_ev_mgtptc, EVCNT_TYPE_MISC,
3411 		    NULL, xname, "Management Packets Tx");
3412 	}
3413 	evcnt_attach_dynamic(&sc->sc_ev_tor, EVCNT_TYPE_MISC,
3414 	    NULL, xname, "Total Octets Rx");
3415 	evcnt_attach_dynamic(&sc->sc_ev_tot, EVCNT_TYPE_MISC,
3416 	    NULL, xname, "Total Octets Tx");
3417 	evcnt_attach_dynamic(&sc->sc_ev_tpr, EVCNT_TYPE_MISC,
3418 	    NULL, xname, "Total Packets Rx");
3419 	evcnt_attach_dynamic(&sc->sc_ev_tpt, EVCNT_TYPE_MISC,
3420 	    NULL, xname, "Total Packets Tx");
3421 	evcnt_attach_dynamic(&sc->sc_ev_ptc64, EVCNT_TYPE_MISC,
3422 	    NULL, xname, "Packets Tx (64 bytes)");
3423 	evcnt_attach_dynamic(&sc->sc_ev_ptc127, EVCNT_TYPE_MISC,
3424 	    NULL, xname, "Packets Tx (65-127 bytes)");
3425 	evcnt_attach_dynamic(&sc->sc_ev_ptc255, EVCNT_TYPE_MISC,
3426 	    NULL, xname, "Packets Tx (128-255 bytes)");
3427 	evcnt_attach_dynamic(&sc->sc_ev_ptc511, EVCNT_TYPE_MISC,
3428 	    NULL, xname, "Packets Tx (256-511 bytes)");
3429 	evcnt_attach_dynamic(&sc->sc_ev_ptc1023, EVCNT_TYPE_MISC,
3430 	    NULL, xname, "Packets Tx (512-1023 bytes)");
3431 	evcnt_attach_dynamic(&sc->sc_ev_ptc1522, EVCNT_TYPE_MISC,
3432 	    NULL, xname, "Packets Tx (1024-1522 bytes)");
3433 	evcnt_attach_dynamic(&sc->sc_ev_mptc, EVCNT_TYPE_MISC,
3434 	    NULL, xname, "Multicast Packets Tx");
3435 	evcnt_attach_dynamic(&sc->sc_ev_bptc, EVCNT_TYPE_MISC,
3436 	    NULL, xname, "Broadcast Packets Tx");
3437 	if (sc->sc_type >= WM_T_82571) /* PCIe, 80003 and ICH/PCHs */
3438 		evcnt_attach_dynamic(&sc->sc_ev_iac, EVCNT_TYPE_MISC,
3439 		    NULL, xname, "Interrupt Assertion");
3440 	if (sc->sc_type < WM_T_82575) {
3441 		evcnt_attach_dynamic(&sc->sc_ev_icrxptc, EVCNT_TYPE_MISC,
3442 		    NULL, xname, "Intr. Cause Rx Pkt Timer Expire");
3443 		evcnt_attach_dynamic(&sc->sc_ev_icrxatc, EVCNT_TYPE_MISC,
3444 		    NULL, xname, "Intr. Cause Rx Abs Timer Expire");
3445 		evcnt_attach_dynamic(&sc->sc_ev_ictxptc, EVCNT_TYPE_MISC,
3446 		    NULL, xname, "Intr. Cause Tx Pkt Timer Expire");
3447 		evcnt_attach_dynamic(&sc->sc_ev_ictxatc, EVCNT_TYPE_MISC,
3448 		    NULL, xname, "Intr. Cause Tx Abs Timer Expire");
3449 		evcnt_attach_dynamic(&sc->sc_ev_ictxqec, EVCNT_TYPE_MISC,
3450 		    NULL, xname, "Intr. Cause Tx Queue Empty");
3451 		evcnt_attach_dynamic(&sc->sc_ev_ictxqmtc, EVCNT_TYPE_MISC,
3452 		    NULL, xname, "Intr. Cause Tx Queue Min Thresh");
3453 		evcnt_attach_dynamic(&sc->sc_ev_rxdmtc, EVCNT_TYPE_MISC,
3454 		    NULL, xname, "Intr. Cause Rx Desc Min Thresh");
3455 
3456 		/* XXX The 82575 datasheet says it has ICRXOC. Is that right? */
3457 		evcnt_attach_dynamic(&sc->sc_ev_icrxoc, EVCNT_TYPE_MISC,
3458 		    NULL, xname, "Interrupt Cause Receiver Overrun");
3459 	} else if (!WM_IS_ICHPCH(sc)) {
3460 		/*
3461 		 * For 82575 and newer.
3462 		 *
3463 		 * On 80003, ICHs and PCHs, it seems all of the following
3464 		 * registers are zero.
3465 		 */
3466 		evcnt_attach_dynamic(&sc->sc_ev_rpthc, EVCNT_TYPE_MISC,
3467 		    NULL, xname, "Rx Packets To Host");
3468 		evcnt_attach_dynamic(&sc->sc_ev_debug1, EVCNT_TYPE_MISC,
3469 		    NULL, xname, "Debug Counter 1");
3470 		evcnt_attach_dynamic(&sc->sc_ev_debug2, EVCNT_TYPE_MISC,
3471 		    NULL, xname, "Debug Counter 2");
3472 		evcnt_attach_dynamic(&sc->sc_ev_debug3, EVCNT_TYPE_MISC,
3473 		    NULL, xname, "Debug Counter 3");
3474 
3475 		/*
3476 		 * The 82575 datasheet says 0x4118 is for TXQEC (Tx Queue Empty).
3477 		 * I think it's wrong: the real count I observed is the same
3478 		 * as GPTC (Good Packets Tx) and TPT (Total Packets Tx).
3479 		 * It's HGPTC (Host Good Packets Tx), which is described in
3480 		 * the 82576 datasheet.
3481 		 */
3482 		evcnt_attach_dynamic(&sc->sc_ev_hgptc, EVCNT_TYPE_MISC,
3483 		    NULL, xname, "Host Good Packets Tx");
3484 
3485 		evcnt_attach_dynamic(&sc->sc_ev_debug4, EVCNT_TYPE_MISC,
3486 		    NULL, xname, "Debug Counter 4");
3487 		evcnt_attach_dynamic(&sc->sc_ev_rxdmtc, EVCNT_TYPE_MISC,
3488 		    NULL, xname, "Rx Desc Min Thresh");
3489 		/* XXX Is the circuit breaker only for 82576? */
3490 		evcnt_attach_dynamic(&sc->sc_ev_htcbdpc, EVCNT_TYPE_MISC,
3491 		    NULL, xname, "Host Tx Circuit Breaker Dropped Packets");
3492 
3493 		evcnt_attach_dynamic(&sc->sc_ev_hgorc, EVCNT_TYPE_MISC,
3494 		    NULL, xname, "Host Good Octets Rx");
3495 		evcnt_attach_dynamic(&sc->sc_ev_hgotc, EVCNT_TYPE_MISC,
3496 		    NULL, xname, "Host Good Octets Tx");
3497 		evcnt_attach_dynamic(&sc->sc_ev_lenerrs, EVCNT_TYPE_MISC,
3498 		    NULL, xname, "Length Errors (length/type <= 1500)");
3499 		evcnt_attach_dynamic(&sc->sc_ev_scvpc, EVCNT_TYPE_MISC,
3500 		    NULL, xname, "SerDes/SGMII Code Violation Packet");
3501 		evcnt_attach_dynamic(&sc->sc_ev_hrmpc, EVCNT_TYPE_MISC,
3502 		    NULL, xname, "Header Redirection Missed Packet");
3503 	}
3504 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
3505 		evcnt_attach_dynamic(&sc->sc_ev_tlpic, EVCNT_TYPE_MISC,
3506 		    NULL, xname, "EEE Tx LPI");
3507 		evcnt_attach_dynamic(&sc->sc_ev_rlpic, EVCNT_TYPE_MISC,
3508 		    NULL, xname, "EEE Rx LPI");
3509 		evcnt_attach_dynamic(&sc->sc_ev_b2ogprc, EVCNT_TYPE_MISC,
3510 		    NULL, xname, "BMC2OS Packets received by host");
3511 		evcnt_attach_dynamic(&sc->sc_ev_o2bspc, EVCNT_TYPE_MISC,
3512 		    NULL, xname, "OS2BMC Packets transmitted by host");
3513 		evcnt_attach_dynamic(&sc->sc_ev_b2ospc, EVCNT_TYPE_MISC,
3514 		    NULL, xname, "BMC2OS Packets sent by BMC");
3515 		evcnt_attach_dynamic(&sc->sc_ev_o2bgptc, EVCNT_TYPE_MISC,
3516 		    NULL, xname, "OS2BMC Packets received by BMC");
3517 	}
3518 #endif /* WM_EVENT_COUNTERS */
3519 
3520 	sc->sc_txrx_use_workqueue = false;
3521 
3522 	if (wm_phy_need_linkdown_discard(sc)) {
3523 		DPRINTF(sc, WM_DEBUG_LINK,
3524 		    ("%s: %s: Set linkdown discard flag\n",
3525 			device_xname(sc->sc_dev), __func__));
3526 		wm_set_linkdown_discard(sc);
3527 	}
3528 
3529 	wm_init_sysctls(sc);
3530 
3531 	if (pmf_device_register(self, wm_suspend, wm_resume))
3532 		pmf_class_network_register(self, ifp);
3533 	else
3534 		aprint_error_dev(self, "couldn't establish power handler\n");
3535 
3536 	sc->sc_flags |= WM_F_ATTACHED;
3537 out:
3538 	return;
3539 }
3540 
3541 /* The detach function (ca_detach) */
3542 static int
3543 wm_detach(device_t self, int flags __unused)
3544 {
3545 	struct wm_softc *sc = device_private(self);
3546 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3547 	int i;
3548 
3549 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
3550 		return 0;
3551 
3552 	/* Stop the interface. Callouts are stopped by wm_stop(). */
3553 	IFNET_LOCK(ifp);
3554 	sc->sc_dying = true;
3555 	wm_stop(ifp, 1);
3556 	IFNET_UNLOCK(ifp);
3557 
3558 	pmf_device_deregister(self);
3559 
3560 	sysctl_teardown(&sc->sc_sysctllog);
3561 
3562 #ifdef WM_EVENT_COUNTERS
3563 	evcnt_detach(&sc->sc_ev_linkintr);
3564 
3565 	evcnt_detach(&sc->sc_ev_crcerrs);
3566 	evcnt_detach(&sc->sc_ev_symerrc);
3567 	evcnt_detach(&sc->sc_ev_mpc);
3568 	evcnt_detach(&sc->sc_ev_colc);
3569 	evcnt_detach(&sc->sc_ev_sec);
3570 	evcnt_detach(&sc->sc_ev_rlec);
3571 
3572 	if (sc->sc_type >= WM_T_82543) {
3573 		evcnt_detach(&sc->sc_ev_algnerrc);
3574 		evcnt_detach(&sc->sc_ev_rxerrc);
3575 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
3576 			evcnt_detach(&sc->sc_ev_cexterr);
3577 		else
3578 			evcnt_detach(&sc->sc_ev_htdpmc);
3579 
3580 		evcnt_detach(&sc->sc_ev_tncrs);
3581 		evcnt_detach(&sc->sc_ev_tsctc);
3582 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
3583 			evcnt_detach(&sc->sc_ev_tsctfc);
3584 		else {
3585 			evcnt_detach(&sc->sc_ev_cbrdpc);
3586 			evcnt_detach(&sc->sc_ev_cbrmpc);
3587 		}
3588 	}
3589 
3590 	if (sc->sc_type >= WM_T_82542_2_1) {
3591 		evcnt_detach(&sc->sc_ev_tx_xoff);
3592 		evcnt_detach(&sc->sc_ev_tx_xon);
3593 		evcnt_detach(&sc->sc_ev_rx_xoff);
3594 		evcnt_detach(&sc->sc_ev_rx_xon);
3595 		evcnt_detach(&sc->sc_ev_rx_macctl);
3596 	}
3597 
3598 	evcnt_detach(&sc->sc_ev_scc);
3599 	evcnt_detach(&sc->sc_ev_ecol);
3600 	evcnt_detach(&sc->sc_ev_mcc);
3601 	evcnt_detach(&sc->sc_ev_latecol);
3602 
3603 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
3604 		evcnt_detach(&sc->sc_ev_cbtmpc);
3605 
3606 	evcnt_detach(&sc->sc_ev_dc);
3607 	evcnt_detach(&sc->sc_ev_prc64);
3608 	evcnt_detach(&sc->sc_ev_prc127);
3609 	evcnt_detach(&sc->sc_ev_prc255);
3610 	evcnt_detach(&sc->sc_ev_prc511);
3611 	evcnt_detach(&sc->sc_ev_prc1023);
3612 	evcnt_detach(&sc->sc_ev_prc1522);
3613 	evcnt_detach(&sc->sc_ev_gprc);
3614 	evcnt_detach(&sc->sc_ev_bprc);
3615 	evcnt_detach(&sc->sc_ev_mprc);
3616 	evcnt_detach(&sc->sc_ev_gptc);
3617 	evcnt_detach(&sc->sc_ev_gorc);
3618 	evcnt_detach(&sc->sc_ev_gotc);
3619 	evcnt_detach(&sc->sc_ev_rnbc);
3620 	evcnt_detach(&sc->sc_ev_ruc);
3621 	evcnt_detach(&sc->sc_ev_rfc);
3622 	evcnt_detach(&sc->sc_ev_roc);
3623 	evcnt_detach(&sc->sc_ev_rjc);
3624 	if (sc->sc_type >= WM_T_82540) {
3625 		evcnt_detach(&sc->sc_ev_mgtprc);
3626 		evcnt_detach(&sc->sc_ev_mgtpdc);
3627 		evcnt_detach(&sc->sc_ev_mgtptc);
3628 	}
3629 	evcnt_detach(&sc->sc_ev_tor);
3630 	evcnt_detach(&sc->sc_ev_tot);
3631 	evcnt_detach(&sc->sc_ev_tpr);
3632 	evcnt_detach(&sc->sc_ev_tpt);
3633 	evcnt_detach(&sc->sc_ev_ptc64);
3634 	evcnt_detach(&sc->sc_ev_ptc127);
3635 	evcnt_detach(&sc->sc_ev_ptc255);
3636 	evcnt_detach(&sc->sc_ev_ptc511);
3637 	evcnt_detach(&sc->sc_ev_ptc1023);
3638 	evcnt_detach(&sc->sc_ev_ptc1522);
3639 	evcnt_detach(&sc->sc_ev_mptc);
3640 	evcnt_detach(&sc->sc_ev_bptc);
3641 	if (sc->sc_type >= WM_T_82571)
3642 		evcnt_detach(&sc->sc_ev_iac);
3643 	if (sc->sc_type < WM_T_82575) {
3644 		evcnt_detach(&sc->sc_ev_icrxptc);
3645 		evcnt_detach(&sc->sc_ev_icrxatc);
3646 		evcnt_detach(&sc->sc_ev_ictxptc);
3647 		evcnt_detach(&sc->sc_ev_ictxatc);
3648 		evcnt_detach(&sc->sc_ev_ictxqec);
3649 		evcnt_detach(&sc->sc_ev_ictxqmtc);
3650 		evcnt_detach(&sc->sc_ev_rxdmtc);
3651 		evcnt_detach(&sc->sc_ev_icrxoc);
3652 	} else if (!WM_IS_ICHPCH(sc)) {
3653 		evcnt_detach(&sc->sc_ev_rpthc);
3654 		evcnt_detach(&sc->sc_ev_debug1);
3655 		evcnt_detach(&sc->sc_ev_debug2);
3656 		evcnt_detach(&sc->sc_ev_debug3);
3657 		evcnt_detach(&sc->sc_ev_hgptc);
3658 		evcnt_detach(&sc->sc_ev_debug4);
3659 		evcnt_detach(&sc->sc_ev_rxdmtc);
3660 		evcnt_detach(&sc->sc_ev_htcbdpc);
3661 
3662 		evcnt_detach(&sc->sc_ev_hgorc);
3663 		evcnt_detach(&sc->sc_ev_hgotc);
3664 		evcnt_detach(&sc->sc_ev_lenerrs);
3665 		evcnt_detach(&sc->sc_ev_scvpc);
3666 		evcnt_detach(&sc->sc_ev_hrmpc);
3667 	}
3668 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
3669 		evcnt_detach(&sc->sc_ev_tlpic);
3670 		evcnt_detach(&sc->sc_ev_rlpic);
3671 		evcnt_detach(&sc->sc_ev_b2ogprc);
3672 		evcnt_detach(&sc->sc_ev_o2bspc);
3673 		evcnt_detach(&sc->sc_ev_b2ospc);
3674 		evcnt_detach(&sc->sc_ev_o2bgptc);
3675 	}
3676 #endif /* WM_EVENT_COUNTERS */
3677 
3678 	rnd_detach_source(&sc->rnd_source);
3679 
3680 	/* Tell the firmware about the release */
3681 	mutex_enter(sc->sc_core_lock);
3682 	wm_release_manageability(sc);
3683 	wm_release_hw_control(sc);
3684 	wm_enable_wakeup(sc);
3685 	mutex_exit(sc->sc_core_lock);
3686 
3687 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
3688 
3689 	ether_ifdetach(ifp);
3690 	if_detach(ifp);
3691 	if_percpuq_destroy(sc->sc_ipq);
3692 
3693 	/* Delete all remaining media. */
3694 	ifmedia_fini(&sc->sc_mii.mii_media);
3695 
3696 	/* Unload RX dmamaps and free mbufs */
3697 	for (i = 0; i < sc->sc_nqueues; i++) {
3698 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
3699 		mutex_enter(rxq->rxq_lock);
3700 		wm_rxdrain(rxq);
3701 		mutex_exit(rxq->rxq_lock);
3702 	}
3703 	/* Must unlock here */
3704 
3705 	/* Disestablish the interrupt handler */
3706 	for (i = 0; i < sc->sc_nintrs; i++) {
3707 		if (sc->sc_ihs[i] != NULL) {
3708 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
3709 			sc->sc_ihs[i] = NULL;
3710 		}
3711 	}
3712 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
3713 
3714 	/* wm_stop() ensured that the workqueues are stopped. */
3715 	workqueue_destroy(sc->sc_queue_wq);
3716 	workqueue_destroy(sc->sc_reset_wq);
3717 
3718 	for (i = 0; i < sc->sc_nqueues; i++)
3719 		softint_disestablish(sc->sc_queue[i].wmq_si);
3720 
3721 	wm_free_txrx_queues(sc);
3722 
3723 	/* Unmap the registers */
3724 	if (sc->sc_ss) {
3725 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
3726 		sc->sc_ss = 0;
3727 	}
3728 	if (sc->sc_ios) {
3729 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
3730 		sc->sc_ios = 0;
3731 	}
3732 	if (sc->sc_flashs) {
3733 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
3734 		sc->sc_flashs = 0;
3735 	}
3736 
3737 	if (sc->sc_core_lock)
3738 		mutex_obj_free(sc->sc_core_lock);
3739 	if (sc->sc_ich_phymtx)
3740 		mutex_obj_free(sc->sc_ich_phymtx);
3741 	if (sc->sc_ich_nvmmtx)
3742 		mutex_obj_free(sc->sc_ich_nvmmtx);
3743 
3744 	return 0;
3745 }
3746 
3747 static bool
3748 wm_suspend(device_t self, const pmf_qual_t *qual)
3749 {
3750 	struct wm_softc *sc = device_private(self);
3751 
3752 	wm_release_manageability(sc);
3753 	wm_release_hw_control(sc);
3754 	wm_enable_wakeup(sc);
3755 
3756 	return true;
3757 }
3758 
3759 static bool
3760 wm_resume(device_t self, const pmf_qual_t *qual)
3761 {
3762 	struct wm_softc *sc = device_private(self);
3763 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3764 	pcireg_t reg;
3765 	char buf[256];
3766 
3767 	reg = CSR_READ(sc, WMREG_WUS);
3768 	if (reg != 0) {
3769 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
3770 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
3771 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
3772 	}
3773 
3774 	if (sc->sc_type >= WM_T_PCH2)
3775 		wm_resume_workarounds_pchlan(sc);
3776 	IFNET_LOCK(ifp);
3777 	if ((ifp->if_flags & IFF_UP) == 0) {
3778 		/* >= PCH_SPT hardware workaround before reset. */
3779 		if (sc->sc_type >= WM_T_PCH_SPT)
3780 			wm_flush_desc_rings(sc);
3781 
3782 		wm_reset(sc);
3783 		/* Non-AMT based hardware can now take control from firmware */
3784 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
3785 			wm_get_hw_control(sc);
3786 		wm_init_manageability(sc);
3787 	} else {
3788 		/*
3789 		 * We called pmf_class_network_register(), so if_init() is
3790 		 * called automatically when the interface is up. wm_reset(),
3791 		 * wm_get_hw_control() and wm_init_manageability() are called
3792 		 * via wm_init().
3793 		 */
3794 	}
3795 	IFNET_UNLOCK(ifp);
3796 
3797 	return true;
3798 }
3799 
3800 /*
3801  * wm_watchdog:
3802  *
3803  *	Watchdog checker.
3804  */
3805 static bool
3806 wm_watchdog(struct ifnet *ifp)
3807 {
3808 	int qid;
3809 	struct wm_softc *sc = ifp->if_softc;
3810 	uint16_t hang_queue = 0; /* wm(4) supports at most 16 queues (82576). */
3811 
3812 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
3813 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
3814 
3815 		wm_watchdog_txq(ifp, txq, &hang_queue);
3816 	}
3817 
3818 #ifdef WM_DEBUG
3819 	if (sc->sc_trigger_reset) {
3820 		/* debug operation, no need for atomicity or reliability */
3821 		sc->sc_trigger_reset = 0;
3822 		hang_queue++;
3823 	}
3824 #endif
3825 
3826 	if (hang_queue == 0)
3827 		return true;
3828 
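	/*
	 * atomic_swap_uint() returns the previous value, so only the
	 * first caller that flips sc_reset_pending from 0 to 1 enqueues
	 * the reset work; wm_handle_reset_work() clears the flag once
	 * the reset has completed.
	 */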
3829 	if (atomic_swap_uint(&sc->sc_reset_pending, 1) == 0)
3830 		workqueue_enqueue(sc->sc_reset_wq, &sc->sc_reset_work, NULL);
3831 
3832 	return false;
3833 }
3834 
3835 /*
3836  * Perform an interface watchdog reset.
3837  */
3838 static void
3839 wm_handle_reset_work(struct work *work, void *arg)
3840 {
3841 	struct wm_softc * const sc = arg;
3842 	struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
3843 
3844 	/* Don't want ioctl operations to happen */
3845 	IFNET_LOCK(ifp);
3846 
3847 	/* Reset the interface. */
3848 	wm_init(ifp);
3849 
3850 	IFNET_UNLOCK(ifp);
3851 
3852 	/*
3853 	 * Some upper-layer processing may still call ifp->if_start()
3854 	 * directly, e.g. ALTQ or a single-CPU system.
3855 	 */
3856 	/* Try to get more packets going. */
3857 	ifp->if_start(ifp);
3858 
3859 	atomic_store_relaxed(&sc->sc_reset_pending, 0);
3860 }
3861 
3862 
3863 static void
3864 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
3865 {
3866 
3867 	mutex_enter(txq->txq_lock);
3868 	if (txq->txq_sending &&
3869 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
3870 		wm_watchdog_txq_locked(ifp, txq, hang);
3871 
3872 	mutex_exit(txq->txq_lock);
3873 }
3874 
3875 static void
3876 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
3877     uint16_t *hang)
3878 {
3879 	struct wm_softc *sc = ifp->if_softc;
3880 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
3881 
3882 	KASSERT(mutex_owned(txq->txq_lock));
3883 
3884 	/*
3885 	 * Since we're using delayed interrupts, sweep up
3886 	 * before we report an error.
3887 	 */
3888 	wm_txeof(txq, UINT_MAX);
3889 
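	/*
	 * If the queue still has descriptors outstanding after the
	 * sweep, record its ID in the caller's bitmask of hung queues.
	 */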
3890 	if (txq->txq_sending)
3891 		*hang |= __BIT(wmq->wmq_id);
3892 
3893 	if (txq->txq_free == WM_NTXDESC(txq)) {
3894 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
3895 		    device_xname(sc->sc_dev));
3896 	} else {
3897 #ifdef WM_DEBUG
3898 		int i, j;
3899 		struct wm_txsoft *txs;
3900 #endif
3901 		log(LOG_ERR,
3902 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3903 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
3904 		    txq->txq_next);
3905 		if_statinc(ifp, if_oerrors);
3906 #ifdef WM_DEBUG
3907 		for (i = txq->txq_sdirty; i != txq->txq_snext;
3908 		     i = WM_NEXTTXS(txq, i)) {
3909 			txs = &txq->txq_soft[i];
3910 			printf("txs %d tx %d -> %d\n",
3911 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
3912 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
3913 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3914 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3915 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
3916 					printf("\t %#08x%08x\n",
3917 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
3918 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
3919 				} else {
3920 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3921 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
3922 					    txq->txq_descs[j].wtx_addr.wa_low);
3923 					printf("\t %#04x%02x%02x%08x\n",
3924 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
3925 					    txq->txq_descs[j].wtx_fields.wtxu_options,
3926 					    txq->txq_descs[j].wtx_fields.wtxu_status,
3927 					    txq->txq_descs[j].wtx_cmdlen);
3928 				}
3929 				if (j == txs->txs_lastdesc)
3930 					break;
3931 			}
3932 		}
3933 #endif
3934 	}
3935 }
3936 
3937 /*
3938  * wm_tick:
3939  *
3940  *	One second timer, used to check link status, sweep up
3941  *	completed transmit jobs, etc.
3942  */
3943 static void
3944 wm_tick(void *arg)
3945 {
3946 	struct wm_softc *sc = arg;
3947 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3948 
3949 	mutex_enter(sc->sc_core_lock);
3950 
3951 	if (sc->sc_core_stopping) {
3952 		mutex_exit(sc->sc_core_lock);
3953 		return;
3954 	}
3955 
3956 	wm_update_stats(sc);
3957 
3958 	if (sc->sc_flags & WM_F_HAS_MII) {
3959 		bool dotick = true;
3960 
3961 		/*
3962 		 * Workaround for some chips to delay sending LINK_STATE_UP.
3963 		 * See also wm_linkintr_gmii() and wm_gmii_mediastatus().
3964 		 */
3965 		if ((sc->sc_flags & WM_F_DELAY_LINKUP) != 0) {
3966 			struct timeval now;
3967 
3968 			getmicrotime(&now);
3969 			if (timercmp(&now, &sc->sc_linkup_delay_time, <))
3970 				dotick = false;
3971 			else if (sc->sc_linkup_delay_time.tv_sec != 0) {
3972 				/* Simplify by checking tv_sec only. */
3973 
3974 				sc->sc_linkup_delay_time.tv_sec = 0;
3975 				sc->sc_linkup_delay_time.tv_usec = 0;
3976 			}
3977 		}
3978 		if (dotick)
3979 			mii_tick(&sc->sc_mii);
3980 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
3981 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3982 		wm_serdes_tick(sc);
3983 	else
3984 		wm_tbi_tick(sc);
3985 
3986 	mutex_exit(sc->sc_core_lock);
3987 
3988 	if (wm_watchdog(ifp))
3989 		callout_schedule(&sc->sc_tick_ch, hz);
3990 }
3991 
3992 static int
3993 wm_ifflags_cb(struct ethercom *ec)
3994 {
3995 	struct ifnet *ifp = &ec->ec_if;
3996 	struct wm_softc *sc = ifp->if_softc;
3997 	u_short iffchange;
3998 	int ecchange;
3999 	bool needreset = false;
4000 	int rc = 0;
4001 
4002 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4003 		device_xname(sc->sc_dev), __func__));
4004 
4005 	KASSERT(IFNET_LOCKED(ifp));
4006 
4007 	mutex_enter(sc->sc_core_lock);
4008 
4009 	/*
4010 	 * Check for if_flags.
4011 	 * The main purpose is to prevent link-down when opening bpf.
4012 	 */
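	/*
	 * The XOR of the old and new flags yields exactly the bits that
	 * changed; e.g. if only IFF_PROMISC was toggled, iffchange has
	 * just that bit set.
	 */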
4013 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
4014 	sc->sc_if_flags = ifp->if_flags;
4015 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
4016 		needreset = true;
4017 		goto ec;
4018 	}
4019 
4020 	/* iff related updates */
4021 	if ((iffchange & IFF_PROMISC) != 0)
4022 		wm_set_filter(sc);
4023 
4024 	wm_set_vlan(sc);
4025 
4026 ec:
4027 	/* Check for ec_capenable. */
4028 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
4029 	sc->sc_ec_capenable = ec->ec_capenable;
4030 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
4031 		needreset = true;
4032 		goto out;
4033 	}
4034 
4035 	/* ec related updates */
4036 	wm_set_eee(sc);
4037 
4038 out:
4039 	if (needreset)
4040 		rc = ENETRESET;
4041 	mutex_exit(sc->sc_core_lock);
4042 
4043 	return rc;
4044 }
4045 
4046 static bool
4047 wm_phy_need_linkdown_discard(struct wm_softc *sc)
4048 {
4049 
4050 	switch (sc->sc_phytype) {
4051 	case WMPHY_82577: /* ihphy */
4052 	case WMPHY_82578: /* atphy */
4053 	case WMPHY_82579: /* ihphy */
4054 	case WMPHY_I217: /* ihphy */
4055 	case WMPHY_82580: /* ihphy */
4056 	case WMPHY_I350: /* ihphy */
4057 		return true;
4058 	default:
4059 		return false;
4060 	}
4061 }
4062 
4063 static void
4064 wm_set_linkdown_discard(struct wm_softc *sc)
4065 {
4066 
4067 	for (int i = 0; i < sc->sc_nqueues; i++) {
4068 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
4069 
4070 		mutex_enter(txq->txq_lock);
4071 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
4072 		mutex_exit(txq->txq_lock);
4073 	}
4074 }
4075 
4076 static void
4077 wm_clear_linkdown_discard(struct wm_softc *sc)
4078 {
4079 
4080 	for (int i = 0; i < sc->sc_nqueues; i++) {
4081 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
4082 
4083 		mutex_enter(txq->txq_lock);
4084 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
4085 		mutex_exit(txq->txq_lock);
4086 	}
4087 }
4088 
4089 /*
4090  * wm_ioctl:		[ifnet interface function]
4091  *
4092  *	Handle control requests from the operator.
4093  */
4094 static int
4095 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
4096 {
4097 	struct wm_softc *sc = ifp->if_softc;
4098 	struct ifreq *ifr = (struct ifreq *)data;
4099 	struct ifaddr *ifa = (struct ifaddr *)data;
4100 	struct sockaddr_dl *sdl;
4101 	int error;
4102 
4103 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4104 		device_xname(sc->sc_dev), __func__));
4105 
4106 	switch (cmd) {
4107 	case SIOCADDMULTI:
4108 	case SIOCDELMULTI:
4109 		break;
4110 	default:
4111 		KASSERT(IFNET_LOCKED(ifp));
4112 	}
4113 
4114 	if (cmd == SIOCZIFDATA) {
4115 		/*
4116 		 * Special handling for SIOCZIFDATA.
4117 		 * Copying and clearing the if_data structure is done with
4118 		 * ether_ioctl() below.
4119 		 */
4120 		mutex_enter(sc->sc_core_lock);
4121 		wm_update_stats(sc);
4122 		wm_clear_evcnt(sc);
4123 		mutex_exit(sc->sc_core_lock);
4124 	}
4125 
4126 	switch (cmd) {
4127 	case SIOCSIFMEDIA:
4128 		mutex_enter(sc->sc_core_lock);
4129 		/* Flow control requires full-duplex mode. */
4130 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
4131 		    (ifr->ifr_media & IFM_FDX) == 0)
4132 			ifr->ifr_media &= ~IFM_ETH_FMASK;
4133 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
4134 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
4135 				/* We can do both TXPAUSE and RXPAUSE. */
4136 				ifr->ifr_media |=
4137 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
4138 			}
4139 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
4140 		}
4141 		mutex_exit(sc->sc_core_lock);
4142 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
4143 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
4144 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE) {
4145 				DPRINTF(sc, WM_DEBUG_LINK,
4146 				    ("%s: %s: Set linkdown discard flag\n",
4147 					device_xname(sc->sc_dev), __func__));
4148 				wm_set_linkdown_discard(sc);
4149 			}
4150 		}
4151 		break;
4152 	case SIOCINITIFADDR:
4153 		mutex_enter(sc->sc_core_lock);
4154 		if (ifa->ifa_addr->sa_family == AF_LINK) {
4155 			sdl = satosdl(ifp->if_dl->ifa_addr);
4156 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
4157 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
4158 			/* Unicast address is the first multicast entry */
4159 			wm_set_filter(sc);
4160 			error = 0;
4161 			mutex_exit(sc->sc_core_lock);
4162 			break;
4163 		}
4164 		mutex_exit(sc->sc_core_lock);
4165 		/*FALLTHROUGH*/
4166 	default:
4167 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
4168 			if (((ifp->if_flags & IFF_UP) != 0) &&
4169 			    ((ifr->ifr_flags & IFF_UP) == 0)) {
4170 				DPRINTF(sc, WM_DEBUG_LINK,
4171 				    ("%s: %s: Set linkdown discard flag\n",
4172 					device_xname(sc->sc_dev), __func__));
4173 				wm_set_linkdown_discard(sc);
4174 			}
4175 		}
4176 		const int s = splnet();
4177 		/* ether_ioctl() may call wm_start(), so don't hold the core lock */
4178 		error = ether_ioctl(ifp, cmd, data);
4179 		splx(s);
4180 		if (error != ENETRESET)
4181 			break;
4182 
4183 		error = 0;
4184 
4185 		if (cmd == SIOCSIFCAP)
4186 			error = if_init(ifp);
4187 		else if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
4188 			mutex_enter(sc->sc_core_lock);
4189 			if (sc->sc_if_flags & IFF_RUNNING) {
4190 				/*
4191 				 * Multicast list has changed; set the
4192 				 * hardware filter accordingly.
4193 				 */
4194 				wm_set_filter(sc);
4195 			}
4196 			mutex_exit(sc->sc_core_lock);
4197 		}
4198 		break;
4199 	}
4200 
4201 	return error;
4202 }
4203 
4204 /* MAC address related */
4205 
4206 /*
4207  * Get the offset of the MAC address and return it.
4208  * On error, return offset 0.
4209  */
4210 static uint16_t
4211 wm_check_alt_mac_addr(struct wm_softc *sc)
4212 {
4213 	uint16_t myea[ETHER_ADDR_LEN / 2];
4214 	uint16_t offset = NVM_OFF_MACADDR;
4215 
4216 	/* Try to read alternative MAC address pointer */
4217 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
4218 		return 0;
4219 
4220 	/* Check whether the pointer is valid. */
4221 	if ((offset == 0x0000) || (offset == 0xffff))
4222 		return 0;
4223 
4224 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
4225 	/*
4226 	 * Check whether the alternative MAC address is valid.
4227 	 * Some cards have a non-0xffff pointer but don't actually use
4228 	 * an alternative MAC address.
4229 	 *
4230 	 * Verify by checking that the group (multicast) bit is clear.
4231 	 */
4232 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
4233 		if (((myea[0] & 0xff) & 0x01) == 0)
4234 			return offset; /* Found */
4235 
4236 	/* Not found */
4237 	return 0;
4238 }
4239 
4240 static int
4241 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
4242 {
4243 	uint16_t myea[ETHER_ADDR_LEN / 2];
4244 	uint16_t offset = NVM_OFF_MACADDR;
4245 	int do_invert = 0;
4246 
4247 	switch (sc->sc_type) {
4248 	case WM_T_82580:
4249 	case WM_T_I350:
4250 	case WM_T_I354:
4251 		/* EEPROM Top Level Partitioning */
4252 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
4253 		break;
4254 	case WM_T_82571:
4255 	case WM_T_82575:
4256 	case WM_T_82576:
4257 	case WM_T_80003:
4258 	case WM_T_I210:
4259 	case WM_T_I211:
4260 		offset = wm_check_alt_mac_addr(sc);
4261 		if (offset == 0)
4262 			if ((sc->sc_funcid & 0x01) == 1)
4263 				do_invert = 1;
4264 		break;
4265 	default:
4266 		if ((sc->sc_funcid & 0x01) == 1)
4267 			do_invert = 1;
4268 		break;
4269 	}
4270 
4271 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
4272 		goto bad;
4273 
4274 	enaddr[0] = myea[0] & 0xff;
4275 	enaddr[1] = myea[0] >> 8;
4276 	enaddr[2] = myea[1] & 0xff;
4277 	enaddr[3] = myea[1] >> 8;
4278 	enaddr[4] = myea[2] & 0xff;
4279 	enaddr[5] = myea[2] >> 8;
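	/*
	 * Each 16-bit NVM word holds two address octets, low byte first:
	 * for example, the words { 0x2211, 0x4433, 0x6655 } yield the
	 * MAC address 11:22:33:44:55:66.
	 */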
4280 
4281 	/*
4282 	 * Toggle the LSB of the MAC address on the second port
4283 	 * of some dual port cards.
4284 	 */
4285 	if (do_invert != 0)
4286 		enaddr[5] ^= 1;
4287 
4288 	return 0;
4289 
4290 bad:
4291 	return -1;
4292 }
4293 
4294 /*
4295  * wm_set_ral:
4296  *
4297  *	Set an entry in the receive address list.
4298  */
4299 static void
4300 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
4301 {
4302 	uint32_t ral_lo, ral_hi, addrl, addrh;
4303 	uint32_t wlock_mac;
4304 	int rv;
4305 
4306 	if (enaddr != NULL) {
4307 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
4308 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
4309 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
4310 		ral_hi |= RAL_AV;
4311 	} else {
4312 		ral_lo = 0;
4313 		ral_hi = 0;
4314 	}
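	/*
	 * The octets are packed little-endian into the register pair:
	 * for example, 00:11:22:33:44:55 gives ral_lo == 0x33221100 and
	 * ral_hi == (0x00005544 | RAL_AV).
	 */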
4315 
4316 	switch (sc->sc_type) {
4317 	case WM_T_82542_2_0:
4318 	case WM_T_82542_2_1:
4319 	case WM_T_82543:
4320 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
4321 		CSR_WRITE_FLUSH(sc);
4322 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
4323 		CSR_WRITE_FLUSH(sc);
4324 		break;
4325 	case WM_T_PCH2:
4326 	case WM_T_PCH_LPT:
4327 	case WM_T_PCH_SPT:
4328 	case WM_T_PCH_CNP:
4329 	case WM_T_PCH_TGP:
4330 		if (idx == 0) {
4331 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
4332 			CSR_WRITE_FLUSH(sc);
4333 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
4334 			CSR_WRITE_FLUSH(sc);
4335 			return;
4336 		}
4337 		if (sc->sc_type != WM_T_PCH2) {
4338 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
4339 			    FWSM_WLOCK_MAC);
4340 			addrl = WMREG_SHRAL(idx - 1);
4341 			addrh = WMREG_SHRAH(idx - 1);
4342 		} else {
4343 			wlock_mac = 0;
4344 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
4345 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
4346 		}
4347 
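		/*
		 * FWSM_WLOCK_MAC limits which shared receive address
		 * registers may be written: 0 leaves all entries
		 * writable, otherwise only entries up to index wlock_mac
		 * are writable.
		 */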
4348 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
4349 			rv = wm_get_swflag_ich8lan(sc);
4350 			if (rv != 0)
4351 				return;
4352 			CSR_WRITE(sc, addrl, ral_lo);
4353 			CSR_WRITE_FLUSH(sc);
4354 			CSR_WRITE(sc, addrh, ral_hi);
4355 			CSR_WRITE_FLUSH(sc);
4356 			wm_put_swflag_ich8lan(sc);
4357 		}
4358 
4359 		break;
4360 	default:
4361 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
4362 		CSR_WRITE_FLUSH(sc);
4363 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
4364 		CSR_WRITE_FLUSH(sc);
4365 		break;
4366 	}
4367 }
4368 
4369 /*
4370  * wm_mchash:
4371  *
4372  *	Compute the hash of the multicast address for the 4096-bit
4373  *	multicast filter.
4374  */
4375 static uint32_t
4376 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
4377 {
4378 	static const int lo_shift[4] = { 4, 3, 2, 0 };
4379 	static const int hi_shift[4] = { 4, 5, 6, 8 };
4380 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
4381 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
4382 	uint32_t hash;
4383 
4384 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4385 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4386 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
4387 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)
4388 	    || (sc->sc_type == WM_T_PCH_TGP)) {
4389 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
4390 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
4391 		return (hash & 0x3ff);
4392 	}
4393 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
4394 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
4395 
4396 	return (hash & 0xfff);
4397 }
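
/*
 * Worked example for the non-ICH/PCH case with sc_mchash_type == 0:
 * for an address ending in ...:ab:cd, hash = (0xab >> 4) | (0xcd << 4)
 * = 0xcda.  wm_set_filter() then uses bits [11:5] (0x66) as the MTA
 * word index and bits [4:0] (0x1a) as the bit index within that word.
 * ICH/PCH chips use a 10-bit hash with different shifts instead.
 */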
4398 
4399 /*
4400  * wm_rar_count:
4401  *	Return the number of entries in the receive address list.
4402  */
4403 static int
4404 wm_rar_count(struct wm_softc *sc)
4405 {
4406 	int size;
4407 
4408 	switch (sc->sc_type) {
4409 	case WM_T_ICH8:
4410 		size = WM_RAL_TABSIZE_ICH8 - 1;
4411 		break;
4412 	case WM_T_ICH9:
4413 	case WM_T_ICH10:
4414 	case WM_T_PCH:
4415 		size = WM_RAL_TABSIZE_ICH8;
4416 		break;
4417 	case WM_T_PCH2:
4418 		size = WM_RAL_TABSIZE_PCH2;
4419 		break;
4420 	case WM_T_PCH_LPT:
4421 	case WM_T_PCH_SPT:
4422 	case WM_T_PCH_CNP:
4423 	case WM_T_PCH_TGP:
4424 		size = WM_RAL_TABSIZE_PCH_LPT;
4425 		break;
4426 	case WM_T_82575:
4427 	case WM_T_I210:
4428 	case WM_T_I211:
4429 		size = WM_RAL_TABSIZE_82575;
4430 		break;
4431 	case WM_T_82576:
4432 	case WM_T_82580:
4433 		size = WM_RAL_TABSIZE_82576;
4434 		break;
4435 	case WM_T_I350:
4436 	case WM_T_I354:
4437 		size = WM_RAL_TABSIZE_I350;
4438 		break;
4439 	default:
4440 		size = WM_RAL_TABSIZE;
4441 	}
4442 
4443 	return size;
4444 }
4445 
4446 /*
4447  * wm_set_filter:
4448  *
4449  *	Set up the receive filter.
4450  */
4451 static void
4452 wm_set_filter(struct wm_softc *sc)
4453 {
4454 	struct ethercom *ec = &sc->sc_ethercom;
4455 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4456 	struct ether_multi *enm;
4457 	struct ether_multistep step;
4458 	bus_addr_t mta_reg;
4459 	uint32_t hash, reg, bit;
4460 	int i, size, ralmax, rv;
4461 
4462 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4463 		device_xname(sc->sc_dev), __func__));
4464 	KASSERT(mutex_owned(sc->sc_core_lock));
4465 
4466 	if (sc->sc_type >= WM_T_82544)
4467 		mta_reg = WMREG_CORDOVA_MTA;
4468 	else
4469 		mta_reg = WMREG_MTA;
4470 
4471 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
4472 
4473 	if (sc->sc_if_flags & IFF_BROADCAST)
4474 		sc->sc_rctl |= RCTL_BAM;
4475 	if (sc->sc_if_flags & IFF_PROMISC) {
4476 		sc->sc_rctl |= RCTL_UPE;
4477 		ETHER_LOCK(ec);
4478 		ec->ec_flags |= ETHER_F_ALLMULTI;
4479 		ETHER_UNLOCK(ec);
4480 		goto allmulti;
4481 	}
4482 
4483 	/*
4484 	 * Set the station address in the first RAL slot, and
4485 	 * clear the remaining slots.
4486 	 */
4487 	size = wm_rar_count(sc);
4488 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
4489 
4490 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT) ||
4491 	    (sc->sc_type == WM_T_PCH_CNP) || (sc->sc_type == WM_T_PCH_TGP)) {
4492 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
4493 		switch (i) {
4494 		case 0:
4495 			/* We can use all entries */
4496 			ralmax = size;
4497 			break;
4498 		case 1:
4499 			/* Only RAR[0] */
4500 			ralmax = 1;
4501 			break;
4502 		default:
4503 			/* Available SHRA + RAR[0] */
4504 			ralmax = i + 1;
4505 		}
4506 	} else
4507 		ralmax = size;
4508 	for (i = 1; i < size; i++) {
4509 		if (i < ralmax)
4510 			wm_set_ral(sc, NULL, i);
4511 	}
4512 
4513 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4514 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4515 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
4516 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)
4517 	    || (sc->sc_type == WM_T_PCH_TGP))
4518 		size = WM_ICH8_MC_TABSIZE;
4519 	else
4520 		size = WM_MC_TABSIZE;
4521 	/* Clear out the multicast table. */
4522 	for (i = 0; i < size; i++) {
4523 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
4524 		CSR_WRITE_FLUSH(sc);
4525 	}
4526 
4527 	ETHER_LOCK(ec);
4528 	ETHER_FIRST_MULTI(step, ec, enm);
4529 	while (enm != NULL) {
4530 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
4531 			ec->ec_flags |= ETHER_F_ALLMULTI;
4532 			ETHER_UNLOCK(ec);
4533 			/*
4534 			 * We must listen to a range of multicast addresses.
4535 			 * For now, just accept all multicasts, rather than
4536 			 * trying to set only those filter bits needed to match
4537 			 * the range.  (At this time, the only use of address
4538 			 * ranges is for IP multicast routing, for which the
4539 			 * range is big enough to require all bits set.)
4540 			 */
4541 			goto allmulti;
4542 		}
4543 
4544 		hash = wm_mchash(sc, enm->enm_addrlo);
4545 
4546 		reg = (hash >> 5);
4547 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4548 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4549 		    || (sc->sc_type == WM_T_PCH2)
4550 		    || (sc->sc_type == WM_T_PCH_LPT)
4551 		    || (sc->sc_type == WM_T_PCH_SPT)
4552 		    || (sc->sc_type == WM_T_PCH_CNP)
4553 		    || (sc->sc_type == WM_T_PCH_TGP))
4554 			reg &= 0x1f;
4555 		else
4556 			reg &= 0x7f;
4557 		bit = hash & 0x1f;
4558 
4559 		hash = CSR_READ(sc, mta_reg + (reg << 2));
4560 		hash |= 1U << bit;
4561 
4562 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
4563 			/*
4564 			 * 82544 Errata 9: Certain registers (FCAH, MTA and
4565 			 * VFTA) cannot be written at odd word alignments in
4566 			 * PCI-X operation; rewrite the even word too.
4567 			 */
4568 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
4569 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4570 			CSR_WRITE_FLUSH(sc);
4571 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
4572 			CSR_WRITE_FLUSH(sc);
4573 		} else {
4574 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4575 			CSR_WRITE_FLUSH(sc);
4576 		}
4577 
4578 		ETHER_NEXT_MULTI(step, enm);
4579 	}
4580 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
4581 	ETHER_UNLOCK(ec);
4582 
4583 	goto setit;
4584 
4585 allmulti:
4586 	sc->sc_rctl |= RCTL_MPE;
4587 
4588 setit:
4589 	if (sc->sc_type >= WM_T_PCH2) {
4590 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4591 		    && (ifp->if_mtu > ETHERMTU))
4592 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
4593 		else
4594 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
4595 		if (rv != 0)
4596 			device_printf(sc->sc_dev,
4597 			    "Failed to do workaround for jumbo frame.\n");
4598 	}
4599 
4600 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
4601 }
4602 
4603 /* Reset and init related */
4604 
4605 static void
4606 wm_set_vlan(struct wm_softc *sc)
4607 {
4608 
4609 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4610 		device_xname(sc->sc_dev), __func__));
4611 
4612 	/* Deal with VLAN enables. */
4613 	if (VLAN_ATTACHED(&sc->sc_ethercom))
4614 		sc->sc_ctrl |= CTRL_VME;
4615 	else
4616 		sc->sc_ctrl &= ~CTRL_VME;
4617 
4618 	/* Write the control registers. */
4619 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4620 }
4621 
4622 static void
4623 wm_set_pcie_completion_timeout(struct wm_softc *sc)
4624 {
4625 	uint32_t gcr;
4626 	pcireg_t ctrl2;
4627 
4628 	gcr = CSR_READ(sc, WMREG_GCR);
4629 
4630 	/* Only take action if timeout value is defaulted to 0 */
4631 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
4632 		goto out;
4633 
4634 	if ((gcr & GCR_CAP_VER2) == 0) {
4635 		gcr |= GCR_CMPL_TMOUT_10MS;
4636 		goto out;
4637 	}
4638 
4639 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
4640 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
4641 	ctrl2 |= WM_PCIE_DCSR2_16MS;
4642 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
4643 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
4644 
4645 out:
4646 	/* Disable completion timeout resend */
4647 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
4648 
4649 	CSR_WRITE(sc, WMREG_GCR, gcr);
4650 }
4651 
4652 void
4653 wm_get_auto_rd_done(struct wm_softc *sc)
4654 {
4655 	int i;
4656 
4657 	/* Wait for EEPROM to reload */
4658 	switch (sc->sc_type) {
4659 	case WM_T_82571:
4660 	case WM_T_82572:
4661 	case WM_T_82573:
4662 	case WM_T_82574:
4663 	case WM_T_82583:
4664 	case WM_T_82575:
4665 	case WM_T_82576:
4666 	case WM_T_82580:
4667 	case WM_T_I350:
4668 	case WM_T_I354:
4669 	case WM_T_I210:
4670 	case WM_T_I211:
4671 	case WM_T_80003:
4672 	case WM_T_ICH8:
4673 	case WM_T_ICH9:
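		/* Poll for up to 10ms (10 x 1ms) for EECD_EE_AUTORD. */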
4674 		for (i = 0; i < 10; i++) {
4675 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4676 				break;
4677 			delay(1000);
4678 		}
4679 		if (i == 10) {
4680 			log(LOG_ERR, "%s: auto read from eeprom failed to "
4681 			    "complete\n", device_xname(sc->sc_dev));
4682 		}
4683 		break;
4684 	default:
4685 		break;
4686 	}
4687 }
4688 
4689 void
4690 wm_lan_init_done(struct wm_softc *sc)
4691 {
4692 	uint32_t reg = 0;
4693 	int i;
4694 
4695 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4696 		device_xname(sc->sc_dev), __func__));
4697 
4698 	/* Wait for eeprom to reload */
4699 	switch (sc->sc_type) {
4700 	case WM_T_ICH10:
4701 	case WM_T_PCH:
4702 	case WM_T_PCH2:
4703 	case WM_T_PCH_LPT:
4704 	case WM_T_PCH_SPT:
4705 	case WM_T_PCH_CNP:
4706 	case WM_T_PCH_TGP:
4707 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4708 			reg = CSR_READ(sc, WMREG_STATUS);
4709 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
4710 				break;
4711 			delay(100);
4712 		}
4713 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4714 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
4715 			    "complete\n", device_xname(sc->sc_dev), __func__);
4716 		}
4717 		break;
4718 	default:
4719 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4720 		    __func__);
4721 		break;
4722 	}
4723 
4724 	reg &= ~STATUS_LAN_INIT_DONE;
4725 	CSR_WRITE(sc, WMREG_STATUS, reg);
4726 }
4727 
4728 void
4729 wm_get_cfg_done(struct wm_softc *sc)
4730 {
4731 	int mask;
4732 	uint32_t reg;
4733 	int i;
4734 
4735 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4736 		device_xname(sc->sc_dev), __func__));
4737 
4738 	/* Wait for eeprom to reload */
4739 	switch (sc->sc_type) {
4740 	case WM_T_82542_2_0:
4741 	case WM_T_82542_2_1:
4742 		/* null */
4743 		break;
4744 	case WM_T_82543:
4745 	case WM_T_82544:
4746 	case WM_T_82540:
4747 	case WM_T_82545:
4748 	case WM_T_82545_3:
4749 	case WM_T_82546:
4750 	case WM_T_82546_3:
4751 	case WM_T_82541:
4752 	case WM_T_82541_2:
4753 	case WM_T_82547:
4754 	case WM_T_82547_2:
4755 	case WM_T_82573:
4756 	case WM_T_82574:
4757 	case WM_T_82583:
4758 		/* generic */
4759 		delay(10*1000);
4760 		break;
4761 	case WM_T_80003:
4762 	case WM_T_82571:
4763 	case WM_T_82572:
4764 	case WM_T_82575:
4765 	case WM_T_82576:
4766 	case WM_T_82580:
4767 	case WM_T_I350:
4768 	case WM_T_I354:
4769 	case WM_T_I210:
4770 	case WM_T_I211:
4771 		if (sc->sc_type == WM_T_82571) {
4772 			/* The 82571 uses port 0's CFGDONE bit for all functions */
4773 			mask = EEMNGCTL_CFGDONE_0;
4774 		} else
4775 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
4776 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
4777 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
4778 				break;
4779 			delay(1000);
4780 		}
4781 		if (i >= WM_PHY_CFG_TIMEOUT)
4782 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
4783 				device_xname(sc->sc_dev), __func__));
4784 		break;
4785 	case WM_T_ICH8:
4786 	case WM_T_ICH9:
4787 	case WM_T_ICH10:
4788 	case WM_T_PCH:
4789 	case WM_T_PCH2:
4790 	case WM_T_PCH_LPT:
4791 	case WM_T_PCH_SPT:
4792 	case WM_T_PCH_CNP:
4793 	case WM_T_PCH_TGP:
4794 		delay(10*1000);
4795 		if (sc->sc_type >= WM_T_ICH10)
4796 			wm_lan_init_done(sc);
4797 		else
4798 			wm_get_auto_rd_done(sc);
4799 
4800 		/* Clear PHY Reset Asserted bit */
4801 		reg = CSR_READ(sc, WMREG_STATUS);
4802 		if ((reg & STATUS_PHYRA) != 0)
4803 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
4804 		break;
4805 	default:
4806 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4807 		    __func__);
4808 		break;
4809 	}
4810 }
4811 
4812 int
4813 wm_phy_post_reset(struct wm_softc *sc)
4814 {
4815 	device_t dev = sc->sc_dev;
4816 	uint16_t reg;
4817 	int rv = 0;
4818 
4819 	/* This function is only for ICH8 and newer. */
4820 	if (sc->sc_type < WM_T_ICH8)
4821 		return 0;
4822 
4823 	if (wm_phy_resetisblocked(sc)) {
4824 		/* XXX */
4825 		device_printf(dev, "PHY is blocked\n");
4826 		return -1;
4827 	}
4828 
4829 	/* Allow time for h/w to get to quiescent state after reset */
4830 	delay(10*1000);
4831 
4832 	/* Perform any necessary post-reset workarounds */
4833 	if (sc->sc_type == WM_T_PCH)
4834 		rv = wm_hv_phy_workarounds_ich8lan(sc);
4835 	else if (sc->sc_type == WM_T_PCH2)
4836 		rv = wm_lv_phy_workarounds_ich8lan(sc);
4837 	if (rv != 0)
4838 		return rv;
4839 
4840 	/* Clear the host wakeup bit after lcd reset */
4841 	if (sc->sc_type >= WM_T_PCH) {
4842 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
4843 		reg &= ~BM_WUC_HOST_WU_BIT;
4844 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
4845 	}
4846 
4847 	/* Configure the LCD with the extended configuration region in NVM */
4848 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
4849 		return rv;
4850 
4851 	/* Configure the LCD with the OEM bits in NVM */
4852 	rv = wm_oem_bits_config_ich8lan(sc, true);
4853 
4854 	if (sc->sc_type == WM_T_PCH2) {
4855 		/* Ungate automatic PHY configuration on non-managed 82579 */
4856 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
4857 			delay(10 * 1000);
4858 			wm_gate_hw_phy_config_ich8lan(sc, false);
4859 		}
4860 		/* Set EEE LPI Update Timer to 200usec */
4861 		rv = sc->phy.acquire(sc);
4862 		if (rv)
4863 			return rv;
4864 		rv = wm_write_emi_reg_locked(dev,
4865 		    I82579_LPI_UPDATE_TIMER, 0x1387);
4866 		sc->phy.release(sc);
4867 	}
4868 
4869 	return rv;
4870 }
4871 
4872 /* Only for PCH and newer */
4873 static int
4874 wm_write_smbus_addr(struct wm_softc *sc)
4875 {
4876 	uint32_t strap, freq;
4877 	uint16_t phy_data;
4878 	int rv;
4879 
4880 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4881 		device_xname(sc->sc_dev), __func__));
4882 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
4883 
4884 	strap = CSR_READ(sc, WMREG_STRAP);
4885 	freq = __SHIFTOUT(strap, STRAP_FREQ);
4886 
4887 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
4888 	if (rv != 0)
4889 		return rv;
4890 
4891 	phy_data &= ~HV_SMB_ADDR_ADDR;
4892 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
4893 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
4894 
4895 	if (sc->sc_phytype == WMPHY_I217) {
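	/*
	 * A STRAP_FREQ value of 0 means that no valid SMBus frequency
	 * is encoded; otherwise bits 0 and 1 of (freq - 1) select the
	 * low and high frequency bits written below.
	 */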
4896 		/* Restore SMBus frequency */
4897 		if (freq--) {
4898 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
4899 			    | HV_SMB_ADDR_FREQ_HIGH);
4900 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
4901 			    HV_SMB_ADDR_FREQ_LOW);
4902 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
4903 			    HV_SMB_ADDR_FREQ_HIGH);
4904 		} else
4905 			DPRINTF(sc, WM_DEBUG_INIT,
4906 			    ("%s: %s Unsupported SMB frequency in PHY\n",
4907 				device_xname(sc->sc_dev), __func__));
4908 	}
4909 
4910 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
4911 	    phy_data);
4912 }
4913 
4914 static int
4915 wm_init_lcd_from_nvm(struct wm_softc *sc)
4916 {
4917 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
4918 	uint16_t phy_page = 0;
4919 	int rv = 0;
4920 
4921 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4922 		device_xname(sc->sc_dev), __func__));
4923 
4924 	switch (sc->sc_type) {
4925 	case WM_T_ICH8:
4926 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
4927 		    || (sc->sc_phytype != WMPHY_IGP_3))
4928 			return 0;
4929 
4930 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
4931 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
4932 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
4933 			break;
4934 		}
4935 		/* FALLTHROUGH */
4936 	case WM_T_PCH:
4937 	case WM_T_PCH2:
4938 	case WM_T_PCH_LPT:
4939 	case WM_T_PCH_SPT:
4940 	case WM_T_PCH_CNP:
4941 	case WM_T_PCH_TGP:
4942 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
4943 		break;
4944 	default:
4945 		return 0;
4946 	}
4947 
4948 	if ((rv = sc->phy.acquire(sc)) != 0)
4949 		return rv;
4950 
4951 	reg = CSR_READ(sc, WMREG_FEXTNVM);
4952 	if ((reg & sw_cfg_mask) == 0)
4953 		goto release;
4954 
4955 	/*
4956 	 * Make sure HW does not configure LCD from PHY extended configuration
4957 	 * before SW configuration
4958 	 */
4959 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
4960 	if ((sc->sc_type < WM_T_PCH2)
4961 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
4962 		goto release;
4963 
4964 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
4965 		device_xname(sc->sc_dev), __func__));
4966 	/* word_addr is in DWORD */
4967 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
4968 
4969 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
4970 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
4971 	if (cnf_size == 0)
4972 		goto release;
4973 
4974 	if (((sc->sc_type == WM_T_PCH)
4975 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
4976 	    || (sc->sc_type > WM_T_PCH)) {
4977 		/*
4978 		 * HW configures the SMBus address and LEDs when the OEM and
4979 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
4980 		 * are cleared, SW will configure them instead.
4981 		 */
4982 		DPRINTF(sc, WM_DEBUG_INIT,
4983 		    ("%s: %s: Configure SMBus and LED\n",
4984 			device_xname(sc->sc_dev), __func__));
4985 		if ((rv = wm_write_smbus_addr(sc)) != 0)
4986 			goto release;
4987 
4988 		reg = CSR_READ(sc, WMREG_LEDCTL);
4989 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
4990 		    (uint16_t)reg);
4991 		if (rv != 0)
4992 			goto release;
4993 	}
4994 
4995 	/* Configure LCD from extended configuration region. */
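	/*
	 * Each entry in the region is a (data, address) word pair. A
	 * page-select entry updates phy_page, which is folded into the
	 * register address of the subsequent writes.
	 */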
4996 	for (i = 0; i < cnf_size; i++) {
4997 		uint16_t reg_data, reg_addr;
4998 
4999 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
5000 			goto release;
5001 
5002 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
5003 			goto release;
5004 
5005 		if (reg_addr == IGPHY_PAGE_SELECT)
5006 			phy_page = reg_data;
5007 
5008 		reg_addr &= IGPHY_MAXREGADDR;
5009 		reg_addr |= phy_page;
5010 
5011 		KASSERT(sc->phy.writereg_locked != NULL);
5012 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
5013 		    reg_data);
5014 	}
5015 
5016 release:
5017 	sc->phy.release(sc);
5018 	return rv;
5019 }
5020 
5021 /*
5022  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
5023  *  @sc:       pointer to the HW structure
5024  *  @d0_state: true when entering D0, false when entering D3
5025  *
5026  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
5027  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
5028  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
5029  */
5030 int
5031 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
5032 {
5033 	uint32_t mac_reg;
5034 	uint16_t oem_reg;
5035 	int rv;
5036 
5037 	if (sc->sc_type < WM_T_PCH)
5038 		return 0;
5039 
5040 	rv = sc->phy.acquire(sc);
5041 	if (rv != 0)
5042 		return rv;
5043 
5044 	if (sc->sc_type == WM_T_PCH) {
5045 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
5046 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
5047 			goto release;
5048 	}
5049 
5050 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
5051 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
5052 		goto release;
5053 
5054 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
5055 
5056 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
5057 	if (rv != 0)
5058 		goto release;
5059 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
5060 
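	/*
	 * Map the MAC's PHY_CTRL bits onto the PHY's OEM bits: on entry
	 * to D0 only the D0 variants matter, while on entry to D3
	 * either variant disables gigabit or enables LPLU.
	 */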
5061 	if (d0_state) {
5062 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
5063 			oem_reg |= HV_OEM_BITS_A1KDIS;
5064 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
5065 			oem_reg |= HV_OEM_BITS_LPLU;
5066 	} else {
5067 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
5068 		    != 0)
5069 			oem_reg |= HV_OEM_BITS_A1KDIS;
5070 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
5071 		    != 0)
5072 			oem_reg |= HV_OEM_BITS_LPLU;
5073 	}
5074 
5075 	/* Set Restart auto-neg to activate the bits */
5076 	if ((d0_state || (sc->sc_type != WM_T_PCH))
5077 	    && (wm_phy_resetisblocked(sc) == false))
5078 		oem_reg |= HV_OEM_BITS_ANEGNOW;
5079 
5080 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
5081 
5082 release:
5083 	sc->phy.release(sc);
5084 
5085 	return rv;
5086 }
5087 
5088 /* Init hardware bits */
5089 void
5090 wm_initialize_hardware_bits(struct wm_softc *sc)
5091 {
5092 	uint32_t tarc0, tarc1, reg;
5093 
5094 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
5095 		device_xname(sc->sc_dev), __func__));
5096 
5097 	/* For 82571 variants, 80003 and ICHs */
5098 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
5099 	    || WM_IS_ICHPCH(sc)) {
5100 
5101 		/* Transmit Descriptor Control 0 */
5102 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
5103 		reg |= TXDCTL_COUNT_DESC;
5104 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
5105 
5106 		/* Transmit Descriptor Control 1 */
5107 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
5108 		reg |= TXDCTL_COUNT_DESC;
5109 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
5110 
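		/*
		 * The TARC (Tx Arbitration Count) registers carry
		 * per-queue arbitration knobs; the magic bit settings
		 * below follow Intel errata and reference drivers.
		 */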
5111 		/* TARC0 */
5112 		tarc0 = CSR_READ(sc, WMREG_TARC0);
5113 		switch (sc->sc_type) {
5114 		case WM_T_82571:
5115 		case WM_T_82572:
5116 		case WM_T_82573:
5117 		case WM_T_82574:
5118 		case WM_T_82583:
5119 		case WM_T_80003:
5120 			/* Clear bits 30..27 */
5121 			tarc0 &= ~__BITS(30, 27);
5122 			break;
5123 		default:
5124 			break;
5125 		}
5126 
5127 		switch (sc->sc_type) {
5128 		case WM_T_82571:
5129 		case WM_T_82572:
5130 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
5131 
5132 			tarc1 = CSR_READ(sc, WMREG_TARC1);
5133 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
5134 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
5135 			/* 8257[12] Errata No.7 */
5136 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
5137 
5138 			/* TARC1 bit 28 */
5139 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
5140 				tarc1 &= ~__BIT(28);
5141 			else
5142 				tarc1 |= __BIT(28);
5143 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
5144 
5145 			/*
5146 			 * 8257[12] Errata No.13
5147 			 * Disable Dynamic Clock Gating.
5148 			 */
5149 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
5150 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
5151 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5152 			break;
5153 		case WM_T_82573:
5154 		case WM_T_82574:
5155 		case WM_T_82583:
5156 			if ((sc->sc_type == WM_T_82574)
5157 			    || (sc->sc_type == WM_T_82583))
5158 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
5159 
5160 			/* Extended Device Control */
5161 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
5162 			reg &= ~__BIT(23);	/* Clear bit 23 */
5163 			reg |= __BIT(22);	/* Set bit 22 */
5164 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5165 
5166 			/* Device Control */
5167 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
5168 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5169 
5170 			/* PCIe Control Register */
5171 			/*
5172 			 * 82573 Errata (unknown).
5173 			 *
5174 			 * 82574 Errata 25 and 82583 Errata 12
5175 			 * "Dropped Rx Packets":
5176 			 *   NVM image version 2.1.4 and newer do not have this bug.
5177 			 */
5178 			reg = CSR_READ(sc, WMREG_GCR);
5179 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
5180 			CSR_WRITE(sc, WMREG_GCR, reg);
5181 
5182 			if ((sc->sc_type == WM_T_82574)
5183 			    || (sc->sc_type == WM_T_82583)) {
5184 				/*
5185 				 * Document says this bit must be set for
5186 				 * proper operation.
5187 				 */
5188 				reg = CSR_READ(sc, WMREG_GCR);
5189 				reg |= __BIT(22);
5190 				CSR_WRITE(sc, WMREG_GCR, reg);
5191 
5192 				/*
5193 				 * Apply a workaround for hardware errata
5194 				 * documented in the errata docs. It fixes an
5195 				 * issue where some error-prone or unreliable
5196 				 * PCIe completions occur, particularly with
5197 				 * ASPM enabled. Without the fix, the issue
5198 				 * can cause Tx timeouts.
5199 				 */
5200 				reg = CSR_READ(sc, WMREG_GCR2);
5201 				reg |= __BIT(0);
5202 				CSR_WRITE(sc, WMREG_GCR2, reg);
5203 			}
5204 			break;
5205 		case WM_T_80003:
5206 			/* TARC0 */
5207 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
5208 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
5209 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
5210 
5211 			/* TARC1 bit 28 */
5212 			tarc1 = CSR_READ(sc, WMREG_TARC1);
5213 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
5214 				tarc1 &= ~__BIT(28);
5215 			else
5216 				tarc1 |= __BIT(28);
5217 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
5218 			break;
5219 		case WM_T_ICH8:
5220 		case WM_T_ICH9:
5221 		case WM_T_ICH10:
5222 		case WM_T_PCH:
5223 		case WM_T_PCH2:
5224 		case WM_T_PCH_LPT:
5225 		case WM_T_PCH_SPT:
5226 		case WM_T_PCH_CNP:
5227 		case WM_T_PCH_TGP:
5228 			/* TARC0 */
5229 			if (sc->sc_type == WM_T_ICH8) {
5230 				/* Set TARC0 bits 29 and 28 */
5231 				tarc0 |= __BITS(29, 28);
5232 			} else if (sc->sc_type == WM_T_PCH_SPT) {
5233 				tarc0 |= __BIT(29);
5234 				/*
5235 				 *  Drop bit 28. From Linux.
5236 				 * See I218/I219 spec update
5237 				 * "5. Buffer Overrun While the I219 is
5238 				 * Processing DMA Transactions"
5239 				 */
5240 				tarc0 &= ~__BIT(28);
5241 			}
5242 			/* Set TARC0 bits 23,24,26,27 */
5243 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
5244 
5245 			/* CTRL_EXT */
5246 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
5247 			reg |= __BIT(22);	/* Set bit 22 */
5248 			/*
5249 			 * Enable PHY low-power state when MAC is at D3
5250 			 * w/o WoL
5251 			 */
5252 			if (sc->sc_type >= WM_T_PCH)
5253 				reg |= CTRL_EXT_PHYPDEN;
5254 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5255 
5256 			/* TARC1 */
5257 			tarc1 = CSR_READ(sc, WMREG_TARC1);
5258 			/* bit 28 */
5259 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
5260 				tarc1 &= ~__BIT(28);
5261 			else
5262 				tarc1 |= __BIT(28);
5263 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
5264 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
5265 
5266 			/* Device Status */
5267 			if (sc->sc_type == WM_T_ICH8) {
5268 				reg = CSR_READ(sc, WMREG_STATUS);
5269 				reg &= ~__BIT(31);
5270 				CSR_WRITE(sc, WMREG_STATUS, reg);
5271 
5272 			}
5273 
5274 			/* IOSFPC */
5275 			if (sc->sc_type == WM_T_PCH_SPT) {
5276 				reg = CSR_READ(sc, WMREG_IOSFPC);
5277 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
5278 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
5279 			}
5280 			/*
5281 			 * Work-around descriptor data corruption issue during
5282 			 * NFS v2 UDP traffic, just disable the NFS filtering
5283 			 * capability.
5284 			 */
5285 			reg = CSR_READ(sc, WMREG_RFCTL);
5286 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
5287 			CSR_WRITE(sc, WMREG_RFCTL, reg);
5288 			break;
5289 		default:
5290 			break;
5291 		}
5292 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
5293 
5294 		switch (sc->sc_type) {
5295 		case WM_T_82571:
5296 		case WM_T_82572:
5297 		case WM_T_82573:
5298 		case WM_T_80003:
5299 		case WM_T_ICH8:
5300 			/*
5301 			 * 8257[12] Errata No.52, 82573 Errata No.43 and some
5302 			 * others: set these bits to avoid the RSS Hash Value bug.
5303 			 */
5304 			reg = CSR_READ(sc, WMREG_RFCTL);
5305 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
5306 			CSR_WRITE(sc, WMREG_RFCTL, reg);
5307 			break;
5308 		case WM_T_82574:
5309 			/* Use extended Rx descriptors. */
5310 			reg = CSR_READ(sc, WMREG_RFCTL);
5311 			reg |= WMREG_RFCTL_EXSTEN;
5312 			CSR_WRITE(sc, WMREG_RFCTL, reg);
5313 			break;
5314 		default:
5315 			break;
5316 		}
5317 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
5318 		/*
5319 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
5320 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
5321 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
5322 		 * Correctly by the Device"
5323 		 *
5324 		 * I354(C2000) Errata AVR53:
5325 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
5326 		 * Hang"
5327 		 */
5328 		reg = CSR_READ(sc, WMREG_RFCTL);
5329 		reg |= WMREG_RFCTL_IPV6EXDIS;
5330 		CSR_WRITE(sc, WMREG_RFCTL, reg);
5331 	}
5332 }
5333 
5334 static uint32_t
5335 wm_rxpbs_adjust_82580(uint32_t val)
5336 {
5337 	uint32_t rv = 0;
5338 
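	/* Out-of-range RXPBS encodings translate to 0. */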
5339 	if (val < __arraycount(wm_82580_rxpbs_table))
5340 		rv = wm_82580_rxpbs_table[val];
5341 
5342 	return rv;
5343 }
5344 
5345 /*
5346  * wm_reset_phy:
5347  *
5348  *	generic PHY reset function.
5349  *	Same as e1000_phy_hw_reset_generic()
5350  */
5351 static int
5352 wm_reset_phy(struct wm_softc *sc)
5353 {
5354 	uint32_t reg;
5355 	int rv;
5356 
5357 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
5358 		device_xname(sc->sc_dev), __func__));
5359 	if (wm_phy_resetisblocked(sc))
5360 		return -1;
5361 
5362 	rv = sc->phy.acquire(sc);
5363 	if (rv) {
5364 		device_printf(sc->sc_dev, "%s: failed to acquire phy: %d\n",
5365 		    __func__, rv);
5366 		return rv;
5367 	}
5368 
5369 	reg = CSR_READ(sc, WMREG_CTRL);
5370 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
5371 	CSR_WRITE_FLUSH(sc);
5372 
5373 	delay(sc->phy.reset_delay_us);
5374 
5375 	CSR_WRITE(sc, WMREG_CTRL, reg);
5376 	CSR_WRITE_FLUSH(sc);
5377 
5378 	delay(150);
5379 
5380 	sc->phy.release(sc);
5381 
5382 	wm_get_cfg_done(sc);
5383 	wm_phy_post_reset(sc);
5384 
5385 	return 0;
5386 }
5387 
5388 /*
5389  * wm_flush_desc_rings - remove all descriptors from the descriptor rings.
5390  *
5391  * On I219, the descriptor rings must be emptied before resetting the HW
5392  * or before changing the device state to D3 during runtime (runtime PM).
5393  *
5394  * Failure to do this will cause the HW to enter a unit hang state which can
5395  * only be released by PCI reset on the device.
5396  *
5397  * I219 does not use multiqueue, so it is enough to check sc->sc_queue[0] only.
5398  */
5399 static void
5400 wm_flush_desc_rings(struct wm_softc *sc)
5401 {
5402 	pcireg_t preg;
5403 	uint32_t reg;
5404 	struct wm_txqueue *txq;
5405 	wiseman_txdesc_t *txd;
5406 	int nexttx;
5407 	uint32_t rctl;
5408 
5409 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
5410 
5411 	/* First, disable MULR fix in FEXTNVM11 */
5412 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
5413 	reg |= FEXTNVM11_DIS_MULRFIX;
5414 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
5415 
5416 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
5417 	reg = CSR_READ(sc, WMREG_TDLEN(0));
5418 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
5419 		return;
5420 
5421 	/*
5422 	 * Remove all descriptors from the tx_ring.
5423 	 *
5424 	 * We want to clear all pending descriptors from the TX ring. Zeroing
5425 	 * happens when the HW reads the regs. We assign the ring itself as
5426 	 * the data of the next descriptor. We don't care about the data; we
5427 	 * are about to reset the HW.
5428 	 */
5429 #ifdef WM_DEBUG
5430 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
5431 #endif
5432 	reg = CSR_READ(sc, WMREG_TCTL);
5433 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
5434 
5435 	txq = &sc->sc_queue[0].wmq_txq;
5436 	nexttx = txq->txq_next;
5437 	txd = &txq->txq_descs[nexttx];
5438 	wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
5439 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
5440 	txd->wtx_fields.wtxu_status = 0;
5441 	txd->wtx_fields.wtxu_options = 0;
5442 	txd->wtx_fields.wtxu_vlan = 0;
5443 
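	/*
	 * Bumping TDT below hands this dummy 512-byte descriptor to the
	 * HW; consuming it satisfies the pending flush request.
	 */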
5444 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
5445 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5446 
5447 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
5448 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
5449 	CSR_WRITE_FLUSH(sc);
5450 	delay(250);
5451 
5452 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
5453 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
5454 		return;
5455 
5456 	/*
5457 	 * Mark all descriptors in the RX ring as consumed and disable the
5458 	 * rx ring.
5459 	 */
5460 #ifdef WM_DEBUG
5461 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
5462 #endif
5463 	rctl = CSR_READ(sc, WMREG_RCTL);
5464 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
5465 	CSR_WRITE_FLUSH(sc);
5466 	delay(150);
5467 
5468 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
5469 	/* Zero the lower 14 bits (prefetch and host thresholds) */
5470 	reg &= 0xffffc000;
5471 	/*
5472 	 * Update thresholds: prefetch threshold to 31, host threshold
5473 	 * to 1 and make sure the granularity is "descriptors" and not
5474 	 * "cache lines"
5475 	 */
5476 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
5477 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
5478 
5479 	/* Momentarily enable the RX ring for the changes to take effect */
5480 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
5481 	CSR_WRITE_FLUSH(sc);
5482 	delay(150);
5483 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
5484 }
5485 
5486 /*
5487  * wm_reset:
5488  *
5489  *	Reset the i82542 chip.
5490  */
5491 static void
5492 wm_reset(struct wm_softc *sc)
5493 {
5494 	int phy_reset = 0;
5495 	int i, error = 0;
5496 	uint32_t reg;
5497 	uint16_t kmreg;
5498 	int rv;
5499 
5500 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
5501 		device_xname(sc->sc_dev), __func__));
5502 	KASSERT(sc->sc_type != 0);
5503 
5504 	/*
5505 	 * Allocate on-chip memory according to the MTU size.
5506 	 * The Packet Buffer Allocation register must be written
5507 	 * before the chip is reset.
5508 	 */
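	/*
	 * The PBA_* values are in kilobytes of on-chip packet buffer;
	 * memory not assigned to Rx remains available for the Tx FIFO
	 * (see the 82547 FIFO setup below).
	 */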
5509 	switch (sc->sc_type) {
5510 	case WM_T_82547:
5511 	case WM_T_82547_2:
5512 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
5513 		    PBA_22K : PBA_30K;
5514 		for (i = 0; i < sc->sc_nqueues; i++) {
5515 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5516 			txq->txq_fifo_head = 0;
5517 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
5518 			txq->txq_fifo_size =
5519 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
5520 			txq->txq_fifo_stall = 0;
5521 		}
5522 		break;
5523 	case WM_T_82571:
5524 	case WM_T_82572:
5525 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
5526 	case WM_T_80003:
5527 		sc->sc_pba = PBA_32K;
5528 		break;
5529 	case WM_T_82573:
5530 		sc->sc_pba = PBA_12K;
5531 		break;
5532 	case WM_T_82574:
5533 	case WM_T_82583:
5534 		sc->sc_pba = PBA_20K;
5535 		break;
5536 	case WM_T_82576:
5537 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
5538 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
5539 		break;
5540 	case WM_T_82580:
5541 	case WM_T_I350:
5542 	case WM_T_I354:
5543 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
5544 		break;
5545 	case WM_T_I210:
5546 	case WM_T_I211:
5547 		sc->sc_pba = PBA_34K;
5548 		break;
5549 	case WM_T_ICH8:
5550 		/* Workaround for a bit corruption issue in FIFO memory */
5551 		sc->sc_pba = PBA_8K;
5552 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
5553 		break;
5554 	case WM_T_ICH9:
5555 	case WM_T_ICH10:
5556 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
5557 		    PBA_14K : PBA_10K;
5558 		break;
5559 	case WM_T_PCH:
5560 	case WM_T_PCH2:	/* XXX 14K? */
5561 	case WM_T_PCH_LPT:
5562 	case WM_T_PCH_SPT:
5563 	case WM_T_PCH_CNP:
5564 	case WM_T_PCH_TGP:
5565 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
5566 		    PBA_12K : PBA_26K;
5567 		break;
5568 	default:
5569 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
5570 		    PBA_40K : PBA_48K;
5571 		break;
5572 	}
5573 	/*
5574 	 * Only old or non-multiqueue devices have the PBA register.
5575 	 * XXX Need special handling for 82575.
5576 	 */
5577 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
5578 	    || (sc->sc_type == WM_T_82575))
5579 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
5580 
5581 	/* Prevent the PCI-E bus from sticking */
5582 	if (sc->sc_flags & WM_F_PCIE) {
5583 		int timeout = 800;
5584 
5585 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
5586 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5587 
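		/*
		 * Wait up to 80ms (800 x 100us) for in-flight GIO master
		 * requests to finish.
		 */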
5588 		while (timeout--) {
5589 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
5590 			    == 0)
5591 				break;
5592 			delay(100);
5593 		}
5594 		if (timeout == 0)
5595 			device_printf(sc->sc_dev,
5596 			    "failed to disable bus mastering\n");
5597 	}
5598 
5599 	/* Set the completion timeout for interface */
5600 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
5601 	    || (sc->sc_type == WM_T_82580)
5602 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
5603 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
5604 		wm_set_pcie_completion_timeout(sc);
5605 
5606 	/* Clear interrupt */
5607 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5608 	if (wm_is_using_msix(sc)) {
5609 		if (sc->sc_type != WM_T_82574) {
5610 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5611 			CSR_WRITE(sc, WMREG_EIAC, 0);
5612 		} else
5613 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5614 	}
5615 
5616 	/* Stop the transmit and receive processes. */
5617 	CSR_WRITE(sc, WMREG_RCTL, 0);
5618 	sc->sc_rctl &= ~RCTL_EN;
5619 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
5620 	CSR_WRITE_FLUSH(sc);
5621 
5622 	/* XXX set_tbi_sbp_82543() */
5623 
5624 	delay(10*1000);
5625 
5626 	/* Must acquire the MDIO ownership before MAC reset */
5627 	switch (sc->sc_type) {
5628 	case WM_T_82573:
5629 	case WM_T_82574:
5630 	case WM_T_82583:
5631 		error = wm_get_hw_semaphore_82573(sc);
5632 		break;
5633 	default:
5634 		break;
5635 	}
5636 
5637 	/*
5638 	 * 82541 Errata 29? & 82547 Errata 28?
5639 	 * See also the description of the PHY_RST bit in the CTRL register
5640 	 * in 8254x_GBe_SDM.pdf.
5641 	 */
5642 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
5643 		CSR_WRITE(sc, WMREG_CTRL,
5644 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
5645 		CSR_WRITE_FLUSH(sc);
5646 		delay(5000);
5647 	}
5648 
5649 	switch (sc->sc_type) {
5650 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
5651 	case WM_T_82541:
5652 	case WM_T_82541_2:
5653 	case WM_T_82547:
5654 	case WM_T_82547_2:
5655 		/*
5656 		 * On some chipsets, a reset through a memory-mapped write
5657 		 * cycle can cause the chip to reset before completing the
5658 		 * write cycle. This causes major headaches that can be avoided
5659 		 * by issuing the reset via indirect register writes through
5660 		 * I/O space.
5661 		 *
5662 		 * So, if we successfully mapped the I/O BAR at attach time,
5663 		 * use that. Otherwise, try our luck with a memory-mapped
5664 		 * reset.
5665 		 */
5666 		if (sc->sc_flags & WM_F_IOH_VALID)
5667 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
5668 		else
5669 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
5670 		break;
5671 	case WM_T_82545_3:
5672 	case WM_T_82546_3:
5673 		/* Use the shadow control register on these chips. */
5674 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
5675 		break;
5676 	case WM_T_80003:
5677 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
5678 		if (sc->phy.acquire(sc) != 0)
5679 			break;
5680 		CSR_WRITE(sc, WMREG_CTRL, reg);
5681 		sc->phy.release(sc);
5682 		break;
5683 	case WM_T_ICH8:
5684 	case WM_T_ICH9:
5685 	case WM_T_ICH10:
5686 	case WM_T_PCH:
5687 	case WM_T_PCH2:
5688 	case WM_T_PCH_LPT:
5689 	case WM_T_PCH_SPT:
5690 	case WM_T_PCH_CNP:
5691 	case WM_T_PCH_TGP:
5692 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
5693 		if (wm_phy_resetisblocked(sc) == false) {
5694 			/*
5695 			 * Gate automatic PHY configuration by hardware on
5696 			 * non-managed 82579
5697 			 */
5698 			if ((sc->sc_type == WM_T_PCH2)
5699 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
5700 				== 0))
5701 				wm_gate_hw_phy_config_ich8lan(sc, true);
5702 
5703 			reg |= CTRL_PHY_RESET;
5704 			phy_reset = 1;
5705 		} else
5706 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
5707 		if (sc->phy.acquire(sc) != 0)
5708 			break;
5709 		CSR_WRITE(sc, WMREG_CTRL, reg);
5710 		/* Don't insert a completion barrier while resetting */
5711 		delay(20*1000);
5712 		/*
5713 		 * The EXTCNFCTR_MDIO_SW_OWNERSHIP bit is cleared by the reset,
5714 		 * so don't use sc->phy.release(sc). Release sc_ich_phymtx
5715 		 * only. See also wm_get_swflag_ich8lan().
5716 		 */
5717 		mutex_exit(sc->sc_ich_phymtx);
5718 		break;
5719 	case WM_T_82580:
5720 	case WM_T_I350:
5721 	case WM_T_I354:
5722 	case WM_T_I210:
5723 	case WM_T_I211:
5724 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
5725 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
5726 			CSR_WRITE_FLUSH(sc);
5727 		delay(5000);
5728 		break;
5729 	case WM_T_82542_2_0:
5730 	case WM_T_82542_2_1:
5731 	case WM_T_82543:
5732 	case WM_T_82540:
5733 	case WM_T_82545:
5734 	case WM_T_82546:
5735 	case WM_T_82571:
5736 	case WM_T_82572:
5737 	case WM_T_82573:
5738 	case WM_T_82574:
5739 	case WM_T_82575:
5740 	case WM_T_82576:
5741 	case WM_T_82583:
5742 	default:
5743 		/* Everything else can safely use the documented method. */
5744 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
5745 		break;
5746 	}
5747 
5748 	/* Must release the MDIO ownership after MAC reset */
5749 	switch (sc->sc_type) {
5750 	case WM_T_82573:
5751 	case WM_T_82574:
5752 	case WM_T_82583:
5753 		if (error == 0)
5754 			wm_put_hw_semaphore_82573(sc);
5755 		break;
5756 	default:
5757 		break;
5758 	}
5759 
5760 	/* Set Phy Config Counter to 50msec */
5761 	if (sc->sc_type == WM_T_PCH2) {
5762 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
5763 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
5764 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
5765 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
5766 	}
5767 
5768 	if (phy_reset != 0)
5769 		wm_get_cfg_done(sc);
5770 
5771 	/* Reload EEPROM */
5772 	switch (sc->sc_type) {
5773 	case WM_T_82542_2_0:
5774 	case WM_T_82542_2_1:
5775 	case WM_T_82543:
5776 	case WM_T_82544:
5777 		delay(10);
5778 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5779 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5780 		CSR_WRITE_FLUSH(sc);
5781 		delay(2000);
5782 		break;
5783 	case WM_T_82540:
5784 	case WM_T_82545:
5785 	case WM_T_82545_3:
5786 	case WM_T_82546:
5787 	case WM_T_82546_3:
5788 		delay(5*1000);
5789 		/* XXX Disable HW ARPs on ASF enabled adapters */
5790 		break;
5791 	case WM_T_82541:
5792 	case WM_T_82541_2:
5793 	case WM_T_82547:
5794 	case WM_T_82547_2:
5795 		delay(20000);
5796 		/* XXX Disable HW ARPs on ASF enabled adapters */
5797 		break;
5798 	case WM_T_82571:
5799 	case WM_T_82572:
5800 	case WM_T_82573:
5801 	case WM_T_82574:
5802 	case WM_T_82583:
5803 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
5804 			delay(10);
5805 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5806 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5807 			CSR_WRITE_FLUSH(sc);
5808 		}
5809 		/* check EECD_EE_AUTORD */
5810 		wm_get_auto_rd_done(sc);
5811 		/*
5812 		 * PHY configuration from the NVM starts just after EECD_AUTO_RD
5813 		 * is set.
5814 		 */
5815 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
5816 		    || (sc->sc_type == WM_T_82583))
5817 			delay(25*1000);
5818 		break;
5819 	case WM_T_82575:
5820 	case WM_T_82576:
5821 	case WM_T_82580:
5822 	case WM_T_I350:
5823 	case WM_T_I354:
5824 	case WM_T_I210:
5825 	case WM_T_I211:
5826 	case WM_T_80003:
5827 		/* check EECD_EE_AUTORD */
5828 		wm_get_auto_rd_done(sc);
5829 		break;
5830 	case WM_T_ICH8:
5831 	case WM_T_ICH9:
5832 	case WM_T_ICH10:
5833 	case WM_T_PCH:
5834 	case WM_T_PCH2:
5835 	case WM_T_PCH_LPT:
5836 	case WM_T_PCH_SPT:
5837 	case WM_T_PCH_CNP:
5838 	case WM_T_PCH_TGP:
5839 		break;
5840 	default:
5841 		panic("%s: unknown type\n", __func__);
5842 	}
5843 
5844 	/* Check whether EEPROM is present or not */
5845 	switch (sc->sc_type) {
5846 	case WM_T_82575:
5847 	case WM_T_82576:
5848 	case WM_T_82580:
5849 	case WM_T_I350:
5850 	case WM_T_I354:
5851 	case WM_T_ICH8:
5852 	case WM_T_ICH9:
5853 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
5854 			/* Not found */
5855 			sc->sc_flags |= WM_F_EEPROM_INVALID;
5856 			if (sc->sc_type == WM_T_82575)
5857 				wm_reset_init_script_82575(sc);
5858 		}
5859 		break;
5860 	default:
5861 		break;
5862 	}
5863 
5864 	if (phy_reset != 0)
5865 		wm_phy_post_reset(sc);
5866 
5867 	if ((sc->sc_type == WM_T_82580)
5868 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
5869 		/* Clear global device reset status bit */
5870 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
5871 	}
5872 
5873 	/* Clear any pending interrupt events. */
5874 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5875 	reg = CSR_READ(sc, WMREG_ICR);
5876 	if (wm_is_using_msix(sc)) {
5877 		if (sc->sc_type != WM_T_82574) {
5878 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5879 			CSR_WRITE(sc, WMREG_EIAC, 0);
5880 		} else
5881 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5882 	}
5883 
5884 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5885 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5886 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
5887 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)
5888 	    || (sc->sc_type == WM_T_PCH_TGP)) {
5889 		reg = CSR_READ(sc, WMREG_KABGTXD);
5890 		reg |= KABGTXD_BGSQLBIAS;
5891 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
5892 	}
5893 
5894 	/* Reload sc_ctrl */
5895 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5896 
5897 	wm_set_eee(sc);
5898 
5899 	/*
5900 	 * For PCH, this write will make sure that any noise will be detected
5901 	 * as a CRC error and be dropped rather than show up as a bad packet
5902 	 * to the DMA engine
5903 	 * to the DMA engine.
5904 	if (sc->sc_type == WM_T_PCH)
5905 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
5906 
5907 	if (sc->sc_type >= WM_T_82544)
5908 		CSR_WRITE(sc, WMREG_WUC, 0);
5909 
5910 	if (sc->sc_type < WM_T_82575)
5911 		wm_disable_aspm(sc); /* Workaround for some chips */
5912 
5913 	wm_reset_mdicnfg_82580(sc);
5914 
5915 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
5916 		wm_pll_workaround_i210(sc);
5917 
5918 	if (sc->sc_type == WM_T_80003) {
5919 		/* Default to TRUE to enable the MDIC W/A */
5920 		sc->sc_flags |= WM_F_80003_MDIC_WA;
5921 
5922 		rv = wm_kmrn_readreg(sc,
5923 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
5924 		if (rv == 0) {
5925 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
5926 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
5927 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
5928 			else
5929 				sc->sc_flags |= WM_F_80003_MDIC_WA;
5930 		}
5931 	}
5932 }
5933 
5934 /*
5935  * wm_add_rxbuf:
5936  *
5937  *	Add a receive buffer to the indicated descriptor.
5938  */
5939 static int
5940 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
5941 {
5942 	struct wm_softc *sc = rxq->rxq_sc;
5943 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
5944 	struct mbuf *m;
5945 	int error;
5946 
5947 	KASSERT(mutex_owned(rxq->rxq_lock));
5948 
5949 	MGETHDR(m, M_DONTWAIT, MT_DATA);
5950 	if (m == NULL)
5951 		return ENOBUFS;
5952 
5953 	MCLGET(m, M_DONTWAIT);
5954 	if ((m->m_flags & M_EXT) == 0) {
5955 		m_freem(m);
5956 		return ENOBUFS;
5957 	}
5958 
5959 	if (rxs->rxs_mbuf != NULL)
5960 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5961 
5962 	rxs->rxs_mbuf = m;
5963 
5964 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5965 	/*
5966 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
5967 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
5968 	 */
5969 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
5970 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
5971 	if (error) {
5972 		/* XXX XXX XXX */
5973 		aprint_error_dev(sc->sc_dev,
5974 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
5975 		panic("wm_add_rxbuf");
5976 	}
5977 
5978 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5979 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5980 
5981 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5982 		if ((sc->sc_rctl & RCTL_EN) != 0)
5983 			wm_init_rxdesc(rxq, idx);
5984 	} else
5985 		wm_init_rxdesc(rxq, idx);
5986 
5987 	return 0;
5988 }
5989 
5990 /*
5991  * wm_rxdrain:
5992  *
5993  *	Drain the receive queue.
5994  */
5995 static void
5996 wm_rxdrain(struct wm_rxqueue *rxq)
5997 {
5998 	struct wm_softc *sc = rxq->rxq_sc;
5999 	struct wm_rxsoft *rxs;
6000 	int i;
6001 
6002 	KASSERT(mutex_owned(rxq->rxq_lock));
6003 
6004 	for (i = 0; i < WM_NRXDESC; i++) {
6005 		rxs = &rxq->rxq_soft[i];
6006 		if (rxs->rxs_mbuf != NULL) {
6007 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
6008 			m_freem(rxs->rxs_mbuf);
6009 			rxs->rxs_mbuf = NULL;
6010 		}
6011 	}
6012 }
6013 
6014 /*
6015  * Setup registers for RSS.
6016  *
6017  * XXX No VMDq support yet.
6018  */
6019 static void
6020 wm_init_rss(struct wm_softc *sc)
6021 {
6022 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
6023 	int i;
6024 
6025 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
6026 
6027 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
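	/*
	 * Fill the redirection table (RETA) round-robin over the active
	 * queues; the HW indexes it with low bits of the RSS hash to
	 * pick the Rx queue.
	 */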
6028 		unsigned int qid, reta_ent;
6029 
6030 		qid = i % sc->sc_nqueues;
6031 		switch (sc->sc_type) {
6032 		case WM_T_82574:
6033 			reta_ent = __SHIFTIN(qid,
6034 			    RETA_ENT_QINDEX_MASK_82574);
6035 			break;
6036 		case WM_T_82575:
6037 			reta_ent = __SHIFTIN(qid,
6038 			    RETA_ENT_QINDEX1_MASK_82575);
6039 			break;
6040 		default:
6041 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
6042 			break;
6043 		}
6044 
6045 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
6046 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
6047 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
6048 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
6049 	}
6050 
6051 	rss_getkey((uint8_t *)rss_key);
6052 	for (i = 0; i < RSSRK_NUM_REGS; i++)
6053 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
6054 
6055 	if (sc->sc_type == WM_T_82574)
6056 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
6057 	else
6058 		mrqc = MRQC_ENABLE_RSS_MQ;
6059 
6060 	/*
6061 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
6062 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
6063 	 */
6064 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
6065 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
6066 #if 0
6067 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
6068 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
6069 #endif
6070 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
6071 
6072 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
6073 }
6074 
6075 /*
6076  * Adjust the TX and RX queue numbers which the system actually uses.
6077  *
6078  * The numbers are affected by the parameters below:
6079  *     - The number of hardware queues
6080  *     - The number of MSI-X vectors (= "nvectors" argument)
6081  *     - ncpu
6082  */
6083 static void
6084 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
6085 {
6086 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
6087 
6088 	if (nvectors < 2) {
6089 		sc->sc_nqueues = 1;
6090 		return;
6091 	}
6092 
6093 	switch (sc->sc_type) {
6094 	case WM_T_82572:
6095 		hw_ntxqueues = 2;
6096 		hw_nrxqueues = 2;
6097 		break;
6098 	case WM_T_82574:
6099 		hw_ntxqueues = 2;
6100 		hw_nrxqueues = 2;
6101 		break;
6102 	case WM_T_82575:
6103 		hw_ntxqueues = 4;
6104 		hw_nrxqueues = 4;
6105 		break;
6106 	case WM_T_82576:
6107 		hw_ntxqueues = 16;
6108 		hw_nrxqueues = 16;
6109 		break;
6110 	case WM_T_82580:
6111 	case WM_T_I350:
6112 	case WM_T_I354:
6113 		hw_ntxqueues = 8;
6114 		hw_nrxqueues = 8;
6115 		break;
6116 	case WM_T_I210:
6117 		hw_ntxqueues = 4;
6118 		hw_nrxqueues = 4;
6119 		break;
6120 	case WM_T_I211:
6121 		hw_ntxqueues = 2;
6122 		hw_nrxqueues = 2;
6123 		break;
6124 		/*
6125 		 * The Ethernet controllers below do not support MSI-X,
6126 		 * so this driver doesn't let them use multiqueue.
6127 		 *     - WM_T_80003
6128 		 *     - WM_T_ICH8
6129 		 *     - WM_T_ICH9
6130 		 *     - WM_T_ICH10
6131 		 *     - WM_T_PCH
6132 		 *     - WM_T_PCH2
6133 		 *     - WM_T_PCH_LPT
6134 		 */
6135 	default:
6136 		hw_ntxqueues = 1;
6137 		hw_nrxqueues = 1;
6138 		break;
6139 	}
6140 
6141 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
6142 
6143 	/*
6144 	 * Since more queues than MSI-X vectors cannot improve scaling, we
6145 	 * limit the number of queues actually used.
6146 	 */
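	/*
	 * For example, an 82576 (16 hardware queues) given 5 MSI-X
	 * vectors ends up with 4 queues: one vector is reserved for the
	 * link interrupt.
	 */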
6147 	if (nvectors < hw_nqueues + 1)
6148 		sc->sc_nqueues = nvectors - 1;
6149 	else
6150 		sc->sc_nqueues = hw_nqueues;
6151 
6152 	/*
6153 	 * Since more queues than CPUs cannot improve scaling, we limit
6154 	 * the number of queues actually used.
6155 	 */
6156 	if (ncpu < sc->sc_nqueues)
6157 		sc->sc_nqueues = ncpu;
6158 }
6159 
6160 static inline bool
6161 wm_is_using_msix(struct wm_softc *sc)
6162 {
6163 
6164 	return (sc->sc_nintrs > 1);
6165 }
6166 
6167 static inline bool
6168 wm_is_using_multiqueue(struct wm_softc *sc)
6169 {
6170 
6171 	return (sc->sc_nqueues > 1);
6172 }
6173 
6174 static int
6175 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
6176 {
6177 	struct wm_queue *wmq = &sc->sc_queue[qidx];
6178 
6179 	wmq->wmq_id = qidx;
6180 	wmq->wmq_intr_idx = intr_idx;
6181 	wmq->wmq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
6182 	    wm_handle_queue, wmq);
6183 	if (wmq->wmq_si != NULL)
6184 		return 0;
6185 
6186 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
6187 	    wmq->wmq_id);
6188 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
6189 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
6190 	return ENOMEM;
6191 }
6192 
6193 /*
6194  * Both single-interrupt MSI and INTx can use this function.
6195  */
6196 static int
6197 wm_setup_legacy(struct wm_softc *sc)
6198 {
6199 	pci_chipset_tag_t pc = sc->sc_pc;
6200 	const char *intrstr = NULL;
6201 	char intrbuf[PCI_INTRSTR_LEN];
6202 	int error;
6203 
6204 	error = wm_alloc_txrx_queues(sc);
6205 	if (error) {
6206 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
6207 		    error);
6208 		return ENOMEM;
6209 	}
6210 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
6211 	    sizeof(intrbuf));
6212 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
6213 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
6214 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
6215 	if (sc->sc_ihs[0] == NULL) {
6216 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
6217 		    (pci_intr_type(pc, sc->sc_intrs[0])
6218 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6219 		return ENOMEM;
6220 	}
6221 
6222 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
6223 	sc->sc_nintrs = 1;
6224 
6225 	return wm_softint_establish_queue(sc, 0, 0);
6226 }
6227 
6228 static int
6229 wm_setup_msix(struct wm_softc *sc)
6230 {
6231 	void *vih;
6232 	kcpuset_t *affinity;
6233 	int qidx, error, intr_idx, txrx_established;
6234 	pci_chipset_tag_t pc = sc->sc_pc;
6235 	const char *intrstr = NULL;
6236 	char intrbuf[PCI_INTRSTR_LEN];
6237 	char intr_xname[INTRDEVNAMEBUF];
6238 
6239 	if (sc->sc_nqueues < ncpu) {
6240 		/*
6241 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
6242 		 * interrupts starts from CPU#1.
6243 		 */
6244 		sc->sc_affinity_offset = 1;
6245 	} else {
6246 		/*
6247 		 * In this case, this device uses all CPUs, so we unify the
6248 		 * affinitized cpu_index with the MSI-X vector number for readability.
6249 		 */
6250 		sc->sc_affinity_offset = 0;
6251 	}
6252 
6253 	error = wm_alloc_txrx_queues(sc);
6254 	if (error) {
6255 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
6256 		    error);
6257 		return ENOMEM;
6258 	}
6259 
6260 	kcpuset_create(&affinity, false);
6261 	intr_idx = 0;
6262 
6263 	/*
6264 	 * TX and RX
6265 	 */
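	/*
	 * Queue vectors get round-robin CPU affinity below: e.g. with 4
	 * queues on a 6-CPU system and an offset of 1, TXRX0..TXRX3
	 * land on CPU1..CPU4, while the link interrupt keeps the
	 * default affinity.
	 */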
6266 	txrx_established = 0;
6267 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6268 		struct wm_queue *wmq = &sc->sc_queue[qidx];
6269 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
6270 
6271 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
6272 		    sizeof(intrbuf));
6273 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
6274 		    PCI_INTR_MPSAFE, true);
6275 		memset(intr_xname, 0, sizeof(intr_xname));
6276 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
6277 		    device_xname(sc->sc_dev), qidx);
6278 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
6279 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
6280 		if (vih == NULL) {
6281 			aprint_error_dev(sc->sc_dev,
6282 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
6283 			    intrstr ? " at " : "",
6284 			    intrstr ? intrstr : "");
6285 
6286 			goto fail;
6287 		}
6288 		kcpuset_zero(affinity);
6289 		/* Round-robin affinity */
6290 		kcpuset_set(affinity, affinity_to);
6291 		error = interrupt_distribute(vih, affinity, NULL);
6292 		if (error == 0) {
6293 			aprint_normal_dev(sc->sc_dev,
6294 			    "for TX and RX interrupting at %s affinity to %u\n",
6295 			    intrstr, affinity_to);
6296 		} else {
6297 			aprint_normal_dev(sc->sc_dev,
6298 			    "for TX and RX interrupting at %s\n", intrstr);
6299 		}
6300 		sc->sc_ihs[intr_idx] = vih;
6301 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
6302 			goto fail;
6303 		txrx_established++;
6304 		intr_idx++;
6305 	}
6306 
6307 	/* LINK */
6308 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
6309 	    sizeof(intrbuf));
6310 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
6311 	memset(intr_xname, 0, sizeof(intr_xname));
6312 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
6313 	    device_xname(sc->sc_dev));
6314 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
6315 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
6316 	if (vih == NULL) {
6317 		aprint_error_dev(sc->sc_dev,
6318 		    "unable to establish MSI-X(for LINK)%s%s\n",
6319 		    intrstr ? " at " : "",
6320 		    intrstr ? intrstr : "");
6321 
6322 		goto fail;
6323 	}
6324 	/* Keep the default affinity for the LINK interrupt */
6325 	aprint_normal_dev(sc->sc_dev,
6326 	    "for LINK interrupting at %s\n", intrstr);
6327 	sc->sc_ihs[intr_idx] = vih;
6328 	sc->sc_link_intr_idx = intr_idx;
6329 
6330 	sc->sc_nintrs = sc->sc_nqueues + 1;
6331 	kcpuset_destroy(affinity);
6332 	return 0;
6333 
6334 fail:
6335 	for (qidx = 0; qidx < txrx_established; qidx++) {
6336 		struct wm_queue *wmq = &sc->sc_queue[qidx];
6337 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
6338 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
6339 	}
6340 
6341 	kcpuset_destroy(affinity);
6342 	return ENOMEM;
6343 }
6344 
6345 static void
6346 wm_unset_stopping_flags(struct wm_softc *sc)
6347 {
6348 	int i;
6349 
6350 	KASSERT(mutex_owned(sc->sc_core_lock));
6351 
6352 	/* Must unset stopping flags in ascending order. */
6353 	for (i = 0; i < sc->sc_nqueues; i++) {
6354 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6355 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6356 
6357 		mutex_enter(txq->txq_lock);
6358 		txq->txq_stopping = false;
6359 		mutex_exit(txq->txq_lock);
6360 
6361 		mutex_enter(rxq->rxq_lock);
6362 		rxq->rxq_stopping = false;
6363 		mutex_exit(rxq->rxq_lock);
6364 	}
6365 
6366 	sc->sc_core_stopping = false;
6367 }
6368 
6369 static void
6370 wm_set_stopping_flags(struct wm_softc *sc)
6371 {
6372 	int i;
6373 
6374 	KASSERT(mutex_owned(sc->sc_core_lock));
6375 
6376 	sc->sc_core_stopping = true;
6377 
6378 	/* Must set stopping flags in ascending order. */
6379 	for (i = 0; i < sc->sc_nqueues; i++) {
6380 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6381 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6382 
6383 		mutex_enter(rxq->rxq_lock);
6384 		rxq->rxq_stopping = true;
6385 		mutex_exit(rxq->rxq_lock);
6386 
6387 		mutex_enter(txq->txq_lock);
6388 		txq->txq_stopping = true;
6389 		mutex_exit(txq->txq_lock);
6390 	}
6391 }
6392 
6393 /*
6394  * Write the interrupt interval value to the ITR or EITR register.
6395  */
6396 static void
6397 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
6398 {
6399 
6400 	if (!wmq->wmq_set_itr)
6401 		return;
6402 
6403 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6404 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
6405 
6406 		/*
6407 		 * The 82575 doesn't have the CNT_INGR field, so overwrite
6408 		 * the counter field in software.
6409 		 */
6410 		if (sc->sc_type == WM_T_82575)
6411 			eitr |= __SHIFTIN(wmq->wmq_itr,
6412 			    EITR_COUNTER_MASK_82575);
6413 		else
6414 			eitr |= EITR_CNT_INGR;
6415 
6416 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
6417 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
6418 		/*
6419 		 * The 82574 has both ITR and EITR. Set EITR when we use
6420 		 * the multiqueue function with MSI-X.
6421 		 */
6422 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
6423 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
6424 	} else {
6425 		KASSERT(wmq->wmq_id == 0);
6426 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
6427 	}
6428 
6429 	wmq->wmq_set_itr = false;
6430 }
6431 
6432 /*
6433  * TODO
6434  * The dynamic ITR calculation below is almost the same as Linux igb's,
6435  * but it does not fit wm(4), so AIM stays disabled until we find an
6436  * appropriate ITR calculation.
6437  */
6438 /*
6439  * Calculate the interrupt interval value to be written to the register
6440  * in wm_itrs_writereg(). This function does not write ITR/EITR itself.
6441  */
6442 static void
6443 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
6444 {
6445 #ifdef NOTYET
6446 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
6447 	struct wm_txqueue *txq = &wmq->wmq_txq;
6448 	uint32_t avg_size = 0;
6449 	uint32_t new_itr;
6450 
6451 	if (rxq->rxq_packets)
6452 		avg_size = rxq->rxq_bytes / rxq->rxq_packets;
6453 	if (txq->txq_packets)
6454 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
6455 
6456 	if (avg_size == 0) {
6457 		new_itr = 450; /* restore default value */
6458 		goto out;
6459 	}
6460 
6461 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
6462 	avg_size += 24;
6463 
6464 	/* Don't starve jumbo frames */
6465 	avg_size = uimin(avg_size, 3000);
6466 
6467 	/* Give a little boost to mid-size frames */
6468 	if ((avg_size > 300) && (avg_size < 1200))
6469 		new_itr = avg_size / 3;
6470 	else
6471 		new_itr = avg_size / 2;
6472 
6473 out:
6474 	/*
6475 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
6476 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
6477 	 */
6478 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
6479 		new_itr *= 4;
6480 
6481 	if (new_itr != wmq->wmq_itr) {
6482 		wmq->wmq_itr = new_itr;
6483 		wmq->wmq_set_itr = true;
6484 	} else
6485 		wmq->wmq_set_itr = false;
6486 
6487 	rxq->rxq_packets = 0;
6488 	rxq->rxq_bytes = 0;
6489 	txq->txq_packets = 0;
6490 	txq->txq_bytes = 0;
6491 #endif
6492 }
6493 
6494 static void
6495 wm_init_sysctls(struct wm_softc *sc)
6496 {
6497 	struct sysctllog **log;
6498 	const struct sysctlnode *rnode, *qnode, *cnode;
6499 	int i, rv;
6500 	const char *dvname;
6501 
6502 	log = &sc->sc_sysctllog;
6503 	dvname = device_xname(sc->sc_dev);
6504 
6505 	rv = sysctl_createv(log, 0, NULL, &rnode,
6506 	    0, CTLTYPE_NODE, dvname,
6507 	    SYSCTL_DESCR("wm information and settings"),
6508 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
6509 	if (rv != 0)
6510 		goto err;
6511 
6512 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
6513 	    CTLTYPE_BOOL, "txrx_workqueue",
6514 	    SYSCTL_DESCR("Use workqueue for packet processing"),
6515 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
6516 	if (rv != 0)
6517 		goto teardown;
6518 
6519 	for (i = 0; i < sc->sc_nqueues; i++) {
6520 		struct wm_queue *wmq = &sc->sc_queue[i];
6521 		struct wm_txqueue *txq = &wmq->wmq_txq;
6522 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
6523 
6524 		snprintf(sc->sc_queue[i].sysctlname,
6525 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
6526 
6527 		if (sysctl_createv(log, 0, &rnode, &qnode,
6528 		    0, CTLTYPE_NODE,
6529 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
6530 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
6531 			break;
6532 
6533 		if (sysctl_createv(log, 0, &qnode, &cnode,
6534 		    CTLFLAG_READONLY, CTLTYPE_INT,
6535 		    "txq_free", SYSCTL_DESCR("TX queue free"),
6536 		    NULL, 0, &txq->txq_free,
6537 		    0, CTL_CREATE, CTL_EOL) != 0)
6538 			break;
6539 		if (sysctl_createv(log, 0, &qnode, &cnode,
6540 		    CTLFLAG_READONLY, CTLTYPE_INT,
6541 		    "txd_head", SYSCTL_DESCR("TX descriptor head"),
6542 		    wm_sysctl_tdh_handler, 0, (void *)txq,
6543 		    0, CTL_CREATE, CTL_EOL) != 0)
6544 			break;
6545 		if (sysctl_createv(log, 0, &qnode, &cnode,
6546 		    CTLFLAG_READONLY, CTLTYPE_INT,
6547 		    "txd_tail", SYSCTL_DESCR("TX descriptor tail"),
6548 		    wm_sysctl_tdt_handler, 0, (void *)txq,
6549 		    0, CTL_CREATE, CTL_EOL) != 0)
6550 			break;
6551 		if (sysctl_createv(log, 0, &qnode, &cnode,
6552 		    CTLFLAG_READONLY, CTLTYPE_INT,
6553 		    "txq_next", SYSCTL_DESCR("TX queue next"),
6554 		    NULL, 0, &txq->txq_next,
6555 		    0, CTL_CREATE, CTL_EOL) != 0)
6556 			break;
6557 		if (sysctl_createv(log, 0, &qnode, &cnode,
6558 		    CTLFLAG_READONLY, CTLTYPE_INT,
6559 		    "txq_sfree", SYSCTL_DESCR("TX queue sfree"),
6560 		    NULL, 0, &txq->txq_sfree,
6561 		    0, CTL_CREATE, CTL_EOL) != 0)
6562 			break;
6563 		if (sysctl_createv(log, 0, &qnode, &cnode,
6564 		    CTLFLAG_READONLY, CTLTYPE_INT,
6565 		    "txq_snext", SYSCTL_DESCR("TX queue snext"),
6566 		    NULL, 0, &txq->txq_snext,
6567 		    0, CTL_CREATE, CTL_EOL) != 0)
6568 			break;
6569 		if (sysctl_createv(log, 0, &qnode, &cnode,
6570 		    CTLFLAG_READONLY, CTLTYPE_INT,
6571 		    "txq_sdirty", SYSCTL_DESCR("TX queue sdirty"),
6572 		    NULL, 0, &txq->txq_sdirty,
6573 		    0, CTL_CREATE, CTL_EOL) != 0)
6574 			break;
6575 		if (sysctl_createv(log, 0, &qnode, &cnode,
6576 		    CTLFLAG_READONLY, CTLTYPE_INT,
6577 		    "txq_flags", SYSCTL_DESCR("TX queue flags"),
6578 		    NULL, 0, &txq->txq_flags,
6579 		    0, CTL_CREATE, CTL_EOL) != 0)
6580 			break;
6581 		if (sysctl_createv(log, 0, &qnode, &cnode,
6582 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
6583 		    "txq_stopping", SYSCTL_DESCR("TX queue stopping"),
6584 		    NULL, 0, &txq->txq_stopping,
6585 		    0, CTL_CREATE, CTL_EOL) != 0)
6586 			break;
6587 		if (sysctl_createv(log, 0, &qnode, &cnode,
6588 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
6589 		    "txq_sending", SYSCTL_DESCR("TX queue sending"),
6590 		    NULL, 0, &txq->txq_sending,
6591 		    0, CTL_CREATE, CTL_EOL) != 0)
6592 			break;
6593 
6594 		if (sysctl_createv(log, 0, &qnode, &cnode,
6595 		    CTLFLAG_READONLY, CTLTYPE_INT,
6596 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
6597 		    NULL, 0, &rxq->rxq_ptr,
6598 		    0, CTL_CREATE, CTL_EOL) != 0)
6599 			break;
6600 	}
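	/*
	 * The nodes created above land under hw.<devname>; for example,
	 * assuming the first device attached as wm0, they can be
	 * inspected or tuned from userland with:
	 *
	 *	sysctl hw.wm0.q0.txq_free
	 *	sysctl -w hw.wm0.txrx_workqueue=1
	 */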
6601 
6602 #ifdef WM_DEBUG
6603 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
6604 	    CTLTYPE_INT, "debug_flags",
6605 	    SYSCTL_DESCR(
6606 		    "Debug flags:\n"	\
6607 		    "\t0x01 LINK\n"	\
6608 		    "\t0x02 TX\n"	\
6609 		    "\t0x04 RX\n"	\
6610 		    "\t0x08 GMII\n"	\
6611 		    "\t0x10 MANAGE\n"	\
6612 		    "\t0x20 NVM\n"	\
6613 		    "\t0x40 INIT\n"	\
6614 		    "\t0x80 LOCK"),
6615 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
6616 	if (rv != 0)
6617 		goto teardown;
6618 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
6619 	    CTLTYPE_BOOL, "trigger_reset",
6620 	    SYSCTL_DESCR("Trigger an interface reset"),
6621 	    NULL, 0, &sc->sc_trigger_reset, 0, CTL_CREATE, CTL_EOL);
6622 	if (rv != 0)
6623 		goto teardown;
6624 #endif
6625 
6626 	return;
6627 
6628 teardown:
6629 	sysctl_teardown(log);
6630 err:
6631 	sc->sc_sysctllog = NULL;
6632 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
6633 	    __func__, rv);
6634 }
6635 
6636 static void
6637 wm_update_stats(struct wm_softc *sc)
6638 {
6639 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6640 	uint64_t crcerrs, algnerrc, symerrc, mpc, colc, sec, rlec, rxerrc,
6641 	    cexterr;
6642 	uint64_t total_qdrop = 0;
6643 
6644 	crcerrs = CSR_READ(sc, WMREG_CRCERRS);
6645 	symerrc = CSR_READ(sc, WMREG_SYMERRC);
6646 	mpc = CSR_READ(sc, WMREG_MPC);
6647 	colc = CSR_READ(sc, WMREG_COLC);
6648 	sec = CSR_READ(sc, WMREG_SEC);
6649 	rlec = CSR_READ(sc, WMREG_RLEC);
6650 
6651 	WM_EVCNT_ADD(&sc->sc_ev_crcerrs, crcerrs);
6652 	WM_EVCNT_ADD(&sc->sc_ev_symerrc, symerrc);
6653 	WM_EVCNT_ADD(&sc->sc_ev_mpc, mpc);
6654 	WM_EVCNT_ADD(&sc->sc_ev_colc, colc);
6655 	WM_EVCNT_ADD(&sc->sc_ev_sec, sec);
6656 	WM_EVCNT_ADD(&sc->sc_ev_rlec, rlec);
6657 
6658 	if (sc->sc_type >= WM_T_82543) {
6659 		algnerrc = CSR_READ(sc, WMREG_ALGNERRC);
6660 		rxerrc = CSR_READ(sc, WMREG_RXERRC);
6661 		WM_EVCNT_ADD(&sc->sc_ev_algnerrc, algnerrc);
6662 		WM_EVCNT_ADD(&sc->sc_ev_rxerrc, rxerrc);
6663 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc)) {
6664 			cexterr = CSR_READ(sc, WMREG_CEXTERR);
6665 			WM_EVCNT_ADD(&sc->sc_ev_cexterr, cexterr);
6666 		} else {
6667 			cexterr = 0;
6668 			/* Excessive collision + Link down */
6669 			WM_EVCNT_ADD(&sc->sc_ev_htdpmc,
6670 			    CSR_READ(sc, WMREG_HTDPMC));
6671 		}
6672 
6673 		WM_EVCNT_ADD(&sc->sc_ev_tncrs, CSR_READ(sc, WMREG_TNCRS));
6674 		WM_EVCNT_ADD(&sc->sc_ev_tsctc, CSR_READ(sc, WMREG_TSCTC));
6675 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
6676 			WM_EVCNT_ADD(&sc->sc_ev_tsctfc,
6677 			    CSR_READ(sc, WMREG_TSCTFC));
6678 		else {
6679 			WM_EVCNT_ADD(&sc->sc_ev_cbrdpc,
6680 			    CSR_READ(sc, WMREG_CBRDPC));
6681 			WM_EVCNT_ADD(&sc->sc_ev_cbrmpc,
6682 			    CSR_READ(sc, WMREG_CBRMPC));
6683 		}
6684 	} else
6685 		algnerrc = rxerrc = cexterr = 0;
6686 
6687 	if (sc->sc_type >= WM_T_82542_2_1) {
6688 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
6689 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
6690 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
6691 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
6692 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
6693 	}
6694 
6695 	WM_EVCNT_ADD(&sc->sc_ev_scc, CSR_READ(sc, WMREG_SCC));
6696 	WM_EVCNT_ADD(&sc->sc_ev_ecol, CSR_READ(sc, WMREG_ECOL));
6697 	WM_EVCNT_ADD(&sc->sc_ev_mcc, CSR_READ(sc, WMREG_MCC));
6698 	WM_EVCNT_ADD(&sc->sc_ev_latecol, CSR_READ(sc, WMREG_LATECOL));
6699 
6700 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
6701 		WM_EVCNT_ADD(&sc->sc_ev_cbtmpc, CSR_READ(sc, WMREG_CBTMPC));
6702 	}
6703 
6704 	WM_EVCNT_ADD(&sc->sc_ev_dc, CSR_READ(sc, WMREG_DC));
6705 	WM_EVCNT_ADD(&sc->sc_ev_prc64, CSR_READ(sc, WMREG_PRC64));
6706 	WM_EVCNT_ADD(&sc->sc_ev_prc127, CSR_READ(sc, WMREG_PRC127));
6707 	WM_EVCNT_ADD(&sc->sc_ev_prc255, CSR_READ(sc, WMREG_PRC255));
6708 	WM_EVCNT_ADD(&sc->sc_ev_prc511, CSR_READ(sc, WMREG_PRC511));
6709 	WM_EVCNT_ADD(&sc->sc_ev_prc1023, CSR_READ(sc, WMREG_PRC1023));
6710 	WM_EVCNT_ADD(&sc->sc_ev_prc1522, CSR_READ(sc, WMREG_PRC1522));
6711 	WM_EVCNT_ADD(&sc->sc_ev_gprc, CSR_READ(sc, WMREG_GPRC));
6712 	WM_EVCNT_ADD(&sc->sc_ev_bprc, CSR_READ(sc, WMREG_BPRC));
6713 	WM_EVCNT_ADD(&sc->sc_ev_mprc, CSR_READ(sc, WMREG_MPRC));
6714 	WM_EVCNT_ADD(&sc->sc_ev_gptc, CSR_READ(sc, WMREG_GPTC));
6715 
6716 	WM_EVCNT_ADD(&sc->sc_ev_gorc,
6717 	    CSR_READ(sc, WMREG_GORCL) +
6718 	    ((uint64_t)CSR_READ(sc, WMREG_GORCH) << 32));
6719 	WM_EVCNT_ADD(&sc->sc_ev_gotc,
6720 	    CSR_READ(sc, WMREG_GOTCL) +
6721 	    ((uint64_t)CSR_READ(sc, WMREG_GOTCH) << 32));
6722 
6723 	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
6724 	WM_EVCNT_ADD(&sc->sc_ev_ruc, CSR_READ(sc, WMREG_RUC));
6725 	WM_EVCNT_ADD(&sc->sc_ev_rfc, CSR_READ(sc, WMREG_RFC));
6726 	WM_EVCNT_ADD(&sc->sc_ev_roc, CSR_READ(sc, WMREG_ROC));
6727 	WM_EVCNT_ADD(&sc->sc_ev_rjc, CSR_READ(sc, WMREG_RJC));
6728 
6729 	if (sc->sc_type >= WM_T_82540) {
6730 		WM_EVCNT_ADD(&sc->sc_ev_mgtprc, CSR_READ(sc, WMREG_MGTPRC));
6731 		WM_EVCNT_ADD(&sc->sc_ev_mgtpdc, CSR_READ(sc, WMREG_MGTPDC));
6732 		WM_EVCNT_ADD(&sc->sc_ev_mgtptc, CSR_READ(sc, WMREG_MGTPTC));
6733 	}
6734 
6735 	/*
6736 	 * The TOR(L) register includes:
6737 	 *  - Error
6738 	 *  - Flow control
6739 	 *  - Broadcast rejected (This note is described in 82574 and newer
6740 	 *    datasheets. What does "broadcast rejected" mean?)
6741 	 */
6742 	WM_EVCNT_ADD(&sc->sc_ev_tor,
6743 	    CSR_READ(sc, WMREG_TORL) +
6744 	    ((uint64_t)CSR_READ(sc, WMREG_TORH) << 32));
6745 	WM_EVCNT_ADD(&sc->sc_ev_tot,
6746 	    CSR_READ(sc, WMREG_TOTL) +
6747 	    ((uint64_t)CSR_READ(sc, WMREG_TOTH) << 32));
6748 
6749 	WM_EVCNT_ADD(&sc->sc_ev_tpr, CSR_READ(sc, WMREG_TPR));
6750 	WM_EVCNT_ADD(&sc->sc_ev_tpt, CSR_READ(sc, WMREG_TPT));
6751 	WM_EVCNT_ADD(&sc->sc_ev_ptc64, CSR_READ(sc, WMREG_PTC64));
6752 	WM_EVCNT_ADD(&sc->sc_ev_ptc127, CSR_READ(sc, WMREG_PTC127));
6753 	WM_EVCNT_ADD(&sc->sc_ev_ptc255, CSR_READ(sc, WMREG_PTC255));
6754 	WM_EVCNT_ADD(&sc->sc_ev_ptc511, CSR_READ(sc, WMREG_PTC511));
6755 	WM_EVCNT_ADD(&sc->sc_ev_ptc1023, CSR_READ(sc, WMREG_PTC1023));
6756 	WM_EVCNT_ADD(&sc->sc_ev_ptc1522, CSR_READ(sc, WMREG_PTC1522));
6757 	WM_EVCNT_ADD(&sc->sc_ev_mptc, CSR_READ(sc, WMREG_MPTC));
6758 	WM_EVCNT_ADD(&sc->sc_ev_bptc, CSR_READ(sc, WMREG_BPTC));
6759 	if (sc->sc_type >= WM_T_82571)
6760 		WM_EVCNT_ADD(&sc->sc_ev_iac, CSR_READ(sc, WMREG_IAC));
6761 	if (sc->sc_type < WM_T_82575) {
6762 		WM_EVCNT_ADD(&sc->sc_ev_icrxptc, CSR_READ(sc, WMREG_ICRXPTC));
6763 		WM_EVCNT_ADD(&sc->sc_ev_icrxatc, CSR_READ(sc, WMREG_ICRXATC));
6764 		WM_EVCNT_ADD(&sc->sc_ev_ictxptc, CSR_READ(sc, WMREG_ICTXPTC));
6765 		WM_EVCNT_ADD(&sc->sc_ev_ictxatc, CSR_READ(sc, WMREG_ICTXATC));
6766 		WM_EVCNT_ADD(&sc->sc_ev_ictxqec, CSR_READ(sc, WMREG_ICTXQEC));
6767 		WM_EVCNT_ADD(&sc->sc_ev_ictxqmtc,
6768 		    CSR_READ(sc, WMREG_ICTXQMTC));
6769 		WM_EVCNT_ADD(&sc->sc_ev_rxdmtc,
6770 		    CSR_READ(sc, WMREG_ICRXDMTC));
6771 		WM_EVCNT_ADD(&sc->sc_ev_icrxoc, CSR_READ(sc, WMREG_ICRXOC));
6772 	} else if (!WM_IS_ICHPCH(sc)) {
6773 		WM_EVCNT_ADD(&sc->sc_ev_rpthc, CSR_READ(sc, WMREG_RPTHC));
6774 		WM_EVCNT_ADD(&sc->sc_ev_debug1, CSR_READ(sc, WMREG_DEBUG1));
6775 		WM_EVCNT_ADD(&sc->sc_ev_debug2, CSR_READ(sc, WMREG_DEBUG2));
6776 		WM_EVCNT_ADD(&sc->sc_ev_debug3, CSR_READ(sc, WMREG_DEBUG3));
6777 		WM_EVCNT_ADD(&sc->sc_ev_hgptc,  CSR_READ(sc, WMREG_HGPTC));
6778 		WM_EVCNT_ADD(&sc->sc_ev_debug4, CSR_READ(sc, WMREG_DEBUG4));
6779 		WM_EVCNT_ADD(&sc->sc_ev_rxdmtc, CSR_READ(sc, WMREG_RXDMTC));
6780 		WM_EVCNT_ADD(&sc->sc_ev_htcbdpc, CSR_READ(sc, WMREG_HTCBDPC));
6781 
6782 		WM_EVCNT_ADD(&sc->sc_ev_hgorc,
6783 		    CSR_READ(sc, WMREG_HGORCL) +
6784 		    ((uint64_t)CSR_READ(sc, WMREG_HGORCH) << 32));
6785 		WM_EVCNT_ADD(&sc->sc_ev_hgotc,
6786 		    CSR_READ(sc, WMREG_HGOTCL) +
6787 		    ((uint64_t)CSR_READ(sc, WMREG_HGOTCH) << 32));
6788 		WM_EVCNT_ADD(&sc->sc_ev_lenerrs, CSR_READ(sc, WMREG_LENERRS));
6789 		WM_EVCNT_ADD(&sc->sc_ev_scvpc, CSR_READ(sc, WMREG_SCVPC));
6790 		WM_EVCNT_ADD(&sc->sc_ev_hrmpc, CSR_READ(sc, WMREG_HRMPC));
6791 #ifdef WM_EVENT_COUNTERS
6792 		for (int i = 0; i < sc->sc_nqueues; i++) {
6793 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6794 			uint32_t rqdpc;
6795 
6796 			rqdpc = CSR_READ(sc, WMREG_RQDPC(i));
6797 			/*
6798 			 * On I210 and newer devices, the RQDPC register is not
6799 			 * cleared on read.
6800 			 */
6801 			if ((rqdpc != 0) && (sc->sc_type >= WM_T_I210))
6802 				CSR_WRITE(sc, WMREG_RQDPC(i), 0);
6803 			WM_Q_EVCNT_ADD(rxq, qdrop, rqdpc);
6804 			total_qdrop += rqdpc;
6805 		}
6806 #endif
6807 	}
6808 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
6809 		WM_EVCNT_ADD(&sc->sc_ev_tlpic, CSR_READ(sc, WMREG_TLPIC));
6810 		WM_EVCNT_ADD(&sc->sc_ev_rlpic, CSR_READ(sc, WMREG_RLPIC));
6811 		if ((CSR_READ(sc, WMREG_MANC) & MANC_EN_BMC2OS) != 0) {
6812 			WM_EVCNT_ADD(&sc->sc_ev_b2ogprc,
6813 			    CSR_READ(sc, WMREG_B2OGPRC));
6814 			WM_EVCNT_ADD(&sc->sc_ev_o2bspc,
6815 			    CSR_READ(sc, WMREG_O2BSPC));
6816 			WM_EVCNT_ADD(&sc->sc_ev_b2ospc,
6817 			    CSR_READ(sc, WMREG_B2OSPC));
6818 			WM_EVCNT_ADD(&sc->sc_ev_o2bgptc,
6819 			    CSR_READ(sc, WMREG_O2BGPTC));
6820 		}
6821 	}
6822 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
6823 	if_statadd_ref(ifp, nsr, if_collisions, colc);
6824 	if_statadd_ref(ifp, nsr, if_ierrors,
6825 	    crcerrs + algnerrc + symerrc + rxerrc + sec + cexterr + rlec);
6826 	/*
6827 	 * WMREG_RNBC is incremented when no receive buffers are available
6828 	 * in host memory. It is not a count of dropped packets, because the
6829 	 * controller can still receive packets in that case as long as there
6830 	 * is space in the PHY's internal FIFO.
6831 	 *
6832 	 * To track the RNBC count itself, use a dedicated EVCNT instead of
6833 	 * if_iqdrops.
6834 	 */
6835 	if_statadd_ref(ifp, nsr, if_iqdrops, mpc + total_qdrop);
6836 	IF_STAT_PUTREF(ifp);
6837 }
6838 
6839 void
6840 wm_clear_evcnt(struct wm_softc *sc)
6841 {
6842 #ifdef WM_EVENT_COUNTERS
6843 	int i;
6844 
6845 	/* RX queues */
6846 	for (i = 0; i < sc->sc_nqueues; i++) {
6847 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6848 
6849 		WM_Q_EVCNT_STORE(rxq, intr, 0);
6850 		WM_Q_EVCNT_STORE(rxq, defer, 0);
6851 		WM_Q_EVCNT_STORE(rxq, ipsum, 0);
6852 		WM_Q_EVCNT_STORE(rxq, tusum, 0);
6853 		if ((sc->sc_type >= WM_T_82575) && !WM_IS_ICHPCH(sc))
6854 			WM_Q_EVCNT_STORE(rxq, qdrop, 0);
6855 	}
6856 
6857 	/* TX queues */
6858 	for (i = 0; i < sc->sc_nqueues; i++) {
6859 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6860 		int j;
6861 
6862 		WM_Q_EVCNT_STORE(txq, txsstall, 0);
6863 		WM_Q_EVCNT_STORE(txq, txdstall, 0);
6864 		WM_Q_EVCNT_STORE(txq, fifo_stall, 0);
6865 		WM_Q_EVCNT_STORE(txq, txdw, 0);
6866 		WM_Q_EVCNT_STORE(txq, txqe, 0);
6867 		WM_Q_EVCNT_STORE(txq, ipsum, 0);
6868 		WM_Q_EVCNT_STORE(txq, tusum, 0);
6869 		WM_Q_EVCNT_STORE(txq, tusum6, 0);
6870 		WM_Q_EVCNT_STORE(txq, tso, 0);
6871 		WM_Q_EVCNT_STORE(txq, tso6, 0);
6872 		WM_Q_EVCNT_STORE(txq, tsopain, 0);
6873 
6874 		for (j = 0; j < WM_NTXSEGS; j++)
6875 			WM_EVCNT_STORE(&txq->txq_ev_txseg[j], 0);
6876 
6877 		WM_Q_EVCNT_STORE(txq, pcqdrop, 0);
6878 		WM_Q_EVCNT_STORE(txq, descdrop, 0);
6879 		WM_Q_EVCNT_STORE(txq, toomanyseg, 0);
6880 		WM_Q_EVCNT_STORE(txq, defrag, 0);
6881 		if (sc->sc_type <= WM_T_82544)
6882 			WM_Q_EVCNT_STORE(txq, underrun, 0);
6883 		WM_Q_EVCNT_STORE(txq, skipcontext, 0);
6884 	}
6885 
6886 	/* Miscs */
6887 	WM_EVCNT_STORE(&sc->sc_ev_linkintr, 0);
6888 
6889 	WM_EVCNT_STORE(&sc->sc_ev_crcerrs, 0);
6890 	WM_EVCNT_STORE(&sc->sc_ev_symerrc, 0);
6891 	WM_EVCNT_STORE(&sc->sc_ev_mpc, 0);
6892 	WM_EVCNT_STORE(&sc->sc_ev_colc, 0);
6893 	WM_EVCNT_STORE(&sc->sc_ev_sec, 0);
6894 	WM_EVCNT_STORE(&sc->sc_ev_rlec, 0);
6895 
6896 	if (sc->sc_type >= WM_T_82543) {
6897 		WM_EVCNT_STORE(&sc->sc_ev_algnerrc, 0);
6898 		WM_EVCNT_STORE(&sc->sc_ev_rxerrc, 0);
6899 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
6900 			WM_EVCNT_STORE(&sc->sc_ev_cexterr, 0);
6901 		else
6902 			WM_EVCNT_STORE(&sc->sc_ev_htdpmc, 0);
6903 
6904 		WM_EVCNT_STORE(&sc->sc_ev_tncrs, 0);
6905 		WM_EVCNT_STORE(&sc->sc_ev_tsctc, 0);
6906 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
6907 			WM_EVCNT_STORE(&sc->sc_ev_tsctfc, 0);
6908 		else {
6909 			WM_EVCNT_STORE(&sc->sc_ev_cbrdpc, 0);
6910 			WM_EVCNT_STORE(&sc->sc_ev_cbrmpc, 0);
6911 		}
6912 	}
6913 
6914 	if (sc->sc_type >= WM_T_82542_2_1) {
6915 		WM_EVCNT_STORE(&sc->sc_ev_tx_xoff, 0);
6916 		WM_EVCNT_STORE(&sc->sc_ev_tx_xon, 0);
6917 		WM_EVCNT_STORE(&sc->sc_ev_rx_xoff, 0);
6918 		WM_EVCNT_STORE(&sc->sc_ev_rx_xon, 0);
6919 		WM_EVCNT_STORE(&sc->sc_ev_rx_macctl, 0);
6920 	}
6921 
6922 	WM_EVCNT_STORE(&sc->sc_ev_scc, 0);
6923 	WM_EVCNT_STORE(&sc->sc_ev_ecol, 0);
6924 	WM_EVCNT_STORE(&sc->sc_ev_mcc, 0);
6925 	WM_EVCNT_STORE(&sc->sc_ev_latecol, 0);
6926 
6927 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
6928 		WM_EVCNT_STORE(&sc->sc_ev_cbtmpc, 0);
6929 
6930 	WM_EVCNT_STORE(&sc->sc_ev_dc, 0);
6931 	WM_EVCNT_STORE(&sc->sc_ev_prc64, 0);
6932 	WM_EVCNT_STORE(&sc->sc_ev_prc127, 0);
6933 	WM_EVCNT_STORE(&sc->sc_ev_prc255, 0);
6934 	WM_EVCNT_STORE(&sc->sc_ev_prc511, 0);
6935 	WM_EVCNT_STORE(&sc->sc_ev_prc1023, 0);
6936 	WM_EVCNT_STORE(&sc->sc_ev_prc1522, 0);
6937 	WM_EVCNT_STORE(&sc->sc_ev_gprc, 0);
6938 	WM_EVCNT_STORE(&sc->sc_ev_bprc, 0);
6939 	WM_EVCNT_STORE(&sc->sc_ev_mprc, 0);
6940 	WM_EVCNT_STORE(&sc->sc_ev_gptc, 0);
6941 	WM_EVCNT_STORE(&sc->sc_ev_gorc, 0);
6942 	WM_EVCNT_STORE(&sc->sc_ev_gotc, 0);
6943 	WM_EVCNT_STORE(&sc->sc_ev_rnbc, 0);
6944 	WM_EVCNT_STORE(&sc->sc_ev_ruc, 0);
6945 	WM_EVCNT_STORE(&sc->sc_ev_rfc, 0);
6946 	WM_EVCNT_STORE(&sc->sc_ev_roc, 0);
6947 	WM_EVCNT_STORE(&sc->sc_ev_rjc, 0);
6948 	if (sc->sc_type >= WM_T_82540) {
6949 		WM_EVCNT_STORE(&sc->sc_ev_mgtprc, 0);
6950 		WM_EVCNT_STORE(&sc->sc_ev_mgtpdc, 0);
6951 		WM_EVCNT_STORE(&sc->sc_ev_mgtptc, 0);
6952 	}
6953 	WM_EVCNT_STORE(&sc->sc_ev_tor, 0);
6954 	WM_EVCNT_STORE(&sc->sc_ev_tot, 0);
6955 	WM_EVCNT_STORE(&sc->sc_ev_tpr, 0);
6956 	WM_EVCNT_STORE(&sc->sc_ev_tpt, 0);
6957 	WM_EVCNT_STORE(&sc->sc_ev_ptc64, 0);
6958 	WM_EVCNT_STORE(&sc->sc_ev_ptc127, 0);
6959 	WM_EVCNT_STORE(&sc->sc_ev_ptc255, 0);
6960 	WM_EVCNT_STORE(&sc->sc_ev_ptc511, 0);
6961 	WM_EVCNT_STORE(&sc->sc_ev_ptc1023, 0);
6962 	WM_EVCNT_STORE(&sc->sc_ev_ptc1522, 0);
6963 	WM_EVCNT_STORE(&sc->sc_ev_mptc, 0);
6964 	WM_EVCNT_STORE(&sc->sc_ev_bptc, 0);
6965 	if (sc->sc_type >= WM_T_82571)
6966 		WM_EVCNT_STORE(&sc->sc_ev_iac, 0);
6967 	if (sc->sc_type < WM_T_82575) {
6968 		WM_EVCNT_STORE(&sc->sc_ev_icrxptc, 0);
6969 		WM_EVCNT_STORE(&sc->sc_ev_icrxatc, 0);
6970 		WM_EVCNT_STORE(&sc->sc_ev_ictxptc, 0);
6971 		WM_EVCNT_STORE(&sc->sc_ev_ictxatc, 0);
6972 		WM_EVCNT_STORE(&sc->sc_ev_ictxqec, 0);
6973 		WM_EVCNT_STORE(&sc->sc_ev_ictxqmtc, 0);
6974 		WM_EVCNT_STORE(&sc->sc_ev_rxdmtc, 0);
6975 		WM_EVCNT_STORE(&sc->sc_ev_icrxoc, 0);
6976 	} else if (!WM_IS_ICHPCH(sc)) {
6977 		WM_EVCNT_STORE(&sc->sc_ev_rpthc, 0);
6978 		WM_EVCNT_STORE(&sc->sc_ev_debug1, 0);
6979 		WM_EVCNT_STORE(&sc->sc_ev_debug2, 0);
6980 		WM_EVCNT_STORE(&sc->sc_ev_debug3, 0);
6981 		WM_EVCNT_STORE(&sc->sc_ev_hgptc, 0);
6982 		WM_EVCNT_STORE(&sc->sc_ev_debug4, 0);
6983 		WM_EVCNT_STORE(&sc->sc_ev_rxdmtc, 0);
6984 		WM_EVCNT_STORE(&sc->sc_ev_htcbdpc, 0);
6985 
6986 		WM_EVCNT_STORE(&sc->sc_ev_hgorc, 0);
6987 		WM_EVCNT_STORE(&sc->sc_ev_hgotc, 0);
6988 		WM_EVCNT_STORE(&sc->sc_ev_lenerrs, 0);
6989 		WM_EVCNT_STORE(&sc->sc_ev_scvpc, 0);
6990 		WM_EVCNT_STORE(&sc->sc_ev_hrmpc, 0);
6991 	}
6992 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
6993 		WM_EVCNT_STORE(&sc->sc_ev_tlpic, 0);
6994 		WM_EVCNT_STORE(&sc->sc_ev_rlpic, 0);
6995 		WM_EVCNT_STORE(&sc->sc_ev_b2ogprc, 0);
6996 		WM_EVCNT_STORE(&sc->sc_ev_o2bspc, 0);
6997 		WM_EVCNT_STORE(&sc->sc_ev_b2ospc, 0);
6998 		WM_EVCNT_STORE(&sc->sc_ev_o2bgptc, 0);
6999 	}
7000 #endif
7001 }
7002 
7003 /*
7004  * wm_init:		[ifnet interface function]
7005  *
7006  *	Initialize the interface.
7007  */
7008 static int
7009 wm_init(struct ifnet *ifp)
7010 {
7011 	struct wm_softc *sc = ifp->if_softc;
7012 	int ret;
7013 
7014 	KASSERT(IFNET_LOCKED(ifp));
7015 
7016 	if (sc->sc_dying)
7017 		return ENXIO;
7018 
7019 	mutex_enter(sc->sc_core_lock);
7020 	ret = wm_init_locked(ifp);
7021 	mutex_exit(sc->sc_core_lock);
7022 
7023 	return ret;
7024 }
7025 
7026 static int
7027 wm_init_locked(struct ifnet *ifp)
7028 {
7029 	struct wm_softc *sc = ifp->if_softc;
7030 	struct ethercom *ec = &sc->sc_ethercom;
7031 	int i, j, trynum, error = 0;
7032 	uint32_t reg, sfp_mask = 0;
7033 
7034 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
7035 		device_xname(sc->sc_dev), __func__));
7036 	KASSERT(IFNET_LOCKED(ifp));
7037 	KASSERT(mutex_owned(sc->sc_core_lock));
7038 
7039 	/*
7040 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
7041 	 * There is a small but measurable benefit to avoiding the adjustment
7042 	 * of the descriptor so that the headers are aligned, for normal mtu,
7043 	 * on such platforms.  One possibility is that the DMA itself is
7044 	 * slightly more efficient if the front of the entire packet (instead
7045 	 * of the front of the headers) is aligned.
7046 	 *
7047 	 * Note we must always set align_tweak to 0 if we are using
7048 	 * jumbo frames.
7049 	 */
7050 #ifdef __NO_STRICT_ALIGNMENT
7051 	sc->sc_align_tweak = 0;
7052 #else
7053 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
7054 		sc->sc_align_tweak = 0;
7055 	else
7056 		sc->sc_align_tweak = 2;
7057 #endif /* __NO_STRICT_ALIGNMENT */
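	/*
	 * The two-byte tweak is the usual ETHER_ALIGN trick: the Ethernet
	 * header is 14 bytes, so starting the frame at offset 2 puts the
	 * IP header at offset 16, a 4-byte-aligned address, as strict-
	 * alignment platforms require:
	 *
	 *	buf + 0:  2 bytes of padding (sc_align_tweak)
	 *	buf + 2:  14-byte Ethernet header
	 *	buf + 16: IP header, now 4-byte aligned
	 *
	 * This also explains the (MCLBYTES - 2) test above: the tweak
	 * consumes two bytes of each receive cluster.
	 */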
7058 
7059 	/* Cancel any pending I/O. */
7060 	wm_stop_locked(ifp, false, false);
7061 
7062 	/* Update statistics before reset */
7063 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
7064 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
7065 
7066 	/* >= PCH_SPT hardware workaround before reset. */
7067 	if (sc->sc_type >= WM_T_PCH_SPT)
7068 		wm_flush_desc_rings(sc);
7069 
7070 	/* Reset the chip to a known state. */
7071 	wm_reset(sc);
7072 
7073 	/*
7074 	 * AMT based hardware can now take control from firmware
7075 	 * Do this after reset.
7076 	 */
7077 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
7078 		wm_get_hw_control(sc);
7079 
7080 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
7081 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
7082 		wm_legacy_irq_quirk_spt(sc);
7083 
7084 	/* Init hardware bits */
7085 	wm_initialize_hardware_bits(sc);
7086 
7087 	/* Reset the PHY. */
7088 	if (sc->sc_flags & WM_F_HAS_MII)
7089 		wm_gmii_reset(sc);
7090 
7091 	if (sc->sc_type >= WM_T_ICH8) {
7092 		reg = CSR_READ(sc, WMREG_GCR);
7093 		/*
7094 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
7095 		 * default after reset.
7096 		 */
7097 		if (sc->sc_type == WM_T_ICH8)
7098 			reg |= GCR_NO_SNOOP_ALL;
7099 		else
7100 			reg &= ~GCR_NO_SNOOP_ALL;
7101 		CSR_WRITE(sc, WMREG_GCR, reg);
7102 	}
7103 
7104 	/* Ungate DMA clock to avoid packet loss */
7105 	if (sc->sc_type >= WM_T_PCH_TGP) {
7106 		reg = CSR_READ(sc, WMREG_FFLT_DBG);
7107 		reg |= (1 << 12);
7108 		CSR_WRITE(sc, WMREG_FFLT_DBG, reg);
7109 	}
7110 
7111 	if ((sc->sc_type >= WM_T_ICH8)
7112 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
7113 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
7114 
7115 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
7116 		reg |= CTRL_EXT_RO_DIS;
7117 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7118 	}
7119 
7120 	/* Calculate (E)ITR value */
7121 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
7122 		/*
7123 		 * For NEWQUEUE's EITR (except for the 82575).
7124 		 * The 82575's EITR should be set to the same throttling value
7125 		 * as the older controllers' ITR, because the interrupts/sec
7126 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
7127 		 *
7128 		 * The 82574's EITR should be set to the same value as the ITR.
7129 		 *
7130 		 * For N interrupts/sec, set this value to 1,000,000 / N, in
7131 		 * contrast to the ITR throttling value.
7132 		 */
7133 		sc->sc_itr_init = 450;
7134 	} else if (sc->sc_type >= WM_T_82543) {
7135 		/*
7136 		 * Set up the interrupt throttling register (units of 256ns).
7137 		 * Note that a footnote in Intel's documentation says this
7138 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
7139 		 * or 10Mbit mode.  Empirically, this also appears to be true
7140 		 * for the 1024ns units of the other
7141 		 * interrupt-related timer registers -- so, really, we ought
7142 		 * to divide this value by 4 when the link speed is low.
7143 		 *
7144 		 * XXX implement this division at link speed change!
7145 		 */
7146 
7147 		/*
7148 		 * For N interrupts/sec, set this value to:
7149 		 * 1,000,000,000 / (N * 256).  Note that we set the
7150 		 * absolute and packet timer values to this value
7151 		 * divided by 4 to get "simple timer" behavior.
7152 		 */
7153 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
7154 	}
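	/*
	 * Worked examples of the two formulas above: the legacy ITR
	 * default of 1500 yields 1,000,000,000 / (1500 * 256) ~= 2604
	 * interrupts/sec, and the NEWQUEUE EITR default of 450 yields
	 * 1,000,000 / 450 ~= 2222 interrupts/sec, so the two defaults
	 * target interrupt rates of the same order.
	 */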
7155 
7156 	error = wm_init_txrx_queues(sc);
7157 	if (error)
7158 		goto out;
7159 
7160 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
7161 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
7162 	    (sc->sc_type >= WM_T_82575))
7163 		wm_serdes_power_up_link_82575(sc);
7164 
7165 	/* Clear out the VLAN table -- we don't use it (yet). */
7166 	CSR_WRITE(sc, WMREG_VET, 0);
7167 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
7168 		trynum = 10; /* Due to hw errata */
7169 	else
7170 		trynum = 1;
7171 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
7172 		for (j = 0; j < trynum; j++)
7173 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
7174 
7175 	/*
7176 	 * Set up flow-control parameters.
7177 	 *
7178 	 * XXX Values could probably stand some tuning.
7179 	 */
7180 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
7181 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
7182 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
7183 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)
7184 	    && (sc->sc_type != WM_T_PCH_TGP)) {
7185 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
7186 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
7187 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
7188 	}
7189 
7190 	sc->sc_fcrtl = FCRTL_DFLT;
7191 	if (sc->sc_type < WM_T_82543) {
7192 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
7193 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
7194 	} else {
7195 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
7196 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
7197 	}
7198 
7199 	if (sc->sc_type == WM_T_80003)
7200 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
7201 	else
7202 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
7203 
7204 	/* Writes the control register. */
7205 	wm_set_vlan(sc);
7206 
7207 	if (sc->sc_flags & WM_F_HAS_MII) {
7208 		uint16_t kmreg;
7209 
7210 		switch (sc->sc_type) {
7211 		case WM_T_80003:
7212 		case WM_T_ICH8:
7213 		case WM_T_ICH9:
7214 		case WM_T_ICH10:
7215 		case WM_T_PCH:
7216 		case WM_T_PCH2:
7217 		case WM_T_PCH_LPT:
7218 		case WM_T_PCH_SPT:
7219 		case WM_T_PCH_CNP:
7220 		case WM_T_PCH_TGP:
7221 			/*
7222 			 * Set the MAC to wait the maximum time between each
7223 			 * iteration and to increase the max iterations when
7224 			 * polling the PHY; this fixes erroneous timeouts at
7225 			 * 10Mbps.
7226 			 */
7227 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
7228 			    0xFFFF);
7229 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
7230 			    &kmreg);
7231 			kmreg |= 0x3F;
7232 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
7233 			    kmreg);
7234 			break;
7235 		default:
7236 			break;
7237 		}
7238 
7239 		if (sc->sc_type == WM_T_80003) {
7240 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
7241 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
7242 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7243 
7244 			/* Bypass RX and TX FIFOs */
7245 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
7246 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
7247 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
7248 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
7249 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
7250 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
7251 		}
7252 	}
7253 #if 0
7254 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
7255 #endif
7256 
7257 	/* Set up checksum offload parameters. */
7258 	reg = CSR_READ(sc, WMREG_RXCSUM);
7259 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
7260 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
7261 		reg |= RXCSUM_IPOFL;
7262 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
7263 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
7264 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
7265 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
7266 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
7267 
7268 	/* Set registers about MSI-X */
7269 	if (wm_is_using_msix(sc)) {
7270 		uint32_t ivar, qintr_idx;
7271 		struct wm_queue *wmq;
7272 		unsigned int qid;
7273 
7274 		if (sc->sc_type == WM_T_82575) {
7275 			/* Interrupt control */
7276 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
7277 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
7278 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7279 
7280 			/* TX and RX */
7281 			for (i = 0; i < sc->sc_nqueues; i++) {
7282 				wmq = &sc->sc_queue[i];
7283 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
7284 				    EITR_TX_QUEUE(wmq->wmq_id)
7285 				    | EITR_RX_QUEUE(wmq->wmq_id));
7286 			}
7287 			/* Link status */
7288 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
7289 			    EITR_OTHER);
7290 		} else if (sc->sc_type == WM_T_82574) {
7291 			/* Interrupt control */
7292 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
7293 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
7294 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7295 
7296 			/*
7297 			 * Work around issue with spurious interrupts
7298 			 * in MSI-X mode.
7299 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
7300 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
7301 			 */
7302 			reg = CSR_READ(sc, WMREG_RFCTL);
7303 			reg |= WMREG_RFCTL_ACKDIS;
7304 			CSR_WRITE(sc, WMREG_RFCTL, reg);
7305 
7306 			ivar = 0;
7307 			/* TX and RX */
7308 			for (i = 0; i < sc->sc_nqueues; i++) {
7309 				wmq = &sc->sc_queue[i];
7310 				qid = wmq->wmq_id;
7311 				qintr_idx = wmq->wmq_intr_idx;
7312 
7313 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
7314 				    IVAR_TX_MASK_Q_82574(qid));
7315 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
7316 				    IVAR_RX_MASK_Q_82574(qid));
7317 			}
7318 			/* Link status */
7319 			ivar |= __SHIFTIN((IVAR_VALID_82574
7320 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
7321 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
7322 		} else {
7323 			/* Interrupt control */
7324 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
7325 			    | GPIE_EIAME | GPIE_PBA);
7326 
7327 			switch (sc->sc_type) {
7328 			case WM_T_82580:
7329 			case WM_T_I350:
7330 			case WM_T_I354:
7331 			case WM_T_I210:
7332 			case WM_T_I211:
7333 				/* TX and RX */
7334 				for (i = 0; i < sc->sc_nqueues; i++) {
7335 					wmq = &sc->sc_queue[i];
7336 					qid = wmq->wmq_id;
7337 					qintr_idx = wmq->wmq_intr_idx;
7338 
7339 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
7340 					ivar &= ~IVAR_TX_MASK_Q(qid);
7341 					ivar |= __SHIFTIN((qintr_idx
7342 						| IVAR_VALID),
7343 					    IVAR_TX_MASK_Q(qid));
7344 					ivar &= ~IVAR_RX_MASK_Q(qid);
7345 					ivar |= __SHIFTIN((qintr_idx
7346 						| IVAR_VALID),
7347 					    IVAR_RX_MASK_Q(qid));
7348 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
7349 				}
7350 				break;
7351 			case WM_T_82576:
7352 				/* TX and RX */
7353 				for (i = 0; i < sc->sc_nqueues; i++) {
7354 					wmq = &sc->sc_queue[i];
7355 					qid = wmq->wmq_id;
7356 					qintr_idx = wmq->wmq_intr_idx;
7357 
7358 					ivar = CSR_READ(sc,
7359 					    WMREG_IVAR_Q_82576(qid));
7360 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
7361 					ivar |= __SHIFTIN((qintr_idx
7362 						| IVAR_VALID),
7363 					    IVAR_TX_MASK_Q_82576(qid));
7364 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
7365 					ivar |= __SHIFTIN((qintr_idx
7366 						| IVAR_VALID),
7367 					    IVAR_RX_MASK_Q_82576(qid));
7368 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
7369 					    ivar);
7370 				}
7371 				break;
7372 			default:
7373 				break;
7374 			}
7375 
7376 			/* Link status */
7377 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
7378 			    IVAR_MISC_OTHER);
7379 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
7380 		}
7381 
7382 		if (wm_is_using_multiqueue(sc)) {
7383 			wm_init_rss(sc);
7384 
7385 			/*
7386 			 * NOTE: Receive Full-Packet Checksum Offload
7387 			 * is mutually exclusive with Multiqueue. However,
7388 			 * this is not the same as TCP/IP checksums, which
7389 			 * still work.
7390 			 */
7391 			reg = CSR_READ(sc, WMREG_RXCSUM);
7392 			reg |= RXCSUM_PCSD;
7393 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
7394 		}
7395 	}
7396 
7397 	/* Set up the interrupt registers. */
7398 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
7399 
7400 	/* Enable SFP module insertion interrupt if it's required */
7401 	if ((sc->sc_flags & WM_F_SFP) != 0) {
7402 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
7403 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7404 		sfp_mask = ICR_GPI(0);
7405 	}
7406 
7407 	if (wm_is_using_msix(sc)) {
7408 		uint32_t mask;
7409 		struct wm_queue *wmq;
7410 
7411 		switch (sc->sc_type) {
7412 		case WM_T_82574:
7413 			mask = 0;
7414 			for (i = 0; i < sc->sc_nqueues; i++) {
7415 				wmq = &sc->sc_queue[i];
7416 				mask |= ICR_TXQ(wmq->wmq_id);
7417 				mask |= ICR_RXQ(wmq->wmq_id);
7418 			}
7419 			mask |= ICR_OTHER;
7420 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
7421 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
7422 			break;
7423 		default:
7424 			if (sc->sc_type == WM_T_82575) {
7425 				mask = 0;
7426 				for (i = 0; i < sc->sc_nqueues; i++) {
7427 					wmq = &sc->sc_queue[i];
7428 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
7429 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
7430 				}
7431 				mask |= EITR_OTHER;
7432 			} else {
7433 				mask = 0;
7434 				for (i = 0; i < sc->sc_nqueues; i++) {
7435 					wmq = &sc->sc_queue[i];
7436 					mask |= 1 << wmq->wmq_intr_idx;
7437 				}
7438 				mask |= 1 << sc->sc_link_intr_idx;
7439 			}
7440 			CSR_WRITE(sc, WMREG_EIAC, mask);
7441 			CSR_WRITE(sc, WMREG_EIAM, mask);
7442 			CSR_WRITE(sc, WMREG_EIMS, mask);
7443 
7444 			/* For other interrupts */
7445 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
7446 			break;
7447 		}
7448 	} else {
7449 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
7450 		    ICR_RXO | ICR_RXT0 | sfp_mask;
7451 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
7452 	}
7453 
7454 	/* Set up the inter-packet gap. */
7455 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7456 
7457 	if (sc->sc_type >= WM_T_82543) {
7458 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
7459 			struct wm_queue *wmq = &sc->sc_queue[qidx];
7460 			wm_itrs_writereg(sc, wmq);
7461 		}
7462 		/*
7463 		 * Link interrupts occur much less frequently than TX
7464 		 * and RX interrupts, so we don't tune the
7465 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way
7466 		 * FreeBSD's if_igb does.
7467 		 */
7468 	}
7469 
7470 	/* Set the VLAN EtherType. */
7471 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
7472 
7473 	/*
7474 	 * Set up the transmit control register; we start out with
7475 	 * a collision distance suitable for FDX, but update it when
7476 	 * we resolve the media type.
7477 	 */
7478 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
7479 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
7480 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7481 	if (sc->sc_type >= WM_T_82571)
7482 		sc->sc_tctl |= TCTL_MULR;
7483 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7484 
7485 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7486 		/* Write TDT after TCTL.EN is set. See the datasheet. */
7487 		CSR_WRITE(sc, WMREG_TDT(0), 0);
7488 	}
7489 
7490 	if (sc->sc_type == WM_T_80003) {
7491 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
7492 		reg &= ~TCTL_EXT_GCEX_MASK;
7493 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
7494 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
7495 	}
7496 
7497 	/* Set the media. */
7498 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
7499 		goto out;
7500 
7501 	/* Configure for OS presence */
7502 	wm_init_manageability(sc);
7503 
7504 	/*
7505 	 * Set up the receive control register; we actually program the
7506 	 * register when we set the receive filter. Use multicast address
7507 	 * offset type 0.
7508 	 *
7509 	 * Only the i82544 has the ability to strip the incoming CRC, so we
7510 	 * don't enable that feature.
7511 	 */
7512 	sc->sc_mchash_type = 0;
7513 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
7514 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
7515 
7516 	/* The 82574 uses the one-buffer extended Rx descriptor format. */
7517 	if (sc->sc_type == WM_T_82574)
7518 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
7519 
7520 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
7521 		sc->sc_rctl |= RCTL_SECRC;
7522 
7523 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
7524 	    && (ifp->if_mtu > ETHERMTU)) {
7525 		sc->sc_rctl |= RCTL_LPE;
7526 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7527 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
7528 	}
7529 
7530 	if (MCLBYTES == 2048)
7531 		sc->sc_rctl |= RCTL_2k;
7532 	else {
7533 		if (sc->sc_type >= WM_T_82543) {
7534 			switch (MCLBYTES) {
7535 			case 4096:
7536 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
7537 				break;
7538 			case 8192:
7539 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
7540 				break;
7541 			case 16384:
7542 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
7543 				break;
7544 			default:
7545 				panic("wm_init: MCLBYTES %d unsupported",
7546 				    MCLBYTES);
7547 				break;
7548 			}
7549 		} else
7550 			panic("wm_init: i82542 requires MCLBYTES = 2048");
7551 	}
7552 
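	/*
	 * On most NetBSD ports MCLBYTES is 2048, so the RCTL_2k path
	 * above is the common case; the BSEX encodings only matter for
	 * kernels built with larger mbuf clusters.
	 */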
7553 	/* Enable ECC */
7554 	switch (sc->sc_type) {
7555 	case WM_T_82571:
7556 		reg = CSR_READ(sc, WMREG_PBA_ECC);
7557 		reg |= PBA_ECC_CORR_EN;
7558 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
7559 		break;
7560 	case WM_T_PCH_LPT:
7561 	case WM_T_PCH_SPT:
7562 	case WM_T_PCH_CNP:
7563 	case WM_T_PCH_TGP:
7564 		reg = CSR_READ(sc, WMREG_PBECCSTS);
7565 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
7566 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
7567 
7568 		sc->sc_ctrl |= CTRL_MEHE;
7569 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7570 		break;
7571 	default:
7572 		break;
7573 	}
7574 
7575 	/*
7576 	 * Set the receive filter.
7577 	 *
7578 	 * For 82575 and 82576, the RX descriptors must be initialized after
7579 	 * the setting of RCTL.EN in wm_set_filter()
7580 	 */
7581 	wm_set_filter(sc);
7582 
7583 	/* On 82575 and later, set RDT only if RX is enabled. */
7584 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7585 		int qidx;
7586 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
7587 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
7588 			for (i = 0; i < WM_NRXDESC; i++) {
7589 				mutex_enter(rxq->rxq_lock);
7590 				wm_init_rxdesc(rxq, i);
7591 				mutex_exit(rxq->rxq_lock);
7592 
7594 		}
7595 	}
7596 
7597 	wm_unset_stopping_flags(sc);
7598 
7599 	/* Start the one second link check clock. */
7600 	callout_schedule(&sc->sc_tick_ch, hz);
7601 
7602 	/*
7603 	 * ...all done! (IFNET_LOCKED asserted above.)
7604 	 */
7605 	ifp->if_flags |= IFF_RUNNING;
7606 
7607 out:
7608 	/* Save last flags for the callback */
7609 	sc->sc_if_flags = ifp->if_flags;
7610 	sc->sc_ec_capenable = ec->ec_capenable;
7611 	if (error)
7612 		log(LOG_ERR, "%s: interface not running\n",
7613 		    device_xname(sc->sc_dev));
7614 	return error;
7615 }
7616 
7617 /*
7618  * wm_stop:		[ifnet interface function]
7619  *
7620  *	Stop transmission on the interface.
7621  */
7622 static void
7623 wm_stop(struct ifnet *ifp, int disable)
7624 {
7625 	struct wm_softc *sc = ifp->if_softc;
7626 
7627 	ASSERT_SLEEPABLE();
7628 	KASSERT(IFNET_LOCKED(ifp));
7629 
7630 	mutex_enter(sc->sc_core_lock);
7631 	wm_stop_locked(ifp, disable != 0, true);
7632 	mutex_exit(sc->sc_core_lock);
7633 
7634 	/*
7635 	 * After wm_set_stopping_flags(), it is guaranteed that
7636 	 * wm_handle_queue_work() does not call workqueue_enqueue().
7637 	 * However, workqueue_wait() cannot be called in
7638 	 * wm_stop_locked() because it can sleep,
7639 	 * so call it here instead.
7640 	 */
7641 	for (int i = 0; i < sc->sc_nqueues; i++)
7642 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
7643 	workqueue_wait(sc->sc_reset_wq, &sc->sc_reset_work);
7644 }
7645 
7646 static void
7647 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
7648 {
7649 	struct wm_softc *sc = ifp->if_softc;
7650 	struct wm_txsoft *txs;
7651 	int i, qidx;
7652 
7653 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
7654 		device_xname(sc->sc_dev), __func__));
7655 	KASSERT(IFNET_LOCKED(ifp));
7656 	KASSERT(mutex_owned(sc->sc_core_lock));
7657 
7658 	wm_set_stopping_flags(sc);
7659 
7660 	if (sc->sc_flags & WM_F_HAS_MII) {
7661 		/* Down the MII. */
7662 		mii_down(&sc->sc_mii);
7663 	} else {
7664 #if 0
7665 		/* Should we clear PHY's status properly? */
7666 		wm_reset(sc);
7667 #endif
7668 	}
7669 
7670 	/* Stop the transmit and receive processes. */
7671 	CSR_WRITE(sc, WMREG_TCTL, 0);
7672 	CSR_WRITE(sc, WMREG_RCTL, 0);
7673 	sc->sc_rctl &= ~RCTL_EN;
7674 
7675 	/*
7676 	 * Clear the interrupt mask to ensure the device cannot assert its
7677 	 * interrupt line.
7678 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
7679 	 * service any currently pending or shared interrupt.
7680 	 */
7681 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
7682 	sc->sc_icr = 0;
7683 	if (wm_is_using_msix(sc)) {
7684 		if (sc->sc_type != WM_T_82574) {
7685 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
7686 			CSR_WRITE(sc, WMREG_EIAC, 0);
7687 		} else
7688 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
7689 	}
7690 
7691 	/*
7692 	 * Stop callouts after interrupts are disabled; if we have
7693 	 * to wait for them, we will be releasing the CORE_LOCK
7694 	 * briefly, which will unblock interrupts on the current CPU.
7695 	 */
7696 
7697 	/* Stop the one second clock. */
7698 	if (wait)
7699 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
7700 	else
7701 		callout_stop(&sc->sc_tick_ch);
7702 
7703 	/* Stop the 82547 Tx FIFO stall check timer. */
7704 	if (sc->sc_type == WM_T_82547) {
7705 		if (wait)
7706 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
7707 		else
7708 			callout_stop(&sc->sc_txfifo_ch);
7709 	}
7710 
7711 	/* Release any queued transmit buffers. */
7712 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
7713 		struct wm_queue *wmq = &sc->sc_queue[qidx];
7714 		struct wm_txqueue *txq = &wmq->wmq_txq;
7715 		struct mbuf *m;
7716 
7717 		mutex_enter(txq->txq_lock);
7718 		txq->txq_sending = false; /* Ensure watchdog disabled */
7719 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7720 			txs = &txq->txq_soft[i];
7721 			if (txs->txs_mbuf != NULL) {
7722 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
7723 				m_freem(txs->txs_mbuf);
7724 				txs->txs_mbuf = NULL;
7725 			}
7726 		}
7727 		/* Drain txq_interq */
7728 		while ((m = pcq_get(txq->txq_interq)) != NULL)
7729 			m_freem(m);
7730 		mutex_exit(txq->txq_lock);
7731 	}
7732 
7733 	/* Mark the interface as down and cancel the watchdog timer. */
7734 	ifp->if_flags &= ~IFF_RUNNING;
7735 	sc->sc_if_flags = ifp->if_flags;
7736 
7737 	if (disable) {
7738 		for (i = 0; i < sc->sc_nqueues; i++) {
7739 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7740 			mutex_enter(rxq->rxq_lock);
7741 			wm_rxdrain(rxq);
7742 			mutex_exit(rxq->rxq_lock);
7743 		}
7744 	}
7745 
7746 #if 0 /* notyet */
7747 	if (sc->sc_type >= WM_T_82544)
7748 		CSR_WRITE(sc, WMREG_WUC, 0);
7749 #endif
7750 }
7751 
7752 static void
7753 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
7754 {
7755 	struct mbuf *m;
7756 	int i;
7757 
7758 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
7759 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
7760 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
7761 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
7762 		    m->m_data, m->m_len, m->m_flags);
7763 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
7764 	    i, i == 1 ? "" : "s");
7765 }
7766 
7767 /*
7768  * wm_82547_txfifo_stall:
7769  *
7770  *	Callout used to wait for the 82547 Tx FIFO to drain,
7771  *	reset the FIFO pointers, and restart packet transmission.
7772  */
7773 static void
7774 wm_82547_txfifo_stall(void *arg)
7775 {
7776 	struct wm_softc *sc = arg;
7777 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7778 
7779 	mutex_enter(txq->txq_lock);
7780 
7781 	if (txq->txq_stopping)
7782 		goto out;
7783 
7784 	if (txq->txq_fifo_stall) {
7785 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
7786 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
7787 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
7788 			/*
7789 			 * Packets have drained.  Stop transmitter, reset
7790 			 * FIFO pointers, restart transmitter, and kick
7791 			 * the packet queue.
7792 			 */
7793 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
7794 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
7795 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
7796 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
7797 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
7798 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
7799 			CSR_WRITE(sc, WMREG_TCTL, tctl);
7800 			CSR_WRITE_FLUSH(sc);
7801 
7802 			txq->txq_fifo_head = 0;
7803 			txq->txq_fifo_stall = 0;
7804 			wm_start_locked(&sc->sc_ethercom.ec_if);
7805 		} else {
7806 			/*
7807 			 * Still waiting for packets to drain; try again in
7808 			 * another tick.
7809 			 */
7810 			callout_schedule(&sc->sc_txfifo_ch, 1);
7811 		}
7812 	}
7813 
7814 out:
7815 	mutex_exit(txq->txq_lock);
7816 }
7817 
7818 /*
7819  * wm_82547_txfifo_bugchk:
7820  *
7821  *	Check for bug condition in the 82547 Tx FIFO.  We need to
7822  *	prevent enqueueing a packet that would wrap around the end
7823  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
7824  *
7825  *	We do this by checking the amount of space before the end
7826  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
7827  *	the Tx FIFO, wait for all remaining packets to drain, reset
7828  *	the internal FIFO pointers to the beginning, and restart
7829  *	transmission on the interface.
7830  */
7831 #define	WM_FIFO_HDR		0x10
7832 #define	WM_82547_PAD_LEN	0x3e0
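/*
 * Worked example of the check below: a 1514-byte frame rounds up to
 * len = roundup(1514 + 0x10, 0x10) = 0x600 bytes of FIFO space.  With,
 * say, 0x400 bytes left before the wrap point, 0x600 < 0x3e0 + 0x400,
 * so the packet is sent; with only 0x200 bytes left, 0x600 >= 0x5e0
 * and the queue stalls until the FIFO drains.
 */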
7833 static int
7834 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
7835 {
7836 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7837 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
7838 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
7839 
7840 	/* Just return if already stalled. */
7841 	if (txq->txq_fifo_stall)
7842 		return 1;
7843 
7844 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
7845 		/* Stall only occurs in half-duplex mode. */
7846 		goto send_packet;
7847 	}
7848 
7849 	if (len >= WM_82547_PAD_LEN + space) {
7850 		txq->txq_fifo_stall = 1;
7851 		callout_schedule(&sc->sc_txfifo_ch, 1);
7852 		return 1;
7853 	}
7854 
7855 send_packet:
7856 	txq->txq_fifo_head += len;
7857 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
7858 		txq->txq_fifo_head -= txq->txq_fifo_size;
7859 
7860 	return 0;
7861 }
7862 
7863 static int
7864 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
7865 {
7866 	int error;
7867 
7868 	/*
7869 	 * Allocate the control data structures, and create and load the
7870 	 * DMA map for it.
7871 	 *
7872 	 * NOTE: All Tx descriptors must be in the same 4G segment of
7873 	 * memory.  So must Rx descriptors.  We simplify by allocating
7874 	 * both sets within the same 4G segment.
7875 	 */
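	/*
	 * The 4G constraint is enforced by the boundary argument
	 * ((bus_size_t)0x100000000ULL) passed to bus_dmamem_alloc()
	 * below: the single returned segment can never cross a 4 GiB
	 * boundary.
	 */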
7876 	if (sc->sc_type < WM_T_82544)
7877 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
7878 	else
7879 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
7880 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7881 		txq->txq_descsize = sizeof(nq_txdesc_t);
7882 	else
7883 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
7884 
7885 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
7886 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
7887 		    1, &txq->txq_desc_rseg, 0)) != 0) {
7888 		aprint_error_dev(sc->sc_dev,
7889 		    "unable to allocate TX control data, error = %d\n",
7890 		    error);
7891 		goto fail_0;
7892 	}
7893 
7894 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
7895 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
7896 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
7897 		aprint_error_dev(sc->sc_dev,
7898 		    "unable to map TX control data, error = %d\n", error);
7899 		goto fail_1;
7900 	}
7901 
7902 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
7903 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
7904 		aprint_error_dev(sc->sc_dev,
7905 		    "unable to create TX control data DMA map, error = %d\n",
7906 		    error);
7907 		goto fail_2;
7908 	}
7909 
7910 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
7911 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
7912 		aprint_error_dev(sc->sc_dev,
7913 		    "unable to load TX control data DMA map, error = %d\n",
7914 		    error);
7915 		goto fail_3;
7916 	}
7917 
7918 	return 0;
7919 
7920 fail_3:
7921 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
7922 fail_2:
7923 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
7924 	    WM_TXDESCS_SIZE(txq));
7925 fail_1:
7926 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
7927 fail_0:
7928 	return error;
7929 }
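/*
 * Note the canonical bus_dma(9) order used above -- bus_dmamem_alloc(),
 * bus_dmamem_map(), bus_dmamap_create(), bus_dmamap_load() -- and that
 * the error labels unwind it in exactly the reverse order, as does
 * wm_free_tx_descs() below.
 */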
7930 
7931 static void
7932 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
7933 {
7934 
7935 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
7936 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
7937 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
7938 	    WM_TXDESCS_SIZE(txq));
7939 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
7940 }
7941 
7942 static int
7943 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
7944 {
7945 	int error;
7946 	size_t rxq_descs_size;
7947 
7948 	/*
7949 	 * Allocate the control data structures, and create and load the
7950 	 * DMA map for it.
7951 	 *
7952 	 * NOTE: All Tx descriptors must be in the same 4G segment of
7953 	 * memory.  So must Rx descriptors.  We simplify by allocating
7954 	 * both sets within the same 4G segment.
7955 	 */
7956 	rxq->rxq_ndesc = WM_NRXDESC;
7957 	if (sc->sc_type == WM_T_82574)
7958 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
7959 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7960 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
7961 	else
7962 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
7963 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
7964 
7965 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
7966 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
7967 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
7968 		aprint_error_dev(sc->sc_dev,
7969 		    "unable to allocate RX control data, error = %d\n",
7970 		    error);
7971 		goto fail_0;
7972 	}
7973 
7974 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
7975 		    rxq->rxq_desc_rseg, rxq_descs_size,
7976 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
7977 		aprint_error_dev(sc->sc_dev,
7978 		    "unable to map RX control data, error = %d\n", error);
7979 		goto fail_1;
7980 	}
7981 
7982 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
7983 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
7984 		aprint_error_dev(sc->sc_dev,
7985 		    "unable to create RX control data DMA map, error = %d\n",
7986 		    error);
7987 		goto fail_2;
7988 	}
7989 
7990 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
7991 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
7992 		aprint_error_dev(sc->sc_dev,
7993 		    "unable to load RX control data DMA map, error = %d\n",
7994 		    error);
7995 		goto fail_3;
7996 	}
7997 
7998 	return 0;
7999 
8000  fail_3:
8001 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
8002  fail_2:
8003 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
8004 	    rxq_descs_size);
8005  fail_1:
8006 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
8007  fail_0:
8008 	return error;
8009 }
8010 
8011 static void
8012 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
8013 {
8014 
8015 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
8016 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
8017 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
8018 	    rxq->rxq_descsize * rxq->rxq_ndesc);
8019 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
8020 }
8021 
8022 
8023 static int
8024 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
8025 {
8026 	int i, error;
8027 
8028 	/* Create the transmit buffer DMA maps. */
8029 	WM_TXQUEUELEN(txq) =
8030 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
8031 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
8032 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
8033 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
8034 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
8035 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
8036 			aprint_error_dev(sc->sc_dev,
8037 			    "unable to create Tx DMA map %d, error = %d\n",
8038 			    i, error);
8039 			goto fail;
8040 		}
8041 	}
8042 
8043 	return 0;
8044 
8045 fail:
8046 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
8047 		if (txq->txq_soft[i].txs_dmamap != NULL)
8048 			bus_dmamap_destroy(sc->sc_dmat,
8049 			    txq->txq_soft[i].txs_dmamap);
8050 	}
8051 	return error;
8052 }
8053 
8054 static void
8055 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
8056 {
8057 	int i;
8058 
8059 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
8060 		if (txq->txq_soft[i].txs_dmamap != NULL)
8061 			bus_dmamap_destroy(sc->sc_dmat,
8062 			    txq->txq_soft[i].txs_dmamap);
8063 	}
8064 }
8065 
8066 static int
8067 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
8068 {
8069 	int i, error;
8070 
8071 	/* Create the receive buffer DMA maps. */
8072 	for (i = 0; i < rxq->rxq_ndesc; i++) {
8073 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
8074 			    MCLBYTES, 0, 0,
8075 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
8076 			aprint_error_dev(sc->sc_dev,
8077 			    "unable to create Rx DMA map %d, error = %d\n",
8078 			    i, error);
8079 			goto fail;
8080 		}
8081 		rxq->rxq_soft[i].rxs_mbuf = NULL;
8082 	}
8083 
8084 	return 0;
8085 
8086  fail:
8087 	for (i = 0; i < rxq->rxq_ndesc; i++) {
8088 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
8089 			bus_dmamap_destroy(sc->sc_dmat,
8090 			    rxq->rxq_soft[i].rxs_dmamap);
8091 	}
8092 	return error;
8093 }
8094 
8095 static void
8096 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
8097 {
8098 	int i;
8099 
8100 	for (i = 0; i < rxq->rxq_ndesc; i++) {
8101 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
8102 			bus_dmamap_destroy(sc->sc_dmat,
8103 			    rxq->rxq_soft[i].rxs_dmamap);
8104 	}
8105 }
8106 
8107 /*
8108  * wm_alloc_txrx_queues:
8109  *	Allocate {tx,rx}descs and {tx,rx} buffers
8110  */
8111 static int
8112 wm_alloc_txrx_queues(struct wm_softc *sc)
8113 {
8114 	int i, error, tx_done, rx_done;
8115 
8116 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
8117 	    KM_SLEEP);
8118 	if (sc->sc_queue == NULL) {
8119 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
8120 		error = ENOMEM;
8121 		goto fail_0;
8122 	}
8123 
8124 	/* For transmission */
8125 	error = 0;
8126 	tx_done = 0;
8127 	for (i = 0; i < sc->sc_nqueues; i++) {
8128 #ifdef WM_EVENT_COUNTERS
8129 		int j;
8130 		const char *xname;
8131 #endif
8132 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
8133 		txq->txq_sc = sc;
8134 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
8135 
8136 		error = wm_alloc_tx_descs(sc, txq);
8137 		if (error)
8138 			break;
8139 		error = wm_alloc_tx_buffer(sc, txq);
8140 		if (error) {
8141 			wm_free_tx_descs(sc, txq);
8142 			break;
8143 		}
8144 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
8145 		if (txq->txq_interq == NULL) {
8146 			wm_free_tx_descs(sc, txq);
8147 			wm_free_tx_buffer(sc, txq);
8148 			error = ENOMEM;
8149 			break;
8150 		}
8151 
8152 #ifdef WM_EVENT_COUNTERS
8153 		xname = device_xname(sc->sc_dev);
8154 
8155 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
8156 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
8157 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
8158 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
8159 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
8160 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
8161 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
8162 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
8163 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
8164 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
8165 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
8166 
8167 		for (j = 0; j < WM_NTXSEGS; j++) {
8168 			snprintf(txq->txq_txseg_evcnt_names[j],
8169 			    sizeof(txq->txq_txseg_evcnt_names[j]),
8170 			    "txq%02dtxseg%d", i, j);
8171 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
8172 			    EVCNT_TYPE_MISC,
8173 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
8174 		}
8175 
8176 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
8177 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
8178 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
8179 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
8180 		/* Only for 82544 (and earlier?) */
8181 		if (sc->sc_type <= WM_T_82544)
8182 			WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
8183 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
8184 #endif /* WM_EVENT_COUNTERS */
8185 
8186 		tx_done++;
8187 	}
8188 	if (error)
8189 		goto fail_1;
8190 
8191 	/* For receive */
8192 	error = 0;
8193 	rx_done = 0;
8194 	for (i = 0; i < sc->sc_nqueues; i++) {
8195 #ifdef WM_EVENT_COUNTERS
8196 		const char *xname;
8197 #endif
8198 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
8199 		rxq->rxq_sc = sc;
8200 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
8201 
8202 		error = wm_alloc_rx_descs(sc, rxq);
8203 		if (error)
8204 			break;
8205 
8206 		error = wm_alloc_rx_buffer(sc, rxq);
8207 		if (error) {
8208 			wm_free_rx_descs(sc, rxq);
8209 			break;
8210 		}
8211 
8212 #ifdef WM_EVENT_COUNTERS
8213 		xname = device_xname(sc->sc_dev);
8214 
8215 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
8216 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
8217 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
8218 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
8219 		if ((sc->sc_type >= WM_T_82575) && !WM_IS_ICHPCH(sc))
8220 			WM_Q_MISC_EVCNT_ATTACH(rxq, qdrop, rxq, i, xname);
8221 #endif /* WM_EVENT_COUNTERS */
8222 
8223 		rx_done++;
8224 	}
8225 	if (error)
8226 		goto fail_2;
8227 
8228 	return 0;
8229 
8230 fail_2:
8231 	for (i = 0; i < rx_done; i++) {
8232 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
8233 		wm_free_rx_buffer(sc, rxq);
8234 		wm_free_rx_descs(sc, rxq);
8235 		if (rxq->rxq_lock)
8236 			mutex_obj_free(rxq->rxq_lock);
8237 	}
8238 fail_1:
8239 	for (i = 0; i < tx_done; i++) {
8240 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
8241 		pcq_destroy(txq->txq_interq);
8242 		wm_free_tx_buffer(sc, txq);
8243 		wm_free_tx_descs(sc, txq);
8244 		if (txq->txq_lock)
8245 			mutex_obj_free(txq->txq_lock);
8246 	}
8247 
8248 	kmem_free(sc->sc_queue,
8249 	    sizeof(struct wm_queue) * sc->sc_nqueues);
8250 fail_0:
8251 	return error;
8252 }
8253 
8254 /*
8255  * wm_free_txrx_queues:
8256  *	Free {tx,rx}descs and {tx,rx} buffers
8257  */
8258 static void
8259 wm_free_txrx_queues(struct wm_softc *sc)
8260 {
8261 	int i;
8262 
8263 	for (i = 0; i < sc->sc_nqueues; i++) {
8264 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
8265 
8266 #ifdef WM_EVENT_COUNTERS
8267 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
8268 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
8269 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
8270 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
8271 		if ((sc->sc_type >= WM_T_82575) && !WM_IS_ICHPCH(sc))
8272 			WM_Q_EVCNT_DETACH(rxq, qdrop, rxq, i);
8273 #endif /* WM_EVENT_COUNTERS */
8274 
8275 		wm_free_rx_buffer(sc, rxq);
8276 		wm_free_rx_descs(sc, rxq);
8277 		if (rxq->rxq_lock)
8278 			mutex_obj_free(rxq->rxq_lock);
8279 	}
8280 
8281 	for (i = 0; i < sc->sc_nqueues; i++) {
8282 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
8283 		struct mbuf *m;
8284 #ifdef WM_EVENT_COUNTERS
8285 		int j;
8286 
8287 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
8288 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
8289 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
8290 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
8291 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
8292 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
8293 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
8294 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
8295 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
8296 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
8297 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
8298 
8299 		for (j = 0; j < WM_NTXSEGS; j++)
8300 			evcnt_detach(&txq->txq_ev_txseg[j]);
8301 
8302 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
8303 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
8304 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
8305 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
8306 		if (sc->sc_type <= WM_T_82544)
8307 			WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
8308 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
8309 #endif /* WM_EVENT_COUNTERS */
8310 
8311 		/* Drain txq_interq */
8312 		while ((m = pcq_get(txq->txq_interq)) != NULL)
8313 			m_freem(m);
8314 		pcq_destroy(txq->txq_interq);
8315 
8316 		wm_free_tx_buffer(sc, txq);
8317 		wm_free_tx_descs(sc, txq);
8318 		if (txq->txq_lock)
8319 			mutex_obj_free(txq->txq_lock);
8320 	}
8321 
8322 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
8323 }
8324 
8325 static void
8326 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
8327 {
8328 
8329 	KASSERT(mutex_owned(txq->txq_lock));
8330 
8331 	/* Initialize the transmit descriptor ring. */
8332 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
8333 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
8334 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
8335 	txq->txq_free = WM_NTXDESC(txq);
8336 	txq->txq_next = 0;
8337 }
8338 
8339 static void
8340 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
8341     struct wm_txqueue *txq)
8342 {
8343 
8344 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
8345 		device_xname(sc->sc_dev), __func__));
8346 	KASSERT(mutex_owned(txq->txq_lock));
8347 
8348 	if (sc->sc_type < WM_T_82543) {
8349 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
8350 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
8351 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
8352 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
8353 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
8354 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
8355 	} else {
8356 		int qid = wmq->wmq_id;
8357 
8358 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
8359 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
8360 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
8361 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
8362 
8363 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8364 			/*
8365 			 * Don't write TDT before TCTL.EN is set.
8366 			 * See the documentation.
8367 			 */
8368 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
8369 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
8370 			    | TXDCTL_WTHRESH(0));
8371 		else {
8372 			/* XXX should update with AIM? */
8373 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
8374 			if (sc->sc_type >= WM_T_82540) {
8375 				/* TADV should hold the same value as TIDV */
8376 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
8377 			}
8378 
8379 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
8380 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
8381 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
8382 		}
8383 	}
8384 }
8385 
8386 static void
8387 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
8388 {
8389 	int i;
8390 
8391 	KASSERT(mutex_owned(txq->txq_lock));
8392 
8393 	/* Initialize the transmit job descriptors. */
8394 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
8395 		txq->txq_soft[i].txs_mbuf = NULL;
8396 	txq->txq_sfree = WM_TXQUEUELEN(txq);
8397 	txq->txq_snext = 0;
8398 	txq->txq_sdirty = 0;
8399 }
8400 
8401 static void
8402 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
8403     struct wm_txqueue *txq)
8404 {
8405 
8406 	KASSERT(mutex_owned(txq->txq_lock));
8407 
8408 	/*
8409 	 * Set up some register offsets that are different between
8410 	 * the i82542 and the i82543 and later chips.
8411 	 */
8412 	if (sc->sc_type < WM_T_82543)
8413 		txq->txq_tdt_reg = WMREG_OLD_TDT;
8414 	else
8415 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
8416 
8417 	wm_init_tx_descs(sc, txq);
8418 	wm_init_tx_regs(sc, wmq, txq);
8419 	wm_init_tx_buffer(sc, txq);
8420 
8421 	/* Clear all flags except WM_TXQ_LINKDOWN_DISCARD */
8422 	txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD;
8423 
8424 	txq->txq_sending = false;
8425 }
8426 
8427 static void
8428 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
8429     struct wm_rxqueue *rxq)
8430 {
8431 
8432 	KASSERT(mutex_owned(rxq->rxq_lock));
8433 
8434 	/*
8435 	 * Initialize the receive descriptor and receive job
8436 	 * descriptor rings.
8437 	 */
8438 	if (sc->sc_type < WM_T_82543) {
8439 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
8440 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
8441 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
8442 		    rxq->rxq_descsize * rxq->rxq_ndesc);
8443 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
8444 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
8445 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
8446 
8447 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
8448 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
8449 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
8450 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
8451 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
8452 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
8453 	} else {
8454 		int qid = wmq->wmq_id;
8455 
8456 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
8457 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
8458 		CSR_WRITE(sc, WMREG_RDLEN(qid),
8459 		    rxq->rxq_descsize * rxq->rxq_ndesc);
8460 
8461 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
8462 			uint32_t srrctl;
8463 
8464 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
8465 				panic("%s: MCLBYTES %d unsupported for 82575 "
8466 				    "or higher\n", __func__, MCLBYTES);
8467 
8468 			/*
8469 			 * Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is
8470 			 * supported.
8471 			 */
8472 			srrctl = SRRCTL_DESCTYPE_ADV_ONEBUF
8473 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT);
8474 			/*
8475 			 * Drop frames if the RX descriptor ring has no room.
8476 			 * This is enabled only on multiqueue systems to avoid
8477 			 * adversely affecting the other queues.
8478 			 */
8479 			if (sc->sc_nqueues > 1)
8480 				srrctl |= SRRCTL_DROP_EN;
8481 			CSR_WRITE(sc, WMREG_SRRCTL(qid), srrctl);
8482 
8483 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
8484 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
8485 			    | RXDCTL_WTHRESH(1));
8486 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
8487 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
8488 		} else {
8489 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
8490 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
8491 			/* XXX should update with AIM? */
8492 			CSR_WRITE(sc, WMREG_RDTR,
8493 			    (wmq->wmq_itr / 4) | RDTR_FPD);
8494 			/* RADV MUST hold the same value as RDTR */
8495 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
8496 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
8497 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
8498 		}
8499 	}
8500 }
8501 
8502 static int
8503 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
8504 {
8505 	struct wm_rxsoft *rxs;
8506 	int error, i;
8507 
8508 	KASSERT(mutex_owned(rxq->rxq_lock));
8509 
8510 	for (i = 0; i < rxq->rxq_ndesc; i++) {
8511 		rxs = &rxq->rxq_soft[i];
8512 		if (rxs->rxs_mbuf == NULL) {
8513 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
8514 				log(LOG_ERR, "%s: unable to allocate or map "
8515 				    "rx buffer %d, error = %d\n",
8516 				    device_xname(sc->sc_dev), i, error);
8517 				/*
8518 				 * XXX Should attempt to run with fewer receive
8519 				 * XXX buffers instead of just failing.
8520 				 */
8521 				wm_rxdrain(rxq);
8522 				return ENOMEM;
8523 			}
8524 		} else {
8525 			/*
8526 			 * For 82575 and 82576, the RX descriptors must be
8527 			 * initialized after the setting of RCTL.EN in
8528 			 * wm_set_filter()
8529 			 */
8530 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
8531 				wm_init_rxdesc(rxq, i);
8532 		}
8533 	}
8534 	rxq->rxq_ptr = 0;
8535 	rxq->rxq_discard = 0;
8536 	WM_RXCHAIN_RESET(rxq);
8537 
8538 	return 0;
8539 }
8540 
8541 static int
8542 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
8543     struct wm_rxqueue *rxq)
8544 {
8545 
8546 	KASSERT(mutex_owned(rxq->rxq_lock));
8547 
8548 	/*
8549 	 * Set up some register offsets that are different between
8550 	 * the i82542 and the i82543 and later chips.
8551 	 */
8552 	if (sc->sc_type < WM_T_82543)
8553 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
8554 	else
8555 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
8556 
8557 	wm_init_rx_regs(sc, wmq, rxq);
8558 	return wm_init_rx_buffer(sc, rxq);
8559 }
8560 
8561 /*
8562  * wm_init_txrx_queues:
8563  *	Initialize {tx,rx}descs and {tx,rx} buffers
8564  */
8565 static int
8566 wm_init_txrx_queues(struct wm_softc *sc)
8567 {
8568 	int i, error = 0;
8569 
8570 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
8571 		device_xname(sc->sc_dev), __func__));
8572 
8573 	for (i = 0; i < sc->sc_nqueues; i++) {
8574 		struct wm_queue *wmq = &sc->sc_queue[i];
8575 		struct wm_txqueue *txq = &wmq->wmq_txq;
8576 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
8577 
8578 		/*
8579 		 * TODO
8580 		 * Currently, a constant value is used instead of AIM.
8581 		 * Furthermore, the interrupt interval used for multiqueue
8582 		 * (polling mode) is lower than the default value.
8583 		 * More tuning, and AIM, are required.
8584 		 */
8585 		if (wm_is_using_multiqueue(sc))
8586 			wmq->wmq_itr = 50;
8587 		else
8588 			wmq->wmq_itr = sc->sc_itr_init;
8589 		wmq->wmq_set_itr = true;
8590 
8591 		mutex_enter(txq->txq_lock);
8592 		wm_init_tx_queue(sc, wmq, txq);
8593 		mutex_exit(txq->txq_lock);
8594 
8595 		mutex_enter(rxq->rxq_lock);
8596 		error = wm_init_rx_queue(sc, wmq, rxq);
8597 		mutex_exit(rxq->rxq_lock);
8598 		if (error)
8599 			break;
8600 	}
8601 
8602 	return error;
8603 }
8604 
8605 /*
8606  * wm_tx_offload:
8607  *
8608  *	Set up TCP/IP checksumming parameters for the
8609  *	specified packet.
8610  */
8611 static void
8612 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
8613     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
8614 {
8615 	struct mbuf *m0 = txs->txs_mbuf;
8616 	struct livengood_tcpip_ctxdesc *t;
8617 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
8618 	uint32_t ipcse;
8619 	struct ether_header *eh;
8620 	int offset, iphl;
8621 	uint8_t fields;
8622 
8623 	/*
8624 	 * XXX It would be nice if the mbuf pkthdr had offset
8625 	 * fields for the protocol headers.
8626 	 */
8627 
8628 	eh = mtod(m0, struct ether_header *);
8629 	switch (htons(eh->ether_type)) {
8630 	case ETHERTYPE_IP:
8631 	case ETHERTYPE_IPV6:
8632 		offset = ETHER_HDR_LEN;
8633 		break;
8634 
8635 	case ETHERTYPE_VLAN:
8636 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
8637 		break;
8638 
8639 	default:
8640 		/* Don't support this protocol or encapsulation. */
8641 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
8642 		txq->txq_last_hw_ipcs = 0;
8643 		txq->txq_last_hw_tucs = 0;
8644 		*fieldsp = 0;
8645 		*cmdp = 0;
8646 		return;
8647 	}
8648 
8649 	if ((m0->m_pkthdr.csum_flags &
8650 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
8651 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
8652 	} else
8653 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
8654 
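	/* IPCSE: inclusive offset of the last byte of the IP header. */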
8655 	ipcse = offset + iphl - 1;
8656 
8657 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
8658 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
8659 	seg = 0;
8660 	fields = 0;
8661 
8662 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
8663 		int hlen = offset + iphl;
8664 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
8665 
8666 		if (__predict_false(m0->m_len <
8667 				    (hlen + sizeof(struct tcphdr)))) {
8668 			/*
8669 			 * TCP/IP headers are not in the first mbuf; we need
8670 			 * to do this the slow and painful way. Let's just
8671 			 * hope this doesn't happen very often.
8672 			 */
8673 			struct tcphdr th;
8674 
8675 			WM_Q_EVCNT_INCR(txq, tsopain);
8676 
8677 			m_copydata(m0, hlen, sizeof(th), &th);
8678 			if (v4) {
8679 				struct ip ip;
8680 
8681 				m_copydata(m0, offset, sizeof(ip), &ip);
8682 				ip.ip_len = 0;
8683 				m_copyback(m0,
8684 				    offset + offsetof(struct ip, ip_len),
8685 				    sizeof(ip.ip_len), &ip.ip_len);
8686 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
8687 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
8688 			} else {
8689 				struct ip6_hdr ip6;
8690 
8691 				m_copydata(m0, offset, sizeof(ip6), &ip6);
8692 				ip6.ip6_plen = 0;
8693 				m_copyback(m0,
8694 				    offset + offsetof(struct ip6_hdr, ip6_plen),
8695 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
8696 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
8697 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
8698 			}
8699 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
8700 			    sizeof(th.th_sum), &th.th_sum);
8701 
8702 			hlen += th.th_off << 2;
8703 		} else {
8704 			/*
8705 			 * TCP/IP headers are in the first mbuf; we can do
8706 			 * this the easy way.
8707 			 */
8708 			struct tcphdr *th;
8709 
8710 			if (v4) {
8711 				struct ip *ip =
8712 				    (void *)(mtod(m0, char *) + offset);
8713 				th = (void *)(mtod(m0, char *) + hlen);
8714 
8715 				ip->ip_len = 0;
8716 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
8717 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
8718 			} else {
8719 				struct ip6_hdr *ip6 =
8720 				    (void *)(mtod(m0, char *) + offset);
8721 				th = (void *)(mtod(m0, char *) + hlen);
8722 
8723 				ip6->ip6_plen = 0;
8724 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
8725 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
8726 			}
8727 			hlen += th->th_off << 2;
8728 		}
8729 
8730 		if (v4) {
8731 			WM_Q_EVCNT_INCR(txq, tso);
8732 			cmdlen |= WTX_TCPIP_CMD_IP;
8733 		} else {
8734 			WM_Q_EVCNT_INCR(txq, tso6);
8735 			ipcse = 0;
8736 		}
8737 		cmd |= WTX_TCPIP_CMD_TSE;
8738 		cmdlen |= WTX_TCPIP_CMD_TSE |
8739 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
8740 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
8741 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
8742 	}
8743 
8744 	/*
8745 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
8746 	 * offload feature, if we load the context descriptor, we
8747 	 * MUST provide valid values for IPCSS and TUCSS fields.
8748 	 */
8749 
8750 	ipcs = WTX_TCPIP_IPCSS(offset) |
8751 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
8752 	    WTX_TCPIP_IPCSE(ipcse);
8753 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
8754 		WM_Q_EVCNT_INCR(txq, ipsum);
8755 		fields |= WTX_IXSM;
8756 	}
8757 
8758 	offset += iphl;
8759 
8760 	if (m0->m_pkthdr.csum_flags &
8761 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
8762 		WM_Q_EVCNT_INCR(txq, tusum);
8763 		fields |= WTX_TXSM;
8764 		tucs = WTX_TCPIP_TUCSS(offset) |
8765 		    WTX_TCPIP_TUCSO(offset +
8766 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
8767 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
8768 	} else if ((m0->m_pkthdr.csum_flags &
8769 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
8770 		WM_Q_EVCNT_INCR(txq, tusum6);
8771 		fields |= WTX_TXSM;
8772 		tucs = WTX_TCPIP_TUCSS(offset) |
8773 		    WTX_TCPIP_TUCSO(offset +
8774 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
8775 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
8776 	} else {
8777 		/* Just initialize it to a valid TCP context. */
8778 		tucs = WTX_TCPIP_TUCSS(offset) |
8779 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
8780 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
8781 	}
8782 
8783 	*cmdp = cmd;
8784 	*fieldsp = fields;
8785 
8786 	/*
8787 	 * We don't have to write a context descriptor for every packet,
8788 	 * except on the 82574. For the 82574, we must write a context
8789 	 * descriptor for every packet when we use two descriptor queues.
8790 	 *
8791 	 * The 82574L can only remember the *last* context used,
8792 	 * regardless of the queue that it was used for.  We cannot reuse
8793 	 * contexts on this hardware platform and must generate a new
8794 	 * context every time.  82574L hardware spec, section 7.2.6,
8795 	 * second note.
8796 	 */
8797 	if (sc->sc_nqueues < 2) {
8798 		/*
8799 		 * Setting up a new checksum offload context for every
8800 		 * frame takes a lot of processing time for the hardware.
8801 		 * This also reduces performance a lot for small frames,
8802 		 * so avoid it if the driver can reuse a previously
8803 		 * configured checksum offload context.
8804 		 * For TSO, in theory we could reuse the same TSO context
8805 		 * if the frame is the same type (IP/TCP) and has the same
8806 		 * MSS. However, checking whether a frame has the same
8807 		 * IP/TCP structure is hard, so just ignore that and always
8808 		 * establish a new TSO context.
8809 		 */
8810 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
8811 		    == 0) {
8812 			if (txq->txq_last_hw_cmd == cmd &&
8813 			    txq->txq_last_hw_fields == fields &&
8814 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
8815 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
8816 				WM_Q_EVCNT_INCR(txq, skipcontext);
8817 				return;
8818 			}
8819 		}
8820 
8821 		txq->txq_last_hw_cmd = cmd;
8822 		txq->txq_last_hw_fields = fields;
8823 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
8824 		txq->txq_last_hw_tucs = (tucs & 0xffff);
8825 	}
8826 
8827 	/* Fill in the context descriptor. */
8828 	t = (struct livengood_tcpip_ctxdesc *)
8829 	    &txq->txq_descs[txq->txq_next];
8830 	t->tcpip_ipcs = htole32(ipcs);
8831 	t->tcpip_tucs = htole32(tucs);
8832 	t->tcpip_cmdlen = htole32(cmdlen);
8833 	t->tcpip_seg = htole32(seg);
8834 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
8835 
8836 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
8837 	txs->txs_ndesc++;
8838 }
8839 
8840 static inline int
8841 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
8842 {
8843 	struct wm_softc *sc = ifp->if_softc;
8844 	u_int cpuid = cpu_index(curcpu());
8845 
8846 	/*
8847 	 * Currently, a simple distribution strategy.
8848 	 * TODO:
8849 	 * distribute by flow ID (RSS hash value).
8850 	 */
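	/*
	 * (cpuid + ncpu - sc->sc_affinity_offset) % ncpu rotates the CPU
	 * index by the device's interrupt affinity offset; the outer
	 * modulo folds the rotated index onto the available Tx queues.
	 */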
8851 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
8852 }
8853 
8854 static inline bool
8855 wm_linkdown_discard(struct wm_txqueue *txq)
8856 {
8857 
8858 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
8859 		return true;
8860 
8861 	return false;
8862 }
8863 
8864 /*
8865  * wm_start:		[ifnet interface function]
8866  *
8867  *	Start packet transmission on the interface.
8868  */
8869 static void
8870 wm_start(struct ifnet *ifp)
8871 {
8872 	struct wm_softc *sc = ifp->if_softc;
8873 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8874 
8875 	KASSERT(if_is_mpsafe(ifp));
8876 	/*
8877 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
8878 	 */
8879 
8880 	mutex_enter(txq->txq_lock);
8881 	if (!txq->txq_stopping)
8882 		wm_start_locked(ifp);
8883 	mutex_exit(txq->txq_lock);
8884 }
8885 
8886 static void
8887 wm_start_locked(struct ifnet *ifp)
8888 {
8889 	struct wm_softc *sc = ifp->if_softc;
8890 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8891 
8892 	wm_send_common_locked(ifp, txq, false);
8893 }
8894 
8895 static int
8896 wm_transmit(struct ifnet *ifp, struct mbuf *m)
8897 {
8898 	int qid;
8899 	struct wm_softc *sc = ifp->if_softc;
8900 	struct wm_txqueue *txq;
8901 
8902 	qid = wm_select_txqueue(ifp, m);
8903 	txq = &sc->sc_queue[qid].wmq_txq;
8904 
8905 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
8906 		m_freem(m);
8907 		WM_Q_EVCNT_INCR(txq, pcqdrop);
8908 		return ENOBUFS;
8909 	}
8910 
8911 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
8912 	if_statadd_ref(ifp, nsr, if_obytes, m->m_pkthdr.len);
8913 	if (m->m_flags & M_MCAST)
8914 		if_statinc_ref(ifp, nsr, if_omcasts);
8915 	IF_STAT_PUTREF(ifp);
8916 
8917 	if (mutex_tryenter(txq->txq_lock)) {
8918 		if (!txq->txq_stopping)
8919 			wm_transmit_locked(ifp, txq);
8920 		mutex_exit(txq->txq_lock);
8921 	}
8922 
8923 	return 0;
8924 }
8925 
8926 static void
8927 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
8928 {
8929 
8930 	wm_send_common_locked(ifp, txq, true);
8931 }
8932 
8933 static void
8934 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
8935     bool is_transmit)
8936 {
8937 	struct wm_softc *sc = ifp->if_softc;
8938 	struct mbuf *m0;
8939 	struct wm_txsoft *txs;
8940 	bus_dmamap_t dmamap;
8941 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
8942 	bus_addr_t curaddr;
8943 	bus_size_t seglen, curlen;
8944 	uint32_t cksumcmd;
8945 	uint8_t cksumfields;
8946 	bool remap = true;
8947 
8948 	KASSERT(mutex_owned(txq->txq_lock));
8949 	KASSERT(!txq->txq_stopping);
8950 
8951 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
8952 		return;
8953 
8954 	if (__predict_false(wm_linkdown_discard(txq))) {
8955 		do {
8956 			if (is_transmit)
8957 				m0 = pcq_get(txq->txq_interq);
8958 			else
8959 				IFQ_DEQUEUE(&ifp->if_snd, m0);
8960 			/*
8961 			 * Increment the successful packet counter, as in the
8962 			 * case where the packet is discarded by a link-down PHY.
8963 			 */
8964 			if (m0 != NULL) {
8965 				if_statinc(ifp, if_opackets);
8966 				m_freem(m0);
8967 			}
8968 		} while (m0 != NULL);
8969 		return;
8970 	}
8971 
8972 	/* Remember the previous number of free descriptors. */
8973 	ofree = txq->txq_free;
8974 
8975 	/*
8976 	 * Loop through the send queue, setting up transmit descriptors
8977 	 * until we drain the queue, or use up all available transmit
8978 	 * descriptors.
8979 	 */
8980 	for (;;) {
8981 		m0 = NULL;
8982 
8983 		/* Get a work queue entry. */
8984 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
8985 			wm_txeof(txq, UINT_MAX);
8986 			if (txq->txq_sfree == 0) {
8987 				DPRINTF(sc, WM_DEBUG_TX,
8988 				    ("%s: TX: no free job descriptors\n",
8989 					device_xname(sc->sc_dev)));
8990 				WM_Q_EVCNT_INCR(txq, txsstall);
8991 				break;
8992 			}
8993 		}
8994 
8995 		/* Grab a packet off the queue. */
8996 		if (is_transmit)
8997 			m0 = pcq_get(txq->txq_interq);
8998 		else
8999 			IFQ_DEQUEUE(&ifp->if_snd, m0);
9000 		if (m0 == NULL)
9001 			break;
9002 
9003 		DPRINTF(sc, WM_DEBUG_TX,
9004 		    ("%s: TX: have packet to transmit: %p\n",
9005 			device_xname(sc->sc_dev), m0));
9006 
9007 		txs = &txq->txq_soft[txq->txq_snext];
9008 		dmamap = txs->txs_dmamap;
9009 
9010 		use_tso = (m0->m_pkthdr.csum_flags &
9011 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
9012 
9013 		/*
9014 		 * So says the Linux driver:
9015 		 * The controller does a simple calculation to make sure
9016 		 * there is enough room in the FIFO before initiating the
9017 		 * DMA for each buffer. The calc is:
9018 		 *	4 = ceil(buffer len / MSS)
9019 		 * To make sure we don't overrun the FIFO, adjust the max
9020 		 * buffer len if the MSS drops.
9021 		 */
9022 		dmamap->dm_maxsegsz =
9023 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
9024 		    ? m0->m_pkthdr.segsz << 2
9025 		    : WTX_MAX_LEN;
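		/*
		 * That is, for TSO cap each DMA segment at four times the
		 * MSS so the FIFO estimate above stays valid; otherwise
		 * allow segments up to WTX_MAX_LEN.
		 */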
9026 
9027 		/*
9028 		 * Load the DMA map.  If this fails, the packet either
9029 		 * didn't fit in the allotted number of segments, or we
9030 		 * were short on resources.  For the too-many-segments
9031 		 * case, we simply report an error and drop the packet,
9032 		 * since we can't sanely copy a jumbo packet to a single
9033 		 * buffer.
9034 		 */
9035 retry:
9036 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
9037 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
9038 		if (__predict_false(error)) {
9039 			if (error == EFBIG) {
9040 				if (remap == true) {
9041 					struct mbuf *m;
9042 
9043 					remap = false;
9044 					m = m_defrag(m0, M_NOWAIT);
9045 					if (m != NULL) {
9046 						WM_Q_EVCNT_INCR(txq, defrag);
9047 						m0 = m;
9048 						goto retry;
9049 					}
9050 				}
9051 				WM_Q_EVCNT_INCR(txq, toomanyseg);
9052 				log(LOG_ERR, "%s: Tx packet consumes too many "
9053 				    "DMA segments, dropping...\n",
9054 				    device_xname(sc->sc_dev));
9055 				wm_dump_mbuf_chain(sc, m0);
9056 				m_freem(m0);
9057 				continue;
9058 			}
9059 			/* Short on resources, just stop for now. */
9060 			DPRINTF(sc, WM_DEBUG_TX,
9061 			    ("%s: TX: dmamap load failed: %d\n",
9062 				device_xname(sc->sc_dev), error));
9063 			break;
9064 		}
9065 
9066 		segs_needed = dmamap->dm_nsegs;
9067 		if (use_tso) {
9068 			/* For sentinel descriptor; see below. */
9069 			segs_needed++;
9070 		}
9071 
9072 		/*
9073 		 * Ensure we have enough descriptors free to describe
9074 		 * the packet. Note, we always reserve one descriptor
9075 		 * at the end of the ring due to the semantics of the
9076 		 * TDT register, plus one more in the event we need
9077 		 * to load offload context.
9078 		 */
9079 		if (segs_needed > txq->txq_free - 2) {
9080 			/*
9081 			 * Not enough free descriptors to transmit this
9082 			 * packet.  We haven't committed anything yet,
9083 			 * so just unload the DMA map, put the packet
9084 			 * back on the queue, and punt. Notify the upper
9085 			 * layer that there are no more slots left.
9086 			 */
9087 			DPRINTF(sc, WM_DEBUG_TX,
9088 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
9089 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
9090 				segs_needed, txq->txq_free - 1));
9091 			txq->txq_flags |= WM_TXQ_NO_SPACE;
9092 			bus_dmamap_unload(sc->sc_dmat, dmamap);
9093 			WM_Q_EVCNT_INCR(txq, txdstall);
9094 			break;
9095 		}
9096 
9097 		/*
9098 		 * Check for 82547 Tx FIFO bug. We need to do this
9099 		 * once we know we can transmit the packet, since we
9100 		 * do some internal FIFO space accounting here.
9101 		 */
9102 		if (sc->sc_type == WM_T_82547 &&
9103 		    wm_82547_txfifo_bugchk(sc, m0)) {
9104 			DPRINTF(sc, WM_DEBUG_TX,
9105 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
9106 				device_xname(sc->sc_dev)));
9107 			txq->txq_flags |= WM_TXQ_NO_SPACE;
9108 			bus_dmamap_unload(sc->sc_dmat, dmamap);
9109 			WM_Q_EVCNT_INCR(txq, fifo_stall);
9110 			break;
9111 		}
9112 
9113 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
9114 
9115 		DPRINTF(sc, WM_DEBUG_TX,
9116 		    ("%s: TX: packet has %d (%d) DMA segments\n",
9117 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
9118 
9119 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
9120 
9121 		/*
9122 		 * Store a pointer to the packet so that we can free it
9123 		 * later.
9124 		 *
9125 		 * Initially, we consider the number of descriptors the
9126 		 * packet uses to be the number of DMA segments.  This may be
9127 		 * incremented by 1 if we do checksum offload (a descriptor
9128 		 * is used to set the checksum context).
9129 		 */
9130 		txs->txs_mbuf = m0;
9131 		txs->txs_firstdesc = txq->txq_next;
9132 		txs->txs_ndesc = segs_needed;
9133 
9134 		/* Set up offload parameters for this packet. */
9135 		if (m0->m_pkthdr.csum_flags &
9136 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
9137 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
9138 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
9139 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
9140 		} else {
9141 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
9142 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
9143 			cksumcmd = 0;
9144 			cksumfields = 0;
9145 		}
9146 
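		/*
		 * All data descriptors request delayed Tx interrupts (IDE)
		 * and hardware FCS/CRC insertion (IFCS).
		 */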
9147 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
9148 
9149 		/* Sync the DMA map. */
9150 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
9151 		    BUS_DMASYNC_PREWRITE);
9152 
9153 		/* Initialize the transmit descriptor. */
9154 		for (nexttx = txq->txq_next, seg = 0;
9155 		     seg < dmamap->dm_nsegs; seg++) {
9156 			for (seglen = dmamap->dm_segs[seg].ds_len,
9157 			     curaddr = dmamap->dm_segs[seg].ds_addr;
9158 			     seglen != 0;
9159 			     curaddr += curlen, seglen -= curlen,
9160 			     nexttx = WM_NEXTTX(txq, nexttx)) {
9161 				curlen = seglen;
9162 
9163 				/*
9164 				 * So says the Linux driver:
9165 				 * Work around for premature descriptor
9166 				 * write-backs in TSO mode.  Append a
9167 				 * 4-byte sentinel descriptor.
9168 				 */
9169 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
9170 				    curlen > 8)
9171 					curlen -= 4;
9172 
9173 				wm_set_dma_addr(
9174 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
9175 				txq->txq_descs[nexttx].wtx_cmdlen
9176 				    = htole32(cksumcmd | curlen);
9177 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
9178 				    = 0;
9179 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
9180 				    = cksumfields;
9181 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
9182 				lasttx = nexttx;
9183 
9184 				DPRINTF(sc, WM_DEBUG_TX,
9185 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
9186 					"len %#04zx\n",
9187 					device_xname(sc->sc_dev), nexttx,
9188 					(uint64_t)curaddr, curlen));
9189 			}
9190 		}
9191 
9192 		KASSERT(lasttx != -1);
9193 
9194 		/*
9195 		 * Set up the command byte on the last descriptor of
9196 		 * the packet. If we're in the interrupt delay window,
9197 		 * delay the interrupt.
9198 		 */
9199 		txq->txq_descs[lasttx].wtx_cmdlen |=
9200 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
9201 
9202 		/*
9203 		 * If VLANs are enabled and the packet has a VLAN tag, set
9204 		 * up the descriptor to encapsulate the packet for us.
9205 		 *
9206 		 * This is only valid on the last descriptor of the packet.
9207 		 */
9208 		if (vlan_has_tag(m0)) {
9209 			txq->txq_descs[lasttx].wtx_cmdlen |=
9210 			    htole32(WTX_CMD_VLE);
9211 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
9212 			    = htole16(vlan_get_tag(m0));
9213 		}
9214 
9215 		txs->txs_lastdesc = lasttx;
9216 
9217 		DPRINTF(sc, WM_DEBUG_TX,
9218 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
9219 			device_xname(sc->sc_dev),
9220 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
9221 
9222 		/* Sync the descriptors we're using. */
9223 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
9224 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
9225 
9226 		/* Give the packet to the chip. */
9227 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
9228 
9229 		DPRINTF(sc, WM_DEBUG_TX,
9230 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
9231 
9232 		DPRINTF(sc, WM_DEBUG_TX,
9233 		    ("%s: TX: finished transmitting packet, job %d\n",
9234 			device_xname(sc->sc_dev), txq->txq_snext));
9235 
9236 		/* Advance the tx pointer. */
9237 		txq->txq_free -= txs->txs_ndesc;
9238 		txq->txq_next = nexttx;
9239 
9240 		txq->txq_sfree--;
9241 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
9242 
9243 		/* Pass the packet to any BPF listeners. */
9244 		bpf_mtap(ifp, m0, BPF_D_OUT);
9245 	}
9246 
9247 	if (m0 != NULL) {
9248 		txq->txq_flags |= WM_TXQ_NO_SPACE;
9249 		WM_Q_EVCNT_INCR(txq, descdrop);
9250 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
9251 			__func__));
9252 		m_freem(m0);
9253 	}
9254 
9255 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
9256 		/* No more slots; notify upper layer. */
9257 		txq->txq_flags |= WM_TXQ_NO_SPACE;
9258 	}
9259 
9260 	if (txq->txq_free != ofree) {
9261 		/* Set a watchdog timer in case the chip flakes out. */
9262 		txq->txq_lastsent = time_uptime;
9263 		txq->txq_sending = true;
9264 	}
9265 }
9266 
9267 /*
9268  * wm_nq_tx_offload:
9269  *
9270  *	Set up TCP/IP checksumming parameters for the
9271  *	specified packet, for NEWQUEUE devices
9272  */
9273 static void
9274 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
9275     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
9276 {
9277 	struct mbuf *m0 = txs->txs_mbuf;
9278 	uint32_t vl_len, mssidx, cmdc;
9279 	struct ether_header *eh;
9280 	int offset, iphl;
9281 
9282 	/*
9283 	 * XXX It would be nice if the mbuf pkthdr had offset
9284 	 * fields for the protocol headers.
9285 	 */
9286 	*cmdlenp = 0;
9287 	*fieldsp = 0;
9288 
9289 	eh = mtod(m0, struct ether_header *);
9290 	switch (htons(eh->ether_type)) {
9291 	case ETHERTYPE_IP:
9292 	case ETHERTYPE_IPV6:
9293 		offset = ETHER_HDR_LEN;
9294 		break;
9295 
9296 	case ETHERTYPE_VLAN:
9297 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
9298 		break;
9299 
9300 	default:
9301 		/* Don't support this protocol or encapsulation. */
9302 		*do_csum = false;
9303 		return;
9304 	}
9305 	*do_csum = true;
9306 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
9307 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
9308 
9309 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
9310 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
9311 
9312 	if ((m0->m_pkthdr.csum_flags &
9313 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
9314 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
9315 	} else {
9316 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
9317 	}
9318 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
9319 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
9320 
9321 	if (vlan_has_tag(m0)) {
9322 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
9323 		    << NQTXC_VLLEN_VLAN_SHIFT);
9324 		*cmdlenp |= NQTX_CMD_VLE;
9325 	}
9326 
9327 	mssidx = 0;
9328 
9329 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
9330 		int hlen = offset + iphl;
9331 		int tcp_hlen;
9332 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
9333 
9334 		if (__predict_false(m0->m_len <
9335 				    (hlen + sizeof(struct tcphdr)))) {
9336 			/*
9337 			 * TCP/IP headers are not in the first mbuf; we need
9338 			 * to do this the slow and painful way. Let's just
9339 			 * hope this doesn't happen very often.
9340 			 */
9341 			struct tcphdr th;
9342 
9343 			WM_Q_EVCNT_INCR(txq, tsopain);
9344 
9345 			m_copydata(m0, hlen, sizeof(th), &th);
9346 			if (v4) {
9347 				struct ip ip;
9348 
9349 				m_copydata(m0, offset, sizeof(ip), &ip);
9350 				ip.ip_len = 0;
9351 				m_copyback(m0,
9352 				    offset + offsetof(struct ip, ip_len),
9353 				    sizeof(ip.ip_len), &ip.ip_len);
9354 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
9355 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
9356 			} else {
9357 				struct ip6_hdr ip6;
9358 
9359 				m_copydata(m0, offset, sizeof(ip6), &ip6);
9360 				ip6.ip6_plen = 0;
9361 				m_copyback(m0,
9362 				    offset + offsetof(struct ip6_hdr, ip6_plen),
9363 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
9364 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
9365 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
9366 			}
9367 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
9368 			    sizeof(th.th_sum), &th.th_sum);
9369 
9370 			tcp_hlen = th.th_off << 2;
9371 		} else {
9372 			/*
9373 			 * TCP/IP headers are in the first mbuf; we can do
9374 			 * this the easy way.
9375 			 */
9376 			struct tcphdr *th;
9377 
9378 			if (v4) {
9379 				struct ip *ip =
9380 				    (void *)(mtod(m0, char *) + offset);
9381 				th = (void *)(mtod(m0, char *) + hlen);
9382 
9383 				ip->ip_len = 0;
9384 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
9385 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
9386 			} else {
9387 				struct ip6_hdr *ip6 =
9388 				    (void *)(mtod(m0, char *) + offset);
9389 				th = (void *)(mtod(m0, char *) + hlen);
9390 
9391 				ip6->ip6_plen = 0;
9392 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
9393 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
9394 			}
9395 			tcp_hlen = th->th_off << 2;
9396 		}
9397 		hlen += tcp_hlen;
9398 		*cmdlenp |= NQTX_CMD_TSE;
9399 
9400 		if (v4) {
9401 			WM_Q_EVCNT_INCR(txq, tso);
9402 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
9403 		} else {
9404 			WM_Q_EVCNT_INCR(txq, tso6);
9405 			*fieldsp |= NQTXD_FIELDS_TUXSM;
9406 		}
9407 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
9408 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
9409 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
9410 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
9411 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
9412 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
9413 	} else {
9414 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
9415 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
9416 	}
9417 
9418 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
9419 		*fieldsp |= NQTXD_FIELDS_IXSM;
9420 		cmdc |= NQTXC_CMD_IP4;
9421 	}
9422 
9423 	if (m0->m_pkthdr.csum_flags &
9424 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
9425 		WM_Q_EVCNT_INCR(txq, tusum);
9426 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
9427 			cmdc |= NQTXC_CMD_TCP;
9428 		else
9429 			cmdc |= NQTXC_CMD_UDP;
9430 
9431 		cmdc |= NQTXC_CMD_IP4;
9432 		*fieldsp |= NQTXD_FIELDS_TUXSM;
9433 	}
9434 	if (m0->m_pkthdr.csum_flags &
9435 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
9436 		WM_Q_EVCNT_INCR(txq, tusum6);
9437 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
9438 			cmdc |= NQTXC_CMD_TCP;
9439 		else
9440 			cmdc |= NQTXC_CMD_UDP;
9441 
9442 		cmdc |= NQTXC_CMD_IP6;
9443 		*fieldsp |= NQTXD_FIELDS_TUXSM;
9444 	}
9445 
9446 	/*
9447 	 * We don't have to write a context descriptor for every packet on
9448 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
9449 	 * I210 and I211. Writing one per Tx queue is enough for these
9450 	 * controllers.
9451 	 * Writing a context descriptor for every packet adds overhead,
9452 	 * but it does not cause problems.
9453 	 */
9454 	/* Fill in the context descriptor. */
9455 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_vl_len =
9456 	    htole32(vl_len);
9457 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_sn = 0;
9458 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_cmd =
9459 	    htole32(cmdc);
9460 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_mssidx =
9461 	    htole32(mssidx);
9462 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
9463 	DPRINTF(sc, WM_DEBUG_TX,
9464 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
9465 		txq->txq_next, 0, vl_len));
9466 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
9467 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
9468 	txs->txs_ndesc++;
9469 }
9470 
9471 /*
9472  * wm_nq_start:		[ifnet interface function]
9473  *
9474  *	Start packet transmission on the interface for NEWQUEUE devices
9475  */
9476 static void
9477 wm_nq_start(struct ifnet *ifp)
9478 {
9479 	struct wm_softc *sc = ifp->if_softc;
9480 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
9481 
9482 	KASSERT(if_is_mpsafe(ifp));
9483 	/*
9484 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
9485 	 */
9486 
9487 	mutex_enter(txq->txq_lock);
9488 	if (!txq->txq_stopping)
9489 		wm_nq_start_locked(ifp);
9490 	mutex_exit(txq->txq_lock);
9491 }
9492 
9493 static void
9494 wm_nq_start_locked(struct ifnet *ifp)
9495 {
9496 	struct wm_softc *sc = ifp->if_softc;
9497 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
9498 
9499 	wm_nq_send_common_locked(ifp, txq, false);
9500 }
9501 
9502 static int
9503 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
9504 {
9505 	int qid;
9506 	struct wm_softc *sc = ifp->if_softc;
9507 	struct wm_txqueue *txq;
9508 
9509 	qid = wm_select_txqueue(ifp, m);
9510 	txq = &sc->sc_queue[qid].wmq_txq;
9511 
9512 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
9513 		m_freem(m);
9514 		WM_Q_EVCNT_INCR(txq, pcqdrop);
9515 		return ENOBUFS;
9516 	}
9517 
9518 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
9519 	if_statadd_ref(ifp, nsr, if_obytes, m->m_pkthdr.len);
9520 	if (m->m_flags & M_MCAST)
9521 		if_statinc_ref(ifp, nsr, if_omcasts);
9522 	IF_STAT_PUTREF(ifp);
9523 
9524 	/*
9525 	 * There are two situations in which this mutex_tryenter() can
9526 	 * fail at run time:
9527 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
9528 	 *     (2) contention with the deferred if_start softint (wm_handle_queue())
9529 	 * In the case of (1), the last packet enqueued to txq->txq_interq is
9530 	 * dequeued by wm_deferred_start_locked(). So, it does not get stuck.
9531 	 * In the case of (2), the last packet enqueued to txq->txq_interq is
9532 	 * also dequeued by wm_deferred_start_locked(). So, it does not get
9533 	 * stuck, either.
9534 	 */
9535 	if (mutex_tryenter(txq->txq_lock)) {
9536 		if (!txq->txq_stopping)
9537 			wm_nq_transmit_locked(ifp, txq);
9538 		mutex_exit(txq->txq_lock);
9539 	}
9540 
9541 	return 0;
9542 }
9543 
9544 static void
9545 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
9546 {
9547 
9548 	wm_nq_send_common_locked(ifp, txq, true);
9549 }
9550 
9551 static void
9552 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
9553     bool is_transmit)
9554 {
9555 	struct wm_softc *sc = ifp->if_softc;
9556 	struct mbuf *m0;
9557 	struct wm_txsoft *txs;
9558 	bus_dmamap_t dmamap;
9559 	int error, nexttx, lasttx = -1, seg, segs_needed;
9560 	bool do_csum, sent;
9561 	bool remap = true;
9562 
9563 	KASSERT(mutex_owned(txq->txq_lock));
9564 	KASSERT(!txq->txq_stopping);
9565 
9566 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
9567 		return;
9568 
9569 	if (__predict_false(wm_linkdown_discard(txq))) {
9570 		do {
9571 			if (is_transmit)
9572 				m0 = pcq_get(txq->txq_interq);
9573 			else
9574 				IFQ_DEQUEUE(&ifp->if_snd, m0);
9575 			/*
9576 			 * Increment the successful packet counter, as in the
9577 			 * case where the packet is discarded by a link-down PHY.
9578 			 */
9579 			if (m0 != NULL) {
9580 				if_statinc(ifp, if_opackets);
9581 				m_freem(m0);
9582 			}
9583 		} while (m0 != NULL);
9584 		return;
9585 	}
9586 
9587 	sent = false;
9588 
9589 	/*
9590 	 * Loop through the send queue, setting up transmit descriptors
9591 	 * until we drain the queue, or use up all available transmit
9592 	 * descriptors.
9593 	 */
9594 	for (;;) {
9595 		m0 = NULL;
9596 
9597 		/* Get a work queue entry. */
9598 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
9599 			wm_txeof(txq, UINT_MAX);
9600 			if (txq->txq_sfree == 0) {
9601 				DPRINTF(sc, WM_DEBUG_TX,
9602 				    ("%s: TX: no free job descriptors\n",
9603 					device_xname(sc->sc_dev)));
9604 				WM_Q_EVCNT_INCR(txq, txsstall);
9605 				break;
9606 			}
9607 		}
9608 
9609 		/* Grab a packet off the queue. */
9610 		if (is_transmit)
9611 			m0 = pcq_get(txq->txq_interq);
9612 		else
9613 			IFQ_DEQUEUE(&ifp->if_snd, m0);
9614 		if (m0 == NULL)
9615 			break;
9616 
9617 		DPRINTF(sc, WM_DEBUG_TX,
9618 		    ("%s: TX: have packet to transmit: %p\n",
9619 			device_xname(sc->sc_dev), m0));
9620 
9621 		txs = &txq->txq_soft[txq->txq_snext];
9622 		dmamap = txs->txs_dmamap;
9623 
9624 		/*
9625 		 * Load the DMA map.  If this fails, the packet either
9626 		 * didn't fit in the allotted number of segments, or we
9627 		 * were short on resources.  For the too-many-segments
9628 		 * case, we simply report an error and drop the packet,
9629 		 * since we can't sanely copy a jumbo packet to a single
9630 		 * buffer.
9631 		 */
9632 retry:
9633 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
9634 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
9635 		if (__predict_false(error)) {
9636 			if (error == EFBIG) {
9637 				if (remap == true) {
9638 					struct mbuf *m;
9639 
9640 					remap = false;
9641 					m = m_defrag(m0, M_NOWAIT);
9642 					if (m != NULL) {
9643 						WM_Q_EVCNT_INCR(txq, defrag);
9644 						m0 = m;
9645 						goto retry;
9646 					}
9647 				}
9648 				WM_Q_EVCNT_INCR(txq, toomanyseg);
9649 				log(LOG_ERR, "%s: Tx packet consumes too many "
9650 				    "DMA segments, dropping...\n",
9651 				    device_xname(sc->sc_dev));
9652 				wm_dump_mbuf_chain(sc, m0);
9653 				m_freem(m0);
9654 				continue;
9655 			}
9656 			/* Short on resources, just stop for now. */
9657 			DPRINTF(sc, WM_DEBUG_TX,
9658 			    ("%s: TX: dmamap load failed: %d\n",
9659 				device_xname(sc->sc_dev), error));
9660 			break;
9661 		}
9662 
9663 		segs_needed = dmamap->dm_nsegs;
9664 
9665 		/*
9666 		 * Ensure we have enough descriptors free to describe
9667 		 * the packet. Note, we always reserve one descriptor
9668 		 * at the end of the ring due to the semantics of the
9669 		 * TDT register, plus one more in the event we need
9670 		 * to load offload context.
9671 		 */
9672 		if (segs_needed > txq->txq_free - 2) {
9673 			/*
9674 			 * Not enough free descriptors to transmit this
9675 			 * packet.  We haven't committed anything yet,
9676 			 * so just unload the DMA map, put the packet
9677 			 * back on the queue, and punt. Notify the upper
9678 			 * layer that there are no more slots left.
9679 			 */
9680 			DPRINTF(sc, WM_DEBUG_TX,
9681 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
9682 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
9683 				segs_needed, txq->txq_free - 1));
9684 			txq->txq_flags |= WM_TXQ_NO_SPACE;
9685 			bus_dmamap_unload(sc->sc_dmat, dmamap);
9686 			WM_Q_EVCNT_INCR(txq, txdstall);
9687 			break;
9688 		}
9689 
9690 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
9691 
9692 		DPRINTF(sc, WM_DEBUG_TX,
9693 		    ("%s: TX: packet has %d (%d) DMA segments\n",
9694 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
9695 
9696 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
9697 
9698 		/*
9699 		 * Store a pointer to the packet so that we can free it
9700 		 * later.
9701 		 *
9702 		 * Initially, we consider the number of descriptors the
9703 		 * packet uses to be the number of DMA segments.  This may be
9704 		 * incremented by 1 if we do checksum offload (a descriptor
9705 		 * is used to set the checksum context).
9706 		 */
9707 		txs->txs_mbuf = m0;
9708 		txs->txs_firstdesc = txq->txq_next;
9709 		txs->txs_ndesc = segs_needed;
9710 
9711 		/* Set up offload parameters for this packet. */
9712 		uint32_t cmdlen, fields, dcmdlen;
9713 		if (m0->m_pkthdr.csum_flags &
9714 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
9715 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
9716 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
9717 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
9718 			    &do_csum);
9719 		} else {
9720 			do_csum = false;
9721 			cmdlen = 0;
9722 			fields = 0;
9723 		}
9724 
9725 		/* Sync the DMA map. */
9726 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
9727 		    BUS_DMASYNC_PREWRITE);
9728 
9729 		/* Initialize the first transmit descriptor. */
9730 		nexttx = txq->txq_next;
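		/*
		 * When no offload is requested, a legacy descriptor is
		 * sufficient even on NEWQUEUE hardware; otherwise use an
		 * advanced data descriptor to match the context descriptor
		 * written by wm_nq_tx_offload().
		 */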
9731 		if (!do_csum) {
9732 			/* Set up a legacy descriptor */
9733 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
9734 			    dmamap->dm_segs[0].ds_addr);
9735 			txq->txq_descs[nexttx].wtx_cmdlen =
9736 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
9737 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
9738 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
9739 			if (vlan_has_tag(m0)) {
9740 				txq->txq_descs[nexttx].wtx_cmdlen |=
9741 				    htole32(WTX_CMD_VLE);
9742 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
9743 				    htole16(vlan_get_tag(m0));
9744 			} else
9745 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
9746 
9747 			dcmdlen = 0;
9748 		} else {
9749 			/* Set up an advanced data descriptor */
9750 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
9751 			    htole64(dmamap->dm_segs[0].ds_addr);
9752 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
9753 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
9754 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
9755 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
9756 			    htole32(fields);
9757 			DPRINTF(sc, WM_DEBUG_TX,
9758 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
9759 				device_xname(sc->sc_dev), nexttx,
9760 				(uint64_t)dmamap->dm_segs[0].ds_addr));
9761 			DPRINTF(sc, WM_DEBUG_TX,
9762 			    ("\t 0x%08x%08x\n", fields,
9763 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
9764 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
9765 		}
9766 
9767 		lasttx = nexttx;
9768 		nexttx = WM_NEXTTX(txq, nexttx);
9769 		/*
9770 		 * Fill in the next descriptors. Legacy or advanced format
9771 		 * is the same here.
9772 		 */
9773 		for (seg = 1; seg < dmamap->dm_nsegs;
9774 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
9775 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
9776 			    htole64(dmamap->dm_segs[seg].ds_addr);
9777 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
9778 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
9779 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
9780 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
9781 			lasttx = nexttx;
9782 
9783 			DPRINTF(sc, WM_DEBUG_TX,
9784 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
9785 				device_xname(sc->sc_dev), nexttx,
9786 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
9787 				dmamap->dm_segs[seg].ds_len));
9788 		}
9789 
9790 		KASSERT(lasttx != -1);
9791 
9792 		/*
9793 		 * Set up the command byte on the last descriptor of
9794 		 * the packet. If we're in the interrupt delay window,
9795 		 * delay the interrupt.
9796 		 */
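		/*
		 * EOP and RS have the same encoding in legacy and NQ
		 * descriptors (asserted below), so the legacy field can be
		 * used for both formats.
		 */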
9797 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
9798 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
9799 		txq->txq_descs[lasttx].wtx_cmdlen |=
9800 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
9801 
9802 		txs->txs_lastdesc = lasttx;
9803 
9804 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
9805 		    device_xname(sc->sc_dev),
9806 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
9807 
9808 		/* Sync the descriptors we're using. */
9809 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
9810 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
9811 
9812 		/* Give the packet to the chip. */
9813 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
9814 		sent = true;
9815 
9816 		DPRINTF(sc, WM_DEBUG_TX,
9817 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
9818 
9819 		DPRINTF(sc, WM_DEBUG_TX,
9820 		    ("%s: TX: finished transmitting packet, job %d\n",
9821 			device_xname(sc->sc_dev), txq->txq_snext));
9822 
9823 		/* Advance the tx pointer. */
9824 		txq->txq_free -= txs->txs_ndesc;
9825 		txq->txq_next = nexttx;
9826 
9827 		txq->txq_sfree--;
9828 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
9829 
9830 		/* Pass the packet to any BPF listeners. */
9831 		bpf_mtap(ifp, m0, BPF_D_OUT);
9832 	}
9833 
9834 	if (m0 != NULL) {
9835 		txq->txq_flags |= WM_TXQ_NO_SPACE;
9836 		WM_Q_EVCNT_INCR(txq, descdrop);
9837 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
9838 			__func__));
9839 		m_freem(m0);
9840 	}
9841 
9842 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
9843 		/* No more slots; notify upper layer. */
9844 		txq->txq_flags |= WM_TXQ_NO_SPACE;
9845 	}
9846 
9847 	if (sent) {
9848 		/* Set a watchdog timer in case the chip flakes out. */
9849 		txq->txq_lastsent = time_uptime;
9850 		txq->txq_sending = true;
9851 	}
9852 }
9853 
9854 static void
9855 wm_deferred_start_locked(struct wm_txqueue *txq)
9856 {
9857 	struct wm_softc *sc = txq->txq_sc;
9858 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9859 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
9860 	int qid = wmq->wmq_id;
9861 
9862 	KASSERT(mutex_owned(txq->txq_lock));
9863 	KASSERT(!txq->txq_stopping);
9864 
9865 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
9866 		/* XXX needed for ALTQ or single-CPU systems */
9867 		if (qid == 0)
9868 			wm_nq_start_locked(ifp);
9869 		wm_nq_transmit_locked(ifp, txq);
9870 	} else {
9871 		/* XXX needed for ALTQ or single-CPU systems */
9872 		if (qid == 0)
9873 			wm_start_locked(ifp);
9874 		wm_transmit_locked(ifp, txq);
9875 	}
9876 }
9877 
9878 /* Interrupt */
9879 
9880 /*
9881  * wm_txeof:
9882  *
9883  *	Helper; handle transmit interrupts.
9884  */
9885 static bool
9886 wm_txeof(struct wm_txqueue *txq, u_int limit)
9887 {
9888 	struct wm_softc *sc = txq->txq_sc;
9889 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9890 	struct wm_txsoft *txs;
9891 	int count = 0;
9892 	int i;
9893 	uint8_t status;
9894 	bool more = false;
9895 
9896 	KASSERT(mutex_owned(txq->txq_lock));
9897 
9898 	if (txq->txq_stopping)
9899 		return false;
9900 
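	/*
	 * We are about to try to reclaim descriptors, so clear the
	 * no-space flag; the send path sets it again if the ring is
	 * still full.
	 */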
9901 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
9902 
9903 	/*
9904 	 * Go through the Tx list and free mbufs for those
9905 	 * frames which have been transmitted.
9906 	 */
9907 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
9908 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
9909 		txs = &txq->txq_soft[i];
9910 
9911 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
9912 			device_xname(sc->sc_dev), i));
9913 
9914 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
9915 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
9916 
9917 		status =
9918 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
9919 		if ((status & WTX_ST_DD) == 0) {
9920 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
9921 			    BUS_DMASYNC_PREREAD);
9922 			break;
9923 		}
9924 
9925 		if (limit-- == 0) {
9926 			more = true;
9927 			DPRINTF(sc, WM_DEBUG_TX,
9928 			    ("%s: TX: loop limited, job %d is not processed\n",
9929 				device_xname(sc->sc_dev), i));
9930 			break;
9931 		}
9932 
9933 		count++;
9934 		DPRINTF(sc, WM_DEBUG_TX,
9935 		    ("%s: TX: job %d done: descs %d..%d\n",
9936 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
9937 		    txs->txs_lastdesc));
9938 
9939 #ifdef WM_EVENT_COUNTERS
9940 		if ((status & WTX_ST_TU) && (sc->sc_type <= WM_T_82544))
9941 			WM_Q_EVCNT_INCR(txq, underrun);
9942 #endif /* WM_EVENT_COUNTERS */
9943 
9944 		/*
9945 		 * The documentation for the 82574 and newer says the status
9946 		 * field has neither the EC (Excessive Collision) bit nor the
9947 		 * LC (Late Collision) bit (both reserved). Refer to the "PCIe
9948 		 * GbE Controller Open Source Software Developer's Manual",
9949 		 * the 82574 datasheet and newer.
9950 		 *
9951 		 * XXX I saw the LC bit set on an I218 even though the media was
9952 		 * full duplex, so it may have some other meaning (no document).
9953 		 */
9954 
9955 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
9956 		    && ((sc->sc_type < WM_T_82574)
9957 			|| (sc->sc_type == WM_T_80003))) {
9958 			if_statinc(ifp, if_oerrors);
9959 			if (status & WTX_ST_LC)
9960 				log(LOG_WARNING, "%s: late collision\n",
9961 				    device_xname(sc->sc_dev));
9962 			else if (status & WTX_ST_EC) {
9963 				if_statadd(ifp, if_collisions,
9964 				    TX_COLLISION_THRESHOLD + 1);
9965 				log(LOG_WARNING, "%s: excessive collisions\n",
9966 				    device_xname(sc->sc_dev));
9967 			}
9968 		} else
9969 			if_statinc(ifp, if_opackets);
9970 
9971 		txq->txq_packets++;
9972 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
9973 
9974 		txq->txq_free += txs->txs_ndesc;
9975 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
9976 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
9977 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
9978 		m_freem(txs->txs_mbuf);
9979 		txs->txs_mbuf = NULL;
9980 	}
9981 
9982 	/* Update the dirty transmit buffer pointer. */
9983 	txq->txq_sdirty = i;
9984 	DPRINTF(sc, WM_DEBUG_TX,
9985 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
9986 
9987 	if (count != 0)
9988 		rnd_add_uint32(&sc->rnd_source, count);
9989 
9990 	/*
9991 	 * If there are no more pending transmissions, cancel the watchdog
9992 	 * timer.
9993 	 */
9994 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
9995 		txq->txq_sending = false;
9996 
9997 	return more;
9998 }
9999 
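/*
 * Receive descriptor accessors.  Three descriptor layouts are in use:
 * the 82574's extended format, the "new queue" format (WM_F_NEWQUEUE,
 * 82575 and newer) and the original legacy format.  The helpers below
 * hide the layout differences from wm_rxeof().
 */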
10000 static inline uint32_t
10001 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
10002 {
10003 	struct wm_softc *sc = rxq->rxq_sc;
10004 
10005 	if (sc->sc_type == WM_T_82574)
10006 		return EXTRXC_STATUS(
10007 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
10008 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
10009 		return NQRXC_STATUS(
10010 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
10011 	else
10012 		return rxq->rxq_descs[idx].wrx_status;
10013 }
10014 
10015 static inline uint32_t
10016 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
10017 {
10018 	struct wm_softc *sc = rxq->rxq_sc;
10019 
10020 	if (sc->sc_type == WM_T_82574)
10021 		return EXTRXC_ERROR(
10022 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
10023 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
10024 		return NQRXC_ERROR(
10025 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
10026 	else
10027 		return rxq->rxq_descs[idx].wrx_errors;
10028 }
10029 
10030 static inline uint16_t
10031 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
10032 {
10033 	struct wm_softc *sc = rxq->rxq_sc;
10034 
10035 	if (sc->sc_type == WM_T_82574)
10036 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
10037 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
10038 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
10039 	else
10040 		return rxq->rxq_descs[idx].wrx_special;
10041 }
10042 
10043 static inline int
10044 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
10045 {
10046 	struct wm_softc *sc = rxq->rxq_sc;
10047 
10048 	if (sc->sc_type == WM_T_82574)
10049 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
10050 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
10051 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
10052 	else
10053 		return rxq->rxq_descs[idx].wrx_len;
10054 }
10055 
10056 #ifdef WM_DEBUG
10057 static inline uint32_t
10058 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
10059 {
10060 	struct wm_softc *sc = rxq->rxq_sc;
10061 
10062 	if (sc->sc_type == WM_T_82574)
10063 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
10064 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
10065 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
10066 	else
10067 		return 0;
10068 }
10069 
10070 static inline uint8_t
10071 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
10072 {
10073 	struct wm_softc *sc = rxq->rxq_sc;
10074 
10075 	if (sc->sc_type == WM_T_82574)
10076 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
10077 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
10078 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
10079 	else
10080 		return 0;
10081 }
10082 #endif /* WM_DEBUG */
10083 
10084 static inline bool
10085 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
10086     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
10087 {
10088 
10089 	if (sc->sc_type == WM_T_82574)
10090 		return (status & ext_bit) != 0;
10091 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
10092 		return (status & nq_bit) != 0;
10093 	else
10094 		return (status & legacy_bit) != 0;
10095 }
10096 
10097 static inline bool
10098 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
10099     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
10100 {
10101 
10102 	if (sc->sc_type == WM_T_82574)
10103 		return (error & ext_bit) != 0;
10104 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
10105 		return (error & nq_bit) != 0;
10106 	else
10107 		return (error & legacy_bit) != 0;
10108 }
10109 
10110 static inline bool
10111 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
10112 {
10113 
10114 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
10115 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
10116 		return true;
10117 	else
10118 		return false;
10119 }
10120 
10121 static inline bool
10122 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
10123 {
10124 	struct wm_softc *sc = rxq->rxq_sc;
10125 
10126 	/* XXX missing error bit for newqueue? */
10127 	if (wm_rxdesc_is_set_error(sc, errors,
10128 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
10129 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
10130 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
10131 		NQRXC_ERROR_RXE)) {
10132 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
10133 		    EXTRXC_ERROR_SE, 0))
10134 			log(LOG_WARNING, "%s: symbol error\n",
10135 			    device_xname(sc->sc_dev));
10136 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
10137 		    EXTRXC_ERROR_SEQ, 0))
10138 			log(LOG_WARNING, "%s: receive sequence error\n",
10139 			    device_xname(sc->sc_dev));
10140 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
10141 		    EXTRXC_ERROR_CE, 0))
10142 			log(LOG_WARNING, "%s: CRC error\n",
10143 			    device_xname(sc->sc_dev));
10144 		return true;
10145 	}
10146 
10147 	return false;
10148 }
10149 
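/*
 * Check the DD (descriptor done) bit; if it's clear, resync the
 * descriptor for the next read and tell the caller to stop processing.
 */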
10150 static inline bool
10151 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
10152 {
10153 	struct wm_softc *sc = rxq->rxq_sc;
10154 
10155 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
10156 		NQRXC_STATUS_DD)) {
10157 		/* We have processed all of the receive descriptors. */
10158 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
10159 		return false;
10160 	}
10161 
10162 	return true;
10163 }
10164 
10165 static inline bool
10166 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
10167     uint16_t vlantag, struct mbuf *m)
10168 {
10169 
10170 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
10171 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
10172 		vlan_set_tag(m, le16toh(vlantag));
10173 	}
10174 
10175 	return true;
10176 }
10177 
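/*
 * Translate the hardware checksum status/error bits into mbuf
 * M_CSUM_* flags, unless the descriptor says checksumming was
 * skipped (IXSM).
 */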
10178 static inline void
10179 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
10180     uint32_t errors, struct mbuf *m)
10181 {
10182 	struct wm_softc *sc = rxq->rxq_sc;
10183 
10184 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
10185 		if (wm_rxdesc_is_set_status(sc, status,
10186 		    WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
10187 			WM_Q_EVCNT_INCR(rxq, ipsum);
10188 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
10189 			if (wm_rxdesc_is_set_error(sc, errors,
10190 			    WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
10191 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
10192 		}
10193 		if (wm_rxdesc_is_set_status(sc, status,
10194 		    WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
10195 			/*
10196 			 * Note: we don't know if this was TCP or UDP,
10197 			 * so we just set both bits, and expect the
10198 			 * upper layers to deal.
10199 			 */
10200 			WM_Q_EVCNT_INCR(rxq, tusum);
10201 			m->m_pkthdr.csum_flags |=
10202 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
10203 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
10204 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
10205 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
10206 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
10207 		}
10208 	}
10209 }
10210 
10211 /*
10212  * wm_rxeof:
10213  *
10214  *	Helper; handle receive interrupts.
10215  */
10216 static bool
10217 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
10218 {
10219 	struct wm_softc *sc = rxq->rxq_sc;
10220 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10221 	struct wm_rxsoft *rxs;
10222 	struct mbuf *m;
10223 	int i, len;
10224 	int count = 0;
10225 	uint32_t status, errors;
10226 	uint16_t vlantag;
10227 	bool more = false;
10228 
10229 	KASSERT(mutex_owned(rxq->rxq_lock));
10230 
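	/*
	 * Walk the ring starting at the last processed slot.  A packet
	 * may span several descriptors; fragments are collected on
	 * rxq_head via WM_RXCHAIN_LINK() until EOP is seen.
	 */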
10231 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
10232 		rxs = &rxq->rxq_soft[i];
10233 
10234 		DPRINTF(sc, WM_DEBUG_RX,
10235 		    ("%s: RX: checking descriptor %d\n",
10236 			device_xname(sc->sc_dev), i));
10237 		wm_cdrxsync(rxq, i,
10238 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
10239 
10240 		status = wm_rxdesc_get_status(rxq, i);
10241 		errors = wm_rxdesc_get_errors(rxq, i);
10242 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
10243 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
10244 #ifdef WM_DEBUG
10245 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
10246 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
10247 #endif
10248 
10249 		if (!wm_rxdesc_dd(rxq, i, status))
10250 			break;
10251 
10252 		if (limit-- == 0) {
10253 			more = true;
10254 			DPRINTF(sc, WM_DEBUG_RX,
10255 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
10256 				device_xname(sc->sc_dev), i));
10257 			break;
10258 		}
10259 
10260 		count++;
10261 		if (__predict_false(rxq->rxq_discard)) {
10262 			DPRINTF(sc, WM_DEBUG_RX,
10263 			    ("%s: RX: discarding contents of descriptor %d\n",
10264 				device_xname(sc->sc_dev), i));
10265 			wm_init_rxdesc(rxq, i);
10266 			if (wm_rxdesc_is_eop(rxq, status)) {
10267 				/* Reset our state. */
10268 				DPRINTF(sc, WM_DEBUG_RX,
10269 				    ("%s: RX: resetting rxdiscard -> 0\n",
10270 					device_xname(sc->sc_dev)));
10271 				rxq->rxq_discard = 0;
10272 			}
10273 			continue;
10274 		}
10275 
10276 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
10277 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
10278 
10279 		m = rxs->rxs_mbuf;
10280 
10281 		/*
10282 		 * Add a new receive buffer to the ring, unless of
10283 		 * course the length is zero. Treat the latter as a
10284 		 * failed mapping.
10285 		 */
10286 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
10287 			/*
10288 			 * Failed, throw away what we've done so
10289 			 * far, and discard the rest of the packet.
10290 			 */
10291 			if_statinc(ifp, if_ierrors);
10292 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
10293 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
10294 			wm_init_rxdesc(rxq, i);
10295 			if (!wm_rxdesc_is_eop(rxq, status))
10296 				rxq->rxq_discard = 1;
10297 			m_freem(rxq->rxq_head);
10298 			WM_RXCHAIN_RESET(rxq);
10299 			DPRINTF(sc, WM_DEBUG_RX,
10300 			    ("%s: RX: Rx buffer allocation failed, "
10301 			    "dropping packet%s\n", device_xname(sc->sc_dev),
10302 				rxq->rxq_discard ? " (discard)" : ""));
10303 			continue;
10304 		}
10305 
10306 		m->m_len = len;
10307 		rxq->rxq_len += len;
10308 		DPRINTF(sc, WM_DEBUG_RX,
10309 		    ("%s: RX: buffer at %p len %d\n",
10310 			device_xname(sc->sc_dev), m->m_data, len));
10311 
10312 		/* If this is not the end of the packet, keep looking. */
10313 		if (!wm_rxdesc_is_eop(rxq, status)) {
10314 			WM_RXCHAIN_LINK(rxq, m);
10315 			DPRINTF(sc, WM_DEBUG_RX,
10316 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
10317 				device_xname(sc->sc_dev), rxq->rxq_len));
10318 			continue;
10319 		}
10320 
10321 		/*
10322 		 * Okay, we have the entire packet now. The chip is configured
10323 		 * to include the FCS (not all chips can be configured to strip
10324 		 * it), so normally we need to trim it, except on I35[04] and
10325 		 * I21[01]: those chips have an erratum whereby the RCTL_SECRC
10326 		 * bit in the RCTL register is always set, so we don't trim on
10327 		 * them. PCH2 and newer chips also don't include the FCS when
10328 		 * jumbo frames are used, as a workaround for an erratum.
10329 		 * We may need to adjust the length of the previous mbuf in the
10330 		 * chain if the current mbuf is too short.
10331 		 */
10332 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
10333 			if (m->m_len < ETHER_CRC_LEN) {
10334 				rxq->rxq_tail->m_len
10335 				    -= (ETHER_CRC_LEN - m->m_len);
10336 				m->m_len = 0;
10337 			} else
10338 				m->m_len -= ETHER_CRC_LEN;
10339 			len = rxq->rxq_len - ETHER_CRC_LEN;
10340 		} else
10341 			len = rxq->rxq_len;
10342 
10343 		WM_RXCHAIN_LINK(rxq, m);
10344 
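		/* Terminate the mbuf chain and take it off the queue. */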
10345 		*rxq->rxq_tailp = NULL;
10346 		m = rxq->rxq_head;
10347 
10348 		WM_RXCHAIN_RESET(rxq);
10349 
10350 		DPRINTF(sc, WM_DEBUG_RX,
10351 		    ("%s: RX: have entire packet, len -> %d\n",
10352 			device_xname(sc->sc_dev), len));
10353 
10354 		/* If an error occurred, update stats and drop the packet. */
10355 		if (wm_rxdesc_has_errors(rxq, errors)) {
10356 			m_freem(m);
10357 			continue;
10358 		}
10359 
10360 		/* No errors.  Receive the packet. */
10361 		m_set_rcvif(m, ifp);
10362 		m->m_pkthdr.len = len;
10363 		/*
10364 		 * TODO
10365 		 * The rsshash and rsstype should be saved to this mbuf.
10366 		 */
10367 		DPRINTF(sc, WM_DEBUG_RX,
10368 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
10369 			device_xname(sc->sc_dev), rsstype, rsshash));
10370 
10371 		/*
10372 		 * If VLANs are enabled, VLAN packets have been unwrapped
10373 		 * for us.  Associate the tag with the packet.
10374 		 */
10375 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
10376 			continue;
10377 
10378 		/* Set up checksum info for this packet. */
10379 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
10380 
10381 		rxq->rxq_packets++;
10382 		rxq->rxq_bytes += len;
10383 		/* Pass it on. */
10384 		if_percpuq_enqueue(sc->sc_ipq, m);
10385 
10386 		if (rxq->rxq_stopping)
10387 			break;
10388 	}
10389 	rxq->rxq_ptr = i;
10390 
10391 	if (count != 0)
10392 		rnd_add_uint32(&sc->rnd_source, count);
10393 
10394 	DPRINTF(sc, WM_DEBUG_RX,
10395 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
10396 
10397 	return more;
10398 }
10399 
10400 /*
10401  * wm_linkintr_gmii:
10402  *
10403  *	Helper; handle link interrupts for GMII.
10404  */
10405 static void
10406 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
10407 {
10408 	device_t dev = sc->sc_dev;
10409 	uint32_t status, reg;
10410 	bool link;
10411 	bool dopoll = true;
10412 	int rv;
10413 
10414 	KASSERT(mutex_owned(sc->sc_core_lock));
10415 
10416 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
10417 		__func__));
10418 
10419 	if ((icr & ICR_LSC) == 0) {
10420 		if (icr & ICR_RXSEQ)
10421 			DPRINTF(sc, WM_DEBUG_LINK,
10422 			    ("%s: LINK Receive sequence error\n",
10423 				device_xname(dev)));
10424 		return;
10425 	}
10426 
10427 	/* Link status changed */
10428 	status = CSR_READ(sc, WMREG_STATUS);
10429 	link = status & STATUS_LU;
10430 	if (link) {
10431 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
10432 			device_xname(dev),
10433 			(status & STATUS_FD) ? "FDX" : "HDX"));
10434 		if (wm_phy_need_linkdown_discard(sc)) {
10435 			DPRINTF(sc, WM_DEBUG_LINK,
10436 			    ("%s: linkintr: Clear linkdown discard flag\n",
10437 				device_xname(dev)));
10438 			wm_clear_linkdown_discard(sc);
10439 		}
10440 	} else {
10441 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
10442 			device_xname(dev)));
10443 		if (wm_phy_need_linkdown_discard(sc)) {
10444 			DPRINTF(sc, WM_DEBUG_LINK,
10445 			    ("%s: linkintr: Set linkdown discard flag\n",
10446 				device_xname(dev)));
10447 			wm_set_linkdown_discard(sc);
10448 		}
10449 	}
10450 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
10451 		wm_gig_downshift_workaround_ich8lan(sc);
10452 
10453 	if ((sc->sc_type == WM_T_ICH8) && (sc->sc_phytype == WMPHY_IGP_3))
10454 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
10455 
10456 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
10457 		device_xname(dev)));
10458 	if ((sc->sc_flags & WM_F_DELAY_LINKUP) != 0) {
10459 		if (link) {
10460 			/*
10461 			 * To work around the problem, it's required to wait
10462 			 * several hundred milliseconds. The time depends
10463 			 * on the environment. Wait 1 second to be safe.
10464 			 */
10465 			dopoll = false;
10466 			getmicrotime(&sc->sc_linkup_delay_time);
10467 			sc->sc_linkup_delay_time.tv_sec += 1;
10468 		} else if (sc->sc_linkup_delay_time.tv_sec != 0) {
10469 			/*
10470 			 * Simplify by checking tv_sec only. It's enough.
10471 			 *
10472 			 * Currently it's not required to clear the time;
10473 			 * it's done just to make it clear that the timer
10474 			 * has stopped (for debugging).
10475 			 */
10476 
10477 			sc->sc_linkup_delay_time.tv_sec = 0;
10478 			sc->sc_linkup_delay_time.tv_usec = 0;
10479 		}
10480 	}
10481 
10482 	/*
10483 	 * Call mii_pollstat().
10484 	 *
10485 	 * Some (not all) systems using I35[04] or I21[01] don't send
10486 	 * packets soon after link-up. The MAC sends a packet to the PHY,
10487 	 * no error is observed, and gratuitous ARP and/or IPv6 DAD packets
10488 	 * are silently dropped. To avoid this, don't call mii_pollstat()
10489 	 * here, as it would send a LINK_STATE_UP notification to the upper
10490 	 * layer. Instead, mii_pollstat() will be called in
10491 	 * wm_gmii_mediastatus(), or mii_tick() will be called in wm_tick().
10492 	 */
10493 	if (dopoll)
10494 		mii_pollstat(&sc->sc_mii);
10495 
10496 	/* Do some workarounds soon after link status is changed. */
10497 
10498 	if (sc->sc_type == WM_T_82543) {
10499 		int miistatus, active;
10500 
10501 		/*
10502 		 * With 82543, we need to force speed and
10503 		 * duplex on the MAC equal to what the PHY
10504 		 * speed and duplex configuration is.
10505 		 */
10506 		miistatus = sc->sc_mii.mii_media_status;
10507 
10508 		if (miistatus & IFM_ACTIVE) {
10509 			active = sc->sc_mii.mii_media_active;
10510 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
10511 			switch (IFM_SUBTYPE(active)) {
10512 			case IFM_10_T:
10513 				sc->sc_ctrl |= CTRL_SPEED_10;
10514 				break;
10515 			case IFM_100_TX:
10516 				sc->sc_ctrl |= CTRL_SPEED_100;
10517 				break;
10518 			case IFM_1000_T:
10519 				sc->sc_ctrl |= CTRL_SPEED_1000;
10520 				break;
10521 			default:
10522 				/*
10523 				 * Fiber?
10524 				 * Should not enter here.
10525 				 */
10526 				device_printf(dev, "unknown media (%x)\n",
10527 				    active);
10528 				break;
10529 			}
10530 			if (active & IFM_FDX)
10531 				sc->sc_ctrl |= CTRL_FD;
10532 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10533 		}
10534 	} else if (sc->sc_type == WM_T_PCH) {
10535 		wm_k1_gig_workaround_hv(sc,
10536 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
10537 	}
10538 
10539 	/*
10540 	 * When connected at 10Mbps half-duplex, some parts are excessively
10541 	 * aggressive resulting in many collisions. To avoid this, increase
10542 	 * the IPG and reduce Rx latency in the PHY.
10543 	 */
10544 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_TGP)
10545 	    && link) {
10546 		uint32_t tipg_reg;
10547 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
10548 		bool fdx;
10549 		uint16_t emi_addr, emi_val;
10550 
10551 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
10552 		tipg_reg &= ~TIPG_IPGT_MASK;
10553 		fdx = status & STATUS_FD;
10554 
10555 		if (!fdx && (speed == STATUS_SPEED_10)) {
10556 			tipg_reg |= 0xff;
10557 			/* Reduce Rx latency in analog PHY */
10558 			emi_val = 0;
10559 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
10560 		    fdx && speed != STATUS_SPEED_1000) {
10561 			tipg_reg |= 0xc;
10562 			emi_val = 1;
10563 		} else {
10564 			/* Roll back the default values */
10565 			tipg_reg |= 0x08;
10566 			emi_val = 1;
10567 		}
10568 
10569 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
10570 
10571 		rv = sc->phy.acquire(sc);
10572 		if (rv)
10573 			return;
10574 
10575 		if (sc->sc_type == WM_T_PCH2)
10576 			emi_addr = I82579_RX_CONFIG;
10577 		else
10578 			emi_addr = I217_RX_CONFIG;
10579 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
10580 
10581 		if (sc->sc_type >= WM_T_PCH_LPT) {
10582 			uint16_t phy_reg;
10583 
10584 			sc->phy.readreg_locked(dev, 2,
10585 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
10586 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
10587 			if (speed == STATUS_SPEED_100
10588 			    || speed == STATUS_SPEED_10)
10589 				phy_reg |= 0x3e8;
10590 			else
10591 				phy_reg |= 0xfa;
10592 			sc->phy.writereg_locked(dev, 2,
10593 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
10594 
10595 			if (speed == STATUS_SPEED_1000) {
10596 				sc->phy.readreg_locked(dev, 2,
10597 				    HV_PM_CTRL, &phy_reg);
10598 
10599 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
10600 
10601 				sc->phy.writereg_locked(dev, 2,
10602 				    HV_PM_CTRL, phy_reg);
10603 			}
10604 		}
10605 		sc->phy.release(sc);
10606 
10607 		if (rv)
10608 			return;
10609 
10610 		if (sc->sc_type >= WM_T_PCH_SPT) {
10611 			uint16_t data, ptr_gap;
10612 
10613 			if (speed == STATUS_SPEED_1000) {
10614 				rv = sc->phy.acquire(sc);
10615 				if (rv)
10616 					return;
10617 
10618 				rv = sc->phy.readreg_locked(dev, 2,
10619 				    I82579_UNKNOWN1, &data);
10620 				if (rv) {
10621 					sc->phy.release(sc);
10622 					return;
10623 				}
10624 
10625 				ptr_gap = (data & (0x3ff << 2)) >> 2;
10626 				if (ptr_gap < 0x18) {
10627 					data &= ~(0x3ff << 2);
10628 					data |= (0x18 << 2);
10629 					rv = sc->phy.writereg_locked(dev,
10630 					    2, I82579_UNKNOWN1, data);
10631 				}
10632 				sc->phy.release(sc);
10633 				if (rv)
10634 					return;
10635 			} else {
10636 				rv = sc->phy.acquire(sc);
10637 				if (rv)
10638 					return;
10639 
10640 				rv = sc->phy.writereg_locked(dev, 2,
10641 				    I82579_UNKNOWN1, 0xc023);
10642 				sc->phy.release(sc);
10643 				if (rv)
10644 					return;
10645 
10646 			}
10647 		}
10648 	}
10649 
10650 	/*
10651 	 * I217 Packet Loss issue:
10652 	 * ensure that FEXTNVM4 Beacon Duration is set correctly
10653 	 * on power up.
10654 	 * Set the Beacon Duration for I217 to 8 usec
10655 	 */
10656 	if (sc->sc_type >= WM_T_PCH_LPT) {
10657 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
10658 		reg &= ~FEXTNVM4_BEACON_DURATION;
10659 		reg |= FEXTNVM4_BEACON_DURATION_8US;
10660 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
10661 	}
10662 
10663 	/* Work-around I218 hang issue */
10664 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
10665 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
10666 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
10667 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
10668 		wm_k1_workaround_lpt_lp(sc, link);
10669 
10670 	if (sc->sc_type >= WM_T_PCH_LPT) {
10671 		/*
10672 		 * Set platform power management values for Latency
10673 		 * Tolerance Reporting (LTR)
10674 		 */
10675 		wm_platform_pm_pch_lpt(sc,
10676 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
10677 	}
10678 
10679 	/* Clear link partner's EEE ability */
10680 	sc->eee_lp_ability = 0;
10681 
10682 	/* FEXTNVM6 K1-off workaround */
10683 	if (sc->sc_type == WM_T_PCH_SPT) {
10684 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
10685 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
10686 			reg |= FEXTNVM6_K1_OFF_ENABLE;
10687 		else
10688 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
10689 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
10690 	}
10691 
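	/* The remaining workarounds apply only while the link is up. */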
10692 	if (!link)
10693 		return;
10694 
10695 	switch (sc->sc_type) {
10696 	case WM_T_PCH2:
10697 		wm_k1_workaround_lv(sc);
10698 		/* FALLTHROUGH */
10699 	case WM_T_PCH:
10700 		if (sc->sc_phytype == WMPHY_82578)
10701 			wm_link_stall_workaround_hv(sc);
10702 		break;
10703 	default:
10704 		break;
10705 	}
10706 
10707 	/* Enable/Disable EEE after link up */
10708 	if (sc->sc_phytype > WMPHY_82579)
10709 		wm_set_eee_pchlan(sc);
10710 }
10711 
10712 /*
10713  * wm_linkintr_tbi:
10714  *
10715  *	Helper; handle link interrupts for TBI mode.
10716  */
10717 static void
10718 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
10719 {
10720 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10721 	uint32_t status;
10722 
10723 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
10724 		__func__));
10725 
10726 	status = CSR_READ(sc, WMREG_STATUS);
10727 	if (icr & ICR_LSC) {
10728 		wm_check_for_link(sc);
10729 		if (status & STATUS_LU) {
10730 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
10731 				device_xname(sc->sc_dev),
10732 				(status & STATUS_FD) ? "FDX" : "HDX"));
10733 			/*
10734 			 * NOTE: CTRL will update TFCE and RFCE automatically,
10735 			 * so we should update sc->sc_ctrl
10736 			 */
10737 
10738 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
10739 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
10740 			sc->sc_fcrtl &= ~FCRTL_XONE;
10741 			if (status & STATUS_FD)
10742 				sc->sc_tctl |=
10743 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
10744 			else
10745 				sc->sc_tctl |=
10746 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
10747 			if (sc->sc_ctrl & CTRL_TFCE)
10748 				sc->sc_fcrtl |= FCRTL_XONE;
10749 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
10750 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
10751 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
10752 			sc->sc_tbi_linkup = 1;
10753 			if_link_state_change(ifp, LINK_STATE_UP);
10754 		} else {
10755 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
10756 				device_xname(sc->sc_dev)));
10757 			sc->sc_tbi_linkup = 0;
10758 			if_link_state_change(ifp, LINK_STATE_DOWN);
10759 		}
10760 		/* Update LED */
10761 		wm_tbi_serdes_set_linkled(sc);
10762 	} else if (icr & ICR_RXSEQ)
10763 		DPRINTF(sc, WM_DEBUG_LINK,
10764 		    ("%s: LINK: Receive sequence error\n",
10765 			device_xname(sc->sc_dev)));
10766 }
10767 
10768 /*
10769  * wm_linkintr_serdes:
10770  *
10771  *	Helper; handle link interrupts for SERDES mode.
10772  */
10773 static void
10774 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
10775 {
10776 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10777 	struct mii_data *mii = &sc->sc_mii;
10778 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
10779 	uint32_t pcs_adv, pcs_lpab, reg;
10780 
10781 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
10782 		__func__));
10783 
10784 	if (icr & ICR_LSC) {
10785 		/* Check PCS */
10786 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
10787 		if ((reg & PCS_LSTS_LINKOK) != 0) {
10788 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
10789 				device_xname(sc->sc_dev)));
10790 			mii->mii_media_status |= IFM_ACTIVE;
10791 			sc->sc_tbi_linkup = 1;
10792 			if_link_state_change(ifp, LINK_STATE_UP);
10793 		} else {
10794 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
10795 				device_xname(sc->sc_dev)));
10796 			mii->mii_media_status |= IFM_NONE;
10797 			sc->sc_tbi_linkup = 0;
10798 			if_link_state_change(ifp, LINK_STATE_DOWN);
10799 			wm_tbi_serdes_set_linkled(sc);
10800 			return;
10801 		}
10802 		mii->mii_media_active |= IFM_1000_SX;
10803 		if ((reg & PCS_LSTS_FDX) != 0)
10804 			mii->mii_media_active |= IFM_FDX;
10805 		else
10806 			mii->mii_media_active |= IFM_HDX;
10807 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
10808 			/* Check flow */
10809 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
10810 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
10811 				DPRINTF(sc, WM_DEBUG_LINK,
10812 				    ("XXX LINKOK but not ACOMP\n"));
10813 				return;
10814 			}
10815 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
10816 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
10817 			DPRINTF(sc, WM_DEBUG_LINK,
10818 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
10819 			if ((pcs_adv & TXCW_SYM_PAUSE)
10820 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
10821 				mii->mii_media_active |= IFM_FLOW
10822 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
10823 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
10824 			    && (pcs_adv & TXCW_ASYM_PAUSE)
10825 			    && (pcs_lpab & TXCW_SYM_PAUSE)
10826 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
10827 				mii->mii_media_active |= IFM_FLOW
10828 				    | IFM_ETH_TXPAUSE;
10829 			else if ((pcs_adv & TXCW_SYM_PAUSE)
10830 			    && (pcs_adv & TXCW_ASYM_PAUSE)
10831 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
10832 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
10833 				mii->mii_media_active |= IFM_FLOW
10834 				    | IFM_ETH_RXPAUSE;
10835 		}
10836 		/* Update LED */
10837 		wm_tbi_serdes_set_linkled(sc);
10838 	} else
10839 		DPRINTF(sc, WM_DEBUG_LINK,
10840 		    ("%s: LINK: Receive sequence error\n",
10841 		    device_xname(sc->sc_dev)));
10842 }
10843 
10844 /*
10845  * wm_linkintr:
10846  *
10847  *	Helper; handle link interrupts.
10848  */
10849 static void
10850 wm_linkintr(struct wm_softc *sc, uint32_t icr)
10851 {
10852 
10853 	KASSERT(mutex_owned(sc->sc_core_lock));
10854 
10855 	if (sc->sc_flags & WM_F_HAS_MII)
10856 		wm_linkintr_gmii(sc, icr);
10857 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
10858 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
10859 		wm_linkintr_serdes(sc, icr);
10860 	else
10861 		wm_linkintr_tbi(sc, icr);
10862 }
10863 
10864 
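/*
 * Defer Tx/Rx processing for a queue to either the per-queue workqueue
 * or a softint, as selected by wmq_txrx_use_workqueue.  The
 * wmq_wq_enqueued flag keeps the same work from being enqueued twice.
 */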
10865 static inline void
10866 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
10867 {
10868 
10869 	if (wmq->wmq_txrx_use_workqueue) {
10870 		if (!wmq->wmq_wq_enqueued) {
10871 			wmq->wmq_wq_enqueued = true;
10872 			workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie,
10873 			    curcpu());
10874 		}
10875 	} else
10876 		softint_schedule(wmq->wmq_si);
10877 }
10878 
10879 static inline void
10880 wm_legacy_intr_disable(struct wm_softc *sc)
10881 {
10882 
10883 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
10884 }
10885 
10886 static inline void
10887 wm_legacy_intr_enable(struct wm_softc *sc)
10888 {
10889 
10890 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
10891 }
10892 
10893 /*
10894  * wm_intr_legacy:
10895  *
10896  *	Interrupt service routine for INTx and MSI.
10897  */
10898 static int
10899 wm_intr_legacy(void *arg)
10900 {
10901 	struct wm_softc *sc = arg;
10902 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10903 	struct wm_queue *wmq = &sc->sc_queue[0];
10904 	struct wm_txqueue *txq = &wmq->wmq_txq;
10905 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
10906 	u_int txlimit = sc->sc_tx_intr_process_limit;
10907 	u_int rxlimit = sc->sc_rx_intr_process_limit;
10908 	uint32_t icr, rndval = 0;
10909 	bool more = false;
10910 
10911 	icr = CSR_READ(sc, WMREG_ICR);
10912 	if ((icr & sc->sc_icr) == 0)
10913 		return 0;
10914 
10915 	DPRINTF(sc, WM_DEBUG_TX,
10916 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
10917 	if (rndval == 0)
10918 		rndval = icr;
10919 
10920 	mutex_enter(txq->txq_lock);
10921 
10922 	if (txq->txq_stopping) {
10923 		mutex_exit(txq->txq_lock);
10924 		return 1;
10925 	}
10926 
10927 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
10928 	if (icr & ICR_TXDW) {
10929 		DPRINTF(sc, WM_DEBUG_TX,
10930 		    ("%s: TX: got TXDW interrupt\n",
10931 			device_xname(sc->sc_dev)));
10932 		WM_Q_EVCNT_INCR(txq, txdw);
10933 	}
10934 #endif
10935 	if (txlimit > 0) {
10936 		more |= wm_txeof(txq, txlimit);
10937 		if (!IF_IS_EMPTY(&ifp->if_snd))
10938 			more = true;
10939 	} else
10940 		more = true;
10941 	mutex_exit(txq->txq_lock);
10942 
10943 	mutex_enter(rxq->rxq_lock);
10944 
10945 	if (rxq->rxq_stopping) {
10946 		mutex_exit(rxq->rxq_lock);
10947 		return 1;
10948 	}
10949 
10950 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
10951 	if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
10952 		DPRINTF(sc, WM_DEBUG_RX,
10953 		    ("%s: RX: got Rx intr %#" __PRIxBIT "\n",
10954 			device_xname(sc->sc_dev),
10955 			icr & (ICR_RXDMT0 | ICR_RXT0)));
10956 		WM_Q_EVCNT_INCR(rxq, intr);
10957 	}
10958 #endif
10959 	if (rxlimit > 0) {
10960 		/*
10961 		 * wm_rxeof() does *not* call upper layer functions directly,
10962 		 * as if_percpuq_enqueue() just calls softint_schedule().
10963 		 * So, we can call wm_rxeof() in interrupt context.
10964 		 */
10965 		more = wm_rxeof(rxq, rxlimit);
10966 	} else
10967 		more = true;
10968 
10969 	mutex_exit(rxq->rxq_lock);
10970 
10971 	mutex_enter(sc->sc_core_lock);
10972 
10973 	if (sc->sc_core_stopping) {
10974 		mutex_exit(sc->sc_core_lock);
10975 		return 1;
10976 	}
10977 
10978 	if (icr & (ICR_LSC | ICR_RXSEQ)) {
10979 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
10980 		wm_linkintr(sc, icr);
10981 	}
10982 	if ((icr & ICR_GPI(0)) != 0)
10983 		device_printf(sc->sc_dev, "got module interrupt\n");
10984 
10985 	mutex_exit(sc->sc_core_lock);
10986 
10987 	if (icr & ICR_RXO) {
10988 #if defined(WM_DEBUG)
10989 		log(LOG_WARNING, "%s: Receive overrun\n",
10990 		    device_xname(sc->sc_dev));
10991 #endif /* defined(WM_DEBUG) */
10992 	}
10993 
10994 	rnd_add_uint32(&sc->rnd_source, rndval);
10995 
10996 	if (more) {
10997 		/* Try to get more packets going. */
10998 		wm_legacy_intr_disable(sc);
10999 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
11000 		wm_sched_handle_queue(sc, wmq);
11001 	}
11002 
11003 	return 1;
11004 }
11005 
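/*
 * Mask this queue's Tx/Rx interrupt sources.  With MSI-X, only the
 * queue's own vector is masked; with INTx/MSI, all interrupts are
 * masked via IMC.
 */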
11006 static inline void
11007 wm_txrxintr_disable(struct wm_queue *wmq)
11008 {
11009 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
11010 
11011 	if (__predict_false(!wm_is_using_msix(sc))) {
11012 		wm_legacy_intr_disable(sc);
11013 		return;
11014 	}
11015 
11016 	if (sc->sc_type == WM_T_82574)
11017 		CSR_WRITE(sc, WMREG_IMC,
11018 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
11019 	else if (sc->sc_type == WM_T_82575)
11020 		CSR_WRITE(sc, WMREG_EIMC,
11021 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
11022 	else
11023 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
11024 }
11025 
11026 static inline void
11027 wm_txrxintr_enable(struct wm_queue *wmq)
11028 {
11029 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
11030 
11031 	wm_itrs_calculate(sc, wmq);
11032 
11033 	if (__predict_false(!wm_is_using_msix(sc))) {
11034 		wm_legacy_intr_enable(sc);
11035 		return;
11036 	}
11037 
11038 	/*
11039 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
11040 	 * here. It doesn't matter which of RXQ(0) and RXQ(1) enables
11041 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
11042 	 * while its wm_handle_queue(wmq) is running.
11043 	 */
11044 	if (sc->sc_type == WM_T_82574)
11045 		CSR_WRITE(sc, WMREG_IMS,
11046 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
11047 	else if (sc->sc_type == WM_T_82575)
11048 		CSR_WRITE(sc, WMREG_EIMS,
11049 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
11050 	else
11051 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
11052 }
11053 
11054 static int
11055 wm_txrxintr_msix(void *arg)
11056 {
11057 	struct wm_queue *wmq = arg;
11058 	struct wm_txqueue *txq = &wmq->wmq_txq;
11059 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
11060 	struct wm_softc *sc = txq->txq_sc;
11061 	u_int txlimit = sc->sc_tx_intr_process_limit;
11062 	u_int rxlimit = sc->sc_rx_intr_process_limit;
11063 	bool txmore;
11064 	bool rxmore;
11065 
11066 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
11067 
11068 	DPRINTF(sc, WM_DEBUG_TX,
11069 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
11070 
11071 	wm_txrxintr_disable(wmq);
11072 
11073 	mutex_enter(txq->txq_lock);
11074 
11075 	if (txq->txq_stopping) {
11076 		mutex_exit(txq->txq_lock);
11077 		return 1;
11078 	}
11079 
11080 	WM_Q_EVCNT_INCR(txq, txdw);
11081 	if (txlimit > 0) {
11082 		txmore = wm_txeof(txq, txlimit);
11083 		/* The deferred start is done in wm_handle_queue(). */
11084 	} else
11085 		txmore = true;
11086 	mutex_exit(txq->txq_lock);
11087 
11088 	DPRINTF(sc, WM_DEBUG_RX,
11089 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
11090 	mutex_enter(rxq->rxq_lock);
11091 
11092 	if (rxq->rxq_stopping) {
11093 		mutex_exit(rxq->rxq_lock);
11094 		return 1;
11095 	}
11096 
11097 	WM_Q_EVCNT_INCR(rxq, intr);
11098 	if (rxlimit > 0) {
11099 		rxmore = wm_rxeof(rxq, rxlimit);
11100 	} else
11101 		rxmore = true;
11102 	mutex_exit(rxq->rxq_lock);
11103 
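	/* Update the interrupt throttling rate for this queue. */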
11104 	wm_itrs_writereg(sc, wmq);
11105 
11106 	if (txmore || rxmore) {
11107 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
11108 		wm_sched_handle_queue(sc, wmq);
11109 	} else
11110 		wm_txrxintr_enable(wmq);
11111 
11112 	return 1;
11113 }
11114 
11115 static void
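/*
 * Softint/workqueue handler: continue the Tx/Rx processing that the
 * interrupt handler deferred, this time bounded by the "process"
 * limits, and re-enable the queue interrupt once no work remains.
 */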
11116 wm_handle_queue(void *arg)
11117 {
11118 	struct wm_queue *wmq = arg;
11119 	struct wm_txqueue *txq = &wmq->wmq_txq;
11120 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
11121 	struct wm_softc *sc = txq->txq_sc;
11122 	u_int txlimit = sc->sc_tx_process_limit;
11123 	u_int rxlimit = sc->sc_rx_process_limit;
11124 	bool txmore;
11125 	bool rxmore;
11126 
11127 	mutex_enter(txq->txq_lock);
11128 	if (txq->txq_stopping) {
11129 		mutex_exit(txq->txq_lock);
11130 		return;
11131 	}
11132 	txmore = wm_txeof(txq, txlimit);
11133 	wm_deferred_start_locked(txq);
11134 	mutex_exit(txq->txq_lock);
11135 
11136 	mutex_enter(rxq->rxq_lock);
11137 	if (rxq->rxq_stopping) {
11138 		mutex_exit(rxq->rxq_lock);
11139 		return;
11140 	}
11141 	WM_Q_EVCNT_INCR(rxq, defer);
11142 	rxmore = wm_rxeof(rxq, rxlimit);
11143 	mutex_exit(rxq->rxq_lock);
11144 
11145 	if (txmore || rxmore) {
11146 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
11147 		wm_sched_handle_queue(sc, wmq);
11148 	} else
11149 		wm_txrxintr_enable(wmq);
11150 }
11151 
11152 static void
11153 wm_handle_queue_work(struct work *wk, void *context)
11154 {
11155 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
11156 
11157 	/*
11158 	 * Workaround for some qemu environments, which don't stop
11159 	 * the interrupt immediately.
11160 	 */
11161 	wmq->wmq_wq_enqueued = false;
11162 	wm_handle_queue(wmq);
11163 }
11164 
11165 /*
11166  * wm_linkintr_msix:
11167  *
11168  *	Interrupt service routine for link status change for MSI-X.
11169  */
11170 static int
11171 wm_linkintr_msix(void *arg)
11172 {
11173 	struct wm_softc *sc = arg;
11174 	uint32_t reg;
11175 	bool has_rxo;
11176 
11177 	reg = CSR_READ(sc, WMREG_ICR);
11178 	mutex_enter(sc->sc_core_lock);
11179 	DPRINTF(sc, WM_DEBUG_LINK,
11180 	    ("%s: LINK: got link intr. ICR = %08x\n",
11181 		device_xname(sc->sc_dev), reg));
11182 
11183 	if (sc->sc_core_stopping)
11184 		goto out;
11185 
11186 	if ((reg & ICR_LSC) != 0) {
11187 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
11188 		wm_linkintr(sc, ICR_LSC);
11189 	}
11190 	if ((reg & ICR_GPI(0)) != 0)
11191 		device_printf(sc->sc_dev, "got module interrupt\n");
11192 
11193 	/*
11194 	 * XXX 82574 MSI-X mode workaround
11195 	 *
11196 	 * In 82574 MSI-X mode, a receive overrun (RXO) is raised on the
11197 	 * ICR_OTHER MSI-X vector, and neither the ICR_RXQ(0) nor the
11198 	 * ICR_RXQ(1) vector fires. So we generate ICR_RXQ(0) and ICR_RXQ(1)
11199 	 * interrupts by writing WMREG_ICS to process receive packets.
11200 	 */
11201 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
11202 #if defined(WM_DEBUG)
11203 		log(LOG_WARNING, "%s: Receive overrun\n",
11204 		    device_xname(sc->sc_dev));
11205 #endif /* defined(WM_DEBUG) */
11206 
11207 		has_rxo = true;
11208 		/*
11209 		 * The RXO interrupt fires at a very high rate when receive
11210 		 * traffic is heavy, so we use polling mode for ICR_OTHER, as
11211 		 * for the Tx/Rx interrupts. ICR_OTHER will be re-enabled at
11212 		 * the end of wm_txrxintr_msix(), which is kicked by both the
11213 		 * ICR_RXQ(0) and ICR_RXQ(1) interrupts.
11214 		 */
11215 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
11216 
11217 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
11218 	}
11219 
11222 out:
11223 	mutex_exit(sc->sc_core_lock);
11224 
11225 	if (sc->sc_type == WM_T_82574) {
11226 		if (!has_rxo)
11227 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
11228 		else
11229 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
11230 	} else if (sc->sc_type == WM_T_82575)
11231 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
11232 	else
11233 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
11234 
11235 	return 1;
11236 }
11237 
11238 /*
11239  * Media related.
11240  * GMII, SGMII, TBI (and SERDES)
11241  */
11242 
11243 /* Common */
11244 
11245 /*
11246  * wm_tbi_serdes_set_linkled:
11247  *
11248  *	Update the link LED on TBI and SERDES devices.
11249  */
11250 static void
11251 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
11252 {
11253 
11254 	if (sc->sc_tbi_linkup)
11255 		sc->sc_ctrl |= CTRL_SWDPIN(0);
11256 	else
11257 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
11258 
11259 	/* 82540 or newer devices are active low */
11260 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
11261 
11262 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11263 }
11264 
11265 /* GMII related */
11266 
11267 /*
11268  * wm_gmii_reset:
11269  *
11270  *	Reset the PHY.
11271  */
11272 static void
11273 wm_gmii_reset(struct wm_softc *sc)
11274 {
11275 	uint32_t reg;
11276 	int rv;
11277 
11278 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
11279 		device_xname(sc->sc_dev), __func__));
11280 
11281 	rv = sc->phy.acquire(sc);
11282 	if (rv != 0) {
11283 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
11284 		    __func__);
11285 		return;
11286 	}
11287 
11288 	switch (sc->sc_type) {
11289 	case WM_T_82542_2_0:
11290 	case WM_T_82542_2_1:
11291 		/* null */
11292 		break;
11293 	case WM_T_82543:
11294 		/*
11295 		 * With 82543, we need to force speed and duplex on the MAC
11296 		 * equal to what the PHY speed and duplex configuration is.
11297 		 * In addition, we need to perform a hardware reset on the PHY
11298 		 * to take it out of reset.
11299 		 */
11300 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
11301 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11302 
11303 		/* The PHY reset pin is active-low. */
11304 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
11305 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
11306 		    CTRL_EXT_SWDPIN(4));
11307 		reg |= CTRL_EXT_SWDPIO(4);
11308 
11309 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11310 		CSR_WRITE_FLUSH(sc);
11311 		delay(10*1000);
11312 
11313 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
11314 		CSR_WRITE_FLUSH(sc);
11315 		delay(150);
11316 #if 0
11317 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
11318 #endif
11319 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
11320 		break;
11321 	case WM_T_82544:	/* Reset 10000us */
11322 	case WM_T_82540:
11323 	case WM_T_82545:
11324 	case WM_T_82545_3:
11325 	case WM_T_82546:
11326 	case WM_T_82546_3:
11327 	case WM_T_82541:
11328 	case WM_T_82541_2:
11329 	case WM_T_82547:
11330 	case WM_T_82547_2:
11331 	case WM_T_82571:	/* Reset 100us */
11332 	case WM_T_82572:
11333 	case WM_T_82573:
11334 	case WM_T_82574:
11335 	case WM_T_82575:
11336 	case WM_T_82576:
11337 	case WM_T_82580:
11338 	case WM_T_I350:
11339 	case WM_T_I354:
11340 	case WM_T_I210:
11341 	case WM_T_I211:
11342 	case WM_T_82583:
11343 	case WM_T_80003:
11344 		/* Generic reset */
11345 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
11346 		CSR_WRITE_FLUSH(sc);
11347 		delay(20000);
11348 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11349 		CSR_WRITE_FLUSH(sc);
11350 		delay(20000);
11351 
11352 		if ((sc->sc_type == WM_T_82541)
11353 		    || (sc->sc_type == WM_T_82541_2)
11354 		    || (sc->sc_type == WM_T_82547)
11355 		    || (sc->sc_type == WM_T_82547_2)) {
11356 			/* Workarounds for igp are done in igp_reset() */
11357 			/* XXX add code to set LED after phy reset */
11358 		}
11359 		break;
11360 	case WM_T_ICH8:
11361 	case WM_T_ICH9:
11362 	case WM_T_ICH10:
11363 	case WM_T_PCH:
11364 	case WM_T_PCH2:
11365 	case WM_T_PCH_LPT:
11366 	case WM_T_PCH_SPT:
11367 	case WM_T_PCH_CNP:
11368 	case WM_T_PCH_TGP:
11369 		/* Generic reset */
11370 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
11371 		CSR_WRITE_FLUSH(sc);
11372 		delay(100);
11373 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11374 		CSR_WRITE_FLUSH(sc);
11375 		delay(150);
11376 		break;
11377 	default:
11378 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
11379 		    __func__);
11380 		break;
11381 	}
11382 
11383 	sc->phy.release(sc);
11384 
11385 	/* get_cfg_done */
11386 	wm_get_cfg_done(sc);
11387 
11388 	/* Extra setup */
11389 	switch (sc->sc_type) {
11390 	case WM_T_82542_2_0:
11391 	case WM_T_82542_2_1:
11392 	case WM_T_82543:
11393 	case WM_T_82544:
11394 	case WM_T_82540:
11395 	case WM_T_82545:
11396 	case WM_T_82545_3:
11397 	case WM_T_82546:
11398 	case WM_T_82546_3:
11399 	case WM_T_82541_2:
11400 	case WM_T_82547_2:
11401 	case WM_T_82571:
11402 	case WM_T_82572:
11403 	case WM_T_82573:
11404 	case WM_T_82574:
11405 	case WM_T_82583:
11406 	case WM_T_82575:
11407 	case WM_T_82576:
11408 	case WM_T_82580:
11409 	case WM_T_I350:
11410 	case WM_T_I354:
11411 	case WM_T_I210:
11412 	case WM_T_I211:
11413 	case WM_T_80003:
11414 		/* Null */
11415 		break;
11416 	case WM_T_82541:
11417 	case WM_T_82547:
11418 		/* XXX Actively configure the LED after PHY reset */
11419 		break;
11420 	case WM_T_ICH8:
11421 	case WM_T_ICH9:
11422 	case WM_T_ICH10:
11423 	case WM_T_PCH:
11424 	case WM_T_PCH2:
11425 	case WM_T_PCH_LPT:
11426 	case WM_T_PCH_SPT:
11427 	case WM_T_PCH_CNP:
11428 	case WM_T_PCH_TGP:
11429 		wm_phy_post_reset(sc);
11430 		break;
11431 	default:
11432 		panic("%s: unknown type\n", __func__);
11433 		break;
11434 	}
11435 }
11436 
11437 /*
11438  * Set up sc_phytype and mii_{read|write}reg.
11439  *
11440  *  To identify the PHY type, the correct read/write function must be
11441  * selected, and to select the correct read/write function, the PCI ID
11442  * or MAC type is needed without accessing any PHY register.
11443  *
11444  *  On the first call of this function, the PHY ID is not yet known, so
11445  * check the PCI ID or MAC type. The list of PCI IDs may not be
11446  * perfect, so the result might be incorrect.
11447  *
11448  *  On the second call, the PHY OUI and model are used to identify the
11449  * PHY type. This might still be imperfect because some comparison
11450  * entries may be missing, but it should be better than the first call.
11451  *
11452  *  If the newly detected result differs from the previous assumption,
11453  * a diagnostic message is printed.
11454  */
11455 static void
11456 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
11457     uint16_t phy_model)
11458 {
11459 	device_t dev = sc->sc_dev;
11460 	struct mii_data *mii = &sc->sc_mii;
11461 	uint16_t new_phytype = WMPHY_UNKNOWN;
11462 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
11463 	mii_readreg_t new_readreg;
11464 	mii_writereg_t new_writereg;
11465 	bool dodiag = true;
11466 
11467 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
11468 		device_xname(sc->sc_dev), __func__));
11469 
11470 	/*
11471 	 * 1000BASE-T SFP uses SGMII, and the first assumed PHY type is
11472 	 * always incorrect, so don't print diagnostic output on the 2nd call.
11473 	 */
11474 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
11475 		dodiag = false;
11476 
11477 	if (mii->mii_readreg == NULL) {
11478 		/*
11479 		 *  This is the first call of this function. For ICH and PCH
11480 		 * variants, it's difficult to determine the PHY access method
11481 		 * by sc_type, so use the PCI product ID for some devices.
11482 		 */
11483 
11484 		switch (sc->sc_pcidevid) {
11485 		case PCI_PRODUCT_INTEL_PCH_M_LM:
11486 		case PCI_PRODUCT_INTEL_PCH_M_LC:
11487 			/* 82577 */
11488 			new_phytype = WMPHY_82577;
11489 			break;
11490 		case PCI_PRODUCT_INTEL_PCH_D_DM:
11491 		case PCI_PRODUCT_INTEL_PCH_D_DC:
11492 			/* 82578 */
11493 			new_phytype = WMPHY_82578;
11494 			break;
11495 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
11496 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
11497 			/* 82579 */
11498 			new_phytype = WMPHY_82579;
11499 			break;
11500 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
11501 		case PCI_PRODUCT_INTEL_82801I_BM:
11502 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
11503 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
11504 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
11505 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
11506 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
11507 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
11508 			/* ICH8, 9, 10 with 82567 */
11509 			new_phytype = WMPHY_BM;
11510 			break;
11511 		default:
11512 			break;
11513 		}
11514 	} else {
11515 		/* It's not the first call. Use PHY OUI and model */
11516 		switch (phy_oui) {
11517 		case MII_OUI_ATTANSIC: /* atphy(4) */
11518 			switch (phy_model) {
11519 			case MII_MODEL_ATTANSIC_AR8021:
11520 				new_phytype = WMPHY_82578;
11521 				break;
11522 			default:
11523 				break;
11524 			}
11525 			break;
11526 		case MII_OUI_xxMARVELL:
11527 			switch (phy_model) {
11528 			case MII_MODEL_xxMARVELL_I210:
11529 				new_phytype = WMPHY_I210;
11530 				break;
11531 			case MII_MODEL_xxMARVELL_E1011:
11532 			case MII_MODEL_xxMARVELL_E1000_3:
11533 			case MII_MODEL_xxMARVELL_E1000_5:
11534 			case MII_MODEL_xxMARVELL_E1112:
11535 				new_phytype = WMPHY_M88;
11536 				break;
11537 			case MII_MODEL_xxMARVELL_E1149:
11538 				new_phytype = WMPHY_BM;
11539 				break;
11540 			case MII_MODEL_xxMARVELL_E1111:
11541 			case MII_MODEL_xxMARVELL_I347:
11542 			case MII_MODEL_xxMARVELL_E1512:
11543 			case MII_MODEL_xxMARVELL_E1340M:
11544 			case MII_MODEL_xxMARVELL_E1543:
11545 				new_phytype = WMPHY_M88;
11546 				break;
11547 			case MII_MODEL_xxMARVELL_I82563:
11548 				new_phytype = WMPHY_GG82563;
11549 				break;
11550 			default:
11551 				break;
11552 			}
11553 			break;
11554 		case MII_OUI_INTEL:
11555 			switch (phy_model) {
11556 			case MII_MODEL_INTEL_I82577:
11557 				new_phytype = WMPHY_82577;
11558 				break;
11559 			case MII_MODEL_INTEL_I82579:
11560 				new_phytype = WMPHY_82579;
11561 				break;
11562 			case MII_MODEL_INTEL_I217:
11563 				new_phytype = WMPHY_I217;
11564 				break;
11565 			case MII_MODEL_INTEL_I82580:
11566 				new_phytype = WMPHY_82580;
11567 				break;
11568 			case MII_MODEL_INTEL_I350:
11569 				new_phytype = WMPHY_I350;
11570 				break;
11571 			default:
11572 				break;
11573 			}
11574 			break;
11575 		case MII_OUI_yyINTEL:
11576 			switch (phy_model) {
11577 			case MII_MODEL_yyINTEL_I82562G:
11578 			case MII_MODEL_yyINTEL_I82562EM:
11579 			case MII_MODEL_yyINTEL_I82562ET:
11580 				new_phytype = WMPHY_IFE;
11581 				break;
11582 			case MII_MODEL_yyINTEL_IGP01E1000:
11583 				new_phytype = WMPHY_IGP;
11584 				break;
11585 			case MII_MODEL_yyINTEL_I82566:
11586 				new_phytype = WMPHY_IGP_3;
11587 				break;
11588 			default:
11589 				break;
11590 			}
11591 			break;
11592 		default:
11593 			break;
11594 		}
11595 
11596 		if (dodiag) {
11597 			if (new_phytype == WMPHY_UNKNOWN)
11598 				aprint_verbose_dev(dev,
11599 				    "%s: Unknown PHY model. OUI=%06x, "
11600 				    "model=%04x\n", __func__, phy_oui,
11601 				    phy_model);
11602 
11603 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
11604 			    && (sc->sc_phytype != new_phytype)) {
11605 				aprint_error_dev(dev, "Previously assumed PHY "
11606 				    "type(%u) was incorrect. PHY type from PHY "
11607 				    "ID = %u\n", sc->sc_phytype, new_phytype);
11608 			}
11609 		}
11610 	}
11611 
11612 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
11613 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
11614 		/* SGMII */
11615 		new_readreg = wm_sgmii_readreg;
11616 		new_writereg = wm_sgmii_writereg;
11617 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
11618 		/* BM2 (phyaddr == 1) */
11619 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
11620 		    && (new_phytype != WMPHY_BM)
11621 		    && (new_phytype != WMPHY_UNKNOWN))
11622 			doubt_phytype = new_phytype;
11623 		new_phytype = WMPHY_BM;
11624 		new_readreg = wm_gmii_bm_readreg;
11625 		new_writereg = wm_gmii_bm_writereg;
11626 	} else if (sc->sc_type >= WM_T_PCH) {
11627 		/* All PCH* use _hv_ */
11628 		new_readreg = wm_gmii_hv_readreg;
11629 		new_writereg = wm_gmii_hv_writereg;
11630 	} else if (sc->sc_type >= WM_T_ICH8) {
11631 		/* non-82567 ICH8, 9 and 10 */
11632 		new_readreg = wm_gmii_i82544_readreg;
11633 		new_writereg = wm_gmii_i82544_writereg;
11634 	} else if (sc->sc_type >= WM_T_80003) {
11635 		/* 80003 */
11636 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
11637 		    && (new_phytype != WMPHY_GG82563)
11638 		    && (new_phytype != WMPHY_UNKNOWN))
11639 			doubt_phytype = new_phytype;
11640 		new_phytype = WMPHY_GG82563;
11641 		new_readreg = wm_gmii_i80003_readreg;
11642 		new_writereg = wm_gmii_i80003_writereg;
11643 	} else if (sc->sc_type >= WM_T_I210) {
11644 		/* I210 and I211 */
11645 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
11646 		    && (new_phytype != WMPHY_I210)
11647 		    && (new_phytype != WMPHY_UNKNOWN))
11648 			doubt_phytype = new_phytype;
11649 		new_phytype = WMPHY_I210;
11650 		new_readreg = wm_gmii_gs40g_readreg;
11651 		new_writereg = wm_gmii_gs40g_writereg;
11652 	} else if (sc->sc_type >= WM_T_82580) {
11653 		/* 82580, I350 and I354 */
11654 		new_readreg = wm_gmii_82580_readreg;
11655 		new_writereg = wm_gmii_82580_writereg;
11656 	} else if (sc->sc_type >= WM_T_82544) {
11657 		/* 82544, 8254[0], 8254[56], 8254[17], 8257[1234] and 82583 */
11658 		new_readreg = wm_gmii_i82544_readreg;
11659 		new_writereg = wm_gmii_i82544_writereg;
11660 	} else {
11661 		new_readreg = wm_gmii_i82543_readreg;
11662 		new_writereg = wm_gmii_i82543_writereg;
11663 	}
11664 
11665 	if (new_phytype == WMPHY_BM) {
11666 		/* All BM use _bm_ */
11667 		new_readreg = wm_gmii_bm_readreg;
11668 		new_writereg = wm_gmii_bm_writereg;
11669 	}
11670 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_TGP)) {
11671 		/* All PCH* use _hv_ */
11672 		new_readreg = wm_gmii_hv_readreg;
11673 		new_writereg = wm_gmii_hv_writereg;
11674 	}
11675 
11676 	/* Diag output */
11677 	if (dodiag) {
11678 		if (doubt_phytype != WMPHY_UNKNOWN)
11679 			aprint_error_dev(dev, "Assumed new PHY type was "
11680 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
11681 			    new_phytype);
11682 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
11683 		    && (sc->sc_phytype != new_phytype))
11684 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
11685 			    " was incorrect. New PHY type = %u\n",
11686 			    sc->sc_phytype, new_phytype);
11687 
11688 		if ((mii->mii_readreg != NULL) &&
11689 		    (new_phytype == WMPHY_UNKNOWN))
11690 			aprint_error_dev(dev, "PHY type is still unknown.\n");
11691 
11692 		if ((mii->mii_readreg != NULL) &&
11693 		    (mii->mii_readreg != new_readreg))
11694 			aprint_error_dev(dev, "Previously assumed PHY "
11695 			    "read/write function was incorrect.\n");
11696 	}
11697 
11698 	/* Update now */
11699 	sc->sc_phytype = new_phytype;
11700 	mii->mii_readreg = new_readreg;
11701 	mii->mii_writereg = new_writereg;
11702 	if (new_readreg == wm_gmii_hv_readreg) {
11703 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
11704 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
11705 	} else if (new_readreg == wm_sgmii_readreg) {
11706 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
11707 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
11708 	} else if (new_readreg == wm_gmii_i82544_readreg) {
11709 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
11710 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
11711 	}
11712 }
11713 
11714 /*
11715  * wm_get_phy_id_82575:
11716  *
11717  * Return PHY ID. Return -1 if it failed.
11718  */
11719 static int
11720 wm_get_phy_id_82575(struct wm_softc *sc)
11721 {
11722 	uint32_t reg;
11723 	int phyid = -1;
11724 
11725 	/* XXX */
11726 	if ((sc->sc_flags & WM_F_SGMII) == 0)
11727 		return -1;
11728 
11729 	if (wm_sgmii_uses_mdio(sc)) {
11730 		switch (sc->sc_type) {
11731 		case WM_T_82575:
11732 		case WM_T_82576:
11733 			reg = CSR_READ(sc, WMREG_MDIC);
11734 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
11735 			break;
11736 		case WM_T_82580:
11737 		case WM_T_I350:
11738 		case WM_T_I354:
11739 		case WM_T_I210:
11740 		case WM_T_I211:
11741 			reg = CSR_READ(sc, WMREG_MDICNFG);
11742 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
11743 			break;
11744 		default:
11745 			return -1;
11746 		}
11747 	}
11748 
11749 	return phyid;
11750 }
11751 
11752 /*
11753  * wm_gmii_mediainit:
11754  *
11755  *	Initialize media for use on 1000BASE-T devices.
11756  */
11757 static void
11758 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
11759 {
11760 	device_t dev = sc->sc_dev;
11761 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
11762 	struct mii_data *mii = &sc->sc_mii;
11763 
11764 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
11765 		device_xname(sc->sc_dev), __func__));
11766 
11767 	/* We have GMII. */
11768 	sc->sc_flags |= WM_F_HAS_MII;
11769 
11770 	if (sc->sc_type == WM_T_80003)
11771 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
11772 	else
11773 		sc->sc_tipg = TIPG_1000T_DFLT;
11774 
11775 	/*
11776 	 * Let the chip set speed/duplex on its own based on
11777 	 * signals from the PHY.
11778 	 * XXXbouyer - I'm not sure this is right for the 80003,
11779 	 * the em driver only sets CTRL_SLU here - but it seems to work.
11780 	 */
11781 	sc->sc_ctrl |= CTRL_SLU;
11782 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11783 
11784 	/* Initialize our media structures and probe the GMII. */
11785 	mii->mii_ifp = ifp;
11786 
11787 	mii->mii_statchg = wm_gmii_statchg;
11788 
11789 	/* Get PHY control from SMBus to PCIe */
11790 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
11791 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
11792 	    || (sc->sc_type == WM_T_PCH_CNP) || (sc->sc_type == WM_T_PCH_TGP))
11793 		wm_init_phy_workarounds_pchlan(sc);
11794 
11795 	wm_gmii_reset(sc);
11796 
11797 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
11798 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
11799 	    wm_gmii_mediastatus, sc->sc_core_lock);
11800 
11801 	/* Setup internal SGMII PHY for SFP */
11802 	wm_sgmii_sfp_preconfig(sc);
11803 
11804 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
11805 	    || (sc->sc_type == WM_T_82580)
11806 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
11807 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
11808 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
11809 			/* Attach only one port */
11810 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
11811 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
11812 		} else {
11813 			int i, id;
11814 			uint32_t ctrl_ext;
11815 
11816 			id = wm_get_phy_id_82575(sc);
11817 			if (id != -1) {
11818 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
11819 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
11820 			}
11821 			if ((id == -1)
11822 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
11823 				/* Power on the SGMII PHY if it is disabled. */
11824 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
11825 				CSR_WRITE(sc, WMREG_CTRL_EXT,
11826 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
11827 				CSR_WRITE_FLUSH(sc);
11828 				delay(300*1000); /* XXX too long */
11829 
11830 				/*
11831 				 * Scan PHY addresses 1 through 7.
11832 				 *
11833 				 * I2C access fails with the I2C register's
11834 				 * ERROR bit set, so suppress error messages
11835 				 * while scanning.
11836 				 */
11837 				sc->phy.no_errprint = true;
11838 				for (i = 1; i < 8; i++)
11839 					mii_attach(sc->sc_dev, &sc->sc_mii,
11840 					    0xffffffff, i, MII_OFFSET_ANY,
11841 					    MIIF_DOPAUSE);
11842 				sc->phy.no_errprint = false;
11843 
11844 				/* Restore previous sfp cage power state */
11845 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
11846 			}
11847 		}
11848 	} else
11849 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
11850 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
11851 
11852 	/*
11853 	 * If the MAC is PCH2 or newer and no MII PHY was detected, call
11854 	 * wm_set_mdio_slow_mode_hv() as a workaround and retry.
11855 	 */
11856 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
11857 		(sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)
11858 		|| (sc->sc_type == WM_T_PCH_TGP))
11859 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
11860 		wm_set_mdio_slow_mode_hv(sc);
11861 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
11862 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
11863 	}
11864 
11865 	/*
11866 	 * (For ICH8 variants)
11867 	 * If PHY detection failed, use BM's r/w function and retry.
11868 	 */
11869 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
11870 		/* if failed, retry with *_bm_* */
11871 		aprint_verbose_dev(dev, "Assumed PHY access function "
11872 		    "(type = %d) might be incorrect. Use BM and retry.\n",
11873 		    sc->sc_phytype);
11874 		sc->sc_phytype = WMPHY_BM;
11875 		mii->mii_readreg = wm_gmii_bm_readreg;
11876 		mii->mii_writereg = wm_gmii_bm_writereg;
11877 
11878 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
11879 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
11880 	}
11881 
11882 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
11883 		/* No PHY was found. */
11884 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
11885 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
11886 		sc->sc_phytype = WMPHY_NONE;
11887 	} else {
11888 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
11889 
11890 		/*
11891 		 * PHY found. Check the PHY type again with a second call to
11892 		 * wm_gmii_setup_phytype().
11893 		 */
11894 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
11895 		    child->mii_mpd_model);
11896 
11897 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
11898 	}
11899 }
11900 
11901 /*
11902  * wm_gmii_mediachange:	[ifmedia interface function]
11903  *
11904  *	Set hardware to newly-selected media on a 1000BASE-T device.
11905  */
11906 static int
11907 wm_gmii_mediachange(struct ifnet *ifp)
11908 {
11909 	struct wm_softc *sc = ifp->if_softc;
11910 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
11911 	uint32_t reg;
11912 	int rc;
11913 
11914 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
11915 		device_xname(sc->sc_dev), __func__));
11916 
11917 	KASSERT(mutex_owned(sc->sc_core_lock));
11918 
11919 	if ((sc->sc_if_flags & IFF_UP) == 0)
11920 		return 0;
11921 
11922 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
11923 	if ((sc->sc_type == WM_T_82580)
11924 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
11925 	    || (sc->sc_type == WM_T_I211)) {
11926 		reg = CSR_READ(sc, WMREG_PHPM);
11927 		reg &= ~PHPM_GO_LINK_D;
11928 		CSR_WRITE(sc, WMREG_PHPM, reg);
11929 	}
11930 
11931 	/* Disable D0 LPLU. */
11932 	wm_lplu_d0_disable(sc);
11933 
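	/*
	 * With autonegotiation, or on anything newer than the 82543, let
	 * the MAC take speed/duplex from the PHY (clear the force bits).
	 * Otherwise disable auto-speed detection and force speed/duplex
	 * in CTRL to match the selected media.
	 */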
11934 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
11935 	sc->sc_ctrl |= CTRL_SLU;
11936 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
11937 	    || (sc->sc_type > WM_T_82543)) {
11938 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
11939 	} else {
11940 		sc->sc_ctrl &= ~CTRL_ASDE;
11941 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
11942 		if (ife->ifm_media & IFM_FDX)
11943 			sc->sc_ctrl |= CTRL_FD;
11944 		switch (IFM_SUBTYPE(ife->ifm_media)) {
11945 		case IFM_10_T:
11946 			sc->sc_ctrl |= CTRL_SPEED_10;
11947 			break;
11948 		case IFM_100_TX:
11949 			sc->sc_ctrl |= CTRL_SPEED_100;
11950 			break;
11951 		case IFM_1000_T:
11952 			sc->sc_ctrl |= CTRL_SPEED_1000;
11953 			break;
11954 		case IFM_NONE:
11955 			/* There is no specific setting for IFM_NONE */
11956 			break;
11957 		default:
11958 			panic("wm_gmii_mediachange: bad media 0x%x",
11959 			    ife->ifm_media);
11960 		}
11961 	}
11962 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11963 	CSR_WRITE_FLUSH(sc);
11964 
11965 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
11966 		wm_serdes_mediachange(ifp);
11967 
11968 	if (sc->sc_type <= WM_T_82543)
11969 		wm_gmii_reset(sc);
11970 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
11971 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
11972 		/* Allow time for the SFP cage to power up the PHY. */
11973 		delay(300 * 1000);
11974 		wm_gmii_reset(sc);
11975 	}
11976 
11977 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
11978 		return 0;
11979 	return rc;
11980 }
11981 
11982 /*
11983  * wm_gmii_mediastatus:	[ifmedia interface function]
11984  *
11985  *	Get the current interface media status on a 1000BASE-T device.
11986  */
11987 static void
11988 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
11989 {
11990 	struct wm_softc *sc = ifp->if_softc;
11991 	struct ethercom *ec = &sc->sc_ethercom;
11992 	struct mii_data *mii;
11993 	bool dopoll = true;
11994 
11995 	/*
11996 	 * A normal driver would call ether_mediastatus() here. It is
11997 	 * open-coded so that mii_pollstat() can be skipped when needed.
11998 	 */
11999 	KASSERT(mutex_owned(sc->sc_core_lock));
12000 	KASSERT(ec->ec_mii != NULL);
12001 	KASSERT(mii_locked(ec->ec_mii));
12002 
12003 	mii = ec->ec_mii;
12004 	if ((sc->sc_flags & WM_F_DELAY_LINKUP) != 0) {
12005 		struct timeval now;
12006 
12007 		getmicrotime(&now);
12008 		if (timercmp(&now, &sc->sc_linkup_delay_time, <))
12009 			dopoll = false;
12010 		else if (sc->sc_linkup_delay_time.tv_sec != 0) {
12011 			/* Delay expired. Checking tv_sec alone suffices. */
12012 
12013 			sc->sc_linkup_delay_time.tv_sec = 0;
12014 			sc->sc_linkup_delay_time.tv_usec = 0;
12015 		}
12016 	}
12017 
12018 	/*
12019 	 * Don't call mii_pollstat() while doing workaround.
12020 	 * See also wm_linkintr_gmii() and wm_tick().
12021 	 */
12022 	if (dopoll)
12023 		mii_pollstat(mii);
12024 	ifmr->ifm_active = mii->mii_media_active;
12025 	ifmr->ifm_status = mii->mii_media_status;
12026 
12027 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
12028 	    | sc->sc_flowflags;
12029 }
12030 
12031 #define	MDI_IO		CTRL_SWDPIN(2)
12032 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
12033 #define	MDI_CLK		CTRL_SWDPIN(3)
12034 
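/*
 * On the i82543 the driver bit-bangs MII management frames through
 * the software-definable pins defined above: SWDPIN(2) carries the
 * MDIO data and SWDPIN(3) the MDC clock, with 10us delays between
 * pin transitions.
 */
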
12035 static void
12036 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
12037 {
12038 	uint32_t i, v;
12039 
12040 	v = CSR_READ(sc, WMREG_CTRL);
12041 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
12042 	v |= MDI_DIR | CTRL_SWDPIO(3);
12043 
12044 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
12045 		if (data & i)
12046 			v |= MDI_IO;
12047 		else
12048 			v &= ~MDI_IO;
12049 		CSR_WRITE(sc, WMREG_CTRL, v);
12050 		CSR_WRITE_FLUSH(sc);
12051 		delay(10);
12052 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
12053 		CSR_WRITE_FLUSH(sc);
12054 		delay(10);
12055 		CSR_WRITE(sc, WMREG_CTRL, v);
12056 		CSR_WRITE_FLUSH(sc);
12057 		delay(10);
12058 	}
12059 }
12060 
12061 static uint16_t
12062 wm_i82543_mii_recvbits(struct wm_softc *sc)
12063 {
12064 	uint32_t v, i;
12065 	uint16_t data = 0;
12066 
12067 	v = CSR_READ(sc, WMREG_CTRL);
12068 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
12069 	v |= CTRL_SWDPIO(3);
12070 
12071 	CSR_WRITE(sc, WMREG_CTRL, v);
12072 	CSR_WRITE_FLUSH(sc);
12073 	delay(10);
12074 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
12075 	CSR_WRITE_FLUSH(sc);
12076 	delay(10);
12077 	CSR_WRITE(sc, WMREG_CTRL, v);
12078 	CSR_WRITE_FLUSH(sc);
12079 	delay(10);
12080 
12081 	for (i = 0; i < 16; i++) {
12082 		data <<= 1;
12083 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
12084 		CSR_WRITE_FLUSH(sc);
12085 		delay(10);
12086 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
12087 			data |= 1;
12088 		CSR_WRITE(sc, WMREG_CTRL, v);
12089 		CSR_WRITE_FLUSH(sc);
12090 		delay(10);
12091 	}
12092 
12093 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
12094 	CSR_WRITE_FLUSH(sc);
12095 	delay(10);
12096 	CSR_WRITE(sc, WMREG_CTRL, v);
12097 	CSR_WRITE_FLUSH(sc);
12098 	delay(10);
12099 
12100 	return data;
12101 }
12102 
12103 #undef MDI_IO
12104 #undef MDI_DIR
12105 #undef MDI_CLK
12106 
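/*
 * The two helpers above implement IEEE 802.3 clause 22 management
 * frames: a preamble of 32 ones, then ST (01), OP (10 for read, 01
 * for write), the 5-bit PHY and register addresses, a turnaround,
 * and finally 16 data bits, most significant bit first.
 */
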
12107 /*
12108  * wm_gmii_i82543_readreg:	[mii interface function]
12109  *
12110  *	Read a PHY register on the GMII (i82543 version).
12111  */
12112 static int
12113 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
12114 {
12115 	struct wm_softc *sc = device_private(dev);
12116 
12117 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
12118 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
12119 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
12120 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
12121 
12122 	DPRINTF(sc, WM_DEBUG_GMII,
12123 	    ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
12124 		device_xname(dev), phy, reg, *val));
12125 
12126 	return 0;
12127 }
12128 
12129 /*
12130  * wm_gmii_i82543_writereg:	[mii interface function]
12131  *
12132  *	Write a PHY register on the GMII (i82543 version).
12133  */
12134 static int
12135 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
12136 {
12137 	struct wm_softc *sc = device_private(dev);
12138 
12139 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
12140 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
12141 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
12142 	    (MII_COMMAND_START << 30), 32);
12143 
12144 	return 0;
12145 }
12146 
12147 /*
12148  * wm_gmii_mdic_readreg:	[mii interface function]
12149  *
12150  *	Read a PHY register on the GMII.
12151  */
12152 static int
12153 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
12154 {
12155 	struct wm_softc *sc = device_private(dev);
12156 	uint32_t mdic = 0;
12157 	int i;
12158 
12159 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
12160 	    && (reg > MII_ADDRMASK)) {
12161 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
12162 		    __func__, sc->sc_phytype, reg);
12163 		reg &= MII_ADDRMASK;
12164 	}
12165 
12166 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
12167 	    MDIC_REGADD(reg));
12168 
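	/*
	 * Poll for completion; the hardware sets MDIC_READY when the
	 * transaction finishes.
	 */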
12169 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
12170 		delay(50);
12171 		mdic = CSR_READ(sc, WMREG_MDIC);
12172 		if (mdic & MDIC_READY)
12173 			break;
12174 	}
12175 
12176 	if ((mdic & MDIC_READY) == 0) {
12177 		DPRINTF(sc, WM_DEBUG_GMII,
12178 		    ("%s: MDIC read timed out: phy %d reg %d\n",
12179 			device_xname(dev), phy, reg));
12180 		return ETIMEDOUT;
12181 	} else if (mdic & MDIC_E) {
12182 		/* This is normal if no PHY is present. */
12183 		DPRINTF(sc, WM_DEBUG_GMII,
12184 		    ("%s: MDIC read error: phy %d reg %d\n",
12185 			device_xname(dev), phy, reg));
12186 		return -1;
12187 	} else
12188 		*val = MDIC_DATA(mdic);
12189 
12190 	/*
12191 	 * Allow some time after each MDIC transaction to avoid
12192 	 * reading duplicate data in the next MDIC transaction.
12193 	 */
12194 	if (sc->sc_type == WM_T_PCH2)
12195 		delay(100);
12196 
12197 	return 0;
12198 }
12199 
12200 /*
12201  * wm_gmii_mdic_writereg:	[mii interface function]
12202  *
12203  *	Write a PHY register on the GMII.
12204  */
12205 static int
12206 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
12207 {
12208 	struct wm_softc *sc = device_private(dev);
12209 	uint32_t mdic = 0;
12210 	int i;
12211 
12212 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
12213 	    && (reg > MII_ADDRMASK)) {
12214 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
12215 		    __func__, sc->sc_phytype, reg);
12216 		reg &= MII_ADDRMASK;
12217 	}
12218 
12219 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
12220 	    MDIC_REGADD(reg) | MDIC_DATA(val));
12221 
12222 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
12223 		delay(50);
12224 		mdic = CSR_READ(sc, WMREG_MDIC);
12225 		if (mdic & MDIC_READY)
12226 			break;
12227 	}
12228 
12229 	if ((mdic & MDIC_READY) == 0) {
12230 		DPRINTF(sc, WM_DEBUG_GMII,
12231 		    ("%s: MDIC write timed out: phy %d reg %d\n",
12232 			device_xname(dev), phy, reg));
12233 		return ETIMEDOUT;
12234 	} else if (mdic & MDIC_E) {
12235 		DPRINTF(sc, WM_DEBUG_GMII,
12236 		    ("%s: MDIC write error: phy %d reg %d\n",
12237 			device_xname(dev), phy, reg));
12238 		return -1;
12239 	}
12240 
12241 	/*
12242 	 * Allow some time after each MDIC transaction to avoid
12243 	 * reading duplicate data in the next MDIC transaction.
12244 	 */
12245 	if (sc->sc_type == WM_T_PCH2)
12246 		delay(100);
12247 
12248 	return 0;
12249 }
12250 
12251 /*
12252  * wm_gmii_i82544_readreg:	[mii interface function]
12253  *
12254  *	Read a PHY register on the GMII.
12255  */
12256 static int
12257 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
12258 {
12259 	struct wm_softc *sc = device_private(dev);
12260 	int rv;
12261 
12262 	rv = sc->phy.acquire(sc);
12263 	if (rv != 0) {
12264 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12265 		return rv;
12266 	}
12267 
12268 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
12269 
12270 	sc->phy.release(sc);
12271 
12272 	return rv;
12273 }
12274 
12275 static int
12276 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
12277 {
12278 	struct wm_softc *sc = device_private(dev);
12279 	int rv;
12280 
12281 	switch (sc->sc_phytype) {
12282 	case WMPHY_IGP:
12283 	case WMPHY_IGP_2:
12284 	case WMPHY_IGP_3:
12285 		if (reg > BME1000_MAX_MULTI_PAGE_REG) {
12286 			rv = wm_gmii_mdic_writereg(dev, phy,
12287 			    IGPHY_PAGE_SELECT, reg);
12288 			if (rv != 0)
12289 				return rv;
12290 		}
12291 		break;
12292 	default:
12293 #ifdef WM_DEBUG
12294 		if ((reg >> MII_ADDRBITS) != 0)
12295 			device_printf(dev,
12296 			    "%s: PHYTYPE = 0x%x, addr = 0x%02x\n",
12297 			    __func__, sc->sc_phytype, reg);
12298 #endif
12299 		break;
12300 	}
12301 
12302 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
12303 }
12304 
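/*
 * For IGP PHYs, registers above BME1000_MAX_MULTI_PAGE_REG encode
 * their page in the upper bits: the full value is first written to
 * IGPHY_PAGE_SELECT, and the low bits (reg & MII_ADDRMASK) then
 * address the register within that page.
 */
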
12305 /*
12306  * wm_gmii_i82544_writereg:	[mii interface function]
12307  *
12308  *	Write a PHY register on the GMII.
12309  */
12310 static int
12311 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
12312 {
12313 	struct wm_softc *sc = device_private(dev);
12314 	int rv;
12315 
12316 	rv = sc->phy.acquire(sc);
12317 	if (rv != 0) {
12318 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12319 		return rv;
12320 	}
12321 
12322 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
12323 	sc->phy.release(sc);
12324 
12325 	return rv;
12326 }
12327 
12328 static int
12329 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
12330 {
12331 	struct wm_softc *sc = device_private(dev);
12332 	int rv;
12333 
12334 	switch (sc->sc_phytype) {
12335 	case WMPHY_IGP:
12336 	case WMPHY_IGP_2:
12337 	case WMPHY_IGP_3:
12338 		if (reg > BME1000_MAX_MULTI_PAGE_REG) {
12339 			rv = wm_gmii_mdic_writereg(dev, phy,
12340 			    IGPHY_PAGE_SELECT, reg);
12341 			if (rv != 0)
12342 				return rv;
12343 		}
12344 		break;
12345 	default:
12346 #ifdef WM_DEBUG
12347 		if ((reg >> MII_ADDRBITS) != 0)
12348 			device_printf(dev,
12349 			    "%s: PHYTYPE = 0x%x, addr = 0x%02x\n",
12350 			    __func__, sc->sc_phytype, reg);
12351 #endif
12352 		break;
12353 	}
12354 
12355 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
12356 }
12357 
12358 /*
12359  * wm_gmii_i80003_readreg:	[mii interface function]
12360  *
12361  *	Read a PHY register on the Kumeran bus.
12362  * This could be handled by the PHY layer if we didn't have to lock the
12363  * resource ...
12364  */
12365 static int
12366 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
12367 {
12368 	struct wm_softc *sc = device_private(dev);
12369 	int page_select;
12370 	uint16_t temp, temp2;
12371 	int rv;
12372 
12373 	if (phy != 1) /* Only one PHY on kumeran bus */
12374 		return -1;
12375 
12376 	rv = sc->phy.acquire(sc);
12377 	if (rv != 0) {
12378 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12379 		return rv;
12380 	}
12381 
12382 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
12383 		page_select = GG82563_PHY_PAGE_SELECT;
12384 	else {
12385 		/*
12386 		 * Use Alternative Page Select register to access registers
12387 		 * 30 and 31.
12388 		 */
12389 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
12390 	}
12391 	temp = reg >> GG82563_PAGE_SHIFT;
12392 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
12393 		goto out;
12394 
12395 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
12396 		/*
12397 		 * Wait another 200us to work around a bug with the ready
12398 		 * bit in the MDIC register.
12399 		 */
12400 		delay(200);
12401 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
12402 		if ((rv != 0) || (temp2 != temp)) {
12403 			device_printf(dev, "%s failed\n", __func__);
12404 			rv = -1;
12405 			goto out;
12406 		}
12407 		delay(200);
12408 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
12409 		delay(200);
12410 	} else
12411 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
12412 
12413 out:
12414 	sc->phy.release(sc);
12415 	return rv;
12416 }
12417 
12418 /*
12419  * wm_gmii_i80003_writereg:	[mii interface function]
12420  *
12421  *	Write a PHY register on the Kumeran bus.
12422  * This could be handled by the PHY layer if we didn't have to lock the
12423  * resource ...
12424  */
12425 static int
12426 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
12427 {
12428 	struct wm_softc *sc = device_private(dev);
12429 	int page_select, rv;
12430 	uint16_t temp, temp2;
12431 
12432 	if (phy != 1) /* Only one PHY on kumeran bus */
12433 		return -1;
12434 
12435 	rv = sc->phy.acquire(sc);
12436 	if (rv != 0) {
12437 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12438 		return rv;
12439 	}
12440 
12441 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
12442 		page_select = GG82563_PHY_PAGE_SELECT;
12443 	else {
12444 		/*
12445 		 * Use Alternative Page Select register to access registers
12446 		 * 30 and 31.
12447 		 */
12448 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
12449 	}
12450 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
12451 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
12452 		goto out;
12453 
12454 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
12455 		/*
12456 		 * Wait another 200us to work around a bug with the ready
12457 		 * bit in the MDIC register.
12458 		 */
12459 		delay(200);
12460 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
12461 		if ((rv != 0) || (temp2 != temp)) {
12462 			device_printf(dev, "%s failed\n", __func__);
12463 			rv = -1;
12464 			goto out;
12465 		}
12466 		delay(200);
12467 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
12468 		delay(200);
12469 	} else
12470 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
12471 
12472 out:
12473 	sc->phy.release(sc);
12474 	return rv;
12475 }
12476 
12477 /*
12478  * wm_gmii_bm_readreg:	[mii interface function]
12479  *
12480  *	Read a PHY register on the BM PHY.
12481  * This could be handled by the PHY layer if we didn't have to lock the
12482  * resource ...
12483  */
12484 static int
12485 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
12486 {
12487 	struct wm_softc *sc = device_private(dev);
12488 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
12489 	int rv;
12490 
12491 	rv = sc->phy.acquire(sc);
12492 	if (rv != 0) {
12493 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12494 		return rv;
12495 	}
12496 
12497 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
12498 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
12499 		    || (reg == 31)) ? 1 : phy;
12500 	/* Page 800 works differently than the rest so it has its own func */
12501 	if (page == BM_WUC_PAGE) {
12502 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
12503 		goto release;
12504 	}
12505 
12506 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
12507 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
12508 		    && (sc->sc_type != WM_T_82583))
12509 			rv = wm_gmii_mdic_writereg(dev, phy,
12510 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
12511 		else
12512 			rv = wm_gmii_mdic_writereg(dev, phy,
12513 			    BME1000_PHY_PAGE_SELECT, page);
12514 		if (rv != 0)
12515 			goto release;
12516 	}
12517 
12518 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
12519 
12520 release:
12521 	sc->phy.release(sc);
12522 	return rv;
12523 }
12524 
12525 /*
12526  * wm_gmii_bm_writereg:	[mii interface function]
12527  *
12528  *	Write a PHY register on the BM PHY.
12529  * This could be handled by the PHY layer if we didn't have to lock the
12530  * resource ...
12531  */
12532 static int
12533 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
12534 {
12535 	struct wm_softc *sc = device_private(dev);
12536 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
12537 	int rv;
12538 
12539 	rv = sc->phy.acquire(sc);
12540 	if (rv != 0) {
12541 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12542 		return rv;
12543 	}
12544 
12545 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
12546 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
12547 		    || (reg == 31)) ? 1 : phy;
12548 	/* Page 800 works differently than the rest so it has its own func */
12549 	if (page == BM_WUC_PAGE) {
12550 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
12551 		goto release;
12552 	}
12553 
12554 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
12555 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
12556 		    && (sc->sc_type != WM_T_82583))
12557 			rv = wm_gmii_mdic_writereg(dev, phy,
12558 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
12559 		else
12560 			rv = wm_gmii_mdic_writereg(dev, phy,
12561 			    BME1000_PHY_PAGE_SELECT, page);
12562 		if (rv != 0)
12563 			goto release;
12564 	}
12565 
12566 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
12567 
12568 release:
12569 	sc->phy.release(sc);
12570 	return rv;
12571 }
12572 
12573 /*
12574  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
12575  *  @dev: pointer to the HW structure
12576  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
12577  *
12578  *  Assumes semaphore already acquired and phy_reg points to a valid memory
12579  *  address to store contents of the BM_WUC_ENABLE_REG register.
12580  */
12581 static int
12582 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
12583 {
12584 #ifdef WM_DEBUG
12585 	struct wm_softc *sc = device_private(dev);
12586 #endif
12587 	uint16_t temp;
12588 	int rv;
12589 
12590 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
12591 		device_xname(dev), __func__));
12592 
12593 	if (!phy_regp)
12594 		return -1;
12595 
12596 	/* All page select, port ctrl and wakeup registers use phy address 1 */
12597 
12598 	/* Select Port Control Registers page */
12599 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12600 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
12601 	if (rv != 0)
12602 		return rv;
12603 
12604 	/* Read WUCE and save it */
12605 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
12606 	if (rv != 0)
12607 		return rv;
12608 
12609 	/* Enable both PHY wakeup mode and Wakeup register page writes.
12610 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
12611 	 */
12612 	temp = *phy_regp;
12613 	temp |= BM_WUC_ENABLE_BIT;
12614 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
12615 
12616 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
12617 		return rv;
12618 
12619 	/* Select Host Wakeup Registers page - caller now able to write
12620 	 * registers on the Wakeup registers page
12621 	 */
12622 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12623 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
12624 }
12625 
12626 /*
12627  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
12628  *  @dev: pointer to the HW structure
12629  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
12630  *
12631  *  Restore BM_WUC_ENABLE_REG to its original value.
12632  *
12633  *  Assumes semaphore already acquired and *phy_reg is the contents of the
12634  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
12635  *  caller.
12636  */
12637 static int
12638 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
12639 {
12640 #ifdef WM_DEBUG
12641 	struct wm_softc *sc = device_private(dev);
12642 #endif
12643 
12644 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
12645 		device_xname(dev), __func__));
12646 
12647 	if (!phy_regp)
12648 		return -1;
12649 
12650 	/* Select Port Control Registers page */
12651 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12652 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
12653 
12654 	/* Restore 769.17 to its original value */
12655 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
12656 
12657 	return 0;
12658 }
12659 
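/*
 * Illustrative sketch (not driver code) of one wakeup-register read,
 * mirroring what wm_access_phy_wakeup_reg_bm() below does when
 * page_set is false, with regnum the BM_PHY_REG_NUM() of the target:
 *
 *	uint16_t wuce, data;
 *
 *	wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
 *	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
 *	wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, &data);
 *	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
 */
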
12660 /*
12661  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
12662  *  @dev: pointer to the device
12663  *  @offset: register offset to be read or written
12664  *  @val: pointer to the data to read or write
12665  *  @rd: determines if operation is read or write
12666  *  @page_set: BM_WUC_PAGE already set and access enabled
12667  *
12668  *  Read the PHY register at offset and store the retrieved information in
12669  *  data, or write data to PHY register at offset.  Note the procedure to
12670  *  access the PHY wakeup registers is different than reading the other PHY
12671  *  registers. It works as such:
12672  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
12673  *  2) Set page to 800 for host (801 if we were manageability)
12674  *  3) Write the address using the address opcode (0x11)
12675  *  4) Read or write the data using the data opcode (0x12)
12676  *  5) Restore 769.17.2 to its original value
12677  *
12678  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
12679  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
12680  *
12681  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
12682  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
12683  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
12684  */
12685 static int
12686 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd,
12687     bool page_set)
12688 {
12689 	struct wm_softc *sc = device_private(dev);
12690 	uint16_t regnum = BM_PHY_REG_NUM(offset);
12691 	uint16_t page = BM_PHY_REG_PAGE(offset);
12692 	uint16_t wuce;
12693 	int rv = 0;
12694 
12695 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
12696 		device_xname(dev), __func__));
12697 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
12698 	if ((sc->sc_type == WM_T_PCH)
12699 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
12700 		device_printf(dev,
12701 		    "Attempting to access page %d while gig enabled.\n", page);
12702 	}
12703 
12704 	if (!page_set) {
12705 		/* Enable access to PHY wakeup registers */
12706 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
12707 		if (rv != 0) {
12708 			device_printf(dev,
12709 			    "%s: Could not enable PHY wakeup reg access\n",
12710 			    __func__);
12711 			return rv;
12712 		}
12713 	}
12714 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
12715 		device_xname(sc->sc_dev), __func__, page, regnum));
12716 
12717 	/*
12718 	 * Steps 3 and 4: access the wakeup register through the address
12719 	 * and data opcodes (see the function comment above).
12720 	 */
12721 
12722 	/* Write the Wakeup register page offset value using opcode 0x11 */
12723 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
12724 	if (rv != 0)
12725 		return rv;
12726 
12727 	if (rd) {
12728 		/* Read the Wakeup register page value using opcode 0x12 */
12729 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
12730 	} else {
12731 		/* Write the Wakeup register page value using opcode 0x12 */
12732 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
12733 	}
12734 	if (rv != 0)
12735 		return rv;
12736 
12737 	if (!page_set)
12738 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
12739 
12740 	return rv;
12741 }
12742 
12743 /*
12744  * wm_gmii_hv_readreg:	[mii interface function]
12745  *
12746  *	Read a PHY register on the HV PHY (PCH and newer).
12747  * This could be handled by the PHY layer if we didn't have to lock the
12748  * resource ...
12749  */
12750 static int
12751 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
12752 {
12753 	struct wm_softc *sc = device_private(dev);
12754 	int rv;
12755 
12756 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
12757 		device_xname(dev), __func__));
12758 
12759 	rv = sc->phy.acquire(sc);
12760 	if (rv != 0) {
12761 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12762 		return rv;
12763 	}
12764 
12765 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
12766 	sc->phy.release(sc);
12767 	return rv;
12768 }
12769 
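/*
 * HV register arguments pack a page and a register number into one
 * value; BM_PHY_REG_PAGE() and BM_PHY_REG_NUM() unpack them. Pages
 * at or above HV_INTC_FC_PAGE_START are always accessed at PHY
 * address 1.
 */
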
12770 static int
12771 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
12772 {
12773 	uint16_t page = BM_PHY_REG_PAGE(reg);
12774 	uint16_t regnum = BM_PHY_REG_NUM(reg);
12775 	int rv;
12776 
12777 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
12778 
12779 	/* Page 800 works differently than the rest so it has its own func */
12780 	if (page == BM_WUC_PAGE)
12781 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
12782 
12783 	/*
12784 	 * Pages below 768 (other than page 0) would need different
12785 	 * handling; that is not implemented, so fail loudly.
12786 	 */
12787 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
12788 		device_printf(dev, "%s: bad page %d\n", __func__, page);
12789 		return -1;
12790 	}
12791 
12792 	/*
12793 	 * XXX I21[789] documents say that the SMBus Address register is at
12794 	 * PHY address 01, Page 0 (not 768), Register 26.
12795 	 */
12796 	if (page == HV_INTC_FC_PAGE_START)
12797 		page = 0;
12798 
12799 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
12800 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12801 		    page << BME1000_PAGE_SHIFT);
12802 		if (rv != 0)
12803 			return rv;
12804 	}
12805 
12806 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
12807 }
12808 
12809 /*
12810  * wm_gmii_hv_writereg:	[mii interface function]
12811  *
12812  *	Write a PHY register on the HV PHY.
12813  * This could be handled by the PHY layer if we didn't have to lock the
12814  * resource ...
12815  */
12816 static int
12817 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
12818 {
12819 	struct wm_softc *sc = device_private(dev);
12820 	int rv;
12821 
12822 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
12823 		device_xname(dev), __func__));
12824 
12825 	rv = sc->phy.acquire(sc);
12826 	if (rv != 0) {
12827 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12828 		return rv;
12829 	}
12830 
12831 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
12832 	sc->phy.release(sc);
12833 
12834 	return rv;
12835 }
12836 
12837 static int
12838 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
12839 {
12840 	struct wm_softc *sc = device_private(dev);
12841 	uint16_t page = BM_PHY_REG_PAGE(reg);
12842 	uint16_t regnum = BM_PHY_REG_NUM(reg);
12843 	int rv;
12844 
12845 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
12846 
12847 	/* Page 800 works differently than the rest so it has its own func */
12848 	if (page == BM_WUC_PAGE)
12849 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
12850 		    false);
12851 
12852 	/*
12853 	 * Pages below 768 (other than page 0) would need different
12854 	 * handling; that is not implemented, so fail loudly.
12855 	 */
12856 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
12857 		device_printf(dev, "%s: bad page %d\n", __func__, page);
12858 		return -1;
12859 	}
12860 
12861 	{
12862 		/*
12863 		 * XXX I21[789] documents say that the SMBus Address register
12864 		 * is at PHY address 01, Page 0 (not 768), Register 26.
12865 		 */
12866 		if (page == HV_INTC_FC_PAGE_START)
12867 			page = 0;
12868 
12869 		/*
12870 		 * XXX Workaround MDIO accesses being disabled after entering
12871 		 * IEEE Power Down (whenever bit 11 of the PHY control
12872 		 * register is set)
12873 		 */
12874 		if (sc->sc_phytype == WMPHY_82578) {
12875 			struct mii_softc *child;
12876 
12877 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
12878 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
12879 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
12880 			    && ((val & (1 << 11)) != 0)) {
12881 				device_printf(dev, "XXX need workaround\n");
12882 			}
12883 		}
12884 
12885 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
12886 			rv = wm_gmii_mdic_writereg(dev, 1,
12887 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
12888 			if (rv != 0)
12889 				return rv;
12890 		}
12891 	}
12892 
12893 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
12894 }
12895 
12896 /*
12897  * wm_gmii_82580_readreg:	[mii interface function]
12898  *
12899  *	Read a PHY register on the 82580 and I350.
12900  * This could be handled by the PHY layer if we didn't have to lock the
12901  * resource ...
12902  */
12903 static int
12904 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
12905 {
12906 	struct wm_softc *sc = device_private(dev);
12907 	int rv;
12908 
12909 	rv = sc->phy.acquire(sc);
12910 	if (rv != 0) {
12911 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12912 		return rv;
12913 	}
12914 
12915 #ifdef DIAGNOSTIC
12916 	if (reg > MII_ADDRMASK) {
12917 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
12918 		    __func__, sc->sc_phytype, reg);
12919 		reg &= MII_ADDRMASK;
12920 	}
12921 #endif
12922 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
12923 
12924 	sc->phy.release(sc);
12925 	return rv;
12926 }
12927 
12928 /*
12929  * wm_gmii_82580_writereg:	[mii interface function]
12930  *
12931  *	Write a PHY register on the 82580 and I350.
12932  * This could be handled by the PHY layer if we didn't have to lock the
12933  * resource ...
12934  */
12935 static int
12936 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
12937 {
12938 	struct wm_softc *sc = device_private(dev);
12939 	int rv;
12940 
12941 	rv = sc->phy.acquire(sc);
12942 	if (rv != 0) {
12943 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12944 		return rv;
12945 	}
12946 
12947 #ifdef DIAGNOSTIC
12948 	if (reg > MII_ADDRMASK) {
12949 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
12950 		    __func__, sc->sc_phytype, reg);
12951 		reg &= MII_ADDRMASK;
12952 	}
12953 #endif
12954 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
12955 
12956 	sc->phy.release(sc);
12957 	return rv;
12958 }
12959 
12960 /*
12961  * wm_gmii_gs40g_readreg:	[mii interface function]
12962  *
12963  *	Read a PHY register on the I210 and I211.
12964  * This could be handled by the PHY layer if we didn't have to lock the
12965  * resource ...
12966  */
12967 static int
12968 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
12969 {
12970 	struct wm_softc *sc = device_private(dev);
12971 	int page, offset;
12972 	int rv;
12973 
12974 	/* Acquire semaphore */
12975 	rv = sc->phy.acquire(sc);
12976 	if (rv != 0) {
12977 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12978 		return rv;
12979 	}
12980 
12981 	/* Page select */
12982 	page = reg >> GS40G_PAGE_SHIFT;
12983 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
12984 	if (rv != 0)
12985 		goto release;
12986 
12987 	/* Read reg */
12988 	offset = reg & GS40G_OFFSET_MASK;
12989 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
12990 
12991 release:
12992 	sc->phy.release(sc);
12993 	return rv;
12994 }
12995 
12996 /*
12997  * wm_gmii_gs40g_writereg:	[mii interface function]
12998  *
12999  *	Write a PHY register on the I210 and I211.
13000  * This could be handled by the PHY layer if we didn't have to lock the
13001  * resource ...
13002  */
13003 static int
13004 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
13005 {
13006 	struct wm_softc *sc = device_private(dev);
13007 	uint16_t page;
13008 	int offset, rv;
13009 
13010 	/* Acquire semaphore */
13011 	rv = sc->phy.acquire(sc);
13012 	if (rv != 0) {
13013 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
13014 		return rv;
13015 	}
13016 
13017 	/* Page select */
13018 	page = reg >> GS40G_PAGE_SHIFT;
13019 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
13020 	if (rv != 0)
13021 		goto release;
13022 
13023 	/* Write reg */
13024 	offset = reg & GS40G_OFFSET_MASK;
13025 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
13026 
13027 release:
13028 	/* Release semaphore */
13029 	sc->phy.release(sc);
13030 	return rv;
13031 }
13032 
13033 /*
13034  * wm_gmii_statchg:	[mii interface function]
13035  *
13036  *	Callback from MII layer when media changes.
13037  */
13038 static void
13039 wm_gmii_statchg(struct ifnet *ifp)
13040 {
13041 	struct wm_softc *sc = ifp->if_softc;
13042 	struct mii_data *mii = &sc->sc_mii;
13043 
13044 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
13045 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
13046 	sc->sc_fcrtl &= ~FCRTL_XONE;
13047 
13048 	/* Get flow control negotiation result. */
13049 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
13050 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
13051 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
13052 		mii->mii_media_active &= ~IFM_ETH_FMASK;
13053 	}
13054 
13055 	if (sc->sc_flowflags & IFM_FLOW) {
13056 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
13057 			sc->sc_ctrl |= CTRL_TFCE;
13058 			sc->sc_fcrtl |= FCRTL_XONE;
13059 		}
13060 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
13061 			sc->sc_ctrl |= CTRL_RFCE;
13062 	}
13063 
13064 	if (mii->mii_media_active & IFM_FDX) {
13065 		DPRINTF(sc, WM_DEBUG_LINK,
13066 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
13067 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
13068 	} else {
13069 		DPRINTF(sc, WM_DEBUG_LINK,
13070 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
13071 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
13072 	}
13073 
13074 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13075 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
13076 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
13077 	    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
13078 	if (sc->sc_type == WM_T_80003) {
13079 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
13080 		case IFM_1000_T:
13081 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
13082 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
13083 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
13084 			break;
13085 		default:
13086 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
13087 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
13088 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
13089 			break;
13090 		}
13091 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
13092 	}
13093 }
13094 
13095 /* kumeran related (80003, ICH* and PCH*) */
13096 
13097 /*
13098  * wm_kmrn_readreg:
13099  *
13100  *	Read a Kumeran register.
13101  */
13102 static int
13103 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
13104 {
13105 	int rv;
13106 
13107 	if (sc->sc_type == WM_T_80003)
13108 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
13109 	else
13110 		rv = sc->phy.acquire(sc);
13111 	if (rv != 0) {
13112 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
13113 		    __func__);
13114 		return rv;
13115 	}
13116 
13117 	rv = wm_kmrn_readreg_locked(sc, reg, val);
13118 
13119 	if (sc->sc_type == WM_T_80003)
13120 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
13121 	else
13122 		sc->phy.release(sc);
13123 
13124 	return rv;
13125 }
13126 
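/*
 * Kumeran registers are reached through the single KUMCTRLSTA
 * window: write the register offset (with KUMCTRLSTA_REN set for a
 * read), wait briefly, then read the data back through the same
 * register.
 */
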
13127 static int
13128 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
13129 {
13130 
13131 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
13132 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
13133 	    KUMCTRLSTA_REN);
13134 	CSR_WRITE_FLUSH(sc);
13135 	delay(2);
13136 
13137 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
13138 
13139 	return 0;
13140 }
13141 
13142 /*
13143  * wm_kmrn_writereg:
13144  *
13145  *	Write a Kumeran register.
13146  */
13147 static int
13148 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
13149 {
13150 	int rv;
13151 
13152 	if (sc->sc_type == WM_T_80003)
13153 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
13154 	else
13155 		rv = sc->phy.acquire(sc);
13156 	if (rv != 0) {
13157 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
13158 		    __func__);
13159 		return rv;
13160 	}
13161 
13162 	rv = wm_kmrn_writereg_locked(sc, reg, val);
13163 
13164 	if (sc->sc_type == WM_T_80003)
13165 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
13166 	else
13167 		sc->phy.release(sc);
13168 
13169 	return rv;
13170 }
13171 
13172 static int
13173 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
13174 {
13175 
13176 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
13177 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
13178 
13179 	return 0;
13180 }
13181 
13182 /*
13183  * EMI register related (82579, WMPHY_I217 (PCH2 and newer))
13184  * This access method is different from IEEE MMD.
13185  */
13186 static int
13187 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
13188 {
13189 	struct wm_softc *sc = device_private(dev);
13190 	int rv;
13191 
13192 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
13193 	if (rv != 0)
13194 		return rv;
13195 
13196 	if (rd)
13197 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
13198 	else
13199 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
13200 	return rv;
13201 }
13202 
13203 static int
13204 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
13205 {
13206 
13207 	return wm_access_emi_reg_locked(dev, reg, val, true);
13208 }
13209 
13210 static int
13211 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
13212 {
13213 
13214 	return wm_access_emi_reg_locked(dev, reg, &val, false);
13215 }
13216 
13217 /* SGMII related */
13218 
13219 /*
13220  * wm_sgmii_uses_mdio
13221  *
13222  * Check whether the transaction is to the internal PHY or the external
13223  * MDIO interface. Return true if it's MDIO.
13224  */
13225 static bool
13226 wm_sgmii_uses_mdio(struct wm_softc *sc)
13227 {
13228 	uint32_t reg;
13229 	bool ismdio = false;
13230 
13231 	switch (sc->sc_type) {
13232 	case WM_T_82575:
13233 	case WM_T_82576:
13234 		reg = CSR_READ(sc, WMREG_MDIC);
13235 		ismdio = ((reg & MDIC_DEST) != 0);
13236 		break;
13237 	case WM_T_82580:
13238 	case WM_T_I350:
13239 	case WM_T_I354:
13240 	case WM_T_I210:
13241 	case WM_T_I211:
13242 		reg = CSR_READ(sc, WMREG_MDICNFG);
13243 		ismdio = ((reg & MDICNFG_DEST) != 0);
13244 		break;
13245 	default:
13246 		break;
13247 	}
13248 
13249 	return ismdio;
13250 }
13251 
13252 /* Setup internal SGMII PHY for SFP */
13253 static void
13254 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
13255 {
13256 	uint16_t id1, id2, phyreg;
13257 	int i, rv;
13258 
13259 	if (((sc->sc_flags & WM_F_SGMII) == 0)
13260 	    || ((sc->sc_flags & WM_F_SFP) == 0))
13261 		return;
13262 
13263 	for (i = 0; i < MII_NPHY; i++) {
13264 		sc->phy.no_errprint = true;
13265 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
13266 		if (rv != 0)
13267 			continue;
13268 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
13269 		if (rv != 0)
13270 			continue;
13271 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
13272 			continue;
13273 		sc->phy.no_errprint = false;
13274 
13275 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
13276 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
13277 		phyreg |= ESSR_SGMII_WOC_COPPER;
13278 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
13279 		break;
13280 	}
13282 }
13283 
13284 /*
13285  * wm_sgmii_readreg:	[mii interface function]
13286  *
13287  *	Read a PHY register on the SGMII
13288  * This could be handled by the PHY layer if we didn't have to lock the
13289  * resource ...
13290  */
13291 static int
13292 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
13293 {
13294 	struct wm_softc *sc = device_private(dev);
13295 	int rv;
13296 
13297 	rv = sc->phy.acquire(sc);
13298 	if (rv != 0) {
13299 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
13300 		return rv;
13301 	}
13302 
13303 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
13304 
13305 	sc->phy.release(sc);
13306 	return rv;
13307 }
13308 
13309 static int
13310 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
13311 {
13312 	struct wm_softc *sc = device_private(dev);
13313 	uint32_t i2ccmd;
13314 	int i, rv = 0;
13315 
13316 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
13317 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
13318 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
13319 
13320 	/* Poll the ready bit */
13321 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
13322 		delay(50);
13323 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
13324 		if (i2ccmd & I2CCMD_READY)
13325 			break;
13326 	}
13327 	if ((i2ccmd & I2CCMD_READY) == 0) {
13328 		device_printf(dev, "I2CCMD Read did not complete\n");
13329 		rv = ETIMEDOUT;
13330 	}
13331 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
13332 		if (!sc->phy.no_errprint)
13333 			device_printf(dev, "I2CCMD Error bit set\n");
13334 		rv = EIO;
13335 	}
13336 
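	/* The I2C interface returns the two data bytes swapped; undo it. */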
13337 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
13338 
13339 	return rv;
13340 }
13341 
13342 /*
13343  * wm_sgmii_writereg:	[mii interface function]
13344  *
13345  *	Write a PHY register on the SGMII.
13346  * This could be handled by the PHY layer if we didn't have to lock the
13347  * resource ...
13348  */
13349 static int
13350 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
13351 {
13352 	struct wm_softc *sc = device_private(dev);
13353 	int rv;
13354 
13355 	rv = sc->phy.acquire(sc);
13356 	if (rv != 0) {
13357 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
13358 		return rv;
13359 	}
13360 
13361 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
13362 
13363 	sc->phy.release(sc);
13364 
13365 	return rv;
13366 }
13367 
13368 static int
13369 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
13370 {
13371 	struct wm_softc *sc = device_private(dev);
13372 	uint32_t i2ccmd;
13373 	uint16_t swapdata;
13374 	int rv = 0;
13375 	int i;
13376 
13377 	/* Swap the data bytes for the I2C interface */
13378 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
13379 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
13380 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
13381 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
13382 
13383 	/* Poll the ready bit */
13384 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
13385 		delay(50);
13386 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
13387 		if (i2ccmd & I2CCMD_READY)
13388 			break;
13389 	}
13390 	if ((i2ccmd & I2CCMD_READY) == 0) {
13391 		device_printf(dev, "I2CCMD Write did not complete\n");
13392 		rv = ETIMEDOUT;
13393 	}
13394 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
13395 		device_printf(dev, "I2CCMD Error bit set\n");
13396 		rv = EIO;
13397 	}
13398 
13399 	return rv;
13400 }
13401 
13402 /* TBI related */
13403 
13404 static bool
13405 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
13406 {
13407 	bool sig;
13408 
13409 	sig = ctrl & CTRL_SWDPIN(1);
13410 
13411 	/*
13412 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
13413 	 * detect a signal, 1 if they don't.
13414 	 */
13415 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
13416 		sig = !sig;
13417 
13418 	return sig;
13419 }
13420 
13421 /*
13422  * wm_tbi_mediainit:
13423  *
13424  *	Initialize media for use on 1000BASE-X devices.
13425  */
13426 static void
13427 wm_tbi_mediainit(struct wm_softc *sc)
13428 {
13429 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
13430 	const char *sep = "";
13431 
13432 	if (sc->sc_type < WM_T_82543)
13433 		sc->sc_tipg = TIPG_WM_DFLT;
13434 	else
13435 		sc->sc_tipg = TIPG_LG_DFLT;
13436 
13437 	sc->sc_tbi_serdes_anegticks = 5;
13438 
13439 	/* Initialize our media structures */
13440 	sc->sc_mii.mii_ifp = ifp;
13441 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
13442 
13443 	ifp->if_baudrate = IF_Gbps(1);
13444 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
13445 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
13446 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
13447 		    wm_serdes_mediachange, wm_serdes_mediastatus,
13448 		    sc->sc_core_lock);
13449 	} else {
13450 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
13451 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
13452 	}
13453 
13454 	/*
13455 	 * SWD Pins:
13456 	 *
13457 	 *	0 = Link LED (output)
13458 	 *	1 = Loss Of Signal (input)
13459 	 */
13460 	sc->sc_ctrl |= CTRL_SWDPIO(0);
13461 
13462 	/* XXX Perhaps this is only for TBI */
13463 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
13464 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
13465 
13466 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
13467 		sc->sc_ctrl &= ~CTRL_LRST;
13468 
13469 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13470 
13471 #define	ADD(ss, mm, dd)							  \
13472 do {									  \
13473 	aprint_normal("%s%s", sep, ss);					  \
13474 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
13475 	sep = ", ";							  \
13476 } while (/*CONSTCOND*/0)
13477 
13478 	aprint_normal_dev(sc->sc_dev, "");
13479 
13480 	if (sc->sc_type == WM_T_I354) {
13481 		uint32_t status;
13482 
13483 		status = CSR_READ(sc, WMREG_STATUS);
13484 		if (((status & STATUS_2P5_SKU) != 0)
13485 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
13486 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
13487 		} else
13488 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
13489 	} else if (sc->sc_type == WM_T_82545) {
13490 		/* Only 82545 is LX (XXX except SFP) */
13491 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
13492 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
13493 	} else if (sc->sc_sfptype != 0) {
13494 		/* XXX wm(4) fiber/serdes don't use ifm_data */
13495 		switch (sc->sc_sfptype) {
13496 		default:
13497 		case SFF_SFP_ETH_FLAGS_1000SX:
13498 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
13499 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
13500 			break;
13501 		case SFF_SFP_ETH_FLAGS_1000LX:
13502 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
13503 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
13504 			break;
13505 		case SFF_SFP_ETH_FLAGS_1000CX:
13506 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
13507 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
13508 			break;
13509 		case SFF_SFP_ETH_FLAGS_1000T:
13510 			ADD("1000baseT", IFM_1000_T, 0);
13511 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
13512 			break;
13513 		case SFF_SFP_ETH_FLAGS_100FX:
13514 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
13515 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
13516 			break;
13517 		}
13518 	} else {
13519 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
13520 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
13521 	}
13522 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
13523 	aprint_normal("\n");
13524 
13525 #undef ADD
13526 
13527 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
13528 }
13529 
13530 /*
13531  * wm_tbi_mediachange:	[ifmedia interface function]
13532  *
13533  *	Set hardware to newly-selected media on a 1000BASE-X device.
13534  */
13535 static int
13536 wm_tbi_mediachange(struct ifnet *ifp)
13537 {
13538 	struct wm_softc *sc = ifp->if_softc;
13539 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
13540 	uint32_t status, ctrl;
13541 	bool signal;
13542 	int i;
13543 
13544 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
13545 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
13546 		/* XXX need some work for >= 82571 and < 82575 */
13547 		if (sc->sc_type < WM_T_82575)
13548 			return 0;
13549 	}
13550 
13551 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
13552 	    || (sc->sc_type >= WM_T_82575))
13553 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
13554 
13555 	sc->sc_ctrl &= ~CTRL_LRST;
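
	/* Build the 1000BASE-X autonegotiation word (TXCW). */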
13556 	sc->sc_txcw = TXCW_ANE;
13557 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
13558 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
13559 	else if (ife->ifm_media & IFM_FDX)
13560 		sc->sc_txcw |= TXCW_FD;
13561 	else
13562 		sc->sc_txcw |= TXCW_HD;
13563 
13564 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
13565 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
13566 
13567 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
13568 		device_xname(sc->sc_dev), sc->sc_txcw));
13569 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
13570 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13571 	CSR_WRITE_FLUSH(sc);
13572 	delay(1000);
13573 
13574 	ctrl = CSR_READ(sc, WMREG_CTRL);
13575 	signal = wm_tbi_havesignal(sc, ctrl);
13576 
13577 	DPRINTF(sc, WM_DEBUG_LINK,
13578 	    ("%s: signal = %d\n", device_xname(sc->sc_dev), signal));
13579 
13580 	if (signal) {
13581 		/* Have signal; wait for the link to come up. */
13582 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
13583 			delay(10000);
13584 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
13585 				break;
13586 		}
13587 
13588 		DPRINTF(sc, WM_DEBUG_LINK,
13589 		    ("%s: i = %d after waiting for link\n",
13590 			device_xname(sc->sc_dev), i));
13591 
13592 		status = CSR_READ(sc, WMREG_STATUS);
13593 		DPRINTF(sc, WM_DEBUG_LINK,
13594 		    ("%s: status after final read = 0x%x, STATUS_LU = %#"
13595 			__PRIxBIT "\n",
13596 			device_xname(sc->sc_dev), status, STATUS_LU));
13597 		if (status & STATUS_LU) {
13598 			/* Link is up. */
13599 			DPRINTF(sc, WM_DEBUG_LINK,
13600 			    ("%s: LINK: set media -> link up %s\n",
13601 				device_xname(sc->sc_dev),
13602 				(status & STATUS_FD) ? "FDX" : "HDX"));
13603 
13604 			/*
13605 			 * NOTE: CTRL will update TFCE and RFCE automatically,
13606 			 * so we should update sc->sc_ctrl
13607 			 */
13608 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
13609 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
13610 			sc->sc_fcrtl &= ~FCRTL_XONE;
13611 			if (status & STATUS_FD)
13612 				sc->sc_tctl |=
13613 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
13614 			else
13615 				sc->sc_tctl |=
13616 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
13617 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
13618 				sc->sc_fcrtl |= FCRTL_XONE;
13619 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
13620 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
13621 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
13622 			sc->sc_tbi_linkup = 1;
13623 		} else {
13624 			if (i == WM_LINKUP_TIMEOUT)
13625 				wm_check_for_link(sc);
13626 			/* Link is down. */
13627 			DPRINTF(sc, WM_DEBUG_LINK,
13628 			    ("%s: LINK: set media -> link down\n",
13629 				device_xname(sc->sc_dev)));
13630 			sc->sc_tbi_linkup = 0;
13631 		}
13632 	} else {
13633 		DPRINTF(sc, WM_DEBUG_LINK,
13634 		    ("%s: LINK: set media -> no signal\n",
13635 			device_xname(sc->sc_dev)));
13636 		sc->sc_tbi_linkup = 0;
13637 	}
13638 
13639 	wm_tbi_serdes_set_linkled(sc);
13640 
13641 	return 0;
13642 }
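/*
 * For example, in the common IFM_AUTO case with flow control enabled,
 * the TXCW value written above works out to
 *
 *	TXCW_ANE | TXCW_FD | TXCW_HD | TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE
 *
 * i.e. autonegotiate, advertising both duplex modes and both pause
 * directions.
 */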
13643 
13644 /*
13645  * wm_tbi_mediastatus:	[ifmedia interface function]
13646  *
13647  *	Get the current interface media status on a 1000BASE-X device.
13648  */
13649 static void
13650 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
13651 {
13652 	struct wm_softc *sc = ifp->if_softc;
13653 	uint32_t ctrl, status;
13654 
13655 	ifmr->ifm_status = IFM_AVALID;
13656 	ifmr->ifm_active = IFM_ETHER;
13657 
13658 	status = CSR_READ(sc, WMREG_STATUS);
13659 	if ((status & STATUS_LU) == 0) {
13660 		ifmr->ifm_active |= IFM_NONE;
13661 		return;
13662 	}
13663 
13664 	ifmr->ifm_status |= IFM_ACTIVE;
13665 	/* Only 82545 is LX */
13666 	if (sc->sc_type == WM_T_82545)
13667 		ifmr->ifm_active |= IFM_1000_LX;
13668 	else
13669 		ifmr->ifm_active |= IFM_1000_SX;
13670 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
13671 		ifmr->ifm_active |= IFM_FDX;
13672 	else
13673 		ifmr->ifm_active |= IFM_HDX;
13674 	ctrl = CSR_READ(sc, WMREG_CTRL);
13675 	if (ctrl & CTRL_RFCE)
13676 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
13677 	if (ctrl & CTRL_TFCE)
13678 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
13679 }
13680 
13681 /* XXX TBI only */
13682 static int
13683 wm_check_for_link(struct wm_softc *sc)
13684 {
13685 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
13686 	uint32_t rxcw;
13687 	uint32_t ctrl;
13688 	uint32_t status;
13689 	bool signal;
13690 
13691 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
13692 		device_xname(sc->sc_dev), __func__));
13693 
13694 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
13695 		/* XXX need some work for >= 82571 */
13696 		if (sc->sc_type >= WM_T_82571) {
13697 			sc->sc_tbi_linkup = 1;
13698 			return 0;
13699 		}
13700 	}
13701 
13702 	rxcw = CSR_READ(sc, WMREG_RXCW);
13703 	ctrl = CSR_READ(sc, WMREG_CTRL);
13704 	status = CSR_READ(sc, WMREG_STATUS);
13705 	signal = wm_tbi_havesignal(sc, ctrl);
13706 
13707 	DPRINTF(sc, WM_DEBUG_LINK,
13708 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
13709 		device_xname(sc->sc_dev), __func__, signal,
13710 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
13711 
13712 	/*
13713 	 * SWDPIN   LU RXCW
13714 	 *	0    0	  0
13715 	 *	0    0	  1	(should not happen)
13716 	 *	0    1	  0	(should not happen)
13717 	 *	0    1	  1	(should not happen)
13718 	 *	1    0	  0	Disable autoneg and force linkup
13719 	 *	1    0	  1	got /C/ but not linkup yet
13720 	 *	1    1	  0	(linkup)
13721 	 *	1    1	  1	If IFM_AUTO, back to autoneg
13722 	 *
13723 	 */
13724 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
13725 		DPRINTF(sc, WM_DEBUG_LINK,
13726 		    ("%s: %s: force linkup and fullduplex\n",
13727 			device_xname(sc->sc_dev), __func__));
13728 		sc->sc_tbi_linkup = 0;
13729 		/* Disable auto-negotiation in the TXCW register */
13730 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
13731 
13732 		/*
13733 		 * Force link-up and also force full-duplex.
13734 		 *
13735 		 * NOTE: the hardware has updated TFCE and RFCE in CTRL
13736 		 * automatically, so update sc->sc_ctrl from it.
13737 		 */
13738 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
13739 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13740 	} else if (((status & STATUS_LU) != 0)
13741 	    && ((rxcw & RXCW_C) != 0)
13742 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
13743 		sc->sc_tbi_linkup = 1;
13744 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
13745 			device_xname(sc->sc_dev), __func__));
13746 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
13747 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
13748 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
13749 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/\n",
13750 			device_xname(sc->sc_dev), __func__));
13751 	} else {
13752 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
13753 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
13754 			status));
13755 	}
13756 
13757 	return 0;
13758 }
13759 
13760 /*
13761  * wm_tbi_tick:
13762  *
13763  *	Check the link on TBI devices.
13764  *	This function acts as mii_tick().
13765  */
13766 static void
13767 wm_tbi_tick(struct wm_softc *sc)
13768 {
13769 	struct mii_data *mii = &sc->sc_mii;
13770 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
13771 	uint32_t status;
13772 
13773 	KASSERT(mutex_owned(sc->sc_core_lock));
13774 
13775 	status = CSR_READ(sc, WMREG_STATUS);
13776 
13777 	/* XXX is this needed? */
13778 	(void)CSR_READ(sc, WMREG_RXCW);
13779 	(void)CSR_READ(sc, WMREG_CTRL);
13780 
13781 	/* set link status */
13782 	if ((status & STATUS_LU) == 0) {
13783 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
13784 			device_xname(sc->sc_dev)));
13785 		sc->sc_tbi_linkup = 0;
13786 	} else if (sc->sc_tbi_linkup == 0) {
13787 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
13788 			device_xname(sc->sc_dev),
13789 			(status & STATUS_FD) ? "FDX" : "HDX"));
13790 		sc->sc_tbi_linkup = 1;
13791 		sc->sc_tbi_serdes_ticks = 0;
13792 	}
13793 
13794 	if ((sc->sc_if_flags & IFF_UP) == 0)
13795 		goto setled;
13796 
13797 	if ((status & STATUS_LU) == 0) {
13798 		sc->sc_tbi_linkup = 0;
13799 		/* If the timer expired, retry autonegotiation */
13800 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
13801 		    && (++sc->sc_tbi_serdes_ticks
13802 			>= sc->sc_tbi_serdes_anegticks)) {
13803 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
13804 				device_xname(sc->sc_dev), __func__));
13805 			sc->sc_tbi_serdes_ticks = 0;
13806 			/*
13807 			 * Reset the link, and let autonegotiation do
13808 			 * its thing
13809 			 */
13810 			sc->sc_ctrl |= CTRL_LRST;
13811 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13812 			CSR_WRITE_FLUSH(sc);
13813 			delay(1000);
13814 			sc->sc_ctrl &= ~CTRL_LRST;
13815 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13816 			CSR_WRITE_FLUSH(sc);
13817 			delay(1000);
13818 			CSR_WRITE(sc, WMREG_TXCW,
13819 			    sc->sc_txcw & ~TXCW_ANE);
13820 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
13821 		}
13822 	}
13823 
13824 setled:
13825 	wm_tbi_serdes_set_linkled(sc);
13826 }
13827 
13828 /* SERDES related */
13829 static void
13830 wm_serdes_power_up_link_82575(struct wm_softc *sc)
13831 {
13832 	uint32_t reg;
13833 
13834 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
13835 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
13836 		return;
13837 
13838 	/* Enable PCS to turn on link */
13839 	reg = CSR_READ(sc, WMREG_PCS_CFG);
13840 	reg |= PCS_CFG_PCS_EN;
13841 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
13842 
13843 	/* Power up the laser */
13844 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
13845 	reg &= ~CTRL_EXT_SWDPIN(3);
13846 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
13847 
13848 	/* Flush the write to ensure completion */
13849 	CSR_WRITE_FLUSH(sc);
13850 	delay(1000);
13851 }
13852 
13853 static int
13854 wm_serdes_mediachange(struct ifnet *ifp)
13855 {
13856 	struct wm_softc *sc = ifp->if_softc;
13857 	bool pcs_autoneg = true; /* XXX */
13858 	uint32_t ctrl_ext, pcs_lctl, reg;
13859 
13860 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
13861 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
13862 		return 0;
13863 
13864 	/* XXX Currently, this function is not called on 8257[12] */
13865 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
13866 	    || (sc->sc_type >= WM_T_82575))
13867 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
13868 
13869 	/* Power on the SFP cage if present */
13870 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
13871 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
13872 	ctrl_ext |= CTRL_EXT_I2C_ENA;
13873 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
13874 
13875 	sc->sc_ctrl |= CTRL_SLU;
13876 
13877 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
13878 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
13879 
13880 		reg = CSR_READ(sc, WMREG_CONNSW);
13881 		reg |= CONNSW_ENRGSRC;
13882 		CSR_WRITE(sc, WMREG_CONNSW, reg);
13883 	}
13884 
13885 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
13886 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
13887 	case CTRL_EXT_LINK_MODE_SGMII:
13888 		/* SGMII mode lets the PHY handle forcing speed/duplex */
13889 		pcs_autoneg = true;
13890 		/* The autoneg timeout must be disabled for SGMII mode */
13891 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
13892 		break;
13893 	case CTRL_EXT_LINK_MODE_1000KX:
13894 		pcs_autoneg = false;
13895 		/* FALLTHROUGH */
13896 	default:
13897 		if ((sc->sc_type == WM_T_82575)
13898 		    || (sc->sc_type == WM_T_82576)) {
13899 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
13900 				pcs_autoneg = false;
13901 		}
13902 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
13903 		    | CTRL_FRCFDX;
13904 
13905 		/* Set speed of 1000/Full if speed/duplex is forced */
13906 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
13907 	}
13908 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13909 
13910 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
13911 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
13912 
13913 	if (pcs_autoneg) {
13914 		/* Set PCS register for autoneg */
13915 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
13916 
13917 		/* Disable force flow control for autoneg */
13918 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
13919 
13920 		/* Configure flow control advertisement for autoneg */
13921 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
13922 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
13923 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
13924 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
13925 	} else
13926 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
13927 
13928 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
13929 
13930 	return 0;
13931 }
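/*
 * In short, the two PCS configurations set up above are:
 *
 *	autoneg: PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART, forced flow
 *		 control off, both pause bits advertised in WMREG_PCS_ANADV;
 *	forced:	 PCS_LCTL_FSD | PCS_LCTL_FORCE_FC, with CTRL forcing
 *		 1000/full via CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD |
 *		 CTRL_FRCFDX.
 */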
13932 
13933 static void
13934 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
13935 {
13936 	struct wm_softc *sc = ifp->if_softc;
13937 	struct mii_data *mii = &sc->sc_mii;
13938 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
13939 	uint32_t pcs_adv, pcs_lpab, reg;
13940 
13941 	ifmr->ifm_status = IFM_AVALID;
13942 	ifmr->ifm_active = IFM_ETHER;
13943 
13944 	/* Check PCS */
13945 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
13946 	if ((reg & PCS_LSTS_LINKOK) == 0) {
13947 		ifmr->ifm_active |= IFM_NONE;
13948 		sc->sc_tbi_linkup = 0;
13949 		goto setled;
13950 	}
13951 
13952 	sc->sc_tbi_linkup = 1;
13953 	ifmr->ifm_status |= IFM_ACTIVE;
13954 	if (sc->sc_type == WM_T_I354) {
13955 		uint32_t status;
13956 
13957 		status = CSR_READ(sc, WMREG_STATUS);
13958 		if (((status & STATUS_2P5_SKU) != 0)
13959 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
13960 			ifmr->ifm_active |= IFM_2500_KX;
13961 		} else
13962 			ifmr->ifm_active |= IFM_1000_KX;
13963 	} else {
13964 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
13965 		case PCS_LSTS_SPEED_10:
13966 			ifmr->ifm_active |= IFM_10_T; /* XXX */
13967 			break;
13968 		case PCS_LSTS_SPEED_100:
13969 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
13970 			break;
13971 		case PCS_LSTS_SPEED_1000:
13972 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
13973 			break;
13974 		default:
13975 			device_printf(sc->sc_dev, "Unknown speed\n");
13976 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
13977 			break;
13978 		}
13979 	}
13980 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
13981 	if ((reg & PCS_LSTS_FDX) != 0)
13982 		ifmr->ifm_active |= IFM_FDX;
13983 	else
13984 		ifmr->ifm_active |= IFM_HDX;
13985 	mii->mii_media_active &= ~IFM_ETH_FMASK;
13986 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
13987 		/* Check flow */
13988 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
13989 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
13990 			DPRINTF(sc, WM_DEBUG_LINK,
13991 			    ("XXX LINKOK but not ACOMP\n"));
13992 			goto setled;
13993 		}
13994 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
13995 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
13996 		DPRINTF(sc, WM_DEBUG_LINK,
13997 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
13998 		if ((pcs_adv & TXCW_SYM_PAUSE)
13999 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
14000 			mii->mii_media_active |= IFM_FLOW
14001 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
14002 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
14003 		    && (pcs_adv & TXCW_ASYM_PAUSE)
14004 		    && (pcs_lpab & TXCW_SYM_PAUSE)
14005 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
14006 			mii->mii_media_active |= IFM_FLOW
14007 			    | IFM_ETH_TXPAUSE;
14008 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
14009 		    && (pcs_adv & TXCW_ASYM_PAUSE)
14010 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
14011 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
14012 			mii->mii_media_active |= IFM_FLOW
14013 			    | IFM_ETH_RXPAUSE;
14014 		}
14015 	}
14016 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
14017 	    | (mii->mii_media_active & IFM_ETH_FMASK);
14018 setled:
14019 	wm_tbi_serdes_set_linkled(sc);
14020 }
14021 
14022 /*
14023  * wm_serdes_tick:
14024  *
14025  *	Check the link on serdes devices.
14026  */
14027 static void
14028 wm_serdes_tick(struct wm_softc *sc)
14029 {
14030 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
14031 	struct mii_data *mii = &sc->sc_mii;
14032 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
14033 	uint32_t reg;
14034 
14035 	KASSERT(mutex_owned(sc->sc_core_lock));
14036 
14037 	mii->mii_media_status = IFM_AVALID;
14038 	mii->mii_media_active = IFM_ETHER;
14039 
14040 	/* Check PCS */
14041 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
14042 	if ((reg & PCS_LSTS_LINKOK) != 0) {
14043 		mii->mii_media_status |= IFM_ACTIVE;
14044 		sc->sc_tbi_linkup = 1;
14045 		sc->sc_tbi_serdes_ticks = 0;
14046 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
14047 		if ((reg & PCS_LSTS_FDX) != 0)
14048 			mii->mii_media_active |= IFM_FDX;
14049 		else
14050 			mii->mii_media_active |= IFM_HDX;
14051 	} else {
14052 		mii->mii_media_active |= IFM_NONE;
14053 		sc->sc_tbi_linkup = 0;
14054 		/* If the timer expired, retry autonegotiation */
14055 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
14056 		    && (++sc->sc_tbi_serdes_ticks
14057 			>= sc->sc_tbi_serdes_anegticks)) {
14058 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
14059 				device_xname(sc->sc_dev), __func__));
14060 			sc->sc_tbi_serdes_ticks = 0;
14061 			/* XXX */
14062 			wm_serdes_mediachange(ifp);
14063 		}
14064 	}
14065 
14066 	wm_tbi_serdes_set_linkled(sc);
14067 }
14068 
14069 /* SFP related */
14070 
14071 static int
14072 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
14073 {
14074 	uint32_t i2ccmd;
14075 	int i;
14076 
14077 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
14078 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
14079 
14080 	/* Poll the ready bit */
14081 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
14082 		delay(50);
14083 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
14084 		if (i2ccmd & I2CCMD_READY)
14085 			break;
14086 	}
14087 	if ((i2ccmd & I2CCMD_READY) == 0)
14088 		return -1;
14089 	if ((i2ccmd & I2CCMD_ERROR) != 0)
14090 		return -1;
14091 
14092 	*data = i2ccmd & 0x00ff;
14093 
14094 	return 0;
14095 }
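/*
 * Usage sketch: reading the SFF identifier byte at SFF_SFP_ID_OFF,
 * as wm_sfp_get_media_type() does below:
 *
 *	uint8_t id;
 *
 *	if (wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &id) == 0)
 *		... id is SFF_SFP_ID_SFP for a pluggable SFP module ...
 */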
14096 
14097 static uint32_t
14098 wm_sfp_get_media_type(struct wm_softc *sc)
14099 {
14100 	uint32_t ctrl_ext;
14101 	uint8_t val = 0;
14102 	int timeout = 3;
14103 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
14104 	int rv = -1;
14105 
14106 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
14107 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
14108 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
14109 	CSR_WRITE_FLUSH(sc);
14110 
14111 	/* Read SFP module data */
14112 	while (timeout) {
14113 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
14114 		if (rv == 0)
14115 			break;
14116 		delay(100*1000); /* XXX too big */
14117 		timeout--;
14118 	}
14119 	if (rv != 0)
14120 		goto out;
14121 
14122 	switch (val) {
14123 	case SFF_SFP_ID_SFF:
14124 		aprint_normal_dev(sc->sc_dev,
14125 		    "Module/Connector soldered to board\n");
14126 		break;
14127 	case SFF_SFP_ID_SFP:
14128 		sc->sc_flags |= WM_F_SFP;
14129 		break;
14130 	case SFF_SFP_ID_UNKNOWN:
14131 		goto out;
14132 	default:
14133 		break;
14134 	}
14135 
14136 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
14137 	if (rv != 0)
14138 		goto out;
14139 
14140 	sc->sc_sfptype = val;
14141 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
14142 		mediatype = WM_MEDIATYPE_SERDES;
14143 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
14144 		sc->sc_flags |= WM_F_SGMII;
14145 		mediatype = WM_MEDIATYPE_COPPER;
14146 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
14147 		sc->sc_flags |= WM_F_SGMII;
14148 		mediatype = WM_MEDIATYPE_SERDES;
14149 	} else {
14150 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
14151 		    __func__, sc->sc_sfptype);
14152 		sc->sc_sfptype = 0; /* XXX unknown */
14153 	}
14154 
14155 out:
14156 	/* Restore I2C interface setting */
14157 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
14158 
14159 	return mediatype;
14160 }
14161 
14162 /*
14163  * NVM related.
14164  * Microwire, SPI (w/wo EERD) and Flash.
14165  */
14166 
14167 /* Both spi and uwire */
14168 
14169 /*
14170  * wm_eeprom_sendbits:
14171  *
14172  *	Send a series of bits to the EEPROM.
14173  */
14174 static void
14175 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
14176 {
14177 	uint32_t reg;
14178 	int x;
14179 
14180 	reg = CSR_READ(sc, WMREG_EECD);
14181 
14182 	for (x = nbits; x > 0; x--) {
14183 		if (bits & (1U << (x - 1)))
14184 			reg |= EECD_DI;
14185 		else
14186 			reg &= ~EECD_DI;
14187 		CSR_WRITE(sc, WMREG_EECD, reg);
14188 		CSR_WRITE_FLUSH(sc);
14189 		delay(2);
14190 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
14191 		CSR_WRITE_FLUSH(sc);
14192 		delay(2);
14193 		CSR_WRITE(sc, WMREG_EECD, reg);
14194 		CSR_WRITE_FLUSH(sc);
14195 		delay(2);
14196 	}
14197 }
14198 
14199 /*
14200  * wm_eeprom_recvbits:
14201  *
14202  *	Receive a series of bits from the EEPROM.
14203  */
14204 static void
14205 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
14206 {
14207 	uint32_t reg, val;
14208 	int x;
14209 
14210 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
14211 
14212 	val = 0;
14213 	for (x = nbits; x > 0; x--) {
14214 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
14215 		CSR_WRITE_FLUSH(sc);
14216 		delay(2);
14217 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
14218 			val |= (1U << (x - 1));
14219 		CSR_WRITE(sc, WMREG_EECD, reg);
14220 		CSR_WRITE_FLUSH(sc);
14221 		delay(2);
14222 	}
14223 	*valp = val;
14224 }
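/*
 * Together these two helpers implement one bit-banged EEPROM
 * transaction.  A Microwire READ, for example, is built as (see
 * wm_nvm_read_uwire() below):
 *
 *	wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);	    read opcode
 *	wm_eeprom_sendbits(sc, addr, sc->sc_nvm_addrbits);  word address
 *	wm_eeprom_recvbits(sc, &val, 16);		    one data word
 */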
14225 
14226 /* Microwire */
14227 
14228 /*
14229  * wm_nvm_read_uwire:
14230  *
14231  *	Read a word from the EEPROM using the MicroWire protocol.
14232  */
14233 static int
14234 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
14235 {
14236 	uint32_t reg, val;
14237 	int i, rv;
14238 
14239 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14240 		device_xname(sc->sc_dev), __func__));
14241 
14242 	rv = sc->nvm.acquire(sc);
14243 	if (rv != 0)
14244 		return rv;
14245 
14246 	for (i = 0; i < wordcnt; i++) {
14247 		/* Clear SK and DI. */
14248 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
14249 		CSR_WRITE(sc, WMREG_EECD, reg);
14250 
14251 		/*
14252 		 * XXX: workaround for a bug in qemu-0.12.x and prior
14253 		 * and Xen.
14254 		 *
14255 		 * We use this workaround only for 82540 because qemu's
14256 		 * e1000 act as 82540.
14257 		 */
14258 		if (sc->sc_type == WM_T_82540) {
14259 			reg |= EECD_SK;
14260 			CSR_WRITE(sc, WMREG_EECD, reg);
14261 			reg &= ~EECD_SK;
14262 			CSR_WRITE(sc, WMREG_EECD, reg);
14263 			CSR_WRITE_FLUSH(sc);
14264 			delay(2);
14265 		}
14266 		/* XXX: end of workaround */
14267 
14268 		/* Set CHIP SELECT. */
14269 		reg |= EECD_CS;
14270 		CSR_WRITE(sc, WMREG_EECD, reg);
14271 		CSR_WRITE_FLUSH(sc);
14272 		delay(2);
14273 
14274 		/* Shift in the READ command. */
14275 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
14276 
14277 		/* Shift in address. */
14278 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
14279 
14280 		/* Shift out the data. */
14281 		wm_eeprom_recvbits(sc, &val, 16);
14282 		data[i] = val & 0xffff;
14283 
14284 		/* Clear CHIP SELECT. */
14285 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
14286 		CSR_WRITE(sc, WMREG_EECD, reg);
14287 		CSR_WRITE_FLUSH(sc);
14288 		delay(2);
14289 	}
14290 
14291 	sc->nvm.release(sc);
14292 	return 0;
14293 }
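/*
 * Callers normally reach this function through sc->nvm.read via
 * wm_nvm_read().  A typical read, as a sketch, fetches the three
 * Ethernet address words:
 *
 *	uint16_t myea[3];
 *
 *	if (wm_nvm_read(sc, NVM_OFF_MACADDR, 3, myea) == 0)
 *		... each word holds two bytes of the MAC address ...
 */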
14294 
14295 /* SPI */
14296 
14297 /*
14298  * Set SPI and FLASH related information from the EECD register.
14299  * For 82541 and 82547, the word size is taken from EEPROM.
14300  */
14301 static int
14302 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
14303 {
14304 	int size;
14305 	uint32_t reg;
14306 	uint16_t data;
14307 
14308 	reg = CSR_READ(sc, WMREG_EECD);
14309 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
14310 
14311 	/* Read the size of NVM from EECD by default */
14312 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
14313 	switch (sc->sc_type) {
14314 	case WM_T_82541:
14315 	case WM_T_82541_2:
14316 	case WM_T_82547:
14317 	case WM_T_82547_2:
14318 		/* Set dummy value to access EEPROM */
14319 		sc->sc_nvm_wordsize = 64;
14320 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
14321 			aprint_error_dev(sc->sc_dev,
14322 			    "%s: failed to read EEPROM size\n", __func__);
14323 		}
14324 		reg = data;
14325 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
14326 		if (size == 0)
14327 			size = 6; /* 64 word size */
14328 		else
14329 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
14330 		break;
14331 	case WM_T_80003:
14332 	case WM_T_82571:
14333 	case WM_T_82572:
14334 	case WM_T_82573: /* SPI case */
14335 	case WM_T_82574: /* SPI case */
14336 	case WM_T_82583: /* SPI case */
14337 		size += NVM_WORD_SIZE_BASE_SHIFT;
14338 		if (size > 14)
14339 			size = 14;
14340 		break;
14341 	case WM_T_82575:
14342 	case WM_T_82576:
14343 	case WM_T_82580:
14344 	case WM_T_I350:
14345 	case WM_T_I354:
14346 	case WM_T_I210:
14347 	case WM_T_I211:
14348 		size += NVM_WORD_SIZE_BASE_SHIFT;
14349 		if (size > 15)
14350 			size = 15;
14351 		break;
14352 	default:
14353 		aprint_error_dev(sc->sc_dev,
14354 		    "%s: unknown device (%d)?\n", __func__, sc->sc_type);
14355 		return -1;
14357 	}
14358 
14359 	sc->sc_nvm_wordsize = 1 << size;
14360 
14361 	return 0;
14362 }
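/*
 * Worked example: the size field is an exponent biased by
 * NVM_WORD_SIZE_BASE_SHIFT, so size = 6 gives 1 << 6 = 64 words
 * (the 82541/82547 fallback above) and the 82575-class maximum of
 * size = 15 gives 1 << 15 = 32768 words.
 */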
14363 
14364 /*
14365  * wm_nvm_ready_spi:
14366  *
14367  *	Wait for a SPI EEPROM to be ready for commands.
14368  */
14369 static int
14370 wm_nvm_ready_spi(struct wm_softc *sc)
14371 {
14372 	uint32_t val;
14373 	int usec;
14374 
14375 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14376 		device_xname(sc->sc_dev), __func__));
14377 
14378 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
14379 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
14380 		wm_eeprom_recvbits(sc, &val, 8);
14381 		if ((val & SPI_SR_RDY) == 0)
14382 			break;
14383 	}
14384 	if (usec >= SPI_MAX_RETRIES) {
14385 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
14386 		return -1;
14387 	}
14388 	return 0;
14389 }
14390 
14391 /*
14392  * wm_nvm_read_spi:
14393  *
14394  *	Read a word from the EEPROM using the SPI protocol.
14395  */
14396 static int
14397 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
14398 {
14399 	uint32_t reg, val;
14400 	int i;
14401 	uint8_t opc;
14402 	int rv;
14403 
14404 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14405 		device_xname(sc->sc_dev), __func__));
14406 
14407 	rv = sc->nvm.acquire(sc);
14408 	if (rv != 0)
14409 		return rv;
14410 
14411 	/* Clear SK and CS. */
14412 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
14413 	CSR_WRITE(sc, WMREG_EECD, reg);
14414 	CSR_WRITE_FLUSH(sc);
14415 	delay(2);
14416 
14417 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
14418 		goto out;
14419 
14420 	/* Toggle CS to flush commands. */
14421 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
14422 	CSR_WRITE_FLUSH(sc);
14423 	delay(2);
14424 	CSR_WRITE(sc, WMREG_EECD, reg);
14425 	CSR_WRITE_FLUSH(sc);
14426 	delay(2);
14427 
14428 	opc = SPI_OPC_READ;
14429 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
14430 		opc |= SPI_OPC_A8;
14431 
14432 	wm_eeprom_sendbits(sc, opc, 8);
14433 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
14434 
14435 	for (i = 0; i < wordcnt; i++) {
14436 		wm_eeprom_recvbits(sc, &val, 16);
14437 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
14438 	}
14439 
14440 	/* Raise CS and clear SK. */
14441 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
14442 	CSR_WRITE(sc, WMREG_EECD, reg);
14443 	CSR_WRITE_FLUSH(sc);
14444 	delay(2);
14445 
14446 out:
14447 	sc->nvm.release(sc);
14448 	return rv;
14449 }
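/*
 * Note on the byte swap above: wm_eeprom_recvbits() packs the
 * first-received bits into the most significant end of val, while
 * the SPI EEPROM streams the low-order byte of each word first,
 * so e.g. val = 0xabcd is stored as data[i] = 0xcdab.
 */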
14450 
14451 /* Reading via the EERD register */
14452 
14453 static int
14454 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
14455 {
14456 	uint32_t attempts = 100000;
14457 	uint32_t i, reg = 0;
14458 	int32_t done = -1;
14459 
14460 	for (i = 0; i < attempts; i++) {
14461 		reg = CSR_READ(sc, rw);
14462 
14463 		if (reg & EERD_DONE) {
14464 			done = 0;
14465 			break;
14466 		}
14467 		delay(5);
14468 	}
14469 
14470 	return done;
14471 }
14472 
14473 static int
14474 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
14475 {
14476 	int i, eerd = 0;
14477 	int rv;
14478 
14479 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14480 		device_xname(sc->sc_dev), __func__));
14481 
14482 	rv = sc->nvm.acquire(sc);
14483 	if (rv != 0)
14484 		return rv;
14485 
14486 	for (i = 0; i < wordcnt; i++) {
14487 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
14488 		CSR_WRITE(sc, WMREG_EERD, eerd);
14489 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
14490 		if (rv != 0) {
14491 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
14492 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
14493 			break;
14494 		}
14495 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
14496 	}
14497 
14498 	sc->nvm.release(sc);
14499 	return rv;
14500 }
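/*
 * The EERD handshake used above, reduced to a minimal sketch:
 *
 *	CSR_WRITE(sc, WMREG_EERD, (word << EERD_ADDR_SHIFT) | EERD_START);
 *	... poll until CSR_READ(sc, WMREG_EERD) has EERD_DONE set ...
 *	data = CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT;
 */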
14501 
14502 /* Flash */
14503 
14504 static int
14505 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
14506 {
14507 	uint32_t eecd;
14508 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
14509 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
14510 	uint32_t nvm_dword = 0;
14511 	uint8_t sig_byte = 0;
14512 	int rv;
14513 
14514 	switch (sc->sc_type) {
14515 	case WM_T_PCH_SPT:
14516 	case WM_T_PCH_CNP:
14517 	case WM_T_PCH_TGP:
14518 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
14519 		act_offset = ICH_NVM_SIG_WORD * 2;
14520 
14521 		/* Set bank to 0 in case flash read fails. */
14522 		*bank = 0;
14523 
14524 		/* Check bank 0 */
14525 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
14526 		if (rv != 0)
14527 			return rv;
14528 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
14529 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
14530 			*bank = 0;
14531 			return 0;
14532 		}
14533 
14534 		/* Check bank 1 */
14535 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
14536 		    &nvm_dword);
14537 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
14538 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
14539 			*bank = 1;
14540 			return 0;
14541 		}
14542 		aprint_error_dev(sc->sc_dev,
14543 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
14544 		return -1;
14545 	case WM_T_ICH8:
14546 	case WM_T_ICH9:
14547 		eecd = CSR_READ(sc, WMREG_EECD);
14548 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
14549 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
14550 			return 0;
14551 		}
14552 		/* FALLTHROUGH */
14553 	default:
14554 		/* Default to 0 */
14555 		*bank = 0;
14556 
14557 		/* Check bank 0 */
14558 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
14559 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
14560 			*bank = 0;
14561 			return 0;
14562 		}
14563 
14564 		/* Check bank 1 */
14565 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
14566 		    &sig_byte);
14567 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
14568 			*bank = 1;
14569 			return 0;
14570 		}
14571 	}
14572 
14573 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
14574 		device_xname(sc->sc_dev)));
14575 	return -1;
14576 }
14577 
14578 /******************************************************************************
14579  * This function does initial flash setup so that a new read/write/erase cycle
14580  * can be started.
14581  *
14582  * sc - The pointer to the hw structure
14583  ****************************************************************************/
14584 static int32_t
14585 wm_ich8_cycle_init(struct wm_softc *sc)
14586 {
14587 	uint16_t hsfsts;
14588 	int32_t error = 1;
14589 	int32_t i     = 0;
14590 
14591 	if (sc->sc_type >= WM_T_PCH_SPT)
14592 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
14593 	else
14594 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
14595 
14596 	/* Check the Flash Descriptor Valid bit in Hw status */
14597 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
14598 		return error;
14599 
14600 	/* Clear FCERR in Hw status by writing 1 */
14601 	/* Clear DAEL in Hw status by writing a 1 */
14602 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
14603 
14604 	if (sc->sc_type >= WM_T_PCH_SPT)
14605 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
14606 	else
14607 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
14608 
14609 	/*
14610 	 * Ideally the hardware would provide either an SPI cycle-in-progress
14611 	 * bit to check before starting a new cycle, or an FDONE bit that is
14612 	 * set to 1 after hardware reset, which could then be used to tell
14613 	 * whether a cycle is in progress or has completed.  We should also
14614 	 * have some software semaphore mechanism guarding FDONE or the
14615 	 * cycle-in-progress bit, so that accesses to those bits by two
14616 	 * threads are serialized, i.e. so that two threads cannot start a
14617 	 * cycle at the same time.
14618 	 */
14619 
14620 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
14621 		/*
14622 		 * There is no cycle running at present, so we can start a
14623 		 * cycle
14624 		 */
14625 
14626 		/* Begin by setting Flash Cycle Done. */
14627 		hsfsts |= HSFSTS_DONE;
14628 		if (sc->sc_type >= WM_T_PCH_SPT)
14629 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14630 			    hsfsts & 0xffffUL);
14631 		else
14632 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
14633 		error = 0;
14634 	} else {
14635 		/*
14636 		 * Otherwise poll for some time so the current cycle has a
14637 		 * chance to end before giving up.
14638 		 */
14639 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
14640 			if (sc->sc_type >= WM_T_PCH_SPT)
14641 				hsfsts = ICH8_FLASH_READ32(sc,
14642 				    ICH_FLASH_HSFSTS) & 0xffffUL;
14643 			else
14644 				hsfsts = ICH8_FLASH_READ16(sc,
14645 				    ICH_FLASH_HSFSTS);
14646 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
14647 				error = 0;
14648 				break;
14649 			}
14650 			delay(1);
14651 		}
14652 		if (error == 0) {
14653 			/*
14654 			 * The previous cycle completed before the timeout;
14655 			 * now set the Flash Cycle Done.
14656 			 */
14657 			hsfsts |= HSFSTS_DONE;
14658 			if (sc->sc_type >= WM_T_PCH_SPT)
14659 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14660 				    hsfsts & 0xffffUL);
14661 			else
14662 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
14663 				    hsfsts);
14664 		}
14665 	}
14666 	return error;
14667 }
14668 
14669 /******************************************************************************
14670  * This function starts a flash cycle and waits for its completion
14671  *
14672  * sc - The pointer to the hw structure
14673  ****************************************************************************/
14674 static int32_t
14675 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
14676 {
14677 	uint16_t hsflctl;
14678 	uint16_t hsfsts;
14679 	int32_t error = 1;
14680 	uint32_t i = 0;
14681 
14682 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
14683 	if (sc->sc_type >= WM_T_PCH_SPT)
14684 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
14685 	else
14686 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
14687 	hsflctl |= HSFCTL_GO;
14688 	if (sc->sc_type >= WM_T_PCH_SPT)
14689 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14690 		    (uint32_t)hsflctl << 16);
14691 	else
14692 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
14693 
14694 	/* Wait till FDONE bit is set to 1 */
14695 	do {
14696 		if (sc->sc_type >= WM_T_PCH_SPT)
14697 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
14698 			    & 0xffffUL;
14699 		else
14700 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
14701 		if (hsfsts & HSFSTS_DONE)
14702 			break;
14703 		delay(1);
14704 		i++;
14705 	} while (i < timeout);
14706 	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
14707 		error = 0;
14708 
14709 	return error;
14710 }
14711 
14712 /******************************************************************************
14713  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
14714  *
14715  * sc - The pointer to the hw structure
14716  * index - The index of the byte or word to read.
14717  * size - Size of data to read, 1=byte 2=word, 4=dword
14718  * data - Pointer to the word to store the value read.
14719  *****************************************************************************/
14720 static int32_t
14721 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
14722     uint32_t size, uint32_t *data)
14723 {
14724 	uint16_t hsfsts;
14725 	uint16_t hsflctl;
14726 	uint32_t flash_linear_address;
14727 	uint32_t flash_data = 0;
14728 	int32_t error = 1;
14729 	int32_t count = 0;
14730 
14731 	if (size < 1 || size > 4 || data == NULL ||
14732 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
14733 		return error;
14734 
14735 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
14736 	    sc->sc_ich8_flash_base;
14737 
14738 	do {
14739 		delay(1);
14740 		/* Steps */
14741 		error = wm_ich8_cycle_init(sc);
14742 		if (error)
14743 			break;
14744 
14745 		if (sc->sc_type >= WM_T_PCH_SPT)
14746 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
14747 			    >> 16;
14748 		else
14749 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
14750 		/* The BCOUNT field holds size - 1 (0 = 1 byte, 3 = 4 bytes) */
14751 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
14752 		    & HSFCTL_BCOUNT_MASK;
14753 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
14754 		if (sc->sc_type >= WM_T_PCH_SPT) {
14755 			/*
14756 			 * In SPT, this register is in LAN memory space, not
14757 			 * flash.  Therefore, only 32 bit access is supported.
14758 			 */
14759 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14760 			    (uint32_t)hsflctl << 16);
14761 		} else
14762 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
14763 
14764 		/*
14765 		 * Write the last 24 bits of index into Flash Linear address
14766 		 * field in Flash Address
14767 		 */
14768 		/* TODO: TBD maybe check the index against the size of flash */
14769 
14770 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
14771 
14772 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
14773 
14774 		/*
14775 		 * If FCERR is set, clear it and retry the whole sequence
14776 		 * up to ICH_FLASH_CYCLE_REPEAT_COUNT more times; otherwise
14777 		 * read the result (least significant byte first) from
14778 		 * Flash Data0.
14779 		 */
14780 		if (error == 0) {
14781 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
14782 			if (size == 1)
14783 				*data = (uint8_t)(flash_data & 0x000000FF);
14784 			else if (size == 2)
14785 				*data = (uint16_t)(flash_data & 0x0000FFFF);
14786 			else if (size == 4)
14787 				*data = (uint32_t)flash_data;
14788 			break;
14789 		} else {
14790 			/*
14791 			 * If we've gotten here, then things are probably
14792 			 * completely hosed, but if the error condition is
14793 			 * detected, it won't hurt to give it another try...
14794 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
14795 			 */
14796 			if (sc->sc_type >= WM_T_PCH_SPT)
14797 				hsfsts = ICH8_FLASH_READ32(sc,
14798 				    ICH_FLASH_HSFSTS) & 0xffffUL;
14799 			else
14800 				hsfsts = ICH8_FLASH_READ16(sc,
14801 				    ICH_FLASH_HSFSTS);
14802 
14803 			if (hsfsts & HSFSTS_ERR) {
14804 				/* Repeat for some time before giving up. */
14805 				continue;
14806 			} else if ((hsfsts & HSFSTS_DONE) == 0)
14807 				break;
14808 		}
14809 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
14810 
14811 	return error;
14812 }
14813 
14814 /******************************************************************************
14815  * Reads a single byte from the NVM using the ICH8 flash access registers.
14816  *
14817  * sc - pointer to the wm_softc structure
14818  * index - The index of the byte to read.
14819  * data - Pointer to a byte to store the value read.
14820  *****************************************************************************/
14821 static int32_t
14822 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
14823 {
14824 	int32_t status;
14825 	uint32_t word = 0;
14826 
14827 	status = wm_read_ich8_data(sc, index, 1, &word);
14828 	if (status == 0)
14829 		*data = (uint8_t)word;
14830 	else
14831 		*data = 0;
14832 
14833 	return status;
14834 }
14835 
14836 /******************************************************************************
14837  * Reads a word from the NVM using the ICH8 flash access registers.
14838  *
14839  * sc - pointer to the wm_softc structure
14840  * index - The starting byte index of the word to read.
14841  * data - Pointer to a word to store the value read.
14842  *****************************************************************************/
14843 static int32_t
14844 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
14845 {
14846 	int32_t status;
14847 	uint32_t word = 0;
14848 
14849 	status = wm_read_ich8_data(sc, index, 2, &word);
14850 	if (status == 0)
14851 		*data = (uint16_t)word;
14852 	else
14853 		*data = 0;
14854 
14855 	return status;
14856 }
14857 
14858 /******************************************************************************
14859  * Reads a dword from the NVM using the ICH8 flash access registers.
14860  *
14861  * sc - pointer to the wm_softc structure
14862  * index - The starting byte index of the dword to read.
14863  * data - Pointer to a dword to store the value read.
14864  *****************************************************************************/
14865 static int32_t
14866 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
14867 {
14868 	int32_t status;
14869 
14870 	status = wm_read_ich8_data(sc, index, 4, data);
14871 	return status;
14872 }
14873 
14874 /******************************************************************************
14875  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
14876  * register.
14877  *
14878  * sc - Struct containing variables accessed by shared code
14879  * offset - offset of word in the EEPROM to read
14880  * data - word read from the EEPROM
14881  * words - number of words to read
14882  *****************************************************************************/
14883 static int
14884 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
14885 {
14886 	int rv;
14887 	uint32_t flash_bank = 0;
14888 	uint32_t act_offset = 0;
14889 	uint32_t bank_offset = 0;
14890 	uint16_t word = 0;
14891 	uint16_t i = 0;
14892 
14893 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14894 		device_xname(sc->sc_dev), __func__));
14895 
14896 	rv = sc->nvm.acquire(sc);
14897 	if (rv != 0)
14898 		return rv;
14899 
14900 	/*
14901 	 * We need to know which is the valid flash bank.  In the event
14902 	 * that we didn't allocate eeprom_shadow_ram, we may not be
14903 	 * managing flash_bank. So it cannot be trusted and needs
14904 	 * to be updated with each read.
14905 	 */
14906 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
14907 	if (rv) {
14908 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
14909 			device_xname(sc->sc_dev)));
14910 		flash_bank = 0;
14911 	}
14912 
14913 	/*
14914 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
14915 	 * size
14916 	 */
14917 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
14918 
14919 	for (i = 0; i < words; i++) {
14920 		/* The NVM part needs a byte offset, hence * 2 */
14921 		act_offset = bank_offset + ((offset + i) * 2);
14922 		rv = wm_read_ich8_word(sc, act_offset, &word);
14923 		if (rv) {
14924 			aprint_error_dev(sc->sc_dev,
14925 			    "%s: failed to read NVM\n", __func__);
14926 			break;
14927 		}
14928 		data[i] = word;
14929 	}
14930 
14931 	sc->nvm.release(sc);
14932 	return rv;
14933 }
14934 
14935 /******************************************************************************
14936  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
14937  * register.
14938  *
14939  * sc - Struct containing variables accessed by shared code
14940  * offset - offset of word in the EEPROM to read
14941  * data - word read from the EEPROM
14942  * words - number of words to read
14943  *****************************************************************************/
14944 static int
14945 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
14946 {
14947 	int	 rv;
14948 	uint32_t flash_bank = 0;
14949 	uint32_t act_offset = 0;
14950 	uint32_t bank_offset = 0;
14951 	uint32_t dword = 0;
14952 	uint16_t i = 0;
14953 
14954 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14955 		device_xname(sc->sc_dev), __func__));
14956 
14957 	rv = sc->nvm.acquire(sc);
14958 	if (rv != 0)
14959 		return rv;
14960 
14961 	/*
14962 	 * We need to know which is the valid flash bank.  In the event
14963 	 * that we didn't allocate eeprom_shadow_ram, we may not be
14964 	 * managing flash_bank. So it cannot be trusted and needs
14965 	 * to be updated with each read.
14966 	 */
14967 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
14968 	if (rv) {
14969 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
14970 			device_xname(sc->sc_dev)));
14971 		flash_bank = 0;
14972 	}
14973 
14974 	/*
14975 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
14976 	 * size
14977 	 */
14978 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
14979 
14980 	for (i = 0; i < words; i++) {
14981 		/* The NVM part needs a byte offset, hence * 2 */
14982 		act_offset = bank_offset + ((offset + i) * 2);
14983 		/* but we must read dword aligned, so mask ... */
14984 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
14985 		if (rv) {
14986 			aprint_error_dev(sc->sc_dev,
14987 			    "%s: failed to read NVM\n", __func__);
14988 			break;
14989 		}
14990 		/* ... and pick out low or high word */
14991 		if ((act_offset & 0x2) == 0)
14992 			data[i] = (uint16_t)(dword & 0xFFFF);
14993 		else
14994 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
14995 	}
14996 
14997 	sc->nvm.release(sc);
14998 	return rv;
14999 }
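/*
 * Worked example of the alignment handling above: word offset 3 in
 * bank 0 gives act_offset = 6; masking with ~0x3 reads the dword at
 * byte offset 4, and since (6 & 0x2) != 0 the wanted word is its
 * upper half.
 */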
15000 
15001 /* iNVM */
15002 
15003 static int
15004 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
15005 {
15006 	int32_t	 rv = -1;	/* Fail unless the word is found below */
15007 	uint32_t invm_dword;
15008 	uint16_t i;
15009 	uint8_t record_type, word_address;
15010 
15011 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
15012 		device_xname(sc->sc_dev), __func__));
15013 
15014 	for (i = 0; i < INVM_SIZE; i++) {
15015 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
15016 		/* Get record type */
15017 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
15018 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
15019 			break;
15020 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
15021 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
15022 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
15023 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
15024 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
15025 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
15026 			if (word_address == address) {
15027 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
15028 				rv = 0;
15029 				break;
15030 			}
15031 		}
15032 	}
15033 
15034 	return rv;
15035 }
15036 
15037 static int
15038 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
15039 {
15040 	int i, rv;
15041 
15042 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
15043 		device_xname(sc->sc_dev), __func__));
15044 
15045 	rv = sc->nvm.acquire(sc);
15046 	if (rv != 0)
15047 		return rv;
15048 
15049 	for (i = 0; i < words; i++) {
15050 		switch (offset + i) {
15051 		case NVM_OFF_MACADDR:
15052 		case NVM_OFF_MACADDR1:
15053 		case NVM_OFF_MACADDR2:
15054 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
15055 			if (rv != 0) {
15056 				data[i] = 0xffff;
15057 				rv = -1;
15058 			}
15059 			break;
15060 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
15061 			rv = wm_nvm_read_word_invm(sc, offset, data);
15062 			if (rv != 0) {
15063 				*data = INVM_DEFAULT_AL;
15064 				rv = 0;
15065 			}
15066 			break;
15067 		case NVM_OFF_CFG2:
15068 			rv = wm_nvm_read_word_invm(sc, offset, data);
15069 			if (rv != 0) {
15070 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
15071 				rv = 0;
15072 			}
15073 			break;
15074 		case NVM_OFF_CFG4:
15075 			rv = wm_nvm_read_word_invm(sc, offset, data);
15076 			if (rv != 0) {
15077 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
15078 				rv = 0;
15079 			}
15080 			break;
15081 		case NVM_OFF_LED_1_CFG:
15082 			rv = wm_nvm_read_word_invm(sc, offset, data);
15083 			if (rv != 0) {
15084 				*data = NVM_LED_1_CFG_DEFAULT_I211;
15085 				rv = 0;
15086 			}
15087 			break;
15088 		case NVM_OFF_LED_0_2_CFG:
15089 			rv = wm_nvm_read_word_invm(sc, offset, data);
15090 			if (rv != 0) {
15091 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
15092 				rv = 0;
15093 			}
15094 			break;
15095 		case NVM_OFF_ID_LED_SETTINGS:
15096 			rv = wm_nvm_read_word_invm(sc, offset, data);
15097 			if (rv != 0) {
15098 				*data = ID_LED_RESERVED_FFFF;
15099 				rv = 0;
15100 			}
15101 			break;
15102 		default:
15103 			DPRINTF(sc, WM_DEBUG_NVM,
15104 			    ("NVM word 0x%02x is not mapped.\n", offset));
15105 			*data = NVM_RESERVED_WORD;
15106 			break;
15107 		}
15108 	}
15109 
15110 	sc->nvm.release(sc);
15111 	return rv;
15112 }
15113 
15114 /* Lock, detecting NVM type, validate checksum, version and read */
15115 
15116 static int
15117 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
15118 {
15119 	uint32_t eecd = 0;
15120 
15121 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
15122 	    || sc->sc_type == WM_T_82583) {
15123 		eecd = CSR_READ(sc, WMREG_EECD);
15124 
15125 		/* Isolate bits 15 & 16 */
15126 		eecd = ((eecd >> 15) & 0x03);
15127 
15128 		/* If both bits are set, device is Flash type */
15129 		if (eecd == 0x03)
15130 			return 0;
15131 	}
15132 	return 1;
15133 }
15134 
15135 static int
15136 wm_nvm_flash_presence_i210(struct wm_softc *sc)
15137 {
15138 	uint32_t eec;
15139 
15140 	eec = CSR_READ(sc, WMREG_EEC);
15141 	if ((eec & EEC_FLASH_DETECTED) != 0)
15142 		return 1;
15143 
15144 	return 0;
15145 }
15146 
15147 /*
15148  * wm_nvm_validate_checksum
15149  *
15150  * The checksum is defined as the sum of the first 64 (16 bit) words.
15151  */
15152 static int
15153 wm_nvm_validate_checksum(struct wm_softc *sc)
15154 {
15155 	uint16_t checksum;
15156 	uint16_t eeprom_data;
15157 #ifdef WM_DEBUG
15158 	uint16_t csum_wordaddr, valid_checksum;
15159 #endif
15160 	int i;
15161 
15162 	checksum = 0;
15163 
15164 	/* Don't check for I211 */
15165 	if (sc->sc_type == WM_T_I211)
15166 		return 0;
15167 
15168 #ifdef WM_DEBUG
15169 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT) ||
15170 	    (sc->sc_type == WM_T_PCH_CNP) || (sc->sc_type == WM_T_PCH_TGP)) {
15171 		csum_wordaddr = NVM_OFF_COMPAT;
15172 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
15173 	} else {
15174 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
15175 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
15176 	}
15177 
15178 	/* Dump EEPROM image for debug */
15179 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
15180 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
15181 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
15182 		/* XXX PCH_SPT? */
15183 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
15184 		if ((eeprom_data & valid_checksum) == 0)
15185 			DPRINTF(sc, WM_DEBUG_NVM,
15186 			    ("%s: NVM need to be updated (%04x != %04x)\n",
15187 				device_xname(sc->sc_dev), eeprom_data,
15188 				valid_checksum));
15189 	}
15190 
15191 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
15192 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
15193 		for (i = 0; i < NVM_SIZE; i++) {
15194 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
15195 				printf("XXXX ");
15196 			else
15197 				printf("%04hx ", eeprom_data);
15198 			if (i % 8 == 7)
15199 				printf("\n");
15200 		}
15201 	}
15202 
15203 #endif /* WM_DEBUG */
15204 
15205 	for (i = 0; i < NVM_SIZE; i++) {
15206 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
15207 			return -1;
15208 		checksum += eeprom_data;
15209 	}
15210 
15211 	if (checksum != (uint16_t) NVM_CHECKSUM) {
15212 #ifdef WM_DEBUG
15213 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
15214 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
15215 #endif
15216 	}
15217 
15218 	return 0;
15219 }
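/*
 * Worked example: the sum of all NVM_SIZE (64) words, modulo 0x10000,
 * must equal NVM_CHECKSUM (0xbaba); the vendor stores a compensating
 * value (conventionally in the last of those words) to make the sum
 * come out right.
 */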
15220 
15221 static void
15222 wm_nvm_version_invm(struct wm_softc *sc)
15223 {
15224 	uint32_t dword;
15225 
15226 	/*
15227 	 * Linux's code to decode the version is very strange, so we
15228 	 * don't follow that algorithm and just use word 61 as the
15229 	 * document describes.  Perhaps it's not perfect though...
15230 	 *
15231 	 * Example:
15232 	 *
15233 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
15234 	 */
15235 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
15236 	dword = __SHIFTOUT(dword, INVM_VER_1);
15237 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
15238 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
15239 }
15240 
15241 static void
15242 wm_nvm_version(struct wm_softc *sc)
15243 {
15244 	uint16_t major, minor, build, patch;
15245 	uint16_t uid0, uid1;
15246 	uint16_t nvm_data;
15247 	uint16_t off;
15248 	bool check_version = false;
15249 	bool check_optionrom = false;
15250 	bool have_build = false;
15251 	bool have_uid = true;
15252 
15253 	/*
15254 	 * Version format:
15255 	 *
15256 	 * XYYZ
15257 	 * X0YZ
15258 	 * X0YY
15259 	 *
15260 	 * Example:
15261 	 *
15262 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
15263 	 *	82571	0x50a6	5.10.6?
15264 	 *	82572	0x506a	5.6.10?
15265 	 *	82572EI	0x5069	5.6.9?
15266 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
15267 	 *		0x2013	2.1.3?
15268 	 *	82583	0x10a0	1.10.0? (document says it's default value)
15269 	 * ICH8+82567	0x0040	0.4.0?
15270 	 * ICH9+82566	0x1040	1.4.0?
15271 	 *ICH10+82567	0x0043	0.4.3?
15272 	 *  PCH+82577	0x00c1	0.12.1?
15273 	 * PCH2+82579	0x00d3	0.13.3?
15274 	 *		0x00d4	0.13.4?
15275 	 *  LPT+I218	0x0023	0.2.3?
15276 	 *  SPT+I219	0x0084	0.8.4?
15277 	 *  CNP+I219	0x0054	0.5.4?
15278 	 */
15279 
15280 	/*
15281 	 * XXX
15282 	 * Qemu's e1000e (82574L) emulation has an SPI ROM of only 64
15283 	 * words.  I've never seen real 82574 hardware with such a small one.
15284 	 */
15285 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
15286 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
15287 		have_uid = false;
15288 
15289 	switch (sc->sc_type) {
15290 	case WM_T_82571:
15291 	case WM_T_82572:
15292 	case WM_T_82574:
15293 	case WM_T_82583:
15294 		check_version = true;
15295 		check_optionrom = true;
15296 		have_build = true;
15297 		break;
15298 	case WM_T_ICH8:
15299 	case WM_T_ICH9:
15300 	case WM_T_ICH10:
15301 	case WM_T_PCH:
15302 	case WM_T_PCH2:
15303 	case WM_T_PCH_LPT:
15304 	case WM_T_PCH_SPT:
15305 	case WM_T_PCH_CNP:
15306 	case WM_T_PCH_TGP:
15307 		check_version = true;
15308 		have_build = true;
15309 		have_uid = false;
15310 		break;
15311 	case WM_T_82575:
15312 	case WM_T_82576:
15313 	case WM_T_82580:
15314 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
15315 			check_version = true;
15316 		break;
15317 	case WM_T_I211:
15318 		wm_nvm_version_invm(sc);
15319 		have_uid = false;
15320 		goto printver;
15321 	case WM_T_I210:
15322 		if (!wm_nvm_flash_presence_i210(sc)) {
15323 			wm_nvm_version_invm(sc);
15324 			have_uid = false;
15325 			goto printver;
15326 		}
15327 		/* FALLTHROUGH */
15328 	case WM_T_I350:
15329 	case WM_T_I354:
15330 		check_version = true;
15331 		check_optionrom = true;
15332 		break;
15333 	default:
15334 		return;
15335 	}
15336 	if (check_version
15337 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
15338 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
15339 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
15340 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
15341 			build = nvm_data & NVM_BUILD_MASK;
15342 			have_build = true;
15343 		} else
15344 			minor = nvm_data & 0x00ff;
15345 
15346 		/* Decimal */
15347 		minor = (minor / 16) * 10 + (minor % 16);
15348 		sc->sc_nvm_ver_major = major;
15349 		sc->sc_nvm_ver_minor = minor;
15350 
15351 printver:
15352 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
15353 		    sc->sc_nvm_ver_minor);
15354 		if (have_build) {
15355 			sc->sc_nvm_ver_build = build;
15356 			aprint_verbose(".%d", build);
15357 		}
15358 	}
15359 
15360 	/* Assume the Option ROM area is above NVM_SIZE */
15361 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
15362 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
15363 		/* Option ROM Version */
15364 		if ((off != 0x0000) && (off != 0xffff)) {
15365 			int rv;
15366 			uint16_t oid0, oid1;
15367 
15368 			off += NVM_COMBO_VER_OFF;
15369 			rv = wm_nvm_read(sc, off + 1, 1, &oid1);
15370 			rv |= wm_nvm_read(sc, off, 1, &oid0);
15371 			if ((rv == 0) && (oid0 != 0) && (oid0 != 0xffff)
15372 			    && (oid1 != 0) && (oid1 != 0xffff)) {
15373 				/* 16bits */
15374 				major = oid0 >> 8;
15375 				build = (oid0 << 8) | (oid1 >> 8);
15376 				patch = oid1 & 0x00ff;
15377 				aprint_verbose(", option ROM Version %d.%d.%d",
15378 				    major, build, patch);
15379 			}
15380 		}
15381 	}
15382 
15383 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
15384 		aprint_verbose(", Image Unique ID %08x",
15385 		    ((uint32_t)uid1 << 16) | uid0);
15386 }
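/*
 * Worked example of the decode above, assuming NVM_MAJOR_MASK selects
 * the top nibble, NVM_MINOR_MASK the middle byte and NVM_BUILD_MASK
 * the low nibble: nvm_data = 0x50a2 gives major = 5, build = 2 and
 * minor = 0x0a -> (0x0a / 16) * 10 + (0x0a % 16) = 10, printed as
 * "5.10.2" (the first 82571 row in the table above).
 */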
15387 
15388 /*
15389  * wm_nvm_read:
15390  *
15391  *	Read data from the serial EEPROM.
15392  */
15393 static int
15394 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
15395 {
15396 	int rv;
15397 
15398 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
15399 		device_xname(sc->sc_dev), __func__));
15400 
15401 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
15402 		return -1;
15403 
15404 	rv = sc->nvm.read(sc, word, wordcnt, data);
15405 
15406 	return rv;
15407 }
15408 
15409 /*
15410  * Hardware semaphores.
15411  * Very complex...
15412  */
15413 
15414 static int
15415 wm_get_null(struct wm_softc *sc)
15416 {
15417 
15418 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15419 		device_xname(sc->sc_dev), __func__));
15420 	return 0;
15421 }
15422 
15423 static void
15424 wm_put_null(struct wm_softc *sc)
15425 {
15426 
15427 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15428 		device_xname(sc->sc_dev), __func__));
15429 	return;
15430 }
15431 
15432 static int
15433 wm_get_eecd(struct wm_softc *sc)
15434 {
15435 	uint32_t reg;
15436 	int x;
15437 
15438 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
15439 		device_xname(sc->sc_dev), __func__));
15440 
15441 	reg = CSR_READ(sc, WMREG_EECD);
15442 
15443 	/* Request EEPROM access. */
15444 	reg |= EECD_EE_REQ;
15445 	CSR_WRITE(sc, WMREG_EECD, reg);
15446 
15447 	/* ...and wait for it to be granted. */
15448 	for (x = 0; x < 1000; x++) {
15449 		reg = CSR_READ(sc, WMREG_EECD);
15450 		if (reg & EECD_EE_GNT)
15451 			break;
15452 		delay(5);
15453 	}
15454 	if ((reg & EECD_EE_GNT) == 0) {
15455 		aprint_error_dev(sc->sc_dev,
15456 		    "could not acquire EEPROM GNT\n");
15457 		reg &= ~EECD_EE_REQ;
15458 		CSR_WRITE(sc, WMREG_EECD, reg);
15459 		return -1;
15460 	}
15461 
15462 	return 0;
15463 }
15464 
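/*
 * wm_nvm_eec_clock_raise, wm_nvm_eec_clock_lower:
 *
 *	Raise/lower the EEPROM clock (EECD.SK) for bit-banged access.
 *	SPI EEPROMs only need a 1us delay; Microwire needs 50us.
 */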
15465 static void
15466 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
15467 {
15468 
15469 	*eecd |= EECD_SK;
15470 	CSR_WRITE(sc, WMREG_EECD, *eecd);
15471 	CSR_WRITE_FLUSH(sc);
15472 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
15473 		delay(1);
15474 	else
15475 		delay(50);
15476 }
15477 
15478 static void
15479 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
15480 {
15481 
15482 	*eecd &= ~EECD_SK;
15483 	CSR_WRITE(sc, WMREG_EECD, *eecd);
15484 	CSR_WRITE_FLUSH(sc);
15485 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
15486 		delay(1);
15487 	else
15488 		delay(50);
15489 }
15490 
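/*
 * wm_put_eecd:
 *
 *	Deselect the EEPROM and release direct EEPROM access by
 *	clearing EECD.EE_REQ.
 */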
15491 static void
15492 wm_put_eecd(struct wm_softc *sc)
15493 {
15494 	uint32_t reg;
15495 
15496 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15497 		device_xname(sc->sc_dev), __func__));
15498 
15499 	/* Stop nvm */
15500 	reg = CSR_READ(sc, WMREG_EECD);
15501 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
15502 		/* Pull CS high */
15503 		reg |= EECD_CS;
15504 		wm_nvm_eec_clock_lower(sc, &reg);
15505 	} else {
15506 		/* CS on Microwire is active-high */
15507 		reg &= ~(EECD_CS | EECD_DI);
15508 		CSR_WRITE(sc, WMREG_EECD, reg);
15509 		wm_nvm_eec_clock_raise(sc, &reg);
15510 		wm_nvm_eec_clock_lower(sc, &reg);
15511 	}
15512 
15513 	reg = CSR_READ(sc, WMREG_EECD);
15514 	reg &= ~EECD_EE_REQ;
15515 	CSR_WRITE(sc, WMREG_EECD, reg);
15516 
15517 	return;
15518 }
15519 
15520 /*
15521  * Get hardware semaphore.
15522  * Same as e1000_get_hw_semaphore_generic()
15523  */
15524 static int
15525 wm_get_swsm_semaphore(struct wm_softc *sc)
15526 {
15527 	int32_t timeout;
15528 	uint32_t swsm;
15529 
15530 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15531 		device_xname(sc->sc_dev), __func__));
15532 	KASSERT(sc->sc_nvm_wordsize > 0);
15533 
15534 retry:
15535 	/* Get the SW semaphore. */
15536 	timeout = sc->sc_nvm_wordsize + 1;
15537 	while (timeout) {
15538 		swsm = CSR_READ(sc, WMREG_SWSM);
15539 
15540 		if ((swsm & SWSM_SMBI) == 0)
15541 			break;
15542 
15543 		delay(50);
15544 		timeout--;
15545 	}
15546 
15547 	if (timeout == 0) {
15548 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
15549 			/*
15550 			 * In rare circumstances, the SW semaphore may already
15551 			 * be held unintentionally. Clear the semaphore once
15552 			 * before giving up.
15553 			 */
15554 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
15555 			wm_put_swsm_semaphore(sc);
15556 			goto retry;
15557 		}
15558 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
15559 		return -1;
15560 	}
15561 
15562 	/* Get the FW semaphore. */
15563 	timeout = sc->sc_nvm_wordsize + 1;
15564 	while (timeout) {
15565 		swsm = CSR_READ(sc, WMREG_SWSM);
15566 		swsm |= SWSM_SWESMBI;
15567 		CSR_WRITE(sc, WMREG_SWSM, swsm);
15568 		/* If we managed to set the bit we got the semaphore. */
15569 		swsm = CSR_READ(sc, WMREG_SWSM);
15570 		if (swsm & SWSM_SWESMBI)
15571 			break;
15572 
15573 		delay(50);
15574 		timeout--;
15575 	}
15576 
15577 	if (timeout == 0) {
15578 		aprint_error_dev(sc->sc_dev,
15579 		    "could not acquire SWSM SWESMBI\n");
15580 		/* Release semaphores */
15581 		wm_put_swsm_semaphore(sc);
15582 		return -1;
15583 	}
15584 	return 0;
15585 }
15586 
15587 /*
15588  * Put hardware semaphore.
15589  * Same as e1000_put_hw_semaphore_generic()
15590  */
15591 static void
15592 wm_put_swsm_semaphore(struct wm_softc *sc)
15593 {
15594 	uint32_t swsm;
15595 
15596 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15597 		device_xname(sc->sc_dev), __func__));
15598 
15599 	swsm = CSR_READ(sc, WMREG_SWSM);
15600 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
15601 	CSR_WRITE(sc, WMREG_SWSM, swsm);
15602 }
15603 
15604 /*
15605  * Get SW/FW semaphore.
15606  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
15607  */
15608 static int
15609 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
15610 {
15611 	uint32_t swfw_sync;
15612 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
15613 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
15614 	int timeout;
15615 
15616 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15617 		device_xname(sc->sc_dev), __func__));
15618 
15619 	if (sc->sc_type == WM_T_80003)
15620 		timeout = 50;
15621 	else
15622 		timeout = 200;
15623 
15624 	while (timeout) {
15625 		if (wm_get_swsm_semaphore(sc)) {
15626 			aprint_error_dev(sc->sc_dev,
15627 			    "%s: failed to get semaphore\n",
15628 			    __func__);
15629 			return -1;
15630 		}
15631 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
15632 		if ((swfw_sync & (swmask | fwmask)) == 0) {
15633 			swfw_sync |= swmask;
15634 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
15635 			wm_put_swsm_semaphore(sc);
15636 			return 0;
15637 		}
15638 		wm_put_swsm_semaphore(sc);
15639 		delay(5000);
15640 		timeout--;
15641 	}
15642 	device_printf(sc->sc_dev,
15643 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
15644 	    mask, swfw_sync);
15645 	return -1;
15646 }
15647 
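/*
 * Put SW/FW semaphore.
 * Counterpart of wm_get_swfw_semaphore().
 */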
15648 static void
15649 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
15650 {
15651 	uint32_t swfw_sync;
15652 
15653 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15654 		device_xname(sc->sc_dev), __func__));
15655 
15656 	while (wm_get_swsm_semaphore(sc) != 0)
15657 		continue;
15658 
15659 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
15660 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
15661 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
15662 
15663 	wm_put_swsm_semaphore(sc);
15664 }
15665 
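/*
 * wm_get_nvm_80003, wm_put_nvm_80003:
 *
 *	Acquire/release everything NVM access needs on 80003: the
 *	EEPROM SW/FW semaphore and, if required, direct EECD access.
 */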
15666 static int
15667 wm_get_nvm_80003(struct wm_softc *sc)
15668 {
15669 	int rv;
15670 
15671 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
15672 		device_xname(sc->sc_dev), __func__));
15673 
15674 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
15675 		aprint_error_dev(sc->sc_dev,
15676 		    "%s: failed to get semaphore(SWFW)\n", __func__);
15677 		return rv;
15678 	}
15679 
15680 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15681 	    && (rv = wm_get_eecd(sc)) != 0) {
15682 		aprint_error_dev(sc->sc_dev,
15683 		    "%s: failed to get semaphore(EECD)\n", __func__);
15684 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
15685 		return rv;
15686 	}
15687 
15688 	return 0;
15689 }
15690 
15691 static void
15692 wm_put_nvm_80003(struct wm_softc *sc)
15693 {
15694 
15695 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15696 		device_xname(sc->sc_dev), __func__));
15697 
15698 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15699 		wm_put_eecd(sc);
15700 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
15701 }
15702 
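/*
 * wm_get_nvm_82571, wm_put_nvm_82571:
 *
 *	Acquire/release NVM access on the 82571 family: the SWSM
 *	semaphore and, except on 82573, direct EECD access if required.
 */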
15703 static int
15704 wm_get_nvm_82571(struct wm_softc *sc)
15705 {
15706 	int rv;
15707 
15708 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15709 		device_xname(sc->sc_dev), __func__));
15710 
15711 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
15712 		return rv;
15713 
15714 	switch (sc->sc_type) {
15715 	case WM_T_82573:
15716 		break;
15717 	default:
15718 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15719 			rv = wm_get_eecd(sc);
15720 		break;
15721 	}
15722 
15723 	if (rv != 0) {
15724 		aprint_error_dev(sc->sc_dev,
15725 		    "%s: failed to get semaphore\n",
15726 		    __func__);
15727 		wm_put_swsm_semaphore(sc);
15728 	}
15729 
15730 	return rv;
15731 }
15732 
15733 static void
15734 wm_put_nvm_82571(struct wm_softc *sc)
15735 {
15736 
15737 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15738 		device_xname(sc->sc_dev), __func__));
15739 
15740 	switch (sc->sc_type) {
15741 	case WM_T_82573:
15742 		break;
15743 	default:
15744 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15745 			wm_put_eecd(sc);
15746 		break;
15747 	}
15748 
15749 	wm_put_swsm_semaphore(sc);
15750 }
15751 
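/*
 * wm_get_phy_82575, wm_put_phy_82575:
 *
 *	Acquire/release the SW/FW semaphore for this function's PHY.
 */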
15752 static int
15753 wm_get_phy_82575(struct wm_softc *sc)
15754 {
15755 
15756 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15757 		device_xname(sc->sc_dev), __func__));
15758 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
15759 }
15760 
15761 static void
15762 wm_put_phy_82575(struct wm_softc *sc)
15763 {
15764 
15765 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15766 		device_xname(sc->sc_dev), __func__));
15767 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
15768 }
15769 
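/*
 * Get SW/FW/HW semaphore (EXTCNFCTR.MDIO_SW_OWNERSHIP).
 * The same PHY mutex serializes both PHY and NVM access here.
 */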
15770 static int
15771 wm_get_swfwhw_semaphore(struct wm_softc *sc)
15772 {
15773 	uint32_t ext_ctrl;
15774 	int timeout = 200;
15775 
15776 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15777 		device_xname(sc->sc_dev), __func__));
15778 
15779 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
15780 	for (timeout = 0; timeout < 200; timeout++) {
15781 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15782 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
15783 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15784 
15785 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15786 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
15787 			return 0;
15788 		delay(5000);
15789 	}
15790 	device_printf(sc->sc_dev,
15791 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
15792 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
15793 	return -1;
15794 }
15795 
15796 static void
15797 wm_put_swfwhw_semaphore(struct wm_softc *sc)
15798 {
15799 	uint32_t ext_ctrl;
15800 
15801 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15802 		device_xname(sc->sc_dev), __func__));
15803 
15804 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15805 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15806 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15807 
15808 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
15809 }
15810 
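/*
 * Get the SW flag (EXTCNFCTR.MDIO_SW_OWNERSHIP) used on ICH8 and
 * newer: wait for the current owner to drop the flag, then take it.
 */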
15811 static int
15812 wm_get_swflag_ich8lan(struct wm_softc *sc)
15813 {
15814 	uint32_t ext_ctrl;
15815 	int timeout;
15816 
15817 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15818 		device_xname(sc->sc_dev), __func__));
15819 	mutex_enter(sc->sc_ich_phymtx);
15820 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
15821 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15822 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
15823 			break;
15824 		delay(1000);
15825 	}
15826 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
15827 		device_printf(sc->sc_dev,
15828 		    "SW has already locked the resource\n");
15829 		goto out;
15830 	}
15831 
15832 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
15833 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15834 	for (timeout = 0; timeout < 1000; timeout++) {
15835 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15836 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
15837 			break;
15838 		delay(1000);
15839 	}
15840 	if (timeout >= 1000) {
15841 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
15842 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15843 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15844 		goto out;
15845 	}
15846 	return 0;
15847 
15848 out:
15849 	mutex_exit(sc->sc_ich_phymtx);
15850 	return -1;
15851 }
15852 
15853 static void
15854 wm_put_swflag_ich8lan(struct wm_softc *sc)
15855 {
15856 	uint32_t ext_ctrl;
15857 
15858 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15859 		device_xname(sc->sc_dev), __func__));
15860 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15861 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
15862 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15863 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15864 	} else
15865 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
15866 
15867 	mutex_exit(sc->sc_ich_phymtx);
15868 }
15869 
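/*
 * wm_get_nvm_ich8lan, wm_put_nvm_ich8lan:
 *
 *	On ICH8 and newer, NVM access is serialized with a plain
 *	software mutex.
 */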
15870 static int
15871 wm_get_nvm_ich8lan(struct wm_softc *sc)
15872 {
15873 
15874 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15875 		device_xname(sc->sc_dev), __func__));
15876 	mutex_enter(sc->sc_ich_nvmmtx);
15877 
15878 	return 0;
15879 }
15880 
15881 static void
15882 wm_put_nvm_ich8lan(struct wm_softc *sc)
15883 {
15884 
15885 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15886 		device_xname(sc->sc_dev), __func__));
15887 	mutex_exit(sc->sc_ich_nvmmtx);
15888 }
15889 
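/*
 * Get/put 82573's hardware semaphore (EXTCNFCTR.MDIO_SW_OWNERSHIP).
 */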
15890 static int
15891 wm_get_hw_semaphore_82573(struct wm_softc *sc)
15892 {
15893 	int i = 0;
15894 	uint32_t reg;
15895 
15896 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15897 		device_xname(sc->sc_dev), __func__));
15898 
15899 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15900 	do {
15901 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
15902 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
15903 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15904 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
15905 			break;
15906 		delay(2*1000);
15907 		i++;
15908 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
15909 
15910 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
15911 		wm_put_hw_semaphore_82573(sc);
15912 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
15913 		    device_xname(sc->sc_dev));
15914 		return -1;
15915 	}
15916 
15917 	return 0;
15918 }
15919 
15920 static void
15921 wm_put_hw_semaphore_82573(struct wm_softc *sc)
15922 {
15923 	uint32_t reg;
15924 
15925 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15926 		device_xname(sc->sc_dev), __func__));
15927 
15928 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15929 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15930 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
15931 }
15932 
15933 /*
15934  * Management mode and power management related subroutines.
15935  * BMC, AMT, suspend/resume and EEE.
15936  */
15937 
15938 #ifdef WM_WOL
15939 static int
15940 wm_check_mng_mode(struct wm_softc *sc)
15941 {
15942 	int rv;
15943 
15944 	switch (sc->sc_type) {
15945 	case WM_T_ICH8:
15946 	case WM_T_ICH9:
15947 	case WM_T_ICH10:
15948 	case WM_T_PCH:
15949 	case WM_T_PCH2:
15950 	case WM_T_PCH_LPT:
15951 	case WM_T_PCH_SPT:
15952 	case WM_T_PCH_CNP:
15953 	case WM_T_PCH_TGP:
15954 		rv = wm_check_mng_mode_ich8lan(sc);
15955 		break;
15956 	case WM_T_82574:
15957 	case WM_T_82583:
15958 		rv = wm_check_mng_mode_82574(sc);
15959 		break;
15960 	case WM_T_82571:
15961 	case WM_T_82572:
15962 	case WM_T_82573:
15963 	case WM_T_80003:
15964 		rv = wm_check_mng_mode_generic(sc);
15965 		break;
15966 	default:
		/* Nothing to do */
15968 		rv = 0;
15969 		break;
15970 	}
15971 
15972 	return rv;
15973 }
15974 
15975 static int
15976 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
15977 {
15978 	uint32_t fwsm;
15979 
15980 	fwsm = CSR_READ(sc, WMREG_FWSM);
15981 
15982 	if (((fwsm & FWSM_FW_VALID) != 0)
15983 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
15984 		return 1;
15985 
15986 	return 0;
15987 }
15988 
15989 static int
15990 wm_check_mng_mode_82574(struct wm_softc *sc)
15991 {
15992 	uint16_t data;
15993 
15994 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
15995 
15996 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
15997 		return 1;
15998 
15999 	return 0;
16000 }
16001 
16002 static int
16003 wm_check_mng_mode_generic(struct wm_softc *sc)
16004 {
16005 	uint32_t fwsm;
16006 
16007 	fwsm = CSR_READ(sc, WMREG_FWSM);
16008 
16009 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
16010 		return 1;
16011 
16012 	return 0;
16013 }
16014 #endif /* WM_WOL */
16015 
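/*
 * Check whether the firmware enables passing management packets
 * through to the host.  Returns nonzero if pass-through is enabled.
 */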
16016 static int
16017 wm_enable_mng_pass_thru(struct wm_softc *sc)
16018 {
16019 	uint32_t manc, fwsm, factps;
16020 
16021 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
16022 		return 0;
16023 
16024 	manc = CSR_READ(sc, WMREG_MANC);
16025 
16026 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
16027 		device_xname(sc->sc_dev), manc));
16028 	if ((manc & MANC_RECV_TCO_EN) == 0)
16029 		return 0;
16030 
16031 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
16032 		fwsm = CSR_READ(sc, WMREG_FWSM);
16033 		factps = CSR_READ(sc, WMREG_FACTPS);
16034 		if (((factps & FACTPS_MNGCG) == 0)
16035 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
16036 			return 1;
	} else if ((sc->sc_type == WM_T_82574)
	    || (sc->sc_type == WM_T_82583)) {
16038 		uint16_t data;
16039 
16040 		factps = CSR_READ(sc, WMREG_FACTPS);
16041 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
16042 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
16043 			device_xname(sc->sc_dev), factps, data));
16044 		if (((factps & FACTPS_MNGCG) == 0)
16045 		    && ((data & NVM_CFG2_MNGM_MASK)
16046 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
16047 			return 1;
16048 	} else if (((manc & MANC_SMBUS_EN) != 0)
16049 	    && ((manc & MANC_ASF_EN) == 0))
16050 		return 1;
16051 
16052 	return 0;
16053 }
16054 
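/*
 * Check whether the manageability firmware is blocking PHY resets.
 */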
16055 static bool
16056 wm_phy_resetisblocked(struct wm_softc *sc)
16057 {
16058 	bool blocked = false;
16059 	uint32_t reg;
16060 	int i = 0;
16061 
16062 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16063 		device_xname(sc->sc_dev), __func__));
16064 
16065 	switch (sc->sc_type) {
16066 	case WM_T_ICH8:
16067 	case WM_T_ICH9:
16068 	case WM_T_ICH10:
16069 	case WM_T_PCH:
16070 	case WM_T_PCH2:
16071 	case WM_T_PCH_LPT:
16072 	case WM_T_PCH_SPT:
16073 	case WM_T_PCH_CNP:
16074 	case WM_T_PCH_TGP:
16075 		do {
16076 			reg = CSR_READ(sc, WMREG_FWSM);
16077 			if ((reg & FWSM_RSPCIPHY) == 0) {
16078 				blocked = true;
16079 				delay(10*1000);
16080 				continue;
16081 			}
16082 			blocked = false;
16083 		} while (blocked && (i++ < 30));
		return blocked;
16086 	case WM_T_82571:
16087 	case WM_T_82572:
16088 	case WM_T_82573:
16089 	case WM_T_82574:
16090 	case WM_T_82583:
16091 	case WM_T_80003:
		reg = CSR_READ(sc, WMREG_MANC);
		return (reg & MANC_BLK_PHY_RST_ON_IDE) != 0;
16098 	default:
16099 		/* No problem */
16100 		break;
16101 	}
16102 
16103 	return false;
16104 }
16105 
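/*
 * wm_get_hw_control, wm_release_hw_control:
 *
 *	Set/clear the DRV_LOAD bit to tell the firmware whether the
 *	driver is loaded (SWSM on 82573, CTRL_EXT on 82571 and newer).
 */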
16106 static void
16107 wm_get_hw_control(struct wm_softc *sc)
16108 {
16109 	uint32_t reg;
16110 
16111 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
16112 		device_xname(sc->sc_dev), __func__));
16113 
16114 	if (sc->sc_type == WM_T_82573) {
16115 		reg = CSR_READ(sc, WMREG_SWSM);
16116 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
16117 	} else if (sc->sc_type >= WM_T_82571) {
16118 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
16119 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
16120 	}
16121 }
16122 
16123 static void
16124 wm_release_hw_control(struct wm_softc *sc)
16125 {
16126 	uint32_t reg;
16127 
16128 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
16129 		device_xname(sc->sc_dev), __func__));
16130 
16131 	if (sc->sc_type == WM_T_82573) {
16132 		reg = CSR_READ(sc, WMREG_SWSM);
16133 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
16134 	} else if (sc->sc_type >= WM_T_82571) {
16135 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
16136 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
16137 	}
16138 }
16139 
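/*
 * Gate/ungate automatic PHY configuration by hardware (PCH2 and
 * newer only).
 */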
16140 static void
16141 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
16142 {
16143 	uint32_t reg;
16144 
16145 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16146 		device_xname(sc->sc_dev), __func__));
16147 
16148 	if (sc->sc_type < WM_T_PCH2)
16149 		return;
16150 
16151 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
16152 
16153 	if (gate)
16154 		reg |= EXTCNFCTR_GATE_PHY_CFG;
16155 	else
16156 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
16157 
16158 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
16159 }
16160 
16161 static int
16162 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
16163 {
16164 	uint32_t fwsm, reg;
16165 	int rv;
16166 
16167 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16168 		device_xname(sc->sc_dev), __func__));
16169 
16170 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
16171 	wm_gate_hw_phy_config_ich8lan(sc, true);
16172 
16173 	/* Disable ULP */
16174 	wm_ulp_disable(sc);
16175 
16176 	/* Acquire PHY semaphore */
16177 	rv = sc->phy.acquire(sc);
16178 	if (rv != 0) {
16179 		DPRINTF(sc, WM_DEBUG_INIT,
16180 		    ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
16181 		return rv;
16182 	}
16183 
16184 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
16185 	 * inaccessible and resetting the PHY is not blocked, toggle the
16186 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
16187 	 */
16188 	fwsm = CSR_READ(sc, WMREG_FWSM);
16189 	switch (sc->sc_type) {
16190 	case WM_T_PCH_LPT:
16191 	case WM_T_PCH_SPT:
16192 	case WM_T_PCH_CNP:
16193 	case WM_T_PCH_TGP:
16194 		if (wm_phy_is_accessible_pchlan(sc))
16195 			break;
16196 
16197 		/* Before toggling LANPHYPC, see if PHY is accessible by
16198 		 * forcing MAC to SMBus mode first.
16199 		 */
16200 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
16201 		reg |= CTRL_EXT_FORCE_SMBUS;
16202 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
16203 #if 0
16204 		/* XXX Isn't this required??? */
16205 		CSR_WRITE_FLUSH(sc);
16206 #endif
16207 		/* Wait 50 milliseconds for MAC to finish any retries
16208 		 * that it might be trying to perform from previous
16209 		 * attempts to acknowledge any phy read requests.
16210 		 */
16211 		delay(50 * 1000);
16212 		/* FALLTHROUGH */
16213 	case WM_T_PCH2:
16214 		if (wm_phy_is_accessible_pchlan(sc) == true)
16215 			break;
16216 		/* FALLTHROUGH */
16217 	case WM_T_PCH:
16218 		if (sc->sc_type == WM_T_PCH)
16219 			if ((fwsm & FWSM_FW_VALID) != 0)
16220 				break;
16221 
16222 		if (wm_phy_resetisblocked(sc) == true) {
16223 			device_printf(sc->sc_dev, "XXX reset is blocked(2)\n");
16224 			break;
16225 		}
16226 
16227 		/* Toggle LANPHYPC Value bit */
16228 		wm_toggle_lanphypc_pch_lpt(sc);
16229 
16230 		if (sc->sc_type >= WM_T_PCH_LPT) {
16231 			if (wm_phy_is_accessible_pchlan(sc) == true)
16232 				break;
16233 
16234 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
16235 			 * so ensure that the MAC is also out of SMBus mode
16236 			 */
16237 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
16238 			reg &= ~CTRL_EXT_FORCE_SMBUS;
16239 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
16240 
16241 			if (wm_phy_is_accessible_pchlan(sc) == true)
16242 				break;
16243 			rv = -1;
16244 		}
16245 		break;
16246 	default:
16247 		break;
16248 	}
16249 
16250 	/* Release semaphore */
16251 	sc->phy.release(sc);
16252 
16253 	if (rv == 0) {
16254 		/* Check to see if able to reset PHY.  Print error if not */
16255 		if (wm_phy_resetisblocked(sc)) {
16256 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
16257 			goto out;
16258 		}
16259 
16260 		/* Reset the PHY before any access to it.  Doing so, ensures
16261 		 * that the PHY is in a known good state before we read/write
16262 		 * PHY registers.  The generic reset is sufficient here,
16263 		 * because we haven't determined the PHY type yet.
16264 		 */
16265 		if (wm_reset_phy(sc) != 0)
16266 			goto out;
16267 
16268 		/* On a successful reset, possibly need to wait for the PHY
16269 		 * to quiesce to an accessible state before returning control
16270 		 * to the calling function.  If the PHY does not quiesce, then
16271 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
		 * the PHY is in.
16273 		 */
16274 		if (wm_phy_resetisblocked(sc))
16275 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
16276 	}
16277 
16278 out:
16279 	/* Ungate automatic PHY configuration on non-managed 82579 */
16280 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
16281 		delay(10*1000);
16282 		wm_gate_hw_phy_config_ich8lan(sc, false);
16283 	}
16284 
16285 	return 0;
16286 }
16287 
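/*
 * Set up manageability: stop the hardware from intercepting ARP
 * and let management packets (ports 623/624) reach the host.
 */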
16288 static void
16289 wm_init_manageability(struct wm_softc *sc)
16290 {
16291 
16292 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16293 		device_xname(sc->sc_dev), __func__));
16294 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
16295 
16296 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
16297 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
16298 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
16299 
16300 		/* Disable hardware interception of ARP */
16301 		manc &= ~MANC_ARP_EN;
16302 
16303 		/* Enable receiving management packets to the host */
16304 		if (sc->sc_type >= WM_T_82571) {
16305 			manc |= MANC_EN_MNG2HOST;
16306 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
16307 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
16308 		}
16309 
16310 		CSR_WRITE(sc, WMREG_MANC, manc);
16311 	}
16312 }
16313 
16314 static void
16315 wm_release_manageability(struct wm_softc *sc)
16316 {
16317 
16318 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
16319 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
16320 
16321 		manc |= MANC_ARP_EN;
16322 		if (sc->sc_type >= WM_T_82571)
16323 			manc &= ~MANC_EN_MNG2HOST;
16324 
16325 		CSR_WRITE(sc, WMREG_MANC, manc);
16326 	}
16327 }
16328 
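/*
 * Collect the device's wakeup-related capabilities: AMT, ARC
 * subsystem validity, ASF firmware presence and manageability.
 */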
16329 static void
16330 wm_get_wakeup(struct wm_softc *sc)
16331 {
16332 
16333 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
16334 	switch (sc->sc_type) {
16335 	case WM_T_82573:
16336 	case WM_T_82583:
16337 		sc->sc_flags |= WM_F_HAS_AMT;
16338 		/* FALLTHROUGH */
16339 	case WM_T_80003:
16340 	case WM_T_82575:
16341 	case WM_T_82576:
16342 	case WM_T_82580:
16343 	case WM_T_I350:
16344 	case WM_T_I354:
16345 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
16346 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
16347 		/* FALLTHROUGH */
16348 	case WM_T_82541:
16349 	case WM_T_82541_2:
16350 	case WM_T_82547:
16351 	case WM_T_82547_2:
16352 	case WM_T_82571:
16353 	case WM_T_82572:
16354 	case WM_T_82574:
16355 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
16356 		break;
16357 	case WM_T_ICH8:
16358 	case WM_T_ICH9:
16359 	case WM_T_ICH10:
16360 	case WM_T_PCH:
16361 	case WM_T_PCH2:
16362 	case WM_T_PCH_LPT:
16363 	case WM_T_PCH_SPT:
16364 	case WM_T_PCH_CNP:
16365 	case WM_T_PCH_TGP:
16366 		sc->sc_flags |= WM_F_HAS_AMT;
16367 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
16368 		break;
16369 	default:
16370 		break;
16371 	}
16372 
16373 	/* 1: HAS_MANAGE */
16374 	if (wm_enable_mng_pass_thru(sc) != 0)
16375 		sc->sc_flags |= WM_F_HAS_MANAGE;
16376 
16377 	/*
	 * Note that the WOL flags are set after the EEPROM reset code
	 * has run.
16380 	 */
16381 }
16382 
16383 /*
16384  * Unconfigure Ultra Low Power mode.
16385  * Only for I217 and newer (see below).
16386  */
16387 static int
16388 wm_ulp_disable(struct wm_softc *sc)
16389 {
16390 	uint32_t reg;
16391 	uint16_t phyreg;
16392 	int i = 0, rv;
16393 
16394 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16395 		device_xname(sc->sc_dev), __func__));
16396 	/* Exclude old devices */
16397 	if ((sc->sc_type < WM_T_PCH_LPT)
16398 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
16399 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
16400 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
16401 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
16402 		return 0;
16403 
16404 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
16405 		/* Request ME un-configure ULP mode in the PHY */
16406 		reg = CSR_READ(sc, WMREG_H2ME);
16407 		reg &= ~H2ME_ULP;
16408 		reg |= H2ME_ENFORCE_SETTINGS;
16409 		CSR_WRITE(sc, WMREG_H2ME, reg);
16410 
16411 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
16412 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
16413 			if (i++ == 30) {
16414 				device_printf(sc->sc_dev, "%s timed out\n",
16415 				    __func__);
16416 				return -1;
16417 			}
16418 			delay(10 * 1000);
16419 		}
16420 		reg = CSR_READ(sc, WMREG_H2ME);
16421 		reg &= ~H2ME_ENFORCE_SETTINGS;
16422 		CSR_WRITE(sc, WMREG_H2ME, reg);
16423 
16424 		return 0;
16425 	}
16426 
16427 	/* Acquire semaphore */
16428 	rv = sc->phy.acquire(sc);
16429 	if (rv != 0) {
16430 		DPRINTF(sc, WM_DEBUG_INIT,
16431 		    ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
16432 		return rv;
16433 	}
16434 
16435 	/* Toggle LANPHYPC */
16436 	wm_toggle_lanphypc_pch_lpt(sc);
16437 
16438 	/* Unforce SMBus mode in PHY */
16439 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
16440 	if (rv != 0) {
16441 		uint32_t reg2;
16442 
16443 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
16444 		    __func__);
16445 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
16446 		reg2 |= CTRL_EXT_FORCE_SMBUS;
16447 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
16448 		delay(50 * 1000);
16449 
16450 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
16451 		    &phyreg);
16452 		if (rv != 0)
16453 			goto release;
16454 	}
16455 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
16456 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
16457 
16458 	/* Unforce SMBus mode in MAC */
16459 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
16460 	reg &= ~CTRL_EXT_FORCE_SMBUS;
16461 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
16462 
16463 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
16464 	if (rv != 0)
16465 		goto release;
16466 	phyreg |= HV_PM_CTRL_K1_ENA;
16467 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
16468 
16469 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
16470 	    &phyreg);
16471 	if (rv != 0)
16472 		goto release;
16473 	phyreg &= ~(I218_ULP_CONFIG1_IND
16474 	    | I218_ULP_CONFIG1_STICKY_ULP
16475 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
16476 	    | I218_ULP_CONFIG1_WOL_HOST
16477 	    | I218_ULP_CONFIG1_INBAND_EXIT
16478 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
16479 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
16480 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
16481 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
16482 	phyreg |= I218_ULP_CONFIG1_START;
16483 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
16484 
16485 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
16486 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
16487 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
16488 
16489 release:
16490 	/* Release semaphore */
16491 	sc->phy.release(sc);
16492 	wm_gmii_reset(sc);
16493 	delay(50 * 1000);
16494 
16495 	return rv;
16496 }
16497 
16498 /* WOL in the newer chipset interfaces (pchlan) */
16499 static int
16500 wm_enable_phy_wakeup(struct wm_softc *sc)
16501 {
16502 	device_t dev = sc->sc_dev;
16503 	uint32_t mreg, moff;
16504 	uint16_t wuce, wuc, wufc, preg;
16505 	int i, rv;
16506 
16507 	KASSERT(sc->sc_type >= WM_T_PCH);
16508 
16509 	/* Copy MAC RARs to PHY RARs */
16510 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
16511 
16512 	/* Activate PHY wakeup */
16513 	rv = sc->phy.acquire(sc);
16514 	if (rv != 0) {
16515 		device_printf(dev, "%s: failed to acquire semaphore\n",
16516 		    __func__);
16517 		return rv;
16518 	}
16519 
16520 	/*
16521 	 * Enable access to PHY wakeup registers.
16522 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
16523 	 */
16524 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
16525 	if (rv != 0) {
16526 		device_printf(dev,
16527 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
16528 		goto release;
16529 	}
16530 
16531 	/* Copy MAC MTA to PHY MTA */
16532 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
16533 		uint16_t lo, hi;
16534 
16535 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
16536 		lo = (uint16_t)(mreg & 0xffff);
16537 		hi = (uint16_t)((mreg >> 16) & 0xffff);
16538 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
16539 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
16540 	}
16541 
16542 	/* Configure PHY Rx Control register */
16543 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
16544 	mreg = CSR_READ(sc, WMREG_RCTL);
16545 	if (mreg & RCTL_UPE)
16546 		preg |= BM_RCTL_UPE;
16547 	if (mreg & RCTL_MPE)
16548 		preg |= BM_RCTL_MPE;
16549 	preg &= ~(BM_RCTL_MO_MASK);
16550 	moff = __SHIFTOUT(mreg, RCTL_MO);
16551 	if (moff != 0)
16552 		preg |= moff << BM_RCTL_MO_SHIFT;
16553 	if (mreg & RCTL_BAM)
16554 		preg |= BM_RCTL_BAM;
16555 	if (mreg & RCTL_PMCF)
16556 		preg |= BM_RCTL_PMCF;
16557 	mreg = CSR_READ(sc, WMREG_CTRL);
16558 	if (mreg & CTRL_RFCE)
16559 		preg |= BM_RCTL_RFCE;
16560 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
16561 
16562 	wuc = WUC_APME | WUC_PME_EN;
16563 	wufc = WUFC_MAG;
16564 	/* Enable PHY wakeup in MAC register */
16565 	CSR_WRITE(sc, WMREG_WUC,
16566 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
16567 	CSR_WRITE(sc, WMREG_WUFC, wufc);
16568 
16569 	/* Configure and enable PHY wakeup in PHY registers */
16570 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
16571 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
16572 
16573 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
16574 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
16575 
16576 release:
16577 	sc->phy.release(sc);
16578 
	return rv;
16580 }
16581 
16582 /* Power down workaround on D3 */
16583 static void
16584 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
16585 {
16586 	uint32_t reg;
16587 	uint16_t phyreg;
16588 	int i;
16589 
16590 	for (i = 0; i < 2; i++) {
16591 		/* Disable link */
16592 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
16593 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
16594 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
16595 
16596 		/*
16597 		 * Call gig speed drop workaround on Gig disable before
16598 		 * accessing any PHY registers
16599 		 */
16600 		if (sc->sc_type == WM_T_ICH8)
16601 			wm_gig_downshift_workaround_ich8lan(sc);
16602 
16603 		/* Write VR power-down enable */
16604 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
16605 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
16606 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
16607 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
16608 
16609 		/* Read it back and test */
16610 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
16611 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
16612 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
16613 			break;
16614 
16615 		/* Issue PHY reset and repeat at most one more time */
16616 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
16617 	}
16618 }
16619 
16620 /*
16621  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
16622  *  @sc: pointer to the HW structure
16623  *
16624  *  During S0 to Sx transition, it is possible the link remains at gig
16625  *  instead of negotiating to a lower speed.  Before going to Sx, set
16626  *  'Gig Disable' to force link speed negotiation to a lower speed based on
16627  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
16628  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
16629  *  needs to be written.
 *  Parts that support (and are linked to a partner which supports) EEE in
16631  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
16632  *  than 10Mbps w/o EEE.
16633  */
16634 static void
16635 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
16636 {
16637 	device_t dev = sc->sc_dev;
16638 	struct ethercom *ec = &sc->sc_ethercom;
16639 	uint32_t phy_ctrl;
16640 	int rv;
16641 
16642 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
16643 	phy_ctrl |= PHY_CTRL_GBE_DIS;
16644 
16645 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_TGP));
16646 
16647 	if (sc->sc_phytype == WMPHY_I217) {
16648 		uint16_t devid = sc->sc_pcidevid;
16649 
16650 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
16651 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
16652 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
16653 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
16654 		    (sc->sc_type >= WM_T_PCH_SPT))
16655 			CSR_WRITE(sc, WMREG_FEXTNVM6,
16656 			    CSR_READ(sc, WMREG_FEXTNVM6)
16657 			    & ~FEXTNVM6_REQ_PLL_CLK);
16658 
16659 		if (sc->phy.acquire(sc) != 0)
16660 			goto out;
16661 
16662 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
16663 			uint16_t eee_advert;
16664 
16665 			rv = wm_read_emi_reg_locked(dev,
16666 			    I217_EEE_ADVERTISEMENT, &eee_advert);
16667 			if (rv)
16668 				goto release;
16669 
16670 			/*
16671 			 * Disable LPLU if both link partners support 100BaseT
16672 			 * EEE and 100Full is advertised on both ends of the
16673 			 * link, and enable Auto Enable LPI since there will
16674 			 * be no driver to enable LPI while in Sx.
16675 			 */
16676 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
16677 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
16678 				uint16_t anar, phy_reg;
16679 
16680 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
16681 				    &anar);
16682 				if (anar & ANAR_TX_FD) {
16683 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
16684 					    PHY_CTRL_NOND0A_LPLU);
16685 
16686 					/* Set Auto Enable LPI after link up */
16687 					sc->phy.readreg_locked(dev, 2,
16688 					    I217_LPI_GPIO_CTRL, &phy_reg);
16689 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
16690 					sc->phy.writereg_locked(dev, 2,
16691 					    I217_LPI_GPIO_CTRL, phy_reg);
16692 				}
16693 			}
16694 		}
16695 
16696 		/*
16697 		 * For i217 Intel Rapid Start Technology support,
16698 		 * when the system is going into Sx and no manageability engine
16699 		 * is present, the driver must configure proxy to reset only on
		 * power good.  LPI (Low Power Idle) state must also reset only
16701 		 * on power good, as well as the MTA (Multicast table array).
16702 		 * The SMBus release must also be disabled on LCD reset.
16703 		 */
16704 
16705 		/*
16706 		 * Enable MTA to reset for Intel Rapid Start Technology
16707 		 * Support
16708 		 */
16709 
16710 release:
16711 		sc->phy.release(sc);
16712 	}
16713 out:
16714 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
16715 
16716 	if (sc->sc_type == WM_T_ICH8)
16717 		wm_gig_downshift_workaround_ich8lan(sc);
16718 
16719 	if (sc->sc_type >= WM_T_PCH) {
16720 		wm_oem_bits_config_ich8lan(sc, false);
16721 
16722 		/* Reset PHY to activate OEM bits on 82577/8 */
16723 		if (sc->sc_type == WM_T_PCH)
16724 			wm_reset_phy(sc);
16725 
16726 		if (sc->phy.acquire(sc) != 0)
16727 			return;
16728 		wm_write_smbus_addr(sc);
16729 		sc->phy.release(sc);
16730 	}
16731 }
16732 
16733 /*
16734  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
16735  *  @sc: pointer to the HW structure
16736  *
16737  *  During Sx to S0 transitions on non-managed devices or managed devices
16738  *  on which PHY resets are not blocked, if the PHY registers cannot be
16739  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
16740  *  the PHY.
16741  *  On i217, setup Intel Rapid Start Technology.
16742  */
16743 static int
16744 wm_resume_workarounds_pchlan(struct wm_softc *sc)
16745 {
16746 	device_t dev = sc->sc_dev;
16747 	int rv;
16748 
16749 	if (sc->sc_type < WM_T_PCH2)
16750 		return 0;
16751 
16752 	rv = wm_init_phy_workarounds_pchlan(sc);
16753 	if (rv != 0)
16754 		return rv;
16755 
16756 	/* For i217 Intel Rapid Start Technology support when the system
16757 	 * is transitioning from Sx and no manageability engine is present
16758 	 * configure SMBus to restore on reset, disable proxy, and enable
16759 	 * the reset on MTA (Multicast table array).
16760 	 */
16761 	if (sc->sc_phytype == WMPHY_I217) {
16762 		uint16_t phy_reg;
16763 
16764 		rv = sc->phy.acquire(sc);
16765 		if (rv != 0)
16766 			return rv;
16767 
16768 		/* Clear Auto Enable LPI after link up */
16769 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
16770 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
16771 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
16772 
16773 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
16774 			/* Restore clear on SMB if no manageability engine
16775 			 * is present
16776 			 */
16777 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
16778 			    &phy_reg);
16779 			if (rv != 0)
16780 				goto release;
16781 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
16782 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
16783 
16784 			/* Disable Proxy */
16785 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
16786 		}
16787 		/* Enable reset on MTA */
		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
16789 		if (rv != 0)
16790 			goto release;
16791 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
16792 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
16793 
16794 release:
16795 		sc->phy.release(sc);
16796 		return rv;
16797 	}
16798 
16799 	return 0;
16800 }
16801 
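/*
 * Enable wakeup (WOL): apply the chip specific suspend workarounds,
 * arm PHY or MAC wakeup as appropriate and finally request PME in
 * the PCI power management registers.
 */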
16802 static void
16803 wm_enable_wakeup(struct wm_softc *sc)
16804 {
16805 	uint32_t reg, pmreg;
16806 	pcireg_t pmode;
16807 	int rv = 0;
16808 
16809 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16810 		device_xname(sc->sc_dev), __func__));
16811 
16812 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
16813 	    &pmreg, NULL) == 0)
16814 		return;
16815 
16816 	if ((sc->sc_flags & WM_F_WOL) == 0)
16817 		goto pme;
16818 
16819 	/* Advertise the wakeup capability */
16820 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
16821 	    | CTRL_SWDPIN(3));
16822 
16823 	/* Keep the laser running on fiber adapters */
16824 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
16825 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
16826 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
16827 		reg |= CTRL_EXT_SWDPIN(3);
16828 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
16829 	}
16830 
16831 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
16832 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
16833 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
16834 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP) ||
16835 	    (sc->sc_type == WM_T_PCH_TGP))
16836 		wm_suspend_workarounds_ich8lan(sc);
16837 
16838 #if 0	/* For the multicast packet */
16839 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
16840 	reg |= WUFC_MC;
16841 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
16842 #endif
16843 
16844 	if (sc->sc_type >= WM_T_PCH) {
16845 		rv = wm_enable_phy_wakeup(sc);
16846 		if (rv != 0)
16847 			goto pme;
16848 	} else {
16849 		/* Enable wakeup by the MAC */
16850 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
16851 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
16852 	}
16853 
16854 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
16855 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
16856 		|| (sc->sc_type == WM_T_PCH2))
16857 	    && (sc->sc_phytype == WMPHY_IGP_3))
16858 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
16859 
16860 pme:
16861 	/* Request PME */
16862 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
16863 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
16864 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
16865 		/* For WOL */
16866 		pmode |= PCI_PMCSR_PME_EN;
16867 	} else {
16868 		/* Disable WOL */
16869 		pmode &= ~PCI_PMCSR_PME_EN;
16870 	}
16871 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
16872 }
16873 
16874 /* Disable ASPM L0s and/or L1 for workaround */
16875 static void
16876 wm_disable_aspm(struct wm_softc *sc)
16877 {
16878 	pcireg_t reg, mask = 0;
16879 	unsigned const char *str = "";
	const char *str = "";
16881 	/*
	 * Only for PCIe devices which have the PCIe capability in their
	 * PCI config space.
16884 	 */
16885 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
16886 		return;
16887 
16888 	switch (sc->sc_type) {
16889 	case WM_T_82571:
16890 	case WM_T_82572:
16891 		/*
16892 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
16893 		 * State Power management L1 State (ASPM L1).
16894 		 */
16895 		mask = PCIE_LCSR_ASPM_L1;
16896 		str = "L1 is";
16897 		break;
16898 	case WM_T_82573:
16899 	case WM_T_82574:
16900 	case WM_T_82583:
16901 		/*
16902 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
16903 		 *
		 * The 82574 and 82583 do not support PCIe ASPM L0s with
		 * some chipsets.  The 82574 and 82583 documents say that
		 * disabling L0s on those specific chipsets is sufficient,
		 * but we follow what the Intel em driver does.
16908 		 *
16909 		 * References:
16910 		 * Errata 8 of the Specification Update of i82573.
16911 		 * Errata 20 of the Specification Update of i82574.
16912 		 * Errata 9 of the Specification Update of i82583.
16913 		 */
16914 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
16915 		str = "L0s and L1 are";
16916 		break;
16917 	default:
16918 		return;
16919 	}
16920 
16921 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
16922 	    sc->sc_pcixe_capoff + PCIE_LCSR);
16923 	reg &= ~mask;
16924 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
16925 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
16926 
16927 	/* Print only in wm_attach() */
16928 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
16929 		aprint_verbose_dev(sc->sc_dev,
16930 		    "ASPM %s disabled to workaround the errata.\n", str);
16931 }
16932 
16933 /* LPLU */
16934 
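/*
 * Disable D0 Low Power Link Up (LPLU) mode.
 */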
16935 static void
16936 wm_lplu_d0_disable(struct wm_softc *sc)
16937 {
16938 	struct mii_data *mii = &sc->sc_mii;
16939 	uint32_t reg;
16940 	uint16_t phyval;
16941 
16942 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16943 		device_xname(sc->sc_dev), __func__));
16944 
16945 	if (sc->sc_phytype == WMPHY_IFE)
16946 		return;
16947 
16948 	switch (sc->sc_type) {
16949 	case WM_T_82571:
16950 	case WM_T_82572:
16951 	case WM_T_82573:
16952 	case WM_T_82575:
16953 	case WM_T_82576:
16954 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
16955 		phyval &= ~PMR_D0_LPLU;
16956 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
16957 		break;
16958 	case WM_T_82580:
16959 	case WM_T_I350:
16960 	case WM_T_I210:
16961 	case WM_T_I211:
16962 		reg = CSR_READ(sc, WMREG_PHPM);
16963 		reg &= ~PHPM_D0A_LPLU;
16964 		CSR_WRITE(sc, WMREG_PHPM, reg);
16965 		break;
16966 	case WM_T_82574:
16967 	case WM_T_82583:
16968 	case WM_T_ICH8:
16969 	case WM_T_ICH9:
16970 	case WM_T_ICH10:
16971 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
16972 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
16973 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
16974 		CSR_WRITE_FLUSH(sc);
16975 		break;
16976 	case WM_T_PCH:
16977 	case WM_T_PCH2:
16978 	case WM_T_PCH_LPT:
16979 	case WM_T_PCH_SPT:
16980 	case WM_T_PCH_CNP:
16981 	case WM_T_PCH_TGP:
16982 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
16983 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
16984 		if (wm_phy_resetisblocked(sc) == false)
16985 			phyval |= HV_OEM_BITS_ANEGNOW;
16986 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
16987 		break;
16988 	default:
16989 		break;
16990 	}
16991 }
16992 
16993 /* EEE */
16994 
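/*
 * Enable or disable EEE on I350 family MACs by setting the
 * IPCNFG/EEER bits per the interface's ETHERCAP_EEE setting.
 */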
16995 static int
16996 wm_set_eee_i350(struct wm_softc *sc)
16997 {
16998 	struct ethercom *ec = &sc->sc_ethercom;
16999 	uint32_t ipcnfg, eeer;
17000 	uint32_t ipcnfg_mask
17001 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
17002 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
17003 
17004 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
17005 
17006 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
17007 	eeer = CSR_READ(sc, WMREG_EEER);
17008 
17009 	/* Enable or disable per user setting */
17010 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
17011 		ipcnfg |= ipcnfg_mask;
17012 		eeer |= eeer_mask;
17013 	} else {
17014 		ipcnfg &= ~ipcnfg_mask;
17015 		eeer &= ~eeer_mask;
17016 	}
17017 
17018 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
17019 	CSR_WRITE(sc, WMREG_EEER, eeer);
17020 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
17021 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
17022 
17023 	return 0;
17024 }
17025 
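/*
 * Enable or disable EEE on 82579/I217 PHYs per the interface's
 * ETHERCAP_EEE setting and the link partner's advertised ability.
 */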
17026 static int
17027 wm_set_eee_pchlan(struct wm_softc *sc)
17028 {
17029 	device_t dev = sc->sc_dev;
17030 	struct ethercom *ec = &sc->sc_ethercom;
17031 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
17032 	int rv;
17033 
17034 	switch (sc->sc_phytype) {
17035 	case WMPHY_82579:
17036 		lpa = I82579_EEE_LP_ABILITY;
17037 		pcs_status = I82579_EEE_PCS_STATUS;
17038 		adv_addr = I82579_EEE_ADVERTISEMENT;
17039 		break;
17040 	case WMPHY_I217:
17041 		lpa = I217_EEE_LP_ABILITY;
17042 		pcs_status = I217_EEE_PCS_STATUS;
17043 		adv_addr = I217_EEE_ADVERTISEMENT;
17044 		break;
17045 	default:
17046 		return 0;
17047 	}
17048 
17049 	rv = sc->phy.acquire(sc);
17050 	if (rv != 0) {
17051 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
17052 		return rv;
17053 	}
17054 
17055 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
17056 	if (rv != 0)
17057 		goto release;
17058 
17059 	/* Clear bits that enable EEE in various speeds */
17060 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
17061 
17062 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
17063 		/* Save off link partner's EEE ability */
17064 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
17065 		if (rv != 0)
17066 			goto release;
17067 
17068 		/* Read EEE advertisement */
17069 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
17070 			goto release;
17071 
17072 		/*
17073 		 * Enable EEE only for speeds in which the link partner is
17074 		 * EEE capable and for which we advertise EEE.
17075 		 */
17076 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
17077 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
17078 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
17079 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
17080 			if ((data & ANLPAR_TX_FD) != 0)
17081 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
17082 			else {
17083 				/*
17084 				 * EEE is not supported in 100Half, so ignore
				 * the partner's EEE ability at 100Mbps if
				 * full-duplex is not advertised.
17087 				 */
17088 				sc->eee_lp_ability
17089 				    &= ~AN_EEEADVERT_100_TX;
17090 			}
17091 		}
17092 	}
17093 
17094 	if (sc->sc_phytype == WMPHY_82579) {
17095 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
17096 		if (rv != 0)
17097 			goto release;
17098 
17099 		data &= ~I82579_LPI_PLL_SHUT_100;
17100 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
17101 	}
17102 
17103 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
17104 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
17105 		goto release;
17106 
17107 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
17108 release:
17109 	sc->phy.release(sc);
17110 
17111 	return rv;
17112 }
17113 
17114 static int
17115 wm_set_eee(struct wm_softc *sc)
17116 {
17117 	struct ethercom *ec = &sc->sc_ethercom;
17118 
17119 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
17120 		return 0;
17121 
17122 	if (sc->sc_type == WM_T_I354) {
17123 		/* I354 uses an external PHY */
17124 		return 0; /* not yet */
17125 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
17126 		return wm_set_eee_i350(sc);
17127 	else if (sc->sc_type >= WM_T_PCH2)
17128 		return wm_set_eee_pchlan(sc);
17129 
17130 	return 0;
17131 }
17132 
17133 /*
17134  * Workarounds (mainly PHY related).
17135  * Basically, PHY's workarounds are in the PHY drivers.
17136  */
17137 
17138 /* Workaround for 82566 Kumeran PCS lock loss */
17139 static int
17140 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
17141 {
17142 	struct mii_data *mii = &sc->sc_mii;
17143 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
17144 	int i, reg, rv;
17145 	uint16_t phyreg;
17146 
17147 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17148 		device_xname(sc->sc_dev), __func__));
17149 
17150 	/* If the link is not up, do nothing */
17151 	if ((status & STATUS_LU) == 0)
17152 		return 0;
17153 
17154 	/* Nothing to do if the link is other than 1Gbps */
17155 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
17156 		return 0;
17157 
17158 	for (i = 0; i < 10; i++) {
17159 		/* read twice */
17160 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
17161 		if (rv != 0)
17162 			return rv;
17163 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
17164 		if (rv != 0)
17165 			return rv;
17166 
17167 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
17168 			goto out;	/* GOOD! */
17169 
17170 		/* Reset the PHY */
17171 		wm_reset_phy(sc);
17172 		delay(5*1000);
17173 	}
17174 
17175 	/* Disable GigE link negotiation */
17176 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
17177 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
17178 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
17179 
17180 	/*
17181 	 * Call gig speed drop workaround on Gig disable before accessing
17182 	 * any PHY registers.
17183 	 */
17184 	wm_gig_downshift_workaround_ich8lan(sc);
17185 
17186 out:
17187 	return 0;
17188 }
17189 
17190 /*
17191  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
17192  *  @sc: pointer to the HW structure
17193  *
 *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
17195  *  LPLU, Gig disable, MDIC PHY reset):
17196  *    1) Set Kumeran Near-end loopback
17197  *    2) Clear Kumeran Near-end loopback
17198  *  Should only be called for ICH8[m] devices with any 1G Phy.
17199  */
17200 static void
17201 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
17202 {
17203 	uint16_t kmreg;
17204 
17205 	/* Only for igp3 */
17206 	if (sc->sc_phytype == WMPHY_IGP_3) {
17207 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
17208 			return;
17209 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
17210 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
17211 			return;
17212 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
17213 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
17214 	}
17215 }
17216 
17217 /*
17218  * Workaround for pch's PHYs
17219  * XXX should be moved to new PHY driver?
17220  */
17221 static int
17222 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
17223 {
17224 	device_t dev = sc->sc_dev;
17225 	struct mii_data *mii = &sc->sc_mii;
17226 	struct mii_softc *child;
17227 	uint16_t phy_data, phyrev = 0;
17228 	int phytype = sc->sc_phytype;
17229 	int rv;
17230 
17231 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17232 		device_xname(dev), __func__));
17233 	KASSERT(sc->sc_type == WM_T_PCH);
17234 
17235 	/* Set MDIO slow mode before any other MDIO access */
17236 	if (phytype == WMPHY_82577)
17237 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
17238 			return rv;
17239 
17240 	child = LIST_FIRST(&mii->mii_phys);
17241 	if (child != NULL)
17242 		phyrev = child->mii_mpd_rev;
17243 
	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
17245 	if ((child != NULL) &&
17246 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
17247 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
17248 		/* Disable generation of early preamble (0x4431) */
17249 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
17250 		    &phy_data);
17251 		if (rv != 0)
17252 			return rv;
17253 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
17254 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
17255 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
17256 		    phy_data);
17257 		if (rv != 0)
17258 			return rv;
17259 
17260 		/* Preamble tuning for SSC */
17261 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
17262 		if (rv != 0)
17263 			return rv;
17264 	}
17265 
17266 	/* 82578 */
17267 	if (phytype == WMPHY_82578) {
17268 		/*
17269 		 * Return registers to default by doing a soft reset then
17270 		 * writing 0x3140 to the control register
17271 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
17272 		 */
17273 		if ((child != NULL) && (phyrev < 2)) {
17274 			PHY_RESET(child);
17275 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
17276 			if (rv != 0)
17277 				return rv;
17278 		}
17279 	}
17280 
17281 	/* Select page 0 */
17282 	if ((rv = sc->phy.acquire(sc)) != 0)
17283 		return rv;
17284 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
17285 	sc->phy.release(sc);
17286 	if (rv != 0)
17287 		return rv;
17288 
17289 	/*
17290 	 * Configure the K1 Si workaround during phy reset assuming there is
17291 	 * link so that it disables K1 if link is in 1Gbps.
17292 	 */
17293 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
17294 		return rv;
17295 
17296 	/* Workaround for link disconnects on a busy hub in half duplex */
17297 	rv = sc->phy.acquire(sc);
17298 	if (rv)
17299 		return rv;
17300 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
17301 	if (rv)
17302 		goto release;
17303 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
17304 	    phy_data & 0x00ff);
17305 	if (rv)
17306 		goto release;
17307 
17308 	/* Set MSE higher to enable link to stay up when noise is high */
17309 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
17310 release:
17311 	sc->phy.release(sc);
17312 
17313 	return rv;
17314 }
17315 
17316 /*
17317  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
17318  *  @sc:   pointer to the HW structure
17319  */
17320 static void
17321 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
17322 {
17323 
17324 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17325 		device_xname(sc->sc_dev), __func__));
17326 
17327 	if (sc->phy.acquire(sc) != 0)
17328 		return;
17329 
17330 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
17331 
17332 	sc->phy.release(sc);
17333 }
17334 
17335 static void
17336 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
17337 {
17338 	device_t dev = sc->sc_dev;
17339 	uint32_t mac_reg;
17340 	uint16_t i, wuce;
17341 	int count;
17342 
17343 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17344 		device_xname(dev), __func__));
17345 
17346 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
17347 		return;
17348 
17349 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
17350 	count = wm_rar_count(sc);
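	/*
	 * Each 32-bit RAL/RAH MAC register is written to the PHY as two
	 * 16-bit wakeup registers: RAL is split into BM_RAR_L/BM_RAR_M,
	 * the low half of RAH goes to BM_RAR_H, and only the
	 * address-valid bit of RAH is carried over into BM_RAR_CTRL.
	 */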
17351 	for (i = 0; i < count; i++) {
17352 		uint16_t lo, hi;
17353 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
17354 		lo = (uint16_t)(mac_reg & 0xffff);
17355 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
17356 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
17357 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
17358 
17359 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
17360 		lo = (uint16_t)(mac_reg & 0xffff);
17361 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
17362 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
17363 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
17364 	}
17365 
17366 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
17367 }
17368 
17369 /*
17370  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
17371  *  with 82579 PHY
17372  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
17373  */
17374 static int
17375 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
17376 {
17377 	device_t dev = sc->sc_dev;
17378 	int rar_count;
17379 	int rv;
17380 	uint32_t mac_reg;
17381 	uint16_t dft_ctrl, data;
17382 	uint16_t i;
17383 
17384 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17385 		device_xname(dev), __func__));
17386 
17387 	if (sc->sc_type < WM_T_PCH2)
17388 		return 0;
17389 
17390 	/* Acquire PHY semaphore */
17391 	rv = sc->phy.acquire(sc);
17392 	if (rv != 0)
17393 		return rv;
17394 
17395 	/* Disable Rx path while enabling/disabling workaround */
17396 	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
17397 	if (rv != 0)
17398 		goto out;
17399 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
17400 	    dft_ctrl | (1 << 14));
17401 	if (rv != 0)
17402 		goto out;
17403 
17404 	if (enable) {
17405 		/* Write Rx addresses (rar_entry_count for RAL/H, and
17406 		 * SHRAL/H) and initial CRC values to the MAC
17407 		 */
17408 		rar_count = wm_rar_count(sc);
17409 		for (i = 0; i < rar_count; i++) {
17410 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
17411 			uint32_t addr_high, addr_low;
17412 
17413 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
17414 			if (!(addr_high & RAL_AV))
17415 				continue;
17416 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
17417 			mac_addr[0] = (addr_low & 0xFF);
17418 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
17419 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
17420 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
17421 			mac_addr[4] = (addr_high & 0xFF);
17422 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
17423 
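			/*
			 * PCH_RAICC(i) presumably seeds the hardware CRC
			 * for this receive address; the value written is
			 * the bitwise complement of the little-endian
			 * CRC-32 of the 6-byte MAC address.
			 */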
17424 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
17425 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
17426 		}
17427 
17428 		/* Write Rx addresses to the PHY */
17429 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
17430 	}
17431 
17432 	/*
17433 	 * If enable ==
17434 	 *	true: Enable jumbo frame workaround in the MAC.
17435 	 *	false: Write MAC register values back to h/w defaults.
17436 	 */
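	/*
	 * In FFLT_DBG, the enable case clears bit 14 and sets bits 15-17
	 * (7 << 15), while the disable case clears bits 14-17
	 * (0xf << 14). The meaning of the individual bits is not
	 * documented here; the values mirror other drivers.
	 */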
17437 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
17438 	if (enable) {
17439 		mac_reg &= ~(1 << 14);
17440 		mac_reg |= (7 << 15);
17441 	} else
17442 		mac_reg &= ~(0xf << 14);
17443 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
17444 
17445 	mac_reg = CSR_READ(sc, WMREG_RCTL);
17446 	if (enable) {
17447 		mac_reg |= RCTL_SECRC;
17448 		sc->sc_rctl |= RCTL_SECRC;
17449 		sc->sc_flags |= WM_F_CRC_STRIP;
17450 	} else {
17451 		mac_reg &= ~RCTL_SECRC;
17452 		sc->sc_rctl &= ~RCTL_SECRC;
17453 		sc->sc_flags &= ~WM_F_CRC_STRIP;
17454 	}
17455 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
17456 
17457 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
17458 	if (rv != 0)
17459 		goto out;
17460 	if (enable)
17461 		data |= 1 << 0;
17462 	else
17463 		data &= ~(1 << 0);
17464 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
17465 	if (rv != 0)
17466 		goto out;
17467 
17468 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
17469 	if (rv != 0)
17470 		goto out;
17471 	/*
17472 	 * XXX FreeBSD and Linux both set the same value here in the enable
17473 	 * case and in the disable case. Is that correct?
17474 	 */
17475 	data &= ~(0xf << 8);
17476 	data |= (0xb << 8);
17477 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
17478 	if (rv != 0)
17479 		goto out;
17480 
17481 	/*
17482 	 * If enable ==
17483 	 *	true: Enable jumbo frame workaround in the PHY.
17484 	 *	false: Write PHY register values back to h/w defaults.
17485 	 */
17486 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
17487 	if (rv != 0)
17488 		goto out;
17489 	data &= ~(0x7F << 5);
17490 	if (enable)
17491 		data |= (0x37 << 5);
17492 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
17493 	if (rv != 0)
17494 		goto out;
17495 
17496 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
17497 	if (rv != 0)
17498 		goto out;
17499 	if (enable)
17500 		data &= ~(1 << 13);
17501 	else
17502 		data |= (1 << 13);
17503 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
17504 	if (rv != 0)
17505 		goto out;
17506 
17507 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
17508 	if (rv != 0)
17509 		goto out;
17510 	data &= ~(0x3FF << 2);
17511 	if (enable)
17512 		data |= (I82579_TX_PTR_GAP << 2);
17513 	else
17514 		data |= (0x8 << 2);
17515 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
17516 	if (rv != 0)
17517 		goto out;
17518 
17519 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
17520 	    enable ? 0xf100 : 0x7e00);
17521 	if (rv != 0)
17522 		goto out;
17523 
17524 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
17525 	if (rv != 0)
17526 		goto out;
17527 	if (enable)
17528 		data |= 1 << 10;
17529 	else
17530 		data &= ~(1 << 10);
17531 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
17532 	if (rv != 0)
17533 		goto out;
17534 
17535 	/* Re-enable Rx path after enabling/disabling workaround */
17536 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
17537 	    dft_ctrl & ~(1 << 14));
17538 
17539 out:
17540 	sc->phy.release(sc);
17541 
17542 	return rv;
17543 }
17544 
17545 /*
17546  *  wm_lv_phy_workarounds_ich8lan - A series of PHY workarounds to be
17547  *  done after every PHY reset.
17548  */
17549 static int
17550 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
17551 {
17552 	device_t dev = sc->sc_dev;
17553 	int rv;
17554 
17555 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17556 		device_xname(dev), __func__));
17557 	KASSERT(sc->sc_type == WM_T_PCH2);
17558 
17559 	/* Set MDIO slow mode before any other MDIO access */
17560 	rv = wm_set_mdio_slow_mode_hv(sc);
17561 	if (rv != 0)
17562 		return rv;
17563 
17564 	rv = sc->phy.acquire(sc);
17565 	if (rv != 0)
17566 		return rv;
17567 	/* Set MSE higher to enable link to stay up when noise is high */
17568 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
17569 	if (rv != 0)
17570 		goto release;
17571 	/* Drop the link after the MSE threshold has been reached 5 times */
17572 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
17573 release:
17574 	sc->phy.release(sc);
17575 
17576 	return rv;
17577 }
17578 
17579 /**
17580  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
17581  *  @link: link up bool flag
17582  *
17583  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
17584  *  preventing further DMA write requests.  Workaround the issue by disabling
17585  *  the de-assertion of the clock request when in 1Gbps mode.
17586  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
17587  *  speeds in order to avoid Tx hangs.
17588  **/
17589 static int
17590 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
17591 {
17592 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
17593 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
17594 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
17595 	uint16_t phyreg;
17596 
17597 	if (link && (speed == STATUS_SPEED_1000)) {
17598 		int rv;
17599 
17600 		rv = sc->phy.acquire(sc);
17601 		if (rv != 0)
17602 			return rv;
17603 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
17604 		    &phyreg);
17605 		if (rv != 0)
17606 			goto release;
17607 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
17608 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
17609 		if (rv != 0)
17610 			goto release;
17611 		delay(20);
17612 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
17613 
17614 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
17615 		    &phyreg);
17616 release:
17617 		sc->phy.release(sc);
17618 		return rv;
17619 	}
17620 
17621 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
17622 
17623 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
17624 	if (((child != NULL) && (child->mii_mpd_rev > 5))
17625 	    || !link
17626 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
17627 		goto update_fextnvm6;
17628 
17629 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
17630 
17631 	/* Clear link status transmit timeout */
17632 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
17633 	if (speed == STATUS_SPEED_100) {
17634 		/* Set inband Tx timeout to 5x10us for 100Half */
17635 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
17636 
17637 		/* Do not extend the K1 entry latency for 100Half */
17638 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
17639 	} else {
17640 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
17641 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
17642 
17643 		/* Extend the K1 entry latency for 10 Mbps */
17644 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
17645 	}
17646 
17647 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
17648 
17649 update_fextnvm6:
17650 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
17651 	return 0;
17652 }
17653 
17654 /*
17655  *  wm_k1_gig_workaround_hv - K1 Si workaround
17656  *  @sc:   pointer to the HW structure
17657  *  @link: link up bool flag
17658  *
17659  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
17660  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gbps.
17661  *  If link is down, the function will restore the default K1 setting located
17662  *  in the NVM.
17663  */
17664 static int
17665 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
17666 {
17667 	int k1_enable = sc->sc_nvm_k1_enabled;
17668 	int rv;
17669 
17670 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17671 		device_xname(sc->sc_dev), __func__));
17672 
17673 	rv = sc->phy.acquire(sc);
17674 	if (rv != 0)
17675 		return rv;
17676 
17677 	if (link) {
17678 		k1_enable = 0;
17679 
17680 		/* Link stall fix for link up */
17681 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
17682 		    0x0100);
17683 	} else {
17684 		/* Link stall fix for link down */
17685 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
17686 		    0x4100);
17687 	}
17688 
17689 	wm_configure_k1_ich8lan(sc, k1_enable);
17690 	sc->phy.release(sc);
17691 
17692 	return 0;
17693 }
17694 
17695 /*
17696  *  wm_k1_workaround_lv - K1 Si workaround
17697  *  @sc:   pointer to the HW structure
17698  *
17699  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps.
17700  *  Disable K1 for 1000 and 100 speeds.
17701  */
17702 static int
17703 wm_k1_workaround_lv(struct wm_softc *sc)
17704 {
17705 	uint32_t reg;
17706 	uint16_t phyreg;
17707 	int rv;
17708 
17709 	if (sc->sc_type != WM_T_PCH2)
17710 		return 0;
17711 
17712 	/* Set K1 beacon duration based on 10Mbps speed */
17713 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
17714 	if (rv != 0)
17715 		return rv;
17716 
17717 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
17718 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
17719 		if (phyreg &
17720 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
17721 			/* LV 1G/100 packet drop issue workaround */
17722 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
17723 			    &phyreg);
17724 			if (rv != 0)
17725 				return rv;
17726 			phyreg &= ~HV_PM_CTRL_K1_ENA;
17727 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
17728 			    phyreg);
17729 			if (rv != 0)
17730 				return rv;
17731 		} else {
17732 			/* For 10Mbps */
17733 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
17734 			reg &= ~FEXTNVM4_BEACON_DURATION;
17735 			reg |= FEXTNVM4_BEACON_DURATION_16US;
17736 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
17737 		}
17738 	}
17739 
17740 	return 0;
17741 }
17742 
17743 /*
17744  *  wm_link_stall_workaround_hv - Si workaround
17745  *  @sc: pointer to the HW structure
17746  *
17747  *  This function works around a Si bug where the link partner can get
17748  *  a link up indication before the PHY does. If small packets are sent
17749  *  by the link partner they can be placed in the packet buffer without
17750  *  being properly accounted for by the PHY and will stall, preventing
17751  *  further packets from being received.  The workaround is to clear the
17752  *  packet buffer after the PHY detects link up.
17753  */
17754 static int
17755 wm_link_stall_workaround_hv(struct wm_softc *sc)
17756 {
17757 	uint16_t phyreg;
17758 
17759 	if (sc->sc_phytype != WMPHY_82578)
17760 		return 0;
17761 
17762 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
17763 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
17764 	if ((phyreg & BMCR_LOOP) != 0)
17765 		return 0;
17766 
17767 	/* Check if link is up and at 1Gbps */
17768 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
17769 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
17770 	    | BM_CS_STATUS_SPEED_MASK;
17771 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
17772 		| BM_CS_STATUS_SPEED_1000))
17773 		return 0;
17774 
17775 	delay(200 * 1000);	/* XXX too big */
17776 
17777 	/* Flush the packets in the FIFO buffer */
17778 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
17779 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
17780 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
17781 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
17782 
17783 	return 0;
17784 }
17785 
17786 static int
17787 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
17788 {
17789 	int rv;
17790 
17791 	rv = sc->phy.acquire(sc);
17792 	if (rv != 0) {
17793 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
17794 		    __func__);
17795 		return rv;
17796 	}
17797 
17798 	rv = wm_set_mdio_slow_mode_hv_locked(sc);
17799 
17800 	sc->phy.release(sc);
17801 
17802 	return rv;
17803 }
17804 
17805 static int
17806 wm_set_mdio_slow_mode_hv_locked(struct wm_softc *sc)
17807 {
17808 	int rv;
17809 	uint16_t reg;
17810 
17811 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
17812 	if (rv != 0)
17813 		return rv;
17814 
17815 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
17816 	    reg | HV_KMRN_MDIO_SLOW);
17817 }
17818 
17819 /*
17820  *  wm_configure_k1_ich8lan - Configure K1 power state
17821  *  @sc: pointer to the HW structure
17822  *  @enable: K1 state to configure
17823  *  @k1_enable: K1 state to configure
17824  *  Configure the K1 power state based on the provided parameter.
17825  *  Assumes semaphore already acquired.
17826  */
17827 static void
17828 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
17829 {
17830 	uint32_t ctrl, ctrl_ext, tmp;
17831 	uint16_t kmreg;
17832 	int rv;
17833 
17834 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
17835 
17836 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
17837 	if (rv != 0)
17838 		return;
17839 
17840 	if (k1_enable)
17841 		kmreg |= KUMCTRLSTA_K1_ENABLE;
17842 	else
17843 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
17844 
17845 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
17846 	if (rv != 0)
17847 		return;
17848 
17849 	delay(20);
17850 
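	/*
	 * Briefly force the MAC speed (CTRL_FRCSPD with the speed bits
	 * cleared, plus CTRL_EXT_SPD_BYPS), presumably so that the new
	 * K1 setting takes effect, then restore the original
	 * CTRL/CTRL_EXT values.
	 */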
17851 	ctrl = CSR_READ(sc, WMREG_CTRL);
17852 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
17853 
17854 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
17855 	tmp |= CTRL_FRCSPD;
17856 
17857 	CSR_WRITE(sc, WMREG_CTRL, tmp);
17858 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
17859 	CSR_WRITE_FLUSH(sc);
17860 	delay(20);
17861 
17862 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
17863 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
17864 	CSR_WRITE_FLUSH(sc);
17865 	delay(20);
17866 
17867 	return;
17868 }
17869 
17870 /* Special case - for the 82575 we need to do manual init ... */
17871 static void
17872 wm_reset_init_script_82575(struct wm_softc *sc)
17873 {
17874 	/*
17875 	 * Remark: this is untested code - we have no board without EEPROM.
17876 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
17877 	 */
17878 
17879 	/* SerDes configuration via SERDESCTRL */
17880 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
17881 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
17882 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
17883 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
17884 
17885 	/* CCM configuration via CCMCTL register */
17886 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
17887 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
17888 
17889 	/* PCIe lanes configuration */
17890 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
17891 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
17892 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
17893 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
17894 
17895 	/* PCIe PLL Configuration */
17896 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
17897 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
17898 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
17899 }
17900 
17901 static void
17902 wm_reset_mdicnfg_82580(struct wm_softc *sc)
17903 {
17904 	uint32_t reg;
17905 	uint16_t nvmword;
17906 	int rv;
17907 
17908 	if (sc->sc_type != WM_T_82580)
17909 		return;
17910 	if ((sc->sc_flags & WM_F_SGMII) == 0)
17911 		return;
17912 
17913 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
17914 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
17915 	if (rv != 0) {
17916 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
17917 		    __func__);
17918 		return;
17919 	}
17920 
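	/*
	 * Going by the flag names, MDICNFG_DEST routes this port's MDIO
	 * accesses to an external PHY and MDICNFG_COM_MDIO selects the
	 * MDIO interface shared between ports, as configured in the
	 * CFG3 word of the port's NVM section.
	 */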
17921 	reg = CSR_READ(sc, WMREG_MDICNFG);
17922 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
17923 		reg |= MDICNFG_DEST;
17924 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
17925 		reg |= MDICNFG_COM_MDIO;
17926 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
17927 }
17928 
17929 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
17930 
17931 static bool
17932 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
17933 {
17934 	uint32_t reg;
17935 	uint16_t id1, id2;
17936 	int i, rv;
17937 
17938 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17939 		device_xname(sc->sc_dev), __func__));
17940 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
17941 
17942 	id1 = id2 = 0xffff;
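	/*
	 * Try up to twice to read a valid PHY ID; a failed or
	 * all-zeros/all-ones read is retried once before falling back
	 * to MDIO slow mode below.
	 */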
17943 	for (i = 0; i < 2; i++) {
17944 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
17945 		    &id1);
17946 		if ((rv != 0) || MII_INVALIDID(id1))
17947 			continue;
17948 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
17949 		    &id2);
17950 		if ((rv != 0) || MII_INVALIDID(id2))
17951 			continue;
17952 		break;
17953 	}
17954 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
17955 		goto out;
17956 
17957 	/*
17958 	 * In case the PHY needs to be in MDIO slow mode,
17959 	 * set slow mode and try to read the PHY ID again.
17960 	 */
17961 	rv = 0;
17962 	if (sc->sc_type < WM_T_PCH_LPT) {
17963 		wm_set_mdio_slow_mode_hv_locked(sc);
17964 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
17965 		    &id1);
17966 		rv |= wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
17967 		    &id2);
17968 	}
17969 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
17970 		device_printf(sc->sc_dev, "XXX return with false\n");
17971 		return false;
17972 	}
17973 out:
17974 	if (sc->sc_type >= WM_T_PCH_LPT) {
17975 		/* Only unforce SMBus if ME is not active */
17976 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
17977 			uint16_t phyreg;
17978 
17979 			/* Unforce SMBus mode in PHY */
17980 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
17981 			    CV_SMB_CTRL, &phyreg);
17982 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
17983 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
17984 			    CV_SMB_CTRL, phyreg);
17985 
17986 			/* Unforce SMBus mode in MAC */
17987 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
17988 			reg &= ~CTRL_EXT_FORCE_SMBUS;
17989 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
17990 		}
17991 	}
17992 	return true;
17993 }
17994 
17995 static void
17996 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
17997 {
17998 	uint32_t reg;
17999 	int i;
18000 
18001 	/* Set PHY Config Counter to 50msec */
18002 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
18003 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
18004 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
18005 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
18006 
18007 	/* Toggle LANPHYPC */
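	/*
	 * Assert the override with the value bit clear, presumably to
	 * power-cycle the PHY, then release the override and wait for
	 * the PHY to come back up (CTRL_EXT_LPCD is polled on PCH_LPT
	 * and newer).
	 */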
18008 	reg = CSR_READ(sc, WMREG_CTRL);
18009 	reg |= CTRL_LANPHYPC_OVERRIDE;
18010 	reg &= ~CTRL_LANPHYPC_VALUE;
18011 	CSR_WRITE(sc, WMREG_CTRL, reg);
18012 	CSR_WRITE_FLUSH(sc);
18013 	delay(1000);
18014 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
18015 	CSR_WRITE(sc, WMREG_CTRL, reg);
18016 	CSR_WRITE_FLUSH(sc);
18017 
18018 	if (sc->sc_type < WM_T_PCH_LPT)
18019 		delay(50 * 1000);
18020 	else {
18021 		i = 20;
18022 
18023 		do {
18024 			delay(5 * 1000);
18025 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
18026 		    && i--);
18027 
18028 		delay(30 * 1000);
18029 	}
18030 }
18031 
18032 static int
18033 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
18034 {
18035 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
18036 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
18037 	uint32_t rxa;
18038 	uint16_t scale = 0, lat_enc = 0;
18039 	int32_t obff_hwm = 0;
18040 	int64_t lat_ns, value;
18041 
18042 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
18043 		device_xname(sc->sc_dev), __func__));
18044 
18045 	if (link) {
18046 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
18047 		uint32_t status;
18048 		uint16_t speed;
18049 		pcireg_t preg;
18050 
18051 		status = CSR_READ(sc, WMREG_STATUS);
18052 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
18053 		case STATUS_SPEED_10:
18054 			speed = 10;
18055 			break;
18056 		case STATUS_SPEED_100:
18057 			speed = 100;
18058 			break;
18059 		case STATUS_SPEED_1000:
18060 			speed = 1000;
18061 			break;
18062 		default:
18063 			device_printf(sc->sc_dev, "Unknown speed "
18064 			    "(status = %08x)\n", status);
18065 			return -1;
18066 		}
18067 
18068 		/* Rx Packet Buffer Allocation size (KB) */
18069 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
18070 
18071 		/*
18072 		 * Determine the maximum latency tolerated by the device.
18073 		 *
18074 		 * Per the PCIe spec, the tolerated latencies are encoded as
18075 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
18076 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
18077 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
18078 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
18079 		 */
18080 		lat_ns = ((int64_t)rxa * 1024 -
18081 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
18082 			+ ETHER_HDR_LEN))) * 8 * 1000;
18083 		if (lat_ns < 0)
18084 			lat_ns = 0;
18085 		else
18086 			lat_ns /= speed;
18087 		value = lat_ns;
18088 
18089 		while (value > LTRV_VALUE) {
18090 			scale++;
18091 			value = howmany(value, __BIT(5));
18092 		}
18093 		if (scale > LTRV_SCALE_MAX) {
18094 			device_printf(sc->sc_dev,
18095 			    "Invalid LTR latency scale %d\n", scale);
18096 			return -1;
18097 		}
18098 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
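		/*
		 * Worked example with hypothetical numbers: rxa = 24KB,
		 * an MTU of 1500 and a 1000Mbps link give
		 * lat_ns = (24 * 1024 - 2 * 1514) * 8 * 1000 / 1000 = 172384.
		 * Each loop iteration divides by 32 (one scale step is 2^5)
		 * until the value fits in 10 bits: 172384 -> 5387 -> 169,
		 * leaving scale = 2, so lat_enc encodes 169 * 2^10 ns
		 * (rounded up from 172384 ns).
		 */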
18099 
18100 		/* Determine the maximum latency tolerated by the platform */
18101 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
18102 		    WM_PCI_LTR_CAP_LPT);
18103 		max_snoop = preg & 0xffff;
18104 		max_nosnoop = preg >> 16;
18105 
18106 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
18107 
18108 		if (lat_enc > max_ltr_enc) {
18109 			lat_enc = max_ltr_enc;
18110 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
18111 			    * PCI_LTR_SCALETONS(
18112 				    __SHIFTOUT(lat_enc,
18113 					PCI_LTR_MAXSNOOPLAT_SCALE));
18114 		}
18115 
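		/*
		 * Convert the (possibly clamped) latency back into the
		 * amount of buffer consumed while absorbing traffic for
		 * lat_ns and leave the remainder as the OBFF high water
		 * mark. Assuming the platform limit did not clamp the
		 * hypothetical numbers above, this yields
		 * 24 - 21 = 3 (KB, computed with 1KB = 1000 bytes).
		 */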
18116 		if (lat_ns) {
18117 			lat_ns *= speed * 1000;
18118 			lat_ns /= 8;
18119 			lat_ns /= 1000000000;
18120 			obff_hwm = (int32_t)(rxa - lat_ns);
18121 		}
18122 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
18123 			device_printf(sc->sc_dev, "Invalid high water mark %d"
18124 			    " (rxa = %d, lat_ns = %d)\n",
18125 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
18126 			return -1;
18127 		}
18128 	}
18129 	/* Set the Snoop and No-Snoop latencies to the same value */
18130 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
18131 	CSR_WRITE(sc, WMREG_LTRV, reg);
18132 
18133 	/* Set OBFF high water mark */
18134 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
18135 	reg |= obff_hwm;
18136 	CSR_WRITE(sc, WMREG_SVT, reg);
18137 
18138 	/* Enable OBFF */
18139 	reg = CSR_READ(sc, WMREG_SVCR);
18140 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
18141 	CSR_WRITE(sc, WMREG_SVCR, reg);
18142 
18143 	return 0;
18144 }
18145 
18146 /*
18147  * I210 Errata 25 and I211 Errata 10
18148  * Slow System Clock.
18149  *
18150  * Note that this function is called in both the FLASH and iNVM cases on NetBSD.
18151  */
18152 static int
18153 wm_pll_workaround_i210(struct wm_softc *sc)
18154 {
18155 	uint32_t mdicnfg, wuc;
18156 	uint32_t reg;
18157 	pcireg_t pcireg;
18158 	uint32_t pmreg;
18159 	uint16_t nvmword, tmp_nvmword;
18160 	uint16_t phyval;
18161 	bool wa_done = false;
18162 	int i, rv = 0;
18163 
18164 	/* Get Power Management cap offset */
18165 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
18166 	    &pmreg, NULL) == 0)
18167 		return -1;
18168 
18169 	/* Save WUC and MDICNFG registers */
18170 	wuc = CSR_READ(sc, WMREG_WUC);
18171 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
18172 
18173 	reg = mdicnfg & ~MDICNFG_DEST;
18174 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
18175 
18176 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
18177 		/*
18178 		 * The default value of the Initialization Control Word 1
18179 		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
18180 		 */
18181 		nvmword = INVM_DEFAULT_AL;
18182 	}
18183 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
18184 
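	/*
	 * Retry until the PHY reports a configured PLL: each failed
	 * attempt resets the internal PHY with the patched autoload
	 * word in place and bounces the function through D3hot (below).
	 */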
18185 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
18186 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
18187 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
18188 
18189 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
18190 			rv = 0;
18191 			break; /* OK */
18192 		} else
18193 			rv = -1;
18194 
18195 		wa_done = true;
18196 		/* Directly reset the internal PHY */
18197 		reg = CSR_READ(sc, WMREG_CTRL);
18198 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
18199 
18200 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
18201 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
18202 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
18203 
18204 		CSR_WRITE(sc, WMREG_WUC, 0);
18205 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
18206 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
18207 
18208 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
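		/*
		 * Bounce the function through D3hot and back to D0,
		 * presumably to force the PLL to restart with the
		 * patched Initialization Control Word in effect.
		 */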
18209 		    pmreg + PCI_PMCSR);
18210 		pcireg |= PCI_PMCSR_STATE_D3;
18211 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
18212 		    pmreg + PCI_PMCSR, pcireg);
18213 		delay(1000);
18214 		pcireg &= ~PCI_PMCSR_STATE_D3;
18215 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
18216 		    pmreg + PCI_PMCSR, pcireg);
18217 
18218 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
18219 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
18220 
18221 		/* Restore WUC register */
18222 		CSR_WRITE(sc, WMREG_WUC, wuc);
18223 	}
18224 
18225 	/* Restore MDICNFG setting */
18226 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
18227 	if (wa_done)
18228 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
18229 	return rv;
18230 }
18231 
18232 static void
18233 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
18234 {
18235 	uint32_t reg;
18236 
18237 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
18238 		device_xname(sc->sc_dev), __func__));
18239 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
18240 	    || (sc->sc_type == WM_T_PCH_CNP) || (sc->sc_type == WM_T_PCH_TGP));
18241 
18242 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
18243 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
18244 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
18245 
18246 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
18247 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
18248 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
18249 }
18250 
18251 /* Sysctl functions */
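/*
 * The TDH/TDT handlers below are read-only: they copy the live
 * descriptor head/tail register into a local variable and point
 * node.sysctl_data at it, so sysctl_lookup(9) never writes back
 * to the hardware.
 */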
18252 static int
18253 wm_sysctl_tdh_handler(SYSCTLFN_ARGS)
18254 {
18255 	struct sysctlnode node = *rnode;
18256 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
18257 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
18258 	struct wm_softc *sc = txq->txq_sc;
18259 	uint32_t reg;
18260 
18261 	reg = CSR_READ(sc, WMREG_TDH(wmq->wmq_id));
18262 	node.sysctl_data = &reg;
18263 	return sysctl_lookup(SYSCTLFN_CALL(&node));
18264 }
18265 
18266 static int
18267 wm_sysctl_tdt_handler(SYSCTLFN_ARGS)
18268 {
18269 	struct sysctlnode node = *rnode;
18270 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
18271 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
18272 	struct wm_softc *sc = txq->txq_sc;
18273 	uint32_t reg;
18274 
18275 	reg = CSR_READ(sc, WMREG_TDT(wmq->wmq_id));
18276 	node.sysctl_data = &reg;
18277 	return sysctl_lookup(SYSCTLFN_CALL(&node));
18278 }
18279 
18280 #ifdef WM_DEBUG
18281 static int
18282 wm_sysctl_debug(SYSCTLFN_ARGS)
18283 {
18284 	struct sysctlnode node = *rnode;
18285 	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
18286 	uint32_t dflags;
18287 	int error;
18288 
18289 	dflags = sc->sc_debug;
18290 	node.sysctl_data = &dflags;
18291 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
18292 
18293 	if (error || newp == NULL)
18294 		return error;
18295 
18296 	sc->sc_debug = dflags;
18297 	device_printf(sc->sc_dev, "TARC0: %08x\n", CSR_READ(sc, WMREG_TARC0));
18298 	device_printf(sc->sc_dev, "TDT0: %08x\n", CSR_READ(sc, WMREG_TDT(0)));
18299 
18300 	return 0;
18301 }
18302 #endif
18303