xref: /netbsd-src/sys/dev/pci/if_wm.c (revision 965ff70d6cc168e208e3ec6b725c8ce156e95fd0)
1 /*	$NetBSD: if_wm.c,v 1.801 2024/11/10 11:46:24 mlelstv Exp $	*/
2 
3 /*
4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*******************************************************************************
39 
40   Copyright (c) 2001-2005, Intel Corporation
41   All rights reserved.
42 
43   Redistribution and use in source and binary forms, with or without
44   modification, are permitted provided that the following conditions are met:
45 
46    1. Redistributions of source code must retain the above copyright notice,
47       this list of conditions and the following disclaimer.
48 
49    2. Redistributions in binary form must reproduce the above copyright
50       notice, this list of conditions and the following disclaimer in the
51       documentation and/or other materials provided with the distribution.
52 
53    3. Neither the name of the Intel Corporation nor the names of its
54       contributors may be used to endorse or promote products derived from
55       this software without specific prior written permission.
56 
57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67   POSSIBILITY OF SUCH DAMAGE.
68 
69 *******************************************************************************/
70 /*
71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72  *
73  * TODO (in order of importance):
74  *
75  *	- Check XXX'ed comments
76  *	- Tx multiqueue improvement (refine queue selection logic)
77  *	- Split header buffer for newer descriptors
78  *	- EEE (Energy Efficient Ethernet) for I354
79  *	- Virtual Function
80  *	- Set LED correctly (based on contents in EEPROM)
81  *	- Rework how parameters are loaded from the EEPROM.
82  */
83 
84 #include <sys/cdefs.h>
85 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.801 2024/11/10 11:46:24 mlelstv Exp $");
86 
87 #ifdef _KERNEL_OPT
88 #include "opt_if_wm.h"
89 #endif
90 
91 #include <sys/param.h>
92 
93 #include <sys/atomic.h>
94 #include <sys/callout.h>
95 #include <sys/cpu.h>
96 #include <sys/device.h>
97 #include <sys/errno.h>
98 #include <sys/interrupt.h>
99 #include <sys/ioctl.h>
100 #include <sys/kernel.h>
101 #include <sys/kmem.h>
102 #include <sys/mbuf.h>
103 #include <sys/pcq.h>
104 #include <sys/queue.h>
105 #include <sys/rndsource.h>
106 #include <sys/socket.h>
107 #include <sys/sysctl.h>
108 #include <sys/syslog.h>
109 #include <sys/systm.h>
110 #include <sys/workqueue.h>
111 
112 #include <net/if.h>
113 #include <net/if_dl.h>
114 #include <net/if_media.h>
115 #include <net/if_ether.h>
116 
117 #include <net/bpf.h>
118 
119 #include <net/rss_config.h>
120 
121 #include <netinet/in.h>			/* XXX for struct ip */
122 #include <netinet/in_systm.h>		/* XXX for struct ip */
123 #include <netinet/ip.h>			/* XXX for struct ip */
124 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
125 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
126 
127 #include <sys/bus.h>
128 #include <sys/intr.h>
129 #include <machine/endian.h>
130 
131 #include <dev/mii/mii.h>
132 #include <dev/mii/mdio.h>
133 #include <dev/mii/miivar.h>
134 #include <dev/mii/miidevs.h>
135 #include <dev/mii/mii_bitbang.h>
136 #include <dev/mii/ikphyreg.h>
137 #include <dev/mii/igphyreg.h>
138 #include <dev/mii/igphyvar.h>
139 #include <dev/mii/inbmphyreg.h>
140 #include <dev/mii/ihphyreg.h>
141 #include <dev/mii/makphyreg.h>
142 
143 #include <dev/pci/pcireg.h>
144 #include <dev/pci/pcivar.h>
145 #include <dev/pci/pcidevs.h>
146 
147 #include <dev/pci/if_wmreg.h>
148 #include <dev/pci/if_wmvar.h>
149 
150 #ifdef WM_DEBUG
151 #define	WM_DEBUG_LINK		__BIT(0)
152 #define	WM_DEBUG_TX		__BIT(1)
153 #define	WM_DEBUG_RX		__BIT(2)
154 #define	WM_DEBUG_GMII		__BIT(3)
155 #define	WM_DEBUG_MANAGE		__BIT(4)
156 #define	WM_DEBUG_NVM		__BIT(5)
157 #define	WM_DEBUG_INIT		__BIT(6)
158 #define	WM_DEBUG_LOCK		__BIT(7)
159 
160 #if 0
161 #define WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
162 	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
163 	WM_DEBUG_LOCK
164 #endif
165 
166 #define	DPRINTF(sc, x, y)			  \
167 	do {					  \
168 		if ((sc)->sc_debug & (x))	  \
169 			printf y;		  \
170 	} while (0)
171 #else
172 #define	DPRINTF(sc, x, y)	__nothing
173 #endif /* WM_DEBUG */
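
/*
 * Usage note (informational): the second DPRINTF() argument selects a
 * debug class and the third must be a fully parenthesized printf()
 * argument list, since the macro body expands to "printf y".  A
 * hypothetical call:
 *
 *	DPRINTF(sc, WM_DEBUG_LINK,
 *	    ("%s: link state changed\n", device_xname(sc->sc_dev)));
 */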
174 
175 #define WM_WORKQUEUE_PRI PRI_SOFTNET
176 
177 /*
178  * The maximum number of interrupts this driver uses: one per queue
179  * plus one for link events.
179  */
180 #define WM_MAX_NQUEUEINTR	16
181 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
182 
183 #ifndef WM_DISABLE_MSI
184 #define	WM_DISABLE_MSI 0
185 #endif
186 #ifndef WM_DISABLE_MSIX
187 #define	WM_DISABLE_MSIX 0
188 #endif
189 
190 int wm_disable_msi = WM_DISABLE_MSI;
191 int wm_disable_msix = WM_DISABLE_MSIX;
192 
193 #ifndef WM_WATCHDOG_TIMEOUT
194 #define WM_WATCHDOG_TIMEOUT 5
195 #endif
196 static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
197 
198 /*
199  * Transmit descriptor list size.  Due to errata, we can only have
200  * 256 hardware descriptors in the ring on < 82544, but we use 4096
201  * on >= 82544. We tell the upper layers that they can queue a lot
202  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
203  * of them at a time.
204  *
205  * We allow up to 64 DMA segments per packet.  Pathological packet
206  * chains containing many small mbufs have been observed in zero-copy
207  * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
208  * segments, m_defrag() is called to reduce it (see the sketch below).
209  */
210 #define	WM_NTXSEGS		64
211 #define	WM_IFQUEUELEN		256
212 #define	WM_TXQUEUELEN_MAX	64
213 #define	WM_TXQUEUELEN_MAX_82547	16
214 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
215 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
216 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
217 #define	WM_NTXDESC_82542	256
218 #define	WM_NTXDESC_82544	4096
219 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
220 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
221 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
222 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
223 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
224 
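/*
 * Illustrative sketch, not the driver's actual code: the conventional
 * way to honour the WM_NTXSEGS limit is to attempt the DMA load and
 * fall back to m_defrag() on EFBIG.  The wm_example_* name is
 * hypothetical.
 */
#if 0
static int
wm_example_load_txmbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf **mp)
{
	struct mbuf *m;
	int error;

	error = bus_dmamap_load_mbuf(dmat, map, *mp,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error != EFBIG)
		return error;	/* loaded successfully, or a hard error */

	/* Too many DMA segments; compact the chain and retry once. */
	if ((m = m_defrag(*mp, M_NOWAIT)) == NULL)
		return ENOBUFS;
	*mp = m;
	return bus_dmamap_load_mbuf(dmat, map, *mp,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
}
#endif
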
225 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
226 
227 #define	WM_TXINTERQSIZE		256
228 
229 #ifndef WM_TX_PROCESS_LIMIT_DEFAULT
230 #define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
231 #endif
232 #ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
233 #define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
234 #endif
235 
236 /*
237  * Receive descriptor list size.  We have one Rx buffer for normal-sized
238  * packets.  Jumbo packets consume 5 Rx buffers for a full-sized packet.
239  * We allocate 256 receive descriptors, each with a 2k buffer (MCLBYTES),
240  * which gives us room for 50 jumbo packets (see the arithmetic below).
241  */
242 #define	WM_NRXDESC		256U
243 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
244 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
245 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
246 
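/*
 * Worked numbers for the sizing comment above: a 9018-byte jumbo frame
 * split into 2048-byte (MCLBYTES) buffers needs howmany(9018, 2048) = 5
 * descriptors, and 256 / 5 = 51, so roughly 50 full-sized jumbo packets
 * can be outstanding in the ring at once.
 */
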
247 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
248 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
249 #endif
250 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
251 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
252 #endif
253 
254 typedef union txdescs {
255 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
256 	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
257 } txdescs_t;
258 
259 typedef union rxdescs {
260 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
261 	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
262 	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
263 } rxdescs_t;
264 
265 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
266 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
267 
268 /*
269  * Software state for transmit jobs.
270  */
271 struct wm_txsoft {
272 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
273 	bus_dmamap_t txs_dmamap;	/* our DMA map */
274 	int txs_firstdesc;		/* first descriptor in packet */
275 	int txs_lastdesc;		/* last descriptor in packet */
276 	int txs_ndesc;			/* # of descriptors used */
277 };
278 
279 /*
280  * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
281  * buffer and a DMA map. For packets which fill more than one buffer, we chain
282  * them together.
283  */
284 struct wm_rxsoft {
285 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
286 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
287 };
288 
289 #define WM_LINKUP_TIMEOUT	50
290 
291 static uint16_t swfwphysem[] = {
292 	SWFW_PHY0_SM,
293 	SWFW_PHY1_SM,
294 	SWFW_PHY2_SM,
295 	SWFW_PHY3_SM
296 };
297 
298 static const uint32_t wm_82580_rxpbs_table[] = {
299 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
300 };
301 
302 struct wm_softc;
303 
304 #if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
305 #if !defined(WM_EVENT_COUNTERS)
306 #define WM_EVENT_COUNTERS 1
307 #endif
308 #endif
309 
310 #ifdef WM_EVENT_COUNTERS
311 #define WM_Q_EVCNT_DEFINE(qname, evname)				 \
312 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
313 	struct evcnt qname##_ev_##evname
314 
315 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
316 	do {								\
317 		snprintf((q)->qname##_##evname##_evcnt_name,		\
318 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
319 		    "%s%02d%s", #qname, (qnum), #evname);		\
320 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
321 		    (evtype), NULL, (xname),				\
322 		    (q)->qname##_##evname##_evcnt_name);		\
323 	} while (0)
324 
325 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
326 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
327 
328 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
329 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
330 
331 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
332 	evcnt_detach(&(q)->qname##_ev_##evname)
333 #endif /* WM_EVENT_COUNTERS */
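
/*
 * Expansion example (informational): WM_Q_EVCNT_DEFINE(txq, txdw)
 * declares
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * and WM_Q_EVCNT_ATTACH() later formats the counter name as, e.g.,
 * "txq00txdw" for queue 0 before passing it to evcnt_attach_dynamic(9).
 */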
334 
335 struct wm_txqueue {
336 	kmutex_t *txq_lock;		/* lock for tx operations */
337 
338 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
339 
340 	/* Software state for the transmit descriptors. */
341 	int txq_num;			/* must be a power of two */
342 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
343 
344 	/* TX control data structures. */
345 	int txq_ndesc;			/* must be a power of two */
346 	size_t txq_descsize;		/* size of a Tx descriptor */
347 	txdescs_t *txq_descs_u;
348 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
349 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
350 	int txq_desc_rseg;		/* actual number of control data segments */
351 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
352 #define	txq_descs	txq_descs_u->sctxu_txdescs
353 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
354 
355 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
356 
357 	int txq_free;			/* number of free Tx descriptors */
358 	int txq_next;			/* next ready Tx descriptor */
359 
360 	int txq_sfree;			/* number of free Tx jobs */
361 	int txq_snext;			/* next free Tx job */
362 	int txq_sdirty;			/* dirty Tx jobs */
363 
364 	/* These 4 variables are used only on the 82547. */
365 	int txq_fifo_size;		/* Tx FIFO size */
366 	int txq_fifo_head;		/* current head of FIFO */
367 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
368 	int txq_fifo_stall;		/* Tx FIFO is stalled */
369 
370 	/*
371 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
372 	 * CPUs.  This queue mediates between them without blocking.
373 	 */
374 	pcq_t *txq_interq;
375 
376 	/*
377 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
378 	 * to manage the Tx H/W queue's busy flag.
379 	 */
380 	int txq_flags;			/* flags for H/W queue, see below */
381 #define	WM_TXQ_NO_SPACE		0x1
382 #define	WM_TXQ_LINKDOWN_DISCARD	0x2
383 
384 	bool txq_stopping;
385 
386 	bool txq_sending;
387 	time_t txq_lastsent;
388 
389 	/* Checksum flags used for previous packet */
390 	uint32_t	txq_last_hw_cmd;
391 	uint8_t		txq_last_hw_fields;
392 	uint16_t	txq_last_hw_ipcs;
393 	uint16_t	txq_last_hw_tucs;
394 
395 	uint32_t txq_packets;		/* for AIM */
396 	uint32_t txq_bytes;		/* for AIM */
397 #ifdef WM_EVENT_COUNTERS
398 	/* TX event counters */
399 	WM_Q_EVCNT_DEFINE(txq, txsstall);   /* Stalled due to no txs */
400 	WM_Q_EVCNT_DEFINE(txq, txdstall);   /* Stalled due to no txd */
401 	WM_Q_EVCNT_DEFINE(txq, fifo_stall); /* FIFO stalls (82547) */
402 	WM_Q_EVCNT_DEFINE(txq, txdw);	    /* Tx descriptor interrupts */
403 	WM_Q_EVCNT_DEFINE(txq, txqe);	    /* Tx queue empty interrupts */
404 					    /* XXX not used? */
405 
406 	WM_Q_EVCNT_DEFINE(txq, ipsum);	    /* IP checksums comp. */
407 	WM_Q_EVCNT_DEFINE(txq, tusum);	    /* TCP/UDP cksums comp. */
408 	WM_Q_EVCNT_DEFINE(txq, tusum6);	    /* TCP/UDP v6 cksums comp. */
409 	WM_Q_EVCNT_DEFINE(txq, tso);	    /* TCP seg offload (IPv4) */
410 	WM_Q_EVCNT_DEFINE(txq, tso6);	    /* TCP seg offload (IPv6) */
411 	WM_Q_EVCNT_DEFINE(txq, tsopain);    /* Painful header manip. for TSO */
412 	WM_Q_EVCNT_DEFINE(txq, pcqdrop);    /* Pkt dropped in pcq */
413 	WM_Q_EVCNT_DEFINE(txq, descdrop);   /* Pkt dropped in MAC desc ring */
414 					    /* other than toomanyseg */
415 
416 	WM_Q_EVCNT_DEFINE(txq, toomanyseg); /* Pkt dropped(toomany DMA segs) */
417 	WM_Q_EVCNT_DEFINE(txq, defrag);	    /* m_defrag() */
418 	WM_Q_EVCNT_DEFINE(txq, underrun);   /* Tx underrun */
419 	WM_Q_EVCNT_DEFINE(txq, skipcontext); /* Tx skip wrong cksum context */
420 
421 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
422 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
423 #endif /* WM_EVENT_COUNTERS */
424 };
425 
426 struct wm_rxqueue {
427 	kmutex_t *rxq_lock;		/* lock for rx operations */
428 
429 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
430 
431 	/* Software state for the receive descriptors. */
432 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
433 
434 	/* RX control data structures. */
435 	int rxq_ndesc;			/* must be a power of two */
436 	size_t rxq_descsize;		/* size of an Rx descriptor */
437 	rxdescs_t *rxq_descs_u;
438 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
439 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
440 	int rxq_desc_rseg;		/* actual number of control data segments */
441 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
442 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
443 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
444 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
445 
446 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
447 
448 	int rxq_ptr;			/* next ready Rx desc/queue ent */
449 	int rxq_discard;
450 	int rxq_len;
451 	struct mbuf *rxq_head;
452 	struct mbuf *rxq_tail;
453 	struct mbuf **rxq_tailp;
454 
455 	bool rxq_stopping;
456 
457 	uint32_t rxq_packets;		/* for AIM */
458 	uint32_t rxq_bytes;		/* for AIM */
459 #ifdef WM_EVENT_COUNTERS
460 	/* RX event counters */
461 	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
462 	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */
463 	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
464 	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
465 	WM_Q_EVCNT_DEFINE(rxq, qdrop);	/* Rx queue drop packet */
466 #endif
467 };
468 
469 struct wm_queue {
470 	int wmq_id;			/* index of TX/RX queues */
471 	int wmq_intr_idx;		/* index of MSI-X tables */
472 
473 	uint32_t wmq_itr;		/* interrupt interval per queue. */
474 	bool wmq_set_itr;
475 
476 	struct wm_txqueue wmq_txq;
477 	struct wm_rxqueue wmq_rxq;
478 	char sysctlname[32];		/* Name for sysctl */
479 
480 	bool wmq_txrx_use_workqueue;
481 	bool wmq_wq_enqueued;
482 	struct work wmq_cookie;
483 	void *wmq_si;
484 };
485 
486 struct wm_phyop {
487 	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
488 	void (*release)(struct wm_softc *);
489 	int (*readreg_locked)(device_t, int, int, uint16_t *);
490 	int (*writereg_locked)(device_t, int, int, uint16_t);
491 	int reset_delay_us;
492 	bool no_errprint;
493 };
494 
495 struct wm_nvmop {
496 	int (*acquire)(struct wm_softc *) __attribute__((warn_unused_result));
497 	void (*release)(struct wm_softc *);
498 	int (*read)(struct wm_softc *, int, int, uint16_t *);
499 };
500 
501 /*
502  * Software state per device.
503  */
504 struct wm_softc {
505 	device_t sc_dev;		/* generic device information */
506 	bus_space_tag_t sc_st;		/* bus space tag */
507 	bus_space_handle_t sc_sh;	/* bus space handle */
508 	bus_size_t sc_ss;		/* bus space size */
509 	bus_space_tag_t sc_iot;		/* I/O space tag */
510 	bus_space_handle_t sc_ioh;	/* I/O space handle */
511 	bus_size_t sc_ios;		/* I/O space size */
512 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
513 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
514 	bus_size_t sc_flashs;		/* flash registers space size */
515 	off_t sc_flashreg_offset;	/*
516 					 * offset to flash registers from
517 					 * start of BAR
518 					 */
519 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
520 
521 	struct ethercom sc_ethercom;	/* Ethernet common data */
522 	struct mii_data sc_mii;		/* MII/media information */
523 
524 	pci_chipset_tag_t sc_pc;
525 	pcitag_t sc_pcitag;
526 	int sc_bus_speed;		/* PCI/PCIX bus speed */
527 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
528 
529 	uint16_t sc_pcidevid;		/* PCI device ID */
530 	wm_chip_type sc_type;		/* MAC type */
531 	int sc_rev;			/* MAC revision */
532 	wm_phy_type sc_phytype;		/* PHY type */
533 	uint8_t sc_sfptype;		/* SFP type */
534 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
535 #define	WM_MEDIATYPE_UNKNOWN		0x00
536 #define	WM_MEDIATYPE_FIBER		0x01
537 #define	WM_MEDIATYPE_COPPER		0x02
538 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
539 	int sc_funcid;			/* unit number of the chip (0 to 3) */
540 	u_int sc_flags;			/* flags; see below */
541 	u_short sc_if_flags;		/* last if_flags */
542 	int sc_ec_capenable;		/* last ec_capenable */
543 	int sc_flowflags;		/* 802.3x flow control flags */
544 	uint16_t eee_lp_ability;	/* EEE link partner's ability */
545 	int sc_align_tweak;
546 
547 	void *sc_ihs[WM_MAX_NINTR];	/*
548 					 * interrupt cookie.
549 					 * - legacy and msi use sc_ihs[0] only
550 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
551 					 */
552 	pci_intr_handle_t *sc_intrs;	/*
553 					 * legacy and msi use sc_intrs[0] only
554 					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
555 					 */
556 	int sc_nintrs;			/* number of interrupts */
557 
558 	int sc_link_intr_idx;		/* index of MSI-X tables */
559 
560 	callout_t sc_tick_ch;		/* tick callout */
561 	bool sc_core_stopping;
562 
563 	int sc_nvm_ver_major;
564 	int sc_nvm_ver_minor;
565 	int sc_nvm_ver_build;
566 	int sc_nvm_addrbits;		/* NVM address bits */
567 	unsigned int sc_nvm_wordsize;	/* NVM word size */
568 	int sc_ich8_flash_base;
569 	int sc_ich8_flash_bank_size;
570 	int sc_nvm_k1_enabled;
571 
572 	int sc_nqueues;
573 	struct wm_queue *sc_queue;
574 	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
575 	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
576 	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
577 	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
578 	struct workqueue *sc_queue_wq;
579 	bool sc_txrx_use_workqueue;
580 
581 	int sc_affinity_offset;
582 
583 #ifdef WM_EVENT_COUNTERS
584 	/* Event counters. */
585 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
586 
587 	/* >= WM_T_82542_2_1 */
588 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
589 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
590 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
591 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
592 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
593 
594 	struct evcnt sc_ev_crcerrs;	/* CRC Error */
595 	struct evcnt sc_ev_algnerrc;	/* Alignment Error */
596 	struct evcnt sc_ev_symerrc;	/* Symbol Error */
597 	struct evcnt sc_ev_rxerrc;	/* Receive Error */
598 	struct evcnt sc_ev_mpc;		/* Missed Packets */
599 	struct evcnt sc_ev_scc;		/* Single Collision */
600 	struct evcnt sc_ev_ecol;	/* Excessive Collision */
601 	struct evcnt sc_ev_mcc;		/* Multiple Collision */
602 	struct evcnt sc_ev_latecol;	/* Late Collision */
603 	struct evcnt sc_ev_colc;	/* Collision */
604 	struct evcnt sc_ev_cbtmpc;	/* Circuit Breaker Tx Mng. Packet */
605 	struct evcnt sc_ev_dc;		/* Defer */
606 	struct evcnt sc_ev_tncrs;	/* Tx-No CRS */
607 	struct evcnt sc_ev_sec;		/* Sequence Error */
608 
609 	/* Old */
610 	struct evcnt sc_ev_cexterr;	/* Carrier Extension Error */
611 	/* New */
612 	struct evcnt sc_ev_htdpmc;	/* Host Tx Discarded Pkts by MAC */
613 
614 	struct evcnt sc_ev_rlec;	/* Receive Length Error */
615 	struct evcnt sc_ev_cbrdpc;	/* Circuit Breaker Rx Dropped Packet */
616 	struct evcnt sc_ev_prc64;	/* Packets Rx (64 bytes) */
617 	struct evcnt sc_ev_prc127;	/* Packets Rx (65-127 bytes) */
618 	struct evcnt sc_ev_prc255;	/* Packets Rx (128-255 bytes) */
619 	struct evcnt sc_ev_prc511;	/* Packets Rx (256-511 bytes) */
620 	struct evcnt sc_ev_prc1023;	/* Packets Rx (512-1023 bytes) */
621 	struct evcnt sc_ev_prc1522;	/* Packets Rx (1024-1522 bytes) */
622 	struct evcnt sc_ev_gprc;	/* Good Packets Rx */
623 	struct evcnt sc_ev_bprc;	/* Broadcast Packets Rx */
624 	struct evcnt sc_ev_mprc;	/* Multicast Packets Rx */
625 	struct evcnt sc_ev_gptc;	/* Good Packets Tx */
626 	struct evcnt sc_ev_gorc;	/* Good Octets Rx */
627 	struct evcnt sc_ev_gotc;	/* Good Octets Tx */
628 	struct evcnt sc_ev_rnbc;	/* Rx No Buffers */
629 	struct evcnt sc_ev_ruc;		/* Rx Undersize */
630 	struct evcnt sc_ev_rfc;		/* Rx Fragment */
631 	struct evcnt sc_ev_roc;		/* Rx Oversize */
632 	struct evcnt sc_ev_rjc;		/* Rx Jabber */
633 	struct evcnt sc_ev_mgtprc;	/* Management Packets RX */
634 	struct evcnt sc_ev_mgtpdc;	/* Management Packets Dropped */
635 	struct evcnt sc_ev_mgtptc;	/* Management Packets TX */
636 	struct evcnt sc_ev_tor;		/* Total Octets Rx */
637 	struct evcnt sc_ev_tot;		/* Total Octets Tx */
638 	struct evcnt sc_ev_tpr;		/* Total Packets Rx */
639 	struct evcnt sc_ev_tpt;		/* Total Packets Tx */
640 	struct evcnt sc_ev_ptc64;	/* Packets Tx (64 bytes) */
641 	struct evcnt sc_ev_ptc127;	/* Packets Tx (65-127 bytes) */
642 	struct evcnt sc_ev_ptc255;	/* Packets Tx (128-255 bytes) */
643 	struct evcnt sc_ev_ptc511;	/* Packets Tx (256-511 bytes) */
644 	struct evcnt sc_ev_ptc1023;	/* Packets Tx (512-1023 bytes) */
645 	struct evcnt sc_ev_ptc1522;	/* Packets Tx (1024-1522 Bytes) */
646 	struct evcnt sc_ev_mptc;	/* Multicast Packets Tx */
647 	struct evcnt sc_ev_bptc;	/* Broadcast Packets Tx */
648 	struct evcnt sc_ev_tsctc;	/* TCP Segmentation Context Tx */
649 
650 	/* Old */
651 	struct evcnt sc_ev_tsctfc;	/* TCP Segmentation Context Tx Fail */
652 	/* New */
653 	struct evcnt sc_ev_cbrmpc;	/* Circuit Breaker Rx Mng. Packet */
654 
655 	struct evcnt sc_ev_iac;		/* Interrupt Assertion */
656 
657 	/* Old */
658 	struct evcnt sc_ev_icrxptc;	/* Intr. Cause Rx Pkt Timer Expire */
659 	struct evcnt sc_ev_icrxatc;	/* Intr. Cause Rx Abs Timer Expire */
660 	struct evcnt sc_ev_ictxptc;	/* Intr. Cause Tx Pkt Timer Expire */
661 	struct evcnt sc_ev_ictxatc;	/* Intr. Cause Tx Abs Timer Expire */
662 	struct evcnt sc_ev_ictxqec;	/* Intr. Cause Tx Queue Empty */
663 	struct evcnt sc_ev_ictxqmtc;	/* Intr. Cause Tx Queue Min Thresh */
664 	/*
665 	 * sc_ev_rxdmtc is shared by both the "Intr. Cause" and the
666 	 * non-"Intr. Cause" registers.
667 	 */
668 	struct evcnt sc_ev_rxdmtc;	/* (Intr. Cause) Rx Desc Min Thresh */
669 	struct evcnt sc_ev_icrxoc;	/* Intr. Cause Receiver Overrun */
670 	/* New */
671 	struct evcnt sc_ev_rpthc;	/* Rx Packets To Host */
672 	struct evcnt sc_ev_debug1;	/* Debug Counter 1 */
673 	struct evcnt sc_ev_debug2;	/* Debug Counter 2 */
674 	struct evcnt sc_ev_debug3;	/* Debug Counter 3 */
675 	struct evcnt sc_ev_hgptc;	/* Host Good Packets TX */
676 	struct evcnt sc_ev_debug4;	/* Debug Counter 4 */
677 	struct evcnt sc_ev_htcbdpc;	/* Host Tx Circuit Breaker Drp. Pkts */
678 	struct evcnt sc_ev_hgorc;	/* Host Good Octets Rx */
679 	struct evcnt sc_ev_hgotc;	/* Host Good Octets Tx */
680 	struct evcnt sc_ev_lenerrs;	/* Length Error */
681 	struct evcnt sc_ev_tlpic;	/* EEE Tx LPI */
682 	struct evcnt sc_ev_rlpic;	/* EEE Rx LPI */
683 	struct evcnt sc_ev_b2ogprc;	/* BMC2OS pkts received by host */
684 	struct evcnt sc_ev_o2bspc;	/* OS2BMC pkts transmitted by host */
685 	struct evcnt sc_ev_b2ospc;	/* BMC2OS pkts sent by BMC */
686 	struct evcnt sc_ev_o2bgptc;	/* OS2BMC pkts received by BMC */
687 	struct evcnt sc_ev_scvpc;	/* SerDes/SGMII Code Violation Pkt. */
688 	struct evcnt sc_ev_hrmpc;	/* Header Redirection Missed Packet */
689 #endif /* WM_EVENT_COUNTERS */
690 
691 	struct sysctllog *sc_sysctllog;
692 
693 	/* This variable is used only on the 82547. */
694 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
695 
696 	uint32_t sc_ctrl;		/* prototype CTRL register */
697 #if 0
698 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
699 #endif
700 	uint32_t sc_icr;		/* prototype interrupt bits */
701 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
702 	uint32_t sc_tctl;		/* prototype TCTL register */
703 	uint32_t sc_rctl;		/* prototype RCTL register */
704 	uint32_t sc_txcw;		/* prototype TXCW register */
705 	uint32_t sc_tipg;		/* prototype TIPG register */
706 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
707 	uint32_t sc_pba;		/* prototype PBA register */
708 
709 	int sc_tbi_linkup;		/* TBI link status */
710 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
711 	int sc_tbi_serdes_ticks;	/* tbi ticks */
712 	struct timeval sc_linkup_delay_time; /* delay LINK_STATE_UP */
713 
714 	int sc_mchash_type;		/* multicast filter offset */
715 
716 	krndsource_t rnd_source;	/* random source */
717 
718 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
719 
720 	kmutex_t *sc_core_lock;		/* lock for softc operations */
721 	kmutex_t *sc_ich_phymtx;	/*
722 					 * 82574/82583/ICH/PCH specific PHY
723 					 * mutex. For 82574/82583, the mutex
724 					 * is used for both PHY and NVM.
725 					 */
726 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
727 
728 	struct wm_phyop phy;
729 	struct wm_nvmop nvm;
730 
731 	struct workqueue *sc_reset_wq;
732 	struct work sc_reset_work;
733 	volatile unsigned sc_reset_pending;
734 
735 	bool sc_dying;
736 
737 #ifdef WM_DEBUG
738 	uint32_t sc_debug;
739 	bool sc_trigger_reset;
740 #endif
741 };
742 
743 #define	WM_RXCHAIN_RESET(rxq)						\
744 do {									\
745 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
746 	*(rxq)->rxq_tailp = NULL;					\
747 	(rxq)->rxq_len = 0;						\
748 } while (/*CONSTCOND*/0)
749 
750 #define	WM_RXCHAIN_LINK(rxq, m)						\
751 do {									\
752 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
753 	(rxq)->rxq_tailp = &(m)->m_next;				\
754 } while (/*CONSTCOND*/0)
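
/*
 * Informational sketch of the tail-pointer idiom above: rxq_tailp always
 * points at the m_next slot where the next fragment belongs, so appending
 * is O(1) with no list walk.  The wm_example_* name is hypothetical.
 */
#if 0
static void
wm_example_rxchain(struct wm_rxqueue *rxq, struct mbuf *m1, struct mbuf *m2)
{
	WM_RXCHAIN_RESET(rxq);	  /* rxq_head = NULL, tailp = &rxq_head */
	WM_RXCHAIN_LINK(rxq, m1); /* rxq_head = m1, tailp = &m1->m_next */
	WM_RXCHAIN_LINK(rxq, m2); /* m1->m_next = m2, tailp = &m2->m_next */
}
#endif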
755 
756 #ifdef WM_EVENT_COUNTERS
757 #ifdef __HAVE_ATOMIC64_LOADSTORE
758 #define	WM_EVCNT_INCR(ev)						\
759 	atomic_store_relaxed(&((ev)->ev_count),				\
760 	    atomic_load_relaxed(&(ev)->ev_count) + 1)
761 #define	WM_EVCNT_STORE(ev, val)						\
762 	atomic_store_relaxed(&((ev)->ev_count), (val))
763 #define	WM_EVCNT_ADD(ev, val)						\
764 	atomic_store_relaxed(&((ev)->ev_count),				\
765 	    atomic_load_relaxed(&(ev)->ev_count) + (val))
766 #else
767 #define	WM_EVCNT_INCR(ev)						\
768 	((ev)->ev_count)++
769 #define	WM_EVCNT_STORE(ev, val)						\
770 	((ev)->ev_count = (val))
771 #define	WM_EVCNT_ADD(ev, val)						\
772 	(ev)->ev_count += (val)
773 #endif
774 
775 #define WM_Q_EVCNT_INCR(qname, evname)			\
776 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
777 #define WM_Q_EVCNT_STORE(qname, evname, val)		\
778 	WM_EVCNT_STORE(&(qname)->qname##_ev_##evname, (val))
779 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
780 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
781 #else /* !WM_EVENT_COUNTERS */
782 #define	WM_EVCNT_INCR(ev)	__nothing
783 #define	WM_EVCNT_STORE(ev, val)	__nothing
784 #define	WM_EVCNT_ADD(ev, val)	__nothing
785 
786 #define WM_Q_EVCNT_INCR(qname, evname)		__nothing
787 #define WM_Q_EVCNT_STORE(qname, evname, val)	__nothing
788 #define WM_Q_EVCNT_ADD(qname, evname, val)	__nothing
789 #endif /* !WM_EVENT_COUNTERS */
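
/*
 * Note (informational): the relaxed load/store pair above is not an
 * atomic read-modify-write; it only keeps 64-bit counter reads (e.g. by
 * vmstat -e) free of tearing.  Lost increments are presumably prevented
 * by the callers' own serialization, such as the per-queue locks.
 */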
790 
791 #define	CSR_READ(sc, reg)						\
792 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
793 #define	CSR_WRITE(sc, reg, val)						\
794 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
795 #define	CSR_WRITE_FLUSH(sc)						\
796 	(void)CSR_READ((sc), WMREG_STATUS)
797 
798 #define ICH8_FLASH_READ32(sc, reg)					\
799 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
800 	    (reg) + sc->sc_flashreg_offset)
801 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
802 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
803 	    (reg) + sc->sc_flashreg_offset, (data))
804 
805 #define ICH8_FLASH_READ16(sc, reg)					\
806 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
807 	    (reg) + sc->sc_flashreg_offset)
808 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
809 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
810 	    (reg) + sc->sc_flashreg_offset, (data))
811 
812 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
813 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
814 
815 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
816 #define	WM_CDTXADDR_HI(txq, x)						\
817 	(sizeof(bus_addr_t) == 8 ?					\
818 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
819 
820 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
821 #define	WM_CDRXADDR_HI(rxq, x)						\
822 	(sizeof(bus_addr_t) == 8 ?					\
823 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
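
/*
 * Worked example for the macros above: with a 64-bit bus_addr_t of
 * 0x123456780, the _LO() macro yields 0x23456780 and the _HI() macro
 * yields 0x1; when bus_addr_t is 32 bits wide, the high word is always 0.
 */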
824 
825 /*
826  * Register read/write functions.
827  * Other than CSR_{READ|WRITE}().
828  */
829 #if 0
830 static inline uint32_t wm_io_read(struct wm_softc *, int);
831 #endif
832 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
833 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
834     uint32_t, uint32_t);
835 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
836 
837 /*
838  * Descriptor sync/init functions.
839  */
840 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
841 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
842 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
843 
844 /*
845  * Device driver interface functions and commonly used functions.
846  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
847  */
848 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
849 static int	wm_match(device_t, cfdata_t, void *);
850 static void	wm_attach(device_t, device_t, void *);
851 static int	wm_detach(device_t, int);
852 static bool	wm_suspend(device_t, const pmf_qual_t *);
853 static bool	wm_resume(device_t, const pmf_qual_t *);
854 static bool	wm_watchdog(struct ifnet *);
855 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
856     uint16_t *);
857 static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
858     uint16_t *);
859 static void	wm_tick(void *);
860 static int	wm_ifflags_cb(struct ethercom *);
861 static int	wm_ioctl(struct ifnet *, u_long, void *);
862 /* MAC address related */
863 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
864 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
865 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
866 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
867 static int	wm_rar_count(struct wm_softc *);
868 static void	wm_set_filter(struct wm_softc *);
869 /* Reset and init related */
870 static void	wm_set_vlan(struct wm_softc *);
871 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
872 static void	wm_get_auto_rd_done(struct wm_softc *);
873 static void	wm_lan_init_done(struct wm_softc *);
874 static void	wm_get_cfg_done(struct wm_softc *);
875 static int	wm_phy_post_reset(struct wm_softc *);
876 static int	wm_write_smbus_addr(struct wm_softc *);
877 static int	wm_init_lcd_from_nvm(struct wm_softc *);
878 static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
879 static void	wm_initialize_hardware_bits(struct wm_softc *);
880 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
881 static int	wm_reset_phy(struct wm_softc *);
882 static void	wm_flush_desc_rings(struct wm_softc *);
883 static void	wm_reset(struct wm_softc *);
884 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
885 static void	wm_rxdrain(struct wm_rxqueue *);
886 static void	wm_init_rss(struct wm_softc *);
887 static void	wm_adjust_qnum(struct wm_softc *, int);
888 static inline bool	wm_is_using_msix(struct wm_softc *);
889 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
890 static int	wm_softint_establish_queue(struct wm_softc *, int, int);
891 static int	wm_setup_legacy(struct wm_softc *);
892 static int	wm_setup_msix(struct wm_softc *);
893 static int	wm_init(struct ifnet *);
894 static int	wm_init_locked(struct ifnet *);
895 static void	wm_init_sysctls(struct wm_softc *);
896 static void	wm_update_stats(struct wm_softc *);
897 static void	wm_clear_evcnt(struct wm_softc *);
898 static void	wm_unset_stopping_flags(struct wm_softc *);
899 static void	wm_set_stopping_flags(struct wm_softc *);
900 static void	wm_stop(struct ifnet *, int);
901 static void	wm_stop_locked(struct ifnet *, bool, bool);
902 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
903 static void	wm_82547_txfifo_stall(void *);
904 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
905 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
906 /* DMA related */
907 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
908 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
909 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
910 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
911     struct wm_txqueue *);
912 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
913 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
914 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
915     struct wm_rxqueue *);
916 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
917 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
918 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
919 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
920 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
921 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
922 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
923     struct wm_txqueue *);
924 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
925     struct wm_rxqueue *);
926 static int	wm_alloc_txrx_queues(struct wm_softc *);
927 static void	wm_free_txrx_queues(struct wm_softc *);
928 static int	wm_init_txrx_queues(struct wm_softc *);
929 /* Start */
930 static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
931     struct wm_txsoft *, uint32_t *, uint8_t *);
932 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
933 static void	wm_start(struct ifnet *);
934 static void	wm_start_locked(struct ifnet *);
935 static int	wm_transmit(struct ifnet *, struct mbuf *);
936 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
937 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
938     bool);
939 static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
940     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
941 static void	wm_nq_start(struct ifnet *);
942 static void	wm_nq_start_locked(struct ifnet *);
943 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
944 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
945 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
946     bool);
947 static void	wm_deferred_start_locked(struct wm_txqueue *);
948 static void	wm_handle_queue(void *);
949 static void	wm_handle_queue_work(struct work *, void *);
950 static void	wm_handle_reset_work(struct work *, void *);
951 /* Interrupt */
952 static bool	wm_txeof(struct wm_txqueue *, u_int);
953 static bool	wm_rxeof(struct wm_rxqueue *, u_int);
954 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
955 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
956 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
957 static void	wm_linkintr(struct wm_softc *, uint32_t);
958 static int	wm_intr_legacy(void *);
959 static inline void	wm_txrxintr_disable(struct wm_queue *);
960 static inline void	wm_txrxintr_enable(struct wm_queue *);
961 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
962 static int	wm_txrxintr_msix(void *);
963 static int	wm_linkintr_msix(void *);
964 
965 /*
966  * Media related.
967  * GMII, SGMII, TBI, SERDES and SFP.
968  */
969 /* Common */
970 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
971 /* GMII related */
972 static void	wm_gmii_reset(struct wm_softc *);
973 static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
974 static int	wm_get_phy_id_82575(struct wm_softc *);
975 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
976 static int	wm_gmii_mediachange(struct ifnet *);
977 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
978 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
979 static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
980 static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
981 static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
982 static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
983 static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
984 static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
985 static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
986 static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
987 static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
988 static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
989 static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
990 static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
991 static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
992 static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
993 static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
994 static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
995 	bool);
996 static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
997 static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
998 static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
999 static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
1000 static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
1001 static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
1002 static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
1003 static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
1004 static void	wm_gmii_statchg(struct ifnet *);
1005 /*
1006  * Kumeran related (80003, ICH* and PCH*).
1007  * These functions are not for accessing MII registers but for accessing
1008  * Kumeran-specific registers.
1009  */
1010 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
1011 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
1012 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
1013 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
1014 /* EMI register related */
1015 static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
1016 static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
1017 static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
1018 /* SGMII */
1019 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
1020 static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
1021 static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
1022 static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
1023 static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
1024 static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
1025 /* TBI related */
1026 static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
1027 static void	wm_tbi_mediainit(struct wm_softc *);
1028 static int	wm_tbi_mediachange(struct ifnet *);
1029 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
1030 static int	wm_check_for_link(struct wm_softc *);
1031 static void	wm_tbi_tick(struct wm_softc *);
1032 /* SERDES related */
1033 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
1034 static int	wm_serdes_mediachange(struct ifnet *);
1035 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
1036 static void	wm_serdes_tick(struct wm_softc *);
1037 /* SFP related */
1038 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
1039 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
1040 
1041 /*
1042  * NVM related.
1043  * Microwire, SPI (w/wo EERD) and Flash.
1044  */
1045 /* Misc functions */
1046 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
1047 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
1048 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
1049 /* Microwire */
1050 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
1051 /* SPI */
1052 static int	wm_nvm_ready_spi(struct wm_softc *);
1053 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
1054 /* Using with EERD */
1055 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
1056 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
1057 /* Flash */
1058 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
1059     unsigned int *);
1060 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
1061 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
1062 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
1063     uint32_t *);
1064 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
1065 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
1066 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
1067 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
1068 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
1069 /* iNVM */
1070 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
1071 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
1072 /* Locking, NVM type detection, checksum validation and reading */
1073 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
1074 static int	wm_nvm_flash_presence_i210(struct wm_softc *);
1075 static int	wm_nvm_validate_checksum(struct wm_softc *);
1076 static void	wm_nvm_version_invm(struct wm_softc *);
1077 static void	wm_nvm_version(struct wm_softc *);
1078 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
1079 
1080 /*
1081  * Hardware semaphores.
1082  * Very complex...
1083  */
1084 static int	wm_get_null(struct wm_softc *);
1085 static void	wm_put_null(struct wm_softc *);
1086 static int	wm_get_eecd(struct wm_softc *);
1087 static void	wm_put_eecd(struct wm_softc *);
1088 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
1089 static void	wm_put_swsm_semaphore(struct wm_softc *);
1090 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
1091 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
1092 static int	wm_get_nvm_80003(struct wm_softc *);
1093 static void	wm_put_nvm_80003(struct wm_softc *);
1094 static int	wm_get_nvm_82571(struct wm_softc *);
1095 static void	wm_put_nvm_82571(struct wm_softc *);
1096 static int	wm_get_phy_82575(struct wm_softc *);
1097 static void	wm_put_phy_82575(struct wm_softc *);
1098 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
1099 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
1100 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
1101 static void	wm_put_swflag_ich8lan(struct wm_softc *);
1102 static int	wm_get_nvm_ich8lan(struct wm_softc *);
1103 static void	wm_put_nvm_ich8lan(struct wm_softc *);
1104 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
1105 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
1106 
1107 /*
1108  * Management mode and power management related subroutines.
1109  * BMC, AMT, suspend/resume and EEE.
1110  */
1111 #if 0
1112 static int	wm_check_mng_mode(struct wm_softc *);
1113 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
1114 static int	wm_check_mng_mode_82574(struct wm_softc *);
1115 static int	wm_check_mng_mode_generic(struct wm_softc *);
1116 #endif
1117 static int	wm_enable_mng_pass_thru(struct wm_softc *);
1118 static bool	wm_phy_resetisblocked(struct wm_softc *);
1119 static void	wm_get_hw_control(struct wm_softc *);
1120 static void	wm_release_hw_control(struct wm_softc *);
1121 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
1122 static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
1123 static void	wm_init_manageability(struct wm_softc *);
1124 static void	wm_release_manageability(struct wm_softc *);
1125 static void	wm_get_wakeup(struct wm_softc *);
1126 static int	wm_ulp_disable(struct wm_softc *);
1127 static int	wm_enable_phy_wakeup(struct wm_softc *);
1128 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
1129 static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
1130 static int	wm_resume_workarounds_pchlan(struct wm_softc *);
1131 static void	wm_enable_wakeup(struct wm_softc *);
1132 static void	wm_disable_aspm(struct wm_softc *);
1133 /* LPLU (Low Power Link Up) */
1134 static void	wm_lplu_d0_disable(struct wm_softc *);
1135 /* EEE */
1136 static int	wm_set_eee_i350(struct wm_softc *);
1137 static int	wm_set_eee_pchlan(struct wm_softc *);
1138 static int	wm_set_eee(struct wm_softc *);
1139 
1140 /*
1141  * Workarounds (mainly PHY related).
1142  * Most PHY workarounds live in the PHY drivers themselves.
1143  */
1144 static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
1145 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
1146 static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
1147 static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
1148 static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
1149 static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
1150 static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
1151 static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
1152 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
1153 static int	wm_k1_workaround_lv(struct wm_softc *);
1154 static int	wm_link_stall_workaround_hv(struct wm_softc *);
1155 static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
1156 static int	wm_set_mdio_slow_mode_hv_locked(struct wm_softc *);
1157 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
1158 static void	wm_reset_init_script_82575(struct wm_softc *);
1159 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
1160 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
1161 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
1162 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
1163 static int	wm_pll_workaround_i210(struct wm_softc *);
1164 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
1165 static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
1166 static void	wm_set_linkdown_discard(struct wm_softc *);
1167 static void	wm_clear_linkdown_discard(struct wm_softc *);
1168 
1169 static int	wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
1170 static int	wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
1171 #ifdef WM_DEBUG
1172 static int	wm_sysctl_debug(SYSCTLFN_PROTO);
1173 #endif
1174 
1175 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
1176     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
1177 
1178 /*
1179  * Devices supported by this driver.
1180  */
1181 static const struct wm_product {
1182 	pci_vendor_id_t		wmp_vendor;
1183 	pci_product_id_t	wmp_product;
1184 	const char		*wmp_name;
1185 	wm_chip_type		wmp_type;
1186 	uint32_t		wmp_flags;
1187 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
1188 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
1189 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
1190 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
1191 #define WMP_MEDIATYPE(x)	((x) & 0x03)
1192 } wm_products[] = {
1193 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
1194 	  "Intel i82542 1000BASE-X Ethernet",
1195 	  WM_T_82542_2_1,	WMP_F_FIBER },
1196 
1197 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
1198 	  "Intel i82543GC 1000BASE-X Ethernet",
1199 	  WM_T_82543,		WMP_F_FIBER },
1200 
1201 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
1202 	  "Intel i82543GC 1000BASE-T Ethernet",
1203 	  WM_T_82543,		WMP_F_COPPER },
1204 
1205 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
1206 	  "Intel i82544EI 1000BASE-T Ethernet",
1207 	  WM_T_82544,		WMP_F_COPPER },
1208 
1209 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
1210 	  "Intel i82544EI 1000BASE-X Ethernet",
1211 	  WM_T_82544,		WMP_F_FIBER },
1212 
1213 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
1214 	  "Intel i82544GC 1000BASE-T Ethernet",
1215 	  WM_T_82544,		WMP_F_COPPER },
1216 
1217 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
1218 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
1219 	  WM_T_82544,		WMP_F_COPPER },
1220 
1221 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
1222 	  "Intel i82540EM 1000BASE-T Ethernet",
1223 	  WM_T_82540,		WMP_F_COPPER },
1224 
1225 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
1226 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
1227 	  WM_T_82540,		WMP_F_COPPER },
1228 
1229 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
1230 	  "Intel i82540EP 1000BASE-T Ethernet",
1231 	  WM_T_82540,		WMP_F_COPPER },
1232 
1233 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
1234 	  "Intel i82540EP 1000BASE-T Ethernet",
1235 	  WM_T_82540,		WMP_F_COPPER },
1236 
1237 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
1238 	  "Intel i82540EP 1000BASE-T Ethernet",
1239 	  WM_T_82540,		WMP_F_COPPER },
1240 
1241 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
1242 	  "Intel i82545EM 1000BASE-T Ethernet",
1243 	  WM_T_82545,		WMP_F_COPPER },
1244 
1245 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
1246 	  "Intel i82545GM 1000BASE-T Ethernet",
1247 	  WM_T_82545_3,		WMP_F_COPPER },
1248 
1249 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
1250 	  "Intel i82545GM 1000BASE-X Ethernet",
1251 	  WM_T_82545_3,		WMP_F_FIBER },
1252 
1253 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
1254 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
1255 	  WM_T_82545_3,		WMP_F_SERDES },
1256 
1257 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
1258 	  "Intel i82546EB 1000BASE-T Ethernet",
1259 	  WM_T_82546,		WMP_F_COPPER },
1260 
1261 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
1262 	  "Intel i82546EB 1000BASE-T Ethernet",
1263 	  WM_T_82546,		WMP_F_COPPER },
1264 
1265 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
1266 	  "Intel i82545EM 1000BASE-X Ethernet",
1267 	  WM_T_82545,		WMP_F_FIBER },
1268 
1269 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
1270 	  "Intel i82546EB 1000BASE-X Ethernet",
1271 	  WM_T_82546,		WMP_F_FIBER },
1272 
1273 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
1274 	  "Intel i82546GB 1000BASE-T Ethernet",
1275 	  WM_T_82546_3,		WMP_F_COPPER },
1276 
1277 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
1278 	  "Intel i82546GB 1000BASE-X Ethernet",
1279 	  WM_T_82546_3,		WMP_F_FIBER },
1280 
1281 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
1282 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
1283 	  WM_T_82546_3,		WMP_F_SERDES },
1284 
1285 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
1286 	  "i82546GB quad-port Gigabit Ethernet",
1287 	  WM_T_82546_3,		WMP_F_COPPER },
1288 
1289 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
1290 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
1291 	  WM_T_82546_3,		WMP_F_COPPER },
1292 
1293 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
1294 	  "Intel PRO/1000MT (82546GB)",
1295 	  WM_T_82546_3,		WMP_F_COPPER },
1296 
1297 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
1298 	  "Intel i82541EI 1000BASE-T Ethernet",
1299 	  WM_T_82541,		WMP_F_COPPER },
1300 
1301 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
1302 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
1303 	  WM_T_82541,		WMP_F_COPPER },
1304 
1305 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
1306 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
1307 	  WM_T_82541,		WMP_F_COPPER },
1308 
1309 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
1310 	  "Intel i82541ER 1000BASE-T Ethernet",
1311 	  WM_T_82541_2,		WMP_F_COPPER },
1312 
1313 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
1314 	  "Intel i82541GI 1000BASE-T Ethernet",
1315 	  WM_T_82541_2,		WMP_F_COPPER },
1316 
1317 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
1318 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
1319 	  WM_T_82541_2,		WMP_F_COPPER },
1320 
1321 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
1322 	  "Intel i82541PI 1000BASE-T Ethernet",
1323 	  WM_T_82541_2,		WMP_F_COPPER },
1324 
1325 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
1326 	  "Intel i82547EI 1000BASE-T Ethernet",
1327 	  WM_T_82547,		WMP_F_COPPER },
1328 
1329 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
1330 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
1331 	  WM_T_82547,		WMP_F_COPPER },
1332 
1333 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
1334 	  "Intel i82547GI 1000BASE-T Ethernet",
1335 	  WM_T_82547_2,		WMP_F_COPPER },
1336 
1337 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
1338 	  "Intel PRO/1000 PT (82571EB)",
1339 	  WM_T_82571,		WMP_F_COPPER },
1340 
1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
1342 	  "Intel PRO/1000 PF (82571EB)",
1343 	  WM_T_82571,		WMP_F_FIBER },
1344 
1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
1346 	  "Intel PRO/1000 PB (82571EB)",
1347 	  WM_T_82571,		WMP_F_SERDES },
1348 
1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
1350 	  "Intel PRO/1000 QT (82571EB)",
1351 	  WM_T_82571,		WMP_F_COPPER },
1352 
1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
1354 	  "Intel PRO/1000 PT Quad Port Server Adapter",
1355 	  WM_T_82571,		WMP_F_COPPER },
1356 
1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
1358 	  "Intel Gigabit PT Quad Port Server ExpressModule",
1359 	  WM_T_82571,		WMP_F_COPPER },
1360 
1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
1362 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
1363 	  WM_T_82571,		WMP_F_SERDES },
1364 
1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
1366 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
1367 	  WM_T_82571,		WMP_F_SERDES },
1368 
1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
1370 	  "Intel 82571EB Quad 1000baseX Ethernet",
1371 	  WM_T_82571,		WMP_F_FIBER },
1372 
1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
1374 	  "Intel i82572EI 1000baseT Ethernet",
1375 	  WM_T_82572,		WMP_F_COPPER },
1376 
1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
1378 	  "Intel i82572EI 1000baseX Ethernet",
1379 	  WM_T_82572,		WMP_F_FIBER },
1380 
1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
1382 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
1383 	  WM_T_82572,		WMP_F_SERDES },
1384 
1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
1386 	  "Intel i82572EI 1000baseT Ethernet",
1387 	  WM_T_82572,		WMP_F_COPPER },
1388 
1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
1390 	  "Intel i82573E",
1391 	  WM_T_82573,		WMP_F_COPPER },
1392 
1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
1394 	  "Intel i82573E IAMT",
1395 	  WM_T_82573,		WMP_F_COPPER },
1396 
1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
1398 	  "Intel i82573L Gigabit Ethernet",
1399 	  WM_T_82573,		WMP_F_COPPER },
1400 
1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
1402 	  "Intel i82574L",
1403 	  WM_T_82574,		WMP_F_COPPER },
1404 
1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
1406 	  "Intel i82574L",
1407 	  WM_T_82574,		WMP_F_COPPER },
1408 
1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
1410 	  "Intel i82583V",
1411 	  WM_T_82583,		WMP_F_COPPER },
1412 
1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
1414 	  "i80003 dual 1000baseT Ethernet",
1415 	  WM_T_80003,		WMP_F_COPPER },
1416 
1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
1418 	  "i80003 dual 1000baseX Ethernet",
1419 	  WM_T_80003,		WMP_F_COPPER },
1420 
1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
1422 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
1423 	  WM_T_80003,		WMP_F_SERDES },
1424 
1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
1426 	  "Intel i80003 1000baseT Ethernet",
1427 	  WM_T_80003,		WMP_F_COPPER },
1428 
1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
1430 	  "Intel i80003 Gigabit Ethernet (SERDES)",
1431 	  WM_T_80003,		WMP_F_SERDES },
1432 
1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
1434 	  "Intel i82801H (M_AMT) LAN Controller",
1435 	  WM_T_ICH8,		WMP_F_COPPER },
1436 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
1437 	  "Intel i82801H (AMT) LAN Controller",
1438 	  WM_T_ICH8,		WMP_F_COPPER },
1439 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
1440 	  "Intel i82801H LAN Controller",
1441 	  WM_T_ICH8,		WMP_F_COPPER },
1442 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
1443 	  "Intel i82801H (IFE) 10/100 LAN Controller",
1444 	  WM_T_ICH8,		WMP_F_COPPER },
1445 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
1446 	  "Intel i82801H (M) LAN Controller",
1447 	  WM_T_ICH8,		WMP_F_COPPER },
1448 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
1449 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
1450 	  WM_T_ICH8,		WMP_F_COPPER },
1451 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
1452 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
1453 	  WM_T_ICH8,		WMP_F_COPPER },
1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
1455 	  "82567V-3 LAN Controller",
1456 	  WM_T_ICH8,		WMP_F_COPPER },
1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
1458 	  "82801I (AMT) LAN Controller",
1459 	  WM_T_ICH9,		WMP_F_COPPER },
1460 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
1461 	  "82801I 10/100 LAN Controller",
1462 	  WM_T_ICH9,		WMP_F_COPPER },
1463 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
1464 	  "82801I (G) 10/100 LAN Controller",
1465 	  WM_T_ICH9,		WMP_F_COPPER },
1466 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
1467 	  "82801I (GT) 10/100 LAN Controller",
1468 	  WM_T_ICH9,		WMP_F_COPPER },
1469 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
1470 	  "82801I (C) LAN Controller",
1471 	  WM_T_ICH9,		WMP_F_COPPER },
1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
1473 	  "82801I mobile LAN Controller",
1474 	  WM_T_ICH9,		WMP_F_COPPER },
1475 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
1476 	  "82801I mobile (V) LAN Controller",
1477 	  WM_T_ICH9,		WMP_F_COPPER },
1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
1479 	  "82801I mobile (AMT) LAN Controller",
1480 	  WM_T_ICH9,		WMP_F_COPPER },
1481 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
1482 	  "82567LM-4 LAN Controller",
1483 	  WM_T_ICH9,		WMP_F_COPPER },
1484 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
1485 	  "82567LM-2 LAN Controller",
1486 	  WM_T_ICH10,		WMP_F_COPPER },
1487 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
1488 	  "82567LF-2 LAN Controller",
1489 	  WM_T_ICH10,		WMP_F_COPPER },
1490 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
1491 	  "82567LM-3 LAN Controller",
1492 	  WM_T_ICH10,		WMP_F_COPPER },
1493 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
1494 	  "82567LF-3 LAN Controller",
1495 	  WM_T_ICH10,		WMP_F_COPPER },
1496 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
1497 	  "82567V-2 LAN Controller",
1498 	  WM_T_ICH10,		WMP_F_COPPER },
1499 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
1500 	  "82567V-3? LAN Controller",
1501 	  WM_T_ICH10,		WMP_F_COPPER },
1502 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
1503 	  "HANKSVILLE LAN Controller",
1504 	  WM_T_ICH10,		WMP_F_COPPER },
1505 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
1506 	  "PCH LAN (82577LM) Controller",
1507 	  WM_T_PCH,		WMP_F_COPPER },
1508 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
1509 	  "PCH LAN (82577LC) Controller",
1510 	  WM_T_PCH,		WMP_F_COPPER },
1511 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
1512 	  "PCH LAN (82578DM) Controller",
1513 	  WM_T_PCH,		WMP_F_COPPER },
1514 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
1515 	  "PCH LAN (82578DC) Controller",
1516 	  WM_T_PCH,		WMP_F_COPPER },
1517 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
1518 	  "PCH2 LAN (82579LM) Controller",
1519 	  WM_T_PCH2,		WMP_F_COPPER },
1520 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
1521 	  "PCH2 LAN (82579V) Controller",
1522 	  WM_T_PCH2,		WMP_F_COPPER },
1523 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
1524 	  "82575EB dual-1000baseT Ethernet",
1525 	  WM_T_82575,		WMP_F_COPPER },
1526 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
1527 	  "82575EB dual-1000baseX Ethernet (SERDES)",
1528 	  WM_T_82575,		WMP_F_SERDES },
1529 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
1530 	  "82575GB quad-1000baseT Ethernet",
1531 	  WM_T_82575,		WMP_F_COPPER },
1532 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
1533 	  "82575GB quad-1000baseT Ethernet (PM)",
1534 	  WM_T_82575,		WMP_F_COPPER },
1535 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
1536 	  "82576 1000BaseT Ethernet",
1537 	  WM_T_82576,		WMP_F_COPPER },
1538 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
1539 	  "82576 1000BaseX Ethernet",
1540 	  WM_T_82576,		WMP_F_FIBER },
1541 
1542 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
1543 	  "82576 gigabit Ethernet (SERDES)",
1544 	  WM_T_82576,		WMP_F_SERDES },
1545 
1546 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
1547 	  "82576 quad-1000BaseT Ethernet",
1548 	  WM_T_82576,		WMP_F_COPPER },
1549 
1550 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
1551 	  "82576 Gigabit ET2 Quad Port Server Adapter",
1552 	  WM_T_82576,		WMP_F_COPPER },
1553 
1554 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
1555 	  "82576 gigabit Ethernet",
1556 	  WM_T_82576,		WMP_F_COPPER },
1557 
1558 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
1559 	  "82576 gigabit Ethernet (SERDES)",
1560 	  WM_T_82576,		WMP_F_SERDES },
1561 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
1562 	  "82576 quad-gigabit Ethernet (SERDES)",
1563 	  WM_T_82576,		WMP_F_SERDES },
1564 
1565 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
1566 	  "82580 1000BaseT Ethernet",
1567 	  WM_T_82580,		WMP_F_COPPER },
1568 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
1569 	  "82580 1000BaseX Ethernet",
1570 	  WM_T_82580,		WMP_F_FIBER },
1571 
1572 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
1573 	  "82580 1000BaseT Ethernet (SERDES)",
1574 	  WM_T_82580,		WMP_F_SERDES },
1575 
1576 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
1577 	  "82580 gigabit Ethernet (SGMII)",
1578 	  WM_T_82580,		WMP_F_COPPER },
1579 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
1580 	  "82580 dual-1000BaseT Ethernet",
1581 	  WM_T_82580,		WMP_F_COPPER },
1582 
1583 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1584 	  "82580 quad-1000BaseX Ethernet",
1585 	  WM_T_82580,		WMP_F_FIBER },
1586 
1587 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
1588 	  "DH89XXCC Gigabit Ethernet (SGMII)",
1589 	  WM_T_82580,		WMP_F_COPPER },
1590 
1591 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
1592 	  "DH89XXCC Gigabit Ethernet (SERDES)",
1593 	  WM_T_82580,		WMP_F_SERDES },
1594 
1595 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
1596 	  "DH89XXCC 1000BASE-KX Ethernet",
1597 	  WM_T_82580,		WMP_F_SERDES },
1598 
1599 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
1600 	  "DH89XXCC Gigabit Ethernet (SFP)",
1601 	  WM_T_82580,		WMP_F_SERDES },
1602 
1603 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
1604 	  "I350 Gigabit Network Connection",
1605 	  WM_T_I350,		WMP_F_COPPER },
1606 
1607 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
1608 	  "I350 Gigabit Fiber Network Connection",
1609 	  WM_T_I350,		WMP_F_FIBER },
1610 
1611 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
1612 	  "I350 Gigabit Backplane Connection",
1613 	  WM_T_I350,		WMP_F_SERDES },
1614 
1615 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
1616 	  "I350 Quad Port Gigabit Ethernet",
1617 	  WM_T_I350,		WMP_F_SERDES },
1618 
1619 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
1620 	  "I350 Gigabit Connection",
1621 	  WM_T_I350,		WMP_F_COPPER },
1622 
1623 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
1624 	  "I354 Gigabit Ethernet (KX)",
1625 	  WM_T_I354,		WMP_F_SERDES },
1626 
1627 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
1628 	  "I354 Gigabit Ethernet (SGMII)",
1629 	  WM_T_I354,		WMP_F_COPPER },
1630 
1631 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
1632 	  "I354 Gigabit Ethernet (2.5G)",
1633 	  WM_T_I354,		WMP_F_COPPER },
1634 
1635 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
1636 	  "I210-T1 Ethernet Server Adapter",
1637 	  WM_T_I210,		WMP_F_COPPER },
1638 
1639 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
1640 	  "I210 Ethernet (Copper OEM)",
1641 	  WM_T_I210,		WMP_F_COPPER },
1642 
1643 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
1644 	  "I210 Ethernet (Copper IT)",
1645 	  WM_T_I210,		WMP_F_COPPER },
1646 
1647 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
1648 	  "I210 Ethernet (Copper, FLASH less)",
1649 	  WM_T_I210,		WMP_F_COPPER },
1650 
1651 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
1652 	  "I210 Gigabit Ethernet (Fiber)",
1653 	  WM_T_I210,		WMP_F_FIBER },
1654 
1655 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
1656 	  "I210 Gigabit Ethernet (SERDES)",
1657 	  WM_T_I210,		WMP_F_SERDES },
1658 
1659 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
1660 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
1661 	  WM_T_I210,		WMP_F_SERDES },
1662 
1663 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
1664 	  "I210 Gigabit Ethernet (SGMII)",
1665 	  WM_T_I210,		WMP_F_COPPER },
1666 
1667 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
1668 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
1669 	  WM_T_I210,		WMP_F_COPPER },
1670 
1671 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
1672 	  "I211 Ethernet (COPPER)",
1673 	  WM_T_I211,		WMP_F_COPPER },
1674 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
1675 	  "I217 V Ethernet Connection",
1676 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1677 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
1678 	  "I217 LM Ethernet Connection",
1679 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1680 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
1681 	  "I218 V Ethernet Connection",
1682 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1683 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
1684 	  "I218 V Ethernet Connection",
1685 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1686 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
1687 	  "I218 V Ethernet Connection",
1688 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1689 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
1690 	  "I218 LM Ethernet Connection",
1691 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1692 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
1693 	  "I218 LM Ethernet Connection",
1694 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1695 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
1696 	  "I218 LM Ethernet Connection",
1697 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1698 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
1699 	  "I219 LM Ethernet Connection",
1700 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1701 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
1702 	  "I219 LM (2) Ethernet Connection",
1703 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1704 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
1705 	  "I219 LM (3) Ethernet Connection",
1706 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1707 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
1708 	  "I219 LM (4) Ethernet Connection",
1709 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1710 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
1711 	  "I219 LM (5) Ethernet Connection",
1712 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1713 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
1714 	  "I219 LM (6) Ethernet Connection",
1715 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1716 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
1717 	  "I219 LM (7) Ethernet Connection",
1718 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1719 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
1720 	  "I219 LM (8) Ethernet Connection",
1721 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1722 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
1723 	  "I219 LM (9) Ethernet Connection",
1724 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1725 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
1726 	  "I219 LM (10) Ethernet Connection",
1727 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1728 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
1729 	  "I219 LM (11) Ethernet Connection",
1730 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1731 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
1732 	  "I219 LM (12) Ethernet Connection",
1733 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1734 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
1735 	  "I219 LM (13) Ethernet Connection",
1736 	  WM_T_PCH_TGP,		WMP_F_COPPER },
1737 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
1738 	  "I219 LM (14) Ethernet Connection",
1739 	  WM_T_PCH_TGP,		WMP_F_COPPER },
1740 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
1741 	  "I219 LM (15) Ethernet Connection",
1742 	  WM_T_PCH_TGP,		WMP_F_COPPER },
1743 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM16,
1744 	  "I219 LM (16) Ethernet Connection",
1745 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP */
1746 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM17,
1747 	  "I219 LM (17) Ethernet Connection",
1748 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP */
1749 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM18,
1750 	  "I219 LM (18) Ethernet Connection",
1751 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
1752 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM19,
1753 	  "I219 LM (19) Ethernet Connection",
1754 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
1755 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM20,
1756 	  "I219 LM (20) Ethernet Connection",
1757 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
1758 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM21,
1759 	  "I219 LM (21) Ethernet Connection",
1760 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
1761 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM22,
1762 	  "I219 LM (22) Ethernet Connection",
1763 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP(RPL) */
1764 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM23,
1765 	  "I219 LM (23) Ethernet Connection",
1766 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP(RPL) */
1767 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
1768 	  "I219 V Ethernet Connection",
1769 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1770 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
1771 	  "I219 V (2) Ethernet Connection",
1772 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1773 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
1774 	  "I219 V (4) Ethernet Connection",
1775 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1776 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
1777 	  "I219 V (5) Ethernet Connection",
1778 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1779 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
1780 	  "I219 V (6) Ethernet Connection",
1781 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1782 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
1783 	  "I219 V (7) Ethernet Connection",
1784 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1785 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
1786 	  "I219 V (8) Ethernet Connection",
1787 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1788 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
1789 	  "I219 V (9) Ethernet Connection",
1790 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1791 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
1792 	  "I219 V (10) Ethernet Connection",
1793 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1794 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
1795 	  "I219 V (11) Ethernet Connection",
1796 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1797 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
1798 	  "I219 V (12) Ethernet Connection",
1799 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1800 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
1801 	  "I219 V (13) Ethernet Connection",
1802 	  WM_T_PCH_TGP,		WMP_F_COPPER },
1803 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
1804 	  "I219 V (14) Ethernet Connection",
1805 	  WM_T_PCH_TGP,		WMP_F_COPPER },
1806 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V15,
1807 	  "I219 V (15) Ethernet Connection",
1808 	  WM_T_PCH_TGP,		WMP_F_COPPER },
1809 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V16,
1810 	  "I219 V (16) Ethernet Connection",
1811 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP */
1812 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V17,
1813 	  "I219 V (17) Ethernet Connection",
1814 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP */
1815 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V18,
1816 	  "I219 V (18) Ethernet Connection",
1817 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
1818 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V19,
1819 	  "I219 V (19) Ethernet Connection",
1820 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
1821 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V20,
1822 	  "I219 V (20) Ethernet Connection",
1823 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
1824 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V21,
1825 	  "I219 V (21) Ethernet Connection",
1826 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* MTP */
1827 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V22,
1828 	  "I219 V (22) Ethernet Connection",
1829 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP(RPL) */
1830 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V23,
1831 	  "I219 V (23) Ethernet Connection",
1832 	  WM_T_PCH_TGP,		WMP_F_COPPER }, /* ADP(RPL) */
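	/* Sentinel entry: wm_lookup() stops at the NULL wmp_name. */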
1833 	{ 0,			0,
1834 	  NULL,
1835 	  0,			0 },
1836 };
1837 
1838 /*
1839  * Register read/write functions.
1840  * Other than CSR_{READ|WRITE}().
1841  */
1842 
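/*
 * wm_io_read() and wm_io_write() access CSRs indirectly through the I/O
 * BAR: offset 0 selects the register (the address window) and offset 4 is
 * the data window that is then read or written.
 */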
1843 #if 0 /* Not currently used */
1844 static inline uint32_t
1845 wm_io_read(struct wm_softc *sc, int reg)
1846 {
1847 
1848 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1849 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1850 }
1851 #endif
1852 
1853 static inline void
1854 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1855 {
1856 
1857 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1858 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1859 }
1860 
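/*
 * wm_82575_write_8bit_ctlr_reg() writes one byte of "data" to the 8-bit
 * controller register at "off" behind the indirect register "reg": the
 * data goes in the low bits, the offset in the address field, and the
 * function then polls for the ready bit (5us per try, up to
 * SCTL_CTL_POLL_TIMEOUT tries) before warning on timeout.
 */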
1861 static inline void
1862 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1863     uint32_t data)
1864 {
1865 	uint32_t regval;
1866 	int i;
1867 
1868 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1869 
1870 	CSR_WRITE(sc, reg, regval);
1871 
1872 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1873 		delay(5);
1874 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1875 			break;
1876 	}
1877 	if (i == SCTL_CTL_POLL_TIMEOUT) {
1878 		aprint_error("%s: WARNING:"
1879 		    " i82575 reg 0x%08x setup did not indicate ready\n",
1880 		    device_xname(sc->sc_dev), reg);
1881 	}
1882 }
1883 
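/*
 * wm_set_dma_addr() splits a bus address into the low and high 32-bit
 * halves of a descriptor address field, converting each half to the
 * little-endian byte order the hardware expects.
 */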
1884 static inline void
1885 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1886 {
1887 	wa->wa_low = htole32(BUS_ADDR_LO32(v));
1888 	wa->wa_high = htole32(BUS_ADDR_HI32(v));
1889 }
1890 
1891 /*
1892  * Descriptor sync/init functions.
1893  */
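/*
 * wm_cdtxsync() syncs "num" Tx descriptors starting at "start".  When the
 * range wraps past the end of the ring, the bus_dmamap_sync() is split
 * into two calls.  Illustrative example: with WM_NTXDESC(txq) == 256,
 * start == 250 and num == 10, the first sync covers descriptors 250..255
 * and the second covers descriptors 0..3.
 */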
1894 static inline void
1895 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1896 {
1897 	struct wm_softc *sc = txq->txq_sc;
1898 
1899 	/* If it will wrap around, sync to the end of the ring. */
1900 	if ((start + num) > WM_NTXDESC(txq)) {
1901 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1902 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
1903 		    (WM_NTXDESC(txq) - start), ops);
1904 		num -= (WM_NTXDESC(txq) - start);
1905 		start = 0;
1906 	}
1907 
1908 	/* Now sync whatever is left. */
1909 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1910 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
1911 }
1912 
1913 static inline void
1914 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1915 {
1916 	struct wm_softc *sc = rxq->rxq_sc;
1917 
1918 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1919 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
1920 }
1921 
1922 static inline void
1923 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1924 {
1925 	struct wm_softc *sc = rxq->rxq_sc;
1926 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1927 	struct mbuf *m = rxs->rxs_mbuf;
1928 
1929 	/*
1930 	 * Note: We scoot the packet forward 2 bytes in the buffer
1931 	 * so that the payload after the Ethernet header is aligned
1932 	 * to a 4-byte boundary.
1933 
1934 	 *
1935 	 * The stupid chip uses the same size for every buffer, which
1936 	 * is set in the Receive Control register.  We are using the 2K
1937 	 * size option, but what we REALLY want is (2K - 2)!  For this
1938 	 * reason, we can't "scoot" packets longer than the standard
1939 	 * Ethernet MTU.  On strict-alignment platforms, if the total
1940 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
1941 	 * the upper layer copy the headers.
1942 	 */
1943 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
1944 
1945 	if (sc->sc_type == WM_T_82574) {
1946 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
1947 		rxd->erx_data.erxd_addr =
1948 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1949 		rxd->erx_data.erxd_dd = 0;
1950 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
1951 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
1952 
1953 		rxd->nqrx_data.nrxd_paddr =
1954 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1955 		/* Currently, split header is not supported. */
1956 		rxd->nqrx_data.nrxd_haddr = 0;
1957 	} else {
1958 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1959 
1960 		wm_set_dma_addr(&rxd->wrx_addr,
1961 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1962 		rxd->wrx_len = 0;
1963 		rxd->wrx_cksum = 0;
1964 		rxd->wrx_status = 0;
1965 		rxd->wrx_errors = 0;
1966 		rxd->wrx_special = 0;
1967 	}
1968 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1969 
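	/*
	 * Update the Rx descriptor tail (RDT) register so the hardware
	 * sees that this descriptor has been re-initialized and may be
	 * used again.
	 */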
1970 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
1971 }
1972 
1973 /*
1974  * Device driver interface functions and commonly used functions.
1975  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1976  */
1977 
1978 /* Lookup supported device table */
1979 static const struct wm_product *
1980 wm_lookup(const struct pci_attach_args *pa)
1981 {
1982 	const struct wm_product *wmp;
1983 
1984 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1985 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1986 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1987 			return wmp;
1988 	}
1989 	return NULL;
1990 }
1991 
1992 /* The match function (ca_match) */
1993 static int
1994 wm_match(device_t parent, cfdata_t cf, void *aux)
1995 {
1996 	struct pci_attach_args *pa = aux;
1997 
1998 	if (wm_lookup(pa) != NULL)
1999 		return 1;
2000 
2001 	return 0;
2002 }
2003 
2004 /* The attach function (ca_attach) */
2005 static void
2006 wm_attach(device_t parent, device_t self, void *aux)
2007 {
2008 	struct wm_softc *sc = device_private(self);
2009 	struct pci_attach_args *pa = aux;
2010 	prop_dictionary_t dict;
2011 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2012 	pci_chipset_tag_t pc = pa->pa_pc;
2013 	int counts[PCI_INTR_TYPE_SIZE];
2014 	pci_intr_type_t max_type;
2015 	const char *eetype, *xname;
2016 	bus_space_tag_t memt;
2017 	bus_space_handle_t memh;
2018 	bus_size_t memsize;
2019 	int memh_valid;
2020 	int i, error;
2021 	const struct wm_product *wmp;
2022 	prop_data_t ea;
2023 	prop_number_t pn;
2024 	uint8_t enaddr[ETHER_ADDR_LEN];
2025 	char buf[256];
2026 	char wqname[MAXCOMLEN];
2027 	uint16_t cfg1, cfg2, swdpin, nvmword;
2028 	pcireg_t preg, memtype;
2029 	uint16_t eeprom_data, apme_mask;
2030 	bool force_clear_smbi;
2031 	uint32_t link_mode;
2032 	uint32_t reg;
2033 
2034 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
2035 	sc->sc_debug = WM_DEBUG_DEFAULT;
2036 #endif
2037 	sc->sc_dev = self;
2038 	callout_init(&sc->sc_tick_ch, CALLOUT_MPSAFE);
2039 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
2040 	sc->sc_core_stopping = false;
2041 
2042 	wmp = wm_lookup(pa);
2043 #ifdef DIAGNOSTIC
2044 	if (wmp == NULL) {
2045 		printf("\n");
2046 		panic("wm_attach: impossible");
2047 	}
2048 #endif
2049 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
2050 
2051 	sc->sc_pc = pa->pa_pc;
2052 	sc->sc_pcitag = pa->pa_tag;
2053 
2054 	if (pci_dma64_available(pa)) {
2055 		aprint_verbose(", 64-bit DMA");
2056 		sc->sc_dmat = pa->pa_dmat64;
2057 	} else {
2058 		aprint_verbose(", 32-bit DMA");
2059 		sc->sc_dmat = pa->pa_dmat;
2060 	}
2061 
2062 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
2063 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
2064 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
2065 
2066 	sc->sc_type = wmp->wmp_type;
2067 
2068 	/* Set default function pointers */
2069 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
2070 	sc->phy.release = sc->nvm.release = wm_put_null;
2071 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
2072 
2073 	if (sc->sc_type < WM_T_82543) {
2074 		if (sc->sc_rev < 2) {
2075 			aprint_error_dev(sc->sc_dev,
2076 			    "i82542 must be at least rev. 2\n");
2077 			return;
2078 		}
2079 		if (sc->sc_rev < 3)
2080 			sc->sc_type = WM_T_82542_2_0;
2081 	}
2082 
2083 	/*
2084 	 * Disable MSI for Errata:
2085 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
2086 	 *
2087 	 *  82544: Errata 25
2088 	 *  82540: Errata  6 (easy to reproduce device timeout)
2089 	 *  82545: Errata  4 (easy to reproduce device timeout)
2090 	 *  82546: Errata 26 (easy to reproduce device timeout)
2091 	 *  82541: Errata  7 (easy to reproduce device timeout)
2092 	 *
2093 	 * "Byte Enables 2 and 3 are not set on MSI writes"
2094 	 *
2095 	 *  82571 & 82572: Errata 63
2096 	 */
2097 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
2098 	    || (sc->sc_type == WM_T_82572))
2099 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
2100 
2101 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2102 	    || (sc->sc_type == WM_T_82580)
2103 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
2104 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
2105 		sc->sc_flags |= WM_F_NEWQUEUE;
2106 
2107 	/* Set device properties (mactype) */
2108 	dict = device_properties(sc->sc_dev);
2109 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
2110 
2111 	/*
2112 	 * Map the device.  All devices support memory-mapped access,
2113 	 * and it is really required for normal operation.
2114 	 */
2115 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
2116 	switch (memtype) {
2117 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
2118 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
2119 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
2120 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
2121 		break;
2122 	default:
2123 		memh_valid = 0;
2124 		break;
2125 	}
2126 
2127 	if (memh_valid) {
2128 		sc->sc_st = memt;
2129 		sc->sc_sh = memh;
2130 		sc->sc_ss = memsize;
2131 	} else {
2132 		aprint_error_dev(sc->sc_dev,
2133 		    "unable to map device registers\n");
2134 		return;
2135 	}
2136 
2137 	/*
2138 	 * In addition, i82544 and later support I/O mapped indirect
2139 	 * register access.  It is not desirable (nor supported in
2140 	 * this driver) to use it for normal operation, though it is
2141 	 * required to work around bugs in some chip versions.
2142 	 */
2143 	switch (sc->sc_type) {
2144 	case WM_T_82544:
2145 	case WM_T_82541:
2146 	case WM_T_82541_2:
2147 	case WM_T_82547:
2148 	case WM_T_82547_2:
2149 		/* First we have to find the I/O BAR. */
2150 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
2151 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
2152 			if (memtype == PCI_MAPREG_TYPE_IO)
2153 				break;
2154 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
2155 			    PCI_MAPREG_MEM_TYPE_64BIT)
2156 				i += 4;	/* skip high bits, too */
2157 		}
2158 		if (i < PCI_MAPREG_END) {
2159 			/*
2160 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
2161 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
2162 			 * That is no problem, because the newer chips don't
2163 			 * have this bug.
2164 			 *
2165 			 * The i8254x doesn't apparently respond when the
2166 			 * I/O BAR is 0, which looks somewhat like it's not
2167 			 * been configured.
2168 			 */
2169 			preg = pci_conf_read(pc, pa->pa_tag, i);
2170 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
2171 				aprint_error_dev(sc->sc_dev,
2172 				    "WARNING: I/O BAR at zero.\n");
2173 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
2174 			    0, &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)
2175 			    == 0) {
2176 				sc->sc_flags |= WM_F_IOH_VALID;
2177 			} else
2178 				aprint_error_dev(sc->sc_dev,
2179 				    "WARNING: unable to map I/O space\n");
2180 		}
2181 		break;
2182 	default:
2183 		break;
2184 	}
2185 
2186 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
2187 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
2188 	preg |= PCI_COMMAND_MASTER_ENABLE;
2189 	if (sc->sc_type < WM_T_82542_2_1)
2190 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
2191 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
2192 
2193 	/* Power up chip */
2194 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
2195 	    && error != EOPNOTSUPP) {
2196 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
2197 		return;
2198 	}
2199 
2200 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
2201 	/*
2202 	 * Don't use MSI-X if we can use only one queue, to save interrupt
2203 	 * resources.
2204 	 */
2205 	if (sc->sc_nqueues > 1) {
2206 		max_type = PCI_INTR_TYPE_MSIX;
2207 		/*
2208 		 * The 82583 has an MSI-X capability in its PCI configuration
2209 		 * space, but the device doesn't support it. At least the
2210 		 * documentation doesn't say anything about MSI-X.
2211 		 */
2212 		counts[PCI_INTR_TYPE_MSIX]
2213 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
2214 	} else {
2215 		max_type = PCI_INTR_TYPE_MSI;
2216 		counts[PCI_INTR_TYPE_MSIX] = 0;
2217 	}
2218 
2219 	/* Allocation settings */
2220 	counts[PCI_INTR_TYPE_MSI] = 1;
2221 	counts[PCI_INTR_TYPE_INTX] = 1;
2222 	/* overridden by disable flags */
2223 	if (wm_disable_msi != 0) {
2224 		counts[PCI_INTR_TYPE_MSI] = 0;
2225 		if (wm_disable_msix != 0) {
2226 			max_type = PCI_INTR_TYPE_INTX;
2227 			counts[PCI_INTR_TYPE_MSIX] = 0;
2228 		}
2229 	} else if (wm_disable_msix != 0) {
2230 		max_type = PCI_INTR_TYPE_MSI;
2231 		counts[PCI_INTR_TYPE_MSIX] = 0;
2232 	}
2233 
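	/*
	 * Allocate interrupts.  If setup of the allocated type fails, the
	 * interrupt is released and we retry with the next weaker type:
	 * MSI-X falls back to MSI, and MSI falls back to INTx.
	 */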
2234 alloc_retry:
2235 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
2236 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
2237 		return;
2238 	}
2239 
2240 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
2241 		error = wm_setup_msix(sc);
2242 		if (error) {
2243 			pci_intr_release(pc, sc->sc_intrs,
2244 			    counts[PCI_INTR_TYPE_MSIX]);
2245 
2246 			/* Setup for MSI: Disable MSI-X */
2247 			max_type = PCI_INTR_TYPE_MSI;
2248 			counts[PCI_INTR_TYPE_MSI] = 1;
2249 			counts[PCI_INTR_TYPE_INTX] = 1;
2250 			goto alloc_retry;
2251 		}
2252 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
2253 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
2254 		error = wm_setup_legacy(sc);
2255 		if (error) {
2256 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
2257 			    counts[PCI_INTR_TYPE_MSI]);
2258 
2259 			/* The next try is for INTx: Disable MSI */
2260 			max_type = PCI_INTR_TYPE_INTX;
2261 			counts[PCI_INTR_TYPE_INTX] = 1;
2262 			goto alloc_retry;
2263 		}
2264 	} else {
2265 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
2266 		error = wm_setup_legacy(sc);
2267 		if (error) {
2268 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
2269 			    counts[PCI_INTR_TYPE_INTX]);
2270 			return;
2271 		}
2272 	}
2273 
2274 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
2275 	error = workqueue_create(&sc->sc_queue_wq, wqname,
2276 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
2277 	    WQ_PERCPU | WQ_MPSAFE);
2278 	if (error) {
2279 		aprint_error_dev(sc->sc_dev,
2280 		    "unable to create TxRx workqueue\n");
2281 		goto out;
2282 	}
2283 
2284 	snprintf(wqname, sizeof(wqname), "%sReset", device_xname(sc->sc_dev));
2285 	error = workqueue_create(&sc->sc_reset_wq, wqname,
2286 	    wm_handle_reset_work, sc, WM_WORKQUEUE_PRI, IPL_SOFTCLOCK,
2287 	    WQ_MPSAFE);
2288 	if (error) {
2289 		workqueue_destroy(sc->sc_queue_wq);
2290 		aprint_error_dev(sc->sc_dev,
2291 		    "unable to create reset workqueue\n");
2292 		goto out;
2293 	}
2294 
2295 	/*
2296 	 * Check the function ID (unit number of the chip).
2297 	 */
2298 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
2299 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
2300 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2301 	    || (sc->sc_type == WM_T_82580)
2302 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2303 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
2304 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
2305 	else
2306 		sc->sc_funcid = 0;
2307 
2308 	/*
2309 	 * Determine a few things about the bus we're connected to.
2310 	 */
2311 	if (sc->sc_type < WM_T_82543) {
2312 		/* We don't really know the bus characteristics here. */
2313 		sc->sc_bus_speed = 33;
2314 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
2315 		/*
2316 		 * CSA (Communication Streaming Architecture) is about as
2317 		 * fast as a 32-bit 66MHz PCI bus.
2318 		 */
2319 		sc->sc_flags |= WM_F_CSA;
2320 		sc->sc_bus_speed = 66;
2321 		aprint_verbose_dev(sc->sc_dev,
2322 		    "Communication Streaming Architecture\n");
2323 		if (sc->sc_type == WM_T_82547) {
2324 			callout_init(&sc->sc_txfifo_ch, CALLOUT_MPSAFE);
2325 			callout_setfunc(&sc->sc_txfifo_ch,
2326 			    wm_82547_txfifo_stall, sc);
2327 			aprint_verbose_dev(sc->sc_dev,
2328 			    "using 82547 Tx FIFO stall work-around\n");
2329 		}
2330 	} else if (sc->sc_type >= WM_T_82571) {
2331 		sc->sc_flags |= WM_F_PCIE;
2332 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
2333 		    && (sc->sc_type != WM_T_ICH10)
2334 		    && (sc->sc_type != WM_T_PCH)
2335 		    && (sc->sc_type != WM_T_PCH2)
2336 		    && (sc->sc_type != WM_T_PCH_LPT)
2337 		    && (sc->sc_type != WM_T_PCH_SPT)
2338 		    && (sc->sc_type != WM_T_PCH_CNP)
2339 		    && (sc->sc_type != WM_T_PCH_TGP)) {
2340 			/* ICH* and PCH* have no PCIe capability registers */
2341 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2342 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
2343 				NULL) == 0)
2344 				aprint_error_dev(sc->sc_dev,
2345 				    "unable to find PCIe capability\n");
2346 		}
2347 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
2348 	} else {
2349 		reg = CSR_READ(sc, WMREG_STATUS);
2350 		if (reg & STATUS_BUS64)
2351 			sc->sc_flags |= WM_F_BUS64;
2352 		if ((reg & STATUS_PCIX_MODE) != 0) {
2353 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
2354 
2355 			sc->sc_flags |= WM_F_PCIX;
2356 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2357 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
2358 				aprint_error_dev(sc->sc_dev,
2359 				    "unable to find PCIX capability\n");
2360 			else if (sc->sc_type != WM_T_82545_3 &&
2361 			    sc->sc_type != WM_T_82546_3) {
2362 				/*
2363 				 * Work around a problem caused by the BIOS
2364 				 * setting the max memory read byte count
2365 				 * incorrectly.
2366 				 */
2367 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
2368 				    sc->sc_pcixe_capoff + PCIX_CMD);
2369 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
2370 				    sc->sc_pcixe_capoff + PCIX_STATUS);
2371 
2372 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
2373 				    PCIX_CMD_BYTECNT_SHIFT;
2374 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
2375 				    PCIX_STATUS_MAXB_SHIFT;
2376 				if (bytecnt > maxb) {
2377 					aprint_verbose_dev(sc->sc_dev,
2378 					    "resetting PCI-X MMRBC: %d -> %d\n",
2379 					    512 << bytecnt, 512 << maxb);
2380 					pcix_cmd = (pcix_cmd &
2381 					    ~PCIX_CMD_BYTECNT_MASK) |
2382 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
2383 					pci_conf_write(pa->pa_pc, pa->pa_tag,
2384 					    sc->sc_pcixe_capoff + PCIX_CMD,
2385 					    pcix_cmd);
2386 				}
2387 			}
2388 		}
2389 		/*
2390 		 * The quad port adapter is special; it has a PCIX-PCIX
2391 		 * bridge on the board, and can run the secondary bus at
2392 		 * a higher speed.
2393 		 */
2394 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
2395 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
2396 								      : 66;
2397 		} else if (sc->sc_flags & WM_F_PCIX) {
2398 			switch (reg & STATUS_PCIXSPD_MASK) {
2399 			case STATUS_PCIXSPD_50_66:
2400 				sc->sc_bus_speed = 66;
2401 				break;
2402 			case STATUS_PCIXSPD_66_100:
2403 				sc->sc_bus_speed = 100;
2404 				break;
2405 			case STATUS_PCIXSPD_100_133:
2406 				sc->sc_bus_speed = 133;
2407 				break;
2408 			default:
2409 				aprint_error_dev(sc->sc_dev,
2410 				    "unknown PCIXSPD %d; assuming 66MHz\n",
2411 				    reg & STATUS_PCIXSPD_MASK);
2412 				sc->sc_bus_speed = 66;
2413 				break;
2414 			}
2415 		} else
2416 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
2417 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
2418 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
2419 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
2420 	}
2421 
2422 	/* clear interesting stat counters */
2423 	CSR_READ(sc, WMREG_COLC);
2424 	CSR_READ(sc, WMREG_RXERRC);
2425 
2426 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
2427 	    || (sc->sc_type >= WM_T_ICH8))
2428 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2429 	if (sc->sc_type >= WM_T_ICH8)
2430 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2431 
2432 	/* Set PHY, NVM mutex related stuff */
2433 	switch (sc->sc_type) {
2434 	case WM_T_82542_2_0:
2435 	case WM_T_82542_2_1:
2436 	case WM_T_82543:
2437 	case WM_T_82544:
2438 		/* Microwire */
2439 		sc->nvm.read = wm_nvm_read_uwire;
2440 		sc->sc_nvm_wordsize = 64;
2441 		sc->sc_nvm_addrbits = 6;
2442 		break;
2443 	case WM_T_82540:
2444 	case WM_T_82545:
2445 	case WM_T_82545_3:
2446 	case WM_T_82546:
2447 	case WM_T_82546_3:
2448 		/* Microwire */
2449 		sc->nvm.read = wm_nvm_read_uwire;
2450 		reg = CSR_READ(sc, WMREG_EECD);
2451 		if (reg & EECD_EE_SIZE) {
2452 			sc->sc_nvm_wordsize = 256;
2453 			sc->sc_nvm_addrbits = 8;
2454 		} else {
2455 			sc->sc_nvm_wordsize = 64;
2456 			sc->sc_nvm_addrbits = 6;
2457 		}
2458 		sc->sc_flags |= WM_F_LOCK_EECD;
2459 		sc->nvm.acquire = wm_get_eecd;
2460 		sc->nvm.release = wm_put_eecd;
2461 		break;
2462 	case WM_T_82541:
2463 	case WM_T_82541_2:
2464 	case WM_T_82547:
2465 	case WM_T_82547_2:
2466 		reg = CSR_READ(sc, WMREG_EECD);
2467 		/*
2468 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
2469 		 * 8254[17], so set the flags and functions before calling it.
2470 		 */
2471 		sc->sc_flags |= WM_F_LOCK_EECD;
2472 		sc->nvm.acquire = wm_get_eecd;
2473 		sc->nvm.release = wm_put_eecd;
2474 		if (reg & EECD_EE_TYPE) {
2475 			/* SPI */
2476 			sc->nvm.read = wm_nvm_read_spi;
2477 			sc->sc_flags |= WM_F_EEPROM_SPI;
2478 			wm_nvm_set_addrbits_size_eecd(sc);
2479 		} else {
2480 			/* Microwire */
2481 			sc->nvm.read = wm_nvm_read_uwire;
2482 			if ((reg & EECD_EE_ABITS) != 0) {
2483 				sc->sc_nvm_wordsize = 256;
2484 				sc->sc_nvm_addrbits = 8;
2485 			} else {
2486 				sc->sc_nvm_wordsize = 64;
2487 				sc->sc_nvm_addrbits = 6;
2488 			}
2489 		}
2490 		break;
2491 	case WM_T_82571:
2492 	case WM_T_82572:
2493 		/* SPI */
2494 		sc->nvm.read = wm_nvm_read_eerd;
2495 		/* Don't use WM_F_LOCK_EECD because we use EERD */
2496 		sc->sc_flags |= WM_F_EEPROM_SPI;
2497 		wm_nvm_set_addrbits_size_eecd(sc);
2498 		sc->phy.acquire = wm_get_swsm_semaphore;
2499 		sc->phy.release = wm_put_swsm_semaphore;
2500 		sc->nvm.acquire = wm_get_nvm_82571;
2501 		sc->nvm.release = wm_put_nvm_82571;
2502 		break;
2503 	case WM_T_82573:
2504 	case WM_T_82574:
2505 	case WM_T_82583:
2506 		sc->nvm.read = wm_nvm_read_eerd;
2507 		/* Don't use WM_F_LOCK_EECD because we use EERD */
2508 		if (sc->sc_type == WM_T_82573) {
2509 			sc->phy.acquire = wm_get_swsm_semaphore;
2510 			sc->phy.release = wm_put_swsm_semaphore;
2511 			sc->nvm.acquire = wm_get_nvm_82571;
2512 			sc->nvm.release = wm_put_nvm_82571;
2513 		} else {
2514 			/* Both PHY and NVM use the same semaphore. */
2515 			sc->phy.acquire = sc->nvm.acquire
2516 			    = wm_get_swfwhw_semaphore;
2517 			sc->phy.release = sc->nvm.release
2518 			    = wm_put_swfwhw_semaphore;
2519 		}
2520 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
2521 			sc->sc_flags |= WM_F_EEPROM_FLASH;
2522 			sc->sc_nvm_wordsize = 2048;
2523 		} else {
2524 			/* SPI */
2525 			sc->sc_flags |= WM_F_EEPROM_SPI;
2526 			wm_nvm_set_addrbits_size_eecd(sc);
2527 		}
2528 		break;
2529 	case WM_T_82575:
2530 	case WM_T_82576:
2531 	case WM_T_82580:
2532 	case WM_T_I350:
2533 	case WM_T_I354:
2534 	case WM_T_80003:
2535 		/* SPI */
2536 		sc->sc_flags |= WM_F_EEPROM_SPI;
2537 		wm_nvm_set_addrbits_size_eecd(sc);
2538 		if ((sc->sc_type == WM_T_80003)
2539 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
2540 			sc->nvm.read = wm_nvm_read_eerd;
2541 			/* Don't use WM_F_LOCK_EECD because we use EERD */
2542 		} else {
2543 			sc->nvm.read = wm_nvm_read_spi;
2544 			sc->sc_flags |= WM_F_LOCK_EECD;
2545 		}
2546 		sc->phy.acquire = wm_get_phy_82575;
2547 		sc->phy.release = wm_put_phy_82575;
2548 		sc->nvm.acquire = wm_get_nvm_80003;
2549 		sc->nvm.release = wm_put_nvm_80003;
2550 		break;
2551 	case WM_T_ICH8:
2552 	case WM_T_ICH9:
2553 	case WM_T_ICH10:
2554 	case WM_T_PCH:
2555 	case WM_T_PCH2:
2556 	case WM_T_PCH_LPT:
2557 		sc->nvm.read = wm_nvm_read_ich8;
2558 		/* FLASH */
2559 		sc->sc_flags |= WM_F_EEPROM_FLASH;
2560 		sc->sc_nvm_wordsize = 2048;
2561 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
2562 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2563 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2564 			aprint_error_dev(sc->sc_dev,
2565 			    "can't map FLASH registers\n");
2566 			goto out;
2567 		}
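		/*
		 * GFPREG describes the Flash region in units of
		 * ICH_FLASH_SECTOR_SIZE: the base is in the low half and
		 * the limit in the high half.  The bank size computed
		 * below is half of the region (two banks), in 16-bit words.
		 */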
2568 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2569 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2570 		    ICH_FLASH_SECTOR_SIZE;
2571 		sc->sc_ich8_flash_bank_size =
2572 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2573 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
2574 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2575 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
2576 		sc->sc_flashreg_offset = 0;
2577 		sc->phy.acquire = wm_get_swflag_ich8lan;
2578 		sc->phy.release = wm_put_swflag_ich8lan;
2579 		sc->nvm.acquire = wm_get_nvm_ich8lan;
2580 		sc->nvm.release = wm_put_nvm_ich8lan;
2581 		break;
2582 	case WM_T_PCH_SPT:
2583 	case WM_T_PCH_CNP:
2584 	case WM_T_PCH_TGP:
2585 		sc->nvm.read = wm_nvm_read_spt;
2586 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
2587 		sc->sc_flags |= WM_F_EEPROM_FLASH;
2588 		sc->sc_flasht = sc->sc_st;
2589 		sc->sc_flashh = sc->sc_sh;
2590 		sc->sc_ich8_flash_base = 0;
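		/*
		 * The NVM size is derived from bits 5:1 of the STRAP
		 * register (value + 1, in NVM_SIZE_MULTIPLIER bytes).
		 */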
2591 		sc->sc_nvm_wordsize =
2592 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
2593 		    * NVM_SIZE_MULTIPLIER;
2594 		/* It is the size in bytes; we want words */
2595 		sc->sc_nvm_wordsize /= 2;
2596 		/* Assume 2 banks */
2597 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
2598 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
2599 		sc->phy.acquire = wm_get_swflag_ich8lan;
2600 		sc->phy.release = wm_put_swflag_ich8lan;
2601 		sc->nvm.acquire = wm_get_nvm_ich8lan;
2602 		sc->nvm.release = wm_put_nvm_ich8lan;
2603 		break;
2604 	case WM_T_I210:
2605 	case WM_T_I211:
2606 		/* Allow a single clear of the SW semaphore on I210 and newer */
2607 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
2608 		if (wm_nvm_flash_presence_i210(sc)) {
2609 			sc->nvm.read = wm_nvm_read_eerd;
2610 			/* Don't use WM_F_LOCK_EECD because we use EERD */
2611 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2612 			wm_nvm_set_addrbits_size_eecd(sc);
2613 		} else {
2614 			sc->nvm.read = wm_nvm_read_invm;
2615 			sc->sc_flags |= WM_F_EEPROM_INVM;
2616 			sc->sc_nvm_wordsize = INVM_SIZE;
2617 		}
2618 		sc->phy.acquire = wm_get_phy_82575;
2619 		sc->phy.release = wm_put_phy_82575;
2620 		sc->nvm.acquire = wm_get_nvm_80003;
2621 		sc->nvm.release = wm_put_nvm_80003;
2622 		break;
2623 	default:
2624 		break;
2625 	}
2626 
2627 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
2628 	switch (sc->sc_type) {
2629 	case WM_T_82571:
2630 	case WM_T_82572:
2631 		reg = CSR_READ(sc, WMREG_SWSM2);
2632 		if ((reg & SWSM2_LOCK) == 0) {
2633 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2634 			force_clear_smbi = true;
2635 		} else
2636 			force_clear_smbi = false;
2637 		break;
2638 	case WM_T_82573:
2639 	case WM_T_82574:
2640 	case WM_T_82583:
2641 		force_clear_smbi = true;
2642 		break;
2643 	default:
2644 		force_clear_smbi = false;
2645 		break;
2646 	}
2647 	if (force_clear_smbi) {
2648 		reg = CSR_READ(sc, WMREG_SWSM);
2649 		if ((reg & SWSM_SMBI) != 0)
2650 			aprint_error_dev(sc->sc_dev,
2651 			    "Please update the Bootagent\n");
2652 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2653 	}
2654 
2655 	/*
2656 	 * Defer printing the EEPROM type until after verifying the checksum.
2657 	 * This allows the EEPROM type to be printed correctly in the case
2658 	 * that no EEPROM is attached.
2659 	 */
2660 	/*
2661 	 * Validate the EEPROM checksum. If the checksum fails, flag
2662 	 * this for later, so we can fail future reads from the EEPROM.
2663 	 */
2664 	if (wm_nvm_validate_checksum(sc)) {
2665 		/*
2666 		 * Check a second time, because some PCI-e parts fail the
2667 		 * first check due to the link being in a sleep state.
2668 		 */
2669 		if (wm_nvm_validate_checksum(sc))
2670 			sc->sc_flags |= WM_F_EEPROM_INVALID;
2671 	}
2672 
2673 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
2674 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2675 	else {
2676 		aprint_verbose_dev(sc->sc_dev, "%u words ",
2677 		    sc->sc_nvm_wordsize);
2678 		if (sc->sc_flags & WM_F_EEPROM_INVM)
2679 			aprint_verbose("iNVM");
2680 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2681 			aprint_verbose("FLASH(HW)");
2682 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2683 			aprint_verbose("FLASH");
2684 		else {
2685 			if (sc->sc_flags & WM_F_EEPROM_SPI)
2686 				eetype = "SPI";
2687 			else
2688 				eetype = "MicroWire";
2689 			aprint_verbose("(%d address bits) %s EEPROM",
2690 			    sc->sc_nvm_addrbits, eetype);
2691 		}
2692 	}
2693 	wm_nvm_version(sc);
2694 	aprint_verbose("\n");
2695 
2696 	/*
2697 	 * XXX The first call of wm_gmii_setup_phytype. The result might be
2698 	 * incorrect.
2699 	 */
2700 	wm_gmii_setup_phytype(sc, 0, 0);
2701 
2702 	/* Check for WM_F_WOL on some chips before wm_reset() */
2703 	switch (sc->sc_type) {
2704 	case WM_T_ICH8:
2705 	case WM_T_ICH9:
2706 	case WM_T_ICH10:
2707 	case WM_T_PCH:
2708 	case WM_T_PCH2:
2709 	case WM_T_PCH_LPT:
2710 	case WM_T_PCH_SPT:
2711 	case WM_T_PCH_CNP:
2712 	case WM_T_PCH_TGP:
2713 		apme_mask = WUC_APME;
2714 		eeprom_data = CSR_READ(sc, WMREG_WUC);
2715 		if ((eeprom_data & apme_mask) != 0)
2716 			sc->sc_flags |= WM_F_WOL;
2717 		break;
2718 	default:
2719 		break;
2720 	}
2721 
2722 	/* Reset the chip to a known state. */
2723 	wm_reset(sc);
2724 
2725 	/* sc->sc_pba is set in wm_reset(). */
2726 	aprint_verbose_dev(sc->sc_dev, "RX packet buffer size: %uKB\n",
2727 	    sc->sc_pba);
2728 
2729 	/*
2730 	 * Check for I21[01] PLL workaround.
2731 	 *
2732 	 * Three cases:
2733 	 * a) Chip is I211.
2734 	 * b) Chip is I210 and it uses INVM (not FLASH).
2735 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
2736 	 */
2737 	if (sc->sc_type == WM_T_I211)
2738 		sc->sc_flags |= WM_F_PLL_WA_I210;
2739 	if (sc->sc_type == WM_T_I210) {
2740 		if (!wm_nvm_flash_presence_i210(sc))
2741 			sc->sc_flags |= WM_F_PLL_WA_I210;
2742 		else if ((sc->sc_nvm_ver_major < 3)
2743 		    || ((sc->sc_nvm_ver_major == 3)
2744 			&& (sc->sc_nvm_ver_minor < 25))) {
2745 			aprint_verbose_dev(sc->sc_dev,
2746 			    "ROM image version %d.%d is older than 3.25\n",
2747 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2748 			sc->sc_flags |= WM_F_PLL_WA_I210;
2749 		}
2750 	}
2751 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2752 		wm_pll_workaround_i210(sc);
2753 
2754 	wm_get_wakeup(sc);
2755 
2756 	/* Non-AMT based hardware can now take control from firmware */
2757 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2758 		wm_get_hw_control(sc);
2759 
2760 	/*
2761 	 * Read the Ethernet address from the EEPROM, if not first found
2762 	 * in device properties.
2763 	 */
2764 	ea = prop_dictionary_get(dict, "mac-address");
2765 	if (ea != NULL) {
2766 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2767 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2768 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
2769 	} else {
2770 		if (wm_read_mac_addr(sc, enaddr) != 0) {
2771 			aprint_error_dev(sc->sc_dev,
2772 			    "unable to read Ethernet address\n");
2773 			goto out;
2774 		}
2775 	}
2776 
2777 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2778 	    ether_sprintf(enaddr));
2779 
2780 	/*
2781 	 * Read the config info from the EEPROM, and set up various
2782 	 * bits in the control registers based on their contents.
2783 	 */
2784 	pn = prop_dictionary_get(dict, "i82543-cfg1");
2785 	if (pn != NULL) {
2786 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2787 		cfg1 = (uint16_t) prop_number_signed_value(pn);
2788 	} else {
2789 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2790 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2791 			goto out;
2792 		}
2793 	}
2794 
2795 	pn = prop_dictionary_get(dict, "i82543-cfg2");
2796 	if (pn != NULL) {
2797 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2798 		cfg2 = (uint16_t) prop_number_signed_value(pn);
2799 	} else {
2800 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2801 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2802 			goto out;
2803 		}
2804 	}
2805 
2806 	/* check for WM_F_WOL */
2807 	switch (sc->sc_type) {
2808 	case WM_T_82542_2_0:
2809 	case WM_T_82542_2_1:
2810 	case WM_T_82543:
2811 		/* dummy? */
2812 		eeprom_data = 0;
2813 		apme_mask = NVM_CFG3_APME;
2814 		break;
2815 	case WM_T_82544:
2816 		apme_mask = NVM_CFG2_82544_APM_EN;
2817 		eeprom_data = cfg2;
2818 		break;
2819 	case WM_T_82546:
2820 	case WM_T_82546_3:
2821 	case WM_T_82571:
2822 	case WM_T_82572:
2823 	case WM_T_82573:
2824 	case WM_T_82574:
2825 	case WM_T_82583:
2826 	case WM_T_80003:
2827 	case WM_T_82575:
2828 	case WM_T_82576:
2829 		apme_mask = NVM_CFG3_APME;
2830 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2831 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2832 		break;
2833 	case WM_T_82580:
2834 	case WM_T_I350:
2835 	case WM_T_I354:
2836 	case WM_T_I210:
2837 	case WM_T_I211:
2838 		apme_mask = NVM_CFG3_APME;
2839 		wm_nvm_read(sc,
2840 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2841 		    1, &eeprom_data);
2842 		break;
2843 	case WM_T_ICH8:
2844 	case WM_T_ICH9:
2845 	case WM_T_ICH10:
2846 	case WM_T_PCH:
2847 	case WM_T_PCH2:
2848 	case WM_T_PCH_LPT:
2849 	case WM_T_PCH_SPT:
2850 	case WM_T_PCH_CNP:
2851 	case WM_T_PCH_TGP:
2852 		/* Already checked before wm_reset() */
2853 		apme_mask = eeprom_data = 0;
2854 		break;
2855 	default: /* XXX 82540 */
2856 		apme_mask = NVM_CFG3_APME;
2857 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2858 		break;
2859 	}
2860 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2861 	if ((eeprom_data & apme_mask) != 0)
2862 		sc->sc_flags |= WM_F_WOL;
2863 
2864 	/*
2865 	 * We have the EEPROM settings; now apply the special cases
2866 	 * where the EEPROM may be wrong or the board won't support
2867 	 * wake on LAN on a particular port.
2868 	 */
2869 	switch (sc->sc_pcidevid) {
2870 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
2871 		sc->sc_flags &= ~WM_F_WOL;
2872 		break;
2873 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
2874 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
2875 		/* Wake events only supported on port A for dual fiber
2876 		 * regardless of eeprom setting */
2877 		if (sc->sc_funcid == 1)
2878 			sc->sc_flags &= ~WM_F_WOL;
2879 		break;
2880 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
2881 		/* If quad port adapter, disable WoL on all but port A */
2882 		if (sc->sc_funcid != 0)
2883 			sc->sc_flags &= ~WM_F_WOL;
2884 		break;
2885 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
2886 		/* Wake events only supported on port A for dual fiber
2887 		 * regardless of eeprom setting */
2888 		if (sc->sc_funcid == 1)
2889 			sc->sc_flags &= ~WM_F_WOL;
2890 		break;
2891 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
2892 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
2893 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
2894 		/* If quad port adapter, disable WoL on all but port A */
2895 		if (sc->sc_funcid != 0)
2896 			sc->sc_flags &= ~WM_F_WOL;
2897 		break;
2898 	}
2899 
2900 	if (sc->sc_type >= WM_T_82575) {
2901 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2902 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
2903 			    nvmword);
2904 			if ((sc->sc_type == WM_T_82575) ||
2905 			    (sc->sc_type == WM_T_82576)) {
2906 				/* Check NVM for autonegotiation */
2907 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
2908 				    != 0)
2909 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2910 			}
2911 			if ((sc->sc_type == WM_T_82575) ||
2912 			    (sc->sc_type == WM_T_I350)) {
2913 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
2914 					sc->sc_flags |= WM_F_MAS;
2915 			}
2916 		}
2917 	}
2918 
2919 	/*
2920 	 * XXX need special handling for some multiple port cards
	 * to disable a particular port.
2922 	 */
2923 
2924 	if (sc->sc_type >= WM_T_82544) {
2925 		pn = prop_dictionary_get(dict, "i82543-swdpin");
2926 		if (pn != NULL) {
2927 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2928 			swdpin = (uint16_t) prop_number_signed_value(pn);
2929 		} else {
2930 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2931 				aprint_error_dev(sc->sc_dev,
2932 				    "unable to read SWDPIN\n");
2933 				goto out;
2934 			}
2935 		}
2936 	}
2937 
2938 	if (cfg1 & NVM_CFG1_ILOS)
2939 		sc->sc_ctrl |= CTRL_ILOS;
2940 
2941 	/*
2942 	 * XXX
	 * This code isn't correct because pins 2 and 3 are located
	 * at different positions on newer chips. Check all datasheets.
	 *
	 * Until this is resolved, only handle chips up to the 82580.
2947 	 */
2948 	if (sc->sc_type <= WM_T_82580) {
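		/*
		 * Move the 4-bit software-definable pin direction
		 * (SWDPIO) and data (SWDPIN) fields from the NVM word
		 * into the corresponding CTRL register fields.
		 */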
2949 		if (sc->sc_type >= WM_T_82544) {
2950 			sc->sc_ctrl |=
2951 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2952 			    CTRL_SWDPIO_SHIFT;
2953 			sc->sc_ctrl |=
2954 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2955 			    CTRL_SWDPINS_SHIFT;
2956 		} else {
2957 			sc->sc_ctrl |=
2958 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2959 			    CTRL_SWDPIO_SHIFT;
2960 		}
2961 	}
2962 
2963 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
2964 		wm_nvm_read(sc,
2965 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2966 		    1, &nvmword);
2967 		if (nvmword & NVM_CFG3_ILOS)
2968 			sc->sc_ctrl |= CTRL_ILOS;
2969 	}
2970 
2971 #if 0
2972 	if (sc->sc_type >= WM_T_82544) {
2973 		if (cfg1 & NVM_CFG1_IPS0)
2974 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2975 		if (cfg1 & NVM_CFG1_IPS1)
2976 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2977 		sc->sc_ctrl_ext |=
2978 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2979 		    CTRL_EXT_SWDPIO_SHIFT;
2980 		sc->sc_ctrl_ext |=
2981 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2982 		    CTRL_EXT_SWDPINS_SHIFT;
2983 	} else {
2984 		sc->sc_ctrl_ext |=
2985 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2986 		    CTRL_EXT_SWDPIO_SHIFT;
2987 	}
2988 #endif
2989 
2990 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2991 #if 0
2992 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2993 #endif
2994 
2995 	if (sc->sc_type == WM_T_PCH) {
2996 		uint16_t val;
2997 
2998 		/* Save the NVM K1 bit setting */
2999 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
3000 
3001 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
3002 			sc->sc_nvm_k1_enabled = 1;
3003 		else
3004 			sc->sc_nvm_k1_enabled = 0;
3005 	}
3006 
	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
3008 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
3009 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
3010 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
3011 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
3012 	    || sc->sc_type == WM_T_PCH_TGP
3013 	    || sc->sc_type == WM_T_82573
3014 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
3015 		/* Copper only */
3016 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
	    || (sc->sc_type == WM_T_I211)) {
3020 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
3021 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
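		/*
		 * The link mode field in CTRL_EXT selects between the
		 * internal PHY (GMII), 1000BASE-KX, SGMII and SerDes
		 * operation; for the SGMII/SerDes cases the media type
		 * may still be re-detected from the SFP module below.
		 */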
3022 		switch (link_mode) {
3023 		case CTRL_EXT_LINK_MODE_1000KX:
3024 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
3025 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
3026 			break;
3027 		case CTRL_EXT_LINK_MODE_SGMII:
3028 			if (wm_sgmii_uses_mdio(sc)) {
3029 				aprint_normal_dev(sc->sc_dev,
3030 				    "SGMII(MDIO)\n");
3031 				sc->sc_flags |= WM_F_SGMII;
3032 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
3033 				break;
3034 			}
3035 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
3036 			/*FALLTHROUGH*/
3037 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
3038 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
3039 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
3040 				if (link_mode
3041 				    == CTRL_EXT_LINK_MODE_SGMII) {
3042 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
3043 					sc->sc_flags |= WM_F_SGMII;
3044 					aprint_verbose_dev(sc->sc_dev,
3045 					    "SGMII\n");
3046 				} else {
3047 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
3048 					aprint_verbose_dev(sc->sc_dev,
3049 					    "SERDES\n");
3050 				}
3051 				break;
3052 			}
3053 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
3054 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
3055 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
3056 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
3057 				sc->sc_flags |= WM_F_SGMII;
3058 			}
3059 			/* Do not change link mode for 100BaseFX */
3060 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
3061 				break;
3062 
3063 			/* Change current link mode setting */
3064 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
3065 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
3066 				reg |= CTRL_EXT_LINK_MODE_SGMII;
3067 			else
3068 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
3069 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3070 			break;
3071 		case CTRL_EXT_LINK_MODE_GMII:
3072 		default:
3073 			aprint_normal_dev(sc->sc_dev, "Copper\n");
3074 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
3075 			break;
3076 		}
3077 
		if ((sc->sc_flags & WM_F_SGMII) != 0)
			reg |= CTRL_EXT_I2C_ENA;
		else
			reg &= ~CTRL_EXT_I2C_ENA;
3083 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3084 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
3085 			if (!wm_sgmii_uses_mdio(sc))
3086 				wm_gmii_setup_phytype(sc, 0, 0);
3087 			wm_reset_mdicnfg_82580(sc);
3088 		}
3089 	} else if (sc->sc_type < WM_T_82543 ||
3090 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
3091 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
3092 			aprint_error_dev(sc->sc_dev,
3093 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
3094 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
3095 		}
3096 	} else {
3097 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
3098 			aprint_error_dev(sc->sc_dev,
3099 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
3100 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
3101 		}
3102 	}
3103 
3104 	if (sc->sc_type >= WM_T_PCH2)
3105 		sc->sc_flags |= WM_F_EEE;
3106 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
3107 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
3108 		/* XXX: Need special handling for I354. (not yet) */
3109 		if (sc->sc_type != WM_T_I354)
3110 			sc->sc_flags |= WM_F_EEE;
3111 	}
3112 
3113 	/*
3114 	 * The I350 has a bug where it always strips the CRC whether
	 * asked to or not. So ask for stripped CRC here and cope in rxeof.
3116 	 */
3117 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3118 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3119 		sc->sc_flags |= WM_F_CRC_STRIP;
3120 
3121 	/*
3122 	 * Workaround for some chips to delay sending LINK_STATE_UP.
	 * Some systems can't send packets soon after link-up. See also
3124 	 * wm_linkintr_gmii(), wm_tick() and wm_gmii_mediastatus().
3125 	 */
3126 	switch (sc->sc_type) {
3127 	case WM_T_I350:
3128 	case WM_T_I354:
3129 	case WM_T_I210:
3130 	case WM_T_I211:
3131 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
3132 			sc->sc_flags |= WM_F_DELAY_LINKUP;
3133 		break;
3134 	default:
3135 		break;
3136 	}
3137 
3138 	/* Set device properties (macflags) */
3139 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
3140 
3141 	if (sc->sc_flags != 0) {
3142 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
3143 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
3144 	}
3145 
3146 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
3147 
3148 	/* Initialize the media structures accordingly. */
3149 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
3150 		wm_gmii_mediainit(sc, wmp->wmp_product);
3151 	else
3152 		wm_tbi_mediainit(sc); /* All others */
3153 
3154 	ifp = &sc->sc_ethercom.ec_if;
3155 	xname = device_xname(sc->sc_dev);
3156 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
3157 	ifp->if_softc = sc;
3158 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3159 	ifp->if_extflags = IFEF_MPSAFE;
3160 	ifp->if_ioctl = wm_ioctl;
3161 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3162 		ifp->if_start = wm_nq_start;
3163 		/*
3164 		 * When the number of CPUs is one and the controller can use
		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
		 * the other for link status changes.
3168 		 * In this situation, wm_nq_transmit() is disadvantageous
3169 		 * because of wm_select_txqueue() and pcq(9) overhead.
3170 		 */
3171 		if (wm_is_using_multiqueue(sc))
3172 			ifp->if_transmit = wm_nq_transmit;
3173 	} else {
3174 		ifp->if_start = wm_start;
3175 		/*
3176 		 * wm_transmit() has the same disadvantages as wm_nq_transmit()
3177 		 * described above.
3178 		 */
3179 		if (wm_is_using_multiqueue(sc))
3180 			ifp->if_transmit = wm_transmit;
3181 	}
	/* wm(4) does not use ifp->if_watchdog; wm_tick serves as the watchdog. */
3183 	ifp->if_init = wm_init;
3184 	ifp->if_stop = wm_stop;
3185 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
3186 	IFQ_SET_READY(&ifp->if_snd);
3187 
3188 	/* Check for jumbo frame */
3189 	switch (sc->sc_type) {
3190 	case WM_T_82573:
3191 		/* XXX limited to 9234 if ASPM is disabled */
3192 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
3193 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
3194 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3195 		break;
3196 	case WM_T_82571:
3197 	case WM_T_82572:
3198 	case WM_T_82574:
3199 	case WM_T_82583:
3200 	case WM_T_82575:
3201 	case WM_T_82576:
3202 	case WM_T_82580:
3203 	case WM_T_I350:
3204 	case WM_T_I354:
3205 	case WM_T_I210:
3206 	case WM_T_I211:
3207 	case WM_T_80003:
3208 	case WM_T_ICH9:
3209 	case WM_T_ICH10:
3210 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
3211 	case WM_T_PCH_LPT:
3212 	case WM_T_PCH_SPT:
3213 	case WM_T_PCH_CNP:
3214 	case WM_T_PCH_TGP:
3215 		/* XXX limited to 9234 */
3216 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3217 		break;
3218 	case WM_T_PCH:
3219 		/* XXX limited to 4096 */
3220 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3221 		break;
3222 	case WM_T_82542_2_0:
3223 	case WM_T_82542_2_1:
3224 	case WM_T_ICH8:
3225 		/* No support for jumbo frame */
3226 		break;
3227 	default:
3228 		/* ETHER_MAX_LEN_JUMBO */
3229 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3230 		break;
3231 	}
3232 
	/* If we're an i82543 or greater, we can support VLANs. */
3234 	if (sc->sc_type >= WM_T_82543) {
3235 		sc->sc_ethercom.ec_capabilities |=
3236 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
3237 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
3238 	}
3239 
3240 	if ((sc->sc_flags & WM_F_EEE) != 0)
3241 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
3242 
3243 	/*
3244 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
3245 	 * on i82543 and later.
3246 	 */
3247 	if (sc->sc_type >= WM_T_82543) {
3248 		ifp->if_capabilities |=
3249 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
3250 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
3251 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
3252 		    IFCAP_CSUM_TCPv6_Tx |
3253 		    IFCAP_CSUM_UDPv6_Tx;
3254 	}
3255 
3256 	/*
3257 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
3258 	 *
3259 	 *	82541GI (8086:1076) ... no
3260 	 *	82572EI (8086:10b9) ... yes
3261 	 */
3262 	if (sc->sc_type >= WM_T_82571) {
3263 		ifp->if_capabilities |=
3264 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
3265 	}
3266 
3267 	/*
	 * If we're an i82544 or greater (except i82547), we can do
3269 	 * TCP segmentation offload.
3270 	 */
3271 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547)
3272 		ifp->if_capabilities |= IFCAP_TSOv4;
3273 
3274 	if (sc->sc_type >= WM_T_82571)
3275 		ifp->if_capabilities |= IFCAP_TSOv6;
3276 
3277 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
3278 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
3279 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
3280 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
3281 
3282 	/* Attach the interface. */
3283 	if_initialize(ifp);
3284 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
3285 	ether_ifattach(ifp, enaddr);
3286 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
3287 	if_register(ifp);
3288 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
3289 	    RND_FLAG_DEFAULT);
3290 
3291 #ifdef WM_EVENT_COUNTERS
3292 	/* Attach event counters. */
3293 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
3294 	    NULL, xname, "linkintr");
3295 
3296 	evcnt_attach_dynamic(&sc->sc_ev_crcerrs, EVCNT_TYPE_MISC,
3297 	    NULL, xname, "CRC Error");
3298 	evcnt_attach_dynamic(&sc->sc_ev_symerrc, EVCNT_TYPE_MISC,
3299 	    NULL, xname, "Symbol Error");
3300 	evcnt_attach_dynamic(&sc->sc_ev_mpc, EVCNT_TYPE_MISC,
3301 	    NULL, xname, "Missed Packets");
3302 	evcnt_attach_dynamic(&sc->sc_ev_colc, EVCNT_TYPE_MISC,
3303 	    NULL, xname, "Collision");
3304 	evcnt_attach_dynamic(&sc->sc_ev_sec, EVCNT_TYPE_MISC,
3305 	    NULL, xname, "Sequence Error");
3306 	evcnt_attach_dynamic(&sc->sc_ev_rlec, EVCNT_TYPE_MISC,
3307 	    NULL, xname, "Receive Length Error");
3308 
3309 	if (sc->sc_type >= WM_T_82543) {
3310 		evcnt_attach_dynamic(&sc->sc_ev_algnerrc, EVCNT_TYPE_MISC,
3311 		    NULL, xname, "Alignment Error");
3312 		evcnt_attach_dynamic(&sc->sc_ev_rxerrc, EVCNT_TYPE_MISC,
3313 		    NULL, xname, "Receive Error");
3314 		/* XXX Does 82575 have HTDPMC? */
3315 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
3316 			evcnt_attach_dynamic(&sc->sc_ev_cexterr,
3317 			    EVCNT_TYPE_MISC, NULL, xname,
3318 			    "Carrier Extension Error");
3319 		else
3320 			evcnt_attach_dynamic(&sc->sc_ev_htdpmc,
3321 			    EVCNT_TYPE_MISC, NULL, xname,
3322 			    "Host Transmit Discarded Packets by MAC");
3323 
3324 		evcnt_attach_dynamic(&sc->sc_ev_tncrs, EVCNT_TYPE_MISC,
3325 		    NULL, xname, "Tx with No CRS");
3326 		evcnt_attach_dynamic(&sc->sc_ev_tsctc, EVCNT_TYPE_MISC,
3327 		    NULL, xname, "TCP Segmentation Context Tx");
3328 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
3329 			evcnt_attach_dynamic(&sc->sc_ev_tsctfc,
3330 			    EVCNT_TYPE_MISC, NULL, xname,
3331 			    "TCP Segmentation Context Tx Fail");
3332 		else {
3333 			/* XXX Is the circuit breaker only for 82576? */
3334 			evcnt_attach_dynamic(&sc->sc_ev_cbrdpc,
3335 			    EVCNT_TYPE_MISC, NULL, xname,
3336 			    "Circuit Breaker Rx Dropped Packet");
3337 			evcnt_attach_dynamic(&sc->sc_ev_cbrmpc,
3338 			    EVCNT_TYPE_MISC, NULL, xname,
3339 			    "Circuit Breaker Rx Manageability Packet");
3340 		}
3341 	}
3342 
3343 	if (sc->sc_type >= WM_T_82542_2_1) {
3344 		evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
3345 		    NULL, xname, "XOFF Transmitted");
3346 		evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
3347 		    NULL, xname, "XON Transmitted");
3348 		evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
3349 		    NULL, xname, "XOFF Received");
3350 		evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
3351 		    NULL, xname, "XON Received");
3352 		evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
3353 		    NULL, xname, "FC Received Unsupported");
3354 	}
3355 
3356 	evcnt_attach_dynamic(&sc->sc_ev_scc, EVCNT_TYPE_MISC,
3357 	    NULL, xname, "Single Collision");
3358 	evcnt_attach_dynamic(&sc->sc_ev_ecol, EVCNT_TYPE_MISC,
3359 	    NULL, xname, "Excessive Collisions");
3360 	evcnt_attach_dynamic(&sc->sc_ev_mcc, EVCNT_TYPE_MISC,
3361 	    NULL, xname, "Multiple Collision");
3362 	evcnt_attach_dynamic(&sc->sc_ev_latecol, EVCNT_TYPE_MISC,
3363 	    NULL, xname, "Late Collisions");
3364 
3365 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
3366 		evcnt_attach_dynamic(&sc->sc_ev_cbtmpc, EVCNT_TYPE_MISC,
3367 		    NULL, xname, "Circuit Breaker Tx Manageability Packet");
3368 
3369 	evcnt_attach_dynamic(&sc->sc_ev_dc, EVCNT_TYPE_MISC,
3370 	    NULL, xname, "Defer");
3371 	evcnt_attach_dynamic(&sc->sc_ev_prc64, EVCNT_TYPE_MISC,
3372 	    NULL, xname, "Packets Rx (64 bytes)");
3373 	evcnt_attach_dynamic(&sc->sc_ev_prc127, EVCNT_TYPE_MISC,
3374 	    NULL, xname, "Packets Rx (65-127 bytes)");
3375 	evcnt_attach_dynamic(&sc->sc_ev_prc255, EVCNT_TYPE_MISC,
3376 	    NULL, xname, "Packets Rx (128-255 bytes)");
3377 	evcnt_attach_dynamic(&sc->sc_ev_prc511, EVCNT_TYPE_MISC,
3378 	    NULL, xname, "Packets Rx (256-511 bytes)");
3379 	evcnt_attach_dynamic(&sc->sc_ev_prc1023, EVCNT_TYPE_MISC,
3380 	    NULL, xname, "Packets Rx (512-1023 bytes)");
3381 	evcnt_attach_dynamic(&sc->sc_ev_prc1522, EVCNT_TYPE_MISC,
3382 	    NULL, xname, "Packets Rx (1024-1522 bytes)");
3383 	evcnt_attach_dynamic(&sc->sc_ev_gprc, EVCNT_TYPE_MISC,
3384 	    NULL, xname, "Good Packets Rx");
3385 	evcnt_attach_dynamic(&sc->sc_ev_bprc, EVCNT_TYPE_MISC,
3386 	    NULL, xname, "Broadcast Packets Rx");
3387 	evcnt_attach_dynamic(&sc->sc_ev_mprc, EVCNT_TYPE_MISC,
3388 	    NULL, xname, "Multicast Packets Rx");
3389 	evcnt_attach_dynamic(&sc->sc_ev_gptc, EVCNT_TYPE_MISC,
3390 	    NULL, xname, "Good Packets Tx");
3391 	evcnt_attach_dynamic(&sc->sc_ev_gorc, EVCNT_TYPE_MISC,
3392 	    NULL, xname, "Good Octets Rx");
3393 	evcnt_attach_dynamic(&sc->sc_ev_gotc, EVCNT_TYPE_MISC,
3394 	    NULL, xname, "Good Octets Tx");
3395 	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
3396 	    NULL, xname, "Rx No Buffers");
3397 	evcnt_attach_dynamic(&sc->sc_ev_ruc, EVCNT_TYPE_MISC,
3398 	    NULL, xname, "Rx Undersize (valid CRC)");
3399 	evcnt_attach_dynamic(&sc->sc_ev_rfc, EVCNT_TYPE_MISC,
3400 	    NULL, xname, "Rx Fragment (bad CRC)");
3401 	evcnt_attach_dynamic(&sc->sc_ev_roc, EVCNT_TYPE_MISC,
3402 	    NULL, xname, "Rx Oversize (valid CRC)");
3403 	evcnt_attach_dynamic(&sc->sc_ev_rjc, EVCNT_TYPE_MISC,
3404 	    NULL, xname, "Rx Jabber (bad CRC)");
3405 	if (sc->sc_type >= WM_T_82540) {
3406 		evcnt_attach_dynamic(&sc->sc_ev_mgtprc, EVCNT_TYPE_MISC,
3407 		    NULL, xname, "Management Packets RX");
3408 		evcnt_attach_dynamic(&sc->sc_ev_mgtpdc, EVCNT_TYPE_MISC,
3409 		    NULL, xname, "Management Packets Dropped");
3410 		evcnt_attach_dynamic(&sc->sc_ev_mgtptc, EVCNT_TYPE_MISC,
3411 		    NULL, xname, "Management Packets TX");
3412 	}
3413 	evcnt_attach_dynamic(&sc->sc_ev_tor, EVCNT_TYPE_MISC,
3414 	    NULL, xname, "Total Octets Rx");
3415 	evcnt_attach_dynamic(&sc->sc_ev_tot, EVCNT_TYPE_MISC,
3416 	    NULL, xname, "Total Octets Tx");
3417 	evcnt_attach_dynamic(&sc->sc_ev_tpr, EVCNT_TYPE_MISC,
3418 	    NULL, xname, "Total Packets Rx");
3419 	evcnt_attach_dynamic(&sc->sc_ev_tpt, EVCNT_TYPE_MISC,
3420 	    NULL, xname, "Total Packets Tx");
3421 	evcnt_attach_dynamic(&sc->sc_ev_ptc64, EVCNT_TYPE_MISC,
3422 	    NULL, xname, "Packets Tx (64 bytes)");
3423 	evcnt_attach_dynamic(&sc->sc_ev_ptc127, EVCNT_TYPE_MISC,
3424 	    NULL, xname, "Packets Tx (65-127 bytes)");
3425 	evcnt_attach_dynamic(&sc->sc_ev_ptc255, EVCNT_TYPE_MISC,
3426 	    NULL, xname, "Packets Tx (128-255 bytes)");
3427 	evcnt_attach_dynamic(&sc->sc_ev_ptc511, EVCNT_TYPE_MISC,
3428 	    NULL, xname, "Packets Tx (256-511 bytes)");
3429 	evcnt_attach_dynamic(&sc->sc_ev_ptc1023, EVCNT_TYPE_MISC,
3430 	    NULL, xname, "Packets Tx (512-1023 bytes)");
3431 	evcnt_attach_dynamic(&sc->sc_ev_ptc1522, EVCNT_TYPE_MISC,
3432 	    NULL, xname, "Packets Tx (1024-1522 Bytes)");
3433 	evcnt_attach_dynamic(&sc->sc_ev_mptc, EVCNT_TYPE_MISC,
3434 	    NULL, xname, "Multicast Packets Tx");
3435 	evcnt_attach_dynamic(&sc->sc_ev_bptc, EVCNT_TYPE_MISC,
3436 	    NULL, xname, "Broadcast Packets Tx");
3437 	if (sc->sc_type >= WM_T_82571) /* PCIe, 80003 and ICH/PCHs */
3438 		evcnt_attach_dynamic(&sc->sc_ev_iac, EVCNT_TYPE_MISC,
3439 		    NULL, xname, "Interrupt Assertion");
3440 	if (sc->sc_type < WM_T_82575) {
3441 		evcnt_attach_dynamic(&sc->sc_ev_icrxptc, EVCNT_TYPE_MISC,
3442 		    NULL, xname, "Intr. Cause Rx Pkt Timer Expire");
3443 		evcnt_attach_dynamic(&sc->sc_ev_icrxatc, EVCNT_TYPE_MISC,
3444 		    NULL, xname, "Intr. Cause Rx Abs Timer Expire");
3445 		evcnt_attach_dynamic(&sc->sc_ev_ictxptc, EVCNT_TYPE_MISC,
3446 		    NULL, xname, "Intr. Cause Tx Pkt Timer Expire");
3447 		evcnt_attach_dynamic(&sc->sc_ev_ictxatc, EVCNT_TYPE_MISC,
3448 		    NULL, xname, "Intr. Cause Tx Abs Timer Expire");
3449 		evcnt_attach_dynamic(&sc->sc_ev_ictxqec, EVCNT_TYPE_MISC,
3450 		    NULL, xname, "Intr. Cause Tx Queue Empty");
3451 		evcnt_attach_dynamic(&sc->sc_ev_ictxqmtc, EVCNT_TYPE_MISC,
3452 		    NULL, xname, "Intr. Cause Tx Queue Min Thresh");
3453 		evcnt_attach_dynamic(&sc->sc_ev_rxdmtc, EVCNT_TYPE_MISC,
3454 		    NULL, xname, "Intr. Cause Rx Desc Min Thresh");
3455 
3456 		/* XXX 82575 document says it has ICRXOC. Is that right? */
3457 		evcnt_attach_dynamic(&sc->sc_ev_icrxoc, EVCNT_TYPE_MISC,
3458 		    NULL, xname, "Interrupt Cause Receiver Overrun");
3459 	} else if (!WM_IS_ICHPCH(sc)) {
3460 		/*
3461 		 * For 82575 and newer.
3462 		 *
3463 		 * On 80003, ICHs and PCHs, it seems all of the following
3464 		 * registers are zero.
3465 		 */
3466 		evcnt_attach_dynamic(&sc->sc_ev_rpthc, EVCNT_TYPE_MISC,
3467 		    NULL, xname, "Rx Packets To Host");
3468 		evcnt_attach_dynamic(&sc->sc_ev_debug1, EVCNT_TYPE_MISC,
3469 		    NULL, xname, "Debug Counter 1");
3470 		evcnt_attach_dynamic(&sc->sc_ev_debug2, EVCNT_TYPE_MISC,
3471 		    NULL, xname, "Debug Counter 2");
3472 		evcnt_attach_dynamic(&sc->sc_ev_debug3, EVCNT_TYPE_MISC,
3473 		    NULL, xname, "Debug Counter 3");
3474 
3475 		/*
3476 		 * 82575 datasheet says 0x4118 is for TXQEC(Tx Queue Empty).
3477 		 * I think it's wrong. The real count I observed is the same
3478 		 * as GPTC(Good Packets Tx) and TPT(Total Packets Tx).
3479 		 * It's HGPTC(Host Good Packets Tx) which is described in
3480 		 * 82576's datasheet.
3481 		 */
3482 		evcnt_attach_dynamic(&sc->sc_ev_hgptc, EVCNT_TYPE_MISC,
3483 		    NULL, xname, "Host Good Packets TX");
3484 
3485 		evcnt_attach_dynamic(&sc->sc_ev_debug4, EVCNT_TYPE_MISC,
3486 		    NULL, xname, "Debug Counter 4");
3487 		evcnt_attach_dynamic(&sc->sc_ev_rxdmtc, EVCNT_TYPE_MISC,
3488 		    NULL, xname, "Rx Desc Min Thresh");
3489 		/* XXX Is the circuit breaker only for 82576? */
3490 		evcnt_attach_dynamic(&sc->sc_ev_htcbdpc, EVCNT_TYPE_MISC,
3491 		    NULL, xname, "Host Tx Circuit Breaker Dropped Packets");
3492 
3493 		evcnt_attach_dynamic(&sc->sc_ev_hgorc, EVCNT_TYPE_MISC,
3494 		    NULL, xname, "Host Good Octets Rx");
3495 		evcnt_attach_dynamic(&sc->sc_ev_hgotc, EVCNT_TYPE_MISC,
3496 		    NULL, xname, "Host Good Octets Tx");
3497 		evcnt_attach_dynamic(&sc->sc_ev_lenerrs, EVCNT_TYPE_MISC,
3498 		    NULL, xname, "Length Errors (length/type <= 1500)");
3499 		evcnt_attach_dynamic(&sc->sc_ev_scvpc, EVCNT_TYPE_MISC,
3500 		    NULL, xname, "SerDes/SGMII Code Violation Packet");
3501 		evcnt_attach_dynamic(&sc->sc_ev_hrmpc, EVCNT_TYPE_MISC,
3502 		    NULL, xname, "Header Redirection Missed Packet");
3503 	}
3504 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
3505 		evcnt_attach_dynamic(&sc->sc_ev_tlpic, EVCNT_TYPE_MISC,
3506 		    NULL, xname, "EEE Tx LPI");
3507 		evcnt_attach_dynamic(&sc->sc_ev_rlpic, EVCNT_TYPE_MISC,
3508 		    NULL, xname, "EEE Rx LPI");
3509 		evcnt_attach_dynamic(&sc->sc_ev_b2ogprc, EVCNT_TYPE_MISC,
3510 		    NULL, xname, "BMC2OS Packets received by host");
3511 		evcnt_attach_dynamic(&sc->sc_ev_o2bspc, EVCNT_TYPE_MISC,
3512 		    NULL, xname, "OS2BMC Packets transmitted by host");
3513 		evcnt_attach_dynamic(&sc->sc_ev_b2ospc, EVCNT_TYPE_MISC,
3514 		    NULL, xname, "BMC2OS Packets sent by BMC");
3515 		evcnt_attach_dynamic(&sc->sc_ev_o2bgptc, EVCNT_TYPE_MISC,
3516 		    NULL, xname, "OS2BMC Packets received by BMC");
3517 	}
3518 #endif /* WM_EVENT_COUNTERS */
3519 
3520 	sc->sc_txrx_use_workqueue = false;
3521 
3522 	if (wm_phy_need_linkdown_discard(sc)) {
3523 		DPRINTF(sc, WM_DEBUG_LINK,
3524 		    ("%s: %s: Set linkdown discard flag\n",
3525 			device_xname(sc->sc_dev), __func__));
3526 		wm_set_linkdown_discard(sc);
3527 	}
3528 
3529 	wm_init_sysctls(sc);
3530 
3531 	if (pmf_device_register(self, wm_suspend, wm_resume))
3532 		pmf_class_network_register(self, ifp);
3533 	else
3534 		aprint_error_dev(self, "couldn't establish power handler\n");
3535 
3536 	sc->sc_flags |= WM_F_ATTACHED;
3537 out:
3538 	return;
3539 }
3540 
3541 /* The detach function (ca_detach) */
3542 static int
3543 wm_detach(device_t self, int flags __unused)
3544 {
3545 	struct wm_softc *sc = device_private(self);
3546 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3547 	int i;
3548 
3549 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
3550 		return 0;
3551 
3552 	/* Stop the interface. Callouts are stopped in it. */
3553 	IFNET_LOCK(ifp);
3554 	sc->sc_dying = true;
3555 	wm_stop(ifp, 1);
3556 	IFNET_UNLOCK(ifp);
3557 
3558 	pmf_device_deregister(self);
3559 
3560 	sysctl_teardown(&sc->sc_sysctllog);
3561 
3562 #ifdef WM_EVENT_COUNTERS
3563 	evcnt_detach(&sc->sc_ev_linkintr);
3564 
3565 	evcnt_detach(&sc->sc_ev_crcerrs);
3566 	evcnt_detach(&sc->sc_ev_symerrc);
3567 	evcnt_detach(&sc->sc_ev_mpc);
3568 	evcnt_detach(&sc->sc_ev_colc);
3569 	evcnt_detach(&sc->sc_ev_sec);
3570 	evcnt_detach(&sc->sc_ev_rlec);
3571 
3572 	if (sc->sc_type >= WM_T_82543) {
3573 		evcnt_detach(&sc->sc_ev_algnerrc);
3574 		evcnt_detach(&sc->sc_ev_rxerrc);
3575 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
3576 			evcnt_detach(&sc->sc_ev_cexterr);
3577 		else
3578 			evcnt_detach(&sc->sc_ev_htdpmc);
3579 
3580 		evcnt_detach(&sc->sc_ev_tncrs);
3581 		evcnt_detach(&sc->sc_ev_tsctc);
3582 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
3583 			evcnt_detach(&sc->sc_ev_tsctfc);
3584 		else {
3585 			evcnt_detach(&sc->sc_ev_cbrdpc);
3586 			evcnt_detach(&sc->sc_ev_cbrmpc);
3587 		}
3588 	}
3589 
3590 	if (sc->sc_type >= WM_T_82542_2_1) {
3591 		evcnt_detach(&sc->sc_ev_tx_xoff);
3592 		evcnt_detach(&sc->sc_ev_tx_xon);
3593 		evcnt_detach(&sc->sc_ev_rx_xoff);
3594 		evcnt_detach(&sc->sc_ev_rx_xon);
3595 		evcnt_detach(&sc->sc_ev_rx_macctl);
3596 	}
3597 
3598 	evcnt_detach(&sc->sc_ev_scc);
3599 	evcnt_detach(&sc->sc_ev_ecol);
3600 	evcnt_detach(&sc->sc_ev_mcc);
3601 	evcnt_detach(&sc->sc_ev_latecol);
3602 
3603 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
3604 		evcnt_detach(&sc->sc_ev_cbtmpc);
3605 
3606 	evcnt_detach(&sc->sc_ev_dc);
3607 	evcnt_detach(&sc->sc_ev_prc64);
3608 	evcnt_detach(&sc->sc_ev_prc127);
3609 	evcnt_detach(&sc->sc_ev_prc255);
3610 	evcnt_detach(&sc->sc_ev_prc511);
3611 	evcnt_detach(&sc->sc_ev_prc1023);
3612 	evcnt_detach(&sc->sc_ev_prc1522);
3613 	evcnt_detach(&sc->sc_ev_gprc);
3614 	evcnt_detach(&sc->sc_ev_bprc);
3615 	evcnt_detach(&sc->sc_ev_mprc);
3616 	evcnt_detach(&sc->sc_ev_gptc);
3617 	evcnt_detach(&sc->sc_ev_gorc);
3618 	evcnt_detach(&sc->sc_ev_gotc);
3619 	evcnt_detach(&sc->sc_ev_rnbc);
3620 	evcnt_detach(&sc->sc_ev_ruc);
3621 	evcnt_detach(&sc->sc_ev_rfc);
3622 	evcnt_detach(&sc->sc_ev_roc);
3623 	evcnt_detach(&sc->sc_ev_rjc);
3624 	if (sc->sc_type >= WM_T_82540) {
3625 		evcnt_detach(&sc->sc_ev_mgtprc);
3626 		evcnt_detach(&sc->sc_ev_mgtpdc);
3627 		evcnt_detach(&sc->sc_ev_mgtptc);
3628 	}
3629 	evcnt_detach(&sc->sc_ev_tor);
3630 	evcnt_detach(&sc->sc_ev_tot);
3631 	evcnt_detach(&sc->sc_ev_tpr);
3632 	evcnt_detach(&sc->sc_ev_tpt);
3633 	evcnt_detach(&sc->sc_ev_ptc64);
3634 	evcnt_detach(&sc->sc_ev_ptc127);
3635 	evcnt_detach(&sc->sc_ev_ptc255);
3636 	evcnt_detach(&sc->sc_ev_ptc511);
3637 	evcnt_detach(&sc->sc_ev_ptc1023);
3638 	evcnt_detach(&sc->sc_ev_ptc1522);
3639 	evcnt_detach(&sc->sc_ev_mptc);
3640 	evcnt_detach(&sc->sc_ev_bptc);
3641 	if (sc->sc_type >= WM_T_82571)
3642 		evcnt_detach(&sc->sc_ev_iac);
3643 	if (sc->sc_type < WM_T_82575) {
3644 		evcnt_detach(&sc->sc_ev_icrxptc);
3645 		evcnt_detach(&sc->sc_ev_icrxatc);
3646 		evcnt_detach(&sc->sc_ev_ictxptc);
3647 		evcnt_detach(&sc->sc_ev_ictxatc);
3648 		evcnt_detach(&sc->sc_ev_ictxqec);
3649 		evcnt_detach(&sc->sc_ev_ictxqmtc);
3650 		evcnt_detach(&sc->sc_ev_rxdmtc);
3651 		evcnt_detach(&sc->sc_ev_icrxoc);
3652 	} else if (!WM_IS_ICHPCH(sc)) {
3653 		evcnt_detach(&sc->sc_ev_rpthc);
3654 		evcnt_detach(&sc->sc_ev_debug1);
3655 		evcnt_detach(&sc->sc_ev_debug2);
3656 		evcnt_detach(&sc->sc_ev_debug3);
3657 		evcnt_detach(&sc->sc_ev_hgptc);
3658 		evcnt_detach(&sc->sc_ev_debug4);
3659 		evcnt_detach(&sc->sc_ev_rxdmtc);
3660 		evcnt_detach(&sc->sc_ev_htcbdpc);
3661 
3662 		evcnt_detach(&sc->sc_ev_hgorc);
3663 		evcnt_detach(&sc->sc_ev_hgotc);
3664 		evcnt_detach(&sc->sc_ev_lenerrs);
3665 		evcnt_detach(&sc->sc_ev_scvpc);
3666 		evcnt_detach(&sc->sc_ev_hrmpc);
3667 	}
3668 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
3669 		evcnt_detach(&sc->sc_ev_tlpic);
3670 		evcnt_detach(&sc->sc_ev_rlpic);
3671 		evcnt_detach(&sc->sc_ev_b2ogprc);
3672 		evcnt_detach(&sc->sc_ev_o2bspc);
3673 		evcnt_detach(&sc->sc_ev_b2ospc);
3674 		evcnt_detach(&sc->sc_ev_o2bgptc);
3675 	}
3676 #endif /* WM_EVENT_COUNTERS */
3677 
3678 	rnd_detach_source(&sc->rnd_source);
3679 
3680 	/* Tell the firmware about the release */
3681 	mutex_enter(sc->sc_core_lock);
3682 	wm_release_manageability(sc);
3683 	wm_release_hw_control(sc);
3684 	wm_enable_wakeup(sc);
3685 	mutex_exit(sc->sc_core_lock);
3686 
3687 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
3688 
3689 	ether_ifdetach(ifp);
3690 	if_detach(ifp);
3691 	if_percpuq_destroy(sc->sc_ipq);
3692 
3693 	/* Delete all remaining media. */
3694 	ifmedia_fini(&sc->sc_mii.mii_media);
3695 
3696 	/* Unload RX dmamaps and free mbufs */
3697 	for (i = 0; i < sc->sc_nqueues; i++) {
3698 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
3699 		mutex_enter(rxq->rxq_lock);
3700 		wm_rxdrain(rxq);
3701 		mutex_exit(rxq->rxq_lock);
3702 	}
3703 	/* Must unlock here */
3704 
3705 	/* Disestablish the interrupt handler */
3706 	for (i = 0; i < sc->sc_nintrs; i++) {
3707 		if (sc->sc_ihs[i] != NULL) {
3708 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
3709 			sc->sc_ihs[i] = NULL;
3710 		}
3711 	}
3712 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
3713 
3714 	/* wm_stop() ensured that the workqueues are stopped. */
3715 	workqueue_destroy(sc->sc_queue_wq);
3716 	workqueue_destroy(sc->sc_reset_wq);
3717 
3718 	for (i = 0; i < sc->sc_nqueues; i++)
3719 		softint_disestablish(sc->sc_queue[i].wmq_si);
3720 
3721 	wm_free_txrx_queues(sc);
3722 
3723 	/* Unmap the registers */
3724 	if (sc->sc_ss) {
3725 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
3726 		sc->sc_ss = 0;
3727 	}
3728 	if (sc->sc_ios) {
3729 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
3730 		sc->sc_ios = 0;
3731 	}
3732 	if (sc->sc_flashs) {
3733 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
3734 		sc->sc_flashs = 0;
3735 	}
3736 
3737 	if (sc->sc_core_lock)
3738 		mutex_obj_free(sc->sc_core_lock);
3739 	if (sc->sc_ich_phymtx)
3740 		mutex_obj_free(sc->sc_ich_phymtx);
3741 	if (sc->sc_ich_nvmmtx)
3742 		mutex_obj_free(sc->sc_ich_nvmmtx);
3743 
3744 	return 0;
3745 }
3746 
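/*
 * wm_suspend:
 *
 *	PMF suspend handler: hand the hardware back to the firmware
 *	and enable wakeup (WoL) before the system sleeps.
 */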
3747 static bool
3748 wm_suspend(device_t self, const pmf_qual_t *qual)
3749 {
3750 	struct wm_softc *sc = device_private(self);
3751 
3752 	wm_release_manageability(sc);
3753 	wm_release_hw_control(sc);
3754 	wm_enable_wakeup(sc);
3755 
3756 	return true;
3757 }
3758 
3759 static bool
3760 wm_resume(device_t self, const pmf_qual_t *qual)
3761 {
3762 	struct wm_softc *sc = device_private(self);
3763 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3764 	pcireg_t reg;
3765 	char buf[256];
3766 
3767 	reg = CSR_READ(sc, WMREG_WUS);
3768 	if (reg != 0) {
3769 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
3770 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
3771 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
3772 	}
3773 
3774 	if (sc->sc_type >= WM_T_PCH2)
3775 		wm_resume_workarounds_pchlan(sc);
3776 	IFNET_LOCK(ifp);
3777 	if ((ifp->if_flags & IFF_UP) == 0) {
3778 		/* >= PCH_SPT hardware workaround before reset. */
3779 		if (sc->sc_type >= WM_T_PCH_SPT)
3780 			wm_flush_desc_rings(sc);
3781 
3782 		wm_reset(sc);
3783 		/* Non-AMT based hardware can now take control from firmware */
3784 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
3785 			wm_get_hw_control(sc);
3786 		wm_init_manageability(sc);
3787 	} else {
3788 		/*
3789 		 * We called pmf_class_network_register(), so if_init() is
3790 		 * automatically called when IFF_UP. wm_reset(),
3791 		 * wm_get_hw_control() and wm_init_manageability() are called
3792 		 * via wm_init().
3793 		 */
3794 	}
3795 	IFNET_UNLOCK(ifp);
3796 
3797 	return true;
3798 }
3799 
3800 /*
3801  * wm_watchdog:
3802  *
3803  *	Watchdog checker.
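 *	Returns true when all Tx queues look healthy (the caller then
 *	reschedules the tick callout); otherwise schedules the reset
 *	work and returns false.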
3804  */
3805 static bool
3806 wm_watchdog(struct ifnet *ifp)
3807 {
3808 	int qid;
3809 	struct wm_softc *sc = ifp->if_softc;
	uint16_t hang_queue = 0; /* Bitmap of hung queues; max is 82576's 16. */
3811 
3812 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
3813 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
3814 
3815 		wm_watchdog_txq(ifp, txq, &hang_queue);
3816 	}
3817 
3818 #ifdef WM_DEBUG
3819 	if (sc->sc_trigger_reset) {
3820 		/* debug operation, no need for atomicity or reliability */
3821 		sc->sc_trigger_reset = 0;
3822 		hang_queue++;
3823 	}
3824 #endif
3825 
3826 	if (hang_queue == 0)
3827 		return true;
3828 
3829 	if (atomic_swap_uint(&sc->sc_reset_pending, 1) == 0)
3830 		workqueue_enqueue(sc->sc_reset_wq, &sc->sc_reset_work, NULL);
3831 
3832 	return false;
3833 }
3834 
3835 /*
3836  * Perform an interface watchdog reset.
3837  */
3838 static void
3839 wm_handle_reset_work(struct work *work, void *arg)
3840 {
3841 	struct wm_softc * const sc = arg;
3842 	struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
3843 
3844 	/* Don't want ioctl operations to happen */
3845 	IFNET_LOCK(ifp);
3846 
	/* Reset the interface. */
3848 	wm_init(ifp);
3849 
3850 	IFNET_UNLOCK(ifp);
3851 
3852 	/*
	 * There is still some upper-layer processing which calls
	 * ifp->if_start(), e.g. ALTQ or a single-CPU system.
3855 	 */
3856 	/* Try to get more packets going. */
3857 	ifp->if_start(ifp);
3858 
3859 	atomic_store_relaxed(&sc->sc_reset_pending, 0);
3860 }
3861 
3862 
3863 static void
3864 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
3865 {
3866 
3867 	mutex_enter(txq->txq_lock);
3868 	if (txq->txq_sending &&
3869 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
3870 		wm_watchdog_txq_locked(ifp, txq, hang);
3871 
3872 	mutex_exit(txq->txq_lock);
3873 }
3874 
3875 static void
3876 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
3877     uint16_t *hang)
3878 {
3879 	struct wm_softc *sc = ifp->if_softc;
3880 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
3881 
3882 	KASSERT(mutex_owned(txq->txq_lock));
3883 
3884 	/*
3885 	 * Since we're using delayed interrupts, sweep up
3886 	 * before we report an error.
3887 	 */
3888 	wm_txeof(txq, UINT_MAX);
3889 
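	/* If the queue is still marked sending after the sweep, it's hung. */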
3890 	if (txq->txq_sending)
3891 		*hang |= __BIT(wmq->wmq_id);
3892 
3893 	if (txq->txq_free == WM_NTXDESC(txq)) {
3894 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
3895 		    device_xname(sc->sc_dev));
3896 	} else {
3897 #ifdef WM_DEBUG
3898 		int i, j;
3899 		struct wm_txsoft *txs;
3900 #endif
3901 		log(LOG_ERR,
3902 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3903 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
3904 		    txq->txq_next);
3905 		if_statinc(ifp, if_oerrors);
3906 #ifdef WM_DEBUG
3907 		for (i = txq->txq_sdirty; i != txq->txq_snext;
3908 		     i = WM_NEXTTXS(txq, i)) {
3909 			txs = &txq->txq_soft[i];
3910 			printf("txs %d tx %d -> %d\n",
3911 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
3912 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
3913 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3914 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3915 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
3916 					printf("\t %#08x%08x\n",
3917 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
3918 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
3919 				} else {
3920 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3921 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
3922 					    txq->txq_descs[j].wtx_addr.wa_low);
3923 					printf("\t %#04x%02x%02x%08x\n",
3924 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
3925 					    txq->txq_descs[j].wtx_fields.wtxu_options,
3926 					    txq->txq_descs[j].wtx_fields.wtxu_status,
3927 					    txq->txq_descs[j].wtx_cmdlen);
3928 				}
3929 				if (j == txs->txs_lastdesc)
3930 					break;
3931 			}
3932 		}
3933 #endif
3934 	}
3935 }
3936 
3937 /*
3938  * wm_tick:
3939  *
3940  *	One second timer, used to check link status, sweep up
3941  *	completed transmit jobs, etc.
3942  */
3943 static void
3944 wm_tick(void *arg)
3945 {
3946 	struct wm_softc *sc = arg;
3947 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3948 
3949 	mutex_enter(sc->sc_core_lock);
3950 
3951 	if (sc->sc_core_stopping) {
3952 		mutex_exit(sc->sc_core_lock);
3953 		return;
3954 	}
3955 
3956 	wm_update_stats(sc);
3957 
3958 	if (sc->sc_flags & WM_F_HAS_MII) {
3959 		bool dotick = true;
3960 
3961 		/*
3962 		 * Workaround for some chips to delay sending LINK_STATE_UP.
3963 		 * See also wm_linkintr_gmii() and wm_gmii_mediastatus().
3964 		 */
3965 		if ((sc->sc_flags & WM_F_DELAY_LINKUP) != 0) {
3966 			struct timeval now;
3967 
3968 			getmicrotime(&now);
3969 			if (timercmp(&now, &sc->sc_linkup_delay_time, <))
3970 				dotick = false;
3971 			else if (sc->sc_linkup_delay_time.tv_sec != 0) {
3972 				/* Simplify by checking tv_sec only. */
3973 
3974 				sc->sc_linkup_delay_time.tv_sec = 0;
3975 				sc->sc_linkup_delay_time.tv_usec = 0;
3976 			}
3977 		}
3978 		if (dotick)
3979 			mii_tick(&sc->sc_mii);
3980 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
3981 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3982 		wm_serdes_tick(sc);
3983 	else
3984 		wm_tbi_tick(sc);
3985 
3986 	mutex_exit(sc->sc_core_lock);
3987 
3988 	if (wm_watchdog(ifp))
3989 		callout_schedule(&sc->sc_tick_ch, hz);
3990 }
3991 
3992 static int
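/*
 * wm_ifflags_cb:
 *
 *	Callback for if_flags and ec_capenable changes. Returns
 *	ENETRESET when a change requires a full re-initialization of
 *	the interface; lightweight changes are applied directly.
 */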
3993 wm_ifflags_cb(struct ethercom *ec)
3994 {
3995 	struct ifnet *ifp = &ec->ec_if;
3996 	struct wm_softc *sc = ifp->if_softc;
3997 	u_short iffchange;
3998 	int ecchange;
3999 	bool needreset = false;
4000 	int rc = 0;
4001 
4002 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4003 		device_xname(sc->sc_dev), __func__));
4004 
4005 	KASSERT(IFNET_LOCKED(ifp));
4006 
4007 	mutex_enter(sc->sc_core_lock);
4008 
4009 	/*
	 * Check for changes in if_flags.
	 * The main purpose is to avoid a full reset (and the resulting
	 * link down) when bpf is opened and only IFF_PROMISC changes.
4012 	 */
4013 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
4014 	sc->sc_if_flags = ifp->if_flags;
4015 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
4016 		needreset = true;
4017 		goto ec;
4018 	}
4019 
4020 	/* iff related updates */
4021 	if ((iffchange & IFF_PROMISC) != 0)
4022 		wm_set_filter(sc);
4023 
4024 	wm_set_vlan(sc);
4025 
4026 ec:
4027 	/* Check for ec_capenable. */
4028 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
4029 	sc->sc_ec_capenable = ec->ec_capenable;
4030 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
4031 		needreset = true;
4032 		goto out;
4033 	}
4034 
4035 	/* ec related updates */
4036 	wm_set_eee(sc);
4037 
4038 out:
4039 	if (needreset)
4040 		rc = ENETRESET;
4041 	mutex_exit(sc->sc_core_lock);
4042 
4043 	return rc;
4044 }
4045 
4046 static bool
4047 wm_phy_need_linkdown_discard(struct wm_softc *sc)
4048 {
4049 
4050 	switch (sc->sc_phytype) {
4051 	case WMPHY_82577: /* ihphy */
4052 	case WMPHY_82578: /* atphy */
4053 	case WMPHY_82579: /* ihphy */
4054 	case WMPHY_I217: /* ihphy */
4055 	case WMPHY_82580: /* ihphy */
4056 	case WMPHY_I350: /* ihphy */
4057 		return true;
4058 	default:
4059 		return false;
4060 	}
4061 }
4062 
4063 static void
4064 wm_set_linkdown_discard(struct wm_softc *sc)
4065 {
4066 
4067 	for (int i = 0; i < sc->sc_nqueues; i++) {
4068 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
4069 
4070 		mutex_enter(txq->txq_lock);
4071 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
4072 		mutex_exit(txq->txq_lock);
4073 	}
4074 }
4075 
4076 static void
4077 wm_clear_linkdown_discard(struct wm_softc *sc)
4078 {
4079 
4080 	for (int i = 0; i < sc->sc_nqueues; i++) {
4081 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
4082 
4083 		mutex_enter(txq->txq_lock);
4084 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
4085 		mutex_exit(txq->txq_lock);
4086 	}
4087 }
4088 
4089 /*
4090  * wm_ioctl:		[ifnet interface function]
4091  *
4092  *	Handle control requests from the operator.
4093  */
4094 static int
4095 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
4096 {
4097 	struct wm_softc *sc = ifp->if_softc;
4098 	struct ifreq *ifr = (struct ifreq *)data;
4099 	struct ifaddr *ifa = (struct ifaddr *)data;
4100 	struct sockaddr_dl *sdl;
4101 	int error;
4102 
4103 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4104 		device_xname(sc->sc_dev), __func__));
4105 
4106 	switch (cmd) {
4107 	case SIOCADDMULTI:
4108 	case SIOCDELMULTI:
4109 		break;
4110 	default:
4111 		KASSERT(IFNET_LOCKED(ifp));
4112 	}
4113 
4114 	if (cmd == SIOCZIFDATA) {
4115 		/*
4116 		 * Special handling for SIOCZIFDATA.
4117 		 * Copying and clearing the if_data structure is done with
4118 		 * ether_ioctl() below.
4119 		 */
4120 		mutex_enter(sc->sc_core_lock);
4121 		wm_update_stats(sc);
4122 		wm_clear_evcnt(sc);
4123 		mutex_exit(sc->sc_core_lock);
4124 	}
4125 
4126 	switch (cmd) {
4127 	case SIOCSIFMEDIA:
4128 		mutex_enter(sc->sc_core_lock);
4129 		/* Flow control requires full-duplex mode. */
4130 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
4131 		    (ifr->ifr_media & IFM_FDX) == 0)
4132 			ifr->ifr_media &= ~IFM_ETH_FMASK;
4133 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
4134 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
4135 				/* We can do both TXPAUSE and RXPAUSE. */
4136 				ifr->ifr_media |=
4137 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
4138 			}
4139 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
4140 		}
4141 		mutex_exit(sc->sc_core_lock);
4142 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
4143 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
4144 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE) {
4145 				DPRINTF(sc, WM_DEBUG_LINK,
4146 				    ("%s: %s: Set linkdown discard flag\n",
4147 					device_xname(sc->sc_dev), __func__));
4148 				wm_set_linkdown_discard(sc);
4149 			}
4150 		}
4151 		break;
4152 	case SIOCINITIFADDR:
4153 		mutex_enter(sc->sc_core_lock);
4154 		if (ifa->ifa_addr->sa_family == AF_LINK) {
4155 			sdl = satosdl(ifp->if_dl->ifa_addr);
4156 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
4157 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
4158 			/* Unicast address is the first multicast entry */
4159 			wm_set_filter(sc);
4160 			error = 0;
4161 			mutex_exit(sc->sc_core_lock);
4162 			break;
4163 		}
4164 		mutex_exit(sc->sc_core_lock);
4165 		/*FALLTHROUGH*/
4166 	default:
4167 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
4168 			if (((ifp->if_flags & IFF_UP) != 0) &&
4169 			    ((ifr->ifr_flags & IFF_UP) == 0)) {
4170 				DPRINTF(sc, WM_DEBUG_LINK,
4171 				    ("%s: %s: Set linkdown discard flag\n",
4172 					device_xname(sc->sc_dev), __func__));
4173 				wm_set_linkdown_discard(sc);
4174 			}
4175 		}
4176 		const int s = splnet();
		/* It may call wm_start(), so the core lock must not be held */
4178 		error = ether_ioctl(ifp, cmd, data);
4179 		splx(s);
4180 		if (error != ENETRESET)
4181 			break;
4182 
4183 		error = 0;
4184 
4185 		if (cmd == SIOCSIFCAP)
4186 			error = if_init(ifp);
4187 		else if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
4188 			mutex_enter(sc->sc_core_lock);
4189 			if (sc->sc_if_flags & IFF_RUNNING) {
4190 				/*
4191 				 * Multicast list has changed; set the
4192 				 * hardware filter accordingly.
4193 				 */
4194 				wm_set_filter(sc);
4195 			}
4196 			mutex_exit(sc->sc_core_lock);
4197 		}
4198 		break;
4199 	}
4200 
4201 	return error;
4202 }
4203 
4204 /* MAC address related */
4205 
4206 /*
 * Get the offset of the MAC address and return it.
 * If an error occurs, return offset 0 (the standard MAC address
 * location).
4209  */
4210 static uint16_t
4211 wm_check_alt_mac_addr(struct wm_softc *sc)
4212 {
4213 	uint16_t myea[ETHER_ADDR_LEN / 2];
4214 	uint16_t offset = NVM_OFF_MACADDR;
4215 
4216 	/* Try to read alternative MAC address pointer */
4217 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
4218 		return 0;
4219 
	/* Check whether the pointer is valid. */
4221 	if ((offset == 0x0000) || (offset == 0xffff))
4222 		return 0;
4223 
4224 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
4225 	/*
	 * Check whether the alternative MAC address is valid.
	 * Some cards have a non-0xffff pointer but don't actually
	 * use an alternative MAC address.
	 *
	 * A valid unicast address must have the multicast (group)
	 * bit of the first octet clear, so check it.
4231 	 */
4232 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
4233 		if (((myea[0] & 0xff) & 0x01) == 0)
4234 			return offset; /* Found */
4235 
4236 	/* Not found */
4237 	return 0;
4238 }
4239 
4240 static int
4241 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
4242 {
4243 	uint16_t myea[ETHER_ADDR_LEN / 2];
4244 	uint16_t offset = NVM_OFF_MACADDR;
4245 	int do_invert = 0;
4246 
4247 	switch (sc->sc_type) {
4248 	case WM_T_82580:
4249 	case WM_T_I350:
4250 	case WM_T_I354:
4251 		/* EEPROM Top Level Partitioning */
4252 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
4253 		break;
4254 	case WM_T_82571:
4255 	case WM_T_82575:
4256 	case WM_T_82576:
4257 	case WM_T_80003:
4258 	case WM_T_I210:
4259 	case WM_T_I211:
4260 		offset = wm_check_alt_mac_addr(sc);
4261 		if (offset == 0)
4262 			if ((sc->sc_funcid & 0x01) == 1)
4263 				do_invert = 1;
4264 		break;
4265 	default:
4266 		if ((sc->sc_funcid & 0x01) == 1)
4267 			do_invert = 1;
4268 		break;
4269 	}
4270 
4271 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
4272 		goto bad;
4273 
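	/* The NVM stores the address as three little-endian 16-bit words. */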
4274 	enaddr[0] = myea[0] & 0xff;
4275 	enaddr[1] = myea[0] >> 8;
4276 	enaddr[2] = myea[1] & 0xff;
4277 	enaddr[3] = myea[1] >> 8;
4278 	enaddr[4] = myea[2] & 0xff;
4279 	enaddr[5] = myea[2] >> 8;
4280 
4281 	/*
4282 	 * Toggle the LSB of the MAC address on the second port
4283 	 * of some dual port cards.
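	 * E.g. if port A's stored address ends in 0x54, port B
	 * uses 0x55.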
4284 	 */
4285 	if (do_invert != 0)
4286 		enaddr[5] ^= 1;
4287 
4288 	return 0;
4289 
4290 bad:
4291 	return -1;
4292 }
4293 
4294 /*
4295  * wm_set_ral:
4296  *
 *	Set an entry in the receive address list.
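 *
 *	The address is split across a register pair: RAL holds the
 *	first four bytes and RAH holds the last two plus the Address
 *	Valid bit (RAL_AV); enaddr == NULL clears the entry.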
4298  */
4299 static void
4300 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
4301 {
4302 	uint32_t ral_lo, ral_hi, addrl, addrh;
4303 	uint32_t wlock_mac;
4304 	int rv;
4305 
4306 	if (enaddr != NULL) {
4307 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
4308 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
4309 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
4310 		ral_hi |= RAL_AV;
4311 	} else {
4312 		ral_lo = 0;
4313 		ral_hi = 0;
4314 	}
4315 
4316 	switch (sc->sc_type) {
4317 	case WM_T_82542_2_0:
4318 	case WM_T_82542_2_1:
4319 	case WM_T_82543:
4320 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
4321 		CSR_WRITE_FLUSH(sc);
4322 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
4323 		CSR_WRITE_FLUSH(sc);
4324 		break;
4325 	case WM_T_PCH2:
4326 	case WM_T_PCH_LPT:
4327 	case WM_T_PCH_SPT:
4328 	case WM_T_PCH_CNP:
4329 	case WM_T_PCH_TGP:
4330 		if (idx == 0) {
4331 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
4332 			CSR_WRITE_FLUSH(sc);
4333 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
4334 			CSR_WRITE_FLUSH(sc);
4335 			return;
4336 		}
4337 		if (sc->sc_type != WM_T_PCH2) {
4338 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
4339 			    FWSM_WLOCK_MAC);
4340 			addrl = WMREG_SHRAL(idx - 1);
4341 			addrh = WMREG_SHRAH(idx - 1);
4342 		} else {
4343 			wlock_mac = 0;
4344 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
4345 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
4346 		}
4347 
4348 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
4349 			rv = wm_get_swflag_ich8lan(sc);
4350 			if (rv != 0)
4351 				return;
4352 			CSR_WRITE(sc, addrl, ral_lo);
4353 			CSR_WRITE_FLUSH(sc);
4354 			CSR_WRITE(sc, addrh, ral_hi);
4355 			CSR_WRITE_FLUSH(sc);
4356 			wm_put_swflag_ich8lan(sc);
4357 		}
4358 
4359 		break;
4360 	default:
4361 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
4362 		CSR_WRITE_FLUSH(sc);
4363 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
4364 		CSR_WRITE_FLUSH(sc);
4365 		break;
4366 	}
4367 }
4368 
4369 /*
4370  * wm_mchash:
4371  *
4372  *	Compute the hash of the multicast address for the 4096-bit
4373  *	multicast filter.
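 *
 *	E.g. with sc_mchash_type 0 on a non-ICH/PCH chip the hash is
 *	(enaddr[4] >> 4) | ((uint16_t)enaddr[5] << 4); the low 12 bits
 *	index the 4096-bit table, while ICH/PCH variants keep only 10
 *	bits for their 1024-bit table.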
4374  */
4375 static uint32_t
4376 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
4377 {
4378 	static const int lo_shift[4] = { 4, 3, 2, 0 };
4379 	static const int hi_shift[4] = { 4, 5, 6, 8 };
4380 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
4381 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
4382 	uint32_t hash;
4383 
4384 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4385 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4386 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
4387 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)
4388 	    || (sc->sc_type == WM_T_PCH_TGP)) {
4389 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
4390 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
4391 		return (hash & 0x3ff);
4392 	}
4393 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
4394 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
4395 
4396 	return (hash & 0xfff);
4397 }
4398 
/*
 * wm_rar_count:
 *
 *	Return the number of receive address registers (RAR entries)
 *	available on this chip type.
 */
4403 static int
4404 wm_rar_count(struct wm_softc *sc)
4405 {
4406 	int size;
4407 
4408 	switch (sc->sc_type) {
4409 	case WM_T_ICH8:
		size = WM_RAL_TABSIZE_ICH8 - 1;
4411 		break;
4412 	case WM_T_ICH9:
4413 	case WM_T_ICH10:
4414 	case WM_T_PCH:
4415 		size = WM_RAL_TABSIZE_ICH8;
4416 		break;
4417 	case WM_T_PCH2:
4418 		size = WM_RAL_TABSIZE_PCH2;
4419 		break;
4420 	case WM_T_PCH_LPT:
4421 	case WM_T_PCH_SPT:
4422 	case WM_T_PCH_CNP:
4423 	case WM_T_PCH_TGP:
4424 		size = WM_RAL_TABSIZE_PCH_LPT;
4425 		break;
4426 	case WM_T_82575:
4427 	case WM_T_I210:
4428 	case WM_T_I211:
4429 		size = WM_RAL_TABSIZE_82575;
4430 		break;
4431 	case WM_T_82576:
4432 	case WM_T_82580:
4433 		size = WM_RAL_TABSIZE_82576;
4434 		break;
4435 	case WM_T_I350:
4436 	case WM_T_I354:
4437 		size = WM_RAL_TABSIZE_I350;
4438 		break;
4439 	default:
4440 		size = WM_RAL_TABSIZE;
4441 	}
4442 
4443 	return size;
4444 }
4445 
4446 /*
4447  * wm_set_filter:
4448  *
4449  *	Set up the receive filter.
4450  */
4451 static void
4452 wm_set_filter(struct wm_softc *sc)
4453 {
4454 	struct ethercom *ec = &sc->sc_ethercom;
4455 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4456 	struct ether_multi *enm;
4457 	struct ether_multistep step;
4458 	bus_addr_t mta_reg;
4459 	uint32_t hash, reg, bit;
4460 	int i, size, ralmax, rv;
4461 
4462 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4463 		device_xname(sc->sc_dev), __func__));
4464 	KASSERT(mutex_owned(sc->sc_core_lock));
4465 
4466 	if (sc->sc_type >= WM_T_82544)
4467 		mta_reg = WMREG_CORDOVA_MTA;
4468 	else
4469 		mta_reg = WMREG_MTA;
4470 
4471 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
4472 
4473 	if (sc->sc_if_flags & IFF_BROADCAST)
4474 		sc->sc_rctl |= RCTL_BAM;
4475 	if (sc->sc_if_flags & IFF_PROMISC) {
4476 		sc->sc_rctl |= RCTL_UPE;
4477 		ETHER_LOCK(ec);
4478 		ec->ec_flags |= ETHER_F_ALLMULTI;
4479 		ETHER_UNLOCK(ec);
4480 		goto allmulti;
4481 	}
4482 
4483 	/*
4484 	 * Set the station address in the first RAL slot, and
4485 	 * clear the remaining slots.
4486 	 */
4487 	size = wm_rar_count(sc);
4488 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
4489 
4490 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT) ||
4491 	    (sc->sc_type == WM_T_PCH_CNP) || (sc->sc_type == WM_T_PCH_TGP)) {
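		/*
		 * FWSM_WLOCK_MAC limits how many RAL entries we may
		 * touch: 0 means all of them, 1 means only RAR[0],
		 * otherwise that many SHRA entries plus RAR[0].
		 */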
4492 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
4493 		switch (i) {
4494 		case 0:
4495 			/* We can use all entries */
4496 			ralmax = size;
4497 			break;
4498 		case 1:
4499 			/* Only RAR[0] */
4500 			ralmax = 1;
4501 			break;
4502 		default:
4503 			/* Available SHRA + RAR[0] */
4504 			ralmax = i + 1;
4505 		}
4506 	} else
4507 		ralmax = size;
4508 	for (i = 1; i < size; i++) {
4509 		if (i < ralmax)
4510 			wm_set_ral(sc, NULL, i);
4511 	}
4512 
4513 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4514 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4515 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
4516 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)
4517 	    || (sc->sc_type == WM_T_PCH_TGP))
4518 		size = WM_ICH8_MC_TABSIZE;
4519 	else
4520 		size = WM_MC_TABSIZE;
4521 	/* Clear out the multicast table. */
4522 	for (i = 0; i < size; i++) {
4523 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
4524 		CSR_WRITE_FLUSH(sc);
4525 	}
4526 
4527 	ETHER_LOCK(ec);
4528 	ETHER_FIRST_MULTI(step, ec, enm);
4529 	while (enm != NULL) {
4530 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
4531 			ec->ec_flags |= ETHER_F_ALLMULTI;
4532 			ETHER_UNLOCK(ec);
4533 			/*
4534 			 * We must listen to a range of multicast addresses.
4535 			 * For now, just accept all multicasts, rather than
4536 			 * trying to set only those filter bits needed to match
4537 			 * the range.  (At this time, the only use of address
4538 			 * ranges is for IP multicast routing, for which the
4539 			 * range is big enough to require all bits set.)
4540 			 */
4541 			goto allmulti;
4542 		}
4543 
4544 		hash = wm_mchash(sc, enm->enm_addrlo);
4545 
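		/*
		 * The hash picks one bit in the MTA: the upper bits
		 * select a 32-bit register and the low 5 bits select
		 * the bit within it.
		 */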
4546 		reg = (hash >> 5);
4547 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4548 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4549 		    || (sc->sc_type == WM_T_PCH2)
4550 		    || (sc->sc_type == WM_T_PCH_LPT)
4551 		    || (sc->sc_type == WM_T_PCH_SPT)
4552 		    || (sc->sc_type == WM_T_PCH_CNP)
4553 		    || (sc->sc_type == WM_T_PCH_TGP))
4554 			reg &= 0x1f;
4555 		else
4556 			reg &= 0x7f;
4557 		bit = hash & 0x1f;
4558 
4559 		hash = CSR_READ(sc, mta_reg + (reg << 2));
4560 		hash |= 1U << bit;
4561 
4562 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
4563 			/*
			 * 82544 Errata 9: Certain registers cannot be written
4565 			 * with particular alignments in PCI-X bus operation
4566 			 * (FCAH, MTA and VFTA).
4567 			 */
4568 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
4569 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4570 			CSR_WRITE_FLUSH(sc);
4571 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
4572 			CSR_WRITE_FLUSH(sc);
4573 		} else {
4574 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4575 			CSR_WRITE_FLUSH(sc);
4576 		}
4577 
4578 		ETHER_NEXT_MULTI(step, enm);
4579 	}
4580 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
4581 	ETHER_UNLOCK(ec);
4582 
4583 	goto setit;
4584 
4585 allmulti:
4586 	sc->sc_rctl |= RCTL_MPE;
4587 
4588 setit:
4589 	if (sc->sc_type >= WM_T_PCH2) {
4590 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4591 		    && (ifp->if_mtu > ETHERMTU))
4592 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
4593 		else
4594 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
4595 		if (rv != 0)
4596 			device_printf(sc->sc_dev,
4597 			    "Failed to do workaround for jumbo frame.\n");
4598 	}
4599 
4600 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
4601 }
4602 
4603 /* Reset and init related */
4604 
4605 static void
4606 wm_set_vlan(struct wm_softc *sc)
4607 {
4608 
4609 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4610 		device_xname(sc->sc_dev), __func__));
4611 
4612 	/* Deal with VLAN enables. */
4613 	if (VLAN_ATTACHED(&sc->sc_ethercom))
4614 		sc->sc_ctrl |= CTRL_VME;
4615 	else
4616 		sc->sc_ctrl &= ~CTRL_VME;
4617 
4618 	/* Write the control registers. */
4619 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4620 }
4621 
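/*
 * If the PCIe completion timeout is still at its power-on default
 * of 0, program a longer timeout (10ms via GCR on parts without the
 * version-2 capability, 16ms via the PCIe DCSR2 config register
 * otherwise), and disable completion timeout resend.
 */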
4622 static void
4623 wm_set_pcie_completion_timeout(struct wm_softc *sc)
4624 {
4625 	uint32_t gcr;
4626 	pcireg_t ctrl2;
4627 
4628 	gcr = CSR_READ(sc, WMREG_GCR);
4629 
4630 	/* Only take action if timeout value is defaulted to 0 */
4631 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
4632 		goto out;
4633 
4634 	if ((gcr & GCR_CAP_VER2) == 0) {
4635 		gcr |= GCR_CMPL_TMOUT_10MS;
4636 		goto out;
4637 	}
4638 
4639 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
4640 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
4641 	ctrl2 |= WM_PCIE_DCSR2_16MS;
4642 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
4643 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
4644 
4645 out:
4646 	/* Disable completion timeout resend */
4647 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
4648 
4649 	CSR_WRITE(sc, WMREG_GCR, gcr);
4650 }
4651 
4652 void
4653 wm_get_auto_rd_done(struct wm_softc *sc)
4654 {
4655 	int i;
4656 
4657 	/* Wait for eeprom to reload */
4658 	switch (sc->sc_type) {
4659 	case WM_T_82571:
4660 	case WM_T_82572:
4661 	case WM_T_82573:
4662 	case WM_T_82574:
4663 	case WM_T_82583:
4664 	case WM_T_82575:
4665 	case WM_T_82576:
4666 	case WM_T_82580:
4667 	case WM_T_I350:
4668 	case WM_T_I354:
4669 	case WM_T_I210:
4670 	case WM_T_I211:
4671 	case WM_T_80003:
4672 	case WM_T_ICH8:
4673 	case WM_T_ICH9:
4674 		for (i = 0; i < 10; i++) {
4675 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4676 				break;
4677 			delay(1000);
4678 		}
4679 		if (i == 10) {
4680 			log(LOG_ERR, "%s: auto read from eeprom failed to "
4681 			    "complete\n", device_xname(sc->sc_dev));
4682 		}
4683 		break;
4684 	default:
4685 		break;
4686 	}
4687 }
4688 
4689 void
4690 wm_lan_init_done(struct wm_softc *sc)
4691 {
4692 	uint32_t reg = 0;
4693 	int i;
4694 
4695 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4696 		device_xname(sc->sc_dev), __func__));
4697 
4698 	/* Wait for eeprom to reload */
4699 	switch (sc->sc_type) {
4700 	case WM_T_ICH10:
4701 	case WM_T_PCH:
4702 	case WM_T_PCH2:
4703 	case WM_T_PCH_LPT:
4704 	case WM_T_PCH_SPT:
4705 	case WM_T_PCH_CNP:
4706 	case WM_T_PCH_TGP:
4707 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4708 			reg = CSR_READ(sc, WMREG_STATUS);
4709 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
4710 				break;
4711 			delay(100);
4712 		}
4713 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4714 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
4715 			    "complete\n", device_xname(sc->sc_dev), __func__);
4716 		}
4717 		break;
4718 	default:
4719 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4720 		    __func__);
4721 		break;
4722 	}
4723 
4724 	reg &= ~STATUS_LAN_INIT_DONE;
4725 	CSR_WRITE(sc, WMREG_STATUS, reg);
4726 }
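
/*
 * A minimal sketch of the poll-until-done pattern shared by
 * wm_get_auto_rd_done() and wm_lan_init_done() above (not compiled; the
 * helper is hypothetical): poll a status bit with a fixed delay per
 * iteration and give up after a bounded number of tries.
 */
#if 0
static bool
example_poll_bit(struct wm_softc *sc, int wmreg, uint32_t bit, int tries,
    int delay_us)
{
	int i;

	for (i = 0; i < tries; i++) {
		if ((CSR_READ(sc, wmreg) & bit) != 0)
			return true;	/* Completed */
		delay(delay_us);
	}
	return false;			/* Timed out */
}
#endif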
4727 
4728 void
4729 wm_get_cfg_done(struct wm_softc *sc)
4730 {
4731 	int mask;
4732 	uint32_t reg;
4733 	int i;
4734 
4735 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4736 		device_xname(sc->sc_dev), __func__));
4737 
4738 	/* Wait for eeprom to reload */
4739 	switch (sc->sc_type) {
4740 	case WM_T_82542_2_0:
4741 	case WM_T_82542_2_1:
4742 		/* null */
4743 		break;
4744 	case WM_T_82543:
4745 	case WM_T_82544:
4746 	case WM_T_82540:
4747 	case WM_T_82545:
4748 	case WM_T_82545_3:
4749 	case WM_T_82546:
4750 	case WM_T_82546_3:
4751 	case WM_T_82541:
4752 	case WM_T_82541_2:
4753 	case WM_T_82547:
4754 	case WM_T_82547_2:
4755 	case WM_T_82573:
4756 	case WM_T_82574:
4757 	case WM_T_82583:
4758 		/* generic */
4759 		delay(10*1000);
4760 		break;
4761 	case WM_T_80003:
4762 	case WM_T_82571:
4763 	case WM_T_82572:
4764 	case WM_T_82575:
4765 	case WM_T_82576:
4766 	case WM_T_82580:
4767 	case WM_T_I350:
4768 	case WM_T_I354:
4769 	case WM_T_I210:
4770 	case WM_T_I211:
4771 		if (sc->sc_type == WM_T_82571) {
4772 			/* Only 82571 shares port 0 */
4773 			mask = EEMNGCTL_CFGDONE_0;
4774 		} else
4775 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
4776 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
4777 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
4778 				break;
4779 			delay(1000);
4780 		}
4781 		if (i >= WM_PHY_CFG_TIMEOUT)
4782 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
4783 				device_xname(sc->sc_dev), __func__));
4784 		break;
4785 	case WM_T_ICH8:
4786 	case WM_T_ICH9:
4787 	case WM_T_ICH10:
4788 	case WM_T_PCH:
4789 	case WM_T_PCH2:
4790 	case WM_T_PCH_LPT:
4791 	case WM_T_PCH_SPT:
4792 	case WM_T_PCH_CNP:
4793 	case WM_T_PCH_TGP:
4794 		delay(10*1000);
4795 		if (sc->sc_type >= WM_T_ICH10)
4796 			wm_lan_init_done(sc);
4797 		else
4798 			wm_get_auto_rd_done(sc);
4799 
4800 		/* Clear PHY Reset Asserted bit */
4801 		reg = CSR_READ(sc, WMREG_STATUS);
4802 		if ((reg & STATUS_PHYRA) != 0)
4803 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
4804 		break;
4805 	default:
4806 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4807 		    __func__);
4808 		break;
4809 	}
4810 }
4811 
4812 int
4813 wm_phy_post_reset(struct wm_softc *sc)
4814 {
4815 	device_t dev = sc->sc_dev;
4816 	uint16_t reg;
4817 	int rv = 0;
4818 
4819 	/* This function is only for ICH8 and newer. */
4820 	if (sc->sc_type < WM_T_ICH8)
4821 		return 0;
4822 
4823 	if (wm_phy_resetisblocked(sc)) {
4824 		/* XXX */
4825 		device_printf(dev, "PHY is blocked\n");
4826 		return -1;
4827 	}
4828 
4829 	/* Allow time for h/w to get to quiescent state after reset */
4830 	delay(10*1000);
4831 
4832 	/* Perform any necessary post-reset workarounds */
4833 	if (sc->sc_type == WM_T_PCH)
4834 		rv = wm_hv_phy_workarounds_ich8lan(sc);
4835 	else if (sc->sc_type == WM_T_PCH2)
4836 		rv = wm_lv_phy_workarounds_ich8lan(sc);
4837 	if (rv != 0)
4838 		return rv;
4839 
4840 	/* Clear the host wakeup bit after lcd reset */
4841 	if (sc->sc_type >= WM_T_PCH) {
4842 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
4843 		reg &= ~BM_WUC_HOST_WU_BIT;
4844 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
4845 	}
4846 
4847 	/* Configure the LCD with the extended configuration region in NVM */
4848 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
4849 		return rv;
4850 
4851 	/* Configure the LCD with the OEM bits in NVM */
4852 	rv = wm_oem_bits_config_ich8lan(sc, true);
4853 
4854 	if (sc->sc_type == WM_T_PCH2) {
4855 		/* Ungate automatic PHY configuration on non-managed 82579 */
4856 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
4857 			delay(10 * 1000);
4858 			wm_gate_hw_phy_config_ich8lan(sc, false);
4859 		}
4860 		/* Set EEE LPI Update Timer to 200usec */
4861 		rv = sc->phy.acquire(sc);
4862 		if (rv)
4863 			return rv;
4864 		rv = wm_write_emi_reg_locked(dev,
4865 		    I82579_LPI_UPDATE_TIMER, 0x1387);
4866 		sc->phy.release(sc);
4867 	}
4868 
4869 	return rv;
4870 }
4871 
4872 /* Only for PCH and newer */
4873 static int
4874 wm_write_smbus_addr(struct wm_softc *sc)
4875 {
4876 	uint32_t strap, freq;
4877 	uint16_t phy_data;
4878 	int rv;
4879 
4880 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4881 		device_xname(sc->sc_dev), __func__));
4882 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
4883 
4884 	strap = CSR_READ(sc, WMREG_STRAP);
4885 	freq = __SHIFTOUT(strap, STRAP_FREQ);
4886 
4887 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
4888 	if (rv != 0)
4889 		return rv;
4890 
4891 	phy_data &= ~HV_SMB_ADDR_ADDR;
4892 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
4893 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
4894 
4895 	if (sc->sc_phytype == WMPHY_I217) {
4896 		/* Restore SMBus frequency */
4897 		if (freq--) {
4898 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
4899 			    | HV_SMB_ADDR_FREQ_HIGH);
4900 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
4901 			    HV_SMB_ADDR_FREQ_LOW);
4902 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
4903 			    HV_SMB_ADDR_FREQ_HIGH);
4904 		} else
4905 			DPRINTF(sc, WM_DEBUG_INIT,
4906 			    ("%s: %s Unsupported SMB frequency in PHY\n",
4907 				device_xname(sc->sc_dev), __func__));
4908 	}
4909 
4910 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
4911 	    phy_data);
4912 }
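
/*
 * Sketch of the __SHIFTOUT/__SHIFTIN usage above (not compiled).  Both
 * macros take a value and a field mask: __SHIFTOUT() extracts and
 * right-justifies the field, __SHIFTIN() shifts a value into the field's
 * position, so no open-coded shift counts are needed.
 */
#if 0
	strap = CSR_READ(sc, WMREG_STRAP);
	freq = __SHIFTOUT(strap, STRAP_FREQ);	/* Extract the field */
	/* The low bit of freq drives HV_SMB_ADDR_FREQ_LOW */
	phy_data |= __SHIFTIN((freq & 0x01) != 0, HV_SMB_ADDR_FREQ_LOW);
#endif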
4913 
4914 static int
4915 wm_init_lcd_from_nvm(struct wm_softc *sc)
4916 {
4917 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
4918 	uint16_t phy_page = 0;
4919 	int rv = 0;
4920 
4921 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4922 		device_xname(sc->sc_dev), __func__));
4923 
4924 	switch (sc->sc_type) {
4925 	case WM_T_ICH8:
4926 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
4927 		    || (sc->sc_phytype != WMPHY_IGP_3))
4928 			return 0;
4929 
4930 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
4931 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
4932 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
4933 			break;
4934 		}
4935 		/* FALLTHROUGH */
4936 	case WM_T_PCH:
4937 	case WM_T_PCH2:
4938 	case WM_T_PCH_LPT:
4939 	case WM_T_PCH_SPT:
4940 	case WM_T_PCH_CNP:
4941 	case WM_T_PCH_TGP:
4942 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
4943 		break;
4944 	default:
4945 		return 0;
4946 	}
4947 
4948 	if ((rv = sc->phy.acquire(sc)) != 0)
4949 		return rv;
4950 
4951 	reg = CSR_READ(sc, WMREG_FEXTNVM);
4952 	if ((reg & sw_cfg_mask) == 0)
4953 		goto release;
4954 
4955 	/*
4956 	 * Make sure HW does not configure LCD from PHY extended configuration
4957 	 * before SW configuration
4958 	 */
4959 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
4960 	if ((sc->sc_type < WM_T_PCH2)
4961 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
4962 		goto release;
4963 
4964 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
4965 		device_xname(sc->sc_dev), __func__));
4966 	/* word_addr is in DWORD */
4967 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
4968 
4969 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
4970 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
4971 	if (cnf_size == 0)
4972 		goto release;
4973 
4974 	if (((sc->sc_type == WM_T_PCH)
4975 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
4976 	    || (sc->sc_type > WM_T_PCH)) {
4977 		/*
4978 		 * HW configures the SMBus address and LEDs when the OEM and
4979 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
4980 		 * are cleared, SW will configure them instead.
4981 		 */
4982 		DPRINTF(sc, WM_DEBUG_INIT,
4983 		    ("%s: %s: Configure SMBus and LED\n",
4984 			device_xname(sc->sc_dev), __func__));
4985 		if ((rv = wm_write_smbus_addr(sc)) != 0)
4986 			goto release;
4987 
4988 		reg = CSR_READ(sc, WMREG_LEDCTL);
4989 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
4990 		    (uint16_t)reg);
4991 		if (rv != 0)
4992 			goto release;
4993 	}
4994 
4995 	/* Configure LCD from extended configuration region. */
4996 	for (i = 0; i < cnf_size; i++) {
4997 		uint16_t reg_data, reg_addr;
4998 
4999 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
5000 			goto release;
5001 
5002 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
5003 			goto release;
5004 
5005 		if (reg_addr == IGPHY_PAGE_SELECT)
5006 			phy_page = reg_data;
5007 
5008 		reg_addr &= IGPHY_MAXREGADDR;
5009 		reg_addr |= phy_page;
5010 
5011 		KASSERT(sc->phy.writereg_locked != NULL);
5012 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
5013 		    reg_data);
5014 	}
5015 
5016 release:
5017 	sc->phy.release(sc);
5018 	return rv;
5019 }
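
/*
 * Layout of the extended configuration region consumed above (an
 * illustration of the loop, not compiled).  The region is an array of word
 * pairs starting at word_addr: even words carry the data and odd words
 * carry the PHY register address.  A pair whose address equals
 * IGPHY_PAGE_SELECT switches the page applied to the following writes.
 */
#if 0
	for (i = 0; i < cnf_size; i++) {
		wm_nvm_read(sc, word_addr + i * 2, 1, &reg_data);     /* Even */
		wm_nvm_read(sc, word_addr + i * 2 + 1, 1, &reg_addr); /* Odd */
	}
#endif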
5020 
5021 /*
5022  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
5023  *  @sc:       pointer to the HW structure
5024  *  @d0_state: boolean if entering d0 or d3 device state
5025  *
5026  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
5027  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
5028  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
5029  */
5030 int
5031 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
5032 {
5033 	uint32_t mac_reg;
5034 	uint16_t oem_reg;
5035 	int rv;
5036 
5037 	if (sc->sc_type < WM_T_PCH)
5038 		return 0;
5039 
5040 	rv = sc->phy.acquire(sc);
5041 	if (rv != 0)
5042 		return rv;
5043 
5044 	if (sc->sc_type == WM_T_PCH) {
5045 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
5046 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
5047 			goto release;
5048 	}
5049 
5050 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
5051 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
5052 		goto release;
5053 
5054 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
5055 
5056 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
5057 	if (rv != 0)
5058 		goto release;
5059 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
5060 
5061 	if (d0_state) {
5062 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
5063 			oem_reg |= HV_OEM_BITS_A1KDIS;
5064 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
5065 			oem_reg |= HV_OEM_BITS_LPLU;
5066 	} else {
5067 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
5068 		    != 0)
5069 			oem_reg |= HV_OEM_BITS_A1KDIS;
5070 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
5071 		    != 0)
5072 			oem_reg |= HV_OEM_BITS_LPLU;
5073 	}
5074 
5075 	/* Set Restart auto-neg to activate the bits */
5076 	if ((d0_state || (sc->sc_type != WM_T_PCH))
5077 	    && (wm_phy_resetisblocked(sc) == false))
5078 		oem_reg |= HV_OEM_BITS_ANEGNOW;
5079 
5080 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
5081 
5082 release:
5083 	sc->phy.release(sc);
5084 
5085 	return rv;
5086 }
5087 
5088 /* Init hardware bits */
5089 void
5090 wm_initialize_hardware_bits(struct wm_softc *sc)
5091 {
5092 	uint32_t tarc0, tarc1, reg;
5093 
5094 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
5095 		device_xname(sc->sc_dev), __func__));
5096 
5097 	/* For 82571 variants, 80003 and ICHs */
5098 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
5099 	    || WM_IS_ICHPCH(sc)) {
5100 
5101 		/* Transmit Descriptor Control 0 */
5102 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
5103 		reg |= TXDCTL_COUNT_DESC;
5104 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
5105 
5106 		/* Transmit Descriptor Control 1 */
5107 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
5108 		reg |= TXDCTL_COUNT_DESC;
5109 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
5110 
5111 		/* TARC0 */
5112 		tarc0 = CSR_READ(sc, WMREG_TARC0);
5113 		switch (sc->sc_type) {
5114 		case WM_T_82571:
5115 		case WM_T_82572:
5116 		case WM_T_82573:
5117 		case WM_T_82574:
5118 		case WM_T_82583:
5119 		case WM_T_80003:
5120 			/* Clear bits 30..27 */
5121 			tarc0 &= ~__BITS(30, 27);
5122 			break;
5123 		default:
5124 			break;
5125 		}
5126 
5127 		switch (sc->sc_type) {
5128 		case WM_T_82571:
5129 		case WM_T_82572:
5130 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
5131 
5132 			tarc1 = CSR_READ(sc, WMREG_TARC1);
5133 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
5134 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
5135 			/* 8257[12] Errata No.7 */
5136 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
5137 
5138 			/* TARC1 bit 28 */
5139 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
5140 				tarc1 &= ~__BIT(28);
5141 			else
5142 				tarc1 |= __BIT(28);
5143 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
5144 
5145 			/*
5146 			 * 8257[12] Errata No.13
5147 			 * Disable Dynamic Clock Gating.
5148 			 */
5149 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
5150 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
5151 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5152 			break;
5153 		case WM_T_82573:
5154 		case WM_T_82574:
5155 		case WM_T_82583:
5156 			if ((sc->sc_type == WM_T_82574)
5157 			    || (sc->sc_type == WM_T_82583))
5158 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
5159 
5160 			/* Extended Device Control */
5161 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
5162 			reg &= ~__BIT(23);	/* Clear bit 23 */
5163 			reg |= __BIT(22);	/* Set bit 22 */
5164 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5165 
5166 			/* Device Control */
5167 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
5168 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5169 
5170 			/* PCIe Control Register */
5171 			/*
5172 			 * 82573 Errata (unknown).
5173 			 *
5174 			 * 82574 Errata 25 and 82583 Errata 12
5175 			 * "Dropped Rx Packets":
5176 			 *   NVM Image Version 2.1.4 and newer does not have this bug.
5177 			 */
5178 			reg = CSR_READ(sc, WMREG_GCR);
5179 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
5180 			CSR_WRITE(sc, WMREG_GCR, reg);
5181 
5182 			if ((sc->sc_type == WM_T_82574)
5183 			    || (sc->sc_type == WM_T_82583)) {
5184 				/*
5185 				 * Document says this bit must be set for
5186 				 * proper operation.
5187 				 */
5188 				reg = CSR_READ(sc, WMREG_GCR);
5189 				reg |= __BIT(22);
5190 				CSR_WRITE(sc, WMREG_GCR, reg);
5191 
5192 				/*
5193 				 * Apply a workaround for the hardware erratum
5194 				 * documented in the errata docs. It fixes an
5195 				 * issue where some error-prone or unreliable
5196 				 * PCIe completions occur, particularly with
5197 				 * ASPM enabled. Without the fix, the issue can
5198 				 * cause Tx timeouts.
5199 				 */
5200 				reg = CSR_READ(sc, WMREG_GCR2);
5201 				reg |= __BIT(0);
5202 				CSR_WRITE(sc, WMREG_GCR2, reg);
5203 			}
5204 			break;
5205 		case WM_T_80003:
5206 			/* TARC0 */
5207 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
5208 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
5209 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
5210 
5211 			/* TARC1 bit 28 */
5212 			tarc1 = CSR_READ(sc, WMREG_TARC1);
5213 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
5214 				tarc1 &= ~__BIT(28);
5215 			else
5216 				tarc1 |= __BIT(28);
5217 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
5218 			break;
5219 		case WM_T_ICH8:
5220 		case WM_T_ICH9:
5221 		case WM_T_ICH10:
5222 		case WM_T_PCH:
5223 		case WM_T_PCH2:
5224 		case WM_T_PCH_LPT:
5225 		case WM_T_PCH_SPT:
5226 		case WM_T_PCH_CNP:
5227 		case WM_T_PCH_TGP:
5228 			/* TARC0 */
5229 			if (sc->sc_type == WM_T_ICH8) {
5230 				/* Set TARC0 bits 29 and 28 */
5231 				tarc0 |= __BITS(29, 28);
5232 			} else if (sc->sc_type == WM_T_PCH_SPT) {
5233 				tarc0 |= __BIT(29);
5234 				/*
5235 				 * Drop bit 28. From Linux.
5236 				 * See I218/I219 spec update
5237 				 * "5. Buffer Overrun While the I219 is
5238 				 * Processing DMA Transactions"
5239 				 */
5240 				tarc0 &= ~__BIT(28);
5241 			}
5242 			/* Set TARC0 bits 23,24,26,27 */
5243 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
5244 
5245 			/* CTRL_EXT */
5246 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
5247 			reg |= __BIT(22);	/* Set bit 22 */
5248 			/*
5249 			 * Enable PHY low-power state when MAC is at D3
5250 			 * w/o WoL
5251 			 */
5252 			if (sc->sc_type >= WM_T_PCH)
5253 				reg |= CTRL_EXT_PHYPDEN;
5254 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5255 
5256 			/* TARC1 */
5257 			tarc1 = CSR_READ(sc, WMREG_TARC1);
5258 			/* bit 28 */
5259 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
5260 				tarc1 &= ~__BIT(28);
5261 			else
5262 				tarc1 |= __BIT(28);
5263 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
5264 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
5265 
5266 			/* Device Status */
5267 			if (sc->sc_type == WM_T_ICH8) {
5268 				reg = CSR_READ(sc, WMREG_STATUS);
5269 				reg &= ~__BIT(31);
5270 				CSR_WRITE(sc, WMREG_STATUS, reg);
5271 
5272 			}
5273 
5274 			/* IOSFPC */
5275 			if (sc->sc_type == WM_T_PCH_SPT) {
5276 				reg = CSR_READ(sc, WMREG_IOSFPC);
5277 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
5278 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
5279 			}
5280 			/*
5281 			 * To work around a descriptor data corruption issue
5282 			 * during NFS v2 UDP traffic, just disable the NFS
5283 			 * filtering capability.
5284 			 */
5285 			reg = CSR_READ(sc, WMREG_RFCTL);
5286 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
5287 			CSR_WRITE(sc, WMREG_RFCTL, reg);
5288 			break;
5289 		default:
5290 			break;
5291 		}
5292 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
5293 
5294 		switch (sc->sc_type) {
5295 		case WM_T_82571:
5296 		case WM_T_82572:
5297 		case WM_T_82573:
5298 		case WM_T_80003:
5299 		case WM_T_ICH8:
5300 			/*
5301 			 * 8257[12] Errata No.52, 82573 Errata No.43 and some
5302 			 * others: set these bits to avoid the RSS Hash Value bug.
5303 			 */
5304 			reg = CSR_READ(sc, WMREG_RFCTL);
5305 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
5306 			CSR_WRITE(sc, WMREG_RFCTL, reg);
5307 			break;
5308 		case WM_T_82574:
5309 			/* Use extened Rx descriptor. */
5310 			/* Use extended Rx descriptors. */
5311 			reg |= WMREG_RFCTL_EXSTEN;
5312 			CSR_WRITE(sc, WMREG_RFCTL, reg);
5313 			break;
5314 		default:
5315 			break;
5316 		}
5317 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
5318 		/*
5319 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
5320 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
5321 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
5322 		 * Correctly by the Device"
5323 		 *
5324 		 * I354(C2000) Errata AVR53:
5325 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
5326 		 * Hang"
5327 		 */
5328 		reg = CSR_READ(sc, WMREG_RFCTL);
5329 		reg |= WMREG_RFCTL_IPV6EXDIS;
5330 		CSR_WRITE(sc, WMREG_RFCTL, reg);
5331 	}
5332 }
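
/*
 * A minimal sketch of the NetBSD bit macros used heavily above (not
 * compiled): __BIT(n) builds a single-bit mask and __BITS(hi, lo) builds a
 * contiguous mask, so the TARC manipulation reduces to plain mask set and
 * clear operations.
 */
#if 0
	tarc0 &= ~__BITS(30, 27);		  /* Clear bits 30..27 */
	tarc0 |= __BITS(27, 26) | __BITS(24, 23); /* Set bits 27,26,24,23 */
	tarc1 = mulr ? (tarc1 & ~__BIT(28)) : (tarc1 | __BIT(28));
#endif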
5333 
5334 static uint32_t
5335 wm_rxpbs_adjust_82580(uint32_t val)
5336 {
5337 	uint32_t rv = 0;
5338 
5339 	if (val < __arraycount(wm_82580_rxpbs_table))
5340 		rv = wm_82580_rxpbs_table[val];
5341 
5342 	return rv;
5343 }
5344 
5345 /*
5346  * wm_reset_phy:
5347  *
5348  *	Generic PHY reset function.
5349  *	Same as e1000_phy_hw_reset_generic()
5350  */
5351 static int
5352 wm_reset_phy(struct wm_softc *sc)
5353 {
5354 	uint32_t reg;
5355 	int rv;
5356 
5357 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
5358 		device_xname(sc->sc_dev), __func__));
5359 	if (wm_phy_resetisblocked(sc))
5360 		return -1;
5361 
5362 	rv = sc->phy.acquire(sc);
5363 	if (rv) {
5364 		device_printf(sc->sc_dev, "%s: failed to acquire phy: %d\n",
5365 		    __func__, rv);
5366 		return rv;
5367 	}
5368 
5369 	reg = CSR_READ(sc, WMREG_CTRL);
5370 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
5371 	CSR_WRITE_FLUSH(sc);
5372 
5373 	delay(sc->phy.reset_delay_us);
5374 
5375 	CSR_WRITE(sc, WMREG_CTRL, reg);
5376 	CSR_WRITE_FLUSH(sc);
5377 
5378 	delay(150);
5379 
5380 	sc->phy.release(sc);
5381 
5382 	wm_get_cfg_done(sc);
5383 	wm_phy_post_reset(sc);
5384 
5385 	return 0;
5386 }
5387 
5388 /*
5389  * wm_flush_desc_rings - remove all descriptors from the descriptor rings.
5390  *
5391  * On the I219, the descriptor rings must be emptied before resetting the HW
5392  * or before changing the device state to D3 during runtime (runtime PM).
5393  *
5394  * Failure to do this will cause the HW to enter a unit hang state which can
5395  * only be released by PCI reset on the device.
5396  *
5397  * I219 does not use multiqueue, so it is enough to check sc->sc_queue[0] only.
5398  */
5399 static void
5400 wm_flush_desc_rings(struct wm_softc *sc)
5401 {
5402 	pcireg_t preg;
5403 	uint32_t reg;
5404 	struct wm_txqueue *txq;
5405 	wiseman_txdesc_t *txd;
5406 	int nexttx;
5407 	uint32_t rctl;
5408 
5409 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
5410 
5411 	/* First, disable MULR fix in FEXTNVM11 */
5412 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
5413 	reg |= FEXTNVM11_DIS_MULRFIX;
5414 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
5415 
5416 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
5417 	reg = CSR_READ(sc, WMREG_TDLEN(0));
5418 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
5419 		return;
5420 
5421 	/*
5422 	 * Remove all descriptors from the tx_ring.
5423 	 *
5424 	 * We want to clear all pending descriptors from the TX ring. Zeroing
5425 	 * happens when the HW reads the regs. We assign the ring itself as
5426 	 * the data of the next descriptor. We don't care about the data since
5427 	 * we are about to reset the HW.
5428 	 */
5429 #ifdef WM_DEBUG
5430 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
5431 #endif
5432 	reg = CSR_READ(sc, WMREG_TCTL);
5433 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
5434 
5435 	txq = &sc->sc_queue[0].wmq_txq;
5436 	nexttx = txq->txq_next;
5437 	txd = &txq->txq_descs[nexttx];
5438 	wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
5439 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
5440 	txd->wtx_fields.wtxu_status = 0;
5441 	txd->wtx_fields.wtxu_options = 0;
5442 	txd->wtx_fields.wtxu_vlan = 0;
5443 
5444 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
5445 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5446 
5447 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
5448 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
5449 	CSR_WRITE_FLUSH(sc);
5450 	delay(250);
5451 
5452 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
5453 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
5454 		return;
5455 
5456 	/*
5457 	 * Mark all descriptors in the RX ring as consumed and disable the
5458 	 * rx ring.
5459 	 */
5460 #ifdef WM_DEBUG
5461 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
5462 #endif
5463 	rctl = CSR_READ(sc, WMREG_RCTL);
5464 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
5465 	CSR_WRITE_FLUSH(sc);
5466 	delay(150);
5467 
5468 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
5469 	/* Zero the lower 14 bits (prefetch and host thresholds) */
5470 	reg &= 0xffffc000;
5471 	/*
5472 	 * Update thresholds: prefetch threshold to 31, host threshold
5473 	 * to 1 and make sure the granularity is "descriptors" and not
5474 	 * "cache lines"
5475 	 */
5476 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
5477 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
5478 
5479 	/* Momentarily enable the RX ring for the changes to take effect */
5480 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
5481 	CSR_WRITE_FLUSH(sc);
5482 	delay(150);
5483 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
5484 }
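
/*
 * Usage sketch (a hypothetical call site for illustration; the real
 * callers live in the init/reset paths): on I219 (PCH_SPT and later) the
 * rings are flushed with the interface lock held, immediately before
 * resetting the MAC.
 */
#if 0
	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
	if (sc->sc_type >= WM_T_PCH_SPT)
		wm_flush_desc_rings(sc);
	wm_reset(sc);
#endif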
5485 
5486 /*
5487  * wm_reset:
5488  *
5489  *	Reset the i82542 chip.
5490  */
5491 static void
5492 wm_reset(struct wm_softc *sc)
5493 {
5494 	int phy_reset = 0;
5495 	int i, error = 0;
5496 	uint32_t reg;
5497 	uint16_t kmreg;
5498 	int rv;
5499 
5500 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
5501 		device_xname(sc->sc_dev), __func__));
5502 	KASSERT(sc->sc_type != 0);
5503 
5504 	/*
5505 	 * Allocate on-chip memory according to the MTU size.
5506 	 * The Packet Buffer Allocation register must be written
5507 	 * before the chip is reset.
5508 	 */
5509 	switch (sc->sc_type) {
5510 	case WM_T_82547:
5511 	case WM_T_82547_2:
5512 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
5513 		    PBA_22K : PBA_30K;
5514 		for (i = 0; i < sc->sc_nqueues; i++) {
5515 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5516 			txq->txq_fifo_head = 0;
5517 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
5518 			txq->txq_fifo_size =
5519 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
5520 			txq->txq_fifo_stall = 0;
5521 		}
5522 		break;
5523 	case WM_T_82571:
5524 	case WM_T_82572:
5525 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
5526 	case WM_T_80003:
5527 		sc->sc_pba = PBA_32K;
5528 		break;
5529 	case WM_T_82573:
5530 		sc->sc_pba = PBA_12K;
5531 		break;
5532 	case WM_T_82574:
5533 	case WM_T_82583:
5534 		sc->sc_pba = PBA_20K;
5535 		break;
5536 	case WM_T_82576:
5537 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
5538 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
5539 		break;
5540 	case WM_T_82580:
5541 	case WM_T_I350:
5542 	case WM_T_I354:
5543 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
5544 		break;
5545 	case WM_T_I210:
5546 	case WM_T_I211:
5547 		sc->sc_pba = PBA_34K;
5548 		break;
5549 	case WM_T_ICH8:
5550 		/* Workaround for a bit corruption issue in FIFO memory */
5551 		sc->sc_pba = PBA_8K;
5552 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
5553 		break;
5554 	case WM_T_ICH9:
5555 	case WM_T_ICH10:
5556 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
5557 		    PBA_14K : PBA_10K;
5558 		break;
5559 	case WM_T_PCH:
5560 	case WM_T_PCH2:	/* XXX 14K? */
5561 	case WM_T_PCH_LPT:
5562 	case WM_T_PCH_SPT:
5563 	case WM_T_PCH_CNP:
5564 	case WM_T_PCH_TGP:
5565 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
5566 		    PBA_12K : PBA_26K;
5567 		break;
5568 	default:
5569 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
5570 		    PBA_40K : PBA_48K;
5571 		break;
5572 	}
5573 	/*
5574 	 * Only old or non-multiqueue devices have the PBA register
5575 	 * XXX Need special handling for 82575.
5576 	 */
5577 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
5578 	    || (sc->sc_type == WM_T_82575))
5579 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
5580 
5581 	/* Prevent the PCI-E bus from sticking */
5582 	if (sc->sc_flags & WM_F_PCIE) {
5583 		int timeout = 800;
5584 
5585 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
5586 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5587 
5588 		while (timeout--) {
5589 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
5590 			    == 0)
5591 				break;
5592 			delay(100);
5593 		}
5594 		if (timeout == 0)
5595 			device_printf(sc->sc_dev,
5596 			    "failed to disable bus mastering\n");
5597 	}
5598 
5599 	/* Set the completion timeout for interface */
5600 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
5601 	    || (sc->sc_type == WM_T_82580)
5602 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
5603 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
5604 		wm_set_pcie_completion_timeout(sc);
5605 
5606 	/* Clear interrupt */
5607 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5608 	if (wm_is_using_msix(sc)) {
5609 		if (sc->sc_type != WM_T_82574) {
5610 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5611 			CSR_WRITE(sc, WMREG_EIAC, 0);
5612 		} else
5613 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5614 	}
5615 
5616 	/* Stop the transmit and receive processes. */
5617 	CSR_WRITE(sc, WMREG_RCTL, 0);
5618 	sc->sc_rctl &= ~RCTL_EN;
5619 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
5620 	CSR_WRITE_FLUSH(sc);
5621 
5622 	/* XXX set_tbi_sbp_82543() */
5623 
5624 	delay(10*1000);
5625 
5626 	/* Must acquire the MDIO ownership before MAC reset */
5627 	switch (sc->sc_type) {
5628 	case WM_T_82573:
5629 	case WM_T_82574:
5630 	case WM_T_82583:
5631 		error = wm_get_hw_semaphore_82573(sc);
5632 		break;
5633 	default:
5634 		break;
5635 	}
5636 
5637 	/*
5638 	 * 82541 Errata 29? & 82547 Errata 28?
5639 	 * See also the description about PHY_RST bit in CTRL register
5640 	 * in 8254x_GBe_SDM.pdf.
5641 	 */
5642 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
5643 		CSR_WRITE(sc, WMREG_CTRL,
5644 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
5645 		CSR_WRITE_FLUSH(sc);
5646 		delay(5000);
5647 	}
5648 
5649 	switch (sc->sc_type) {
5650 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
5651 	case WM_T_82541:
5652 	case WM_T_82541_2:
5653 	case WM_T_82547:
5654 	case WM_T_82547_2:
5655 		/*
5656 		 * On some chipsets, a reset through a memory-mapped write
5657 		 * cycle can cause the chip to reset before completing the
5658 		 * write cycle. This causes major headaches that can be avoided
5659 		 * by issuing the reset via indirect register writes through
5660 		 * I/O space.
5661 		 *
5662 		 * So, if we successfully mapped the I/O BAR at attach time,
5663 		 * use that. Otherwise, try our luck with a memory-mapped
5664 		 * reset.
5665 		 */
5666 		if (sc->sc_flags & WM_F_IOH_VALID)
5667 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
5668 		else
5669 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
5670 		break;
5671 	case WM_T_82545_3:
5672 	case WM_T_82546_3:
5673 		/* Use the shadow control register on these chips. */
5674 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
5675 		break;
5676 	case WM_T_80003:
5677 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
5678 		if (sc->phy.acquire(sc) != 0)
5679 			break;
5680 		CSR_WRITE(sc, WMREG_CTRL, reg);
5681 		sc->phy.release(sc);
5682 		break;
5683 	case WM_T_ICH8:
5684 	case WM_T_ICH9:
5685 	case WM_T_ICH10:
5686 	case WM_T_PCH:
5687 	case WM_T_PCH2:
5688 	case WM_T_PCH_LPT:
5689 	case WM_T_PCH_SPT:
5690 	case WM_T_PCH_CNP:
5691 	case WM_T_PCH_TGP:
5692 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
5693 		if (wm_phy_resetisblocked(sc) == false) {
5694 			/*
5695 			 * Gate automatic PHY configuration by hardware on
5696 			 * non-managed 82579
5697 			 */
5698 			if ((sc->sc_type == WM_T_PCH2)
5699 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
5700 				== 0))
5701 				wm_gate_hw_phy_config_ich8lan(sc, true);
5702 
5703 			reg |= CTRL_PHY_RESET;
5704 			phy_reset = 1;
5705 		} else
5706 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
5707 		if (sc->phy.acquire(sc) != 0)
5708 			break;
5709 		CSR_WRITE(sc, WMREG_CTRL, reg);
5710 		/* Don't insert a completion barrier when resetting */
5711 		delay(20*1000);
5712 		/*
5713 		 * The EXTCNFCTR_MDIO_SW_OWNERSHIP bit is cleared by the reset,
5714 		 * so don't use sc->phy.release(sc). Release sc_ich_phymtx
5715 		 * only. See also wm_get_swflag_ich8lan().
5716 		 */
5717 		mutex_exit(sc->sc_ich_phymtx);
5718 		break;
5719 	case WM_T_82580:
5720 	case WM_T_I350:
5721 	case WM_T_I354:
5722 	case WM_T_I210:
5723 	case WM_T_I211:
5724 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
5725 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
5726 			CSR_WRITE_FLUSH(sc);
5727 		delay(5000);
5728 		break;
5729 	case WM_T_82542_2_0:
5730 	case WM_T_82542_2_1:
5731 	case WM_T_82543:
5732 	case WM_T_82540:
5733 	case WM_T_82545:
5734 	case WM_T_82546:
5735 	case WM_T_82571:
5736 	case WM_T_82572:
5737 	case WM_T_82573:
5738 	case WM_T_82574:
5739 	case WM_T_82575:
5740 	case WM_T_82576:
5741 	case WM_T_82583:
5742 	default:
5743 		/* Everything else can safely use the documented method. */
5744 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
5745 		break;
5746 	}
5747 
5748 	/* Must release the MDIO ownership after MAC reset */
5749 	switch (sc->sc_type) {
5750 	case WM_T_82573:
5751 	case WM_T_82574:
5752 	case WM_T_82583:
5753 		if (error == 0)
5754 			wm_put_hw_semaphore_82573(sc);
5755 		break;
5756 	default:
5757 		break;
5758 	}
5759 
5760 	/* Set Phy Config Counter to 50msec */
5761 	if (sc->sc_type == WM_T_PCH2) {
5762 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
5763 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
5764 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
5765 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
5766 	}
5767 
5768 	if (phy_reset != 0)
5769 		wm_get_cfg_done(sc);
5770 
5771 	/* Reload EEPROM */
5772 	switch (sc->sc_type) {
5773 	case WM_T_82542_2_0:
5774 	case WM_T_82542_2_1:
5775 	case WM_T_82543:
5776 	case WM_T_82544:
5777 		delay(10);
5778 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5779 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5780 		CSR_WRITE_FLUSH(sc);
5781 		delay(2000);
5782 		break;
5783 	case WM_T_82540:
5784 	case WM_T_82545:
5785 	case WM_T_82545_3:
5786 	case WM_T_82546:
5787 	case WM_T_82546_3:
5788 		delay(5*1000);
5789 		/* XXX Disable HW ARPs on ASF enabled adapters */
5790 		break;
5791 	case WM_T_82541:
5792 	case WM_T_82541_2:
5793 	case WM_T_82547:
5794 	case WM_T_82547_2:
5795 		delay(20000);
5796 		/* XXX Disable HW ARPs on ASF enabled adapters */
5797 		break;
5798 	case WM_T_82571:
5799 	case WM_T_82572:
5800 	case WM_T_82573:
5801 	case WM_T_82574:
5802 	case WM_T_82583:
5803 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
5804 			delay(10);
5805 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5806 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5807 			CSR_WRITE_FLUSH(sc);
5808 		}
5809 		/* check EECD_EE_AUTORD */
5810 		wm_get_auto_rd_done(sc);
5811 		/*
5812 		 * PHY configuration from the NVM starts just after EECD_AUTO_RD
5813 		 * is set.
5814 		 */
5815 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
5816 		    || (sc->sc_type == WM_T_82583))
5817 			delay(25*1000);
5818 		break;
5819 	case WM_T_82575:
5820 	case WM_T_82576:
5821 	case WM_T_82580:
5822 	case WM_T_I350:
5823 	case WM_T_I354:
5824 	case WM_T_I210:
5825 	case WM_T_I211:
5826 	case WM_T_80003:
5827 		/* check EECD_EE_AUTORD */
5828 		wm_get_auto_rd_done(sc);
5829 		break;
5830 	case WM_T_ICH8:
5831 	case WM_T_ICH9:
5832 	case WM_T_ICH10:
5833 	case WM_T_PCH:
5834 	case WM_T_PCH2:
5835 	case WM_T_PCH_LPT:
5836 	case WM_T_PCH_SPT:
5837 	case WM_T_PCH_CNP:
5838 	case WM_T_PCH_TGP:
5839 		break;
5840 	default:
5841 		panic("%s: unknown type\n", __func__);
5842 	}
5843 
5844 	/* Check whether EEPROM is present or not */
5845 	switch (sc->sc_type) {
5846 	case WM_T_82575:
5847 	case WM_T_82576:
5848 	case WM_T_82580:
5849 	case WM_T_I350:
5850 	case WM_T_I354:
5851 	case WM_T_ICH8:
5852 	case WM_T_ICH9:
5853 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
5854 			/* Not found */
5855 			sc->sc_flags |= WM_F_EEPROM_INVALID;
5856 			if (sc->sc_type == WM_T_82575)
5857 				wm_reset_init_script_82575(sc);
5858 		}
5859 		break;
5860 	default:
5861 		break;
5862 	}
5863 
5864 	if (phy_reset != 0)
5865 		wm_phy_post_reset(sc);
5866 
5867 	if ((sc->sc_type == WM_T_82580)
5868 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
5869 		/* Clear global device reset status bit */
5870 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
5871 	}
5872 
5873 	/* Clear any pending interrupt events. */
5874 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5875 	reg = CSR_READ(sc, WMREG_ICR);
5876 	if (wm_is_using_msix(sc)) {
5877 		if (sc->sc_type != WM_T_82574) {
5878 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5879 			CSR_WRITE(sc, WMREG_EIAC, 0);
5880 		} else
5881 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5882 	}
5883 
5884 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5885 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5886 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
5887 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)
5888 	    || (sc->sc_type == WM_T_PCH_TGP)) {
5889 		reg = CSR_READ(sc, WMREG_KABGTXD);
5890 		reg |= KABGTXD_BGSQLBIAS;
5891 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
5892 	}
5893 
5894 	/* Reload sc_ctrl */
5895 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5896 
5897 	wm_set_eee(sc);
5898 
5899 	/*
5900 	 * For PCH, this write will make sure that any noise will be detected
5901 	 * as a CRC error and be dropped rather than show up as a bad packet
5902 	 * to the DMA engine.
5903 	 */
5904 	if (sc->sc_type == WM_T_PCH)
5905 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
5906 
5907 	if (sc->sc_type >= WM_T_82544)
5908 		CSR_WRITE(sc, WMREG_WUC, 0);
5909 
5910 	if (sc->sc_type < WM_T_82575)
5911 		wm_disable_aspm(sc); /* Workaround for some chips */
5912 
5913 	wm_reset_mdicnfg_82580(sc);
5914 
5915 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
5916 		wm_pll_workaround_i210(sc);
5917 
5918 	if (sc->sc_type == WM_T_80003) {
5919 		/* Default to TRUE to enable the MDIC W/A */
5920 		sc->sc_flags |= WM_F_80003_MDIC_WA;
5921 
5922 		rv = wm_kmrn_readreg(sc,
5923 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
5924 		if (rv == 0) {
5925 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
5926 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
5927 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
5928 			else
5929 				sc->sc_flags |= WM_F_80003_MDIC_WA;
5930 		}
5931 	}
5932 }
5933 
5934 /*
5935  * wm_add_rxbuf:
5936  *
5937  *	Add a receive buffer to the indicated descriptor.
5938  */
5939 static int
5940 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
5941 {
5942 	struct wm_softc *sc = rxq->rxq_sc;
5943 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
5944 	struct mbuf *m;
5945 	int error;
5946 
5947 	KASSERT(mutex_owned(rxq->rxq_lock));
5948 
5949 	MGETHDR(m, M_DONTWAIT, MT_DATA);
5950 	if (m == NULL)
5951 		return ENOBUFS;
5952 	MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
5953 
5954 	MCLGET(m, M_DONTWAIT);
5955 	if ((m->m_flags & M_EXT) == 0) {
5956 		m_freem(m);
5957 		return ENOBUFS;
5958 	}
5959 
5960 	if (rxs->rxs_mbuf != NULL)
5961 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5962 
5963 	rxs->rxs_mbuf = m;
5964 
5965 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5966 	/*
5967 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
5968 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
5969 	 */
5970 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
5971 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
5972 	if (error) {
5973 		/* XXX XXX XXX */
5974 		aprint_error_dev(sc->sc_dev,
5975 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
5976 		panic("wm_add_rxbuf");
5977 	}
5978 
5979 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5980 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5981 
5982 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5983 		if ((sc->sc_rctl & RCTL_EN) != 0)
5984 			wm_init_rxdesc(rxq, idx);
5985 	} else
5986 		wm_init_rxdesc(rxq, idx);
5987 
5988 	return 0;
5989 }
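
/*
 * Usage sketch (illustrative, not compiled): a typical refill pass over
 * the whole RX ring with rxq_lock held, stopping at the first descriptor
 * that cannot get an mbuf cluster.
 */
#if 0
	mutex_enter(rxq->rxq_lock);
	for (i = 0; i < WM_NRXDESC; i++) {
		if (wm_add_rxbuf(rxq, i) != 0)
			break;	/* ENOBUFS: out of mbufs or clusters */
	}
	mutex_exit(rxq->rxq_lock);
#endif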
5990 
5991 /*
5992  * wm_rxdrain:
5993  *
5994  *	Drain the receive queue.
5995  */
5996 static void
5997 wm_rxdrain(struct wm_rxqueue *rxq)
5998 {
5999 	struct wm_softc *sc = rxq->rxq_sc;
6000 	struct wm_rxsoft *rxs;
6001 	int i;
6002 
6003 	KASSERT(mutex_owned(rxq->rxq_lock));
6004 
6005 	for (i = 0; i < WM_NRXDESC; i++) {
6006 		rxs = &rxq->rxq_soft[i];
6007 		if (rxs->rxs_mbuf != NULL) {
6008 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
6009 			m_freem(rxs->rxs_mbuf);
6010 			rxs->rxs_mbuf = NULL;
6011 		}
6012 	}
6013 }
6014 
6015 /*
6016  * Setup registers for RSS.
6017  *
6018  * XXX no VMDq support yet
6019  */
6020 static void
6021 wm_init_rss(struct wm_softc *sc)
6022 {
6023 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
6024 	int i;
6025 
6026 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
6027 
6028 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
6029 		unsigned int qid, reta_ent;
6030 
6031 		qid  = i % sc->sc_nqueues;
6032 		switch (sc->sc_type) {
6033 		case WM_T_82574:
6034 			reta_ent = __SHIFTIN(qid,
6035 			    RETA_ENT_QINDEX_MASK_82574);
6036 			break;
6037 		case WM_T_82575:
6038 			reta_ent = __SHIFTIN(qid,
6039 			    RETA_ENT_QINDEX1_MASK_82575);
6040 			break;
6041 		default:
6042 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
6043 			break;
6044 		}
6045 
6046 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
6047 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
6048 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
6049 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
6050 	}
6051 
6052 	rss_getkey((uint8_t *)rss_key);
6053 	for (i = 0; i < RSSRK_NUM_REGS; i++)
6054 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
6055 
6056 	if (sc->sc_type == WM_T_82574)
6057 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
6058 	else
6059 		mrqc = MRQC_ENABLE_RSS_MQ;
6060 
6061 	/*
6062 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
6063 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
6064 	 */
6065 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
6066 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
6067 #if 0
6068 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
6069 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
6070 #endif
6071 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
6072 
6073 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
6074 }
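
/*
 * Worked example for the RETA setup above (illustrative): with
 * sc_nqueues == 4, the redirection table entries cycle through queue IDs
 * 0,1,2,3,0,1,... over all RETA_NUM_ENTRIES slots, so hashed RX flows
 * spread evenly across the four queues.
 */
#if 0
	qid = i % sc->sc_nqueues;	/* i = 0..N-1 -> 0,1,2,3,0,1,... */
#endif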
6075 
6076 /*
6077  * Adjust the TX and RX queue numbers which the system actually uses.
6078  *
6079  * The numbers are affected by the parameters below:
6080  *     - The number of hardware queues
6081  *     - The number of MSI-X vectors (= "nvectors" argument)
6082  *     - ncpu
6083  */
6084 static void
6085 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
6086 {
6087 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
6088 
6089 	if (nvectors < 2) {
6090 		sc->sc_nqueues = 1;
6091 		return;
6092 	}
6093 
6094 	switch (sc->sc_type) {
6095 	case WM_T_82572:
6096 		hw_ntxqueues = 2;
6097 		hw_nrxqueues = 2;
6098 		break;
6099 	case WM_T_82574:
6100 		hw_ntxqueues = 2;
6101 		hw_nrxqueues = 2;
6102 		break;
6103 	case WM_T_82575:
6104 		hw_ntxqueues = 4;
6105 		hw_nrxqueues = 4;
6106 		break;
6107 	case WM_T_82576:
6108 		hw_ntxqueues = 16;
6109 		hw_nrxqueues = 16;
6110 		break;
6111 	case WM_T_82580:
6112 	case WM_T_I350:
6113 	case WM_T_I354:
6114 		hw_ntxqueues = 8;
6115 		hw_nrxqueues = 8;
6116 		break;
6117 	case WM_T_I210:
6118 		hw_ntxqueues = 4;
6119 		hw_nrxqueues = 4;
6120 		break;
6121 	case WM_T_I211:
6122 		hw_ntxqueues = 2;
6123 		hw_nrxqueues = 2;
6124 		break;
6125 		/*
6126 		 * The Ethernet controllers below do not support MSI-X,
6127 		 * so this driver doesn't let them use multiqueue.
6128 		 *     - WM_T_80003
6129 		 *     - WM_T_ICH8
6130 		 *     - WM_T_ICH9
6131 		 *     - WM_T_ICH10
6132 		 *     - WM_T_PCH
6133 		 *     - WM_T_PCH2
6134 		 *     - WM_T_PCH_LPT
6135 		 */
6136 	default:
6137 		hw_ntxqueues = 1;
6138 		hw_nrxqueues = 1;
6139 		break;
6140 	}
6141 
6142 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
6143 
6144 	/*
6145 	 * Since using more queues than MSI-X vectors cannot improve scaling,
6146 	 * we limit the number of queues actually used.
6147 	 */
6148 	if (nvectors < hw_nqueues + 1)
6149 		sc->sc_nqueues = nvectors - 1;
6150 	else
6151 		sc->sc_nqueues = hw_nqueues;
6152 
6153 	/*
6154 	 * Since using more queues than CPUs cannot improve scaling, we limit
6155 	 * the number of queues actually used.
6156 	 */
6157 	if (ncpu < sc->sc_nqueues)
6158 		sc->sc_nqueues = ncpu;
6159 }
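
/*
 * The clamping above is equivalent to the expression below (sketch, not
 * compiled).  Worked example: an 82576 (16 HW queue pairs) given 5 MSI-X
 * vectors on an 8-CPU machine uses min(16, 5 - 1, 8) = 4 queues; one
 * vector is reserved for the link interrupt.
 */
#if 0
	sc->sc_nqueues = uimin(uimin(hw_nqueues, nvectors - 1), ncpu);
#endif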
6160 
6161 static inline bool
6162 wm_is_using_msix(struct wm_softc *sc)
6163 {
6164 
6165 	return (sc->sc_nintrs > 1);
6166 }
6167 
6168 static inline bool
6169 wm_is_using_multiqueue(struct wm_softc *sc)
6170 {
6171 
6172 	return (sc->sc_nqueues > 1);
6173 }
6174 
6175 static int
6176 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
6177 {
6178 	struct wm_queue *wmq = &sc->sc_queue[qidx];
6179 
6180 	wmq->wmq_id = qidx;
6181 	wmq->wmq_intr_idx = intr_idx;
6182 	wmq->wmq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
6183 	    wm_handle_queue, wmq);
6184 	if (wmq->wmq_si != NULL)
6185 		return 0;
6186 
6187 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
6188 	    wmq->wmq_id);
6189 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
6190 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
6191 	return ENOMEM;
6192 }
6193 
6194 /*
6195  * Both single interrupt MSI and INTx can use this function.
6196  */
6197 static int
6198 wm_setup_legacy(struct wm_softc *sc)
6199 {
6200 	pci_chipset_tag_t pc = sc->sc_pc;
6201 	const char *intrstr = NULL;
6202 	char intrbuf[PCI_INTRSTR_LEN];
6203 	int error;
6204 
6205 	error = wm_alloc_txrx_queues(sc);
6206 	if (error) {
6207 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
6208 		    error);
6209 		return ENOMEM;
6210 	}
6211 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
6212 	    sizeof(intrbuf));
6213 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
6214 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
6215 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
6216 	if (sc->sc_ihs[0] == NULL) {
6217 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
6218 		    (pci_intr_type(pc, sc->sc_intrs[0])
6219 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6220 		return ENOMEM;
6221 	}
6222 
6223 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
6224 	sc->sc_nintrs = 1;
6225 
6226 	return wm_softint_establish_queue(sc, 0, 0);
6227 }
6228 
6229 static int
6230 wm_setup_msix(struct wm_softc *sc)
6231 {
6232 	void *vih;
6233 	kcpuset_t *affinity;
6234 	int qidx, error, intr_idx, txrx_established;
6235 	pci_chipset_tag_t pc = sc->sc_pc;
6236 	const char *intrstr = NULL;
6237 	char intrbuf[PCI_INTRSTR_LEN];
6238 	char intr_xname[INTRDEVNAMEBUF];
6239 
6240 	if (sc->sc_nqueues < ncpu) {
6241 		/*
6242 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
6243 		 * interrupts starts from CPU#1.
6244 		 */
6245 		sc->sc_affinity_offset = 1;
6246 	} else {
6247 		/*
6248 		 * In this case, this device uses all CPUs, so we align the
6249 		 * affinitized cpu_index with the MSI-X vector number for readability.
6250 		 */
6251 		sc->sc_affinity_offset = 0;
6252 	}
6253 
6254 	error = wm_alloc_txrx_queues(sc);
6255 	if (error) {
6256 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
6257 		    error);
6258 		return ENOMEM;
6259 	}
6260 
6261 	kcpuset_create(&affinity, false);
6262 	intr_idx = 0;
6263 
6264 	/*
6265 	 * TX and RX
6266 	 */
6267 	txrx_established = 0;
6268 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6269 		struct wm_queue *wmq = &sc->sc_queue[qidx];
6270 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
6271 
6272 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
6273 		    sizeof(intrbuf));
6274 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
6275 		    PCI_INTR_MPSAFE, true);
6276 		memset(intr_xname, 0, sizeof(intr_xname));
6277 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
6278 		    device_xname(sc->sc_dev), qidx);
6279 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
6280 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
6281 		if (vih == NULL) {
6282 			aprint_error_dev(sc->sc_dev,
6283 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
6284 			    intrstr ? " at " : "",
6285 			    intrstr ? intrstr : "");
6286 
6287 			goto fail;
6288 		}
6289 		kcpuset_zero(affinity);
6290 		/* Round-robin affinity */
6291 		kcpuset_set(affinity, affinity_to);
6292 		error = interrupt_distribute(vih, affinity, NULL);
6293 		if (error == 0) {
6294 			aprint_normal_dev(sc->sc_dev,
6295 			    "for TX and RX interrupting at %s affinity to %u\n",
6296 			    intrstr, affinity_to);
6297 		} else {
6298 			aprint_normal_dev(sc->sc_dev,
6299 			    "for TX and RX interrupting at %s\n", intrstr);
6300 		}
6301 		sc->sc_ihs[intr_idx] = vih;
6302 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
6303 			goto fail;
6304 		txrx_established++;
6305 		intr_idx++;
6306 	}
6307 
6308 	/* LINK */
6309 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
6310 	    sizeof(intrbuf));
6311 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
6312 	memset(intr_xname, 0, sizeof(intr_xname));
6313 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
6314 	    device_xname(sc->sc_dev));
6315 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
6316 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
6317 	if (vih == NULL) {
6318 		aprint_error_dev(sc->sc_dev,
6319 		    "unable to establish MSI-X(for LINK)%s%s\n",
6320 		    intrstr ? " at " : "",
6321 		    intrstr ? intrstr : "");
6322 
6323 		goto fail;
6324 	}
6325 	/* Keep default affinity to LINK interrupt */
6326 	aprint_normal_dev(sc->sc_dev,
6327 	    "for LINK interrupting at %s\n", intrstr);
6328 	sc->sc_ihs[intr_idx] = vih;
6329 	sc->sc_link_intr_idx = intr_idx;
6330 
6331 	sc->sc_nintrs = sc->sc_nqueues + 1;
6332 	kcpuset_destroy(affinity);
6333 	return 0;
6334 
6335 fail:
6336 	for (qidx = 0; qidx < txrx_established; qidx++) {
6337 		struct wm_queue *wmq = &sc->sc_queue[qidx];
6338 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
6339 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
6340 	}
6341 
6342 	kcpuset_destroy(affinity);
6343 	return ENOMEM;
6344 }
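
/*
 * Worked example for the affinity assignment above (illustrative): with
 * sc_affinity_offset == 1 and ncpu == 4, the queue interrupts are bound
 * round-robin to CPUs 1, 2, 3, 0, 1, ... while the link interrupt keeps
 * the system default affinity.
 */
#if 0
	affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
#endif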
6345 
6346 static void
6347 wm_unset_stopping_flags(struct wm_softc *sc)
6348 {
6349 	int i;
6350 
6351 	KASSERT(mutex_owned(sc->sc_core_lock));
6352 
6353 	/* Must unset stopping flags in ascending order. */
6354 	for (i = 0; i < sc->sc_nqueues; i++) {
6355 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6356 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6357 
6358 		mutex_enter(txq->txq_lock);
6359 		txq->txq_stopping = false;
6360 		mutex_exit(txq->txq_lock);
6361 
6362 		mutex_enter(rxq->rxq_lock);
6363 		rxq->rxq_stopping = false;
6364 		mutex_exit(rxq->rxq_lock);
6365 	}
6366 
6367 	sc->sc_core_stopping = false;
6368 }
6369 
6370 static void
6371 wm_set_stopping_flags(struct wm_softc *sc)
6372 {
6373 	int i;
6374 
6375 	KASSERT(mutex_owned(sc->sc_core_lock));
6376 
6377 	sc->sc_core_stopping = true;
6378 
6379 	/* Must set stopping flags in ascending order. */
6380 	for (i = 0; i < sc->sc_nqueues; i++) {
6381 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6382 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6383 
6384 		mutex_enter(rxq->rxq_lock);
6385 		rxq->rxq_stopping = true;
6386 		mutex_exit(rxq->rxq_lock);
6387 
6388 		mutex_enter(txq->txq_lock);
6389 		txq->txq_stopping = true;
6390 		mutex_exit(txq->txq_lock);
6391 	}
6392 }
6393 
6394 /*
6395  * Write interrupt interval value to ITR or EITR
6396  */
6397 static void
6398 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
6399 {
6400 
6401 	if (!wmq->wmq_set_itr)
6402 		return;
6403 
6404 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6405 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
6406 
6407 		/*
6408 		 * The 82575 doesn't have the CNT_INGR field,
6409 		 * so overwrite the counter field in software.
6410 		 */
6411 		if (sc->sc_type == WM_T_82575)
6412 			eitr |= __SHIFTIN(wmq->wmq_itr,
6413 			    EITR_COUNTER_MASK_82575);
6414 		else
6415 			eitr |= EITR_CNT_INGR;
6416 
6417 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
6418 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
6419 		/*
6420 		 * The 82574 has both ITR and EITR. Set EITR when we use
6421 		 * the multiqueue function with MSI-X.
6422 		 */
6423 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
6424 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
6425 	} else {
6426 		KASSERT(wmq->wmq_id == 0);
6427 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
6428 	}
6429 
6430 	wmq->wmq_set_itr = false;
6431 }
6432 
6433 /*
6434  * TODO
6435  * The dynamic ITR calculation below is almost the same as in Linux igb,
6436  * but it does not fit wm(4), so AIM stays disabled until we find an
6437  * appropriate ITR calculation.
6438  */
6439 /*
6440  * Calculate the interrupt interval value that wm_itrs_writereg() will
6441  * write to the register. It does not write the ITR/EITR register itself.
6442  */
6443 static void
6444 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
6445 {
6446 #ifdef NOTYET
6447 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
6448 	struct wm_txqueue *txq = &wmq->wmq_txq;
6449 	uint32_t avg_size = 0;
6450 	uint32_t new_itr;
6451 
6452 	if (rxq->rxq_packets)
6453 		avg_size = rxq->rxq_bytes / rxq->rxq_packets;
6454 	if (txq->txq_packets)
6455 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
6456 
6457 	if (avg_size == 0) {
6458 		new_itr = 450; /* restore default value */
6459 		goto out;
6460 	}
6461 
6462 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
6463 	avg_size += 24;
6464 
6465 	/* Don't starve jumbo frames */
6466 	avg_size = uimin(avg_size, 3000);
6467 
6468 	/* Give a little boost to mid-size frames */
6469 	if ((avg_size > 300) && (avg_size < 1200))
6470 		new_itr = avg_size / 3;
6471 	else
6472 		new_itr = avg_size / 2;
6473 
6474 out:
6475 	/*
6476 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
6477 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
6478 	 */
6479 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
6480 		new_itr *= 4;
6481 
6482 	if (new_itr != wmq->wmq_itr) {
6483 		wmq->wmq_itr = new_itr;
6484 		wmq->wmq_set_itr = true;
6485 	} else
6486 		wmq->wmq_set_itr = false;
6487 
6488 	rxq->rxq_packets = 0;
6489 	rxq->rxq_bytes = 0;
6490 	txq->txq_packets = 0;
6491 	txq->txq_bytes = 0;
6492 #endif
6493 }
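
/*
 * Worked example for the disabled AIM path above (illustrative): an
 * average frame of 576 bytes becomes 600 after the 24-byte
 * CRC/preamble/gap adjustment; as a mid-size frame (300..1200) that gives
 * new_itr = 600 / 3 = 200, which is then scaled by 4 on every controller
 * except the 82575.
 */
#if 0
	avg_size = 576 + 24;	/* 600 bytes */
	new_itr = avg_size / 3;	/* 200: mid-size boost */
	new_itr *= 4;		/* 800 on everything but the 82575 */
#endif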
6494 
6495 static void
6496 wm_init_sysctls(struct wm_softc *sc)
6497 {
6498 	struct sysctllog **log;
6499 	const struct sysctlnode *rnode, *qnode, *cnode;
6500 	int i, rv;
6501 	const char *dvname;
6502 
6503 	log = &sc->sc_sysctllog;
6504 	dvname = device_xname(sc->sc_dev);
6505 
6506 	rv = sysctl_createv(log, 0, NULL, &rnode,
6507 	    0, CTLTYPE_NODE, dvname,
6508 	    SYSCTL_DESCR("wm information and settings"),
6509 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
6510 	if (rv != 0)
6511 		goto err;
6512 
6513 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
6514 	    CTLTYPE_BOOL, "txrx_workqueue",
6515 	    SYSCTL_DESCR("Use workqueue for packet processing"),
6516 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
6517 	if (rv != 0)
6518 		goto teardown;
6519 
6520 	for (i = 0; i < sc->sc_nqueues; i++) {
6521 		struct wm_queue *wmq = &sc->sc_queue[i];
6522 		struct wm_txqueue *txq = &wmq->wmq_txq;
6523 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
6524 
6525 		snprintf(sc->sc_queue[i].sysctlname,
6526 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
6527 
6528 		if (sysctl_createv(log, 0, &rnode, &qnode,
6529 		    0, CTLTYPE_NODE,
6530 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
6531 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
6532 			break;
6533 
6534 		if (sysctl_createv(log, 0, &qnode, &cnode,
6535 		    CTLFLAG_READONLY, CTLTYPE_INT,
6536 		    "txq_free", SYSCTL_DESCR("TX queue free"),
6537 		    NULL, 0, &txq->txq_free,
6538 		    0, CTL_CREATE, CTL_EOL) != 0)
6539 			break;
6540 		if (sysctl_createv(log, 0, &qnode, &cnode,
6541 		    CTLFLAG_READONLY, CTLTYPE_INT,
6542 		    "txd_head", SYSCTL_DESCR("TX descriptor head"),
6543 		    wm_sysctl_tdh_handler, 0, (void *)txq,
6544 		    0, CTL_CREATE, CTL_EOL) != 0)
6545 			break;
6546 		if (sysctl_createv(log, 0, &qnode, &cnode,
6547 		    CTLFLAG_READONLY, CTLTYPE_INT,
6548 		    "txd_tail", SYSCTL_DESCR("TX descriptor tail"),
6549 		    wm_sysctl_tdt_handler, 0, (void *)txq,
6550 		    0, CTL_CREATE, CTL_EOL) != 0)
6551 			break;
6552 		if (sysctl_createv(log, 0, &qnode, &cnode,
6553 		    CTLFLAG_READONLY, CTLTYPE_INT,
6554 		    "txq_next", SYSCTL_DESCR("TX queue next"),
6555 		    NULL, 0, &txq->txq_next,
6556 		    0, CTL_CREATE, CTL_EOL) != 0)
6557 			break;
6558 		if (sysctl_createv(log, 0, &qnode, &cnode,
6559 		    CTLFLAG_READONLY, CTLTYPE_INT,
6560 		    "txq_sfree", SYSCTL_DESCR("TX queue sfree"),
6561 		    NULL, 0, &txq->txq_sfree,
6562 		    0, CTL_CREATE, CTL_EOL) != 0)
6563 			break;
6564 		if (sysctl_createv(log, 0, &qnode, &cnode,
6565 		    CTLFLAG_READONLY, CTLTYPE_INT,
6566 		    "txq_snext", SYSCTL_DESCR("TX queue snext"),
6567 		    NULL, 0, &txq->txq_snext,
6568 		    0, CTL_CREATE, CTL_EOL) != 0)
6569 			break;
6570 		if (sysctl_createv(log, 0, &qnode, &cnode,
6571 		    CTLFLAG_READONLY, CTLTYPE_INT,
6572 		    "txq_sdirty", SYSCTL_DESCR("TX queue sdirty"),
6573 		    NULL, 0, &txq->txq_sdirty,
6574 		    0, CTL_CREATE, CTL_EOL) != 0)
6575 			break;
6576 		if (sysctl_createv(log, 0, &qnode, &cnode,
6577 		    CTLFLAG_READONLY, CTLTYPE_INT,
6578 		    "txq_flags", SYSCTL_DESCR("TX queue flags"),
6579 		    NULL, 0, &txq->txq_flags,
6580 		    0, CTL_CREATE, CTL_EOL) != 0)
6581 			break;
6582 		if (sysctl_createv(log, 0, &qnode, &cnode,
6583 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
6584 		    "txq_stopping", SYSCTL_DESCR("TX queue stopping"),
6585 		    NULL, 0, &txq->txq_stopping,
6586 		    0, CTL_CREATE, CTL_EOL) != 0)
6587 			break;
6588 		if (sysctl_createv(log, 0, &qnode, &cnode,
6589 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
6590 		    "txq_sending", SYSCTL_DESCR("TX queue sending"),
6591 		    NULL, 0, &txq->txq_sending,
6592 		    0, CTL_CREATE, CTL_EOL) != 0)
6593 			break;
6594 
6595 		if (sysctl_createv(log, 0, &qnode, &cnode,
6596 		    CTLFLAG_READONLY, CTLTYPE_INT,
6597 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
6598 		    NULL, 0, &rxq->rxq_ptr,
6599 		    0, CTL_CREATE, CTL_EOL) != 0)
6600 			break;
6601 	}
6602 
6603 #ifdef WM_DEBUG
6604 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
6605 	    CTLTYPE_INT, "debug_flags",
6606 	    SYSCTL_DESCR(
6607 		    "Debug flags:\n"	\
6608 		    "\t0x01 LINK\n"	\
6609 		    "\t0x02 TX\n"	\
6610 		    "\t0x04 RX\n"	\
6611 		    "\t0x08 GMII\n"	\
6612 		    "\t0x10 MANAGE\n"	\
6613 		    "\t0x20 NVM\n"	\
6614 		    "\t0x40 INIT\n"	\
6615 		    "\t0x80 LOCK"),
6616 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
6617 	if (rv != 0)
6618 		goto teardown;
6619 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
6620 	    CTLTYPE_BOOL, "trigger_reset",
6621 	    SYSCTL_DESCR("Trigger an interface reset"),
6622 	    NULL, 0, &sc->sc_trigger_reset, 0, CTL_CREATE, CTL_EOL);
6623 	if (rv != 0)
6624 		goto teardown;
6625 #endif
6626 
6627 	return;
6628 
6629 teardown:
6630 	sysctl_teardown(log);
6631 err:
6632 	sc->sc_sysctllog = NULL;
6633 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
6634 	    __func__, rv);
6635 }
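
/*
 * The nodes created above live under hw.<ifname>.  For example, assuming
 * the device attached as wm0:
 *
 *	sysctl hw.wm0.q0.txq_free		# free TX descriptors on queue 0
 *	sysctl -w hw.wm0.txrx_workqueue=1	# use workqueue for packets
 *
 * The per-queue nodes are read-only; txrx_workqueue is read-write.
 */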
6636 
6637 static void
6638 wm_update_stats(struct wm_softc *sc)
6639 {
6640 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6641 	uint64_t crcerrs, algnerrc, symerrc, mpc, colc, sec, rlec, rxerrc,
6642 	    cexterr;
6643 	uint64_t total_qdrop = 0;
6644 
6645 	crcerrs = CSR_READ(sc, WMREG_CRCERRS);
6646 	symerrc = CSR_READ(sc, WMREG_SYMERRC);
6647 	mpc = CSR_READ(sc, WMREG_MPC);
6648 	colc = CSR_READ(sc, WMREG_COLC);
6649 	sec = CSR_READ(sc, WMREG_SEC);
6650 	rlec = CSR_READ(sc, WMREG_RLEC);
6651 
6652 	WM_EVCNT_ADD(&sc->sc_ev_crcerrs, crcerrs);
6653 	WM_EVCNT_ADD(&sc->sc_ev_symerrc, symerrc);
6654 	WM_EVCNT_ADD(&sc->sc_ev_mpc, mpc);
6655 	WM_EVCNT_ADD(&sc->sc_ev_colc, colc);
6656 	WM_EVCNT_ADD(&sc->sc_ev_sec, sec);
6657 	WM_EVCNT_ADD(&sc->sc_ev_rlec, rlec);
6658 
6659 	if (sc->sc_type >= WM_T_82543) {
6660 		algnerrc = CSR_READ(sc, WMREG_ALGNERRC);
6661 		rxerrc = CSR_READ(sc, WMREG_RXERRC);
6662 		WM_EVCNT_ADD(&sc->sc_ev_algnerrc, algnerrc);
6663 		WM_EVCNT_ADD(&sc->sc_ev_rxerrc, rxerrc);
6664 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc)) {
6665 			cexterr = CSR_READ(sc, WMREG_CEXTERR);
6666 			WM_EVCNT_ADD(&sc->sc_ev_cexterr, cexterr);
6667 		} else {
6668 			cexterr = 0;
6669 			/* Excessive collision + Link down */
6670 			WM_EVCNT_ADD(&sc->sc_ev_htdpmc,
6671 			    CSR_READ(sc, WMREG_HTDPMC));
6672 		}
6673 
6674 		WM_EVCNT_ADD(&sc->sc_ev_tncrs, CSR_READ(sc, WMREG_TNCRS));
6675 		WM_EVCNT_ADD(&sc->sc_ev_tsctc, CSR_READ(sc, WMREG_TSCTC));
6676 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
6677 			WM_EVCNT_ADD(&sc->sc_ev_tsctfc,
6678 			    CSR_READ(sc, WMREG_TSCTFC));
6679 		else {
6680 			WM_EVCNT_ADD(&sc->sc_ev_cbrdpc,
6681 			    CSR_READ(sc, WMREG_CBRDPC));
6682 			WM_EVCNT_ADD(&sc->sc_ev_cbrmpc,
6683 			    CSR_READ(sc, WMREG_CBRMPC));
6684 		}
6685 	} else
6686 		algnerrc = rxerrc = cexterr = 0;
6687 
6688 	if (sc->sc_type >= WM_T_82542_2_1) {
6689 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
6690 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
6691 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
6692 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
6693 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
6694 	}
6695 
6696 	WM_EVCNT_ADD(&sc->sc_ev_scc, CSR_READ(sc, WMREG_SCC));
6697 	WM_EVCNT_ADD(&sc->sc_ev_ecol, CSR_READ(sc, WMREG_ECOL));
6698 	WM_EVCNT_ADD(&sc->sc_ev_mcc, CSR_READ(sc, WMREG_MCC));
6699 	WM_EVCNT_ADD(&sc->sc_ev_latecol, CSR_READ(sc, WMREG_LATECOL));
6700 
6701 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
6702 		WM_EVCNT_ADD(&sc->sc_ev_cbtmpc, CSR_READ(sc, WMREG_CBTMPC));
6703 	}
6704 
6705 	WM_EVCNT_ADD(&sc->sc_ev_dc, CSR_READ(sc, WMREG_DC));
6706 	WM_EVCNT_ADD(&sc->sc_ev_prc64, CSR_READ(sc, WMREG_PRC64));
6707 	WM_EVCNT_ADD(&sc->sc_ev_prc127, CSR_READ(sc, WMREG_PRC127));
6708 	WM_EVCNT_ADD(&sc->sc_ev_prc255, CSR_READ(sc, WMREG_PRC255));
6709 	WM_EVCNT_ADD(&sc->sc_ev_prc511, CSR_READ(sc, WMREG_PRC511));
6710 	WM_EVCNT_ADD(&sc->sc_ev_prc1023, CSR_READ(sc, WMREG_PRC1023));
6711 	WM_EVCNT_ADD(&sc->sc_ev_prc1522, CSR_READ(sc, WMREG_PRC1522));
6712 	WM_EVCNT_ADD(&sc->sc_ev_gprc, CSR_READ(sc, WMREG_GPRC));
6713 	WM_EVCNT_ADD(&sc->sc_ev_bprc, CSR_READ(sc, WMREG_BPRC));
6714 	WM_EVCNT_ADD(&sc->sc_ev_mprc, CSR_READ(sc, WMREG_MPRC));
6715 	WM_EVCNT_ADD(&sc->sc_ev_gptc, CSR_READ(sc, WMREG_GPTC));
6716 
6717 	WM_EVCNT_ADD(&sc->sc_ev_gorc,
6718 	    CSR_READ(sc, WMREG_GORCL) +
6719 	    ((uint64_t)CSR_READ(sc, WMREG_GORCH) << 32));
6720 	WM_EVCNT_ADD(&sc->sc_ev_gotc,
6721 	    CSR_READ(sc, WMREG_GOTCL) +
6722 	    ((uint64_t)CSR_READ(sc, WMREG_GOTCH) << 32));
6723 
6724 	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
6725 	WM_EVCNT_ADD(&sc->sc_ev_ruc, CSR_READ(sc, WMREG_RUC));
6726 	WM_EVCNT_ADD(&sc->sc_ev_rfc, CSR_READ(sc, WMREG_RFC));
6727 	WM_EVCNT_ADD(&sc->sc_ev_roc, CSR_READ(sc, WMREG_ROC));
6728 	WM_EVCNT_ADD(&sc->sc_ev_rjc, CSR_READ(sc, WMREG_RJC));
6729 
6730 	if (sc->sc_type >= WM_T_82540) {
6731 		WM_EVCNT_ADD(&sc->sc_ev_mgtprc, CSR_READ(sc, WMREG_MGTPRC));
6732 		WM_EVCNT_ADD(&sc->sc_ev_mgtpdc, CSR_READ(sc, WMREG_MGTPDC));
6733 		WM_EVCNT_ADD(&sc->sc_ev_mgtptc, CSR_READ(sc, WMREG_MGTPTC));
6734 	}
6735 
6736 	/*
6737 	 * The TOR(L) register includes:
6738 	 *  - Error
6739 	 *  - Flow control
6740 	 *  - Broadcast rejected (This note is described in 82574 and newer
6741 	 *    datasheets. What does "broadcast rejected" mean?)
6742 	 */
6743 	WM_EVCNT_ADD(&sc->sc_ev_tor,
6744 	    CSR_READ(sc, WMREG_TORL) +
6745 	    ((uint64_t)CSR_READ(sc, WMREG_TORH) << 32));
6746 	WM_EVCNT_ADD(&sc->sc_ev_tot,
6747 	    CSR_READ(sc, WMREG_TOTL) +
6748 	    ((uint64_t)CSR_READ(sc, WMREG_TOTH) << 32));
6749 
6750 	WM_EVCNT_ADD(&sc->sc_ev_tpr, CSR_READ(sc, WMREG_TPR));
6751 	WM_EVCNT_ADD(&sc->sc_ev_tpt, CSR_READ(sc, WMREG_TPT));
6752 	WM_EVCNT_ADD(&sc->sc_ev_ptc64, CSR_READ(sc, WMREG_PTC64));
6753 	WM_EVCNT_ADD(&sc->sc_ev_ptc127, CSR_READ(sc, WMREG_PTC127));
6754 	WM_EVCNT_ADD(&sc->sc_ev_ptc255, CSR_READ(sc, WMREG_PTC255));
6755 	WM_EVCNT_ADD(&sc->sc_ev_ptc511, CSR_READ(sc, WMREG_PTC511));
6756 	WM_EVCNT_ADD(&sc->sc_ev_ptc1023, CSR_READ(sc, WMREG_PTC1023));
6757 	WM_EVCNT_ADD(&sc->sc_ev_ptc1522, CSR_READ(sc, WMREG_PTC1522));
6758 	WM_EVCNT_ADD(&sc->sc_ev_mptc, CSR_READ(sc, WMREG_MPTC));
6759 	WM_EVCNT_ADD(&sc->sc_ev_bptc, CSR_READ(sc, WMREG_BPTC));
6760 	if (sc->sc_type >= WM_T_82571)
6761 		WM_EVCNT_ADD(&sc->sc_ev_iac, CSR_READ(sc, WMREG_IAC));
6762 	if (sc->sc_type < WM_T_82575) {
6763 		WM_EVCNT_ADD(&sc->sc_ev_icrxptc, CSR_READ(sc, WMREG_ICRXPTC));
6764 		WM_EVCNT_ADD(&sc->sc_ev_icrxatc, CSR_READ(sc, WMREG_ICRXATC));
6765 		WM_EVCNT_ADD(&sc->sc_ev_ictxptc, CSR_READ(sc, WMREG_ICTXPTC));
6766 		WM_EVCNT_ADD(&sc->sc_ev_ictxatc, CSR_READ(sc, WMREG_ICTXATC));
6767 		WM_EVCNT_ADD(&sc->sc_ev_ictxqec, CSR_READ(sc, WMREG_ICTXQEC));
6768 		WM_EVCNT_ADD(&sc->sc_ev_ictxqmtc,
6769 		    CSR_READ(sc, WMREG_ICTXQMTC));
6770 		WM_EVCNT_ADD(&sc->sc_ev_rxdmtc,
6771 		    CSR_READ(sc, WMREG_ICRXDMTC));
6772 		WM_EVCNT_ADD(&sc->sc_ev_icrxoc, CSR_READ(sc, WMREG_ICRXOC));
6773 	} else if (!WM_IS_ICHPCH(sc)) {
6774 		WM_EVCNT_ADD(&sc->sc_ev_rpthc, CSR_READ(sc, WMREG_RPTHC));
6775 		WM_EVCNT_ADD(&sc->sc_ev_debug1, CSR_READ(sc, WMREG_DEBUG1));
6776 		WM_EVCNT_ADD(&sc->sc_ev_debug2, CSR_READ(sc, WMREG_DEBUG2));
6777 		WM_EVCNT_ADD(&sc->sc_ev_debug3, CSR_READ(sc, WMREG_DEBUG3));
6778 		WM_EVCNT_ADD(&sc->sc_ev_hgptc,  CSR_READ(sc, WMREG_HGPTC));
6779 		WM_EVCNT_ADD(&sc->sc_ev_debug4, CSR_READ(sc, WMREG_DEBUG4));
6780 		WM_EVCNT_ADD(&sc->sc_ev_rxdmtc, CSR_READ(sc, WMREG_RXDMTC));
6781 		WM_EVCNT_ADD(&sc->sc_ev_htcbdpc, CSR_READ(sc, WMREG_HTCBDPC));
6782 
6783 		WM_EVCNT_ADD(&sc->sc_ev_hgorc,
6784 		    CSR_READ(sc, WMREG_HGORCL) +
6785 		    ((uint64_t)CSR_READ(sc, WMREG_HGORCH) << 32));
6786 		WM_EVCNT_ADD(&sc->sc_ev_hgotc,
6787 		    CSR_READ(sc, WMREG_HGOTCL) +
6788 		    ((uint64_t)CSR_READ(sc, WMREG_HGOTCH) << 32));
6789 		WM_EVCNT_ADD(&sc->sc_ev_lenerrs, CSR_READ(sc, WMREG_LENERRS));
6790 		WM_EVCNT_ADD(&sc->sc_ev_scvpc, CSR_READ(sc, WMREG_SCVPC));
6791 		WM_EVCNT_ADD(&sc->sc_ev_hrmpc, CSR_READ(sc, WMREG_HRMPC));
6792 #ifdef WM_EVENT_COUNTERS
6793 		for (int i = 0; i < sc->sc_nqueues; i++) {
6794 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6795 			uint32_t rqdpc;
6796 
6797 			rqdpc = CSR_READ(sc, WMREG_RQDPC(i));
6798 			/*
6799 			 * On I210 and newer devices, the RQDPC register is not
6800 			 * cleared on read.
6801 			 */
6802 			if ((rqdpc != 0) && (sc->sc_type >= WM_T_I210))
6803 				CSR_WRITE(sc, WMREG_RQDPC(i), 0);
6804 			WM_Q_EVCNT_ADD(rxq, qdrop, rqdpc);
6805 			total_qdrop += rqdpc;
6806 		}
6807 #endif
6808 	}
6809 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
6810 		WM_EVCNT_ADD(&sc->sc_ev_tlpic, CSR_READ(sc, WMREG_TLPIC));
6811 		WM_EVCNT_ADD(&sc->sc_ev_rlpic, CSR_READ(sc, WMREG_RLPIC));
6812 		if ((CSR_READ(sc, WMREG_MANC) & MANC_EN_BMC2OS) != 0) {
6813 			WM_EVCNT_ADD(&sc->sc_ev_b2ogprc,
6814 			    CSR_READ(sc, WMREG_B2OGPRC));
6815 			WM_EVCNT_ADD(&sc->sc_ev_o2bspc,
6816 			    CSR_READ(sc, WMREG_O2BSPC));
6817 			WM_EVCNT_ADD(&sc->sc_ev_b2ospc,
6818 			    CSR_READ(sc, WMREG_B2OSPC));
6819 			WM_EVCNT_ADD(&sc->sc_ev_o2bgptc,
6820 			    CSR_READ(sc, WMREG_O2BGPTC));
6821 		}
6822 	}
6823 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
6824 	if_statadd_ref(ifp, nsr, if_collisions, colc);
6825 	if_statadd_ref(ifp, nsr, if_ierrors,
6826 	    crcerrs + algnerrc + symerrc + rxerrc + sec + cexterr + rlec);
6827 	/*
6828 	 * WMREG_RNBC is incremented when there are no available buffers in
6829 	 * host memory. It is not the number of dropped packets, because the
6830 	 * controller can still receive packets in that case as long as
6831 	 * there is space in the PHY's FIFO.
6832 	 *
6833 	 * To track WMREG_RNBC, use a dedicated EVCNT instead of
6834 	 * if_iqdrops.
6835 	 */
6836 	if_statadd_ref(ifp, nsr, if_iqdrops, mpc + total_qdrop);
6837 	IF_STAT_PUTREF(ifp);
6838 }
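
/*
 * The 64-bit octet counters (GORC, GOTC, TOR, TOT, HGORC, HGOTC) are
 * split into low/high register pairs, combined above as:
 *
 *	low + ((uint64_t)high << 32)
 *
 * '+' and '|' are equivalent here because the two halves occupy
 * disjoint bit ranges.
 */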
6839 
6840 void
6841 wm_clear_evcnt(struct wm_softc *sc)
6842 {
6843 #ifdef WM_EVENT_COUNTERS
6844 	int i;
6845 
6846 	/* RX queues */
6847 	for (i = 0; i < sc->sc_nqueues; i++) {
6848 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6849 
6850 		WM_Q_EVCNT_STORE(rxq, intr, 0);
6851 		WM_Q_EVCNT_STORE(rxq, defer, 0);
6852 		WM_Q_EVCNT_STORE(rxq, ipsum, 0);
6853 		WM_Q_EVCNT_STORE(rxq, tusum, 0);
6854 		if ((sc->sc_type >= WM_T_82575) && !WM_IS_ICHPCH(sc))
6855 			WM_Q_EVCNT_STORE(rxq, qdrop, 0);
6856 	}
6857 
6858 	/* TX queues */
6859 	for (i = 0; i < sc->sc_nqueues; i++) {
6860 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6861 		int j;
6862 
6863 		WM_Q_EVCNT_STORE(txq, txsstall, 0);
6864 		WM_Q_EVCNT_STORE(txq, txdstall, 0);
6865 		WM_Q_EVCNT_STORE(txq, fifo_stall, 0);
6866 		WM_Q_EVCNT_STORE(txq, txdw, 0);
6867 		WM_Q_EVCNT_STORE(txq, txqe, 0);
6868 		WM_Q_EVCNT_STORE(txq, ipsum, 0);
6869 		WM_Q_EVCNT_STORE(txq, tusum, 0);
6870 		WM_Q_EVCNT_STORE(txq, tusum6, 0);
6871 		WM_Q_EVCNT_STORE(txq, tso, 0);
6872 		WM_Q_EVCNT_STORE(txq, tso6, 0);
6873 		WM_Q_EVCNT_STORE(txq, tsopain, 0);
6874 
6875 		for (j = 0; j < WM_NTXSEGS; j++)
6876 			WM_EVCNT_STORE(&txq->txq_ev_txseg[j], 0);
6877 
6878 		WM_Q_EVCNT_STORE(txq, pcqdrop, 0);
6879 		WM_Q_EVCNT_STORE(txq, descdrop, 0);
6880 		WM_Q_EVCNT_STORE(txq, toomanyseg, 0);
6881 		WM_Q_EVCNT_STORE(txq, defrag, 0);
6882 		if (sc->sc_type <= WM_T_82544)
6883 			WM_Q_EVCNT_STORE(txq, underrun, 0);
6884 		WM_Q_EVCNT_STORE(txq, skipcontext, 0);
6885 	}
6886 
6887 	/* Miscs */
6888 	WM_EVCNT_STORE(&sc->sc_ev_linkintr, 0);
6889 
6890 	WM_EVCNT_STORE(&sc->sc_ev_crcerrs, 0);
6891 	WM_EVCNT_STORE(&sc->sc_ev_symerrc, 0);
6892 	WM_EVCNT_STORE(&sc->sc_ev_mpc, 0);
6893 	WM_EVCNT_STORE(&sc->sc_ev_colc, 0);
6894 	WM_EVCNT_STORE(&sc->sc_ev_sec, 0);
6895 	WM_EVCNT_STORE(&sc->sc_ev_rlec, 0);
6896 
6897 	if (sc->sc_type >= WM_T_82543) {
6898 		WM_EVCNT_STORE(&sc->sc_ev_algnerrc, 0);
6899 		WM_EVCNT_STORE(&sc->sc_ev_rxerrc, 0);
6900 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
6901 			WM_EVCNT_STORE(&sc->sc_ev_cexterr, 0);
6902 		else
6903 			WM_EVCNT_STORE(&sc->sc_ev_htdpmc, 0);
6904 
6905 		WM_EVCNT_STORE(&sc->sc_ev_tncrs, 0);
6906 		WM_EVCNT_STORE(&sc->sc_ev_tsctc, 0);
6907 		if ((sc->sc_type < WM_T_82575) || WM_IS_ICHPCH(sc))
6908 			WM_EVCNT_STORE(&sc->sc_ev_tsctfc, 0);
6909 		else {
6910 			WM_EVCNT_STORE(&sc->sc_ev_cbrdpc, 0);
6911 			WM_EVCNT_STORE(&sc->sc_ev_cbrmpc, 0);
6912 		}
6913 	}
6914 
6915 	if (sc->sc_type >= WM_T_82542_2_1) {
6916 		WM_EVCNT_STORE(&sc->sc_ev_tx_xoff, 0);
6917 		WM_EVCNT_STORE(&sc->sc_ev_tx_xon, 0);
6918 		WM_EVCNT_STORE(&sc->sc_ev_rx_xoff, 0);
6919 		WM_EVCNT_STORE(&sc->sc_ev_rx_xon, 0);
6920 		WM_EVCNT_STORE(&sc->sc_ev_rx_macctl, 0);
6921 	}
6922 
6923 	WM_EVCNT_STORE(&sc->sc_ev_scc, 0);
6924 	WM_EVCNT_STORE(&sc->sc_ev_ecol, 0);
6925 	WM_EVCNT_STORE(&sc->sc_ev_mcc, 0);
6926 	WM_EVCNT_STORE(&sc->sc_ev_latecol, 0);
6927 
6928 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc))
6929 		WM_EVCNT_STORE(&sc->sc_ev_cbtmpc, 0);
6930 
6931 	WM_EVCNT_STORE(&sc->sc_ev_dc, 0);
6932 	WM_EVCNT_STORE(&sc->sc_ev_prc64, 0);
6933 	WM_EVCNT_STORE(&sc->sc_ev_prc127, 0);
6934 	WM_EVCNT_STORE(&sc->sc_ev_prc255, 0);
6935 	WM_EVCNT_STORE(&sc->sc_ev_prc511, 0);
6936 	WM_EVCNT_STORE(&sc->sc_ev_prc1023, 0);
6937 	WM_EVCNT_STORE(&sc->sc_ev_prc1522, 0);
6938 	WM_EVCNT_STORE(&sc->sc_ev_gprc, 0);
6939 	WM_EVCNT_STORE(&sc->sc_ev_bprc, 0);
6940 	WM_EVCNT_STORE(&sc->sc_ev_mprc, 0);
6941 	WM_EVCNT_STORE(&sc->sc_ev_gptc, 0);
6942 	WM_EVCNT_STORE(&sc->sc_ev_gorc, 0);
6943 	WM_EVCNT_STORE(&sc->sc_ev_gotc, 0);
6944 	WM_EVCNT_STORE(&sc->sc_ev_rnbc, 0);
6945 	WM_EVCNT_STORE(&sc->sc_ev_ruc, 0);
6946 	WM_EVCNT_STORE(&sc->sc_ev_rfc, 0);
6947 	WM_EVCNT_STORE(&sc->sc_ev_roc, 0);
6948 	WM_EVCNT_STORE(&sc->sc_ev_rjc, 0);
6949 	if (sc->sc_type >= WM_T_82540) {
6950 		WM_EVCNT_STORE(&sc->sc_ev_mgtprc, 0);
6951 		WM_EVCNT_STORE(&sc->sc_ev_mgtpdc, 0);
6952 		WM_EVCNT_STORE(&sc->sc_ev_mgtptc, 0);
6953 	}
6954 	WM_EVCNT_STORE(&sc->sc_ev_tor, 0);
6955 	WM_EVCNT_STORE(&sc->sc_ev_tot, 0);
6956 	WM_EVCNT_STORE(&sc->sc_ev_tpr, 0);
6957 	WM_EVCNT_STORE(&sc->sc_ev_tpt, 0);
6958 	WM_EVCNT_STORE(&sc->sc_ev_ptc64, 0);
6959 	WM_EVCNT_STORE(&sc->sc_ev_ptc127, 0);
6960 	WM_EVCNT_STORE(&sc->sc_ev_ptc255, 0);
6961 	WM_EVCNT_STORE(&sc->sc_ev_ptc511, 0);
6962 	WM_EVCNT_STORE(&sc->sc_ev_ptc1023, 0);
6963 	WM_EVCNT_STORE(&sc->sc_ev_ptc1522, 0);
6964 	WM_EVCNT_STORE(&sc->sc_ev_mptc, 0);
6965 	WM_EVCNT_STORE(&sc->sc_ev_bptc, 0);
6966 	if (sc->sc_type >= WM_T_82571)
6967 		WM_EVCNT_STORE(&sc->sc_ev_iac, 0);
6968 	if (sc->sc_type < WM_T_82575) {
6969 		WM_EVCNT_STORE(&sc->sc_ev_icrxptc, 0);
6970 		WM_EVCNT_STORE(&sc->sc_ev_icrxatc, 0);
6971 		WM_EVCNT_STORE(&sc->sc_ev_ictxptc, 0);
6972 		WM_EVCNT_STORE(&sc->sc_ev_ictxatc, 0);
6973 		WM_EVCNT_STORE(&sc->sc_ev_ictxqec, 0);
6974 		WM_EVCNT_STORE(&sc->sc_ev_ictxqmtc, 0);
6975 		WM_EVCNT_STORE(&sc->sc_ev_rxdmtc, 0);
6976 		WM_EVCNT_STORE(&sc->sc_ev_icrxoc, 0);
6977 	} else if (!WM_IS_ICHPCH(sc)) {
6978 		WM_EVCNT_STORE(&sc->sc_ev_rpthc, 0);
6979 		WM_EVCNT_STORE(&sc->sc_ev_debug1, 0);
6980 		WM_EVCNT_STORE(&sc->sc_ev_debug2, 0);
6981 		WM_EVCNT_STORE(&sc->sc_ev_debug3, 0);
6982 		WM_EVCNT_STORE(&sc->sc_ev_hgptc, 0);
6983 		WM_EVCNT_STORE(&sc->sc_ev_debug4, 0);
6984 		WM_EVCNT_STORE(&sc->sc_ev_rxdmtc, 0);
6985 		WM_EVCNT_STORE(&sc->sc_ev_htcbdpc, 0);
6986 
6987 		WM_EVCNT_STORE(&sc->sc_ev_hgorc, 0);
6988 		WM_EVCNT_STORE(&sc->sc_ev_hgotc, 0);
6989 		WM_EVCNT_STORE(&sc->sc_ev_lenerrs, 0);
6990 		WM_EVCNT_STORE(&sc->sc_ev_scvpc, 0);
6991 		WM_EVCNT_STORE(&sc->sc_ev_hrmpc, 0);
6992 	}
6993 	if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
6994 		WM_EVCNT_STORE(&sc->sc_ev_tlpic, 0);
6995 		WM_EVCNT_STORE(&sc->sc_ev_rlpic, 0);
6996 		WM_EVCNT_STORE(&sc->sc_ev_b2ogprc, 0);
6997 		WM_EVCNT_STORE(&sc->sc_ev_o2bspc, 0);
6998 		WM_EVCNT_STORE(&sc->sc_ev_b2ospc, 0);
6999 		WM_EVCNT_STORE(&sc->sc_ev_o2bgptc, 0);
7000 	}
7001 #endif
7002 }
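
/*
 * The event counters cleared above can be inspected from userland with
 * vmstat(1), e.g. "vmstat -e | grep wm0" (assuming the device attached
 * as wm0).
 */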
7003 
7004 /*
7005  * wm_init:		[ifnet interface function]
7006  *
7007  *	Initialize the interface.
7008  */
7009 static int
7010 wm_init(struct ifnet *ifp)
7011 {
7012 	struct wm_softc *sc = ifp->if_softc;
7013 	int ret;
7014 
7015 	KASSERT(IFNET_LOCKED(ifp));
7016 
7017 	if (sc->sc_dying)
7018 		return ENXIO;
7019 
7020 	mutex_enter(sc->sc_core_lock);
7021 	ret = wm_init_locked(ifp);
7022 	mutex_exit(sc->sc_core_lock);
7023 
7024 	return ret;
7025 }
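
/*
 * Note the lock order: the ifnet lock is held by the caller before
 * sc_core_lock is taken here; wm_init_locked() asserts both.
 */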
7026 
7027 static int
7028 wm_init_locked(struct ifnet *ifp)
7029 {
7030 	struct wm_softc *sc = ifp->if_softc;
7031 	struct ethercom *ec = &sc->sc_ethercom;
7032 	int i, j, trynum, error = 0;
7033 	uint32_t reg, sfp_mask = 0;
7034 
7035 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
7036 		device_xname(sc->sc_dev), __func__));
7037 	KASSERT(IFNET_LOCKED(ifp));
7038 	KASSERT(mutex_owned(sc->sc_core_lock));
7039 
7040 	/*
7041 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
7042 	 * There is a small but measurable benefit to avoiding the adjustment
7043 	 * of the descriptor so that the headers are aligned, for normal mtu,
7044 	 * on such platforms.  One possibility is that the DMA itself is
7045 	 * slightly more efficient if the front of the entire packet (instead
7046 	 * of the front of the headers) is aligned.
7047 	 *
7048 	 * Note we must always set align_tweak to 0 if we are using
7049 	 * jumbo frames.
7050 	 */
7051 #ifdef __NO_STRICT_ALIGNMENT
7052 	sc->sc_align_tweak = 0;
7053 #else
7054 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
7055 		sc->sc_align_tweak = 0;
7056 	else
7057 		sc->sc_align_tweak = 2;
7058 #endif /* __NO_STRICT_ALIGNMENT */
7059 
7060 	/* Cancel any pending I/O. */
7061 	wm_stop_locked(ifp, false, false);
7062 
7063 	/* Update statistics before reset */
7064 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
7065 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
7066 
7067 	/* >= PCH_SPT hardware workaround before reset. */
7068 	if (sc->sc_type >= WM_T_PCH_SPT)
7069 		wm_flush_desc_rings(sc);
7070 
7071 	/* Reset the chip to a known state. */
7072 	wm_reset(sc);
7073 
7074 	/*
7075 	 * AMT based hardware can now take control from firmware
7076 	 * Do this after reset.
7077 	 */
7078 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
7079 		wm_get_hw_control(sc);
7080 
7081 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
7082 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
7083 		wm_legacy_irq_quirk_spt(sc);
7084 
7085 	/* Init hardware bits */
7086 	wm_initialize_hardware_bits(sc);
7087 
7088 	/* Reset the PHY. */
7089 	if (sc->sc_flags & WM_F_HAS_MII)
7090 		wm_gmii_reset(sc);
7091 
7092 	if (sc->sc_type >= WM_T_ICH8) {
7093 		reg = CSR_READ(sc, WMREG_GCR);
7094 		/*
7095 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
7096 		 * default after reset.
7097 		 */
7098 		if (sc->sc_type == WM_T_ICH8)
7099 			reg |= GCR_NO_SNOOP_ALL;
7100 		else
7101 			reg &= ~GCR_NO_SNOOP_ALL;
7102 		CSR_WRITE(sc, WMREG_GCR, reg);
7103 	}
7104 
7105 	/* Ungate DMA clock to avoid packet loss */
7106 	if (sc->sc_type >= WM_T_PCH_TGP) {
7107 		reg = CSR_READ(sc, WMREG_FFLT_DBG);
7108 		reg |= (1 << 12);
7109 		CSR_WRITE(sc, WMREG_FFLT_DBG, reg);
7110 	}
7111 
7112 	if ((sc->sc_type >= WM_T_ICH8)
7113 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
7114 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
7115 
7116 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
7117 		reg |= CTRL_EXT_RO_DIS;
7118 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7119 	}
7120 
7121 	/* Calculate (E)ITR value */
7122 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
7123 		/*
7124 		 * For NEWQUEUE's EITR (except for 82575).
7125 		 * The 82575's EITR should be set to the same throttling value
7126 		 * as the older controllers' ITR because the interrupts/sec
7127 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
7128 		 *
7129 		 * The 82574's EITR should be set to the same throttling value
7130 		 * as the ITR.
7131 		 *
7132 		 * For N ints/sec, set this to 1,000,000 / N, unlike the ITR.
7133 		 */
7134 		sc->sc_itr_init = 450;
7135 	} else if (sc->sc_type >= WM_T_82543) {
7136 		/*
7137 		 * Set up the interrupt throttling register (units of 256ns)
7138 		 * Note that a footnote in Intel's documentation says this
7139 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
7140 		 * or 10Mbit mode.  Empirically, it appears to be the case
7141 		 * that that is also true for the 1024ns units of the other
7142 		 * interrupt-related timer registers -- so, really, we ought
7143 		 * to divide this value by 4 when the link speed is low.
7144 		 *
7145 		 * XXX implement this division at link speed change!
7146 		 */
7147 
7148 		/*
7149 		 * For N interrupts/sec, set this value to:
7150 		 * 1,000,000,000 / (N * 256).  Note that we set the
7151 		 * absolute and packet timer values to this value
7152 		 * divided by 4 to get "simple timer" behavior.
7153 		 */
7154 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
7155 	}
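
	/*
	 * Worked example of the two unit systems implied by the comments
	 * above: with EITR in units of 1 us, sc_itr_init = 450 gives
	 * 1,000,000 / 450 ~= 2222 ints/sec; with ITR in units of 256 ns,
	 * sc_itr_init = 1500 gives 1,000,000,000 / (1500 * 256) ~= 2604
	 * ints/sec.
	 */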
7156 
7157 	error = wm_init_txrx_queues(sc);
7158 	if (error)
7159 		goto out;
7160 
7161 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
7162 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
7163 	    (sc->sc_type >= WM_T_82575))
7164 		wm_serdes_power_up_link_82575(sc);
7165 
7166 	/* Clear out the VLAN table -- we don't use it (yet). */
7167 	CSR_WRITE(sc, WMREG_VET, 0);
7168 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
7169 		trynum = 10; /* Due to hw errata */
7170 	else
7171 		trynum = 1;
7172 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
7173 		for (j = 0; j < trynum; j++)
7174 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
7175 
7176 	/*
7177 	 * Set up flow-control parameters.
7178 	 *
7179 	 * XXX Values could probably stand some tuning.
7180 	 */
7181 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
7182 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
7183 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
7184 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)
7185 	    && (sc->sc_type != WM_T_PCH_TGP)) {
7186 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
7187 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
7188 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
7189 	}
7190 
7191 	sc->sc_fcrtl = FCRTL_DFLT;
7192 	if (sc->sc_type < WM_T_82543) {
7193 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
7194 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
7195 	} else {
7196 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
7197 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
7198 	}
7199 
7200 	if (sc->sc_type == WM_T_80003)
7201 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
7202 	else
7203 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
7204 
7205 	/* Writes the control register. */
7206 	wm_set_vlan(sc);
7207 
7208 	if (sc->sc_flags & WM_F_HAS_MII) {
7209 		uint16_t kmreg;
7210 
7211 		switch (sc->sc_type) {
7212 		case WM_T_80003:
7213 		case WM_T_ICH8:
7214 		case WM_T_ICH9:
7215 		case WM_T_ICH10:
7216 		case WM_T_PCH:
7217 		case WM_T_PCH2:
7218 		case WM_T_PCH_LPT:
7219 		case WM_T_PCH_SPT:
7220 		case WM_T_PCH_CNP:
7221 		case WM_T_PCH_TGP:
7222 			/*
7223 			 * Set the mac to wait the maximum time between each
7224 			 * iteration and increase the max iterations when
7225 			 * polling the phy; this fixes erroneous timeouts at
7226 			 * 10Mbps.
7227 			 */
7228 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
7229 			    0xFFFF);
7230 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
7231 			    &kmreg);
7232 			kmreg |= 0x3F;
7233 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
7234 			    kmreg);
7235 			break;
7236 		default:
7237 			break;
7238 		}
7239 
7240 		if (sc->sc_type == WM_T_80003) {
7241 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
7242 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
7243 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7244 
7245 			/* Bypass RX and TX FIFOs */
7246 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
7247 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
7248 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
7249 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
7250 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
7251 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
7252 		}
7253 	}
7254 #if 0
7255 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
7256 #endif
7257 
7258 	/* Set up checksum offload parameters. */
7259 	reg = CSR_READ(sc, WMREG_RXCSUM);
7260 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
7261 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
7262 		reg |= RXCSUM_IPOFL;
7263 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
7264 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
7265 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
7266 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
7267 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
7268 
7269 	/* Set registers about MSI-X */
7270 	if (wm_is_using_msix(sc)) {
7271 		uint32_t ivar, qintr_idx;
7272 		struct wm_queue *wmq;
7273 		unsigned int qid;
7274 
7275 		if (sc->sc_type == WM_T_82575) {
7276 			/* Interrupt control */
7277 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
7278 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
7279 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7280 
7281 			/* TX and RX */
7282 			for (i = 0; i < sc->sc_nqueues; i++) {
7283 				wmq = &sc->sc_queue[i];
7284 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
7285 				    EITR_TX_QUEUE(wmq->wmq_id)
7286 				    | EITR_RX_QUEUE(wmq->wmq_id));
7287 			}
7288 			/* Link status */
7289 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
7290 			    EITR_OTHER);
7291 		} else if (sc->sc_type == WM_T_82574) {
7292 			/* Interrupt control */
7293 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
7294 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
7295 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7296 
7297 			/*
7298 			 * Work around issue with spurious interrupts
7299 			 * in MSI-X mode.
7300 			 * At wm_initialize_hardware_bits(), sc_nintrs is not
7301 			 * yet initialized, so re-initialize WMREG_RFCTL here.
7302 			 */
7303 			reg = CSR_READ(sc, WMREG_RFCTL);
7304 			reg |= WMREG_RFCTL_ACKDIS;
7305 			CSR_WRITE(sc, WMREG_RFCTL, reg);
7306 
7307 			ivar = 0;
7308 			/* TX and RX */
7309 			for (i = 0; i < sc->sc_nqueues; i++) {
7310 				wmq = &sc->sc_queue[i];
7311 				qid = wmq->wmq_id;
7312 				qintr_idx = wmq->wmq_intr_idx;
7313 
7314 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
7315 				    IVAR_TX_MASK_Q_82574(qid));
7316 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
7317 				    IVAR_RX_MASK_Q_82574(qid));
7318 			}
7319 			/* Link status */
7320 			ivar |= __SHIFTIN((IVAR_VALID_82574
7321 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
7322 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
7323 		} else {
7324 			/* Interrupt control */
7325 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
7326 			    | GPIE_EIAME | GPIE_PBA);
7327 
7328 			switch (sc->sc_type) {
7329 			case WM_T_82580:
7330 			case WM_T_I350:
7331 			case WM_T_I354:
7332 			case WM_T_I210:
7333 			case WM_T_I211:
7334 				/* TX and RX */
7335 				for (i = 0; i < sc->sc_nqueues; i++) {
7336 					wmq = &sc->sc_queue[i];
7337 					qid = wmq->wmq_id;
7338 					qintr_idx = wmq->wmq_intr_idx;
7339 
7340 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
7341 					ivar &= ~IVAR_TX_MASK_Q(qid);
7342 					ivar |= __SHIFTIN((qintr_idx
7343 						| IVAR_VALID),
7344 					    IVAR_TX_MASK_Q(qid));
7345 					ivar &= ~IVAR_RX_MASK_Q(qid);
7346 					ivar |= __SHIFTIN((qintr_idx
7347 						| IVAR_VALID),
7348 					    IVAR_RX_MASK_Q(qid));
7349 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
7350 				}
7351 				break;
7352 			case WM_T_82576:
7353 				/* TX and RX */
7354 				for (i = 0; i < sc->sc_nqueues; i++) {
7355 					wmq = &sc->sc_queue[i];
7356 					qid = wmq->wmq_id;
7357 					qintr_idx = wmq->wmq_intr_idx;
7358 
7359 					ivar = CSR_READ(sc,
7360 					    WMREG_IVAR_Q_82576(qid));
7361 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
7362 					ivar |= __SHIFTIN((qintr_idx
7363 						| IVAR_VALID),
7364 					    IVAR_TX_MASK_Q_82576(qid));
7365 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
7366 					ivar |= __SHIFTIN((qintr_idx
7367 						| IVAR_VALID),
7368 					    IVAR_RX_MASK_Q_82576(qid));
7369 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
7370 					    ivar);
7371 				}
7372 				break;
7373 			default:
7374 				break;
7375 			}
7376 
7377 			/* Link status */
7378 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
7379 			    IVAR_MISC_OTHER);
7380 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
7381 		}
7382 
7383 		if (wm_is_using_multiqueue(sc)) {
7384 			wm_init_rss(sc);
7385 
7386 			/*
7387 			 * NOTE: Receive full-packet checksum offload is
7388 			 * mutually exclusive with multiqueue. However, this
7389 			 * is not the same as the TCP/IP checksum offloads,
7390 			 * which still work.
7391 			 */
7392 			reg = CSR_READ(sc, WMREG_RXCSUM);
7393 			reg |= RXCSUM_PCSD;
7394 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
7395 		}
7396 	}
7397 
7398 	/* Set up the interrupt registers. */
7399 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
7400 
7401 	/* Enable SFP module insertion interrupt if it's required */
7402 	if ((sc->sc_flags & WM_F_SFP) != 0) {
7403 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
7404 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7405 		sfp_mask = ICR_GPI(0);
7406 	}
7407 
7408 	if (wm_is_using_msix(sc)) {
7409 		uint32_t mask;
7410 		struct wm_queue *wmq;
7411 
7412 		switch (sc->sc_type) {
7413 		case WM_T_82574:
7414 			mask = 0;
7415 			for (i = 0; i < sc->sc_nqueues; i++) {
7416 				wmq = &sc->sc_queue[i];
7417 				mask |= ICR_TXQ(wmq->wmq_id);
7418 				mask |= ICR_RXQ(wmq->wmq_id);
7419 			}
7420 			mask |= ICR_OTHER;
7421 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
7422 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
7423 			break;
7424 		default:
7425 			if (sc->sc_type == WM_T_82575) {
7426 				mask = 0;
7427 				for (i = 0; i < sc->sc_nqueues; i++) {
7428 					wmq = &sc->sc_queue[i];
7429 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
7430 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
7431 				}
7432 				mask |= EITR_OTHER;
7433 			} else {
7434 				mask = 0;
7435 				for (i = 0; i < sc->sc_nqueues; i++) {
7436 					wmq = &sc->sc_queue[i];
7437 					mask |= 1 << wmq->wmq_intr_idx;
7438 				}
7439 				mask |= 1 << sc->sc_link_intr_idx;
7440 			}
7441 			CSR_WRITE(sc, WMREG_EIAC, mask);
7442 			CSR_WRITE(sc, WMREG_EIAM, mask);
7443 			CSR_WRITE(sc, WMREG_EIMS, mask);
7444 
7445 			/* For other interrupts */
7446 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
7447 			break;
7448 		}
7449 	} else {
7450 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
7451 		    ICR_RXO | ICR_RXT0 | sfp_mask;
7452 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
7453 	}
7454 
7455 	/* Set up the inter-packet gap. */
7456 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7457 
7458 	if (sc->sc_type >= WM_T_82543) {
7459 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
7460 			struct wm_queue *wmq = &sc->sc_queue[qidx];
7461 			wm_itrs_writereg(sc, wmq);
7462 		}
7463 		/*
7464 		 * Link interrupts occur much less often than TX
7465 		 * and RX interrupts, so we don't tune the
7466 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way
7467 		 * FreeBSD's if_igb does.
7468 		 */
7469 	}
7470 
7471 	/* Set the VLAN EtherType. */
7472 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
7473 
7474 	/*
7475 	 * Set up the transmit control register; we start out with
7476 	 * a collision distance suitable for FDX, but update it when
7477 	 * we resolve the media type.
7478 	 */
7479 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
7480 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
7481 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7482 	if (sc->sc_type >= WM_T_82571)
7483 		sc->sc_tctl |= TCTL_MULR;
7484 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7485 
7486 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7487 		/* Write TDT after TCTL.EN is set. See the datasheet. */
7488 		CSR_WRITE(sc, WMREG_TDT(0), 0);
7489 	}
7490 
7491 	if (sc->sc_type == WM_T_80003) {
7492 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
7493 		reg &= ~TCTL_EXT_GCEX_MASK;
7494 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
7495 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
7496 	}
7497 
7498 	/* Set the media. */
7499 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
7500 		goto out;
7501 
7502 	/* Configure for OS presence */
7503 	wm_init_manageability(sc);
7504 
7505 	/*
7506 	 * Set up the receive control register; we actually program the
7507 	 * register when we set the receive filter. Use multicast address
7508 	 * offset type 0.
7509 	 *
7510 	 * Only the i82544 has the ability to strip the incoming CRC, so we
7511 	 * don't enable that feature.
7512 	 */
7513 	sc->sc_mchash_type = 0;
7514 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
7515 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
7516 
7517 	/* The 82574 uses the one-buffer extended Rx descriptor format. */
7518 	if (sc->sc_type == WM_T_82574)
7519 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
7520 
7521 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
7522 		sc->sc_rctl |= RCTL_SECRC;
7523 
7524 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
7525 	    && (ifp->if_mtu > ETHERMTU)) {
7526 		sc->sc_rctl |= RCTL_LPE;
7527 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7528 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
7529 	}
7530 
7531 	if (MCLBYTES == 2048)
7532 		sc->sc_rctl |= RCTL_2k;
7533 	else {
7534 		if (sc->sc_type >= WM_T_82543) {
7535 			switch (MCLBYTES) {
7536 			case 4096:
7537 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
7538 				break;
7539 			case 8192:
7540 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
7541 				break;
7542 			case 16384:
7543 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
7544 				break;
7545 			default:
7546 				panic("wm_init: MCLBYTES %d unsupported",
7547 				    MCLBYTES);
7548 				break;
7549 			}
7550 		} else
7551 			panic("wm_init: i82542 requires MCLBYTES = 2048");
7552 	}
7553 
7554 	/* Enable ECC */
7555 	switch (sc->sc_type) {
7556 	case WM_T_82571:
7557 		reg = CSR_READ(sc, WMREG_PBA_ECC);
7558 		reg |= PBA_ECC_CORR_EN;
7559 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
7560 		break;
7561 	case WM_T_PCH_LPT:
7562 	case WM_T_PCH_SPT:
7563 	case WM_T_PCH_CNP:
7564 	case WM_T_PCH_TGP:
7565 		reg = CSR_READ(sc, WMREG_PBECCSTS);
7566 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
7567 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
7568 
7569 		sc->sc_ctrl |= CTRL_MEHE;
7570 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7571 		break;
7572 	default:
7573 		break;
7574 	}
7575 
7576 	/*
7577 	 * Set the receive filter.
7578 	 *
7579 	 * For 82575 and 82576, the RX descriptors must be initialized after
7580 	 * the setting of RCTL.EN in wm_set_filter()
7581 	 */
7582 	wm_set_filter(sc);
7583 
7584 	/* On 82575 and later, set RDT only if RX is enabled. */
7585 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7586 		int qidx;
7587 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
7588 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
7589 			for (i = 0; i < WM_NRXDESC; i++) {
7590 				mutex_enter(rxq->rxq_lock);
7591 				wm_init_rxdesc(rxq, i);
7592 				mutex_exit(rxq->rxq_lock);
7593 
7594 			}
7595 		}
7596 	}
7597 
7598 	wm_unset_stopping_flags(sc);
7599 
7600 	/* Start the one second link check clock. */
7601 	callout_schedule(&sc->sc_tick_ch, hz);
7602 
7603 	/*
7604 	 * ...all done! (IFNET_LOCKED asserted above.)
7605 	 */
7606 	ifp->if_flags |= IFF_RUNNING;
7607 
7608 out:
7609 	/* Save last flags for the callback */
7610 	sc->sc_if_flags = ifp->if_flags;
7611 	sc->sc_ec_capenable = ec->ec_capenable;
7612 	if (error)
7613 		log(LOG_ERR, "%s: interface not running\n",
7614 		    device_xname(sc->sc_dev));
7615 	return error;
7616 }
7617 
7618 /*
7619  * wm_stop:		[ifnet interface function]
7620  *
7621  *	Stop transmission on the interface.
7622  */
7623 static void
7624 wm_stop(struct ifnet *ifp, int disable)
7625 {
7626 	struct wm_softc *sc = ifp->if_softc;
7627 
7628 	ASSERT_SLEEPABLE();
7629 	KASSERT(IFNET_LOCKED(ifp));
7630 
7631 	mutex_enter(sc->sc_core_lock);
7632 	wm_stop_locked(ifp, disable ? true : false, true);
7633 	mutex_exit(sc->sc_core_lock);
7634 
7635 	/*
7636 	 * After wm_set_stopping_flags(), it is guaranteed that
7637 	 * wm_handle_queue_work() does not call workqueue_enqueue().
7638 	 * However, workqueue_wait() cannot be called in
7639 	 * wm_stop_locked() because it can sleep, so call
7640 	 * workqueue_wait() here.
7641 	 */
7642 	for (int i = 0; i < sc->sc_nqueues; i++)
7643 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
7644 	workqueue_wait(sc->sc_reset_wq, &sc->sc_reset_work);
7645 }
7646 
7647 static void
7648 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
7649 {
7650 	struct wm_softc *sc = ifp->if_softc;
7651 	struct wm_txsoft *txs;
7652 	int i, qidx;
7653 
7654 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
7655 		device_xname(sc->sc_dev), __func__));
7656 	KASSERT(IFNET_LOCKED(ifp));
7657 	KASSERT(mutex_owned(sc->sc_core_lock));
7658 
7659 	wm_set_stopping_flags(sc);
7660 
7661 	if (sc->sc_flags & WM_F_HAS_MII) {
7662 		/* Down the MII. */
7663 		mii_down(&sc->sc_mii);
7664 	} else {
7665 #if 0
7666 		/* Should we clear PHY's status properly? */
7667 		wm_reset(sc);
7668 #endif
7669 	}
7670 
7671 	/* Stop the transmit and receive processes. */
7672 	CSR_WRITE(sc, WMREG_TCTL, 0);
7673 	CSR_WRITE(sc, WMREG_RCTL, 0);
7674 	sc->sc_rctl &= ~RCTL_EN;
7675 
7676 	/*
7677 	 * Clear the interrupt mask to ensure the device cannot assert its
7678 	 * interrupt line.
7679 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
7680 	 * service any currently pending or shared interrupt.
7681 	 */
7682 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
7683 	sc->sc_icr = 0;
7684 	if (wm_is_using_msix(sc)) {
7685 		if (sc->sc_type != WM_T_82574) {
7686 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
7687 			CSR_WRITE(sc, WMREG_EIAC, 0);
7688 		} else
7689 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
7690 	}
7691 
7692 	/*
7693 	 * Stop callouts after interrupts are disabled; if we have
7694 	 * to wait for them, we will be releasing the CORE_LOCK
7695 	 * briefly, which will unblock interrupts on the current CPU.
7696 	 */
7697 
7698 	/* Stop the one second clock. */
7699 	if (wait)
7700 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
7701 	else
7702 		callout_stop(&sc->sc_tick_ch);
7703 
7704 	/* Stop the 82547 Tx FIFO stall check timer. */
7705 	if (sc->sc_type == WM_T_82547) {
7706 		if (wait)
7707 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
7708 		else
7709 			callout_stop(&sc->sc_txfifo_ch);
7710 	}
7711 
7712 	/* Release any queued transmit buffers. */
7713 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
7714 		struct wm_queue *wmq = &sc->sc_queue[qidx];
7715 		struct wm_txqueue *txq = &wmq->wmq_txq;
7716 		struct mbuf *m;
7717 
7718 		mutex_enter(txq->txq_lock);
7719 		txq->txq_sending = false; /* Ensure watchdog disabled */
7720 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7721 			txs = &txq->txq_soft[i];
7722 			if (txs->txs_mbuf != NULL) {
7723 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
7724 				m_freem(txs->txs_mbuf);
7725 				txs->txs_mbuf = NULL;
7726 			}
7727 		}
7728 		/* Drain txq_interq */
7729 		while ((m = pcq_get(txq->txq_interq)) != NULL)
7730 			m_freem(m);
7731 		mutex_exit(txq->txq_lock);
7732 	}
7733 
7734 	/* Mark the interface as down and cancel the watchdog timer. */
7735 	ifp->if_flags &= ~IFF_RUNNING;
7736 	sc->sc_if_flags = ifp->if_flags;
7737 
7738 	if (disable) {
7739 		for (i = 0; i < sc->sc_nqueues; i++) {
7740 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7741 			mutex_enter(rxq->rxq_lock);
7742 			wm_rxdrain(rxq);
7743 			mutex_exit(rxq->rxq_lock);
7744 		}
7745 	}
7746 
7747 #if 0 /* notyet */
7748 	if (sc->sc_type >= WM_T_82544)
7749 		CSR_WRITE(sc, WMREG_WUC, 0);
7750 #endif
7751 }
7752 
7753 static void
7754 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
7755 {
7756 	struct mbuf *m;
7757 	int i;
7758 
7759 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
7760 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
7761 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
7762 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
7763 		    m->m_data, m->m_len, m->m_flags);
7764 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
7765 	    i, i == 1 ? "" : "s");
7766 }
7767 
7768 /*
7769  * wm_82547_txfifo_stall:
7770  *
7771  *	Callout used to wait for the 82547 Tx FIFO to drain,
7772  *	reset the FIFO pointers, and restart packet transmission.
7773  */
7774 static void
7775 wm_82547_txfifo_stall(void *arg)
7776 {
7777 	struct wm_softc *sc = arg;
7778 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7779 
7780 	mutex_enter(txq->txq_lock);
7781 
7782 	if (txq->txq_stopping)
7783 		goto out;
7784 
7785 	if (txq->txq_fifo_stall) {
7786 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
7787 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
7788 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
7789 			/*
7790 			 * Packets have drained.  Stop transmitter, reset
7791 			 * FIFO pointers, restart transmitter, and kick
7792 			 * the packet queue.
7793 			 */
7794 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
7795 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
7796 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
7797 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
7798 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
7799 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
7800 			CSR_WRITE(sc, WMREG_TCTL, tctl);
7801 			CSR_WRITE_FLUSH(sc);
7802 
7803 			txq->txq_fifo_head = 0;
7804 			txq->txq_fifo_stall = 0;
7805 			wm_start_locked(&sc->sc_ethercom.ec_if);
7806 		} else {
7807 			/*
7808 			 * Still waiting for packets to drain; try again in
7809 			 * another tick.
7810 			 */
7811 			callout_schedule(&sc->sc_txfifo_ch, 1);
7812 		}
7813 	}
7814 
7815 out:
7816 	mutex_exit(txq->txq_lock);
7817 }
7818 
7819 /*
7820  * wm_82547_txfifo_bugchk:
7821  *
7822  *	Check for bug condition in the 82547 Tx FIFO.  We need to
7823  *	prevent enqueueing a packet that would wrap around the end
7824  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
7825  *
7826  *	We do this by checking the amount of space before the end
7827  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
7828  *	the Tx FIFO, wait for all remaining packets to drain, reset
7829  *	the internal FIFO pointers to the beginning, and restart
7830  *	transmission on the interface.
7831  */
7832 #define	WM_FIFO_HDR		0x10
7833 #define	WM_82547_PAD_LEN	0x3e0
7834 static int
7835 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
7836 {
7837 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7838 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
7839 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
7840 
7841 	/* Just return if already stalled. */
7842 	if (txq->txq_fifo_stall)
7843 		return 1;
7844 
7845 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
7846 		/* Stall only occurs in half-duplex mode. */
7847 		goto send_packet;
7848 	}
7849 
7850 	if (len >= WM_82547_PAD_LEN + space) {
7851 		txq->txq_fifo_stall = 1;
7852 		callout_schedule(&sc->sc_txfifo_ch, 1);
7853 		return 1;
7854 	}
7855 
7856 send_packet:
7857 	txq->txq_fifo_head += len;
7858 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
7859 		txq->txq_fifo_head -= txq->txq_fifo_size;
7860 
7861 	return 0;
7862 }
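
/*
 * A worked example of the check above, with hypothetical numbers: a
 * 1514-byte frame gives len = roundup(1514 + 0x10, 0x10) = 1536.  If
 * only 256 bytes remain before the end of the FIFO, then
 * len >= WM_82547_PAD_LEN + space = 992 + 256 = 1248, so transmission
 * is stalled until the FIFO drains and its pointers are reset.
 */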
7863 
7864 static int
7865 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
7866 {
7867 	int error;
7868 
7869 	/*
7870 	 * Allocate the control data structures, and create and load the
7871 	 * DMA map for it.
7872 	 *
7873 	 * NOTE: All Tx descriptors must be in the same 4G segment of
7874 	 * memory.  So must Rx descriptors.  We simplify by allocating
7875 	 * both sets within the same 4G segment.
7876 	 */
7877 	if (sc->sc_type < WM_T_82544)
7878 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
7879 	else
7880 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
7881 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7882 		txq->txq_descsize = sizeof(nq_txdesc_t);
7883 	else
7884 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
7885 
7886 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
7887 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
7888 		    1, &txq->txq_desc_rseg, 0)) != 0) {
7889 		aprint_error_dev(sc->sc_dev,
7890 		    "unable to allocate TX control data, error = %d\n",
7891 		    error);
7892 		goto fail_0;
7893 	}
7894 
7895 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
7896 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
7897 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
7898 		aprint_error_dev(sc->sc_dev,
7899 		    "unable to map TX control data, error = %d\n", error);
7900 		goto fail_1;
7901 	}
7902 
7903 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
7904 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
7905 		aprint_error_dev(sc->sc_dev,
7906 		    "unable to create TX control data DMA map, error = %d\n",
7907 		    error);
7908 		goto fail_2;
7909 	}
7910 
7911 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
7912 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
7913 		aprint_error_dev(sc->sc_dev,
7914 		    "unable to load TX control data DMA map, error = %d\n",
7915 		    error);
7916 		goto fail_3;
7917 	}
7918 
7919 	return 0;
7920 
7921 fail_3:
7922 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
7923 fail_2:
7924 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
7925 	    WM_TXDESCS_SIZE(txq));
7926 fail_1:
7927 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
7928 fail_0:
7929 	return error;
7930 }
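
/*
 * The (bus_size_t)0x100000000ULL passed to bus_dmamem_alloc() above is
 * its boundary argument: the allocated memory must not cross a 4 GB
 * boundary.  Together with requesting a single segment, this keeps the
 * descriptors within one 4 GB region, as the note above requires.
 */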
7931 
7932 static void
7933 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
7934 {
7935 
7936 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
7937 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
7938 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
7939 	    WM_TXDESCS_SIZE(txq));
7940 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
7941 }
7942 
7943 static int
7944 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
7945 {
7946 	int error;
7947 	size_t rxq_descs_size;
7948 
7949 	/*
7950 	 * Allocate the control data structures, and create and load the
7951 	 * DMA map for it.
7952 	 *
7953 	 * NOTE: All Tx descriptors must be in the same 4G segment of
7954 	 * memory.  So must Rx descriptors.  We simplify by allocating
7955 	 * both sets within the same 4G segment.
7956 	 */
7957 	rxq->rxq_ndesc = WM_NRXDESC;
7958 	if (sc->sc_type == WM_T_82574)
7959 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
7960 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7961 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
7962 	else
7963 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
7964 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
7965 
7966 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
7967 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
7968 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
7969 		aprint_error_dev(sc->sc_dev,
7970 		    "unable to allocate RX control data, error = %d\n",
7971 		    error);
7972 		goto fail_0;
7973 	}
7974 
7975 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
7976 		    rxq->rxq_desc_rseg, rxq_descs_size,
7977 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
7978 		aprint_error_dev(sc->sc_dev,
7979 		    "unable to map RX control data, error = %d\n", error);
7980 		goto fail_1;
7981 	}
7982 
7983 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
7984 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
7985 		aprint_error_dev(sc->sc_dev,
7986 		    "unable to create RX control data DMA map, error = %d\n",
7987 		    error);
7988 		goto fail_2;
7989 	}
7990 
7991 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
7992 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
7993 		aprint_error_dev(sc->sc_dev,
7994 		    "unable to load RX control data DMA map, error = %d\n",
7995 		    error);
7996 		goto fail_3;
7997 	}
7998 
7999 	return 0;
8000 
8001  fail_3:
8002 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
8003  fail_2:
8004 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
8005 	    rxq_descs_size);
8006  fail_1:
8007 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
8008  fail_0:
8009 	return error;
8010 }
8011 
8012 static void
8013 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
8014 {
8015 
8016 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
8017 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
8018 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
8019 	    rxq->rxq_descsize * rxq->rxq_ndesc);
8020 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
8021 }
8022 
8023 
8024 static int
8025 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
8026 {
8027 	int i, error;
8028 
8029 	/* Create the transmit buffer DMA maps. */
8030 	WM_TXQUEUELEN(txq) =
8031 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
8032 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
8033 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
8034 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
8035 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
8036 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
8037 			aprint_error_dev(sc->sc_dev,
8038 			    "unable to create Tx DMA map %d, error = %d\n",
8039 			    i, error);
8040 			goto fail;
8041 		}
8042 	}
8043 
8044 	return 0;
8045 
8046 fail:
8047 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
8048 		if (txq->txq_soft[i].txs_dmamap != NULL)
8049 			bus_dmamap_destroy(sc->sc_dmat,
8050 			    txq->txq_soft[i].txs_dmamap);
8051 	}
8052 	return error;
8053 }
8054 
8055 static void
8056 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
8057 {
8058 	int i;
8059 
8060 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
8061 		if (txq->txq_soft[i].txs_dmamap != NULL)
8062 			bus_dmamap_destroy(sc->sc_dmat,
8063 			    txq->txq_soft[i].txs_dmamap);
8064 	}
8065 }
8066 
8067 static int
8068 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
8069 {
8070 	int i, error;
8071 
8072 	/* Create the receive buffer DMA maps. */
8073 	for (i = 0; i < rxq->rxq_ndesc; i++) {
8074 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
8075 			    MCLBYTES, 0, 0,
8076 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
8077 			aprint_error_dev(sc->sc_dev,
8078 			    "unable to create Rx DMA map %d error = %d\n",
8079 			    i, error);
8080 			goto fail;
8081 		}
8082 		rxq->rxq_soft[i].rxs_mbuf = NULL;
8083 	}
8084 
8085 	return 0;
8086 
8087  fail:
8088 	for (i = 0; i < rxq->rxq_ndesc; i++) {
8089 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
8090 			bus_dmamap_destroy(sc->sc_dmat,
8091 			    rxq->rxq_soft[i].rxs_dmamap);
8092 	}
8093 	return error;
8094 }
8095 
8096 static void
8097 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
8098 {
8099 	int i;
8100 
8101 	for (i = 0; i < rxq->rxq_ndesc; i++) {
8102 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
8103 			bus_dmamap_destroy(sc->sc_dmat,
8104 			    rxq->rxq_soft[i].rxs_dmamap);
8105 	}
8106 }
8107 
8108 /*
8109  * wm_alloc_txrx_queues:
8110  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
8111  */
8112 static int
8113 wm_alloc_txrx_queues(struct wm_softc *sc)
8114 {
8115 	int i, error, tx_done, rx_done;
8116 
8117 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
8118 	    KM_SLEEP);
8119 	if (sc->sc_queue == NULL) {
8120 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
8121 		error = ENOMEM;
8122 		goto fail_0;
8123 	}
8124 
8125 	/* For transmission */
8126 	error = 0;
8127 	tx_done = 0;
8128 	for (i = 0; i < sc->sc_nqueues; i++) {
8129 #ifdef WM_EVENT_COUNTERS
8130 		int j;
8131 		const char *xname;
8132 #endif
8133 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
8134 		txq->txq_sc = sc;
8135 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
8136 
8137 		error = wm_alloc_tx_descs(sc, txq);
8138 		if (error)
8139 			break;
8140 		error = wm_alloc_tx_buffer(sc, txq);
8141 		if (error) {
8142 			wm_free_tx_descs(sc, txq);
8143 			break;
8144 		}
8145 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
8146 		if (txq->txq_interq == NULL) {
8147 			wm_free_tx_descs(sc, txq);
8148 			wm_free_tx_buffer(sc, txq);
8149 			error = ENOMEM;
8150 			break;
8151 		}
8152 
8153 #ifdef WM_EVENT_COUNTERS
8154 		xname = device_xname(sc->sc_dev);
8155 
8156 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
8157 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
8158 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
8159 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
8160 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
8161 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
8162 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
8163 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
8164 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
8165 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
8166 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
8167 
8168 		for (j = 0; j < WM_NTXSEGS; j++) {
8169 			snprintf(txq->txq_txseg_evcnt_names[j],
8170 			    sizeof(txq->txq_txseg_evcnt_names[j]),
8171 			    "txq%02dtxseg%d", i, j);
8172 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
8173 			    EVCNT_TYPE_MISC,
8174 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
8175 		}
8176 
8177 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
8178 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
8179 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
8180 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
8181 		/* Only for 82544 (and earlier?) */
8182 		if (sc->sc_type <= WM_T_82544)
8183 			WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
8184 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
8185 #endif /* WM_EVENT_COUNTERS */
8186 
8187 		tx_done++;
8188 	}
8189 	if (error)
8190 		goto fail_1;
8191 
8192 	/* For receive */
8193 	error = 0;
8194 	rx_done = 0;
8195 	for (i = 0; i < sc->sc_nqueues; i++) {
8196 #ifdef WM_EVENT_COUNTERS
8197 		const char *xname;
8198 #endif
8199 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
8200 		rxq->rxq_sc = sc;
8201 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
8202 
8203 		error = wm_alloc_rx_descs(sc, rxq);
8204 		if (error)
8205 			break;
8206 
8207 		error = wm_alloc_rx_buffer(sc, rxq);
8208 		if (error) {
8209 			wm_free_rx_descs(sc, rxq);
8210 			break;
8211 		}
8212 
8213 #ifdef WM_EVENT_COUNTERS
8214 		xname = device_xname(sc->sc_dev);
8215 
8216 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
8217 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
8218 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
8219 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
8220 		if ((sc->sc_type >= WM_T_82575) && !WM_IS_ICHPCH(sc))
8221 			WM_Q_MISC_EVCNT_ATTACH(rxq, qdrop, rxq, i, xname);
8222 #endif /* WM_EVENT_COUNTERS */
8223 
8224 		rx_done++;
8225 	}
8226 	if (error)
8227 		goto fail_2;
8228 
8229 	return 0;
8230 
8231 fail_2:
8232 	for (i = 0; i < rx_done; i++) {
8233 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
8234 		wm_free_rx_buffer(sc, rxq);
8235 		wm_free_rx_descs(sc, rxq);
8236 		if (rxq->rxq_lock)
8237 			mutex_obj_free(rxq->rxq_lock);
8238 	}
8239 fail_1:
8240 	for (i = 0; i < tx_done; i++) {
8241 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
8242 		pcq_destroy(txq->txq_interq);
8243 		wm_free_tx_buffer(sc, txq);
8244 		wm_free_tx_descs(sc, txq);
8245 		if (txq->txq_lock)
8246 			mutex_obj_free(txq->txq_lock);
8247 	}
8248 
8249 	kmem_free(sc->sc_queue,
8250 	    sizeof(struct wm_queue) * sc->sc_nqueues);
8251 fail_0:
8252 	return error;
8253 }
8254 
8255 /*
8256  * wm_free_txrx_queues:
8257  *	Free {tx,rx}descs and {tx,rx} buffers
8258  */
8259 static void
8260 wm_free_txrx_queues(struct wm_softc *sc)
8261 {
8262 	int i;
8263 
8264 	for (i = 0; i < sc->sc_nqueues; i++) {
8265 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
8266 
8267 #ifdef WM_EVENT_COUNTERS
8268 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
8269 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
8270 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
8271 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
8272 		if ((sc->sc_type >= WM_T_82575) && !WM_IS_ICHPCH(sc))
8273 			WM_Q_EVCNT_DETACH(rxq, qdrop, rxq, i);
8274 #endif /* WM_EVENT_COUNTERS */
8275 
8276 		wm_free_rx_buffer(sc, rxq);
8277 		wm_free_rx_descs(sc, rxq);
8278 		if (rxq->rxq_lock)
8279 			mutex_obj_free(rxq->rxq_lock);
8280 	}
8281 
8282 	for (i = 0; i < sc->sc_nqueues; i++) {
8283 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
8284 		struct mbuf *m;
8285 #ifdef WM_EVENT_COUNTERS
8286 		int j;
8287 
8288 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
8289 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
8290 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
8291 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
8292 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
8293 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
8294 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
8295 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
8296 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
8297 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
8298 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
8299 
8300 		for (j = 0; j < WM_NTXSEGS; j++)
8301 			evcnt_detach(&txq->txq_ev_txseg[j]);
8302 
8303 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
8304 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
8305 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
8306 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
8307 		if (sc->sc_type <= WM_T_82544)
8308 			WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
8309 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
8310 #endif /* WM_EVENT_COUNTERS */
8311 
8312 		/* Drain txq_interq */
8313 		while ((m = pcq_get(txq->txq_interq)) != NULL)
8314 			m_freem(m);
8315 		pcq_destroy(txq->txq_interq);
8316 
8317 		wm_free_tx_buffer(sc, txq);
8318 		wm_free_tx_descs(sc, txq);
8319 		if (txq->txq_lock)
8320 			mutex_obj_free(txq->txq_lock);
8321 	}
8322 
8323 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
8324 }
8325 
8326 static void
8327 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
8328 {
8329 
8330 	KASSERT(mutex_owned(txq->txq_lock));
8331 
8332 	/* Initialize the transmit descriptor ring. */
8333 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
8334 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
8335 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
8336 	txq->txq_free = WM_NTXDESC(txq);
8337 	txq->txq_next = 0;
8338 }
8339 
8340 static void
8341 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
8342     struct wm_txqueue *txq)
8343 {
8344 
8345 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
8346 		device_xname(sc->sc_dev), __func__));
8347 	KASSERT(mutex_owned(txq->txq_lock));
8348 
8349 	if (sc->sc_type < WM_T_82543) {
8350 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
8351 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
8352 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
8353 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
8354 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
8355 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
8356 	} else {
8357 		int qid = wmq->wmq_id;
8358 
8359 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
8360 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
8361 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
8362 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
8363 
8364 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8365 			/*
8366 			 * Don't write TDT before TCTL.EN is set.
8367 			 * See the documentation.
8368 			 */
8369 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
8370 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
8371 			    | TXDCTL_WTHRESH(0));
8372 		else {
8373 			/* XXX should update with AIM? */
8374 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
8375 			if (sc->sc_type >= WM_T_82540) {
8376 				/* Should be the same */
8377 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
8378 			}
8379 
8380 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
8381 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
8382 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
8383 		}
8384 	}
8385 }
8386 
8387 static void
8388 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
8389 {
8390 	int i;
8391 
8392 	KASSERT(mutex_owned(txq->txq_lock));
8393 
8394 	/* Initialize the transmit job descriptors. */
8395 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
8396 		txq->txq_soft[i].txs_mbuf = NULL;
8397 	txq->txq_sfree = WM_TXQUEUELEN(txq);
8398 	txq->txq_snext = 0;
8399 	txq->txq_sdirty = 0;
8400 }
8401 
8402 static void
8403 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
8404     struct wm_txqueue *txq)
8405 {
8406 
8407 	KASSERT(mutex_owned(txq->txq_lock));
8408 
8409 	/*
8410 	 * Set up some register offsets that are different between
8411 	 * the i82542 and the i82543 and later chips.
8412 	 */
8413 	if (sc->sc_type < WM_T_82543)
8414 		txq->txq_tdt_reg = WMREG_OLD_TDT;
8415 	else
8416 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
8417 
8418 	wm_init_tx_descs(sc, txq);
8419 	wm_init_tx_regs(sc, wmq, txq);
8420 	wm_init_tx_buffer(sc, txq);
8421 
8422 	/* Clear all flags except WM_TXQ_LINKDOWN_DISCARD. */
8423 	txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD;
8424 
8425 	txq->txq_sending = false;
8426 }
8427 
8428 static void
8429 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
8430     struct wm_rxqueue *rxq)
8431 {
8432 
8433 	KASSERT(mutex_owned(rxq->rxq_lock));
8434 
8435 	/*
8436 	 * Initialize the receive descriptor and receive job
8437 	 * descriptor rings.
8438 	 */
8439 	if (sc->sc_type < WM_T_82543) {
8440 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
8441 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
8442 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
8443 		    rxq->rxq_descsize * rxq->rxq_ndesc);
8444 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
8445 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
8446 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
8447 
8448 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
8449 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
8450 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
8451 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
8452 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
8453 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
8454 	} else {
8455 		int qid = wmq->wmq_id;
8456 
8457 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
8458 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
8459 		CSR_WRITE(sc, WMREG_RDLEN(qid),
8460 		    rxq->rxq_descsize * rxq->rxq_ndesc);
8461 
8462 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
8463 			uint32_t srrctl;
8464 
8465 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
8466 				panic("%s: MCLBYTES %d unsupported for 82575 "
8467 				    "or higher\n", __func__, MCLBYTES);
8468 
8469 			/*
8470 			 * Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF
8471 			 * only.
8472 			 */
8473 			srrctl = SRRCTL_DESCTYPE_ADV_ONEBUF
8474 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT);
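			/*
			 * Illustrative note: BSIZEPKT is programmed in 1KB
			 * units (SRRCTL_BSIZEPKT_SHIFT is 10), so the usual
			 * MCLBYTES of 2048 selects a 2KB packet buffer; the
			 * panic above rejects cluster sizes that are not a
			 * multiple of 1KB.
			 */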
8475 			/*
8476 			 * Drop frames if the RX descriptor ring has no room.
8477 			 * This is enabled only on multiqueue systems to
8478 			 * avoid adversely affecting other queues.
8479 			 */
8480 			if (sc->sc_nqueues > 1)
8481 				srrctl |= SRRCTL_DROP_EN;
8482 			CSR_WRITE(sc, WMREG_SRRCTL(qid), srrctl);
8483 
8484 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
8485 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
8486 			    | RXDCTL_WTHRESH(1));
8487 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
8488 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
8489 		} else {
8490 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
8491 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
8492 			/* XXX should update with AIM? */
8493 			CSR_WRITE(sc, WMREG_RDTR,
8494 			    (wmq->wmq_itr / 4) | RDTR_FPD);
8495 			/* MUST be same */
8496 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
8497 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
8498 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
8499 		}
8500 	}
8501 }
8502 
8503 static int
8504 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
8505 {
8506 	struct wm_rxsoft *rxs;
8507 	int error, i;
8508 
8509 	KASSERT(mutex_owned(rxq->rxq_lock));
8510 
8511 	for (i = 0; i < rxq->rxq_ndesc; i++) {
8512 		rxs = &rxq->rxq_soft[i];
8513 		if (rxs->rxs_mbuf == NULL) {
8514 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
8515 				log(LOG_ERR, "%s: unable to allocate or map "
8516 				    "rx buffer %d, error = %d\n",
8517 				    device_xname(sc->sc_dev), i, error);
8518 				/*
8519 				 * XXX Should attempt to run with fewer receive
8520 				 * XXX buffers instead of just failing.
8521 				 */
8522 				wm_rxdrain(rxq);
8523 				return ENOMEM;
8524 			}
8525 		} else {
8526 			/*
8527 			 * For 82575 and 82576, the RX descriptors must be
8528 			 * initialized after the setting of RCTL.EN in
8529 			 * wm_set_filter()
8530 			 */
8531 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
8532 				wm_init_rxdesc(rxq, i);
8533 		}
8534 	}
8535 	rxq->rxq_ptr = 0;
8536 	rxq->rxq_discard = 0;
8537 	WM_RXCHAIN_RESET(rxq);
8538 
8539 	return 0;
8540 }
8541 
8542 static int
8543 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
8544     struct wm_rxqueue *rxq)
8545 {
8546 
8547 	KASSERT(mutex_owned(rxq->rxq_lock));
8548 
8549 	/*
8550 	 * Set up some register offsets that are different between
8551 	 * the i82542 and the i82543 and later chips.
8552 	 */
8553 	if (sc->sc_type < WM_T_82543)
8554 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
8555 	else
8556 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
8557 
8558 	wm_init_rx_regs(sc, wmq, rxq);
8559 	return wm_init_rx_buffer(sc, rxq);
8560 }
8561 
8562 /*
8563  * wm_init_txrx_queues:
8564  *	Initialize {tx,rx}descs and {tx,rx} buffers
8565  */
8566 static int
8567 wm_init_txrx_queues(struct wm_softc *sc)
8568 {
8569 	int i, error = 0;
8570 
8571 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
8572 		device_xname(sc->sc_dev), __func__));
8573 
8574 	for (i = 0; i < sc->sc_nqueues; i++) {
8575 		struct wm_queue *wmq = &sc->sc_queue[i];
8576 		struct wm_txqueue *txq = &wmq->wmq_txq;
8577 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
8578 
8579 		/*
8580 		 * TODO
8581 		 * Currently, a constant value is used instead of AIM.
8582 		 * Furthermore, the interrupt interval used for multiqueue
8583 		 * (polling mode) is lower than the default value.
8584 		 * More tuning and AIM are required.
8585 		 */
8586 		if (wm_is_using_multiqueue(sc))
8587 			wmq->wmq_itr = 50;
8588 		else
8589 			wmq->wmq_itr = sc->sc_itr_init;
8590 		wmq->wmq_set_itr = true;
8591 
8592 		mutex_enter(txq->txq_lock);
8593 		wm_init_tx_queue(sc, wmq, txq);
8594 		mutex_exit(txq->txq_lock);
8595 
8596 		mutex_enter(rxq->rxq_lock);
8597 		error = wm_init_rx_queue(sc, wmq, rxq);
8598 		mutex_exit(rxq->rxq_lock);
8599 		if (error)
8600 			break;
8601 	}
8602 
8603 	return error;
8604 }
8605 
8606 /*
8607  * wm_tx_offload:
8608  *
8609  *	Set up TCP/IP checksumming parameters for the
8610  *	specified packet.
8611  */
8612 static void
8613 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
8614     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
8615 {
8616 	struct mbuf *m0 = txs->txs_mbuf;
8617 	struct livengood_tcpip_ctxdesc *t;
8618 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
8619 	uint32_t ipcse;
8620 	struct ether_header *eh;
8621 	int offset, iphl;
8622 	uint8_t fields;
8623 
8624 	/*
8625 	 * XXX It would be nice if the mbuf pkthdr had offset
8626 	 * fields for the protocol headers.
8627 	 */
8628 
8629 	eh = mtod(m0, struct ether_header *);
8630 	switch (htons(eh->ether_type)) {
8631 	case ETHERTYPE_IP:
8632 	case ETHERTYPE_IPV6:
8633 		offset = ETHER_HDR_LEN;
8634 		break;
8635 
8636 	case ETHERTYPE_VLAN:
8637 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
8638 		break;
8639 
8640 	default:
8641 		/* Don't support this protocol or encapsulation. */
8642 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
8643 		txq->txq_last_hw_ipcs = 0;
8644 		txq->txq_last_hw_tucs = 0;
8645 		*fieldsp = 0;
8646 		*cmdp = 0;
8647 		return;
8648 	}
8649 
8650 	if ((m0->m_pkthdr.csum_flags &
8651 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
8652 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
8653 	} else
8654 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
8655 
8656 	ipcse = offset + iphl - 1;
8657 
8658 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
8659 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
8660 	seg = 0;
8661 	fields = 0;
8662 
8663 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
8664 		int hlen = offset + iphl;
8665 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
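		/*
		 * The controller regenerates the length fields and TCP
		 * checksum of each TSO segment, so the code below zeroes
		 * the IP total-length field and seeds th_sum with a
		 * pseudo-header checksum that excludes the length
		 * (standard e1000-style TSO preparation).
		 */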
8666 
8667 		if (__predict_false(m0->m_len <
8668 				    (hlen + sizeof(struct tcphdr)))) {
8669 			/*
8670 			 * TCP/IP headers are not in the first mbuf; we need
8671 			 * to do this the slow and painful way. Let's just
8672 			 * hope this doesn't happen very often.
8673 			 */
8674 			struct tcphdr th;
8675 
8676 			WM_Q_EVCNT_INCR(txq, tsopain);
8677 
8678 			m_copydata(m0, hlen, sizeof(th), &th);
8679 			if (v4) {
8680 				struct ip ip;
8681 
8682 				m_copydata(m0, offset, sizeof(ip), &ip);
8683 				ip.ip_len = 0;
8684 				m_copyback(m0,
8685 				    offset + offsetof(struct ip, ip_len),
8686 				    sizeof(ip.ip_len), &ip.ip_len);
8687 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
8688 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
8689 			} else {
8690 				struct ip6_hdr ip6;
8691 
8692 				m_copydata(m0, offset, sizeof(ip6), &ip6);
8693 				ip6.ip6_plen = 0;
8694 				m_copyback(m0,
8695 				    offset + offsetof(struct ip6_hdr, ip6_plen),
8696 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
8697 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
8698 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
8699 			}
8700 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
8701 			    sizeof(th.th_sum), &th.th_sum);
8702 
8703 			hlen += th.th_off << 2;
8704 		} else {
8705 			/*
8706 			 * TCP/IP headers are in the first mbuf; we can do
8707 			 * this the easy way.
8708 			 */
8709 			struct tcphdr *th;
8710 
8711 			if (v4) {
8712 				struct ip *ip =
8713 				    (void *)(mtod(m0, char *) + offset);
8714 				th = (void *)(mtod(m0, char *) + hlen);
8715 
8716 				ip->ip_len = 0;
8717 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
8718 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
8719 			} else {
8720 				struct ip6_hdr *ip6 =
8721 				    (void *)(mtod(m0, char *) + offset);
8722 				th = (void *)(mtod(m0, char *) + hlen);
8723 
8724 				ip6->ip6_plen = 0;
8725 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
8726 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
8727 			}
8728 			hlen += th->th_off << 2;
8729 		}
8730 
8731 		if (v4) {
8732 			WM_Q_EVCNT_INCR(txq, tso);
8733 			cmdlen |= WTX_TCPIP_CMD_IP;
8734 		} else {
8735 			WM_Q_EVCNT_INCR(txq, tso6);
8736 			ipcse = 0;
8737 		}
8738 		cmd |= WTX_TCPIP_CMD_TSE;
8739 		cmdlen |= WTX_TCPIP_CMD_TSE |
8740 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
8741 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
8742 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
8743 	}
8744 
8745 	/*
8746 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
8747 	 * offload feature, if we load the context descriptor, we
8748 	 * MUST provide valid values for IPCSS and TUCSS fields.
8749 	 */
8750 
8751 	ipcs = WTX_TCPIP_IPCSS(offset) |
8752 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
8753 	    WTX_TCPIP_IPCSE(ipcse);
8754 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
8755 		WM_Q_EVCNT_INCR(txq, ipsum);
8756 		fields |= WTX_IXSM;
8757 	}
8758 
8759 	offset += iphl;
8760 
8761 	if (m0->m_pkthdr.csum_flags &
8762 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
8763 		WM_Q_EVCNT_INCR(txq, tusum);
8764 		fields |= WTX_TXSM;
8765 		tucs = WTX_TCPIP_TUCSS(offset) |
8766 		    WTX_TCPIP_TUCSO(offset +
8767 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
8768 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
8769 	} else if ((m0->m_pkthdr.csum_flags &
8770 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
8771 		WM_Q_EVCNT_INCR(txq, tusum6);
8772 		fields |= WTX_TXSM;
8773 		tucs = WTX_TCPIP_TUCSS(offset) |
8774 		    WTX_TCPIP_TUCSO(offset +
8775 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
8776 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
8777 	} else {
8778 		/* Just initialize it to a valid TCP context. */
8779 		tucs = WTX_TCPIP_TUCSS(offset) |
8780 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
8781 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
8782 	}
8783 
8784 	*cmdp = cmd;
8785 	*fieldsp = fields;
8786 
8787 	/*
8788 	 * We don't have to write a context descriptor for every packet,
8789 	 * except on the 82574. For the 82574, we must write a context
8790 	 * descriptor for every packet when we use two descriptor queues.
8791 	 *
8792 	 * The 82574L can only remember the *last* context used,
8793 	 * regardless of the queue it was used for.  We cannot reuse
8794 	 * contexts on this hardware platform and must generate a new
8795 	 * context every time.  82574L hardware spec, section 7.2.6,
8796 	 * second note.
8797 	 */
8798 	if (sc->sc_nqueues < 2) {
8799 		/*
8800 		 * Setting up a new checksum offload context for every
8801 		 * frame takes a lot of processing time in hardware.
8802 		 * It also reduces performance a lot for small frames,
8803 		 * so avoid it if the driver can reuse a previously
8804 		 * configured checksum offload context.
8805 		 * For TSO, in theory we could reuse the same TSO context
8806 		 * only if the frame has the same type (IP/TCP) and the
8807 		 * same MSS. However, checking whether a frame has the
8808 		 * same IP/TCP structure is hard, so just ignore that and
8809 		 * always establish a new TSO context.
8810 		 */
8811 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
8812 		    == 0) {
8813 			if (txq->txq_last_hw_cmd == cmd &&
8814 			    txq->txq_last_hw_fields == fields &&
8815 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
8816 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
8817 				WM_Q_EVCNT_INCR(txq, skipcontext);
8818 				return;
8819 			}
8820 		}
8821 
8822 		txq->txq_last_hw_cmd = cmd;
8823 		txq->txq_last_hw_fields = fields;
8824 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
8825 		txq->txq_last_hw_tucs = (tucs & 0xffff);
8826 	}
8827 
8828 	/* Fill in the context descriptor. */
8829 	t = (struct livengood_tcpip_ctxdesc *)
8830 	    &txq->txq_descs[txq->txq_next];
8831 	t->tcpip_ipcs = htole32(ipcs);
8832 	t->tcpip_tucs = htole32(tucs);
8833 	t->tcpip_cmdlen = htole32(cmdlen);
8834 	t->tcpip_seg = htole32(seg);
8835 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
8836 
8837 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
8838 	txs->txs_ndesc++;
8839 }
8840 
8841 static inline int
8842 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
8843 {
8844 	struct wm_softc *sc = ifp->if_softc;
8845 	u_int cpuid = cpu_index(curcpu());
8846 
8847 	/*
8848 	 * Currently, a simple distribution strategy based on the CPU index.
8849 	 * TODO:
8850 	 * Distribute by flowid (RSS hash value).
8851 	 */
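	/*
	 * Illustrative example (hypothetical numbers): with ncpu = 8,
	 * sc_affinity_offset = 2 and sc_nqueues = 4, a packet sent from
	 * CPU 1 maps to ((1 + 8 - 2) % 8) % 4 = 3, so neighbouring CPUs
	 * land on different Tx queues.
	 */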
8852 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
8853 }
8854 
8855 static inline bool
8856 wm_linkdown_discard(struct wm_txqueue *txq)
8857 {
8858 
8859 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
8860 		return true;
8861 
8862 	return false;
8863 }
8864 
8865 /*
8866  * wm_start:		[ifnet interface function]
8867  *
8868  *	Start packet transmission on the interface.
8869  */
8870 static void
8871 wm_start(struct ifnet *ifp)
8872 {
8873 	struct wm_softc *sc = ifp->if_softc;
8874 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8875 
8876 	KASSERT(if_is_mpsafe(ifp));
8877 	/*
8878 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
8879 	 */
8880 
8881 	mutex_enter(txq->txq_lock);
8882 	if (!txq->txq_stopping)
8883 		wm_start_locked(ifp);
8884 	mutex_exit(txq->txq_lock);
8885 }
8886 
8887 static void
8888 wm_start_locked(struct ifnet *ifp)
8889 {
8890 	struct wm_softc *sc = ifp->if_softc;
8891 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8892 
8893 	wm_send_common_locked(ifp, txq, false);
8894 }
8895 
8896 static int
8897 wm_transmit(struct ifnet *ifp, struct mbuf *m)
8898 {
8899 	int qid;
8900 	struct wm_softc *sc = ifp->if_softc;
8901 	struct wm_txqueue *txq;
8902 
8903 	qid = wm_select_txqueue(ifp, m);
8904 	txq = &sc->sc_queue[qid].wmq_txq;
8905 
8906 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
8907 		m_freem(m);
8908 		WM_Q_EVCNT_INCR(txq, pcqdrop);
8909 		return ENOBUFS;
8910 	}
8911 
8912 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
8913 	if_statadd_ref(ifp, nsr, if_obytes, m->m_pkthdr.len);
8914 	if (m->m_flags & M_MCAST)
8915 		if_statinc_ref(ifp, nsr, if_omcasts);
8916 	IF_STAT_PUTREF(ifp);
8917 
8918 	if (mutex_tryenter(txq->txq_lock)) {
8919 		if (!txq->txq_stopping)
8920 			wm_transmit_locked(ifp, txq);
8921 		mutex_exit(txq->txq_lock);
8922 	}
8923 
8924 	return 0;
8925 }
8926 
8927 static void
8928 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
8929 {
8930 
8931 	wm_send_common_locked(ifp, txq, true);
8932 }
8933 
8934 static void
8935 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
8936     bool is_transmit)
8937 {
8938 	struct wm_softc *sc = ifp->if_softc;
8939 	struct mbuf *m0;
8940 	struct wm_txsoft *txs;
8941 	bus_dmamap_t dmamap;
8942 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
8943 	bus_addr_t curaddr;
8944 	bus_size_t seglen, curlen;
8945 	uint32_t cksumcmd;
8946 	uint8_t cksumfields;
8947 	bool remap = true;
8948 
8949 	KASSERT(mutex_owned(txq->txq_lock));
8950 	KASSERT(!txq->txq_stopping);
8951 
8952 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
8953 		return;
8954 
8955 	if (__predict_false(wm_linkdown_discard(txq))) {
8956 		do {
8957 			if (is_transmit)
8958 				m0 = pcq_get(txq->txq_interq);
8959 			else
8960 				IFQ_DEQUEUE(&ifp->if_snd, m0);
8961 			/*
8962 			 * Count the packet as successfully sent, as if it
8963 			 * had been discarded by the link-down PHY itself.
8964 			 */
8965 			if (m0 != NULL) {
8966 				if_statinc(ifp, if_opackets);
8967 				m_freem(m0);
8968 			}
8969 		} while (m0 != NULL);
8970 		return;
8971 	}
8972 
8973 	/* Remember the previous number of free descriptors. */
8974 	ofree = txq->txq_free;
8975 
8976 	/*
8977 	 * Loop through the send queue, setting up transmit descriptors
8978 	 * until we drain the queue, or use up all available transmit
8979 	 * descriptors.
8980 	 */
8981 	for (;;) {
8982 		m0 = NULL;
8983 
8984 		/* Get a work queue entry. */
8985 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
8986 			wm_txeof(txq, UINT_MAX);
8987 			if (txq->txq_sfree == 0) {
8988 				DPRINTF(sc, WM_DEBUG_TX,
8989 				    ("%s: TX: no free job descriptors\n",
8990 					device_xname(sc->sc_dev)));
8991 				WM_Q_EVCNT_INCR(txq, txsstall);
8992 				break;
8993 			}
8994 		}
8995 
8996 		/* Grab a packet off the queue. */
8997 		if (is_transmit)
8998 			m0 = pcq_get(txq->txq_interq);
8999 		else
9000 			IFQ_DEQUEUE(&ifp->if_snd, m0);
9001 		if (m0 == NULL)
9002 			break;
9003 
9004 		DPRINTF(sc, WM_DEBUG_TX,
9005 		    ("%s: TX: have packet to transmit: %p\n",
9006 			device_xname(sc->sc_dev), m0));
9007 
9008 		txs = &txq->txq_soft[txq->txq_snext];
9009 		dmamap = txs->txs_dmamap;
9010 
9011 		use_tso = (m0->m_pkthdr.csum_flags &
9012 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
9013 
9014 		/*
9015 		 * So says the Linux driver:
9016 		 * The controller does a simple calculation to make sure
9017 		 * there is enough room in the FIFO before initiating the
9018 		 * DMA for each buffer. The calc is:
9019 		 *	4 = ceil(buffer len / MSS)
9020 		 * To make sure we don't overrun the FIFO, adjust the max
9021 		 * buffer len if the MSS drops.
9022 		 */
9023 		dmamap->dm_maxsegsz =
9024 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
9025 		    ? m0->m_pkthdr.segsz << 2
9026 		    : WTX_MAX_LEN;
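		/*
		 * Illustrative example: with an MSS of 1448, dm_maxsegsz
		 * becomes 1448 << 2 = 5792 bytes (assuming that is below
		 * WTX_MAX_LEN), keeping each DMA buffer within four
		 * MSS-sized chunks as the calculation above requires.
		 */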
9027 
9028 		/*
9029 		 * Load the DMA map.  If this fails, the packet either
9030 		 * didn't fit in the allotted number of segments, or we
9031 		 * were short on resources.  For the too-many-segments
9032 		 * case, we simply report an error and drop the packet,
9033 		 * since we can't sanely copy a jumbo packet to a single
9034 		 * buffer.
9035 		 */
9036 retry:
9037 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
9038 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
9039 		if (__predict_false(error)) {
9040 			if (error == EFBIG) {
9041 				if (remap == true) {
9042 					struct mbuf *m;
9043 
9044 					remap = false;
9045 					m = m_defrag(m0, M_NOWAIT);
9046 					if (m != NULL) {
9047 						WM_Q_EVCNT_INCR(txq, defrag);
9048 						m0 = m;
9049 						goto retry;
9050 					}
9051 				}
9052 				WM_Q_EVCNT_INCR(txq, toomanyseg);
9053 				log(LOG_ERR, "%s: Tx packet consumes too many "
9054 				    "DMA segments, dropping...\n",
9055 				    device_xname(sc->sc_dev));
9056 				wm_dump_mbuf_chain(sc, m0);
9057 				m_freem(m0);
9058 				continue;
9059 			}
9060 			/* Short on resources, just stop for now. */
9061 			DPRINTF(sc, WM_DEBUG_TX,
9062 			    ("%s: TX: dmamap load failed: %d\n",
9063 				device_xname(sc->sc_dev), error));
9064 			break;
9065 		}
9066 
9067 		segs_needed = dmamap->dm_nsegs;
9068 		if (use_tso) {
9069 			/* For sentinel descriptor; see below. */
9070 			segs_needed++;
9071 		}
9072 
9073 		/*
9074 		 * Ensure we have enough descriptors free to describe
9075 		 * the packet. Note, we always reserve one descriptor
9076 		 * at the end of the ring due to the semantics of the
9077 		 * TDT register, plus one more in the event we need
9078 		 * to load offload context.
9079 		 */
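		/*
		 * Illustrative example: a packet needing 3 descriptors is
		 * committed only when txq_free >= 5, which leaves the TDT
		 * sentinel slot plus a possible context descriptor free.
		 */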
9080 		if (segs_needed > txq->txq_free - 2) {
9081 			/*
9082 			 * Not enough free descriptors to transmit this
9083 			 * packet.  We haven't committed anything yet,
9084 			 * so just unload the DMA map, put the packet
9085 			 * back on the queue, and punt. Notify the upper
9086 			 * layer that there are no more slots left.
9087 			 */
9088 			DPRINTF(sc, WM_DEBUG_TX,
9089 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
9090 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
9091 				segs_needed, txq->txq_free - 1));
9092 			txq->txq_flags |= WM_TXQ_NO_SPACE;
9093 			bus_dmamap_unload(sc->sc_dmat, dmamap);
9094 			WM_Q_EVCNT_INCR(txq, txdstall);
9095 			break;
9096 		}
9097 
9098 		/*
9099 		 * Check for 82547 Tx FIFO bug. We need to do this
9100 		 * once we know we can transmit the packet, since we
9101 		 * do some internal FIFO space accounting here.
9102 		 */
9103 		if (sc->sc_type == WM_T_82547 &&
9104 		    wm_82547_txfifo_bugchk(sc, m0)) {
9105 			DPRINTF(sc, WM_DEBUG_TX,
9106 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
9107 				device_xname(sc->sc_dev)));
9108 			txq->txq_flags |= WM_TXQ_NO_SPACE;
9109 			bus_dmamap_unload(sc->sc_dmat, dmamap);
9110 			WM_Q_EVCNT_INCR(txq, fifo_stall);
9111 			break;
9112 		}
9113 
9114 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
9115 
9116 		DPRINTF(sc, WM_DEBUG_TX,
9117 		    ("%s: TX: packet has %d (%d) DMA segments\n",
9118 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
9119 
9120 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
9121 
9122 		/*
9123 		 * Store a pointer to the packet so that we can free it
9124 		 * later.
9125 		 *
9126 		 * Initially, we consider the number of descriptors the
9127 		 * packet uses to be the number of DMA segments. This may be
9128 		 * incremented by 1 if we do checksum offload (a descriptor
9129 		 * is used to set the checksum context).
9130 		 */
9131 		txs->txs_mbuf = m0;
9132 		txs->txs_firstdesc = txq->txq_next;
9133 		txs->txs_ndesc = segs_needed;
9134 
9135 		/* Set up offload parameters for this packet. */
9136 		if (m0->m_pkthdr.csum_flags &
9137 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
9138 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
9139 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
9140 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
9141 		} else {
9142 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
9143 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
9144 			cksumcmd = 0;
9145 			cksumfields = 0;
9146 		}
9147 
9148 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
9149 
9150 		/* Sync the DMA map. */
9151 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
9152 		    BUS_DMASYNC_PREWRITE);
9153 
9154 		/* Initialize the transmit descriptor. */
9155 		for (nexttx = txq->txq_next, seg = 0;
9156 		     seg < dmamap->dm_nsegs; seg++) {
9157 			for (seglen = dmamap->dm_segs[seg].ds_len,
9158 			     curaddr = dmamap->dm_segs[seg].ds_addr;
9159 			     seglen != 0;
9160 			     curaddr += curlen, seglen -= curlen,
9161 			     nexttx = WM_NEXTTX(txq, nexttx)) {
9162 				curlen = seglen;
9163 
9164 				/*
9165 				 * So says the Linux driver:
9166 				 * Work around for premature descriptor
9167 				 * write-backs in TSO mode.  Append a
9168 				 * 4-byte sentinel descriptor.
9169 				 */
9170 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
9171 				    curlen > 8)
9172 					curlen -= 4;
9173 
9174 				wm_set_dma_addr(
9175 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
9176 				txq->txq_descs[nexttx].wtx_cmdlen
9177 				    = htole32(cksumcmd | curlen);
9178 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
9179 				    = 0;
9180 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
9181 				    = cksumfields;
9182 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
9183 				lasttx = nexttx;
9184 
9185 				DPRINTF(sc, WM_DEBUG_TX,
9186 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
9187 					"len %#04zx\n",
9188 					device_xname(sc->sc_dev), nexttx,
9189 					(uint64_t)curaddr, curlen));
9190 			}
9191 		}
9192 
9193 		KASSERT(lasttx != -1);
9194 
9195 		/*
9196 		 * Set up the command byte on the last descriptor of
9197 		 * the packet. If we're in the interrupt delay window,
9198 		 * delay the interrupt.
9199 		 */
9200 		txq->txq_descs[lasttx].wtx_cmdlen |=
9201 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
9202 
9203 		/*
9204 		 * If VLANs are enabled and the packet has a VLAN tag, set
9205 		 * up the descriptor to encapsulate the packet for us.
9206 		 *
9207 		 * This is only valid on the last descriptor of the packet.
9208 		 */
9209 		if (vlan_has_tag(m0)) {
9210 			txq->txq_descs[lasttx].wtx_cmdlen |=
9211 			    htole32(WTX_CMD_VLE);
9212 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
9213 			    = htole16(vlan_get_tag(m0));
9214 		}
9215 
9216 		txs->txs_lastdesc = lasttx;
9217 
9218 		DPRINTF(sc, WM_DEBUG_TX,
9219 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
9220 			device_xname(sc->sc_dev),
9221 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
9222 
9223 		/* Sync the descriptors we're using. */
9224 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
9225 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
9226 
9227 		/* Give the packet to the chip. */
9228 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
9229 
9230 		DPRINTF(sc, WM_DEBUG_TX,
9231 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
9232 
9233 		DPRINTF(sc, WM_DEBUG_TX,
9234 		    ("%s: TX: finished transmitting packet, job %d\n",
9235 			device_xname(sc->sc_dev), txq->txq_snext));
9236 
9237 		/* Advance the tx pointer. */
9238 		txq->txq_free -= txs->txs_ndesc;
9239 		txq->txq_next = nexttx;
9240 
9241 		txq->txq_sfree--;
9242 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
9243 
9244 		/* Pass the packet to any BPF listeners. */
9245 		bpf_mtap(ifp, m0, BPF_D_OUT);
9246 	}
9247 
9248 	if (m0 != NULL) {
9249 		txq->txq_flags |= WM_TXQ_NO_SPACE;
9250 		WM_Q_EVCNT_INCR(txq, descdrop);
9251 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
9252 			__func__));
9253 		m_freem(m0);
9254 	}
9255 
9256 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
9257 		/* No more slots; notify upper layer. */
9258 		txq->txq_flags |= WM_TXQ_NO_SPACE;
9259 	}
9260 
9261 	if (txq->txq_free != ofree) {
9262 		/* Set a watchdog timer in case the chip flakes out. */
9263 		txq->txq_lastsent = time_uptime;
9264 		txq->txq_sending = true;
9265 	}
9266 }
9267 
9268 /*
9269  * wm_nq_tx_offload:
9270  *
9271  *	Set up TCP/IP checksumming parameters for the
9272  *	specified packet, for NEWQUEUE devices
9273  */
9274 static void
9275 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
9276     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
9277 {
9278 	struct mbuf *m0 = txs->txs_mbuf;
9279 	uint32_t vl_len, mssidx, cmdc;
9280 	struct ether_header *eh;
9281 	int offset, iphl;
9282 
9283 	/*
9284 	 * XXX It would be nice if the mbuf pkthdr had offset
9285 	 * fields for the protocol headers.
9286 	 */
9287 	*cmdlenp = 0;
9288 	*fieldsp = 0;
9289 
9290 	eh = mtod(m0, struct ether_header *);
9291 	switch (htons(eh->ether_type)) {
9292 	case ETHERTYPE_IP:
9293 	case ETHERTYPE_IPV6:
9294 		offset = ETHER_HDR_LEN;
9295 		break;
9296 
9297 	case ETHERTYPE_VLAN:
9298 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
9299 		break;
9300 
9301 	default:
9302 		/* Don't support this protocol or encapsulation. */
9303 		*do_csum = false;
9304 		return;
9305 	}
9306 	*do_csum = true;
9307 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
9308 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
9309 
9310 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
9311 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
9312 
9313 	if ((m0->m_pkthdr.csum_flags &
9314 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
9315 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
9316 	} else {
9317 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
9318 	}
9319 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
9320 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
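	/*
	 * Illustrative example: for an untagged IPv4/TCP frame, offset is
	 * ETHER_HDR_LEN (14) and iphl is typically 20, so at this point
	 * vl_len packs MACLEN = 14 and IPLEN = 20; a VLAN tag, if any, is
	 * folded in below.
	 */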
9321 
9322 	if (vlan_has_tag(m0)) {
9323 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
9324 		    << NQTXC_VLLEN_VLAN_SHIFT);
9325 		*cmdlenp |= NQTX_CMD_VLE;
9326 	}
9327 
9328 	mssidx = 0;
9329 
9330 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
9331 		int hlen = offset + iphl;
9332 		int tcp_hlen;
9333 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
9334 
9335 		if (__predict_false(m0->m_len <
9336 				    (hlen + sizeof(struct tcphdr)))) {
9337 			/*
9338 			 * TCP/IP headers are not in the first mbuf; we need
9339 			 * to do this the slow and painful way. Let's just
9340 			 * hope this doesn't happen very often.
9341 			 */
9342 			struct tcphdr th;
9343 
9344 			WM_Q_EVCNT_INCR(txq, tsopain);
9345 
9346 			m_copydata(m0, hlen, sizeof(th), &th);
9347 			if (v4) {
9348 				struct ip ip;
9349 
9350 				m_copydata(m0, offset, sizeof(ip), &ip);
9351 				ip.ip_len = 0;
9352 				m_copyback(m0,
9353 				    offset + offsetof(struct ip, ip_len),
9354 				    sizeof(ip.ip_len), &ip.ip_len);
9355 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
9356 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
9357 			} else {
9358 				struct ip6_hdr ip6;
9359 
9360 				m_copydata(m0, offset, sizeof(ip6), &ip6);
9361 				ip6.ip6_plen = 0;
9362 				m_copyback(m0,
9363 				    offset + offsetof(struct ip6_hdr, ip6_plen),
9364 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
9365 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
9366 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
9367 			}
9368 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
9369 			    sizeof(th.th_sum), &th.th_sum);
9370 
9371 			tcp_hlen = th.th_off << 2;
9372 		} else {
9373 			/*
9374 			 * TCP/IP headers are in the first mbuf; we can do
9375 			 * this the easy way.
9376 			 */
9377 			struct tcphdr *th;
9378 
9379 			if (v4) {
9380 				struct ip *ip =
9381 				    (void *)(mtod(m0, char *) + offset);
9382 				th = (void *)(mtod(m0, char *) + hlen);
9383 
9384 				ip->ip_len = 0;
9385 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
9386 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
9387 			} else {
9388 				struct ip6_hdr *ip6 =
9389 				    (void *)(mtod(m0, char *) + offset);
9390 				th = (void *)(mtod(m0, char *) + hlen);
9391 
9392 				ip6->ip6_plen = 0;
9393 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
9394 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
9395 			}
9396 			tcp_hlen = th->th_off << 2;
9397 		}
9398 		hlen += tcp_hlen;
9399 		*cmdlenp |= NQTX_CMD_TSE;
9400 
9401 		if (v4) {
9402 			WM_Q_EVCNT_INCR(txq, tso);
9403 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
9404 		} else {
9405 			WM_Q_EVCNT_INCR(txq, tso6);
9406 			*fieldsp |= NQTXD_FIELDS_TUXSM;
9407 		}
9408 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
9409 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
9410 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
9411 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
9412 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
9413 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
9414 	} else {
9415 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
9416 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
9417 	}
9418 
9419 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
9420 		*fieldsp |= NQTXD_FIELDS_IXSM;
9421 		cmdc |= NQTXC_CMD_IP4;
9422 	}
9423 
9424 	if (m0->m_pkthdr.csum_flags &
9425 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
9426 		WM_Q_EVCNT_INCR(txq, tusum);
9427 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
9428 			cmdc |= NQTXC_CMD_TCP;
9429 		else
9430 			cmdc |= NQTXC_CMD_UDP;
9431 
9432 		cmdc |= NQTXC_CMD_IP4;
9433 		*fieldsp |= NQTXD_FIELDS_TUXSM;
9434 	}
9435 	if (m0->m_pkthdr.csum_flags &
9436 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
9437 		WM_Q_EVCNT_INCR(txq, tusum6);
9438 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
9439 			cmdc |= NQTXC_CMD_TCP;
9440 		else
9441 			cmdc |= NQTXC_CMD_UDP;
9442 
9443 		cmdc |= NQTXC_CMD_IP6;
9444 		*fieldsp |= NQTXD_FIELDS_TUXSM;
9445 	}
9446 
9447 	/*
9448 	 * We don't have to write a context descriptor for every packet on
9449 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
9450 	 * I210 and I211. For these controllers it is enough to write one
9451 	 * per Tx queue.
9452 	 * Writing a context descriptor for every packet is overhead,
9453 	 * but it does not cause problems.
9454 	 */
9455 	/* Fill in the context descriptor. */
9456 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_vl_len =
9457 	    htole32(vl_len);
9458 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_sn = 0;
9459 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_cmd =
9460 	    htole32(cmdc);
9461 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_mssidx =
9462 	    htole32(mssidx);
9463 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
9464 	DPRINTF(sc, WM_DEBUG_TX,
9465 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
9466 		txq->txq_next, 0, vl_len));
9467 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
9468 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
9469 	txs->txs_ndesc++;
9470 }
9471 
9472 /*
9473  * wm_nq_start:		[ifnet interface function]
9474  *
9475  *	Start packet transmission on the interface for NEWQUEUE devices
9476  */
9477 static void
9478 wm_nq_start(struct ifnet *ifp)
9479 {
9480 	struct wm_softc *sc = ifp->if_softc;
9481 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
9482 
9483 	KASSERT(if_is_mpsafe(ifp));
9484 	/*
9485 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
9486 	 */
9487 
9488 	mutex_enter(txq->txq_lock);
9489 	if (!txq->txq_stopping)
9490 		wm_nq_start_locked(ifp);
9491 	mutex_exit(txq->txq_lock);
9492 }
9493 
9494 static void
9495 wm_nq_start_locked(struct ifnet *ifp)
9496 {
9497 	struct wm_softc *sc = ifp->if_softc;
9498 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
9499 
9500 	wm_nq_send_common_locked(ifp, txq, false);
9501 }
9502 
9503 static int
9504 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
9505 {
9506 	int qid;
9507 	struct wm_softc *sc = ifp->if_softc;
9508 	struct wm_txqueue *txq;
9509 
9510 	qid = wm_select_txqueue(ifp, m);
9511 	txq = &sc->sc_queue[qid].wmq_txq;
9512 
9513 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
9514 		m_freem(m);
9515 		WM_Q_EVCNT_INCR(txq, pcqdrop);
9516 		return ENOBUFS;
9517 	}
9518 
9519 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
9520 	if_statadd_ref(ifp, nsr, if_obytes, m->m_pkthdr.len);
9521 	if (m->m_flags & M_MCAST)
9522 		if_statinc_ref(ifp, nsr, if_omcasts);
9523 	IF_STAT_PUTREF(ifp);
9524 
9525 	/*
9526 	 * This mutex_tryenter() can fail at run time in two situations:
9527 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
9528 	 *     (2) contention with the deferred if_start softint
9529 	 *         (wm_handle_queue())
9530 	 * In the case of (1), the last packet enqueued to txq->txq_interq is
9531 	 * dequeued by wm_deferred_start_locked(). So, it does not get stuck.
9532 	 * In the case of (2), the last packet enqueued to txq->txq_interq is
9533 	 * also dequeued by wm_deferred_start_locked(). So, it does not get
9534 	 * stuck, either.
9535 	 */
9536 	if (mutex_tryenter(txq->txq_lock)) {
9537 		if (!txq->txq_stopping)
9538 			wm_nq_transmit_locked(ifp, txq);
9539 		mutex_exit(txq->txq_lock);
9540 	}
9541 
9542 	return 0;
9543 }
9544 
9545 static void
9546 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
9547 {
9548 
9549 	wm_nq_send_common_locked(ifp, txq, true);
9550 }
9551 
9552 static void
9553 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
9554     bool is_transmit)
9555 {
9556 	struct wm_softc *sc = ifp->if_softc;
9557 	struct mbuf *m0;
9558 	struct wm_txsoft *txs;
9559 	bus_dmamap_t dmamap;
9560 	int error, nexttx, lasttx = -1, seg, segs_needed;
9561 	bool do_csum, sent;
9562 	bool remap = true;
9563 
9564 	KASSERT(mutex_owned(txq->txq_lock));
9565 	KASSERT(!txq->txq_stopping);
9566 
9567 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
9568 		return;
9569 
9570 	if (__predict_false(wm_linkdown_discard(txq))) {
9571 		do {
9572 			if (is_transmit)
9573 				m0 = pcq_get(txq->txq_interq);
9574 			else
9575 				IFQ_DEQUEUE(&ifp->if_snd, m0);
9576 			/*
9577 			 * Count the packet as successfully sent, as if it
9578 			 * had been discarded by the link-down PHY itself.
9579 			 */
9580 			if (m0 != NULL) {
9581 				if_statinc(ifp, if_opackets);
9582 				m_freem(m0);
9583 			}
9584 		} while (m0 != NULL);
9585 		return;
9586 	}
9587 
9588 	sent = false;
9589 
9590 	/*
9591 	 * Loop through the send queue, setting up transmit descriptors
9592 	 * until we drain the queue, or use up all available transmit
9593 	 * descriptors.
9594 	 */
9595 	for (;;) {
9596 		m0 = NULL;
9597 
9598 		/* Get a work queue entry. */
9599 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
9600 			wm_txeof(txq, UINT_MAX);
9601 			if (txq->txq_sfree == 0) {
9602 				DPRINTF(sc, WM_DEBUG_TX,
9603 				    ("%s: TX: no free job descriptors\n",
9604 					device_xname(sc->sc_dev)));
9605 				WM_Q_EVCNT_INCR(txq, txsstall);
9606 				break;
9607 			}
9608 		}
9609 
9610 		/* Grab a packet off the queue. */
9611 		if (is_transmit)
9612 			m0 = pcq_get(txq->txq_interq);
9613 		else
9614 			IFQ_DEQUEUE(&ifp->if_snd, m0);
9615 		if (m0 == NULL)
9616 			break;
9617 
9618 		DPRINTF(sc, WM_DEBUG_TX,
9619 		    ("%s: TX: have packet to transmit: %p\n",
9620 			device_xname(sc->sc_dev), m0));
9621 
9622 		txs = &txq->txq_soft[txq->txq_snext];
9623 		dmamap = txs->txs_dmamap;
9624 
9625 		/*
9626 		 * Load the DMA map.  If this fails, the packet either
9627 		 * didn't fit in the allotted number of segments, or we
9628 		 * were short on resources.  For the too-many-segments
9629 		 * case, we simply report an error and drop the packet,
9630 		 * since we can't sanely copy a jumbo packet to a single
9631 		 * buffer.
9632 		 */
9633 retry:
9634 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
9635 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
9636 		if (__predict_false(error)) {
9637 			if (error == EFBIG) {
9638 				if (remap == true) {
9639 					struct mbuf *m;
9640 
9641 					remap = false;
9642 					m = m_defrag(m0, M_NOWAIT);
9643 					if (m != NULL) {
9644 						WM_Q_EVCNT_INCR(txq, defrag);
9645 						m0 = m;
9646 						goto retry;
9647 					}
9648 				}
9649 				WM_Q_EVCNT_INCR(txq, toomanyseg);
9650 				log(LOG_ERR, "%s: Tx packet consumes too many "
9651 				    "DMA segments, dropping...\n",
9652 				    device_xname(sc->sc_dev));
9653 				wm_dump_mbuf_chain(sc, m0);
9654 				m_freem(m0);
9655 				continue;
9656 			}
9657 			/* Short on resources, just stop for now. */
9658 			DPRINTF(sc, WM_DEBUG_TX,
9659 			    ("%s: TX: dmamap load failed: %d\n",
9660 				device_xname(sc->sc_dev), error));
9661 			break;
9662 		}
9663 
9664 		segs_needed = dmamap->dm_nsegs;
9665 
9666 		/*
9667 		 * Ensure we have enough descriptors free to describe
9668 		 * the packet. Note, we always reserve one descriptor
9669 		 * at the end of the ring due to the semantics of the
9670 		 * TDT register, plus one more in the event we need
9671 		 * to load offload context.
9672 		 */
9673 		if (segs_needed > txq->txq_free - 2) {
9674 			/*
9675 			 * Not enough free descriptors to transmit this
9676 			 * packet.  We haven't committed anything yet,
9677 			 * so just unload the DMA map, put the packet
9678 			 * back on the queue, and punt. Notify the upper
9679 			 * layer that there are no more slots left.
9680 			 */
9681 			DPRINTF(sc, WM_DEBUG_TX,
9682 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
9683 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
9684 				segs_needed, txq->txq_free - 1));
9685 			txq->txq_flags |= WM_TXQ_NO_SPACE;
9686 			bus_dmamap_unload(sc->sc_dmat, dmamap);
9687 			WM_Q_EVCNT_INCR(txq, txdstall);
9688 			break;
9689 		}
9690 
9691 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
9692 
9693 		DPRINTF(sc, WM_DEBUG_TX,
9694 		    ("%s: TX: packet has %d (%d) DMA segments\n",
9695 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
9696 
9697 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
9698 
9699 		/*
9700 		 * Store a pointer to the packet so that we can free it
9701 		 * later.
9702 		 *
9703 		 * Initially, we consider the number of descriptors the
9704 		 * packet uses to be the number of DMA segments. This may be
9705 		 * incremented by 1 if we do checksum offload (a descriptor
9706 		 * is used to set the checksum context).
9707 		 */
9708 		txs->txs_mbuf = m0;
9709 		txs->txs_firstdesc = txq->txq_next;
9710 		txs->txs_ndesc = segs_needed;
9711 
9712 		/* Set up offload parameters for this packet. */
9713 		uint32_t cmdlen, fields, dcmdlen;
9714 		if (m0->m_pkthdr.csum_flags &
9715 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
9716 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
9717 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
9718 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
9719 			    &do_csum);
9720 		} else {
9721 			do_csum = false;
9722 			cmdlen = 0;
9723 			fields = 0;
9724 		}
9725 
9726 		/* Sync the DMA map. */
9727 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
9728 		    BUS_DMASYNC_PREWRITE);
9729 
9730 		/* Initialize the first transmit descriptor. */
9731 		nexttx = txq->txq_next;
9732 		if (!do_csum) {
9733 			/* Set up a legacy descriptor */
9734 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
9735 			    dmamap->dm_segs[0].ds_addr);
9736 			txq->txq_descs[nexttx].wtx_cmdlen =
9737 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
9738 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
9739 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
9740 			if (vlan_has_tag(m0)) {
9741 				txq->txq_descs[nexttx].wtx_cmdlen |=
9742 				    htole32(WTX_CMD_VLE);
9743 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
9744 				    htole16(vlan_get_tag(m0));
9745 			} else
9746 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
9747 
9748 			dcmdlen = 0;
9749 		} else {
9750 			/* Set up an advanced data descriptor */
9751 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
9752 			    htole64(dmamap->dm_segs[0].ds_addr);
9753 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
9754 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
9755 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
9756 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
9757 			    htole32(fields);
9758 			DPRINTF(sc, WM_DEBUG_TX,
9759 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
9760 				device_xname(sc->sc_dev), nexttx,
9761 				(uint64_t)dmamap->dm_segs[0].ds_addr));
9762 			DPRINTF(sc, WM_DEBUG_TX,
9763 			    ("\t 0x%08x%08x\n", fields,
9764 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
9765 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
9766 		}
9767 
9768 		lasttx = nexttx;
9769 		nexttx = WM_NEXTTX(txq, nexttx);
9770 		/*
9771 		 * Fill in the next descriptors. Legacy or advanced format
9772 		 * is the same here.
9773 		 */
9774 		for (seg = 1; seg < dmamap->dm_nsegs;
9775 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
9776 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
9777 			    htole64(dmamap->dm_segs[seg].ds_addr);
9778 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
9779 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
9780 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
9781 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
9782 			lasttx = nexttx;
9783 
9784 			DPRINTF(sc, WM_DEBUG_TX,
9785 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
9786 				device_xname(sc->sc_dev), nexttx,
9787 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
9788 				dmamap->dm_segs[seg].ds_len));
9789 		}
9790 
9791 		KASSERT(lasttx != -1);
9792 
9793 		/*
9794 		 * Set up the command byte on the last descriptor of
9795 		 * the packet. If we're in the interrupt delay window,
9796 		 * delay the interrupt.
9797 		 */
9798 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
9799 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
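		/*
		 * The EOP/RS bits occupy the same positions in the legacy
		 * and advanced descriptor layouts (asserted above), so the
		 * write below through the legacy view is safe either way.
		 */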
9800 		txq->txq_descs[lasttx].wtx_cmdlen |=
9801 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
9802 
9803 		txs->txs_lastdesc = lasttx;
9804 
9805 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
9806 		    device_xname(sc->sc_dev),
9807 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
9808 
9809 		/* Sync the descriptors we're using. */
9810 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
9811 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
9812 
9813 		/* Give the packet to the chip. */
9814 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
9815 		sent = true;
9816 
9817 		DPRINTF(sc, WM_DEBUG_TX,
9818 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
9819 
9820 		DPRINTF(sc, WM_DEBUG_TX,
9821 		    ("%s: TX: finished transmitting packet, job %d\n",
9822 			device_xname(sc->sc_dev), txq->txq_snext));
9823 
9824 		/* Advance the tx pointer. */
9825 		txq->txq_free -= txs->txs_ndesc;
9826 		txq->txq_next = nexttx;
9827 
9828 		txq->txq_sfree--;
9829 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
9830 
9831 		/* Pass the packet to any BPF listeners. */
9832 		bpf_mtap(ifp, m0, BPF_D_OUT);
9833 	}
9834 
9835 	if (m0 != NULL) {
9836 		txq->txq_flags |= WM_TXQ_NO_SPACE;
9837 		WM_Q_EVCNT_INCR(txq, descdrop);
9838 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
9839 			__func__));
9840 		m_freem(m0);
9841 	}
9842 
9843 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
9844 		/* No more slots; notify upper layer. */
9845 		txq->txq_flags |= WM_TXQ_NO_SPACE;
9846 	}
9847 
9848 	if (sent) {
9849 		/* Set a watchdog timer in case the chip flakes out. */
9850 		txq->txq_lastsent = time_uptime;
9851 		txq->txq_sending = true;
9852 	}
9853 }
9854 
9855 static void
9856 wm_deferred_start_locked(struct wm_txqueue *txq)
9857 {
9858 	struct wm_softc *sc = txq->txq_sc;
9859 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9860 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
9861 	int qid = wmq->wmq_id;
9862 
9863 	KASSERT(mutex_owned(txq->txq_lock));
9864 	KASSERT(!txq->txq_stopping);
9865 
9866 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
9867 		/* XXX need for ALTQ or one CPU system */
9868 		if (qid == 0)
9869 			wm_nq_start_locked(ifp);
9870 		wm_nq_transmit_locked(ifp, txq);
9871 	} else {
9872 		/* XXX need for ALTQ or one CPU system */
9873 		if (qid == 0)
9874 			wm_start_locked(ifp);
9875 		wm_transmit_locked(ifp, txq);
9876 	}
9877 }
9878 
9879 /* Interrupt */
9880 
9881 /*
9882  * wm_txeof:
9883  *
9884  *	Helper; handle transmit interrupts.
9885  */
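/*
 * The limit argument bounds how many completed jobs one call reclaims;
 * when it is exhausted, wm_txeof() returns true ("more work") so the
 * caller can reschedule itself rather than loop unboundedly.
 */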
9886 static bool
9887 wm_txeof(struct wm_txqueue *txq, u_int limit)
9888 {
9889 	struct wm_softc *sc = txq->txq_sc;
9890 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9891 	struct wm_txsoft *txs;
9892 	int count = 0;
9893 	int i;
9894 	uint8_t status;
9895 	bool more = false;
9896 
9897 	KASSERT(mutex_owned(txq->txq_lock));
9898 
9899 	if (txq->txq_stopping)
9900 		return false;
9901 
9902 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
9903 
9904 	/*
9905 	 * Go through the Tx list and free mbufs for those
9906 	 * frames which have been transmitted.
9907 	 */
9908 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
9909 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
9910 		txs = &txq->txq_soft[i];
9911 
9912 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
9913 			device_xname(sc->sc_dev), i));
9914 
9915 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
9916 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
9917 
9918 		status =
9919 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
9920 		if ((status & WTX_ST_DD) == 0) {
9921 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
9922 			    BUS_DMASYNC_PREREAD);
9923 			break;
9924 		}
9925 
9926 		if (limit-- == 0) {
9927 			more = true;
9928 			DPRINTF(sc, WM_DEBUG_TX,
9929 			    ("%s: TX: loop limited, job %d is not processed\n",
9930 				device_xname(sc->sc_dev), i));
9931 			break;
9932 		}
9933 
9934 		count++;
9935 		DPRINTF(sc, WM_DEBUG_TX,
9936 		    ("%s: TX: job %d done: descs %d..%d\n",
9937 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
9938 		    txs->txs_lastdesc));
9939 
9940 #ifdef WM_EVENT_COUNTERS
9941 		if ((status & WTX_ST_TU) && (sc->sc_type <= WM_T_82544))
9942 			WM_Q_EVCNT_INCR(txq, underrun);
9943 #endif /* WM_EVENT_COUNTERS */
9944 
9945 		/*
9946 		 * The 82574 and newer documentation says the status field has
9947 		 * neither an EC (Excessive Collision) nor an LC (Late Collision)
9948 		 * bit (both reserved). Refer to the "PCIe GbE Controller Open
9949 		 * Source Software Developer's Manual", 82574 datasheet and newer.
9950 		 *
9951 		 * XXX I have seen the LC bit set on an I218 even though the
9952 		 * media was full duplex, so the bit might have some other
9953 		 * meaning (I have no documentation for it).
9954 		 */
9955 
9956 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
9957 		    && ((sc->sc_type < WM_T_82574)
9958 			|| (sc->sc_type == WM_T_80003))) {
9959 			if_statinc(ifp, if_oerrors);
9960 			if (status & WTX_ST_LC)
9961 				log(LOG_WARNING, "%s: late collision\n",
9962 				    device_xname(sc->sc_dev));
9963 			else if (status & WTX_ST_EC) {
9964 				if_statadd(ifp, if_collisions,
9965 				    TX_COLLISION_THRESHOLD + 1);
9966 				log(LOG_WARNING, "%s: excessive collisions\n",
9967 				    device_xname(sc->sc_dev));
9968 			}
9969 		} else
9970 			if_statinc(ifp, if_opackets);
9971 
9972 		txq->txq_packets++;
9973 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
9974 
9975 		txq->txq_free += txs->txs_ndesc;
9976 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
9977 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
9978 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
9979 		m_freem(txs->txs_mbuf);
9980 		txs->txs_mbuf = NULL;
9981 	}
9982 
9983 	/* Update the dirty transmit buffer pointer. */
9984 	txq->txq_sdirty = i;
9985 	DPRINTF(sc, WM_DEBUG_TX,
9986 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
9987 
9988 	if (count != 0)
9989 		rnd_add_uint32(&sc->rnd_source, count);
9990 
9991 	/*
9992 	 * If there are no more pending transmissions, cancel the watchdog
9993 	 * timer.
9994 	 */
9995 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
9996 		txq->txq_sending = false;
9997 
9998 	return more;
9999 }
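
/*
 * A minimal, self-contained sketch (not compiled) of the completion scan
 * wm_txeof() performs above: walk forward from the oldest dirty slot,
 * reclaim jobs whose last descriptor has the DD bit set, and stop at the
 * first incomplete job or once every slot is free. The dd[] array is a
 * hypothetical stand-in for the chip writing WTX_ST_DD into the last
 * descriptor of each job.
 */
#if 0
static int
wm_txeof_model(const bool *dd, int sdirty, int *sfree, int qlen)
{
	int i;

	for (i = sdirty; *sfree != qlen; i = (i + 1) % qlen, (*sfree)++) {
		if (!dd[i])	/* WTX_ST_DD not set: chip still owns it. */
			break;
	}
	return i;		/* The new txq_sdirty. */
}
#endif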
10000 
10001 static inline uint32_t
10002 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
10003 {
10004 	struct wm_softc *sc = rxq->rxq_sc;
10005 
10006 	if (sc->sc_type == WM_T_82574)
10007 		return EXTRXC_STATUS(
10008 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
10009 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
10010 		return NQRXC_STATUS(
10011 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
10012 	else
10013 		return rxq->rxq_descs[idx].wrx_status;
10014 }
10015 
10016 static inline uint32_t
10017 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
10018 {
10019 	struct wm_softc *sc = rxq->rxq_sc;
10020 
10021 	if (sc->sc_type == WM_T_82574)
10022 		return EXTRXC_ERROR(
10023 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
10024 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
10025 		return NQRXC_ERROR(
10026 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
10027 	else
10028 		return rxq->rxq_descs[idx].wrx_errors;
10029 }
10030 
10031 static inline uint16_t
10032 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
10033 {
10034 	struct wm_softc *sc = rxq->rxq_sc;
10035 
10036 	if (sc->sc_type == WM_T_82574)
10037 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
10038 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
10039 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
10040 	else
10041 		return rxq->rxq_descs[idx].wrx_special;
10042 }
10043 
10044 static inline int
10045 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
10046 {
10047 	struct wm_softc *sc = rxq->rxq_sc;
10048 
10049 	if (sc->sc_type == WM_T_82574)
10050 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
10051 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
10052 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
10053 	else
10054 		return rxq->rxq_descs[idx].wrx_len;
10055 }
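
/*
 * The accessors above (and the RSS ones below) hide three incompatible
 * Rx descriptor layouts behind one interface: the 82574 uses extended
 * descriptors, chips with WM_F_NEWQUEUE set use the 82575+ "newqueue"
 * format, and everything else uses legacy descriptors. A caller never
 * tests the chip type itself; a hypothetical consumer would look like
 * this (mirroring what wm_rxeof() does below):
 */
#if 0
static void
wm_rxdesc_usage_sketch(struct wm_rxqueue *rxq, int idx)
{
	/* The same calls work for all three descriptor formats. */
	uint32_t status = wm_rxdesc_get_status(rxq, idx);
	uint32_t errors = wm_rxdesc_get_errors(rxq, idx);
	int len = le16toh(wm_rxdesc_get_pktlen(rxq, idx));

	(void)status; (void)errors; (void)len;
}
#endif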
10056 
10057 #ifdef WM_DEBUG
10058 static inline uint32_t
10059 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
10060 {
10061 	struct wm_softc *sc = rxq->rxq_sc;
10062 
10063 	if (sc->sc_type == WM_T_82574)
10064 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
10065 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
10066 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
10067 	else
10068 		return 0;
10069 }
10070 
10071 static inline uint8_t
10072 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
10073 {
10074 	struct wm_softc *sc = rxq->rxq_sc;
10075 
10076 	if (sc->sc_type == WM_T_82574)
10077 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
10078 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
10079 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
10080 	else
10081 		return 0;
10082 }
10083 #endif /* WM_DEBUG */
10084 
10085 static inline bool
10086 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
10087     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
10088 {
10089 
10090 	if (sc->sc_type == WM_T_82574)
10091 		return (status & ext_bit) != 0;
10092 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
10093 		return (status & nq_bit) != 0;
10094 	else
10095 		return (status & legacy_bit) != 0;
10096 }
10097 
10098 static inline bool
10099 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
10100     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
10101 {
10102 
10103 	if (sc->sc_type == WM_T_82574)
10104 		return (error & ext_bit) != 0;
10105 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
10106 		return (error & nq_bit) != 0;
10107 	else
10108 		return (error & legacy_bit) != 0;
10109 }
10110 
10111 static inline bool
10112 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
10113 {
10114 
10115 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
10116 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
10117 		return true;
10118 	else
10119 		return false;
10120 }
10121 
10122 static inline bool
10123 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
10124 {
10125 	struct wm_softc *sc = rxq->rxq_sc;
10126 
10127 	/* XXX missing error bit for newqueue? */
10128 	if (wm_rxdesc_is_set_error(sc, errors,
10129 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
10130 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
10131 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
10132 		NQRXC_ERROR_RXE)) {
10133 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
10134 		    EXTRXC_ERROR_SE, 0))
10135 			log(LOG_WARNING, "%s: symbol error\n",
10136 			    device_xname(sc->sc_dev));
10137 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
10138 		    EXTRXC_ERROR_SEQ, 0))
10139 			log(LOG_WARNING, "%s: receive sequence error\n",
10140 			    device_xname(sc->sc_dev));
10141 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
10142 		    EXTRXC_ERROR_CE, 0))
10143 			log(LOG_WARNING, "%s: CRC error\n",
10144 			    device_xname(sc->sc_dev));
10145 		return true;
10146 	}
10147 
10148 	return false;
10149 }
10150 
10151 static inline bool
10152 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
10153 {
10154 	struct wm_softc *sc = rxq->rxq_sc;
10155 
10156 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
10157 		NQRXC_STATUS_DD)) {
10158 		/* We have processed all of the receive descriptors. */
10159 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
10160 		return false;
10161 	}
10162 
10163 	return true;
10164 }
10165 
10166 static inline bool
10167 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
10168     uint16_t vlantag, struct mbuf *m)
10169 {
10170 
10171 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
10172 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
10173 		vlan_set_tag(m, le16toh(vlantag));
10174 	}
10175 
10176 	return true;
10177 }
10178 
10179 static inline void
10180 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
10181     uint32_t errors, struct mbuf *m)
10182 {
10183 	struct wm_softc *sc = rxq->rxq_sc;
10184 
10185 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
10186 		if (wm_rxdesc_is_set_status(sc, status,
10187 		    WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
10188 			WM_Q_EVCNT_INCR(rxq, ipsum);
10189 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
10190 			if (wm_rxdesc_is_set_error(sc, errors,
10191 			    WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
10192 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
10193 		}
10194 		if (wm_rxdesc_is_set_status(sc, status,
10195 		    WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
10196 			/*
10197 			 * Note: we don't know if this was TCP or UDP,
10198 			 * so we just set both bits, and expect the
10199 			 * upper layers to deal.
10200 			 */
10201 			WM_Q_EVCNT_INCR(rxq, tusum);
10202 			m->m_pkthdr.csum_flags |=
10203 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
10204 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
10205 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
10206 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
10207 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
10208 		}
10209 	}
10210 }
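
/*
 * A hedged sketch of how the flags set above are consumed by the stack:
 * a checksum is good when its M_CSUM_* bit is set and the corresponding
 * *_BAD bit is clear, so setting both the TCP and UDP bits (as the
 * comment in the function explains) is safe. The helper below is
 * illustrative only, not part of the driver.
 */
#if 0
static bool
wm_rx_csum_ok_sketch(const struct mbuf *m)
{
	int f = m->m_pkthdr.csum_flags;

	if ((f & M_CSUM_IPv4) != 0 && (f & M_CSUM_IPv4_BAD) != 0)
		return false;	/* IP header checksum failed. */
	if ((f & (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv6))
	    != 0 && (f & M_CSUM_TCP_UDP_BAD) != 0)
		return false;	/* TCP/UDP checksum failed. */
	return true;		/* Verified, or not offloaded at all. */
}
#endif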
10211 
10212 /*
10213  * wm_rxeof:
10214  *
10215  *	Helper; handle receive interrupts.
10216  */
10217 static bool
10218 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
10219 {
10220 	struct wm_softc *sc = rxq->rxq_sc;
10221 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10222 	struct wm_rxsoft *rxs;
10223 	struct mbuf *m;
10224 	int i, len;
10225 	int count = 0;
10226 	uint32_t status, errors;
10227 	uint16_t vlantag;
10228 	bool more = false;
10229 
10230 	KASSERT(mutex_owned(rxq->rxq_lock));
10231 
10232 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
10233 		rxs = &rxq->rxq_soft[i];
10234 
10235 		DPRINTF(sc, WM_DEBUG_RX,
10236 		    ("%s: RX: checking descriptor %d\n",
10237 			device_xname(sc->sc_dev), i));
10238 		wm_cdrxsync(rxq, i,
10239 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
10240 
10241 		status = wm_rxdesc_get_status(rxq, i);
10242 		errors = wm_rxdesc_get_errors(rxq, i);
10243 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
10244 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
10245 #ifdef WM_DEBUG
10246 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
10247 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
10248 #endif
10249 
10250 		if (!wm_rxdesc_dd(rxq, i, status))
10251 			break;
10252 
10253 		if (limit-- == 0) {
10254 			more = true;
10255 			DPRINTF(sc, WM_DEBUG_RX,
10256 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
10257 				device_xname(sc->sc_dev), i));
10258 			break;
10259 		}
10260 
10261 		count++;
10262 		if (__predict_false(rxq->rxq_discard)) {
10263 			DPRINTF(sc, WM_DEBUG_RX,
10264 			    ("%s: RX: discarding contents of descriptor %d\n",
10265 				device_xname(sc->sc_dev), i));
10266 			wm_init_rxdesc(rxq, i);
10267 			if (wm_rxdesc_is_eop(rxq, status)) {
10268 				/* Reset our state. */
10269 				DPRINTF(sc, WM_DEBUG_RX,
10270 				    ("%s: RX: resetting rxdiscard -> 0\n",
10271 					device_xname(sc->sc_dev)));
10272 				rxq->rxq_discard = 0;
10273 			}
10274 			continue;
10275 		}
10276 
10277 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
10278 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
10279 
10280 		m = rxs->rxs_mbuf;
10281 
10282 		/*
10283 		 * Add a new receive buffer to the ring, unless of
10284 		 * course the length is zero. Treat the latter as a
10285 		 * failed mapping.
10286 		 */
10287 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
10288 			/*
10289 			 * Failed, throw away what we've done so
10290 			 * far, and discard the rest of the packet.
10291 			 */
10292 			if_statinc(ifp, if_ierrors);
10293 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
10294 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
10295 			wm_init_rxdesc(rxq, i);
10296 			if (!wm_rxdesc_is_eop(rxq, status))
10297 				rxq->rxq_discard = 1;
10298 			m_freem(rxq->rxq_head);
10299 			WM_RXCHAIN_RESET(rxq);
10300 			DPRINTF(sc, WM_DEBUG_RX,
10301 			    ("%s: RX: Rx buffer allocation failed, "
10302 			    "dropping packet%s\n", device_xname(sc->sc_dev),
10303 				rxq->rxq_discard ? " (discard)" : ""));
10304 			continue;
10305 		}
10306 
10307 		m->m_len = len;
10308 		rxq->rxq_len += len;
10309 		DPRINTF(sc, WM_DEBUG_RX,
10310 		    ("%s: RX: buffer at %p len %d\n",
10311 			device_xname(sc->sc_dev), m->m_data, len));
10312 
10313 		/* If this is not the end of the packet, keep looking. */
10314 		if (!wm_rxdesc_is_eop(rxq, status)) {
10315 			WM_RXCHAIN_LINK(rxq, m);
10316 			DPRINTF(sc, WM_DEBUG_RX,
10317 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
10318 				device_xname(sc->sc_dev), rxq->rxq_len));
10319 			continue;
10320 		}
10321 
10322 		/*
10323 		 * Okay, we have the entire packet now. The chip is
10324 		 * configured to include the FCS except on I35[04] and
10325 		 * I21[01] (not all chips can be configured to strip it),
10326 		 * so we need to trim it. Those chips have an erratum:
10327 		 * the RCTL_SECRC bit in RCTL is always set, so we don't
10328 		 * trim it on them. PCH2 and newer chips also don't include
10329 		 * the FCS when jumbo frames are used, to work around an
10330 		 * erratum. We may need to adjust the length of the previous
10331 		 * mbuf in the chain if the current mbuf is too short.
10332 		 */
10333 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
10334 			if (m->m_len < ETHER_CRC_LEN) {
10335 				rxq->rxq_tail->m_len
10336 				    -= (ETHER_CRC_LEN - m->m_len);
10337 				m->m_len = 0;
10338 			} else
10339 				m->m_len -= ETHER_CRC_LEN;
10340 			len = rxq->rxq_len - ETHER_CRC_LEN;
10341 		} else
10342 			len = rxq->rxq_len;
10343 
10344 		WM_RXCHAIN_LINK(rxq, m);
10345 
10346 		*rxq->rxq_tailp = NULL;
10347 		m = rxq->rxq_head;
10348 
10349 		WM_RXCHAIN_RESET(rxq);
10350 
10351 		DPRINTF(sc, WM_DEBUG_RX,
10352 		    ("%s: RX: have entire packet, len -> %d\n",
10353 			device_xname(sc->sc_dev), len));
10354 
10355 		/* If an error occurred, update stats and drop the packet. */
10356 		if (wm_rxdesc_has_errors(rxq, errors)) {
10357 			m_freem(m);
10358 			continue;
10359 		}
10360 
10361 		/* No errors.  Receive the packet. */
10362 		m_set_rcvif(m, ifp);
10363 		m->m_pkthdr.len = len;
10364 		/*
10365 		 * TODO
10366 		 * The rsshash and rsstype should be saved in this mbuf.
10367 		 */
10368 		DPRINTF(sc, WM_DEBUG_RX,
10369 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
10370 			device_xname(sc->sc_dev), rsstype, rsshash));
10371 
10372 		/*
10373 		 * If VLANs are enabled, VLAN packets have been unwrapped
10374 		 * for us.  Associate the tag with the packet.
10375 		 */
10376 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
10377 			continue;
10378 
10379 		/* Set up checksum info for this packet. */
10380 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
10381 
10382 		rxq->rxq_packets++;
10383 		rxq->rxq_bytes += len;
10384 		/* Pass it on. */
10385 		if_percpuq_enqueue(sc->sc_ipq, m);
10386 
10387 		if (rxq->rxq_stopping)
10388 			break;
10389 	}
10390 	rxq->rxq_ptr = i;
10391 
10392 	if (count != 0)
10393 		rnd_add_uint32(&sc->rnd_source, count);
10394 
10395 	DPRINTF(sc, WM_DEBUG_RX,
10396 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
10397 
10398 	return more;
10399 }
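
/*
 * A minimal model (not compiled) of the FCS trimming wm_rxeof() does
 * above, assuming a chain whose last two buffer lengths are prev_len and
 * last_len. When the final buffer holds fewer than ETHER_CRC_LEN bytes,
 * part of the FCS sits in the previous buffer and must be trimmed there.
 * E.g. with last_len = 1, prev_len loses 3 bytes and last_len becomes 0,
 * removing exactly the 4-byte FCS.
 */
#if 0
static void
wm_trim_fcs_model(int *prev_len, int *last_len)
{
	if (*last_len < ETHER_CRC_LEN) {
		/* The FCS straddles the last two buffers. */
		*prev_len -= ETHER_CRC_LEN - *last_len;
		*last_len = 0;
	} else
		*last_len -= ETHER_CRC_LEN;
}
#endif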
10400 
10401 /*
10402  * wm_linkintr_gmii:
10403  *
10404  *	Helper; handle link interrupts for GMII.
10405  */
10406 static void
10407 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
10408 {
10409 	device_t dev = sc->sc_dev;
10410 	uint32_t status, reg;
10411 	bool link;
10412 	bool dopoll = true;
10413 	int rv;
10414 
10415 	KASSERT(mutex_owned(sc->sc_core_lock));
10416 
10417 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
10418 		__func__));
10419 
10420 	if ((icr & ICR_LSC) == 0) {
10421 		if (icr & ICR_RXSEQ)
10422 			DPRINTF(sc, WM_DEBUG_LINK,
10423 			    ("%s: LINK Receive sequence error\n",
10424 				device_xname(dev)));
10425 		return;
10426 	}
10427 
10428 	/* Link status changed */
10429 	status = CSR_READ(sc, WMREG_STATUS);
10430 	link = status & STATUS_LU;
10431 	if (link) {
10432 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
10433 			device_xname(dev),
10434 			(status & STATUS_FD) ? "FDX" : "HDX"));
10435 		if (wm_phy_need_linkdown_discard(sc)) {
10436 			DPRINTF(sc, WM_DEBUG_LINK,
10437 			    ("%s: linkintr: Clear linkdown discard flag\n",
10438 				device_xname(dev)));
10439 			wm_clear_linkdown_discard(sc);
10440 		}
10441 	} else {
10442 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
10443 			device_xname(dev)));
10444 		if (wm_phy_need_linkdown_discard(sc)) {
10445 			DPRINTF(sc, WM_DEBUG_LINK,
10446 			    ("%s: linkintr: Set linkdown discard flag\n",
10447 				device_xname(dev)));
10448 			wm_set_linkdown_discard(sc);
10449 		}
10450 	}
10451 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
10452 		wm_gig_downshift_workaround_ich8lan(sc);
10453 
10454 	if ((sc->sc_type == WM_T_ICH8) && (sc->sc_phytype == WMPHY_IGP_3))
10455 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
10456 
10457 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
10458 		device_xname(dev)));
10459 	if ((sc->sc_flags & WM_F_DELAY_LINKUP) != 0) {
10460 		if (link) {
10461 			/*
10462 			 * To work around the problem, we must wait several
10463 			 * hundred milliseconds. The required time depends on
10464 			 * the environment. Wait 1 second to be safe.
10465 			 */
10466 			dopoll = false;
10467 			getmicrotime(&sc->sc_linkup_delay_time);
10468 			sc->sc_linkup_delay_time.tv_sec += 1;
10469 		} else if (sc->sc_linkup_delay_time.tv_sec != 0) {
10470 			/*
10471 			 * Simplify by checking tv_sec only; that's enough.
10472 			 *
10473 			 * Currently, clearing the time isn't required. It's
10474 			 * done just so we can tell that the timer is stopped
10475 			 * (for debugging).
10476 			 */
10477 
10478 			sc->sc_linkup_delay_time.tv_sec = 0;
10479 			sc->sc_linkup_delay_time.tv_usec = 0;
10480 		}
10481 	}
10482 
10483 	/*
10484 	 * Call mii_pollstat().
10485 	 *
10486 	 * Some (not all) systems using I35[04] or I21[01] don't send packets
10487 	 * soon after link-up. The MAC sends a packet to the PHY and no error
10488 	 * is observed. This behavior causes a problem: gratuitous ARP and/or
10489 	 * IPv6 DAD packets are silently dropped. To avoid this problem, don't
10490 	 * call mii_pollstat() here, which would send a LINK_STATE_UP
10491 	 * notification to the upper layer. Instead, mii_pollstat() is called
10492 	 * in wm_gmii_mediastatus(), or mii_tick() is called in wm_tick().
10493 	 */
10494 	if (dopoll)
10495 		mii_pollstat(&sc->sc_mii);
10496 
10497 	/* Do some workarounds soon after link status is changed. */
10498 
10499 	if (sc->sc_type == WM_T_82543) {
10500 		int miistatus, active;
10501 
10502 		/*
10503 		 * With 82543, we need to force speed and
10504 		 * duplex on the MAC equal to what the PHY
10505 		 * speed and duplex configuration is.
10506 		 */
10507 		miistatus = sc->sc_mii.mii_media_status;
10508 
10509 		if (miistatus & IFM_ACTIVE) {
10510 			active = sc->sc_mii.mii_media_active;
10511 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
10512 			switch (IFM_SUBTYPE(active)) {
10513 			case IFM_10_T:
10514 				sc->sc_ctrl |= CTRL_SPEED_10;
10515 				break;
10516 			case IFM_100_TX:
10517 				sc->sc_ctrl |= CTRL_SPEED_100;
10518 				break;
10519 			case IFM_1000_T:
10520 				sc->sc_ctrl |= CTRL_SPEED_1000;
10521 				break;
10522 			default:
10523 				/*
10524 				 * Fiber?
10525 				 * Should not enter here.
10526 				 */
10527 				device_printf(dev, "unknown media (%x)\n",
10528 				    active);
10529 				break;
10530 			}
10531 			if (active & IFM_FDX)
10532 				sc->sc_ctrl |= CTRL_FD;
10533 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10534 		}
10535 	} else if (sc->sc_type == WM_T_PCH) {
10536 		wm_k1_gig_workaround_hv(sc,
10537 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
10538 	}
10539 
10540 	/*
10541 	 * When connected at 10Mbps half-duplex, some parts are excessively
10542 	 * aggressive resulting in many collisions. To avoid this, increase
10543 	 * the IPG and reduce Rx latency in the PHY.
10544 	 */
10545 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_TGP)
10546 	    && link) {
10547 		uint32_t tipg_reg;
10548 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
10549 		bool fdx;
10550 		uint16_t emi_addr, emi_val;
10551 
10552 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
10553 		tipg_reg &= ~TIPG_IPGT_MASK;
10554 		fdx = status & STATUS_FD;
10555 
10556 		if (!fdx && (speed == STATUS_SPEED_10)) {
10557 			tipg_reg |= 0xff;
10558 			/* Reduce Rx latency in analog PHY */
10559 			emi_val = 0;
10560 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
10561 		    fdx && speed != STATUS_SPEED_1000) {
10562 			tipg_reg |= 0xc;
10563 			emi_val = 1;
10564 		} else {
10565 			/* Roll back the default values */
10566 			tipg_reg |= 0x08;
10567 			emi_val = 1;
10568 		}
10569 
10570 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
10571 
10572 		rv = sc->phy.acquire(sc);
10573 		if (rv)
10574 			return;
10575 
10576 		if (sc->sc_type == WM_T_PCH2)
10577 			emi_addr = I82579_RX_CONFIG;
10578 		else
10579 			emi_addr = I217_RX_CONFIG;
10580 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
10581 
10582 		if (sc->sc_type >= WM_T_PCH_LPT) {
10583 			uint16_t phy_reg;
10584 
10585 			sc->phy.readreg_locked(dev, 2,
10586 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
10587 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
10588 			if (speed == STATUS_SPEED_100
10589 			    || speed == STATUS_SPEED_10)
10590 				phy_reg |= 0x3e8;
10591 			else
10592 				phy_reg |= 0xfa;
10593 			sc->phy.writereg_locked(dev, 2,
10594 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
10595 
10596 			if (speed == STATUS_SPEED_1000) {
10597 				sc->phy.readreg_locked(dev, 2,
10598 				    HV_PM_CTRL, &phy_reg);
10599 
10600 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
10601 
10602 				sc->phy.writereg_locked(dev, 2,
10603 				    HV_PM_CTRL, phy_reg);
10604 			}
10605 		}
10606 		sc->phy.release(sc);
10607 
10608 		if (rv)
10609 			return;
10610 
10611 		if (sc->sc_type >= WM_T_PCH_SPT) {
10612 			uint16_t data, ptr_gap;
10613 
10614 			if (speed == STATUS_SPEED_1000) {
10615 				rv = sc->phy.acquire(sc);
10616 				if (rv)
10617 					return;
10618 
10619 				rv = sc->phy.readreg_locked(dev, 2,
10620 				    I82579_UNKNOWN1, &data);
10621 				if (rv) {
10622 					sc->phy.release(sc);
10623 					return;
10624 				}
10625 
10626 				ptr_gap = (data & (0x3ff << 2)) >> 2;
10627 				if (ptr_gap < 0x18) {
10628 					data &= ~(0x3ff << 2);
10629 					data |= (0x18 << 2);
10630 					rv = sc->phy.writereg_locked(dev,
10631 					    2, I82579_UNKNOWN1, data);
10632 				}
10633 				sc->phy.release(sc);
10634 				if (rv)
10635 					return;
10636 			} else {
10637 				rv = sc->phy.acquire(sc);
10638 				if (rv)
10639 					return;
10640 
10641 				rv = sc->phy.writereg_locked(dev, 2,
10642 				    I82579_UNKNOWN1, 0xc023);
10643 				sc->phy.release(sc);
10644 				if (rv)
10645 					return;
10646 
10647 			}
10648 		}
10649 	}
10650 
10651 	/*
10652 	 * I217 Packet Loss issue:
10653 	 * Ensure that the FEXTNVM4 Beacon Duration is set correctly
10654 	 * on power up.
10655 	 * Set the Beacon Duration for I217 to 8 usec.
10656 	 */
10657 	if (sc->sc_type >= WM_T_PCH_LPT) {
10658 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
10659 		reg &= ~FEXTNVM4_BEACON_DURATION;
10660 		reg |= FEXTNVM4_BEACON_DURATION_8US;
10661 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
10662 	}
10663 
10664 	/* Work-around I218 hang issue */
10665 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
10666 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
10667 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
10668 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
10669 		wm_k1_workaround_lpt_lp(sc, link);
10670 
10671 	if (sc->sc_type >= WM_T_PCH_LPT) {
10672 		/*
10673 		 * Set platform power management values for Latency
10674 		 * Tolerance Reporting (LTR)
10675 		 */
10676 		wm_platform_pm_pch_lpt(sc,
10677 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
10678 	}
10679 
10680 	/* Clear link partner's EEE ability */
10681 	sc->eee_lp_ability = 0;
10682 
10683 	/* FEXTNVM6 K1-off workaround */
10684 	if (sc->sc_type == WM_T_PCH_SPT) {
10685 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
10686 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
10687 			reg |= FEXTNVM6_K1_OFF_ENABLE;
10688 		else
10689 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
10690 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
10691 	}
10692 
10693 	if (!link)
10694 		return;
10695 
10696 	switch (sc->sc_type) {
10697 	case WM_T_PCH2:
10698 		wm_k1_workaround_lv(sc);
10699 		/* FALLTHROUGH */
10700 	case WM_T_PCH:
10701 		if (sc->sc_phytype == WMPHY_82578)
10702 			wm_link_stall_workaround_hv(sc);
10703 		break;
10704 	default:
10705 		break;
10706 	}
10707 
10708 	/* Enable/Disable EEE after link up */
10709 	if (sc->sc_phytype > WMPHY_82579)
10710 		wm_set_eee_pchlan(sc);
10711 }
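
/*
 * A hypothetical sketch of how sc_linkup_delay_time, armed above for
 * WM_F_DELAY_LINKUP devices, could be consumed: treat the link as not
 * yet up until the recorded deadline has passed. The real checks live
 * elsewhere (e.g. in the mii_pollstat() callers mentioned above); this
 * only illustrates the timeval comparison.
 */
#if 0
static bool
wm_linkup_delay_expired(const struct timeval *deadline)
{
	struct timeval now;

	if (deadline->tv_sec == 0)
		return true;	/* Timer not running. */
	getmicrotime(&now);
	return timercmp(&now, deadline, >);
}
#endif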
10712 
10713 /*
10714  * wm_linkintr_tbi:
10715  *
10716  *	Helper; handle link interrupts for TBI mode.
10717  */
10718 static void
10719 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
10720 {
10721 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10722 	uint32_t status;
10723 
10724 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
10725 		__func__));
10726 
10727 	status = CSR_READ(sc, WMREG_STATUS);
10728 	if (icr & ICR_LSC) {
10729 		wm_check_for_link(sc);
10730 		if (status & STATUS_LU) {
10731 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
10732 				device_xname(sc->sc_dev),
10733 				(status & STATUS_FD) ? "FDX" : "HDX"));
10734 			/*
10735 			 * NOTE: CTRL will update TFCE and RFCE automatically,
10736 			 * so we should update sc->sc_ctrl
10737 			 */
10738 
10739 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
10740 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
10741 			sc->sc_fcrtl &= ~FCRTL_XONE;
10742 			if (status & STATUS_FD)
10743 				sc->sc_tctl |=
10744 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
10745 			else
10746 				sc->sc_tctl |=
10747 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
10748 			if (sc->sc_ctrl & CTRL_TFCE)
10749 				sc->sc_fcrtl |= FCRTL_XONE;
10750 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
10751 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
10752 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
10753 			sc->sc_tbi_linkup = 1;
10754 			if_link_state_change(ifp, LINK_STATE_UP);
10755 		} else {
10756 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
10757 				device_xname(sc->sc_dev)));
10758 			sc->sc_tbi_linkup = 0;
10759 			if_link_state_change(ifp, LINK_STATE_DOWN);
10760 		}
10761 		/* Update LED */
10762 		wm_tbi_serdes_set_linkled(sc);
10763 	} else if (icr & ICR_RXSEQ)
10764 		DPRINTF(sc, WM_DEBUG_LINK,
10765 		    ("%s: LINK: Receive sequence error\n",
10766 			device_xname(sc->sc_dev)));
10767 }
10768 
10769 /*
10770  * wm_linkintr_serdes:
10771  *
10772  *	Helper; handle link interrupts for SERDES mode.
10773  */
10774 static void
10775 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
10776 {
10777 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10778 	struct mii_data *mii = &sc->sc_mii;
10779 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
10780 	uint32_t pcs_adv, pcs_lpab, reg;
10781 
10782 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
10783 		__func__));
10784 
10785 	if (icr & ICR_LSC) {
10786 		/* Check PCS */
10787 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
10788 		if ((reg & PCS_LSTS_LINKOK) != 0) {
10789 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
10790 				device_xname(sc->sc_dev)));
10791 			mii->mii_media_status |= IFM_ACTIVE;
10792 			sc->sc_tbi_linkup = 1;
10793 			if_link_state_change(ifp, LINK_STATE_UP);
10794 		} else {
10795 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
10796 				device_xname(sc->sc_dev)));
10797 			mii->mii_media_status |= IFM_NONE;
10798 			sc->sc_tbi_linkup = 0;
10799 			if_link_state_change(ifp, LINK_STATE_DOWN);
10800 			wm_tbi_serdes_set_linkled(sc);
10801 			return;
10802 		}
10803 		mii->mii_media_active |= IFM_1000_SX;
10804 		if ((reg & PCS_LSTS_FDX) != 0)
10805 			mii->mii_media_active |= IFM_FDX;
10806 		else
10807 			mii->mii_media_active |= IFM_HDX;
10808 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
10809 			/* Check flow */
10810 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
10811 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
10812 				DPRINTF(sc, WM_DEBUG_LINK,
10813 				    ("XXX LINKOK but not ACOMP\n"));
10814 				return;
10815 			}
10816 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
10817 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
10818 			DPRINTF(sc, WM_DEBUG_LINK,
10819 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
10820 			if ((pcs_adv & TXCW_SYM_PAUSE)
10821 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
10822 				mii->mii_media_active |= IFM_FLOW
10823 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
10824 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
10825 			    && (pcs_adv & TXCW_ASYM_PAUSE)
10826 			    && (pcs_lpab & TXCW_SYM_PAUSE)
10827 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
10828 				mii->mii_media_active |= IFM_FLOW
10829 				    | IFM_ETH_TXPAUSE;
10830 			else if ((pcs_adv & TXCW_SYM_PAUSE)
10831 			    && (pcs_adv & TXCW_ASYM_PAUSE)
10832 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
10833 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
10834 				mii->mii_media_active |= IFM_FLOW
10835 				    | IFM_ETH_RXPAUSE;
10836 		}
10837 		/* Update LED */
10838 		wm_tbi_serdes_set_linkled(sc);
10839 	} else
10840 		DPRINTF(sc, WM_DEBUG_LINK,
10841 		    ("%s: LINK: Receive sequence error\n",
10842 		    device_xname(sc->sc_dev)));
10843 }
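
/*
 * The pause resolution above follows IEEE 802.3 Annex 28B: symmetric
 * pause on both ends enables flow control in both directions, and the
 * asymmetric combinations select a single direction. Summarizing the
 * three cases handled (adv = local PCS_ANADV, lpa = partner PCS_LPAB):
 *
 *	adv SYM	adv ASYM	lpa SYM	lpa ASYM	result
 *	   1	    x		   1	    x		Tx and Rx pause
 *	   0	    1		   1	    1		Tx pause only
 *	   1	    1		   0	    1		Rx pause only
 */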
10844 
10845 /*
10846  * wm_linkintr:
10847  *
10848  *	Helper; handle link interrupts.
10849  */
10850 static void
10851 wm_linkintr(struct wm_softc *sc, uint32_t icr)
10852 {
10853 
10854 	KASSERT(mutex_owned(sc->sc_core_lock));
10855 
10856 	if (sc->sc_flags & WM_F_HAS_MII)
10857 		wm_linkintr_gmii(sc, icr);
10858 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
10859 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
10860 		wm_linkintr_serdes(sc, icr);
10861 	else
10862 		wm_linkintr_tbi(sc, icr);
10863 }
10864 
10865 
10866 static inline void
10867 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
10868 {
10869 
10870 	if (wmq->wmq_txrx_use_workqueue) {
10871 		if (!wmq->wmq_wq_enqueued) {
10872 			wmq->wmq_wq_enqueued = true;
10873 			workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie,
10874 			    curcpu());
10875 		}
10876 	} else
10877 		softint_schedule(wmq->wmq_si);
10878 }
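
/*
 * The dispatch above defers Tx/Rx processing either to a workqueue
 * (thread context) or to a softint, chosen at runtime through
 * wmq_txrx_use_workqueue. The wmq_wq_enqueued flag guards against
 * enqueueing the same work item twice, which workqueue(9) forbids; it
 * is cleared again in wm_handle_queue_work(). A generic form of that
 * guard, with hypothetical parameters, for illustration:
 */
#if 0
static void
wm_enqueue_once_sketch(bool *enqueued, struct workqueue *wq,
    struct work *wk)
{
	if (!*enqueued) {
		*enqueued = true;	/* Cleared by the worker itself. */
		workqueue_enqueue(wq, wk, curcpu());
	}
}
#endif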
10879 
10880 static inline void
10881 wm_legacy_intr_disable(struct wm_softc *sc)
10882 {
10883 
10884 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
10885 }
10886 
10887 static inline void
10888 wm_legacy_intr_enable(struct wm_softc *sc)
10889 {
10890 
10891 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
10892 }
10893 
10894 /*
10895  * wm_intr_legacy:
10896  *
10897  *	Interrupt service routine for INTx and MSI.
10898  */
10899 static int
10900 wm_intr_legacy(void *arg)
10901 {
10902 	struct wm_softc *sc = arg;
10903 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10904 	struct wm_queue *wmq = &sc->sc_queue[0];
10905 	struct wm_txqueue *txq = &wmq->wmq_txq;
10906 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
10907 	u_int txlimit = sc->sc_tx_intr_process_limit;
10908 	u_int rxlimit = sc->sc_rx_intr_process_limit;
10909 	uint32_t icr, rndval = 0;
10910 	bool more = false;
10911 
10912 	icr = CSR_READ(sc, WMREG_ICR);
10913 	if ((icr & sc->sc_icr) == 0)
10914 		return 0;
10915 
10916 	DPRINTF(sc, WM_DEBUG_TX,
10917 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
10918 	if (rndval == 0)
10919 		rndval = icr;
10920 
10921 	mutex_enter(txq->txq_lock);
10922 
10923 	if (txq->txq_stopping) {
10924 		mutex_exit(txq->txq_lock);
10925 		return 1;
10926 	}
10927 
10928 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
10929 	if (icr & ICR_TXDW) {
10930 		DPRINTF(sc, WM_DEBUG_TX,
10931 		    ("%s: TX: got TXDW interrupt\n",
10932 			device_xname(sc->sc_dev)));
10933 		WM_Q_EVCNT_INCR(txq, txdw);
10934 	}
10935 #endif
10936 	if (txlimit > 0) {
10937 		more |= wm_txeof(txq, txlimit);
10938 		if (!IF_IS_EMPTY(&ifp->if_snd))
10939 			more = true;
10940 	} else
10941 		more = true;
10942 	mutex_exit(txq->txq_lock);
10943 
10944 	mutex_enter(rxq->rxq_lock);
10945 
10946 	if (rxq->rxq_stopping) {
10947 		mutex_exit(rxq->rxq_lock);
10948 		return 1;
10949 	}
10950 
10951 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
10952 	if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
10953 		DPRINTF(sc, WM_DEBUG_RX,
10954 		    ("%s: RX: got Rx intr %#" __PRIxBIT "\n",
10955 			device_xname(sc->sc_dev),
10956 			icr & (ICR_RXDMT0 | ICR_RXT0)));
10957 		WM_Q_EVCNT_INCR(rxq, intr);
10958 	}
10959 #endif
10960 	if (rxlimit > 0) {
10961 		/*
10962 		 * wm_rxeof() does *not* call upper layer functions directly,
10963 		 * as if_percpuq_enqueue() just calls softint_schedule().
10964 		 * So, we can call wm_rxeof() in interrupt context.
10965 		 */
10966 		more |= wm_rxeof(rxq, rxlimit);
10967 	} else
10968 		more = true;
10969 
10970 	mutex_exit(rxq->rxq_lock);
10971 
10972 	mutex_enter(sc->sc_core_lock);
10973 
10974 	if (sc->sc_core_stopping) {
10975 		mutex_exit(sc->sc_core_lock);
10976 		return 1;
10977 	}
10978 
10979 	if (icr & (ICR_LSC | ICR_RXSEQ)) {
10980 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
10981 		wm_linkintr(sc, icr);
10982 	}
10983 	if ((icr & ICR_GPI(0)) != 0)
10984 		device_printf(sc->sc_dev, "got module interrupt\n");
10985 
10986 	mutex_exit(sc->sc_core_lock);
10987 
10988 	if (icr & ICR_RXO) {
10989 #if defined(WM_DEBUG)
10990 		log(LOG_WARNING, "%s: Receive overrun\n",
10991 		    device_xname(sc->sc_dev));
10992 #endif /* defined(WM_DEBUG) */
10993 	}
10994 
10995 	rnd_add_uint32(&sc->rnd_source, rndval);
10996 
10997 	if (more) {
10998 		/* Try to get more packets going. */
10999 		wm_legacy_intr_disable(sc);
11000 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
11001 		wm_sched_handle_queue(sc, wmq);
11002 	}
11003 
11004 	return 1;
11005 }
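
/*
 * A note on the shape of wm_intr_legacy() above: reading WMREG_ICR
 * clears the asserted causes on these devices, and returning 0 tells
 * the interrupt framework that a (possibly shared) INTx wasn't ours.
 * A skeletal ISR of the same shape, for illustration only:
 */
#if 0
static int
wm_legacy_isr_skeleton(void *arg)
{
	struct wm_softc *sc = arg;
	uint32_t icr = CSR_READ(sc, WMREG_ICR);	/* Read-to-clear. */

	if ((icr & sc->sc_icr) == 0)
		return 0;	/* Not ours; let other handlers try. */
	/* ... service Tx, Rx and link causes ... */
	return 1;		/* Claimed. */
}
#endif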
11006 
11007 static inline void
11008 wm_txrxintr_disable(struct wm_queue *wmq)
11009 {
11010 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
11011 
11012 	if (__predict_false(!wm_is_using_msix(sc))) {
11013 		wm_legacy_intr_disable(sc);
11014 		return;
11015 	}
11016 
11017 	if (sc->sc_type == WM_T_82574)
11018 		CSR_WRITE(sc, WMREG_IMC,
11019 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
11020 	else if (sc->sc_type == WM_T_82575)
11021 		CSR_WRITE(sc, WMREG_EIMC,
11022 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
11023 	else
11024 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
11025 }
11026 
11027 static inline void
11028 wm_txrxintr_enable(struct wm_queue *wmq)
11029 {
11030 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
11031 
11032 	wm_itrs_calculate(sc, wmq);
11033 
11034 	if (__predict_false(!wm_is_using_msix(sc))) {
11035 		wm_legacy_intr_enable(sc);
11036 		return;
11037 	}
11038 
11039 	/*
11040 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled here.
11041 	 * There is no need to care about which of RXQ(0) and RXQ(1) enables
11042 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled while
11043 	 * its wm_handle_queue(wmq) is running.
11044 	 */
11045 	if (sc->sc_type == WM_T_82574)
11046 		CSR_WRITE(sc, WMREG_IMS,
11047 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
11048 	else if (sc->sc_type == WM_T_82575)
11049 		CSR_WRITE(sc, WMREG_EIMS,
11050 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
11051 	else
11052 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
11053 }
11054 
11055 static int
11056 wm_txrxintr_msix(void *arg)
11057 {
11058 	struct wm_queue *wmq = arg;
11059 	struct wm_txqueue *txq = &wmq->wmq_txq;
11060 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
11061 	struct wm_softc *sc = txq->txq_sc;
11062 	u_int txlimit = sc->sc_tx_intr_process_limit;
11063 	u_int rxlimit = sc->sc_rx_intr_process_limit;
11064 	bool txmore;
11065 	bool rxmore;
11066 
11067 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
11068 
11069 	DPRINTF(sc, WM_DEBUG_TX,
11070 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
11071 
11072 	wm_txrxintr_disable(wmq);
11073 
11074 	mutex_enter(txq->txq_lock);
11075 
11076 	if (txq->txq_stopping) {
11077 		mutex_exit(txq->txq_lock);
11078 		return 1;
11079 	}
11080 
11081 	WM_Q_EVCNT_INCR(txq, txdw);
11082 	if (txlimit > 0) {
11083 		txmore = wm_txeof(txq, txlimit);
11084 		/* wm_deferred start() is done in wm_handle_queue(). */
11085 	} else
11086 		txmore = true;
11087 	mutex_exit(txq->txq_lock);
11088 
11089 	DPRINTF(sc, WM_DEBUG_RX,
11090 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
11091 	mutex_enter(rxq->rxq_lock);
11092 
11093 	if (rxq->rxq_stopping) {
11094 		mutex_exit(rxq->rxq_lock);
11095 		return 1;
11096 	}
11097 
11098 	WM_Q_EVCNT_INCR(rxq, intr);
11099 	if (rxlimit > 0) {
11100 		rxmore = wm_rxeof(rxq, rxlimit);
11101 	} else
11102 		rxmore = true;
11103 	mutex_exit(rxq->rxq_lock);
11104 
11105 	wm_itrs_writereg(sc, wmq);
11106 
11107 	if (txmore || rxmore) {
11108 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
11109 		wm_sched_handle_queue(sc, wmq);
11110 	} else
11111 		wm_txrxintr_enable(wmq);
11112 
11113 	return 1;
11114 }
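
/*
 * The MSI-X queue handler above follows a NAPI-like pattern: mask the
 * queue interrupt, do a bounded amount of Tx/Rx work, then either
 * re-enable the interrupt (all caught up) or stay in polling mode by
 * rescheduling via wm_sched_handle_queue(). Schematically, with a
 * hypothetical process_limited_work() standing in for the Tx/Rx body:
 */
#if 0
static int
wm_queue_isr_shape(struct wm_queue *wmq)
{
	bool more;

	wm_txrxintr_disable(wmq);		/* Mask this queue. */
	more = process_limited_work(wmq);	/* Bounded by *_limit. */
	if (more)
		wm_sched_handle_queue(wmq->wmq_txq.txq_sc, wmq);
	else
		wm_txrxintr_enable(wmq);	/* Unmask and resume. */
	return 1;
}
#endif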
11115 
11116 static void
11117 wm_handle_queue(void *arg)
11118 {
11119 	struct wm_queue *wmq = arg;
11120 	struct wm_txqueue *txq = &wmq->wmq_txq;
11121 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
11122 	struct wm_softc *sc = txq->txq_sc;
11123 	u_int txlimit = sc->sc_tx_process_limit;
11124 	u_int rxlimit = sc->sc_rx_process_limit;
11125 	bool txmore;
11126 	bool rxmore;
11127 
11128 	mutex_enter(txq->txq_lock);
11129 	if (txq->txq_stopping) {
11130 		mutex_exit(txq->txq_lock);
11131 		return;
11132 	}
11133 	txmore = wm_txeof(txq, txlimit);
11134 	wm_deferred_start_locked(txq);
11135 	mutex_exit(txq->txq_lock);
11136 
11137 	mutex_enter(rxq->rxq_lock);
11138 	if (rxq->rxq_stopping) {
11139 		mutex_exit(rxq->rxq_lock);
11140 		return;
11141 	}
11142 	WM_Q_EVCNT_INCR(rxq, defer);
11143 	rxmore = wm_rxeof(rxq, rxlimit);
11144 	mutex_exit(rxq->rxq_lock);
11145 
11146 	if (txmore || rxmore) {
11147 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
11148 		wm_sched_handle_queue(sc, wmq);
11149 	} else
11150 		wm_txrxintr_enable(wmq);
11151 }
11152 
11153 static void
11154 wm_handle_queue_work(struct work *wk, void *context)
11155 {
11156 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
11157 
11158 	/*
11159 	 * Workaround for some qemu environments: they don't stop the
11160 	 * interrupt immediately.
11161 	 */
11162 	wmq->wmq_wq_enqueued = false;
11163 	wm_handle_queue(wmq);
11164 }
11165 
11166 /*
11167  * wm_linkintr_msix:
11168  *
11169  *	Interrupt service routine for link status change for MSI-X.
11170  */
11171 static int
11172 wm_linkintr_msix(void *arg)
11173 {
11174 	struct wm_softc *sc = arg;
11175 	uint32_t reg;
11176 	bool has_rxo = false;
11177 
11178 	reg = CSR_READ(sc, WMREG_ICR);
11179 	mutex_enter(sc->sc_core_lock);
11180 	DPRINTF(sc, WM_DEBUG_LINK,
11181 	    ("%s: LINK: got link intr. ICR = %08x\n",
11182 		device_xname(sc->sc_dev), reg));
11183 
11184 	if (sc->sc_core_stopping)
11185 		goto out;
11186 
11187 	if ((reg & ICR_LSC) != 0) {
11188 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
11189 		wm_linkintr(sc, ICR_LSC);
11190 	}
11191 	if ((reg & ICR_GPI(0)) != 0)
11192 		device_printf(sc->sc_dev, "got module interrupt\n");
11193 
11194 	/*
11195 	 * XXX 82574 MSI-X mode workaround
11196 	 *
11197 	 * 82574 MSI-X mode raises a receive overrun (RXO) interrupt on the
11198 	 * ICR_OTHER MSI-X vector; furthermore, it raises neither the ICR_RXQ(0)
11199 	 * nor the ICR_RXQ(1) vector. So, we generate ICR_RXQ(0) and ICR_RXQ(1)
11200 	 * interrupts by writing WMREG_ICS to process received packets.
11201 	 */
11202 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
11203 #if defined(WM_DEBUG)
11204 		log(LOG_WARNING, "%s: Receive overrun\n",
11205 		    device_xname(sc->sc_dev));
11206 #endif /* defined(WM_DEBUG) */
11207 
11208 		has_rxo = true;
11209 		/*
11210 		 * The RXO interrupt rate is very high when receive traffic is
11211 		 * heavy. We use polling mode for ICR_OTHER, as for the Tx/Rx
11212 		 * interrupts. ICR_OTHER will be re-enabled at the end of
11213 		 * wm_txrxintr_msix(), which is kicked by both the ICR_RXQ(0)
11214 		 * and ICR_RXQ(1) interrupts.
11215 		 */
11216 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
11217 
11218 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
11219 	}
11220 
11223 out:
11224 	mutex_exit(sc->sc_core_lock);
11225 
11226 	if (sc->sc_type == WM_T_82574) {
11227 		if (!has_rxo)
11228 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
11229 		else
11230 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
11231 	} else if (sc->sc_type == WM_T_82575)
11232 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
11233 	else
11234 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
11235 
11236 	return 1;
11237 }
11238 
11239 /*
11240  * Media related.
11241  * GMII, SGMII, TBI (and SERDES)
11242  */
11243 
11244 /* Common */
11245 
11246 /*
11247  * wm_tbi_serdes_set_linkled:
11248  *
11249  *	Update the link LED on TBI and SERDES devices.
11250  */
11251 static void
11252 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
11253 {
11254 
11255 	if (sc->sc_tbi_linkup)
11256 		sc->sc_ctrl |= CTRL_SWDPIN(0);
11257 	else
11258 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
11259 
11260 	/* 82540 or newer devices are active low */
11261 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
11262 
11263 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11264 }
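
/*
 * The XOR above is a compact polarity flip: on 82540 and newer the LED
 * pin is active low, so the bit computed for "link up" must be
 * inverted. An equivalent long-hand form (hypothetical helper):
 */
#if 0
static uint32_t
wm_linkled_bit_sketch(bool linkup, bool active_low)
{
	uint32_t bit = linkup ? CTRL_SWDPIN(0) : 0;

	return active_low ? (bit ^ CTRL_SWDPIN(0)) : bit;
}
#endif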
11265 
11266 /* GMII related */
11267 
11268 /*
11269  * wm_gmii_reset:
11270  *
11271  *	Reset the PHY.
11272  */
11273 static void
11274 wm_gmii_reset(struct wm_softc *sc)
11275 {
11276 	uint32_t reg;
11277 	int rv;
11278 
11279 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
11280 		device_xname(sc->sc_dev), __func__));
11281 
11282 	rv = sc->phy.acquire(sc);
11283 	if (rv != 0) {
11284 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
11285 		    __func__);
11286 		return;
11287 	}
11288 
11289 	switch (sc->sc_type) {
11290 	case WM_T_82542_2_0:
11291 	case WM_T_82542_2_1:
11292 		/* null */
11293 		break;
11294 	case WM_T_82543:
11295 		/*
11296 		 * With 82543, we need to force speed and duplex on the MAC
11297 		 * equal to what the PHY speed and duplex configuration is.
11298 		 * In addition, we need to perform a hardware reset on the PHY
11299 		 * to take it out of reset.
11300 		 */
11301 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
11302 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11303 
11304 		/* The PHY reset pin is active-low. */
11305 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
11306 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
11307 		    CTRL_EXT_SWDPIN(4));
11308 		reg |= CTRL_EXT_SWDPIO(4);
11309 
11310 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11311 		CSR_WRITE_FLUSH(sc);
11312 		delay(10*1000);
11313 
11314 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
11315 		CSR_WRITE_FLUSH(sc);
11316 		delay(150);
11317 #if 0
11318 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
11319 #endif
11320 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
11321 		break;
11322 	case WM_T_82544:	/* Reset 10000us */
11323 	case WM_T_82540:
11324 	case WM_T_82545:
11325 	case WM_T_82545_3:
11326 	case WM_T_82546:
11327 	case WM_T_82546_3:
11328 	case WM_T_82541:
11329 	case WM_T_82541_2:
11330 	case WM_T_82547:
11331 	case WM_T_82547_2:
11332 	case WM_T_82571:	/* Reset 100us */
11333 	case WM_T_82572:
11334 	case WM_T_82573:
11335 	case WM_T_82574:
11336 	case WM_T_82575:
11337 	case WM_T_82576:
11338 	case WM_T_82580:
11339 	case WM_T_I350:
11340 	case WM_T_I354:
11341 	case WM_T_I210:
11342 	case WM_T_I211:
11343 	case WM_T_82583:
11344 	case WM_T_80003:
11345 		/* Generic reset */
11346 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
11347 		CSR_WRITE_FLUSH(sc);
11348 		delay(20000);
11349 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11350 		CSR_WRITE_FLUSH(sc);
11351 		delay(20000);
11352 
11353 		if ((sc->sc_type == WM_T_82541)
11354 		    || (sc->sc_type == WM_T_82541_2)
11355 		    || (sc->sc_type == WM_T_82547)
11356 		    || (sc->sc_type == WM_T_82547_2)) {
11357 			/* Workarounds for IGP are done in igp_reset(). */
11358 			/* XXX add code to set LED after phy reset */
11359 		}
11360 		break;
11361 	case WM_T_ICH8:
11362 	case WM_T_ICH9:
11363 	case WM_T_ICH10:
11364 	case WM_T_PCH:
11365 	case WM_T_PCH2:
11366 	case WM_T_PCH_LPT:
11367 	case WM_T_PCH_SPT:
11368 	case WM_T_PCH_CNP:
11369 	case WM_T_PCH_TGP:
11370 		/* Generic reset */
11371 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
11372 		CSR_WRITE_FLUSH(sc);
11373 		delay(100);
11374 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11375 		CSR_WRITE_FLUSH(sc);
11376 		delay(150);
11377 		break;
11378 	default:
11379 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
11380 		    __func__);
11381 		break;
11382 	}
11383 
11384 	sc->phy.release(sc);
11385 
11386 	/* get_cfg_done */
11387 	wm_get_cfg_done(sc);
11388 
11389 	/* Extra setup */
11390 	switch (sc->sc_type) {
11391 	case WM_T_82542_2_0:
11392 	case WM_T_82542_2_1:
11393 	case WM_T_82543:
11394 	case WM_T_82544:
11395 	case WM_T_82540:
11396 	case WM_T_82545:
11397 	case WM_T_82545_3:
11398 	case WM_T_82546:
11399 	case WM_T_82546_3:
11400 	case WM_T_82541_2:
11401 	case WM_T_82547_2:
11402 	case WM_T_82571:
11403 	case WM_T_82572:
11404 	case WM_T_82573:
11405 	case WM_T_82574:
11406 	case WM_T_82583:
11407 	case WM_T_82575:
11408 	case WM_T_82576:
11409 	case WM_T_82580:
11410 	case WM_T_I350:
11411 	case WM_T_I354:
11412 	case WM_T_I210:
11413 	case WM_T_I211:
11414 	case WM_T_80003:
11415 		/* Null */
11416 		break;
11417 	case WM_T_82541:
11418 	case WM_T_82547:
11419 		/* XXX Actively configure the LED after PHY reset. */
11420 		break;
11421 	case WM_T_ICH8:
11422 	case WM_T_ICH9:
11423 	case WM_T_ICH10:
11424 	case WM_T_PCH:
11425 	case WM_T_PCH2:
11426 	case WM_T_PCH_LPT:
11427 	case WM_T_PCH_SPT:
11428 	case WM_T_PCH_CNP:
11429 	case WM_T_PCH_TGP:
11430 		wm_phy_post_reset(sc);
11431 		break;
11432 	default:
11433 		panic("%s: unknown type\n", __func__);
11434 		break;
11435 	}
11436 }
11437 
11438 /*
11439  * Set up sc_phytype and mii_{read|write}reg.
11440  *
11441  *  To identify the PHY type, the correct read/write functions must be
11442  * selected. To select the correct read/write functions, the PCI ID or
11443  * MAC type is required, without accessing PHY registers.
11444  *
11445  *  On the first call of this function, the PHY ID is not known yet.
11446  * Check the PCI ID or MAC type. The list of PCI IDs may not be perfect,
11447  * so the result might be incorrect.
11448  *
11449  *  On the second call, the PHY OUI and model are used to identify the
11450  * PHY type. This might not be perfect because of missing comparison
11451  * entries, but it will be better than the first call.
11452  *
11453  *  If the newly detected result differs from the previous assumption,
11454  * a diagnostic message is printed.
11455  */
11456 static void
11457 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
11458     uint16_t phy_model)
11459 {
11460 	device_t dev = sc->sc_dev;
11461 	struct mii_data *mii = &sc->sc_mii;
11462 	uint16_t new_phytype = WMPHY_UNKNOWN;
11463 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
11464 	mii_readreg_t new_readreg;
11465 	mii_writereg_t new_writereg;
11466 	bool dodiag = true;
11467 
11468 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
11469 		device_xname(sc->sc_dev), __func__));
11470 
11471 	/*
11472 	 * 1000BASE-T SFP uses SGMII and the first assumed PHY type is always
11473 	 * incorrect, so don't print diagnostic output on the 2nd call.
11474 	 */
11475 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
11476 		dodiag = false;
11477 
11478 	if (mii->mii_readreg == NULL) {
11479 		/*
11480 		 *  This is the first call of this function. For ICH and PCH
11481 		 * variants, it's difficult to determine the PHY access method
11482 		 * by sc_type, so use the PCI product ID for some devices.
11483 		 */
11484 
11485 		switch (sc->sc_pcidevid) {
11486 		case PCI_PRODUCT_INTEL_PCH_M_LM:
11487 		case PCI_PRODUCT_INTEL_PCH_M_LC:
11488 			/* 82577 */
11489 			new_phytype = WMPHY_82577;
11490 			break;
11491 		case PCI_PRODUCT_INTEL_PCH_D_DM:
11492 		case PCI_PRODUCT_INTEL_PCH_D_DC:
11493 			/* 82578 */
11494 			new_phytype = WMPHY_82578;
11495 			break;
11496 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
11497 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
11498 			/* 82579 */
11499 			new_phytype = WMPHY_82579;
11500 			break;
11501 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
11502 		case PCI_PRODUCT_INTEL_82801I_BM:
11503 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
11504 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
11505 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
11506 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
11507 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
11508 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
11509 			/* ICH8, 9, 10 with 82567 */
11510 			new_phytype = WMPHY_BM;
11511 			break;
11512 		default:
11513 			break;
11514 		}
11515 	} else {
11516 		/* It's not the first call. Use PHY OUI and model */
11517 		switch (phy_oui) {
11518 		case MII_OUI_ATTANSIC: /* atphy(4) */
11519 			switch (phy_model) {
11520 			case MII_MODEL_ATTANSIC_AR8021:
11521 				new_phytype = WMPHY_82578;
11522 				break;
11523 			default:
11524 				break;
11525 			}
11526 			break;
11527 		case MII_OUI_xxMARVELL:
11528 			switch (phy_model) {
11529 			case MII_MODEL_xxMARVELL_I210:
11530 				new_phytype = WMPHY_I210;
11531 				break;
11532 			case MII_MODEL_xxMARVELL_E1011:
11533 			case MII_MODEL_xxMARVELL_E1000_3:
11534 			case MII_MODEL_xxMARVELL_E1000_5:
11535 			case MII_MODEL_xxMARVELL_E1112:
11536 				new_phytype = WMPHY_M88;
11537 				break;
11538 			case MII_MODEL_xxMARVELL_E1149:
11539 				new_phytype = WMPHY_BM;
11540 				break;
11541 			case MII_MODEL_xxMARVELL_E1111:
11542 			case MII_MODEL_xxMARVELL_I347:
11543 			case MII_MODEL_xxMARVELL_E1512:
11544 			case MII_MODEL_xxMARVELL_E1340M:
11545 			case MII_MODEL_xxMARVELL_E1543:
11546 				new_phytype = WMPHY_M88;
11547 				break;
11548 			case MII_MODEL_xxMARVELL_I82563:
11549 				new_phytype = WMPHY_GG82563;
11550 				break;
11551 			default:
11552 				break;
11553 			}
11554 			break;
11555 		case MII_OUI_INTEL:
11556 			switch (phy_model) {
11557 			case MII_MODEL_INTEL_I82577:
11558 				new_phytype = WMPHY_82577;
11559 				break;
11560 			case MII_MODEL_INTEL_I82579:
11561 				new_phytype = WMPHY_82579;
11562 				break;
11563 			case MII_MODEL_INTEL_I217:
11564 				new_phytype = WMPHY_I217;
11565 				break;
11566 			case MII_MODEL_INTEL_I82580:
11567 				new_phytype = WMPHY_82580;
11568 				break;
11569 			case MII_MODEL_INTEL_I350:
11570 				new_phytype = WMPHY_I350;
11571 				break;
11572 			default:
11573 				break;
11574 			}
11575 			break;
11576 		case MII_OUI_yyINTEL:
11577 			switch (phy_model) {
11578 			case MII_MODEL_yyINTEL_I82562G:
11579 			case MII_MODEL_yyINTEL_I82562EM:
11580 			case MII_MODEL_yyINTEL_I82562ET:
11581 				new_phytype = WMPHY_IFE;
11582 				break;
11583 			case MII_MODEL_yyINTEL_IGP01E1000:
11584 				new_phytype = WMPHY_IGP;
11585 				break;
11586 			case MII_MODEL_yyINTEL_I82566:
11587 				new_phytype = WMPHY_IGP_3;
11588 				break;
11589 			default:
11590 				break;
11591 			}
11592 			break;
11593 		default:
11594 			break;
11595 		}
11596 
11597 		if (dodiag) {
11598 			if (new_phytype == WMPHY_UNKNOWN)
11599 				aprint_verbose_dev(dev,
11600 				    "%s: Unknown PHY model. OUI=%06x, "
11601 				    "model=%04x\n", __func__, phy_oui,
11602 				    phy_model);
11603 
11604 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
11605 			    && (sc->sc_phytype != new_phytype)) {
11606 				aprint_error_dev(dev, "Previously assumed PHY "
11607 				    "type(%u) was incorrect. PHY type from PHY "
11608 				    "ID = %u\n", sc->sc_phytype, new_phytype);
11609 			}
11610 		}
11611 	}
11612 
11613 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
11614 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
11615 		/* SGMII */
11616 		new_readreg = wm_sgmii_readreg;
11617 		new_writereg = wm_sgmii_writereg;
11618 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
11619 		/* BM2 (phyaddr == 1) */
11620 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
11621 		    && (new_phytype != WMPHY_BM)
11622 		    && (new_phytype != WMPHY_UNKNOWN))
11623 			doubt_phytype = new_phytype;
11624 		new_phytype = WMPHY_BM;
11625 		new_readreg = wm_gmii_bm_readreg;
11626 		new_writereg = wm_gmii_bm_writereg;
11627 	} else if (sc->sc_type >= WM_T_PCH) {
11628 		/* All PCH* use _hv_ */
11629 		new_readreg = wm_gmii_hv_readreg;
11630 		new_writereg = wm_gmii_hv_writereg;
11631 	} else if (sc->sc_type >= WM_T_ICH8) {
11632 		/* non-82567 ICH8, 9 and 10 */
11633 		new_readreg = wm_gmii_i82544_readreg;
11634 		new_writereg = wm_gmii_i82544_writereg;
11635 	} else if (sc->sc_type >= WM_T_80003) {
11636 		/* 80003 */
11637 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
11638 		    && (new_phytype != WMPHY_GG82563)
11639 		    && (new_phytype != WMPHY_UNKNOWN))
11640 			doubt_phytype = new_phytype;
11641 		new_phytype = WMPHY_GG82563;
11642 		new_readreg = wm_gmii_i80003_readreg;
11643 		new_writereg = wm_gmii_i80003_writereg;
11644 	} else if (sc->sc_type >= WM_T_I210) {
11645 		/* I210 and I211 */
11646 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
11647 		    && (new_phytype != WMPHY_I210)
11648 		    && (new_phytype != WMPHY_UNKNOWN))
11649 			doubt_phytype = new_phytype;
11650 		new_phytype = WMPHY_I210;
11651 		new_readreg = wm_gmii_gs40g_readreg;
11652 		new_writereg = wm_gmii_gs40g_writereg;
11653 	} else if (sc->sc_type >= WM_T_82580) {
11654 		/* 82580, I350 and I354 */
11655 		new_readreg = wm_gmii_82580_readreg;
11656 		new_writereg = wm_gmii_82580_writereg;
11657 	} else if (sc->sc_type >= WM_T_82544) {
11658 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
11659 		new_readreg = wm_gmii_i82544_readreg;
11660 		new_writereg = wm_gmii_i82544_writereg;
11661 	} else {
11662 		new_readreg = wm_gmii_i82543_readreg;
11663 		new_writereg = wm_gmii_i82543_writereg;
11664 	}
11665 
11666 	if (new_phytype == WMPHY_BM) {
11667 		/* All BM use _bm_ */
11668 		new_readreg = wm_gmii_bm_readreg;
11669 		new_writereg = wm_gmii_bm_writereg;
11670 	}
11671 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_TGP)) {
11672 		/* All PCH* use _hv_ */
11673 		new_readreg = wm_gmii_hv_readreg;
11674 		new_writereg = wm_gmii_hv_writereg;
11675 	}
11676 
11677 	/* Diag output */
11678 	if (dodiag) {
11679 		if (doubt_phytype != WMPHY_UNKNOWN)
11680 			aprint_error_dev(dev, "Assumed new PHY type was "
11681 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
11682 			    new_phytype);
11683 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
11684 		    && (sc->sc_phytype != new_phytype))
11685 			aprint_error_dev(dev, "Previously assumed PHY type(%u) "
11686 			    "was incorrect. New PHY type = %u\n",
11687 			    sc->sc_phytype, new_phytype);
11688 
11689 		if ((mii->mii_readreg != NULL) &&
11690 		    (new_phytype == WMPHY_UNKNOWN))
11691 			aprint_error_dev(dev, "PHY type is still unknown.\n");
11692 
11693 		if ((mii->mii_readreg != NULL) &&
11694 		    (mii->mii_readreg != new_readreg))
11695 			aprint_error_dev(dev, "Previously assumed PHY "
11696 			    "read/write function was incorrect.\n");
11697 	}
11698 
11699 	/* Update now */
11700 	sc->sc_phytype = new_phytype;
11701 	mii->mii_readreg = new_readreg;
11702 	mii->mii_writereg = new_writereg;
11703 	if (new_readreg == wm_gmii_hv_readreg) {
11704 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
11705 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
11706 	} else if (new_readreg == wm_sgmii_readreg) {
11707 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
11708 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
11709 	} else if (new_readreg == wm_gmii_i82544_readreg) {
11710 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
11711 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
11712 	}
11713 }
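
/*
 * A sketch of the two-phase flow described in the comment above
 * wm_gmii_setup_phytype(); the actual call sites are elsewhere in this
 * file, and phy_oui/phy_model here are placeholders:
 */
#if 0
	/* 1st call: PHY not probed yet; guess from PCI ID / MAC type. */
	wm_gmii_setup_phytype(sc, 0, 0);

	/* ... probe the PHY (mii_attach()) and read its ID registers ... */

	/* 2nd call: refine the guess using the PHY's OUI and model. */
	wm_gmii_setup_phytype(sc, phy_oui, phy_model);
#endif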
11714 
11715 /*
11716  * wm_get_phy_id_82575:
11717  *
11718  * Return PHY ID. Return -1 if it failed.
11719  */
11720 static int
11721 wm_get_phy_id_82575(struct wm_softc *sc)
11722 {
11723 	uint32_t reg;
11724 	int phyid = -1;
11725 
11726 	/* XXX */
11727 	if ((sc->sc_flags & WM_F_SGMII) == 0)
11728 		return -1;
11729 
11730 	if (wm_sgmii_uses_mdio(sc)) {
11731 		switch (sc->sc_type) {
11732 		case WM_T_82575:
11733 		case WM_T_82576:
11734 			reg = CSR_READ(sc, WMREG_MDIC);
11735 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
11736 			break;
11737 		case WM_T_82580:
11738 		case WM_T_I350:
11739 		case WM_T_I354:
11740 		case WM_T_I210:
11741 		case WM_T_I211:
11742 			reg = CSR_READ(sc, WMREG_MDICNFG);
11743 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
11744 			break;
11745 		default:
11746 			return -1;
11747 		}
11748 	}
11749 
11750 	return phyid;
11751 }
11752 
11753 /*
11754  * wm_gmii_mediainit:
11755  *
11756  *	Initialize media for use on 1000BASE-T devices.
11757  */
11758 static void
11759 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
11760 {
11761 	device_t dev = sc->sc_dev;
11762 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
11763 	struct mii_data *mii = &sc->sc_mii;
11764 
11765 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
11766 		device_xname(sc->sc_dev), __func__));
11767 
11768 	/* We have GMII. */
11769 	sc->sc_flags |= WM_F_HAS_MII;
11770 
11771 	if (sc->sc_type == WM_T_80003)
11772 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
11773 	else
11774 		sc->sc_tipg = TIPG_1000T_DFLT;
11775 
11776 	/*
11777 	 * Let the chip set speed/duplex on its own based on
11778 	 * signals from the PHY.
11779 	 * XXXbouyer - I'm not sure this is right for the 80003,
11780 	 * the em driver only sets CTRL_SLU here - but it seems to work.
11781 	 */
11782 	sc->sc_ctrl |= CTRL_SLU;
11783 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11784 
11785 	/* Initialize our media structures and probe the GMII. */
11786 	mii->mii_ifp = ifp;
11787 
11788 	mii->mii_statchg = wm_gmii_statchg;
11789 
11790 	/* get PHY control from SMBus to PCIe */
11791 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
11792 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
11793 	    || (sc->sc_type == WM_T_PCH_CNP) || (sc->sc_type == WM_T_PCH_TGP))
11794 		wm_init_phy_workarounds_pchlan(sc);
11795 
11796 	wm_gmii_reset(sc);
11797 
11798 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
11799 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
11800 	    wm_gmii_mediastatus, sc->sc_core_lock);
11801 
11802 	/* Setup internal SGMII PHY for SFP */
11803 	wm_sgmii_sfp_preconfig(sc);
11804 
11805 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
11806 	    || (sc->sc_type == WM_T_82580)
11807 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
11808 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
11809 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
11810 			/* Attach only one port */
11811 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
11812 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
11813 		} else {
11814 			int i, id;
11815 			uint32_t ctrl_ext;
11816 
11817 			id = wm_get_phy_id_82575(sc);
11818 			if (id != -1) {
11819 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
11820 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
11821 			}
11822 			if ((id == -1)
11823 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
11824 				/* Power on the SGMII PHY if it is disabled */
11825 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
11826 				CSR_WRITE(sc, WMREG_CTRL_EXT,
11827 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
11828 				CSR_WRITE_FLUSH(sc);
11829 				delay(300*1000); /* XXX too long */
11830 
11831 				/*
11832 				 * Scan PHY addresses 1 through 7.
11833 				 *
11834 				 * I2C access fails with the I2C register's
11835 				 * ERROR bit set, so suppress error messages
11836 				 * while scanning.
11837 				 */
11838 				sc->phy.no_errprint = true;
11839 				for (i = 1; i < 8; i++)
11840 					mii_attach(sc->sc_dev, &sc->sc_mii,
11841 					    0xffffffff, i, MII_OFFSET_ANY,
11842 					    MIIF_DOPAUSE);
11843 				sc->phy.no_errprint = false;
11844 
11845 				/* Restore previous sfp cage power state */
11846 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
11847 			}
11848 		}
11849 	} else
11850 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
11851 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
11852 
11853 	/*
11854 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
11855 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
11856 	 */
11857 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
11858 		(sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)
11859 		|| (sc->sc_type == WM_T_PCH_TGP))
11860 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
11861 		wm_set_mdio_slow_mode_hv(sc);
11862 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
11863 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
11864 	}
11865 
11866 	/*
11867 	 * (For ICH8 variants)
11868 	 * If PHY detection failed, use BM's r/w function and retry.
11869 	 */
11870 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
11871 		/* If that failed, retry with the *_bm_* functions */
11872 		aprint_verbose_dev(dev, "Assumed PHY access function "
11873 		    "(type = %d) might be incorrect. Use BM and retry.\n",
11874 		    sc->sc_phytype);
11875 		sc->sc_phytype = WMPHY_BM;
11876 		mii->mii_readreg = wm_gmii_bm_readreg;
11877 		mii->mii_writereg = wm_gmii_bm_writereg;
11878 
11879 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
11880 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
11881 	}
11882 
11883 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
11884 		/* No PHY was found */
11885 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
11886 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
11887 		sc->sc_phytype = WMPHY_NONE;
11888 	} else {
11889 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
11890 
11891 		/*
11892 		 * A PHY was found. Re-check the PHY type with a second call
11893 		 * to wm_gmii_setup_phytype().
11894 		 */
11895 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
11896 		    child->mii_mpd_model);
11897 
11898 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
11899 	}
11900 }
11901 
11902 /*
11903  * wm_gmii_mediachange:	[ifmedia interface function]
11904  *
11905  *	Set hardware to newly-selected media on a 1000BASE-T device.
11906  */
11907 static int
11908 wm_gmii_mediachange(struct ifnet *ifp)
11909 {
11910 	struct wm_softc *sc = ifp->if_softc;
11911 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
11912 	uint32_t reg;
11913 	int rc;
11914 
11915 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
11916 		device_xname(sc->sc_dev), __func__));
11917 
11918 	KASSERT(mutex_owned(sc->sc_core_lock));
11919 
11920 	if ((sc->sc_if_flags & IFF_UP) == 0)
11921 		return 0;
11922 
11923 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
11924 	if ((sc->sc_type == WM_T_82580)
11925 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
11926 	    || (sc->sc_type == WM_T_I211)) {
11927 		reg = CSR_READ(sc, WMREG_PHPM);
11928 		reg &= ~PHPM_GO_LINK_D;
11929 		CSR_WRITE(sc, WMREG_PHPM, reg);
11930 	}
11931 
11932 	/* Disable D0 LPLU. */
11933 	wm_lplu_d0_disable(sc);
11934 
11935 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
11936 	sc->sc_ctrl |= CTRL_SLU;
11937 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
11938 	    || (sc->sc_type > WM_T_82543)) {
11939 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
11940 	} else {
11941 		sc->sc_ctrl &= ~CTRL_ASDE;
11942 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
11943 		if (ife->ifm_media & IFM_FDX)
11944 			sc->sc_ctrl |= CTRL_FD;
11945 		switch (IFM_SUBTYPE(ife->ifm_media)) {
11946 		case IFM_10_T:
11947 			sc->sc_ctrl |= CTRL_SPEED_10;
11948 			break;
11949 		case IFM_100_TX:
11950 			sc->sc_ctrl |= CTRL_SPEED_100;
11951 			break;
11952 		case IFM_1000_T:
11953 			sc->sc_ctrl |= CTRL_SPEED_1000;
11954 			break;
11955 		case IFM_NONE:
11956 			/* There is no specific setting for IFM_NONE */
11957 			break;
11958 		default:
11959 			panic("wm_gmii_mediachange: bad media 0x%x",
11960 			    ife->ifm_media);
11961 		}
11962 	}
11963 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11964 	CSR_WRITE_FLUSH(sc);
11965 
11966 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
11967 		wm_serdes_mediachange(ifp);
11968 
11969 	if (sc->sc_type <= WM_T_82543)
11970 		wm_gmii_reset(sc);
11971 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
11972 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
11973 		/* Allow time for the SFP cage to power up the PHY */
11974 		delay(300 * 1000);
11975 		wm_gmii_reset(sc);
11976 	}
11977 
11978 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
11979 		return 0;
11980 	return rc;
11981 }
11982 
11983 /*
11984  * wm_gmii_mediastatus:	[ifmedia interface function]
11985  *
11986  *	Get the current interface media status on a 1000BASE-T device.
11987  */
11988 static void
11989 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
11990 {
11991 	struct wm_softc *sc = ifp->if_softc;
11992 	struct ethercom *ec = &sc->sc_ethercom;
11993 	struct mii_data *mii;
11994 	bool dopoll = true;
11995 
11996 	/*
11997 	 * In normal drivers, ether_mediastatus() would be called here.
11998 	 * It is open-coded below so that mii_pollstat() can be skipped.
11999 	 */
12000 	KASSERT(mutex_owned(sc->sc_core_lock));
12001 	KASSERT(ec->ec_mii != NULL);
12002 	KASSERT(mii_locked(ec->ec_mii));
12003 
12004 	mii = ec->ec_mii;
12005 	if ((sc->sc_flags & WM_F_DELAY_LINKUP) != 0) {
12006 		struct timeval now;
12007 
12008 		getmicrotime(&now);
12009 		if (timercmp(&now, &sc->sc_linkup_delay_time, <))
12010 			dopoll = false;
12011 		else if (sc->sc_linkup_delay_time.tv_sec != 0) {
12012 			/* Simplify by checking tv_sec only. It's enough. */
12013 
12014 			sc->sc_linkup_delay_time.tv_sec = 0;
12015 			sc->sc_linkup_delay_time.tv_usec = 0;
12016 		}
12017 	}
12018 
12019 	/*
12020 	 * Don't call mii_pollstat() while doing workaround.
12021 	 * See also wm_linkintr_gmii() and wm_tick().
12022 	 */
12023 	if (dopoll)
12024 		mii_pollstat(mii);
12025 	ifmr->ifm_active = mii->mii_media_active;
12026 	ifmr->ifm_status = mii->mii_media_status;
12027 
12028 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
12029 	    | sc->sc_flowflags;
12030 }
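
/*
 * The WM_F_DELAY_LINKUP gate above suppresses mii_pollstat() until
 * sc_linkup_delay_time has passed and then disarms itself; in outline
 * (a simplified sketch of the logic above, not the exact code):
 *
 *	getmicrotime(&now);
 *	if (timercmp(&now, &sc->sc_linkup_delay_time, <))
 *		dopoll = false;		// still inside the delay window
 *	else
 *		timerclear(&sc->sc_linkup_delay_time);	// disarm
 */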
12031 
12032 #define	MDI_IO		CTRL_SWDPIN(2)
12033 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
12034 #define	MDI_CLK		CTRL_SWDPIN(3)
12035 
12036 static void
12037 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
12038 {
12039 	uint32_t i, v;
12040 
12041 	v = CSR_READ(sc, WMREG_CTRL);
12042 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
12043 	v |= MDI_DIR | CTRL_SWDPIO(3);
12044 
12045 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
12046 		if (data & i)
12047 			v |= MDI_IO;
12048 		else
12049 			v &= ~MDI_IO;
12050 		CSR_WRITE(sc, WMREG_CTRL, v);
12051 		CSR_WRITE_FLUSH(sc);
12052 		delay(10);
12053 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
12054 		CSR_WRITE_FLUSH(sc);
12055 		delay(10);
12056 		CSR_WRITE(sc, WMREG_CTRL, v);
12057 		CSR_WRITE_FLUSH(sc);
12058 		delay(10);
12059 	}
12060 }
12061 
12062 static uint16_t
12063 wm_i82543_mii_recvbits(struct wm_softc *sc)
12064 {
12065 	uint32_t v, i;
12066 	uint16_t data = 0;
12067 
12068 	v = CSR_READ(sc, WMREG_CTRL);
12069 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
12070 	v |= CTRL_SWDPIO(3);
12071 
12072 	CSR_WRITE(sc, WMREG_CTRL, v);
12073 	CSR_WRITE_FLUSH(sc);
12074 	delay(10);
12075 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
12076 	CSR_WRITE_FLUSH(sc);
12077 	delay(10);
12078 	CSR_WRITE(sc, WMREG_CTRL, v);
12079 	CSR_WRITE_FLUSH(sc);
12080 	delay(10);
12081 
12082 	for (i = 0; i < 16; i++) {
12083 		data <<= 1;
12084 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
12085 		CSR_WRITE_FLUSH(sc);
12086 		delay(10);
12087 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
12088 			data |= 1;
12089 		CSR_WRITE(sc, WMREG_CTRL, v);
12090 		CSR_WRITE_FLUSH(sc);
12091 		delay(10);
12092 	}
12093 
12094 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
12095 	CSR_WRITE_FLUSH(sc);
12096 	delay(10);
12097 	CSR_WRITE(sc, WMREG_CTRL, v);
12098 	CSR_WRITE_FLUSH(sc);
12099 	delay(10);
12100 
12101 	return data;
12102 }
12103 
12104 #undef MDI_IO
12105 #undef MDI_DIR
12106 #undef MDI_CLK
12107 
12108 /*
12109  * wm_gmii_i82543_readreg:	[mii interface function]
12110  *
12111  *	Read a PHY register on the GMII (i82543 version).
12112  */
12113 static int
12114 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
12115 {
12116 	struct wm_softc *sc = device_private(dev);
12117 
12118 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
12119 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
12120 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
12121 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
12122 
12123 	DPRINTF(sc, WM_DEBUG_GMII,
12124 	    ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
12125 		device_xname(dev), phy, reg, *val));
12126 
12127 	return 0;
12128 }
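
/*
 * The bit-banged access above implements IEEE 802.3 clause 22 MDIO
 * framing: 32 preamble bits, then a 14-bit header holding the start
 * and opcode bits plus the 5-bit PHY and register addresses, then a
 * turnaround and 16 data bits.  A sketch of how the header passed to
 * wm_i82543_mii_sendbits() is composed (variable name is for
 * illustration only):
 *
 *	uint32_t hdr = (MII_COMMAND_START << 12)	// 01
 *	    | (MII_COMMAND_READ << 10)			// 10 for a read
 *	    | (phy << 5)				// PHY address
 *	    | reg;					// register number
 *
 * wm_i82543_mii_recvbits() then clocks the turnaround and the 16 data
 * bits back in, MSB first.
 */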
12129 
12130 /*
12131  * wm_gmii_i82543_writereg:	[mii interface function]
12132  *
12133  *	Write a PHY register on the GMII (i82543 version).
12134  */
12135 static int
12136 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
12137 {
12138 	struct wm_softc *sc = device_private(dev);
12139 
12140 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
12141 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
12142 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
12143 	    (MII_COMMAND_START << 30), 32);
12144 
12145 	return 0;
12146 }
12147 
12148 /*
12149  * wm_gmii_mdic_readreg:	[mii interface function]
12150  *
12151  *	Read a PHY register on the GMII.
12152  */
12153 static int
12154 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
12155 {
12156 	struct wm_softc *sc = device_private(dev);
12157 	uint32_t mdic = 0;
12158 	int i;
12159 
12160 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
12161 	    && (reg > MII_ADDRMASK)) {
12162 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
12163 		    __func__, sc->sc_phytype, reg);
12164 		reg &= MII_ADDRMASK;
12165 	}
12166 
12167 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
12168 	    MDIC_REGADD(reg));
12169 
12170 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
12171 		delay(50);
12172 		mdic = CSR_READ(sc, WMREG_MDIC);
12173 		if (mdic & MDIC_READY)
12174 			break;
12175 	}
12176 
12177 	if ((mdic & MDIC_READY) == 0) {
12178 		DPRINTF(sc, WM_DEBUG_GMII,
12179 		    ("%s: MDIC read timed out: phy %d reg %d\n",
12180 			device_xname(dev), phy, reg));
12181 		return ETIMEDOUT;
12182 	} else if (mdic & MDIC_E) {
12183 		/* This is normal if no PHY is present. */
12184 		DPRINTF(sc, WM_DEBUG_GMII,
12185 		    ("%s: MDIC read error: phy %d reg %d\n",
12186 			device_xname(sc->sc_dev), phy, reg));
12187 		return -1;
12188 	} else
12189 		*val = MDIC_DATA(mdic);
12190 
12191 	/*
12192 	 * Allow some time after each MDIC transaction to avoid
12193 	 * reading duplicate data in the next MDIC transaction.
12194 	 */
12195 	if (sc->sc_type == WM_T_PCH2)
12196 		delay(100);
12197 
12198 	return 0;
12199 }
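
/*
 * For reference, the MDIC command/status word used by
 * wm_gmii_mdic_readreg() above and wm_gmii_mdic_writereg() below
 * packs (a sketch; the WMREG_MDIC bit definitions are authoritative):
 *
 *	bits  0-15	data (MDIC_DATA)
 *	bits 16-20	register address (MDIC_REGADD)
 *	bits 21-25	PHY address (MDIC_PHYADD)
 *	bits 26-27	opcode (MDIC_OP_READ / MDIC_OP_WRITE)
 *	bit  28		ready (MDIC_READY)
 *	bit  30		error (MDIC_E)
 *
 * so a complete read is "write command word, poll MDIC_READY, check
 * MDIC_E, extract MDIC_DATA", exactly as coded above.
 */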
12200 
12201 /*
12202  * wm_gmii_mdic_writereg:	[mii interface function]
12203  *
12204  *	Write a PHY register on the GMII.
12205  */
12206 static int
12207 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
12208 {
12209 	struct wm_softc *sc = device_private(dev);
12210 	uint32_t mdic = 0;
12211 	int i;
12212 
12213 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
12214 	    && (reg > MII_ADDRMASK)) {
12215 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
12216 		    __func__, sc->sc_phytype, reg);
12217 		reg &= MII_ADDRMASK;
12218 	}
12219 
12220 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
12221 	    MDIC_REGADD(reg) | MDIC_DATA(val));
12222 
12223 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
12224 		delay(50);
12225 		mdic = CSR_READ(sc, WMREG_MDIC);
12226 		if (mdic & MDIC_READY)
12227 			break;
12228 	}
12229 
12230 	if ((mdic & MDIC_READY) == 0) {
12231 		DPRINTF(sc, WM_DEBUG_GMII,
12232 		    ("%s: MDIC write timed out: phy %d reg %d\n",
12233 			device_xname(dev), phy, reg));
12234 		return ETIMEDOUT;
12235 	} else if (mdic & MDIC_E) {
12236 		DPRINTF(sc, WM_DEBUG_GMII,
12237 		    ("%s: MDIC write error: phy %d reg %d\n",
12238 			device_xname(dev), phy, reg));
12239 		return -1;
12240 	}
12241 
12242 	/*
12243 	 * Allow some time after each MDIC transaction to avoid
12244 	 * reading duplicate data in the next MDIC transaction.
12245 	 */
12246 	if (sc->sc_type == WM_T_PCH2)
12247 		delay(100);
12248 
12249 	return 0;
12250 }
12251 
12252 /*
12253  * wm_gmii_i82544_readreg:	[mii interface function]
12254  *
12255  *	Read a PHY register on the GMII.
12256  */
12257 static int
12258 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
12259 {
12260 	struct wm_softc *sc = device_private(dev);
12261 	int rv;
12262 
12263 	rv = sc->phy.acquire(sc);
12264 	if (rv != 0) {
12265 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12266 		return rv;
12267 	}
12268 
12269 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
12270 
12271 	sc->phy.release(sc);
12272 
12273 	return rv;
12274 }
12275 
12276 static int
12277 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
12278 {
12279 	struct wm_softc *sc = device_private(dev);
12280 	int rv;
12281 
12282 	switch (sc->sc_phytype) {
12283 	case WMPHY_IGP:
12284 	case WMPHY_IGP_2:
12285 	case WMPHY_IGP_3:
12286 		if (reg > BME1000_MAX_MULTI_PAGE_REG) {
12287 			rv = wm_gmii_mdic_writereg(dev, phy,
12288 			    IGPHY_PAGE_SELECT, reg);
12289 			if (rv != 0)
12290 				return rv;
12291 		}
12292 		break;
12293 	default:
12294 #ifdef WM_DEBUG
12295 		if ((reg >> MII_ADDRBITS) != 0)
12296 			device_printf(dev,
12297 			    "%s: PHYTYPE = 0x%x, addr = 0x%02x\n",
12298 			    __func__, sc->sc_phytype, reg);
12299 #endif
12300 		break;
12301 	}
12302 
12303 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
12304 }
12305 
12306 /*
12307  * wm_gmii_i82544_writereg:	[mii interface function]
12308  *
12309  *	Write a PHY register on the GMII.
12310  */
12311 static int
12312 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
12313 {
12314 	struct wm_softc *sc = device_private(dev);
12315 	int rv;
12316 
12317 	rv = sc->phy.acquire(sc);
12318 	if (rv != 0) {
12319 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12320 		return rv;
12321 	}
12322 
12323 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
12324 	sc->phy.release(sc);
12325 
12326 	return rv;
12327 }
12328 
12329 static int
12330 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
12331 {
12332 	struct wm_softc *sc = device_private(dev);
12333 	int rv;
12334 
12335 	switch (sc->sc_phytype) {
12336 	case WMPHY_IGP:
12337 	case WMPHY_IGP_2:
12338 	case WMPHY_IGP_3:
12339 		if (reg > BME1000_MAX_MULTI_PAGE_REG) {
12340 			rv = wm_gmii_mdic_writereg(dev, phy,
12341 			    IGPHY_PAGE_SELECT, reg);
12342 			if (rv != 0)
12343 				return rv;
12344 		}
12345 		break;
12346 	default:
12347 #ifdef WM_DEBUG
12348 		if ((reg >> MII_ADDRBITS) != 0)
12349 			device_printf(dev,
12350 			    "%s: PHYTYPE = 0x%x, addr = 0x%02x\n",
12351 			    __func__, sc->sc_phytype, reg);
12352 #endif
12353 		break;
12354 	}
12355 
12356 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
12357 }
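
/*
 * For the IGP-family PHYs handled above, a register number larger
 * than BME1000_MAX_MULTI_PAGE_REG carries its page in the upper bits,
 * so one logical access is really two MDIC operations.  A minimal
 * sketch of the read side (the helper name is illustrative, not a
 * driver symbol):
 *
 *	static int
 *	igp_read_example(device_t dev, int phy, int reg, uint16_t *val)
 *	{
 *		int rv;
 *
 *		if (reg > BME1000_MAX_MULTI_PAGE_REG) {
 *			// Select the page; the full value is written.
 *			rv = wm_gmii_mdic_writereg(dev, phy,
 *			    IGPHY_PAGE_SELECT, reg);
 *			if (rv != 0)
 *				return rv;
 *		}
 *		// Access the 5-bit in-page register number.
 *		return wm_gmii_mdic_readreg(dev, phy,
 *		    reg & MII_ADDRMASK, val);
 *	}
 */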
12358 
12359 /*
12360  * wm_gmii_i80003_readreg:	[mii interface function]
12361  *
12362  *	Read a PHY register on the Kumeran interface.
12363  * This could be handled by the PHY layer if we didn't have to lock the
12364  * resource ...
12365  */
12366 static int
12367 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
12368 {
12369 	struct wm_softc *sc = device_private(dev);
12370 	int page_select;
12371 	uint16_t temp, temp2;
12372 	int rv;
12373 
12374 	if (phy != 1) /* Only one PHY on kumeran bus */
12375 		return -1;
12376 
12377 	rv = sc->phy.acquire(sc);
12378 	if (rv != 0) {
12379 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12380 		return rv;
12381 	}
12382 
12383 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
12384 		page_select = GG82563_PHY_PAGE_SELECT;
12385 	else {
12386 		/*
12387 		 * Use Alternative Page Select register to access registers
12388 		 * 30 and 31.
12389 		 */
12390 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
12391 	}
12392 	temp = reg >> GG82563_PAGE_SHIFT;
12393 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
12394 		goto out;
12395 
12396 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
12397 		/*
12398 		 * Wait an extra 200us to work around a bug in the MDIC
12399 		 * register's ready bit.
12400 		 */
12401 		delay(200);
12402 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
12403 		if ((rv != 0) || (temp2 != temp)) {
12404 			device_printf(dev, "%s failed\n", __func__);
12405 			rv = -1;
12406 			goto out;
12407 		}
12408 		delay(200);
12409 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
12410 		delay(200);
12411 	} else
12412 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
12413 
12414 out:
12415 	sc->phy.release(sc);
12416 	return rv;
12417 }
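
/*
 * On the GG82563 the caller-visible register number packs the page
 * above GG82563_PAGE_SHIFT.  For example, a hypothetical access to
 * page 2, register 21 decomposes as:
 *
 *	int reg = (2 << GG82563_PAGE_SHIFT) | 21;
 *	uint16_t page = reg >> GG82563_PAGE_SHIFT;	// 2
 *	int inpage = reg & MII_ADDRMASK;		// 21
 *
 * In-page registers 30 and 31 are the page-select registers
 * themselves, so they can only be reached through the alternative
 * page-select register; that is what the GG82563_MIN_ALT_REG test
 * above implements.
 */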
12418 
12419 /*
12420  * wm_gmii_i80003_writereg:	[mii interface function]
12421  *
12422  *	Write a PHY register on the Kumeran interface.
12423  * This could be handled by the PHY layer if we didn't have to lock the
12424  * resource ...
12425  */
12426 static int
12427 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
12428 {
12429 	struct wm_softc *sc = device_private(dev);
12430 	int page_select, rv;
12431 	uint16_t temp, temp2;
12432 
12433 	if (phy != 1) /* Only one PHY on kumeran bus */
12434 		return -1;
12435 
12436 	rv = sc->phy.acquire(sc);
12437 	if (rv != 0) {
12438 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12439 		return rv;
12440 	}
12441 
12442 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
12443 		page_select = GG82563_PHY_PAGE_SELECT;
12444 	else {
12445 		/*
12446 		 * Use Alternative Page Select register to access registers
12447 		 * 30 and 31.
12448 		 */
12449 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
12450 	}
12451 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
12452 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
12453 		goto out;
12454 
12455 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
12456 		/*
12457 		 * Wait an extra 200us to work around a bug in the MDIC
12458 		 * register's ready bit.
12459 		 */
12460 		delay(200);
12461 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
12462 		if ((rv != 0) || (temp2 != temp)) {
12463 			device_printf(dev, "%s failed\n", __func__);
12464 			rv = -1;
12465 			goto out;
12466 		}
12467 		delay(200);
12468 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
12469 		delay(200);
12470 	} else
12471 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
12472 
12473 out:
12474 	sc->phy.release(sc);
12475 	return rv;
12476 }
12477 
12478 /*
12479  * wm_gmii_bm_readreg:	[mii interface function]
12480  *
12481  *	Read a PHY register on the BM PHY.
12482  * This could be handled by the PHY layer if we didn't have to lock the
12483  * resource ...
12484  */
12485 static int
12486 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
12487 {
12488 	struct wm_softc *sc = device_private(dev);
12489 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
12490 	int rv;
12491 
12492 	rv = sc->phy.acquire(sc);
12493 	if (rv != 0) {
12494 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12495 		return rv;
12496 	}
12497 
12498 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
12499 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
12500 		    || (reg == 31)) ? 1 : phy;
12501 	/* Page 800 works differently than the rest so it has its own func */
12502 	if (page == BM_WUC_PAGE) {
12503 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
12504 		goto release;
12505 	}
12506 
12507 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
12508 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
12509 		    && (sc->sc_type != WM_T_82583))
12510 			rv = wm_gmii_mdic_writereg(dev, phy,
12511 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
12512 		else
12513 			rv = wm_gmii_mdic_writereg(dev, phy,
12514 			    BME1000_PHY_PAGE_SELECT, page);
12515 		if (rv != 0)
12516 			goto release;
12517 	}
12518 
12519 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
12520 
12521 release:
12522 	sc->phy.release(sc);
12523 	return rv;
12524 }
12525 
12526 /*
12527  * wm_gmii_bm_writereg:	[mii interface function]
12528  *
12529  *	Write a PHY register on the BM PHY.
12530  * This could be handled by the PHY layer if we didn't have to lock the
12531  * resource ...
12532  */
12533 static int
12534 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
12535 {
12536 	struct wm_softc *sc = device_private(dev);
12537 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
12538 	int rv;
12539 
12540 	rv = sc->phy.acquire(sc);
12541 	if (rv != 0) {
12542 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12543 		return rv;
12544 	}
12545 
12546 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
12547 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
12548 		    || (reg == 31)) ? 1 : phy;
12549 	/* Page 800 works differently than the rest so it has its own func */
12550 	if (page == BM_WUC_PAGE) {
12551 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
12552 		goto release;
12553 	}
12554 
12555 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
12556 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
12557 		    && (sc->sc_type != WM_T_82583))
12558 			rv = wm_gmii_mdic_writereg(dev, phy,
12559 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
12560 		else
12561 			rv = wm_gmii_mdic_writereg(dev, phy,
12562 			    BME1000_PHY_PAGE_SELECT, page);
12563 		if (rv != 0)
12564 			goto release;
12565 	}
12566 
12567 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
12568 
12569 release:
12570 	sc->phy.release(sc);
12571 	return rv;
12572 }
12573 
12574 /*
12575  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
12576  *  @dev: pointer to the HW structure
12577  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
12578  *
12579  *  Assumes semaphore already acquired and phy_reg points to a valid memory
12580  *  address to store contents of the BM_WUC_ENABLE_REG register.
12581  */
12582 static int
12583 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
12584 {
12585 #ifdef WM_DEBUG
12586 	struct wm_softc *sc = device_private(dev);
12587 #endif
12588 	uint16_t temp;
12589 	int rv;
12590 
12591 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
12592 		device_xname(dev), __func__));
12593 
12594 	if (!phy_regp)
12595 		return -1;
12596 
12597 	/* All page select, port ctrl and wakeup registers use phy address 1 */
12598 
12599 	/* Select Port Control Registers page */
12600 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12601 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
12602 	if (rv != 0)
12603 		return rv;
12604 
12605 	/* Read WUCE and save it */
12606 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
12607 	if (rv != 0)
12608 		return rv;
12609 
12610 	/* Enable both PHY wakeup mode and Wakeup register page writes.
12611 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
12612 	 */
12613 	temp = *phy_regp;
12614 	temp |= BM_WUC_ENABLE_BIT;
12615 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
12616 
12617 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
12618 		return rv;
12619 
12620 	/* Select Host Wakeup Registers page - caller now able to write
12621 	 * registers on the Wakeup registers page
12622 	 */
12623 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12624 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
12625 }
12626 
12627 /*
12628  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
12629  *  @dev: pointer to the HW structure
12630  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
12631  *
12632  *  Restore BM_WUC_ENABLE_REG to its original value.
12633  *
12634  *  Assumes semaphore already acquired and *phy_reg is the contents of the
12635  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
12636  *  caller.
12637  */
12638 static int
12639 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
12640 {
12641 #ifdef WM_DEBUG
12642 	struct wm_softc *sc = device_private(dev);
12643 #endif
12644 
12645 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
12646 		device_xname(dev), __func__));
12647 
12648 	if (!phy_regp)
12649 		return -1;
12650 
12651 	/* Select Port Control Registers page */
12652 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12653 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
12654 
12655 	/* Restore 769.17 to its original value */
12656 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
12657 
12658 	return 0;
12659 }
12660 
12661 /*
12662  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
12663  *  @dev: pointer to the device structure
12664  *  @offset: register offset to be read or written
12665  *  @val: pointer to the data to read or write
12666  *  @rd: determines if operation is read or write
12667  *  @page_set: BM_WUC_PAGE already set and access enabled
12668  *
12669  *  Read the PHY register at offset and store the retrieved information in
12670  *  data, or write data to PHY register at offset.  Note the procedure to
12671  *  access the PHY wakeup registers is different than reading the other PHY
12672  *  registers. It works as such:
12673  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
12674  *  2) Set the page to 800 for host access (801 would be for manageability)
12675  *  3) Write the address using the address opcode (0x11)
12676  *  4) Read or write the data using the data opcode (0x12)
12677  *  5) Restore 769.17.2 to its original value
12678  *
12679  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
12680  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
12681  *
12682  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
12683  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
12684  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
12685  */
12686 static int
12687 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
12688     bool page_set)
12689 {
12690 	struct wm_softc *sc = device_private(dev);
12691 	uint16_t regnum = BM_PHY_REG_NUM(offset);
12692 	uint16_t page = BM_PHY_REG_PAGE(offset);
12693 	uint16_t wuce;
12694 	int rv = 0;
12695 
12696 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
12697 		device_xname(dev), __func__));
12698 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
12699 	if ((sc->sc_type == WM_T_PCH)
12700 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
12701 		device_printf(dev,
12702 		    "Attempting to access page %d while gig enabled.\n", page);
12703 	}
12704 
12705 	if (!page_set) {
12706 		/* Enable access to PHY wakeup registers */
12707 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
12708 		if (rv != 0) {
12709 			device_printf(dev,
12710 			    "%s: Could not enable PHY wakeup reg access\n",
12711 			    __func__);
12712 			return rv;
12713 		}
12714 	}
12715 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
12716 		device_xname(sc->sc_dev), __func__, page, regnum));
12717 
12718 	/*
12719 	 * Steps 3) and 4): Access the PHY wakeup register.
12720 	 * See the comment above wm_access_phy_wakeup_reg_bm().
12721 	 */
12722 
12723 	/* Write the Wakeup register page offset value using opcode 0x11 */
12724 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
12725 	if (rv != 0)
12726 		return rv;
12727 
12728 	if (rd) {
12729 		/* Read the Wakeup register page value using opcode 0x12 */
12730 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
12731 	} else {
12732 		/* Write the Wakeup register page value using opcode 0x12 */
12733 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
12734 	}
12735 	if (rv != 0)
12736 		return rv;
12737 
12738 	if (!page_set)
12739 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
12740 
12741 	return rv;
12742 }
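
/*
 * Putting the three wakeup helpers together, a full read of a wakeup
 * register when the page is not already set looks like this (a sketch
 * of what wm_access_phy_wakeup_reg_bm() does; the real entry points
 * are the *_bm_* and *_hv_* accessors):
 *
 *	uint16_t wuce, data;
 *	uint16_t regnum = BM_PHY_REG_NUM(offset);
 *
 *	// Steps 1-2: save WUCE, enable wakeup access, select page 800.
 *	wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
 *	// Step 3: write the in-page register number (opcode 0x11).
 *	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
 *	// Step 4: read the value (opcode 0x12).
 *	wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, &data);
 *	// Step 5: restore WUCE.
 *	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
 */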
12743 
12744 /*
12745  * wm_gmii_hv_readreg:	[mii interface function]
12746  *
12747  *	Read a PHY register on the HV PHY (PCH and newer).
12748  * This could be handled by the PHY layer if we didn't have to lock the
12749  * resource ...
12750  */
12751 static int
12752 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
12753 {
12754 	struct wm_softc *sc = device_private(dev);
12755 	int rv;
12756 
12757 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
12758 		device_xname(dev), __func__));
12759 
12760 	rv = sc->phy.acquire(sc);
12761 	if (rv != 0) {
12762 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12763 		return rv;
12764 	}
12765 
12766 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
12767 	sc->phy.release(sc);
12768 	return rv;
12769 }
12770 
12771 static int
12772 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
12773 {
12774 	uint16_t page = BM_PHY_REG_PAGE(reg);
12775 	uint16_t regnum = BM_PHY_REG_NUM(reg);
12776 	int rv;
12777 
12778 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
12779 
12780 	/* Page 800 works differently than the rest so it has its own func */
12781 	if (page == BM_WUC_PAGE)
12782 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
12783 
12784 	/*
12785 	 * Pages above 0 but below 768 work differently than the rest and
12786 	 * are not handled here.
12787 	 */
12788 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
12789 		device_printf(dev, "gmii_hv_readreg!!!\n");
12790 		return -1;
12791 	}
12792 
12793 	/*
12794 	 * XXX I21[789] documents say that the SMBus Address register is at
12795 	 * PHY address 01, Page 0 (not 768), Register 26.
12796 	 */
12797 	if (page == HV_INTC_FC_PAGE_START)
12798 		page = 0;
12799 
12800 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
12801 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
12802 		    page << BME1000_PAGE_SHIFT);
12803 		if (rv != 0)
12804 			return rv;
12805 	}
12806 
12807 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
12808 }
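
/*
 * The HV/BM accessors take a single "reg" argument with the page and
 * the in-page register number packed together, unpacked above with
 * BM_PHY_REG_PAGE() and BM_PHY_REG_NUM():
 *
 *	uint16_t page = BM_PHY_REG_PAGE(reg);	// e.g. 800 for WUC regs
 *	uint16_t regnum = BM_PHY_REG_NUM(reg);
 *
 * Page 800 (BM_WUC_PAGE) is routed to the wakeup-register helpers,
 * and pages at or above HV_INTC_FC_PAGE_START always live at PHY
 * address 1.
 */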
12809 
12810 /*
12811  * wm_gmii_hv_writereg:	[mii interface function]
12812  *
12813  *	Write a PHY register on the HV PHY (PCH and newer).
12814  * This could be handled by the PHY layer if we didn't have to lock the
12815  * resource ...
12816  */
12817 static int
12818 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
12819 {
12820 	struct wm_softc *sc = device_private(dev);
12821 	int rv;
12822 
12823 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
12824 		device_xname(dev), __func__));
12825 
12826 	rv = sc->phy.acquire(sc);
12827 	if (rv != 0) {
12828 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12829 		return rv;
12830 	}
12831 
12832 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
12833 	sc->phy.release(sc);
12834 
12835 	return rv;
12836 }
12837 
12838 static int
12839 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
12840 {
12841 	struct wm_softc *sc = device_private(dev);
12842 	uint16_t page = BM_PHY_REG_PAGE(reg);
12843 	uint16_t regnum = BM_PHY_REG_NUM(reg);
12844 	int rv;
12845 
12846 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
12847 
12848 	/* Page 800 works differently than the rest so it has its own func */
12849 	if (page == BM_WUC_PAGE)
12850 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
12851 		    false);
12852 
12853 	/*
12854 	 * Pages above 0 but below 768 work differently than the rest and
12855 	 * are not handled here.
12856 	 */
12857 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
12858 		device_printf(dev, "gmii_hv_writereg!!!\n");
12859 		return -1;
12860 	}
12861 
12862 	{
12863 		/*
12864 		 * XXX I21[789] documents say that the SMBus Address register
12865 		 * is at PHY address 01, Page 0 (not 768), Register 26.
12866 		 */
12867 		if (page == HV_INTC_FC_PAGE_START)
12868 			page = 0;
12869 
12870 		/*
12871 		 * XXX Workaround MDIO accesses being disabled after entering
12872 		 * IEEE Power Down (whenever bit 11 of the PHY control
12873 		 * register is set)
12874 		 */
12875 		if (sc->sc_phytype == WMPHY_82578) {
12876 			struct mii_softc *child;
12877 
12878 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
12879 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
12880 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
12881 			    && ((val & (1 << 11)) != 0)) {
12882 				device_printf(dev, "XXX need workaround\n");
12883 			}
12884 		}
12885 
12886 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
12887 			rv = wm_gmii_mdic_writereg(dev, 1,
12888 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
12889 			if (rv != 0)
12890 				return rv;
12891 		}
12892 	}
12893 
12894 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
12895 }
12896 
12897 /*
12898  * wm_gmii_82580_readreg:	[mii interface function]
12899  *
12900  *	Read a PHY register on the 82580 and I350.
12901  * This could be handled by the PHY layer if we didn't have to lock the
12902  * resource ...
12903  */
12904 static int
12905 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
12906 {
12907 	struct wm_softc *sc = device_private(dev);
12908 	int rv;
12909 
12910 	rv = sc->phy.acquire(sc);
12911 	if (rv != 0) {
12912 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12913 		return rv;
12914 	}
12915 
12916 #ifdef DIAGNOSTIC
12917 	if (reg > MII_ADDRMASK) {
12918 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
12919 		    __func__, sc->sc_phytype, reg);
12920 		reg &= MII_ADDRMASK;
12921 	}
12922 #endif
12923 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
12924 
12925 	sc->phy.release(sc);
12926 	return rv;
12927 }
12928 
12929 /*
12930  * wm_gmii_82580_writereg:	[mii interface function]
12931  *
12932  *	Write a PHY register on the 82580 and I350.
12933  * This could be handled by the PHY layer if we didn't have to lock the
12934  * resource ...
12935  */
12936 static int
12937 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
12938 {
12939 	struct wm_softc *sc = device_private(dev);
12940 	int rv;
12941 
12942 	rv = sc->phy.acquire(sc);
12943 	if (rv != 0) {
12944 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12945 		return rv;
12946 	}
12947 
12948 #ifdef DIAGNOSTIC
12949 	if (reg > MII_ADDRMASK) {
12950 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
12951 		    __func__, sc->sc_phytype, reg);
12952 		reg &= MII_ADDRMASK;
12953 	}
12954 #endif
12955 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
12956 
12957 	sc->phy.release(sc);
12958 	return rv;
12959 }
12960 
12961 /*
12962  * wm_gmii_gs40g_readreg:	[mii interface function]
12963  *
12964  *	Read a PHY register on the I210 and I211.
12965  * This could be handled by the PHY layer if we didn't have to lock the
12966  * resource ...
12967  */
12968 static int
12969 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
12970 {
12971 	struct wm_softc *sc = device_private(dev);
12972 	int page, offset;
12973 	int rv;
12974 
12975 	/* Acquire semaphore */
12976 	rv = sc->phy.acquire(sc);
12977 	if (rv != 0) {
12978 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12979 		return rv;
12980 	}
12981 
12982 	/* Page select */
12983 	page = reg >> GS40G_PAGE_SHIFT;
12984 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
12985 	if (rv != 0)
12986 		goto release;
12987 
12988 	/* Read reg */
12989 	offset = reg & GS40G_OFFSET_MASK;
12990 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
12991 
12992 release:
12993 	sc->phy.release(sc);
12994 	return rv;
12995 }
12996 
12997 /*
12998  * wm_gmii_gs40g_writereg:	[mii interface function]
12999  *
13000  *	Write a PHY register on the I210 and I211.
13001  * This could be handled by the PHY layer if we didn't have to lock the
13002  * resource ...
13003  */
13004 static int
13005 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
13006 {
13007 	struct wm_softc *sc = device_private(dev);
13008 	uint16_t page;
13009 	int offset, rv;
13010 
13011 	/* Acquire semaphore */
13012 	rv = sc->phy.acquire(sc);
13013 	if (rv != 0) {
13014 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
13015 		return rv;
13016 	}
13017 
13018 	/* Page select */
13019 	page = reg >> GS40G_PAGE_SHIFT;
13020 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
13021 	if (rv != 0)
13022 		goto release;
13023 
13024 	/* Write reg */
13025 	offset = reg & GS40G_OFFSET_MASK;
13026 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
13027 
13028 release:
13029 	/* Release semaphore */
13030 	sc->phy.release(sc);
13031 	return rv;
13032 }
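
/*
 * GS40G register numbers encode the page above GS40G_PAGE_SHIFT, so
 * for a hypothetical reg = (page << GS40G_PAGE_SHIFT) | offset both
 * accessors recover:
 *
 *	page = reg >> GS40G_PAGE_SHIFT;
 *	offset = reg & GS40G_OFFSET_MASK;
 *
 * and perform a page-select write followed by the data access, as
 * coded above.
 */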
13033 
13034 /*
13035  * wm_gmii_statchg:	[mii interface function]
13036  *
13037  *	Callback from MII layer when media changes.
13038  */
13039 static void
13040 wm_gmii_statchg(struct ifnet *ifp)
13041 {
13042 	struct wm_softc *sc = ifp->if_softc;
13043 	struct mii_data *mii = &sc->sc_mii;
13044 
13045 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
13046 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
13047 	sc->sc_fcrtl &= ~FCRTL_XONE;
13048 
13049 	/* Get flow control negotiation result. */
13050 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
13051 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
13052 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
13053 		mii->mii_media_active &= ~IFM_ETH_FMASK;
13054 	}
13055 
13056 	if (sc->sc_flowflags & IFM_FLOW) {
13057 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
13058 			sc->sc_ctrl |= CTRL_TFCE;
13059 			sc->sc_fcrtl |= FCRTL_XONE;
13060 		}
13061 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
13062 			sc->sc_ctrl |= CTRL_RFCE;
13063 	}
13064 
13065 	if (mii->mii_media_active & IFM_FDX) {
13066 		DPRINTF(sc, WM_DEBUG_LINK,
13067 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
13068 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
13069 	} else {
13070 		DPRINTF(sc, WM_DEBUG_LINK,
13071 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
13072 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
13073 	}
13074 
13075 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13076 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
13077 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
13078 	    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
13079 	if (sc->sc_type == WM_T_80003) {
13080 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
13081 		case IFM_1000_T:
13082 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
13083 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
13084 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
13085 			break;
13086 		default:
13087 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
13088 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
13089 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
13090 			break;
13091 		}
13092 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
13093 	}
13094 }
13095 
13096 /* Kumeran related (80003, ICH* and PCH*) */
13097 
13098 /*
13099  * wm_kmrn_readreg:
13100  *
13101  *	Read a Kumeran register.
13102  */
13103 static int
13104 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
13105 {
13106 	int rv;
13107 
13108 	if (sc->sc_type == WM_T_80003)
13109 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
13110 	else
13111 		rv = sc->phy.acquire(sc);
13112 	if (rv != 0) {
13113 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
13114 		    __func__);
13115 		return rv;
13116 	}
13117 
13118 	rv = wm_kmrn_readreg_locked(sc, reg, val);
13119 
13120 	if (sc->sc_type == WM_T_80003)
13121 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
13122 	else
13123 		sc->phy.release(sc);
13124 
13125 	return rv;
13126 }
13127 
13128 static int
13129 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
13130 {
13131 
13132 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
13133 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
13134 	    KUMCTRLSTA_REN);
13135 	CSR_WRITE_FLUSH(sc);
13136 	delay(2);
13137 
13138 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
13139 
13140 	return 0;
13141 }
13142 
13143 /*
13144  * wm_kmrn_writereg:
13145  *	Write a Kumeran register.
13146  *	Write a kumeran register
13147  */
13148 static int
13149 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
13150 {
13151 	int rv;
13152 
13153 	if (sc->sc_type == WM_T_80003)
13154 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
13155 	else
13156 		rv = sc->phy.acquire(sc);
13157 	if (rv != 0) {
13158 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
13159 		    __func__);
13160 		return rv;
13161 	}
13162 
13163 	rv = wm_kmrn_writereg_locked(sc, reg, val);
13164 
13165 	if (sc->sc_type == WM_T_80003)
13166 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
13167 	else
13168 		sc->phy.release(sc);
13169 
13170 	return rv;
13171 }
13172 
13173 static int
13174 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
13175 {
13176 
13177 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
13178 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
13179 
13180 	return 0;
13181 }
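
/*
 * All Kumeran traffic goes through the single KUMCTRLSTA CSR: the
 * register offset is shifted into KUMCTRLSTA_OFFSET, KUMCTRLSTA_REN
 * requests a read, and the low 16 bits carry the data.  A sketch of a
 * read-modify-write using the locked helpers above (example_bit is
 * illustrative only):
 *
 *	uint16_t v;
 *
 *	wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &v);
 *	v |= example_bit;
 *	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, v);
 */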
13182 
13183 /*
13184  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
13185  * This access method is different from IEEE MMD.
13186  */
13187 static int
13188 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
13189 {
13190 	struct wm_softc *sc = device_private(dev);
13191 	int rv;
13192 
13193 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
13194 	if (rv != 0)
13195 		return rv;
13196 
13197 	if (rd)
13198 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
13199 	else
13200 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
13201 	return rv;
13202 }
13203 
13204 static int
13205 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
13206 {
13207 
13208 	return wm_access_emi_reg_locked(dev, reg, val, true);
13209 }
13210 
13211 static int
13212 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
13213 {
13214 
13215 	return wm_access_emi_reg_locked(dev, reg, &val, false);
13216 }
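
/*
 * The EMI space is an indirect address/data pair at PHY address 2:
 * I82579_EMI_ADDR selects the EMI register and I82579_EMI_DATA moves
 * the data.  A sketch of a read through the wrappers above (the
 * register name EXAMPLE_EMI_REG is illustrative):
 *
 *	uint16_t v;
 *	int rv;
 *
 *	rv = wm_read_emi_reg_locked(dev, EXAMPLE_EMI_REG, &v);
 */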
13217 
13218 /* SGMII related */
13219 
13220 /*
13221  * wm_sgmii_uses_mdio
13222  *
13223  * Check whether the transaction is to the internal PHY or the external
13224  * MDIO interface. Return true if it's MDIO.
13225  */
13226 static bool
13227 wm_sgmii_uses_mdio(struct wm_softc *sc)
13228 {
13229 	uint32_t reg;
13230 	bool ismdio = false;
13231 
13232 	switch (sc->sc_type) {
13233 	case WM_T_82575:
13234 	case WM_T_82576:
13235 		reg = CSR_READ(sc, WMREG_MDIC);
13236 		ismdio = ((reg & MDIC_DEST) != 0);
13237 		break;
13238 	case WM_T_82580:
13239 	case WM_T_I350:
13240 	case WM_T_I354:
13241 	case WM_T_I210:
13242 	case WM_T_I211:
13243 		reg = CSR_READ(sc, WMREG_MDICNFG);
13244 		ismdio = ((reg & MDICNFG_DEST) != 0);
13245 		break;
13246 	default:
13247 		break;
13248 	}
13249 
13250 	return ismdio;
13251 }
13252 
13253 /* Setup internal SGMII PHY for SFP */
13254 static void
13255 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
13256 {
13257 	uint16_t id1, id2, phyreg;
13258 	int i, rv;
13259 
13260 	if (((sc->sc_flags & WM_F_SGMII) == 0)
13261 	    || ((sc->sc_flags & WM_F_SFP) == 0))
13262 		return;
13263 
13264 	for (i = 0; i < MII_NPHY; i++) {
13265 		sc->phy.no_errprint = true;
13266 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
13267 		if (rv != 0)
13268 			continue;
13269 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
13270 		if (rv != 0)
13271 			continue;
13272 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
13273 			continue;
13274 		sc->phy.no_errprint = false;
13275 
13276 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
13277 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
13278 		phyreg |= ESSR_SGMII_WOC_COPPER;
13279 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
13280 		break;
13281 	}
13283 }
13284 
13285 /*
13286  * wm_sgmii_readreg:	[mii interface function]
13287  *
13288  *	Read a PHY register on the SGMII
13289  * This could be handled by the PHY layer if we didn't have to lock the
13290  * resource ...
13291  */
13292 static int
13293 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
13294 {
13295 	struct wm_softc *sc = device_private(dev);
13296 	int rv;
13297 
13298 	rv = sc->phy.acquire(sc);
13299 	if (rv != 0) {
13300 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
13301 		return rv;
13302 	}
13303 
13304 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
13305 
13306 	sc->phy.release(sc);
13307 	return rv;
13308 }
13309 
13310 static int
13311 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
13312 {
13313 	struct wm_softc *sc = device_private(dev);
13314 	uint32_t i2ccmd;
13315 	int i, rv = 0;
13316 
13317 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
13318 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
13319 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
13320 
13321 	/* Poll the ready bit */
13322 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
13323 		delay(50);
13324 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
13325 		if (i2ccmd & I2CCMD_READY)
13326 			break;
13327 	}
13328 	if ((i2ccmd & I2CCMD_READY) == 0) {
13329 		device_printf(dev, "I2CCMD Read did not complete\n");
13330 		rv = ETIMEDOUT;
13331 	}
13332 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
13333 		if (!sc->phy.no_errprint)
13334 			device_printf(dev, "I2CCMD Error bit set\n");
13335 		rv = EIO;
13336 	}
13337 
13338 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
13339 
13340 	return rv;
13341 }
13342 
13343 /*
13344  * wm_sgmii_writereg:	[mii interface function]
13345  *
13346  *	Write a PHY register on the SGMII.
13347  * This could be handled by the PHY layer if we didn't have to lock the
13348  * resource ...
13349  */
13350 static int
13351 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
13352 {
13353 	struct wm_softc *sc = device_private(dev);
13354 	int rv;
13355 
13356 	rv = sc->phy.acquire(sc);
13357 	if (rv != 0) {
13358 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
13359 		return rv;
13360 	}
13361 
13362 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
13363 
13364 	sc->phy.release(sc);
13365 
13366 	return rv;
13367 }
13368 
13369 static int
13370 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
13371 {
13372 	struct wm_softc *sc = device_private(dev);
13373 	uint32_t i2ccmd;
13374 	uint16_t swapdata;
13375 	int rv = 0;
13376 	int i;
13377 
13378 	/* Swap the data bytes for the I2C interface */
13379 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
13380 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
13381 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
13382 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
13383 
13384 	/* Poll the ready bit */
13385 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
13386 		delay(50);
13387 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
13388 		if (i2ccmd & I2CCMD_READY)
13389 			break;
13390 	}
13391 	if ((i2ccmd & I2CCMD_READY) == 0) {
13392 		device_printf(dev, "I2CCMD Write did not complete\n");
13393 		rv = ETIMEDOUT;
13394 	}
13395 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
13396 		device_printf(dev, "I2CCMD Error bit set\n");
13397 		rv = EIO;
13398 	}
13399 
13400 	return rv;
13401 }
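
/*
 * The I2CCMD data field is byte-swapped with respect to host order,
 * which is why both SGMII accessors swap the 16-bit value.  Worked
 * example: writing val = 0x1234 sends swapdata = 0x3412 in the
 * command word, and a read that returns 0x3412 in I2CCMD hands
 * 0x1234 back to the caller.
 */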
13402 
13403 /* TBI related */
13404 
13405 static bool
13406 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
13407 {
13408 	bool sig;
13409 
13410 	sig = ctrl & CTRL_SWDPIN(1);
13411 
13412 	/*
13413 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
13414 	 * detect a signal, 1 if they don't.
13415 	 */
13416 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
13417 		sig = !sig;
13418 
13419 	return sig;
13420 }
13421 
13422 /*
13423  * wm_tbi_mediainit:
13424  *
13425  *	Initialize media for use on 1000BASE-X devices.
13426  */
13427 static void
13428 wm_tbi_mediainit(struct wm_softc *sc)
13429 {
13430 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
13431 	const char *sep = "";
13432 
13433 	if (sc->sc_type < WM_T_82543)
13434 		sc->sc_tipg = TIPG_WM_DFLT;
13435 	else
13436 		sc->sc_tipg = TIPG_LG_DFLT;
13437 
13438 	sc->sc_tbi_serdes_anegticks = 5;
13439 
13440 	/* Initialize our media structures */
13441 	sc->sc_mii.mii_ifp = ifp;
13442 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
13443 
13444 	ifp->if_baudrate = IF_Gbps(1);
13445 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
13446 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
13447 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
13448 		    wm_serdes_mediachange, wm_serdes_mediastatus,
13449 		    sc->sc_core_lock);
13450 	} else {
13451 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
13452 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
13453 	}
13454 
13455 	/*
13456 	 * SWD Pins:
13457 	 *
13458 	 *	0 = Link LED (output)
13459 	 *	1 = Loss Of Signal (input)
13460 	 */
13461 	sc->sc_ctrl |= CTRL_SWDPIO(0);
13462 
13463 	/* XXX Perhaps this is only for TBI */
13464 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
13465 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
13466 
13467 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
13468 		sc->sc_ctrl &= ~CTRL_LRST;
13469 
13470 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13471 
13472 #define	ADD(ss, mm, dd)							  \
13473 do {									  \
13474 	aprint_normal("%s%s", sep, ss);					  \
13475 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
13476 	sep = ", ";							  \
13477 } while (/*CONSTCOND*/0)
13478 
13479 	aprint_normal_dev(sc->sc_dev, "");
13480 
13481 	if (sc->sc_type == WM_T_I354) {
13482 		uint32_t status;
13483 
13484 		status = CSR_READ(sc, WMREG_STATUS);
13485 		if (((status & STATUS_2P5_SKU) != 0)
13486 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
13487 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
13488 		} else
13489 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
13490 	} else if (sc->sc_type == WM_T_82545) {
13491 		/* Only 82545 is LX (XXX except SFP) */
13492 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
13493 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
13494 	} else if (sc->sc_sfptype != 0) {
13495 		/* XXX wm(4) fiber/serdes don't use ifm_data */
13496 		switch (sc->sc_sfptype) {
13497 		default:
13498 		case SFF_SFP_ETH_FLAGS_1000SX:
13499 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
13500 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
13501 			break;
13502 		case SFF_SFP_ETH_FLAGS_1000LX:
13503 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
13504 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
13505 			break;
13506 		case SFF_SFP_ETH_FLAGS_1000CX:
13507 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
13508 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
13509 			break;
13510 		case SFF_SFP_ETH_FLAGS_1000T:
13511 			ADD("1000baseT", IFM_1000_T, 0);
13512 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
13513 			break;
13514 		case SFF_SFP_ETH_FLAGS_100FX:
13515 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
13516 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
13517 			break;
13518 		}
13519 	} else {
13520 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
13521 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
13522 	}
13523 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
13524 	aprint_normal("\n");
13525 
13526 #undef ADD
13527 
13528 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
13529 }
13530 
13531 /*
13532  * wm_tbi_mediachange:	[ifmedia interface function]
13533  *
13534  *	Set hardware to newly-selected media on a 1000BASE-X device.
13535  */
13536 static int
13537 wm_tbi_mediachange(struct ifnet *ifp)
13538 {
13539 	struct wm_softc *sc = ifp->if_softc;
13540 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
13541 	uint32_t status, ctrl;
13542 	bool signal;
13543 	int i;
13544 
13545 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
13546 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
13547 		/* XXX need some work for >= 82571 and < 82575 */
13548 		if (sc->sc_type < WM_T_82575)
13549 			return 0;
13550 	}
13551 
13552 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
13553 	    || (sc->sc_type >= WM_T_82575))
13554 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
13555 
13556 	sc->sc_ctrl &= ~CTRL_LRST;
13557 	sc->sc_txcw = TXCW_ANE;
13558 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
13559 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
13560 	else if (ife->ifm_media & IFM_FDX)
13561 		sc->sc_txcw |= TXCW_FD;
13562 	else
13563 		sc->sc_txcw |= TXCW_HD;
13564 
13565 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
13566 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
13567 
13568 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
13569 		device_xname(sc->sc_dev), sc->sc_txcw));
13570 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
13571 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13572 	CSR_WRITE_FLUSH(sc);
13573 	delay(1000);
13574 
13575 	ctrl = CSR_READ(sc, WMREG_CTRL);
13576 	signal = wm_tbi_havesignal(sc, ctrl);
13577 
13578 	DPRINTF(sc, WM_DEBUG_LINK,
13579 	    ("%s: signal = %d\n", device_xname(sc->sc_dev), signal));
13580 
13581 	if (signal) {
13582 		/* Have signal; wait for the link to come up. */
13583 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
13584 			delay(10000);
13585 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
13586 				break;
13587 		}
13588 
13589 		DPRINTF(sc, WM_DEBUG_LINK,
13590 		    ("%s: i = %d after waiting for link\n",
13591 			device_xname(sc->sc_dev), i));
13592 
13593 		status = CSR_READ(sc, WMREG_STATUS);
13594 		DPRINTF(sc, WM_DEBUG_LINK,
13595 		    ("%s: status after final read = 0x%x, STATUS_LU = %#"
13596 			__PRIxBIT "\n",
13597 			device_xname(sc->sc_dev), status, STATUS_LU));
13598 		if (status & STATUS_LU) {
13599 			/* Link is up. */
13600 			DPRINTF(sc, WM_DEBUG_LINK,
13601 			    ("%s: LINK: set media -> link up %s\n",
13602 				device_xname(sc->sc_dev),
13603 				(status & STATUS_FD) ? "FDX" : "HDX"));
13604 
13605 			/*
13606 			 * NOTE: CTRL will update TFCE and RFCE automatically,
13607 			 * so we should update sc->sc_ctrl
13608 			 */
13609 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
13610 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
13611 			sc->sc_fcrtl &= ~FCRTL_XONE;
13612 			if (status & STATUS_FD)
13613 				sc->sc_tctl |=
13614 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
13615 			else
13616 				sc->sc_tctl |=
13617 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
13618 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
13619 				sc->sc_fcrtl |= FCRTL_XONE;
13620 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
13621 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
13622 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
13623 			sc->sc_tbi_linkup = 1;
13624 		} else {
13625 			if (i == WM_LINKUP_TIMEOUT)
13626 				wm_check_for_link(sc);
13627 			/* Link is down. */
13628 			DPRINTF(sc, WM_DEBUG_LINK,
13629 			    ("%s: LINK: set media -> link down\n",
13630 				device_xname(sc->sc_dev)));
13631 			sc->sc_tbi_linkup = 0;
13632 		}
13633 	} else {
13634 		DPRINTF(sc, WM_DEBUG_LINK,
13635 		    ("%s: LINK: set media -> no signal\n",
13636 			device_xname(sc->sc_dev)));
13637 		sc->sc_tbi_linkup = 0;
13638 	}
13639 
13640 	wm_tbi_serdes_set_linkled(sc);
13641 
13642 	return 0;
13643 }
13644 
13645 /*
13646  * wm_tbi_mediastatus:	[ifmedia interface function]
13647  *
13648  *	Get the current interface media status on a 1000BASE-X device.
13649  */
13650 static void
13651 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
13652 {
13653 	struct wm_softc *sc = ifp->if_softc;
13654 	uint32_t ctrl, status;
13655 
13656 	ifmr->ifm_status = IFM_AVALID;
13657 	ifmr->ifm_active = IFM_ETHER;
13658 
13659 	status = CSR_READ(sc, WMREG_STATUS);
13660 	if ((status & STATUS_LU) == 0) {
13661 		ifmr->ifm_active |= IFM_NONE;
13662 		return;
13663 	}
13664 
13665 	ifmr->ifm_status |= IFM_ACTIVE;
13666 	/* Only 82545 is LX */
13667 	if (sc->sc_type == WM_T_82545)
13668 		ifmr->ifm_active |= IFM_1000_LX;
13669 	else
13670 		ifmr->ifm_active |= IFM_1000_SX;
13671 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
13672 		ifmr->ifm_active |= IFM_FDX;
13673 	else
13674 		ifmr->ifm_active |= IFM_HDX;
13675 	ctrl = CSR_READ(sc, WMREG_CTRL);
13676 	if (ctrl & CTRL_RFCE)
13677 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
13678 	if (ctrl & CTRL_TFCE)
13679 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
13680 }
13681 
13682 /* XXX TBI only */
13683 static int
13684 wm_check_for_link(struct wm_softc *sc)
13685 {
13686 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
13687 	uint32_t rxcw;
13688 	uint32_t ctrl;
13689 	uint32_t status;
13690 	bool signal;
13691 
13692 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
13693 		device_xname(sc->sc_dev), __func__));
13694 
13695 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
13696 		/* XXX need some work for >= 82571 */
13697 		if (sc->sc_type >= WM_T_82571) {
13698 			sc->sc_tbi_linkup = 1;
13699 			return 0;
13700 		}
13701 	}
13702 
13703 	rxcw = CSR_READ(sc, WMREG_RXCW);
13704 	ctrl = CSR_READ(sc, WMREG_CTRL);
13705 	status = CSR_READ(sc, WMREG_STATUS);
13706 	signal = wm_tbi_havesignal(sc, ctrl);
13707 
13708 	DPRINTF(sc, WM_DEBUG_LINK,
13709 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
13710 		device_xname(sc->sc_dev), __func__, signal,
13711 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
13712 
13713 	/*
13714 	 * SWDPIN   LU RXCW
13715 	 *	0    0	  0
13716 	 *	0    0	  1	(should not happen)
13717 	 *	0    1	  0	(should not happen)
13718 	 *	0    1	  1	(should not happen)
13719 	 *	1    0	  0	Disable autonegotiation and force link up
13720 	 *	1    0	  1	Got /C/ but no link yet
13721 	 *	1    1	  0	(link up)
13722 	 *	1    1	  1	If IFM_AUTO, go back to autonegotiation
13723 	 *
13724 	 */
13725 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
13726 		DPRINTF(sc, WM_DEBUG_LINK,
13727 		    ("%s: %s: force linkup and fullduplex\n",
13728 			device_xname(sc->sc_dev), __func__));
13729 		sc->sc_tbi_linkup = 0;
13730 		/* Disable auto-negotiation in the TXCW register */
13731 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
13732 
13733 		/*
13734 		 * Force link-up and also force full-duplex.
13735 		 *
13736 		 * NOTE: The hardware updates TFCE and RFCE in CTRL
13737 		 * automatically, so refresh sc->sc_ctrl from the register.
13738 		 */
13739 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
13740 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13741 	} else if (((status & STATUS_LU) != 0)
13742 	    && ((rxcw & RXCW_C) != 0)
13743 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
13744 		sc->sc_tbi_linkup = 1;
13745 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonegotiation\n",
13746 			device_xname(sc->sc_dev), __func__));
13747 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
13748 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
13749 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
13750 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/",
13751 			device_xname(sc->sc_dev), __func__));
13752 	} else {
13753 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
13754 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
13755 			status));
13756 	}
13757 
13758 	return 0;
13759 }
13760 
13761 /*
13762  * wm_tbi_tick:
13763  *
13764  *	Check the link on TBI devices.
13765  *	This function acts as mii_tick().
13766  */
13767 static void
13768 wm_tbi_tick(struct wm_softc *sc)
13769 {
13770 	struct mii_data *mii = &sc->sc_mii;
13771 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
13772 	uint32_t status;
13773 
13774 	KASSERT(mutex_owned(sc->sc_core_lock));
13775 
13776 	status = CSR_READ(sc, WMREG_STATUS);
13777 
13778 	/* XXX is this needed? */
13779 	(void)CSR_READ(sc, WMREG_RXCW);
13780 	(void)CSR_READ(sc, WMREG_CTRL);
13781 
13782 	/* set link status */
13783 	if ((status & STATUS_LU) == 0) {
13784 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
13785 			device_xname(sc->sc_dev)));
13786 		sc->sc_tbi_linkup = 0;
13787 	} else if (sc->sc_tbi_linkup == 0) {
13788 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
13789 			device_xname(sc->sc_dev),
13790 			(status & STATUS_FD) ? "FDX" : "HDX"));
13791 		sc->sc_tbi_linkup = 1;
13792 		sc->sc_tbi_serdes_ticks = 0;
13793 	}
13794 
13795 	if ((sc->sc_if_flags & IFF_UP) == 0)
13796 		goto setled;
13797 
13798 	if ((status & STATUS_LU) == 0) {
13799 		sc->sc_tbi_linkup = 0;
13800 		/* If the timer expired, retry autonegotiation */
13801 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
13802 		    && (++sc->sc_tbi_serdes_ticks
13803 			>= sc->sc_tbi_serdes_anegticks)) {
13804 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
13805 				device_xname(sc->sc_dev), __func__));
13806 			sc->sc_tbi_serdes_ticks = 0;
13807 			/*
13808 			 * Reset the link, and let autonegotiation do
13809 			 * its thing
13810 			 */
13811 			sc->sc_ctrl |= CTRL_LRST;
13812 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13813 			CSR_WRITE_FLUSH(sc);
13814 			delay(1000);
13815 			sc->sc_ctrl &= ~CTRL_LRST;
13816 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13817 			CSR_WRITE_FLUSH(sc);
13818 			delay(1000);
13819 			CSR_WRITE(sc, WMREG_TXCW,
13820 			    sc->sc_txcw & ~TXCW_ANE);
13821 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
13822 		}
13823 	}
13824 
13825 setled:
13826 	wm_tbi_serdes_set_linkled(sc);
13827 }
13828 
13829 /* SERDES related */
13830 static void
13831 wm_serdes_power_up_link_82575(struct wm_softc *sc)
13832 {
13833 	uint32_t reg;
13834 
13835 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
13836 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
13837 		return;
13838 
13839 	/* Enable PCS to turn on link */
13840 	reg = CSR_READ(sc, WMREG_PCS_CFG);
13841 	reg |= PCS_CFG_PCS_EN;
13842 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
13843 
13844 	/* Power up the laser */
13845 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
13846 	reg &= ~CTRL_EXT_SWDPIN(3);
13847 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
13848 
13849 	/* Flush the write to verify completion */
13850 	CSR_WRITE_FLUSH(sc);
13851 	delay(1000);
13852 }
13853 
13854 static int
13855 wm_serdes_mediachange(struct ifnet *ifp)
13856 {
13857 	struct wm_softc *sc = ifp->if_softc;
13858 	bool pcs_autoneg = true; /* XXX */
13859 	uint32_t ctrl_ext, pcs_lctl, reg;
13860 
13861 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
13862 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
13863 		return 0;
13864 
13865 	/* XXX Currently, this function is not called on 8257[12] */
13866 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
13867 	    || (sc->sc_type >= WM_T_82575))
13868 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
13869 
13870 	/* Power on the SFP cage if present */
13871 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
13872 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
13873 	ctrl_ext |= CTRL_EXT_I2C_ENA;
13874 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
13875 
13876 	sc->sc_ctrl |= CTRL_SLU;
13877 
13878 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
13879 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
13880 
13881 		reg = CSR_READ(sc, WMREG_CONNSW);
13882 		reg |= CONNSW_ENRGSRC;
13883 		CSR_WRITE(sc, WMREG_CONNSW, reg);
13884 	}
13885 
13886 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
13887 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
13888 	case CTRL_EXT_LINK_MODE_SGMII:
13889 		/* SGMII mode lets the PHY handle forcing speed/duplex */
13890 		pcs_autoneg = true;
13891 		/* The autoneg timeout should be disabled in SGMII mode */
13892 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
13893 		break;
13894 	case CTRL_EXT_LINK_MODE_1000KX:
13895 		pcs_autoneg = false;
13896 		/* FALLTHROUGH */
13897 	default:
13898 		if ((sc->sc_type == WM_T_82575)
13899 		    || (sc->sc_type == WM_T_82576)) {
13900 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
13901 				pcs_autoneg = false;
13902 		}
13903 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
13904 		    | CTRL_FRCFDX;
13905 
13906 		/* Set speed of 1000/Full if speed/duplex is forced */
13907 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
13908 	}
13909 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
13910 
13911 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
13912 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
13913 
13914 	if (pcs_autoneg) {
13915 		/* Set PCS register for autoneg */
13916 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
13917 
13918 		/* Disable force flow control for autoneg */
13919 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
13920 
13921 		/* Configure flow control advertisement for autoneg */
13922 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
13923 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
13924 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
13925 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
13926 	} else
13927 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
13928 
13929 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
13930 
13931 	return 0;
13932 }
13933 
13934 static void
13935 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
13936 {
13937 	struct wm_softc *sc = ifp->if_softc;
13938 	struct mii_data *mii = &sc->sc_mii;
13939 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
13940 	uint32_t pcs_adv, pcs_lpab, reg;
13941 
13942 	ifmr->ifm_status = IFM_AVALID;
13943 	ifmr->ifm_active = IFM_ETHER;
13944 
13945 	/* Check PCS */
13946 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
13947 	if ((reg & PCS_LSTS_LINKOK) == 0) {
13948 		ifmr->ifm_active |= IFM_NONE;
13949 		sc->sc_tbi_linkup = 0;
13950 		goto setled;
13951 	}
13952 
13953 	sc->sc_tbi_linkup = 1;
13954 	ifmr->ifm_status |= IFM_ACTIVE;
13955 	if (sc->sc_type == WM_T_I354) {
13956 		uint32_t status;
13957 
13958 		status = CSR_READ(sc, WMREG_STATUS);
13959 		if (((status & STATUS_2P5_SKU) != 0)
13960 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
13961 			ifmr->ifm_active |= IFM_2500_KX;
13962 		} else
13963 			ifmr->ifm_active |= IFM_1000_KX;
13964 	} else {
13965 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
13966 		case PCS_LSTS_SPEED_10:
13967 			ifmr->ifm_active |= IFM_10_T; /* XXX */
13968 			break;
13969 		case PCS_LSTS_SPEED_100:
13970 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
13971 			break;
13972 		case PCS_LSTS_SPEED_1000:
13973 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
13974 			break;
13975 		default:
13976 			device_printf(sc->sc_dev, "Unknown speed\n");
13977 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
13978 			break;
13979 		}
13980 	}
13981 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
13982 	if ((reg & PCS_LSTS_FDX) != 0)
13983 		ifmr->ifm_active |= IFM_FDX;
13984 	else
13985 		ifmr->ifm_active |= IFM_HDX;
13986 	mii->mii_media_active &= ~IFM_ETH_FMASK;
13987 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
13988 		/* Check flow */
13989 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
13990 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
13991 			DPRINTF(sc, WM_DEBUG_LINK,
13992 			    ("XXX LINKOK but not ACOMP\n"));
13993 			goto setled;
13994 		}
13995 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
13996 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
13997 		DPRINTF(sc, WM_DEBUG_LINK,
13998 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
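		/*
		 * Illustrative sketch of the pause resolution that the
		 * chain below implements (per 802.3 Annex 28B):
		 *
		 *	local		link partner
		 *	SYM ASYM	SYM ASYM	result
		 *	 1   x		 1   x		TX and RX pause
		 *	 0   1		 1   1		TX pause only
		 *	 1   1		 0   1		RX pause only
		 */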
13999 		if ((pcs_adv & TXCW_SYM_PAUSE)
14000 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
14001 			mii->mii_media_active |= IFM_FLOW
14002 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
14003 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
14004 		    && (pcs_adv & TXCW_ASYM_PAUSE)
14005 		    && (pcs_lpab & TXCW_SYM_PAUSE)
14006 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
14007 			mii->mii_media_active |= IFM_FLOW
14008 			    | IFM_ETH_TXPAUSE;
14009 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
14010 		    && (pcs_adv & TXCW_ASYM_PAUSE)
14011 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
14012 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
14013 			mii->mii_media_active |= IFM_FLOW
14014 			    | IFM_ETH_RXPAUSE;
14015 		}
14016 	}
14017 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
14018 	    | (mii->mii_media_active & IFM_ETH_FMASK);
14019 setled:
14020 	wm_tbi_serdes_set_linkled(sc);
14021 }
14022 
14023 /*
14024  * wm_serdes_tick:
14025  *
14026  *	Check the link on serdes devices.
14027  */
14028 static void
14029 wm_serdes_tick(struct wm_softc *sc)
14030 {
14031 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
14032 	struct mii_data *mii = &sc->sc_mii;
14033 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
14034 	uint32_t reg;
14035 
14036 	KASSERT(mutex_owned(sc->sc_core_lock));
14037 
14038 	mii->mii_media_status = IFM_AVALID;
14039 	mii->mii_media_active = IFM_ETHER;
14040 
14041 	/* Check PCS */
14042 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
14043 	if ((reg & PCS_LSTS_LINKOK) != 0) {
14044 		mii->mii_media_status |= IFM_ACTIVE;
14045 		sc->sc_tbi_linkup = 1;
14046 		sc->sc_tbi_serdes_ticks = 0;
14047 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
14048 		if ((reg & PCS_LSTS_FDX) != 0)
14049 			mii->mii_media_active |= IFM_FDX;
14050 		else
14051 			mii->mii_media_active |= IFM_HDX;
14052 	} else {
14053 		mii->mii_media_status |= IFM_NONE;
14054 		sc->sc_tbi_linkup = 0;
14055 		/* If the timer expired, retry autonegotiation */
14056 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
14057 		    && (++sc->sc_tbi_serdes_ticks
14058 			>= sc->sc_tbi_serdes_anegticks)) {
14059 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
14060 				device_xname(sc->sc_dev), __func__));
14061 			sc->sc_tbi_serdes_ticks = 0;
14062 			/* XXX */
14063 			wm_serdes_mediachange(ifp);
14064 		}
14065 	}
14066 
14067 	wm_tbi_serdes_set_linkled(sc);
14068 }
14069 
14070 /* SFP related */
14071 
14072 static int
14073 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
14074 {
14075 	uint32_t i2ccmd;
14076 	int i;
14077 
14078 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
14079 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
14080 
14081 	/* Poll the ready bit */
14082 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
14083 		delay(50);
14084 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
14085 		if (i2ccmd & I2CCMD_READY)
14086 			break;
14087 	}
14088 	if ((i2ccmd & I2CCMD_READY) == 0)
14089 		return -1;
14090 	if ((i2ccmd & I2CCMD_ERROR) != 0)
14091 		return -1;
14092 
14093 	*data = i2ccmd & 0x00ff;
14094 
14095 	return 0;
14096 }
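
/*
 * The I2CCMD handshake above, as a sketch: write the SFP register
 * offset together with I2CCMD_OPCODE_READ to I2CCMD, poll for
 * I2CCMD_READY (up to I2CCMD_PHY_TIMEOUT iterations of 50us each),
 * then the byte is in the low 8 bits of the register.
 */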
14097 
14098 static uint32_t
14099 wm_sfp_get_media_type(struct wm_softc *sc)
14100 {
14101 	uint32_t ctrl_ext;
14102 	uint8_t val = 0;
14103 	int timeout = 3;
14104 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
14105 	int rv = -1;
14106 
14107 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
14108 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
14109 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
14110 	CSR_WRITE_FLUSH(sc);
14111 
14112 	/* Read SFP module data */
14113 	while (timeout) {
14114 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
14115 		if (rv == 0)
14116 			break;
14117 		delay(100*1000); /* XXX too big */
14118 		timeout--;
14119 	}
14120 	if (rv != 0)
14121 		goto out;
14122 
14123 	switch (val) {
14124 	case SFF_SFP_ID_SFF:
14125 		aprint_normal_dev(sc->sc_dev,
14126 		    "Module/Connector soldered to board\n");
14127 		break;
14128 	case SFF_SFP_ID_SFP:
14129 		sc->sc_flags |= WM_F_SFP;
14130 		break;
14131 	case SFF_SFP_ID_UNKNOWN:
14132 		goto out;
14133 	default:
14134 		break;
14135 	}
14136 
14137 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
14138 	if (rv != 0)
14139 		goto out;
14140 
14141 	sc->sc_sfptype = val;
14142 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
14143 		mediatype = WM_MEDIATYPE_SERDES;
14144 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
14145 		sc->sc_flags |= WM_F_SGMII;
14146 		mediatype = WM_MEDIATYPE_COPPER;
14147 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
14148 		sc->sc_flags |= WM_F_SGMII;
14149 		mediatype = WM_MEDIATYPE_SERDES;
14150 	} else {
14151 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
14152 		    __func__, sc->sc_sfptype);
14153 		sc->sc_sfptype = 0; /* XXX unknown */
14154 	}
14155 
14156 out:
14157 	/* Restore I2C interface setting */
14158 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
14159 
14160 	return mediatype;
14161 }
14162 
14163 /*
14164  * NVM related.
14165  * Microwire, SPI (w/wo EERD) and Flash.
14166  */
14167 
14168 /* Both SPI and Microwire */
14169 
14170 /*
14171  * wm_eeprom_sendbits:
14172  *
14173  *	Send a series of bits to the EEPROM.
14174  */
14175 static void
14176 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
14177 {
14178 	uint32_t reg;
14179 	int x;
14180 
14181 	reg = CSR_READ(sc, WMREG_EECD);
14182 
14183 	for (x = nbits; x > 0; x--) {
14184 		if (bits & (1U << (x - 1)))
14185 			reg |= EECD_DI;
14186 		else
14187 			reg &= ~EECD_DI;
14188 		CSR_WRITE(sc, WMREG_EECD, reg);
14189 		CSR_WRITE_FLUSH(sc);
14190 		delay(2);
14191 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
14192 		CSR_WRITE_FLUSH(sc);
14193 		delay(2);
14194 		CSR_WRITE(sc, WMREG_EECD, reg);
14195 		CSR_WRITE_FLUSH(sc);
14196 		delay(2);
14197 	}
14198 }
14199 
14200 /*
14201  * wm_eeprom_recvbits:
14202  *
14203  *	Receive a series of bits from the EEPROM.
14204  */
14205 static void
14206 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
14207 {
14208 	uint32_t reg, val;
14209 	int x;
14210 
14211 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
14212 
14213 	val = 0;
14214 	for (x = nbits; x > 0; x--) {
14215 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
14216 		CSR_WRITE_FLUSH(sc);
14217 		delay(2);
14218 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
14219 			val |= (1U << (x - 1));
14220 		CSR_WRITE(sc, WMREG_EECD, reg);
14221 		CSR_WRITE_FLUSH(sc);
14222 		delay(2);
14223 	}
14224 	*valp = val;
14225 }
14226 
14227 /* Microwire */
14228 
14229 /*
14230  * wm_nvm_read_uwire:
14231  *
14232  *	Read a word from the EEPROM using the MicroWire protocol.
14233  */
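/*
 * As an illustrative sketch only, one Microwire read transaction as
 * clocked out by the loop below reduces to:
 *
 *	(assert CS)
 *	wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);	   (opcode, MSB first)
 *	wm_eeprom_sendbits(sc, addr, sc->sc_nvm_addrbits); (word address)
 *	wm_eeprom_recvbits(sc, &val, 16);		   (data, MSB first)
 *	(deassert CS)
 */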
14234 static int
14235 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
14236 {
14237 	uint32_t reg, val;
14238 	int i, rv;
14239 
14240 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14241 		device_xname(sc->sc_dev), __func__));
14242 
14243 	rv = sc->nvm.acquire(sc);
14244 	if (rv != 0)
14245 		return rv;
14246 
14247 	for (i = 0; i < wordcnt; i++) {
14248 		/* Clear SK and DI. */
14249 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
14250 		CSR_WRITE(sc, WMREG_EECD, reg);
14251 
14252 		/*
14253 		 * XXX: workaround for a bug in qemu-0.12.x and prior
14254 		 * and Xen.
14255 		 *
14256 		 * We use this workaround only for the 82540 because qemu's
14257 		 * e1000 acts as an 82540.
14258 		 */
14259 		if (sc->sc_type == WM_T_82540) {
14260 			reg |= EECD_SK;
14261 			CSR_WRITE(sc, WMREG_EECD, reg);
14262 			reg &= ~EECD_SK;
14263 			CSR_WRITE(sc, WMREG_EECD, reg);
14264 			CSR_WRITE_FLUSH(sc);
14265 			delay(2);
14266 		}
14267 		/* XXX: end of workaround */
14268 
14269 		/* Set CHIP SELECT. */
14270 		reg |= EECD_CS;
14271 		CSR_WRITE(sc, WMREG_EECD, reg);
14272 		CSR_WRITE_FLUSH(sc);
14273 		delay(2);
14274 
14275 		/* Shift in the READ command. */
14276 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
14277 
14278 		/* Shift in address. */
14279 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
14280 
14281 		/* Shift out the data. */
14282 		wm_eeprom_recvbits(sc, &val, 16);
14283 		data[i] = val & 0xffff;
14284 
14285 		/* Clear CHIP SELECT. */
14286 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
14287 		CSR_WRITE(sc, WMREG_EECD, reg);
14288 		CSR_WRITE_FLUSH(sc);
14289 		delay(2);
14290 	}
14291 
14292 	sc->nvm.release(sc);
14293 	return 0;
14294 }
14295 
14296 /* SPI */
14297 
14298 /*
14299  * Set SPI and FLASH related information from the EECD register.
14300  * For 82541 and 82547, the word size is taken from EEPROM.
14301  */
14302 static int
14303 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
14304 {
14305 	int size;
14306 	uint32_t reg;
14307 	uint16_t data;
14308 
14309 	reg = CSR_READ(sc, WMREG_EECD);
14310 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
14311 
14312 	/* Read the size of NVM from EECD by default */
14313 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
14314 	switch (sc->sc_type) {
14315 	case WM_T_82541:
14316 	case WM_T_82541_2:
14317 	case WM_T_82547:
14318 	case WM_T_82547_2:
14319 		/* Set dummy value to access EEPROM */
14320 		sc->sc_nvm_wordsize = 64;
14321 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
14322 			aprint_error_dev(sc->sc_dev,
14323 			    "%s: failed to read EEPROM size\n", __func__);
14324 		}
14325 		reg = data;
14326 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
14327 		if (size == 0)
14328 			size = 6; /* 64 word size */
14329 		else
14330 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
14331 		break;
14332 	case WM_T_80003:
14333 	case WM_T_82571:
14334 	case WM_T_82572:
14335 	case WM_T_82573: /* SPI case */
14336 	case WM_T_82574: /* SPI case */
14337 	case WM_T_82583: /* SPI case */
14338 		size += NVM_WORD_SIZE_BASE_SHIFT;
14339 		if (size > 14)
14340 			size = 14;
14341 		break;
14342 	case WM_T_82575:
14343 	case WM_T_82576:
14344 	case WM_T_82580:
14345 	case WM_T_I350:
14346 	case WM_T_I354:
14347 	case WM_T_I210:
14348 	case WM_T_I211:
14349 		size += NVM_WORD_SIZE_BASE_SHIFT;
14350 		if (size > 15)
14351 			size = 15;
14352 		break;
14353 	default:
14354 		aprint_error_dev(sc->sc_dev,
14355 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
14356 		return -1;
14357 		break;
14358 	}
14359 
14360 	sc->sc_nvm_wordsize = 1 << size;
14361 
14362 	return 0;
14363 }
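
/*
 * Worked example (illustrative, assuming NVM_WORD_SIZE_BASE_SHIFT is 6
 * as on the rest of the e1000 family): an 82571 whose EECD size field
 * reads 2 gets size = 2 + 6 = 8, so sc_nvm_wordsize becomes
 * 1 << 8 = 256 words (512 bytes).
 */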
14364 
14365 /*
14366  * wm_nvm_ready_spi:
14367  *
14368  *	Wait for a SPI EEPROM to be ready for commands.
14369  */
14370 static int
14371 wm_nvm_ready_spi(struct wm_softc *sc)
14372 {
14373 	uint32_t val;
14374 	int usec;
14375 
14376 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14377 		device_xname(sc->sc_dev), __func__));
14378 
14379 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
14380 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
14381 		wm_eeprom_recvbits(sc, &val, 8);
14382 		if ((val & SPI_SR_RDY) == 0)
14383 			break;
14384 	}
14385 	if (usec >= SPI_MAX_RETRIES) {
14386 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
14387 		return -1;
14388 	}
14389 	return 0;
14390 }
14391 
14392 /*
14393  * wm_nvm_read_spi:
14394  *
14395  *	Read a word from the EEPROM using the SPI protocol.
14396  */
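/*
 * As an illustrative sketch, the transaction below reduces to: poll
 * for readiness with the RDSR opcode, shift out the 8-bit READ opcode
 * (OR'ing in SPI_OPC_A8 on 8-bit-address parts when the word address
 * is >= 128), shift out the byte address (word << 1), then shift in
 * 16 bits per word and byte-swap them into host order.
 */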
14397 static int
14398 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
14399 {
14400 	uint32_t reg, val;
14401 	int i;
14402 	uint8_t opc;
14403 	int rv;
14404 
14405 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14406 		device_xname(sc->sc_dev), __func__));
14407 
14408 	rv = sc->nvm.acquire(sc);
14409 	if (rv != 0)
14410 		return rv;
14411 
14412 	/* Clear SK and CS. */
14413 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
14414 	CSR_WRITE(sc, WMREG_EECD, reg);
14415 	CSR_WRITE_FLUSH(sc);
14416 	delay(2);
14417 
14418 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
14419 		goto out;
14420 
14421 	/* Toggle CS to flush commands. */
14422 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
14423 	CSR_WRITE_FLUSH(sc);
14424 	delay(2);
14425 	CSR_WRITE(sc, WMREG_EECD, reg);
14426 	CSR_WRITE_FLUSH(sc);
14427 	delay(2);
14428 
14429 	opc = SPI_OPC_READ;
14430 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
14431 		opc |= SPI_OPC_A8;
14432 
14433 	wm_eeprom_sendbits(sc, opc, 8);
14434 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
14435 
14436 	for (i = 0; i < wordcnt; i++) {
14437 		wm_eeprom_recvbits(sc, &val, 16);
14438 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
14439 	}
14440 
14441 	/* Raise CS and clear SK. */
14442 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
14443 	CSR_WRITE(sc, WMREG_EECD, reg);
14444 	CSR_WRITE_FLUSH(sc);
14445 	delay(2);
14446 
14447 out:
14448 	sc->nvm.release(sc);
14449 	return rv;
14450 }
14451 
14452 /* Using the EERD register */
14453 
14454 static int
14455 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
14456 {
14457 	uint32_t attempts = 100000;
14458 	uint32_t i, reg = 0;
14459 	int32_t done = -1;
14460 
14461 	for (i = 0; i < attempts; i++) {
14462 		reg = CSR_READ(sc, rw);
14463 
14464 		if (reg & EERD_DONE) {
14465 			done = 0;
14466 			break;
14467 		}
14468 		delay(5);
14469 	}
14470 
14471 	return done;
14472 }
14473 
14474 static int
14475 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
14476 {
14477 	int i, eerd = 0;
14478 	int rv;
14479 
14480 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14481 		device_xname(sc->sc_dev), __func__));
14482 
14483 	rv = sc->nvm.acquire(sc);
14484 	if (rv != 0)
14485 		return rv;
14486 
14487 	for (i = 0; i < wordcnt; i++) {
14488 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
14489 		CSR_WRITE(sc, WMREG_EERD, eerd);
14490 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
14491 		if (rv != 0) {
14492 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
14493 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
14494 			break;
14495 		}
14496 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
14497 	}
14498 
14499 	sc->nvm.release(sc);
14500 	return rv;
14501 }
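
/*
 * The EERD protocol used above, as a sketch: write
 * (addr << EERD_ADDR_SHIFT) | EERD_START, poll until EERD_DONE is set
 * (wm_poll_eerd_eewr_done()), then take the word from the register's
 * data field (>> EERD_DATA_SHIFT).
 */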
14502 
14503 /* Flash */
14504 
14505 static int
14506 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
14507 {
14508 	uint32_t eecd;
14509 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
14510 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
14511 	uint32_t nvm_dword = 0;
14512 	uint8_t sig_byte = 0;
14513 	int rv;
14514 
14515 	switch (sc->sc_type) {
14516 	case WM_T_PCH_SPT:
14517 	case WM_T_PCH_CNP:
14518 	case WM_T_PCH_TGP:
14519 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
14520 		act_offset = ICH_NVM_SIG_WORD * 2;
14521 
14522 		/* Set bank to 0 in case flash read fails. */
14523 		*bank = 0;
14524 
14525 		/* Check bank 0 */
14526 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
14527 		if (rv != 0)
14528 			return rv;
14529 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
14530 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
14531 			*bank = 0;
14532 			return 0;
14533 		}
14534 
14535 		/* Check bank 1 */
14536 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
14537 		    &nvm_dword);
14538 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
14539 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
14540 			*bank = 1;
14541 			return 0;
14542 		}
14543 		aprint_error_dev(sc->sc_dev,
14544 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
14545 		return -1;
14546 	case WM_T_ICH8:
14547 	case WM_T_ICH9:
14548 		eecd = CSR_READ(sc, WMREG_EECD);
14549 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
14550 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
14551 			return 0;
14552 		}
14553 		/* FALLTHROUGH */
14554 	default:
14555 		/* Default to 0 */
14556 		*bank = 0;
14557 
14558 		/* Check bank 0 */
14559 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
14560 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
14561 			*bank = 0;
14562 			return 0;
14563 		}
14564 
14565 		/* Check bank 1 */
14566 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
14567 		    &sig_byte);
14568 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
14569 			*bank = 1;
14570 			return 0;
14571 		}
14572 	}
14573 
14574 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
14575 		device_xname(sc->sc_dev)));
14576 	return -1;
14577 }
14578 
14579 /******************************************************************************
14580  * This function does initial flash setup so that a new read/write/erase cycle
14581  * can be started.
14582  *
14583  * sc - The pointer to the hw structure
14584  ****************************************************************************/
14585 static int32_t
14586 wm_ich8_cycle_init(struct wm_softc *sc)
14587 {
14588 	uint16_t hsfsts;
14589 	int32_t error = 1;
14590 	int32_t i     = 0;
14591 
14592 	if (sc->sc_type >= WM_T_PCH_SPT)
14593 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
14594 	else
14595 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
14596 
14597 	/* Maybe check the Flash Descriptor Valid bit in HW status */
14598 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
14599 		return error;
14600 
14601 	/* Clear FCERR and DAEL in HW status by writing 1 to each */
14603 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
14604 
14605 	if (sc->sc_type >= WM_T_PCH_SPT)
14606 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
14607 	else
14608 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
14609 
14610 	/*
14611 	 * To start a new cycle we would need either a hardware SPI
14612 	 * cycle-in-progress bit to check against, or an FDONE bit that
14613 	 * reads as 1 after a hardware reset, which could then be used to
14614 	 * indicate whether a cycle is in progress or has completed.
14615 	 * There should also be some software semaphore mechanism guarding
14616 	 * FDONE or the cycle-in-progress bit, so that accesses to those
14617 	 * bits by two threads are serialized and the two threads cannot
14618 	 * start a cycle at the same time.
14619 	 */
14620 
14621 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
14622 		/*
14623 		 * There is no cycle running at present, so we can start a
14624 		 * cycle
14625 		 */
14626 
14627 		/* Begin by setting Flash Cycle Done. */
14628 		hsfsts |= HSFSTS_DONE;
14629 		if (sc->sc_type >= WM_T_PCH_SPT)
14630 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14631 			    hsfsts & 0xffffUL);
14632 		else
14633 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
14634 		error = 0;
14635 	} else {
14636 		/*
14637 		 * Otherwise poll for some time so the current cycle has a
14638 		 * chance to end before giving up.
14639 		 */
14640 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
14641 			if (sc->sc_type >= WM_T_PCH_SPT)
14642 				hsfsts = ICH8_FLASH_READ32(sc,
14643 				    ICH_FLASH_HSFSTS) & 0xffffUL;
14644 			else
14645 				hsfsts = ICH8_FLASH_READ16(sc,
14646 				    ICH_FLASH_HSFSTS);
14647 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
14648 				error = 0;
14649 				break;
14650 			}
14651 			delay(1);
14652 		}
14653 		if (error == 0) {
14654 			/*
14655 			 * We successfully waited for the previous cycle to
14656 			 * end; now set the Flash Cycle Done bit.
14657 			 */
14658 			hsfsts |= HSFSTS_DONE;
14659 			if (sc->sc_type >= WM_T_PCH_SPT)
14660 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14661 				    hsfsts & 0xffffUL);
14662 			else
14663 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
14664 				    hsfsts);
14665 		}
14666 	}
14667 	return error;
14668 }
14669 
14670 /******************************************************************************
14671  * This function starts a flash cycle and waits for its completion
14672  *
14673  * sc - The pointer to the hw structure
14674  ****************************************************************************/
14675 static int32_t
14676 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
14677 {
14678 	uint16_t hsflctl;
14679 	uint16_t hsfsts;
14680 	int32_t error = 1;
14681 	uint32_t i = 0;
14682 
14683 	/* Start a cycle by writing 1 to Flash Cycle Go in HW Flash Control */
14684 	if (sc->sc_type >= WM_T_PCH_SPT)
14685 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
14686 	else
14687 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
14688 	hsflctl |= HSFCTL_GO;
14689 	if (sc->sc_type >= WM_T_PCH_SPT)
14690 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14691 		    (uint32_t)hsflctl << 16);
14692 	else
14693 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
14694 
14695 	/* Wait till FDONE bit is set to 1 */
14696 	do {
14697 		if (sc->sc_type >= WM_T_PCH_SPT)
14698 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
14699 			    & 0xffffUL;
14700 		else
14701 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
14702 		if (hsfsts & HSFSTS_DONE)
14703 			break;
14704 		delay(1);
14705 		i++;
14706 	} while (i < timeout);
14707 	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
14708 		error = 0;
14709 
14710 	return error;
14711 }
14712 
14713 /******************************************************************************
14714  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
14715  *
14716  * sc - The pointer to the hw structure
14717  * index - The index of the byte or word to read.
14718  * size - Size of data to read, 1=byte 2=word, 4=dword
14719  * data - Pointer to the word to store the value read.
14720  *****************************************************************************/
14721 static int32_t
14722 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
14723     uint32_t size, uint32_t *data)
14724 {
14725 	uint16_t hsfsts;
14726 	uint16_t hsflctl;
14727 	uint32_t flash_linear_address;
14728 	uint32_t flash_data = 0;
14729 	int32_t error = 1;
14730 	int32_t count = 0;
14731 
14732 	if (size < 1 || size > 4 || data == NULL ||
14733 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
14734 		return error;
14735 
14736 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
14737 	    sc->sc_ich8_flash_base;
14738 
14739 	do {
14740 		delay(1);
14741 		/* Steps */
14742 		error = wm_ich8_cycle_init(sc);
14743 		if (error)
14744 			break;
14745 
14746 		if (sc->sc_type >= WM_T_PCH_SPT)
14747 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
14748 			    >> 16;
14749 		else
14750 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
14751 		/* The byte count field holds size - 1: 0 = 1, 1 = 2, 3 = 4 bytes. */
14752 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
14753 		    & HSFCTL_BCOUNT_MASK;
14754 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
14755 		if (sc->sc_type >= WM_T_PCH_SPT) {
14756 			/*
14757 			 * In SPT, this register is in LAN memory space, not
14758 			 * flash.  Therefore, only 32-bit access is supported.
14759 			 */
14760 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
14761 			    (uint32_t)hsflctl << 16);
14762 		} else
14763 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
14764 
14765 		/*
14766 		 * Write the last 24 bits of the index into the Flash Linear
14767 		 * Address field of the Flash Address register.
14768 		 * TODO: maybe check the index against the size of the flash.
14769 		 */
14770 
14771 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
14772 
14773 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
14774 
14775 		/*
14776 		 * If FCERR is set, clear it and retry the whole sequence a
14777 		 * few more times; otherwise read the data out of Flash
14778 		 * Data0, least significant byte first.
14780 		 */
14781 		if (error == 0) {
14782 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
14783 			if (size == 1)
14784 				*data = (uint8_t)(flash_data & 0x000000FF);
14785 			else if (size == 2)
14786 				*data = (uint16_t)(flash_data & 0x0000FFFF);
14787 			else if (size == 4)
14788 				*data = (uint32_t)flash_data;
14789 			break;
14790 		} else {
14791 			/*
14792 			 * If we've gotten here, then things are probably
14793 			 * completely hosed, but if the error condition is
14794 			 * detected, it won't hurt to give it another try...
14795 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
14796 			 */
14797 			if (sc->sc_type >= WM_T_PCH_SPT)
14798 				hsfsts = ICH8_FLASH_READ32(sc,
14799 				    ICH_FLASH_HSFSTS) & 0xffffUL;
14800 			else
14801 				hsfsts = ICH8_FLASH_READ16(sc,
14802 				    ICH_FLASH_HSFSTS);
14803 
14804 			if (hsfsts & HSFSTS_ERR) {
14805 				/* Repeat for some time before giving up. */
14806 				continue;
14807 			} else if ((hsfsts & HSFSTS_DONE) == 0)
14808 				break;
14809 		}
14810 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
14811 
14812 	return error;
14813 }
14814 
14815 /******************************************************************************
14816  * Reads a single byte from the NVM using the ICH8 flash access registers.
14817  *
14818  * sc - pointer to wm_hw structure
14819  * index - The index of the byte to read.
14820  * data - Pointer to a byte to store the value read.
14821  *****************************************************************************/
14822 static int32_t
14823 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
14824 {
14825 	int32_t status;
14826 	uint32_t word = 0;
14827 
14828 	status = wm_read_ich8_data(sc, index, 1, &word);
14829 	if (status == 0)
14830 		*data = (uint8_t)word;
14831 	else
14832 		*data = 0;
14833 
14834 	return status;
14835 }
14836 
14837 /******************************************************************************
14838  * Reads a word from the NVM using the ICH8 flash access registers.
14839  *
14840  * sc - pointer to wm_hw structure
14841  * index - The starting byte index of the word to read.
14842  * data - Pointer to a word to store the value read.
14843  *****************************************************************************/
14844 static int32_t
14845 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
14846 {
14847 	int32_t status;
14848 	uint32_t word = 0;
14849 
14850 	status = wm_read_ich8_data(sc, index, 2, &word);
14851 	if (status == 0)
14852 		*data = (uint16_t)word;
14853 	else
14854 		*data = 0;
14855 
14856 	return status;
14857 }
14858 
14859 /******************************************************************************
14860  * Reads a dword from the NVM using the ICH8 flash access registers.
14861  *
14862  * sc - pointer to wm_hw structure
14863  * index - The starting byte index of the word to read.
14864  * data - Pointer to a word to store the value read.
14865  *****************************************************************************/
14866 static int32_t
14867 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
14868 {
14869 	int32_t status;
14870 
14871 	status = wm_read_ich8_data(sc, index, 4, data);
14872 	return status;
14873 }
14874 
14875 /******************************************************************************
14876  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
14877  * register.
14878  *
14879  * sc - Struct containing variables accessed by shared code
14880  * offset - offset of word in the EEPROM to read
14881  * data - word read from the EEPROM
14882  * words - number of words to read
14883  *****************************************************************************/
14884 static int
14885 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
14886 {
14887 	int rv;
14888 	uint32_t flash_bank = 0;
14889 	uint32_t act_offset = 0;
14890 	uint32_t bank_offset = 0;
14891 	uint16_t word = 0;
14892 	uint16_t i = 0;
14893 
14894 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14895 		device_xname(sc->sc_dev), __func__));
14896 
14897 	rv = sc->nvm.acquire(sc);
14898 	if (rv != 0)
14899 		return rv;
14900 
14901 	/*
14902 	 * We need to know which is the valid flash bank.  In the event
14903 	 * that we didn't allocate eeprom_shadow_ram, we may not be
14904 	 * managing flash_bank. So it cannot be trusted and needs
14905 	 * to be updated with each read.
14906 	 */
14907 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
14908 	if (rv) {
14909 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
14910 			device_xname(sc->sc_dev)));
14911 		flash_bank = 0;
14912 	}
14913 
14914 	/*
14915 	 * Adjust the offset if we're on bank 1; the bank size is in
14916 	 * words, hence the * 2 below to get a byte offset.
14917 	 */
14918 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
14919 
14920 	for (i = 0; i < words; i++) {
14921 		/* The NVM part needs a byte offset, hence * 2 */
14922 		act_offset = bank_offset + ((offset + i) * 2);
14923 		rv = wm_read_ich8_word(sc, act_offset, &word);
14924 		if (rv) {
14925 			aprint_error_dev(sc->sc_dev,
14926 			    "%s: failed to read NVM\n", __func__);
14927 			break;
14928 		}
14929 		data[i] = word;
14930 	}
14931 
14932 	sc->nvm.release(sc);
14933 	return rv;
14934 }
14935 
14936 /******************************************************************************
14937  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
14938  * register.
14939  *
14940  * sc - Struct containing variables accessed by shared code
14941  * offset - offset of word in the EEPROM to read
14942  * data - word read from the EEPROM
14943  * words - number of words to read
14944  *****************************************************************************/
14945 static int
14946 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
14947 {
14948 	int	 rv;
14949 	uint32_t flash_bank = 0;
14950 	uint32_t act_offset = 0;
14951 	uint32_t bank_offset = 0;
14952 	uint32_t dword = 0;
14953 	uint16_t i = 0;
14954 
14955 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14956 		device_xname(sc->sc_dev), __func__));
14957 
14958 	rv = sc->nvm.acquire(sc);
14959 	if (rv != 0)
14960 		return rv;
14961 
14962 	/*
14963 	 * We need to know which is the valid flash bank.  In the event
14964 	 * that we didn't allocate eeprom_shadow_ram, we may not be
14965 	 * managing flash_bank. So it cannot be trusted and needs
14966 	 * to be updated with each read.
14967 	 */
14968 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
14969 	if (rv) {
14970 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
14971 			device_xname(sc->sc_dev)));
14972 		flash_bank = 0;
14973 	}
14974 
14975 	/*
14976 	 * Adjust the offset if we're on bank 1; the bank size is in
14977 	 * words, hence the * 2 below to get a byte offset.
14978 	 */
14979 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
14980 
14981 	for (i = 0; i < words; i++) {
14982 		/* The NVM part needs a byte offset, hence * 2 */
14983 		act_offset = bank_offset + ((offset + i) * 2);
14984 		/* but we must read dword aligned, so mask ... */
14985 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
14986 		if (rv) {
14987 			aprint_error_dev(sc->sc_dev,
14988 			    "%s: failed to read NVM\n", __func__);
14989 			break;
14990 		}
14991 		/* ... and pick out low or high word */
14992 		if ((act_offset & 0x2) == 0)
14993 			data[i] = (uint16_t)(dword & 0xFFFF);
14994 		else
14995 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
14996 	}
14997 
14998 	sc->nvm.release(sc);
14999 	return rv;
15000 }
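
/*
 * Worked example (illustrative): word offset 9 in bank 0 gives
 * act_offset 18 (0x12).  The dword is then read from 0x10 and, since
 * bit 1 of the offset is set, the wanted word is in bits 31:16.
 */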
15001 
15002 /* iNVM */
15003 
15004 static int
15005 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
15006 {
15007 	int32_t	 rv = 0;
15008 	uint32_t invm_dword;
15009 	uint16_t i;
15010 	uint8_t record_type, word_address;
15011 
15012 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
15013 		device_xname(sc->sc_dev), __func__));
15014 
15015 	for (i = 0; i < INVM_SIZE; i++) {
15016 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
15017 		/* Get record type */
15018 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
15019 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
15020 			break;
15021 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
15022 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
15023 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
15024 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
15025 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
15026 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
15027 			if (word_address == address) {
15028 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
15029 				rv = 0;
15030 				break;
15031 			}
15032 		}
15033 	}
15034 
15035 	return rv;
15036 }
15037 
15038 static int
15039 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
15040 {
15041 	int i, rv;
15042 
15043 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
15044 		device_xname(sc->sc_dev), __func__));
15045 
15046 	rv = sc->nvm.acquire(sc);
15047 	if (rv != 0)
15048 		return rv;
15049 
15050 	for (i = 0; i < words; i++) {
15051 		switch (offset + i) {
15052 		case NVM_OFF_MACADDR:
15053 		case NVM_OFF_MACADDR1:
15054 		case NVM_OFF_MACADDR2:
15055 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
15056 			if (rv != 0) {
15057 				data[i] = 0xffff;
15058 				rv = -1;
15059 			}
15060 			break;
15061 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
15062 			rv = wm_nvm_read_word_invm(sc, offset, data);
15063 			if (rv != 0) {
15064 				*data = INVM_DEFAULT_AL;
15065 				rv = 0;
15066 			}
15067 			break;
15068 		case NVM_OFF_CFG2:
15069 			rv = wm_nvm_read_word_invm(sc, offset, data);
15070 			if (rv != 0) {
15071 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
15072 				rv = 0;
15073 			}
15074 			break;
15075 		case NVM_OFF_CFG4:
15076 			rv = wm_nvm_read_word_invm(sc, offset, data);
15077 			if (rv != 0) {
15078 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
15079 				rv = 0;
15080 			}
15081 			break;
15082 		case NVM_OFF_LED_1_CFG:
15083 			rv = wm_nvm_read_word_invm(sc, offset, data);
15084 			if (rv != 0) {
15085 				*data = NVM_LED_1_CFG_DEFAULT_I211;
15086 				rv = 0;
15087 			}
15088 			break;
15089 		case NVM_OFF_LED_0_2_CFG:
15090 			rv = wm_nvm_read_word_invm(sc, offset, data);
15091 			if (rv != 0) {
15092 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
15093 				rv = 0;
15094 			}
15095 			break;
15096 		case NVM_OFF_ID_LED_SETTINGS:
15097 			rv = wm_nvm_read_word_invm(sc, offset, data);
15098 			if (rv != 0) {
15099 				*data = ID_LED_RESERVED_FFFF;
15100 				rv = 0;
15101 			}
15102 			break;
15103 		default:
15104 			DPRINTF(sc, WM_DEBUG_NVM,
15105 			    ("NVM word 0x%02x is not mapped.\n", offset));
15106 			*data = NVM_RESERVED_WORD;
15107 			break;
15108 		}
15109 	}
15110 
15111 	sc->nvm.release(sc);
15112 	return rv;
15113 }
15114 
15115 /* Locking, NVM type detection, checksum validation, version and read */
15116 
15117 static int
15118 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
15119 {
15120 	uint32_t eecd = 0;
15121 
15122 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
15123 	    || sc->sc_type == WM_T_82583) {
15124 		eecd = CSR_READ(sc, WMREG_EECD);
15125 
15126 		/* Isolate bits 15 & 16 */
15127 		eecd = ((eecd >> 15) & 0x03);
15128 
15129 		/* If both bits are set, device is Flash type */
15130 		if (eecd == 0x03)
15131 			return 0;
15132 	}
15133 	return 1;
15134 }
15135 
15136 static int
15137 wm_nvm_flash_presence_i210(struct wm_softc *sc)
15138 {
15139 	uint32_t eec;
15140 
15141 	eec = CSR_READ(sc, WMREG_EEC);
15142 	if ((eec & EEC_FLASH_DETECTED) != 0)
15143 		return 1;
15144 
15145 	return 0;
15146 }
15147 
15148 /*
15149  * wm_nvm_validate_checksum
15150  *
15151  * The checksum is defined as the sum of the first 64 16-bit words.
15152  */
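/*
 * A minimal sketch of the rule (assuming NVM_CHECKSUM is the usual
 * 0xbaba constant for this hardware family): the checksum word is
 * chosen so that the 16-bit sum of words 0x00-0x3f wraps around to it:
 *
 *	uint16_t sum = 0;
 *	for (int i = 0; i < NVM_SIZE; i++)
 *		sum += word[i];
 *	valid = (sum == NVM_CHECKSUM);
 */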
15153 static int
15154 wm_nvm_validate_checksum(struct wm_softc *sc)
15155 {
15156 	uint16_t checksum;
15157 	uint16_t eeprom_data;
15158 #ifdef WM_DEBUG
15159 	uint16_t csum_wordaddr, valid_checksum;
15160 #endif
15161 	int i;
15162 
15163 	checksum = 0;
15164 
15165 	/* Don't check for I211 */
15166 	if (sc->sc_type == WM_T_I211)
15167 		return 0;
15168 
15169 #ifdef WM_DEBUG
15170 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT) ||
15171 	    (sc->sc_type == WM_T_PCH_CNP) || (sc->sc_type == WM_T_PCH_TGP)) {
15172 		csum_wordaddr = NVM_OFF_COMPAT;
15173 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
15174 	} else {
15175 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
15176 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
15177 	}
15178 
15179 	/* Dump EEPROM image for debug */
15180 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
15181 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
15182 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
15183 		/* XXX PCH_SPT? */
15184 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
15185 		if ((eeprom_data & valid_checksum) == 0)
15186 			DPRINTF(sc, WM_DEBUG_NVM,
15187 			    ("%s: NVM need to be updated (%04x != %04x)\n",
15188 				device_xname(sc->sc_dev), eeprom_data,
15189 				valid_checksum));
15190 	}
15191 
15192 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
15193 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
15194 		for (i = 0; i < NVM_SIZE; i++) {
15195 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
15196 				printf("XXXX ");
15197 			else
15198 				printf("%04hx ", eeprom_data);
15199 			if (i % 8 == 7)
15200 				printf("\n");
15201 		}
15202 	}
15203 
15204 #endif /* WM_DEBUG */
15205 
15206 	for (i = 0; i < NVM_SIZE; i++) {
15207 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
15208 			return -1;
15209 		checksum += eeprom_data;
15210 	}
15211 
15212 	if (checksum != (uint16_t) NVM_CHECKSUM) {
15213 #ifdef WM_DEBUG
15214 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
15215 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
15216 #endif
15217 	}
15218 
15219 	return 0;
15220 }
15221 
15222 static void
15223 wm_nvm_version_invm(struct wm_softc *sc)
15224 {
15225 	uint32_t dword;
15226 
15227 	/*
15228 	 * Linux's code for decoding the version is very strange, so we
15229 	 * don't follow that algorithm and simply use word 61 as the
15230 	 * documentation describes.  It may not be perfect, though...
15231 	 *
15232 	 * Example:
15233 	 *
15234 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
15235 	 */
15236 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
15237 	dword = __SHIFTOUT(dword, INVM_VER_1);
15238 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
15239 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
15240 }
15241 
15242 static void
15243 wm_nvm_version(struct wm_softc *sc)
15244 {
15245 	uint16_t major, minor, build, patch;
15246 	uint16_t uid0, uid1;
15247 	uint16_t nvm_data;
15248 	uint16_t off;
15249 	bool check_version = false;
15250 	bool check_optionrom = false;
15251 	bool have_build = false;
15252 	bool have_uid = true;
15253 
15254 	/*
15255 	 * Version format:
15256 	 *
15257 	 * XYYZ
15258 	 * X0YZ
15259 	 * X0YY
15260 	 *
15261 	 * Example:
15262 	 *
15263 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
15264 	 *	82571	0x50a6	5.10.6?
15265 	 *	82572	0x506a	5.6.10?
15266 	 *	82572EI	0x5069	5.6.9?
15267 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
15268 	 *		0x2013	2.1.3?
15269 	 *	82583	0x10a0	1.10.0? (the document says it's the default value)
15270 	 * ICH8+82567	0x0040	0.4.0?
15271 	 * ICH9+82566	0x1040	1.4.0?
15272 	 *ICH10+82567	0x0043	0.4.3?
15273 	 *  PCH+82577	0x00c1	0.12.1?
15274 	 * PCH2+82579	0x00d3	0.13.3?
15275 	 *		0x00d4	0.13.4?
15276 	 *  LPT+I218	0x0023	0.2.3?
15277 	 *  SPT+I219	0x0084	0.8.4?
15278 	 *  CNP+I219	0x0054	0.5.4?
15279 	 */
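	/*
	 * Worked example of the decoding below (illustrative): nvm_data
	 * 0x5069 gives major = 5, minor = 0x06 -> 6 after the BCD-style
	 * conversion, and build = 9, i.e. "5.6.9", matching the 82572EI
	 * row above.
	 */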
15280 
15281 	/*
15282 	 * XXX
15283 	 * The SPI ROM of qemu's e1000e emulation (82574L) has only 64
15284 	 * words.  I've never seen real 82574 hardware with such a small one.
15285 	 */
15286 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
15287 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
15288 		have_uid = false;
15289 
15290 	switch (sc->sc_type) {
15291 	case WM_T_82571:
15292 	case WM_T_82572:
15293 	case WM_T_82574:
15294 	case WM_T_82583:
15295 		check_version = true;
15296 		check_optionrom = true;
15297 		have_build = true;
15298 		break;
15299 	case WM_T_ICH8:
15300 	case WM_T_ICH9:
15301 	case WM_T_ICH10:
15302 	case WM_T_PCH:
15303 	case WM_T_PCH2:
15304 	case WM_T_PCH_LPT:
15305 	case WM_T_PCH_SPT:
15306 	case WM_T_PCH_CNP:
15307 	case WM_T_PCH_TGP:
15308 		check_version = true;
15309 		have_build = true;
15310 		have_uid = false;
15311 		break;
15312 	case WM_T_82575:
15313 	case WM_T_82576:
15314 	case WM_T_82580:
15315 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
15316 			check_version = true;
15317 		break;
15318 	case WM_T_I211:
15319 		wm_nvm_version_invm(sc);
15320 		have_uid = false;
15321 		goto printver;
15322 	case WM_T_I210:
15323 		if (!wm_nvm_flash_presence_i210(sc)) {
15324 			wm_nvm_version_invm(sc);
15325 			have_uid = false;
15326 			goto printver;
15327 		}
15328 		/* FALLTHROUGH */
15329 	case WM_T_I350:
15330 	case WM_T_I354:
15331 		check_version = true;
15332 		check_optionrom = true;
15333 		break;
15334 	default:
15335 		return;
15336 	}
15337 	if (check_version
15338 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
15339 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
15340 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
15341 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
15342 			build = nvm_data & NVM_BUILD_MASK;
15343 			have_build = true;
15344 		} else
15345 			minor = nvm_data & 0x00ff;
15346 
15347 		/* Decimal */
15348 		minor = (minor / 16) * 10 + (minor % 16);
15349 		sc->sc_nvm_ver_major = major;
15350 		sc->sc_nvm_ver_minor = minor;
15351 
15352 printver:
15353 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
15354 		    sc->sc_nvm_ver_minor);
15355 		if (have_build) {
15356 			sc->sc_nvm_ver_build = build;
15357 			aprint_verbose(".%d", build);
15358 		}
15359 	}
15360 
15361 	/* Assume the Option ROM area is above NVM_SIZE */
15362 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
15363 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
15364 		/* Option ROM Version */
15365 		if ((off != 0x0000) && (off != 0xffff)) {
15366 			int rv;
15367 			uint16_t oid0, oid1;
15368 
15369 			off += NVM_COMBO_VER_OFF;
15370 			rv = wm_nvm_read(sc, off + 1, 1, &oid1);
15371 			rv |= wm_nvm_read(sc, off, 1, &oid0);
15372 			if ((rv == 0) && (oid0 != 0) && (oid0 != 0xffff)
15373 			    && (oid1 != 0) && (oid1 != 0xffff)) {
15374 				/* 16bits */
15375 				major = oid0 >> 8;
15376 				build = (oid0 << 8) | (oid1 >> 8);
15377 				patch = oid1 & 0x00ff;
15378 				aprint_verbose(", option ROM Version %d.%d.%d",
15379 				    major, build, patch);
15380 			}
15381 		}
15382 	}
15383 
15384 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
15385 		aprint_verbose(", Image Unique ID %08x",
15386 		    ((uint32_t)uid1 << 16) | uid0);
15387 }
15388 
15389 /*
15390  * wm_nvm_read:
15391  *
15392  *	Read data from the serial EEPROM.
15393  */
15394 static int
15395 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
15396 {
15397 	int rv;
15398 
15399 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
15400 		device_xname(sc->sc_dev), __func__));
15401 
15402 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
15403 		return -1;
15404 
15405 	rv = sc->nvm.read(sc, word, wordcnt, data);
15406 
15407 	return rv;
15408 }
15409 
15410 /*
15411  * Hardware semaphores.
15412  * Very complex...
15413  */
15414 
15415 static int
15416 wm_get_null(struct wm_softc *sc)
15417 {
15418 
15419 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15420 		device_xname(sc->sc_dev), __func__));
15421 	return 0;
15422 }
15423 
15424 static void
15425 wm_put_null(struct wm_softc *sc)
15426 {
15427 
15428 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15429 		device_xname(sc->sc_dev), __func__));
15430 	return;
15431 }
15432 
15433 static int
15434 wm_get_eecd(struct wm_softc *sc)
15435 {
15436 	uint32_t reg;
15437 	int x;
15438 
15439 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
15440 		device_xname(sc->sc_dev), __func__));
15441 
15442 	reg = CSR_READ(sc, WMREG_EECD);
15443 
15444 	/* Request EEPROM access. */
15445 	reg |= EECD_EE_REQ;
15446 	CSR_WRITE(sc, WMREG_EECD, reg);
15447 
15448 	/* ..and wait for it to be granted. */
15449 	for (x = 0; x < 1000; x++) {
15450 		reg = CSR_READ(sc, WMREG_EECD);
15451 		if (reg & EECD_EE_GNT)
15452 			break;
15453 		delay(5);
15454 	}
15455 	if ((reg & EECD_EE_GNT) == 0) {
15456 		aprint_error_dev(sc->sc_dev,
15457 		    "could not acquire EEPROM GNT\n");
15458 		reg &= ~EECD_EE_REQ;
15459 		CSR_WRITE(sc, WMREG_EECD, reg);
15460 		return -1;
15461 	}
15462 
15463 	return 0;
15464 }
15465 
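/*
 * Bit-bang helpers for the EEPROM serial clock (EECD_SK).  SPI
 * EEPROMs are clocked with a 1us delay, Microwire ones with 50us.
 */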
15466 static void
15467 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
15468 {
15469 
15470 	*eecd |= EECD_SK;
15471 	CSR_WRITE(sc, WMREG_EECD, *eecd);
15472 	CSR_WRITE_FLUSH(sc);
15473 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
15474 		delay(1);
15475 	else
15476 		delay(50);
15477 }
15478 
15479 static void
15480 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
15481 {
15482 
15483 	*eecd &= ~EECD_SK;
15484 	CSR_WRITE(sc, WMREG_EECD, *eecd);
15485 	CSR_WRITE_FLUSH(sc);
15486 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
15487 		delay(1);
15488 	else
15489 		delay(50);
15490 }
15491 
15492 static void
15493 wm_put_eecd(struct wm_softc *sc)
15494 {
15495 	uint32_t reg;
15496 
15497 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15498 		device_xname(sc->sc_dev), __func__));
15499 
15500 	/* Stop nvm */
15501 	reg = CSR_READ(sc, WMREG_EECD);
15502 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
15503 		/* Pull CS high */
15504 		reg |= EECD_CS;
15505 		wm_nvm_eec_clock_lower(sc, &reg);
15506 	} else {
15507 		/* CS on Microwire is active-high */
15508 		reg &= ~(EECD_CS | EECD_DI);
15509 		CSR_WRITE(sc, WMREG_EECD, reg);
15510 		wm_nvm_eec_clock_raise(sc, &reg);
15511 		wm_nvm_eec_clock_lower(sc, &reg);
15512 	}
15513 
15514 	reg = CSR_READ(sc, WMREG_EECD);
15515 	reg &= ~EECD_EE_REQ;
15516 	CSR_WRITE(sc, WMREG_EECD, reg);
15517 
15518 	return;
15519 }
15520 
15521 /*
15522  * Get hardware semaphore.
15523  * Same as e1000_get_hw_semaphore_generic()
15524  */
15525 static int
15526 wm_get_swsm_semaphore(struct wm_softc *sc)
15527 {
15528 	int32_t timeout;
15529 	uint32_t swsm;
15530 
15531 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15532 		device_xname(sc->sc_dev), __func__));
15533 	KASSERT(sc->sc_nvm_wordsize > 0);
15534 
15535 retry:
15536 	/* Get the SW semaphore. */
15537 	timeout = sc->sc_nvm_wordsize + 1;
15538 	while (timeout) {
15539 		swsm = CSR_READ(sc, WMREG_SWSM);
15540 
15541 		if ((swsm & SWSM_SMBI) == 0)
15542 			break;
15543 
15544 		delay(50);
15545 		timeout--;
15546 	}
15547 
15548 	if (timeout == 0) {
15549 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
15550 			/*
15551 			 * In rare circumstances, the SW semaphore may already
15552 			 * be held unintentionally. Clear the semaphore once
15553 			 * before giving up.
15554 			 */
15555 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
15556 			wm_put_swsm_semaphore(sc);
15557 			goto retry;
15558 		}
15559 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
15560 		return -1;
15561 	}
15562 
15563 	/* Get the FW semaphore. */
15564 	timeout = sc->sc_nvm_wordsize + 1;
15565 	while (timeout) {
15566 		swsm = CSR_READ(sc, WMREG_SWSM);
15567 		swsm |= SWSM_SWESMBI;
15568 		CSR_WRITE(sc, WMREG_SWSM, swsm);
15569 		/* If we managed to set the bit we got the semaphore. */
15570 		swsm = CSR_READ(sc, WMREG_SWSM);
15571 		if (swsm & SWSM_SWESMBI)
15572 			break;
15573 
15574 		delay(50);
15575 		timeout--;
15576 	}
15577 
15578 	if (timeout == 0) {
15579 		aprint_error_dev(sc->sc_dev,
15580 		    "could not acquire SWSM SWESMBI\n");
15581 		/* Release semaphores */
15582 		wm_put_swsm_semaphore(sc);
15583 		return -1;
15584 	}
15585 	return 0;
15586 }
15587 
15588 /*
15589  * Put hardware semaphore.
15590  * Same as e1000_put_hw_semaphore_generic()
15591  */
15592 static void
15593 wm_put_swsm_semaphore(struct wm_softc *sc)
15594 {
15595 	uint32_t swsm;
15596 
15597 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15598 		device_xname(sc->sc_dev), __func__));
15599 
15600 	swsm = CSR_READ(sc, WMREG_SWSM);
15601 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
15602 	CSR_WRITE(sc, WMREG_SWSM, swsm);
15603 }
15604 
15605 /*
15606  * Get SW/FW semaphore.
15607  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
15608  */
15609 static int
15610 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
15611 {
15612 	uint32_t swfw_sync;
15613 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
15614 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
15615 	int timeout;
15616 
15617 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15618 		device_xname(sc->sc_dev), __func__));
15619 
15620 	if (sc->sc_type == WM_T_80003)
15621 		timeout = 50;
15622 	else
15623 		timeout = 200;
15624 
15625 	while (timeout) {
15626 		if (wm_get_swsm_semaphore(sc)) {
15627 			aprint_error_dev(sc->sc_dev,
15628 			    "%s: failed to get semaphore\n",
15629 			    __func__);
15630 			return -1;
15631 		}
15632 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
15633 		if ((swfw_sync & (swmask | fwmask)) == 0) {
15634 			swfw_sync |= swmask;
15635 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
15636 			wm_put_swsm_semaphore(sc);
15637 			return 0;
15638 		}
15639 		wm_put_swsm_semaphore(sc);
15640 		delay(5000);
15641 		timeout--;
15642 	}
15643 	device_printf(sc->sc_dev,
15644 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
15645 	    mask, swfw_sync);
15646 	return -1;
15647 }
15648 
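/*
 * Release our SW bits in SW_FW_SYNC.  The SWSM semaphore is taken
 * (and then dropped) just around the SW_FW_SYNC update itself.
 */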
15649 static void
15650 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
15651 {
15652 	uint32_t swfw_sync;
15653 
15654 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15655 		device_xname(sc->sc_dev), __func__));
15656 
15657 	while (wm_get_swsm_semaphore(sc) != 0)
15658 		continue;
15659 
15660 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
15661 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
15662 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
15663 
15664 	wm_put_swsm_semaphore(sc);
15665 }
15666 
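/*
 * NVM access on the 80003 is two-level: take the SWFW EEPROM
 * semaphore first and then, if WM_F_LOCK_EECD is set, perform the
 * EECD request/grant handshake as well.
 */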
15667 static int
15668 wm_get_nvm_80003(struct wm_softc *sc)
15669 {
15670 	int rv;
15671 
15672 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
15673 		device_xname(sc->sc_dev), __func__));
15674 
15675 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
15676 		aprint_error_dev(sc->sc_dev,
15677 		    "%s: failed to get semaphore(SWFW)\n", __func__);
15678 		return rv;
15679 	}
15680 
15681 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15682 	    && (rv = wm_get_eecd(sc)) != 0) {
15683 		aprint_error_dev(sc->sc_dev,
15684 		    "%s: failed to get semaphore(EECD)\n", __func__);
15685 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
15686 		return rv;
15687 	}
15688 
15689 	return 0;
15690 }
15691 
15692 static void
15693 wm_put_nvm_80003(struct wm_softc *sc)
15694 {
15695 
15696 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15697 		device_xname(sc->sc_dev), __func__));
15698 
15699 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15700 		wm_put_eecd(sc);
15701 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
15702 }
15703 
15704 static int
15705 wm_get_nvm_82571(struct wm_softc *sc)
15706 {
15707 	int rv;
15708 
15709 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15710 		device_xname(sc->sc_dev), __func__));
15711 
15712 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
15713 		return rv;
15714 
15715 	switch (sc->sc_type) {
15716 	case WM_T_82573:
15717 		break;
15718 	default:
15719 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15720 			rv = wm_get_eecd(sc);
15721 		break;
15722 	}
15723 
15724 	if (rv != 0) {
15725 		aprint_error_dev(sc->sc_dev,
15726 		    "%s: failed to get semaphore\n",
15727 		    __func__);
15728 		wm_put_swsm_semaphore(sc);
15729 	}
15730 
15731 	return rv;
15732 }
15733 
15734 static void
15735 wm_put_nvm_82571(struct wm_softc *sc)
15736 {
15737 
15738 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15739 		device_xname(sc->sc_dev), __func__));
15740 
15741 	switch (sc->sc_type) {
15742 	case WM_T_82573:
15743 		break;
15744 	default:
15745 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
15746 			wm_put_eecd(sc);
15747 		break;
15748 	}
15749 
15750 	wm_put_swsm_semaphore(sc);
15751 }
15752 
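/*
 * The PHY semaphore bit depends on the PCI function; swfwphysem[]
 * maps sc_funcid to the corresponding SWFW mask.
 */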
15753 static int
15754 wm_get_phy_82575(struct wm_softc *sc)
15755 {
15756 
15757 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15758 		device_xname(sc->sc_dev), __func__));
15759 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
15760 }
15761 
15762 static void
15763 wm_put_phy_82575(struct wm_softc *sc)
15764 {
15765 
15766 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15767 		device_xname(sc->sc_dev), __func__));
15768 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
15769 }
15770 
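/*
 * ICH/PCH parts arbitrate PHY and NVM access through the MDIO
 * software ownership bit in EXTCNFCTR; sc_ich_phymtx additionally
 * serializes it within the driver.
 */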
15771 static int
15772 wm_get_swfwhw_semaphore(struct wm_softc *sc)
15773 {
15774 	uint32_t ext_ctrl;
15775 	int timeout = 200;
15776 
15777 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15778 		device_xname(sc->sc_dev), __func__));
15779 
15780 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
15781 	for (timeout = 0; timeout < 200; timeout++) {
15782 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15783 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
15784 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15785 
15786 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15787 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
15788 			return 0;
15789 		delay(5000);
15790 	}
15791 	device_printf(sc->sc_dev,
15792 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
15793 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
15794 	return -1;
15795 }
15796 
15797 static void
15798 wm_put_swfwhw_semaphore(struct wm_softc *sc)
15799 {
15800 	uint32_t ext_ctrl;
15801 
15802 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15803 		device_xname(sc->sc_dev), __func__));
15804 
15805 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15806 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15807 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15808 
15809 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
15810 }
15811 
15812 static int
15813 wm_get_swflag_ich8lan(struct wm_softc *sc)
15814 {
15815 	uint32_t ext_ctrl;
15816 	int timeout;
15817 
15818 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15819 		device_xname(sc->sc_dev), __func__));
15820 	mutex_enter(sc->sc_ich_phymtx);
15821 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
15822 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15823 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
15824 			break;
15825 		delay(1000);
15826 	}
15827 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
15828 		device_printf(sc->sc_dev,
15829 		    "SW has already locked the resource\n");
15830 		goto out;
15831 	}
15832 
15833 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
15834 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15835 	for (timeout = 0; timeout < 1000; timeout++) {
15836 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15837 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
15838 			break;
15839 		delay(1000);
15840 	}
15841 	if (timeout >= 1000) {
15842 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
15843 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15844 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15845 		goto out;
15846 	}
15847 	return 0;
15848 
15849 out:
15850 	mutex_exit(sc->sc_ich_phymtx);
15851 	return -1;
15852 }
15853 
15854 static void
15855 wm_put_swflag_ich8lan(struct wm_softc *sc)
15856 {
15857 	uint32_t ext_ctrl;
15858 
15859 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15860 		device_xname(sc->sc_dev), __func__));
15861 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
15862 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
15863 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15864 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
15865 	} else
15866 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
15867 
15868 	mutex_exit(sc->sc_ich_phymtx);
15869 }
15870 
15871 static int
15872 wm_get_nvm_ich8lan(struct wm_softc *sc)
15873 {
15874 
15875 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15876 		device_xname(sc->sc_dev), __func__));
15877 	mutex_enter(sc->sc_ich_nvmmtx);
15878 
15879 	return 0;
15880 }
15881 
15882 static void
15883 wm_put_nvm_ich8lan(struct wm_softc *sc)
15884 {
15885 
15886 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15887 		device_xname(sc->sc_dev), __func__));
15888 	mutex_exit(sc->sc_ich_nvmmtx);
15889 }
15890 
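/*
 * The 82573 uses the same EXTCNFCTR MDIO ownership bit, polled for
 * up to WM_MDIO_OWNERSHIP_TIMEOUT iterations with a 2ms delay.
 */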
15891 static int
15892 wm_get_hw_semaphore_82573(struct wm_softc *sc)
15893 {
15894 	int i = 0;
15895 	uint32_t reg;
15896 
15897 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15898 		device_xname(sc->sc_dev), __func__));
15899 
15900 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15901 	do {
15902 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
15903 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
15904 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15905 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
15906 			break;
15907 		delay(2*1000);
15908 		i++;
15909 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
15910 
15911 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
15912 		wm_put_hw_semaphore_82573(sc);
15913 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
15914 		    device_xname(sc->sc_dev));
15915 		return -1;
15916 	}
15917 
15918 	return 0;
15919 }
15920 
15921 static void
15922 wm_put_hw_semaphore_82573(struct wm_softc *sc)
15923 {
15924 	uint32_t reg;
15925 
15926 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15927 		device_xname(sc->sc_dev), __func__));
15928 
15929 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15930 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
15931 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
15932 }
15933 
15934 /*
15935  * Management mode and power management related subroutines.
15936  * BMC, AMT, suspend/resume and EEE.
15937  */
15938 
15939 #ifdef WM_WOL
15940 static int
15941 wm_check_mng_mode(struct wm_softc *sc)
15942 {
15943 	int rv;
15944 
15945 	switch (sc->sc_type) {
15946 	case WM_T_ICH8:
15947 	case WM_T_ICH9:
15948 	case WM_T_ICH10:
15949 	case WM_T_PCH:
15950 	case WM_T_PCH2:
15951 	case WM_T_PCH_LPT:
15952 	case WM_T_PCH_SPT:
15953 	case WM_T_PCH_CNP:
15954 	case WM_T_PCH_TGP:
15955 		rv = wm_check_mng_mode_ich8lan(sc);
15956 		break;
15957 	case WM_T_82574:
15958 	case WM_T_82583:
15959 		rv = wm_check_mng_mode_82574(sc);
15960 		break;
15961 	case WM_T_82571:
15962 	case WM_T_82572:
15963 	case WM_T_82573:
15964 	case WM_T_80003:
15965 		rv = wm_check_mng_mode_generic(sc);
15966 		break;
15967 	default:
15968 		/* Nothing to do */
15969 		rv = 0;
15970 		break;
15971 	}
15972 
15973 	return rv;
15974 }
15975 
15976 static int
15977 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
15978 {
15979 	uint32_t fwsm;
15980 
15981 	fwsm = CSR_READ(sc, WMREG_FWSM);
15982 
15983 	if (((fwsm & FWSM_FW_VALID) != 0)
15984 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
15985 		return 1;
15986 
15987 	return 0;
15988 }
15989 
15990 static int
15991 wm_check_mng_mode_82574(struct wm_softc *sc)
15992 {
15993 	uint16_t data;
15994 
15995 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
15996 
15997 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
15998 		return 1;
15999 
16000 	return 0;
16001 }
16002 
16003 static int
16004 wm_check_mng_mode_generic(struct wm_softc *sc)
16005 {
16006 	uint32_t fwsm;
16007 
16008 	fwsm = CSR_READ(sc, WMREG_FWSM);
16009 
16010 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
16011 		return 1;
16012 
16013 	return 0;
16014 }
16015 #endif /* WM_WOL */
16016 
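/*
 * Return 1 if the firmware expects management (TCO) packets to be
 * passed to the host, 0 otherwise.  The result is used to set
 * WM_F_HAS_MANAGE in wm_get_wakeup().
 */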
16017 static int
16018 wm_enable_mng_pass_thru(struct wm_softc *sc)
16019 {
16020 	uint32_t manc, fwsm, factps;
16021 
16022 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
16023 		return 0;
16024 
16025 	manc = CSR_READ(sc, WMREG_MANC);
16026 
16027 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
16028 		device_xname(sc->sc_dev), manc));
16029 	if ((manc & MANC_RECV_TCO_EN) == 0)
16030 		return 0;
16031 
16032 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
16033 		fwsm = CSR_READ(sc, WMREG_FWSM);
16034 		factps = CSR_READ(sc, WMREG_FACTPS);
16035 		if (((factps & FACTPS_MNGCG) == 0)
16036 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
16037 			return 1;
16038 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
16039 		uint16_t data;
16040 
16041 		factps = CSR_READ(sc, WMREG_FACTPS);
16042 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
16043 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
16044 			device_xname(sc->sc_dev), factps, data));
16045 		if (((factps & FACTPS_MNGCG) == 0)
16046 		    && ((data & NVM_CFG2_MNGM_MASK)
16047 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
16048 			return 1;
16049 	} else if (((manc & MANC_SMBUS_EN) != 0)
16050 	    && ((manc & MANC_ASF_EN) == 0))
16051 		return 1;
16052 
16053 	return 0;
16054 }
16055 
16056 static bool
16057 wm_phy_resetisblocked(struct wm_softc *sc)
16058 {
16059 	bool blocked = false;
16060 	uint32_t reg;
16061 	int i = 0;
16062 
16063 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16064 		device_xname(sc->sc_dev), __func__));
16065 
16066 	switch (sc->sc_type) {
16067 	case WM_T_ICH8:
16068 	case WM_T_ICH9:
16069 	case WM_T_ICH10:
16070 	case WM_T_PCH:
16071 	case WM_T_PCH2:
16072 	case WM_T_PCH_LPT:
16073 	case WM_T_PCH_SPT:
16074 	case WM_T_PCH_CNP:
16075 	case WM_T_PCH_TGP:
16076 		do {
16077 			reg = CSR_READ(sc, WMREG_FWSM);
16078 			if ((reg & FWSM_RSPCIPHY) == 0) {
16079 				blocked = true;
16080 				delay(10*1000);
16081 				continue;
16082 			}
16083 			blocked = false;
16084 		} while (blocked && (i++ < 30));
16085 		return blocked;
16086 		break;
16087 	case WM_T_82571:
16088 	case WM_T_82572:
16089 	case WM_T_82573:
16090 	case WM_T_82574:
16091 	case WM_T_82583:
16092 	case WM_T_80003:
16093 		reg = CSR_READ(sc, WMREG_MANC);
16094 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
16095 			return true;
16096 		else
16097 			return false;
16098 		break;
16099 	default:
16100 		/* No problem */
16101 		break;
16102 	}
16103 
16104 	return false;
16105 }
16106 
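/*
 * The DRV_LOAD bit tells the management firmware that the driver has
 * taken control of the device; wm_release_hw_control() clears it.
 */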
16107 static void
16108 wm_get_hw_control(struct wm_softc *sc)
16109 {
16110 	uint32_t reg;
16111 
16112 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
16113 		device_xname(sc->sc_dev), __func__));
16114 
16115 	if (sc->sc_type == WM_T_82573) {
16116 		reg = CSR_READ(sc, WMREG_SWSM);
16117 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
16118 	} else if (sc->sc_type >= WM_T_82571) {
16119 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
16120 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
16121 	}
16122 }
16123 
16124 static void
16125 wm_release_hw_control(struct wm_softc *sc)
16126 {
16127 	uint32_t reg;
16128 
16129 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
16130 		device_xname(sc->sc_dev), __func__));
16131 
16132 	if (sc->sc_type == WM_T_82573) {
16133 		reg = CSR_READ(sc, WMREG_SWSM);
16134 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
16135 	} else if (sc->sc_type >= WM_T_82571) {
16136 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
16137 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
16138 	}
16139 }
16140 
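/*
 * Gate or ungate the automatic PHY configuration performed by the
 * hardware; only meaningful on PCH2 (82579) and newer.
 */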
16141 static void
16142 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
16143 {
16144 	uint32_t reg;
16145 
16146 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16147 		device_xname(sc->sc_dev), __func__));
16148 
16149 	if (sc->sc_type < WM_T_PCH2)
16150 		return;
16151 
16152 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
16153 
16154 	if (gate)
16155 		reg |= EXTCNFCTR_GATE_PHY_CFG;
16156 	else
16157 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
16158 
16159 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
16160 }
16161 
16162 static int
16163 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
16164 {
16165 	uint32_t fwsm, reg;
16166 	int rv;
16167 
16168 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16169 		device_xname(sc->sc_dev), __func__));
16170 
16171 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
16172 	wm_gate_hw_phy_config_ich8lan(sc, true);
16173 
16174 	/* Disable ULP */
16175 	wm_ulp_disable(sc);
16176 
16177 	/* Acquire PHY semaphore */
16178 	rv = sc->phy.acquire(sc);
16179 	if (rv != 0) {
16180 		DPRINTF(sc, WM_DEBUG_INIT,
16181 		    ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
16182 		return rv;
16183 	}
16184 
16185 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
16186 	 * inaccessible and resetting the PHY is not blocked, toggle the
16187 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
16188 	 */
16189 	fwsm = CSR_READ(sc, WMREG_FWSM);
16190 	switch (sc->sc_type) {
16191 	case WM_T_PCH_LPT:
16192 	case WM_T_PCH_SPT:
16193 	case WM_T_PCH_CNP:
16194 	case WM_T_PCH_TGP:
16195 		if (wm_phy_is_accessible_pchlan(sc))
16196 			break;
16197 
16198 		/* Before toggling LANPHYPC, see if PHY is accessible by
16199 		 * forcing MAC to SMBus mode first.
16200 		 */
16201 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
16202 		reg |= CTRL_EXT_FORCE_SMBUS;
16203 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
16204 #if 0
16205 		/* XXX Isn't this required??? */
16206 		CSR_WRITE_FLUSH(sc);
16207 #endif
16208 		/* Wait 50 milliseconds for MAC to finish any retries
16209 		 * that it might be trying to perform from previous
16210 		 * attempts to acknowledge any phy read requests.
16211 		 */
16212 		delay(50 * 1000);
16213 		/* FALLTHROUGH */
16214 	case WM_T_PCH2:
16215 		if (wm_phy_is_accessible_pchlan(sc) == true)
16216 			break;
16217 		/* FALLTHROUGH */
16218 	case WM_T_PCH:
16219 		if (sc->sc_type == WM_T_PCH)
16220 			if ((fwsm & FWSM_FW_VALID) != 0)
16221 				break;
16222 
16223 		if (wm_phy_resetisblocked(sc) == true) {
16224 			device_printf(sc->sc_dev, "XXX reset is blocked(2)\n");
16225 			break;
16226 		}
16227 
16228 		/* Toggle LANPHYPC Value bit */
16229 		wm_toggle_lanphypc_pch_lpt(sc);
16230 
16231 		if (sc->sc_type >= WM_T_PCH_LPT) {
16232 			if (wm_phy_is_accessible_pchlan(sc) == true)
16233 				break;
16234 
16235 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
16236 			 * so ensure that the MAC is also out of SMBus mode
16237 			 */
16238 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
16239 			reg &= ~CTRL_EXT_FORCE_SMBUS;
16240 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
16241 
16242 			if (wm_phy_is_accessible_pchlan(sc) == true)
16243 				break;
16244 			rv = -1;
16245 		}
16246 		break;
16247 	default:
16248 		break;
16249 	}
16250 
16251 	/* Release semaphore */
16252 	sc->phy.release(sc);
16253 
16254 	if (rv == 0) {
16255 		/* Check whether the PHY can be reset; print an error if not */
16256 		if (wm_phy_resetisblocked(sc)) {
16257 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
16258 			goto out;
16259 		}
16260 
16261 		/* Reset the PHY before any access to it.  Doing so, ensures
16262 		 * that the PHY is in a known good state before we read/write
16263 		 * PHY registers.  The generic reset is sufficient here,
16264 		 * because we haven't determined the PHY type yet.
16265 		 */
16266 		if (wm_reset_phy(sc) != 0)
16267 			goto out;
16268 
16269 		/* On a successful reset, possibly need to wait for the PHY
16270 		 * to quiesce to an accessible state before returning control
16271 		 * to the calling function.  If the PHY does not quiesce, then
16272 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
16273 		 *  the PHY is in.
16274 		 */
16275 		if (wm_phy_resetisblocked(sc))
16276 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
16277 	}
16278 
16279 out:
16280 	/* Ungate automatic PHY configuration on non-managed 82579 */
16281 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
16282 		delay(10*1000);
16283 		wm_gate_hw_phy_config_ich8lan(sc, false);
16284 	}
16285 
16286 	return 0;
16287 }
16288 
16289 static void
16290 wm_init_manageability(struct wm_softc *sc)
16291 {
16292 
16293 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16294 		device_xname(sc->sc_dev), __func__));
16295 	KASSERT(IFNET_LOCKED(&sc->sc_ethercom.ec_if));
16296 
16297 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
16298 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
16299 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
16300 
16301 		/* Disable hardware interception of ARP */
16302 		manc &= ~MANC_ARP_EN;
16303 
16304 		/* Enable receiving management packets to the host */
16305 		if (sc->sc_type >= WM_T_82571) {
16306 			manc |= MANC_EN_MNG2HOST;
16307 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
16308 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
16309 		}
16310 
16311 		CSR_WRITE(sc, WMREG_MANC, manc);
16312 	}
16313 }
16314 
16315 static void
16316 wm_release_manageability(struct wm_softc *sc)
16317 {
16318 
16319 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
16320 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
16321 
16322 		manc |= MANC_ARP_EN;
16323 		if (sc->sc_type >= WM_T_82571)
16324 			manc &= ~MANC_EN_MNG2HOST;
16325 
16326 		CSR_WRITE(sc, WMREG_MANC, manc);
16327 	}
16328 }
16329 
16330 static void
16331 wm_get_wakeup(struct wm_softc *sc)
16332 {
16333 
16334 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
16335 	switch (sc->sc_type) {
16336 	case WM_T_82573:
16337 	case WM_T_82583:
16338 		sc->sc_flags |= WM_F_HAS_AMT;
16339 		/* FALLTHROUGH */
16340 	case WM_T_80003:
16341 	case WM_T_82575:
16342 	case WM_T_82576:
16343 	case WM_T_82580:
16344 	case WM_T_I350:
16345 	case WM_T_I354:
16346 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
16347 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
16348 		/* FALLTHROUGH */
16349 	case WM_T_82541:
16350 	case WM_T_82541_2:
16351 	case WM_T_82547:
16352 	case WM_T_82547_2:
16353 	case WM_T_82571:
16354 	case WM_T_82572:
16355 	case WM_T_82574:
16356 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
16357 		break;
16358 	case WM_T_ICH8:
16359 	case WM_T_ICH9:
16360 	case WM_T_ICH10:
16361 	case WM_T_PCH:
16362 	case WM_T_PCH2:
16363 	case WM_T_PCH_LPT:
16364 	case WM_T_PCH_SPT:
16365 	case WM_T_PCH_CNP:
16366 	case WM_T_PCH_TGP:
16367 		sc->sc_flags |= WM_F_HAS_AMT;
16368 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
16369 		break;
16370 	default:
16371 		break;
16372 	}
16373 
16374 	/* 1: HAS_MANAGE */
16375 	if (wm_enable_mng_pass_thru(sc) != 0)
16376 		sc->sc_flags |= WM_F_HAS_MANAGE;
16377 
16378 	/*
16379 	 * Note that the WOL flag is set after the EEPROM settings have
16380 	 * been reset.
16381 	 */
16382 }
16383 
16384 /*
16385  * Unconfigure Ultra Low Power mode.
16386  * Only for I217 and newer (see below).
16387  */
16388 static int
16389 wm_ulp_disable(struct wm_softc *sc)
16390 {
16391 	uint32_t reg;
16392 	uint16_t phyreg;
16393 	int i = 0, rv;
16394 
16395 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16396 		device_xname(sc->sc_dev), __func__));
16397 	/* Exclude old devices */
16398 	if ((sc->sc_type < WM_T_PCH_LPT)
16399 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
16400 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
16401 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
16402 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
16403 		return 0;
16404 
16405 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
16406 		/* Request ME un-configure ULP mode in the PHY */
16407 		reg = CSR_READ(sc, WMREG_H2ME);
16408 		reg &= ~H2ME_ULP;
16409 		reg |= H2ME_ENFORCE_SETTINGS;
16410 		CSR_WRITE(sc, WMREG_H2ME, reg);
16411 
16412 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
16413 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
16414 			if (i++ == 30) {
16415 				device_printf(sc->sc_dev, "%s timed out\n",
16416 				    __func__);
16417 				return -1;
16418 			}
16419 			delay(10 * 1000);
16420 		}
16421 		reg = CSR_READ(sc, WMREG_H2ME);
16422 		reg &= ~H2ME_ENFORCE_SETTINGS;
16423 		CSR_WRITE(sc, WMREG_H2ME, reg);
16424 
16425 		return 0;
16426 	}
16427 
16428 	/* Acquire semaphore */
16429 	rv = sc->phy.acquire(sc);
16430 	if (rv != 0) {
16431 		DPRINTF(sc, WM_DEBUG_INIT,
16432 		    ("%s: %s: failed\n", device_xname(sc->sc_dev), __func__));
16433 		return rv;
16434 	}
16435 
16436 	/* Toggle LANPHYPC */
16437 	wm_toggle_lanphypc_pch_lpt(sc);
16438 
16439 	/* Unforce SMBus mode in PHY */
16440 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
16441 	if (rv != 0) {
16442 		uint32_t reg2;
16443 
16444 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
16445 		    __func__);
16446 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
16447 		reg2 |= CTRL_EXT_FORCE_SMBUS;
16448 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
16449 		delay(50 * 1000);
16450 
16451 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
16452 		    &phyreg);
16453 		if (rv != 0)
16454 			goto release;
16455 	}
16456 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
16457 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
16458 
16459 	/* Unforce SMBus mode in MAC */
16460 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
16461 	reg &= ~CTRL_EXT_FORCE_SMBUS;
16462 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
16463 
16464 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
16465 	if (rv != 0)
16466 		goto release;
16467 	phyreg |= HV_PM_CTRL_K1_ENA;
16468 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
16469 
16470 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
16471 	    &phyreg);
16472 	if (rv != 0)
16473 		goto release;
16474 	phyreg &= ~(I218_ULP_CONFIG1_IND
16475 	    | I218_ULP_CONFIG1_STICKY_ULP
16476 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
16477 	    | I218_ULP_CONFIG1_WOL_HOST
16478 	    | I218_ULP_CONFIG1_INBAND_EXIT
16479 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
16480 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
16481 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
16482 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
16483 	phyreg |= I218_ULP_CONFIG1_START;
16484 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
16485 
16486 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
16487 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
16488 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
16489 
16490 release:
16491 	/* Release semaphore */
16492 	sc->phy.release(sc);
16493 	wm_gmii_reset(sc);
16494 	delay(50 * 1000);
16495 
16496 	return rv;
16497 }
16498 
16499 /* WOL in the newer chipset interfaces (pchlan) */
16500 static int
16501 wm_enable_phy_wakeup(struct wm_softc *sc)
16502 {
16503 	device_t dev = sc->sc_dev;
16504 	uint32_t mreg, moff;
16505 	uint16_t wuce, wuc, wufc, preg;
16506 	int i, rv;
16507 
16508 	KASSERT(sc->sc_type >= WM_T_PCH);
16509 
16510 	/* Copy MAC RARs to PHY RARs */
16511 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
16512 
16513 	/* Activate PHY wakeup */
16514 	rv = sc->phy.acquire(sc);
16515 	if (rv != 0) {
16516 		device_printf(dev, "%s: failed to acquire semaphore\n",
16517 		    __func__);
16518 		return rv;
16519 	}
16520 
16521 	/*
16522 	 * Enable access to PHY wakeup registers.
16523 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
16524 	 */
16525 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
16526 	if (rv != 0) {
16527 		device_printf(dev,
16528 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
16529 		goto release;
16530 	}
16531 
16532 	/* Copy MAC MTA to PHY MTA */
16533 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
16534 		uint16_t lo, hi;
16535 
16536 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
16537 		lo = (uint16_t)(mreg & 0xffff);
16538 		hi = (uint16_t)((mreg >> 16) & 0xffff);
16539 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
16540 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
16541 	}
16542 
16543 	/* Configure PHY Rx Control register */
16544 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
16545 	mreg = CSR_READ(sc, WMREG_RCTL);
16546 	if (mreg & RCTL_UPE)
16547 		preg |= BM_RCTL_UPE;
16548 	if (mreg & RCTL_MPE)
16549 		preg |= BM_RCTL_MPE;
16550 	preg &= ~(BM_RCTL_MO_MASK);
16551 	moff = __SHIFTOUT(mreg, RCTL_MO);
16552 	if (moff != 0)
16553 		preg |= moff << BM_RCTL_MO_SHIFT;
16554 	if (mreg & RCTL_BAM)
16555 		preg |= BM_RCTL_BAM;
16556 	if (mreg & RCTL_PMCF)
16557 		preg |= BM_RCTL_PMCF;
16558 	mreg = CSR_READ(sc, WMREG_CTRL);
16559 	if (mreg & CTRL_RFCE)
16560 		preg |= BM_RCTL_RFCE;
16561 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
16562 
16563 	wuc = WUC_APME | WUC_PME_EN;
16564 	wufc = WUFC_MAG;
16565 	/* Enable PHY wakeup in MAC register */
16566 	CSR_WRITE(sc, WMREG_WUC,
16567 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
16568 	CSR_WRITE(sc, WMREG_WUFC, wufc);
16569 
16570 	/* Configure and enable PHY wakeup in PHY registers */
16571 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
16572 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
16573 
16574 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
16575 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
16576 
16577 release:
16578 	sc->phy.release(sc);
16579 
16580 	return rv;
16581 }
16582 
16583 /* Power down workaround on D3 */
16584 static void
16585 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
16586 {
16587 	uint32_t reg;
16588 	uint16_t phyreg;
16589 	int i;
16590 
16591 	for (i = 0; i < 2; i++) {
16592 		/* Disable link */
16593 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
16594 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
16595 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
16596 
16597 		/*
16598 		 * Call gig speed drop workaround on Gig disable before
16599 		 * accessing any PHY registers
16600 		 */
16601 		if (sc->sc_type == WM_T_ICH8)
16602 			wm_gig_downshift_workaround_ich8lan(sc);
16603 
16604 		/* Write VR power-down enable */
16605 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
16606 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
16607 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
16608 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
16609 
16610 		/* Read it back and test */
16611 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
16612 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
16613 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
16614 			break;
16615 
16616 		/* Issue PHY reset and repeat at most one more time */
16617 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
16618 	}
16619 }
16620 
16621 /*
16622  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
16623  *  @sc: pointer to the HW structure
16624  *
16625  *  During S0 to Sx transition, it is possible the link remains at gig
16626  *  instead of negotiating to a lower speed.  Before going to Sx, set
16627  *  'Gig Disable' to force link speed negotiation to a lower speed based on
16628  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
16629  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
16630  *  needs to be written.
16631  *  Parts that support (and are linked to a partner which support) EEE in
16632  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
16633  *  than 10Mbps w/o EEE.
16634  */
16635 static void
16636 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
16637 {
16638 	device_t dev = sc->sc_dev;
16639 	struct ethercom *ec = &sc->sc_ethercom;
16640 	uint32_t phy_ctrl;
16641 	int rv;
16642 
16643 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
16644 	phy_ctrl |= PHY_CTRL_GBE_DIS;
16645 
16646 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_TGP));
16647 
16648 	if (sc->sc_phytype == WMPHY_I217) {
16649 		uint16_t devid = sc->sc_pcidevid;
16650 
16651 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
16652 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
16653 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
16654 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
16655 		    (sc->sc_type >= WM_T_PCH_SPT))
16656 			CSR_WRITE(sc, WMREG_FEXTNVM6,
16657 			    CSR_READ(sc, WMREG_FEXTNVM6)
16658 			    & ~FEXTNVM6_REQ_PLL_CLK);
16659 
16660 		if (sc->phy.acquire(sc) != 0)
16661 			goto out;
16662 
16663 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
16664 			uint16_t eee_advert;
16665 
16666 			rv = wm_read_emi_reg_locked(dev,
16667 			    I217_EEE_ADVERTISEMENT, &eee_advert);
16668 			if (rv)
16669 				goto release;
16670 
16671 			/*
16672 			 * Disable LPLU if both link partners support 100BaseT
16673 			 * EEE and 100Full is advertised on both ends of the
16674 			 * link, and enable Auto Enable LPI since there will
16675 			 * be no driver to enable LPI while in Sx.
16676 			 */
16677 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
16678 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
16679 				uint16_t anar, phy_reg;
16680 
16681 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
16682 				    &anar);
16683 				if (anar & ANAR_TX_FD) {
16684 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
16685 					    PHY_CTRL_NOND0A_LPLU);
16686 
16687 					/* Set Auto Enable LPI after link up */
16688 					sc->phy.readreg_locked(dev, 2,
16689 					    I217_LPI_GPIO_CTRL, &phy_reg);
16690 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
16691 					sc->phy.writereg_locked(dev, 2,
16692 					    I217_LPI_GPIO_CTRL, phy_reg);
16693 				}
16694 			}
16695 		}
16696 
16697 		/*
16698 		 * For i217 Intel Rapid Start Technology support,
16699 		 * when the system is going into Sx and no manageability engine
16700 		 * is present, the driver must configure proxy to reset only on
16701 		 * power good.	LPI (Low Power Idle) state must also reset only
16702 		 * on power good, as well as the MTA (Multicast table array).
16703 		 * The SMBus release must also be disabled on LCD reset.
16704 		 */
16705 
16706 		/*
16707 		 * Enable MTA to reset for Intel Rapid Start Technology
16708 		 * Support
16709 		 */
16710 
16711 release:
16712 		sc->phy.release(sc);
16713 	}
16714 out:
16715 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
16716 
16717 	if (sc->sc_type == WM_T_ICH8)
16718 		wm_gig_downshift_workaround_ich8lan(sc);
16719 
16720 	if (sc->sc_type >= WM_T_PCH) {
16721 		wm_oem_bits_config_ich8lan(sc, false);
16722 
16723 		/* Reset PHY to activate OEM bits on 82577/8 */
16724 		if (sc->sc_type == WM_T_PCH)
16725 			wm_reset_phy(sc);
16726 
16727 		if (sc->phy.acquire(sc) != 0)
16728 			return;
16729 		wm_write_smbus_addr(sc);
16730 		sc->phy.release(sc);
16731 	}
16732 }
16733 
16734 /*
16735  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
16736  *  @sc: pointer to the HW structure
16737  *
16738  *  During Sx to S0 transitions on non-managed devices or managed devices
16739  *  on which PHY resets are not blocked, if the PHY registers cannot be
16740  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
16741  *  the PHY.
16742  *  On i217, setup Intel Rapid Start Technology.
16743  */
16744 static int
16745 wm_resume_workarounds_pchlan(struct wm_softc *sc)
16746 {
16747 	device_t dev = sc->sc_dev;
16748 	int rv;
16749 
16750 	if (sc->sc_type < WM_T_PCH2)
16751 		return 0;
16752 
16753 	rv = wm_init_phy_workarounds_pchlan(sc);
16754 	if (rv != 0)
16755 		return rv;
16756 
16757 	/* For i217 Intel Rapid Start Technology support when the system
16758 	 * is transitioning from Sx and no manageability engine is present
16759 	 * configure SMBus to restore on reset, disable proxy, and enable
16760 	 * the reset on MTA (Multicast table array).
16761 	 */
16762 	if (sc->sc_phytype == WMPHY_I217) {
16763 		uint16_t phy_reg;
16764 
16765 		rv = sc->phy.acquire(sc);
16766 		if (rv != 0)
16767 			return rv;
16768 
16769 		/* Clear Auto Enable LPI after link up */
16770 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
16771 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
16772 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
16773 
16774 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
16775 			/* Restore clear on SMB if no manageability engine
16776 			 * is present
16777 			 */
16778 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
16779 			    &phy_reg);
16780 			if (rv != 0)
16781 				goto release;
16782 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
16783 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
16784 
16785 			/* Disable Proxy */
16786 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
16787 		}
16788 		/* Enable reset on MTA */
16789 		sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
16790 		if (rv != 0)
16791 			goto release;
16792 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
16793 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
16794 
16795 release:
16796 		sc->phy.release(sc);
16797 		return rv;
16798 	}
16799 
16800 	return 0;
16801 }
16802 
16803 static void
16804 wm_enable_wakeup(struct wm_softc *sc)
16805 {
16806 	uint32_t reg, pmreg;
16807 	pcireg_t pmode;
16808 	int rv = 0;
16809 
16810 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16811 		device_xname(sc->sc_dev), __func__));
16812 
16813 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
16814 	    &pmreg, NULL) == 0)
16815 		return;
16816 
16817 	if ((sc->sc_flags & WM_F_WOL) == 0)
16818 		goto pme;
16819 
16820 	/* Advertise the wakeup capability */
16821 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
16822 	    | CTRL_SWDPIN(3));
16823 
16824 	/* Keep the laser running on fiber adapters */
16825 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
16826 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
16827 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
16828 		reg |= CTRL_EXT_SWDPIN(3);
16829 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
16830 	}
16831 
16832 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
16833 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
16834 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
16835 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP) ||
16836 	    (sc->sc_type == WM_T_PCH_TGP))
16837 		wm_suspend_workarounds_ich8lan(sc);
16838 
16839 #if 0	/* For the multicast packet */
16840 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
16841 	reg |= WUFC_MC;
16842 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
16843 #endif
16844 
16845 	if (sc->sc_type >= WM_T_PCH) {
16846 		rv = wm_enable_phy_wakeup(sc);
16847 		if (rv != 0)
16848 			goto pme;
16849 	} else {
16850 		/* Enable wakeup by the MAC */
16851 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
16852 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
16853 	}
16854 
16855 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
16856 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
16857 		|| (sc->sc_type == WM_T_PCH2))
16858 	    && (sc->sc_phytype == WMPHY_IGP_3))
16859 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
16860 
16861 pme:
16862 	/* Request PME */
16863 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
16864 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
16865 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
16866 		/* For WOL */
16867 		pmode |= PCI_PMCSR_PME_EN;
16868 	} else {
16869 		/* Disable WOL */
16870 		pmode &= ~PCI_PMCSR_PME_EN;
16871 	}
16872 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
16873 }
16874 
16875 /* Disable ASPM L0s and/or L1 for workaround */
16876 static void
16877 wm_disable_aspm(struct wm_softc *sc)
16878 {
16879 	pcireg_t reg, mask = 0;
16880 	const char *str = "";
16881 
16882 	/*
16883 	 * Only for PCIe devices which have the PCIe capability in the PCI
16884 	 * config space.
16885 	 */
16886 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
16887 		return;
16888 
16889 	switch (sc->sc_type) {
16890 	case WM_T_82571:
16891 	case WM_T_82572:
16892 		/*
16893 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
16894 		 * State Power management L1 State (ASPM L1).
16895 		 */
16896 		mask = PCIE_LCSR_ASPM_L1;
16897 		str = "L1 is";
16898 		break;
16899 	case WM_T_82573:
16900 	case WM_T_82574:
16901 	case WM_T_82583:
16902 		/*
16903 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
16904 		 *
16905 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
16906 		 * some chipsets.  The 82574 and 82583 documents say that
16907 		 * disabling L0s with those specific chipsets is sufficient,
16908 		 * but we follow what the Intel em driver does.
16909 		 *
16910 		 * References:
16911 		 * Errata 8 of the Specification Update of i82573.
16912 		 * Errata 20 of the Specification Update of i82574.
16913 		 * Errata 9 of the Specification Update of i82583.
16914 		 */
16915 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
16916 		str = "L0s and L1 are";
16917 		break;
16918 	default:
16919 		return;
16920 	}
16921 
16922 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
16923 	    sc->sc_pcixe_capoff + PCIE_LCSR);
16924 	reg &= ~mask;
16925 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
16926 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
16927 
16928 	/* Print only in wm_attach() */
16929 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
16930 		aprint_verbose_dev(sc->sc_dev,
16931 		    "ASPM %s disabled to workaround the errata.\n", str);
16932 }
16933 
16934 /* LPLU */
16935 
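/*
 * LPLU (Low Power Link Up) makes the PHY negotiate the lowest
 * usable link speed to save power.  This routine disables it for
 * the D0 (fully powered) state; the controlling register differs
 * per generation (PHY register, PHPM or PHY_CTRL).
 */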
16936 static void
16937 wm_lplu_d0_disable(struct wm_softc *sc)
16938 {
16939 	struct mii_data *mii = &sc->sc_mii;
16940 	uint32_t reg;
16941 	uint16_t phyval;
16942 
16943 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16944 		device_xname(sc->sc_dev), __func__));
16945 
16946 	if (sc->sc_phytype == WMPHY_IFE)
16947 		return;
16948 
16949 	switch (sc->sc_type) {
16950 	case WM_T_82571:
16951 	case WM_T_82572:
16952 	case WM_T_82573:
16953 	case WM_T_82575:
16954 	case WM_T_82576:
16955 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
16956 		phyval &= ~PMR_D0_LPLU;
16957 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
16958 		break;
16959 	case WM_T_82580:
16960 	case WM_T_I350:
16961 	case WM_T_I210:
16962 	case WM_T_I211:
16963 		reg = CSR_READ(sc, WMREG_PHPM);
16964 		reg &= ~PHPM_D0A_LPLU;
16965 		CSR_WRITE(sc, WMREG_PHPM, reg);
16966 		break;
16967 	case WM_T_82574:
16968 	case WM_T_82583:
16969 	case WM_T_ICH8:
16970 	case WM_T_ICH9:
16971 	case WM_T_ICH10:
16972 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
16973 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
16974 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
16975 		CSR_WRITE_FLUSH(sc);
16976 		break;
16977 	case WM_T_PCH:
16978 	case WM_T_PCH2:
16979 	case WM_T_PCH_LPT:
16980 	case WM_T_PCH_SPT:
16981 	case WM_T_PCH_CNP:
16982 	case WM_T_PCH_TGP:
16983 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
16984 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
16985 		if (wm_phy_resetisblocked(sc) == false)
16986 			phyval |= HV_OEM_BITS_ANEGNOW;
16987 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
16988 		break;
16989 	default:
16990 		break;
16991 	}
16992 }
16993 
16994 /* EEE */
16995 
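/*
 * EEE (Energy Efficient Ethernet, IEEE 802.3az) lets both link
 * partners enter a low power idle state between packets.  On
 * I350-class devices it is controlled by the IPCNFG/EEER MAC
 * registers; on PCH2 and newer it is negotiated via PHY/EMI
 * registers.
 */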
16996 static int
16997 wm_set_eee_i350(struct wm_softc *sc)
16998 {
16999 	struct ethercom *ec = &sc->sc_ethercom;
17000 	uint32_t ipcnfg, eeer;
17001 	uint32_t ipcnfg_mask
17002 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
17003 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
17004 
17005 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
17006 
17007 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
17008 	eeer = CSR_READ(sc, WMREG_EEER);
17009 
17010 	/* Enable or disable per user setting */
17011 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
17012 		ipcnfg |= ipcnfg_mask;
17013 		eeer |= eeer_mask;
17014 	} else {
17015 		ipcnfg &= ~ipcnfg_mask;
17016 		eeer &= ~eeer_mask;
17017 	}
17018 
17019 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
17020 	CSR_WRITE(sc, WMREG_EEER, eeer);
17021 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
17022 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
17023 
17024 	return 0;
17025 }
17026 
17027 static int
17028 wm_set_eee_pchlan(struct wm_softc *sc)
17029 {
17030 	device_t dev = sc->sc_dev;
17031 	struct ethercom *ec = &sc->sc_ethercom;
17032 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
17033 	int rv;
17034 
17035 	switch (sc->sc_phytype) {
17036 	case WMPHY_82579:
17037 		lpa = I82579_EEE_LP_ABILITY;
17038 		pcs_status = I82579_EEE_PCS_STATUS;
17039 		adv_addr = I82579_EEE_ADVERTISEMENT;
17040 		break;
17041 	case WMPHY_I217:
17042 		lpa = I217_EEE_LP_ABILITY;
17043 		pcs_status = I217_EEE_PCS_STATUS;
17044 		adv_addr = I217_EEE_ADVERTISEMENT;
17045 		break;
17046 	default:
17047 		return 0;
17048 	}
17049 
17050 	rv = sc->phy.acquire(sc);
17051 	if (rv != 0) {
17052 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
17053 		return rv;
17054 	}
17055 
17056 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
17057 	if (rv != 0)
17058 		goto release;
17059 
17060 	/* Clear bits that enable EEE in various speeds */
17061 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
17062 
17063 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
17064 		/* Save off link partner's EEE ability */
17065 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
17066 		if (rv != 0)
17067 			goto release;
17068 
17069 		/* Read EEE advertisement */
17070 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
17071 			goto release;
17072 
17073 		/*
17074 		 * Enable EEE only for speeds in which the link partner is
17075 		 * EEE capable and for which we advertise EEE.
17076 		 */
17077 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
17078 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
17079 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
17080 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
17081 			if ((data & ANLPAR_TX_FD) != 0)
17082 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
17083 			else {
17084 				/*
17085 				 * EEE is not supported in 100Half, so ignore
17086 				 * partner's EEE in 100 ability if full-duplex
17087 				 * is not advertised.
17088 				 */
17089 				sc->eee_lp_ability
17090 				    &= ~AN_EEEADVERT_100_TX;
17091 			}
17092 		}
17093 	}
17094 
17095 	if (sc->sc_phytype == WMPHY_82579) {
17096 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
17097 		if (rv != 0)
17098 			goto release;
17099 
17100 		data &= ~I82579_LPI_PLL_SHUT_100;
17101 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
17102 	}
17103 
17104 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
17105 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
17106 		goto release;
17107 
17108 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
17109 release:
17110 	sc->phy.release(sc);
17111 
17112 	return rv;
17113 }
17114 
17115 static int
17116 wm_set_eee(struct wm_softc *sc)
17117 {
17118 	struct ethercom *ec = &sc->sc_ethercom;
17119 
17120 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
17121 		return 0;
17122 
17123 	if (sc->sc_type == WM_T_I354) {
17124 		/* I354 uses an external PHY */
17125 		return 0; /* not yet */
17126 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
17127 		return wm_set_eee_i350(sc);
17128 	else if (sc->sc_type >= WM_T_PCH2)
17129 		return wm_set_eee_pchlan(sc);
17130 
17131 	return 0;
17132 }
17133 
17134 /*
17135  * Workarounds (mainly PHY related).
17136  * Basically, PHY workarounds are implemented in the PHY drivers.
17137  */
17138 
17139 /* Workaround for 82566 Kumeran PCS lock loss */
17140 static int
17141 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
17142 {
17143 	struct mii_data *mii = &sc->sc_mii;
17144 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
17145 	int i, reg, rv;
17146 	uint16_t phyreg;
17147 
17148 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17149 		device_xname(sc->sc_dev), __func__));
17150 
17151 	/* If the link is not up, do nothing */
17152 	if ((status & STATUS_LU) == 0)
17153 		return 0;
17154 
17155 	/* Nothing to do if the link is other than 1Gbps */
17156 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
17157 		return 0;
17158 
17159 	for (i = 0; i < 10; i++) {
17160 		/* read twice */
17161 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
17162 		if (rv != 0)
17163 			return rv;
17164 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
17165 		if (rv != 0)
17166 			return rv;
17167 
17168 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
17169 			goto out;	/* GOOD! */
17170 
17171 		/* Reset the PHY */
17172 		wm_reset_phy(sc);
17173 		delay(5*1000);
17174 	}
17175 
17176 	/* Disable GigE link negotiation */
17177 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
17178 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
17179 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
17180 
17181 	/*
17182 	 * Call gig speed drop workaround on Gig disable before accessing
17183 	 * any PHY registers.
17184 	 */
17185 	wm_gig_downshift_workaround_ich8lan(sc);
17186 
17187 out:
17188 	return 0;
17189 }
17190 
17191 /*
17192  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
17193  *  @sc: pointer to the HW structure
17194  *
17195  *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
17196  *  LPLU, Gig disable, MDIC PHY reset):
17197  *    1) Set Kumeran Near-end loopback
17198  *    2) Clear Kumeran Near-end loopback
17199  *  Should only be called for ICH8[m] devices with any 1G Phy.
17200  */
17201 static void
17202 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
17203 {
17204 	uint16_t kmreg;
17205 
17206 	/* Only for igp3 */
17207 	if (sc->sc_phytype == WMPHY_IGP_3) {
17208 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
17209 			return;
17210 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
17211 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
17212 			return;
17213 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
17214 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
17215 	}
17216 }
17217 
17218 /*
17219  * Workaround for pch's PHYs
17220  * XXX should be moved to new PHY driver?
17221  */
17222 static int
17223 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
17224 {
17225 	device_t dev = sc->sc_dev;
17226 	struct mii_data *mii = &sc->sc_mii;
17227 	struct mii_softc *child;
17228 	uint16_t phy_data, phyrev = 0;
17229 	int phytype = sc->sc_phytype;
17230 	int rv;
17231 
17232 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17233 		device_xname(dev), __func__));
17234 	KASSERT(sc->sc_type == WM_T_PCH);
17235 
17236 	/* Set MDIO slow mode before any other MDIO access */
17237 	if (phytype == WMPHY_82577)
17238 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
17239 			return rv;
17240 
17241 	child = LIST_FIRST(&mii->mii_phys);
17242 	if (child != NULL)
17243 		phyrev = child->mii_mpd_rev;
17244 
17245 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
17246 	if ((child != NULL) &&
17247 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
17248 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
17249 		/* Disable generation of early preamble (0x4431) */
17250 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
17251 		    &phy_data);
17252 		if (rv != 0)
17253 			return rv;
17254 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
17255 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
17256 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
17257 		    phy_data);
17258 		if (rv != 0)
17259 			return rv;
17260 
17261 		/* Preamble tuning for SSC */
17262 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
17263 		if (rv != 0)
17264 			return rv;
17265 	}
17266 
17267 	/* 82578 */
17268 	if (phytype == WMPHY_82578) {
17269 		/*
17270 		 * Return registers to default by doing a soft reset then
17271 		 * writing 0x3140 to the control register
17272 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
17273 		 */
17274 		if ((child != NULL) && (phyrev < 2)) {
17275 			PHY_RESET(child);
17276 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
17277 			if (rv != 0)
17278 				return rv;
17279 		}
17280 	}
17281 
17282 	/* Select page 0 */
17283 	if ((rv = sc->phy.acquire(sc)) != 0)
17284 		return rv;
17285 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
17286 	sc->phy.release(sc);
17287 	if (rv != 0)
17288 		return rv;
17289 
17290 	/*
17291 	 * Configure the K1 Si workaround during phy reset assuming there is
17292 	 * link so that it disables K1 if link is in 1Gbps.
17293 	 */
17294 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
17295 		return rv;
17296 
17297 	/* Workaround for link disconnects on a busy hub in half duplex */
17298 	rv = sc->phy.acquire(sc);
17299 	if (rv)
17300 		return rv;
17301 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
17302 	if (rv)
17303 		goto release;
17304 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
17305 	    phy_data & 0x00ff);
17306 	if (rv)
17307 		goto release;
17308 
17309 	/* Set MSE higher to enable link to stay up when noise is high */
17310 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
17311 release:
17312 	sc->phy.release(sc);
17313 
17314 	return rv;
17315 }
17316 
17317 /*
17318  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
17319  *  @sc:   pointer to the HW structure
17320  */
17321 static void
17322 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
17323 {
17324 
17325 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17326 		device_xname(sc->sc_dev), __func__));
17327 
17328 	if (sc->phy.acquire(sc) != 0)
17329 		return;
17330 
17331 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
17332 
17333 	sc->phy.release(sc);
17334 }
17335 
17336 static void
17337 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
17338 {
17339 	device_t dev = sc->sc_dev;
17340 	uint32_t mac_reg;
17341 	uint16_t i, wuce;
17342 	int count;
17343 
17344 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17345 		device_xname(dev), __func__));
17346 
17347 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
17348 		return;
17349 
17350 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
17351 	count = wm_rar_count(sc);
17352 	for (i = 0; i < count; i++) {
17353 		uint16_t lo, hi;
17354 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
17355 		lo = (uint16_t)(mac_reg & 0xffff);
17356 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
17357 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
17358 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
17359 
17360 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
17361 		lo = (uint16_t)(mac_reg & 0xffff);
17362 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
17363 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
17364 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
17365 	}
17366 
17367 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
17368 }
17369 
17370 /*
17371  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
17372  *  with 82579 PHY
17373  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
17374  */
17375 static int
17376 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
17377 {
17378 	device_t dev = sc->sc_dev;
17379 	int rar_count;
17380 	int rv;
17381 	uint32_t mac_reg;
17382 	uint16_t dft_ctrl, data;
17383 	uint16_t i;
17384 
17385 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17386 		device_xname(dev), __func__));
17387 
17388 	if (sc->sc_type < WM_T_PCH2)
17389 		return 0;
17390 
17391 	/* Acquire PHY semaphore */
17392 	rv = sc->phy.acquire(sc);
17393 	if (rv != 0)
17394 		return rv;
17395 
17396 	/* Disable Rx path while enabling/disabling workaround */
17397 	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
17398 	if (rv != 0)
17399 		goto out;
17400 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
17401 	    dft_ctrl | (1 << 14));
17402 	if (rv != 0)
17403 		goto out;
17404 
17405 	if (enable) {
17406 		/* Write Rx addresses (rar_entry_count for RAL/H, and
17407 		 * SHRAL/H) and initial CRC values to the MAC
17408 		 */
17409 		rar_count = wm_rar_count(sc);
17410 		for (i = 0; i < rar_count; i++) {
17411 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
17412 			uint32_t addr_high, addr_low;
17413 
17414 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
17415 			if (!(addr_high & RAL_AV))
17416 				continue;
17417 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
17418 			mac_addr[0] = (addr_low & 0xFF);
17419 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
17420 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
17421 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
17422 			mac_addr[4] = (addr_high & 0xFF);
17423 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
17424 
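			/*
			 * Program the initial CRC value for this Rx
			 * address: the bit-inverted little-endian CRC32
			 * of the address itself.
			 */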
17425 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
17426 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
17427 		}
17428 
17429 		/* Write Rx addresses to the PHY */
17430 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
17431 	}
17432 
17433 	/*
17434 	 * If enable ==
17435 	 *	true: Enable jumbo frame workaround in the MAC.
17436 	 *	false: Write MAC register values back to h/w defaults.
17437 	 */
17438 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
17439 	if (enable) {
17440 		mac_reg &= ~(1 << 14);
17441 		mac_reg |= (7 << 15);
17442 	} else
17443 		mac_reg &= ~(0xf << 14);
17444 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
17445 
17446 	mac_reg = CSR_READ(sc, WMREG_RCTL);
17447 	if (enable) {
17448 		mac_reg |= RCTL_SECRC;
17449 		sc->sc_rctl |= RCTL_SECRC;
17450 		sc->sc_flags |= WM_F_CRC_STRIP;
17451 	} else {
17452 		mac_reg &= ~RCTL_SECRC;
17453 		sc->sc_rctl &= ~RCTL_SECRC;
17454 		sc->sc_flags &= ~WM_F_CRC_STRIP;
17455 	}
17456 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
17457 
17458 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
17459 	if (rv != 0)
17460 		goto out;
17461 	if (enable)
17462 		data |= 1 << 0;
17463 	else
17464 		data &= ~(1 << 0);
17465 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
17466 	if (rv != 0)
17467 		goto out;
17468 
17469 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
17470 	if (rv != 0)
17471 		goto out;
17472 	/*
17473 	 * XXX FreeBSD and Linux do the same thing: they write the same value
17474 	 * in both the enable and the disable case. Is that correct?
17475 	 */
17476 	data &= ~(0xf << 8);
17477 	data |= (0xb << 8);
17478 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
17479 	if (rv != 0)
17480 		goto out;
17481 
17482 	/*
17483 	 * If enable ==
17484 	 *	true: Enable jumbo frame workaround in the PHY.
17485 	 *	false: Write PHY register values back to h/w defaults.
17486 	 */
17487 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
17488 	if (rv != 0)
17489 		goto out;
17490 	data &= ~(0x7F << 5);
17491 	if (enable)
17492 		data |= (0x37 << 5);
17493 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
17494 	if (rv != 0)
17495 		goto out;
17496 
17497 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
17498 	if (rv != 0)
17499 		goto out;
17500 	if (enable)
17501 		data &= ~(1 << 13);
17502 	else
17503 		data |= (1 << 13);
17504 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
17505 	if (rv != 0)
17506 		goto out;
17507 
17508 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
17509 	if (rv != 0)
17510 		goto out;
17511 	data &= ~(0x3FF << 2);
17512 	if (enable)
17513 		data |= (I82579_TX_PTR_GAP << 2);
17514 	else
17515 		data |= (0x8 << 2);
17516 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
17517 	if (rv != 0)
17518 		goto out;
17519 
17520 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
17521 	    enable ? 0xf100 : 0x7e00);
17522 	if (rv != 0)
17523 		goto out;
17524 
17525 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
17526 	if (rv != 0)
17527 		goto out;
17528 	if (enable)
17529 		data |= 1 << 10;
17530 	else
17531 		data &= ~(1 << 10);
17532 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
17533 	if (rv != 0)
17534 		goto out;
17535 
17536 	/* Re-enable Rx path after enabling/disabling workaround */
17537 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
17538 	    dft_ctrl & ~(1 << 14));
17539 
17540 out:
17541 	sc->phy.release(sc);
17542 
17543 	return rv;
17544 }
17545 
17546 /*
17547  *  wm_lv_phy_workarounds_ich8lan - A series of PHY workarounds to be
17548  *  done after every PHY reset.
17549  */
17550 static int
17551 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
17552 {
17553 	device_t dev = sc->sc_dev;
17554 	int rv;
17555 
17556 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17557 		device_xname(dev), __func__));
17558 	KASSERT(sc->sc_type == WM_T_PCH2);
17559 
17560 	/* Set MDIO slow mode before any other MDIO access */
17561 	rv = wm_set_mdio_slow_mode_hv(sc);
17562 	if (rv != 0)
17563 		return rv;
17564 
17565 	rv = sc->phy.acquire(sc);
17566 	if (rv != 0)
17567 		return rv;
17568 	/* Set MSE higher to enable link to stay up when noise is high */
17569 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
17570 	if (rv != 0)
17571 		goto release;
17572 	/* Drop link after 5 times MSE threshold was reached */
17573 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
17574 release:
17575 	sc->phy.release(sc);
17576 
17577 	return rv;
17578 }
17579 
17580 /**
17581  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
17582  *  @link: link up bool flag
17583  *
17584  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion
17585  *  indications, preventing further DMA write requests.  Work around the
17586  *  issue by disabling the de-assertion of the clock request in 1Gbps mode.
17587  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
17588  *  speeds in order to avoid Tx hangs.
17589  **/
17590 static int
17591 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
17592 {
17593 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
17594 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
17595 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
17596 	uint16_t phyreg;
17597 
17598 	if (link && (speed == STATUS_SPEED_1000)) {
17599 		int rv;
17600 
17601 		rv = sc->phy.acquire(sc);
17602 		if (rv != 0)
17603 			return rv;
17604 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
17605 		    &phyreg);
17606 		if (rv != 0)
17607 			goto release;
17608 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
17609 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
17610 		if (rv != 0)
17611 			goto release;
17612 		delay(20);
17613 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
17614 
17615 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
17616 		    &phyreg);
17617 release:
17618 		sc->phy.release(sc);
17619 		return rv;
17620 	}
17621 
17622 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
17623 
17624 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
17625 	if (((child != NULL) && (child->mii_mpd_rev > 5))
17626 	    || !link
17627 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
17628 		goto update_fextnvm6;
17629 
17630 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
17631 
17632 	/* Clear link status transmit timeout */
17633 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
17634 	if (speed == STATUS_SPEED_100) {
17635 		/* Set inband Tx timeout to 5x10us for 100Half */
17636 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
17637 
17638 		/* Do not extend the K1 entry latency for 100Half */
17639 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
17640 	} else {
17641 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
17642 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
17643 
17644 		/* Extend the K1 entry latency for 10 Mbps */
17645 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
17646 	}
17647 
17648 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
17649 
17650 update_fextnvm6:
17651 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
17652 	return 0;
17653 }
17654 
17655 /*
17656  *  wm_k1_gig_workaround_hv - K1 Si workaround
17657  *  @sc:   pointer to the HW structure
17658  *  @link: link up bool flag
17659  *
17660  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
17661  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
17662  *  If link is down, the function will restore the default K1 setting located
17663  *  in the NVM.
17664  */
17665 static int
17666 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
17667 {
17668 	int k1_enable = sc->sc_nvm_k1_enabled;
17669 	int rv;
17670 
17671 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17672 		device_xname(sc->sc_dev), __func__));
17673 
17674 	rv = sc->phy.acquire(sc);
17675 	if (rv != 0)
17676 		return rv;
17677 
17678 	if (link) {
17679 		k1_enable = 0;
17680 
17681 		/* Link stall fix for link up */
17682 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
17683 		    0x0100);
17684 	} else {
17685 		/* Link stall fix for link down */
17686 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
17687 		    0x4100);
17688 	}
17689 
17690 	wm_configure_k1_ich8lan(sc, k1_enable);
17691 	sc->phy.release(sc);
17692 
17693 	return 0;
17694 }
17695 
17696 /*
17697  *  wm_k1_workaround_lv - K1 Si workaround
17698  *  @sc:   pointer to the HW structure
17699  *
17700  *  Workaround to set the K1 beacon duration for 82579 parts at 10Mbps.
17701  *  Disable K1 for 1000 and 100 speeds.
17702  */
17703 static int
17704 wm_k1_workaround_lv(struct wm_softc *sc)
17705 {
17706 	uint32_t reg;
17707 	uint16_t phyreg;
17708 	int rv;
17709 
17710 	if (sc->sc_type != WM_T_PCH2)
17711 		return 0;
17712 
17713 	/* Set K1 beacon duration based on 10Mbps speed */
17714 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
17715 	if (rv != 0)
17716 		return rv;
17717 
17718 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
17719 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
17720 		if (phyreg &
17721 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
17722 			/* LV 1G/100 packet drop issue workaround */
17723 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
17724 			    &phyreg);
17725 			if (rv != 0)
17726 				return rv;
17727 			phyreg &= ~HV_PM_CTRL_K1_ENA;
17728 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
17729 			    phyreg);
17730 			if (rv != 0)
17731 				return rv;
17732 		} else {
17733 			/* For 10Mbps */
17734 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
17735 			reg &= ~FEXTNVM4_BEACON_DURATION;
17736 			reg |= FEXTNVM4_BEACON_DURATION_16US;
17737 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
17738 		}
17739 	}
17740 
17741 	return 0;
17742 }
17743 
17744 /*
17745  *  wm_link_stall_workaround_hv - Si workaround
17746  *  @sc: pointer to the HW structure
17747  *
17748  *  This function works around a Si bug where the link partner can get
17749  *  a link up indication before the PHY does. If small packets are sent
17750  *  by the link partner, they can be placed in the packet buffer without
17751  *  being properly accounted for by the PHY and will stall, preventing
17752  *  further packets from being received.  The workaround is to clear the
17753  *  packet buffer after the PHY detects link up.
17754  */
17755 static int
17756 wm_link_stall_workaround_hv(struct wm_softc *sc)
17757 {
17758 	uint16_t phyreg;
17759 
17760 	if (sc->sc_phytype != WMPHY_82578)
17761 		return 0;
17762 
17763 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
17764 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
17765 	if ((phyreg & BMCR_LOOP) != 0)
17766 		return 0;
17767 
17768 	/* Check if link is up and at 1Gbps */
17769 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
17770 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
17771 	    | BM_CS_STATUS_SPEED_MASK;
17772 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
17773 		| BM_CS_STATUS_SPEED_1000))
17774 		return 0;
17775 
17776 	delay(200 * 1000);	/* XXX too big */
17777 
17778 	/* Flush the packets in the fifo buffer */
17779 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
17780 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
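	/* The second write clears FORCE_SPEED but keeps GEN_TO_MAC set */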
17781 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
17782 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
17783 
17784 	return 0;
17785 }
17786 
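/*
 *  wm_set_mdio_slow_mode_hv - Set slow MDIO access mode
 *  @sc: pointer to the HW structure
 *
 *  PCH PHYs (82577/82578) need MDIO slow mode to be set before any other
 *  MDIO access after a PHY reset.
 */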
17787 static int
17788 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
17789 {
17790 	int rv;
17791 
17792 	rv = sc->phy.acquire(sc);
17793 	if (rv != 0) {
17794 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
17795 		    __func__);
17796 		return rv;
17797 	}
17798 
17799 	rv = wm_set_mdio_slow_mode_hv_locked(sc);
17800 
17801 	sc->phy.release(sc);
17802 
17803 	return rv;
17804 }
17805 
17806 static int
17807 wm_set_mdio_slow_mode_hv_locked(struct wm_softc *sc)
17808 {
17809 	int rv;
17810 	uint16_t reg;
17811 
17812 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
17813 	if (rv != 0)
17814 		return rv;
17815 
17816 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
17817 	    reg | HV_KMRN_MDIO_SLOW);
17818 }
17819 
17820 /*
17821  *  wm_configure_k1_ich8lan - Configure K1 power state
17822  *  @sc: pointer to the HW structure
17823  *  @enable: K1 state to configure
17824  *
17825  *  Configure the K1 power state based on the provided parameter.
17826  *  Assumes semaphore already acquired.
17827  */
17828 static void
17829 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
17830 {
17831 	uint32_t ctrl, ctrl_ext, tmp;
17832 	uint16_t kmreg;
17833 	int rv;
17834 
17835 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
17836 
17837 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
17838 	if (rv != 0)
17839 		return;
17840 
17841 	if (k1_enable)
17842 		kmreg |= KUMCTRLSTA_K1_ENABLE;
17843 	else
17844 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
17845 
17846 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
17847 	if (rv != 0)
17848 		return;
17849 
17850 	delay(20);
17851 
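	/*
	 * Toggle a forced-speed configuration (CTRL_FRCSPD plus
	 * CTRL_EXT_SPD_BYPS) for ~20us, then restore the original CTRL
	 * and CTRL_EXT values.
	 */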
17852 	ctrl = CSR_READ(sc, WMREG_CTRL);
17853 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
17854 
17855 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
17856 	tmp |= CTRL_FRCSPD;
17857 
17858 	CSR_WRITE(sc, WMREG_CTRL, tmp);
17859 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
17860 	CSR_WRITE_FLUSH(sc);
17861 	delay(20);
17862 
17863 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
17864 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
17865 	CSR_WRITE_FLUSH(sc);
17866 	delay(20);
17867 
17868 	return;
17869 }
17870 
17871 /* Special case for the 82575: manual init is needed ... */
17872 static void
17873 wm_reset_init_script_82575(struct wm_softc *sc)
17874 {
17875 	/*
17876 	 * Remark: this is untested code - we have no board without EEPROM.
17877 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
17878 	 */
17879 
17880 	/* SerDes configuration via SERDESCTRL */
17881 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
17882 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
17883 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
17884 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
17885 
17886 	/* CCM configuration via CCMCTL register */
17887 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
17888 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
17889 
17890 	/* PCIe lanes configuration */
17891 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
17892 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
17893 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
17894 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
17895 
17896 	/* PCIe PLL Configuration */
17897 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
17898 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
17899 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
17900 }
17901 
17902 static void
17903 wm_reset_mdicnfg_82580(struct wm_softc *sc)
17904 {
17905 	uint32_t reg;
17906 	uint16_t nvmword;
17907 	int rv;
17908 
17909 	if (sc->sc_type != WM_T_82580)
17910 		return;
17911 	if ((sc->sc_flags & WM_F_SGMII) == 0)
17912 		return;
17913 
17914 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
17915 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
17916 	if (rv != 0) {
17917 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
17918 		    __func__);
17919 		return;
17920 	}
17921 
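	/* Propagate the NVM's MDIO configuration bits into MDICNFG */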
17922 	reg = CSR_READ(sc, WMREG_MDICNFG);
17923 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
17924 		reg |= MDICNFG_DEST;
17925 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
17926 		reg |= MDICNFG_COM_MDIO;
17927 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
17928 }
17929 
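/* An ID of all 0s or all 1s means the PHY did not respond */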
17930 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
17931 
17932 static bool
17933 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
17934 {
17935 	uint32_t reg;
17936 	uint16_t id1, id2;
17937 	int i, rv;
17938 
17939 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17940 		device_xname(sc->sc_dev), __func__));
17941 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
17942 
17943 	id1 = id2 = 0xffff;
17944 	for (i = 0; i < 2; i++) {
17945 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
17946 		    &id1);
17947 		if ((rv != 0) || MII_INVALIDID(id1))
17948 			continue;
17949 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
17950 		    &id2);
17951 		if ((rv != 0) || MII_INVALIDID(id2))
17952 			continue;
17953 		break;
17954 	}
17955 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
17956 		goto out;
17957 
17958 	/*
17959 	 * In case the PHY needs to be in MDIO slow mode,
17960 	 * set slow mode and try to get the PHY ID again.
17961 	 */
17962 	rv = 0;
17963 	if (sc->sc_type < WM_T_PCH_LPT) {
17964 		wm_set_mdio_slow_mode_hv_locked(sc);
17965 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
17966 		    &id1);
17967 		rv |= wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
17968 		    &id2);
17969 	}
17970 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
17971 		device_printf(sc->sc_dev, "XXX return with false\n");
17972 		return false;
17973 	}
17974 out:
17975 	if (sc->sc_type >= WM_T_PCH_LPT) {
17976 		/* Only unforce SMBus if ME is not active */
17977 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
17978 			uint16_t phyreg;
17979 
17980 			/* Unforce SMBus mode in PHY */
17981 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
17982 			    CV_SMB_CTRL, &phyreg);
17983 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
17984 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
17985 			    CV_SMB_CTRL, phyreg);
17986 
17987 			/* Unforce SMBus mode in MAC */
17988 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
17989 			reg &= ~CTRL_EXT_FORCE_SMBUS;
17990 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
17991 		}
17992 	}
17993 	return true;
17994 }
17995 
17996 static void
17997 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
17998 {
17999 	uint32_t reg;
18000 	int i;
18001 
18002 	/* Set PHY Config Counter to 50msec */
18003 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
18004 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
18005 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
18006 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
18007 
18008 	/* Toggle LANPHYPC */
18009 	reg = CSR_READ(sc, WMREG_CTRL);
18010 	reg |= CTRL_LANPHYPC_OVERRIDE;
18011 	reg &= ~CTRL_LANPHYPC_VALUE;
18012 	CSR_WRITE(sc, WMREG_CTRL, reg);
18013 	CSR_WRITE_FLUSH(sc);
18014 	delay(1000);
18015 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
18016 	CSR_WRITE(sc, WMREG_CTRL, reg);
18017 	CSR_WRITE_FLUSH(sc);
18018 
18019 	if (sc->sc_type < WM_T_PCH_LPT)
18020 		delay(50 * 1000);
18021 	else {
18022 		i = 20;
18023 
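		/* Poll up to 20 * 5ms for CTRL_EXT_LPCD to be set */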
18024 		do {
18025 			delay(5 * 1000);
18026 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
18027 		    && i--);
18028 
18029 		delay(30 * 1000);
18030 	}
18031 }
18032 
18033 static int
18034 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
18035 {
18036 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
18037 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
18038 	uint32_t rxa;
18039 	uint16_t scale = 0, lat_enc = 0;
18040 	int32_t obff_hwm = 0;
18041 	int64_t lat_ns, value;
18042 
18043 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
18044 		device_xname(sc->sc_dev), __func__));
18045 
18046 	if (link) {
18047 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
18048 		uint32_t status;
18049 		uint16_t speed;
18050 		pcireg_t preg;
18051 
18052 		status = CSR_READ(sc, WMREG_STATUS);
18053 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
18054 		case STATUS_SPEED_10:
18055 			speed = 10;
18056 			break;
18057 		case STATUS_SPEED_100:
18058 			speed = 100;
18059 			break;
18060 		case STATUS_SPEED_1000:
18061 			speed = 1000;
18062 			break;
18063 		default:
18064 			device_printf(sc->sc_dev, "Unknown speed "
18065 			    "(status = %08x)\n", status);
18066 			return -1;
18067 		}
18068 
18069 		/* Rx Packet Buffer Allocation size (KB) */
18070 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
18071 
18072 		/*
18073 		 * Determine the maximum latency tolerated by the device.
18074 		 *
18075 		 * Per the PCIe spec, the tolerated latencies are encoded as
18076 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
18077 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
18078 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
18079 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
18080 		 */
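		/*
		 * Illustrative example (hypothetical values): a 24KB Rx
		 * buffer, a 1500 byte MTU and a 1Gbps link give
		 * lat_ns = (24 * 1024 - 2 * 1514) * 8 * 1000 / 1000
		 *        = 172384, which the loop below encodes as
		 * value 169 with scale 2 (169 * 2^10 ns >= 172384 ns).
		 */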
18081 		lat_ns = ((int64_t)rxa * 1024 -
18082 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
18083 			+ ETHER_HDR_LEN))) * 8 * 1000;
18084 		if (lat_ns < 0)
18085 			lat_ns = 0;
18086 		else
18087 			lat_ns /= speed;
18088 		value = lat_ns;
18089 
18090 		while (value > LTRV_VALUE) {
18091 			scale++;
18092 			value = howmany(value, __BIT(5));
18093 		}
18094 		if (scale > LTRV_SCALE_MAX) {
18095 			device_printf(sc->sc_dev,
18096 			    "Invalid LTR latency scale %d\n", scale);
18097 			return -1;
18098 		}
18099 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
18100 
18101 		/* Determine the maximum latency tolerated by the platform */
18102 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
18103 		    WM_PCI_LTR_CAP_LPT);
18104 		max_snoop = preg & 0xffff;
18105 		max_nosnoop = preg >> 16;
18106 
18107 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
18108 
18109 		if (lat_enc > max_ltr_enc) {
18110 			lat_enc = max_ltr_enc;
18111 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
18112 			    * PCI_LTR_SCALETONS(
18113 				    __SHIFTOUT(lat_enc,
18114 					PCI_LTR_MAXSNOOPLAT_SCALE));
18115 		}
18116 
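		/*
		 * Convert the latency back into the approximate amount of
		 * Rx buffer (in KB) that fills at line rate during that
		 * time; the OBFF high water mark is the space left over.
		 */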
18117 		if (lat_ns) {
18118 			lat_ns *= speed * 1000;
18119 			lat_ns /= 8;
18120 			lat_ns /= 1000000000;
18121 			obff_hwm = (int32_t)(rxa - lat_ns);
18122 		}
18123 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
18124 			device_printf(sc->sc_dev, "Invalid high water mark %d"
18125 			    " (rxa = %d, lat_ns = %d)\n",
18126 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
18127 			return -1;
18128 		}
18129 	}
18130 	/* Snoop and No-Snoop latencies the same */
18131 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
18132 	CSR_WRITE(sc, WMREG_LTRV, reg);
18133 
18134 	/* Set OBFF high water mark */
18135 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
18136 	reg |= obff_hwm;
18137 	CSR_WRITE(sc, WMREG_SVT, reg);
18138 
18139 	/* Enable OBFF */
18140 	reg = CSR_READ(sc, WMREG_SVCR);
18141 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
18142 	CSR_WRITE(sc, WMREG_SVCR, reg);
18143 
18144 	return 0;
18145 }
18146 
18147 /*
18148  * I210 Errata 25 and I211 Errata 10
18149  * Slow System Clock.
18150  *
18151  * Note that on NetBSD this function is called in both the FLASH and iNVM cases.
18152  */
18153 static int
18154 wm_pll_workaround_i210(struct wm_softc *sc)
18155 {
18156 	uint32_t mdicnfg, wuc;
18157 	uint32_t reg;
18158 	pcireg_t pcireg;
18159 	uint32_t pmreg;
18160 	uint16_t nvmword, tmp_nvmword;
18161 	uint16_t phyval;
18162 	bool wa_done = false;
18163 	int i, rv = 0;
18164 
18165 	/* Get Power Management cap offset */
18166 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
18167 	    &pmreg, NULL) == 0)
18168 		return -1;
18169 
18170 	/* Save WUC and MDICNFG registers */
18171 	wuc = CSR_READ(sc, WMREG_WUC);
18172 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
18173 
18174 	reg = mdicnfg & ~MDICNFG_DEST;
18175 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
18176 
18177 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
18178 		/*
18179 		 * The default value of the Initialization Control Word 1
18180 		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
18181 		 */
18182 		nvmword = INVM_DEFAULT_AL;
18183 	}
18184 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
18185 
18186 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
18187 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
18188 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
18189 
18190 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
18191 			rv = 0;
18192 			break; /* OK */
18193 		} else
18194 			rv = -1;
18195 
18196 		wa_done = true;
18197 		/* Directly reset the internal PHY */
18198 		reg = CSR_READ(sc, WMREG_CTRL);
18199 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
18200 
18201 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
18202 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
18203 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
18204 
18205 		CSR_WRITE(sc, WMREG_WUC, 0);
18206 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
18207 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
18208 
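		/*
		 * Cycle the device through D3hot and back to D0 with the
		 * workaround autoload value in place; the original value
		 * is restored below.
		 */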
18209 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
18210 		    pmreg + PCI_PMCSR);
18211 		pcireg |= PCI_PMCSR_STATE_D3;
18212 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
18213 		    pmreg + PCI_PMCSR, pcireg);
18214 		delay(1000);
18215 		pcireg &= ~PCI_PMCSR_STATE_D3;
18216 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
18217 		    pmreg + PCI_PMCSR, pcireg);
18218 
18219 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
18220 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
18221 
18222 		/* Restore WUC register */
18223 		CSR_WRITE(sc, WMREG_WUC, wuc);
18224 	}
18225 
18226 	/* Restore MDICNFG setting */
18227 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
18228 	if (wa_done)
18229 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
18230 	return rv;
18231 }
18232 
18233 static void
18234 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
18235 {
18236 	uint32_t reg;
18237 
18238 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
18239 		device_xname(sc->sc_dev), __func__));
18240 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
18241 	    || (sc->sc_type == WM_T_PCH_CNP) || (sc->sc_type == WM_T_PCH_TGP));
18242 
18243 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
18244 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
18245 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
18246 
18247 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
18248 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
18249 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
18250 }
18251 
18252 /* Sysctl functions */
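/*
 *  wm_sysctl_tdh_handler - Report the Tx Descriptor Head (TDH) register
 *  of a Tx queue via sysctl.
 */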
18253 static int
18254 wm_sysctl_tdh_handler(SYSCTLFN_ARGS)
18255 {
18256 	struct sysctlnode node = *rnode;
18257 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
18258 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
18259 	struct wm_softc *sc = txq->txq_sc;
18260 	uint32_t reg;
18261 
18262 	reg = CSR_READ(sc, WMREG_TDH(wmq->wmq_id));
18263 	node.sysctl_data = &reg;
18264 	return sysctl_lookup(SYSCTLFN_CALL(&node));
18265 }
18266 
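/*
 *  wm_sysctl_tdt_handler - Report the Tx Descriptor Tail (TDT) register
 *  of a Tx queue via sysctl.
 */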
18267 static int
18268 wm_sysctl_tdt_handler(SYSCTLFN_ARGS)
18269 {
18270 	struct sysctlnode node = *rnode;
18271 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
18272 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
18273 	struct wm_softc *sc = txq->txq_sc;
18274 	uint32_t reg;
18275 
18276 	reg = CSR_READ(sc, WMREG_TDT(wmq->wmq_id));
18277 	node.sysctl_data = &reg;
18278 	return sysctl_lookup(SYSCTLFN_CALL(&node));
18279 }
18280 
18281 #ifdef WM_DEBUG
18282 static int
18283 wm_sysctl_debug(SYSCTLFN_ARGS)
18284 {
18285 	struct sysctlnode node = *rnode;
18286 	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
18287 	uint32_t dflags;
18288 	int error;
18289 
18290 	dflags = sc->sc_debug;
18291 	node.sysctl_data = &dflags;
18292 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
18293 
18294 	if (error || newp == NULL)
18295 		return error;
18296 
18297 	sc->sc_debug = dflags;
18298 	device_printf(sc->sc_dev, "TARC0: %08x\n", CSR_READ(sc, WMREG_TARC0));
18299 	device_printf(sc->sc_dev, "TDT0: %08x\n", CSR_READ(sc, WMREG_TDT(0)));
18300 
18301 	return 0;
18302 }
18303 #endif
18304