/*	$NetBSD: if_wm.c,v 1.689 2020/09/16 15:04:57 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.689 2020/09/16 15:04:57 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>
#include <sys/sysctl.h>
#include <sys/workqueue.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>
#include <dev/mii/makphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	__nothing
#endif /* WM_DEBUG */
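
/*
 * Illustrative DPRINTF() usage (a sketch, not compiled in): the second
 * argument carries its own parentheses, because the macro above pastes it
 * directly after printf:
 *
 *	DPRINTF(WM_DEBUG_TX, ("%s: TX: ...\n", device_xname(sc->sc_dev)));
 *
 * Without WM_DEBUG defined, the whole call expands to nothing.
 */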

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define WM_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define WM_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define WM_CALLOUT_FLAGS	0
#define WM_SOFTINT_FLAGS	0
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU
#endif

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * The maximum number of interrupts this device driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
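
/*
 * Worked example for the ring arithmetic above (illustrative only):
 * with WM_NTXDESC(txq) == 4096, WM_NTXDESC_MASK(txq) == 4095, so
 *
 *	WM_NEXTTX(txq, 4095) == ((4095 + 1) & 4095) == 0
 *
 * i.e. the masked increment wraps the index to the start of the ring
 * without a division, which is why txq_num and txq_ndesc must be
 * powers of two.
 */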

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
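
/*
 * Worked example for the sizing comment above (assuming a ~9k jumbo
 * frame): howmany(9018, MCLBYTES) == howmany(9018, 2048) == 5 buffers
 * per packet, and 256 / 5 is roughly 51, hence "room for 50 jumbo
 * packets".
 */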

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
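
/*
 * Illustrative expansion of the macros above (a sketch, not compiled in):
 * WM_Q_EVCNT_DEFINE(txq, txdw) declares
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * and WM_Q_EVCNT_ATTACH(txq, txdw, q, 0, xname, EVCNT_TYPE_INTR)
 * formats the counter name as "txq00txdw" before attaching it.
 */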

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t 	txq_last_hw_cmd;
	uint8_t 	txq_last_hw_fields;
	uint16_t	txq_last_hw_ipcs;
	uint16_t	txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped(toomany DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext) /* Tx skip writing cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	bool wmq_txrx_use_workqueue;
	struct work wmq_cookie;
	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)						\
	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
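
/*
 * Typical usage of the core lock macros above (illustrative sketch):
 * callers bracket softc updates with the lock, and functions that
 * require it assert ownership:
 *
 *	WM_CORE_LOCK(sc);
 *	KASSERT(WM_CORE_LOCKED(sc));
 *	...
 *	WM_CORE_UNLOCK(sc);
 */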

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
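
/*
 * Illustrative walk-through of the Rx chain macros above: after
 * WM_RXCHAIN_RESET(rxq), rxq_tailp points at rxq_head, so
 * WM_RXCHAIN_LINK(rxq, m1) sets rxq_head = m1, and a subsequent
 * WM_RXCHAIN_LINK(rxq, m2) stores m2 into m1->m_next; the tail
 * pointer makes each append O(1).
 */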

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)
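
/*
 * Illustrative usage (a sketch): CSR_WRITE_FLUSH() pushes posted PCI
 * writes out to the device by reading back an arbitrary register
 * (STATUS), e.g.
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 *	CSR_WRITE_FLUSH(sc);
 */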

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
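
/*
 * Example for the address-split macros above (assuming a 64-bit
 * bus_addr_t): for a descriptor address of 0x123456000,
 * WM_CDTXADDR_LO() yields 0x23456000 and WM_CDTXADDR_HI() yields 0x1.
 * With a 32-bit bus_addr_t the high half is always 0.
 */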

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish_queue(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, bool, bool);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
		    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions access Kumeran-specific registers, not MII registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1445 	  "82580 quad-1000BaseX Ethernet",
1446 	  WM_T_82580,		WMP_F_FIBER },
1447 
1448 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
1449 	  "DH89XXCC Gigabit Ethernet (SGMII)",
1450 	  WM_T_82580,		WMP_F_COPPER },
1451 
1452 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
1453 	  "DH89XXCC Gigabit Ethernet (SERDES)",
1454 	  WM_T_82580,		WMP_F_SERDES },
1455 
1456 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
1457 	  "DH89XXCC 1000BASE-KX Ethernet",
1458 	  WM_T_82580,		WMP_F_SERDES },
1459 
1460 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
1461 	  "DH89XXCC Gigabit Ethernet (SFP)",
1462 	  WM_T_82580,		WMP_F_SERDES },
1463 
1464 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
1465 	  "I350 Gigabit Network Connection",
1466 	  WM_T_I350,		WMP_F_COPPER },
1467 
1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
1469 	  "I350 Gigabit Fiber Network Connection",
1470 	  WM_T_I350,		WMP_F_FIBER },
1471 
1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
1473 	  "I350 Gigabit Backplane Connection",
1474 	  WM_T_I350,		WMP_F_SERDES },
1475 
1476 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
1477 	  "I350 Quad Port Gigabit Ethernet",
1478 	  WM_T_I350,		WMP_F_SERDES },
1479 
1480 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
1481 	  "I350 Gigabit Connection",
1482 	  WM_T_I350,		WMP_F_COPPER },
1483 
1484 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
1485 	  "I354 Gigabit Ethernet (KX)",
1486 	  WM_T_I354,		WMP_F_SERDES },
1487 
1488 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
1489 	  "I354 Gigabit Ethernet (SGMII)",
1490 	  WM_T_I354,		WMP_F_COPPER },
1491 
1492 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
1493 	  "I354 Gigabit Ethernet (2.5G)",
1494 	  WM_T_I354,		WMP_F_COPPER },
1495 
1496 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
1497 	  "I210-T1 Ethernet Server Adapter",
1498 	  WM_T_I210,		WMP_F_COPPER },
1499 
1500 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
1501 	  "I210 Ethernet (Copper OEM)",
1502 	  WM_T_I210,		WMP_F_COPPER },
1503 
1504 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
1505 	  "I210 Ethernet (Copper IT)",
1506 	  WM_T_I210,		WMP_F_COPPER },
1507 
1508 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
1509 	  "I210 Ethernet (Copper, FLASH less)",
1510 	  WM_T_I210,		WMP_F_COPPER },
1511 
1512 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
1513 	  "I210 Gigabit Ethernet (Fiber)",
1514 	  WM_T_I210,		WMP_F_FIBER },
1515 
1516 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
1517 	  "I210 Gigabit Ethernet (SERDES)",
1518 	  WM_T_I210,		WMP_F_SERDES },
1519 
1520 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
1521 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
1522 	  WM_T_I210,		WMP_F_SERDES },
1523 
1524 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
1525 	  "I210 Gigabit Ethernet (SGMII)",
1526 	  WM_T_I210,		WMP_F_COPPER },
1527 
1528 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
1529 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
1530 	  WM_T_I210,		WMP_F_COPPER },
1531 
1532 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
1533 	  "I211 Ethernet (COPPER)",
1534 	  WM_T_I211,		WMP_F_COPPER },
1535 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
1536 	  "I217 V Ethernet Connection",
1537 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1538 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
1539 	  "I217 LM Ethernet Connection",
1540 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1541 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
1542 	  "I218 V Ethernet Connection",
1543 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1544 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
1545 	  "I218 V Ethernet Connection",
1546 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1547 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
1548 	  "I218 V Ethernet Connection",
1549 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1550 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
1551 	  "I218 LM Ethernet Connection",
1552 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1553 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
1554 	  "I218 LM Ethernet Connection",
1555 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1556 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
1557 	  "I218 LM Ethernet Connection",
1558 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1559 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
1560 	  "I219 LM Ethernet Connection",
1561 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1562 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
1563 	  "I219 LM Ethernet Connection",
1564 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1565 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
1566 	  "I219 LM Ethernet Connection",
1567 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1568 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
1569 	  "I219 LM Ethernet Connection",
1570 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1571 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
1572 	  "I219 LM Ethernet Connection",
1573 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1574 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
1575 	  "I219 LM Ethernet Connection",
1576 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1577 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
1578 	  "I219 LM Ethernet Connection",
1579 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1580 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
1581 	  "I219 LM Ethernet Connection",
1582 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1583 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
1584 	  "I219 LM Ethernet Connection",
1585 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1586 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
1587 	  "I219 LM Ethernet Connection",
1588 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1589 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
1590 	  "I219 LM Ethernet Connection",
1591 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1592 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
1593 	  "I219 LM Ethernet Connection",
1594 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1595 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
1596 	  "I219 LM Ethernet Connection",
1597 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1598 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
1599 	  "I219 LM Ethernet Connection",
1600 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1601 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
1602 	  "I219 LM Ethernet Connection",
1603 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1604 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
1605 	  "I219 V Ethernet Connection",
1606 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1607 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
1608 	  "I219 V Ethernet Connection",
1609 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1610 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
1611 	  "I219 V Ethernet Connection",
1612 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1613 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
1614 	  "I219 V Ethernet Connection",
1615 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1616 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
1617 	  "I219 V Ethernet Connection",
1618 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1619 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
1620 	  "I219 V Ethernet Connection",
1621 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1622 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
1623 	  "I219 V Ethernet Connection",
1624 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1625 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
1626 	  "I219 V Ethernet Connection",
1627 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1628 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
1629 	  "I219 V Ethernet Connection",
1630 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1631 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
1632 	  "I219 V Ethernet Connection",
1633 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1634 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
1635 	  "I219 V Ethernet Connection",
1636 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1637 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
1638 	  "I219 V Ethernet Connection",
1639 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1640 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
1641 	  "I219 V Ethernet Connection",
1642 	  WM_T_PCH_CNP,		WMP_F_COPPER },
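	/* Sentinel entry: the NULL wmp_name terminates wm_lookup()'s scan. */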
1643 	{ 0,			0,
1644 	  NULL,
1645 	  0,			0 },
1646 };
1647 
1648 /*
1649  * Register read/write functions.
1650  * Other than CSR_{READ|WRITE}().
1651  */
1652 
1653 #if 0 /* Not currently used */
1654 static inline uint32_t
1655 wm_io_read(struct wm_softc *sc, int reg)
1656 {
1657 
1658 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1659 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1660 }
1661 #endif
1662 
1663 static inline void
1664 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1665 {
1666 
1667 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1668 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1669 }
1670 
1671 static inline void
1672 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1673     uint32_t data)
1674 {
1675 	uint32_t regval;
1676 	int i;
1677 
1678 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1679 
1680 	CSR_WRITE(sc, reg, regval);
1681 
1682 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1683 		delay(5);
1684 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1685 			break;
1686 	}
1687 	if (i == SCTL_CTL_POLL_TIMEOUT) {
1688 		aprint_error("%s: WARNING:"
1689 		    " i82575 reg 0x%08x setup did not indicate ready\n",
1690 		    device_xname(sc->sc_dev), reg);
1691 	}
1692 }
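/*
 * For example, given the packing above, writing the byte 0xAB to
 * sub-register offset 0x02 amounts to
 *
 *	regval = (0xAB & SCTL_CTL_DATA_MASK) | (0x02 << SCTL_CTL_ADDR_SHIFT);
 *
 * followed by polling for SCTL_CTL_READY in 5us steps, up to
 * SCTL_CTL_POLL_TIMEOUT iterations.
 */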
1693 
1694 static inline void
1695 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1696 {
1697 	wa->wa_low = htole32(v & 0xffffffffU);
1698 	if (sizeof(bus_addr_t) == 8)
1699 		wa->wa_high = htole32((uint64_t) v >> 32);
1700 	else
1701 		wa->wa_high = 0;
1702 }
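/*
 * For example, on a machine with a 64-bit bus_addr_t, v = 0x123456789
 * is stored as wa_low = htole32(0x23456789) and wa_high = htole32(0x1);
 * with a 32-bit bus_addr_t the high word is simply zero.
 */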
1703 
1704 /*
1705  * Descriptor sync/init functions.
1706  */
1707 static inline void
1708 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1709 {
1710 	struct wm_softc *sc = txq->txq_sc;
1711 
1712 	/* If it will wrap around, sync to the end of the ring. */
1713 	if ((start + num) > WM_NTXDESC(txq)) {
1714 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1715 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
1716 		    (WM_NTXDESC(txq) - start), ops);
1717 		num -= (WM_NTXDESC(txq) - start);
1718 		start = 0;
1719 	}
1720 
1721 	/* Now sync whatever is left. */
1722 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1723 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
1724 }
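/*
 * For example, with a 256-descriptor ring, start = 250 and num = 10,
 * the first sync above covers descriptors 250-255 and the second
 * covers descriptors 0-3.
 */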
1725 
1726 static inline void
1727 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1728 {
1729 	struct wm_softc *sc = rxq->rxq_sc;
1730 
1731 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1732 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
1733 }
1734 
1735 static inline void
1736 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1737 {
1738 	struct wm_softc *sc = rxq->rxq_sc;
1739 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1740 	struct mbuf *m = rxs->rxs_mbuf;
1741 
1742 	/*
1743 	 * Note: We scoot the packet forward 2 bytes in the buffer
1744 	 * so that the payload after the Ethernet header is aligned
1745 	 * to a 4-byte boundary.
1746 	 *
1747 	 * XXX BRAINDAMAGE ALERT!
1748 	 * The stupid chip uses the same size for every buffer, which
1749 	 * is set in the Receive Control register.  We are using the 2K
1750 	 * size option, but what we REALLY want is (2K - 2)!  For this
1751 	 * reason, we can't "scoot" packets longer than the standard
1752 	 * Ethernet MTU.  On strict-alignment platforms, if the total
1753 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
1754 	 * the upper layer copy the headers.
1755 	 */
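	/*
	 * (With a 2-byte tweak the 14-byte Ethernet header ends at
	 * offset 16, leaving the IP header 4-byte aligned.)
	 */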
1756 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
1757 
1758 	if (sc->sc_type == WM_T_82574) {
1759 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
1760 		rxd->erx_data.erxd_addr =
1761 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1762 		rxd->erx_data.erxd_dd = 0;
1763 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
1764 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
1765 
1766 		rxd->nqrx_data.nrxd_paddr =
1767 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1768 		/* Currently, split header is not supported. */
1769 		rxd->nqrx_data.nrxd_haddr = 0;
1770 	} else {
1771 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1772 
1773 		wm_set_dma_addr(&rxd->wrx_addr,
1774 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1775 		rxd->wrx_len = 0;
1776 		rxd->wrx_cksum = 0;
1777 		rxd->wrx_status = 0;
1778 		rxd->wrx_errors = 0;
1779 		rxd->wrx_special = 0;
1780 	}
1781 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1782 
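	/* Advancing the tail (RDT) hands this descriptor back to the chip. */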
1783 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
1784 }
1785 
1786 /*
1787  * Device driver interface functions and commonly used functions.
1788  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1789  */
1790 
1791 /* Lookup supported device table */
1792 static const struct wm_product *
1793 wm_lookup(const struct pci_attach_args *pa)
1794 {
1795 	const struct wm_product *wmp;
1796 
1797 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1798 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1799 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1800 			return wmp;
1801 	}
1802 	return NULL;
1803 }
1804 
1805 /* The match function (ca_match) */
1806 static int
1807 wm_match(device_t parent, cfdata_t cf, void *aux)
1808 {
1809 	struct pci_attach_args *pa = aux;
1810 
1811 	if (wm_lookup(pa) != NULL)
1812 		return 1;
1813 
1814 	return 0;
1815 }
1816 
1817 /* The attach function (ca_attach) */
1818 static void
1819 wm_attach(device_t parent, device_t self, void *aux)
1820 {
1821 	struct wm_softc *sc = device_private(self);
1822 	struct pci_attach_args *pa = aux;
1823 	prop_dictionary_t dict;
1824 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1825 	pci_chipset_tag_t pc = pa->pa_pc;
1826 	int counts[PCI_INTR_TYPE_SIZE];
1827 	pci_intr_type_t max_type;
1828 	const char *eetype, *xname;
1829 	bus_space_tag_t memt;
1830 	bus_space_handle_t memh;
1831 	bus_size_t memsize;
1832 	int memh_valid;
1833 	int i, error;
1834 	const struct wm_product *wmp;
1835 	prop_data_t ea;
1836 	prop_number_t pn;
1837 	uint8_t enaddr[ETHER_ADDR_LEN];
1838 	char buf[256];
1839 	char wqname[MAXCOMLEN];
1840 	uint16_t cfg1, cfg2, swdpin, nvmword;
1841 	pcireg_t preg, memtype;
1842 	uint16_t eeprom_data, apme_mask;
1843 	bool force_clear_smbi;
1844 	uint32_t link_mode;
1845 	uint32_t reg;
1846 
1847 	sc->sc_dev = self;
1848 	callout_init(&sc->sc_tick_ch, WM_CALLOUT_FLAGS);
1849 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
1850 	sc->sc_core_stopping = false;
1851 
1852 	wmp = wm_lookup(pa);
1853 #ifdef DIAGNOSTIC
1854 	if (wmp == NULL) {
1855 		printf("\n");
1856 		panic("wm_attach: impossible");
1857 	}
1858 #endif
1859 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
1860 
1861 	sc->sc_pc = pa->pa_pc;
1862 	sc->sc_pcitag = pa->pa_tag;
1863 
1864 	if (pci_dma64_available(pa))
1865 		sc->sc_dmat = pa->pa_dmat64;
1866 	else
1867 		sc->sc_dmat = pa->pa_dmat;
1868 
1869 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
1870 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
1871 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1872 
1873 	sc->sc_type = wmp->wmp_type;
1874 
1875 	/* Set default function pointers */
1876 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
1877 	sc->phy.release = sc->nvm.release = wm_put_null;
1878 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
1879 
1880 	if (sc->sc_type < WM_T_82543) {
1881 		if (sc->sc_rev < 2) {
1882 			aprint_error_dev(sc->sc_dev,
1883 			    "i82542 must be at least rev. 2\n");
1884 			return;
1885 		}
1886 		if (sc->sc_rev < 3)
1887 			sc->sc_type = WM_T_82542_2_0;
1888 	}
1889 
1890 	/*
1891 	 * Disable MSI for Errata:
1892 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
1893 	 *
1894 	 *  82544: Errata 25
1895 	 *  82540: Errata  6 (easy to reproduce device timeout)
1896 	 *  82545: Errata  4 (easy to reproduce device timeout)
1897 	 *  82546: Errata 26 (easy to reproduce device timeout)
1898 	 *  82541: Errata  7 (easy to reproduce device timeout)
1899 	 *
1900 	 * "Byte Enables 2 and 3 are not set on MSI writes"
1901 	 *
1902 	 *  82571 & 82572: Errata 63
1903 	 */
1904 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
1905 	    || (sc->sc_type == WM_T_82572))
1906 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
1907 
1908 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1909 	    || (sc->sc_type == WM_T_82580)
1910 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
1911 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
1912 		sc->sc_flags |= WM_F_NEWQUEUE;
1913 
1914 	/* Set device properties (mactype) */
1915 	dict = device_properties(sc->sc_dev);
1916 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1917 
1918 	/*
1919 	 * Map the device.  All devices support memory-mapped access,
1920 	 * and it is really required for normal operation.
1921 	 */
1922 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1923 	switch (memtype) {
1924 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1925 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1926 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1927 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1928 		break;
1929 	default:
1930 		memh_valid = 0;
1931 		break;
1932 	}
1933 
1934 	if (memh_valid) {
1935 		sc->sc_st = memt;
1936 		sc->sc_sh = memh;
1937 		sc->sc_ss = memsize;
1938 	} else {
1939 		aprint_error_dev(sc->sc_dev,
1940 		    "unable to map device registers\n");
1941 		return;
1942 	}
1943 
1944 	/*
1945 	 * In addition, i82544 and later support I/O mapped indirect
1946 	 * register access.  It is not desirable (nor supported in
1947 	 * this driver) to use it for normal operation, though it is
1948 	 * required to work around bugs in some chip versions.
1949 	 */
1950 	if (sc->sc_type >= WM_T_82544) {
1951 		/* First we have to find the I/O BAR. */
1952 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1953 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1954 			if (memtype == PCI_MAPREG_TYPE_IO)
1955 				break;
1956 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
1957 			    PCI_MAPREG_MEM_TYPE_64BIT)
1958 				i += 4;	/* skip high bits, too */
1959 		}
1960 		if (i < PCI_MAPREG_END) {
1961 			/*
1962 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
1963 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
1964 			 * That's no problem, because newer chips don't have
1965 			 * this bug.
1966 			 *
1967 			 * Apparently the i8254x doesn't respond when the
1968 			 * I/O BAR is 0, which looks as if it hasn't been
1969 			 * configured.
1970 			 */
1971 			preg = pci_conf_read(pc, pa->pa_tag, i);
1972 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1973 				aprint_error_dev(sc->sc_dev,
1974 				    "WARNING: I/O BAR at zero.\n");
1975 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1976 					0, &sc->sc_iot, &sc->sc_ioh,
1977 					NULL, &sc->sc_ios) == 0) {
1978 				sc->sc_flags |= WM_F_IOH_VALID;
1979 			} else
1980 				aprint_error_dev(sc->sc_dev,
1981 				    "WARNING: unable to map I/O space\n");
1982 		}
1983 
1984 	}
1985 
1986 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
1987 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1988 	preg |= PCI_COMMAND_MASTER_ENABLE;
1989 	if (sc->sc_type < WM_T_82542_2_1)
1990 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1991 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1992 
1993 	/* Power up chip */
1994 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
1995 	    && error != EOPNOTSUPP) {
1996 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1997 		return;
1998 	}
1999 
2000 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
2001 	/*
2002 	 *  Don't use MSI-X if we can use only one queue, to save
2003 	 * interrupt resources.
2004 	 */
2005 	if (sc->sc_nqueues > 1) {
2006 		max_type = PCI_INTR_TYPE_MSIX;
2007 		/*
2008 		 *  The 82583 has an MSI-X capability in the PCI configuration
2009 		 * space but doesn't actually support it; at least the
2010 		 * documentation doesn't say anything about MSI-X.
2011 		 */
2012 		counts[PCI_INTR_TYPE_MSIX]
2013 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
2014 	} else {
2015 		max_type = PCI_INTR_TYPE_MSI;
2016 		counts[PCI_INTR_TYPE_MSIX] = 0;
2017 	}
2018 
2019 	/* Allocation settings */
2020 	counts[PCI_INTR_TYPE_MSI] = 1;
2021 	counts[PCI_INTR_TYPE_INTX] = 1;
2022 	/* overridden by disable flags */
2023 	if (wm_disable_msi != 0) {
2024 		counts[PCI_INTR_TYPE_MSI] = 0;
2025 		if (wm_disable_msix != 0) {
2026 			max_type = PCI_INTR_TYPE_INTX;
2027 			counts[PCI_INTR_TYPE_MSIX] = 0;
2028 		}
2029 	} else if (wm_disable_msix != 0) {
2030 		max_type = PCI_INTR_TYPE_MSI;
2031 		counts[PCI_INTR_TYPE_MSIX] = 0;
2032 	}
2033 
2034 alloc_retry:
2035 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
2036 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
2037 		return;
2038 	}
2039 
2040 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
2041 		error = wm_setup_msix(sc);
2042 		if (error) {
2043 			pci_intr_release(pc, sc->sc_intrs,
2044 			    counts[PCI_INTR_TYPE_MSIX]);
2045 
2046 			/* Setup for MSI: Disable MSI-X */
2047 			max_type = PCI_INTR_TYPE_MSI;
2048 			counts[PCI_INTR_TYPE_MSI] = 1;
2049 			counts[PCI_INTR_TYPE_INTX] = 1;
2050 			goto alloc_retry;
2051 		}
2052 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
2053 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
2054 		error = wm_setup_legacy(sc);
2055 		if (error) {
2056 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
2057 			    counts[PCI_INTR_TYPE_MSI]);
2058 
2059 			/* The next try is for INTx: Disable MSI */
2060 			max_type = PCI_INTR_TYPE_INTX;
2061 			counts[PCI_INTR_TYPE_INTX] = 1;
2062 			goto alloc_retry;
2063 		}
2064 	} else {
2065 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
2066 		error = wm_setup_legacy(sc);
2067 		if (error) {
2068 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
2069 			    counts[PCI_INTR_TYPE_INTX]);
2070 			return;
2071 		}
2072 	}
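	/*
	 * At this point the fallback chain has settled on one of MSI-X
	 * (possibly multiqueue), MSI, or INTx, in that order of preference.
	 */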
2073 
2074 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
2075 	error = workqueue_create(&sc->sc_queue_wq, wqname,
2076 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
2077 	    WM_WORKQUEUE_FLAGS);
2078 	if (error) {
2079 		aprint_error_dev(sc->sc_dev,
2080 		    "unable to create workqueue\n");
2081 		goto out;
2082 	}
2083 
2084 	/*
2085 	 * Check the function ID (unit number of the chip).
2086 	 */
2087 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
2088 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
2089 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2090 	    || (sc->sc_type == WM_T_82580)
2091 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2092 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
2093 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
2094 	else
2095 		sc->sc_funcid = 0;
2096 
2097 	/*
2098 	 * Determine a few things about the bus we're connected to.
2099 	 */
2100 	if (sc->sc_type < WM_T_82543) {
2101 		/* We don't really know the bus characteristics here. */
2102 		sc->sc_bus_speed = 33;
2103 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
2104 		/*
2105 		 * CSA (Communication Streaming Architecture) is about as fast
2106 		 * as a 32-bit 66MHz PCI bus.
2107 		 */
2108 		sc->sc_flags |= WM_F_CSA;
2109 		sc->sc_bus_speed = 66;
2110 		aprint_verbose_dev(sc->sc_dev,
2111 		    "Communication Streaming Architecture\n");
2112 		if (sc->sc_type == WM_T_82547) {
2113 			callout_init(&sc->sc_txfifo_ch, WM_CALLOUT_FLAGS);
2114 			callout_setfunc(&sc->sc_txfifo_ch,
2115 			    wm_82547_txfifo_stall, sc);
2116 			aprint_verbose_dev(sc->sc_dev,
2117 			    "using 82547 Tx FIFO stall work-around\n");
2118 		}
2119 	} else if (sc->sc_type >= WM_T_82571) {
2120 		sc->sc_flags |= WM_F_PCIE;
2121 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
2122 		    && (sc->sc_type != WM_T_ICH10)
2123 		    && (sc->sc_type != WM_T_PCH)
2124 		    && (sc->sc_type != WM_T_PCH2)
2125 		    && (sc->sc_type != WM_T_PCH_LPT)
2126 		    && (sc->sc_type != WM_T_PCH_SPT)
2127 		    && (sc->sc_type != WM_T_PCH_CNP)) {
2128 			/* ICH* and PCH* have no PCIe capability registers */
2129 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2130 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
2131 				NULL) == 0)
2132 				aprint_error_dev(sc->sc_dev,
2133 				    "unable to find PCIe capability\n");
2134 		}
2135 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
2136 	} else {
2137 		reg = CSR_READ(sc, WMREG_STATUS);
2138 		if (reg & STATUS_BUS64)
2139 			sc->sc_flags |= WM_F_BUS64;
2140 		if ((reg & STATUS_PCIX_MODE) != 0) {
2141 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
2142 
2143 			sc->sc_flags |= WM_F_PCIX;
2144 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2145 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
2146 				aprint_error_dev(sc->sc_dev,
2147 				    "unable to find PCIX capability\n");
2148 			else if (sc->sc_type != WM_T_82545_3 &&
2149 				 sc->sc_type != WM_T_82546_3) {
2150 				/*
2151 				 * Work around a problem caused by the BIOS
2152 				 * setting the max memory read byte count
2153 				 * incorrectly.
2154 				 */
2155 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
2156 				    sc->sc_pcixe_capoff + PCIX_CMD);
2157 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
2158 				    sc->sc_pcixe_capoff + PCIX_STATUS);
2159 
2160 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
2161 				    PCIX_CMD_BYTECNT_SHIFT;
2162 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
2163 				    PCIX_STATUS_MAXB_SHIFT;
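				/*
				 * Both fields encode a byte count of
				 * (512 << n), so e.g. bytecnt 2 means
				 * 2048 bytes; if maxb is smaller, the
				 * command register is clamped to match.
				 */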
2164 				if (bytecnt > maxb) {
2165 					aprint_verbose_dev(sc->sc_dev,
2166 					    "resetting PCI-X MMRBC: %d -> %d\n",
2167 					    512 << bytecnt, 512 << maxb);
2168 					pcix_cmd = (pcix_cmd &
2169 					    ~PCIX_CMD_BYTECNT_MASK) |
2170 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
2171 					pci_conf_write(pa->pa_pc, pa->pa_tag,
2172 					    sc->sc_pcixe_capoff + PCIX_CMD,
2173 					    pcix_cmd);
2174 				}
2175 			}
2176 		}
2177 		/*
2178 		 * The quad port adapter is special; it has a PCIX-PCIX
2179 		 * bridge on the board, and can run the secondary bus at
2180 		 * a higher speed.
2181 		 */
2182 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
2183 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
2184 								      : 66;
2185 		} else if (sc->sc_flags & WM_F_PCIX) {
2186 			switch (reg & STATUS_PCIXSPD_MASK) {
2187 			case STATUS_PCIXSPD_50_66:
2188 				sc->sc_bus_speed = 66;
2189 				break;
2190 			case STATUS_PCIXSPD_66_100:
2191 				sc->sc_bus_speed = 100;
2192 				break;
2193 			case STATUS_PCIXSPD_100_133:
2194 				sc->sc_bus_speed = 133;
2195 				break;
2196 			default:
2197 				aprint_error_dev(sc->sc_dev,
2198 				    "unknown PCIXSPD %d; assuming 66MHz\n",
2199 				    reg & STATUS_PCIXSPD_MASK);
2200 				sc->sc_bus_speed = 66;
2201 				break;
2202 			}
2203 		} else
2204 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
2205 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
2206 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
2207 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
2208 	}
2209 
2210 	/* clear interesting stat counters */
2211 	CSR_READ(sc, WMREG_COLC);
2212 	CSR_READ(sc, WMREG_RXERRC);
2213 
2214 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
2215 	    || (sc->sc_type >= WM_T_ICH8))
2216 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2217 	if (sc->sc_type >= WM_T_ICH8)
2218 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2219 
2220 	/* Set PHY, NVM mutex related stuff */
2221 	switch (sc->sc_type) {
2222 	case WM_T_82542_2_0:
2223 	case WM_T_82542_2_1:
2224 	case WM_T_82543:
2225 	case WM_T_82544:
2226 		/* Microwire */
2227 		sc->nvm.read = wm_nvm_read_uwire;
2228 		sc->sc_nvm_wordsize = 64;
2229 		sc->sc_nvm_addrbits = 6;
2230 		break;
2231 	case WM_T_82540:
2232 	case WM_T_82545:
2233 	case WM_T_82545_3:
2234 	case WM_T_82546:
2235 	case WM_T_82546_3:
2236 		/* Microwire */
2237 		sc->nvm.read = wm_nvm_read_uwire;
2238 		reg = CSR_READ(sc, WMREG_EECD);
2239 		if (reg & EECD_EE_SIZE) {
2240 			sc->sc_nvm_wordsize = 256;
2241 			sc->sc_nvm_addrbits = 8;
2242 		} else {
2243 			sc->sc_nvm_wordsize = 64;
2244 			sc->sc_nvm_addrbits = 6;
2245 		}
2246 		sc->sc_flags |= WM_F_LOCK_EECD;
2247 		sc->nvm.acquire = wm_get_eecd;
2248 		sc->nvm.release = wm_put_eecd;
2249 		break;
2250 	case WM_T_82541:
2251 	case WM_T_82541_2:
2252 	case WM_T_82547:
2253 	case WM_T_82547_2:
2254 		reg = CSR_READ(sc, WMREG_EECD);
2255 		/*
2256 		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI only on
2257 		 * 8254[17], so set flags and functions before calling it.
2258 		 */
2259 		sc->sc_flags |= WM_F_LOCK_EECD;
2260 		sc->nvm.acquire = wm_get_eecd;
2261 		sc->nvm.release = wm_put_eecd;
2262 		if (reg & EECD_EE_TYPE) {
2263 			/* SPI */
2264 			sc->nvm.read = wm_nvm_read_spi;
2265 			sc->sc_flags |= WM_F_EEPROM_SPI;
2266 			wm_nvm_set_addrbits_size_eecd(sc);
2267 		} else {
2268 			/* Microwire */
2269 			sc->nvm.read = wm_nvm_read_uwire;
2270 			if ((reg & EECD_EE_ABITS) != 0) {
2271 				sc->sc_nvm_wordsize = 256;
2272 				sc->sc_nvm_addrbits = 8;
2273 			} else {
2274 				sc->sc_nvm_wordsize = 64;
2275 				sc->sc_nvm_addrbits = 6;
2276 			}
2277 		}
2278 		break;
2279 	case WM_T_82571:
2280 	case WM_T_82572:
2281 		/* SPI */
2282 		sc->nvm.read = wm_nvm_read_eerd;
2283 		/* Don't use WM_F_LOCK_EECD because we use EERD */
2284 		sc->sc_flags |= WM_F_EEPROM_SPI;
2285 		wm_nvm_set_addrbits_size_eecd(sc);
2286 		sc->phy.acquire = wm_get_swsm_semaphore;
2287 		sc->phy.release = wm_put_swsm_semaphore;
2288 		sc->nvm.acquire = wm_get_nvm_82571;
2289 		sc->nvm.release = wm_put_nvm_82571;
2290 		break;
2291 	case WM_T_82573:
2292 	case WM_T_82574:
2293 	case WM_T_82583:
2294 		sc->nvm.read = wm_nvm_read_eerd;
2295 		/* Don't use WM_F_LOCK_EECD because we use EERD */
2296 		if (sc->sc_type == WM_T_82573) {
2297 			sc->phy.acquire = wm_get_swsm_semaphore;
2298 			sc->phy.release = wm_put_swsm_semaphore;
2299 			sc->nvm.acquire = wm_get_nvm_82571;
2300 			sc->nvm.release = wm_put_nvm_82571;
2301 		} else {
2302 			/* Both PHY and NVM use the same semaphore. */
2303 			sc->phy.acquire = sc->nvm.acquire
2304 			    = wm_get_swfwhw_semaphore;
2305 			sc->phy.release = sc->nvm.release
2306 			    = wm_put_swfwhw_semaphore;
2307 		}
2308 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
2309 			sc->sc_flags |= WM_F_EEPROM_FLASH;
2310 			sc->sc_nvm_wordsize = 2048;
2311 		} else {
2312 			/* SPI */
2313 			sc->sc_flags |= WM_F_EEPROM_SPI;
2314 			wm_nvm_set_addrbits_size_eecd(sc);
2315 		}
2316 		break;
2317 	case WM_T_82575:
2318 	case WM_T_82576:
2319 	case WM_T_82580:
2320 	case WM_T_I350:
2321 	case WM_T_I354:
2322 	case WM_T_80003:
2323 		/* SPI */
2324 		sc->sc_flags |= WM_F_EEPROM_SPI;
2325 		wm_nvm_set_addrbits_size_eecd(sc);
2326 		if ((sc->sc_type == WM_T_80003)
2327 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
2328 			sc->nvm.read = wm_nvm_read_eerd;
2329 			/* Don't use WM_F_LOCK_EECD because we use EERD */
2330 		} else {
2331 			sc->nvm.read = wm_nvm_read_spi;
2332 			sc->sc_flags |= WM_F_LOCK_EECD;
2333 		}
2334 		sc->phy.acquire = wm_get_phy_82575;
2335 		sc->phy.release = wm_put_phy_82575;
2336 		sc->nvm.acquire = wm_get_nvm_80003;
2337 		sc->nvm.release = wm_put_nvm_80003;
2338 		break;
2339 	case WM_T_ICH8:
2340 	case WM_T_ICH9:
2341 	case WM_T_ICH10:
2342 	case WM_T_PCH:
2343 	case WM_T_PCH2:
2344 	case WM_T_PCH_LPT:
2345 		sc->nvm.read = wm_nvm_read_ich8;
2346 		/* FLASH */
2347 		sc->sc_flags |= WM_F_EEPROM_FLASH;
2348 		sc->sc_nvm_wordsize = 2048;
2349 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
2350 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2351 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2352 			aprint_error_dev(sc->sc_dev,
2353 			    "can't map FLASH registers\n");
2354 			goto out;
2355 		}
2356 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2357 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2358 		    ICH_FLASH_SECTOR_SIZE;
2359 		sc->sc_ich8_flash_bank_size =
2360 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2361 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
2362 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2363 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
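		/*
		 * That is, GFPREG gives the NVM region as flash sectors:
		 * the low half is the first sector, the upper half the
		 * last one; (limit + 1 - base) sectors are then split
		 * into two banks and converted from bytes to 16-bit words.
		 */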
2364 		sc->sc_flashreg_offset = 0;
2365 		sc->phy.acquire = wm_get_swflag_ich8lan;
2366 		sc->phy.release = wm_put_swflag_ich8lan;
2367 		sc->nvm.acquire = wm_get_nvm_ich8lan;
2368 		sc->nvm.release = wm_put_nvm_ich8lan;
2369 		break;
2370 	case WM_T_PCH_SPT:
2371 	case WM_T_PCH_CNP:
2372 		sc->nvm.read = wm_nvm_read_spt;
2373 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
2374 		sc->sc_flags |= WM_F_EEPROM_FLASH;
2375 		sc->sc_flasht = sc->sc_st;
2376 		sc->sc_flashh = sc->sc_sh;
2377 		sc->sc_ich8_flash_base = 0;
2378 		sc->sc_nvm_wordsize =
2379 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
2380 		    * NVM_SIZE_MULTIPLIER;
2381 		/* It is size in bytes, we want words */
2382 		sc->sc_nvm_wordsize /= 2;
2383 		/* Assume 2 banks */
2384 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
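		/*
		 * For example, a STRAP field value of 7 would mean
		 * (7 + 1) * NVM_SIZE_MULTIPLIER bytes of flash, i.e.
		 * half that many 16-bit words, split evenly into the
		 * two assumed banks.
		 */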
2385 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
2386 		sc->phy.acquire = wm_get_swflag_ich8lan;
2387 		sc->phy.release = wm_put_swflag_ich8lan;
2388 		sc->nvm.acquire = wm_get_nvm_ich8lan;
2389 		sc->nvm.release = wm_put_nvm_ich8lan;
2390 		break;
2391 	case WM_T_I210:
2392 	case WM_T_I211:
2393 		/* Allow one clear of the SW semaphore on I210 and newer */
2394 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
2395 		if (wm_nvm_flash_presence_i210(sc)) {
2396 			sc->nvm.read = wm_nvm_read_eerd;
2397 			/* Don't use WM_F_LOCK_EECD because we use EERD */
2398 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2399 			wm_nvm_set_addrbits_size_eecd(sc);
2400 		} else {
2401 			sc->nvm.read = wm_nvm_read_invm;
2402 			sc->sc_flags |= WM_F_EEPROM_INVM;
2403 			sc->sc_nvm_wordsize = INVM_SIZE;
2404 		}
2405 		sc->phy.acquire = wm_get_phy_82575;
2406 		sc->phy.release = wm_put_phy_82575;
2407 		sc->nvm.acquire = wm_get_nvm_80003;
2408 		sc->nvm.release = wm_put_nvm_80003;
2409 		break;
2410 	default:
2411 		break;
2412 	}
2413 
2414 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
2415 	switch (sc->sc_type) {
2416 	case WM_T_82571:
2417 	case WM_T_82572:
2418 		reg = CSR_READ(sc, WMREG_SWSM2);
2419 		if ((reg & SWSM2_LOCK) == 0) {
2420 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2421 			force_clear_smbi = true;
2422 		} else
2423 			force_clear_smbi = false;
2424 		break;
2425 	case WM_T_82573:
2426 	case WM_T_82574:
2427 	case WM_T_82583:
2428 		force_clear_smbi = true;
2429 		break;
2430 	default:
2431 		force_clear_smbi = false;
2432 		break;
2433 	}
2434 	if (force_clear_smbi) {
2435 		reg = CSR_READ(sc, WMREG_SWSM);
2436 		if ((reg & SWSM_SMBI) != 0)
2437 			aprint_error_dev(sc->sc_dev,
2438 			    "Please update the Bootagent\n");
2439 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2440 	}
2441 
2442 	/*
2443 	 * Defer printing the EEPROM type until after verifying the checksum.
2444 	 * This allows the EEPROM type to be printed correctly in the case
2445 	 * that no EEPROM is attached.
2446 	 */
2447 	/*
2448 	 * Validate the EEPROM checksum. If the checksum fails, flag
2449 	 * this for later, so we can fail future reads from the EEPROM.
2450 	 */
2451 	if (wm_nvm_validate_checksum(sc)) {
2452 		/*
2453 		 * Read it again, because some PCI-e parts fail the
2454 		 * first check due to the link being in a sleep state.
2455 		 */
2456 		if (wm_nvm_validate_checksum(sc))
2457 			sc->sc_flags |= WM_F_EEPROM_INVALID;
2458 	}
2459 
2460 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
2461 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2462 	else {
2463 		aprint_verbose_dev(sc->sc_dev, "%u words ",
2464 		    sc->sc_nvm_wordsize);
2465 		if (sc->sc_flags & WM_F_EEPROM_INVM)
2466 			aprint_verbose("iNVM");
2467 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2468 			aprint_verbose("FLASH(HW)");
2469 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2470 			aprint_verbose("FLASH");
2471 		else {
2472 			if (sc->sc_flags & WM_F_EEPROM_SPI)
2473 				eetype = "SPI";
2474 			else
2475 				eetype = "MicroWire";
2476 			aprint_verbose("(%d address bits) %s EEPROM",
2477 			    sc->sc_nvm_addrbits, eetype);
2478 		}
2479 	}
2480 	wm_nvm_version(sc);
2481 	aprint_verbose("\n");
2482 
2483 	/*
2484 	 * XXX The first call of wm_gmii_setup_phytype. The result might be
2485 	 * incorrect.
2486 	 */
2487 	wm_gmii_setup_phytype(sc, 0, 0);
2488 
2489 	/* Check for WM_F_WOL on some chips before wm_reset() */
2490 	switch (sc->sc_type) {
2491 	case WM_T_ICH8:
2492 	case WM_T_ICH9:
2493 	case WM_T_ICH10:
2494 	case WM_T_PCH:
2495 	case WM_T_PCH2:
2496 	case WM_T_PCH_LPT:
2497 	case WM_T_PCH_SPT:
2498 	case WM_T_PCH_CNP:
2499 		apme_mask = WUC_APME;
2500 		eeprom_data = CSR_READ(sc, WMREG_WUC);
2501 		if ((eeprom_data & apme_mask) != 0)
2502 			sc->sc_flags |= WM_F_WOL;
2503 		break;
2504 	default:
2505 		break;
2506 	}
2507 
2508 	/* Reset the chip to a known state. */
2509 	wm_reset(sc);
2510 
2511 	/*
2512 	 * Check for I21[01] PLL workaround.
2513 	 *
2514 	 * Three cases:
2515 	 * a) Chip is I211.
2516 	 * b) Chip is I210 and it uses INVM (not FLASH).
2517 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
2518 	 */
2519 	if (sc->sc_type == WM_T_I211)
2520 		sc->sc_flags |= WM_F_PLL_WA_I210;
2521 	if (sc->sc_type == WM_T_I210) {
2522 		if (!wm_nvm_flash_presence_i210(sc))
2523 			sc->sc_flags |= WM_F_PLL_WA_I210;
2524 		else if ((sc->sc_nvm_ver_major < 3)
2525 		    || ((sc->sc_nvm_ver_major == 3)
2526 			&& (sc->sc_nvm_ver_minor < 25))) {
2527 			aprint_verbose_dev(sc->sc_dev,
2528 			    "ROM image version %d.%d is older than 3.25\n",
2529 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2530 			sc->sc_flags |= WM_F_PLL_WA_I210;
2531 		}
2532 	}
2533 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2534 		wm_pll_workaround_i210(sc);
2535 
2536 	wm_get_wakeup(sc);
2537 
2538 	/* Non-AMT based hardware can now take control from firmware */
2539 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2540 		wm_get_hw_control(sc);
2541 
2542 	/*
2543 	 * Read the Ethernet address from the EEPROM, if not first found
2544 	 * in device properties.
2545 	 */
2546 	ea = prop_dictionary_get(dict, "mac-address");
2547 	if (ea != NULL) {
2548 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2549 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2550 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
2551 	} else {
2552 		if (wm_read_mac_addr(sc, enaddr) != 0) {
2553 			aprint_error_dev(sc->sc_dev,
2554 			    "unable to read Ethernet address\n");
2555 			goto out;
2556 		}
2557 	}
2558 
2559 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2560 	    ether_sprintf(enaddr));
2561 
2562 	/*
2563 	 * Read the config info from the EEPROM, and set up various
2564 	 * bits in the control registers based on their contents.
2565 	 */
2566 	pn = prop_dictionary_get(dict, "i82543-cfg1");
2567 	if (pn != NULL) {
2568 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2569 		cfg1 = (uint16_t) prop_number_signed_value(pn);
2570 	} else {
2571 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2572 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2573 			goto out;
2574 		}
2575 	}
2576 
2577 	pn = prop_dictionary_get(dict, "i82543-cfg2");
2578 	if (pn != NULL) {
2579 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2580 		cfg2 = (uint16_t) prop_number_signed_value(pn);
2581 	} else {
2582 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2583 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2584 			goto out;
2585 		}
2586 	}
2587 
2588 	/* check for WM_F_WOL */
2589 	switch (sc->sc_type) {
2590 	case WM_T_82542_2_0:
2591 	case WM_T_82542_2_1:
2592 	case WM_T_82543:
2593 		/* dummy? */
2594 		eeprom_data = 0;
2595 		apme_mask = NVM_CFG3_APME;
2596 		break;
2597 	case WM_T_82544:
2598 		apme_mask = NVM_CFG2_82544_APM_EN;
2599 		eeprom_data = cfg2;
2600 		break;
2601 	case WM_T_82546:
2602 	case WM_T_82546_3:
2603 	case WM_T_82571:
2604 	case WM_T_82572:
2605 	case WM_T_82573:
2606 	case WM_T_82574:
2607 	case WM_T_82583:
2608 	case WM_T_80003:
2609 	case WM_T_82575:
2610 	case WM_T_82576:
2611 		apme_mask = NVM_CFG3_APME;
2612 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2613 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2614 		break;
2615 	case WM_T_82580:
2616 	case WM_T_I350:
2617 	case WM_T_I354:
2618 	case WM_T_I210:
2619 	case WM_T_I211:
2620 		apme_mask = NVM_CFG3_APME;
2621 		wm_nvm_read(sc,
2622 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2623 		    1, &eeprom_data);
2624 		break;
2625 	case WM_T_ICH8:
2626 	case WM_T_ICH9:
2627 	case WM_T_ICH10:
2628 	case WM_T_PCH:
2629 	case WM_T_PCH2:
2630 	case WM_T_PCH_LPT:
2631 	case WM_T_PCH_SPT:
2632 	case WM_T_PCH_CNP:
2633 		/* Already checked before wm_reset() */
2634 		apme_mask = eeprom_data = 0;
2635 		break;
2636 	default: /* XXX 82540 */
2637 		apme_mask = NVM_CFG3_APME;
2638 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2639 		break;
2640 	}
2641 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2642 	if ((eeprom_data & apme_mask) != 0)
2643 		sc->sc_flags |= WM_F_WOL;
2644 
2645 	/*
2646 	 * We have the EEPROM settings; now apply the special cases
2647 	 * where the EEPROM may be wrong or the board won't support
2648 	 * wake-on-LAN on a particular port.
2649 	 */
2650 	switch (sc->sc_pcidevid) {
2651 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
2652 		sc->sc_flags &= ~WM_F_WOL;
2653 		break;
2654 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
2655 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
2656 		/* Wake events only supported on port A for dual fiber
2657 		 * regardless of eeprom setting */
2658 		if (sc->sc_funcid == 1)
2659 			sc->sc_flags &= ~WM_F_WOL;
2660 		break;
2661 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
2662 		/* If quad port adapter, disable WoL on all but port A */
2663 		if (sc->sc_funcid != 0)
2664 			sc->sc_flags &= ~WM_F_WOL;
2665 		break;
2666 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
2667 		/* Wake events only supported on port A for dual fiber
2668 		 * regardless of eeprom setting */
2669 		if (sc->sc_funcid == 1)
2670 			sc->sc_flags &= ~WM_F_WOL;
2671 		break;
2672 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
2673 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
2674 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
2675 		/* If quad port adapter, disable WoL on all but port A */
2676 		if (sc->sc_funcid != 0)
2677 			sc->sc_flags &= ~WM_F_WOL;
2678 		break;
2679 	}
2680 
2681 	if (sc->sc_type >= WM_T_82575) {
2682 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2683 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
2684 			    nvmword);
2685 			if ((sc->sc_type == WM_T_82575) ||
2686 			    (sc->sc_type == WM_T_82576)) {
2687 				/* Check NVM for autonegotiation */
2688 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
2689 				    != 0)
2690 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2691 			}
2692 			if ((sc->sc_type == WM_T_82575) ||
2693 			    (sc->sc_type == WM_T_I350)) {
2694 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
2695 					sc->sc_flags |= WM_F_MAS;
2696 			}
2697 		}
2698 	}
2699 
2700 	/*
2701 	 * XXX need special handling for some multi-port cards
2702 	 * to disable a particular port.
2703 	 */
2704 
2705 	if (sc->sc_type >= WM_T_82544) {
2706 		pn = prop_dictionary_get(dict, "i82543-swdpin");
2707 		if (pn != NULL) {
2708 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2709 			swdpin = (uint16_t) prop_number_signed_value(pn);
2710 		} else {
2711 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2712 				aprint_error_dev(sc->sc_dev,
2713 				    "unable to read SWDPIN\n");
2714 				goto out;
2715 			}
2716 		}
2717 	}
2718 
2719 	if (cfg1 & NVM_CFG1_ILOS)
2720 		sc->sc_ctrl |= CTRL_ILOS;
2721 
2722 	/*
2723 	 * XXX
2724 	 * This code isn't correct because pins 2 and 3 are located
2725 	 * at different positions on newer chips. Check all datasheets.
2726 	 *
2727 	 * Until this problem is resolved, restrict this to chips <= 82580.
2728 	 */
2729 	if (sc->sc_type <= WM_T_82580) {
2730 		if (sc->sc_type >= WM_T_82544) {
2731 			sc->sc_ctrl |=
2732 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2733 			    CTRL_SWDPIO_SHIFT;
2734 			sc->sc_ctrl |=
2735 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2736 			    CTRL_SWDPINS_SHIFT;
2737 		} else {
2738 			sc->sc_ctrl |=
2739 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2740 			    CTRL_SWDPIO_SHIFT;
2741 		}
2742 	}
2743 
2744 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
2745 		wm_nvm_read(sc,
2746 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2747 		    1, &nvmword);
2748 		if (nvmword & NVM_CFG3_ILOS)
2749 			sc->sc_ctrl |= CTRL_ILOS;
2750 	}
2751 
2752 #if 0
2753 	if (sc->sc_type >= WM_T_82544) {
2754 		if (cfg1 & NVM_CFG1_IPS0)
2755 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2756 		if (cfg1 & NVM_CFG1_IPS1)
2757 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2758 		sc->sc_ctrl_ext |=
2759 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2760 		    CTRL_EXT_SWDPIO_SHIFT;
2761 		sc->sc_ctrl_ext |=
2762 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2763 		    CTRL_EXT_SWDPINS_SHIFT;
2764 	} else {
2765 		sc->sc_ctrl_ext |=
2766 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2767 		    CTRL_EXT_SWDPIO_SHIFT;
2768 	}
2769 #endif
2770 
2771 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2772 #if 0
2773 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2774 #endif
2775 
2776 	if (sc->sc_type == WM_T_PCH) {
2777 		uint16_t val;
2778 
2779 		/* Save the NVM K1 bit setting */
2780 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2781 
2782 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2783 			sc->sc_nvm_k1_enabled = 1;
2784 		else
2785 			sc->sc_nvm_k1_enabled = 0;
2786 	}
2787 
2788 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
2789 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2790 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2791 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2792 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
2793 	    || sc->sc_type == WM_T_82573
2794 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2795 		/* Copper only */
2796 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2797 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
2798 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
2799 	    || (sc->sc_type == WM_T_I211)) {
2800 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
2801 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2802 		switch (link_mode) {
2803 		case CTRL_EXT_LINK_MODE_1000KX:
2804 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
2805 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2806 			break;
2807 		case CTRL_EXT_LINK_MODE_SGMII:
2808 			if (wm_sgmii_uses_mdio(sc)) {
2809 				aprint_normal_dev(sc->sc_dev,
2810 				    "SGMII(MDIO)\n");
2811 				sc->sc_flags |= WM_F_SGMII;
2812 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2813 				break;
2814 			}
2815 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2816 			/*FALLTHROUGH*/
2817 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2818 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
2819 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2820 				if (link_mode
2821 				    == CTRL_EXT_LINK_MODE_SGMII) {
2822 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2823 					sc->sc_flags |= WM_F_SGMII;
2824 					aprint_verbose_dev(sc->sc_dev,
2825 					    "SGMII\n");
2826 				} else {
2827 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2828 					aprint_verbose_dev(sc->sc_dev,
2829 					    "SERDES\n");
2830 				}
2831 				break;
2832 			}
2833 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2834 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
2835 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2836 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
2837 				sc->sc_flags |= WM_F_SGMII;
2838 			}
2839 			/* Do not change link mode for 100BaseFX */
2840 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
2841 				break;
2842 
2843 			/* Change current link mode setting */
2844 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
2845 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2846 				reg |= CTRL_EXT_LINK_MODE_SGMII;
2847 			else
2848 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2849 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2850 			break;
2851 		case CTRL_EXT_LINK_MODE_GMII:
2852 		default:
2853 			aprint_normal_dev(sc->sc_dev, "Copper\n");
2854 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2855 			break;
2856 		}
2857 
2858 		/* Enable the I2C interface only in SGMII mode. */
2859 		if ((sc->sc_flags & WM_F_SGMII) != 0)
2860 			reg |= CTRL_EXT_I2C_ENA;
2861 		else
2862 			reg &= ~CTRL_EXT_I2C_ENA;
2863 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2864 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
2865 			wm_gmii_setup_phytype(sc, 0, 0);
2866 			wm_reset_mdicnfg_82580(sc);
2867 		}
2868 	} else if (sc->sc_type < WM_T_82543 ||
2869 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2870 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2871 			aprint_error_dev(sc->sc_dev,
2872 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
2873 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2874 		}
2875 	} else {
2876 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
2877 			aprint_error_dev(sc->sc_dev,
2878 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2879 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2880 		}
2881 	}
2882 
2883 	if (sc->sc_type >= WM_T_PCH2)
2884 		sc->sc_flags |= WM_F_EEE;
2885 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
2886 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
2887 		/* XXX: Need special handling for I354. (not yet) */
2888 		if (sc->sc_type != WM_T_I354)
2889 			sc->sc_flags |= WM_F_EEE;
2890 	}
2891 
2892 	/*
2893 	 * The I350 has a bug where it always strips the CRC whether
2894 	 * asked to or not, so ask for stripped CRC here and cope in rxeof.
2895 	 */
2896 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
2897 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
2898 		sc->sc_flags |= WM_F_CRC_STRIP;
2899 
2900 	/* Set device properties (macflags) */
2901 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
2902 
2903 	if (sc->sc_flags != 0) {
2904 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
2905 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
2906 	}
2907 
2908 #ifdef WM_MPSAFE
2909 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2910 #else
2911 	sc->sc_core_lock = NULL;
2912 #endif
2913 
2914 	/* Initialize the media structures accordingly. */
2915 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2916 		wm_gmii_mediainit(sc, wmp->wmp_product);
2917 	else
2918 		wm_tbi_mediainit(sc); /* All others */
2919 
2920 	ifp = &sc->sc_ethercom.ec_if;
2921 	xname = device_xname(sc->sc_dev);
2922 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2923 	ifp->if_softc = sc;
2924 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2925 #ifdef WM_MPSAFE
2926 	ifp->if_extflags = IFEF_MPSAFE;
2927 #endif
2928 	ifp->if_ioctl = wm_ioctl;
2929 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
2930 		ifp->if_start = wm_nq_start;
2931 		/*
2932 		 * When the number of CPUs is one and the controller can use
2933 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
2934 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
2935 		 * the other for link status changes.
2936 		 * In this situation, wm_nq_transmit() is disadvantageous
2937 		 * because of wm_select_txqueue() and pcq(9) overhead.
2938 		 */
2939 		if (wm_is_using_multiqueue(sc))
2940 			ifp->if_transmit = wm_nq_transmit;
2941 	} else {
2942 		ifp->if_start = wm_start;
2943 		/*
2944 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
2945 		 */
2946 		if (wm_is_using_multiqueue(sc))
2947 			ifp->if_transmit = wm_transmit;
2948 	}
2949 	/* wm(4) does not use ifp->if_watchdog; wm_tick serves as watchdog. */
2950 	ifp->if_init = wm_init;
2951 	ifp->if_stop = wm_stop;
2952 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
2953 	IFQ_SET_READY(&ifp->if_snd);
2954 
2955 	/* Check for jumbo frame */
2956 	switch (sc->sc_type) {
2957 	case WM_T_82573:
2958 		/* XXX limited to 9234 if ASPM is disabled */
2959 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2960 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2961 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2962 		break;
2963 	case WM_T_82571:
2964 	case WM_T_82572:
2965 	case WM_T_82574:
2966 	case WM_T_82583:
2967 	case WM_T_82575:
2968 	case WM_T_82576:
2969 	case WM_T_82580:
2970 	case WM_T_I350:
2971 	case WM_T_I354:
2972 	case WM_T_I210:
2973 	case WM_T_I211:
2974 	case WM_T_80003:
2975 	case WM_T_ICH9:
2976 	case WM_T_ICH10:
2977 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
2978 	case WM_T_PCH_LPT:
2979 	case WM_T_PCH_SPT:
2980 	case WM_T_PCH_CNP:
2981 		/* XXX limited to 9234 */
2982 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2983 		break;
2984 	case WM_T_PCH:
2985 		/* XXX limited to 4096 */
2986 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2987 		break;
2988 	case WM_T_82542_2_0:
2989 	case WM_T_82542_2_1:
2990 	case WM_T_ICH8:
2991 		/* No support for jumbo frame */
2992 		break;
2993 	default:
2994 		/* ETHER_MAX_LEN_JUMBO */
2995 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2996 		break;
2997 	}
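	/*
	 * Note that this switch only records the ETHERCAP_JUMBO_MTU
	 * capability; the per-chip byte limits noted in the XXX comments
	 * above are not enforced here.
	 */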
2998 
2999 	/* If we're a i82543 or greater, we can support VLANs. */
3000 	if (sc->sc_type >= WM_T_82543) {
3001 		sc->sc_ethercom.ec_capabilities |=
3002 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
3003 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
3004 	}
3005 
3006 	if ((sc->sc_flags & WM_F_EEE) != 0)
3007 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
3008 
3009 	/*
3010 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
3011 	 * on i82543 and later.
3012 	 */
3013 	if (sc->sc_type >= WM_T_82543) {
3014 		ifp->if_capabilities |=
3015 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
3016 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
3017 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
3018 		    IFCAP_CSUM_TCPv6_Tx |
3019 		    IFCAP_CSUM_UDPv6_Tx;
3020 	}
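	/*
	 * These are capabilities only; none of them is enabled by
	 * default. They can be toggled from userland, e.g. with
	 * "ifconfig wm0 tcp4csum" (see ifconfig(8) for the capability
	 * names).
	 */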
3021 
3022 	/*
3023 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
3024 	 *
3025 	 *	82541GI (8086:1076) ... no
3026 	 *	82572EI (8086:10b9) ... yes
3027 	 */
3028 	if (sc->sc_type >= WM_T_82571) {
3029 		ifp->if_capabilities |=
3030 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
3031 	}
3032 
3033 	/*
3034 	 * If we're a i82544 or greater (except i82547), we can do
3035 	 * TCP segmentation offload.
3036 	 */
3037 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
3038 		ifp->if_capabilities |= IFCAP_TSOv4;
3039 	}
3040 
3041 	if (sc->sc_type >= WM_T_82571) {
3042 		ifp->if_capabilities |= IFCAP_TSOv6;
3043 	}
3044 
3045 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
3046 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
3047 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
3048 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
3049 
3050 	/* Attach the interface. */
3051 	error = if_initialize(ifp);
3052 	if (error != 0) {
3053 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
3054 		    error);
3055 		return; /* Error */
3056 	}
3057 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
3058 	ether_ifattach(ifp, enaddr);
3059 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
3060 	if_register(ifp);
3061 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
3062 	    RND_FLAG_DEFAULT);
3063 
3064 #ifdef WM_EVENT_COUNTERS
3065 	/* Attach event counters. */
3066 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
3067 	    NULL, xname, "linkintr");
3068 
3069 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
3070 	    NULL, xname, "tx_xoff");
3071 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
3072 	    NULL, xname, "tx_xon");
3073 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
3074 	    NULL, xname, "rx_xoff");
3075 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
3076 	    NULL, xname, "rx_xon");
3077 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
3078 	    NULL, xname, "rx_macctl");
3079 #endif /* WM_EVENT_COUNTERS */
3080 
3081 	sc->sc_txrx_use_workqueue = false;
3082 
3083 	wm_init_sysctls(sc);
3084 
3085 	if (pmf_device_register(self, wm_suspend, wm_resume))
3086 		pmf_class_network_register(self, ifp);
3087 	else
3088 		aprint_error_dev(self, "couldn't establish power handler\n");
3089 
3090 	sc->sc_flags |= WM_F_ATTACHED;
3091 out:
3092 	return;
3093 }
3094 
3095 /* The detach function (ca_detach) */
3096 static int
3097 wm_detach(device_t self, int flags __unused)
3098 {
3099 	struct wm_softc *sc = device_private(self);
3100 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3101 	int i;
3102 
3103 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
3104 		return 0;
3105 
3106 	/* Stop the interface. Callouts are stopped in it. */
3107 	wm_stop(ifp, 1);
3108 
3109 	pmf_device_deregister(self);
3110 
3111 	sysctl_teardown(&sc->sc_sysctllog);
3112 
3113 #ifdef WM_EVENT_COUNTERS
3114 	evcnt_detach(&sc->sc_ev_linkintr);
3115 
3116 	evcnt_detach(&sc->sc_ev_tx_xoff);
3117 	evcnt_detach(&sc->sc_ev_tx_xon);
3118 	evcnt_detach(&sc->sc_ev_rx_xoff);
3119 	evcnt_detach(&sc->sc_ev_rx_xon);
3120 	evcnt_detach(&sc->sc_ev_rx_macctl);
3121 #endif /* WM_EVENT_COUNTERS */
3122 
3123 	rnd_detach_source(&sc->rnd_source);
3124 
3125 	/* Tell the firmware about the release */
3126 	WM_CORE_LOCK(sc);
3127 	wm_release_manageability(sc);
3128 	wm_release_hw_control(sc);
3129 	wm_enable_wakeup(sc);
3130 	WM_CORE_UNLOCK(sc);
3131 
3132 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
3133 
3134 	ether_ifdetach(ifp);
3135 	if_detach(ifp);
3136 	if_percpuq_destroy(sc->sc_ipq);
3137 
3138 	/* Delete all remaining media. */
3139 	ifmedia_fini(&sc->sc_mii.mii_media);
3140 
3141 	/* Unload RX dmamaps and free mbufs */
3142 	for (i = 0; i < sc->sc_nqueues; i++) {
3143 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
3144 		mutex_enter(rxq->rxq_lock);
3145 		wm_rxdrain(rxq);
3146 		mutex_exit(rxq->rxq_lock);
3147 	}
3148 	/* Must unlock here */
3149 
3150 	/* Disestablish the interrupt handler */
3151 	for (i = 0; i < sc->sc_nintrs; i++) {
3152 		if (sc->sc_ihs[i] != NULL) {
3153 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
3154 			sc->sc_ihs[i] = NULL;
3155 		}
3156 	}
3157 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
3158 
3159 	/* wm_stop() ensures the workqueue is stopped. */
3160 	workqueue_destroy(sc->sc_queue_wq);
3161 
3162 	for (i = 0; i < sc->sc_nqueues; i++)
3163 		softint_disestablish(sc->sc_queue[i].wmq_si);
3164 
3165 	wm_free_txrx_queues(sc);
3166 
3167 	/* Unmap the registers */
3168 	if (sc->sc_ss) {
3169 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
3170 		sc->sc_ss = 0;
3171 	}
3172 	if (sc->sc_ios) {
3173 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
3174 		sc->sc_ios = 0;
3175 	}
3176 	if (sc->sc_flashs) {
3177 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
3178 		sc->sc_flashs = 0;
3179 	}
3180 
3181 	if (sc->sc_core_lock)
3182 		mutex_obj_free(sc->sc_core_lock);
3183 	if (sc->sc_ich_phymtx)
3184 		mutex_obj_free(sc->sc_ich_phymtx);
3185 	if (sc->sc_ich_nvmmtx)
3186 		mutex_obj_free(sc->sc_ich_nvmmtx);
3187 
3188 	return 0;
3189 }
3190 
3191 static bool
3192 wm_suspend(device_t self, const pmf_qual_t *qual)
3193 {
3194 	struct wm_softc *sc = device_private(self);
3195 
3196 	wm_release_manageability(sc);
3197 	wm_release_hw_control(sc);
3198 	wm_enable_wakeup(sc);
3199 
3200 	return true;
3201 }
3202 
3203 static bool
3204 wm_resume(device_t self, const pmf_qual_t *qual)
3205 {
3206 	struct wm_softc *sc = device_private(self);
3207 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3208 	pcireg_t reg;
3209 	char buf[256];
3210 
3211 	reg = CSR_READ(sc, WMREG_WUS);
3212 	if (reg != 0) {
3213 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
3214 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
3215 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
3216 	}
3217 
3218 	if (sc->sc_type >= WM_T_PCH2)
3219 		wm_resume_workarounds_pchlan(sc);
3220 	if ((ifp->if_flags & IFF_UP) == 0) {
3221 		wm_reset(sc);
3222 		/* Non-AMT based hardware can now take control from firmware */
3223 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
3224 			wm_get_hw_control(sc);
3225 		wm_init_manageability(sc);
3226 	} else {
3227 		/*
3228 		 * We called pmf_class_network_register(), so if_init() is
3229 		 * called automatically when IFF_UP is set. wm_reset(),
3230 		 * wm_get_hw_control() and wm_init_manageability() are called
3231 		 * via wm_init().
3232 		 */
3233 	}
3234 
3235 	return true;
3236 }
3237 
3238 /*
3239  * wm_watchdog:		[ifnet interface function]
3240  *
3241  *	Watchdog timer handler.
3242  */
3243 static void
3244 wm_watchdog(struct ifnet *ifp)
3245 {
3246 	int qid;
3247 	struct wm_softc *sc = ifp->if_softc;
3248 	uint16_t hang_queue = 0; /* Max number of queues is 16 (82576). */
3249 
3250 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
3251 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
3252 
3253 		wm_watchdog_txq(ifp, txq, &hang_queue);
3254 	}
3255 
3256 	/* If any of the queues hung up, reset the interface. */
3257 	if (hang_queue != 0) {
3258 		(void)wm_init(ifp);
3259 
3260 		/*
3261 		 * Some upper layer processing, e.g. ALTQ or a single-CPU
3262 		 * system, still calls ifp->if_start().
3263 		 */
3264 		/* Try to get more packets going. */
3265 		ifp->if_start(ifp);
3266 	}
3267 }
3268 
3269 
3270 static void
3271 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
3272 {
3273 
3274 	mutex_enter(txq->txq_lock);
3275 	if (txq->txq_sending &&
3276 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
3277 		wm_watchdog_txq_locked(ifp, txq, hang);
3278 
3279 	mutex_exit(txq->txq_lock);
3280 }
3281 
3282 static void
3283 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
3284     uint16_t *hang)
3285 {
3286 	struct wm_softc *sc = ifp->if_softc;
3287 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
3288 
3289 	KASSERT(mutex_owned(txq->txq_lock));
3290 
3291 	/*
3292 	 * Since we're using delayed interrupts, sweep up
3293 	 * before we report an error.
3294 	 */
3295 	wm_txeof(txq, UINT_MAX);
3296 
3297 	if (txq->txq_sending)
3298 		*hang |= __BIT(wmq->wmq_id);
3299 
3300 	if (txq->txq_free == WM_NTXDESC(txq)) {
3301 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
3302 		    device_xname(sc->sc_dev));
3303 	} else {
3304 #ifdef WM_DEBUG
3305 		int i, j;
3306 		struct wm_txsoft *txs;
3307 #endif
3308 		log(LOG_ERR,
3309 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3310 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
3311 		    txq->txq_next);
3312 		if_statinc(ifp, if_oerrors);
3313 #ifdef WM_DEBUG
3314 		for (i = txq->txq_sdirty; i != txq->txq_snext;
3315 		    i = WM_NEXTTXS(txq, i)) {
3316 			txs = &txq->txq_soft[i];
3317 			printf("txs %d tx %d -> %d\n",
3318 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
3319 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
3320 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3321 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3322 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
3323 					printf("\t %#08x%08x\n",
3324 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
3325 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
3326 				} else {
3327 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3328 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
3329 					    txq->txq_descs[j].wtx_addr.wa_low);
3330 					printf("\t %#04x%02x%02x%08x\n",
3331 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
3332 					    txq->txq_descs[j].wtx_fields.wtxu_options,
3333 					    txq->txq_descs[j].wtx_fields.wtxu_status,
3334 					    txq->txq_descs[j].wtx_cmdlen);
3335 				}
3336 				if (j == txs->txs_lastdesc)
3337 					break;
3338 			}
3339 		}
3340 #endif
3341 	}
3342 }
3343 
3344 /*
3345  * wm_tick:
3346  *
3347  *	One second timer, used to check link status, sweep up
3348  *	completed transmit jobs, etc.
3349  */
3350 static void
3351 wm_tick(void *arg)
3352 {
3353 	struct wm_softc *sc = arg;
3354 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3355 #ifndef WM_MPSAFE
3356 	int s = splnet();
3357 #endif
3358 
3359 	WM_CORE_LOCK(sc);
3360 
3361 	if (sc->sc_core_stopping) {
3362 		WM_CORE_UNLOCK(sc);
3363 #ifndef WM_MPSAFE
3364 		splx(s);
3365 #endif
3366 		return;
3367 	}
3368 
3369 	if (sc->sc_type >= WM_T_82542_2_1) {
3370 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3371 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3372 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3373 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3374 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3375 	}
3376 
3377 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
3378 	if_statadd_ref(nsr, if_collisions, CSR_READ(sc, WMREG_COLC));
3379 	if_statadd_ref(nsr, if_ierrors, 0ULL /* ensure quad_t */
3380 	    + CSR_READ(sc, WMREG_CRCERRS)
3381 	    + CSR_READ(sc, WMREG_ALGNERRC)
3382 	    + CSR_READ(sc, WMREG_SYMERRC)
3383 	    + CSR_READ(sc, WMREG_RXERRC)
3384 	    + CSR_READ(sc, WMREG_SEC)
3385 	    + CSR_READ(sc, WMREG_CEXTERR)
3386 	    + CSR_READ(sc, WMREG_RLEC));
3387 	/*
3388 	 * WMREG_RNBC is incremented when no buffer is available in host
3389 	 * memory. It is not the number of dropped packets, because the
3390 	 * ethernet controller can still receive packets in that case as
3391 	 * long as there is space in the PHY's FIFO.
3392 	 *
3393 	 * To count WMREG_RNBC events, use a dedicated EVCNT instead of
3394 	 * if_iqdrops.
3395 	 */
3396 	if_statadd_ref(nsr, if_iqdrops, CSR_READ(sc, WMREG_MPC));
3397 	IF_STAT_PUTREF(ifp);
3398 
3399 	if (sc->sc_flags & WM_F_HAS_MII)
3400 		mii_tick(&sc->sc_mii);
3401 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
3402 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3403 		wm_serdes_tick(sc);
3404 	else
3405 		wm_tbi_tick(sc);
3406 
3407 	WM_CORE_UNLOCK(sc);
3408 
3409 	wm_watchdog(ifp);
3410 
3411 	callout_schedule(&sc->sc_tick_ch, hz);
3412 }
3413 
3414 static int
3415 wm_ifflags_cb(struct ethercom *ec)
3416 {
3417 	struct ifnet *ifp = &ec->ec_if;
3418 	struct wm_softc *sc = ifp->if_softc;
3419 	u_short iffchange;
3420 	int ecchange;
3421 	bool needreset = false;
3422 	int rc = 0;
3423 
3424 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3425 		device_xname(sc->sc_dev), __func__));
3426 
3427 	WM_CORE_LOCK(sc);
3428 
3429 	/*
3430 	 * Check for if_flags.
3431 	 * Main usage is to prevent linkdown when opening bpf.
3432 	 */
3433 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
3434 	sc->sc_if_flags = ifp->if_flags;
3435 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
3436 		needreset = true;
3437 		goto ec;
3438 	}
3439 
3440 	/* iff related updates */
3441 	if ((iffchange & IFF_PROMISC) != 0)
3442 		wm_set_filter(sc);
3443 
3444 	wm_set_vlan(sc);
3445 
3446 ec:
3447 	/* Check for ec_capenable. */
3448 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
3449 	sc->sc_ec_capenable = ec->ec_capenable;
3450 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
3451 		needreset = true;
3452 		goto out;
3453 	}
3454 
3455 	/* ec related updates */
3456 	wm_set_eee(sc);
3457 
3458 out:
3459 	if (needreset)
3460 		rc = ENETRESET;
3461 	WM_CORE_UNLOCK(sc);
3462 
3463 	return rc;
3464 }
3465 
3466 /*
3467  * wm_ioctl:		[ifnet interface function]
3468  *
3469  *	Handle control requests from the operator.
3470  */
3471 static int
3472 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3473 {
3474 	struct wm_softc *sc = ifp->if_softc;
3475 	struct ifreq *ifr = (struct ifreq *)data;
3476 	struct ifaddr *ifa = (struct ifaddr *)data;
3477 	struct sockaddr_dl *sdl;
3478 	int s, error;
3479 
3480 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3481 		device_xname(sc->sc_dev), __func__));
3482 
3483 #ifndef WM_MPSAFE
3484 	s = splnet();
3485 #endif
3486 	switch (cmd) {
3487 	case SIOCSIFMEDIA:
3488 		WM_CORE_LOCK(sc);
3489 		/* Flow control requires full-duplex mode. */
3490 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3491 		    (ifr->ifr_media & IFM_FDX) == 0)
3492 			ifr->ifr_media &= ~IFM_ETH_FMASK;
3493 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3494 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3495 				/* We can do both TXPAUSE and RXPAUSE. */
3496 				ifr->ifr_media |=
3497 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3498 			}
3499 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3500 		}
3501 		WM_CORE_UNLOCK(sc);
3502 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
3503 		break;
3504 	case SIOCINITIFADDR:
3505 		WM_CORE_LOCK(sc);
3506 		if (ifa->ifa_addr->sa_family == AF_LINK) {
3507 			sdl = satosdl(ifp->if_dl->ifa_addr);
3508 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3509 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3510 			/* Unicast address is the first multicast entry */
3511 			wm_set_filter(sc);
3512 			error = 0;
3513 			WM_CORE_UNLOCK(sc);
3514 			break;
3515 		}
3516 		WM_CORE_UNLOCK(sc);
3517 		/*FALLTHROUGH*/
3518 	default:
3519 #ifdef WM_MPSAFE
3520 		s = splnet();
3521 #endif
3522 		/* It may call wm_start, so unlock here */
3523 		error = ether_ioctl(ifp, cmd, data);
3524 #ifdef WM_MPSAFE
3525 		splx(s);
3526 #endif
3527 		if (error != ENETRESET)
3528 			break;
3529 
3530 		error = 0;
3531 
3532 		if (cmd == SIOCSIFCAP)
3533 			error = (*ifp->if_init)(ifp);
3534 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3535 			;
3536 		else if (ifp->if_flags & IFF_RUNNING) {
3537 			/*
3538 			 * Multicast list has changed; set the hardware filter
3539 			 * accordingly.
3540 			 */
3541 			WM_CORE_LOCK(sc);
3542 			wm_set_filter(sc);
3543 			WM_CORE_UNLOCK(sc);
3544 		}
3545 		break;
3546 	}
3547 
3548 #ifndef WM_MPSAFE
3549 	splx(s);
3550 #endif
3551 	return error;
3552 }
3553 
3554 /* MAC address related */
3555 
3556 /*
3557  * Get the offset of the MAC address and return it.
3558  * If an error occurred, use offset 0.
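 *
 * For illustration: a pointer word of 0x0000 or 0xffff means "no
 * alternative address", and the first address word is rejected when its
 * low byte has the multicast (I/G) bit set, e.g. myea[0] = 0x0100 is
 * acceptable while myea[0] = 0x0101 is not.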
3559  */
3560 static uint16_t
3561 wm_check_alt_mac_addr(struct wm_softc *sc)
3562 {
3563 	uint16_t myea[ETHER_ADDR_LEN / 2];
3564 	uint16_t offset = NVM_OFF_MACADDR;
3565 
3566 	/* Try to read alternative MAC address pointer */
3567 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
3568 		return 0;
3569 
3570 	/* Check whether the pointer is valid. */
3571 	if ((offset == 0x0000) || (offset == 0xffff))
3572 		return 0;
3573 
3574 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
3575 	/*
3576 	 * Check whether the alternative MAC address is valid. Some cards
3577 	 * have a non-0xffff pointer but don't actually use an alternative
3578 	 * MAC address.
3579 	 *
3580 	 * Check that the multicast (I/G) bit is clear.
3581 	 */
3582 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
3583 		if (((myea[0] & 0xff) & 0x01) == 0)
3584 			return offset; /* Found */
3585 
3586 	/* Not found */
3587 	return 0;
3588 }
3589 
3590 static int
3591 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
3592 {
3593 	uint16_t myea[ETHER_ADDR_LEN / 2];
3594 	uint16_t offset = NVM_OFF_MACADDR;
3595 	int do_invert = 0;
3596 
3597 	switch (sc->sc_type) {
3598 	case WM_T_82580:
3599 	case WM_T_I350:
3600 	case WM_T_I354:
3601 		/* EEPROM Top Level Partitioning */
3602 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
3603 		break;
3604 	case WM_T_82571:
3605 	case WM_T_82575:
3606 	case WM_T_82576:
3607 	case WM_T_80003:
3608 	case WM_T_I210:
3609 	case WM_T_I211:
3610 		offset = wm_check_alt_mac_addr(sc);
3611 		if (offset == 0)
3612 			if ((sc->sc_funcid & 0x01) == 1)
3613 				do_invert = 1;
3614 		break;
3615 	default:
3616 		if ((sc->sc_funcid & 0x01) == 1)
3617 			do_invert = 1;
3618 		break;
3619 	}
3620 
3621 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
3622 		goto bad;
3623 
3624 	enaddr[0] = myea[0] & 0xff;
3625 	enaddr[1] = myea[0] >> 8;
3626 	enaddr[2] = myea[1] & 0xff;
3627 	enaddr[3] = myea[1] >> 8;
3628 	enaddr[4] = myea[2] & 0xff;
3629 	enaddr[5] = myea[2] >> 8;
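	/*
	 * For illustration: NVM words are little-endian within the
	 * address, so myea[] = { 0x1100, 0x3322, 0x5544 } unpacks to the
	 * MAC address 00:11:22:33:44:55.
	 */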
3630 
3631 	/*
3632 	 * Toggle the LSB of the MAC address on the second port
3633 	 * of some dual port cards.
3634 	 */
3635 	if (do_invert != 0)
3636 		enaddr[5] ^= 1;
3637 
3638 	return 0;
3639 
3640  bad:
3641 	return -1;
3642 }
3643 
3644 /*
3645  * wm_set_ral:
3646  *
3647  *	Set an entry in the receive address list.
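 *
 *	For illustration, enaddr 00:11:22:33:44:55 packs into
 *	ral_lo = 0x33221100 and ral_hi = 0x00005544 | RAL_AV.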
3648  */
3649 static void
3650 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3651 {
3652 	uint32_t ral_lo, ral_hi, addrl, addrh;
3653 	uint32_t wlock_mac;
3654 	int rv;
3655 
3656 	if (enaddr != NULL) {
3657 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
3658 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
3659 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
3660 		ral_hi |= RAL_AV;
3661 	} else {
3662 		ral_lo = 0;
3663 		ral_hi = 0;
3664 	}
3665 
3666 	switch (sc->sc_type) {
3667 	case WM_T_82542_2_0:
3668 	case WM_T_82542_2_1:
3669 	case WM_T_82543:
3670 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
3671 		CSR_WRITE_FLUSH(sc);
3672 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
3673 		CSR_WRITE_FLUSH(sc);
3674 		break;
3675 	case WM_T_PCH2:
3676 	case WM_T_PCH_LPT:
3677 	case WM_T_PCH_SPT:
3678 	case WM_T_PCH_CNP:
3679 		if (idx == 0) {
3680 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
3681 			CSR_WRITE_FLUSH(sc);
3682 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
3683 			CSR_WRITE_FLUSH(sc);
3684 			return;
3685 		}
3686 		if (sc->sc_type != WM_T_PCH2) {
3687 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
3688 			    FWSM_WLOCK_MAC);
3689 			addrl = WMREG_SHRAL(idx - 1);
3690 			addrh = WMREG_SHRAH(idx - 1);
3691 		} else {
3692 			wlock_mac = 0;
3693 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
3694 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
3695 		}
3696 
3697 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
3698 			rv = wm_get_swflag_ich8lan(sc);
3699 			if (rv != 0)
3700 				return;
3701 			CSR_WRITE(sc, addrl, ral_lo);
3702 			CSR_WRITE_FLUSH(sc);
3703 			CSR_WRITE(sc, addrh, ral_hi);
3704 			CSR_WRITE_FLUSH(sc);
3705 			wm_put_swflag_ich8lan(sc);
3706 		}
3707 
3708 		break;
3709 	default:
3710 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
3711 		CSR_WRITE_FLUSH(sc);
3712 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
3713 		CSR_WRITE_FLUSH(sc);
3714 		break;
3715 	}
3716 }
3717 
3718 /*
3719  * wm_mchash:
3720  *
3721  *	Compute the hash of the multicast address for the 4096-bit
3722  *	multicast filter (1024-bit on ICH and PCH variants).
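 *
 *	For illustration, with sc_mchash_type 0 on a non-ICH chip,
 *	enaddr[4] = 0x12 and enaddr[5] = 0x34 give
 *	hash = (0x12 >> 4) | (0x34 << 4) = 0x341; wm_set_filter() then
 *	uses hash >> 5 as the MTA register index and hash & 0x1f as the
 *	bit within that register.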
3723  */
3724 static uint32_t
3725 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3726 {
3727 	static const int lo_shift[4] = { 4, 3, 2, 0 };
3728 	static const int hi_shift[4] = { 4, 5, 6, 8 };
3729 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3730 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3731 	uint32_t hash;
3732 
3733 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3734 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3735 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3736 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
3737 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3738 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3739 		return (hash & 0x3ff);
3740 	}
3741 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3742 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3743 
3744 	return (hash & 0xfff);
3745 }
3746 
3747 /*
3748  * wm_rar_count:
3749  *	Return the number of entries in the receive address list.
3750  */
3751 static int
3752 wm_rar_count(struct wm_softc *sc)
3753 {
3754 	int size;
3755 
3756 	switch (sc->sc_type) {
3757 	case WM_T_ICH8:
3758 		size = WM_RAL_TABSIZE_ICH8 - 1;
3759 		break;
3760 	case WM_T_ICH9:
3761 	case WM_T_ICH10:
3762 	case WM_T_PCH:
3763 		size = WM_RAL_TABSIZE_ICH8;
3764 		break;
3765 	case WM_T_PCH2:
3766 		size = WM_RAL_TABSIZE_PCH2;
3767 		break;
3768 	case WM_T_PCH_LPT:
3769 	case WM_T_PCH_SPT:
3770 	case WM_T_PCH_CNP:
3771 		size = WM_RAL_TABSIZE_PCH_LPT;
3772 		break;
3773 	case WM_T_82575:
3774 	case WM_T_I210:
3775 	case WM_T_I211:
3776 		size = WM_RAL_TABSIZE_82575;
3777 		break;
3778 	case WM_T_82576:
3779 	case WM_T_82580:
3780 		size = WM_RAL_TABSIZE_82576;
3781 		break;
3782 	case WM_T_I350:
3783 	case WM_T_I354:
3784 		size = WM_RAL_TABSIZE_I350;
3785 		break;
3786 	default:
3787 		size = WM_RAL_TABSIZE;
3788 	}
3789 
3790 	return size;
3791 }
3792 
3793 /*
3794  * wm_set_filter:
3795  *
3796  *	Set up the receive filter.
3797  */
3798 static void
3799 wm_set_filter(struct wm_softc *sc)
3800 {
3801 	struct ethercom *ec = &sc->sc_ethercom;
3802 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3803 	struct ether_multi *enm;
3804 	struct ether_multistep step;
3805 	bus_addr_t mta_reg;
3806 	uint32_t hash, reg, bit;
3807 	int i, size, ralmax, rv;
3808 
3809 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3810 		device_xname(sc->sc_dev), __func__));
3811 
3812 	if (sc->sc_type >= WM_T_82544)
3813 		mta_reg = WMREG_CORDOVA_MTA;
3814 	else
3815 		mta_reg = WMREG_MTA;
3816 
3817 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3818 
3819 	if (ifp->if_flags & IFF_BROADCAST)
3820 		sc->sc_rctl |= RCTL_BAM;
3821 	if (ifp->if_flags & IFF_PROMISC) {
3822 		sc->sc_rctl |= RCTL_UPE;
3823 		ETHER_LOCK(ec);
3824 		ec->ec_flags |= ETHER_F_ALLMULTI;
3825 		ETHER_UNLOCK(ec);
3826 		goto allmulti;
3827 	}
3828 
3829 	/*
3830 	 * Set the station address in the first RAL slot, and
3831 	 * clear the remaining slots.
3832 	 */
3833 	size = wm_rar_count(sc);
3834 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3835 
3836 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
3837 	    || (sc->sc_type == WM_T_PCH_CNP)) {
3838 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
3839 		switch (i) {
3840 		case 0:
3841 			/* We can use all entries */
3842 			ralmax = size;
3843 			break;
3844 		case 1:
3845 			/* Only RAR[0] */
3846 			ralmax = 1;
3847 			break;
3848 		default:
3849 			/* Available SHRA + RAR[0] */
3850 			ralmax = i + 1;
3851 		}
3852 	} else
3853 		ralmax = size;
3854 	for (i = 1; i < size; i++) {
3855 		if (i < ralmax)
3856 			wm_set_ral(sc, NULL, i);
3857 	}
3858 
3859 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3860 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3861 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3862 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
3863 		size = WM_ICH8_MC_TABSIZE;
3864 	else
3865 		size = WM_MC_TABSIZE;
3866 	/* Clear out the multicast table. */
3867 	for (i = 0; i < size; i++) {
3868 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
3869 		CSR_WRITE_FLUSH(sc);
3870 	}
3871 
3872 	ETHER_LOCK(ec);
3873 	ETHER_FIRST_MULTI(step, ec, enm);
3874 	while (enm != NULL) {
3875 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3876 			ec->ec_flags |= ETHER_F_ALLMULTI;
3877 			ETHER_UNLOCK(ec);
3878 			/*
3879 			 * We must listen to a range of multicast addresses.
3880 			 * For now, just accept all multicasts, rather than
3881 			 * trying to set only those filter bits needed to match
3882 			 * the range.  (At this time, the only use of address
3883 			 * ranges is for IP multicast routing, for which the
3884 			 * range is big enough to require all bits set.)
3885 			 */
3886 			goto allmulti;
3887 		}
3888 
3889 		hash = wm_mchash(sc, enm->enm_addrlo);
3890 
3891 		reg = (hash >> 5);
3892 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3893 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3894 		    || (sc->sc_type == WM_T_PCH2)
3895 		    || (sc->sc_type == WM_T_PCH_LPT)
3896 		    || (sc->sc_type == WM_T_PCH_SPT)
3897 		    || (sc->sc_type == WM_T_PCH_CNP))
3898 			reg &= 0x1f;
3899 		else
3900 			reg &= 0x7f;
3901 		bit = hash & 0x1f;
3902 
3903 		hash = CSR_READ(sc, mta_reg + (reg << 2));
3904 		hash |= 1U << bit;
3905 
3906 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
3907 			/*
3908 			 * 82544 Errata 9: Certain register cannot be written
3909 			 * with particular alignments in PCI-X bus operation
3910 			 * (FCAH, MTA and VFTA).
3911 			 */
3912 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3913 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3914 			CSR_WRITE_FLUSH(sc);
3915 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3916 			CSR_WRITE_FLUSH(sc);
3917 		} else {
3918 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3919 			CSR_WRITE_FLUSH(sc);
3920 		}
3921 
3922 		ETHER_NEXT_MULTI(step, enm);
3923 	}
3924 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
3925 	ETHER_UNLOCK(ec);
3926 
3927 	goto setit;
3928 
3929  allmulti:
3930 	sc->sc_rctl |= RCTL_MPE;
3931 
3932  setit:
3933 	if (sc->sc_type >= WM_T_PCH2) {
3934 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
3935 		    && (ifp->if_mtu > ETHERMTU))
3936 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
3937 		else
3938 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
3939 		if (rv != 0)
3940 			device_printf(sc->sc_dev,
3941 			    "Failed to do workaround for jumbo frame.\n");
3942 	}
3943 
3944 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3945 }
3946 
3947 /* Reset and init related */
3948 
3949 static void
3950 wm_set_vlan(struct wm_softc *sc)
3951 {
3952 
3953 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3954 		device_xname(sc->sc_dev), __func__));
3955 
3956 	/* Deal with VLAN enables. */
3957 	if (VLAN_ATTACHED(&sc->sc_ethercom))
3958 		sc->sc_ctrl |= CTRL_VME;
3959 	else
3960 		sc->sc_ctrl &= ~CTRL_VME;
3961 
3962 	/* Write the control registers. */
3963 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3964 }
3965 
3966 static void
3967 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3968 {
3969 	uint32_t gcr;
3970 	pcireg_t ctrl2;
3971 
3972 	gcr = CSR_READ(sc, WMREG_GCR);
3973 
3974 	/* Only take action if timeout value is defaulted to 0 */
3975 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3976 		goto out;
3977 
3978 	if ((gcr & GCR_CAP_VER2) == 0) {
3979 		gcr |= GCR_CMPL_TMOUT_10MS;
3980 		goto out;
3981 	}
3982 
3983 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3984 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
3985 	ctrl2 |= WM_PCIE_DCSR2_16MS;
3986 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3987 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3988 
3989 out:
3990 	/* Disable completion timeout resend */
3991 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
3992 
3993 	CSR_WRITE(sc, WMREG_GCR, gcr);
3994 }
3995 
3996 void
3997 wm_get_auto_rd_done(struct wm_softc *sc)
3998 {
3999 	int i;
4000 
4001 	/* wait for eeprom to reload */
4002 	switch (sc->sc_type) {
4003 	case WM_T_82571:
4004 	case WM_T_82572:
4005 	case WM_T_82573:
4006 	case WM_T_82574:
4007 	case WM_T_82583:
4008 	case WM_T_82575:
4009 	case WM_T_82576:
4010 	case WM_T_82580:
4011 	case WM_T_I350:
4012 	case WM_T_I354:
4013 	case WM_T_I210:
4014 	case WM_T_I211:
4015 	case WM_T_80003:
4016 	case WM_T_ICH8:
4017 	case WM_T_ICH9:
4018 		for (i = 0; i < 10; i++) {
4019 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4020 				break;
4021 			delay(1000);
4022 		}
4023 		if (i == 10) {
4024 			log(LOG_ERR, "%s: auto read from eeprom failed to "
4025 			    "complete\n", device_xname(sc->sc_dev));
4026 		}
4027 		break;
4028 	default:
4029 		break;
4030 	}
4031 }
4032 
4033 void
4034 wm_lan_init_done(struct wm_softc *sc)
4035 {
4036 	uint32_t reg = 0;
4037 	int i;
4038 
4039 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4040 		device_xname(sc->sc_dev), __func__));
4041 
4042 	/* Wait for eeprom to reload */
4043 	switch (sc->sc_type) {
4044 	case WM_T_ICH10:
4045 	case WM_T_PCH:
4046 	case WM_T_PCH2:
4047 	case WM_T_PCH_LPT:
4048 	case WM_T_PCH_SPT:
4049 	case WM_T_PCH_CNP:
4050 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4051 			reg = CSR_READ(sc, WMREG_STATUS);
4052 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
4053 				break;
4054 			delay(100);
4055 		}
4056 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4057 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
4058 			    "complete\n", device_xname(sc->sc_dev), __func__);
4059 		}
4060 		break;
4061 	default:
4062 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4063 		    __func__);
4064 		break;
4065 	}
4066 
4067 	reg &= ~STATUS_LAN_INIT_DONE;
4068 	CSR_WRITE(sc, WMREG_STATUS, reg);
4069 }
4070 
4071 void
4072 wm_get_cfg_done(struct wm_softc *sc)
4073 {
4074 	int mask;
4075 	uint32_t reg;
4076 	int i;
4077 
4078 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4079 		device_xname(sc->sc_dev), __func__));
4080 
4081 	/* Wait for eeprom to reload */
4082 	switch (sc->sc_type) {
4083 	case WM_T_82542_2_0:
4084 	case WM_T_82542_2_1:
4085 		/* null */
4086 		break;
4087 	case WM_T_82543:
4088 	case WM_T_82544:
4089 	case WM_T_82540:
4090 	case WM_T_82545:
4091 	case WM_T_82545_3:
4092 	case WM_T_82546:
4093 	case WM_T_82546_3:
4094 	case WM_T_82541:
4095 	case WM_T_82541_2:
4096 	case WM_T_82547:
4097 	case WM_T_82547_2:
4098 	case WM_T_82573:
4099 	case WM_T_82574:
4100 	case WM_T_82583:
4101 		/* generic */
4102 		delay(10*1000);
4103 		break;
4104 	case WM_T_80003:
4105 	case WM_T_82571:
4106 	case WM_T_82572:
4107 	case WM_T_82575:
4108 	case WM_T_82576:
4109 	case WM_T_82580:
4110 	case WM_T_I350:
4111 	case WM_T_I354:
4112 	case WM_T_I210:
4113 	case WM_T_I211:
4114 		if (sc->sc_type == WM_T_82571) {
4115 			/* Only 82571 shares port 0 */
4116 			mask = EEMNGCTL_CFGDONE_0;
4117 		} else
4118 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
4119 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
4120 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
4121 				break;
4122 			delay(1000);
4123 		}
4124 		if (i >= WM_PHY_CFG_TIMEOUT)
4125 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
4126 				device_xname(sc->sc_dev), __func__));
4127 		break;
4128 	case WM_T_ICH8:
4129 	case WM_T_ICH9:
4130 	case WM_T_ICH10:
4131 	case WM_T_PCH:
4132 	case WM_T_PCH2:
4133 	case WM_T_PCH_LPT:
4134 	case WM_T_PCH_SPT:
4135 	case WM_T_PCH_CNP:
4136 		delay(10*1000);
4137 		if (sc->sc_type >= WM_T_ICH10)
4138 			wm_lan_init_done(sc);
4139 		else
4140 			wm_get_auto_rd_done(sc);
4141 
4142 		/* Clear PHY Reset Asserted bit */
4143 		reg = CSR_READ(sc, WMREG_STATUS);
4144 		if ((reg & STATUS_PHYRA) != 0)
4145 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
4146 		break;
4147 	default:
4148 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4149 		    __func__);
4150 		break;
4151 	}
4152 }
4153 
4154 int
4155 wm_phy_post_reset(struct wm_softc *sc)
4156 {
4157 	device_t dev = sc->sc_dev;
4158 	uint16_t reg;
4159 	int rv = 0;
4160 
4161 	/* This function is only for ICH8 and newer. */
4162 	if (sc->sc_type < WM_T_ICH8)
4163 		return 0;
4164 
4165 	if (wm_phy_resetisblocked(sc)) {
4166 		/* XXX */
4167 		device_printf(dev, "PHY is blocked\n");
4168 		return -1;
4169 	}
4170 
4171 	/* Allow time for h/w to get to quiescent state after reset */
4172 	delay(10*1000);
4173 
4174 	/* Perform any necessary post-reset workarounds */
4175 	if (sc->sc_type == WM_T_PCH)
4176 		rv = wm_hv_phy_workarounds_ich8lan(sc);
4177 	else if (sc->sc_type == WM_T_PCH2)
4178 		rv = wm_lv_phy_workarounds_ich8lan(sc);
4179 	if (rv != 0)
4180 		return rv;
4181 
4182 	/* Clear the host wakeup bit after lcd reset */
4183 	if (sc->sc_type >= WM_T_PCH) {
4184 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
4185 		reg &= ~BM_WUC_HOST_WU_BIT;
4186 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
4187 	}
4188 
4189 	/* Configure the LCD with the extended configuration region in NVM */
4190 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
4191 		return rv;
4192 
4193 	/* Configure the LCD with the OEM bits in NVM */
4194 	rv = wm_oem_bits_config_ich8lan(sc, true);
4195 
4196 	if (sc->sc_type == WM_T_PCH2) {
4197 		/* Ungate automatic PHY configuration on non-managed 82579 */
4198 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
4199 			delay(10 * 1000);
4200 			wm_gate_hw_phy_config_ich8lan(sc, false);
4201 		}
4202 		/* Set EEE LPI Update Timer to 200usec */
4203 		rv = sc->phy.acquire(sc);
4204 		if (rv)
4205 			return rv;
4206 		rv = wm_write_emi_reg_locked(dev,
4207 		    I82579_LPI_UPDATE_TIMER, 0x1387);
4208 		sc->phy.release(sc);
4209 	}
4210 
4211 	return rv;
4212 }
4213 
4214 /* Only for PCH and newer */
4215 static int
4216 wm_write_smbus_addr(struct wm_softc *sc)
4217 {
4218 	uint32_t strap, freq;
4219 	uint16_t phy_data;
4220 	int rv;
4221 
4222 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4223 		device_xname(sc->sc_dev), __func__));
4224 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
4225 
4226 	strap = CSR_READ(sc, WMREG_STRAP);
4227 	freq = __SHIFTOUT(strap, STRAP_FREQ);
4228 
4229 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
4230 	if (rv != 0)
4231 		return -1;
4232 
4233 	phy_data &= ~HV_SMB_ADDR_ADDR;
4234 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
4235 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
4236 
4237 	if (sc->sc_phytype == WMPHY_I217) {
4238 		/* Restore SMBus frequency */
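		/*
		 * "freq--" tests the pre-decrement value: a strap
		 * frequency of 0 is unsupported, while any nonzero value
		 * selects encoding (freq - 1), whose low two bits are
		 * written to the PHY below.
		 */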
4239 		if (freq--) {
4240 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
4241 			    | HV_SMB_ADDR_FREQ_HIGH);
4242 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
4243 			    HV_SMB_ADDR_FREQ_LOW);
4244 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
4245 			    HV_SMB_ADDR_FREQ_HIGH);
4246 		} else
4247 			DPRINTF(WM_DEBUG_INIT,
4248 			    ("%s: %s Unsupported SMB frequency in PHY\n",
4249 				device_xname(sc->sc_dev), __func__));
4250 	}
4251 
4252 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
4253 	    phy_data);
4254 }
4255 
4256 static int
4257 wm_init_lcd_from_nvm(struct wm_softc *sc)
4258 {
4259 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
4260 	uint16_t phy_page = 0;
4261 	int rv = 0;
4262 
4263 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4264 		device_xname(sc->sc_dev), __func__));
4265 
4266 	switch (sc->sc_type) {
4267 	case WM_T_ICH8:
4268 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
4269 		    || (sc->sc_phytype != WMPHY_IGP_3))
4270 			return 0;
4271 
4272 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
4273 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
4274 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
4275 			break;
4276 		}
4277 		/* FALLTHROUGH */
4278 	case WM_T_PCH:
4279 	case WM_T_PCH2:
4280 	case WM_T_PCH_LPT:
4281 	case WM_T_PCH_SPT:
4282 	case WM_T_PCH_CNP:
4283 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
4284 		break;
4285 	default:
4286 		return 0;
4287 	}
4288 
4289 	if ((rv = sc->phy.acquire(sc)) != 0)
4290 		return rv;
4291 
4292 	reg = CSR_READ(sc, WMREG_FEXTNVM);
4293 	if ((reg & sw_cfg_mask) == 0)
4294 		goto release;
4295 
4296 	/*
4297 	 * Make sure HW does not configure LCD from PHY extended configuration
4298 	 * before SW configuration
4299 	 */
4300 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
4301 	if ((sc->sc_type < WM_T_PCH2)
4302 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
4303 		goto release;
4304 
4305 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
4306 		device_xname(sc->sc_dev), __func__));
4307 	/* word_addr is in DWORD */
4308 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
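	/*
	 * The extended configuration region is laid out as (data,
	 * address) word pairs: entry i is read from NVM words
	 * word_addr + 2 * i and word_addr + 2 * i + 1 in the loop below.
	 */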
4309 
4310 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
4311 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
4312 	if (cnf_size == 0)
4313 		goto release;
4314 
4315 	if (((sc->sc_type == WM_T_PCH)
4316 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
4317 	    || (sc->sc_type > WM_T_PCH)) {
4318 		/*
4319 		 * HW configures the SMBus address and LEDs when the OEM and
4320 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
4321 		 * are cleared, SW will configure them instead.
4322 		 */
4323 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
4324 			device_xname(sc->sc_dev), __func__));
4325 		if ((rv = wm_write_smbus_addr(sc)) != 0)
4326 			goto release;
4327 
4328 		reg = CSR_READ(sc, WMREG_LEDCTL);
4329 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
4330 		    (uint16_t)reg);
4331 		if (rv != 0)
4332 			goto release;
4333 	}
4334 
4335 	/* Configure LCD from extended configuration region. */
4336 	for (i = 0; i < cnf_size; i++) {
4337 		uint16_t reg_data, reg_addr;
4338 
4339 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
4340 			goto release;
4341 
4342 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
4343 			goto release;
4344 
4345 		if (reg_addr == IGPHY_PAGE_SELECT)
4346 			phy_page = reg_data;
4347 
4348 		reg_addr &= IGPHY_MAXREGADDR;
4349 		reg_addr |= phy_page;
4350 
4351 		KASSERT(sc->phy.writereg_locked != NULL);
4352 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
4353 		    reg_data);
4354 	}
4355 
4356 release:
4357 	sc->phy.release(sc);
4358 	return rv;
4359 }
4360 
4361 /*
4362  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
4363  *  @sc:       pointer to the HW structure
4364  *  @d0_state: boolean if entering d0 or d3 device state
4365  *
4366  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
4367  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
4368  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
4369  */
4370 int
4371 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
4372 {
4373 	uint32_t mac_reg;
4374 	uint16_t oem_reg;
4375 	int rv;
4376 
4377 	if (sc->sc_type < WM_T_PCH)
4378 		return 0;
4379 
4380 	rv = sc->phy.acquire(sc);
4381 	if (rv != 0)
4382 		return rv;
4383 
4384 	if (sc->sc_type == WM_T_PCH) {
4385 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
4386 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
4387 			goto release;
4388 	}
4389 
4390 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
4391 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
4392 		goto release;
4393 
4394 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
4395 
4396 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
4397 	if (rv != 0)
4398 		goto release;
4399 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
4400 
4401 	if (d0_state) {
4402 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
4403 			oem_reg |= HV_OEM_BITS_A1KDIS;
4404 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
4405 			oem_reg |= HV_OEM_BITS_LPLU;
4406 	} else {
4407 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
4408 		    != 0)
4409 			oem_reg |= HV_OEM_BITS_A1KDIS;
4410 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
4411 		    != 0)
4412 			oem_reg |= HV_OEM_BITS_LPLU;
4413 	}
4414 
4415 	/* Set Restart auto-neg to activate the bits */
4416 	if ((d0_state || (sc->sc_type != WM_T_PCH))
4417 	    && (wm_phy_resetisblocked(sc) == false))
4418 		oem_reg |= HV_OEM_BITS_ANEGNOW;
4419 
4420 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
4421 
4422 release:
4423 	sc->phy.release(sc);
4424 
4425 	return rv;
4426 }
4427 
4428 /* Init hardware bits */
4429 void
4430 wm_initialize_hardware_bits(struct wm_softc *sc)
4431 {
4432 	uint32_t tarc0, tarc1, reg;
4433 
4434 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4435 		device_xname(sc->sc_dev), __func__));
4436 
4437 	/* For 82571 variant, 80003 and ICHs */
4438 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
4439 	    || (sc->sc_type >= WM_T_80003)) {
4440 
4441 		/* Transmit Descriptor Control 0 */
4442 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
4443 		reg |= TXDCTL_COUNT_DESC;
4444 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
4445 
4446 		/* Transmit Descriptor Control 1 */
4447 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
4448 		reg |= TXDCTL_COUNT_DESC;
4449 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
4450 
4451 		/* TARC0 */
4452 		tarc0 = CSR_READ(sc, WMREG_TARC0);
4453 		switch (sc->sc_type) {
4454 		case WM_T_82571:
4455 		case WM_T_82572:
4456 		case WM_T_82573:
4457 		case WM_T_82574:
4458 		case WM_T_82583:
4459 		case WM_T_80003:
4460 			/* Clear bits 30..27 */
4461 			tarc0 &= ~__BITS(30, 27);
4462 			break;
4463 		default:
4464 			break;
4465 		}
4466 
4467 		switch (sc->sc_type) {
4468 		case WM_T_82571:
4469 		case WM_T_82572:
4470 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
4471 
4472 			tarc1 = CSR_READ(sc, WMREG_TARC1);
4473 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
4474 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
4475 			/* 8257[12] Errata No.7 */
4476 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
4477 
4478 			/* TARC1 bit 28 */
4479 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
4480 				tarc1 &= ~__BIT(28);
4481 			else
4482 				tarc1 |= __BIT(28);
4483 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
4484 
4485 			/*
4486 			 * 8257[12] Errata No.13
4487 			 * Disable Dynamic Clock Gating.
4488 			 */
4489 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4490 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
4491 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4492 			break;
4493 		case WM_T_82573:
4494 		case WM_T_82574:
4495 		case WM_T_82583:
4496 			if ((sc->sc_type == WM_T_82574)
4497 			    || (sc->sc_type == WM_T_82583))
4498 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
4499 
4500 			/* Extended Device Control */
4501 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4502 			reg &= ~__BIT(23);	/* Clear bit 23 */
4503 			reg |= __BIT(22);	/* Set bit 22 */
4504 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4505 
4506 			/* Device Control */
4507 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
4508 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4509 
4510 			/* PCIe Control Register */
4511 			/*
4512 			 * 82573 Errata (unknown).
4513 			 *
4514 			 * 82574 Errata 25 and 82583 Errata 12
4515 			 * "Dropped Rx Packets":
4516 			 *   NVM Image Version 2.1.4 and newer is not affected.
4517 			 */
4518 			reg = CSR_READ(sc, WMREG_GCR);
4519 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
4520 			CSR_WRITE(sc, WMREG_GCR, reg);
4521 
4522 			if ((sc->sc_type == WM_T_82574)
4523 			    || (sc->sc_type == WM_T_82583)) {
4524 				/*
4525 				 * Document says this bit must be set for
4526 				 * proper operation.
4527 				 */
4528 				reg = CSR_READ(sc, WMREG_GCR);
4529 				reg |= __BIT(22);
4530 				CSR_WRITE(sc, WMREG_GCR, reg);
4531 
4532 				/*
4533 				 * Apply a workaround for a hardware
4534 				 * erratum documented in the errata docs:
4535 				 * error-prone or unreliable PCIe completions
4536 				 * can occur, particularly with ASPM
4537 				 * enabled. Without the fix, the issue can
4538 				 * cause Tx timeouts.
4539 				 */
4540 				reg = CSR_READ(sc, WMREG_GCR2);
4541 				reg |= __BIT(0);
4542 				CSR_WRITE(sc, WMREG_GCR2, reg);
4543 			}
4544 			break;
4545 		case WM_T_80003:
4546 			/* TARC0 */
4547 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
4548 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
4549 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
4550 
4551 			/* TARC1 bit 28 */
4552 			tarc1 = CSR_READ(sc, WMREG_TARC1);
4553 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
4554 				tarc1 &= ~__BIT(28);
4555 			else
4556 				tarc1 |= __BIT(28);
4557 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
4558 			break;
4559 		case WM_T_ICH8:
4560 		case WM_T_ICH9:
4561 		case WM_T_ICH10:
4562 		case WM_T_PCH:
4563 		case WM_T_PCH2:
4564 		case WM_T_PCH_LPT:
4565 		case WM_T_PCH_SPT:
4566 		case WM_T_PCH_CNP:
4567 			/* TARC0 */
4568 			if (sc->sc_type == WM_T_ICH8) {
4569 				/* Set TARC0 bits 29 and 28 */
4570 				tarc0 |= __BITS(29, 28);
4571 			} else if (sc->sc_type == WM_T_PCH_SPT) {
4572 				tarc0 |= __BIT(29);
4573 				/*
4574 				 *  Drop bit 28. From Linux.
4575 				 * See I218/I219 spec update
4576 				 * "5. Buffer Overrun While the I219 is
4577 				 * Processing DMA Transactions"
4578 				 */
4579 				tarc0 &= ~__BIT(28);
4580 			}
4581 			/* Set TARC0 bits 23,24,26,27 */
4582 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
4583 
4584 			/* CTRL_EXT */
4585 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4586 			reg |= __BIT(22);	/* Set bit 22 */
4587 			/*
4588 			 * Enable PHY low-power state when MAC is at D3
4589 			 * w/o WoL
4590 			 */
4591 			if (sc->sc_type >= WM_T_PCH)
4592 				reg |= CTRL_EXT_PHYPDEN;
4593 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4594 
4595 			/* TARC1 */
4596 			tarc1 = CSR_READ(sc, WMREG_TARC1);
4597 			/* bit 28 */
4598 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
4599 				tarc1 &= ~__BIT(28);
4600 			else
4601 				tarc1 |= __BIT(28);
4602 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
4603 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
4604 
4605 			/* Device Status */
4606 			if (sc->sc_type == WM_T_ICH8) {
4607 				reg = CSR_READ(sc, WMREG_STATUS);
4608 				reg &= ~__BIT(31);
4609 				CSR_WRITE(sc, WMREG_STATUS, reg);
4610 
4611 			}
4612 
4613 			/* IOSFPC */
4614 			if (sc->sc_type == WM_T_PCH_SPT) {
4615 				reg = CSR_READ(sc, WMREG_IOSFPC);
4616 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
4617 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
4618 			}
4619 			/*
4620 			 * Work around a descriptor data corruption issue
4621 			 * during NFS v2 UDP traffic by simply disabling
4622 			 * the NFS filtering capability.
4623 			 */
4624 			reg = CSR_READ(sc, WMREG_RFCTL);
4625 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
4626 			CSR_WRITE(sc, WMREG_RFCTL, reg);
4627 			break;
4628 		default:
4629 			break;
4630 		}
4631 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
4632 
4633 		switch (sc->sc_type) {
4634 		/*
4635 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
4636 		 * Avoid RSS Hash Value bug.
4637 		 */
4638 		case WM_T_82571:
4639 		case WM_T_82572:
4640 		case WM_T_82573:
4641 		case WM_T_80003:
4642 		case WM_T_ICH8:
4643 			reg = CSR_READ(sc, WMREG_RFCTL);
4644 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
4645 			CSR_WRITE(sc, WMREG_RFCTL, reg);
4646 			break;
4647 		case WM_T_82574:
4648 			/* Use the extended Rx descriptor. */
4649 			reg = CSR_READ(sc, WMREG_RFCTL);
4650 			reg |= WMREG_RFCTL_EXSTEN;
4651 			CSR_WRITE(sc, WMREG_RFCTL, reg);
4652 			break;
4653 		default:
4654 			break;
4655 		}
4656 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
4657 		/*
4658 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
4659 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
4660 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
4661 		 * Correctly by the Device"
4662 		 *
4663 		 * I354(C2000) Errata AVR53:
4664 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
4665 		 * Hang"
4666 		 */
4667 		reg = CSR_READ(sc, WMREG_RFCTL);
4668 		reg |= WMREG_RFCTL_IPV6EXDIS;
4669 		CSR_WRITE(sc, WMREG_RFCTL, reg);
4670 	}
4671 }
4672 
4673 static uint32_t
4674 wm_rxpbs_adjust_82580(uint32_t val)
4675 {
4676 	uint32_t rv = 0;
4677 
4678 	if (val < __arraycount(wm_82580_rxpbs_table))
4679 		rv = wm_82580_rxpbs_table[val];
4680 
4681 	return rv;
4682 }
4683 
4684 /*
4685  * wm_reset_phy:
4686  *
4687  *	generic PHY reset function.
4688  *	Same as e1000_phy_hw_reset_generic()
4689  */
4690 static int
4691 wm_reset_phy(struct wm_softc *sc)
4692 {
4693 	uint32_t reg;
4694 
4695 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4696 		device_xname(sc->sc_dev), __func__));
4697 	if (wm_phy_resetisblocked(sc))
4698 		return -1;
4699 
4700 	sc->phy.acquire(sc);
4701 
4702 	reg = CSR_READ(sc, WMREG_CTRL);
4703 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
4704 	CSR_WRITE_FLUSH(sc);
4705 
4706 	delay(sc->phy.reset_delay_us);
4707 
4708 	CSR_WRITE(sc, WMREG_CTRL, reg);
4709 	CSR_WRITE_FLUSH(sc);
4710 
4711 	delay(150);
4712 
4713 	sc->phy.release(sc);
4714 
4715 	wm_get_cfg_done(sc);
4716 	wm_phy_post_reset(sc);
4717 
4718 	return 0;
4719 }
4720 
4721 /*
4722  * Only used by WM_T_PCH_SPT which does not use multiqueue,
4723  * so it is enough to check sc->sc_queue[0] only.
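 *
 * The TX flush below follows the recipe used by other drivers for this
 * hardware: with the MULR fix disabled, queue one harmless 512-byte
 * descriptor and bump TDT so the ring drains before reset. If the
 * flush-request bit is still set afterwards, the RX ring is drained by
 * briefly re-enabling it with descriptor-granularity thresholds.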
4724  */
4725 static void
4726 wm_flush_desc_rings(struct wm_softc *sc)
4727 {
4728 	pcireg_t preg;
4729 	uint32_t reg;
4730 	struct wm_txqueue *txq;
4731 	wiseman_txdesc_t *txd;
4732 	int nexttx;
4733 	uint32_t rctl;
4734 
4735 	/* First, disable MULR fix in FEXTNVM11 */
4736 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
4737 	reg |= FEXTNVM11_DIS_MULRFIX;
4738 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
4739 
4740 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
4741 	reg = CSR_READ(sc, WMREG_TDLEN(0));
4742 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
4743 		return;
4744 
4745 	/* TX */
4746 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x, len = %u)\n",
4747 	    preg, reg);
4748 	reg = CSR_READ(sc, WMREG_TCTL);
4749 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
4750 
4751 	txq = &sc->sc_queue[0].wmq_txq;
4752 	nexttx = txq->txq_next;
4753 	txd = &txq->txq_descs[nexttx];
4754 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
4755 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
4756 	txd->wtx_fields.wtxu_status = 0;
4757 	txd->wtx_fields.wtxu_options = 0;
4758 	txd->wtx_fields.wtxu_vlan = 0;
4759 
4760 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
4761 	    BUS_SPACE_BARRIER_WRITE);
4762 
4763 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
4764 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
4765 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
4766 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
4767 	delay(250);
4768 
4769 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
4770 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
4771 		return;
4772 
4773 	/* RX */
4774 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
4775 	rctl = CSR_READ(sc, WMREG_RCTL);
4776 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
4777 	CSR_WRITE_FLUSH(sc);
4778 	delay(150);
4779 
4780 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
4781 	/* Zero the lower 14 bits (prefetch and host thresholds) */
4782 	reg &= 0xffffc000;
4783 	/*
4784 	 * Update thresholds: prefetch threshold to 31, host threshold
4785 	 * to 1 and make sure the granularity is "descriptors" and not
4786 	 * "cache lines"
4787 	 */
4788 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
4789 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
4790 
4791 	/* Momentarily enable the RX ring for the changes to take effect */
4792 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
4793 	CSR_WRITE_FLUSH(sc);
4794 	delay(150);
4795 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
4796 }
4797 
4798 /*
4799  * wm_reset:
4800  *
4801  *	Reset the i82542 chip.
4802  */
4803 static void
4804 wm_reset(struct wm_softc *sc)
4805 {
4806 	int phy_reset = 0;
4807 	int i, error = 0;
4808 	uint32_t reg;
4809 	uint16_t kmreg;
4810 	int rv;
4811 
4812 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4813 		device_xname(sc->sc_dev), __func__));
4814 	KASSERT(sc->sc_type != 0);
4815 
4816 	/*
4817 	 * Allocate on-chip memory according to the MTU size.
4818 	 * The Packet Buffer Allocation register must be written
4819 	 * before the chip is reset.
4820 	 */
4821 	switch (sc->sc_type) {
4822 	case WM_T_82547:
4823 	case WM_T_82547_2:
4824 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4825 		    PBA_22K : PBA_30K;
4826 		for (i = 0; i < sc->sc_nqueues; i++) {
4827 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
4828 			txq->txq_fifo_head = 0;
4829 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
4830 			txq->txq_fifo_size =
4831 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
4832 			txq->txq_fifo_stall = 0;
4833 		}
4834 		break;
4835 	case WM_T_82571:
4836 	case WM_T_82572:
4837 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
4838 	case WM_T_80003:
4839 		sc->sc_pba = PBA_32K;
4840 		break;
4841 	case WM_T_82573:
4842 		sc->sc_pba = PBA_12K;
4843 		break;
4844 	case WM_T_82574:
4845 	case WM_T_82583:
4846 		sc->sc_pba = PBA_20K;
4847 		break;
4848 	case WM_T_82576:
4849 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
4850 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
4851 		break;
4852 	case WM_T_82580:
4853 	case WM_T_I350:
4854 	case WM_T_I354:
4855 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
4856 		break;
4857 	case WM_T_I210:
4858 	case WM_T_I211:
4859 		sc->sc_pba = PBA_34K;
4860 		break;
4861 	case WM_T_ICH8:
4862 		/* Workaround for a bit corruption issue in FIFO memory */
4863 		sc->sc_pba = PBA_8K;
4864 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
4865 		break;
4866 	case WM_T_ICH9:
4867 	case WM_T_ICH10:
4868 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
4869 		    PBA_14K : PBA_10K;
4870 		break;
4871 	case WM_T_PCH:
4872 	case WM_T_PCH2:	/* XXX 14K? */
4873 	case WM_T_PCH_LPT:
4874 	case WM_T_PCH_SPT:
4875 	case WM_T_PCH_CNP:
4876 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
4877 		    PBA_12K : PBA_26K;
4878 		break;
4879 	default:
4880 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4881 		    PBA_40K : PBA_48K;
4882 		break;
4883 	}
4884 	/*
4885 	 * Only old or non-multiqueue devices have the PBA register.
4886 	 * XXX Need special handling for 82575.
4887 	 */
4888 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4889 	    || (sc->sc_type == WM_T_82575))
4890 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
4891 
4892 	/* Prevent the PCI-E bus from sticking */
4893 	if (sc->sc_flags & WM_F_PCIE) {
4894 		int timeout = 800;
4895 
4896 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
4897 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4898 
4899 		while (timeout--) {
4900 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
4901 			    == 0)
4902 				break;
4903 			delay(100);
4904 		}
4905 		if (timeout == 0)
4906 			device_printf(sc->sc_dev,
4907 			    "failed to disable busmastering\n");
4908 	}
4909 
4910 	/* Set the completion timeout for interface */
4911 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
4912 	    || (sc->sc_type == WM_T_82580)
4913 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4914 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
4915 		wm_set_pcie_completion_timeout(sc);
4916 
4917 	/* Clear interrupt */
4918 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4919 	if (wm_is_using_msix(sc)) {
4920 		if (sc->sc_type != WM_T_82574) {
4921 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4922 			CSR_WRITE(sc, WMREG_EIAC, 0);
4923 		} else
4924 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4925 	}
4926 
4927 	/* Stop the transmit and receive processes. */
4928 	CSR_WRITE(sc, WMREG_RCTL, 0);
4929 	sc->sc_rctl &= ~RCTL_EN;
4930 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
4931 	CSR_WRITE_FLUSH(sc);
4932 
4933 	/* XXX set_tbi_sbp_82543() */
4934 
4935 	delay(10*1000);
4936 
4937 	/* Must acquire the MDIO ownership before MAC reset */
4938 	switch (sc->sc_type) {
4939 	case WM_T_82573:
4940 	case WM_T_82574:
4941 	case WM_T_82583:
4942 		error = wm_get_hw_semaphore_82573(sc);
4943 		break;
4944 	default:
4945 		break;
4946 	}
4947 
4948 	/*
4949 	 * 82541 Errata 29? & 82547 Errata 28?
4950 	 * See also the description about PHY_RST bit in CTRL register
4951 	 * in 8254x_GBe_SDM.pdf.
4952 	 */
4953 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
4954 		CSR_WRITE(sc, WMREG_CTRL,
4955 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
4956 		CSR_WRITE_FLUSH(sc);
4957 		delay(5000);
4958 	}
4959 
4960 	switch (sc->sc_type) {
4961 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
4962 	case WM_T_82541:
4963 	case WM_T_82541_2:
4964 	case WM_T_82547:
4965 	case WM_T_82547_2:
4966 		/*
4967 		 * On some chipsets, a reset through a memory-mapped write
4968 		 * cycle can cause the chip to reset before completing the
4969 		 * write cycle. This causes major headache that can be avoided
4970 		 * by issuing the reset via indirect register writes through
4971 		 * I/O space.
4972 		 *
4973 		 * So, if we successfully mapped the I/O BAR at attach time,
4974 		 * use that. Otherwise, try our luck with a memory-mapped
4975 		 * reset.
4976 		 */
4977 		if (sc->sc_flags & WM_F_IOH_VALID)
4978 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
4979 		else
4980 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
4981 		break;
4982 	case WM_T_82545_3:
4983 	case WM_T_82546_3:
4984 		/* Use the shadow control register on these chips. */
4985 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
4986 		break;
4987 	case WM_T_80003:
4988 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4989 		sc->phy.acquire(sc);
4990 		CSR_WRITE(sc, WMREG_CTRL, reg);
4991 		sc->phy.release(sc);
4992 		break;
4993 	case WM_T_ICH8:
4994 	case WM_T_ICH9:
4995 	case WM_T_ICH10:
4996 	case WM_T_PCH:
4997 	case WM_T_PCH2:
4998 	case WM_T_PCH_LPT:
4999 	case WM_T_PCH_SPT:
5000 	case WM_T_PCH_CNP:
5001 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
5002 		if (wm_phy_resetisblocked(sc) == false) {
5003 			/*
5004 			 * Gate automatic PHY configuration by hardware on
5005 			 * non-managed 82579
5006 			 */
5007 			if ((sc->sc_type == WM_T_PCH2)
5008 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
5009 				== 0))
5010 				wm_gate_hw_phy_config_ich8lan(sc, true);
5011 
5012 			reg |= CTRL_PHY_RESET;
5013 			phy_reset = 1;
5014 		} else
5015 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
5016 		sc->phy.acquire(sc);
5017 		CSR_WRITE(sc, WMREG_CTRL, reg);
		/* Don't insert a completion barrier during reset. */
5019 		delay(20*1000);
5020 		mutex_exit(sc->sc_ich_phymtx);
5021 		break;
5022 	case WM_T_82580:
5023 	case WM_T_I350:
5024 	case WM_T_I354:
5025 	case WM_T_I210:
5026 	case WM_T_I211:
5027 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
5028 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
5029 			CSR_WRITE_FLUSH(sc);
5030 		delay(5000);
5031 		break;
5032 	case WM_T_82542_2_0:
5033 	case WM_T_82542_2_1:
5034 	case WM_T_82543:
5035 	case WM_T_82540:
5036 	case WM_T_82545:
5037 	case WM_T_82546:
5038 	case WM_T_82571:
5039 	case WM_T_82572:
5040 	case WM_T_82573:
5041 	case WM_T_82574:
5042 	case WM_T_82575:
5043 	case WM_T_82576:
5044 	case WM_T_82583:
5045 	default:
5046 		/* Everything else can safely use the documented method. */
5047 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
5048 		break;
5049 	}
5050 
5051 	/* Must release the MDIO ownership after MAC reset */
5052 	switch (sc->sc_type) {
5053 	case WM_T_82573:
5054 	case WM_T_82574:
5055 	case WM_T_82583:
5056 		if (error == 0)
5057 			wm_put_hw_semaphore_82573(sc);
5058 		break;
5059 	default:
5060 		break;
5061 	}
5062 
5063 	/* Set Phy Config Counter to 50msec */
5064 	if (sc->sc_type == WM_T_PCH2) {
5065 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
5066 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
5067 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
5068 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
5069 	}
5070 
5071 	if (phy_reset != 0)
5072 		wm_get_cfg_done(sc);
5073 
5074 	/* Reload EEPROM */
5075 	switch (sc->sc_type) {
5076 	case WM_T_82542_2_0:
5077 	case WM_T_82542_2_1:
5078 	case WM_T_82543:
5079 	case WM_T_82544:
5080 		delay(10);
5081 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5082 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5083 		CSR_WRITE_FLUSH(sc);
5084 		delay(2000);
5085 		break;
5086 	case WM_T_82540:
5087 	case WM_T_82545:
5088 	case WM_T_82545_3:
5089 	case WM_T_82546:
5090 	case WM_T_82546_3:
5091 		delay(5*1000);
5092 		/* XXX Disable HW ARPs on ASF enabled adapters */
5093 		break;
5094 	case WM_T_82541:
5095 	case WM_T_82541_2:
5096 	case WM_T_82547:
5097 	case WM_T_82547_2:
5098 		delay(20000);
5099 		/* XXX Disable HW ARPs on ASF enabled adapters */
5100 		break;
5101 	case WM_T_82571:
5102 	case WM_T_82572:
5103 	case WM_T_82573:
5104 	case WM_T_82574:
5105 	case WM_T_82583:
5106 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
5107 			delay(10);
5108 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5109 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5110 			CSR_WRITE_FLUSH(sc);
5111 		}
5112 		/* check EECD_EE_AUTORD */
5113 		wm_get_auto_rd_done(sc);
5114 		/*
		 * PHY configuration from the NVM starts just after
		 * EECD_AUTO_RD is set.
5117 		 */
5118 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
5119 		    || (sc->sc_type == WM_T_82583))
5120 			delay(25*1000);
5121 		break;
5122 	case WM_T_82575:
5123 	case WM_T_82576:
5124 	case WM_T_82580:
5125 	case WM_T_I350:
5126 	case WM_T_I354:
5127 	case WM_T_I210:
5128 	case WM_T_I211:
5129 	case WM_T_80003:
5130 		/* check EECD_EE_AUTORD */
5131 		wm_get_auto_rd_done(sc);
5132 		break;
5133 	case WM_T_ICH8:
5134 	case WM_T_ICH9:
5135 	case WM_T_ICH10:
5136 	case WM_T_PCH:
5137 	case WM_T_PCH2:
5138 	case WM_T_PCH_LPT:
5139 	case WM_T_PCH_SPT:
5140 	case WM_T_PCH_CNP:
5141 		break;
5142 	default:
5143 		panic("%s: unknown type\n", __func__);
5144 	}
5145 
5146 	/* Check whether EEPROM is present or not */
5147 	switch (sc->sc_type) {
5148 	case WM_T_82575:
5149 	case WM_T_82576:
5150 	case WM_T_82580:
5151 	case WM_T_I350:
5152 	case WM_T_I354:
5153 	case WM_T_ICH8:
5154 	case WM_T_ICH9:
5155 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
5156 			/* Not found */
5157 			sc->sc_flags |= WM_F_EEPROM_INVALID;
5158 			if (sc->sc_type == WM_T_82575)
5159 				wm_reset_init_script_82575(sc);
5160 		}
5161 		break;
5162 	default:
5163 		break;
5164 	}
5165 
5166 	if (phy_reset != 0)
5167 		wm_phy_post_reset(sc);
5168 
5169 	if ((sc->sc_type == WM_T_82580)
5170 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
5171 		/* Clear global device reset status bit */
5172 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
5173 	}
5174 
5175 	/* Clear any pending interrupt events. */
5176 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5177 	reg = CSR_READ(sc, WMREG_ICR);
5178 	if (wm_is_using_msix(sc)) {
5179 		if (sc->sc_type != WM_T_82574) {
5180 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5181 			CSR_WRITE(sc, WMREG_EIAC, 0);
5182 		} else
5183 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5184 	}
5185 
5186 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5187 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5188 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
5189 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
5190 		reg = CSR_READ(sc, WMREG_KABGTXD);
5191 		reg |= KABGTXD_BGSQLBIAS;
5192 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
5193 	}
5194 
5195 	/* Reload sc_ctrl */
5196 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5197 
5198 	wm_set_eee(sc);
5199 
5200 	/*
5201 	 * For PCH, this write will make sure that any noise will be detected
5202 	 * as a CRC error and be dropped rather than show up as a bad packet
5203 	 * to the DMA engine
5204 	 */
5205 	if (sc->sc_type == WM_T_PCH)
5206 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
5207 
5208 	if (sc->sc_type >= WM_T_82544)
5209 		CSR_WRITE(sc, WMREG_WUC, 0);
5210 
5211 	if (sc->sc_type < WM_T_82575)
5212 		wm_disable_aspm(sc); /* Workaround for some chips */
5213 
5214 	wm_reset_mdicnfg_82580(sc);
5215 
5216 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
5217 		wm_pll_workaround_i210(sc);
5218 
5219 	if (sc->sc_type == WM_T_80003) {
5220 		/* Default to TRUE to enable the MDIC W/A */
5221 		sc->sc_flags |= WM_F_80003_MDIC_WA;
5222 
5223 		rv = wm_kmrn_readreg(sc,
5224 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
5225 		if (rv == 0) {
5226 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
5227 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
5228 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
5229 			else
5230 				sc->sc_flags |= WM_F_80003_MDIC_WA;
5231 		}
5232 	}
5233 }
5234 
5235 /*
5236  * wm_add_rxbuf:
5237  *
 *	Add a receive buffer to the indicated descriptor.
5239  */
5240 static int
5241 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
5242 {
5243 	struct wm_softc *sc = rxq->rxq_sc;
5244 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
5245 	struct mbuf *m;
5246 	int error;
5247 
5248 	KASSERT(mutex_owned(rxq->rxq_lock));
5249 
5250 	MGETHDR(m, M_DONTWAIT, MT_DATA);
5251 	if (m == NULL)
5252 		return ENOBUFS;
5253 
5254 	MCLGET(m, M_DONTWAIT);
5255 	if ((m->m_flags & M_EXT) == 0) {
5256 		m_freem(m);
5257 		return ENOBUFS;
5258 	}
5259 
5260 	if (rxs->rxs_mbuf != NULL)
5261 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5262 
5263 	rxs->rxs_mbuf = m;
5264 
5265 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5266 	/*
5267 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
5268 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
5269 	 */
5270 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
5271 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
5272 	if (error) {
5273 		/* XXX XXX XXX */
5274 		aprint_error_dev(sc->sc_dev,
5275 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
5276 		panic("wm_add_rxbuf");
5277 	}
5278 
5279 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5280 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5281 
5282 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5283 		if ((sc->sc_rctl & RCTL_EN) != 0)
5284 			wm_init_rxdesc(rxq, idx);
5285 	} else
5286 		wm_init_rxdesc(rxq, idx);
5287 
5288 	return 0;
5289 }
5290 
5291 /*
5292  * wm_rxdrain:
5293  *
5294  *	Drain the receive queue.
5295  */
5296 static void
5297 wm_rxdrain(struct wm_rxqueue *rxq)
5298 {
5299 	struct wm_softc *sc = rxq->rxq_sc;
5300 	struct wm_rxsoft *rxs;
5301 	int i;
5302 
5303 	KASSERT(mutex_owned(rxq->rxq_lock));
5304 
5305 	for (i = 0; i < WM_NRXDESC; i++) {
5306 		rxs = &rxq->rxq_soft[i];
5307 		if (rxs->rxs_mbuf != NULL) {
5308 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5309 			m_freem(rxs->rxs_mbuf);
5310 			rxs->rxs_mbuf = NULL;
5311 		}
5312 	}
5313 }
5314 
5315 /*
 * Set up registers for RSS.
 *
 * XXX VMDq is not yet supported.
5319  */
5320 static void
5321 wm_init_rss(struct wm_softc *sc)
5322 {
5323 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
5324 	int i;
5325 
5326 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
5327 
5328 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
5329 		unsigned int qid, reta_ent;
5330 
5331 		qid  = i % sc->sc_nqueues;
5332 		switch (sc->sc_type) {
5333 		case WM_T_82574:
5334 			reta_ent = __SHIFTIN(qid,
5335 			    RETA_ENT_QINDEX_MASK_82574);
5336 			break;
5337 		case WM_T_82575:
5338 			reta_ent = __SHIFTIN(qid,
5339 			    RETA_ENT_QINDEX1_MASK_82575);
5340 			break;
5341 		default:
5342 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
5343 			break;
5344 		}
5345 
5346 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
5347 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
5348 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
5349 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
5350 	}
5351 
5352 	rss_getkey((uint8_t *)rss_key);
5353 	for (i = 0; i < RSSRK_NUM_REGS; i++)
5354 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
5355 
5356 	if (sc->sc_type == WM_T_82574)
5357 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
5358 	else
5359 		mrqc = MRQC_ENABLE_RSS_MQ;
5360 
5361 	/*
5362 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
5363 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
5364 	 */
5365 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
5366 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
5367 #if 0
5368 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
5369 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
5370 #endif
5371 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
5372 
5373 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
5374 }
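/*
 * Illustrative sketch (not driver code) of how the redirection table
 * filled above spreads flows: the low bits of the RSS (Toeplitz) hash
 * of a received packet index the RETA, and the selected entry names the
 * destination queue. With sc_nqueues == 4 and the qid = i % sc_nqueues
 * fill pattern above, the entries repeat 0,1,2,3, so conceptually:
 *
 *	queue = reta[hash % RETA_NUM_ENTRIES];
 */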
5375 
5376 /*
 * Adjust the TX and RX queue numbers which the system actually uses.
 *
 * The numbers are affected by the following parameters:
 *     - The number of hardware queues
5381  *     - The number of MSI-X vectors (= "nvectors" argument)
5382  *     - ncpu
5383  */
5384 static void
5385 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
5386 {
5387 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
5388 
5389 	if (nvectors < 2) {
5390 		sc->sc_nqueues = 1;
5391 		return;
5392 	}
5393 
5394 	switch (sc->sc_type) {
5395 	case WM_T_82572:
5396 		hw_ntxqueues = 2;
5397 		hw_nrxqueues = 2;
5398 		break;
5399 	case WM_T_82574:
5400 		hw_ntxqueues = 2;
5401 		hw_nrxqueues = 2;
5402 		break;
5403 	case WM_T_82575:
5404 		hw_ntxqueues = 4;
5405 		hw_nrxqueues = 4;
5406 		break;
5407 	case WM_T_82576:
5408 		hw_ntxqueues = 16;
5409 		hw_nrxqueues = 16;
5410 		break;
5411 	case WM_T_82580:
5412 	case WM_T_I350:
5413 	case WM_T_I354:
5414 		hw_ntxqueues = 8;
5415 		hw_nrxqueues = 8;
5416 		break;
5417 	case WM_T_I210:
5418 		hw_ntxqueues = 4;
5419 		hw_nrxqueues = 4;
5420 		break;
5421 	case WM_T_I211:
5422 		hw_ntxqueues = 2;
5423 		hw_nrxqueues = 2;
5424 		break;
5425 		/*
		 * As the Ethernet controllers below do not support MSI-X,
		 * this driver does not use multiqueue on them.
5428 		 *     - WM_T_80003
5429 		 *     - WM_T_ICH8
5430 		 *     - WM_T_ICH9
5431 		 *     - WM_T_ICH10
5432 		 *     - WM_T_PCH
5433 		 *     - WM_T_PCH2
5434 		 *     - WM_T_PCH_LPT
5435 		 */
5436 	default:
5437 		hw_ntxqueues = 1;
5438 		hw_nrxqueues = 1;
5439 		break;
5440 	}
5441 
5442 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
5443 
5444 	/*
	 * As more queues than MSI-X vectors cannot improve scaling, we limit
	 * the number of queues actually used.
5447 	 */
5448 	if (nvectors < hw_nqueues + 1)
5449 		sc->sc_nqueues = nvectors - 1;
5450 	else
5451 		sc->sc_nqueues = hw_nqueues;
5452 
5453 	/*
	 * As more queues than CPUs cannot improve scaling, we limit
	 * the number of queues actually used.
5456 	 */
5457 	if (ncpu < sc->sc_nqueues)
5458 		sc->sc_nqueues = ncpu;
5459 }
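/*
 * Worked example of the limits above: an 82576 (16 hardware queues) on
 * an 8-CPU machine with 5 allocated MSI-X vectors. hw_nqueues is 16,
 * but one vector is reserved for the link interrupt, so nvectors - 1 = 4
 * queues remain; 4 <= ncpu, so sc_nqueues ends up as 4.
 */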
5460 
5461 static inline bool
5462 wm_is_using_msix(struct wm_softc *sc)
5463 {
5464 
5465 	return (sc->sc_nintrs > 1);
5466 }
5467 
5468 static inline bool
5469 wm_is_using_multiqueue(struct wm_softc *sc)
5470 {
5471 
5472 	return (sc->sc_nqueues > 1);
5473 }
5474 
5475 static int
5476 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
5477 {
5478 	struct wm_queue *wmq = &sc->sc_queue[qidx];
5479 
5480 	wmq->wmq_id = qidx;
5481 	wmq->wmq_intr_idx = intr_idx;
5482 	wmq->wmq_si = softint_establish(SOFTINT_NET | WM_SOFTINT_FLAGS,
5483 	    wm_handle_queue, wmq);
5484 	if (wmq->wmq_si != NULL)
5485 		return 0;
5486 
5487 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
5488 	    wmq->wmq_id);
5489 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
5490 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
5491 	return ENOMEM;
5492 }
5493 
5494 /*
 * Both single-interrupt MSI and INTx can use this function.
5496  */
5497 static int
5498 wm_setup_legacy(struct wm_softc *sc)
5499 {
5500 	pci_chipset_tag_t pc = sc->sc_pc;
5501 	const char *intrstr = NULL;
5502 	char intrbuf[PCI_INTRSTR_LEN];
5503 	int error;
5504 
5505 	error = wm_alloc_txrx_queues(sc);
5506 	if (error) {
5507 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
5508 		    error);
5509 		return ENOMEM;
5510 	}
5511 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
5512 	    sizeof(intrbuf));
5513 #ifdef WM_MPSAFE
5514 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
5515 #endif
5516 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
5517 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
5518 	if (sc->sc_ihs[0] == NULL) {
5519 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
5520 		    (pci_intr_type(pc, sc->sc_intrs[0])
5521 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
5522 		return ENOMEM;
5523 	}
5524 
5525 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
5526 	sc->sc_nintrs = 1;
5527 
5528 	return wm_softint_establish_queue(sc, 0, 0);
5529 }
5530 
5531 static int
5532 wm_setup_msix(struct wm_softc *sc)
5533 {
5534 	void *vih;
5535 	kcpuset_t *affinity;
5536 	int qidx, error, intr_idx, txrx_established;
5537 	pci_chipset_tag_t pc = sc->sc_pc;
5538 	const char *intrstr = NULL;
5539 	char intrbuf[PCI_INTRSTR_LEN];
5540 	char intr_xname[INTRDEVNAMEBUF];
5541 
5542 	if (sc->sc_nqueues < ncpu) {
5543 		/*
		 * To avoid other devices' interrupts, the Tx/Rx interrupt
		 * affinity starts from CPU#1.
5546 		 */
5547 		sc->sc_affinity_offset = 1;
5548 	} else {
5549 		/*
		 * In this case, this device uses all CPUs, so we unify the
		 * affinity cpu_index with the MSI-X vector number for
		 * readability.
5552 		 */
5553 		sc->sc_affinity_offset = 0;
5554 	}
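	/*
	 * For example, with 4 queues on an 8-CPU machine the offset is 1,
	 * so queue k's Tx/Rx interrupt is bound to CPU (1 + k) % ncpu,
	 * i.e. CPUs 1..4, leaving CPU#0 for other devices.
	 */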
5555 
5556 	error = wm_alloc_txrx_queues(sc);
5557 	if (error) {
5558 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
5559 		    error);
5560 		return ENOMEM;
5561 	}
5562 
5563 	kcpuset_create(&affinity, false);
5564 	intr_idx = 0;
5565 
5566 	/*
5567 	 * TX and RX
5568 	 */
5569 	txrx_established = 0;
5570 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5571 		struct wm_queue *wmq = &sc->sc_queue[qidx];
5572 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
5573 
5574 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
5575 		    sizeof(intrbuf));
5576 #ifdef WM_MPSAFE
5577 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
5578 		    PCI_INTR_MPSAFE, true);
5579 #endif
5580 		memset(intr_xname, 0, sizeof(intr_xname));
5581 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
5582 		    device_xname(sc->sc_dev), qidx);
5583 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
5584 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
5585 		if (vih == NULL) {
5586 			aprint_error_dev(sc->sc_dev,
5587 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
5588 			    intrstr ? " at " : "",
5589 			    intrstr ? intrstr : "");
5590 
5591 			goto fail;
5592 		}
5593 		kcpuset_zero(affinity);
5594 		/* Round-robin affinity */
5595 		kcpuset_set(affinity, affinity_to);
5596 		error = interrupt_distribute(vih, affinity, NULL);
5597 		if (error == 0) {
5598 			aprint_normal_dev(sc->sc_dev,
5599 			    "for TX and RX interrupting at %s affinity to %u\n",
5600 			    intrstr, affinity_to);
5601 		} else {
5602 			aprint_normal_dev(sc->sc_dev,
5603 			    "for TX and RX interrupting at %s\n", intrstr);
5604 		}
5605 		sc->sc_ihs[intr_idx] = vih;
5606 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
5607 			goto fail;
5608 		txrx_established++;
5609 		intr_idx++;
5610 	}
5611 
5612 	/* LINK */
5613 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
5614 	    sizeof(intrbuf));
5615 #ifdef WM_MPSAFE
5616 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
5617 #endif
5618 	memset(intr_xname, 0, sizeof(intr_xname));
5619 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
5620 	    device_xname(sc->sc_dev));
5621 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
5622 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
5623 	if (vih == NULL) {
5624 		aprint_error_dev(sc->sc_dev,
5625 		    "unable to establish MSI-X(for LINK)%s%s\n",
5626 		    intrstr ? " at " : "",
5627 		    intrstr ? intrstr : "");
5628 
5629 		goto fail;
5630 	}
5631 	/* Keep default affinity to LINK interrupt */
5632 	aprint_normal_dev(sc->sc_dev,
5633 	    "for LINK interrupting at %s\n", intrstr);
5634 	sc->sc_ihs[intr_idx] = vih;
5635 	sc->sc_link_intr_idx = intr_idx;
5636 
5637 	sc->sc_nintrs = sc->sc_nqueues + 1;
5638 	kcpuset_destroy(affinity);
5639 	return 0;
5640 
5641  fail:
5642 	for (qidx = 0; qidx < txrx_established; qidx++) {
5643 		struct wm_queue *wmq = &sc->sc_queue[qidx];
5644 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
5645 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
5646 	}
5647 
5648 	kcpuset_destroy(affinity);
5649 	return ENOMEM;
5650 }
5651 
5652 static void
5653 wm_unset_stopping_flags(struct wm_softc *sc)
5654 {
5655 	int i;
5656 
5657 	KASSERT(WM_CORE_LOCKED(sc));
5658 
5659 	/* Must unset stopping flags in ascending order. */
5660 	for (i = 0; i < sc->sc_nqueues; i++) {
5661 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5662 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5663 
5664 		mutex_enter(txq->txq_lock);
5665 		txq->txq_stopping = false;
5666 		mutex_exit(txq->txq_lock);
5667 
5668 		mutex_enter(rxq->rxq_lock);
5669 		rxq->rxq_stopping = false;
5670 		mutex_exit(rxq->rxq_lock);
5671 	}
5672 
5673 	sc->sc_core_stopping = false;
5674 }
5675 
5676 static void
5677 wm_set_stopping_flags(struct wm_softc *sc)
5678 {
5679 	int i;
5680 
5681 	KASSERT(WM_CORE_LOCKED(sc));
5682 
5683 	sc->sc_core_stopping = true;
5684 
5685 	/* Must set stopping flags in ascending order. */
5686 	for (i = 0; i < sc->sc_nqueues; i++) {
5687 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5688 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5689 
5690 		mutex_enter(rxq->rxq_lock);
5691 		rxq->rxq_stopping = true;
5692 		mutex_exit(rxq->rxq_lock);
5693 
5694 		mutex_enter(txq->txq_lock);
5695 		txq->txq_stopping = true;
5696 		mutex_exit(txq->txq_lock);
5697 	}
5698 }
5699 
5700 /*
 * Write the interrupt interval value to the ITR or EITR register.
5702  */
5703 static void
5704 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
5705 {
5706 
5707 	if (!wmq->wmq_set_itr)
5708 		return;
5709 
5710 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5711 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
5712 
5713 		/*
		 * The 82575 doesn't have the CNT_INGR field, so overwrite
		 * the counter field in software.
5716 		 */
5717 		if (sc->sc_type == WM_T_82575)
5718 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
5719 		else
5720 			eitr |= EITR_CNT_INGR;
5721 
5722 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
5723 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
5724 		/*
		 * The 82574 has both ITR and EITR. Set EITR when we use
		 * the multiqueue function with MSI-X.
5727 		 */
5728 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
5729 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
5730 	} else {
5731 		KASSERT(wmq->wmq_id == 0);
5732 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
5733 	}
5734 
5735 	wmq->wmq_set_itr = false;
5736 }
5737 
5738 /*
5739  * TODO
 * The dynamic ITR calculation below is almost the same as Linux igb's;
 * however, it does not fit wm(4), so AIM remains disabled until we find
 * an appropriate ITR calculation.
5743  */
5744 /*
 * Calculate the interrupt interval value to be written to the register
 * in wm_itrs_writereg(). This function does not write the ITR/EITR
 * register itself.
5747  */
5748 static void
5749 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
5750 {
5751 #ifdef NOTYET
5752 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
5753 	struct wm_txqueue *txq = &wmq->wmq_txq;
5754 	uint32_t avg_size = 0;
5755 	uint32_t new_itr;
5756 
5757 	if (rxq->rxq_packets)
5758 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
5759 	if (txq->txq_packets)
5760 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
5761 
5762 	if (avg_size == 0) {
5763 		new_itr = 450; /* restore default value */
5764 		goto out;
5765 	}
5766 
5767 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
5768 	avg_size += 24;
5769 
5770 	/* Don't starve jumbo frames */
5771 	avg_size = uimin(avg_size, 3000);
5772 
5773 	/* Give a little boost to mid-size frames */
5774 	if ((avg_size > 300) && (avg_size < 1200))
5775 		new_itr = avg_size / 3;
5776 	else
5777 		new_itr = avg_size / 2;
5778 
5779 out:
5780 	/*
	 * The usage of the 82574 and 82575 EITR differs from that of other
	 * NEWQUEUE controllers. See the sc->sc_itr_init setting in
	 * wm_init_locked().
5783 	 */
5784 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
5785 		new_itr *= 4;
5786 
5787 	if (new_itr != wmq->wmq_itr) {
5788 		wmq->wmq_itr = new_itr;
5789 		wmq->wmq_set_itr = true;
5790 	} else
5791 		wmq->wmq_set_itr = false;
5792 
5793 	rxq->rxq_packets = 0;
5794 	rxq->rxq_bytes = 0;
5795 	txq->txq_packets = 0;
5796 	txq->txq_bytes = 0;
5797 #endif
5798 }
5799 
5800 static void
5801 wm_init_sysctls(struct wm_softc *sc)
5802 {
5803 	struct sysctllog **log;
5804 	const struct sysctlnode *rnode, *cnode;
5805 	int rv;
5806 	const char *dvname;
5807 
5808 	log = &sc->sc_sysctllog;
5809 	dvname = device_xname(sc->sc_dev);
5810 
5811 	rv = sysctl_createv(log, 0, NULL, &rnode,
5812 	    0, CTLTYPE_NODE, dvname,
5813 	    SYSCTL_DESCR("wm information and settings"),
5814 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
5815 	if (rv != 0)
5816 		goto err;
5817 
5818 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "txrx_workqueue",
	    SYSCTL_DESCR("Use workqueue for packet processing"),
5820 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
5821 	if (rv != 0)
5822 		goto teardown;
5823 
5824 	return;
5825 
5826 teardown:
5827 	sysctl_teardown(log);
5828 err:
5829 	sc->sc_sysctllog = NULL;
5830 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
5831 	    __func__, rv);
5832 }
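/*
 * The node created above lives under hw.<ifname>; for example, on a
 * machine where the device attaches as wm0, the knob can be toggled
 * from userland with:
 *
 *	sysctl -w hw.wm0.txrx_workqueue=1
 */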
5833 
5834 /*
5835  * wm_init:		[ifnet interface function]
5836  *
5837  *	Initialize the interface.
5838  */
5839 static int
5840 wm_init(struct ifnet *ifp)
5841 {
5842 	struct wm_softc *sc = ifp->if_softc;
5843 	int ret;
5844 
5845 	WM_CORE_LOCK(sc);
5846 	ret = wm_init_locked(ifp);
5847 	WM_CORE_UNLOCK(sc);
5848 
5849 	return ret;
5850 }
5851 
5852 static int
5853 wm_init_locked(struct ifnet *ifp)
5854 {
5855 	struct wm_softc *sc = ifp->if_softc;
5856 	struct ethercom *ec = &sc->sc_ethercom;
5857 	int i, j, trynum, error = 0;
5858 	uint32_t reg, sfp_mask = 0;
5859 
5860 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5861 		device_xname(sc->sc_dev), __func__));
5862 	KASSERT(WM_CORE_LOCKED(sc));
5863 
5864 	/*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
5867 	 * of the descriptor so that the headers are aligned, for normal mtu,
5868 	 * on such platforms.  One possibility is that the DMA itself is
5869 	 * slightly more efficient if the front of the entire packet (instead
5870 	 * of the front of the headers) is aligned.
5871 	 *
5872 	 * Note we must always set align_tweak to 0 if we are using
5873 	 * jumbo frames.
5874 	 */
5875 #ifdef __NO_STRICT_ALIGNMENT
5876 	sc->sc_align_tweak = 0;
5877 #else
5878 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
5879 		sc->sc_align_tweak = 0;
5880 	else
5881 		sc->sc_align_tweak = 2;
5882 #endif /* __NO_STRICT_ALIGNMENT */
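	/*
	 * The tweak of 2 is the usual Ethernet alignment trick: shifting
	 * the buffer start by two bytes makes the 14-byte Ethernet header
	 * end on a 4-byte boundary, so the IP header that follows is
	 * naturally aligned on strict-alignment platforms.
	 */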
5883 
5884 	/* Cancel any pending I/O. */
5885 	wm_stop_locked(ifp, false, false);
5886 
5887 	/* Update statistics before reset */
5888 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
5889 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
5890 
5891 	/* PCH_SPT hardware workaround */
5892 	if (sc->sc_type == WM_T_PCH_SPT)
5893 		wm_flush_desc_rings(sc);
5894 
5895 	/* Reset the chip to a known state. */
5896 	wm_reset(sc);
5897 
5898 	/*
	 * AMT-based hardware can now take control from firmware.
5900 	 * Do this after reset.
5901 	 */
5902 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
5903 		wm_get_hw_control(sc);
5904 
5905 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
5906 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
5907 		wm_legacy_irq_quirk_spt(sc);
5908 
5909 	/* Init hardware bits */
5910 	wm_initialize_hardware_bits(sc);
5911 
5912 	/* Reset the PHY. */
5913 	if (sc->sc_flags & WM_F_HAS_MII)
5914 		wm_gmii_reset(sc);
5915 
5916 	if (sc->sc_type >= WM_T_ICH8) {
5917 		reg = CSR_READ(sc, WMREG_GCR);
5918 		/*
5919 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
5920 		 * default after reset.
5921 		 */
5922 		if (sc->sc_type == WM_T_ICH8)
5923 			reg |= GCR_NO_SNOOP_ALL;
5924 		else
5925 			reg &= ~GCR_NO_SNOOP_ALL;
5926 		CSR_WRITE(sc, WMREG_GCR, reg);
5927 	}
5928 
5929 	if ((sc->sc_type >= WM_T_ICH8)
5930 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
5931 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
5932 
5933 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
5934 		reg |= CTRL_EXT_RO_DIS;
5935 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5936 	}
5937 
5938 	/* Calculate (E)ITR value */
5939 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
5940 		/*
5941 		 * For NEWQUEUE's EITR (except for 82575).
5942 		 * 82575's EITR should be set same throttling value as other
5943 		 * old controllers' ITR because the interrupt/sec calculation
5944 		 * is the same, that is, 1,000,000,000 / (N * 256).
5945 		 *
5946 		 * 82574's EITR should be set same throttling value as ITR.
5947 		 *
5948 		 * For N interrupts/sec, set this value to:
		 * 1,000,000 / N, in contrast to the ITR throttling value.
5950 		 */
5951 		sc->sc_itr_init = 450;
5952 	} else if (sc->sc_type >= WM_T_82543) {
5953 		/*
5954 		 * Set up the interrupt throttling register (units of 256ns)
5955 		 * Note that a footnote in Intel's documentation says this
5956 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
5957 		 * or 10Mbit mode.  Empirically, it appears to be the case
5958 		 * that that is also true for the 1024ns units of the other
5959 		 * interrupt-related timer registers -- so, really, we ought
5960 		 * to divide this value by 4 when the link speed is low.
5961 		 *
5962 		 * XXX implement this division at link speed change!
5963 		 */
5964 
5965 		/*
5966 		 * For N interrupts/sec, set this value to:
5967 		 * 1,000,000,000 / (N * 256).  Note that we set the
5968 		 * absolute and packet timer values to this value
5969 		 * divided by 4 to get "simple timer" behavior.
5970 		 */
5971 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
5972 	}
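	/*
	 * Checking the arithmetic above: an ITR of 1500 in 256 ns units
	 * gives 10^9 / (1500 * 256) ~= 2604 interrupts/sec, matching the
	 * comment; an EITR of 450 (assuming 1 us units, per the
	 * 1,000,000 / N formula above) allows roughly 2222 interrupts/sec.
	 */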
5973 
5974 	error = wm_init_txrx_queues(sc);
5975 	if (error)
5976 		goto out;
5977 
5978 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
5979 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
5980 	    (sc->sc_type >= WM_T_82575))
5981 		wm_serdes_power_up_link_82575(sc);
5982 
5983 	/* Clear out the VLAN table -- we don't use it (yet). */
5984 	CSR_WRITE(sc, WMREG_VET, 0);
5985 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
5986 		trynum = 10; /* Due to hw errata */
5987 	else
5988 		trynum = 1;
5989 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
5990 		for (j = 0; j < trynum; j++)
5991 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
5992 
5993 	/*
5994 	 * Set up flow-control parameters.
5995 	 *
5996 	 * XXX Values could probably stand some tuning.
5997 	 */
5998 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
5999 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
6000 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
6001 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
6002 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
6003 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
6004 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
6005 	}
6006 
6007 	sc->sc_fcrtl = FCRTL_DFLT;
6008 	if (sc->sc_type < WM_T_82543) {
6009 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
6010 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
6011 	} else {
6012 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
6013 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
6014 	}
6015 
6016 	if (sc->sc_type == WM_T_80003)
6017 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
6018 	else
6019 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
6020 
6021 	/* Writes the control register. */
6022 	wm_set_vlan(sc);
6023 
6024 	if (sc->sc_flags & WM_F_HAS_MII) {
6025 		uint16_t kmreg;
6026 
6027 		switch (sc->sc_type) {
6028 		case WM_T_80003:
6029 		case WM_T_ICH8:
6030 		case WM_T_ICH9:
6031 		case WM_T_ICH10:
6032 		case WM_T_PCH:
6033 		case WM_T_PCH2:
6034 		case WM_T_PCH_LPT:
6035 		case WM_T_PCH_SPT:
6036 		case WM_T_PCH_CNP:
6037 			/*
6038 			 * Set the mac to wait the maximum time between each
6039 			 * iteration and increase the max iterations when
6040 			 * polling the phy; this fixes erroneous timeouts at
6041 			 * 10Mbps.
6042 			 */
6043 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
6044 			    0xFFFF);
6045 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
6046 			    &kmreg);
6047 			kmreg |= 0x3F;
6048 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
6049 			    kmreg);
6050 			break;
6051 		default:
6052 			break;
6053 		}
6054 
6055 		if (sc->sc_type == WM_T_80003) {
6056 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
6057 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
6058 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6059 
6060 			/* Bypass RX and TX FIFO's */
6061 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
6062 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
6063 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
6064 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
6065 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
6066 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
6067 		}
6068 	}
6069 #if 0
6070 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
6071 #endif
6072 
6073 	/* Set up checksum offload parameters. */
6074 	reg = CSR_READ(sc, WMREG_RXCSUM);
6075 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
6076 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
6077 		reg |= RXCSUM_IPOFL;
6078 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
6079 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
6080 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
6081 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
6082 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
6083 
6084 	/* Set registers about MSI-X */
6085 	if (wm_is_using_msix(sc)) {
6086 		uint32_t ivar, qintr_idx;
6087 		struct wm_queue *wmq;
6088 		unsigned int qid;
6089 
6090 		if (sc->sc_type == WM_T_82575) {
6091 			/* Interrupt control */
6092 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
6093 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
6094 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6095 
6096 			/* TX and RX */
6097 			for (i = 0; i < sc->sc_nqueues; i++) {
6098 				wmq = &sc->sc_queue[i];
6099 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
6100 				    EITR_TX_QUEUE(wmq->wmq_id)
6101 				    | EITR_RX_QUEUE(wmq->wmq_id));
6102 			}
6103 			/* Link status */
6104 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
6105 			    EITR_OTHER);
6106 		} else if (sc->sc_type == WM_T_82574) {
6107 			/* Interrupt control */
6108 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
6109 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
6110 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6111 
6112 			/*
			 * Work around an issue with spurious interrupts
			 * in MSI-X mode.
			 * At wm_initialize_hardware_bits(), sc_nintrs has
			 * not been initialized yet, so re-initialize
			 * WMREG_RFCTL here.
6117 			 */
6118 			reg = CSR_READ(sc, WMREG_RFCTL);
6119 			reg |= WMREG_RFCTL_ACKDIS;
6120 			CSR_WRITE(sc, WMREG_RFCTL, reg);
6121 
6122 			ivar = 0;
6123 			/* TX and RX */
6124 			for (i = 0; i < sc->sc_nqueues; i++) {
6125 				wmq = &sc->sc_queue[i];
6126 				qid = wmq->wmq_id;
6127 				qintr_idx = wmq->wmq_intr_idx;
6128 
6129 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
6130 				    IVAR_TX_MASK_Q_82574(qid));
6131 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
6132 				    IVAR_RX_MASK_Q_82574(qid));
6133 			}
6134 			/* Link status */
6135 			ivar |= __SHIFTIN((IVAR_VALID_82574
6136 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
6137 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
6138 		} else {
6139 			/* Interrupt control */
6140 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
6141 			    | GPIE_EIAME | GPIE_PBA);
6142 
6143 			switch (sc->sc_type) {
6144 			case WM_T_82580:
6145 			case WM_T_I350:
6146 			case WM_T_I354:
6147 			case WM_T_I210:
6148 			case WM_T_I211:
6149 				/* TX and RX */
6150 				for (i = 0; i < sc->sc_nqueues; i++) {
6151 					wmq = &sc->sc_queue[i];
6152 					qid = wmq->wmq_id;
6153 					qintr_idx = wmq->wmq_intr_idx;
6154 
6155 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
6156 					ivar &= ~IVAR_TX_MASK_Q(qid);
6157 					ivar |= __SHIFTIN((qintr_idx
6158 						| IVAR_VALID),
6159 					    IVAR_TX_MASK_Q(qid));
6160 					ivar &= ~IVAR_RX_MASK_Q(qid);
6161 					ivar |= __SHIFTIN((qintr_idx
6162 						| IVAR_VALID),
6163 					    IVAR_RX_MASK_Q(qid));
6164 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
6165 				}
6166 				break;
6167 			case WM_T_82576:
6168 				/* TX and RX */
6169 				for (i = 0; i < sc->sc_nqueues; i++) {
6170 					wmq = &sc->sc_queue[i];
6171 					qid = wmq->wmq_id;
6172 					qintr_idx = wmq->wmq_intr_idx;
6173 
6174 					ivar = CSR_READ(sc,
6175 					    WMREG_IVAR_Q_82576(qid));
6176 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
6177 					ivar |= __SHIFTIN((qintr_idx
6178 						| IVAR_VALID),
6179 					    IVAR_TX_MASK_Q_82576(qid));
6180 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
6181 					ivar |= __SHIFTIN((qintr_idx
6182 						| IVAR_VALID),
6183 					    IVAR_RX_MASK_Q_82576(qid));
6184 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
6185 					    ivar);
6186 				}
6187 				break;
6188 			default:
6189 				break;
6190 			}
6191 
6192 			/* Link status */
6193 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
6194 			    IVAR_MISC_OTHER);
6195 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
6196 		}
6197 
6198 		if (wm_is_using_multiqueue(sc)) {
6199 			wm_init_rss(sc);
6200 
			/*
			 * NOTE: Receive Full-Packet Checksum Offload
			 * is mutually exclusive with Multiqueue. However,
			 * this is not the same as TCP/IP checksums, which
			 * still work.
			 */
6207 			reg = CSR_READ(sc, WMREG_RXCSUM);
6208 			reg |= RXCSUM_PCSD;
6209 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
6210 		}
6211 	}
6212 
6213 	/* Set up the interrupt registers. */
6214 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
6215 
6216 	/* Enable SFP module insertion interrupt if it's required */
6217 	if ((sc->sc_flags & WM_F_SFP) != 0) {
6218 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
6219 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6220 		sfp_mask = ICR_GPI(0);
6221 	}
6222 
6223 	if (wm_is_using_msix(sc)) {
6224 		uint32_t mask;
6225 		struct wm_queue *wmq;
6226 
6227 		switch (sc->sc_type) {
6228 		case WM_T_82574:
6229 			mask = 0;
6230 			for (i = 0; i < sc->sc_nqueues; i++) {
6231 				wmq = &sc->sc_queue[i];
6232 				mask |= ICR_TXQ(wmq->wmq_id);
6233 				mask |= ICR_RXQ(wmq->wmq_id);
6234 			}
6235 			mask |= ICR_OTHER;
6236 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
6237 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
6238 			break;
6239 		default:
6240 			if (sc->sc_type == WM_T_82575) {
6241 				mask = 0;
6242 				for (i = 0; i < sc->sc_nqueues; i++) {
6243 					wmq = &sc->sc_queue[i];
6244 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
6245 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
6246 				}
6247 				mask |= EITR_OTHER;
6248 			} else {
6249 				mask = 0;
6250 				for (i = 0; i < sc->sc_nqueues; i++) {
6251 					wmq = &sc->sc_queue[i];
6252 					mask |= 1 << wmq->wmq_intr_idx;
6253 				}
6254 				mask |= 1 << sc->sc_link_intr_idx;
6255 			}
6256 			CSR_WRITE(sc, WMREG_EIAC, mask);
6257 			CSR_WRITE(sc, WMREG_EIAM, mask);
6258 			CSR_WRITE(sc, WMREG_EIMS, mask);
6259 
6260 			/* For other interrupts */
6261 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
6262 			break;
6263 		}
6264 	} else {
6265 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
6266 		    ICR_RXO | ICR_RXT0 | sfp_mask;
6267 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
6268 	}
6269 
6270 	/* Set up the inter-packet gap. */
6271 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
6272 
6273 	if (sc->sc_type >= WM_T_82543) {
6274 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6275 			struct wm_queue *wmq = &sc->sc_queue[qidx];
6276 			wm_itrs_writereg(sc, wmq);
6277 		}
6278 		/*
		 * Link interrupts occur much less frequently than TX
		 * and RX interrupts, so we don't tune the
		 * EITR(WM_MSIX_LINKINTR_IDX) value as
		 * FreeBSD's if_igb does.
6283 		 */
6284 	}
6285 
6286 	/* Set the VLAN ethernetype. */
6287 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
6288 
6289 	/*
6290 	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
6292 	 * we resolve the media type.
6293 	 */
6294 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
6295 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
6296 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6297 	if (sc->sc_type >= WM_T_82571)
6298 		sc->sc_tctl |= TCTL_MULR;
6299 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6300 
6301 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* Write TDT after TCTL.EN is set. See the documentation. */
6303 		CSR_WRITE(sc, WMREG_TDT(0), 0);
6304 	}
6305 
6306 	if (sc->sc_type == WM_T_80003) {
6307 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
6308 		reg &= ~TCTL_EXT_GCEX_MASK;
6309 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
6310 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
6311 	}
6312 
6313 	/* Set the media. */
6314 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
6315 		goto out;
6316 
6317 	/* Configure for OS presence */
6318 	wm_init_manageability(sc);
6319 
6320 	/*
6321 	 * Set up the receive control register; we actually program the
6322 	 * register when we set the receive filter. Use multicast address
6323 	 * offset type 0.
6324 	 *
6325 	 * Only the i82544 has the ability to strip the incoming CRC, so we
6326 	 * don't enable that feature.
6327 	 */
6328 	sc->sc_mchash_type = 0;
6329 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
6330 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
6331 
	/* The 82574 uses the one-buffer extended Rx descriptor. */
6333 	if (sc->sc_type == WM_T_82574)
6334 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
6335 
6336 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
6337 		sc->sc_rctl |= RCTL_SECRC;
6338 
6339 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
6340 	    && (ifp->if_mtu > ETHERMTU)) {
6341 		sc->sc_rctl |= RCTL_LPE;
6342 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6343 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
6344 	}
6345 
6346 	if (MCLBYTES == 2048)
6347 		sc->sc_rctl |= RCTL_2k;
6348 	else {
6349 		if (sc->sc_type >= WM_T_82543) {
6350 			switch (MCLBYTES) {
6351 			case 4096:
6352 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
6353 				break;
6354 			case 8192:
6355 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
6356 				break;
6357 			case 16384:
6358 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
6359 				break;
6360 			default:
6361 				panic("wm_init: MCLBYTES %d unsupported",
6362 				    MCLBYTES);
6363 				break;
6364 			}
6365 		} else
6366 			panic("wm_init: i82542 requires MCLBYTES = 2048");
6367 	}
6368 
6369 	/* Enable ECC */
6370 	switch (sc->sc_type) {
6371 	case WM_T_82571:
6372 		reg = CSR_READ(sc, WMREG_PBA_ECC);
6373 		reg |= PBA_ECC_CORR_EN;
6374 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
6375 		break;
6376 	case WM_T_PCH_LPT:
6377 	case WM_T_PCH_SPT:
6378 	case WM_T_PCH_CNP:
6379 		reg = CSR_READ(sc, WMREG_PBECCSTS);
6380 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
6381 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
6382 
6383 		sc->sc_ctrl |= CTRL_MEHE;
6384 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6385 		break;
6386 	default:
6387 		break;
6388 	}
6389 
6390 	/*
6391 	 * Set the receive filter.
6392 	 *
6393 	 * For 82575 and 82576, the RX descriptors must be initialized after
6394 	 * the setting of RCTL.EN in wm_set_filter()
6395 	 */
6396 	wm_set_filter(sc);
6397 
	/* On 82575 and later, set RDT only if RX is enabled. */
6399 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6400 		int qidx;
6401 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6402 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
6403 			for (i = 0; i < WM_NRXDESC; i++) {
6404 				mutex_enter(rxq->rxq_lock);
6405 				wm_init_rxdesc(rxq, i);
6406 				mutex_exit(rxq->rxq_lock);
			}
6409 		}
6410 	}
6411 
6412 	wm_unset_stopping_flags(sc);
6413 
6414 	/* Start the one second link check clock. */
6415 	callout_schedule(&sc->sc_tick_ch, hz);
6416 
6417 	/* ...all done! */
6418 	ifp->if_flags |= IFF_RUNNING;
6419 
6420  out:
6421 	/* Save last flags for the callback */
6422 	sc->sc_if_flags = ifp->if_flags;
6423 	sc->sc_ec_capenable = ec->ec_capenable;
6424 	if (error)
6425 		log(LOG_ERR, "%s: interface not running\n",
6426 		    device_xname(sc->sc_dev));
6427 	return error;
6428 }
6429 
6430 /*
6431  * wm_stop:		[ifnet interface function]
6432  *
6433  *	Stop transmission on the interface.
6434  */
6435 static void
6436 wm_stop(struct ifnet *ifp, int disable)
6437 {
6438 	struct wm_softc *sc = ifp->if_softc;
6439 
6440 	ASSERT_SLEEPABLE();
6441 
6442 	WM_CORE_LOCK(sc);
6443 	wm_stop_locked(ifp, disable ? true : false, true);
6444 	WM_CORE_UNLOCK(sc);
6445 
6446 	/*
	 * After wm_set_stopping_flags(), it is guaranteed that
	 * wm_handle_queue_work() does not call workqueue_enqueue().
	 * However, workqueue_wait() cannot be called in wm_stop_locked()
	 * because it can sleep, so call workqueue_wait() here.
6452 	 */
6453 	for (int i = 0; i < sc->sc_nqueues; i++)
6454 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
6455 }
6456 
6457 static void
6458 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
6459 {
6460 	struct wm_softc *sc = ifp->if_softc;
6461 	struct wm_txsoft *txs;
6462 	int i, qidx;
6463 
6464 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
6465 		device_xname(sc->sc_dev), __func__));
6466 	KASSERT(WM_CORE_LOCKED(sc));
6467 
6468 	wm_set_stopping_flags(sc);
6469 
6470 	if (sc->sc_flags & WM_F_HAS_MII) {
6471 		/* Down the MII. */
6472 		mii_down(&sc->sc_mii);
6473 	} else {
6474 #if 0
6475 		/* Should we clear PHY's status properly? */
6476 		wm_reset(sc);
6477 #endif
6478 	}
6479 
6480 	/* Stop the transmit and receive processes. */
6481 	CSR_WRITE(sc, WMREG_TCTL, 0);
6482 	CSR_WRITE(sc, WMREG_RCTL, 0);
6483 	sc->sc_rctl &= ~RCTL_EN;
6484 
6485 	/*
6486 	 * Clear the interrupt mask to ensure the device cannot assert its
6487 	 * interrupt line.
6488 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
6489 	 * service any currently pending or shared interrupt.
6490 	 */
6491 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
6492 	sc->sc_icr = 0;
6493 	if (wm_is_using_msix(sc)) {
6494 		if (sc->sc_type != WM_T_82574) {
6495 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
6496 			CSR_WRITE(sc, WMREG_EIAC, 0);
6497 		} else
6498 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
6499 	}
6500 
6501 	/*
6502 	 * Stop callouts after interrupts are disabled; if we have
6503 	 * to wait for them, we will be releasing the CORE_LOCK
6504 	 * briefly, which will unblock interrupts on the current CPU.
6505 	 */
6506 
6507 	/* Stop the one second clock. */
6508 	if (wait)
6509 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
6510 	else
6511 		callout_stop(&sc->sc_tick_ch);
6512 
6513 	/* Stop the 82547 Tx FIFO stall check timer. */
6514 	if (sc->sc_type == WM_T_82547) {
6515 		if (wait)
6516 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
6517 		else
6518 			callout_stop(&sc->sc_txfifo_ch);
6519 	}
6520 
6521 	/* Release any queued transmit buffers. */
6522 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6523 		struct wm_queue *wmq = &sc->sc_queue[qidx];
6524 		struct wm_txqueue *txq = &wmq->wmq_txq;
6525 		mutex_enter(txq->txq_lock);
6526 		txq->txq_sending = false; /* Ensure watchdog disabled */
6527 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6528 			txs = &txq->txq_soft[i];
6529 			if (txs->txs_mbuf != NULL) {
6530 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
6531 				m_freem(txs->txs_mbuf);
6532 				txs->txs_mbuf = NULL;
6533 			}
6534 		}
6535 		mutex_exit(txq->txq_lock);
6536 	}
6537 
6538 	/* Mark the interface as down and cancel the watchdog timer. */
6539 	ifp->if_flags &= ~IFF_RUNNING;
6540 
6541 	if (disable) {
6542 		for (i = 0; i < sc->sc_nqueues; i++) {
6543 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6544 			mutex_enter(rxq->rxq_lock);
6545 			wm_rxdrain(rxq);
6546 			mutex_exit(rxq->rxq_lock);
6547 		}
6548 	}
6549 
6550 #if 0 /* notyet */
6551 	if (sc->sc_type >= WM_T_82544)
6552 		CSR_WRITE(sc, WMREG_WUC, 0);
6553 #endif
6554 }
6555 
6556 static void
6557 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
6558 {
6559 	struct mbuf *m;
6560 	int i;
6561 
6562 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
6563 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
6564 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
6565 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
6566 		    m->m_data, m->m_len, m->m_flags);
6567 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
6568 	    i, i == 1 ? "" : "s");
6569 }
6570 
6571 /*
6572  * wm_82547_txfifo_stall:
6573  *
6574  *	Callout used to wait for the 82547 Tx FIFO to drain,
6575  *	reset the FIFO pointers, and restart packet transmission.
6576  */
6577 static void
6578 wm_82547_txfifo_stall(void *arg)
6579 {
6580 	struct wm_softc *sc = arg;
6581 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6582 
6583 	mutex_enter(txq->txq_lock);
6584 
6585 	if (txq->txq_stopping)
6586 		goto out;
6587 
6588 	if (txq->txq_fifo_stall) {
6589 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
6590 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
6591 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
6592 			/*
6593 			 * Packets have drained.  Stop transmitter, reset
6594 			 * FIFO pointers, restart transmitter, and kick
6595 			 * the packet queue.
6596 			 */
6597 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
6598 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
6599 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
6600 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
6601 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
6602 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
6603 			CSR_WRITE(sc, WMREG_TCTL, tctl);
6604 			CSR_WRITE_FLUSH(sc);
6605 
6606 			txq->txq_fifo_head = 0;
6607 			txq->txq_fifo_stall = 0;
6608 			wm_start_locked(&sc->sc_ethercom.ec_if);
6609 		} else {
6610 			/*
6611 			 * Still waiting for packets to drain; try again in
6612 			 * another tick.
6613 			 */
6614 			callout_schedule(&sc->sc_txfifo_ch, 1);
6615 		}
6616 	}
6617 
6618 out:
6619 	mutex_exit(txq->txq_lock);
6620 }
6621 
6622 /*
6623  * wm_82547_txfifo_bugchk:
6624  *
6625  *	Check for bug condition in the 82547 Tx FIFO.  We need to
6626  *	prevent enqueueing a packet that would wrap around the end
 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
6628  *
6629  *	We do this by checking the amount of space before the end
6630  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
6631  *	the Tx FIFO, wait for all remaining packets to drain, reset
6632  *	the internal FIFO pointers to the beginning, and restart
6633  *	transmission on the interface.
6634  */
6635 #define	WM_FIFO_HDR		0x10
6636 #define	WM_82547_PAD_LEN	0x3e0
6637 static int
6638 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
6639 {
6640 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6641 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
6642 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
6643 
6644 	/* Just return if already stalled. */
6645 	if (txq->txq_fifo_stall)
6646 		return 1;
6647 
6648 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
6649 		/* Stall only occurs in half-duplex mode. */
6650 		goto send_packet;
6651 	}
6652 
6653 	if (len >= WM_82547_PAD_LEN + space) {
6654 		txq->txq_fifo_stall = 1;
6655 		callout_schedule(&sc->sc_txfifo_ch, 1);
6656 		return 1;
6657 	}
6658 
6659  send_packet:
6660 	txq->txq_fifo_head += len;
6661 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
6662 		txq->txq_fifo_head -= txq->txq_fifo_size;
6663 
6664 	return 0;
6665 }
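/*
 * A worked example of the check above: a full-size 1514-byte frame
 * rounds up to len = 1536 (1514 + WM_FIFO_HDR, in WM_FIFO_HDR units).
 * The stall path triggers when len >= WM_82547_PAD_LEN + space, i.e.
 * once no more than 1536 - 0x3e0 = 544 bytes remain before the end of
 * the FIFO; otherwise the head simply advances, wrapping modulo the
 * FIFO size.
 */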
6666 
6667 static int
6668 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
6669 {
6670 	int error;
6671 
6672 	/*
6673 	 * Allocate the control data structures, and create and load the
6674 	 * DMA map for it.
6675 	 *
6676 	 * NOTE: All Tx descriptors must be in the same 4G segment of
6677 	 * memory.  So must Rx descriptors.  We simplify by allocating
6678 	 * both sets within the same 4G segment.
6679 	 */
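	/*
	 * The 4G constraint is enforced below by passing 0x100000000ULL as
	 * the boundary argument to bus_dmamem_alloc(), so no allocated
	 * segment may cross a 4 GB boundary.
	 */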
6680 	if (sc->sc_type < WM_T_82544)
6681 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
6682 	else
6683 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
6684 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6685 		txq->txq_descsize = sizeof(nq_txdesc_t);
6686 	else
6687 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
6688 
6689 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
6690 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
6691 		    1, &txq->txq_desc_rseg, 0)) != 0) {
6692 		aprint_error_dev(sc->sc_dev,
6693 		    "unable to allocate TX control data, error = %d\n",
6694 		    error);
6695 		goto fail_0;
6696 	}
6697 
6698 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
6699 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
6700 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
6701 		aprint_error_dev(sc->sc_dev,
6702 		    "unable to map TX control data, error = %d\n", error);
6703 		goto fail_1;
6704 	}
6705 
6706 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
6707 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
6708 		aprint_error_dev(sc->sc_dev,
6709 		    "unable to create TX control data DMA map, error = %d\n",
6710 		    error);
6711 		goto fail_2;
6712 	}
6713 
6714 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
6715 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
6716 		aprint_error_dev(sc->sc_dev,
6717 		    "unable to load TX control data DMA map, error = %d\n",
6718 		    error);
6719 		goto fail_3;
6720 	}
6721 
6722 	return 0;
6723 
6724  fail_3:
6725 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
6726  fail_2:
6727 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
6728 	    WM_TXDESCS_SIZE(txq));
6729  fail_1:
6730 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
6731  fail_0:
6732 	return error;
6733 }
6734 
6735 static void
6736 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
6737 {
6738 
6739 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
6740 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
6741 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
6742 	    WM_TXDESCS_SIZE(txq));
6743 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
6744 }
6745 
6746 static int
6747 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
6748 {
6749 	int error;
6750 	size_t rxq_descs_size;
6751 
6752 	/*
6753 	 * Allocate the control data structures, and create and load the
6754 	 * DMA map for it.
6755 	 *
6756 	 * NOTE: All Tx descriptors must be in the same 4G segment of
6757 	 * memory.  So must Rx descriptors.  We simplify by allocating
6758 	 * both sets within the same 4G segment.
6759 	 */
6760 	rxq->rxq_ndesc = WM_NRXDESC;
6761 	if (sc->sc_type == WM_T_82574)
6762 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
6763 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6764 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
6765 	else
6766 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
6767 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
6768 
6769 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
6770 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
6771 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
6772 		aprint_error_dev(sc->sc_dev,
6773 		    "unable to allocate RX control data, error = %d\n",
6774 		    error);
6775 		goto fail_0;
6776 	}
6777 
6778 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
6779 		    rxq->rxq_desc_rseg, rxq_descs_size,
6780 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
6781 		aprint_error_dev(sc->sc_dev,
6782 		    "unable to map RX control data, error = %d\n", error);
6783 		goto fail_1;
6784 	}
6785 
6786 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
6787 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
6788 		aprint_error_dev(sc->sc_dev,
6789 		    "unable to create RX control data DMA map, error = %d\n",
6790 		    error);
6791 		goto fail_2;
6792 	}
6793 
6794 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
6795 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
6796 		aprint_error_dev(sc->sc_dev,
6797 		    "unable to load RX control data DMA map, error = %d\n",
6798 		    error);
6799 		goto fail_3;
6800 	}
6801 
6802 	return 0;
6803 
6804  fail_3:
6805 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
6806  fail_2:
6807 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
6808 	    rxq_descs_size);
6809  fail_1:
6810 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
6811  fail_0:
6812 	return error;
6813 }
6814 
6815 static void
6816 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
6817 {
6818 
6819 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
6820 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
6821 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
6822 	    rxq->rxq_descsize * rxq->rxq_ndesc);
6823 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
6824 }
6825 
6827 static int
6828 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
6829 {
6830 	int i, error;
6831 
6832 	/* Create the transmit buffer DMA maps. */
6833 	WM_TXQUEUELEN(txq) =
6834 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
6835 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
6836 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6837 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
6838 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
6839 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
6840 			aprint_error_dev(sc->sc_dev,
6841 			    "unable to create Tx DMA map %d, error = %d\n",
6842 			    i, error);
6843 			goto fail;
6844 		}
6845 	}
6846 
6847 	return 0;
6848 
6849  fail:
6850 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6851 		if (txq->txq_soft[i].txs_dmamap != NULL)
6852 			bus_dmamap_destroy(sc->sc_dmat,
6853 			    txq->txq_soft[i].txs_dmamap);
6854 	}
6855 	return error;
6856 }
6857 
6858 static void
6859 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
6860 {
6861 	int i;
6862 
6863 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6864 		if (txq->txq_soft[i].txs_dmamap != NULL)
6865 			bus_dmamap_destroy(sc->sc_dmat,
6866 			    txq->txq_soft[i].txs_dmamap);
6867 	}
6868 }
6869 
6870 static int
6871 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
6872 {
6873 	int i, error;
6874 
6875 	/* Create the receive buffer DMA maps. */
6876 	for (i = 0; i < rxq->rxq_ndesc; i++) {
6877 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
6878 			    MCLBYTES, 0, 0,
6879 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
6880 			aprint_error_dev(sc->sc_dev,
			    "unable to create Rx DMA map %d, error = %d\n",
6882 			    i, error);
6883 			goto fail;
6884 		}
6885 		rxq->rxq_soft[i].rxs_mbuf = NULL;
6886 	}
6887 
6888 	return 0;
6889 
6890  fail:
6891 	for (i = 0; i < rxq->rxq_ndesc; i++) {
6892 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
6893 			bus_dmamap_destroy(sc->sc_dmat,
6894 			    rxq->rxq_soft[i].rxs_dmamap);
6895 	}
6896 	return error;
6897 }
6898 
6899 static void
6900 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
6901 {
6902 	int i;
6903 
6904 	for (i = 0; i < rxq->rxq_ndesc; i++) {
6905 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
6906 			bus_dmamap_destroy(sc->sc_dmat,
6907 			    rxq->rxq_soft[i].rxs_dmamap);
6908 	}
6909 }
6910 
6911 /*
 * wm_alloc_txrx_queues:
6913  *	Allocate {tx,rx}descs and {tx,rx} buffers
6914  */
6915 static int
6916 wm_alloc_txrx_queues(struct wm_softc *sc)
6917 {
6918 	int i, error, tx_done, rx_done;
6919 
6920 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
6921 	    KM_SLEEP);
6922 	if (sc->sc_queue == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
6924 		error = ENOMEM;
6925 		goto fail_0;
6926 	}
6927 
6928 	/* For transmission */
6929 	error = 0;
6930 	tx_done = 0;
6931 	for (i = 0; i < sc->sc_nqueues; i++) {
6932 #ifdef WM_EVENT_COUNTERS
6933 		int j;
6934 		const char *xname;
6935 #endif
6936 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6937 		txq->txq_sc = sc;
6938 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
6939 
6940 		error = wm_alloc_tx_descs(sc, txq);
6941 		if (error)
6942 			break;
6943 		error = wm_alloc_tx_buffer(sc, txq);
6944 		if (error) {
6945 			wm_free_tx_descs(sc, txq);
6946 			break;
6947 		}
6948 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
6949 		if (txq->txq_interq == NULL) {
6950 			wm_free_tx_descs(sc, txq);
6951 			wm_free_tx_buffer(sc, txq);
6952 			error = ENOMEM;
6953 			break;
6954 		}
6955 
6956 #ifdef WM_EVENT_COUNTERS
6957 		xname = device_xname(sc->sc_dev);
6958 
6959 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
6960 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
6961 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
6962 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
6963 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
6964 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
6965 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
6966 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
6967 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
6968 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
6969 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
6970 
6971 		for (j = 0; j < WM_NTXSEGS; j++) {
			snprintf(txq->txq_txseg_evcnt_names[j],
			    sizeof(txq->txq_txseg_evcnt_names[j]),
			    "txq%02dtxseg%d", i, j);
			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
			    EVCNT_TYPE_MISC, NULL, xname,
			    txq->txq_txseg_evcnt_names[j]);
6976 		}
6977 
6978 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
6979 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
6980 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
6981 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
6982 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
6983 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
6984 #endif /* WM_EVENT_COUNTERS */
6985 
6986 		tx_done++;
6987 	}
6988 	if (error)
6989 		goto fail_1;
6990 
6991 	/* For receive */
6992 	error = 0;
6993 	rx_done = 0;
6994 	for (i = 0; i < sc->sc_nqueues; i++) {
6995 #ifdef WM_EVENT_COUNTERS
6996 		const char *xname;
6997 #endif
6998 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6999 		rxq->rxq_sc = sc;
7000 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
7001 
7002 		error = wm_alloc_rx_descs(sc, rxq);
7003 		if (error)
7004 			break;
7005 
7006 		error = wm_alloc_rx_buffer(sc, rxq);
7007 		if (error) {
7008 			wm_free_rx_descs(sc, rxq);
7009 			break;
7010 		}
7011 
7012 #ifdef WM_EVENT_COUNTERS
7013 		xname = device_xname(sc->sc_dev);
7014 
7015 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
7016 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
7017 
7018 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
7019 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
7020 #endif /* WM_EVENT_COUNTERS */
7021 
7022 		rx_done++;
7023 	}
7024 	if (error)
7025 		goto fail_2;
7026 
7027 	return 0;
7028 
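	/*
	 * Unwind in reverse order.  tx_done and rx_done count how many
	 * queues were fully set up, so only those queues are torn down.
	 */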
7029  fail_2:
7030 	for (i = 0; i < rx_done; i++) {
7031 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7032 		wm_free_rx_buffer(sc, rxq);
7033 		wm_free_rx_descs(sc, rxq);
7034 		if (rxq->rxq_lock)
7035 			mutex_obj_free(rxq->rxq_lock);
7036 	}
7037  fail_1:
7038 	for (i = 0; i < tx_done; i++) {
7039 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7040 		pcq_destroy(txq->txq_interq);
7041 		wm_free_tx_buffer(sc, txq);
7042 		wm_free_tx_descs(sc, txq);
7043 		if (txq->txq_lock)
7044 			mutex_obj_free(txq->txq_lock);
7045 	}
7046 
7047 	kmem_free(sc->sc_queue,
7048 	    sizeof(struct wm_queue) * sc->sc_nqueues);
7049  fail_0:
7050 	return error;
7051 }
7052 
7053 /*
 * wm_free_txrx_queues:
7055  *	Free {tx,rx}descs and {tx,rx} buffers
7056  */
7057 static void
7058 wm_free_txrx_queues(struct wm_softc *sc)
7059 {
7060 	int i;
7061 
7062 	for (i = 0; i < sc->sc_nqueues; i++) {
7063 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7064 
7065 #ifdef WM_EVENT_COUNTERS
7066 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
7067 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
7068 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
7069 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
7070 #endif /* WM_EVENT_COUNTERS */
7071 
7072 		wm_free_rx_buffer(sc, rxq);
7073 		wm_free_rx_descs(sc, rxq);
7074 		if (rxq->rxq_lock)
7075 			mutex_obj_free(rxq->rxq_lock);
7076 	}
7077 
7078 	for (i = 0; i < sc->sc_nqueues; i++) {
7079 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7080 		struct mbuf *m;
7081 #ifdef WM_EVENT_COUNTERS
7082 		int j;
7083 
7084 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
7085 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
7086 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
7087 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
7088 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
7089 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
7090 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
7091 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
7092 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
7093 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
7094 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
7095 
7096 		for (j = 0; j < WM_NTXSEGS; j++)
7097 			evcnt_detach(&txq->txq_ev_txseg[j]);
7098 
7099 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
7100 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
7101 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
7102 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
7103 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
7104 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
7105 #endif /* WM_EVENT_COUNTERS */
7106 
7107 		/* Drain txq_interq */
7108 		while ((m = pcq_get(txq->txq_interq)) != NULL)
7109 			m_freem(m);
7110 		pcq_destroy(txq->txq_interq);
7111 
7112 		wm_free_tx_buffer(sc, txq);
7113 		wm_free_tx_descs(sc, txq);
7114 		if (txq->txq_lock)
7115 			mutex_obj_free(txq->txq_lock);
7116 	}
7117 
7118 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
7119 }
7120 
7121 static void
7122 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
7123 {
7124 
7125 	KASSERT(mutex_owned(txq->txq_lock));
7126 
7127 	/* Initialize the transmit descriptor ring. */
7128 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
7129 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
7130 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
7131 	txq->txq_free = WM_NTXDESC(txq);
7132 	txq->txq_next = 0;
7133 }
7134 
7135 static void
7136 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
7137     struct wm_txqueue *txq)
7138 {
7139 
7140 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
7141 		device_xname(sc->sc_dev), __func__));
7142 	KASSERT(mutex_owned(txq->txq_lock));
7143 
7144 	if (sc->sc_type < WM_T_82543) {
7145 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
7146 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
7147 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
7148 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
7149 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
7150 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
7151 	} else {
7152 		int qid = wmq->wmq_id;
7153 
7154 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
7155 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
7156 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
7157 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
7158 
7159 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7160 			/*
7161 			 * Don't write TDT before TCTL.EN is set.
			 * See the Intel documentation.
7163 			 */
7164 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
7165 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
7166 			    | TXDCTL_WTHRESH(0));
7167 		else {
7168 			/* XXX should update with AIM? */
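			/*
			 * wmq_itr is in ITR units; dividing by 4 converts
			 * it for TIDV/TADV, assuming the usual 256 ns ITR
			 * and 1.024 us TIDV granularities from the Intel
			 * datasheets.
			 */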
7169 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
7170 			if (sc->sc_type >= WM_T_82540) {
7171 				/* Should be the same */
7172 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
7173 			}
7174 
7175 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
7176 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
7177 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
7178 		}
7179 	}
7180 }
7181 
7182 static void
7183 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
7184 {
7185 	int i;
7186 
7187 	KASSERT(mutex_owned(txq->txq_lock));
7188 
7189 	/* Initialize the transmit job descriptors. */
7190 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
7191 		txq->txq_soft[i].txs_mbuf = NULL;
7192 	txq->txq_sfree = WM_TXQUEUELEN(txq);
7193 	txq->txq_snext = 0;
7194 	txq->txq_sdirty = 0;
7195 }
7196 
7197 static void
7198 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
7199     struct wm_txqueue *txq)
7200 {
7201 
7202 	KASSERT(mutex_owned(txq->txq_lock));
7203 
7204 	/*
7205 	 * Set up some register offsets that are different between
7206 	 * the i82542 and the i82543 and later chips.
7207 	 */
7208 	if (sc->sc_type < WM_T_82543)
7209 		txq->txq_tdt_reg = WMREG_OLD_TDT;
7210 	else
7211 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
7212 
7213 	wm_init_tx_descs(sc, txq);
7214 	wm_init_tx_regs(sc, wmq, txq);
7215 	wm_init_tx_buffer(sc, txq);
7216 
7217 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
7218 	txq->txq_sending = false;
7219 }
7220 
7221 static void
7222 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
7223     struct wm_rxqueue *rxq)
7224 {
7225 
7226 	KASSERT(mutex_owned(rxq->rxq_lock));
7227 
7228 	/*
7229 	 * Initialize the receive descriptor and receive job
7230 	 * descriptor rings.
7231 	 */
7232 	if (sc->sc_type < WM_T_82543) {
7233 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
7234 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
7235 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
7236 		    rxq->rxq_descsize * rxq->rxq_ndesc);
7237 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
7238 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
7239 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
7240 
7241 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
7242 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
7243 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
7244 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
7245 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
7246 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
7247 	} else {
7248 		int qid = wmq->wmq_id;
7249 
7250 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
7251 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
7252 		CSR_WRITE(sc, WMREG_RDLEN(qid),
7253 		    rxq->rxq_descsize * rxq->rxq_ndesc);
7254 
7255 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
				panic("%s: MCLBYTES %d unsupported for 82575 "
				    "or higher\n", __func__, MCLBYTES);

			/*
			 * SRRCTL.BSIZEPKT is expressed in units of
			 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, so MCLBYTES
			 * must be a multiple of that.  Currently, only
			 * SRRCTL_DESCTYPE_ADV_ONEBUF is supported.
			 */
			CSR_WRITE(sc, WMREG_SRRCTL(qid),
			    SRRCTL_DESCTYPE_ADV_ONEBUF
			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
7262 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
7263 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
7264 			    | RXDCTL_WTHRESH(1));
7265 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
7266 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
7267 		} else {
7268 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
7269 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
7270 			/* XXX should update with AIM? */
7271 			CSR_WRITE(sc, WMREG_RDTR,
7272 			    (wmq->wmq_itr / 4) | RDTR_FPD);
7273 			/* MUST be same */
7274 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
7275 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
7276 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
7277 		}
7278 	}
7279 }
7280 
7281 static int
7282 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7283 {
7284 	struct wm_rxsoft *rxs;
7285 	int error, i;
7286 
7287 	KASSERT(mutex_owned(rxq->rxq_lock));
7288 
7289 	for (i = 0; i < rxq->rxq_ndesc; i++) {
7290 		rxs = &rxq->rxq_soft[i];
7291 		if (rxs->rxs_mbuf == NULL) {
7292 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
7293 				log(LOG_ERR, "%s: unable to allocate or map "
7294 				    "rx buffer %d, error = %d\n",
7295 				    device_xname(sc->sc_dev), i, error);
7296 				/*
7297 				 * XXX Should attempt to run with fewer receive
7298 				 * XXX buffers instead of just failing.
7299 				 */
7300 				wm_rxdrain(rxq);
7301 				return ENOMEM;
7302 			}
7303 		} else {
7304 			/*
7305 			 * For 82575 and 82576, the RX descriptors must be
7306 			 * initialized after the setting of RCTL.EN in
7307 			 * wm_set_filter()
7308 			 */
7309 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
7310 				wm_init_rxdesc(rxq, i);
7311 		}
7312 	}
7313 	rxq->rxq_ptr = 0;
7314 	rxq->rxq_discard = 0;
7315 	WM_RXCHAIN_RESET(rxq);
7316 
7317 	return 0;
7318 }
7319 
7320 static int
7321 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
7322     struct wm_rxqueue *rxq)
7323 {
7324 
7325 	KASSERT(mutex_owned(rxq->rxq_lock));
7326 
7327 	/*
7328 	 * Set up some register offsets that are different between
7329 	 * the i82542 and the i82543 and later chips.
7330 	 */
7331 	if (sc->sc_type < WM_T_82543)
7332 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
7333 	else
7334 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
7335 
7336 	wm_init_rx_regs(sc, wmq, rxq);
7337 	return wm_init_rx_buffer(sc, rxq);
7338 }
7339 
7340 /*
 * wm_init_txrx_queues:
7342  *	Initialize {tx,rx}descs and {tx,rx} buffers
7343  */
7344 static int
7345 wm_init_txrx_queues(struct wm_softc *sc)
7346 {
7347 	int i, error = 0;
7348 
7349 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
7350 		device_xname(sc->sc_dev), __func__));
7351 
7352 	for (i = 0; i < sc->sc_nqueues; i++) {
7353 		struct wm_queue *wmq = &sc->sc_queue[i];
7354 		struct wm_txqueue *txq = &wmq->wmq_txq;
7355 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
7356 
7357 		/*
		 * TODO:
		 * Currently, we use a constant value instead of AIM
		 * (Adaptive Interrupt Moderation).  Furthermore, the
		 * interrupt interval used in multiqueue (polling) mode
		 * is lower than the default value.  More tuning, and AIM,
		 * are required.
7363 		 */
7364 		if (wm_is_using_multiqueue(sc))
7365 			wmq->wmq_itr = 50;
7366 		else
7367 			wmq->wmq_itr = sc->sc_itr_init;
7368 		wmq->wmq_set_itr = true;
7369 
7370 		mutex_enter(txq->txq_lock);
7371 		wm_init_tx_queue(sc, wmq, txq);
7372 		mutex_exit(txq->txq_lock);
7373 
7374 		mutex_enter(rxq->rxq_lock);
7375 		error = wm_init_rx_queue(sc, wmq, rxq);
7376 		mutex_exit(rxq->rxq_lock);
7377 		if (error)
7378 			break;
7379 	}
7380 
7381 	return error;
7382 }
7383 
7384 /*
7385  * wm_tx_offload:
7386  *
7387  *	Set up TCP/IP checksumming parameters for the
7388  *	specified packet.
7389  */
7390 static void
7391 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
7392     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
7393 {
7394 	struct mbuf *m0 = txs->txs_mbuf;
7395 	struct livengood_tcpip_ctxdesc *t;
7396 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
7397 	uint32_t ipcse;
7398 	struct ether_header *eh;
7399 	int offset, iphl;
7400 	uint8_t fields;
7401 
7402 	/*
7403 	 * XXX It would be nice if the mbuf pkthdr had offset
7404 	 * fields for the protocol headers.
7405 	 */
7406 
7407 	eh = mtod(m0, struct ether_header *);
7408 	switch (htons(eh->ether_type)) {
7409 	case ETHERTYPE_IP:
7410 	case ETHERTYPE_IPV6:
7411 		offset = ETHER_HDR_LEN;
7412 		break;
7413 
7414 	case ETHERTYPE_VLAN:
7415 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
7416 		break;
7417 
7418 	default:
7419 		/* Don't support this protocol or encapsulation. */
		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
		txq->txq_last_hw_ipcs = 0;
		txq->txq_last_hw_tucs = 0;
7423 		*fieldsp = 0;
7424 		*cmdp = 0;
7425 		return;
7426 	}
7427 
	if ((m0->m_pkthdr.csum_flags &
	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0)
		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
	else
		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
7433 
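	/* IPCSE (IP checksum ending) is an inclusive offset, hence the -1. */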
7434 	ipcse = offset + iphl - 1;
7435 
7436 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
7437 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
7438 	seg = 0;
7439 	fields = 0;
7440 
7441 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
7442 		int hlen = offset + iphl;
7443 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
7444 
7445 		if (__predict_false(m0->m_len <
7446 				    (hlen + sizeof(struct tcphdr)))) {
7447 			/*
7448 			 * TCP/IP headers are not in the first mbuf; we need
7449 			 * to do this the slow and painful way. Let's just
7450 			 * hope this doesn't happen very often.
7451 			 */
7452 			struct tcphdr th;
7453 
7454 			WM_Q_EVCNT_INCR(txq, tsopain);
7455 
7456 			m_copydata(m0, hlen, sizeof(th), &th);
7457 			if (v4) {
7458 				struct ip ip;
7459 
7460 				m_copydata(m0, offset, sizeof(ip), &ip);
7461 				ip.ip_len = 0;
7462 				m_copyback(m0,
7463 				    offset + offsetof(struct ip, ip_len),
7464 				    sizeof(ip.ip_len), &ip.ip_len);
7465 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
7466 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
7467 			} else {
7468 				struct ip6_hdr ip6;
7469 
7470 				m_copydata(m0, offset, sizeof(ip6), &ip6);
7471 				ip6.ip6_plen = 0;
7472 				m_copyback(m0,
7473 				    offset + offsetof(struct ip6_hdr, ip6_plen),
7474 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
7475 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
7476 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
7477 			}
7478 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
7479 			    sizeof(th.th_sum), &th.th_sum);
7480 
7481 			hlen += th.th_off << 2;
7482 		} else {
7483 			/*
7484 			 * TCP/IP headers are in the first mbuf; we can do
7485 			 * this the easy way.
7486 			 */
7487 			struct tcphdr *th;
7488 
7489 			if (v4) {
7490 				struct ip *ip =
7491 				    (void *)(mtod(m0, char *) + offset);
7492 				th = (void *)(mtod(m0, char *) + hlen);
7493 
7494 				ip->ip_len = 0;
7495 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
7496 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
7497 			} else {
7498 				struct ip6_hdr *ip6 =
7499 				    (void *)(mtod(m0, char *) + offset);
7500 				th = (void *)(mtod(m0, char *) + hlen);
7501 
7502 				ip6->ip6_plen = 0;
7503 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
7504 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
7505 			}
7506 			hlen += th->th_off << 2;
7507 		}
7508 
7509 		if (v4) {
7510 			WM_Q_EVCNT_INCR(txq, tso);
7511 			cmdlen |= WTX_TCPIP_CMD_IP;
7512 		} else {
7513 			WM_Q_EVCNT_INCR(txq, tso6);
7514 			ipcse = 0;
7515 		}
7516 		cmd |= WTX_TCPIP_CMD_TSE;
7517 		cmdlen |= WTX_TCPIP_CMD_TSE |
7518 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
7519 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
7520 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
7521 	}
7522 
7523 	/*
7524 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
7525 	 * offload feature, if we load the context descriptor, we
7526 	 * MUST provide valid values for IPCSS and TUCSS fields.
7527 	 */
7528 
7529 	ipcs = WTX_TCPIP_IPCSS(offset) |
7530 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
7531 	    WTX_TCPIP_IPCSE(ipcse);
7532 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
7533 		WM_Q_EVCNT_INCR(txq, ipsum);
7534 		fields |= WTX_IXSM;
7535 	}
7536 
7537 	offset += iphl;
7538 
7539 	if (m0->m_pkthdr.csum_flags &
7540 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
7541 		WM_Q_EVCNT_INCR(txq, tusum);
7542 		fields |= WTX_TXSM;
7543 		tucs = WTX_TCPIP_TUCSS(offset) |
7544 		    WTX_TCPIP_TUCSO(offset +
7545 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
7546 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
7547 	} else if ((m0->m_pkthdr.csum_flags &
7548 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
7549 		WM_Q_EVCNT_INCR(txq, tusum6);
7550 		fields |= WTX_TXSM;
7551 		tucs = WTX_TCPIP_TUCSS(offset) |
7552 		    WTX_TCPIP_TUCSO(offset +
7553 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
7554 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
7555 	} else {
7556 		/* Just initialize it to a valid TCP context. */
7557 		tucs = WTX_TCPIP_TUCSS(offset) |
7558 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
7559 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
7560 	}
7561 
7562 	*cmdp = cmd;
7563 	*fieldsp = fields;
7564 
7565 	/*
	 * We don't have to write a context descriptor for every packet,
	 * except on the 82574.  For the 82574, we must write a context
	 * descriptor for every packet when we use two descriptor queues.
	 *
	 * The 82574L can only remember the *last* context used
	 * regardless of the queue it was used for.  We cannot reuse
7572 	 * contexts on this hardware platform and must generate a new
7573 	 * context every time.  82574L hardware spec, section 7.2.6,
7574 	 * second note.
7575 	 */
7576 	if (sc->sc_nqueues < 2) {
7577 		/*
		 * Setting up a new checksum offload context for every
		 * frame takes a lot of processing time for hardware.
		 * This also reduces performance a lot for small sized
		 * frames, so avoid it if the driver can use a previously
		 * configured checksum offload context.
		 * For TSO, in theory we could reuse the same TSO context
		 * only if the frame is the same type (IP/TCP) and has the
		 * same MSS.  However, checking whether a frame has the
		 * same IP/TCP structure is a hard thing, so just ignore
		 * that and always establish a new TSO context.
		 */
7590 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
7591 		    == 0) {
7592 			if (txq->txq_last_hw_cmd == cmd &&
7593 			    txq->txq_last_hw_fields == fields &&
7594 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
7595 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
7596 				WM_Q_EVCNT_INCR(txq, skipcontext);
7597 				return;
7598 			}
7599 		}
7600 
		txq->txq_last_hw_cmd = cmd;
		txq->txq_last_hw_fields = fields;
		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
7604 		txq->txq_last_hw_tucs = (tucs & 0xffff);
7605 	}
7606 
7607 	/* Fill in the context descriptor. */
7608 	t = (struct livengood_tcpip_ctxdesc *)
7609 	    &txq->txq_descs[txq->txq_next];
7610 	t->tcpip_ipcs = htole32(ipcs);
7611 	t->tcpip_tucs = htole32(tucs);
7612 	t->tcpip_cmdlen = htole32(cmdlen);
7613 	t->tcpip_seg = htole32(seg);
7614 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
7615 
7616 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
7617 	txs->txs_ndesc++;
7618 }
7619 
7620 static inline int
7621 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
7622 {
7623 	struct wm_softc *sc = ifp->if_softc;
7624 	u_int cpuid = cpu_index(curcpu());
7625 
7626 	/*
	 * Currently, a simple distribution strategy.
	 * TODO:
	 * Distribute by flowid (RSS hash value).
7630 	 */
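	/*
	 * Shifting by sc_affinity_offset is intended to map the CPU that
	 * handles queue N's interrupt to queue N; adding ncpu first keeps
	 * the unsigned subtraction from wrapping when cpuid is smaller
	 * than sc_affinity_offset.
	 */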
7631 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
7632 }
7633 
7634 /*
7635  * wm_start:		[ifnet interface function]
7636  *
7637  *	Start packet transmission on the interface.
7638  */
7639 static void
7640 wm_start(struct ifnet *ifp)
7641 {
7642 	struct wm_softc *sc = ifp->if_softc;
7643 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7644 
7645 #ifdef WM_MPSAFE
7646 	KASSERT(if_is_mpsafe(ifp));
7647 #endif
7648 	/*
7649 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
7650 	 */
7651 
7652 	mutex_enter(txq->txq_lock);
7653 	if (!txq->txq_stopping)
7654 		wm_start_locked(ifp);
7655 	mutex_exit(txq->txq_lock);
7656 }
7657 
7658 static void
7659 wm_start_locked(struct ifnet *ifp)
7660 {
7661 	struct wm_softc *sc = ifp->if_softc;
7662 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7663 
7664 	wm_send_common_locked(ifp, txq, false);
7665 }
7666 
7667 static int
7668 wm_transmit(struct ifnet *ifp, struct mbuf *m)
7669 {
7670 	int qid;
7671 	struct wm_softc *sc = ifp->if_softc;
7672 	struct wm_txqueue *txq;
7673 
7674 	qid = wm_select_txqueue(ifp, m);
7675 	txq = &sc->sc_queue[qid].wmq_txq;
7676 
7677 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
7678 		m_freem(m);
7679 		WM_Q_EVCNT_INCR(txq, pcqdrop);
7680 		return ENOBUFS;
7681 	}
7682 
7683 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
7684 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
7685 	if (m->m_flags & M_MCAST)
7686 		if_statinc_ref(nsr, if_omcasts);
7687 	IF_STAT_PUTREF(ifp);
7688 
7689 	if (mutex_tryenter(txq->txq_lock)) {
7690 		if (!txq->txq_stopping)
7691 			wm_transmit_locked(ifp, txq);
7692 		mutex_exit(txq->txq_lock);
7693 	}
7694 
7695 	return 0;
7696 }
7697 
7698 static void
7699 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
7700 {
7701 
7702 	wm_send_common_locked(ifp, txq, true);
7703 }
7704 
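/*
 * wm_send_common_locked:
 *	Common transmit path for wm_start() (is_transmit == false, the
 *	if_snd queue) and wm_transmit() (is_transmit == true, the
 *	per-queue txq_interq).  Called with the Tx queue lock held.
 */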
7705 static void
7706 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
7707     bool is_transmit)
7708 {
7709 	struct wm_softc *sc = ifp->if_softc;
7710 	struct mbuf *m0;
7711 	struct wm_txsoft *txs;
7712 	bus_dmamap_t dmamap;
7713 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
7714 	bus_addr_t curaddr;
7715 	bus_size_t seglen, curlen;
7716 	uint32_t cksumcmd;
7717 	uint8_t cksumfields;
7718 	bool remap = true;
7719 
7720 	KASSERT(mutex_owned(txq->txq_lock));
7721 
7722 	if ((ifp->if_flags & IFF_RUNNING) == 0)
7723 		return;
7724 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
7725 		return;
7726 
7727 	/* Remember the previous number of free descriptors. */
7728 	ofree = txq->txq_free;
7729 
7730 	/*
7731 	 * Loop through the send queue, setting up transmit descriptors
7732 	 * until we drain the queue, or use up all available transmit
7733 	 * descriptors.
7734 	 */
7735 	for (;;) {
7736 		m0 = NULL;
7737 
7738 		/* Get a work queue entry. */
7739 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
7740 			wm_txeof(txq, UINT_MAX);
7741 			if (txq->txq_sfree == 0) {
7742 				DPRINTF(WM_DEBUG_TX,
7743 				    ("%s: TX: no free job descriptors\n",
7744 					device_xname(sc->sc_dev)));
7745 				WM_Q_EVCNT_INCR(txq, txsstall);
7746 				break;
7747 			}
7748 		}
7749 
7750 		/* Grab a packet off the queue. */
7751 		if (is_transmit)
7752 			m0 = pcq_get(txq->txq_interq);
7753 		else
7754 			IFQ_DEQUEUE(&ifp->if_snd, m0);
7755 		if (m0 == NULL)
7756 			break;
7757 
7758 		DPRINTF(WM_DEBUG_TX,
7759 		    ("%s: TX: have packet to transmit: %p\n",
7760 			device_xname(sc->sc_dev), m0));
7761 
7762 		txs = &txq->txq_soft[txq->txq_snext];
7763 		dmamap = txs->txs_dmamap;
7764 
7765 		use_tso = (m0->m_pkthdr.csum_flags &
7766 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
7767 
7768 		/*
7769 		 * So says the Linux driver:
7770 		 * The controller does a simple calculation to make sure
7771 		 * there is enough room in the FIFO before initiating the
7772 		 * DMA for each buffer. The calc is:
7773 		 *	4 = ceil(buffer len / MSS)
7774 		 * To make sure we don't overrun the FIFO, adjust the max
7775 		 * buffer len if the MSS drops.
7776 		 */
7777 		dmamap->dm_maxsegsz =
7778 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
7779 		    ? m0->m_pkthdr.segsz << 2
7780 		    : WTX_MAX_LEN;
7781 
7782 		/*
7783 		 * Load the DMA map.  If this fails, the packet either
7784 		 * didn't fit in the allotted number of segments, or we
7785 		 * were short on resources.  For the too-many-segments
7786 		 * case, we simply report an error and drop the packet,
7787 		 * since we can't sanely copy a jumbo packet to a single
7788 		 * buffer.
7789 		 */
7790 retry:
7791 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
7792 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
7793 		if (__predict_false(error)) {
7794 			if (error == EFBIG) {
7795 				if (remap == true) {
7796 					struct mbuf *m;
7797 
7798 					remap = false;
7799 					m = m_defrag(m0, M_NOWAIT);
7800 					if (m != NULL) {
7801 						WM_Q_EVCNT_INCR(txq, defrag);
7802 						m0 = m;
7803 						goto retry;
7804 					}
7805 				}
7806 				WM_Q_EVCNT_INCR(txq, toomanyseg);
7807 				log(LOG_ERR, "%s: Tx packet consumes too many "
7808 				    "DMA segments, dropping...\n",
7809 				    device_xname(sc->sc_dev));
7810 				wm_dump_mbuf_chain(sc, m0);
7811 				m_freem(m0);
7812 				continue;
7813 			}
7814 			/* Short on resources, just stop for now. */
7815 			DPRINTF(WM_DEBUG_TX,
7816 			    ("%s: TX: dmamap load failed: %d\n",
7817 				device_xname(sc->sc_dev), error));
7818 			break;
7819 		}
7820 
7821 		segs_needed = dmamap->dm_nsegs;
7822 		if (use_tso) {
7823 			/* For sentinel descriptor; see below. */
7824 			segs_needed++;
7825 		}
7826 
7827 		/*
7828 		 * Ensure we have enough descriptors free to describe
7829 		 * the packet. Note, we always reserve one descriptor
7830 		 * at the end of the ring due to the semantics of the
7831 		 * TDT register, plus one more in the event we need
7832 		 * to load offload context.
7833 		 */
7834 		if (segs_needed > txq->txq_free - 2) {
7835 			/*
7836 			 * Not enough free descriptors to transmit this
7837 			 * packet.  We haven't committed anything yet,
7838 			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt. Notify the upper
7840 			 * layer that there are no more slots left.
7841 			 */
7842 			DPRINTF(WM_DEBUG_TX,
7843 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
7844 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
7845 				segs_needed, txq->txq_free - 1));
7846 			txq->txq_flags |= WM_TXQ_NO_SPACE;
7847 			bus_dmamap_unload(sc->sc_dmat, dmamap);
7848 			WM_Q_EVCNT_INCR(txq, txdstall);
7849 			break;
7850 		}
7851 
7852 		/*
7853 		 * Check for 82547 Tx FIFO bug. We need to do this
7854 		 * once we know we can transmit the packet, since we
7855 		 * do some internal FIFO space accounting here.
7856 		 */
7857 		if (sc->sc_type == WM_T_82547 &&
7858 		    wm_82547_txfifo_bugchk(sc, m0)) {
7859 			DPRINTF(WM_DEBUG_TX,
7860 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
7861 				device_xname(sc->sc_dev)));
7862 			txq->txq_flags |= WM_TXQ_NO_SPACE;
7863 			bus_dmamap_unload(sc->sc_dmat, dmamap);
7864 			WM_Q_EVCNT_INCR(txq, fifo_stall);
7865 			break;
7866 		}
7867 
7868 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
7869 
7870 		DPRINTF(WM_DEBUG_TX,
7871 		    ("%s: TX: packet has %d (%d) DMA segments\n",
7872 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
7873 
7874 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
7875 
7876 		/*
7877 		 * Store a pointer to the packet so that we can free it
7878 		 * later.
7879 		 *
		 * Initially, we take the number of descriptors the
		 * packet uses to be the number of DMA segments.  This may be
7882 		 * incremented by 1 if we do checksum offload (a descriptor
7883 		 * is used to set the checksum context).
7884 		 */
7885 		txs->txs_mbuf = m0;
7886 		txs->txs_firstdesc = txq->txq_next;
7887 		txs->txs_ndesc = segs_needed;
7888 
7889 		/* Set up offload parameters for this packet. */
7890 		if (m0->m_pkthdr.csum_flags &
7891 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
7892 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7893 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
7894 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
7895 		} else {
			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
7898 			cksumcmd = 0;
7899 			cksumfields = 0;
7900 		}
7901 
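		/* Always request an interrupt delay and FCS insertion. */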
7902 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
7903 
7904 		/* Sync the DMA map. */
7905 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
7906 		    BUS_DMASYNC_PREWRITE);
7907 
7908 		/* Initialize the transmit descriptor. */
7909 		for (nexttx = txq->txq_next, seg = 0;
7910 		     seg < dmamap->dm_nsegs; seg++) {
7911 			for (seglen = dmamap->dm_segs[seg].ds_len,
7912 			     curaddr = dmamap->dm_segs[seg].ds_addr;
7913 			     seglen != 0;
7914 			     curaddr += curlen, seglen -= curlen,
7915 			     nexttx = WM_NEXTTX(txq, nexttx)) {
7916 				curlen = seglen;
7917 
7918 				/*
7919 				 * So says the Linux driver:
7920 				 * Work around for premature descriptor
7921 				 * write-backs in TSO mode.  Append a
7922 				 * 4-byte sentinel descriptor.
7923 				 */
7924 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
7925 				    curlen > 8)
7926 					curlen -= 4;
7927 
7928 				wm_set_dma_addr(
7929 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
7930 				txq->txq_descs[nexttx].wtx_cmdlen
7931 				    = htole32(cksumcmd | curlen);
7932 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
7933 				    = 0;
7934 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
7935 				    = cksumfields;
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan
				    = 0;
7937 				lasttx = nexttx;
7938 
7939 				DPRINTF(WM_DEBUG_TX,
7940 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
7941 					"len %#04zx\n",
7942 					device_xname(sc->sc_dev), nexttx,
7943 					(uint64_t)curaddr, curlen));
7944 			}
7945 		}
7946 
7947 		KASSERT(lasttx != -1);
7948 
7949 		/*
7950 		 * Set up the command byte on the last descriptor of
7951 		 * the packet. If we're in the interrupt delay window,
7952 		 * delay the interrupt.
7953 		 */
7954 		txq->txq_descs[lasttx].wtx_cmdlen |=
7955 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
7956 
7957 		/*
7958 		 * If VLANs are enabled and the packet has a VLAN tag, set
7959 		 * up the descriptor to encapsulate the packet for us.
7960 		 *
7961 		 * This is only valid on the last descriptor of the packet.
7962 		 */
7963 		if (vlan_has_tag(m0)) {
7964 			txq->txq_descs[lasttx].wtx_cmdlen |=
7965 			    htole32(WTX_CMD_VLE);
7966 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
7967 			    = htole16(vlan_get_tag(m0));
7968 		}
7969 
7970 		txs->txs_lastdesc = lasttx;
7971 
7972 		DPRINTF(WM_DEBUG_TX,
7973 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
7974 			device_xname(sc->sc_dev),
7975 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
7976 
7977 		/* Sync the descriptors we're using. */
7978 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
7979 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
7980 
7981 		/* Give the packet to the chip. */
7982 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
7983 
7984 		DPRINTF(WM_DEBUG_TX,
7985 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
7986 
7987 		DPRINTF(WM_DEBUG_TX,
7988 		    ("%s: TX: finished transmitting packet, job %d\n",
7989 			device_xname(sc->sc_dev), txq->txq_snext));
7990 
7991 		/* Advance the tx pointer. */
7992 		txq->txq_free -= txs->txs_ndesc;
7993 		txq->txq_next = nexttx;
7994 
7995 		txq->txq_sfree--;
7996 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
7997 
7998 		/* Pass the packet to any BPF listeners. */
7999 		bpf_mtap(ifp, m0, BPF_D_OUT);
8000 	}
8001 
8002 	if (m0 != NULL) {
8003 		txq->txq_flags |= WM_TXQ_NO_SPACE;
8004 		WM_Q_EVCNT_INCR(txq, descdrop);
8005 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
8006 			__func__));
8007 		m_freem(m0);
8008 	}
8009 
8010 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
8011 		/* No more slots; notify upper layer. */
8012 		txq->txq_flags |= WM_TXQ_NO_SPACE;
8013 	}
8014 
8015 	if (txq->txq_free != ofree) {
8016 		/* Set a watchdog timer in case the chip flakes out. */
8017 		txq->txq_lastsent = time_uptime;
8018 		txq->txq_sending = true;
8019 	}
8020 }
8021 
8022 /*
8023  * wm_nq_tx_offload:
8024  *
8025  *	Set up TCP/IP checksumming parameters for the
8026  *	specified packet, for NEWQUEUE devices
8027  */
8028 static void
8029 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
8030     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
8031 {
8032 	struct mbuf *m0 = txs->txs_mbuf;
8033 	uint32_t vl_len, mssidx, cmdc;
8034 	struct ether_header *eh;
8035 	int offset, iphl;
8036 
8037 	/*
8038 	 * XXX It would be nice if the mbuf pkthdr had offset
8039 	 * fields for the protocol headers.
8040 	 */
8041 	*cmdlenp = 0;
8042 	*fieldsp = 0;
8043 
8044 	eh = mtod(m0, struct ether_header *);
8045 	switch (htons(eh->ether_type)) {
8046 	case ETHERTYPE_IP:
8047 	case ETHERTYPE_IPV6:
8048 		offset = ETHER_HDR_LEN;
8049 		break;
8050 
8051 	case ETHERTYPE_VLAN:
8052 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
8053 		break;
8054 
8055 	default:
8056 		/* Don't support this protocol or encapsulation. */
8057 		*do_csum = false;
8058 		return;
8059 	}
8060 	*do_csum = true;
8061 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
8062 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
8063 
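	/*
	 * Pack the MAC and IP header lengths and, if present, the VLAN
	 * tag into the context descriptor's single VLLEN word; the
	 * KASSERTs check that each value fits its bitfield.
	 */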
8064 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
8065 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
8066 
8067 	if ((m0->m_pkthdr.csum_flags &
8068 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
8069 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
8070 	} else {
8071 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
8072 	}
8073 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
8074 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
8075 
8076 	if (vlan_has_tag(m0)) {
8077 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
8078 		    << NQTXC_VLLEN_VLAN_SHIFT);
8079 		*cmdlenp |= NQTX_CMD_VLE;
8080 	}
8081 
8082 	mssidx = 0;
8083 
8084 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
8085 		int hlen = offset + iphl;
8086 		int tcp_hlen;
8087 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
8088 
8089 		if (__predict_false(m0->m_len <
8090 				    (hlen + sizeof(struct tcphdr)))) {
8091 			/*
8092 			 * TCP/IP headers are not in the first mbuf; we need
8093 			 * to do this the slow and painful way. Let's just
8094 			 * hope this doesn't happen very often.
8095 			 */
8096 			struct tcphdr th;
8097 
8098 			WM_Q_EVCNT_INCR(txq, tsopain);
8099 
8100 			m_copydata(m0, hlen, sizeof(th), &th);
8101 			if (v4) {
8102 				struct ip ip;
8103 
8104 				m_copydata(m0, offset, sizeof(ip), &ip);
8105 				ip.ip_len = 0;
8106 				m_copyback(m0,
8107 				    offset + offsetof(struct ip, ip_len),
8108 				    sizeof(ip.ip_len), &ip.ip_len);
8109 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
8110 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
8111 			} else {
8112 				struct ip6_hdr ip6;
8113 
8114 				m_copydata(m0, offset, sizeof(ip6), &ip6);
8115 				ip6.ip6_plen = 0;
8116 				m_copyback(m0,
8117 				    offset + offsetof(struct ip6_hdr, ip6_plen),
8118 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
8119 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
8120 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
8121 			}
8122 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
8123 			    sizeof(th.th_sum), &th.th_sum);
8124 
8125 			tcp_hlen = th.th_off << 2;
8126 		} else {
8127 			/*
8128 			 * TCP/IP headers are in the first mbuf; we can do
8129 			 * this the easy way.
8130 			 */
8131 			struct tcphdr *th;
8132 
8133 			if (v4) {
8134 				struct ip *ip =
8135 				    (void *)(mtod(m0, char *) + offset);
8136 				th = (void *)(mtod(m0, char *) + hlen);
8137 
8138 				ip->ip_len = 0;
8139 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
8140 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
8141 			} else {
8142 				struct ip6_hdr *ip6 =
8143 				    (void *)(mtod(m0, char *) + offset);
8144 				th = (void *)(mtod(m0, char *) + hlen);
8145 
8146 				ip6->ip6_plen = 0;
8147 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
8148 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
8149 			}
8150 			tcp_hlen = th->th_off << 2;
8151 		}
8152 		hlen += tcp_hlen;
8153 		*cmdlenp |= NQTX_CMD_TSE;
8154 
8155 		if (v4) {
8156 			WM_Q_EVCNT_INCR(txq, tso);
8157 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
8158 		} else {
8159 			WM_Q_EVCNT_INCR(txq, tso6);
8160 			*fieldsp |= NQTXD_FIELDS_TUXSM;
8161 		}
		*fieldsp |= ((m0->m_pkthdr.len - hlen)
		    << NQTXD_FIELDS_PAYLEN_SHIFT);
		KASSERT(((m0->m_pkthdr.len - hlen)
		    & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
8164 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
8165 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
8166 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
8167 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
8168 	} else {
8169 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
8170 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
8171 	}
8172 
8173 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
8174 		*fieldsp |= NQTXD_FIELDS_IXSM;
8175 		cmdc |= NQTXC_CMD_IP4;
8176 	}
8177 
8178 	if (m0->m_pkthdr.csum_flags &
8179 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
8180 		WM_Q_EVCNT_INCR(txq, tusum);
8181 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
8182 			cmdc |= NQTXC_CMD_TCP;
8183 		else
8184 			cmdc |= NQTXC_CMD_UDP;
8185 
8186 		cmdc |= NQTXC_CMD_IP4;
8187 		*fieldsp |= NQTXD_FIELDS_TUXSM;
8188 	}
8189 	if (m0->m_pkthdr.csum_flags &
8190 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
8191 		WM_Q_EVCNT_INCR(txq, tusum6);
8192 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
8193 			cmdc |= NQTXC_CMD_TCP;
8194 		else
8195 			cmdc |= NQTXC_CMD_UDP;
8196 
8197 		cmdc |= NQTXC_CMD_IP6;
8198 		*fieldsp |= NQTXD_FIELDS_TUXSM;
8199 	}
8200 
8201 	/*
	 * We don't have to write a context descriptor for every packet on
	 * NEWQUEUE controllers, that is, the 82575, 82576, 82580, I350,
	 * I354, I210 and I211; writing one per Tx queue is enough for
	 * these controllers.  Writing a context descriptor for every
	 * packet adds overhead, but it does not cause problems.
8208 	 */
8209 	/* Fill in the context descriptor. */
8210 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
8211 	    htole32(vl_len);
8212 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
8213 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
8214 	    htole32(cmdc);
8215 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
8216 	    htole32(mssidx);
8217 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
8218 	DPRINTF(WM_DEBUG_TX,
8219 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
8220 		txq->txq_next, 0, vl_len));
8221 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
8222 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
8223 	txs->txs_ndesc++;
8224 }
8225 
8226 /*
8227  * wm_nq_start:		[ifnet interface function]
8228  *
8229  *	Start packet transmission on the interface for NEWQUEUE devices
8230  */
8231 static void
8232 wm_nq_start(struct ifnet *ifp)
8233 {
8234 	struct wm_softc *sc = ifp->if_softc;
8235 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8236 
8237 #ifdef WM_MPSAFE
8238 	KASSERT(if_is_mpsafe(ifp));
8239 #endif
8240 	/*
8241 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
8242 	 */
8243 
8244 	mutex_enter(txq->txq_lock);
8245 	if (!txq->txq_stopping)
8246 		wm_nq_start_locked(ifp);
8247 	mutex_exit(txq->txq_lock);
8248 }
8249 
8250 static void
8251 wm_nq_start_locked(struct ifnet *ifp)
8252 {
8253 	struct wm_softc *sc = ifp->if_softc;
8254 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8255 
8256 	wm_nq_send_common_locked(ifp, txq, false);
8257 }
8258 
8259 static int
8260 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
8261 {
8262 	int qid;
8263 	struct wm_softc *sc = ifp->if_softc;
8264 	struct wm_txqueue *txq;
8265 
8266 	qid = wm_select_txqueue(ifp, m);
8267 	txq = &sc->sc_queue[qid].wmq_txq;
8268 
8269 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
8270 		m_freem(m);
8271 		WM_Q_EVCNT_INCR(txq, pcqdrop);
8272 		return ENOBUFS;
8273 	}
8274 
8275 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
8276 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
8277 	if (m->m_flags & M_MCAST)
8278 		if_statinc_ref(nsr, if_omcasts);
8279 	IF_STAT_PUTREF(ifp);
8280 
8281 	/*
	 * There are two situations in which this mutex_tryenter() can fail
	 * at run time:
	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
	 *     (2) contention with the deferred if_start softint
	 *         (wm_handle_queue())
	 * In case (1), the last packet enqueued to txq->txq_interq is
	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
	 * In case (2), the last packet enqueued to txq->txq_interq is
	 * likewise dequeued by wm_deferred_start_locked(), so it does not
	 * get stuck either.
8291 	 */
8292 	if (mutex_tryenter(txq->txq_lock)) {
8293 		if (!txq->txq_stopping)
8294 			wm_nq_transmit_locked(ifp, txq);
8295 		mutex_exit(txq->txq_lock);
8296 	}
8297 
8298 	return 0;
8299 }
8300 
8301 static void
8302 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
8303 {
8304 
8305 	wm_nq_send_common_locked(ifp, txq, true);
8306 }
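/*
 * wm_nq_send_common_locked:
 *	Like wm_send_common_locked(), but builds advanced (NEWQUEUE)
 *	descriptors when checksum offload is in use, and legacy
 *	descriptors otherwise.
 */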
8307 
8308 static void
8309 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
8310     bool is_transmit)
8311 {
8312 	struct wm_softc *sc = ifp->if_softc;
8313 	struct mbuf *m0;
8314 	struct wm_txsoft *txs;
8315 	bus_dmamap_t dmamap;
8316 	int error, nexttx, lasttx = -1, seg, segs_needed;
8317 	bool do_csum, sent;
8318 	bool remap = true;
8319 
8320 	KASSERT(mutex_owned(txq->txq_lock));
8321 
8322 	if ((ifp->if_flags & IFF_RUNNING) == 0)
8323 		return;
8324 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
8325 		return;
8326 
8327 	sent = false;
8328 
8329 	/*
8330 	 * Loop through the send queue, setting up transmit descriptors
8331 	 * until we drain the queue, or use up all available transmit
8332 	 * descriptors.
8333 	 */
8334 	for (;;) {
8335 		m0 = NULL;
8336 
8337 		/* Get a work queue entry. */
8338 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
8339 			wm_txeof(txq, UINT_MAX);
8340 			if (txq->txq_sfree == 0) {
8341 				DPRINTF(WM_DEBUG_TX,
8342 				    ("%s: TX: no free job descriptors\n",
8343 					device_xname(sc->sc_dev)));
8344 				WM_Q_EVCNT_INCR(txq, txsstall);
8345 				break;
8346 			}
8347 		}
8348 
8349 		/* Grab a packet off the queue. */
8350 		if (is_transmit)
8351 			m0 = pcq_get(txq->txq_interq);
8352 		else
8353 			IFQ_DEQUEUE(&ifp->if_snd, m0);
8354 		if (m0 == NULL)
8355 			break;
8356 
8357 		DPRINTF(WM_DEBUG_TX,
8358 		    ("%s: TX: have packet to transmit: %p\n",
8359 		    device_xname(sc->sc_dev), m0));
8360 
8361 		txs = &txq->txq_soft[txq->txq_snext];
8362 		dmamap = txs->txs_dmamap;
8363 
8364 		/*
8365 		 * Load the DMA map.  If this fails, the packet either
8366 		 * didn't fit in the allotted number of segments, or we
8367 		 * were short on resources.  For the too-many-segments
8368 		 * case, we simply report an error and drop the packet,
8369 		 * since we can't sanely copy a jumbo packet to a single
8370 		 * buffer.
8371 		 */
8372 retry:
8373 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
8374 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
8375 		if (__predict_false(error)) {
8376 			if (error == EFBIG) {
8377 				if (remap == true) {
8378 					struct mbuf *m;
8379 
8380 					remap = false;
8381 					m = m_defrag(m0, M_NOWAIT);
8382 					if (m != NULL) {
8383 						WM_Q_EVCNT_INCR(txq, defrag);
8384 						m0 = m;
8385 						goto retry;
8386 					}
8387 				}
8388 				WM_Q_EVCNT_INCR(txq, toomanyseg);
8389 				log(LOG_ERR, "%s: Tx packet consumes too many "
8390 				    "DMA segments, dropping...\n",
8391 				    device_xname(sc->sc_dev));
8392 				wm_dump_mbuf_chain(sc, m0);
8393 				m_freem(m0);
8394 				continue;
8395 			}
8396 			/* Short on resources, just stop for now. */
8397 			DPRINTF(WM_DEBUG_TX,
8398 			    ("%s: TX: dmamap load failed: %d\n",
8399 				device_xname(sc->sc_dev), error));
8400 			break;
8401 		}
8402 
8403 		segs_needed = dmamap->dm_nsegs;
8404 
8405 		/*
8406 		 * Ensure we have enough descriptors free to describe
8407 		 * the packet. Note, we always reserve one descriptor
8408 		 * at the end of the ring due to the semantics of the
8409 		 * TDT register, plus one more in the event we need
8410 		 * to load offload context.
8411 		 */
8412 		if (segs_needed > txq->txq_free - 2) {
8413 			/*
8414 			 * Not enough free descriptors to transmit this
8415 			 * packet.  We haven't committed anything yet,
8416 			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt. Notify the upper
8418 			 * layer that there are no more slots left.
8419 			 */
8420 			DPRINTF(WM_DEBUG_TX,
8421 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
8422 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
8423 				segs_needed, txq->txq_free - 1));
8424 			txq->txq_flags |= WM_TXQ_NO_SPACE;
8425 			bus_dmamap_unload(sc->sc_dmat, dmamap);
8426 			WM_Q_EVCNT_INCR(txq, txdstall);
8427 			break;
8428 		}
8429 
8430 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
8431 
8432 		DPRINTF(WM_DEBUG_TX,
8433 		    ("%s: TX: packet has %d (%d) DMA segments\n",
8434 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
8435 
8436 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
8437 
8438 		/*
8439 		 * Store a pointer to the packet so that we can free it
8440 		 * later.
8441 		 *
		 * Initially, we take the number of descriptors the
		 * packet uses to be the number of DMA segments.  This may be
8444 		 * incremented by 1 if we do checksum offload (a descriptor
8445 		 * is used to set the checksum context).
8446 		 */
8447 		txs->txs_mbuf = m0;
8448 		txs->txs_firstdesc = txq->txq_next;
8449 		txs->txs_ndesc = segs_needed;
8450 
8451 		/* Set up offload parameters for this packet. */
8452 		uint32_t cmdlen, fields, dcmdlen;
8453 		if (m0->m_pkthdr.csum_flags &
8454 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
8455 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
8456 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
8457 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
8458 			    &do_csum);
8459 		} else {
8460 			do_csum = false;
8461 			cmdlen = 0;
8462 			fields = 0;
8463 		}
8464 
8465 		/* Sync the DMA map. */
8466 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
8467 		    BUS_DMASYNC_PREWRITE);
8468 
8469 		/* Initialize the first transmit descriptor. */
8470 		nexttx = txq->txq_next;
8471 		if (!do_csum) {
8472 			/* Setup a legacy descriptor */
8473 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
8474 			    dmamap->dm_segs[0].ds_addr);
8475 			txq->txq_descs[nexttx].wtx_cmdlen =
8476 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
8477 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
8478 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
8479 			if (vlan_has_tag(m0)) {
8480 				txq->txq_descs[nexttx].wtx_cmdlen |=
8481 				    htole32(WTX_CMD_VLE);
8482 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
8483 				    htole16(vlan_get_tag(m0));
8484 			} else
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan
				    = 0;
8486 
8487 			dcmdlen = 0;
8488 		} else {
8489 			/* Setup an advanced data descriptor */
8490 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
8491 			    htole64(dmamap->dm_segs[0].ds_addr);
8492 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
8493 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
8494 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
8495 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
8496 			    htole32(fields);
8497 			DPRINTF(WM_DEBUG_TX,
8498 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
8499 				device_xname(sc->sc_dev), nexttx,
8500 				(uint64_t)dmamap->dm_segs[0].ds_addr));
8501 			DPRINTF(WM_DEBUG_TX,
8502 			    ("\t 0x%08x%08x\n", fields,
8503 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
8504 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
8505 		}
8506 
8507 		lasttx = nexttx;
8508 		nexttx = WM_NEXTTX(txq, nexttx);
8509 		/*
		 * Fill in the next descriptors.  The legacy and advanced
		 * formats are the same from this point on.
8512 		 */
8513 		for (seg = 1; seg < dmamap->dm_nsegs;
8514 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
8515 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
8516 			    htole64(dmamap->dm_segs[seg].ds_addr);
8517 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
8518 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
8519 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
8520 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
8521 			lasttx = nexttx;
8522 
8523 			DPRINTF(WM_DEBUG_TX,
8524 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
8525 				device_xname(sc->sc_dev), nexttx,
8526 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
8527 				dmamap->dm_segs[seg].ds_len));
8528 		}
8529 
8530 		KASSERT(lasttx != -1);
8531 
8532 		/*
8533 		 * Set up the command byte on the last descriptor of
8534 		 * the packet. If we're in the interrupt delay window,
8535 		 * delay the interrupt.
8536 		 */
8537 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
8538 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
8539 		txq->txq_descs[lasttx].wtx_cmdlen |=
8540 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
8541 
8542 		txs->txs_lastdesc = lasttx;
8543 
8544 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
8545 		    device_xname(sc->sc_dev),
8546 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
8547 
8548 		/* Sync the descriptors we're using. */
8549 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
8550 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
8551 
8552 		/* Give the packet to the chip. */
8553 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
8554 		sent = true;
8555 
8556 		DPRINTF(WM_DEBUG_TX,
8557 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
8558 
8559 		DPRINTF(WM_DEBUG_TX,
8560 		    ("%s: TX: finished transmitting packet, job %d\n",
8561 			device_xname(sc->sc_dev), txq->txq_snext));
8562 
8563 		/* Advance the tx pointer. */
8564 		txq->txq_free -= txs->txs_ndesc;
8565 		txq->txq_next = nexttx;
8566 
8567 		txq->txq_sfree--;
8568 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
8569 
8570 		/* Pass the packet to any BPF listeners. */
8571 		bpf_mtap(ifp, m0, BPF_D_OUT);
8572 	}
8573 
8574 	if (m0 != NULL) {
8575 		txq->txq_flags |= WM_TXQ_NO_SPACE;
8576 		WM_Q_EVCNT_INCR(txq, descdrop);
8577 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
8578 			__func__));
8579 		m_freem(m0);
8580 	}
8581 
8582 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
8583 		/* No more slots; notify upper layer. */
8584 		txq->txq_flags |= WM_TXQ_NO_SPACE;
8585 	}
8586 
8587 	if (sent) {
8588 		/* Set a watchdog timer in case the chip flakes out. */
8589 		txq->txq_lastsent = time_uptime;
8590 		txq->txq_sending = true;
8591 	}
8592 }
8593 
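/*
 * wm_deferred_start_locked:
 *
 *	Helper; restart deferred transmission. Called with txq_lock held
 *	(asserted below) and dispatches to the start/transmit routine that
 *	matches the chip family.
 */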
8594 static void
8595 wm_deferred_start_locked(struct wm_txqueue *txq)
8596 {
8597 	struct wm_softc *sc = txq->txq_sc;
8598 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8599 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
8600 	int qid = wmq->wmq_id;
8601 
8602 	KASSERT(mutex_owned(txq->txq_lock));
8603 
8604 	if (txq->txq_stopping) {
8605 		mutex_exit(txq->txq_lock);
8606 		return;
8607 	}
8608 
8609 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
8610 		/* XXX needed for ALTQ or single-CPU systems */
8611 		if (qid == 0)
8612 			wm_nq_start_locked(ifp);
8613 		wm_nq_transmit_locked(ifp, txq);
8614 	} else {
8615 		/* XXX needed for ALTQ or single-CPU systems */
8616 		if (qid == 0)
8617 			wm_start_locked(ifp);
8618 		wm_transmit_locked(ifp, txq);
8619 	}
8620 }
8621 
8622 /* Interrupt */
8623 
8624 /*
8625  * wm_txeof:
8626  *
8627  *	Helper; handle transmit interrupts.
8628  */
8629 static bool
8630 wm_txeof(struct wm_txqueue *txq, u_int limit)
8631 {
8632 	struct wm_softc *sc = txq->txq_sc;
8633 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8634 	struct wm_txsoft *txs;
8635 	int count = 0;
8636 	int i;
8637 	uint8_t status;
8638 	bool more = false;
8639 
8640 	KASSERT(mutex_owned(txq->txq_lock));
8641 
8642 	if (txq->txq_stopping)
8643 		return false;
8644 
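	/*
	 * Clear the no-space condition; the reclaim loop below may free
	 * descriptors for the transmit path to reuse.
	 */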
8645 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
8646 
8647 	/*
8648 	 * Go through the Tx list and free mbufs for those
8649 	 * frames which have been transmitted.
8650 	 */
8651 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
8652 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
8653 		if (limit-- == 0) {
8654 			more = true;
8655 			DPRINTF(WM_DEBUG_TX,
8656 			    ("%s: TX: loop limited, job %d is not processed\n",
8657 				device_xname(sc->sc_dev), i));
8658 			break;
8659 		}
8660 
8661 		txs = &txq->txq_soft[i];
8662 
8663 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
8664 			device_xname(sc->sc_dev), i));
8665 
8666 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
8667 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
8668 
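		/*
		 * The chip sets the DD (Descriptor Done) bit in the last
		 * descriptor's status when a job completes, so stop at the
		 * first job whose DD bit is still clear.
		 */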
8669 		status =
8670 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
8671 		if ((status & WTX_ST_DD) == 0) {
8672 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
8673 			    BUS_DMASYNC_PREREAD);
8674 			break;
8675 		}
8676 
8677 		count++;
8678 		DPRINTF(WM_DEBUG_TX,
8679 		    ("%s: TX: job %d done: descs %d..%d\n",
8680 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
8681 		    txs->txs_lastdesc));
8682 
8683 		/*
8684 		 * XXX We should probably be using the statistics
8685 		 * XXX registers, but I don't know if they exist
8686 		 * XXX on chips before the i82544.
8687 		 */
8688 
8689 #ifdef WM_EVENT_COUNTERS
8690 		if (status & WTX_ST_TU)
8691 			WM_Q_EVCNT_INCR(txq, underrun);
8692 #endif /* WM_EVENT_COUNTERS */
8693 
8694 		/*
8695 		 * The documents for 82574 and newer say the status field has
8696 		 * neither the EC (Excessive Collision) bit nor the LC (Late
8697 		 * Collision) bit (both are reserved). See the "PCIe GbE
8698 		 * Controller Open Source Software Developer's Manual", the
8699 		 * 82574 datasheet and newer ones.
8700 		 *
8701 		 * XXX The LC bit was observed set on an I218 at full duplex,
8702 		 * so the bit might have some other, undocumented meaning.
8703 		 */
8704 
8705 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
8706 		    && ((sc->sc_type < WM_T_82574)
8707 			|| (sc->sc_type == WM_T_80003))) {
8708 			if_statinc(ifp, if_oerrors);
8709 			if (status & WTX_ST_LC)
8710 				log(LOG_WARNING, "%s: late collision\n",
8711 				    device_xname(sc->sc_dev));
8712 			else if (status & WTX_ST_EC) {
8713 				if_statadd(ifp, if_collisions,
8714 				    TX_COLLISION_THRESHOLD + 1);
8715 				log(LOG_WARNING, "%s: excessive collisions\n",
8716 				    device_xname(sc->sc_dev));
8717 			}
8718 		} else
8719 			if_statinc(ifp, if_opackets);
8720 
8721 		txq->txq_packets++;
8722 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
8723 
8724 		txq->txq_free += txs->txs_ndesc;
8725 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
8726 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
8727 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
8728 		m_freem(txs->txs_mbuf);
8729 		txs->txs_mbuf = NULL;
8730 	}
8731 
8732 	/* Update the dirty transmit buffer pointer. */
8733 	txq->txq_sdirty = i;
8734 	DPRINTF(WM_DEBUG_TX,
8735 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
8736 
8737 	if (count != 0)
8738 		rnd_add_uint32(&sc->rnd_source, count);
8739 
8740 	/*
8741 	 * If there are no more pending transmissions, cancel the watchdog
8742 	 * timer.
8743 	 */
8744 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
8745 		txq->txq_sending = false;
8746 
8747 	return more;
8748 }
8749 
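/*
 * The following inline helpers hide the three Rx descriptor formats
 * from the receive path: legacy, extended (82574) and the "new queue"
 * (NQ) format used by WM_F_NEWQUEUE chips.
 */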
8750 static inline uint32_t
8751 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
8752 {
8753 	struct wm_softc *sc = rxq->rxq_sc;
8754 
8755 	if (sc->sc_type == WM_T_82574)
8756 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
8757 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8758 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
8759 	else
8760 		return rxq->rxq_descs[idx].wrx_status;
8761 }
8762 
8763 static inline uint32_t
8764 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
8765 {
8766 	struct wm_softc *sc = rxq->rxq_sc;
8767 
8768 	if (sc->sc_type == WM_T_82574)
8769 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
8770 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8771 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
8772 	else
8773 		return rxq->rxq_descs[idx].wrx_errors;
8774 }
8775 
8776 static inline uint16_t
8777 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
8778 {
8779 	struct wm_softc *sc = rxq->rxq_sc;
8780 
8781 	if (sc->sc_type == WM_T_82574)
8782 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
8783 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8784 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
8785 	else
8786 		return rxq->rxq_descs[idx].wrx_special;
8787 }
8788 
8789 static inline int
8790 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
8791 {
8792 	struct wm_softc *sc = rxq->rxq_sc;
8793 
8794 	if (sc->sc_type == WM_T_82574)
8795 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
8796 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8797 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
8798 	else
8799 		return rxq->rxq_descs[idx].wrx_len;
8800 }
8801 
8802 #ifdef WM_DEBUG
8803 static inline uint32_t
8804 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
8805 {
8806 	struct wm_softc *sc = rxq->rxq_sc;
8807 
8808 	if (sc->sc_type == WM_T_82574)
8809 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
8810 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8811 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
8812 	else
8813 		return 0;
8814 }
8815 
8816 static inline uint8_t
8817 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
8818 {
8819 	struct wm_softc *sc = rxq->rxq_sc;
8820 
8821 	if (sc->sc_type == WM_T_82574)
8822 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
8823 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8824 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
8825 	else
8826 		return 0;
8827 }
8828 #endif /* WM_DEBUG */
8829 
8830 static inline bool
8831 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
8832     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
8833 {
8834 
8835 	if (sc->sc_type == WM_T_82574)
8836 		return (status & ext_bit) != 0;
8837 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8838 		return (status & nq_bit) != 0;
8839 	else
8840 		return (status & legacy_bit) != 0;
8841 }
8842 
8843 static inline bool
8844 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
8845     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
8846 {
8847 
8848 	if (sc->sc_type == WM_T_82574)
8849 		return (error & ext_bit) != 0;
8850 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8851 		return (error & nq_bit) != 0;
8852 	else
8853 		return (error & legacy_bit) != 0;
8854 }
8855 
8856 static inline bool
8857 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
8858 {
8859 
8860 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
8861 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
8862 		return true;
8863 	else
8864 		return false;
8865 }
8866 
8867 static inline bool
8868 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
8869 {
8870 	struct wm_softc *sc = rxq->rxq_sc;
8871 
8872 	/* XXX missing error bit for newqueue? */
8873 	if (wm_rxdesc_is_set_error(sc, errors,
8874 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
8875 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
8876 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
8877 		NQRXC_ERROR_RXE)) {
8878 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
8879 		    EXTRXC_ERROR_SE, 0))
8880 			log(LOG_WARNING, "%s: symbol error\n",
8881 			    device_xname(sc->sc_dev));
8882 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
8883 		    EXTRXC_ERROR_SEQ, 0))
8884 			log(LOG_WARNING, "%s: receive sequence error\n",
8885 			    device_xname(sc->sc_dev));
8886 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
8887 		    EXTRXC_ERROR_CE, 0))
8888 			log(LOG_WARNING, "%s: CRC error\n",
8889 			    device_xname(sc->sc_dev));
8890 		return true;
8891 	}
8892 
8893 	return false;
8894 }
8895 
8896 static inline bool
8897 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
8898 {
8899 	struct wm_softc *sc = rxq->rxq_sc;
8900 
8901 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
8902 		NQRXC_STATUS_DD)) {
8903 		/* We have processed all of the receive descriptors. */
8904 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
8905 		return false;
8906 	}
8907 
8908 	return true;
8909 }
8910 
8911 static inline bool
8912 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
8913     uint16_t vlantag, struct mbuf *m)
8914 {
8915 
8916 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
8917 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
8918 		vlan_set_tag(m, le16toh(vlantag));
8919 	}
8920 
8921 	return true;
8922 }
8923 
8924 static inline void
8925 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
8926     uint32_t errors, struct mbuf *m)
8927 {
8928 	struct wm_softc *sc = rxq->rxq_sc;
8929 
8930 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
8931 		if (wm_rxdesc_is_set_status(sc, status,
8932 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
8933 			WM_Q_EVCNT_INCR(rxq, ipsum);
8934 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
8935 			if (wm_rxdesc_is_set_error(sc, errors,
8936 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
8937 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
8938 		}
8939 		if (wm_rxdesc_is_set_status(sc, status,
8940 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
8941 			/*
8942 			 * Note: we don't know if this was TCP or UDP,
8943 			 * so we just set both bits, and expect the
8944 			 * upper layers to deal.
8945 			 */
8946 			WM_Q_EVCNT_INCR(rxq, tusum);
8947 			m->m_pkthdr.csum_flags |=
8948 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
8949 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
8950 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
8951 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
8952 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
8953 		}
8954 	}
8955 }
8956 
8957 /*
8958  * wm_rxeof:
8959  *
8960  *	Helper; handle receive interrupts.
8961  */
8962 static bool
8963 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
8964 {
8965 	struct wm_softc *sc = rxq->rxq_sc;
8966 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8967 	struct wm_rxsoft *rxs;
8968 	struct mbuf *m;
8969 	int i, len;
8970 	int count = 0;
8971 	uint32_t status, errors;
8972 	uint16_t vlantag;
8973 	bool more = false;
8974 
8975 	KASSERT(mutex_owned(rxq->rxq_lock));
8976 
8977 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
8978 		if (limit-- == 0) {
8979 			rxq->rxq_ptr = i;
8980 			more = true;
8981 			DPRINTF(WM_DEBUG_RX,
8982 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
8983 				device_xname(sc->sc_dev), i));
8984 			break;
8985 		}
8986 
8987 		rxs = &rxq->rxq_soft[i];
8988 
8989 		DPRINTF(WM_DEBUG_RX,
8990 		    ("%s: RX: checking descriptor %d\n",
8991 			device_xname(sc->sc_dev), i));
8992 		wm_cdrxsync(rxq, i,
8993 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
8994 
8995 		status = wm_rxdesc_get_status(rxq, i);
8996 		errors = wm_rxdesc_get_errors(rxq, i);
8997 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
8998 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
8999 #ifdef WM_DEBUG
9000 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
9001 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
9002 #endif
9003 
9004 		if (!wm_rxdesc_dd(rxq, i, status)) {
9005 			/*
9006 			 * Update the receive pointer while holding rxq_lock,
9007 			 * keeping it consistent with the updated counters.
9008 			 */
9009 			rxq->rxq_ptr = i;
9010 			break;
9011 		}
9012 
9013 		count++;
9014 		if (__predict_false(rxq->rxq_discard)) {
9015 			DPRINTF(WM_DEBUG_RX,
9016 			    ("%s: RX: discarding contents of descriptor %d\n",
9017 				device_xname(sc->sc_dev), i));
9018 			wm_init_rxdesc(rxq, i);
9019 			if (wm_rxdesc_is_eop(rxq, status)) {
9020 				/* Reset our state. */
9021 				DPRINTF(WM_DEBUG_RX,
9022 				    ("%s: RX: resetting rxdiscard -> 0\n",
9023 					device_xname(sc->sc_dev)));
9024 				rxq->rxq_discard = 0;
9025 			}
9026 			continue;
9027 		}
9028 
9029 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
9030 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
9031 
9032 		m = rxs->rxs_mbuf;
9033 
9034 		/*
9035 		 * Add a new receive buffer to the ring, unless of
9036 		 * course the length is zero. Treat the latter as a
9037 		 * failed mapping.
9038 		 */
9039 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
9040 			/*
9041 			 * Failed, throw away what we've done so
9042 			 * far, and discard the rest of the packet.
9043 			 */
9044 			if_statinc(ifp, if_ierrors);
9045 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
9046 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
9047 			wm_init_rxdesc(rxq, i);
9048 			if (!wm_rxdesc_is_eop(rxq, status))
9049 				rxq->rxq_discard = 1;
9050 			if (rxq->rxq_head != NULL)
9051 				m_freem(rxq->rxq_head);
9052 			WM_RXCHAIN_RESET(rxq);
9053 			DPRINTF(WM_DEBUG_RX,
9054 			    ("%s: RX: Rx buffer allocation failed, "
9055 			    "dropping packet%s\n", device_xname(sc->sc_dev),
9056 				rxq->rxq_discard ? " (discard)" : ""));
9057 			continue;
9058 		}
9059 
9060 		m->m_len = len;
9061 		rxq->rxq_len += len;
9062 		DPRINTF(WM_DEBUG_RX,
9063 		    ("%s: RX: buffer at %p len %d\n",
9064 			device_xname(sc->sc_dev), m->m_data, len));
9065 
9066 		/* If this is not the end of the packet, keep looking. */
9067 		if (!wm_rxdesc_is_eop(rxq, status)) {
9068 			WM_RXCHAIN_LINK(rxq, m);
9069 			DPRINTF(WM_DEBUG_RX,
9070 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
9071 				device_xname(sc->sc_dev), rxq->rxq_len));
9072 			continue;
9073 		}
9074 
9075 		/*
9076 		 * Okay, we have the entire packet now. The chip is configured
9077 		 * to include the FCS (not all chips can strip it), so we
9078 		 * normally need to trim it. The exceptions are I35[05] and
9079 		 * I21[01]: those chips have an erratum whereby the RCTL_SECRC
9080 		 * bit in the RCTL register is always set, so the FCS is never
9081 		 * included and we don't trim it. PCH2 and newer chips also do
9082 		 * not include the FCS when jumbo frames are used, to work
9083 		 * around an erratum. We may need to adjust the length of the
9084 		 * previous mbuf in the chain if the current mbuf is too short.
9085 		 */
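		/*
		 * For example, with m->m_len == 2, the other two FCS bytes
		 * sit at the end of the previous mbuf, so that mbuf is
		 * shortened by ETHER_CRC_LEN - m->m_len == 2 and this mbuf
		 * becomes empty.
		 */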
9086 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
9087 			if (m->m_len < ETHER_CRC_LEN) {
9088 				rxq->rxq_tail->m_len
9089 				    -= (ETHER_CRC_LEN - m->m_len);
9090 				m->m_len = 0;
9091 			} else
9092 				m->m_len -= ETHER_CRC_LEN;
9093 			len = rxq->rxq_len - ETHER_CRC_LEN;
9094 		} else
9095 			len = rxq->rxq_len;
9096 
9097 		WM_RXCHAIN_LINK(rxq, m);
9098 
9099 		*rxq->rxq_tailp = NULL;
9100 		m = rxq->rxq_head;
9101 
9102 		WM_RXCHAIN_RESET(rxq);
9103 
9104 		DPRINTF(WM_DEBUG_RX,
9105 		    ("%s: RX: have entire packet, len -> %d\n",
9106 			device_xname(sc->sc_dev), len));
9107 
9108 		/* If an error occurred, update stats and drop the packet. */
9109 		if (wm_rxdesc_has_errors(rxq, errors)) {
9110 			m_freem(m);
9111 			continue;
9112 		}
9113 
9114 		/* No errors.  Receive the packet. */
9115 		m_set_rcvif(m, ifp);
9116 		m->m_pkthdr.len = len;
9117 		/*
9118 		 * TODO
9119 		 * We should save rsshash and rsstype in this mbuf.
9120 		 */
9121 		DPRINTF(WM_DEBUG_RX,
9122 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
9123 			device_xname(sc->sc_dev), rsstype, rsshash));
9124 
9125 		/*
9126 		 * If VLANs are enabled, VLAN packets have been unwrapped
9127 		 * for us.  Associate the tag with the packet.
9128 		 */
9129 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
9130 			continue;
9131 
9132 		/* Set up checksum info for this packet. */
9133 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
9134 		/*
9135 		 * Update the receive pointer while holding rxq_lock, keeping
9136 		 * it consistent with the updated counters.
9137 		 */
9138 		rxq->rxq_ptr = i;
9139 		rxq->rxq_packets++;
9140 		rxq->rxq_bytes += len;
9141 		mutex_exit(rxq->rxq_lock);
9142 
9143 		/* Pass it on. */
9144 		if_percpuq_enqueue(sc->sc_ipq, m);
9145 
9146 		mutex_enter(rxq->rxq_lock);
9147 
9148 		if (rxq->rxq_stopping)
9149 			break;
9150 	}
9151 
9152 	if (count != 0)
9153 		rnd_add_uint32(&sc->rnd_source, count);
9154 
9155 	DPRINTF(WM_DEBUG_RX,
9156 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
9157 
9158 	return more;
9159 }
9160 
9161 /*
9162  * wm_linkintr_gmii:
9163  *
9164  *	Helper; handle link interrupts for GMII.
9165  */
9166 static void
9167 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
9168 {
9169 	device_t dev = sc->sc_dev;
9170 	uint32_t status, reg;
9171 	bool link;
9172 	int rv;
9173 
9174 	KASSERT(WM_CORE_LOCKED(sc));
9175 
9176 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
9177 		__func__));
9178 
9179 	if ((icr & ICR_LSC) == 0) {
9180 		if (icr & ICR_RXSEQ)
9181 			DPRINTF(WM_DEBUG_LINK,
9182 			    ("%s: LINK Receive sequence error\n",
9183 				device_xname(dev)));
9184 		return;
9185 	}
9186 
9187 	/* Link status changed */
9188 	status = CSR_READ(sc, WMREG_STATUS);
9189 	link = status & STATUS_LU;
9190 	if (link) {
9191 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
9192 			device_xname(dev),
9193 			(status & STATUS_FD) ? "FDX" : "HDX"));
9194 	} else {
9195 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
9196 			device_xname(dev)));
9197 	}
9198 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
9199 		wm_gig_downshift_workaround_ich8lan(sc);
9200 
9201 	if ((sc->sc_type == WM_T_ICH8)
9202 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
9203 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
9204 	}
9205 	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
9206 		device_xname(dev)));
9207 	mii_pollstat(&sc->sc_mii);
9208 	if (sc->sc_type == WM_T_82543) {
9209 		int miistatus, active;
9210 
9211 		/*
9212 		 * With 82543, we need to force speed and
9213 		 * duplex on the MAC equal to what the PHY
9214 		 * speed and duplex configuration is.
9215 		 */
9216 		miistatus = sc->sc_mii.mii_media_status;
9217 
9218 		if (miistatus & IFM_ACTIVE) {
9219 			active = sc->sc_mii.mii_media_active;
9220 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
9221 			switch (IFM_SUBTYPE(active)) {
9222 			case IFM_10_T:
9223 				sc->sc_ctrl |= CTRL_SPEED_10;
9224 				break;
9225 			case IFM_100_TX:
9226 				sc->sc_ctrl |= CTRL_SPEED_100;
9227 				break;
9228 			case IFM_1000_T:
9229 				sc->sc_ctrl |= CTRL_SPEED_1000;
9230 				break;
9231 			default:
9232 				/*
9233 				 * Fiber?
9234 				 * Should not enter here.
9235 				 */
9236 				device_printf(dev, "unknown media (%x)\n",
9237 				    active);
9238 				break;
9239 			}
9240 			if (active & IFM_FDX)
9241 				sc->sc_ctrl |= CTRL_FD;
9242 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9243 		}
9244 	} else if (sc->sc_type == WM_T_PCH) {
9245 		wm_k1_gig_workaround_hv(sc,
9246 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
9247 	}
9248 
9249 	/*
9250 	 * When connected at 10Mbps half-duplex, some parts are excessively
9251 	 * aggressive resulting in many collisions. To avoid this, increase
9252 	 * the IPG and reduce Rx latency in the PHY.
9253 	 */
9254 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
9255 	    && link) {
9256 		uint32_t tipg_reg;
9257 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
9258 		bool fdx;
9259 		uint16_t emi_addr, emi_val;
9260 
9261 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
9262 		tipg_reg &= ~TIPG_IPGT_MASK;
9263 		fdx = status & STATUS_FD;
9264 
9265 		if (!fdx && (speed == STATUS_SPEED_10)) {
9266 			tipg_reg |= 0xff;
9267 			/* Reduce Rx latency in analog PHY */
9268 			emi_val = 0;
9269 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
9270 		    fdx && speed != STATUS_SPEED_1000) {
9271 			tipg_reg |= 0xc;
9272 			emi_val = 1;
9273 		} else {
9274 			/* Roll back the default values */
9275 			tipg_reg |= 0x08;
9276 			emi_val = 1;
9277 		}
9278 
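		/*
		 * Summary of the selection above: IPGT is widened to 0xff
		 * at 10Mbps half-duplex, set to 0xc on SPT and newer at
		 * full-duplex below 1Gbps, and rolled back to the default
		 * 0x08 otherwise; emi_val picks the matching PHY Rx latency
		 * setting.
		 */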
9279 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
9280 
9281 		rv = sc->phy.acquire(sc);
9282 		if (rv)
9283 			return;
9284 
9285 		if (sc->sc_type == WM_T_PCH2)
9286 			emi_addr = I82579_RX_CONFIG;
9287 		else
9288 			emi_addr = I217_RX_CONFIG;
9289 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
9290 
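		/*
		 * On LPT and newer, also retune the PHY PLL clock gating
		 * and, at 1Gbps, request the K1 clock. The magic values
		 * below presumably come from Intel reference code.
		 */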
9291 		if (sc->sc_type >= WM_T_PCH_LPT) {
9292 			uint16_t phy_reg;
9293 
9294 			sc->phy.readreg_locked(dev, 2,
9295 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
9296 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
9297 			if (speed == STATUS_SPEED_100
9298 			    || speed == STATUS_SPEED_10)
9299 				phy_reg |= 0x3e8;
9300 			else
9301 				phy_reg |= 0xfa;
9302 			sc->phy.writereg_locked(dev, 2,
9303 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
9304 
9305 			if (speed == STATUS_SPEED_1000) {
9306 				sc->phy.readreg_locked(dev, 2,
9307 				    HV_PM_CTRL, &phy_reg);
9308 
9309 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
9310 
9311 				sc->phy.writereg_locked(dev, 2,
9312 				    HV_PM_CTRL, phy_reg);
9313 			}
9314 		}
9315 		sc->phy.release(sc);
9316 
9317 		if (rv)
9318 			return;
9319 
9320 		if (sc->sc_type >= WM_T_PCH_SPT) {
9321 			uint16_t data, ptr_gap;
9322 
9323 			if (speed == STATUS_SPEED_1000) {
9324 				rv = sc->phy.acquire(sc);
9325 				if (rv)
9326 					return;
9327 
9328 				rv = sc->phy.readreg_locked(dev, 2,
9329 				    I82579_UNKNOWN1, &data);
9330 				if (rv) {
9331 					sc->phy.release(sc);
9332 					return;
9333 				}
9334 
9335 				ptr_gap = (data & (0x3ff << 2)) >> 2;
9336 				if (ptr_gap < 0x18) {
9337 					data &= ~(0x3ff << 2);
9338 					data |= (0x18 << 2);
9339 					rv = sc->phy.writereg_locked(dev,
9340 					    2, I82579_UNKNOWN1, data);
9341 				}
9342 				sc->phy.release(sc);
9343 				if (rv)
9344 					return;
9345 			} else {
9346 				rv = sc->phy.acquire(sc);
9347 				if (rv)
9348 					return;
9349 
9350 				rv = sc->phy.writereg_locked(dev, 2,
9351 				    I82579_UNKNOWN1, 0xc023);
9352 				sc->phy.release(sc);
9353 				if (rv)
9354 					return;
9355 
9356 			}
9357 		}
9358 	}
9359 
9360 	/*
9361 	 * I217 Packet Loss issue:
9362 	 * ensure that FEXTNVM4 Beacon Duration is set correctly
9363 	 * on power up.
9364 	 * Set the Beacon Duration for I217 to 8 usec
9365 	 */
9366 	if (sc->sc_type >= WM_T_PCH_LPT) {
9367 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
9368 		reg &= ~FEXTNVM4_BEACON_DURATION;
9369 		reg |= FEXTNVM4_BEACON_DURATION_8US;
9370 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
9371 	}
9372 
9373 	/* Work-around I218 hang issue */
9374 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
9375 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
9376 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
9377 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
9378 		wm_k1_workaround_lpt_lp(sc, link);
9379 
9380 	if (sc->sc_type >= WM_T_PCH_LPT) {
9381 		/*
9382 		 * Set platform power management values for Latency
9383 		 * Tolerance Reporting (LTR)
9384 		 */
9385 		wm_platform_pm_pch_lpt(sc,
9386 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
9387 	}
9388 
9389 	/* Clear link partner's EEE ability */
9390 	sc->eee_lp_ability = 0;
9391 
9392 	/* FEXTNVM6 K1-off workaround */
9393 	if (sc->sc_type == WM_T_PCH_SPT) {
9394 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
9395 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
9396 			reg |= FEXTNVM6_K1_OFF_ENABLE;
9397 		else
9398 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
9399 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
9400 	}
9401 
9402 	if (!link)
9403 		return;
9404 
9405 	switch (sc->sc_type) {
9406 	case WM_T_PCH2:
9407 		wm_k1_workaround_lv(sc);
9408 		/* FALLTHROUGH */
9409 	case WM_T_PCH:
9410 		if (sc->sc_phytype == WMPHY_82578)
9411 			wm_link_stall_workaround_hv(sc);
9412 		break;
9413 	default:
9414 		break;
9415 	}
9416 
9417 	/* Enable/Disable EEE after link up */
9418 	if (sc->sc_phytype > WMPHY_82579)
9419 		wm_set_eee_pchlan(sc);
9420 }
9421 
9422 /*
9423  * wm_linkintr_tbi:
9424  *
9425  *	Helper; handle link interrupts for TBI mode.
9426  */
9427 static void
9428 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
9429 {
9430 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9431 	uint32_t status;
9432 
9433 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
9434 		__func__));
9435 
9436 	status = CSR_READ(sc, WMREG_STATUS);
9437 	if (icr & ICR_LSC) {
9438 		wm_check_for_link(sc);
9439 		if (status & STATUS_LU) {
9440 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
9441 				device_xname(sc->sc_dev),
9442 				(status & STATUS_FD) ? "FDX" : "HDX"));
9443 			/*
9444 			 * NOTE: CTRL will update TFCE and RFCE automatically,
9445 			 * so we should update sc->sc_ctrl
9446 			 */
9447 
9448 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
9449 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
9450 			sc->sc_fcrtl &= ~FCRTL_XONE;
9451 			if (status & STATUS_FD)
9452 				sc->sc_tctl |=
9453 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
9454 			else
9455 				sc->sc_tctl |=
9456 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
9457 			if (sc->sc_ctrl & CTRL_TFCE)
9458 				sc->sc_fcrtl |= FCRTL_XONE;
9459 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
9460 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
9461 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
9462 			sc->sc_tbi_linkup = 1;
9463 			if_link_state_change(ifp, LINK_STATE_UP);
9464 		} else {
9465 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
9466 				device_xname(sc->sc_dev)));
9467 			sc->sc_tbi_linkup = 0;
9468 			if_link_state_change(ifp, LINK_STATE_DOWN);
9469 		}
9470 		/* Update LED */
9471 		wm_tbi_serdes_set_linkled(sc);
9472 	} else if (icr & ICR_RXSEQ)
9473 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
9474 			device_xname(sc->sc_dev)));
9475 }
9476 
9477 /*
9478  * wm_linkintr_serdes:
9479  *
9480  *	Helper; handle link interrupts for SERDES mode.
9481  */
9482 static void
9483 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
9484 {
9485 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9486 	struct mii_data *mii = &sc->sc_mii;
9487 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9488 	uint32_t pcs_adv, pcs_lpab, reg;
9489 
9490 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
9491 		__func__));
9492 
9493 	if (icr & ICR_LSC) {
9494 		/* Check PCS */
9495 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
9496 		if ((reg & PCS_LSTS_LINKOK) != 0) {
9497 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
9498 				device_xname(sc->sc_dev)));
9499 			mii->mii_media_status |= IFM_ACTIVE;
9500 			sc->sc_tbi_linkup = 1;
9501 			if_link_state_change(ifp, LINK_STATE_UP);
9502 		} else {
9503 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
9504 				device_xname(sc->sc_dev)));
9505 			mii->mii_media_status |= IFM_NONE;
9506 			sc->sc_tbi_linkup = 0;
9507 			if_link_state_change(ifp, LINK_STATE_DOWN);
9508 			wm_tbi_serdes_set_linkled(sc);
9509 			return;
9510 		}
9511 		mii->mii_media_active |= IFM_1000_SX;
9512 		if ((reg & PCS_LSTS_FDX) != 0)
9513 			mii->mii_media_active |= IFM_FDX;
9514 		else
9515 			mii->mii_media_active |= IFM_HDX;
9516 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
9517 			/* Check flow */
9518 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
9519 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
9520 				DPRINTF(WM_DEBUG_LINK,
9521 				    ("XXX LINKOK but not ACOMP\n"));
9522 				return;
9523 			}
9524 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
9525 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
9526 			DPRINTF(WM_DEBUG_LINK,
9527 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
9528 			if ((pcs_adv & TXCW_SYM_PAUSE)
9529 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
9530 				mii->mii_media_active |= IFM_FLOW
9531 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
9532 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
9533 			    && (pcs_adv & TXCW_ASYM_PAUSE)
9534 			    && (pcs_lpab & TXCW_SYM_PAUSE)
9535 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
9536 				mii->mii_media_active |= IFM_FLOW
9537 				    | IFM_ETH_TXPAUSE;
9538 			else if ((pcs_adv & TXCW_SYM_PAUSE)
9539 			    && (pcs_adv & TXCW_ASYM_PAUSE)
9540 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
9541 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
9542 				mii->mii_media_active |= IFM_FLOW
9543 				    | IFM_ETH_RXPAUSE;
9544 		}
9545 		/* Update LED */
9546 		wm_tbi_serdes_set_linkled(sc);
9547 	} else
9548 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
9549 		    device_xname(sc->sc_dev)));
9550 }
9551 
9552 /*
9553  * wm_linkintr:
9554  *
9555  *	Helper; handle link interrupts.
9556  */
9557 static void
9558 wm_linkintr(struct wm_softc *sc, uint32_t icr)
9559 {
9560 
9561 	KASSERT(WM_CORE_LOCKED(sc));
9562 
9563 	if (sc->sc_flags & WM_F_HAS_MII)
9564 		wm_linkintr_gmii(sc, icr);
9565 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
9566 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
9567 		wm_linkintr_serdes(sc, icr);
9568 	else
9569 		wm_linkintr_tbi(sc, icr);
9570 }
9571 
9572 
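/*
 * wm_sched_handle_queue:
 *
 *	Defer Tx/Rx processing for a queue to either a workqueue or a
 *	softint, depending on wmq_txrx_use_workqueue.
 */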
9573 static inline void
9574 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
9575 {
9576 
9577 	if (wmq->wmq_txrx_use_workqueue)
9578 		workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
9579 	else
9580 		softint_schedule(wmq->wmq_si);
9581 }
9582 
9583 /*
9584  * wm_intr_legacy:
9585  *
9586  *	Interrupt service routine for INTx and MSI.
9587  */
9588 static int
9589 wm_intr_legacy(void *arg)
9590 {
9591 	struct wm_softc *sc = arg;
9592 	struct wm_queue *wmq = &sc->sc_queue[0];
9593 	struct wm_txqueue *txq = &wmq->wmq_txq;
9594 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
9595 	uint32_t icr, rndval = 0;
9596 	int handled = 0;
9597 
9598 	while (1 /* CONSTCOND */) {
9599 		icr = CSR_READ(sc, WMREG_ICR);
9600 		if ((icr & sc->sc_icr) == 0)
9601 			break;
9602 		if (handled == 0)
9603 			DPRINTF(WM_DEBUG_TX,
9604 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
9605 		if (rndval == 0)
9606 			rndval = icr;
9607 
9608 		mutex_enter(rxq->rxq_lock);
9609 
9610 		if (rxq->rxq_stopping) {
9611 			mutex_exit(rxq->rxq_lock);
9612 			break;
9613 		}
9614 
9615 		handled = 1;
9616 
9617 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
9618 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
9619 			DPRINTF(WM_DEBUG_RX,
9620 			    ("%s: RX: got Rx intr 0x%08x\n",
9621 				device_xname(sc->sc_dev),
9622 				icr & (ICR_RXDMT0 | ICR_RXT0)));
9623 			WM_Q_EVCNT_INCR(rxq, intr);
9624 		}
9625 #endif
9626 		/*
9627 		 * wm_rxeof() does *not* call upper layer functions directly,
9628 		 * since if_percpuq_enqueue() just calls softint_schedule().
9629 		 * So we can call wm_rxeof() in interrupt context.
9630 		 */
9631 		wm_rxeof(rxq, UINT_MAX);
9632 
9633 		mutex_exit(rxq->rxq_lock);
9634 		mutex_enter(txq->txq_lock);
9635 
9636 		if (txq->txq_stopping) {
9637 			mutex_exit(txq->txq_lock);
9638 			break;
9639 		}
9640 
9641 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
9642 		if (icr & ICR_TXDW) {
9643 			DPRINTF(WM_DEBUG_TX,
9644 			    ("%s: TX: got TXDW interrupt\n",
9645 				device_xname(sc->sc_dev)));
9646 			WM_Q_EVCNT_INCR(txq, txdw);
9647 		}
9648 #endif
9649 		wm_txeof(txq, UINT_MAX);
9650 
9651 		mutex_exit(txq->txq_lock);
9652 		WM_CORE_LOCK(sc);
9653 
9654 		if (sc->sc_core_stopping) {
9655 			WM_CORE_UNLOCK(sc);
9656 			break;
9657 		}
9658 
9659 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
9660 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
9661 			wm_linkintr(sc, icr);
9662 		}
9663 		if ((icr & ICR_GPI(0)) != 0)
9664 			device_printf(sc->sc_dev, "got module interrupt\n");
9665 
9666 		WM_CORE_UNLOCK(sc);
9667 
9668 		if (icr & ICR_RXO) {
9669 #if defined(WM_DEBUG)
9670 			log(LOG_WARNING, "%s: Receive overrun\n",
9671 			    device_xname(sc->sc_dev));
9672 #endif /* defined(WM_DEBUG) */
9673 		}
9674 	}
9675 
9676 	rnd_add_uint32(&sc->rnd_source, rndval);
9677 
9678 	if (handled) {
9679 		/* Try to get more packets going. */
9680 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
9681 		wm_sched_handle_queue(sc, wmq);
9682 	}
9683 
9684 	return handled;
9685 }
9686 
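/*
 * wm_txrxintr_disable/wm_txrxintr_enable:
 *
 *	Mask and unmask a queue's Tx/Rx interrupt around deferred
 *	processing. The register layout differs by generation: IMS/IMC
 *	bits on 82574, EITR queue bits via EIMS/EIMC on 82575, and one
 *	EIMS/EIMC bit per MSI-X vector on newer chips.
 */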
9687 static inline void
9688 wm_txrxintr_disable(struct wm_queue *wmq)
9689 {
9690 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
9691 
9692 	if (sc->sc_type == WM_T_82574)
9693 		CSR_WRITE(sc, WMREG_IMC,
9694 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
9695 	else if (sc->sc_type == WM_T_82575)
9696 		CSR_WRITE(sc, WMREG_EIMC,
9697 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
9698 	else
9699 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
9700 }
9701 
9702 static inline void
9703 wm_txrxintr_enable(struct wm_queue *wmq)
9704 {
9705 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
9706 
9707 	wm_itrs_calculate(sc, wmq);
9708 
9709 	/*
9710 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
9711 	 * here. There is no need to care which of RXQ(0) and RXQ(1) enables
9712 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled while
9713 	 * its wm_handle_queue(wmq) is running.
9714 	 */
9715 	if (sc->sc_type == WM_T_82574)
9716 		CSR_WRITE(sc, WMREG_IMS,
9717 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
9718 	else if (sc->sc_type == WM_T_82575)
9719 		CSR_WRITE(sc, WMREG_EIMS,
9720 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
9721 	else
9722 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
9723 }
9724 
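/*
 * wm_txrxintr_msix:
 *
 *	Interrupt service routine for a queue's Tx/Rx MSI-X vector.
 */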
9725 static int
9726 wm_txrxintr_msix(void *arg)
9727 {
9728 	struct wm_queue *wmq = arg;
9729 	struct wm_txqueue *txq = &wmq->wmq_txq;
9730 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
9731 	struct wm_softc *sc = txq->txq_sc;
9732 	u_int txlimit = sc->sc_tx_intr_process_limit;
9733 	u_int rxlimit = sc->sc_rx_intr_process_limit;
9734 	bool txmore;
9735 	bool rxmore;
9736 
9737 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
9738 
9739 	DPRINTF(WM_DEBUG_TX,
9740 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
9741 
9742 	wm_txrxintr_disable(wmq);
9743 
9744 	mutex_enter(txq->txq_lock);
9745 
9746 	if (txq->txq_stopping) {
9747 		mutex_exit(txq->txq_lock);
9748 		return 0;
9749 	}
9750 
9751 	WM_Q_EVCNT_INCR(txq, txdw);
9752 	txmore = wm_txeof(txq, txlimit);
9753 	/* wm_deferred_start_locked() is done in wm_handle_queue(). */
9754 	mutex_exit(txq->txq_lock);
9755 
9756 	DPRINTF(WM_DEBUG_RX,
9757 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
9758 	mutex_enter(rxq->rxq_lock);
9759 
9760 	if (rxq->rxq_stopping) {
9761 		mutex_exit(rxq->rxq_lock);
9762 		return 0;
9763 	}
9764 
9765 	WM_Q_EVCNT_INCR(rxq, intr);
9766 	rxmore = wm_rxeof(rxq, rxlimit);
9767 	mutex_exit(rxq->rxq_lock);
9768 
9769 	wm_itrs_writereg(sc, wmq);
9770 
9771 	if (txmore || rxmore) {
9772 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
9773 		wm_sched_handle_queue(sc, wmq);
9774 	} else
9775 		wm_txrxintr_enable(wmq);
9776 
9777 	return 1;
9778 }
9779 
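/*
 * wm_handle_queue:
 *
 *	Softint/workqueue handler; process Tx/Rx beyond the interrupt-time
 *	limits and restart deferred transmission.
 */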
9780 static void
9781 wm_handle_queue(void *arg)
9782 {
9783 	struct wm_queue *wmq = arg;
9784 	struct wm_txqueue *txq = &wmq->wmq_txq;
9785 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
9786 	struct wm_softc *sc = txq->txq_sc;
9787 	u_int txlimit = sc->sc_tx_process_limit;
9788 	u_int rxlimit = sc->sc_rx_process_limit;
9789 	bool txmore;
9790 	bool rxmore;
9791 
9792 	mutex_enter(txq->txq_lock);
9793 	if (txq->txq_stopping) {
9794 		mutex_exit(txq->txq_lock);
9795 		return;
9796 	}
9797 	txmore = wm_txeof(txq, txlimit);
9798 	wm_deferred_start_locked(txq);
9799 	mutex_exit(txq->txq_lock);
9800 
9801 	mutex_enter(rxq->rxq_lock);
9802 	if (rxq->rxq_stopping) {
9803 		mutex_exit(rxq->rxq_lock);
9804 		return;
9805 	}
9806 	WM_Q_EVCNT_INCR(rxq, defer);
9807 	rxmore = wm_rxeof(rxq, rxlimit);
9808 	mutex_exit(rxq->rxq_lock);
9809 
9810 	if (txmore || rxmore) {
9811 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
9812 		wm_sched_handle_queue(sc, wmq);
9813 	} else
9814 		wm_txrxintr_enable(wmq);
9815 }
9816 
9817 static void
9818 wm_handle_queue_work(struct work *wk, void *context)
9819 {
9820 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
9821 
9822 	/* An "enqueued" flag is not required here. */
9825 	wm_handle_queue(wmq);
9826 }
9827 
9828 /*
9829  * wm_linkintr_msix:
9830  *
9831  *	Interrupt service routine for link status change for MSI-X.
9832  */
9833 static int
9834 wm_linkintr_msix(void *arg)
9835 {
9836 	struct wm_softc *sc = arg;
9837 	uint32_t reg;
9838 	bool has_rxo;
9839 
9840 	reg = CSR_READ(sc, WMREG_ICR);
9841 	WM_CORE_LOCK(sc);
9842 	DPRINTF(WM_DEBUG_LINK,
9843 	    ("%s: LINK: got link intr. ICR = %08x\n",
9844 		device_xname(sc->sc_dev), reg));
9845 
9846 	if (sc->sc_core_stopping)
9847 		goto out;
9848 
9849 	if ((reg & ICR_LSC) != 0) {
9850 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
9851 		wm_linkintr(sc, ICR_LSC);
9852 	}
9853 	if ((reg & ICR_GPI(0)) != 0)
9854 		device_printf(sc->sc_dev, "got module interrupt\n");
9855 
9856 	/*
9857 	 * XXX 82574 MSI-X mode workaround
9858 	 *
9859 	 * In 82574 MSI-X mode, a receive overrun (RXO) is signalled on the
9860 	 * ICR_OTHER MSI-X vector and raises neither the ICR_RXQ(0) nor the
9861 	 * ICR_RXQ(1) vector. So we generate ICR_RXQ(0) and ICR_RXQ(1)
9862 	 * interrupts by writing WMREG_ICS, to process received packets.
9863 	 */
9864 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
9865 #if defined(WM_DEBUG)
9866 		log(LOG_WARNING, "%s: Receive overrun\n",
9867 		    device_xname(sc->sc_dev));
9868 #endif /* defined(WM_DEBUG) */
9869 
9870 		has_rxo = true;
9871 		/*
9872 	 * The RXO interrupt rate is very high when the receive traffic
9873 	 * rate is high. We therefore poll for ICR_OTHER, as we do for
9874 	 * the Tx/Rx interrupts. ICR_OTHER will be re-enabled at the end
9875 	 * of wm_txrxintr_msix(), which is kicked by both the ICR_RXQ(0)
9876 	 * and ICR_RXQ(1) interrupts.
9877 		 */
9878 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
9879 
9880 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
9881 	}
9882 
9885 out:
9886 	WM_CORE_UNLOCK(sc);
9887 
9888 	if (sc->sc_type == WM_T_82574) {
9889 		if (!has_rxo)
9890 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
9891 		else
9892 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
9893 	} else if (sc->sc_type == WM_T_82575)
9894 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
9895 	else
9896 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
9897 
9898 	return 1;
9899 }
9900 
9901 /*
9902  * Media related.
9903  * GMII, SGMII, TBI (and SERDES)
9904  */
9905 
9906 /* Common */
9907 
9908 /*
9909  * wm_tbi_serdes_set_linkled:
9910  *
9911  *	Update the link LED on TBI and SERDES devices.
9912  */
9913 static void
9914 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
9915 {
9916 
9917 	if (sc->sc_tbi_linkup)
9918 		sc->sc_ctrl |= CTRL_SWDPIN(0);
9919 	else
9920 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
9921 
9922 	/* 82540 or newer devices are active low */
9923 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
9924 
9925 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9926 }
9927 
9928 /* GMII related */
9929 
9930 /*
9931  * wm_gmii_reset:
9932  *
9933  *	Reset the PHY.
9934  */
9935 static void
9936 wm_gmii_reset(struct wm_softc *sc)
9937 {
9938 	uint32_t reg;
9939 	int rv;
9940 
9941 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
9942 		device_xname(sc->sc_dev), __func__));
9943 
9944 	rv = sc->phy.acquire(sc);
9945 	if (rv != 0) {
9946 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9947 		    __func__);
9948 		return;
9949 	}
9950 
9951 	switch (sc->sc_type) {
9952 	case WM_T_82542_2_0:
9953 	case WM_T_82542_2_1:
9954 		/* null */
9955 		break;
9956 	case WM_T_82543:
9957 		/*
9958 		 * With 82543, we need to force speed and duplex on the MAC
9959 		 * equal to what the PHY speed and duplex configuration is.
9960 		 * In addition, we need to perform a hardware reset on the PHY
9961 		 * to take it out of reset.
9962 		 */
9963 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
9964 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9965 
9966 		/* The PHY reset pin is active-low. */
9967 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
9968 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
9969 		    CTRL_EXT_SWDPIN(4));
9970 		reg |= CTRL_EXT_SWDPIO(4);
9971 
9972 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9973 		CSR_WRITE_FLUSH(sc);
9974 		delay(10*1000);
9975 
9976 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
9977 		CSR_WRITE_FLUSH(sc);
9978 		delay(150);
9979 #if 0
9980 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
9981 #endif
9982 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
9983 		break;
9984 	case WM_T_82544:	/* Reset 10000us */
9985 	case WM_T_82540:
9986 	case WM_T_82545:
9987 	case WM_T_82545_3:
9988 	case WM_T_82546:
9989 	case WM_T_82546_3:
9990 	case WM_T_82541:
9991 	case WM_T_82541_2:
9992 	case WM_T_82547:
9993 	case WM_T_82547_2:
9994 	case WM_T_82571:	/* Reset 100us */
9995 	case WM_T_82572:
9996 	case WM_T_82573:
9997 	case WM_T_82574:
9998 	case WM_T_82575:
9999 	case WM_T_82576:
10000 	case WM_T_82580:
10001 	case WM_T_I350:
10002 	case WM_T_I354:
10003 	case WM_T_I210:
10004 	case WM_T_I211:
10005 	case WM_T_82583:
10006 	case WM_T_80003:
10007 		/* Generic reset */
10008 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
10009 		CSR_WRITE_FLUSH(sc);
10010 		delay(20000);
10011 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10012 		CSR_WRITE_FLUSH(sc);
10013 		delay(20000);
10014 
10015 		if ((sc->sc_type == WM_T_82541)
10016 		    || (sc->sc_type == WM_T_82541_2)
10017 		    || (sc->sc_type == WM_T_82547)
10018 		    || (sc->sc_type == WM_T_82547_2)) {
10019 			/* Workarounds for IGP are done in igp_reset() */
10020 			/* XXX add code to set LED after phy reset */
10021 		}
10022 		break;
10023 	case WM_T_ICH8:
10024 	case WM_T_ICH9:
10025 	case WM_T_ICH10:
10026 	case WM_T_PCH:
10027 	case WM_T_PCH2:
10028 	case WM_T_PCH_LPT:
10029 	case WM_T_PCH_SPT:
10030 	case WM_T_PCH_CNP:
10031 		/* Generic reset */
10032 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
10033 		CSR_WRITE_FLUSH(sc);
10034 		delay(100);
10035 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10036 		CSR_WRITE_FLUSH(sc);
10037 		delay(150);
10038 		break;
10039 	default:
10040 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
10041 		    __func__);
10042 		break;
10043 	}
10044 
10045 	sc->phy.release(sc);
10046 
10047 	/* get_cfg_done */
10048 	wm_get_cfg_done(sc);
10049 
10050 	/* Extra setup */
10051 	switch (sc->sc_type) {
10052 	case WM_T_82542_2_0:
10053 	case WM_T_82542_2_1:
10054 	case WM_T_82543:
10055 	case WM_T_82544:
10056 	case WM_T_82540:
10057 	case WM_T_82545:
10058 	case WM_T_82545_3:
10059 	case WM_T_82546:
10060 	case WM_T_82546_3:
10061 	case WM_T_82541_2:
10062 	case WM_T_82547_2:
10063 	case WM_T_82571:
10064 	case WM_T_82572:
10065 	case WM_T_82573:
10066 	case WM_T_82574:
10067 	case WM_T_82583:
10068 	case WM_T_82575:
10069 	case WM_T_82576:
10070 	case WM_T_82580:
10071 	case WM_T_I350:
10072 	case WM_T_I354:
10073 	case WM_T_I210:
10074 	case WM_T_I211:
10075 	case WM_T_80003:
10076 		/* Null */
10077 		break;
10078 	case WM_T_82541:
10079 	case WM_T_82547:
10080 		/* XXX Configure the activity LED after PHY reset */
10081 		break;
10082 	case WM_T_ICH8:
10083 	case WM_T_ICH9:
10084 	case WM_T_ICH10:
10085 	case WM_T_PCH:
10086 	case WM_T_PCH2:
10087 	case WM_T_PCH_LPT:
10088 	case WM_T_PCH_SPT:
10089 	case WM_T_PCH_CNP:
10090 		wm_phy_post_reset(sc);
10091 		break;
10092 	default:
10093 		panic("%s: unknown type\n", __func__);
10094 		break;
10095 	}
10096 }
10097 
10098 /*
10099  * Setup sc_phytype and mii_{read|write}reg.
10100  *
10101  *  To identify the PHY type, the correct read/write functions must be
10102  * selected. To select them, the PCI ID or MAC type is required, without
10103  * accessing the PHY registers.
10104  *
10105  *  On the first call of this function, the PHY ID is not known yet.
10106  * Check the PCI ID or MAC type. The list of PCI IDs may not be perfect,
10107  * so the result might be incorrect.
10108  *
10109  *  On the second call, the PHY OUI and model are used to identify the
10110  * PHY type. That might not be perfect either, because of missing
10111  * comparison entries, but it should be better than the first call.
10112  *
10113  *  If the newly detected result differs from the previous assumption, a
10114  * diagnostic message is printed.
10115  */
10116 static void
10117 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
10118     uint16_t phy_model)
10119 {
10120 	device_t dev = sc->sc_dev;
10121 	struct mii_data *mii = &sc->sc_mii;
10122 	uint16_t new_phytype = WMPHY_UNKNOWN;
10123 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
10124 	mii_readreg_t new_readreg;
10125 	mii_writereg_t new_writereg;
10126 	bool dodiag = true;
10127 
10128 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
10129 		device_xname(sc->sc_dev), __func__));
10130 
10131 	/*
10132 	 * 1000BASE-T SFP uses SGMII and the first assumed PHY type is always
10133 	 * incorrect, so don't print diagnostic output on the second call.
10134 	 */
10135 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
10136 		dodiag = false;
10137 
10138 	if (mii->mii_readreg == NULL) {
10139 		/*
10140 		 *  This is the first call of this function. For ICH and PCH
10141 		 * variants, it's difficult to determine the PHY access method
10142 		 * by sc_type, so use the PCI product ID for some devices.
10143 		 */
10144 
10145 		switch (sc->sc_pcidevid) {
10146 		case PCI_PRODUCT_INTEL_PCH_M_LM:
10147 		case PCI_PRODUCT_INTEL_PCH_M_LC:
10148 			/* 82577 */
10149 			new_phytype = WMPHY_82577;
10150 			break;
10151 		case PCI_PRODUCT_INTEL_PCH_D_DM:
10152 		case PCI_PRODUCT_INTEL_PCH_D_DC:
10153 			/* 82578 */
10154 			new_phytype = WMPHY_82578;
10155 			break;
10156 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
10157 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
10158 			/* 82579 */
10159 			new_phytype = WMPHY_82579;
10160 			break;
10161 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
10162 		case PCI_PRODUCT_INTEL_82801I_BM:
10163 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
10164 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
10165 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
10166 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
10167 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
10168 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
10169 			/* ICH8, 9, 10 with 82567 */
10170 			new_phytype = WMPHY_BM;
10171 			break;
10172 		default:
10173 			break;
10174 		}
10175 	} else {
10176 		/* It's not the first call. Use PHY OUI and model */
10177 		switch (phy_oui) {
10178 		case MII_OUI_ATTANSIC: /* XXX ??? */
10179 			switch (phy_model) {
10180 			case 0x0004: /* XXX */
10181 				new_phytype = WMPHY_82578;
10182 				break;
10183 			default:
10184 				break;
10185 			}
10186 			break;
10187 		case MII_OUI_xxMARVELL:
10188 			switch (phy_model) {
10189 			case MII_MODEL_xxMARVELL_I210:
10190 				new_phytype = WMPHY_I210;
10191 				break;
10192 			case MII_MODEL_xxMARVELL_E1011:
10193 			case MII_MODEL_xxMARVELL_E1000_3:
10194 			case MII_MODEL_xxMARVELL_E1000_5:
10195 			case MII_MODEL_xxMARVELL_E1112:
10196 				new_phytype = WMPHY_M88;
10197 				break;
10198 			case MII_MODEL_xxMARVELL_E1149:
10199 				new_phytype = WMPHY_BM;
10200 				break;
10201 			case MII_MODEL_xxMARVELL_E1111:
10202 			case MII_MODEL_xxMARVELL_I347:
10203 			case MII_MODEL_xxMARVELL_E1512:
10204 			case MII_MODEL_xxMARVELL_E1340M:
10205 			case MII_MODEL_xxMARVELL_E1543:
10206 				new_phytype = WMPHY_M88;
10207 				break;
10208 			case MII_MODEL_xxMARVELL_I82563:
10209 				new_phytype = WMPHY_GG82563;
10210 				break;
10211 			default:
10212 				break;
10213 			}
10214 			break;
10215 		case MII_OUI_INTEL:
10216 			switch (phy_model) {
10217 			case MII_MODEL_INTEL_I82577:
10218 				new_phytype = WMPHY_82577;
10219 				break;
10220 			case MII_MODEL_INTEL_I82579:
10221 				new_phytype = WMPHY_82579;
10222 				break;
10223 			case MII_MODEL_INTEL_I217:
10224 				new_phytype = WMPHY_I217;
10225 				break;
10226 			case MII_MODEL_INTEL_I82580:
10227 			case MII_MODEL_INTEL_I350:
10228 				new_phytype = WMPHY_82580;
10229 				break;
10230 			default:
10231 				break;
10232 			}
10233 			break;
10234 		case MII_OUI_yyINTEL:
10235 			switch (phy_model) {
10236 			case MII_MODEL_yyINTEL_I82562G:
10237 			case MII_MODEL_yyINTEL_I82562EM:
10238 			case MII_MODEL_yyINTEL_I82562ET:
10239 				new_phytype = WMPHY_IFE;
10240 				break;
10241 			case MII_MODEL_yyINTEL_IGP01E1000:
10242 				new_phytype = WMPHY_IGP;
10243 				break;
10244 			case MII_MODEL_yyINTEL_I82566:
10245 				new_phytype = WMPHY_IGP_3;
10246 				break;
10247 			default:
10248 				break;
10249 			}
10250 			break;
10251 		default:
10252 			break;
10253 		}
10254 
10255 		if (dodiag) {
10256 			if (new_phytype == WMPHY_UNKNOWN)
10257 				aprint_verbose_dev(dev,
10258 				    "%s: Unknown PHY model. OUI=%06x, "
10259 				    "model=%04x\n", __func__, phy_oui,
10260 				    phy_model);
10261 
10262 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
10263 			    && (sc->sc_phytype != new_phytype)) {
10264 				aprint_error_dev(dev, "Previously assumed PHY "
10265 				    "type(%u) was incorrect. PHY type from PHY "
10266 				    "ID = %u\n", sc->sc_phytype, new_phytype);
10267 			}
10268 		}
10269 	}
10270 
10271 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
10272 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
10273 		/* SGMII */
10274 		new_readreg = wm_sgmii_readreg;
10275 		new_writereg = wm_sgmii_writereg;
10276 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
10277 		/* BM2 (phyaddr == 1) */
10278 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
10279 		    && (new_phytype != WMPHY_BM)
10280 		    && (new_phytype != WMPHY_UNKNOWN))
10281 			doubt_phytype = new_phytype;
10282 		new_phytype = WMPHY_BM;
10283 		new_readreg = wm_gmii_bm_readreg;
10284 		new_writereg = wm_gmii_bm_writereg;
10285 	} else if (sc->sc_type >= WM_T_PCH) {
10286 		/* All PCH* use _hv_ */
10287 		new_readreg = wm_gmii_hv_readreg;
10288 		new_writereg = wm_gmii_hv_writereg;
10289 	} else if (sc->sc_type >= WM_T_ICH8) {
10290 		/* non-82567 ICH8, 9 and 10 */
10291 		new_readreg = wm_gmii_i82544_readreg;
10292 		new_writereg = wm_gmii_i82544_writereg;
10293 	} else if (sc->sc_type >= WM_T_80003) {
10294 		/* 80003 */
10295 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
10296 		    && (new_phytype != WMPHY_GG82563)
10297 		    && (new_phytype != WMPHY_UNKNOWN))
10298 			doubt_phytype = new_phytype;
10299 		new_phytype = WMPHY_GG82563;
10300 		new_readreg = wm_gmii_i80003_readreg;
10301 		new_writereg = wm_gmii_i80003_writereg;
10302 	} else if (sc->sc_type >= WM_T_I210) {
10303 		/* I210 and I211 */
10304 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
10305 		    && (new_phytype != WMPHY_I210)
10306 		    && (new_phytype != WMPHY_UNKNOWN))
10307 			doubt_phytype = new_phytype;
10308 		new_phytype = WMPHY_I210;
10309 		new_readreg = wm_gmii_gs40g_readreg;
10310 		new_writereg = wm_gmii_gs40g_writereg;
10311 	} else if (sc->sc_type >= WM_T_82580) {
10312 		/* 82580, I350 and I354 */
10313 		new_readreg = wm_gmii_82580_readreg;
10314 		new_writereg = wm_gmii_82580_writereg;
10315 	} else if (sc->sc_type >= WM_T_82544) {
10316 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
10317 		new_readreg = wm_gmii_i82544_readreg;
10318 		new_writereg = wm_gmii_i82544_writereg;
10319 	} else {
10320 		new_readreg = wm_gmii_i82543_readreg;
10321 		new_writereg = wm_gmii_i82543_writereg;
10322 	}
10323 
10324 	if (new_phytype == WMPHY_BM) {
10325 		/* All BM use _bm_ */
10326 		new_readreg = wm_gmii_bm_readreg;
10327 		new_writereg = wm_gmii_bm_writereg;
10328 	}
10329 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
10330 		/* All PCH* use _hv_ */
10331 		new_readreg = wm_gmii_hv_readreg;
10332 		new_writereg = wm_gmii_hv_writereg;
10333 	}
10334 
10335 	/* Diag output */
10336 	if (dodiag) {
10337 		if (doubt_phytype != WMPHY_UNKNOWN)
10338 			aprint_error_dev(dev, "Assumed new PHY type was "
10339 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
10340 			    new_phytype);
10341 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
10342 		    && (sc->sc_phytype != new_phytype))
10343 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
10344 			    " was incorrect. New PHY type = %u\n",
10345 			    sc->sc_phytype, new_phytype);
10346 
10347 		if ((mii->mii_readreg != NULL) &&
10348 		    (new_phytype == WMPHY_UNKNOWN))
10349 			aprint_error_dev(dev, "PHY type is still unknown.\n");
10350 
10351 		if ((mii->mii_readreg != NULL) &&
10352 		    (mii->mii_readreg != new_readreg))
10353 			aprint_error_dev(dev, "Previously assumed PHY "
10354 			    "read/write function was incorrect.\n");
10355 	}
10356 
10357 	/* Update now */
10358 	sc->sc_phytype = new_phytype;
10359 	mii->mii_readreg = new_readreg;
10360 	mii->mii_writereg = new_writereg;
10361 	if (new_readreg == wm_gmii_hv_readreg) {
10362 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
10363 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
10364 	} else if (new_readreg == wm_sgmii_readreg) {
10365 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
10366 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
10367 	} else if (new_readreg == wm_gmii_i82544_readreg) {
10368 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
10369 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
10370 	}
10371 }
10372 
10373 /*
10374  * wm_get_phy_id_82575:
10375  *
10376  * Return PHY ID. Return -1 if it failed.
10377  */
10378 static int
10379 wm_get_phy_id_82575(struct wm_softc *sc)
10380 {
10381 	uint32_t reg;
10382 	int phyid = -1;
10383 
10384 	/* XXX */
10385 	if ((sc->sc_flags & WM_F_SGMII) == 0)
10386 		return -1;
10387 
10388 	if (wm_sgmii_uses_mdio(sc)) {
10389 		switch (sc->sc_type) {
10390 		case WM_T_82575:
10391 		case WM_T_82576:
10392 			reg = CSR_READ(sc, WMREG_MDIC);
10393 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
10394 			break;
10395 		case WM_T_82580:
10396 		case WM_T_I350:
10397 		case WM_T_I354:
10398 		case WM_T_I210:
10399 		case WM_T_I211:
10400 			reg = CSR_READ(sc, WMREG_MDICNFG);
10401 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
10402 			break;
10403 		default:
10404 			return -1;
10405 		}
10406 	}
10407 
10408 	return phyid;
10409 }
10410 
10411 /*
10412  * wm_gmii_mediainit:
10413  *
10414  *	Initialize media for use on 1000BASE-T devices.
10415  */
10416 static void
10417 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
10418 {
10419 	device_t dev = sc->sc_dev;
10420 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10421 	struct mii_data *mii = &sc->sc_mii;
10422 
10423 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
10424 		device_xname(sc->sc_dev), __func__));
10425 
10426 	/* We have GMII. */
10427 	sc->sc_flags |= WM_F_HAS_MII;
10428 
10429 	if (sc->sc_type == WM_T_80003)
10430 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
10431 	else
10432 		sc->sc_tipg = TIPG_1000T_DFLT;
10433 
10434 	/*
10435 	 * Let the chip set speed/duplex on its own based on
10436 	 * signals from the PHY.
10437 	 * XXXbouyer - I'm not sure this is right for the 80003,
10438 	 * the em driver only sets CTRL_SLU here - but it seems to work.
10439 	 */
10440 	sc->sc_ctrl |= CTRL_SLU;
10441 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10442 
10443 	/* Initialize our media structures and probe the GMII. */
10444 	mii->mii_ifp = ifp;
10445 
10446 	mii->mii_statchg = wm_gmii_statchg;
10447 
10448 	/* get PHY control from SMBus to PCIe */
10449 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
10450 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
10451 	    || (sc->sc_type == WM_T_PCH_CNP))
10452 		wm_init_phy_workarounds_pchlan(sc);
10453 
10454 	wm_gmii_reset(sc);
10455 
10456 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
10457 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
10458 	    wm_gmii_mediastatus, sc->sc_core_lock);
10459 
10460 	/* Setup internal SGMII PHY for SFP */
10461 	wm_sgmii_sfp_preconfig(sc);
10462 
10463 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
10464 	    || (sc->sc_type == WM_T_82580)
10465 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
10466 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
10467 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
10468 			/* Attach only one port */
10469 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
10470 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
10471 		} else {
10472 			int i, id;
10473 			uint32_t ctrl_ext;
10474 
10475 			id = wm_get_phy_id_82575(sc);
10476 			if (id != -1) {
10477 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
10478 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
10479 			}
10480 			if ((id == -1)
10481 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
10482 				/* Power on the SGMII PHY if it is disabled */
10483 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
10484 				CSR_WRITE(sc, WMREG_CTRL_EXT,
10485 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
10486 				CSR_WRITE_FLUSH(sc);
10487 				delay(300*1000); /* XXX too long */
10488 
10489 				/*
10490 				 * Scan PHY addresses 1 through 7.
10491 				 *
10492 				 * I2C access can fail with the I2C register's
10493 				 * ERROR bit set, so suppress error messages
10494 				 * while scanning.
10495 				 */
10496 				sc->phy.no_errprint = true;
10497 				for (i = 1; i < 8; i++)
10498 					mii_attach(sc->sc_dev, &sc->sc_mii,
10499 					    0xffffffff, i, MII_OFFSET_ANY,
10500 					    MIIF_DOPAUSE);
10501 				sc->phy.no_errprint = false;
10502 
10503 				/* Restore previous sfp cage power state */
10504 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
10505 			}
10506 		}
10507 	} else
10508 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
10509 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
10510 
10511 	/*
10512  * If the MAC is PCH2 or newer and failed to detect the MII PHY, call
10513  * wm_set_mdio_slow_mode_hv() as a workaround and retry.
10514 	 */
10515 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
10516 		|| (sc->sc_type == WM_T_PCH_SPT)
10517 		|| (sc->sc_type == WM_T_PCH_CNP))
10518 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
10519 		wm_set_mdio_slow_mode_hv(sc);
10520 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
10521 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
10522 	}
10523 
10524 	/*
10525 	 * (For ICH8 variants)
10526 	 * If PHY detection failed, use BM's r/w function and retry.
10527 	 */
10528 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
10529 		/* if failed, retry with *_bm_* */
10530 		aprint_verbose_dev(dev, "Assumed PHY access function "
10531 		    "(type = %d) might be incorrect. Use BM and retry.\n",
10532 		    sc->sc_phytype);
10533 		sc->sc_phytype = WMPHY_BM;
10534 		mii->mii_readreg = wm_gmii_bm_readreg;
10535 		mii->mii_writereg = wm_gmii_bm_writereg;
10536 
10537 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
10538 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
10539 	}
10540 
10541 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
10542 		/* No PHY was found */
10543 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
10544 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
10545 		sc->sc_phytype = WMPHY_NONE;
10546 	} else {
10547 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
10548 
10549 		/*
10550 		 * A PHY was found. Check the PHY type again with a second
10551 		 * call to wm_gmii_setup_phytype.
10552 		 */
10553 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
10554 		    child->mii_mpd_model);
10555 
10556 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
10557 	}
10558 }
10559 
10560 /*
10561  * wm_gmii_mediachange:	[ifmedia interface function]
10562  *
10563  *	Set hardware to newly-selected media on a 1000BASE-T device.
10564  */
10565 static int
10566 wm_gmii_mediachange(struct ifnet *ifp)
10567 {
10568 	struct wm_softc *sc = ifp->if_softc;
10569 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
10570 	uint32_t reg;
10571 	int rc;
10572 
10573 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
10574 		device_xname(sc->sc_dev), __func__));
10575 	if ((ifp->if_flags & IFF_UP) == 0)
10576 		return 0;
10577 
10578 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
10579 	if ((sc->sc_type == WM_T_82580)
10580 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
10581 	    || (sc->sc_type == WM_T_I211)) {
10582 		reg = CSR_READ(sc, WMREG_PHPM);
10583 		reg &= ~PHPM_GO_LINK_D;
10584 		CSR_WRITE(sc, WMREG_PHPM, reg);
10585 	}
10586 
10587 	/* Disable D0 LPLU. */
10588 	wm_lplu_d0_disable(sc);
10589 
10590 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
10591 	sc->sc_ctrl |= CTRL_SLU;
10592 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
10593 	    || (sc->sc_type > WM_T_82543)) {
10594 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
10595 	} else {
10596 		sc->sc_ctrl &= ~CTRL_ASDE;
10597 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
10598 		if (ife->ifm_media & IFM_FDX)
10599 			sc->sc_ctrl |= CTRL_FD;
10600 		switch (IFM_SUBTYPE(ife->ifm_media)) {
10601 		case IFM_10_T:
10602 			sc->sc_ctrl |= CTRL_SPEED_10;
10603 			break;
10604 		case IFM_100_TX:
10605 			sc->sc_ctrl |= CTRL_SPEED_100;
10606 			break;
10607 		case IFM_1000_T:
10608 			sc->sc_ctrl |= CTRL_SPEED_1000;
10609 			break;
10610 		case IFM_NONE:
10611 			/* There is no specific setting for IFM_NONE */
10612 			break;
10613 		default:
10614 			panic("wm_gmii_mediachange: bad media 0x%x",
10615 			    ife->ifm_media);
10616 		}
10617 	}
10618 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10619 	CSR_WRITE_FLUSH(sc);
10620 
10621 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
10622 		wm_serdes_mediachange(ifp);
10623 
10624 	if (sc->sc_type <= WM_T_82543)
10625 		wm_gmii_reset(sc);
10626 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
10627 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
10628 		/* Allow time for the SFP cage to power up the PHY */
10629 		delay(300 * 1000);
10630 		wm_gmii_reset(sc);
10631 	}
10632 
10633 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
10634 		return 0;
10635 	return rc;
10636 }
10637 
10638 /*
10639  * wm_gmii_mediastatus:	[ifmedia interface function]
10640  *
10641  *	Get the current interface media status on a 1000BASE-T device.
10642  */
10643 static void
10644 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
10645 {
10646 	struct wm_softc *sc = ifp->if_softc;
10647 
10648 	ether_mediastatus(ifp, ifmr);
10649 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
10650 	    | sc->sc_flowflags;
10651 }
10652 
10653 #define	MDI_IO		CTRL_SWDPIN(2)
10654 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
10655 #define	MDI_CLK		CTRL_SWDPIN(3)
10656 
10657 static void
10658 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
10659 {
10660 	uint32_t i, v;
10661 
10662 	v = CSR_READ(sc, WMREG_CTRL);
10663 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
10664 	v |= MDI_DIR | CTRL_SWDPIO(3);
10665 
10666 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
10667 		if (data & i)
10668 			v |= MDI_IO;
10669 		else
10670 			v &= ~MDI_IO;
10671 		CSR_WRITE(sc, WMREG_CTRL, v);
10672 		CSR_WRITE_FLUSH(sc);
10673 		delay(10);
10674 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
10675 		CSR_WRITE_FLUSH(sc);
10676 		delay(10);
10677 		CSR_WRITE(sc, WMREG_CTRL, v);
10678 		CSR_WRITE_FLUSH(sc);
10679 		delay(10);
10680 	}
10681 }
10682 
10683 static uint16_t
10684 wm_i82543_mii_recvbits(struct wm_softc *sc)
10685 {
10686 	uint32_t v, i;
10687 	uint16_t data = 0;
10688 
10689 	v = CSR_READ(sc, WMREG_CTRL);
10690 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
10691 	v |= CTRL_SWDPIO(3);
10692 
10693 	CSR_WRITE(sc, WMREG_CTRL, v);
10694 	CSR_WRITE_FLUSH(sc);
10695 	delay(10);
10696 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
10697 	CSR_WRITE_FLUSH(sc);
10698 	delay(10);
10699 	CSR_WRITE(sc, WMREG_CTRL, v);
10700 	CSR_WRITE_FLUSH(sc);
10701 	delay(10);
10702 
10703 	for (i = 0; i < 16; i++) {
10704 		data <<= 1;
10705 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
10706 		CSR_WRITE_FLUSH(sc);
10707 		delay(10);
10708 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
10709 			data |= 1;
10710 		CSR_WRITE(sc, WMREG_CTRL, v);
10711 		CSR_WRITE_FLUSH(sc);
10712 		delay(10);
10713 	}
10714 
10715 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
10716 	CSR_WRITE_FLUSH(sc);
10717 	delay(10);
10718 	CSR_WRITE(sc, WMREG_CTRL, v);
10719 	CSR_WRITE_FLUSH(sc);
10720 	delay(10);
10721 
10722 	return data;
10723 }
10724 
10725 #undef MDI_IO
10726 #undef MDI_DIR
10727 #undef MDI_CLK
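
/*
 * The two helpers above bit-bang an IEEE 802.3 clause 22 MDIO frame on
 * the software-definable pins.  As a sketch, the 14-bit command word
 * that wm_gmii_i82543_readreg() below sends after the 32-bit preamble
 * of ones is laid out as:
 *
 *	bits [13:12]	ST	start of frame (MII_COMMAND_START)
 *	bits [11:10]	OP	opcode (MII_COMMAND_READ)
 *	bits [9:5]	PHYAD	PHY address
 *	bits [4:0]	REGAD	register address
 *
 * The 16 data bits are then clocked in by wm_i82543_mii_recvbits().
 */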
10728 
10729 /*
10730  * wm_gmii_i82543_readreg:	[mii interface function]
10731  *
10732  *	Read a PHY register on the GMII (i82543 version).
10733  */
10734 static int
10735 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
10736 {
10737 	struct wm_softc *sc = device_private(dev);
10738 
10739 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
10740 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
10741 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
10742 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
10743 
10744 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
10745 		device_xname(dev), phy, reg, *val));
10746 
10747 	return 0;
10748 }
10749 
10750 /*
10751  * wm_gmii_i82543_writereg:	[mii interface function]
10752  *
10753  *	Write a PHY register on the GMII (i82543 version).
10754  */
10755 static int
10756 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
10757 {
10758 	struct wm_softc *sc = device_private(dev);
10759 
10760 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
10761 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
10762 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
10763 	    (MII_COMMAND_START << 30), 32);
10764 
10765 	return 0;
10766 }
10767 
10768 /*
10769  * wm_gmii_mdic_readreg:	[mii interface function]
10770  *
10771  *	Read a PHY register on the GMII.
10772  */
10773 static int
10774 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
10775 {
10776 	struct wm_softc *sc = device_private(dev);
10777 	uint32_t mdic = 0;
10778 	int i;
10779 
10780 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
10781 	    && (reg > MII_ADDRMASK)) {
10782 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
10783 		    __func__, sc->sc_phytype, reg);
10784 		reg &= MII_ADDRMASK;
10785 	}
10786 
10787 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
10788 	    MDIC_REGADD(reg));
10789 
10790 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
10791 		delay(50);
10792 		mdic = CSR_READ(sc, WMREG_MDIC);
10793 		if (mdic & MDIC_READY)
10794 			break;
10795 	}
10796 
10797 	if ((mdic & MDIC_READY) == 0) {
10798 		DPRINTF(WM_DEBUG_GMII,
10799 		    ("%s: MDIC read timed out: phy %d reg %d\n",
10800 			device_xname(dev), phy, reg));
10801 		return ETIMEDOUT;
10802 	} else if (mdic & MDIC_E) {
10803 		/* This is normal if no PHY is present. */
10804 		DPRINTF(WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
10805 			device_xname(sc->sc_dev), phy, reg));
10806 		return -1;
10807 	} else
10808 		*val = MDIC_DATA(mdic);
10809 
10810 	/*
10811 	 * Allow some time after each MDIC transaction to avoid
10812 	 * reading duplicate data in the next MDIC transaction.
10813 	 */
10814 	if (sc->sc_type == WM_T_PCH2)
10815 		delay(100);
10816 
10817 	return 0;
10818 }
10819 
10820 /*
10821  * wm_gmii_mdic_writereg:	[mii interface function]
10822  *
10823  *	Write a PHY register on the GMII.
10824  */
10825 static int
10826 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
10827 {
10828 	struct wm_softc *sc = device_private(dev);
10829 	uint32_t mdic = 0;
10830 	int i;
10831 
10832 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
10833 	    && (reg > MII_ADDRMASK)) {
10834 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
10835 		    __func__, sc->sc_phytype, reg);
10836 		reg &= MII_ADDRMASK;
10837 	}
10838 
10839 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
10840 	    MDIC_REGADD(reg) | MDIC_DATA(val));
10841 
10842 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
10843 		delay(50);
10844 		mdic = CSR_READ(sc, WMREG_MDIC);
10845 		if (mdic & MDIC_READY)
10846 			break;
10847 	}
10848 
10849 	if ((mdic & MDIC_READY) == 0) {
10850 		DPRINTF(WM_DEBUG_GMII,
10851 		    ("%s: MDIC write timed out: phy %d reg %d\n",
10852 			device_xname(dev), phy, reg));
10853 		return ETIMEDOUT;
10854 	} else if (mdic & MDIC_E) {
10855 		DPRINTF(WM_DEBUG_GMII,
10856 		    ("%s: MDIC write error: phy %d reg %d\n",
10857 			device_xname(dev), phy, reg));
10858 		return -1;
10859 	}
10860 
10861 	/*
10862 	 * Allow some time after each MDIC transaction to avoid
10863 	 * reading duplicate data in the next MDIC transaction.
10864 	 */
10865 	if (sc->sc_type == WM_T_PCH2)
10866 		delay(100);
10867 
10868 	return 0;
10869 }
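
/*
 * Usage sketch (illustrative only, not part of the driver): reading the
 * basic mode status register of the PHY at address 1 through the MDIC
 * accessors above.  MII_BMSR and BMSR_LINK come from <dev/mii/mii.h>.
 *
 *	uint16_t bmsr;
 *	bool link_up;
 *
 *	if (wm_gmii_mdic_readreg(dev, 1, MII_BMSR, &bmsr) == 0)
 *		link_up = (bmsr & BMSR_LINK) != 0;
 */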
10870 
10871 /*
10872  * wm_gmii_i82544_readreg:	[mii interface function]
10873  *
10874  *	Read a PHY register on the GMII.
10875  */
10876 static int
10877 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
10878 {
10879 	struct wm_softc *sc = device_private(dev);
10880 	int rv;
10881 
10882 	if (sc->phy.acquire(sc)) {
10883 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10884 		return -1;
10885 	}
10886 
10887 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
10888 
10889 	sc->phy.release(sc);
10890 
10891 	return rv;
10892 }
10893 
10894 static int
10895 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
10896 {
10897 	struct wm_softc *sc = device_private(dev);
10898 	int rv;
10899 
10900 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
10901 		switch (sc->sc_phytype) {
10902 		case WMPHY_IGP:
10903 		case WMPHY_IGP_2:
10904 		case WMPHY_IGP_3:
10905 			rv = wm_gmii_mdic_writereg(dev, phy,
10906 			    IGPHY_PAGE_SELECT, reg);
10907 			if (rv != 0)
10908 				return rv;
10909 			break;
10910 		default:
10911 #ifdef WM_DEBUG
10912 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
10913 			    __func__, sc->sc_phytype, reg);
10914 #endif
10915 			break;
10916 		}
10917 	}
10918 
10919 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
10920 }
10921 
10922 /*
10923  * wm_gmii_i82544_writereg:	[mii interface function]
10924  *
10925  *	Write a PHY register on the GMII.
10926  */
10927 static int
10928 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
10929 {
10930 	struct wm_softc *sc = device_private(dev);
10931 	int rv;
10932 
10933 	if (sc->phy.acquire(sc)) {
10934 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10935 		return -1;
10936 	}
10937 
10938 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
10939 	sc->phy.release(sc);
10940 
10941 	return rv;
10942 }
10943 
10944 static int
10945 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
10946 {
10947 	struct wm_softc *sc = device_private(dev);
10948 	int rv;
10949 
10950 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
10951 		switch (sc->sc_phytype) {
10952 		case WMPHY_IGP:
10953 		case WMPHY_IGP_2:
10954 		case WMPHY_IGP_3:
10955 			rv = wm_gmii_mdic_writereg(dev, phy,
10956 			    IGPHY_PAGE_SELECT, reg);
10957 			if (rv != 0)
10958 				return rv;
10959 			break;
10960 		default:
10961 #ifdef WM_DEBUG
10962 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
10963 			    __func__, sc->sc_phytype, reg);
10964 #endif
10965 			break;
10966 		}
10967 	}
10968 
10969 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
10970 }
10971 
10972 /*
10973  * wm_gmii_i80003_readreg:	[mii interface function]
10974  *
10975  *	Read a PHY register on the kumeran
10976  *	Read a PHY register on the Kumeran bus (80003).
10977  * resource ...
10978  */
10979 static int
10980 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
10981 {
10982 	struct wm_softc *sc = device_private(dev);
10983 	int page_select;
10984 	uint16_t temp, temp2;
10985 	int rv = 0;
10986 
10987 	if (phy != 1) /* Only one PHY on kumeran bus */
10988 		return -1;
10989 
10990 	if (sc->phy.acquire(sc)) {
10991 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10992 		return -1;
10993 	}
10994 
10995 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
10996 		page_select = GG82563_PHY_PAGE_SELECT;
10997 	else {
10998 		/*
10999 		 * Use Alternative Page Select register to access registers
11000 		 * 30 and 31.
11001 		 */
11002 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
11003 	}
11004 	temp = reg >> GG82563_PAGE_SHIFT;
11005 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
11006 		goto out;
11007 
11008 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
11009 		/*
11010 		 * Wait an additional 200us to work around a bug with the
11011 		 * ready bit in the MDIC register.
11012 		 */
11013 		delay(200);
11014 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
11015 		if ((rv != 0) || (temp2 != temp)) {
11016 			device_printf(dev, "%s failed\n", __func__);
11017 			rv = -1;
11018 			goto out;
11019 		}
11020 		delay(200);
11021 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11022 		delay(200);
11023 	} else
11024 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11025 
11026 out:
11027 	sc->phy.release(sc);
11028 	return rv;
11029 }
11030 
11031 /*
11032  * wm_gmii_i80003_writereg:	[mii interface function]
11033  *
11034  *	Write a PHY register on the Kumeran bus (80003).
11035  * This could be handled by the PHY layer if we didn't have to lock the
11036  * resource ...
11037  */
11038 static int
11039 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
11040 {
11041 	struct wm_softc *sc = device_private(dev);
11042 	int page_select, rv;
11043 	uint16_t temp, temp2;
11044 
11045 	if (phy != 1) /* Only one PHY on kumeran bus */
11046 		return -1;
11047 
11048 	if (sc->phy.acquire(sc)) {
11049 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11050 		return -1;
11051 	}
11052 
11053 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
11054 		page_select = GG82563_PHY_PAGE_SELECT;
11055 	else {
11056 		/*
11057 		 * Use Alternative Page Select register to access registers
11058 		 * 30 and 31.
11059 		 */
11060 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
11061 	}
11062 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
11063 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
11064 		goto out;
11065 
11066 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
11067 		/*
11068 		 * Wait an additional 200us to work around a bug with the
11069 		 * ready bit in the MDIC register.
11070 		 */
11071 		delay(200);
11072 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
11073 		if ((rv != 0) || (temp2 != temp)) {
11074 			device_printf(dev, "%s failed\n", __func__);
11075 			rv = -1;
11076 			goto out;
11077 		}
11078 		delay(200);
11079 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11080 		delay(200);
11081 	} else
11082 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11083 
11084 out:
11085 	sc->phy.release(sc);
11086 	return rv;
11087 }
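
/*
 * A sketch of the GG82563 paging scheme assumed by the two functions
 * above: the page number travels in the upper bits of "reg" and the
 * register offset in the low five bits, i.e.
 *
 *	page   = reg >> GG82563_PAGE_SHIFT;	(written to page select)
 *	offset = reg & MII_ADDRMASK;		(used for the MDIC access)
 *
 * Offsets of GG82563_MIN_ALT_REG and above must be reached through the
 * alternative page select register rather than the normal one.
 */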
11088 
11089 /*
11090  * wm_gmii_bm_readreg:	[mii interface function]
11091  *
11092  *	Read a PHY register on the BM PHY.
11093  * This could be handled by the PHY layer if we didn't have to lock the
11094  * resource ...
11095  */
11096 static int
11097 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
11098 {
11099 	struct wm_softc *sc = device_private(dev);
11100 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
11101 	int rv;
11102 
11103 	if (sc->phy.acquire(sc)) {
11104 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11105 		return -1;
11106 	}
11107 
11108 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
11109 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
11110 		    || (reg == 31)) ? 1 : phy;
11111 	/* Page 800 works differently than the rest so it has its own func */
11112 	if (page == BM_WUC_PAGE) {
11113 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
11114 		goto release;
11115 	}
11116 
11117 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11118 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
11119 		    && (sc->sc_type != WM_T_82583))
11120 			rv = wm_gmii_mdic_writereg(dev, phy,
11121 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
11122 		else
11123 			rv = wm_gmii_mdic_writereg(dev, phy,
11124 			    BME1000_PHY_PAGE_SELECT, page);
11125 		if (rv != 0)
11126 			goto release;
11127 	}
11128 
11129 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11130 
11131 release:
11132 	sc->phy.release(sc);
11133 	return rv;
11134 }
11135 
11136 /*
11137  * wm_gmii_bm_writereg:	[mii interface function]
11138  *
11139  *	Write a PHY register on the BM PHY.
11140  * This could be handled by the PHY layer if we didn't have to lock the
11141  * resource ...
11142  */
11143 static int
11144 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
11145 {
11146 	struct wm_softc *sc = device_private(dev);
11147 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
11148 	int rv;
11149 
11150 	if (sc->phy.acquire(sc)) {
11151 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11152 		return -1;
11153 	}
11154 
11155 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
11156 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
11157 		    || (reg == 31)) ? 1 : phy;
11158 	/* Page 800 works differently than the rest so it has its own func */
11159 	if (page == BM_WUC_PAGE) {
11160 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
11161 		goto release;
11162 	}
11163 
11164 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11165 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
11166 		    && (sc->sc_type != WM_T_82583))
11167 			rv = wm_gmii_mdic_writereg(dev, phy,
11168 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
11169 		else
11170 			rv = wm_gmii_mdic_writereg(dev, phy,
11171 			    BME1000_PHY_PAGE_SELECT, page);
11172 		if (rv != 0)
11173 			goto release;
11174 	}
11175 
11176 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11177 
11178 release:
11179 	sc->phy.release(sc);
11180 	return rv;
11181 }
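
/*
 * A sketch of the BM register encoding assumed by the two functions
 * above: "reg" packs the page number above BME1000_PAGE_SHIFT and the
 * register number in the low bits, so register 17 on page 769 would be
 * encoded roughly as
 *
 *	reg = (769 << BME1000_PAGE_SHIFT) | 17;
 *
 * Accesses to pages >= 768 (and a few special registers) are redirected
 * to PHY address 1, and page 800 (BM_WUC_PAGE) is routed to
 * wm_access_phy_wakeup_reg_bm() instead.
 */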
11182 
11183 /*
11184  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
11185  *  @dev: pointer to the HW structure
11186  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
11187  *
11188  *  Assumes semaphore already acquired and phy_reg points to a valid memory
11189  *  address to store contents of the BM_WUC_ENABLE_REG register.
11190  */
11191 static int
11192 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
11193 {
11194 	uint16_t temp;
11195 	int rv;
11196 
11197 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
11198 		device_xname(dev), __func__));
11199 
11200 	if (!phy_regp)
11201 		return -1;
11202 
11203 	/* All page select, port ctrl and wakeup registers use phy address 1 */
11204 
11205 	/* Select Port Control Registers page */
11206 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
11207 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
11208 	if (rv != 0)
11209 		return rv;
11210 
11211 	/* Read WUCE and save it */
11212 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
11213 	if (rv != 0)
11214 		return rv;
11215 
11216 	/* Enable both PHY wakeup mode and Wakeup register page writes.
11217 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
11218 	 */
11219 	temp = *phy_regp;
11220 	temp |= BM_WUC_ENABLE_BIT;
11221 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
11222 
11223 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
11224 		return rv;
11225 
11226 	/* Select Host Wakeup Registers page - caller now able to write
11227 	 * registers on the Wakeup registers page
11228 	 */
11229 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
11230 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
11231 }
11232 
11233 /*
11234  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
11235  *  @dev: pointer to the HW structure
11236  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
11237  *
11238  *  Restore BM_WUC_ENABLE_REG to its original value.
11239  *
11240  *  Assumes semaphore already acquired and *phy_reg is the contents of the
11241  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
11242  *  caller.
11243  */
11244 static int
11245 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
11246 {
11247 
11248 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
11249 		device_xname(dev), __func__));
11250 
11251 	if (!phy_regp)
11252 		return -1;
11253 
11254 	/* Select Port Control Registers page */
11255 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
11256 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
11257 
11258 	/* Restore 769.17 to its original value */
11259 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
11260 
11261 	return 0;
11262 }
11263 
11264 /*
11265  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
11266  *  @dev: pointer to the HW structure
11267  *  @offset: register offset to be read or written
11268  *  @val: pointer to the data to read or write
11269  *  @rd: determines if operation is read or write
11270  *  @page_set: BM_WUC_PAGE already set and access enabled
11271  *
11272  *  Read the PHY register at offset and store the retrieved information in
11273  *  data, or write data to PHY register at offset.  Note the procedure to
11274  *  access the PHY wakeup registers is different than reading the other PHY
11275  *  registers. It works as such:
11276  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
11277  *  2) Set the page to 800 for host accesses (801 for manageability)
11278  *  3) Write the address using the address opcode (0x11)
11279  *  4) Read or write the data using the data opcode (0x12)
11280  *  5) Restore 769.17.2 to its original value
11281  *
11282  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
11283  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
11284  *
11285  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
11286  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
11287  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_access_bm()).
11288  */
11289 static int
11290 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
11291 	bool page_set)
11292 {
11293 	struct wm_softc *sc = device_private(dev);
11294 	uint16_t regnum = BM_PHY_REG_NUM(offset);
11295 	uint16_t page = BM_PHY_REG_PAGE(offset);
11296 	uint16_t wuce;
11297 	int rv = 0;
11298 
11299 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
11300 		device_xname(dev), __func__));
11301 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
11302 	if ((sc->sc_type == WM_T_PCH)
11303 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
11304 		device_printf(dev,
11305 		    "Attempting to access page %d while gig enabled.\n", page);
11306 	}
11307 
11308 	if (!page_set) {
11309 		/* Enable access to PHY wakeup registers */
11310 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
11311 		if (rv != 0) {
11312 			device_printf(dev,
11313 			    "%s: Could not enable PHY wakeup reg access\n",
11314 			    __func__);
11315 			return rv;
11316 		}
11317 	}
11318 	DPRINTF(WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
11319 		device_xname(sc->sc_dev), __func__, page, regnum));
11320 
11321 	/*
11322 	 * Steps 3) and 4): write the register address, then read or write
11323 	 * the data, as described in the function comment above.
11324 	 */
11325 
11326 	/* Write the Wakeup register page offset value using opcode 0x11 */
11327 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
11328 	if (rv != 0)
11329 		return rv;
11330 
11331 	if (rd) {
11332 		/* Read the Wakeup register page value using opcode 0x12 */
11333 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
11334 	} else {
11335 		/* Write the Wakeup register page value using opcode 0x12 */
11336 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
11337 	}
11338 	if (rv != 0)
11339 		return rv;
11340 
11341 	if (!page_set)
11342 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
11343 
11344 	return rv;
11345 }
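
/*
 * Caller sketch (illustrative only; "offset" stands for a register
 * offset encoded for page 800): a one-off read with the page neither
 * pre-set nor access pre-enabled lets the helper above run the whole
 * enable/address/data/disable sequence itself.  The PHY semaphore must
 * already be held.
 *
 *	uint16_t data;
 *	int rv;
 *
 *	rv = wm_access_phy_wakeup_reg_bm(dev, offset, &data, true, false);
 */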
11346 
11347 /*
11348  * wm_gmii_hv_readreg:	[mii interface function]
11349  *
11350  *	Read a PHY register on the HV PHY (PCH and newer).
11351  * This could be handled by the PHY layer if we didn't have to lock the
11352  * resource ...
11353  */
11354 static int
11355 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
11356 {
11357 	struct wm_softc *sc = device_private(dev);
11358 	int rv;
11359 
11360 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
11361 		device_xname(dev), __func__));
11362 	if (sc->phy.acquire(sc)) {
11363 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11364 		return -1;
11365 	}
11366 
11367 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
11368 	sc->phy.release(sc);
11369 	return rv;
11370 }
11371 
11372 static int
11373 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
11374 {
11375 	uint16_t page = BM_PHY_REG_PAGE(reg);
11376 	uint16_t regnum = BM_PHY_REG_NUM(reg);
11377 	int rv;
11378 
11379 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
11380 
11381 	/* Page 800 works differently than the rest so it has its own func */
11382 	if (page == BM_WUC_PAGE)
11383 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
11384 
11385 	/*
11386 	 * Pages lower than 768 work differently from the rest and are not
11387 	 * supported here.
11388 	 */
11389 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
11390 		device_printf(dev, "gmii_hv_readreg!!!\n");
11391 		return -1;
11392 	}
11393 
11394 	/*
11395 	 * XXX I21[789] documents say that the SMBus Address register is at
11396 	 * PHY address 01, Page 0 (not 768), Register 26.
11397 	 */
11398 	if (page == HV_INTC_FC_PAGE_START)
11399 		page = 0;
11400 
11401 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
11402 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
11403 		    page << BME1000_PAGE_SHIFT);
11404 		if (rv != 0)
11405 			return rv;
11406 	}
11407 
11408 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
11409 }
11410 
11411 /*
11412  * wm_gmii_hv_writereg:	[mii interface function]
11413  *
11414  *	Write a PHY register on the HV PHY (PCH and newer).
11415  * This could be handled by the PHY layer if we didn't have to lock the
11416  * resource ...
11417  */
11418 static int
11419 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
11420 {
11421 	struct wm_softc *sc = device_private(dev);
11422 	int rv;
11423 
11424 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
11425 		device_xname(dev), __func__));
11426 
11427 	if (sc->phy.acquire(sc)) {
11428 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11429 		return -1;
11430 	}
11431 
11432 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
11433 	sc->phy.release(sc);
11434 
11435 	return rv;
11436 }
11437 
11438 static int
11439 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
11440 {
11441 	struct wm_softc *sc = device_private(dev);
11442 	uint16_t page = BM_PHY_REG_PAGE(reg);
11443 	uint16_t regnum = BM_PHY_REG_NUM(reg);
11444 	int rv;
11445 
11446 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
11447 
11448 	/* Page 800 works differently than the rest so it has its own func */
11449 	if (page == BM_WUC_PAGE)
11450 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
11451 		    false);
11452 
11453 	/*
11454 	 * Pages lower than 768 work differently from the rest and are not
11455 	 * supported here.
11456 	 */
11457 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
11458 		device_printf(dev, "gmii_hv_writereg!!!\n");
11459 		return -1;
11460 	}
11461 
11462 	{
11463 		/*
11464 		 * XXX I21[789] documents say that the SMBus Address register
11465 		 * is at PHY address 01, Page 0 (not 768), Register 26.
11466 		 */
11467 		if (page == HV_INTC_FC_PAGE_START)
11468 			page = 0;
11469 
11470 		/*
11471 		 * XXX Workaround MDIO accesses being disabled after entering
11472 		 * IEEE Power Down (whenever bit 11 of the PHY control
11473 		 * register is set)
11474 		 */
11475 		if (sc->sc_phytype == WMPHY_82578) {
11476 			struct mii_softc *child;
11477 
11478 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
11479 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
11480 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
11481 			    && ((val & (1 << 11)) != 0)) {
11482 				device_printf(dev, "XXX need workaround\n");
11483 			}
11484 		}
11485 
11486 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
11487 			rv = wm_gmii_mdic_writereg(dev, 1,
11488 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
11489 			if (rv != 0)
11490 				return rv;
11491 		}
11492 	}
11493 
11494 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
11495 }
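
/*
 * A summary of how the HV (PCH) access functions above interpret the
 * page encoded in "reg":
 *
 *	page 0			ordinary registers
 *	pages 1 .. 767		not supported (error)
 *	pages >= 768		forced to PHY address 1; page 768 itself
 *				is remapped to page 0
 *	page 800 (BM_WUC_PAGE)	handled by the wakeup register helpers
 */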
11496 
11497 /*
11498  * wm_gmii_82580_readreg:	[mii interface function]
11499  *
11500  *	Read a PHY register on the 82580 and I350.
11501  * This could be handled by the PHY layer if we didn't have to lock the
11502  * resource ...
11503  */
11504 static int
11505 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
11506 {
11507 	struct wm_softc *sc = device_private(dev);
11508 	int rv;
11509 
11510 	if (sc->phy.acquire(sc) != 0) {
11511 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11512 		return -1;
11513 	}
11514 
11515 #ifdef DIAGNOSTIC
11516 	if (reg > MII_ADDRMASK) {
11517 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11518 		    __func__, sc->sc_phytype, reg);
11519 		reg &= MII_ADDRMASK;
11520 	}
11521 #endif
11522 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
11523 
11524 	sc->phy.release(sc);
11525 	return rv;
11526 }
11527 
11528 /*
11529  * wm_gmii_82580_writereg:	[mii interface function]
11530  *
11531  *	Write a PHY register on the 82580 and I350.
11532  * This could be handled by the PHY layer if we didn't have to lock the
11533  * resource ...
11534  */
11535 static int
11536 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
11537 {
11538 	struct wm_softc *sc = device_private(dev);
11539 	int rv;
11540 
11541 	if (sc->phy.acquire(sc) != 0) {
11542 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11543 		return -1;
11544 	}
11545 
11546 #ifdef DIAGNOSTIC
11547 	if (reg > MII_ADDRMASK) {
11548 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11549 		    __func__, sc->sc_phytype, reg);
11550 		reg &= MII_ADDRMASK;
11551 	}
11552 #endif
11553 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
11554 
11555 	sc->phy.release(sc);
11556 	return rv;
11557 }
11558 
11559 /*
11560  * wm_gmii_gs40g_readreg:	[mii interface function]
11561  *
11562  *	Read a PHY register on the I210 and I211.
11563  * This could be handled by the PHY layer if we didn't have to lock the
11564  * resource ...
11565  */
11566 static int
11567 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
11568 {
11569 	struct wm_softc *sc = device_private(dev);
11570 	int page, offset;
11571 	int rv;
11572 
11573 	/* Acquire semaphore */
11574 	if (sc->phy.acquire(sc)) {
11575 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11576 		return -1;
11577 	}
11578 
11579 	/* Page select */
11580 	page = reg >> GS40G_PAGE_SHIFT;
11581 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
11582 	if (rv != 0)
11583 		goto release;
11584 
11585 	/* Read reg */
11586 	offset = reg & GS40G_OFFSET_MASK;
11587 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
11588 
11589 release:
11590 	sc->phy.release(sc);
11591 	return rv;
11592 }
11593 
11594 /*
11595  * wm_gmii_gs40g_writereg:	[mii interface function]
11596  *
11597  *	Write a PHY register on the I210 and I211.
11598  * This could be handled by the PHY layer if we didn't have to lock the
11599  * resource ...
11600  */
11601 static int
11602 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
11603 {
11604 	struct wm_softc *sc = device_private(dev);
11605 	uint16_t page;
11606 	int offset, rv;
11607 
11608 	/* Acquire semaphore */
11609 	if (sc->phy.acquire(sc)) {
11610 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11611 		return -1;
11612 	}
11613 
11614 	/* Page select */
11615 	page = reg >> GS40G_PAGE_SHIFT;
11616 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
11617 	if (rv != 0)
11618 		goto release;
11619 
11620 	/* Write reg */
11621 	offset = reg & GS40G_OFFSET_MASK;
11622 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
11623 
11624 release:
11625 	/* Release semaphore */
11626 	sc->phy.release(sc);
11627 	return rv;
11628 }
11629 
11630 /*
11631  * wm_gmii_statchg:	[mii interface function]
11632  *
11633  *	Callback from MII layer when media changes.
11634  */
11635 static void
11636 wm_gmii_statchg(struct ifnet *ifp)
11637 {
11638 	struct wm_softc *sc = ifp->if_softc;
11639 	struct mii_data *mii = &sc->sc_mii;
11640 
11641 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
11642 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
11643 	sc->sc_fcrtl &= ~FCRTL_XONE;
11644 
11645 	/* Get flow control negotiation result. */
11646 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
11647 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
11648 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
11649 		mii->mii_media_active &= ~IFM_ETH_FMASK;
11650 	}
11651 
11652 	if (sc->sc_flowflags & IFM_FLOW) {
11653 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
11654 			sc->sc_ctrl |= CTRL_TFCE;
11655 			sc->sc_fcrtl |= FCRTL_XONE;
11656 		}
11657 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
11658 			sc->sc_ctrl |= CTRL_RFCE;
11659 	}
11660 
11661 	if (mii->mii_media_active & IFM_FDX) {
11662 		DPRINTF(WM_DEBUG_LINK,
11663 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
11664 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
11665 	} else {
11666 		DPRINTF(WM_DEBUG_LINK,
11667 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
11668 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
11669 	}
11670 
11671 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11672 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
11673 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
11674 						 : WMREG_FCRTL, sc->sc_fcrtl);
11675 	if (sc->sc_type == WM_T_80003) {
11676 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
11677 		case IFM_1000_T:
11678 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
11679 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
11680 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
11681 			break;
11682 		default:
11683 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
11684 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
11685 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
11686 			break;
11687 		}
11688 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
11689 	}
11690 }
11691 
11692 /* Kumeran related (80003, ICH* and PCH*) */
11693 
11694 /*
11695  * wm_kmrn_readreg:
11696  *
11697  *	Read a Kumeran register
11698  */
11699 static int
11700 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
11701 {
11702 	int rv;
11703 
11704 	if (sc->sc_type == WM_T_80003)
11705 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
11706 	else
11707 		rv = sc->phy.acquire(sc);
11708 	if (rv != 0) {
11709 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
11710 		    __func__);
11711 		return rv;
11712 	}
11713 
11714 	rv = wm_kmrn_readreg_locked(sc, reg, val);
11715 
11716 	if (sc->sc_type == WM_T_80003)
11717 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
11718 	else
11719 		sc->phy.release(sc);
11720 
11721 	return rv;
11722 }
11723 
11724 static int
11725 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
11726 {
11727 
11728 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
11729 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
11730 	    KUMCTRLSTA_REN);
11731 	CSR_WRITE_FLUSH(sc);
11732 	delay(2);
11733 
11734 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
11735 
11736 	return 0;
11737 }
11738 
11739 /*
11740  * wm_kmrn_writereg:
11741  *
11742  *	Write a Kumeran register
11743  */
11744 static int
11745 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
11746 {
11747 	int rv;
11748 
11749 	if (sc->sc_type == WM_T_80003)
11750 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
11751 	else
11752 		rv = sc->phy.acquire(sc);
11753 	if (rv != 0) {
11754 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
11755 		    __func__);
11756 		return rv;
11757 	}
11758 
11759 	rv = wm_kmrn_writereg_locked(sc, reg, val);
11760 
11761 	if (sc->sc_type == WM_T_80003)
11762 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
11763 	else
11764 		sc->phy.release(sc);
11765 
11766 	return rv;
11767 }
11768 
11769 static int
11770 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
11771 {
11772 
11773 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
11774 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
11775 
11776 	return 0;
11777 }
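
/*
 * Usage sketch (illustrative only): Kumeran registers are plain
 * offset/value pairs behind KUMCTRLSTA, so reading and then rewriting
 * the half-duplex control register used by wm_gmii_statchg() would look
 * roughly like:
 *
 *	uint16_t hdctl;
 *
 *	if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &hdctl) == 0)
 *		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
 *		    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
 */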
11778 
11779 /*
11780  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
11781  * This access method is different from IEEE MMD.
11782  */
11783 static int
11784 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
11785 {
11786 	struct wm_softc *sc = device_private(dev);
11787 	int rv;
11788 
11789 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
11790 	if (rv != 0)
11791 		return rv;
11792 
11793 	if (rd)
11794 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
11795 	else
11796 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
11797 	return rv;
11798 }
11799 
11800 static int
11801 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
11802 {
11803 
11804 	return wm_access_emi_reg_locked(dev, reg, val, true);
11805 }
11806 
11807 static int
11808 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
11809 {
11810 
11811 	return wm_access_emi_reg_locked(dev, reg, &val, false);
11812 }
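
/*
 * Usage sketch (illustrative only; "emi_off" is a placeholder for an
 * EMI register offset): EMI registers are reached through the
 * I82579_EMI_ADDR/I82579_EMI_DATA indirection above, so a locked read
 * reduces to
 *
 *	uint16_t data;
 *	int rv = wm_read_emi_reg_locked(dev, emi_off, &data);
 */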
11813 
11814 /* SGMII related */
11815 
11816 /*
11817  * wm_sgmii_uses_mdio
11818  *
11819  * Check whether the transaction is to the internal PHY or the external
11820  * MDIO interface. Return true if it's MDIO.
11821  */
11822 static bool
11823 wm_sgmii_uses_mdio(struct wm_softc *sc)
11824 {
11825 	uint32_t reg;
11826 	bool ismdio = false;
11827 
11828 	switch (sc->sc_type) {
11829 	case WM_T_82575:
11830 	case WM_T_82576:
11831 		reg = CSR_READ(sc, WMREG_MDIC);
11832 		ismdio = ((reg & MDIC_DEST) != 0);
11833 		break;
11834 	case WM_T_82580:
11835 	case WM_T_I350:
11836 	case WM_T_I354:
11837 	case WM_T_I210:
11838 	case WM_T_I211:
11839 		reg = CSR_READ(sc, WMREG_MDICNFG);
11840 		ismdio = ((reg & MDICNFG_DEST) != 0);
11841 		break;
11842 	default:
11843 		break;
11844 	}
11845 
11846 	return ismdio;
11847 }
11848 
11849 /* Setup internal SGMII PHY for SFP */
11850 static void
11851 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
11852 {
11853 	uint16_t id1, id2, phyreg;
11854 	int i, rv;
11855 
11856 	if (((sc->sc_flags & WM_F_SGMII) == 0)
11857 	    || ((sc->sc_flags & WM_F_SFP) == 0))
11858 		return;
11859 
11860 	for (i = 0; i < MII_NPHY; i++) {
11861 		sc->phy.no_errprint = true;
11862 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
11863 		if (rv != 0)
11864 			continue;
11865 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
11866 		if (rv != 0)
11867 			continue;
11868 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
11869 			continue;
11870 		sc->phy.no_errprint = false;
11871 
11872 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
11873 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
11874 		phyreg |= ESSR_SGMII_WOC_COPPER;
11875 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
11876 		break;
11877 	}
11879 }
11880 
11881 /*
11882  * wm_sgmii_readreg:	[mii interface function]
11883  *
11884  *	Read a PHY register on the SGMII
11885  * This could be handled by the PHY layer if we didn't have to lock the
11886  * resource ...
11887  */
11888 static int
11889 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
11890 {
11891 	struct wm_softc *sc = device_private(dev);
11892 	int rv;
11893 
11894 	if (sc->phy.acquire(sc)) {
11895 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11896 		return -1;
11897 	}
11898 
11899 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
11900 
11901 	sc->phy.release(sc);
11902 	return rv;
11903 }
11904 
11905 static int
11906 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
11907 {
11908 	struct wm_softc *sc = device_private(dev);
11909 	uint32_t i2ccmd;
11910 	int i, rv = 0;
11911 
11912 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
11913 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
11914 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
11915 
11916 	/* Poll the ready bit */
11917 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
11918 		delay(50);
11919 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
11920 		if (i2ccmd & I2CCMD_READY)
11921 			break;
11922 	}
11923 	if ((i2ccmd & I2CCMD_READY) == 0) {
11924 		device_printf(dev, "I2CCMD Read did not complete\n");
11925 		rv = ETIMEDOUT;
11926 	}
11927 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
11928 		if (!sc->phy.no_errprint)
11929 			device_printf(dev, "I2CCMD Error bit set\n");
11930 		rv = EIO;
11931 	}
11932 
11933 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
11934 
11935 	return rv;
11936 }
11937 
11938 /*
11939  * wm_sgmii_writereg:	[mii interface function]
11940  *
11941  *	Write a PHY register on the SGMII.
11942  * This could be handled by the PHY layer if we didn't have to lock the
11943  * resource ...
11944  */
11945 static int
11946 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
11947 {
11948 	struct wm_softc *sc = device_private(dev);
11949 	int rv;
11950 
11951 	if (sc->phy.acquire(sc) != 0) {
11952 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11953 		return -1;
11954 	}
11955 
11956 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
11957 
11958 	sc->phy.release(sc);
11959 
11960 	return rv;
11961 }
11962 
11963 static int
11964 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
11965 {
11966 	struct wm_softc *sc = device_private(dev);
11967 	uint32_t i2ccmd;
11968 	uint16_t swapdata;
11969 	int rv = 0;
11970 	int i;
11971 
11972 	/* Swap the data bytes for the I2C interface */
11973 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
11974 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
11975 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
11976 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
11977 
11978 	/* Poll the ready bit */
11979 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
11980 		delay(50);
11981 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
11982 		if (i2ccmd & I2CCMD_READY)
11983 			break;
11984 	}
11985 	if ((i2ccmd & I2CCMD_READY) == 0) {
11986 		device_printf(dev, "I2CCMD Write did not complete\n");
11987 		rv = ETIMEDOUT;
11988 	}
11989 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
11990 		device_printf(dev, "I2CCMD Error bit set\n");
11991 		rv = EIO;
11992 	}
11993 
11994 	return rv;
11995 }
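
/*
 * The byte swaps above and in wm_sgmii_readreg_locked() exist because
 * the data bytes travel in the opposite order on the I2C interface.  A
 * worked example for a register value of 0x1234:
 *
 *	swapdata = ((0x1234 >> 8) & 0x00FF) | ((0x1234 << 8) & 0xFF00)
 *	         = 0x0012 | 0x3400
 *	         = 0x3412
 */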
11996 
11997 /* TBI related */
11998 
11999 static bool
12000 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
12001 {
12002 	bool sig;
12003 
12004 	sig = ctrl & CTRL_SWDPIN(1);
12005 
12006 	/*
12007 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
12008 	 * detect a signal, 1 if they don't.
12009 	 */
12010 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
12011 		sig = !sig;
12012 
12013 	return sig;
12014 }
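
/*
 * A note on the polarity handling above: on most chips a set
 * CTRL_SWDPIN(1) means "signal present", but on the 82543 and 82544 the
 * pin reads 0 when the optics see a signal, hence the inversion.
 */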
12015 
12016 /*
12017  * wm_tbi_mediainit:
12018  *
12019  *	Initialize media for use on 1000BASE-X devices.
12020  */
12021 static void
12022 wm_tbi_mediainit(struct wm_softc *sc)
12023 {
12024 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
12025 	const char *sep = "";
12026 
12027 	if (sc->sc_type < WM_T_82543)
12028 		sc->sc_tipg = TIPG_WM_DFLT;
12029 	else
12030 		sc->sc_tipg = TIPG_LG_DFLT;
12031 
12032 	sc->sc_tbi_serdes_anegticks = 5;
12033 
12034 	/* Initialize our media structures */
12035 	sc->sc_mii.mii_ifp = ifp;
12036 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
12037 
12038 	ifp->if_baudrate = IF_Gbps(1);
12039 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
12040 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
12041 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
12042 		    wm_serdes_mediachange, wm_serdes_mediastatus,
12043 		    sc->sc_core_lock);
12044 	} else {
12045 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
12046 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
12047 	}
12048 
12049 	/*
12050 	 * SWD Pins:
12051 	 *
12052 	 *	0 = Link LED (output)
12053 	 *	1 = Loss Of Signal (input)
12054 	 */
12055 	sc->sc_ctrl |= CTRL_SWDPIO(0);
12056 
12057 	/* XXX Perhaps this is only for TBI */
12058 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
12059 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
12060 
12061 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
12062 		sc->sc_ctrl &= ~CTRL_LRST;
12063 
12064 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12065 
12066 #define	ADD(ss, mm, dd)							\
12067 do {									\
12068 	aprint_normal("%s%s", sep, ss);					\
12069 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
12070 	sep = ", ";							\
12071 } while (/*CONSTCOND*/0)
12072 
12073 	aprint_normal_dev(sc->sc_dev, "");
12074 
12075 	if (sc->sc_type == WM_T_I354) {
12076 		uint32_t status;
12077 
12078 		status = CSR_READ(sc, WMREG_STATUS);
12079 		if (((status & STATUS_2P5_SKU) != 0)
12080 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
12081 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
12082 		} else
12083 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
12084 	} else if (sc->sc_type == WM_T_82545) {
12085 		/* Only 82545 is LX (XXX except SFP) */
12086 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
12087 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
12088 	} else if (sc->sc_sfptype != 0) {
12089 		/* XXX wm(4) fiber/serdes don't use ifm_data */
12090 		switch (sc->sc_sfptype) {
12091 		default:
12092 		case SFF_SFP_ETH_FLAGS_1000SX:
12093 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
12094 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
12095 			break;
12096 		case SFF_SFP_ETH_FLAGS_1000LX:
12097 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
12098 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
12099 			break;
12100 		case SFF_SFP_ETH_FLAGS_1000CX:
12101 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
12102 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
12103 			break;
12104 		case SFF_SFP_ETH_FLAGS_1000T:
12105 			ADD("1000baseT", IFM_1000_T, 0);
12106 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
12107 			break;
12108 		case SFF_SFP_ETH_FLAGS_100FX:
12109 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
12110 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
12111 			break;
12112 		}
12113 	} else {
12114 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
12115 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
12116 	}
12117 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
12118 	aprint_normal("\n");
12119 
12120 #undef ADD
12121 
12122 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
12123 }
12124 
12125 /*
12126  * wm_tbi_mediachange:	[ifmedia interface function]
12127  *
12128  *	Set hardware to newly-selected media on a 1000BASE-X device.
12129  */
12130 static int
12131 wm_tbi_mediachange(struct ifnet *ifp)
12132 {
12133 	struct wm_softc *sc = ifp->if_softc;
12134 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
12135 	uint32_t status, ctrl;
12136 	bool signal;
12137 	int i;
12138 
12139 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
12140 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
12141 		/* XXX need some work for >= 82571 and < 82575 */
12142 		if (sc->sc_type < WM_T_82575)
12143 			return 0;
12144 	}
12145 
12146 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
12147 	    || (sc->sc_type >= WM_T_82575))
12148 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
12149 
12150 	sc->sc_ctrl &= ~CTRL_LRST;
12151 	sc->sc_txcw = TXCW_ANE;
12152 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
12153 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
12154 	else if (ife->ifm_media & IFM_FDX)
12155 		sc->sc_txcw |= TXCW_FD;
12156 	else
12157 		sc->sc_txcw |= TXCW_HD;
12158 
12159 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
12160 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
12161 
12162 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
12163 		device_xname(sc->sc_dev), sc->sc_txcw));
12164 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
12165 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12166 	CSR_WRITE_FLUSH(sc);
12167 	delay(1000);
12168 
12169 	ctrl = CSR_READ(sc, WMREG_CTRL);
12170 	signal = wm_tbi_havesignal(sc, ctrl);
12171 
12172 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
12173 		signal));
12174 
12175 	if (signal) {
12176 		/* Have signal; wait for the link to come up. */
12177 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
12178 			delay(10000);
12179 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
12180 				break;
12181 		}
12182 
12183 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
12184 			device_xname(sc->sc_dev), i));
12185 
12186 		status = CSR_READ(sc, WMREG_STATUS);
12187 		DPRINTF(WM_DEBUG_LINK,
12188 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
12189 			device_xname(sc->sc_dev), status, STATUS_LU));
12190 		if (status & STATUS_LU) {
12191 			/* Link is up. */
12192 			DPRINTF(WM_DEBUG_LINK,
12193 			    ("%s: LINK: set media -> link up %s\n",
12194 				device_xname(sc->sc_dev),
12195 				(status & STATUS_FD) ? "FDX" : "HDX"));
12196 
12197 			/*
12198 			 * NOTE: the MAC updates TFCE and RFCE in CTRL
12199 			 * automatically, so refresh the cached sc->sc_ctrl.
12200 			 */
12201 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
12202 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
12203 			sc->sc_fcrtl &= ~FCRTL_XONE;
12204 			if (status & STATUS_FD)
12205 				sc->sc_tctl |=
12206 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
12207 			else
12208 				sc->sc_tctl |=
12209 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
12210 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
12211 				sc->sc_fcrtl |= FCRTL_XONE;
12212 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
12213 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
12214 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
12215 			sc->sc_tbi_linkup = 1;
12216 		} else {
12217 			if (i == WM_LINKUP_TIMEOUT)
12218 				wm_check_for_link(sc);
12219 			/* Link is down. */
12220 			DPRINTF(WM_DEBUG_LINK,
12221 			    ("%s: LINK: set media -> link down\n",
12222 				device_xname(sc->sc_dev)));
12223 			sc->sc_tbi_linkup = 0;
12224 		}
12225 	} else {
12226 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
12227 			device_xname(sc->sc_dev)));
12228 		sc->sc_tbi_linkup = 0;
12229 	}
12230 
12231 	wm_tbi_serdes_set_linkled(sc);
12232 
12233 	return 0;
12234 }
12235 
12236 /*
12237  * wm_tbi_mediastatus:	[ifmedia interface function]
12238  *
12239  *	Get the current interface media status on a 1000BASE-X device.
12240  */
12241 static void
12242 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
12243 {
12244 	struct wm_softc *sc = ifp->if_softc;
12245 	uint32_t ctrl, status;
12246 
12247 	ifmr->ifm_status = IFM_AVALID;
12248 	ifmr->ifm_active = IFM_ETHER;
12249 
12250 	status = CSR_READ(sc, WMREG_STATUS);
12251 	if ((status & STATUS_LU) == 0) {
12252 		ifmr->ifm_active |= IFM_NONE;
12253 		return;
12254 	}
12255 
12256 	ifmr->ifm_status |= IFM_ACTIVE;
12257 	/* Only 82545 is LX */
12258 	if (sc->sc_type == WM_T_82545)
12259 		ifmr->ifm_active |= IFM_1000_LX;
12260 	else
12261 		ifmr->ifm_active |= IFM_1000_SX;
12262 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
12263 		ifmr->ifm_active |= IFM_FDX;
12264 	else
12265 		ifmr->ifm_active |= IFM_HDX;
12266 	ctrl = CSR_READ(sc, WMREG_CTRL);
12267 	if (ctrl & CTRL_RFCE)
12268 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
12269 	if (ctrl & CTRL_TFCE)
12270 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
12271 }
12272 
12273 /* XXX TBI only */
12274 static int
12275 wm_check_for_link(struct wm_softc *sc)
12276 {
12277 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
12278 	uint32_t rxcw;
12279 	uint32_t ctrl;
12280 	uint32_t status;
12281 	bool signal;
12282 
12283 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
12284 		device_xname(sc->sc_dev), __func__));
12285 
12286 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
12287 		/* XXX need some work for >= 82571 */
12288 		if (sc->sc_type >= WM_T_82571) {
12289 			sc->sc_tbi_linkup = 1;
12290 			return 0;
12291 		}
12292 	}
12293 
12294 	rxcw = CSR_READ(sc, WMREG_RXCW);
12295 	ctrl = CSR_READ(sc, WMREG_CTRL);
12296 	status = CSR_READ(sc, WMREG_STATUS);
12297 	signal = wm_tbi_havesignal(sc, ctrl);
12298 
12299 	DPRINTF(WM_DEBUG_LINK,
12300 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
12301 		device_xname(sc->sc_dev), __func__, signal,
12302 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
12303 
12304 	/*
12305 	 * SWDPIN   LU RXCW
12306 	 *	0    0	  0
12307 	 *	0    0	  1	(should not happen)
12308 	 *	0    1	  0	(should not happen)
12309 	 *	0    1	  1	(should not happen)
12310 	 *	1    0	  0	Disable autonego and force linkup
12311 	 *	1    0	  1	got /C/ but not linkup yet
12312 	 *	1    1	  0	(linkup)
12313 	 *	1    1	  1	If IFM_AUTO, back to autonego
12314 	 *
12315 	 */
12316 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
12317 		DPRINTF(WM_DEBUG_LINK,
12318 		    ("%s: %s: force linkup and fullduplex\n",
12319 			device_xname(sc->sc_dev), __func__));
12320 		sc->sc_tbi_linkup = 0;
12321 		/* Disable auto-negotiation in the TXCW register */
12322 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
12323 
12324 		/*
12325 		 * Force link-up and also force full-duplex.
12326 		 *
		 * NOTE: the hardware has updated TFCE and RFCE in CTRL
		 * automatically, so propagate them into the cached
		 * sc->sc_ctrl as well.
12329 		 */
12330 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
12331 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12332 	} else if (((status & STATUS_LU) != 0)
12333 	    && ((rxcw & RXCW_C) != 0)
12334 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
12335 		sc->sc_tbi_linkup = 1;
12336 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
12337 			device_xname(sc->sc_dev),
12338 			__func__));
12339 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
12340 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
12341 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
12342 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/",
12343 			device_xname(sc->sc_dev), __func__));
12344 	} else {
12345 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
12346 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
12347 			status));
12348 	}
12349 
12350 	return 0;
12351 }
12352 
12353 /*
12354  * wm_tbi_tick:
12355  *
12356  *	Check the link on TBI devices.
12357  *	This function acts as mii_tick().
12358  */
12359 static void
12360 wm_tbi_tick(struct wm_softc *sc)
12361 {
12362 	struct mii_data *mii = &sc->sc_mii;
12363 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
12364 	uint32_t status;
12365 
12366 	KASSERT(WM_CORE_LOCKED(sc));
12367 
12368 	status = CSR_READ(sc, WMREG_STATUS);
12369 
12370 	/* XXX is this needed? */
12371 	(void)CSR_READ(sc, WMREG_RXCW);
12372 	(void)CSR_READ(sc, WMREG_CTRL);
12373 
12374 	/* set link status */
12375 	if ((status & STATUS_LU) == 0) {
12376 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
12377 			device_xname(sc->sc_dev)));
12378 		sc->sc_tbi_linkup = 0;
12379 	} else if (sc->sc_tbi_linkup == 0) {
12380 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
12381 			device_xname(sc->sc_dev),
12382 			(status & STATUS_FD) ? "FDX" : "HDX"));
12383 		sc->sc_tbi_linkup = 1;
12384 		sc->sc_tbi_serdes_ticks = 0;
12385 	}
12386 
12387 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
12388 		goto setled;
12389 
12390 	if ((status & STATUS_LU) == 0) {
12391 		sc->sc_tbi_linkup = 0;
12392 		/* If the timer expired, retry autonegotiation */
12393 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
12394 		    && (++sc->sc_tbi_serdes_ticks
12395 			>= sc->sc_tbi_serdes_anegticks)) {
12396 			DPRINTF(WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
12397 				device_xname(sc->sc_dev), __func__));
12398 			sc->sc_tbi_serdes_ticks = 0;
12399 			/*
12400 			 * Reset the link, and let autonegotiation do
12401 			 * its thing
12402 			 */
12403 			sc->sc_ctrl |= CTRL_LRST;
12404 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12405 			CSR_WRITE_FLUSH(sc);
12406 			delay(1000);
12407 			sc->sc_ctrl &= ~CTRL_LRST;
12408 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12409 			CSR_WRITE_FLUSH(sc);
12410 			delay(1000);
12411 			CSR_WRITE(sc, WMREG_TXCW,
12412 			    sc->sc_txcw & ~TXCW_ANE);
12413 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
12414 		}
12415 	}
12416 
12417 setled:
12418 	wm_tbi_serdes_set_linkled(sc);
12419 }
12420 
12421 /* SERDES related */
12422 static void
12423 wm_serdes_power_up_link_82575(struct wm_softc *sc)
12424 {
12425 	uint32_t reg;
12426 
12427 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
12428 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
12429 		return;
12430 
12431 	/* Enable PCS to turn on link */
12432 	reg = CSR_READ(sc, WMREG_PCS_CFG);
12433 	reg |= PCS_CFG_PCS_EN;
12434 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
12435 
12436 	/* Power up the laser */
12437 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
12438 	reg &= ~CTRL_EXT_SWDPIN(3);
12439 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12440 
12441 	/* Flush the write to verify completion */
12442 	CSR_WRITE_FLUSH(sc);
12443 	delay(1000);
12444 }
12445 
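/*
 * wm_serdes_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media on a SERDES device.
 */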
12446 static int
12447 wm_serdes_mediachange(struct ifnet *ifp)
12448 {
12449 	struct wm_softc *sc = ifp->if_softc;
12450 	bool pcs_autoneg = true; /* XXX */
12451 	uint32_t ctrl_ext, pcs_lctl, reg;
12452 
12453 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
12454 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
12455 		return 0;
12456 
12457 	/* XXX Currently, this function is not called on 8257[12] */
12458 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
12459 	    || (sc->sc_type >= WM_T_82575))
12460 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
12461 
	/* Power on the SFP cage if present */
12463 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
12464 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
12465 	ctrl_ext |= CTRL_EXT_I2C_ENA;
12466 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
12467 
12468 	sc->sc_ctrl |= CTRL_SLU;
12469 
12470 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
12471 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
12472 
12473 		reg = CSR_READ(sc, WMREG_CONNSW);
12474 		reg |= CONNSW_ENRGSRC;
12475 		CSR_WRITE(sc, WMREG_CONNSW, reg);
12476 	}
12477 
12478 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
12479 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
12480 	case CTRL_EXT_LINK_MODE_SGMII:
		/* SGMII mode lets the PHY handle forcing speed/duplex */
12482 		pcs_autoneg = true;
		/* The autoneg timeout should be disabled in SGMII mode */
12484 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
12485 		break;
12486 	case CTRL_EXT_LINK_MODE_1000KX:
12487 		pcs_autoneg = false;
12488 		/* FALLTHROUGH */
12489 	default:
12490 		if ((sc->sc_type == WM_T_82575)
12491 		    || (sc->sc_type == WM_T_82576)) {
12492 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
12493 				pcs_autoneg = false;
12494 		}
12495 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
12496 		    | CTRL_FRCFDX;
12497 
12498 		/* Set speed of 1000/Full if speed/duplex is forced */
12499 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
12500 	}
12501 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12502 
12503 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
12504 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
12505 
12506 	if (pcs_autoneg) {
12507 		/* Set PCS register for autoneg */
12508 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
12509 
12510 		/* Disable force flow control for autoneg */
12511 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
12512 
12513 		/* Configure flow control advertisement for autoneg */
12514 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
12515 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
12516 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
12517 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
12518 	} else
12519 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
12520 
12521 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
12522 
12523 	return 0;
12524 }
12525 
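/*
 * wm_serdes_mediastatus:	[ifmedia interface function]
 *
 *	Get the current interface media status on a SERDES device.
 */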
12526 static void
12527 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
12528 {
12529 	struct wm_softc *sc = ifp->if_softc;
12530 	struct mii_data *mii = &sc->sc_mii;
12531 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
12532 	uint32_t pcs_adv, pcs_lpab, reg;
12533 
12534 	ifmr->ifm_status = IFM_AVALID;
12535 	ifmr->ifm_active = IFM_ETHER;
12536 
12537 	/* Check PCS */
12538 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
12539 	if ((reg & PCS_LSTS_LINKOK) == 0) {
12540 		ifmr->ifm_active |= IFM_NONE;
12541 		sc->sc_tbi_linkup = 0;
12542 		goto setled;
12543 	}
12544 
12545 	sc->sc_tbi_linkup = 1;
12546 	ifmr->ifm_status |= IFM_ACTIVE;
12547 	if (sc->sc_type == WM_T_I354) {
12548 		uint32_t status;
12549 
12550 		status = CSR_READ(sc, WMREG_STATUS);
12551 		if (((status & STATUS_2P5_SKU) != 0)
12552 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
12553 			ifmr->ifm_active |= IFM_2500_KX;
12554 		} else
12555 			ifmr->ifm_active |= IFM_1000_KX;
12556 	} else {
12557 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
12558 		case PCS_LSTS_SPEED_10:
12559 			ifmr->ifm_active |= IFM_10_T; /* XXX */
12560 			break;
12561 		case PCS_LSTS_SPEED_100:
12562 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
12563 			break;
12564 		case PCS_LSTS_SPEED_1000:
12565 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
12566 			break;
12567 		default:
12568 			device_printf(sc->sc_dev, "Unknown speed\n");
12569 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
12570 			break;
12571 		}
12572 	}
12573 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
12574 	if ((reg & PCS_LSTS_FDX) != 0)
12575 		ifmr->ifm_active |= IFM_FDX;
12576 	else
12577 		ifmr->ifm_active |= IFM_HDX;
12578 	mii->mii_media_active &= ~IFM_ETH_FMASK;
12579 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
12580 		/* Check flow */
12581 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
12582 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
12583 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
12584 			goto setled;
12585 		}
12586 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
12587 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
12588 		DPRINTF(WM_DEBUG_LINK,
12589 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
12590 		if ((pcs_adv & TXCW_SYM_PAUSE)
12591 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
12592 			mii->mii_media_active |= IFM_FLOW
12593 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
12594 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
12595 		    && (pcs_adv & TXCW_ASYM_PAUSE)
12596 		    && (pcs_lpab & TXCW_SYM_PAUSE)
12597 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
12598 			mii->mii_media_active |= IFM_FLOW
12599 			    | IFM_ETH_TXPAUSE;
12600 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
12601 		    && (pcs_adv & TXCW_ASYM_PAUSE)
12602 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
12603 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
12604 			mii->mii_media_active |= IFM_FLOW
12605 			    | IFM_ETH_RXPAUSE;
12606 		}
12607 	}
12608 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
12609 	    | (mii->mii_media_active & IFM_ETH_FMASK);
12610 setled:
12611 	wm_tbi_serdes_set_linkled(sc);
12612 }
12613 
12614 /*
12615  * wm_serdes_tick:
12616  *
12617  *	Check the link on serdes devices.
12618  */
12619 static void
12620 wm_serdes_tick(struct wm_softc *sc)
12621 {
12622 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
12623 	struct mii_data *mii = &sc->sc_mii;
12624 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
12625 	uint32_t reg;
12626 
12627 	KASSERT(WM_CORE_LOCKED(sc));
12628 
12629 	mii->mii_media_status = IFM_AVALID;
12630 	mii->mii_media_active = IFM_ETHER;
12631 
12632 	/* Check PCS */
12633 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
12634 	if ((reg & PCS_LSTS_LINKOK) != 0) {
12635 		mii->mii_media_status |= IFM_ACTIVE;
12636 		sc->sc_tbi_linkup = 1;
12637 		sc->sc_tbi_serdes_ticks = 0;
12638 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
12639 		if ((reg & PCS_LSTS_FDX) != 0)
12640 			mii->mii_media_active |= IFM_FDX;
12641 		else
12642 			mii->mii_media_active |= IFM_HDX;
12643 	} else {
12644 		mii->mii_media_status |= IFM_NONE;
12645 		sc->sc_tbi_linkup = 0;
12646 		/* If the timer expired, retry autonegotiation */
12647 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
12648 		    && (++sc->sc_tbi_serdes_ticks
12649 			>= sc->sc_tbi_serdes_anegticks)) {
12650 			DPRINTF(WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
12651 				device_xname(sc->sc_dev), __func__));
12652 			sc->sc_tbi_serdes_ticks = 0;
12653 			/* XXX */
12654 			wm_serdes_mediachange(ifp);
12655 		}
12656 	}
12657 
12658 	wm_tbi_serdes_set_linkled(sc);
12659 }
12660 
12661 /* SFP related */
12662 
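/*
 * wm_sfp_read_data_byte:
 *
 *	Read one byte of SFP module data through the I2CCMD register:
 *	issue a read of "offset", poll for I2CCMD_READY and return the
 *	low byte of the result.  Returns 0 on success, -1 on timeout or
 *	I2C error.
 */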
12663 static int
12664 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
12665 {
12666 	uint32_t i2ccmd;
12667 	int i;
12668 
12669 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
12670 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
12671 
12672 	/* Poll the ready bit */
12673 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
12674 		delay(50);
12675 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
12676 		if (i2ccmd & I2CCMD_READY)
12677 			break;
12678 	}
12679 	if ((i2ccmd & I2CCMD_READY) == 0)
12680 		return -1;
12681 	if ((i2ccmd & I2CCMD_ERROR) != 0)
12682 		return -1;
12683 
12684 	*data = i2ccmd & 0x00ff;
12685 
12686 	return 0;
12687 }
12688 
12689 static uint32_t
12690 wm_sfp_get_media_type(struct wm_softc *sc)
12691 {
12692 	uint32_t ctrl_ext;
12693 	uint8_t val = 0;
12694 	int timeout = 3;
12695 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
12696 	int rv = -1;
12697 
12698 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
12699 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
12700 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
12701 	CSR_WRITE_FLUSH(sc);
12702 
12703 	/* Read SFP module data */
12704 	while (timeout) {
12705 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
12706 		if (rv == 0)
12707 			break;
12708 		delay(100*1000); /* XXX too big */
12709 		timeout--;
12710 	}
12711 	if (rv != 0)
12712 		goto out;
12713 
12714 	switch (val) {
12715 	case SFF_SFP_ID_SFF:
12716 		aprint_normal_dev(sc->sc_dev,
12717 		    "Module/Connector soldered to board\n");
12718 		break;
12719 	case SFF_SFP_ID_SFP:
12720 		sc->sc_flags |= WM_F_SFP;
12721 		break;
12722 	case SFF_SFP_ID_UNKNOWN:
12723 		goto out;
12724 	default:
12725 		break;
12726 	}
12727 
12728 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
12729 	if (rv != 0)
12730 		goto out;
12731 
12732 	sc->sc_sfptype = val;
12733 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
12734 		mediatype = WM_MEDIATYPE_SERDES;
12735 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
12736 		sc->sc_flags |= WM_F_SGMII;
12737 		mediatype = WM_MEDIATYPE_COPPER;
12738 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
12739 		sc->sc_flags |= WM_F_SGMII;
12740 		mediatype = WM_MEDIATYPE_SERDES;
12741 	} else {
12742 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
12743 		    __func__, sc->sc_sfptype);
12744 		sc->sc_sfptype = 0; /* XXX unknown */
12745 	}
12746 
12747 out:
12748 	/* Restore I2C interface setting */
12749 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
12750 
12751 	return mediatype;
12752 }
12753 
12754 /*
12755  * NVM related.
 * Microwire, SPI (with or without EERD) and Flash.
12757  */
12758 
12759 /* Both spi and uwire */
12760 
12761 /*
12762  * wm_eeprom_sendbits:
12763  *
12764  *	Send a series of bits to the EEPROM.
12765  */
12766 static void
12767 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
12768 {
12769 	uint32_t reg;
12770 	int x;
12771 
12772 	reg = CSR_READ(sc, WMREG_EECD);
12773 
12774 	for (x = nbits; x > 0; x--) {
12775 		if (bits & (1U << (x - 1)))
12776 			reg |= EECD_DI;
12777 		else
12778 			reg &= ~EECD_DI;
12779 		CSR_WRITE(sc, WMREG_EECD, reg);
12780 		CSR_WRITE_FLUSH(sc);
12781 		delay(2);
12782 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
12783 		CSR_WRITE_FLUSH(sc);
12784 		delay(2);
12785 		CSR_WRITE(sc, WMREG_EECD, reg);
12786 		CSR_WRITE_FLUSH(sc);
12787 		delay(2);
12788 	}
12789 }
12790 
12791 /*
12792  * wm_eeprom_recvbits:
12793  *
12794  *	Receive a series of bits from the EEPROM.
12795  */
12796 static void
12797 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
12798 {
12799 	uint32_t reg, val;
12800 	int x;
12801 
12802 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
12803 
12804 	val = 0;
12805 	for (x = nbits; x > 0; x--) {
12806 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
12807 		CSR_WRITE_FLUSH(sc);
12808 		delay(2);
12809 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
12810 			val |= (1U << (x - 1));
12811 		CSR_WRITE(sc, WMREG_EECD, reg);
12812 		CSR_WRITE_FLUSH(sc);
12813 		delay(2);
12814 	}
12815 	*valp = val;
12816 }
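
/*
 * Illustrative example (not driver code): with the two helpers above, a
 * Microwire READ of word 5 on a part with 6 address bits amounts to
 *
 *	wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
 *	wm_eeprom_sendbits(sc, 5, 6);
 *	wm_eeprom_recvbits(sc, &val, 16);
 *
 * which is the sequence wm_nvm_read_uwire() below performs for each
 * word.  Bits are shifted MSB first; each bit cell raises and lowers
 * EECD_SK with ~2us of delay around the clock edges.
 */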
12817 
12818 /* Microwire */
12819 
12820 /*
12821  * wm_nvm_read_uwire:
12822  *
12823  *	Read a word from the EEPROM using the MicroWire protocol.
12824  */
12825 static int
12826 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
12827 {
12828 	uint32_t reg, val;
12829 	int i;
12830 
12831 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
12832 		device_xname(sc->sc_dev), __func__));
12833 
12834 	if (sc->nvm.acquire(sc) != 0)
12835 		return -1;
12836 
12837 	for (i = 0; i < wordcnt; i++) {
12838 		/* Clear SK and DI. */
12839 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
12840 		CSR_WRITE(sc, WMREG_EECD, reg);
12841 
12842 		/*
12843 		 * XXX: workaround for a bug in qemu-0.12.x and prior
12844 		 * and Xen.
12845 		 *
		 * We use this workaround only for the 82540 because qemu's
		 * e1000 acts as an 82540.
12848 		 */
12849 		if (sc->sc_type == WM_T_82540) {
12850 			reg |= EECD_SK;
12851 			CSR_WRITE(sc, WMREG_EECD, reg);
12852 			reg &= ~EECD_SK;
12853 			CSR_WRITE(sc, WMREG_EECD, reg);
12854 			CSR_WRITE_FLUSH(sc);
12855 			delay(2);
12856 		}
12857 		/* XXX: end of workaround */
12858 
12859 		/* Set CHIP SELECT. */
12860 		reg |= EECD_CS;
12861 		CSR_WRITE(sc, WMREG_EECD, reg);
12862 		CSR_WRITE_FLUSH(sc);
12863 		delay(2);
12864 
12865 		/* Shift in the READ command. */
12866 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
12867 
12868 		/* Shift in address. */
12869 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
12870 
12871 		/* Shift out the data. */
12872 		wm_eeprom_recvbits(sc, &val, 16);
12873 		data[i] = val & 0xffff;
12874 
12875 		/* Clear CHIP SELECT. */
12876 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
12877 		CSR_WRITE(sc, WMREG_EECD, reg);
12878 		CSR_WRITE_FLUSH(sc);
12879 		delay(2);
12880 	}
12881 
12882 	sc->nvm.release(sc);
12883 	return 0;
12884 }
12885 
12886 /* SPI */
12887 
12888 /*
12889  * Set SPI and FLASH related information from the EECD register.
12890  * For 82541 and 82547, the word size is taken from EEPROM.
12891  */
12892 static int
12893 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
12894 {
12895 	int size;
12896 	uint32_t reg;
12897 	uint16_t data;
12898 
12899 	reg = CSR_READ(sc, WMREG_EECD);
12900 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
12901 
12902 	/* Read the size of NVM from EECD by default */
12903 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
12904 	switch (sc->sc_type) {
12905 	case WM_T_82541:
12906 	case WM_T_82541_2:
12907 	case WM_T_82547:
12908 	case WM_T_82547_2:
12909 		/* Set dummy value to access EEPROM */
12910 		sc->sc_nvm_wordsize = 64;
12911 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
12912 			aprint_error_dev(sc->sc_dev,
12913 			    "%s: failed to read EEPROM size\n", __func__);
12914 		}
12915 		reg = data;
12916 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
12917 		if (size == 0)
12918 			size = 6; /* 64 word size */
12919 		else
12920 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
12921 		break;
12922 	case WM_T_80003:
12923 	case WM_T_82571:
12924 	case WM_T_82572:
12925 	case WM_T_82573: /* SPI case */
12926 	case WM_T_82574: /* SPI case */
12927 	case WM_T_82583: /* SPI case */
12928 		size += NVM_WORD_SIZE_BASE_SHIFT;
12929 		if (size > 14)
12930 			size = 14;
12931 		break;
12932 	case WM_T_82575:
12933 	case WM_T_82576:
12934 	case WM_T_82580:
12935 	case WM_T_I350:
12936 	case WM_T_I354:
12937 	case WM_T_I210:
12938 	case WM_T_I211:
12939 		size += NVM_WORD_SIZE_BASE_SHIFT;
12940 		if (size > 15)
12941 			size = 15;
12942 		break;
12943 	default:
12944 		aprint_error_dev(sc->sc_dev,
12945 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
12946 		return -1;
12947 		break;
12948 	}
12949 
12950 	sc->sc_nvm_wordsize = 1 << size;
12951 
12952 	return 0;
12953 }
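
/*
 * Worked example (illustrative, assuming NVM_WORD_SIZE_BASE_SHIFT is 6
 * as in the shared e1000 code): an 82571 reporting EECD_EE_SIZE_EX = 2
 * gives size = 2 + 6 = 8, so sc_nvm_wordsize = 1 << 8 = 256 words; the
 * clamps above (14 or 15) cap the NVM at 16K or 32K words.
 */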
12954 
12955 /*
12956  * wm_nvm_ready_spi:
12957  *
12958  *	Wait for a SPI EEPROM to be ready for commands.
12959  */
12960 static int
12961 wm_nvm_ready_spi(struct wm_softc *sc)
12962 {
12963 	uint32_t val;
12964 	int usec;
12965 
12966 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
12967 		device_xname(sc->sc_dev), __func__));
12968 
12969 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
12970 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
12971 		wm_eeprom_recvbits(sc, &val, 8);
12972 		if ((val & SPI_SR_RDY) == 0)
12973 			break;
12974 	}
12975 	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
12977 		return -1;
12978 	}
12979 	return 0;
12980 }
12981 
12982 /*
12983  * wm_nvm_read_spi:
12984  *
 *	Read a word from the EEPROM using the SPI protocol.
12986  */
12987 static int
12988 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
12989 {
12990 	uint32_t reg, val;
12991 	int i;
12992 	uint8_t opc;
12993 	int rv = 0;
12994 
12995 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
12996 		device_xname(sc->sc_dev), __func__));
12997 
12998 	if (sc->nvm.acquire(sc) != 0)
12999 		return -1;
13000 
13001 	/* Clear SK and CS. */
13002 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
13003 	CSR_WRITE(sc, WMREG_EECD, reg);
13004 	CSR_WRITE_FLUSH(sc);
13005 	delay(2);
13006 
13007 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
13008 		goto out;
13009 
13010 	/* Toggle CS to flush commands. */
13011 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
13012 	CSR_WRITE_FLUSH(sc);
13013 	delay(2);
13014 	CSR_WRITE(sc, WMREG_EECD, reg);
13015 	CSR_WRITE_FLUSH(sc);
13016 	delay(2);
13017 
13018 	opc = SPI_OPC_READ;
13019 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
13020 		opc |= SPI_OPC_A8;
13021 
13022 	wm_eeprom_sendbits(sc, opc, 8);
13023 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
13024 
13025 	for (i = 0; i < wordcnt; i++) {
13026 		wm_eeprom_recvbits(sc, &val, 16);
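		/*
		 * wm_eeprom_recvbits() shifts the bits in MSB first, which
		 * leaves the two bytes swapped relative to the EEPROM's
		 * little-endian word layout; swap them back.
		 */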
13027 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
13028 	}
13029 
13030 	/* Raise CS and clear SK. */
13031 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
13032 	CSR_WRITE(sc, WMREG_EECD, reg);
13033 	CSR_WRITE_FLUSH(sc);
13034 	delay(2);
13035 
13036 out:
13037 	sc->nvm.release(sc);
13038 	return rv;
13039 }
13040 
/* Using EERD */
13042 
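/*
 * wm_poll_eerd_eewr_done:
 *
 *	Poll the EERD (read) or EEWR (write) register ("rw") until the
 *	DONE bit is set, waiting up to 100000 * 5us, i.e. about half a
 *	second.  Returns 0 on success and -1 on timeout.
 */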
13043 static int
13044 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
13045 {
13046 	uint32_t attempts = 100000;
13047 	uint32_t i, reg = 0;
13048 	int32_t done = -1;
13049 
13050 	for (i = 0; i < attempts; i++) {
13051 		reg = CSR_READ(sc, rw);
13052 
13053 		if (reg & EERD_DONE) {
13054 			done = 0;
13055 			break;
13056 		}
13057 		delay(5);
13058 	}
13059 
13060 	return done;
13061 }
13062 
13063 static int
13064 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
13065 {
13066 	int i, eerd = 0;
13067 	int rv = 0;
13068 
13069 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
13070 		device_xname(sc->sc_dev), __func__));
13071 
13072 	if (sc->nvm.acquire(sc) != 0)
13073 		return -1;
13074 
13075 	for (i = 0; i < wordcnt; i++) {
13076 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
13077 		CSR_WRITE(sc, WMREG_EERD, eerd);
13078 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
13079 		if (rv != 0) {
13080 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
13081 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
13082 			break;
13083 		}
13084 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
13085 	}
13086 
13087 	sc->nvm.release(sc);
13088 	return rv;
13089 }
13090 
13091 /* Flash */
13092 
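/*
 * wm_nvm_valid_bank_detect_ich8lan:
 *
 *	ICH8 and newer parts keep two copies (banks) of the NVM image in
 *	flash.  Find the valid bank, either from the EECD_SEC1VAL hint or
 *	by testing each bank's signature byte, and return its index in
 *	"bank".  Returns 0 on success, -1 if no valid bank is found.
 */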
13093 static int
13094 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
13095 {
13096 	uint32_t eecd;
13097 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
13098 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
13099 	uint32_t nvm_dword = 0;
13100 	uint8_t sig_byte = 0;
13101 	int rv;
13102 
13103 	switch (sc->sc_type) {
13104 	case WM_T_PCH_SPT:
13105 	case WM_T_PCH_CNP:
13106 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
13107 		act_offset = ICH_NVM_SIG_WORD * 2;
13108 
13109 		/* Set bank to 0 in case flash read fails. */
13110 		*bank = 0;
13111 
13112 		/* Check bank 0 */
13113 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
13114 		if (rv != 0)
13115 			return rv;
13116 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
13117 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13118 			*bank = 0;
13119 			return 0;
13120 		}
13121 
13122 		/* Check bank 1 */
		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
		    &nvm_dword);
		if (rv != 0)
			return rv;
13125 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
13126 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13127 			*bank = 1;
13128 			return 0;
13129 		}
13130 		aprint_error_dev(sc->sc_dev,
13131 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
13132 		return -1;
13133 	case WM_T_ICH8:
13134 	case WM_T_ICH9:
13135 		eecd = CSR_READ(sc, WMREG_EECD);
13136 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
13137 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
13138 			return 0;
13139 		}
13140 		/* FALLTHROUGH */
13141 	default:
13142 		/* Default to 0 */
13143 		*bank = 0;
13144 
13145 		/* Check bank 0 */
13146 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
13147 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13148 			*bank = 0;
13149 			return 0;
13150 		}
13151 
13152 		/* Check bank 1 */
13153 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
13154 		    &sig_byte);
13155 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13156 			*bank = 1;
13157 			return 0;
13158 		}
13159 	}
13160 
13161 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
13162 		device_xname(sc->sc_dev)));
13163 	return -1;
13164 }
13165 
13166 /******************************************************************************
13167  * This function does initial flash setup so that a new read/write/erase cycle
13168  * can be started.
13169  *
13170  * sc - The pointer to the hw structure
13171  ****************************************************************************/
13172 static int32_t
13173 wm_ich8_cycle_init(struct wm_softc *sc)
13174 {
13175 	uint16_t hsfsts;
13176 	int32_t error = 1;
13177 	int32_t i     = 0;
13178 
13179 	if (sc->sc_type >= WM_T_PCH_SPT)
13180 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
13181 	else
13182 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
13183 
	/* Check the Flash Descriptor Valid bit in the Hw status */
13185 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
13186 		return error;
13187 
	/* Clear FCERR and DAEL in Hw status by writing a 1 to each */
13190 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
13191 
13192 	if (sc->sc_type >= WM_T_PCH_SPT)
13193 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
13194 	else
13195 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
13196 
	/*
	 * Ideally the hardware would provide either a cycle-in-progress
	 * bit to test before starting a new cycle, or an FDONE bit that
	 * reads as 1 after hardware reset, so that software could tell
	 * whether a cycle is still running or has completed.  A software
	 * semaphore guarding FDONE (or the cycle-in-progress bit) would
	 * also be needed so that two threads cannot start a cycle at the
	 * same time.
	 */
13207 
13208 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
13209 		/*
13210 		 * There is no cycle running at present, so we can start a
13211 		 * cycle
13212 		 */
13213 
13214 		/* Begin by setting Flash Cycle Done. */
13215 		hsfsts |= HSFSTS_DONE;
13216 		if (sc->sc_type >= WM_T_PCH_SPT)
13217 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
13218 			    hsfsts & 0xffffUL);
13219 		else
13220 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
13221 		error = 0;
13222 	} else {
13223 		/*
		 * Otherwise poll for a while so the current cycle has a
13225 		 * chance to end before giving up.
13226 		 */
13227 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
13228 			if (sc->sc_type >= WM_T_PCH_SPT)
13229 				hsfsts = ICH8_FLASH_READ32(sc,
13230 				    ICH_FLASH_HSFSTS) & 0xffffUL;
13231 			else
13232 				hsfsts = ICH8_FLASH_READ16(sc,
13233 				    ICH_FLASH_HSFSTS);
13234 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
13235 				error = 0;
13236 				break;
13237 			}
13238 			delay(1);
13239 		}
13240 		if (error == 0) {
13241 			/*
			 * The previous cycle ended in time; now set the
			 * Flash Cycle Done bit.
13244 			 */
13245 			hsfsts |= HSFSTS_DONE;
13246 			if (sc->sc_type >= WM_T_PCH_SPT)
13247 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
13248 				    hsfsts & 0xffffUL);
13249 			else
13250 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
13251 				    hsfsts);
13252 		}
13253 	}
13254 	return error;
13255 }
13256 
13257 /******************************************************************************
13258  * This function starts a flash cycle and waits for its completion
13259  *
13260  * sc - The pointer to the hw structure
13261  ****************************************************************************/
13262 static int32_t
13263 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
13264 {
13265 	uint16_t hsflctl;
13266 	uint16_t hsfsts;
13267 	int32_t error = 1;
13268 	uint32_t i = 0;
13269 
13270 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
13271 	if (sc->sc_type >= WM_T_PCH_SPT)
13272 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
13273 	else
13274 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
13275 	hsflctl |= HSFCTL_GO;
13276 	if (sc->sc_type >= WM_T_PCH_SPT)
13277 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
13278 		    (uint32_t)hsflctl << 16);
13279 	else
13280 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
13281 
13282 	/* Wait till FDONE bit is set to 1 */
13283 	do {
13284 		if (sc->sc_type >= WM_T_PCH_SPT)
13285 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
13286 			    & 0xffffUL;
13287 		else
13288 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
13289 		if (hsfsts & HSFSTS_DONE)
13290 			break;
13291 		delay(1);
13292 		i++;
13293 	} while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
13295 		error = 0;
13296 
13297 	return error;
13298 }
13299 
13300 /******************************************************************************
13301  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
13302  *
13303  * sc - The pointer to the hw structure
13304  * index - The index of the byte or word to read.
13305  * size - Size of data to read, 1=byte 2=word, 4=dword
13306  * data - Pointer to the word to store the value read.
13307  *****************************************************************************/
13308 static int32_t
13309 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
13310     uint32_t size, uint32_t *data)
13311 {
13312 	uint16_t hsfsts;
13313 	uint16_t hsflctl;
13314 	uint32_t flash_linear_address;
13315 	uint32_t flash_data = 0;
13316 	int32_t error = 1;
13317 	int32_t count = 0;
13318 
	if (size < 1 || size > 4 || data == NULL ||
13320 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
13321 		return error;
13322 
13323 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
13324 	    sc->sc_ich8_flash_base;
13325 
13326 	do {
13327 		delay(1);
13328 		/* Steps */
13329 		error = wm_ich8_cycle_init(sc);
13330 		if (error)
13331 			break;
13332 
13333 		if (sc->sc_type >= WM_T_PCH_SPT)
13334 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
13335 			    >> 16;
13336 		else
13337 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
13338 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
13339 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
13340 		    & HSFCTL_BCOUNT_MASK;
13341 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
13342 		if (sc->sc_type >= WM_T_PCH_SPT) {
13343 			/*
			 * In SPT, this register is in LAN memory space, not
			 * flash.  Therefore, only 32 bit access is supported.
13346 			 */
13347 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
13348 			    (uint32_t)hsflctl << 16);
13349 		} else
13350 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
13351 
13352 		/*
13353 		 * Write the last 24 bits of index into Flash Linear address
13354 		 * field in Flash Address
13355 		 */
13356 		/* TODO: TBD maybe check the index against the size of flash */
13357 
13358 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
13359 
13360 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
13361 
13362 		/*
		 * If FCERR is set, clear it and retry the whole sequence a
		 * few more times; otherwise read the result out of Flash
		 * Data0, least significant byte first.
13367 		 */
13368 		if (error == 0) {
13369 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
13370 			if (size == 1)
13371 				*data = (uint8_t)(flash_data & 0x000000FF);
13372 			else if (size == 2)
13373 				*data = (uint16_t)(flash_data & 0x0000FFFF);
13374 			else if (size == 4)
13375 				*data = (uint32_t)flash_data;
13376 			break;
13377 		} else {
13378 			/*
13379 			 * If we've gotten here, then things are probably
13380 			 * completely hosed, but if the error condition is
13381 			 * detected, it won't hurt to give it another try...
13382 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
13383 			 */
13384 			if (sc->sc_type >= WM_T_PCH_SPT)
13385 				hsfsts = ICH8_FLASH_READ32(sc,
13386 				    ICH_FLASH_HSFSTS) & 0xffffUL;
13387 			else
13388 				hsfsts = ICH8_FLASH_READ16(sc,
13389 				    ICH_FLASH_HSFSTS);
13390 
13391 			if (hsfsts & HSFSTS_ERR) {
13392 				/* Repeat for some time before giving up. */
13393 				continue;
13394 			} else if ((hsfsts & HSFSTS_DONE) == 0)
13395 				break;
13396 		}
13397 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
13398 
13399 	return error;
13400 }
13401 
13402 /******************************************************************************
13403  * Reads a single byte from the NVM using the ICH8 flash access registers.
13404  *
13405  * sc - pointer to wm_hw structure
13406  * index - The index of the byte to read.
13407  * data - Pointer to a byte to store the value read.
13408  *****************************************************************************/
13409 static int32_t
13410 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
13411 {
13412 	int32_t status;
13413 	uint32_t word = 0;
13414 
13415 	status = wm_read_ich8_data(sc, index, 1, &word);
13416 	if (status == 0)
13417 		*data = (uint8_t)word;
13418 	else
13419 		*data = 0;
13420 
13421 	return status;
13422 }
13423 
13424 /******************************************************************************
13425  * Reads a word from the NVM using the ICH8 flash access registers.
13426  *
13427  * sc - pointer to wm_hw structure
13428  * index - The starting byte index of the word to read.
13429  * data - Pointer to a word to store the value read.
13430  *****************************************************************************/
13431 static int32_t
13432 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
13433 {
13434 	int32_t status;
13435 	uint32_t word = 0;
13436 
13437 	status = wm_read_ich8_data(sc, index, 2, &word);
13438 	if (status == 0)
13439 		*data = (uint16_t)word;
13440 	else
13441 		*data = 0;
13442 
13443 	return status;
13444 }
13445 
13446 /******************************************************************************
13447  * Reads a dword from the NVM using the ICH8 flash access registers.
13448  *
13449  * sc - pointer to wm_hw structure
13450  * index - The starting byte index of the word to read.
13451  * data - Pointer to a word to store the value read.
13452  *****************************************************************************/
13453 static int32_t
13454 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
13455 {
13456 	int32_t status;
13457 
13458 	status = wm_read_ich8_data(sc, index, 4, data);
13459 	return status;
13460 }
13461 
13462 /******************************************************************************
13463  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
13464  * register.
13465  *
13466  * sc - Struct containing variables accessed by shared code
13467  * offset - offset of word in the EEPROM to read
13468  * data - word read from the EEPROM
13469  * words - number of words to read
13470  *****************************************************************************/
13471 static int
13472 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
13473 {
13474 	int32_t	 rv = 0;
13475 	uint32_t flash_bank = 0;
13476 	uint32_t act_offset = 0;
13477 	uint32_t bank_offset = 0;
13478 	uint16_t word = 0;
13479 	uint16_t i = 0;
13480 
13481 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
13482 		device_xname(sc->sc_dev), __func__));
13483 
13484 	if (sc->nvm.acquire(sc) != 0)
13485 		return -1;
13486 
13487 	/*
13488 	 * We need to know which is the valid flash bank.  In the event
13489 	 * that we didn't allocate eeprom_shadow_ram, we may not be
13490 	 * managing flash_bank. So it cannot be trusted and needs
13491 	 * to be updated with each read.
13492 	 */
13493 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
13494 	if (rv) {
13495 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
13496 			device_xname(sc->sc_dev)));
13497 		flash_bank = 0;
13498 	}
13499 
13500 	/*
13501 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
13502 	 * size
13503 	 */
13504 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
13505 
13506 	for (i = 0; i < words; i++) {
13507 		/* The NVM part needs a byte offset, hence * 2 */
13508 		act_offset = bank_offset + ((offset + i) * 2);
13509 		rv = wm_read_ich8_word(sc, act_offset, &word);
13510 		if (rv) {
13511 			aprint_error_dev(sc->sc_dev,
13512 			    "%s: failed to read NVM\n", __func__);
13513 			break;
13514 		}
13515 		data[i] = word;
13516 	}
13517 
13518 	sc->nvm.release(sc);
13519 	return rv;
13520 }
13521 
13522 /******************************************************************************
13523  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
13524  * register.
13525  *
13526  * sc - Struct containing variables accessed by shared code
13527  * offset - offset of word in the EEPROM to read
13528  * data - word read from the EEPROM
13529  * words - number of words to read
13530  *****************************************************************************/
13531 static int
13532 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
13533 {
13534 	int32_t	 rv = 0;
13535 	uint32_t flash_bank = 0;
13536 	uint32_t act_offset = 0;
13537 	uint32_t bank_offset = 0;
13538 	uint32_t dword = 0;
13539 	uint16_t i = 0;
13540 
13541 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
13542 		device_xname(sc->sc_dev), __func__));
13543 
13544 	if (sc->nvm.acquire(sc) != 0)
13545 		return -1;
13546 
13547 	/*
13548 	 * We need to know which is the valid flash bank.  In the event
13549 	 * that we didn't allocate eeprom_shadow_ram, we may not be
13550 	 * managing flash_bank. So it cannot be trusted and needs
13551 	 * to be updated with each read.
13552 	 */
13553 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
13554 	if (rv) {
13555 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
13556 			device_xname(sc->sc_dev)));
13557 		flash_bank = 0;
13558 	}
13559 
13560 	/*
13561 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
13562 	 * size
13563 	 */
13564 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
13565 
13566 	for (i = 0; i < words; i++) {
13567 		/* The NVM part needs a byte offset, hence * 2 */
13568 		act_offset = bank_offset + ((offset + i) * 2);
13569 		/* but we must read dword aligned, so mask ... */
13570 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
13571 		if (rv) {
13572 			aprint_error_dev(sc->sc_dev,
13573 			    "%s: failed to read NVM\n", __func__);
13574 			break;
13575 		}
13576 		/* ... and pick out low or high word */
13577 		if ((act_offset & 0x2) == 0)
13578 			data[i] = (uint16_t)(dword & 0xFFFF);
13579 		else
13580 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
13581 	}
13582 
13583 	sc->nvm.release(sc);
13584 	return rv;
13585 }
13586 
13587 /* iNVM */
13588 
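/*
 * wm_nvm_read_word_invm:
 *
 *	Scan the iNVM dword array for a WORD_AUTOLOAD record matching
 *	"address" and return its data word, skipping over CSR-autoload
 *	and RSA-key records.  Returns 0 on success, -1 if the word is
 *	not present.
 */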
13589 static int
13590 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
13591 {
	int32_t	 rv = -1;	/* Not found until proven otherwise */
13593 	uint32_t invm_dword;
13594 	uint16_t i;
13595 	uint8_t record_type, word_address;
13596 
13597 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
13598 		device_xname(sc->sc_dev), __func__));
13599 
13600 	for (i = 0; i < INVM_SIZE; i++) {
13601 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
13602 		/* Get record type */
13603 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
13604 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
13605 			break;
13606 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
13607 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
13608 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
13609 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
13610 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
13611 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
13612 			if (word_address == address) {
13613 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
13614 				rv = 0;
13615 				break;
13616 			}
13617 		}
13618 	}
13619 
13620 	return rv;
13621 }
13622 
13623 static int
13624 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
13625 {
13626 	int rv = 0;
13627 	int i;
13628 
13629 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
13630 		device_xname(sc->sc_dev), __func__));
13631 
13632 	if (sc->nvm.acquire(sc) != 0)
13633 		return -1;
13634 
13635 	for (i = 0; i < words; i++) {
13636 		switch (offset + i) {
13637 		case NVM_OFF_MACADDR:
13638 		case NVM_OFF_MACADDR1:
13639 		case NVM_OFF_MACADDR2:
13640 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
13641 			if (rv != 0) {
13642 				data[i] = 0xffff;
13643 				rv = -1;
13644 			}
13645 			break;
13646 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
13647 			rv = wm_nvm_read_word_invm(sc, offset, data);
13648 			if (rv != 0) {
13649 				*data = INVM_DEFAULT_AL;
13650 				rv = 0;
13651 			}
13652 			break;
13653 		case NVM_OFF_CFG2:
13654 			rv = wm_nvm_read_word_invm(sc, offset, data);
13655 			if (rv != 0) {
13656 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
13657 				rv = 0;
13658 			}
13659 			break;
13660 		case NVM_OFF_CFG4:
13661 			rv = wm_nvm_read_word_invm(sc, offset, data);
13662 			if (rv != 0) {
13663 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
13664 				rv = 0;
13665 			}
13666 			break;
13667 		case NVM_OFF_LED_1_CFG:
13668 			rv = wm_nvm_read_word_invm(sc, offset, data);
13669 			if (rv != 0) {
13670 				*data = NVM_LED_1_CFG_DEFAULT_I211;
13671 				rv = 0;
13672 			}
13673 			break;
13674 		case NVM_OFF_LED_0_2_CFG:
13675 			rv = wm_nvm_read_word_invm(sc, offset, data);
13676 			if (rv != 0) {
13677 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
13678 				rv = 0;
13679 			}
13680 			break;
13681 		case NVM_OFF_ID_LED_SETTINGS:
13682 			rv = wm_nvm_read_word_invm(sc, offset, data);
13683 			if (rv != 0) {
13684 				*data = ID_LED_RESERVED_FFFF;
13685 				rv = 0;
13686 			}
13687 			break;
13688 		default:
13689 			DPRINTF(WM_DEBUG_NVM,
13690 			    ("NVM word 0x%02x is not mapped.\n", offset));
13691 			*data = NVM_RESERVED_WORD;
13692 			break;
13693 		}
13694 	}
13695 
13696 	sc->nvm.release(sc);
13697 	return rv;
13698 }
13699 
/* Locking, NVM type detection, checksum validation, version and read */
13701 
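/*
 * wm_nvm_is_onboard_eeprom:
 *
 *	Return 1 if the NVM is an on-board EEPROM, or 0 if an 82573,
 *	82574 or 82583 is strapped for flash (EECD bits 15 and 16 both
 *	set).
 */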
13702 static int
13703 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
13704 {
13705 	uint32_t eecd = 0;
13706 
13707 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
13708 	    || sc->sc_type == WM_T_82583) {
13709 		eecd = CSR_READ(sc, WMREG_EECD);
13710 
13711 		/* Isolate bits 15 & 16 */
13712 		eecd = ((eecd >> 15) & 0x03);
13713 
13714 		/* If both bits are set, device is Flash type */
13715 		if (eecd == 0x03)
13716 			return 0;
13717 	}
13718 	return 1;
13719 }
13720 
13721 static int
13722 wm_nvm_flash_presence_i210(struct wm_softc *sc)
13723 {
13724 	uint32_t eec;
13725 
13726 	eec = CSR_READ(sc, WMREG_EEC);
13727 	if ((eec & EEC_FLASH_DETECTED) != 0)
13728 		return 1;
13729 
13730 	return 0;
13731 }
13732 
13733 /*
13734  * wm_nvm_validate_checksum
13735  *
 * The checksum is defined as the sum of the first 64 (16 bit) words,
 * which must equal NVM_CHECKSUM.
13737  */
13738 static int
13739 wm_nvm_validate_checksum(struct wm_softc *sc)
13740 {
13741 	uint16_t checksum;
13742 	uint16_t eeprom_data;
13743 #ifdef WM_DEBUG
13744 	uint16_t csum_wordaddr, valid_checksum;
13745 #endif
13746 	int i;
13747 
13748 	checksum = 0;
13749 
13750 	/* Don't check for I211 */
13751 	if (sc->sc_type == WM_T_I211)
13752 		return 0;
13753 
13754 #ifdef WM_DEBUG
13755 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
13756 	    || (sc->sc_type == WM_T_PCH_CNP)) {
13757 		csum_wordaddr = NVM_OFF_COMPAT;
13758 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
13759 	} else {
13760 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
13761 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
13762 	}
13763 
13764 	/* Dump EEPROM image for debug */
13765 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
13766 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
13767 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
13768 		/* XXX PCH_SPT? */
13769 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
13770 		if ((eeprom_data & valid_checksum) == 0)
13771 			DPRINTF(WM_DEBUG_NVM,
13772 			    ("%s: NVM need to be updated (%04x != %04x)\n",
13773 				device_xname(sc->sc_dev), eeprom_data,
13774 				    valid_checksum));
13775 	}
13776 
13777 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
13778 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
13779 		for (i = 0; i < NVM_SIZE; i++) {
13780 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
13781 				printf("XXXX ");
13782 			else
13783 				printf("%04hx ", eeprom_data);
13784 			if (i % 8 == 7)
13785 				printf("\n");
13786 		}
13787 	}
13788 
13789 #endif /* WM_DEBUG */
13790 
13791 	for (i = 0; i < NVM_SIZE; i++) {
13792 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
13793 			return 1;
13794 		checksum += eeprom_data;
13795 	}
13796 
13797 	if (checksum != (uint16_t) NVM_CHECKSUM) {
13798 #ifdef WM_DEBUG
13799 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
13800 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
13801 #endif
13802 	}
13803 
13804 	return 0;
13805 }
13806 
13807 static void
13808 wm_nvm_version_invm(struct wm_softc *sc)
13809 {
13810 	uint32_t dword;
13811 
13812 	/*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm and just use word 61 as the documentation
	 * describes.  Perhaps it's not perfect though...
13816 	 *
13817 	 * Example:
13818 	 *
13819 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
13820 	 */
13821 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
13822 	dword = __SHIFTOUT(dword, INVM_VER_1);
13823 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
13824 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
13825 }
13826 
13827 static void
13828 wm_nvm_version(struct wm_softc *sc)
13829 {
13830 	uint16_t major, minor, build, patch;
13831 	uint16_t uid0, uid1;
13832 	uint16_t nvm_data;
13833 	uint16_t off;
13834 	bool check_version = false;
13835 	bool check_optionrom = false;
13836 	bool have_build = false;
13837 	bool have_uid = true;
13838 
13839 	/*
13840 	 * Version format:
13841 	 *
13842 	 * XYYZ
13843 	 * X0YZ
13844 	 * X0YY
13845 	 *
13846 	 * Example:
13847 	 *
13848 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
13849 	 *	82571	0x50a6	5.10.6?
13850 	 *	82572	0x506a	5.6.10?
13851 	 *	82572EI	0x5069	5.6.9?
13852 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
13853 	 *		0x2013	2.1.3?
13854 	 *	82583	0x10a0	1.10.0? (document says it's default value)
13855 	 * ICH8+82567	0x0040	0.4.0?
13856 	 * ICH9+82566	0x1040	1.4.0?
13857 	 *ICH10+82567	0x0043	0.4.3?
13858 	 *  PCH+82577	0x00c1	0.12.1?
13859 	 * PCH2+82579	0x00d3	0.13.3?
13860 	 *		0x00d4	0.13.4?
13861 	 *  LPT+I218	0x0023	0.2.3?
13862 	 *  SPT+I219	0x0084	0.8.4?
13863 	 *  CNP+I219	0x0054	0.5.4?
13864 	 */
13865 
13866 	/*
13867 	 * XXX
	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64
	 * words.  I've never seen real 82574 hardware with such a small
	 * SPI ROM.
13870 	 */
13871 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
13872 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
13873 		have_uid = false;
13874 
13875 	switch (sc->sc_type) {
13876 	case WM_T_82571:
13877 	case WM_T_82572:
13878 	case WM_T_82574:
13879 	case WM_T_82583:
13880 		check_version = true;
13881 		check_optionrom = true;
13882 		have_build = true;
13883 		break;
13884 	case WM_T_ICH8:
13885 	case WM_T_ICH9:
13886 	case WM_T_ICH10:
13887 	case WM_T_PCH:
13888 	case WM_T_PCH2:
13889 	case WM_T_PCH_LPT:
13890 	case WM_T_PCH_SPT:
13891 	case WM_T_PCH_CNP:
13892 		check_version = true;
13893 		have_build = true;
13894 		have_uid = false;
13895 		break;
13896 	case WM_T_82575:
13897 	case WM_T_82576:
13898 	case WM_T_82580:
13899 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
13900 			check_version = true;
13901 		break;
13902 	case WM_T_I211:
13903 		wm_nvm_version_invm(sc);
13904 		have_uid = false;
13905 		goto printver;
13906 	case WM_T_I210:
13907 		if (!wm_nvm_flash_presence_i210(sc)) {
13908 			wm_nvm_version_invm(sc);
13909 			have_uid = false;
13910 			goto printver;
13911 		}
13912 		/* FALLTHROUGH */
13913 	case WM_T_I350:
13914 	case WM_T_I354:
13915 		check_version = true;
13916 		check_optionrom = true;
13917 		break;
13918 	default:
13919 		return;
13920 	}
13921 	if (check_version
13922 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
13923 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
13924 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
13925 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
13926 			build = nvm_data & NVM_BUILD_MASK;
13927 			have_build = true;
13928 		} else
13929 			minor = nvm_data & 0x00ff;
13930 
		/* The minor number is BCD-coded; convert it to decimal */
13932 		minor = (minor / 16) * 10 + (minor % 16);
13933 		sc->sc_nvm_ver_major = major;
13934 		sc->sc_nvm_ver_minor = minor;
13935 
13936 printver:
13937 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
13938 		    sc->sc_nvm_ver_minor);
13939 		if (have_build) {
13940 			sc->sc_nvm_ver_build = build;
13941 			aprint_verbose(".%d", build);
13942 		}
13943 	}
13944 
	/* Assume the Option ROM area is above NVM_SIZE */
13946 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
13947 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
13948 		/* Option ROM Version */
13949 		if ((off != 0x0000) && (off != 0xffff)) {
13950 			int rv;
13951 
13952 			off += NVM_COMBO_VER_OFF;
13953 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
13954 			rv |= wm_nvm_read(sc, off, 1, &uid0);
13955 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
13956 			    && (uid1 != 0) && (uid1 != 0xffff)) {
13957 				/* 16bits */
13958 				major = uid0 >> 8;
13959 				build = (uid0 << 8) | (uid1 >> 8);
13960 				patch = uid1 & 0x00ff;
13961 				aprint_verbose(", option ROM Version %d.%d.%d",
13962 				    major, build, patch);
13963 			}
13964 		}
13965 	}
13966 
13967 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
13968 		aprint_verbose(", Image Unique ID %08x",
13969 		    ((uint32_t)uid1 << 16) | uid0);
13970 }
13971 
13972 /*
13973  * wm_nvm_read:
13974  *
13975  *	Read data from the serial EEPROM.
13976  */
13977 static int
13978 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
13979 {
13980 	int rv;
13981 
13982 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
13983 		device_xname(sc->sc_dev), __func__));
13984 
13985 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
13986 		return -1;
13987 
13988 	rv = sc->nvm.read(sc, word, wordcnt, data);
13989 
13990 	return rv;
13991 }
13992 
13993 /*
13994  * Hardware semaphores.
 * Very complex...
13996  */
13997 
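/*
 * wm_get_null / wm_put_null:
 *
 *	No-op acquire/release methods, used where an access needs no
 *	arbitration.
 */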
13998 static int
13999 wm_get_null(struct wm_softc *sc)
14000 {
14001 
14002 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14003 		device_xname(sc->sc_dev), __func__));
14004 	return 0;
14005 }
14006 
14007 static void
14008 wm_put_null(struct wm_softc *sc)
14009 {
14010 
14011 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14012 		device_xname(sc->sc_dev), __func__));
14013 	return;
14014 }
14015 
14016 static int
14017 wm_get_eecd(struct wm_softc *sc)
14018 {
14019 	uint32_t reg;
14020 	int x;
14021 
14022 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
14023 		device_xname(sc->sc_dev), __func__));
14024 
14025 	reg = CSR_READ(sc, WMREG_EECD);
14026 
14027 	/* Request EEPROM access. */
14028 	reg |= EECD_EE_REQ;
14029 	CSR_WRITE(sc, WMREG_EECD, reg);
14030 
14031 	/* ..and wait for it to be granted. */
14032 	for (x = 0; x < 1000; x++) {
14033 		reg = CSR_READ(sc, WMREG_EECD);
14034 		if (reg & EECD_EE_GNT)
14035 			break;
14036 		delay(5);
14037 	}
14038 	if ((reg & EECD_EE_GNT) == 0) {
14039 		aprint_error_dev(sc->sc_dev,
14040 		    "could not acquire EEPROM GNT\n");
14041 		reg &= ~EECD_EE_REQ;
14042 		CSR_WRITE(sc, WMREG_EECD, reg);
14043 		return -1;
14044 	}
14045 
14046 	return 0;
14047 }
14048 
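/*
 * wm_nvm_eec_clock_raise / wm_nvm_eec_clock_lower:
 *
 *	Toggle the EECD_SK bit to clock the serial EEPROM.  SPI parts
 *	tolerate a much shorter clock period than Microwire ones, hence
 *	the 1us vs. 50us delays.
 */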
14049 static void
14050 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
14051 {
14052 
14053 	*eecd |= EECD_SK;
14054 	CSR_WRITE(sc, WMREG_EECD, *eecd);
14055 	CSR_WRITE_FLUSH(sc);
14056 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
14057 		delay(1);
14058 	else
14059 		delay(50);
14060 }
14061 
14062 static void
14063 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
14064 {
14065 
14066 	*eecd &= ~EECD_SK;
14067 	CSR_WRITE(sc, WMREG_EECD, *eecd);
14068 	CSR_WRITE_FLUSH(sc);
14069 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
14070 		delay(1);
14071 	else
14072 		delay(50);
14073 }
14074 
14075 static void
14076 wm_put_eecd(struct wm_softc *sc)
14077 {
14078 	uint32_t reg;
14079 
14080 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14081 		device_xname(sc->sc_dev), __func__));
14082 
14083 	/* Stop nvm */
14084 	reg = CSR_READ(sc, WMREG_EECD);
14085 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
14086 		/* Pull CS high */
14087 		reg |= EECD_CS;
14088 		wm_nvm_eec_clock_lower(sc, &reg);
14089 	} else {
14090 		/* CS on Microwire is active-high */
14091 		reg &= ~(EECD_CS | EECD_DI);
14092 		CSR_WRITE(sc, WMREG_EECD, reg);
14093 		wm_nvm_eec_clock_raise(sc, &reg);
14094 		wm_nvm_eec_clock_lower(sc, &reg);
14095 	}
14096 
14097 	reg = CSR_READ(sc, WMREG_EECD);
14098 	reg &= ~EECD_EE_REQ;
14099 	CSR_WRITE(sc, WMREG_EECD, reg);
14100 
14101 	return;
14102 }
14103 
14104 /*
14105  * Get hardware semaphore.
14106  * Same as e1000_get_hw_semaphore_generic()
14107  */
14108 static int
14109 wm_get_swsm_semaphore(struct wm_softc *sc)
14110 {
14111 	int32_t timeout;
14112 	uint32_t swsm;
14113 
14114 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14115 		device_xname(sc->sc_dev), __func__));
14116 	KASSERT(sc->sc_nvm_wordsize > 0);
14117 
14118 retry:
14119 	/* Get the SW semaphore. */
14120 	timeout = sc->sc_nvm_wordsize + 1;
14121 	while (timeout) {
14122 		swsm = CSR_READ(sc, WMREG_SWSM);
14123 
14124 		if ((swsm & SWSM_SMBI) == 0)
14125 			break;
14126 
14127 		delay(50);
14128 		timeout--;
14129 	}
14130 
14131 	if (timeout == 0) {
14132 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
14133 			/*
14134 			 * In rare circumstances, the SW semaphore may already
14135 			 * be held unintentionally. Clear the semaphore once
14136 			 * before giving up.
14137 			 */
14138 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
14139 			wm_put_swsm_semaphore(sc);
14140 			goto retry;
14141 		}
14142 		aprint_error_dev(sc->sc_dev,
14143 		    "could not acquire SWSM SMBI\n");
14144 		return 1;
14145 	}
14146 
14147 	/* Get the FW semaphore. */
14148 	timeout = sc->sc_nvm_wordsize + 1;
14149 	while (timeout) {
14150 		swsm = CSR_READ(sc, WMREG_SWSM);
14151 		swsm |= SWSM_SWESMBI;
14152 		CSR_WRITE(sc, WMREG_SWSM, swsm);
14153 		/* If we managed to set the bit we got the semaphore. */
14154 		swsm = CSR_READ(sc, WMREG_SWSM);
14155 		if (swsm & SWSM_SWESMBI)
14156 			break;
14157 
14158 		delay(50);
14159 		timeout--;
14160 	}
14161 
14162 	if (timeout == 0) {
14163 		aprint_error_dev(sc->sc_dev,
14164 		    "could not acquire SWSM SWESMBI\n");
14165 		/* Release semaphores */
14166 		wm_put_swsm_semaphore(sc);
14167 		return 1;
14168 	}
14169 	return 0;
14170 }
14171 
14172 /*
14173  * Put hardware semaphore.
14174  * Same as e1000_put_hw_semaphore_generic()
14175  */
14176 static void
14177 wm_put_swsm_semaphore(struct wm_softc *sc)
14178 {
14179 	uint32_t swsm;
14180 
14181 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14182 		device_xname(sc->sc_dev), __func__));
14183 
14184 	swsm = CSR_READ(sc, WMREG_SWSM);
14185 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
14186 	CSR_WRITE(sc, WMREG_SWSM, swsm);
14187 }
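
/*
 * The SWSM handshake above is two-staged: SMBI arbitrates among software
 * agents, then SWESMBI arbitrates between software and firmware; the put
 * routine clears both bits.  A condensed sketch of the caller-side
 * pattern:
 *
 *	if (wm_get_swsm_semaphore(sc) != 0)
 *		return 1;
 *	... touch registers shared with firmware ...
 *	wm_put_swsm_semaphore(sc);
 */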
14188 
14189 /*
14190  * Get SW/FW semaphore.
14191  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
14192  */
14193 static int
14194 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
14195 {
14196 	uint32_t swfw_sync;
14197 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
14198 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
14199 	int timeout;
14200 
14201 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14202 		device_xname(sc->sc_dev), __func__));
14203 
14204 	if (sc->sc_type == WM_T_80003)
14205 		timeout = 50;
14206 	else
14207 		timeout = 200;
14208 
14209 	while (timeout) {
14210 		if (wm_get_swsm_semaphore(sc)) {
14211 			aprint_error_dev(sc->sc_dev,
14212 			    "%s: failed to get semaphore\n",
14213 			    __func__);
14214 			return 1;
14215 		}
14216 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
14217 		if ((swfw_sync & (swmask | fwmask)) == 0) {
14218 			swfw_sync |= swmask;
14219 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
14220 			wm_put_swsm_semaphore(sc);
14221 			return 0;
14222 		}
14223 		wm_put_swsm_semaphore(sc);
14224 		delay(5000);
14225 		timeout--;
14226 	}
14227 	device_printf(sc->sc_dev,
14228 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
14229 	    mask, swfw_sync);
14230 	return 1;
14231 }
14232 
14233 static void
14234 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
14235 {
14236 	uint32_t swfw_sync;
14237 
14238 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14239 		device_xname(sc->sc_dev), __func__));
14240 
14241 	while (wm_get_swsm_semaphore(sc) != 0)
14242 		continue;
14243 
14244 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
14245 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
14246 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
14247 
14248 	wm_put_swsm_semaphore(sc);
14249 }
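
/*
 * Example of the SW/FW semaphore usage (a sketch, mirroring
 * wm_get_nvm_80003() below): the mask selects the shared resource,
 * e.g. SWFW_EEP_SM for the EEPROM:
 *
 *	if (wm_get_swfw_semaphore(sc, SWFW_EEP_SM) != 0)
 *		return 1;
 *	... access the EEPROM ...
 *	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
 */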
14250 
14251 static int
14252 wm_get_nvm_80003(struct wm_softc *sc)
14253 {
14254 	int rv;
14255 
14256 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
14257 		device_xname(sc->sc_dev), __func__));
14258 
14259 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
14260 		aprint_error_dev(sc->sc_dev,
14261 		    "%s: failed to get semaphore(SWFW)\n", __func__);
14262 		return rv;
14263 	}
14264 
14265 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
14266 	    && (rv = wm_get_eecd(sc)) != 0) {
14267 		aprint_error_dev(sc->sc_dev,
14268 		    "%s: failed to get semaphore(EECD)\n", __func__);
14269 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
14270 		return rv;
14271 	}
14272 
14273 	return 0;
14274 }
14275 
14276 static void
14277 wm_put_nvm_80003(struct wm_softc *sc)
14278 {
14279 
14280 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14281 		device_xname(sc->sc_dev), __func__));
14282 
14283 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
14284 		wm_put_eecd(sc);
14285 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
14286 }
14287 
14288 static int
14289 wm_get_nvm_82571(struct wm_softc *sc)
14290 {
14291 	int rv;
14292 
14293 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14294 		device_xname(sc->sc_dev), __func__));
14295 
14296 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
14297 		return rv;
14298 
14299 	switch (sc->sc_type) {
14300 	case WM_T_82573:
14301 		break;
14302 	default:
14303 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
14304 			rv = wm_get_eecd(sc);
14305 		break;
14306 	}
14307 
14308 	if (rv != 0) {
14309 		aprint_error_dev(sc->sc_dev,
14310 		    "%s: failed to get semaphore\n",
14311 		    __func__);
14312 		wm_put_swsm_semaphore(sc);
14313 	}
14314 
14315 	return rv;
14316 }
14317 
14318 static void
14319 wm_put_nvm_82571(struct wm_softc *sc)
14320 {
14321 
14322 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14323 		device_xname(sc->sc_dev), __func__));
14324 
14325 	switch (sc->sc_type) {
14326 	case WM_T_82573:
14327 		break;
14328 	default:
14329 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
14330 			wm_put_eecd(sc);
14331 		break;
14332 	}
14333 
14334 	wm_put_swsm_semaphore(sc);
14335 }
14336 
14337 static int
14338 wm_get_phy_82575(struct wm_softc *sc)
14339 {
14340 
14341 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14342 		device_xname(sc->sc_dev), __func__));
14343 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
14344 }
14345 
14346 static void
14347 wm_put_phy_82575(struct wm_softc *sc)
14348 {
14349 
14350 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14351 		device_xname(sc->sc_dev), __func__));
14352 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
14353 }
14354 
14355 static int
14356 wm_get_swfwhw_semaphore(struct wm_softc *sc)
14357 {
14358 	uint32_t ext_ctrl;
14359 	int timeout;
14360 
14361 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14362 		device_xname(sc->sc_dev), __func__));
14363 
14364 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
14365 	for (timeout = 0; timeout < 200; timeout++) {
14366 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14367 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
14368 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14369 
14370 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14371 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
14372 			return 0;
14373 		delay(5000);
14374 	}
14375 	device_printf(sc->sc_dev,
14376 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
14377 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
14378 	return 1;
14379 }
14380 
14381 static void
14382 wm_put_swfwhw_semaphore(struct wm_softc *sc)
14383 {
14384 	uint32_t ext_ctrl;
14385 
14386 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14387 		device_xname(sc->sc_dev), __func__));
14388 
14389 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14390 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
14391 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14392 
14393 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
14394 }
14395 
14396 static int
14397 wm_get_swflag_ich8lan(struct wm_softc *sc)
14398 {
14399 	uint32_t ext_ctrl;
14400 	int timeout;
14401 
14402 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14403 		device_xname(sc->sc_dev), __func__));
14404 	mutex_enter(sc->sc_ich_phymtx);
14405 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
14406 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14407 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
14408 			break;
14409 		delay(1000);
14410 	}
14411 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
14412 		device_printf(sc->sc_dev,
14413 		    "SW has already locked the resource\n");
14414 		goto out;
14415 	}
14416 
14417 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
14418 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14419 	for (timeout = 0; timeout < 1000; timeout++) {
14420 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14421 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
14422 			break;
14423 		delay(1000);
14424 	}
14425 	if (timeout >= 1000) {
14426 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
14427 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
14428 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14429 		goto out;
14430 	}
14431 	return 0;
14432 
14433 out:
14434 	mutex_exit(sc->sc_ich_phymtx);
14435 	return 1;
14436 }
14437 
14438 static void
14439 wm_put_swflag_ich8lan(struct wm_softc *sc)
14440 {
14441 	uint32_t ext_ctrl;
14442 
14443 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14444 		device_xname(sc->sc_dev), __func__));
14445 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14446 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
14447 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
14448 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14449 	} else {
14450 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
14451 	}
14452 
14453 	mutex_exit(sc->sc_ich_phymtx);
14454 }
14455 
14456 static int
14457 wm_get_nvm_ich8lan(struct wm_softc *sc)
14458 {
14459 
14460 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14461 		device_xname(sc->sc_dev), __func__));
14462 	mutex_enter(sc->sc_ich_nvmmtx);
14463 
14464 	return 0;
14465 }
14466 
14467 static void
14468 wm_put_nvm_ich8lan(struct wm_softc *sc)
14469 {
14470 
14471 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14472 		device_xname(sc->sc_dev), __func__));
14473 	mutex_exit(sc->sc_ich_nvmmtx);
14474 }
14475 
14476 static int
14477 wm_get_hw_semaphore_82573(struct wm_softc *sc)
14478 {
14479 	int i = 0;
14480 	uint32_t reg;
14481 
14482 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14483 		device_xname(sc->sc_dev), __func__));
14484 
14485 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
14486 	do {
14487 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
14488 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
14489 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
14490 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
14491 			break;
14492 		delay(2*1000);
14493 		i++;
14494 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
14495 
14496 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
14497 		wm_put_hw_semaphore_82573(sc);
14498 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
14499 		    device_xname(sc->sc_dev));
14500 		return -1;
14501 	}
14502 
14503 	return 0;
14504 }
14505 
14506 static void
14507 wm_put_hw_semaphore_82573(struct wm_softc *sc)
14508 {
14509 	uint32_t reg;
14510 
14511 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14512 		device_xname(sc->sc_dev), __func__));
14513 
14514 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
14515 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
14516 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
14517 }
14518 
14519 /*
14520  * Management mode and power management related subroutines.
14521  * BMC, AMT, suspend/resume and EEE.
14522  */
14523 
14524 #ifdef WM_WOL
14525 static int
14526 wm_check_mng_mode(struct wm_softc *sc)
14527 {
14528 	int rv;
14529 
14530 	switch (sc->sc_type) {
14531 	case WM_T_ICH8:
14532 	case WM_T_ICH9:
14533 	case WM_T_ICH10:
14534 	case WM_T_PCH:
14535 	case WM_T_PCH2:
14536 	case WM_T_PCH_LPT:
14537 	case WM_T_PCH_SPT:
14538 	case WM_T_PCH_CNP:
14539 		rv = wm_check_mng_mode_ich8lan(sc);
14540 		break;
14541 	case WM_T_82574:
14542 	case WM_T_82583:
14543 		rv = wm_check_mng_mode_82574(sc);
14544 		break;
14545 	case WM_T_82571:
14546 	case WM_T_82572:
14547 	case WM_T_82573:
14548 	case WM_T_80003:
14549 		rv = wm_check_mng_mode_generic(sc);
14550 		break;
14551 	default:
14552 		/* Nothing to do */
14553 		rv = 0;
14554 		break;
14555 	}
14556 
14557 	return rv;
14558 }
14559 
14560 static int
14561 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
14562 {
14563 	uint32_t fwsm;
14564 
14565 	fwsm = CSR_READ(sc, WMREG_FWSM);
14566 
14567 	if (((fwsm & FWSM_FW_VALID) != 0)
14568 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
14569 		return 1;
14570 
14571 	return 0;
14572 }
14573 
14574 static int
14575 wm_check_mng_mode_82574(struct wm_softc *sc)
14576 {
14577 	uint16_t data;
14578 
14579 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
14580 
14581 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
14582 		return 1;
14583 
14584 	return 0;
14585 }
14586 
14587 static int
14588 wm_check_mng_mode_generic(struct wm_softc *sc)
14589 {
14590 	uint32_t fwsm;
14591 
14592 	fwsm = CSR_READ(sc, WMREG_FWSM);
14593 
14594 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
14595 		return 1;
14596 
14597 	return 0;
14598 }
14599 #endif /* WM_WOL */
14600 
14601 static int
14602 wm_enable_mng_pass_thru(struct wm_softc *sc)
14603 {
14604 	uint32_t manc, fwsm, factps;
14605 
14606 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
14607 		return 0;
14608 
14609 	manc = CSR_READ(sc, WMREG_MANC);
14610 
14611 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
14612 		device_xname(sc->sc_dev), manc));
14613 	if ((manc & MANC_RECV_TCO_EN) == 0)
14614 		return 0;
14615 
14616 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
14617 		fwsm = CSR_READ(sc, WMREG_FWSM);
14618 		factps = CSR_READ(sc, WMREG_FACTPS);
14619 		if (((factps & FACTPS_MNGCG) == 0)
14620 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
14621 			return 1;
14622 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
14623 		uint16_t data;
14624 
14625 		factps = CSR_READ(sc, WMREG_FACTPS);
14626 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
14627 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
14628 			device_xname(sc->sc_dev), factps, data));
14629 		if (((factps & FACTPS_MNGCG) == 0)
14630 		    && ((data & NVM_CFG2_MNGM_MASK)
14631 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
14632 			return 1;
14633 	} else if (((manc & MANC_SMBUS_EN) != 0)
14634 	    && ((manc & MANC_ASF_EN) == 0))
14635 		return 1;
14636 
14637 	return 0;
14638 }
14639 
14640 static bool
14641 wm_phy_resetisblocked(struct wm_softc *sc)
14642 {
14643 	bool blocked = false;
14644 	uint32_t reg;
14645 	int i = 0;
14646 
14647 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14648 		device_xname(sc->sc_dev), __func__));
14649 
14650 	switch (sc->sc_type) {
14651 	case WM_T_ICH8:
14652 	case WM_T_ICH9:
14653 	case WM_T_ICH10:
14654 	case WM_T_PCH:
14655 	case WM_T_PCH2:
14656 	case WM_T_PCH_LPT:
14657 	case WM_T_PCH_SPT:
14658 	case WM_T_PCH_CNP:
14659 		do {
14660 			reg = CSR_READ(sc, WMREG_FWSM);
14661 			if ((reg & FWSM_RSPCIPHY) == 0) {
14662 				blocked = true;
14663 				delay(10*1000);
14664 				continue;
14665 			}
14666 			blocked = false;
14667 		} while (blocked && (i++ < 30));
14668 		return blocked;
14669 		break;
14670 	case WM_T_82571:
14671 	case WM_T_82572:
14672 	case WM_T_82573:
14673 	case WM_T_82574:
14674 	case WM_T_82583:
14675 	case WM_T_80003:
14676 		reg = CSR_READ(sc, WMREG_MANC);
14677 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
14678 			return true;
14679 		else
14680 			return false;
14681 		break;
14682 	default:
14683 		/* No problem */
14684 		break;
14685 	}
14686 
14687 	return false;
14688 }
14689 
14690 static void
14691 wm_get_hw_control(struct wm_softc *sc)
14692 {
14693 	uint32_t reg;
14694 
14695 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14696 		device_xname(sc->sc_dev), __func__));
14697 
14698 	if (sc->sc_type == WM_T_82573) {
14699 		reg = CSR_READ(sc, WMREG_SWSM);
14700 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
14701 	} else if (sc->sc_type >= WM_T_82571) {
14702 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
14703 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
14704 	}
14705 }
14706 
14707 static void
14708 wm_release_hw_control(struct wm_softc *sc)
14709 {
14710 	uint32_t reg;
14711 
14712 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14713 		device_xname(sc->sc_dev), __func__));
14714 
14715 	if (sc->sc_type == WM_T_82573) {
14716 		reg = CSR_READ(sc, WMREG_SWSM);
14717 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
14718 	} else if (sc->sc_type >= WM_T_82571) {
14719 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
14720 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
14721 	}
14722 }
14723 
14724 static void
14725 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
14726 {
14727 	uint32_t reg;
14728 
14729 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14730 		device_xname(sc->sc_dev), __func__));
14731 
14732 	if (sc->sc_type < WM_T_PCH2)
14733 		return;
14734 
14735 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
14736 
14737 	if (gate)
14738 		reg |= EXTCNFCTR_GATE_PHY_CFG;
14739 	else
14740 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
14741 
14742 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
14743 }
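
/*
 * Usage note: wm_init_phy_workarounds_pchlan() below gates automatic PHY
 * configuration on entry and, on non-managed 82579 parts (FWSM_FW_VALID
 * clear), ungates it again on exit:
 *
 *	wm_gate_hw_phy_config_ich8lan(sc, true);
 *	... PHY workarounds ...
 *	wm_gate_hw_phy_config_ich8lan(sc, false);
 */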
14744 
14745 static int
14746 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
14747 {
14748 	uint32_t fwsm, reg;
14749 	int rv = 0;
14750 
14751 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14752 		device_xname(sc->sc_dev), __func__));
14753 
14754 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
14755 	wm_gate_hw_phy_config_ich8lan(sc, true);
14756 
14757 	/* Disable ULP */
14758 	wm_ulp_disable(sc);
14759 
14760 	/* Acquire PHY semaphore */
14761 	rv = sc->phy.acquire(sc);
14762 	if (rv != 0) {
14763 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
14764 			device_xname(sc->sc_dev), __func__));
14765 		return -1;
14766 	}
14767 
14768 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
14769 	 * inaccessible and resetting the PHY is not blocked, toggle the
14770 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
14771 	 */
14772 	fwsm = CSR_READ(sc, WMREG_FWSM);
14773 	switch (sc->sc_type) {
14774 	case WM_T_PCH_LPT:
14775 	case WM_T_PCH_SPT:
14776 	case WM_T_PCH_CNP:
14777 		if (wm_phy_is_accessible_pchlan(sc))
14778 			break;
14779 
14780 		/* Before toggling LANPHYPC, see if PHY is accessible by
14781 		 * forcing MAC to SMBus mode first.
14782 		 */
14783 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
14784 		reg |= CTRL_EXT_FORCE_SMBUS;
14785 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
14786 #if 0
14787 		/* XXX Isn't this required??? */
14788 		CSR_WRITE_FLUSH(sc);
14789 #endif
14790 		/* Wait 50 milliseconds for MAC to finish any retries
14791 		 * that it might be trying to perform from previous
14792 		 * attempts to acknowledge any phy read requests.
14793 		 */
14794 		delay(50 * 1000);
14795 		/* FALLTHROUGH */
14796 	case WM_T_PCH2:
14797 		if (wm_phy_is_accessible_pchlan(sc) == true)
14798 			break;
14799 		/* FALLTHROUGH */
14800 	case WM_T_PCH:
14801 		if (sc->sc_type == WM_T_PCH)
14802 			if ((fwsm & FWSM_FW_VALID) != 0)
14803 				break;
14804 
14805 		if (wm_phy_resetisblocked(sc) == true) {
14806 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
14807 			break;
14808 		}
14809 
14810 		/* Toggle LANPHYPC Value bit */
14811 		wm_toggle_lanphypc_pch_lpt(sc);
14812 
14813 		if (sc->sc_type >= WM_T_PCH_LPT) {
14814 			if (wm_phy_is_accessible_pchlan(sc) == true)
14815 				break;
14816 
14817 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
14818 			 * so ensure that the MAC is also out of SMBus mode
14819 			 */
14820 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
14821 			reg &= ~CTRL_EXT_FORCE_SMBUS;
14822 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
14823 
14824 			if (wm_phy_is_accessible_pchlan(sc) == true)
14825 				break;
14826 			rv = -1;
14827 		}
14828 		break;
14829 	default:
14830 		break;
14831 	}
14832 
14833 	/* Release semaphore */
14834 	sc->phy.release(sc);
14835 
14836 	if (rv == 0) {
14837 		/* Check to see if able to reset PHY.  Print error if not */
14838 		if (wm_phy_resetisblocked(sc)) {
14839 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
14840 			goto out;
14841 		}
14842 
14843 		/* Reset the PHY before any access to it.  Doing so, ensures
14844 		 * that the PHY is in a known good state before we read/write
14845 		 * PHY registers.  The generic reset is sufficient here,
14846 		 * because we haven't determined the PHY type yet.
14847 		 */
14848 		if (wm_reset_phy(sc) != 0)
14849 			goto out;
14850 
14851 		/* On a successful reset, possibly need to wait for the PHY
14852 		 * to quiesce to an accessible state before returning control
14853 		 * to the calling function.  If the PHY does not quiesce, then
14854 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
14855 		 * the PHY is in.
14856 		 */
14857 		if (wm_phy_resetisblocked(sc))
14858 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
14859 	}
14860 
14861 out:
14862 	/* Ungate automatic PHY configuration on non-managed 82579 */
14863 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
14864 		delay(10*1000);
14865 		wm_gate_hw_phy_config_ich8lan(sc, false);
14866 	}
14867 
14868 	return rv;
14869 }
14870 
14871 static void
14872 wm_init_manageability(struct wm_softc *sc)
14873 {
14874 
14875 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14876 		device_xname(sc->sc_dev), __func__));
14877 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
14878 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
14879 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
14880 
14881 		/* Disable hardware interception of ARP */
14882 		manc &= ~MANC_ARP_EN;
14883 
14884 		/* Enable receiving management packets to the host */
14885 		if (sc->sc_type >= WM_T_82571) {
14886 			manc |= MANC_EN_MNG2HOST;
14887 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
14888 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
14889 		}
14890 
14891 		CSR_WRITE(sc, WMREG_MANC, manc);
14892 	}
14893 }
14894 
14895 static void
14896 wm_release_manageability(struct wm_softc *sc)
14897 {
14898 
14899 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
14900 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
14901 
14902 		manc |= MANC_ARP_EN;
14903 		if (sc->sc_type >= WM_T_82571)
14904 			manc &= ~MANC_EN_MNG2HOST;
14905 
14906 		CSR_WRITE(sc, WMREG_MANC, manc);
14907 	}
14908 }
14909 
14910 static void
14911 wm_get_wakeup(struct wm_softc *sc)
14912 {
14913 
14914 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
14915 	switch (sc->sc_type) {
14916 	case WM_T_82573:
14917 	case WM_T_82583:
14918 		sc->sc_flags |= WM_F_HAS_AMT;
14919 		/* FALLTHROUGH */
14920 	case WM_T_80003:
14921 	case WM_T_82575:
14922 	case WM_T_82576:
14923 	case WM_T_82580:
14924 	case WM_T_I350:
14925 	case WM_T_I354:
14926 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
14927 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
14928 		/* FALLTHROUGH */
14929 	case WM_T_82541:
14930 	case WM_T_82541_2:
14931 	case WM_T_82547:
14932 	case WM_T_82547_2:
14933 	case WM_T_82571:
14934 	case WM_T_82572:
14935 	case WM_T_82574:
14936 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
14937 		break;
14938 	case WM_T_ICH8:
14939 	case WM_T_ICH9:
14940 	case WM_T_ICH10:
14941 	case WM_T_PCH:
14942 	case WM_T_PCH2:
14943 	case WM_T_PCH_LPT:
14944 	case WM_T_PCH_SPT:
14945 	case WM_T_PCH_CNP:
14946 		sc->sc_flags |= WM_F_HAS_AMT;
14947 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
14948 		break;
14949 	default:
14950 		break;
14951 	}
14952 
14953 	/* 1: HAS_MANAGE */
14954 	if (wm_enable_mng_pass_thru(sc) != 0)
14955 		sc->sc_flags |= WM_F_HAS_MANAGE;
14956 
14957 	/*
14958 	 * Note that the WOL flags are set after the EEPROM stuff has been
14959 	 * reset.
14960 	 */
14961 }
14962 
14963 /*
14964  * Unconfigure Ultra Low Power mode.
14965  * Only for I217 and newer (see below).
14966  */
14967 static int
14968 wm_ulp_disable(struct wm_softc *sc)
14969 {
14970 	uint32_t reg;
14971 	uint16_t phyreg;
14972 	int i = 0, rv = 0;
14973 
14974 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14975 		device_xname(sc->sc_dev), __func__));
14976 	/* Exclude old devices */
14977 	if ((sc->sc_type < WM_T_PCH_LPT)
14978 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
14979 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
14980 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
14981 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
14982 		return 0;
14983 
14984 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
14985 		/* Request ME un-configure ULP mode in the PHY */
14986 		reg = CSR_READ(sc, WMREG_H2ME);
14987 		reg &= ~H2ME_ULP;
14988 		reg |= H2ME_ENFORCE_SETTINGS;
14989 		CSR_WRITE(sc, WMREG_H2ME, reg);
14990 
14991 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
14992 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
14993 			if (i++ == 30) {
14994 				device_printf(sc->sc_dev, "%s timed out\n",
14995 				    __func__);
14996 				return -1;
14997 			}
14998 			delay(10 * 1000);
14999 		}
15000 		reg = CSR_READ(sc, WMREG_H2ME);
15001 		reg &= ~H2ME_ENFORCE_SETTINGS;
15002 		CSR_WRITE(sc, WMREG_H2ME, reg);
15003 
15004 		return 0;
15005 	}
15006 
15007 	/* Acquire semaphore */
15008 	rv = sc->phy.acquire(sc);
15009 	if (rv != 0) {
15010 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
15011 			device_xname(sc->sc_dev), __func__));
15012 		return -1;
15013 	}
15014 
15015 	/* Toggle LANPHYPC */
15016 	wm_toggle_lanphypc_pch_lpt(sc);
15017 
15018 	/* Unforce SMBus mode in PHY */
15019 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
15020 	if (rv != 0) {
15021 		uint32_t reg2;
15022 
15023 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
15024 			__func__);
15025 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
15026 		reg2 |= CTRL_EXT_FORCE_SMBUS;
15027 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
15028 		delay(50 * 1000);
15029 
15030 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
15031 		    &phyreg);
15032 		if (rv != 0)
15033 			goto release;
15034 	}
15035 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
15036 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
15037 
15038 	/* Unforce SMBus mode in MAC */
15039 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
15040 	reg &= ~CTRL_EXT_FORCE_SMBUS;
15041 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15042 
15043 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
15044 	if (rv != 0)
15045 		goto release;
15046 	phyreg |= HV_PM_CTRL_K1_ENA;
15047 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
15048 
15049 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
15050 		&phyreg);
15051 	if (rv != 0)
15052 		goto release;
15053 	phyreg &= ~(I218_ULP_CONFIG1_IND
15054 	    | I218_ULP_CONFIG1_STICKY_ULP
15055 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
15056 	    | I218_ULP_CONFIG1_WOL_HOST
15057 	    | I218_ULP_CONFIG1_INBAND_EXIT
15058 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
15059 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
15060 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
15061 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
15062 	phyreg |= I218_ULP_CONFIG1_START;
15063 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
15064 
15065 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
15066 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
15067 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
15068 
15069 release:
15070 	/* Release semaphore */
15071 	sc->phy.release(sc);
15072 	wm_gmii_reset(sc);
15073 	delay(50 * 1000);
15074 
15075 	return rv;
15076 }
15077 
15078 /* WOL in the newer chipset interfaces (pchlan) */
15079 static int
15080 wm_enable_phy_wakeup(struct wm_softc *sc)
15081 {
15082 	device_t dev = sc->sc_dev;
15083 	uint32_t mreg, moff;
15084 	uint16_t wuce, wuc, wufc, preg;
15085 	int i, rv;
15086 
15087 	KASSERT(sc->sc_type >= WM_T_PCH);
15088 
15089 	/* Copy MAC RARs to PHY RARs */
15090 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
15091 
15092 	/* Activate PHY wakeup */
15093 	rv = sc->phy.acquire(sc);
15094 	if (rv != 0) {
15095 		device_printf(dev, "%s: failed to acquire semaphore\n",
15096 		    __func__);
15097 		return rv;
15098 	}
15099 
15100 	/*
15101 	 * Enable access to PHY wakeup registers.
15102 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
15103 	 */
15104 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
15105 	if (rv != 0) {
15106 		device_printf(dev,
15107 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
15108 		goto release;
15109 	}
15110 
15111 	/* Copy MAC MTA to PHY MTA */
15112 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
15113 		uint16_t lo, hi;
15114 
15115 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
15116 		lo = (uint16_t)(mreg & 0xffff);
15117 		hi = (uint16_t)((mreg >> 16) & 0xffff);
15118 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
15119 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
15120 	}
15121 
15122 	/* Configure PHY Rx Control register */
15123 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
15124 	mreg = CSR_READ(sc, WMREG_RCTL);
15125 	if (mreg & RCTL_UPE)
15126 		preg |= BM_RCTL_UPE;
15127 	if (mreg & RCTL_MPE)
15128 		preg |= BM_RCTL_MPE;
15129 	preg &= ~(BM_RCTL_MO_MASK);
15130 	moff = __SHIFTOUT(mreg, RCTL_MO);
15131 	if (moff != 0)
15132 		preg |= moff << BM_RCTL_MO_SHIFT;
15133 	if (mreg & RCTL_BAM)
15134 		preg |= BM_RCTL_BAM;
15135 	if (mreg & RCTL_PMCF)
15136 		preg |= BM_RCTL_PMCF;
15137 	mreg = CSR_READ(sc, WMREG_CTRL);
15138 	if (mreg & CTRL_RFCE)
15139 		preg |= BM_RCTL_RFCE;
15140 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
15141 
15142 	wuc = WUC_APME | WUC_PME_EN;
15143 	wufc = WUFC_MAG;
15144 	/* Enable PHY wakeup in MAC register */
15145 	CSR_WRITE(sc, WMREG_WUC,
15146 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
15147 	CSR_WRITE(sc, WMREG_WUFC, wufc);
15148 
15149 	/* Configure and enable PHY wakeup in PHY registers */
15150 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
15151 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
15152 
15153 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
15154 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
15155 
15156 release:
15157 	sc->phy.release(sc);
15158 
15159 	return rv;
15160 }
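
/*
 * The BM wakeup registers used above live in BM_WUC_PAGE and are only
 * reachable through an enable/access/disable sequence; a condensed
 * sketch of that protocol (PHY semaphore already held):
 *
 *	wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
 *	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
 *	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
 */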
15161 
15162 /* Power down workaround on D3 */
15163 static void
15164 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
15165 {
15166 	uint32_t reg;
15167 	uint16_t phyreg;
15168 	int i;
15169 
15170 	for (i = 0; i < 2; i++) {
15171 		/* Disable link */
15172 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
15173 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
15174 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
15175 
15176 		/*
15177 		 * Call gig speed drop workaround on Gig disable before
15178 		 * accessing any PHY registers
15179 		 */
15180 		if (sc->sc_type == WM_T_ICH8)
15181 			wm_gig_downshift_workaround_ich8lan(sc);
15182 
15183 		/* Write VR power-down enable */
15184 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
15185 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
15186 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
15187 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
15188 
15189 		/* Read it back and test */
15190 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
15191 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
15192 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
15193 			break;
15194 
15195 		/* Issue PHY reset and repeat at most one more time */
15196 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
15197 	}
15198 }
15199 
15200 /*
15201  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
15202  *  @sc: pointer to the HW structure
15203  *
15204  *  During S0 to Sx transition, it is possible the link remains at gig
15205  *  instead of negotiating to a lower speed.  Before going to Sx, set
15206  *  'Gig Disable' to force link speed negotiation to a lower speed based on
15207  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
15208  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
15209  *  needs to be written.
15210  *  Parts that support (and are linked to a partner which supports) EEE in
15211  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
15212  *  than 10Mbps w/o EEE.
15213  */
15214 static void
15215 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
15216 {
15217 	device_t dev = sc->sc_dev;
15218 	struct ethercom *ec = &sc->sc_ethercom;
15219 	uint32_t phy_ctrl;
15220 	int rv;
15221 
15222 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
15223 	phy_ctrl |= PHY_CTRL_GBE_DIS;
15224 
15225 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
15226 
15227 	if (sc->sc_phytype == WMPHY_I217) {
15228 		uint16_t devid = sc->sc_pcidevid;
15229 
15230 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
15231 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
15232 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
15233 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
15234 		    (sc->sc_type >= WM_T_PCH_SPT))
15235 			CSR_WRITE(sc, WMREG_FEXTNVM6,
15236 			    CSR_READ(sc, WMREG_FEXTNVM6)
15237 			    & ~FEXTNVM6_REQ_PLL_CLK);
15238 
15239 		if (sc->phy.acquire(sc) != 0)
15240 			goto out;
15241 
15242 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
15243 			uint16_t eee_advert;
15244 
15245 			rv = wm_read_emi_reg_locked(dev,
15246 			    I217_EEE_ADVERTISEMENT, &eee_advert);
15247 			if (rv)
15248 				goto release;
15249 
15250 			/*
15251 			 * Disable LPLU if both link partners support 100BaseT
15252 			 * EEE and 100Full is advertised on both ends of the
15253 			 * link, and enable Auto Enable LPI since there will
15254 			 * be no driver to enable LPI while in Sx.
15255 			 */
15256 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
15257 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
15258 				uint16_t anar, phy_reg;
15259 
15260 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
15261 				    &anar);
15262 				if (anar & ANAR_TX_FD) {
15263 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
15264 					    PHY_CTRL_NOND0A_LPLU);
15265 
15266 					/* Set Auto Enable LPI after link up */
15267 					sc->phy.readreg_locked(dev, 2,
15268 					    I217_LPI_GPIO_CTRL, &phy_reg);
15269 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
15270 					sc->phy.writereg_locked(dev, 2,
15271 					    I217_LPI_GPIO_CTRL, phy_reg);
15272 				}
15273 			}
15274 		}
15275 
15276 		/*
15277 		 * For i217 Intel Rapid Start Technology support,
15278 		 * when the system is going into Sx and no manageability engine
15279 		 * is present, the driver must configure proxy to reset only on
15280 		 * power good.	LPI (Low Power Idle) state must also reset only
15281 		 * on power good, as well as the MTA (Multicast table array).
15282 		 * The SMBus release must also be disabled on LCD reset.
15283 		 */
15284 
15285 		/*
15286 		 * Enable MTA to reset for Intel Rapid Start Technology
15287 		 * Support
15288 		 */
15289 
15290 release:
15291 		sc->phy.release(sc);
15292 	}
15293 out:
15294 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
15295 
15296 	if (sc->sc_type == WM_T_ICH8)
15297 		wm_gig_downshift_workaround_ich8lan(sc);
15298 
15299 	if (sc->sc_type >= WM_T_PCH) {
15300 		wm_oem_bits_config_ich8lan(sc, false);
15301 
15302 		/* Reset PHY to activate OEM bits on 82577/8 */
15303 		if (sc->sc_type == WM_T_PCH)
15304 			wm_reset_phy(sc);
15305 
15306 		if (sc->phy.acquire(sc) != 0)
15307 			return;
15308 		wm_write_smbus_addr(sc);
15309 		sc->phy.release(sc);
15310 	}
15311 }
15312 
15313 /*
15314  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
15315  *  @sc: pointer to the HW structure
15316  *
15317  *  During Sx to S0 transitions on non-managed devices or managed devices
15318  *  on which PHY resets are not blocked, if the PHY registers cannot be
15319  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
15320  *  the PHY.
15321  *  On i217, setup Intel Rapid Start Technology.
15322  */
15323 static int
15324 wm_resume_workarounds_pchlan(struct wm_softc *sc)
15325 {
15326 	device_t dev = sc->sc_dev;
15327 	int rv;
15328 
15329 	if (sc->sc_type < WM_T_PCH2)
15330 		return 0;
15331 
15332 	rv = wm_init_phy_workarounds_pchlan(sc);
15333 	if (rv != 0)
15334 		return -1;
15335 
15336 	/* For i217 Intel Rapid Start Technology support, when the system
15337 	 * is transitioning from Sx and no manageability engine is present,
15338 	 * configure SMBus to restore on reset, disable proxy, and enable
15339 	 * the reset on MTA (Multicast table array).
15340 	 */
15341 	if (sc->sc_phytype == WMPHY_I217) {
15342 		uint16_t phy_reg;
15343 
15344 		if (sc->phy.acquire(sc) != 0)
15345 			return -1;
15346 
15347 		/* Clear Auto Enable LPI after link up */
15348 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
15349 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
15350 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
15351 
15352 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
15353 			/* Restore clear on SMB if no manageability engine
15354 			 * is present
15355 			 */
15356 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
15357 			    &phy_reg);
15358 			if (rv != 0)
15359 				goto release;
15360 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
15361 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
15362 
15363 			/* Disable Proxy */
15364 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
15365 		}
15366 		/* Enable reset on MTA */
15367 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
15368 		if (rv != 0)
15369 			goto release;
15370 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
15371 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
15372 
15373 release:
15374 		sc->phy.release(sc);
15375 		return rv;
15376 	}
15377 
15378 	return 0;
15379 }
15380 
15381 static void
15382 wm_enable_wakeup(struct wm_softc *sc)
15383 {
15384 	uint32_t reg, pmreg;
15385 	pcireg_t pmode;
15386 	int rv = 0;
15387 
15388 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
15389 		device_xname(sc->sc_dev), __func__));
15390 
15391 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
15392 	    &pmreg, NULL) == 0)
15393 		return;
15394 
15395 	if ((sc->sc_flags & WM_F_WOL) == 0)
15396 		goto pme;
15397 
15398 	/* Advertise the wakeup capability */
15399 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
15400 	    | CTRL_SWDPIN(3));
15401 
15402 	/* Keep the laser running on fiber adapters */
15403 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
15404 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
15405 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
15406 		reg |= CTRL_EXT_SWDPIN(3);
15407 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15408 	}
15409 
15410 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
15411 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
15412 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
15413 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
15414 		wm_suspend_workarounds_ich8lan(sc);
15415 
15416 #if 0	/* For the multicast packet */
15417 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
15418 	reg |= WUFC_MC;
15419 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
15420 #endif
15421 
15422 	if (sc->sc_type >= WM_T_PCH) {
15423 		rv = wm_enable_phy_wakeup(sc);
15424 		if (rv != 0)
15425 			goto pme;
15426 	} else {
15427 		/* Enable wakeup by the MAC */
15428 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
15429 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
15430 	}
15431 
15432 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
15433 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
15434 		|| (sc->sc_type == WM_T_PCH2))
15435 	    && (sc->sc_phytype == WMPHY_IGP_3))
15436 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
15437 
15438 pme:
15439 	/* Request PME */
15440 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
15441 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
15442 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
15443 		/* For WOL */
15444 		pmode |= PCI_PMCSR_PME_EN;
15445 	} else {
15446 		/* Disable WOL */
15447 		pmode &= ~PCI_PMCSR_PME_EN;
15448 	}
15449 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
15450 }
15451 
15452 /* Disable ASPM L0s and/or L1 for workaround */
15453 static void
15454 wm_disable_aspm(struct wm_softc *sc)
15455 {
15456 	pcireg_t reg, mask = 0;
15457 	const char *str = "";
15458 
15459 	/*
15460 	 * Only for PCIe devices that have the PCIe capability in the PCI
15461 	 * config space.
15462 	 */
15463 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
15464 		return;
15465 
15466 	switch (sc->sc_type) {
15467 	case WM_T_82571:
15468 	case WM_T_82572:
15469 		/*
15470 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
15471 		 * State Power management L1 State (ASPM L1).
15472 		 */
15473 		mask = PCIE_LCSR_ASPM_L1;
15474 		str = "L1 is";
15475 		break;
15476 	case WM_T_82573:
15477 	case WM_T_82574:
15478 	case WM_T_82583:
15479 		/*
15480 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
15481 		 *
15482 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
15483 		 * some chipsets.  The documents for the 82574 and 82583 say
15484 		 * that disabling L0s only with those specific chipsets is
15485 		 * sufficient, but we follow what the Intel em driver does.
15486 		 *
15487 		 * References:
15488 		 * Errata 8 of the Specification Update of i82573.
15489 		 * Errata 20 of the Specification Update of i82574.
15490 		 * Errata 9 of the Specification Update of i82583.
15491 		 */
15492 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
15493 		str = "L0s and L1 are";
15494 		break;
15495 	default:
15496 		return;
15497 	}
15498 
15499 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
15500 	    sc->sc_pcixe_capoff + PCIE_LCSR);
15501 	reg &= ~mask;
15502 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
15503 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
15504 
15505 	/* Print only in wm_attach() */
15506 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
15507 		aprint_verbose_dev(sc->sc_dev,
15508 		    "ASPM %s disabled to workaround the errata.\n", str);
15509 }
15510 
15511 /* LPLU */
15512 
15513 static void
15514 wm_lplu_d0_disable(struct wm_softc *sc)
15515 {
15516 	struct mii_data *mii = &sc->sc_mii;
15517 	uint32_t reg;
15518 	uint16_t phyval;
15519 
15520 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
15521 		device_xname(sc->sc_dev), __func__));
15522 
15523 	if (sc->sc_phytype == WMPHY_IFE)
15524 		return;
15525 
15526 	switch (sc->sc_type) {
15527 	case WM_T_82571:
15528 	case WM_T_82572:
15529 	case WM_T_82573:
15530 	case WM_T_82575:
15531 	case WM_T_82576:
15532 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
15533 		phyval &= ~PMR_D0_LPLU;
15534 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
15535 		break;
15536 	case WM_T_82580:
15537 	case WM_T_I350:
15538 	case WM_T_I210:
15539 	case WM_T_I211:
15540 		reg = CSR_READ(sc, WMREG_PHPM);
15541 		reg &= ~PHPM_D0A_LPLU;
15542 		CSR_WRITE(sc, WMREG_PHPM, reg);
15543 		break;
15544 	case WM_T_82574:
15545 	case WM_T_82583:
15546 	case WM_T_ICH8:
15547 	case WM_T_ICH9:
15548 	case WM_T_ICH10:
15549 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
15550 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
15551 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
15552 		CSR_WRITE_FLUSH(sc);
15553 		break;
15554 	case WM_T_PCH:
15555 	case WM_T_PCH2:
15556 	case WM_T_PCH_LPT:
15557 	case WM_T_PCH_SPT:
15558 	case WM_T_PCH_CNP:
15559 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
15560 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
15561 		if (wm_phy_resetisblocked(sc) == false)
15562 			phyval |= HV_OEM_BITS_ANEGNOW;
15563 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
15564 		break;
15565 	default:
15566 		break;
15567 	}
15568 }
15569 
15570 /* EEE */
15571 
15572 static int
15573 wm_set_eee_i350(struct wm_softc *sc)
15574 {
15575 	struct ethercom *ec = &sc->sc_ethercom;
15576 	uint32_t ipcnfg, eeer;
15577 	uint32_t ipcnfg_mask
15578 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
15579 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
15580 
15581 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
15582 
15583 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
15584 	eeer = CSR_READ(sc, WMREG_EEER);
15585 
15586 	/* Enable or disable per user setting */
15587 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
15588 		ipcnfg |= ipcnfg_mask;
15589 		eeer |= eeer_mask;
15590 	} else {
15591 		ipcnfg &= ~ipcnfg_mask;
15592 		eeer &= ~eeer_mask;
15593 	}
15594 
15595 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
15596 	CSR_WRITE(sc, WMREG_EEER, eeer);
15597 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
15598 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
15599 
15600 	return 0;
15601 }
15602 
15603 static int
15604 wm_set_eee_pchlan(struct wm_softc *sc)
15605 {
15606 	device_t dev = sc->sc_dev;
15607 	struct ethercom *ec = &sc->sc_ethercom;
15608 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
15609 	int rv = 0;
15610 
15611 	switch (sc->sc_phytype) {
15612 	case WMPHY_82579:
15613 		lpa = I82579_EEE_LP_ABILITY;
15614 		pcs_status = I82579_EEE_PCS_STATUS;
15615 		adv_addr = I82579_EEE_ADVERTISEMENT;
15616 		break;
15617 	case WMPHY_I217:
15618 		lpa = I217_EEE_LP_ABILITY;
15619 		pcs_status = I217_EEE_PCS_STATUS;
15620 		adv_addr = I217_EEE_ADVERTISEMENT;
15621 		break;
15622 	default:
15623 		return 0;
15624 	}
15625 
15626 	if (sc->phy.acquire(sc)) {
15627 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
15628 		return 0;
15629 	}
15630 
15631 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
15632 	if (rv != 0)
15633 		goto release;
15634 
15635 	/* Clear bits that enable EEE in various speeds */
15636 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
15637 
15638 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
15639 		/* Save off link partner's EEE ability */
15640 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
15641 		if (rv != 0)
15642 			goto release;
15643 
15644 		/* Read EEE advertisement */
15645 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
15646 			goto release;
15647 
15648 		/*
15649 		 * Enable EEE only for speeds in which the link partner is
15650 		 * EEE capable and for which we advertise EEE.
15651 		 */
15652 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
15653 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
15654 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
15655 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
15656 			if ((data & ANLPAR_TX_FD) != 0)
15657 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
15658 			else {
15659 				/*
15660 				 * EEE is not supported in 100Half, so ignore
15661 				 * partner's EEE in 100 ability if full-duplex
15662 				 * is not advertised.
15663 				 */
15664 				sc->eee_lp_ability
15665 				    &= ~AN_EEEADVERT_100_TX;
15666 			}
15667 		}
15668 	}
15669 
15670 	if (sc->sc_phytype == WMPHY_82579) {
15671 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
15672 		if (rv != 0)
15673 			goto release;
15674 
15675 		data &= ~I82579_LPI_PLL_SHUT_100;
15676 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
15677 	}
15678 
15679 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
15680 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
15681 		goto release;
15682 
15683 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
15684 release:
15685 	sc->phy.release(sc);
15686 
15687 	return rv;
15688 }
15689 
15690 static int
15691 wm_set_eee(struct wm_softc *sc)
15692 {
15693 	struct ethercom *ec = &sc->sc_ethercom;
15694 
15695 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
15696 		return 0;
15697 
15698 	if (sc->sc_type == WM_T_I354) {
15699 		/* I354 uses an external PHY */
15700 		return 0; /* not yet */
15701 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
15702 		return wm_set_eee_i350(sc);
15703 	else if (sc->sc_type >= WM_T_PCH2)
15704 		return wm_set_eee_pchlan(sc);
15705 
15706 	return 0;
15707 }
15708 
15709 /*
15710  * Workarounds (mainly PHY related).
15711  * Basically, PHY's workarounds are in the PHY drivers.
15712  */
15713 
15714 /* Work-around for 82566 Kumeran PCS lock loss */
15715 static int
15716 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
15717 {
15718 	struct mii_data *mii = &sc->sc_mii;
15719 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
15720 	int i, reg, rv;
15721 	uint16_t phyreg;
15722 
15723 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
15724 		device_xname(sc->sc_dev), __func__));
15725 
15726 	/* If the link is not up, do nothing */
15727 	if ((status & STATUS_LU) == 0)
15728 		return 0;
15729 
15730 	/* Nothing to do if the link is other than 1Gbps */
15731 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
15732 		return 0;
15733 
15734 	for (i = 0; i < 10; i++) {
15735 		/* read twice */
15736 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
15737 		if (rv != 0)
15738 			return rv;
15739 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
15740 		if (rv != 0)
15741 			return rv;
15742 
15743 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
15744 			goto out;	/* GOOD! */
15745 
15746 		/* Reset the PHY */
15747 		wm_reset_phy(sc);
15748 		delay(5*1000);
15749 	}
15750 
15751 	/* Disable GigE link negotiation */
15752 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
15753 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
15754 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
15755 
15756 	/*
15757 	 * Call gig speed drop workaround on Gig disable before accessing
15758 	 * any PHY registers.
15759 	 */
15760 	wm_gig_downshift_workaround_ich8lan(sc);
15761 
15762 out:
15763 	return 0;
15764 }
15765 
15766 /*
15767  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
15768  *  @sc: pointer to the HW structure
15769  *
15770  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
15771  *  LPLU, Gig disable, MDIC PHY reset):
15772  *    1) Set Kumeran Near-end loopback
15773  *    2) Clear Kumeran Near-end loopback
15774  *  Should only be called for ICH8[m] devices with any 1G Phy.
15775  */
15776 static void
15777 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
15778 {
15779 	uint16_t kmreg;
15780 
15781 	/* Only for igp3 */
15782 	if (sc->sc_phytype == WMPHY_IGP_3) {
15783 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
15784 			return;
15785 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
15786 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
15787 			return;
15788 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
15789 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
15790 	}
15791 }
15792 
15793 /*
15794  * Workaround for pch's PHYs
15795  * XXX should be moved to new PHY driver?
15796  */
15797 static int
15798 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
15799 {
15800 	device_t dev = sc->sc_dev;
15801 	struct mii_data *mii = &sc->sc_mii;
15802 	struct mii_softc *child;
15803 	uint16_t phy_data, phyrev = 0;
15804 	int phytype = sc->sc_phytype;
15805 	int rv;
15806 
15807 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
15808 		device_xname(dev), __func__));
15809 	KASSERT(sc->sc_type == WM_T_PCH);
15810 
15811 	/* Set MDIO slow mode before any other MDIO access */
15812 	if (phytype == WMPHY_82577)
15813 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
15814 			return rv;
15815 
15816 	child = LIST_FIRST(&mii->mii_phys);
15817 	if (child != NULL)
15818 		phyrev = child->mii_mpd_rev;
15819 
15820 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
15821 	if ((child != NULL) &&
15822 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
15823 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
15824 		/* Disable generation of early preamble (0x4431) */
15825 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
15826 		    &phy_data);
15827 		if (rv != 0)
15828 			return rv;
15829 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
15830 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
15831 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
15832 		    phy_data);
15833 		if (rv != 0)
15834 			return rv;
15835 
15836 		/* Preamble tuning for SSC */
15837 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
15838 		if (rv != 0)
15839 			return rv;
15840 	}
15841 
15842 	/* 82578 */
15843 	if (phytype == WMPHY_82578) {
15844 		/*
15845 		 * Return registers to default by doing a soft reset then
15846 		 * writing 0x3140 to the control register
15847 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
15848 		 */
15849 		if ((child != NULL) && (phyrev < 2)) {
15850 			PHY_RESET(child);
15851 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
15852 			if (rv != 0)
15853 				return rv;
15854 		}
15855 	}
15856 
15857 	/* Select page 0 */
15858 	if ((rv = sc->phy.acquire(sc)) != 0)
15859 		return rv;
15860 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
15861 	sc->phy.release(sc);
15862 	if (rv != 0)
15863 		return rv;
15864 
15865 	/*
15866 	 * Configure the K1 Si workaround during phy reset assuming there is
15867 	 * link so that it disables K1 if link is in 1Gbps.
15868 	 */
15869 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
15870 		return rv;
15871 
15872 	/* Workaround for link disconnects on a busy hub in half duplex */
15873 	rv = sc->phy.acquire(sc);
15874 	if (rv)
15875 		return rv;
15876 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
15877 	if (rv)
15878 		goto release;
15879 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
15880 	    phy_data & 0x00ff);
15881 	if (rv)
15882 		goto release;
15883 
15884 	/* Set MSE higher to enable link to stay up when noise is high */
15885 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
15886 release:
15887 	sc->phy.release(sc);
15888 
15889 	return rv;
15890 }
15891 
15892 /*
15893  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
15894  *  @sc:   pointer to the HW structure
15895  */
15896 static void
15897 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
15898 {
15899 
15900 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
15901 		device_xname(sc->sc_dev), __func__));
15902 
15903 	if (sc->phy.acquire(sc) != 0)
15904 		return;
15905 
15906 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
15907 
15908 	sc->phy.release(sc);
15909 }
15910 
15911 static void
15912 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
15913 {
15914 	device_t dev = sc->sc_dev;
15915 	uint32_t mac_reg;
15916 	uint16_t i, wuce;
15917 	int count;
15918 
15919 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
15920 		device_xname(dev), __func__));
15921 
15922 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
15923 		return;
15924 
15925 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
15926 	count = wm_rar_count(sc);
15927 	for (i = 0; i < count; i++) {
15928 		uint16_t lo, hi;
15929 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
15930 		lo = (uint16_t)(mac_reg & 0xffff);
15931 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
15932 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
15933 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
15934 
15935 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
15936 		lo = (uint16_t)(mac_reg & 0xffff);
15937 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
15938 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
15939 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
15940 	}
15941 
15942 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
15943 }
15944 
15945 /*
15946  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
15947  *  with 82579 PHY
15948  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
15949  */
15950 static int
15951 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
15952 {
15953 	device_t dev = sc->sc_dev;
15954 	int rar_count;
15955 	int rv;
15956 	uint32_t mac_reg;
15957 	uint16_t dft_ctrl, data;
15958 	uint16_t i;
15959 
15960 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
15961 		device_xname(dev), __func__));
15962 
15963 	if (sc->sc_type < WM_T_PCH2)
15964 		return 0;
15965 
15966 	/* Acquire PHY semaphore */
15967 	rv = sc->phy.acquire(sc);
15968 	if (rv != 0)
15969 		return rv;
15970 
15971 	/* Disable Rx path while enabling/disabling workaround */
	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
15973 	if (rv != 0)
15974 		goto out;
15975 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
15976 	    dft_ctrl | (1 << 14));
15977 	if (rv != 0)
15978 		goto out;
15979 
15980 	if (enable) {
15981 		/* Write Rx addresses (rar_entry_count for RAL/H, and
15982 		 * SHRAL/H) and initial CRC values to the MAC
15983 		 */
15984 		rar_count = wm_rar_count(sc);
15985 		for (i = 0; i < rar_count; i++) {
15986 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
15987 			uint32_t addr_high, addr_low;
15988 
15989 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
15990 			if (!(addr_high & RAL_AV))
15991 				continue;
15992 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
15993 			mac_addr[0] = (addr_low & 0xFF);
15994 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
15995 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
15996 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
15997 			mac_addr[4] = (addr_high & 0xFF);
15998 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
15999 
16000 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
16001 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
16002 		}
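
		/*
		 * Note on the RAICC write above (an inference, not a quote
		 * from the documentation): the MAC appears to be seeded with
		 * the complemented little-endian CRC-32 of each valid
		 * receive address, presumably so receive CRC handling still
		 * works once RCTL_SECRC strips received CRCs below.
		 */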
16003 
16004 		/* Write Rx addresses to the PHY */
16005 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
16006 	}
16007 
16008 	/*
16009 	 * If enable ==
16010 	 *	true: Enable jumbo frame workaround in the MAC.
16011 	 *	false: Write MAC register values back to h/w defaults.
16012 	 */
16013 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
16014 	if (enable) {
16015 		mac_reg &= ~(1 << 14);
16016 		mac_reg |= (7 << 15);
16017 	} else
16018 		mac_reg &= ~(0xf << 14);
16019 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
16020 
16021 	mac_reg = CSR_READ(sc, WMREG_RCTL);
16022 	if (enable) {
16023 		mac_reg |= RCTL_SECRC;
16024 		sc->sc_rctl |= RCTL_SECRC;
16025 		sc->sc_flags |= WM_F_CRC_STRIP;
16026 	} else {
16027 		mac_reg &= ~RCTL_SECRC;
16028 		sc->sc_rctl &= ~RCTL_SECRC;
16029 		sc->sc_flags &= ~WM_F_CRC_STRIP;
16030 	}
16031 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
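
	/*
	 * CRC stripping is toggled together with the workaround above; the
	 * FreeBSD and Linux drivers do the same, the assumption being that
	 * the jumbo frame workaround only operates with RCTL_SECRC set.
	 */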
16032 
16033 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
16034 	if (rv != 0)
16035 		goto out;
16036 	if (enable)
16037 		data |= 1 << 0;
16038 	else
16039 		data &= ~(1 << 0);
16040 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
16041 	if (rv != 0)
16042 		goto out;
16043 
16044 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
16045 	if (rv != 0)
16046 		goto out;
16047 	/*
	 * XXX FreeBSD and Linux do the same thing here: they set the same
	 * value in both the enable case and the disable case. Is that
	 * correct?
16050 	 */
16051 	data &= ~(0xf << 8);
16052 	data |= (0xb << 8);
16053 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
16054 	if (rv != 0)
16055 		goto out;
16056 
16057 	/*
16058 	 * If enable ==
16059 	 *	true: Enable jumbo frame workaround in the PHY.
16060 	 *	false: Write PHY register values back to h/w defaults.
16061 	 */
16062 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
16063 	if (rv != 0)
16064 		goto out;
16065 	data &= ~(0x7F << 5);
16066 	if (enable)
16067 		data |= (0x37 << 5);
16068 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
16069 	if (rv != 0)
16070 		goto out;
16071 
16072 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
16073 	if (rv != 0)
16074 		goto out;
16075 	if (enable)
16076 		data &= ~(1 << 13);
16077 	else
16078 		data |= (1 << 13);
16079 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
16080 	if (rv != 0)
16081 		goto out;
16082 
16083 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
16084 	if (rv != 0)
16085 		goto out;
16086 	data &= ~(0x3FF << 2);
16087 	if (enable)
16088 		data |= (I82579_TX_PTR_GAP << 2);
16089 	else
16090 		data |= (0x8 << 2);
16091 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
16092 	if (rv != 0)
16093 		goto out;
16094 
16095 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
16096 	    enable ? 0xf100 : 0x7e00);
16097 	if (rv != 0)
16098 		goto out;
16099 
16100 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
16101 	if (rv != 0)
16102 		goto out;
16103 	if (enable)
16104 		data |= 1 << 10;
16105 	else
16106 		data &= ~(1 << 10);
16107 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
16108 	if (rv != 0)
16109 		goto out;
16110 
16111 	/* Re-enable Rx path after enabling/disabling workaround */
16112 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
16113 	    dft_ctrl & ~(1 << 14));
16114 
16115 out:
16116 	sc->phy.release(sc);
16117 
16118 	return rv;
16119 }
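
/*
 * Usage sketch (a hypothetical call site, not code from this driver): the
 * workaround above is meant to be flipped whenever the MTU crosses into
 * jumbo territory on PCH2 and newer parts, e.g.
 *
 *	if (sc->sc_type >= WM_T_PCH2) {
 *		bool jumbo = ifp->if_mtu > ETHERMTU;
 *		error = wm_lv_jumbo_workaround_ich8lan(sc, jumbo);
 *	}
 *
 * where ifp and error are assumed locals.
 */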
16120 
16121 /*
16122  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
16123  *  done after every PHY reset.
16124  */
16125 static int
16126 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
16127 {
16128 	device_t dev = sc->sc_dev;
16129 	int rv;
16130 
16131 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
16132 		device_xname(dev), __func__));
16133 	KASSERT(sc->sc_type == WM_T_PCH2);
16134 
16135 	/* Set MDIO slow mode before any other MDIO access */
16136 	rv = wm_set_mdio_slow_mode_hv(sc);
16137 	if (rv != 0)
16138 		return rv;
16139 
16140 	rv = sc->phy.acquire(sc);
16141 	if (rv != 0)
16142 		return rv;
16143 	/* Set MSE higher to enable link to stay up when noise is high */
16144 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
16145 	if (rv != 0)
16146 		goto release;
16147 	/* Drop link after 5 times MSE threshold was reached */
16148 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
16149 release:
16150 	sc->phy.release(sc);
16151 
16152 	return rv;
16153 }
16154 
16155 /**
16156  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
16157  *  @link: link up bool flag
16158  *
16159  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
 *  preventing further DMA write requests.  Work around the issue by disabling
 *  the de-assertion of the clock request when in 1Gbps mode.
16162  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
16163  *  speeds in order to avoid Tx hangs.
16164  **/
16165 static int
16166 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
16167 {
16168 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
16169 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
16170 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
16171 	uint16_t phyreg;
16172 
16173 	if (link && (speed == STATUS_SPEED_1000)) {
		int rv = sc->phy.acquire(sc);
		if (rv != 0)
			return rv;
		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
		    &phyreg);
16177 		if (rv != 0)
16178 			goto release;
16179 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
16180 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
16181 		if (rv != 0)
16182 			goto release;
16183 		delay(20);
16184 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
16185 
		/* Restore the saved K1 configuration */
		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
		    phyreg);
16188 release:
16189 		sc->phy.release(sc);
16190 		return rv;
16191 	}
16192 
16193 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
16194 
16195 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
16196 	if (((child != NULL) && (child->mii_mpd_rev > 5))
16197 	    || !link
16198 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
16199 		goto update_fextnvm6;
16200 
16201 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
16202 
16203 	/* Clear link status transmit timeout */
16204 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
16205 	if (speed == STATUS_SPEED_100) {
16206 		/* Set inband Tx timeout to 5x10us for 100Half */
16207 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
16208 
16209 		/* Do not extend the K1 entry latency for 100Half */
16210 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
16211 	} else {
16212 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
16213 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
16214 
16215 		/* Extend the K1 entry latency for 10 Mbps */
16216 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
16217 	}
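
	/*
	 * The timeout field above counts in 10us units, so the two branches
	 * program 5 * 10us = 50us for 100Half and 50 * 10us = 500us for
	 * 10Mbps links.
	 */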
16218 
16219 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
16220 
16221 update_fextnvm6:
16222 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
16223 	return 0;
16224 }
16225 
16226 /*
16227  *  wm_k1_gig_workaround_hv - K1 Si workaround
16228  *  @sc:   pointer to the HW structure
16229  *  @link: link up bool flag
16230  *
16231  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
 *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
16233  *  If link is down, the function will restore the default K1 setting located
16234  *  in the NVM.
16235  */
16236 static int
16237 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
16238 {
16239 	int k1_enable = sc->sc_nvm_k1_enabled;
16240 
16241 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
16242 		device_xname(sc->sc_dev), __func__));
16243 
16244 	if (sc->phy.acquire(sc) != 0)
16245 		return -1;
16246 
16247 	if (link) {
16248 		k1_enable = 0;
16249 
16250 		/* Link stall fix for link up */
16251 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
16252 		    0x0100);
16253 	} else {
16254 		/* Link stall fix for link down */
16255 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
16256 		    0x4100);
16257 	}
16258 
16259 	wm_configure_k1_ich8lan(sc, k1_enable);
16260 	sc->phy.release(sc);
16261 
16262 	return 0;
16263 }
16264 
16265 /*
16266  *  wm_k1_workaround_lv - K1 Si workaround
16267  *  @sc:   pointer to the HW structure
16268  *
16269  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
 *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps.
 *  Disable K1 for 1000 and 100 speeds.
16272 static int
16273 wm_k1_workaround_lv(struct wm_softc *sc)
16274 {
16275 	uint32_t reg;
16276 	uint16_t phyreg;
16277 	int rv;
16278 
16279 	if (sc->sc_type != WM_T_PCH2)
16280 		return 0;
16281 
16282 	/* Set K1 beacon duration based on 10Mbps speed */
16283 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
16284 	if (rv != 0)
16285 		return rv;
16286 
16287 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
16288 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
16289 		if (phyreg &
16290 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
			/* LV 1G/100 packet drop issue workaround */
16292 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
16293 			    &phyreg);
16294 			if (rv != 0)
16295 				return rv;
16296 			phyreg &= ~HV_PM_CTRL_K1_ENA;
16297 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
16298 			    phyreg);
16299 			if (rv != 0)
16300 				return rv;
16301 		} else {
16302 			/* For 10Mbps */
16303 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
16304 			reg &= ~FEXTNVM4_BEACON_DURATION;
16305 			reg |= FEXTNVM4_BEACON_DURATION_16US;
16306 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
16307 		}
16308 	}
16309 
16310 	return 0;
16311 }
16312 
16313 /*
16314  *  wm_link_stall_workaround_hv - Si workaround
16315  *  @sc: pointer to the HW structure
16316  *
16317  *  This function works around a Si bug where the link partner can get
16318  *  a link up indication before the PHY does. If small packets are sent
16319  *  by the link partner they can be placed in the packet buffer without
16320  *  being properly accounted for by the PHY and will stall preventing
 *  being properly accounted for by the PHY and will stall, preventing
16322  *  packet buffer after the PHY detects link up.
16323  */
16324 static int
16325 wm_link_stall_workaround_hv(struct wm_softc *sc)
16326 {
16327 	uint16_t phyreg;
16328 
16329 	if (sc->sc_phytype != WMPHY_82578)
16330 		return 0;
16331 
16332 	/* Do not apply workaround if in PHY loopback bit 14 set */
	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
16334 	if ((phyreg & BMCR_LOOP) != 0)
16335 		return 0;
16336 
16337 	/* Check if link is up and at 1Gbps */
16338 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
16339 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
16340 	    | BM_CS_STATUS_SPEED_MASK;
16341 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
16342 		| BM_CS_STATUS_SPEED_1000))
16343 		return 0;
16344 
16345 	delay(200 * 1000);	/* XXX too big */
16346 
16347 	/* Flush the packets in the fifo buffer */
16348 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
16349 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
16350 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
16351 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
16352 
16353 	return 0;
16354 }
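
/*
 * Usage sketch (hypothetical): a link-state change path would run this
 * right after the PHY reports link up, e.g.
 *
 *	if (link_went_up)
 *		(void)wm_link_stall_workaround_hv(sc);
 *
 * where link_went_up is an assumed local flag; the function itself bails
 * out unless the PHY is an 82578 at 1Gbps.
 */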
16355 
16356 static int
16357 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
16358 {
16359 	int rv;
16360 	uint16_t reg;
16361 
16362 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
16363 	if (rv != 0)
16364 		return rv;
16365 
16366 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
16367 	    reg | HV_KMRN_MDIO_SLOW);
16368 }
16369 
16370 /*
16371  *  wm_configure_k1_ich8lan - Configure K1 power state
16372  *  @sc: pointer to the HW structure
16373  *  @enable: K1 state to configure
16374  *
16375  *  Configure the K1 power state based on the provided parameter.
16376  *  Assumes semaphore already acquired.
16377  */
16378 static void
16379 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
16380 {
16381 	uint32_t ctrl, ctrl_ext, tmp;
16382 	uint16_t kmreg;
16383 	int rv;
16384 
16385 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
16386 
16387 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
16388 	if (rv != 0)
16389 		return;
16390 
16391 	if (k1_enable)
16392 		kmreg |= KUMCTRLSTA_K1_ENABLE;
16393 	else
16394 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
16395 
16396 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
16397 	if (rv != 0)
16398 		return;
16399 
16400 	delay(20);
16401 
16402 	ctrl = CSR_READ(sc, WMREG_CTRL);
16403 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
16404 
16405 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
16406 	tmp |= CTRL_FRCSPD;
16407 
16408 	CSR_WRITE(sc, WMREG_CTRL, tmp);
16409 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
16410 	CSR_WRITE_FLUSH(sc);
16411 	delay(20);
16412 
16413 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
16414 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
16415 	CSR_WRITE_FLUSH(sc);
16416 	delay(20);
16417 
16418 	return;
16419 }
16420 
16421 /* special case - for 82575 - need to do manual init ... */
16422 static void
16423 wm_reset_init_script_82575(struct wm_softc *sc)
16424 {
16425 	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * The setup is the same as mentioned in the FreeBSD driver for the
	 * i82575.
16428 	 */
16429 
16430 	/* SerDes configuration via SERDESCTRL */
16431 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
16432 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
16433 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
16434 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
16435 
16436 	/* CCM configuration via CCMCTL register */
16437 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
16438 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
16439 
16440 	/* PCIe lanes configuration */
16441 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
16442 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
16443 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
16444 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
16445 
16446 	/* PCIe PLL Configuration */
16447 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
16448 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
16449 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
16450 }
16451 
16452 static void
16453 wm_reset_mdicnfg_82580(struct wm_softc *sc)
16454 {
16455 	uint32_t reg;
16456 	uint16_t nvmword;
16457 	int rv;
16458 
16459 	if (sc->sc_type != WM_T_82580)
16460 		return;
16461 	if ((sc->sc_flags & WM_F_SGMII) == 0)
16462 		return;
16463 
16464 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
16465 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
16466 	if (rv != 0) {
16467 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
16468 		    __func__);
16469 		return;
16470 	}
16471 
16472 	reg = CSR_READ(sc, WMREG_MDICNFG);
16473 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
16474 		reg |= MDICNFG_DEST;
16475 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
16476 		reg |= MDICNFG_COM_MDIO;
16477 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
16478 }
16479 
16480 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
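/*
 * An absent, unpowered, or held-in-reset PHY typically reads back as all-0s
 * or all-1s on MDIO, so both patterns are treated as invalid IDs.
 */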
16481 
16482 static bool
16483 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
16484 {
16485 	uint32_t reg;
16486 	uint16_t id1, id2;
16487 	int i, rv;
16488 
16489 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
16490 		device_xname(sc->sc_dev), __func__));
16491 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
16492 
16493 	id1 = id2 = 0xffff;
16494 	for (i = 0; i < 2; i++) {
16495 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
16496 		    &id1);
16497 		if ((rv != 0) || MII_INVALIDID(id1))
16498 			continue;
16499 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
16500 		    &id2);
16501 		if ((rv != 0) || MII_INVALIDID(id2))
16502 			continue;
16503 		break;
16504 	}
16505 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
16506 		goto out;
16507 
16508 	/*
16509 	 * In case the PHY needs to be in mdio slow mode,
16510 	 * set slow mode and try to get the PHY id again.
16511 	 */
16512 	rv = 0;
16513 	if (sc->sc_type < WM_T_PCH_LPT) {
16514 		sc->phy.release(sc);
16515 		wm_set_mdio_slow_mode_hv(sc);
16516 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
16517 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
16518 		sc->phy.acquire(sc);
16519 	}
16520 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
16521 		device_printf(sc->sc_dev, "XXX return with false\n");
16522 		return false;
16523 	}
16524 out:
16525 	if (sc->sc_type >= WM_T_PCH_LPT) {
16526 		/* Only unforce SMBus if ME is not active */
16527 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
16528 			uint16_t phyreg;
16529 
16530 			/* Unforce SMBus mode in PHY */
16531 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
16532 			    CV_SMB_CTRL, &phyreg);
16533 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
16534 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
16535 			    CV_SMB_CTRL, phyreg);
16536 
16537 			/* Unforce SMBus mode in MAC */
16538 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
16539 			reg &= ~CTRL_EXT_FORCE_SMBUS;
16540 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
16541 		}
16542 	}
16543 	return true;
16544 }
16545 
16546 static void
16547 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
16548 {
16549 	uint32_t reg;
16550 	int i;
16551 
16552 	/* Set PHY Config Counter to 50msec */
16553 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
16554 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
16555 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
16556 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
16557 
16558 	/* Toggle LANPHYPC */
16559 	reg = CSR_READ(sc, WMREG_CTRL);
16560 	reg |= CTRL_LANPHYPC_OVERRIDE;
16561 	reg &= ~CTRL_LANPHYPC_VALUE;
16562 	CSR_WRITE(sc, WMREG_CTRL, reg);
16563 	CSR_WRITE_FLUSH(sc);
16564 	delay(1000);
16565 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
16566 	CSR_WRITE(sc, WMREG_CTRL, reg);
16567 	CSR_WRITE_FLUSH(sc);
16568 
16569 	if (sc->sc_type < WM_T_PCH_LPT)
16570 		delay(50 * 1000);
16571 	else {
16572 		i = 20;
16573 
16574 		do {
16575 			delay(5 * 1000);
16576 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
16577 		    && i--);
16578 
16579 		delay(30 * 1000);
16580 	}
16581 }
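
/*
 * Timing sketch, derived from the constants above: pre-LPT parts get a
 * fixed 50ms wait after the toggle, while LPT and newer poll CTRL_EXT_LPCD
 * for at most about 20 * 5ms = 100ms and then settle for another 30ms,
 * i.e. roughly 130ms worst case.
 */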
16582 
16583 static int
16584 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
16585 {
16586 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
16587 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
16588 	uint32_t rxa;
16589 	uint16_t scale = 0, lat_enc = 0;
16590 	int32_t obff_hwm = 0;
16591 	int64_t lat_ns, value;
16592 
16593 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
16594 		device_xname(sc->sc_dev), __func__));
16595 
16596 	if (link) {
16597 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
16598 		uint32_t status;
16599 		uint16_t speed;
16600 		pcireg_t preg;
16601 
16602 		status = CSR_READ(sc, WMREG_STATUS);
16603 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
16604 		case STATUS_SPEED_10:
16605 			speed = 10;
16606 			break;
16607 		case STATUS_SPEED_100:
16608 			speed = 100;
16609 			break;
16610 		case STATUS_SPEED_1000:
16611 			speed = 1000;
16612 			break;
16613 		default:
16614 			device_printf(sc->sc_dev, "Unknown speed "
16615 			    "(status = %08x)\n", status);
16616 			return -1;
16617 		}
16618 
16619 		/* Rx Packet Buffer Allocation size (KB) */
16620 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
16621 
16622 		/*
16623 		 * Determine the maximum latency tolerated by the device.
16624 		 *
16625 		 * Per the PCIe spec, the tolerated latencies are encoded as
16626 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
16627 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
16628 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
16629 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
16630 		 */
16631 		lat_ns = ((int64_t)rxa * 1024 -
16632 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
16633 			+ ETHER_HDR_LEN))) * 8 * 1000;
16634 		if (lat_ns < 0)
16635 			lat_ns = 0;
16636 		else
16637 			lat_ns /= speed;
16638 		value = lat_ns;
16639 
16640 		while (value > LTRV_VALUE) {
			scale++;
16642 			value = howmany(value, __BIT(5));
16643 		}
16644 		if (scale > LTRV_SCALE_MAX) {
16645 			device_printf(sc->sc_dev,
16646 			    "Invalid LTR latency scale %d\n", scale);
16647 			return -1;
16648 		}
16649 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
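
		/*
		 * Worked example with illustrative numbers (not tied to any
		 * particular chip): rxa = 26KB, a 1500 byte MTU and 1000Mbps
		 * give
		 *	lat_ns = (26 * 1024 - 2 * 1514) * 8 * 1000 / 1000
		 *	       = 188768
		 * The loop then divides by 2^5 twice (188768 -> 5899 -> 185),
		 * leaving scale = 2 and value = 185, which encodes a
		 * tolerated latency of 185 * 2^10 ns, about 189us.
		 */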
16650 
16651 		/* Determine the maximum latency tolerated by the platform */
16652 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
16653 		    WM_PCI_LTR_CAP_LPT);
16654 		max_snoop = preg & 0xffff;
16655 		max_nosnoop = preg >> 16;
16656 
16657 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
16658 
16659 		if (lat_enc > max_ltr_enc) {
16660 			lat_enc = max_ltr_enc;
16661 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
16662 			    * PCI_LTR_SCALETONS(
16663 				    __SHIFTOUT(lat_enc,
16664 					PCI_LTR_MAXSNOOPLAT_SCALE));
16665 		}
16666 
16667 		if (lat_ns) {
16668 			lat_ns *= speed * 1000;
16669 			lat_ns /= 8;
16670 			lat_ns /= 1000000000;
16671 			obff_hwm = (int32_t)(rxa - lat_ns);
16672 		}
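
		/*
		 * Continuing the example above (assuming the platform limit
		 * did not clamp lat_enc): 188768 ns at 1000Mbps converts back
		 * to 188768 * 1000 * 1000 / 8 / 10^9 = 23KB drained during
		 * that latency, so with rxa = 26 the high water mark is
		 * 26 - 23 = 3KB.
		 */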
16673 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
			device_printf(sc->sc_dev, "Invalid high water mark %d"
			    " (rxa = %d, lat_ns = %d)\n",
16676 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
16677 			return -1;
16678 		}
16679 	}
16680 	/* Snoop and No-Snoop latencies the same */
16681 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
16682 	CSR_WRITE(sc, WMREG_LTRV, reg);
16683 
16684 	/* Set OBFF high water mark */
16685 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
16686 	reg |= obff_hwm;
16687 	CSR_WRITE(sc, WMREG_SVT, reg);
16688 
16689 	/* Enable OBFF */
16690 	reg = CSR_READ(sc, WMREG_SVCR);
16691 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
16692 	CSR_WRITE(sc, WMREG_SVCR, reg);
16693 
16694 	return 0;
16695 }
16696 
16697 /*
16698  * I210 Errata 25 and I211 Errata 10
16699  * Slow System Clock.
16700  *
 * Note that this function is called in both the FLASH and iNVM cases on NetBSD.
16702  */
16703 static int
16704 wm_pll_workaround_i210(struct wm_softc *sc)
16705 {
16706 	uint32_t mdicnfg, wuc;
16707 	uint32_t reg;
16708 	pcireg_t pcireg;
16709 	uint32_t pmreg;
16710 	uint16_t nvmword, tmp_nvmword;
16711 	uint16_t phyval;
16712 	bool wa_done = false;
16713 	int i, rv = 0;
16714 
16715 	/* Get Power Management cap offset */
16716 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
16717 	    &pmreg, NULL) == 0)
16718 		return -1;
16719 
16720 	/* Save WUC and MDICNFG registers */
16721 	wuc = CSR_READ(sc, WMREG_WUC);
16722 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
16723 
16724 	reg = mdicnfg & ~MDICNFG_DEST;
16725 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
16726 
16727 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
16728 		/*
16729 		 * The default value of the Initialization Control Word 1
16730 		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
16731 		 */
16732 		nvmword = INVM_DEFAULT_AL;
16733 	}
16734 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
16735 
16736 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
16737 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
16738 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
16739 
16740 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
16741 			rv = 0;
16742 			break; /* OK */
16743 		} else
16744 			rv = -1;
16745 
16746 		wa_done = true;
16747 		/* Directly reset the internal PHY */
16748 		reg = CSR_READ(sc, WMREG_CTRL);
16749 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
16750 
16751 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
16752 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
16753 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
16754 
16755 		CSR_WRITE(sc, WMREG_WUC, 0);
16756 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
16757 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
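
		/*
		 * Inferred intent of the D3/D0 bounce below: force the
		 * device to re-run its NVM autoload, this time with the
		 * PLL workaround value written to EEARBC above.
		 */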
16758 
16759 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
16760 		    pmreg + PCI_PMCSR);
16761 		pcireg |= PCI_PMCSR_STATE_D3;
16762 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
16763 		    pmreg + PCI_PMCSR, pcireg);
16764 		delay(1000);
16765 		pcireg &= ~PCI_PMCSR_STATE_D3;
16766 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
16767 		    pmreg + PCI_PMCSR, pcireg);
16768 
16769 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
16770 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
16771 
16772 		/* Restore WUC register */
16773 		CSR_WRITE(sc, WMREG_WUC, wuc);
16774 	}
16775 
16776 	/* Restore MDICNFG setting */
16777 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
16778 	if (wa_done)
16779 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
16780 	return rv;
16781 }
16782 
16783 static void
16784 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
16785 {
16786 	uint32_t reg;
16787 
16788 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
16789 		device_xname(sc->sc_dev), __func__));
16790 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
16791 	    || (sc->sc_type == WM_T_PCH_CNP));
16792 
16793 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
16794 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
16795 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
16796 
16797 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
16798 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
16799 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
16800 }
16801