/*	$NetBSD: if_wm.c,v 1.649 2019/09/26 04:16:03 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.649 2019/09/26 04:16:03 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	__nothing
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * The maximum number of interrupts this device driver can use.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544. We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames. If a mbuf chain has more than 64 DMA segments,
 * m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
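
/*
 * Illustrative note (not driver code): because txq_ndesc and txq_num are
 * powers of two, the "& (n - 1)" masks above implement modular wrap-around
 * without a division.  With a 4096-entry ring:
 *
 *	WM_NEXTTX(txq, 4094) == 4095
 *	WM_NEXTTX(txq, 4095) == 0	(4096 & 0xfff == 0)
 *
 * A non-power-of-two ring size would make these masks compute bogus
 * indices, which is why the queue structures below insist that both
 * counts be powers of two.
 */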

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal-sized
 * packets.  Jumbo packets consume 5 Rx buffers for a full-sized packet.
 * We allocate 256 receive descriptors, each with a 2k buffer (MCLBYTES),
 * which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
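
/*
 * Worked example for the sizing comment above (assuming MCLBYTES == 2048
 * and a ~9k jumbo frame): one jumbo packet spans about 9018 / 2048 -> 5
 * Rx buffers, so the 256-entry ring holds roughly 256 / 5 = 51 such
 * packets in flight, hence "room for 50 jumbo packets".
 */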

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
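
/*
 * Usage sketch for the counter macros above (illustrative; the queue
 * structures below use exactly this pattern).  A declaration such as
 *
 *	WM_Q_EVCNT_DEFINE(txq, defrag)
 *
 * expands to a name buffer plus a struct evcnt member, and at attach time
 *
 *	WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, 0, xname);
 *
 * formats the counter name as "%s%02d%s" -> "txq00defrag" and registers
 * it under the device name in xname via evcnt_attach_dynamic().
 */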

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags (not ifp->if_flags)
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
	krndsource_t rnd_source;	/* random source */
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};
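
/*
 * These ops vectors let attach-time code select the right semaphore and
 * access routines once per chip family, so callers stay generic.
 * Illustrative call pattern (a sketch, not copied from the driver):
 *
 *	if (sc->nvm.acquire(sc) == 0) {
 *		rv = sc->nvm.read(sc, offset, 1, &word);
 *		sc->nvm.release(sc);
 *	}
 */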

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookies.
					 * - legacy and MSI use sc_ihs[0] only
					 * - MSI-X uses sc_ihs[0] to
					 *   sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * - legacy and MSI use sc_intrs[0] only
					 * - MSI-X uses sc_intrs[0] to
					 *   sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)						\
	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
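
/*
 * Typical locking pattern (illustrative): the NULL checks let these
 * macros be used while sc_core_lock is not yet allocated (early in
 * attach) or already destroyed (detach).
 *
 *	WM_CORE_LOCK(sc);
 *	KASSERT(WM_CORE_LOCKED(sc));
 *	...modify softc state...
 *	WM_CORE_UNLOCK(sc);
 */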

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
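
/*
 * The rxq_tailp pointer-to-pointer gives O(1) appends when a packet spans
 * several Rx buffers.  Illustrative sequence:
 *
 *	WM_RXCHAIN_RESET(rxq);		rxq_head = NULL, tailp = &rxq_head
 *	WM_RXCHAIN_LINK(rxq, m0);	rxq_head = m0, tailp = &m0->m_next
 *	WM_RXCHAIN_LINK(rxq, m1);	m0->m_next = m1, tailp = &m1->m_next
 */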

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)
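
/*
 * CSR_WRITE_FLUSH() forces posted PCI(e) writes out to the device by
 * reading back a harmless register (STATUS).  Illustrative sketch of the
 * usual "write, flush, then wait" sequence:
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
 *	CSR_WRITE_FLUSH(sc);
 *	delay(10000);	(safe to time only after the flush)
 */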

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
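
/*
 * The LO/HI pairs above split a descriptor ring's bus address into the
 * two 32-bit halves the MAC's base-address register pairs expect
 * (TDBAL/TDBAH, RDBAL/RDBAH); with a 32-bit bus_addr_t the HI half is
 * simply 0.  Illustrative sketch of how init code consumes them,
 * assuming the per-queue register accessors from if_wmreg.h:
 *
 *	CSR_WRITE(sc, WMREG_TDBAH(0), WM_CDTXADDR_HI(txq, 0));
 *	CSR_WRITE(sc, WMREG_TDBAL(0), WM_CDTXADDR_LO(txq, 0));
 */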

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
1449 	  WM_T_I350,		WMP_F_SERDES },
1450 
1451 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
1452 	  "I350 Gigabit Connection",
1453 	  WM_T_I350,		WMP_F_COPPER },
1454 
1455 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
1456 	  "I354 Gigabit Ethernet (KX)",
1457 	  WM_T_I354,		WMP_F_SERDES },
1458 
1459 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
1460 	  "I354 Gigabit Ethernet (SGMII)",
1461 	  WM_T_I354,		WMP_F_COPPER },
1462 
1463 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
1464 	  "I354 Gigabit Ethernet (2.5G)",
1465 	  WM_T_I354,		WMP_F_COPPER },
1466 
1467 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
1468 	  "I210-T1 Ethernet Server Adapter",
1469 	  WM_T_I210,		WMP_F_COPPER },
1470 
1471 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
1472 	  "I210 Ethernet (Copper OEM)",
1473 	  WM_T_I210,		WMP_F_COPPER },
1474 
1475 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
1476 	  "I210 Ethernet (Copper IT)",
1477 	  WM_T_I210,		WMP_F_COPPER },
1478 
1479 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
1480 	  "I210 Ethernet (Copper, FLASH less)",
1481 	  WM_T_I210,		WMP_F_COPPER },
1482 
1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
1484 	  "I210 Gigabit Ethernet (Fiber)",
1485 	  WM_T_I210,		WMP_F_FIBER },
1486 
1487 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
1488 	  "I210 Gigabit Ethernet (SERDES)",
1489 	  WM_T_I210,		WMP_F_SERDES },
1490 
1491 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
1492 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
1493 	  WM_T_I210,		WMP_F_SERDES },
1494 
1495 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
1496 	  "I210 Gigabit Ethernet (SGMII)",
1497 	  WM_T_I210,		WMP_F_COPPER },
1498 
1499 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
1500 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
1501 	  WM_T_I210,		WMP_F_COPPER },
1502 
1503 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
1504 	  "I211 Ethernet (COPPER)",
1505 	  WM_T_I211,		WMP_F_COPPER },
1506 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
1507 	  "I217 V Ethernet Connection",
1508 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1509 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
1510 	  "I217 LM Ethernet Connection",
1511 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1512 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
1513 	  "I218 V Ethernet Connection",
1514 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1515 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
1516 	  "I218 V Ethernet Connection",
1517 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1518 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
1519 	  "I218 V Ethernet Connection",
1520 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1521 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
1522 	  "I218 LM Ethernet Connection",
1523 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1524 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
1525 	  "I218 LM Ethernet Connection",
1526 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1527 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
1528 	  "I218 LM Ethernet Connection",
1529 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1530 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
1531 	  "I219 LM Ethernet Connection",
1532 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1533 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
1534 	  "I219 LM Ethernet Connection",
1535 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1536 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
1537 	  "I219 LM Ethernet Connection",
1538 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1539 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
1540 	  "I219 LM Ethernet Connection",
1541 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1542 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
1543 	  "I219 LM Ethernet Connection",
1544 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1545 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
1546 	  "I219 LM Ethernet Connection",
1547 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1548 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
1549 	  "I219 LM Ethernet Connection",
1550 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1551 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
1552 	  "I219 LM Ethernet Connection",
1553 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1554 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
1555 	  "I219 LM Ethernet Connection",
1556 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1557 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
1558 	  "I219 V Ethernet Connection",
1559 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1560 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
1561 	  "I219 V Ethernet Connection",
1562 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1563 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
1564 	  "I219 V Ethernet Connection",
1565 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1566 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
1567 	  "I219 V Ethernet Connection",
1568 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1569 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
1570 	  "I219 V Ethernet Connection",
1571 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1572 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
1573 	  "I219 V Ethernet Connection",
1574 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1575 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
1576 	  "I219 V Ethernet Connection",
1577 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1578 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
1579 	  "I219 V Ethernet Connection",
1580 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1581 	{ 0,			0,
1582 	  NULL,
1583 	  0,			0 },
1584 };
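
/*
 * The all-zero entry above (NULL wmp_name) is the sentinel that
 * terminates this table; wm_lookup() stops scanning when it
 * reaches it.
 */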
1585 
1586 /*
1587  * Register read/write functions other than CSR_{READ|WRITE}().
1589  */
1590 
1591 #if 0 /* Not currently used */
1592 static inline uint32_t
1593 wm_io_read(struct wm_softc *sc, int reg)
1594 {
1595 
1596 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1597 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1598 }
1599 #endif
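
/*
 * Both wm_io_read() and wm_io_write() go through the indirect window
 * of the I/O BAR: the target register offset is written at BAR
 * offset 0 (the IOADDR register in Intel's documentation) and the
 * data is then read or written at BAR offset 4 (IODATA).
 */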
1600 
1601 static inline void
1602 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1603 {
1604 
1605 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1606 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1607 }
1608 
1609 static inline void
1610 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1611     uint32_t data)
1612 {
1613 	uint32_t regval;
1614 	int i;
1615 
1616 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1617 
1618 	CSR_WRITE(sc, reg, regval);
1619 
1620 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1621 		delay(5);
1622 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1623 			break;
1624 	}
1625 	if (i == SCTL_CTL_POLL_TIMEOUT) {
1626 		aprint_error("%s: WARNING:"
1627 		    " i82575 reg 0x%08x setup did not indicate ready\n",
1628 		    device_xname(sc->sc_dev), reg);
1629 	}
1630 }
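
/*
 * Illustrative sketch (the offset and value here are hypothetical):
 * a call such as
 *
 *	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x02, 0x5a);
 *
 * writes the 8-bit value 0x5a to sub-register 0x02 behind the SCTL
 * window, then polls up to SCTL_CTL_POLL_TIMEOUT times (5us apart)
 * for SCTL_CTL_READY.
 */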
1631 
1632 static inline void
1633 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1634 {
1635 	wa->wa_low = htole32(v & 0xffffffffU);
1636 	if (sizeof(bus_addr_t) == 8)
1637 		wa->wa_high = htole32((uint64_t) v >> 32);
1638 	else
1639 		wa->wa_high = 0;
1640 }
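
/*
 * Worked example (hypothetical address): on a platform with a 64-bit
 * bus_addr_t, wm_set_dma_addr() splits v == 0x0000000123456789 into
 * wa_low == 0x23456789 and wa_high == 0x00000001, both stored
 * little-endian as the descriptor format requires; with a 32-bit
 * bus_addr_t, wa_high is simply 0.
 */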
1641 
1642 /*
1643  * Descriptor sync/init functions.
1644  */
1645 static inline void
1646 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1647 {
1648 	struct wm_softc *sc = txq->txq_sc;
1649 
1650 	/* If it will wrap around, sync to the end of the ring. */
1651 	if ((start + num) > WM_NTXDESC(txq)) {
1652 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1653 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
1654 		    (WM_NTXDESC(txq) - start), ops);
1655 		num -= (WM_NTXDESC(txq) - start);
1656 		start = 0;
1657 	}
1658 
1659 	/* Now sync whatever is left. */
1660 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1661 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
1662 }
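
/*
 * A minimal usage sketch, assuming a caller that has just written
 * "count" descriptors starting at slot "first" (both hypothetical
 * variables); kept under #if 0 like the other non-built example code
 * in this file.
 */
#if 0
static inline void
wm_cdtxsync_usage_example(struct wm_txqueue *txq, int first, int count)
{

	/*
	 * Publish the freshly written descriptors to the device.
	 * E.g. with WM_NTXDESC(txq) == 256, first == 250 and
	 * count == 10, wm_cdtxsync() syncs descriptors 250..255,
	 * wraps, then syncs 0..3 in a second bus_dmamap_sync() call.
	 */
	wm_cdtxsync(txq, first, count,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
#endif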
1663 
1664 static inline void
1665 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1666 {
1667 	struct wm_softc *sc = rxq->rxq_sc;
1668 
1669 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1670 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
1671 }
1672 
1673 static inline void
1674 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1675 {
1676 	struct wm_softc *sc = rxq->rxq_sc;
1677 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1678 	struct mbuf *m = rxs->rxs_mbuf;
1679 
1680 	/*
1681 	 * Note: We scoot the packet forward 2 bytes in the buffer
1682 	 * so that the payload after the Ethernet header is aligned
1683 	 * to a 4-byte boundary.
1684 	 *
1685 	 * XXX BRAINDAMAGE ALERT!
1686 	 * The stupid chip uses the same size for every buffer, which
1687 	 * is set in the Receive Control register.  We are using the 2K
1688 	 * size option, but what we REALLY want is (2K - 2)!  For this
1689 	 * reason, we can't "scoot" packets longer than the standard
1690 	 * Ethernet MTU.  On strict-alignment platforms, if the total
1691 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
1692 	 * the upper layer copy the headers.
1693 	 */
1694 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
1695 
1696 	if (sc->sc_type == WM_T_82574) {
1697 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
1698 		rxd->erx_data.erxd_addr =
1699 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1700 		rxd->erx_data.erxd_dd = 0;
1701 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
1702 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
1703 
1704 		rxd->nqrx_data.nrxd_paddr =
1705 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1706 		/* Currently, split header is not supported. */
1707 		rxd->nqrx_data.nrxd_haddr = 0;
1708 	} else {
1709 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1710 
1711 		wm_set_dma_addr(&rxd->wrx_addr,
1712 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1713 		rxd->wrx_len = 0;
1714 		rxd->wrx_cksum = 0;
1715 		rxd->wrx_status = 0;
1716 		rxd->wrx_errors = 0;
1717 		rxd->wrx_special = 0;
1718 	}
1719 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1720 
1721 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
1722 }
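
/*
 * Worked example for the "scoot" above: with sc_align_tweak == 2 the
 * 14-byte Ethernet header starts at buffer offset 2, so the IP header
 * that follows begins at offset 16, a 4-byte aligned address.
 */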
1723 
1724 /*
1725  * Device driver interface functions and commonly used functions.
1726  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1727  */
1728 
1729 /* Lookup supported device table */
1730 static const struct wm_product *
1731 wm_lookup(const struct pci_attach_args *pa)
1732 {
1733 	const struct wm_product *wmp;
1734 
1735 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1736 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1737 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1738 			return wmp;
1739 	}
1740 	return NULL;
1741 }
1742 
1743 /* The match function (ca_match) */
1744 static int
1745 wm_match(device_t parent, cfdata_t cf, void *aux)
1746 {
1747 	struct pci_attach_args *pa = aux;
1748 
1749 	if (wm_lookup(pa) != NULL)
1750 		return 1;
1751 
1752 	return 0;
1753 }
1754 
1755 /* The attach function (ca_attach) */
1756 static void
1757 wm_attach(device_t parent, device_t self, void *aux)
1758 {
1759 	struct wm_softc *sc = device_private(self);
1760 	struct pci_attach_args *pa = aux;
1761 	prop_dictionary_t dict;
1762 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1763 	pci_chipset_tag_t pc = pa->pa_pc;
1764 	int counts[PCI_INTR_TYPE_SIZE];
1765 	pci_intr_type_t max_type;
1766 	const char *eetype, *xname;
1767 	bus_space_tag_t memt;
1768 	bus_space_handle_t memh;
1769 	bus_size_t memsize;
1770 	int memh_valid;
1771 	int i, error;
1772 	const struct wm_product *wmp;
1773 	prop_data_t ea;
1774 	prop_number_t pn;
1775 	uint8_t enaddr[ETHER_ADDR_LEN];
1776 	char buf[256];
1777 	uint16_t cfg1, cfg2, swdpin, nvmword;
1778 	pcireg_t preg, memtype;
1779 	uint16_t eeprom_data, apme_mask;
1780 	bool force_clear_smbi;
1781 	uint32_t link_mode;
1782 	uint32_t reg;
1783 
1784 	sc->sc_dev = self;
1785 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
1786 	sc->sc_core_stopping = false;
1787 
1788 	wmp = wm_lookup(pa);
1789 #ifdef DIAGNOSTIC
1790 	if (wmp == NULL) {
1791 		printf("\n");
1792 		panic("wm_attach: impossible");
1793 	}
1794 #endif
1795 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
1796 
1797 	sc->sc_pc = pa->pa_pc;
1798 	sc->sc_pcitag = pa->pa_tag;
1799 
1800 	if (pci_dma64_available(pa))
1801 		sc->sc_dmat = pa->pa_dmat64;
1802 	else
1803 		sc->sc_dmat = pa->pa_dmat;
1804 
1805 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
1806 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
1807 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1808 
1809 	sc->sc_type = wmp->wmp_type;
1810 
1811 	/* Set default function pointers */
1812 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
1813 	sc->phy.release = sc->nvm.release = wm_put_null;
1814 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
1815 
1816 	if (sc->sc_type < WM_T_82543) {
1817 		if (sc->sc_rev < 2) {
1818 			aprint_error_dev(sc->sc_dev,
1819 			    "i82542 must be at least rev. 2\n");
1820 			return;
1821 		}
1822 		if (sc->sc_rev < 3)
1823 			sc->sc_type = WM_T_82542_2_0;
1824 	}
1825 
1826 	/*
1827 	 * Disable MSI for Errata:
1828 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
1829 	 *
1830 	 *  82544: Errata 25
1831 	 *  82540: Errata  6 (easy to reproduce device timeout)
1832 	 *  82545: Errata  4 (easy to reproduce device timeout)
1833 	 *  82546: Errata 26 (easy to reproduce device timeout)
1834 	 *  82541: Errata  7 (easy to reproduce device timeout)
1835 	 *
1836 	 * "Byte Enables 2 and 3 are not set on MSI writes"
1837 	 *
1838 	 *  82571 & 82572: Errata 63
1839 	 */
1840 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
1841 	    || (sc->sc_type == WM_T_82572))
1842 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
1843 
1844 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1845 	    || (sc->sc_type == WM_T_82580)
1846 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
1847 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
1848 		sc->sc_flags |= WM_F_NEWQUEUE;
1849 
1850 	/* Set device properties (mactype) */
1851 	dict = device_properties(sc->sc_dev);
1852 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1853 
1854 	/*
1855 	 * Map the device.  All devices support memory-mapped access,
1856 	 * and it is really required for normal operation.
1857 	 */
1858 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1859 	switch (memtype) {
1860 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1861 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1862 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1863 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1864 		break;
1865 	default:
1866 		memh_valid = 0;
1867 		break;
1868 	}
1869 
1870 	if (memh_valid) {
1871 		sc->sc_st = memt;
1872 		sc->sc_sh = memh;
1873 		sc->sc_ss = memsize;
1874 	} else {
1875 		aprint_error_dev(sc->sc_dev,
1876 		    "unable to map device registers\n");
1877 		return;
1878 	}
1879 
1880 	/*
1881 	 * In addition, i82544 and later support I/O mapped indirect
1882 	 * register access.  It is not desirable (nor supported in
1883 	 * this driver) to use it for normal operation, though it is
1884 	 * required to work around bugs in some chip versions.
1885 	 */
1886 	if (sc->sc_type >= WM_T_82544) {
1887 		/* First we have to find the I/O BAR. */
1888 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1889 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1890 			if (memtype == PCI_MAPREG_TYPE_IO)
1891 				break;
1892 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
1893 			    PCI_MAPREG_MEM_TYPE_64BIT)
1894 				i += 4;	/* skip high bits, too */
1895 		}
1896 		if (i < PCI_MAPREG_END) {
1897 			/*
1898 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
1899 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
1900 			 * That's not a problem, because newer chips don't
1901 			 * have this bug.
1902 			 *
1903 			 * The i8254x apparently doesn't respond when the
1904 			 * I/O BAR is 0, which looks somewhat as if it
1905 			 * hasn't been configured.
1906 			 */
1907 			preg = pci_conf_read(pc, pa->pa_tag, i);
1908 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1909 				aprint_error_dev(sc->sc_dev,
1910 				    "WARNING: I/O BAR at zero.\n");
1911 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1912 					0, &sc->sc_iot, &sc->sc_ioh,
1913 					NULL, &sc->sc_ios) == 0) {
1914 				sc->sc_flags |= WM_F_IOH_VALID;
1915 			} else
1916 				aprint_error_dev(sc->sc_dev,
1917 				    "WARNING: unable to map I/O space\n");
1918 		}
1919 
1920 	}
1921 
1922 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
1923 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1924 	preg |= PCI_COMMAND_MASTER_ENABLE;
1925 	if (sc->sc_type < WM_T_82542_2_1)
1926 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1927 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1928 
1929 	/* Power up chip */
1930 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
1931 	    && error != EOPNOTSUPP) {
1932 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1933 		return;
1934 	}
1935 
1936 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
1937 	/*
1938 	 * Don't use MSI-X if we can use only one queue, to save an
1939 	 * interrupt resource.
1940 	 */
1941 	if (sc->sc_nqueues > 1) {
1942 		max_type = PCI_INTR_TYPE_MSIX;
1943 		/*
1944 		 * The 82583 has an MSI-X capability in the PCI configuration
1945 		 * space, but it doesn't support MSI-X; at least the
1946 		 * documentation doesn't say anything about it.
1947 		 */
1948 		counts[PCI_INTR_TYPE_MSIX]
1949 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
1950 	} else {
1951 		max_type = PCI_INTR_TYPE_MSI;
1952 		counts[PCI_INTR_TYPE_MSIX] = 0;
1953 	}
1954 
1955 	/* Allocation settings */
1956 	counts[PCI_INTR_TYPE_MSI] = 1;
1957 	counts[PCI_INTR_TYPE_INTX] = 1;
1958 	/* overridden by disable flags */
1959 	if (wm_disable_msi != 0) {
1960 		counts[PCI_INTR_TYPE_MSI] = 0;
1961 		if (wm_disable_msix != 0) {
1962 			max_type = PCI_INTR_TYPE_INTX;
1963 			counts[PCI_INTR_TYPE_MSIX] = 0;
1964 		}
1965 	} else if (wm_disable_msix != 0) {
1966 		max_type = PCI_INTR_TYPE_MSI;
1967 		counts[PCI_INTR_TYPE_MSIX] = 0;
1968 	}
1969 
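	/*
	 * Allocate interrupts in decreasing order of preference:
	 * MSI-X, then MSI, then INTx.  If handler setup for the
	 * allocated type fails, release the vectors and jump back
	 * here to retry with the next weaker type.
	 */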
1970 alloc_retry:
1971 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
1972 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
1973 		return;
1974 	}
1975 
1976 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
1977 		error = wm_setup_msix(sc);
1978 		if (error) {
1979 			pci_intr_release(pc, sc->sc_intrs,
1980 			    counts[PCI_INTR_TYPE_MSIX]);
1981 
1982 			/* Setup for MSI: Disable MSI-X */
1983 			max_type = PCI_INTR_TYPE_MSI;
1984 			counts[PCI_INTR_TYPE_MSI] = 1;
1985 			counts[PCI_INTR_TYPE_INTX] = 1;
1986 			goto alloc_retry;
1987 		}
1988 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
1989 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
1990 		error = wm_setup_legacy(sc);
1991 		if (error) {
1992 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
1993 			    counts[PCI_INTR_TYPE_MSI]);
1994 
1995 			/* The next try is for INTx: Disable MSI */
1996 			max_type = PCI_INTR_TYPE_INTX;
1997 			counts[PCI_INTR_TYPE_INTX] = 1;
1998 			goto alloc_retry;
1999 		}
2000 	} else {
2001 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
2002 		error = wm_setup_legacy(sc);
2003 		if (error) {
2004 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
2005 			    counts[PCI_INTR_TYPE_INTX]);
2006 			return;
2007 		}
2008 	}
2009 
2010 	/*
2011 	 * Check the function ID (unit number of the chip).
2012 	 */
2013 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
2014 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
2015 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2016 	    || (sc->sc_type == WM_T_82580)
2017 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2018 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
2019 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
2020 	else
2021 		sc->sc_funcid = 0;
2022 
2023 	/*
2024 	 * Determine a few things about the bus we're connected to.
2025 	 */
2026 	if (sc->sc_type < WM_T_82543) {
2027 		/* We don't really know the bus characteristics here. */
2028 		sc->sc_bus_speed = 33;
2029 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
2030 		/*
2031 		 * CSA (Communication Streaming Architecture) is about as fast
2032 		 * as a 32-bit 66MHz PCI bus.
2033 		 */
2034 		sc->sc_flags |= WM_F_CSA;
2035 		sc->sc_bus_speed = 66;
2036 		aprint_verbose_dev(sc->sc_dev,
2037 		    "Communication Streaming Architecture\n");
2038 		if (sc->sc_type == WM_T_82547) {
2039 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
2040 			callout_setfunc(&sc->sc_txfifo_ch,
2041 			    wm_82547_txfifo_stall, sc);
2042 			aprint_verbose_dev(sc->sc_dev,
2043 			    "using 82547 Tx FIFO stall work-around\n");
2044 		}
2045 	} else if (sc->sc_type >= WM_T_82571) {
2046 		sc->sc_flags |= WM_F_PCIE;
2047 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
2048 		    && (sc->sc_type != WM_T_ICH10)
2049 		    && (sc->sc_type != WM_T_PCH)
2050 		    && (sc->sc_type != WM_T_PCH2)
2051 		    && (sc->sc_type != WM_T_PCH_LPT)
2052 		    && (sc->sc_type != WM_T_PCH_SPT)
2053 		    && (sc->sc_type != WM_T_PCH_CNP)) {
2054 			/* ICH* and PCH* have no PCIe capability registers */
2055 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2056 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
2057 				NULL) == 0)
2058 				aprint_error_dev(sc->sc_dev,
2059 				    "unable to find PCIe capability\n");
2060 		}
2061 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
2062 	} else {
2063 		reg = CSR_READ(sc, WMREG_STATUS);
2064 		if (reg & STATUS_BUS64)
2065 			sc->sc_flags |= WM_F_BUS64;
2066 		if ((reg & STATUS_PCIX_MODE) != 0) {
2067 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
2068 
2069 			sc->sc_flags |= WM_F_PCIX;
2070 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2071 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
2072 				aprint_error_dev(sc->sc_dev,
2073 				    "unable to find PCIX capability\n");
2074 			else if (sc->sc_type != WM_T_82545_3 &&
2075 				 sc->sc_type != WM_T_82546_3) {
2076 				/*
2077 				 * Work around a problem caused by the BIOS
2078 				 * setting the max memory read byte count
2079 				 * incorrectly.
2080 				 */
2081 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
2082 				    sc->sc_pcixe_capoff + PCIX_CMD);
2083 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
2084 				    sc->sc_pcixe_capoff + PCIX_STATUS);
2085 
2086 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
2087 				    PCIX_CMD_BYTECNT_SHIFT;
2088 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
2089 				    PCIX_STATUS_MAXB_SHIFT;
2090 				if (bytecnt > maxb) {
2091 					aprint_verbose_dev(sc->sc_dev,
2092 					    "resetting PCI-X MMRBC: %d -> %d\n",
2093 					    512 << bytecnt, 512 << maxb);
2094 					pcix_cmd = (pcix_cmd &
2095 					    ~PCIX_CMD_BYTECNT_MASK) |
2096 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
2097 					pci_conf_write(pa->pa_pc, pa->pa_tag,
2098 					    sc->sc_pcixe_capoff + PCIX_CMD,
2099 					    pcix_cmd);
2100 				}
2101 			}
2102 		}
2103 		/*
2104 		 * The quad port adapter is special; it has a PCIX-PCIX
2105 		 * bridge on the board, and can run the secondary bus at
2106 		 * a higher speed.
2107 		 */
2108 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
2109 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
2110 								      : 66;
2111 		} else if (sc->sc_flags & WM_F_PCIX) {
2112 			switch (reg & STATUS_PCIXSPD_MASK) {
2113 			case STATUS_PCIXSPD_50_66:
2114 				sc->sc_bus_speed = 66;
2115 				break;
2116 			case STATUS_PCIXSPD_66_100:
2117 				sc->sc_bus_speed = 100;
2118 				break;
2119 			case STATUS_PCIXSPD_100_133:
2120 				sc->sc_bus_speed = 133;
2121 				break;
2122 			default:
2123 				aprint_error_dev(sc->sc_dev,
2124 				    "unknown PCIXSPD %d; assuming 66MHz\n",
2125 				    reg & STATUS_PCIXSPD_MASK);
2126 				sc->sc_bus_speed = 66;
2127 				break;
2128 			}
2129 		} else
2130 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
2131 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
2132 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
2133 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
2134 	}
2135 
2136 	/* clear interesting stat counters */
2137 	CSR_READ(sc, WMREG_COLC);
2138 	CSR_READ(sc, WMREG_RXERRC);
2139 
2140 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
2141 	    || (sc->sc_type >= WM_T_ICH8))
2142 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2143 	if (sc->sc_type >= WM_T_ICH8)
2144 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2145 
2146 	/* Set PHY, NVM mutex related stuff */
2147 	switch (sc->sc_type) {
2148 	case WM_T_82542_2_0:
2149 	case WM_T_82542_2_1:
2150 	case WM_T_82543:
2151 	case WM_T_82544:
2152 		/* Microwire */
2153 		sc->nvm.read = wm_nvm_read_uwire;
2154 		sc->sc_nvm_wordsize = 64;
2155 		sc->sc_nvm_addrbits = 6;
2156 		break;
2157 	case WM_T_82540:
2158 	case WM_T_82545:
2159 	case WM_T_82545_3:
2160 	case WM_T_82546:
2161 	case WM_T_82546_3:
2162 		/* Microwire */
2163 		sc->nvm.read = wm_nvm_read_uwire;
2164 		reg = CSR_READ(sc, WMREG_EECD);
2165 		if (reg & EECD_EE_SIZE) {
2166 			sc->sc_nvm_wordsize = 256;
2167 			sc->sc_nvm_addrbits = 8;
2168 		} else {
2169 			sc->sc_nvm_wordsize = 64;
2170 			sc->sc_nvm_addrbits = 6;
2171 		}
2172 		sc->sc_flags |= WM_F_LOCK_EECD;
2173 		sc->nvm.acquire = wm_get_eecd;
2174 		sc->nvm.release = wm_put_eecd;
2175 		break;
2176 	case WM_T_82541:
2177 	case WM_T_82541_2:
2178 	case WM_T_82547:
2179 	case WM_T_82547_2:
2180 		reg = CSR_READ(sc, WMREG_EECD);
2181 		/*
2182 		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI only on
2183 		 * the 8254[17], so set the flags and functions before calling it.
2184 		 */
2185 		sc->sc_flags |= WM_F_LOCK_EECD;
2186 		sc->nvm.acquire = wm_get_eecd;
2187 		sc->nvm.release = wm_put_eecd;
2188 		if (reg & EECD_EE_TYPE) {
2189 			/* SPI */
2190 			sc->nvm.read = wm_nvm_read_spi;
2191 			sc->sc_flags |= WM_F_EEPROM_SPI;
2192 			wm_nvm_set_addrbits_size_eecd(sc);
2193 		} else {
2194 			/* Microwire */
2195 			sc->nvm.read = wm_nvm_read_uwire;
2196 			if ((reg & EECD_EE_ABITS) != 0) {
2197 				sc->sc_nvm_wordsize = 256;
2198 				sc->sc_nvm_addrbits = 8;
2199 			} else {
2200 				sc->sc_nvm_wordsize = 64;
2201 				sc->sc_nvm_addrbits = 6;
2202 			}
2203 		}
2204 		break;
2205 	case WM_T_82571:
2206 	case WM_T_82572:
2207 		/* SPI */
2208 		sc->nvm.read = wm_nvm_read_eerd;
2209 		/* Don't use WM_F_LOCK_EECD because we use EERD */
2210 		sc->sc_flags |= WM_F_EEPROM_SPI;
2211 		wm_nvm_set_addrbits_size_eecd(sc);
2212 		sc->phy.acquire = wm_get_swsm_semaphore;
2213 		sc->phy.release = wm_put_swsm_semaphore;
2214 		sc->nvm.acquire = wm_get_nvm_82571;
2215 		sc->nvm.release = wm_put_nvm_82571;
2216 		break;
2217 	case WM_T_82573:
2218 	case WM_T_82574:
2219 	case WM_T_82583:
2220 		sc->nvm.read = wm_nvm_read_eerd;
2221 		/* Don't use WM_F_LOCK_EECD because we use EERD */
2222 		if (sc->sc_type == WM_T_82573) {
2223 			sc->phy.acquire = wm_get_swsm_semaphore;
2224 			sc->phy.release = wm_put_swsm_semaphore;
2225 			sc->nvm.acquire = wm_get_nvm_82571;
2226 			sc->nvm.release = wm_put_nvm_82571;
2227 		} else {
2228 			/* Both PHY and NVM use the same semaphore. */
2229 			sc->phy.acquire = sc->nvm.acquire
2230 			    = wm_get_swfwhw_semaphore;
2231 			sc->phy.release = sc->nvm.release
2232 			    = wm_put_swfwhw_semaphore;
2233 		}
2234 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
2235 			sc->sc_flags |= WM_F_EEPROM_FLASH;
2236 			sc->sc_nvm_wordsize = 2048;
2237 		} else {
2238 			/* SPI */
2239 			sc->sc_flags |= WM_F_EEPROM_SPI;
2240 			wm_nvm_set_addrbits_size_eecd(sc);
2241 		}
2242 		break;
2243 	case WM_T_82575:
2244 	case WM_T_82576:
2245 	case WM_T_82580:
2246 	case WM_T_I350:
2247 	case WM_T_I354:
2248 	case WM_T_80003:
2249 		/* SPI */
2250 		sc->sc_flags |= WM_F_EEPROM_SPI;
2251 		wm_nvm_set_addrbits_size_eecd(sc);
2252 		if ((sc->sc_type == WM_T_80003)
2253 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
2254 			sc->nvm.read = wm_nvm_read_eerd;
2255 			/* Don't use WM_F_LOCK_EECD because we use EERD */
2256 		} else {
2257 			sc->nvm.read = wm_nvm_read_spi;
2258 			sc->sc_flags |= WM_F_LOCK_EECD;
2259 		}
2260 		sc->phy.acquire = wm_get_phy_82575;
2261 		sc->phy.release = wm_put_phy_82575;
2262 		sc->nvm.acquire = wm_get_nvm_80003;
2263 		sc->nvm.release = wm_put_nvm_80003;
2264 		break;
2265 	case WM_T_ICH8:
2266 	case WM_T_ICH9:
2267 	case WM_T_ICH10:
2268 	case WM_T_PCH:
2269 	case WM_T_PCH2:
2270 	case WM_T_PCH_LPT:
2271 		sc->nvm.read = wm_nvm_read_ich8;
2272 		/* FLASH */
2273 		sc->sc_flags |= WM_F_EEPROM_FLASH;
2274 		sc->sc_nvm_wordsize = 2048;
2275 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
2276 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2277 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2278 			aprint_error_dev(sc->sc_dev,
2279 			    "can't map FLASH registers\n");
2280 			goto out;
2281 		}
2282 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2283 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2284 		    ICH_FLASH_SECTOR_SIZE;
2285 		sc->sc_ich8_flash_bank_size =
2286 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2287 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
2288 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2289 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
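		/*
		 * Worked example (hypothetical GFPREG value): if GFPREG
		 * reads 0x03ff0001, the base field is 1 and the limit
		 * field is 0x3ff, so the region spans 0x3ff sectors of
		 * ICH_FLASH_SECTOR_SIZE bytes each; dividing the byte
		 * count by the two banks and by the two bytes per word
		 * yields the per-bank size in 16-bit words.
		 */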
2290 		sc->sc_flashreg_offset = 0;
2291 		sc->phy.acquire = wm_get_swflag_ich8lan;
2292 		sc->phy.release = wm_put_swflag_ich8lan;
2293 		sc->nvm.acquire = wm_get_nvm_ich8lan;
2294 		sc->nvm.release = wm_put_nvm_ich8lan;
2295 		break;
2296 	case WM_T_PCH_SPT:
2297 	case WM_T_PCH_CNP:
2298 		sc->nvm.read = wm_nvm_read_spt;
2299 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
2300 		sc->sc_flags |= WM_F_EEPROM_FLASH;
2301 		sc->sc_flasht = sc->sc_st;
2302 		sc->sc_flashh = sc->sc_sh;
2303 		sc->sc_ich8_flash_base = 0;
2304 		sc->sc_nvm_wordsize =
2305 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
2306 		    * NVM_SIZE_MULTIPLIER;
2307 		/* That is the size in bytes; we want it in words */
2308 		sc->sc_nvm_wordsize /= 2;
2309 		/* Assume 2 banks */
2310 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
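		/*
		 * Worked example (hypothetical strap value): a 5-bit
		 * strap field of 7 gives (7 + 1) * NVM_SIZE_MULTIPLIER
		 * bytes, halved to get 16-bit words, then halved again
		 * for the two assumed banks.
		 */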
2311 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
2312 		sc->phy.acquire = wm_get_swflag_ich8lan;
2313 		sc->phy.release = wm_put_swflag_ich8lan;
2314 		sc->nvm.acquire = wm_get_nvm_ich8lan;
2315 		sc->nvm.release = wm_put_nvm_ich8lan;
2316 		break;
2317 	case WM_T_I210:
2318 	case WM_T_I211:
2319 		/* Allow a single clear of the SW semaphore on I210 and newer */
2320 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
2321 		if (wm_nvm_flash_presence_i210(sc)) {
2322 			sc->nvm.read = wm_nvm_read_eerd;
2323 			/* Don't use WM_F_LOCK_EECD because we use EERD */
2324 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2325 			wm_nvm_set_addrbits_size_eecd(sc);
2326 		} else {
2327 			sc->nvm.read = wm_nvm_read_invm;
2328 			sc->sc_flags |= WM_F_EEPROM_INVM;
2329 			sc->sc_nvm_wordsize = INVM_SIZE;
2330 		}
2331 		sc->phy.acquire = wm_get_phy_82575;
2332 		sc->phy.release = wm_put_phy_82575;
2333 		sc->nvm.acquire = wm_get_nvm_80003;
2334 		sc->nvm.release = wm_put_nvm_80003;
2335 		break;
2336 	default:
2337 		break;
2338 	}
2339 
2340 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
2341 	switch (sc->sc_type) {
2342 	case WM_T_82571:
2343 	case WM_T_82572:
2344 		reg = CSR_READ(sc, WMREG_SWSM2);
2345 		if ((reg & SWSM2_LOCK) == 0) {
2346 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2347 			force_clear_smbi = true;
2348 		} else
2349 			force_clear_smbi = false;
2350 		break;
2351 	case WM_T_82573:
2352 	case WM_T_82574:
2353 	case WM_T_82583:
2354 		force_clear_smbi = true;
2355 		break;
2356 	default:
2357 		force_clear_smbi = false;
2358 		break;
2359 	}
2360 	if (force_clear_smbi) {
2361 		reg = CSR_READ(sc, WMREG_SWSM);
2362 		if ((reg & SWSM_SMBI) != 0)
2363 			aprint_error_dev(sc->sc_dev,
2364 			    "Please update the Bootagent\n");
2365 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2366 	}
2367 
2368 	/*
2369 	 * Defer printing the EEPROM type until after verifying the checksum.
2370 	 * This allows the EEPROM type to be printed correctly in the case
2371 	 * that no EEPROM is attached.
2372 	 */
2373 	/*
2374 	 * Validate the EEPROM checksum. If the checksum fails, flag
2375 	 * this for later, so we can fail future reads from the EEPROM.
2376 	 */
2377 	if (wm_nvm_validate_checksum(sc)) {
2378 		/*
2379 		 * Check a second time; some PCI-e parts fail the first
2380 		 * check because the link is in a sleep state.
2381 		 */
2382 		if (wm_nvm_validate_checksum(sc))
2383 			sc->sc_flags |= WM_F_EEPROM_INVALID;
2384 	}
2385 
2386 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
2387 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2388 	else {
2389 		aprint_verbose_dev(sc->sc_dev, "%u words ",
2390 		    sc->sc_nvm_wordsize);
2391 		if (sc->sc_flags & WM_F_EEPROM_INVM)
2392 			aprint_verbose("iNVM");
2393 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2394 			aprint_verbose("FLASH(HW)");
2395 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2396 			aprint_verbose("FLASH");
2397 		else {
2398 			if (sc->sc_flags & WM_F_EEPROM_SPI)
2399 				eetype = "SPI";
2400 			else
2401 				eetype = "MicroWire";
2402 			aprint_verbose("(%d address bits) %s EEPROM",
2403 			    sc->sc_nvm_addrbits, eetype);
2404 		}
2405 	}
2406 	wm_nvm_version(sc);
2407 	aprint_verbose("\n");
2408 
2409 	/*
2410 	 * XXX This is the first call of wm_gmii_setup_phytype(). The
2411 	 * result might be incorrect.
2412 	 */
2413 	wm_gmii_setup_phytype(sc, 0, 0);
2414 
2415 	/* Check for WM_F_WOL on some chips before wm_reset() */
2416 	switch (sc->sc_type) {
2417 	case WM_T_ICH8:
2418 	case WM_T_ICH9:
2419 	case WM_T_ICH10:
2420 	case WM_T_PCH:
2421 	case WM_T_PCH2:
2422 	case WM_T_PCH_LPT:
2423 	case WM_T_PCH_SPT:
2424 	case WM_T_PCH_CNP:
2425 		apme_mask = WUC_APME;
2426 		eeprom_data = CSR_READ(sc, WMREG_WUC);
2427 		if ((eeprom_data & apme_mask) != 0)
2428 			sc->sc_flags |= WM_F_WOL;
2429 		break;
2430 	default:
2431 		break;
2432 	}
2433 
2434 	/* Reset the chip to a known state. */
2435 	wm_reset(sc);
2436 
2437 	/*
2438 	 * Check for I21[01] PLL workaround.
2439 	 *
2440 	 * Three cases:
2441 	 * a) Chip is I211.
2442 	 * b) Chip is I210 and it uses INVM (not FLASH).
2443 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
2444 	 */
2445 	if (sc->sc_type == WM_T_I211)
2446 		sc->sc_flags |= WM_F_PLL_WA_I210;
2447 	if (sc->sc_type == WM_T_I210) {
2448 		if (!wm_nvm_flash_presence_i210(sc))
2449 			sc->sc_flags |= WM_F_PLL_WA_I210;
2450 		else if ((sc->sc_nvm_ver_major < 3)
2451 		    || ((sc->sc_nvm_ver_major == 3)
2452 			&& (sc->sc_nvm_ver_minor < 25))) {
2453 			aprint_verbose_dev(sc->sc_dev,
2454 			    "ROM image version %d.%d is older than 3.25\n",
2455 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2456 			sc->sc_flags |= WM_F_PLL_WA_I210;
2457 		}
2458 	}
2459 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2460 		wm_pll_workaround_i210(sc);
2461 
2462 	wm_get_wakeup(sc);
2463 
2464 	/* Non-AMT based hardware can now take control from firmware */
2465 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2466 		wm_get_hw_control(sc);
2467 
2468 	/*
2469 	 * Read the Ethernet address from the EEPROM, if not first found
2470 	 * in device properties.
2471 	 */
2472 	ea = prop_dictionary_get(dict, "mac-address");
2473 	if (ea != NULL) {
2474 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2475 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2476 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
2477 	} else {
2478 		if (wm_read_mac_addr(sc, enaddr) != 0) {
2479 			aprint_error_dev(sc->sc_dev,
2480 			    "unable to read Ethernet address\n");
2481 			goto out;
2482 		}
2483 	}
2484 
2485 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2486 	    ether_sprintf(enaddr));
2487 
2488 	/*
2489 	 * Read the config info from the EEPROM, and set up various
2490 	 * bits in the control registers based on their contents.
2491 	 */
2492 	pn = prop_dictionary_get(dict, "i82543-cfg1");
2493 	if (pn != NULL) {
2494 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2495 		cfg1 = (uint16_t) prop_number_integer_value(pn);
2496 	} else {
2497 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2498 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2499 			goto out;
2500 		}
2501 	}
2502 
2503 	pn = prop_dictionary_get(dict, "i82543-cfg2");
2504 	if (pn != NULL) {
2505 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2506 		cfg2 = (uint16_t) prop_number_integer_value(pn);
2507 	} else {
2508 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2509 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2510 			goto out;
2511 		}
2512 	}
2513 
2514 	/* check for WM_F_WOL */
2515 	switch (sc->sc_type) {
2516 	case WM_T_82542_2_0:
2517 	case WM_T_82542_2_1:
2518 	case WM_T_82543:
2519 		/* dummy? */
2520 		eeprom_data = 0;
2521 		apme_mask = NVM_CFG3_APME;
2522 		break;
2523 	case WM_T_82544:
2524 		apme_mask = NVM_CFG2_82544_APM_EN;
2525 		eeprom_data = cfg2;
2526 		break;
2527 	case WM_T_82546:
2528 	case WM_T_82546_3:
2529 	case WM_T_82571:
2530 	case WM_T_82572:
2531 	case WM_T_82573:
2532 	case WM_T_82574:
2533 	case WM_T_82583:
2534 	case WM_T_80003:
2535 	case WM_T_82575:
2536 	case WM_T_82576:
2537 		apme_mask = NVM_CFG3_APME;
2538 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2539 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2540 		break;
2541 	case WM_T_82580:
2542 	case WM_T_I350:
2543 	case WM_T_I354:
2544 	case WM_T_I210:
2545 	case WM_T_I211:
2546 		apme_mask = NVM_CFG3_APME;
2547 		wm_nvm_read(sc,
2548 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2549 		    1, &eeprom_data);
2550 		break;
2551 	case WM_T_ICH8:
2552 	case WM_T_ICH9:
2553 	case WM_T_ICH10:
2554 	case WM_T_PCH:
2555 	case WM_T_PCH2:
2556 	case WM_T_PCH_LPT:
2557 	case WM_T_PCH_SPT:
2558 	case WM_T_PCH_CNP:
2559 		/* Already checked before wm_reset() */
2560 		apme_mask = eeprom_data = 0;
2561 		break;
2562 	default: /* XXX 82540 */
2563 		apme_mask = NVM_CFG3_APME;
2564 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2565 		break;
2566 	}
2567 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2568 	if ((eeprom_data & apme_mask) != 0)
2569 		sc->sc_flags |= WM_F_WOL;
2570 
2571 	/*
2572 	 * We have the EEPROM settings; now apply the special cases
2573 	 * where the EEPROM may be wrong or the board doesn't support
2574 	 * wake on LAN on a particular port.
2575 	 */
2576 	switch (sc->sc_pcidevid) {
2577 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
2578 		sc->sc_flags &= ~WM_F_WOL;
2579 		break;
2580 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
2581 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
2582 		/* Wake events only supported on port A for dual fiber
2583 		 * regardless of eeprom setting */
2584 		if (sc->sc_funcid == 1)
2585 			sc->sc_flags &= ~WM_F_WOL;
2586 		break;
2587 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
2588 		/* If quad port adapter, disable WoL on all but port A */
2589 		if (sc->sc_funcid != 0)
2590 			sc->sc_flags &= ~WM_F_WOL;
2591 		break;
2592 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
2593 		/* Wake events only supported on port A for dual fiber
2594 		 * regardless of eeprom setting */
2595 		if (sc->sc_funcid == 1)
2596 			sc->sc_flags &= ~WM_F_WOL;
2597 		break;
2598 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
2599 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
2600 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
2601 		/* If quad port adapter, disable WoL on all but port A */
2602 		if (sc->sc_funcid != 0)
2603 			sc->sc_flags &= ~WM_F_WOL;
2604 		break;
2605 	}
2606 
2607 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
2608 		/* Check NVM for autonegotiation */
2609 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2610 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
2611 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2612 		}
2613 	}
2614 
2615 	/*
2616 	 * XXX Some multiple-port cards need special handling to
2617 	 * disable a particular port.
2618 	 */
2619 
2620 	if (sc->sc_type >= WM_T_82544) {
2621 		pn = prop_dictionary_get(dict, "i82543-swdpin");
2622 		if (pn != NULL) {
2623 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2624 			swdpin = (uint16_t) prop_number_integer_value(pn);
2625 		} else {
2626 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2627 				aprint_error_dev(sc->sc_dev,
2628 				    "unable to read SWDPIN\n");
2629 				goto out;
2630 			}
2631 		}
2632 	}
2633 
2634 	if (cfg1 & NVM_CFG1_ILOS)
2635 		sc->sc_ctrl |= CTRL_ILOS;
2636 
2637 	/*
2638 	 * XXX
2639 	 * This code isn't correct, because pins 2 and 3 are located
2640 	 * in different positions on newer chips. Check all datasheets.
2641 	 *
2642 	 * Until this is resolved, only do it for chips up to the 82580.
2643 	 */
2644 	if (sc->sc_type <= WM_T_82580) {
2645 		if (sc->sc_type >= WM_T_82544) {
2646 			sc->sc_ctrl |=
2647 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2648 			    CTRL_SWDPIO_SHIFT;
2649 			sc->sc_ctrl |=
2650 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2651 			    CTRL_SWDPINS_SHIFT;
2652 		} else {
2653 			sc->sc_ctrl |=
2654 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2655 			    CTRL_SWDPIO_SHIFT;
2656 		}
2657 	}
2658 
2659 	/* XXX What about chips other than the 82580? */
2660 	if (sc->sc_type == WM_T_82580) {
2661 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
2662 		if (nvmword & __BIT(13))
2663 			sc->sc_ctrl |= CTRL_ILOS;
2664 	}
2665 
2666 #if 0
2667 	if (sc->sc_type >= WM_T_82544) {
2668 		if (cfg1 & NVM_CFG1_IPS0)
2669 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2670 		if (cfg1 & NVM_CFG1_IPS1)
2671 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2672 		sc->sc_ctrl_ext |=
2673 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2674 		    CTRL_EXT_SWDPIO_SHIFT;
2675 		sc->sc_ctrl_ext |=
2676 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2677 		    CTRL_EXT_SWDPINS_SHIFT;
2678 	} else {
2679 		sc->sc_ctrl_ext |=
2680 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2681 		    CTRL_EXT_SWDPIO_SHIFT;
2682 	}
2683 #endif
2684 
2685 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2686 #if 0
2687 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2688 #endif
2689 
2690 	if (sc->sc_type == WM_T_PCH) {
2691 		uint16_t val;
2692 
2693 		/* Save the NVM K1 bit setting */
2694 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2695 
2696 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2697 			sc->sc_nvm_k1_enabled = 1;
2698 		else
2699 			sc->sc_nvm_k1_enabled = 0;
2700 	}
2701 
2702 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
2703 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2704 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2705 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2706 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
2707 	    || sc->sc_type == WM_T_82573
2708 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2709 		/* Copper only */
2710 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2711 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
2712 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
2713 	    || (sc->sc_type == WM_T_I211)) {
2714 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
2715 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2716 		switch (link_mode) {
2717 		case CTRL_EXT_LINK_MODE_1000KX:
2718 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2719 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2720 			break;
2721 		case CTRL_EXT_LINK_MODE_SGMII:
2722 			if (wm_sgmii_uses_mdio(sc)) {
2723 				aprint_verbose_dev(sc->sc_dev,
2724 				    "SGMII(MDIO)\n");
2725 				sc->sc_flags |= WM_F_SGMII;
2726 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2727 				break;
2728 			}
2729 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2730 			/*FALLTHROUGH*/
2731 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2732 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
2733 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2734 				if (link_mode
2735 				    == CTRL_EXT_LINK_MODE_SGMII) {
2736 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2737 					sc->sc_flags |= WM_F_SGMII;
2738 				} else {
2739 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2740 					aprint_verbose_dev(sc->sc_dev,
2741 					    "SERDES\n");
2742 				}
2743 				break;
2744 			}
2745 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2746 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
2747 
2748 			/* Change current link mode setting */
2749 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
2750 			switch (sc->sc_mediatype) {
2751 			case WM_MEDIATYPE_COPPER:
2752 				reg |= CTRL_EXT_LINK_MODE_SGMII;
2753 				break;
2754 			case WM_MEDIATYPE_SERDES:
2755 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2756 				break;
2757 			default:
2758 				break;
2759 			}
2760 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2761 			break;
2762 		case CTRL_EXT_LINK_MODE_GMII:
2763 		default:
2764 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
2765 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2766 			break;
2767 		}
2768 
2770 		if ((sc->sc_flags & WM_F_SGMII) != 0)
2771 			reg |= CTRL_EXT_I2C_ENA;
2772 		else
2773 			reg &= ~CTRL_EXT_I2C_ENA;
2774 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2775 	} else if (sc->sc_type < WM_T_82543 ||
2776 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2777 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2778 			aprint_error_dev(sc->sc_dev,
2779 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
2780 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2781 		}
2782 	} else {
2783 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
2784 			aprint_error_dev(sc->sc_dev,
2785 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2786 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2787 		}
2788 	}
2789 
2790 	if (sc->sc_type >= WM_T_PCH2)
2791 		sc->sc_flags |= WM_F_EEE;
2792 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
2793 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
2794 		/* XXX: Need special handling for I354. (not yet) */
2795 		if (sc->sc_type != WM_T_I354)
2796 			sc->sc_flags |= WM_F_EEE;
2797 	}
2798 
2799 	/* Set device properties (macflags) */
2800 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
2801 
2802 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
2803 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
2804 
2805 	/* Initialize the media structures accordingly. */
2806 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2807 		wm_gmii_mediainit(sc, wmp->wmp_product);
2808 	else
2809 		wm_tbi_mediainit(sc); /* All others */
2810 
2811 	ifp = &sc->sc_ethercom.ec_if;
2812 	xname = device_xname(sc->sc_dev);
2813 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2814 	ifp->if_softc = sc;
2815 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2816 #ifdef WM_MPSAFE
2817 	ifp->if_extflags = IFEF_MPSAFE;
2818 #endif
2819 	ifp->if_ioctl = wm_ioctl;
2820 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
2821 		ifp->if_start = wm_nq_start;
2822 		/*
2823 		 * When the number of CPUs is one and the controller can use
2824 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
2825 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
2826 		 * other for link status changes.
2827 		 * In this situation, wm_nq_transmit() is disadvantageous
2828 		 * because of the wm_select_txqueue() and pcq(9) overhead.
2829 		 */
2830 		if (wm_is_using_multiqueue(sc))
2831 			ifp->if_transmit = wm_nq_transmit;
2832 	} else {
2833 		ifp->if_start = wm_start;
2834 		/*
2835 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
2836 		 */
2837 		if (wm_is_using_multiqueue(sc))
2838 			ifp->if_transmit = wm_transmit;
2839 	}
2840 	/* wm(4) does not use ifp->if_watchdog; it uses wm_tick as a watchdog. */
2841 	ifp->if_init = wm_init;
2842 	ifp->if_stop = wm_stop;
2843 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
2844 	IFQ_SET_READY(&ifp->if_snd);
2845 
2846 	/* Check for jumbo frame */
2847 	switch (sc->sc_type) {
2848 	case WM_T_82573:
2849 		/* XXX limited to 9234 if ASPM is disabled */
2850 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2851 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2852 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2853 		break;
2854 	case WM_T_82571:
2855 	case WM_T_82572:
2856 	case WM_T_82574:
2857 	case WM_T_82583:
2858 	case WM_T_82575:
2859 	case WM_T_82576:
2860 	case WM_T_82580:
2861 	case WM_T_I350:
2862 	case WM_T_I354:
2863 	case WM_T_I210:
2864 	case WM_T_I211:
2865 	case WM_T_80003:
2866 	case WM_T_ICH9:
2867 	case WM_T_ICH10:
2868 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
2869 	case WM_T_PCH_LPT:
2870 	case WM_T_PCH_SPT:
2871 	case WM_T_PCH_CNP:
2872 		/* XXX limited to 9234 */
2873 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2874 		break;
2875 	case WM_T_PCH:
2876 		/* XXX limited to 4096 */
2877 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2878 		break;
2879 	case WM_T_82542_2_0:
2880 	case WM_T_82542_2_1:
2881 	case WM_T_ICH8:
2882 		/* No support for jumbo frame */
2883 		break;
2884 	default:
2885 		/* ETHER_MAX_LEN_JUMBO */
2886 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2887 		break;
2888 	}
2889 
2890 	/* If we're an i82543 or greater, we can support VLANs. */
2891 	if (sc->sc_type >= WM_T_82543) {
2892 		sc->sc_ethercom.ec_capabilities |=
2893 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2894 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
2895 	}
2896 
2897 	if ((sc->sc_flags & WM_F_EEE) != 0)
2898 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
2899 
2900 	/*
2901 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
2902 	 * on i82543 and later.
2903 	 */
2904 	if (sc->sc_type >= WM_T_82543) {
2905 		ifp->if_capabilities |=
2906 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2907 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2908 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2909 		    IFCAP_CSUM_TCPv6_Tx |
2910 		    IFCAP_CSUM_UDPv6_Tx;
2911 	}
2912 
2913 	/*
2914 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
2915 	 *
2916 	 *	82541GI (8086:1076) ... no
2917 	 *	82572EI (8086:10b9) ... yes
2918 	 */
2919 	if (sc->sc_type >= WM_T_82571) {
2920 		ifp->if_capabilities |=
2921 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2922 	}
2923 
2924 	/*
2925 	 * If we're an i82544 or greater (except the i82547), we can do
2926 	 * TCP segmentation offload.
2927 	 */
2928 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2929 		ifp->if_capabilities |= IFCAP_TSOv4;
2930 	}
2931 
2932 	if (sc->sc_type >= WM_T_82571) {
2933 		ifp->if_capabilities |= IFCAP_TSOv6;
2934 	}
2935 
2936 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
2937 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
2938 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
2939 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
2940 
2941 #ifdef WM_MPSAFE
2942 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2943 #else
2944 	sc->sc_core_lock = NULL;
2945 #endif
2946 
2947 	/* Attach the interface. */
2948 	error = if_initialize(ifp);
2949 	if (error != 0) {
2950 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
2951 		    error);
2952 		return; /* Error */
2953 	}
2954 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
2955 	ether_ifattach(ifp, enaddr);
2956 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2957 	if_register(ifp);
2958 
2959 #ifdef WM_EVENT_COUNTERS
2960 	/* Attach event counters. */
2961 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2962 	    NULL, xname, "linkintr");
2963 
2964 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2965 	    NULL, xname, "tx_xoff");
2966 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2967 	    NULL, xname, "tx_xon");
2968 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2969 	    NULL, xname, "rx_xoff");
2970 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2971 	    NULL, xname, "rx_xon");
2972 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2973 	    NULL, xname, "rx_macctl");
2974 #endif /* WM_EVENT_COUNTERS */
2975 
2976 	if (pmf_device_register(self, wm_suspend, wm_resume))
2977 		pmf_class_network_register(self, ifp);
2978 	else
2979 		aprint_error_dev(self, "couldn't establish power handler\n");
2980 
2981 	sc->sc_flags |= WM_F_ATTACHED;
2982 out:
2983 	return;
2984 }
2985 
2986 /* The detach function (ca_detach) */
2987 static int
2988 wm_detach(device_t self, int flags __unused)
2989 {
2990 	struct wm_softc *sc = device_private(self);
2991 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2992 	int i;
2993 
2994 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2995 		return 0;
2996 
2997 	/* Stop the interface. Callouts are stopped in it. */
2998 	wm_stop(ifp, 1);
2999 
3000 	pmf_device_deregister(self);
3001 
3002 #ifdef WM_EVENT_COUNTERS
3003 	evcnt_detach(&sc->sc_ev_linkintr);
3004 
3005 	evcnt_detach(&sc->sc_ev_tx_xoff);
3006 	evcnt_detach(&sc->sc_ev_tx_xon);
3007 	evcnt_detach(&sc->sc_ev_rx_xoff);
3008 	evcnt_detach(&sc->sc_ev_rx_xon);
3009 	evcnt_detach(&sc->sc_ev_rx_macctl);
3010 #endif /* WM_EVENT_COUNTERS */
3011 
3012 	/* Tell the firmware about the release */
3013 	WM_CORE_LOCK(sc);
3014 	wm_release_manageability(sc);
3015 	wm_release_hw_control(sc);
3016 	wm_enable_wakeup(sc);
3017 	WM_CORE_UNLOCK(sc);
3018 
3019 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
3020 
3021 	/* Delete all remaining media. */
3022 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
3023 
3024 	ether_ifdetach(ifp);
3025 	if_detach(ifp);
3026 	if_percpuq_destroy(sc->sc_ipq);
3027 
3028 	/* Unload RX dmamaps and free mbufs */
3029 	for (i = 0; i < sc->sc_nqueues; i++) {
3030 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
3031 		mutex_enter(rxq->rxq_lock);
3032 		wm_rxdrain(rxq);
3033 		mutex_exit(rxq->rxq_lock);
3034 	}
3035 	/* The rxq locks must be released before disestablishing interrupts */
3036 
3037 	/* Disestablish the interrupt handler */
3038 	for (i = 0; i < sc->sc_nintrs; i++) {
3039 		if (sc->sc_ihs[i] != NULL) {
3040 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
3041 			sc->sc_ihs[i] = NULL;
3042 		}
3043 	}
3044 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
3045 
3046 	wm_free_txrx_queues(sc);
3047 
3048 	/* Unmap the registers */
3049 	if (sc->sc_ss) {
3050 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
3051 		sc->sc_ss = 0;
3052 	}
3053 	if (sc->sc_ios) {
3054 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
3055 		sc->sc_ios = 0;
3056 	}
3057 	if (sc->sc_flashs) {
3058 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
3059 		sc->sc_flashs = 0;
3060 	}
3061 
3062 	if (sc->sc_core_lock)
3063 		mutex_obj_free(sc->sc_core_lock);
3064 	if (sc->sc_ich_phymtx)
3065 		mutex_obj_free(sc->sc_ich_phymtx);
3066 	if (sc->sc_ich_nvmmtx)
3067 		mutex_obj_free(sc->sc_ich_nvmmtx);
3068 
3069 	return 0;
3070 }
3071 
3072 static bool
3073 wm_suspend(device_t self, const pmf_qual_t *qual)
3074 {
3075 	struct wm_softc *sc = device_private(self);
3076 
3077 	wm_release_manageability(sc);
3078 	wm_release_hw_control(sc);
3079 	wm_enable_wakeup(sc);
3080 
3081 	return true;
3082 }
3083 
3084 static bool
3085 wm_resume(device_t self, const pmf_qual_t *qual)
3086 {
3087 	struct wm_softc *sc = device_private(self);
3088 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3089 	pcireg_t reg;
3090 	char buf[256];
3091 
3092 	reg = CSR_READ(sc, WMREG_WUS);
3093 	if (reg != 0) {
3094 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
3095 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
3096 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
3097 	}
3098 
3099 	if (sc->sc_type >= WM_T_PCH2)
3100 		wm_resume_workarounds_pchlan(sc);
3101 	if ((ifp->if_flags & IFF_UP) == 0) {
3102 		wm_reset(sc);
3103 		/* Non-AMT based hardware can now take control from firmware */
3104 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
3105 			wm_get_hw_control(sc);
3106 		wm_init_manageability(sc);
3107 	} else {
3108 		/*
3109 		 * We called pmf_class_network_register(), so if_init() is
3110 		 * automatically called when IFF_UP is set. wm_reset(),
3111 		 * wm_get_hw_control() and wm_init_manageability() are called
3112 		 * via wm_init().
3113 		 */
3114 	}
3115 
3116 	return true;
3117 }
3118 
3119 /*
3120  * wm_watchdog:		[ifnet interface function]
3121  *
3122  *	Watchdog timer handler.
3123  */
3124 static void
3125 wm_watchdog(struct ifnet *ifp)
3126 {
3127 	int qid;
3128 	struct wm_softc *sc = ifp->if_softc;
3129 	uint16_t hang_queue = 0; /* wm(4) has at most 16 queues (82576). */
3130 
3131 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
3132 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
3133 
3134 		wm_watchdog_txq(ifp, txq, &hang_queue);
3135 	}
3136 
3137 	/* If any of the queues hung up, reset the interface. */
3138 	if (hang_queue != 0) {
3139 		(void)wm_init(ifp);
3140 
3141 		/*
3142 		 * There is still some upper layer processing which calls
3143 		 * ifp->if_start(), e.g. ALTQ or single-CPU systems.
3144 		 */
3145 		/* Try to get more packets going. */
3146 		ifp->if_start(ifp);
3147 	}
3148 }
3149 
3150 
3151 static void
3152 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
3153 {
3154 
3155 	mutex_enter(txq->txq_lock);
3156 	if (txq->txq_sending &&
3157 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
3158 		wm_watchdog_txq_locked(ifp, txq, hang);
3159 
3160 	mutex_exit(txq->txq_lock);
3161 }
3162 
3163 static void
3164 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
3165     uint16_t *hang)
3166 {
3167 	struct wm_softc *sc = ifp->if_softc;
3168 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
3169 
3170 	KASSERT(mutex_owned(txq->txq_lock));
3171 
3172 	/*
3173 	 * Since we're using delayed interrupts, sweep up
3174 	 * before we report an error.
3175 	 */
3176 	wm_txeof(txq, UINT_MAX);
3177 
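	/* Still pending after the sweep: record this queue as hung. */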
3178 	if (txq->txq_sending)
3179 		*hang |= __BIT(wmq->wmq_id);
3180 
3181 	if (txq->txq_free == WM_NTXDESC(txq)) {
3182 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
3183 		    device_xname(sc->sc_dev));
3184 	} else {
3185 #ifdef WM_DEBUG
3186 		int i, j;
3187 		struct wm_txsoft *txs;
3188 #endif
3189 		log(LOG_ERR,
3190 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3191 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
3192 		    txq->txq_next);
3193 		ifp->if_oerrors++;
3194 #ifdef WM_DEBUG
3195 		for (i = txq->txq_sdirty; i != txq->txq_snext;
3196 		    i = WM_NEXTTXS(txq, i)) {
3197 			txs = &txq->txq_soft[i];
3198 			printf("txs %d tx %d -> %d\n",
3199 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
3200 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
3201 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3202 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3203 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
3204 					printf("\t %#08x%08x\n",
3205 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
3206 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
3207 				} else {
3208 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3209 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
3210 					    txq->txq_descs[j].wtx_addr.wa_low);
3211 					printf("\t %#04x%02x%02x%08x\n",
3212 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
3213 					    txq->txq_descs[j].wtx_fields.wtxu_options,
3214 					    txq->txq_descs[j].wtx_fields.wtxu_status,
3215 					    txq->txq_descs[j].wtx_cmdlen);
3216 				}
3217 				if (j == txs->txs_lastdesc)
3218 					break;
3219 			}
3220 		}
3221 #endif
3222 	}
3223 }
3224 
3225 /*
3226  * wm_tick:
3227  *
3228  *	One second timer, used to check link status, sweep up
3229  *	completed transmit jobs, etc.
3230  */
3231 static void
3232 wm_tick(void *arg)
3233 {
3234 	struct wm_softc *sc = arg;
3235 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3236 #ifndef WM_MPSAFE
3237 	int s = splnet();
3238 #endif
3239 
3240 	WM_CORE_LOCK(sc);
3241 
3242 	if (sc->sc_core_stopping) {
3243 		WM_CORE_UNLOCK(sc);
3244 #ifndef WM_MPSAFE
3245 		splx(s);
3246 #endif
3247 		return;
3248 	}
3249 
3250 	if (sc->sc_type >= WM_T_82542_2_1) {
3251 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3252 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3253 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3254 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3255 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3256 	}
3257 
3258 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3259 	ifp->if_ierrors += 0ULL /* ensure quad_t */
3260 	    + CSR_READ(sc, WMREG_CRCERRS)
3261 	    + CSR_READ(sc, WMREG_ALGNERRC)
3262 	    + CSR_READ(sc, WMREG_SYMERRC)
3263 	    + CSR_READ(sc, WMREG_RXERRC)
3264 	    + CSR_READ(sc, WMREG_SEC)
3265 	    + CSR_READ(sc, WMREG_CEXTERR)
3266 	    + CSR_READ(sc, WMREG_RLEC);
3267 	/*
3268 	 * WMREG_RNBC is incremented when there are no buffers available in
3269 	 * host memory. It does not mean the number of dropped packets,
3270 	 * because the ethernet controller can still receive packets in that
3271 	 * case if there is space in the PHY's FIFO.
3272 	 *
3273 	 * If you want to count WMREG_RNBC events, use a dedicated EVCNT
3274 	 * instead of if_iqdrops.
3275 	 */
3276 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
3277 
3278 	if (sc->sc_flags & WM_F_HAS_MII)
3279 		mii_tick(&sc->sc_mii);
3280 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
3281 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3282 		wm_serdes_tick(sc);
3283 	else
3284 		wm_tbi_tick(sc);
3285 
3286 	WM_CORE_UNLOCK(sc);
3287 
3288 	wm_watchdog(ifp);
3289 
3290 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3291 }
3292 
3293 static int
3294 wm_ifflags_cb(struct ethercom *ec)
3295 {
3296 	struct ifnet *ifp = &ec->ec_if;
3297 	struct wm_softc *sc = ifp->if_softc;
3298 	u_short iffchange;
3299 	int ecchange;
3300 	bool needreset = false;
3301 	int rc = 0;
3302 
3303 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3304 		device_xname(sc->sc_dev), __func__));
3305 
3306 	WM_CORE_LOCK(sc);
3307 
3308 	/*
3309 	 * Check for if_flags.
3310 	 * Main usage is to prevent linkdown when opening bpf.
3311 	 */
3312 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
3313 	sc->sc_if_flags = ifp->if_flags;
3314 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
3315 		needreset = true;
3316 		goto ec;
3317 	}
3318 
3319 	/* iff related updates */
3320 	if ((iffchange & IFF_PROMISC) != 0)
3321 		wm_set_filter(sc);
3322 
3323 	wm_set_vlan(sc);
3324 
3325 ec:
3326 	/* Check for ec_capenable. */
3327 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
3328 	sc->sc_ec_capenable = ec->ec_capenable;
3329 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
3330 		needreset = true;
3331 		goto out;
3332 	}
3333 
3334 	/* ec related updates */
3335 	wm_set_eee(sc);
3336 
3337 out:
3338 	if (needreset)
3339 		rc = ENETRESET;
3340 	WM_CORE_UNLOCK(sc);
3341 
3342 	return rc;
3343 }
3344 
3345 /*
3346  * wm_ioctl:		[ifnet interface function]
3347  *
3348  *	Handle control requests from the operator.
3349  */
3350 static int
3351 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3352 {
3353 	struct wm_softc *sc = ifp->if_softc;
3354 	struct ifreq *ifr = (struct ifreq *)data;
3355 	struct ifaddr *ifa = (struct ifaddr *)data;
3356 	struct sockaddr_dl *sdl;
3357 	int s, error;
3358 
3359 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3360 		device_xname(sc->sc_dev), __func__));
3361 
3362 #ifndef WM_MPSAFE
3363 	s = splnet();
3364 #endif
3365 	switch (cmd) {
3366 	case SIOCSIFMEDIA:
3367 		WM_CORE_LOCK(sc);
3368 		/* Flow control requires full-duplex mode. */
3369 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3370 		    (ifr->ifr_media & IFM_FDX) == 0)
3371 			ifr->ifr_media &= ~IFM_ETH_FMASK;
3372 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3373 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3374 				/* We can do both TXPAUSE and RXPAUSE. */
3375 				ifr->ifr_media |=
3376 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3377 			}
3378 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3379 		}
3380 		WM_CORE_UNLOCK(sc);
3381 #ifdef WM_MPSAFE
3382 		s = splnet();
3383 #endif
3384 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
3385 #ifdef WM_MPSAFE
3386 		splx(s);
3387 #endif
3388 		break;
3389 	case SIOCINITIFADDR:
3390 		WM_CORE_LOCK(sc);
3391 		if (ifa->ifa_addr->sa_family == AF_LINK) {
3392 			sdl = satosdl(ifp->if_dl->ifa_addr);
3393 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3394 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3395 			/* Unicast address is the first multicast entry */
3396 			wm_set_filter(sc);
3397 			error = 0;
3398 			WM_CORE_UNLOCK(sc);
3399 			break;
3400 		}
3401 		WM_CORE_UNLOCK(sc);
3402 		/*FALLTHROUGH*/
3403 	default:
3404 #ifdef WM_MPSAFE
3405 		s = splnet();
3406 #endif
3407 		/* It may call wm_start, so unlock here */
3408 		error = ether_ioctl(ifp, cmd, data);
3409 #ifdef WM_MPSAFE
3410 		splx(s);
3411 #endif
3412 		if (error != ENETRESET)
3413 			break;
3414 
3415 		error = 0;
3416 
3417 		if (cmd == SIOCSIFCAP)
3418 			error = (*ifp->if_init)(ifp);
3419 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3420 			;
3421 		else if (ifp->if_flags & IFF_RUNNING) {
3422 			/*
3423 			 * Multicast list has changed; set the hardware filter
3424 			 * accordingly.
3425 			 */
3426 			WM_CORE_LOCK(sc);
3427 			wm_set_filter(sc);
3428 			WM_CORE_UNLOCK(sc);
3429 		}
3430 		break;
3431 	}
3432 
3433 #ifndef WM_MPSAFE
3434 	splx(s);
3435 #endif
3436 	return error;
3437 }
3438 
3439 /* MAC address related */
3440 
3441 /*
3442  * Get the offset of the MAC address and return it.
3443  * If an error occurred, use offset 0.
3444  */
3445 static uint16_t
3446 wm_check_alt_mac_addr(struct wm_softc *sc)
3447 {
3448 	uint16_t myea[ETHER_ADDR_LEN / 2];
3449 	uint16_t offset = NVM_OFF_MACADDR;
3450 
3451 	/* Try to read alternative MAC address pointer */
3452 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
3453 		return 0;
3454 
3455 	/* Check whether the pointer is valid. */
3456 	if ((offset == 0x0000) || (offset == 0xffff))
3457 		return 0;
3458 
3459 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
3460 	/*
3461 	 * Check whether the alternative MAC address is valid.
3462 	 * Some cards have a non-0xffff pointer but don't actually
3463 	 * use an alternative MAC address.
3464 	 *
3465 	 * The test is whether the broadcast (group) bit is set.
3466 	 */
3467 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
3468 		if (((myea[0] & 0xff) & 0x01) == 0)
3469 			return offset; /* Found */
3470 
3471 	/* Not found */
3472 	return 0;
3473 }
3474 
3475 static int
3476 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
3477 {
3478 	uint16_t myea[ETHER_ADDR_LEN / 2];
3479 	uint16_t offset = NVM_OFF_MACADDR;
3480 	int do_invert = 0;
3481 
3482 	switch (sc->sc_type) {
3483 	case WM_T_82580:
3484 	case WM_T_I350:
3485 	case WM_T_I354:
3486 		/* EEPROM Top Level Partitioning */
3487 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
3488 		break;
3489 	case WM_T_82571:
3490 	case WM_T_82575:
3491 	case WM_T_82576:
3492 	case WM_T_80003:
3493 	case WM_T_I210:
3494 	case WM_T_I211:
3495 		offset = wm_check_alt_mac_addr(sc);
3496 		if (offset == 0)
3497 			if ((sc->sc_funcid & 0x01) == 1)
3498 				do_invert = 1;
3499 		break;
3500 	default:
3501 		if ((sc->sc_funcid & 0x01) == 1)
3502 			do_invert = 1;
3503 		break;
3504 	}
3505 
3506 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
3507 		goto bad;
3508 
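	/* Each 16-bit NVM word holds two address octets, low byte first. */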
3509 	enaddr[0] = myea[0] & 0xff;
3510 	enaddr[1] = myea[0] >> 8;
3511 	enaddr[2] = myea[1] & 0xff;
3512 	enaddr[3] = myea[1] >> 8;
3513 	enaddr[4] = myea[2] & 0xff;
3514 	enaddr[5] = myea[2] >> 8;
3515 
3516 	/*
3517 	 * Toggle the LSB of the MAC address on the second port
3518 	 * of some dual port cards.
3519 	 */
3520 	if (do_invert != 0)
3521 		enaddr[5] ^= 1;
3522 
3523 	return 0;
3524 
3525  bad:
3526 	return -1;
3527 }
3528 
3529 /*
3530  * wm_set_ral:
3531  *
3532  *	Set an entry in the receive address list.
3533  */
3534 static void
3535 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3536 {
3537 	uint32_t ral_lo, ral_hi, addrl, addrh;
3538 	uint32_t wlock_mac;
3539 	int rv;
3540 
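	/*
	 * Pack the six address octets into the RAL (low four bytes)
	 * and RAH (high two bytes) register layout; RAL_AV marks the
	 * entry as valid.
	 */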
3541 	if (enaddr != NULL) {
3542 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
3543 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
3544 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
3545 		ral_hi |= RAL_AV;
3546 	} else {
3547 		ral_lo = 0;
3548 		ral_hi = 0;
3549 	}
3550 
3551 	switch (sc->sc_type) {
3552 	case WM_T_82542_2_0:
3553 	case WM_T_82542_2_1:
3554 	case WM_T_82543:
3555 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
3556 		CSR_WRITE_FLUSH(sc);
3557 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
3558 		CSR_WRITE_FLUSH(sc);
3559 		break;
3560 	case WM_T_PCH2:
3561 	case WM_T_PCH_LPT:
3562 	case WM_T_PCH_SPT:
3563 	case WM_T_PCH_CNP:
3564 		if (idx == 0) {
3565 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
3566 			CSR_WRITE_FLUSH(sc);
3567 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
3568 			CSR_WRITE_FLUSH(sc);
3569 			return;
3570 		}
3571 		if (sc->sc_type != WM_T_PCH2) {
3572 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
3573 			    FWSM_WLOCK_MAC);
3574 			addrl = WMREG_SHRAL(idx - 1);
3575 			addrh = WMREG_SHRAH(idx - 1);
3576 		} else {
3577 			wlock_mac = 0;
3578 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
3579 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
3580 		}
3581 
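		/*
		 * When FWSM_WLOCK_MAC is non-zero, only the first
		 * wlock_mac shared receive address entries may be
		 * written; zero means all entries are writable.
		 */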
3582 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
3583 			rv = wm_get_swflag_ich8lan(sc);
3584 			if (rv != 0)
3585 				return;
3586 			CSR_WRITE(sc, addrl, ral_lo);
3587 			CSR_WRITE_FLUSH(sc);
3588 			CSR_WRITE(sc, addrh, ral_hi);
3589 			CSR_WRITE_FLUSH(sc);
3590 			wm_put_swflag_ich8lan(sc);
3591 		}
3592 
3593 		break;
3594 	default:
3595 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
3596 		CSR_WRITE_FLUSH(sc);
3597 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
3598 		CSR_WRITE_FLUSH(sc);
3599 		break;
3600 	}
3601 }
3602 
3603 /*
3604  * wm_mchash:
3605  *
3606  *	Compute the hash of the multicast address for the 4096-bit
3607  *	multicast filter.
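 *
 *	The index is built from the two high-order address octets;
 *	sc_mchash_type selects the bit split.  For example, with
 *	mchash_type 0 on a non-ICH chip the result is
 *	(enaddr[4] >> 4) | ((uint16_t)enaddr[5] << 4), masked to 12 bits.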
3608  */
3609 static uint32_t
3610 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3611 {
3612 	static const int lo_shift[4] = { 4, 3, 2, 0 };
3613 	static const int hi_shift[4] = { 4, 5, 6, 8 };
3614 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3615 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3616 	uint32_t hash;
3617 
3618 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3619 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3620 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3621 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
3622 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3623 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3624 		return (hash & 0x3ff);
3625 	}
3626 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3627 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3628 
3629 	return (hash & 0xfff);
3630 }
3631 
3632 /*
3633  * wm_rar_count:
3634  *	Return the number of receive address list entries for this chip.
3635  */
3636 static int
3637 wm_rar_count(struct wm_softc *sc)
3638 {
3639 	int size;
3640 
3641 	switch (sc->sc_type) {
3642 	case WM_T_ICH8:
3643 		size = WM_RAL_TABSIZE_ICH8 - 1;
3644 		break;
3645 	case WM_T_ICH9:
3646 	case WM_T_ICH10:
3647 	case WM_T_PCH:
3648 		size = WM_RAL_TABSIZE_ICH8;
3649 		break;
3650 	case WM_T_PCH2:
3651 		size = WM_RAL_TABSIZE_PCH2;
3652 		break;
3653 	case WM_T_PCH_LPT:
3654 	case WM_T_PCH_SPT:
3655 	case WM_T_PCH_CNP:
3656 		size = WM_RAL_TABSIZE_PCH_LPT;
3657 		break;
3658 	case WM_T_82575:
3659 	case WM_T_I210:
3660 	case WM_T_I211:
3661 		size = WM_RAL_TABSIZE_82575;
3662 		break;
3663 	case WM_T_82576:
3664 	case WM_T_82580:
3665 		size = WM_RAL_TABSIZE_82576;
3666 		break;
3667 	case WM_T_I350:
3668 	case WM_T_I354:
3669 		size = WM_RAL_TABSIZE_I350;
3670 		break;
3671 	default:
3672 		size = WM_RAL_TABSIZE;
3673 	}
3674 
3675 	return size;
3676 }
3677 
3678 /*
3679  * wm_set_filter:
3680  *
3681  *	Set up the receive filter.
3682  */
3683 static void
3684 wm_set_filter(struct wm_softc *sc)
3685 {
3686 	struct ethercom *ec = &sc->sc_ethercom;
3687 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3688 	struct ether_multi *enm;
3689 	struct ether_multistep step;
3690 	bus_addr_t mta_reg;
3691 	uint32_t hash, reg, bit;
3692 	int i, size, ralmax;
3693 
3694 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3695 		device_xname(sc->sc_dev), __func__));
3696 
3697 	if (sc->sc_type >= WM_T_82544)
3698 		mta_reg = WMREG_CORDOVA_MTA;
3699 	else
3700 		mta_reg = WMREG_MTA;
3701 
3702 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3703 
3704 	if (ifp->if_flags & IFF_BROADCAST)
3705 		sc->sc_rctl |= RCTL_BAM;
3706 	if (ifp->if_flags & IFF_PROMISC) {
3707 		sc->sc_rctl |= RCTL_UPE;
3708 		ETHER_LOCK(ec);
3709 		ec->ec_flags |= ETHER_F_ALLMULTI;
3710 		ETHER_UNLOCK(ec);
3711 		goto allmulti;
3712 	}
3713 
3714 	/*
3715 	 * Set the station address in the first RAL slot, and
3716 	 * clear the remaining slots.
3717 	 */
3718 	size = wm_rar_count(sc);
3719 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3720 
3721 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
3722 	    || (sc->sc_type == WM_T_PCH_CNP)) {
3723 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
3724 		switch (i) {
3725 		case 0:
3726 			/* We can use all entries */
3727 			ralmax = size;
3728 			break;
3729 		case 1:
3730 			/* Only RAR[0] */
3731 			ralmax = 1;
3732 			break;
3733 		default:
3734 			/* Available SHRA + RAR[0] */
3735 			ralmax = i + 1;
3736 		}
3737 	} else
3738 		ralmax = size;
3739 	for (i = 1; i < size; i++) {
3740 		if (i < ralmax)
3741 			wm_set_ral(sc, NULL, i);
3742 	}
3743 
3744 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3745 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3746 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3747 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
3748 		size = WM_ICH8_MC_TABSIZE;
3749 	else
3750 		size = WM_MC_TABSIZE;
3751 	/* Clear out the multicast table. */
3752 	for (i = 0; i < size; i++) {
3753 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
3754 		CSR_WRITE_FLUSH(sc);
3755 	}
3756 
3757 	ETHER_LOCK(ec);
3758 	ETHER_FIRST_MULTI(step, ec, enm);
3759 	while (enm != NULL) {
3760 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3761 			ec->ec_flags |= ETHER_F_ALLMULTI;
3762 			ETHER_UNLOCK(ec);
3763 			/*
3764 			 * We must listen to a range of multicast addresses.
3765 			 * For now, just accept all multicasts, rather than
3766 			 * trying to set only those filter bits needed to match
3767 			 * the range.  (At this time, the only use of address
3768 			 * ranges is for IP multicast routing, for which the
3769 			 * range is big enough to require all bits set.)
3770 			 */
3771 			goto allmulti;
3772 		}
3773 
3774 		hash = wm_mchash(sc, enm->enm_addrlo);
3775 
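		/*
		 * The upper hash bits select a 32-bit MTA register and
		 * the low five bits select the bit within it.
		 */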
3776 		reg = (hash >> 5);
3777 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3778 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3779 		    || (sc->sc_type == WM_T_PCH2)
3780 		    || (sc->sc_type == WM_T_PCH_LPT)
3781 		    || (sc->sc_type == WM_T_PCH_SPT)
3782 		    || (sc->sc_type == WM_T_PCH_CNP))
3783 			reg &= 0x1f;
3784 		else
3785 			reg &= 0x7f;
3786 		bit = hash & 0x1f;
3787 
3788 		hash = CSR_READ(sc, mta_reg + (reg << 2));
3789 		hash |= 1U << bit;
3790 
3791 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
3792 			/*
3793 			 * 82544 Errata 9: Certain registers cannot be written
3794 			 * with particular alignments in PCI-X bus operation
3795 			 * (FCAH, MTA and VFTA).
3796 			 */
3797 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3798 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3799 			CSR_WRITE_FLUSH(sc);
3800 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3801 			CSR_WRITE_FLUSH(sc);
3802 		} else {
3803 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3804 			CSR_WRITE_FLUSH(sc);
3805 		}
3806 
3807 		ETHER_NEXT_MULTI(step, enm);
3808 	}
3809 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
3810 	ETHER_UNLOCK(ec);
3811 
3812 	goto setit;
3813 
3814  allmulti:
3815 	sc->sc_rctl |= RCTL_MPE;
3816 
3817  setit:
3818 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3819 }
3820 
3821 /* Reset and init related */
3822 
3823 static void
3824 wm_set_vlan(struct wm_softc *sc)
3825 {
3826 
3827 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3828 		device_xname(sc->sc_dev), __func__));
3829 
3830 	/* Deal with VLAN enables. */
3831 	if (VLAN_ATTACHED(&sc->sc_ethercom))
3832 		sc->sc_ctrl |= CTRL_VME;
3833 	else
3834 		sc->sc_ctrl &= ~CTRL_VME;
3835 
3836 	/* Write the control registers. */
3837 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3838 }
3839 
3840 static void
3841 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3842 {
3843 	uint32_t gcr;
3844 	pcireg_t ctrl2;
3845 
3846 	gcr = CSR_READ(sc, WMREG_GCR);
3847 
3848 	/* Only take action if the timeout value is at its default of 0 */
3849 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3850 		goto out;
3851 
3852 	if ((gcr & GCR_CAP_VER2) == 0) {
3853 		gcr |= GCR_CMPL_TMOUT_10MS;
3854 		goto out;
3855 	}
3856 
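	/*
	 * Capability-version-2 devices take the completion timeout from
	 * the PCIe Device Control 2 register rather than from GCR.
	 */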
3857 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3858 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
3859 	ctrl2 |= WM_PCIE_DCSR2_16MS;
3860 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3861 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3862 
3863 out:
3864 	/* Disable completion timeout resend */
3865 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
3866 
3867 	CSR_WRITE(sc, WMREG_GCR, gcr);
3868 }
3869 
3870 void
3871 wm_get_auto_rd_done(struct wm_softc *sc)
3872 {
3873 	int i;
3874 
3875 	/* Wait for eeprom to reload */
3876 	switch (sc->sc_type) {
3877 	case WM_T_82571:
3878 	case WM_T_82572:
3879 	case WM_T_82573:
3880 	case WM_T_82574:
3881 	case WM_T_82583:
3882 	case WM_T_82575:
3883 	case WM_T_82576:
3884 	case WM_T_82580:
3885 	case WM_T_I350:
3886 	case WM_T_I354:
3887 	case WM_T_I210:
3888 	case WM_T_I211:
3889 	case WM_T_80003:
3890 	case WM_T_ICH8:
3891 	case WM_T_ICH9:
3892 		for (i = 0; i < 10; i++) {
3893 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3894 				break;
3895 			delay(1000);
3896 		}
3897 		if (i == 10) {
3898 			log(LOG_ERR, "%s: auto read from eeprom failed to "
3899 			    "complete\n", device_xname(sc->sc_dev));
3900 		}
3901 		break;
3902 	default:
3903 		break;
3904 	}
3905 }
3906 
3907 void
3908 wm_lan_init_done(struct wm_softc *sc)
3909 {
3910 	uint32_t reg = 0;
3911 	int i;
3912 
3913 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3914 		device_xname(sc->sc_dev), __func__));
3915 
3916 	/* Wait for eeprom to reload */
3917 	switch (sc->sc_type) {
3918 	case WM_T_ICH10:
3919 	case WM_T_PCH:
3920 	case WM_T_PCH2:
3921 	case WM_T_PCH_LPT:
3922 	case WM_T_PCH_SPT:
3923 	case WM_T_PCH_CNP:
3924 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3925 			reg = CSR_READ(sc, WMREG_STATUS);
3926 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
3927 				break;
3928 			delay(100);
3929 		}
3930 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3931 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
3932 			    "complete\n", device_xname(sc->sc_dev), __func__);
3933 		}
3934 		break;
3935 	default:
3936 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3937 		    __func__);
3938 		break;
3939 	}
3940 
3941 	reg &= ~STATUS_LAN_INIT_DONE;
3942 	CSR_WRITE(sc, WMREG_STATUS, reg);
3943 }
3944 
3945 void
3946 wm_get_cfg_done(struct wm_softc *sc)
3947 {
3948 	int mask;
3949 	uint32_t reg;
3950 	int i;
3951 
3952 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3953 		device_xname(sc->sc_dev), __func__));
3954 
3955 	/* Wait for eeprom to reload */
3956 	switch (sc->sc_type) {
3957 	case WM_T_82542_2_0:
3958 	case WM_T_82542_2_1:
3959 		/* null */
3960 		break;
3961 	case WM_T_82543:
3962 	case WM_T_82544:
3963 	case WM_T_82540:
3964 	case WM_T_82545:
3965 	case WM_T_82545_3:
3966 	case WM_T_82546:
3967 	case WM_T_82546_3:
3968 	case WM_T_82541:
3969 	case WM_T_82541_2:
3970 	case WM_T_82547:
3971 	case WM_T_82547_2:
3972 	case WM_T_82573:
3973 	case WM_T_82574:
3974 	case WM_T_82583:
3975 		/* generic */
3976 		delay(10*1000);
3977 		break;
3978 	case WM_T_80003:
3979 	case WM_T_82571:
3980 	case WM_T_82572:
3981 	case WM_T_82575:
3982 	case WM_T_82576:
3983 	case WM_T_82580:
3984 	case WM_T_I350:
3985 	case WM_T_I354:
3986 	case WM_T_I210:
3987 	case WM_T_I211:
3988 		if (sc->sc_type == WM_T_82571) {
3989 			/* Only 82571 shares port 0 */
3990 			mask = EEMNGCTL_CFGDONE_0;
3991 		} else
3992 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3993 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3994 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3995 				break;
3996 			delay(1000);
3997 		}
3998 		if (i >= WM_PHY_CFG_TIMEOUT)
3999 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
4000 				device_xname(sc->sc_dev), __func__));
4001 		break;
4002 	case WM_T_ICH8:
4003 	case WM_T_ICH9:
4004 	case WM_T_ICH10:
4005 	case WM_T_PCH:
4006 	case WM_T_PCH2:
4007 	case WM_T_PCH_LPT:
4008 	case WM_T_PCH_SPT:
4009 	case WM_T_PCH_CNP:
4010 		delay(10*1000);
4011 		if (sc->sc_type >= WM_T_ICH10)
4012 			wm_lan_init_done(sc);
4013 		else
4014 			wm_get_auto_rd_done(sc);
4015 
4016 		/* Clear PHY Reset Asserted bit */
4017 		reg = CSR_READ(sc, WMREG_STATUS);
4018 		if ((reg & STATUS_PHYRA) != 0)
4019 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
4020 		break;
4021 	default:
4022 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4023 		    __func__);
4024 		break;
4025 	}
4026 }
4027 
4028 int
4029 wm_phy_post_reset(struct wm_softc *sc)
4030 {
4031 	device_t dev = sc->sc_dev;
4032 	uint16_t reg;
4033 	int rv = 0;
4034 
4035 	/* This function is only for ICH8 and newer. */
4036 	if (sc->sc_type < WM_T_ICH8)
4037 		return 0;
4038 
4039 	if (wm_phy_resetisblocked(sc)) {
4040 		/* XXX */
4041 		device_printf(dev, "PHY is blocked\n");
4042 		return -1;
4043 	}
4044 
4045 	/* Allow time for h/w to get to quiescent state after reset */
4046 	delay(10*1000);
4047 
4048 	/* Perform any necessary post-reset workarounds */
4049 	if (sc->sc_type == WM_T_PCH)
4050 		rv = wm_hv_phy_workarounds_ich8lan(sc);
4051 	else if (sc->sc_type == WM_T_PCH2)
4052 		rv = wm_lv_phy_workarounds_ich8lan(sc);
4053 	if (rv != 0)
4054 		return rv;
4055 
4056 	/* Clear the host wakeup bit after lcd reset */
4057 	if (sc->sc_type >= WM_T_PCH) {
4058 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
4059 		reg &= ~BM_WUC_HOST_WU_BIT;
4060 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
4061 	}
4062 
4063 	/* Configure the LCD with the extended configuration region in NVM */
4064 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
4065 		return rv;
4066 
4067 	/* Configure the LCD with the OEM bits in NVM */
4068 	rv = wm_oem_bits_config_ich8lan(sc, true);
4069 
4070 	if (sc->sc_type == WM_T_PCH2) {
4071 		/* Ungate automatic PHY configuration on non-managed 82579 */
4072 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
4073 			delay(10 * 1000);
4074 			wm_gate_hw_phy_config_ich8lan(sc, false);
4075 		}
4076 		/* Set EEE LPI Update Timer to 200usec */
4077 		rv = sc->phy.acquire(sc);
4078 		if (rv)
4079 			return rv;
4080 		rv = wm_write_emi_reg_locked(dev,
4081 		    I82579_LPI_UPDATE_TIMER, 0x1387);
4082 		sc->phy.release(sc);
4083 	}
4084 
4085 	return rv;
4086 }
4087 
4088 /* Only for PCH and newer */
4089 static int
4090 wm_write_smbus_addr(struct wm_softc *sc)
4091 {
4092 	uint32_t strap, freq;
4093 	uint16_t phy_data;
4094 	int rv;
4095 
4096 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4097 		device_xname(sc->sc_dev), __func__));
4098 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
4099 
4100 	strap = CSR_READ(sc, WMREG_STRAP);
4101 	freq = __SHIFTOUT(strap, STRAP_FREQ);
4102 
4103 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
4104 	if (rv != 0)
4105 		return -1;
4106 
4107 	phy_data &= ~HV_SMB_ADDR_ADDR;
4108 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
4109 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
4110 
4111 	if (sc->sc_phytype == WMPHY_I217) {
4112 		/* Restore SMBus frequency */
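		/*
		 * A zero strap value means no frequency is encoded;
		 * otherwise (value - 1) carries the two frequency bits.
		 */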
4113 		if (freq--) {
4114 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
4115 			    | HV_SMB_ADDR_FREQ_HIGH);
4116 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
4117 			    HV_SMB_ADDR_FREQ_LOW);
4118 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
4119 			    HV_SMB_ADDR_FREQ_HIGH);
4120 		} else
4121 			DPRINTF(WM_DEBUG_INIT,
4122 			    ("%s: %s Unsupported SMB frequency in PHY\n",
4123 				device_xname(sc->sc_dev), __func__));
4124 	}
4125 
4126 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
4127 	    phy_data);
4128 }
4129 
4130 static int
4131 wm_init_lcd_from_nvm(struct wm_softc *sc)
4132 {
4133 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
4134 	uint16_t phy_page = 0;
4135 	int rv = 0;
4136 
4137 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4138 		device_xname(sc->sc_dev), __func__));
4139 
4140 	switch (sc->sc_type) {
4141 	case WM_T_ICH8:
4142 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
4143 		    || (sc->sc_phytype != WMPHY_IGP_3))
4144 			return 0;
4145 
4146 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
4147 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
4148 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
4149 			break;
4150 		}
4151 		/* FALLTHROUGH */
4152 	case WM_T_PCH:
4153 	case WM_T_PCH2:
4154 	case WM_T_PCH_LPT:
4155 	case WM_T_PCH_SPT:
4156 	case WM_T_PCH_CNP:
4157 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
4158 		break;
4159 	default:
4160 		return 0;
4161 	}
4162 
4163 	if ((rv = sc->phy.acquire(sc)) != 0)
4164 		return rv;
4165 
4166 	reg = CSR_READ(sc, WMREG_FEXTNVM);
4167 	if ((reg & sw_cfg_mask) == 0)
4168 		goto release;
4169 
4170 	/*
4171 	 * Make sure HW does not configure LCD from PHY extended configuration
4172 	 * before SW configuration
4173 	 */
4174 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
4175 	if ((sc->sc_type < WM_T_PCH2)
4176 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
4177 		goto release;
4178 
4179 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
4180 		device_xname(sc->sc_dev), __func__));
4181 	/* word_addr is in DWORD */
4182 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
4183 
4184 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
4185 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
4186 	if (cnf_size == 0)
4187 		goto release;
4188 
4189 	if (((sc->sc_type == WM_T_PCH)
4190 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
4191 	    || (sc->sc_type > WM_T_PCH)) {
4192 		/*
4193 		 * HW configures the SMBus address and LEDs when the OEM and
4194 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
4195 		 * are cleared, SW will configure them instead.
4196 		 */
4197 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
4198 			device_xname(sc->sc_dev), __func__));
4199 		if ((rv = wm_write_smbus_addr(sc)) != 0)
4200 			goto release;
4201 
4202 		reg = CSR_READ(sc, WMREG_LEDCTL);
4203 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
4204 		    (uint16_t)reg);
4205 		if (rv != 0)
4206 			goto release;
4207 	}
4208 
4209 	/* Configure LCD from extended configuration region. */
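	/*
	 * Each entry in the region is a (data, address) word pair;
	 * page-select writes update phy_page so that subsequent
	 * registers land on the right PHY page.
	 */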
4210 	for (i = 0; i < cnf_size; i++) {
4211 		uint16_t reg_data, reg_addr;
4212 
4213 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
4214 			goto release;
4215 
4216 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
4217 			goto release;
4218 
4219 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
4220 			phy_page = reg_data;
4221 
4222 		reg_addr &= IGPHY_MAXREGADDR;
4223 		reg_addr |= phy_page;
4224 
4225 		KASSERT(sc->phy.writereg_locked != NULL);
4226 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
4227 		    reg_data);
4228 	}
4229 
4230 release:
4231 	sc->phy.release(sc);
4232 	return rv;
4233 }
4234 
4235 /*
4236  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
4237  *  @sc:       pointer to the HW structure
4238  *  @d0_state: boolean if entering d0 or d3 device state
4239  *
4240  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
4241  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
4242  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
4243  */
4244 int
4245 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
4246 {
4247 	uint32_t mac_reg;
4248 	uint16_t oem_reg;
4249 	int rv;
4250 
4251 	if (sc->sc_type < WM_T_PCH)
4252 		return 0;
4253 
4254 	rv = sc->phy.acquire(sc);
4255 	if (rv != 0)
4256 		return rv;
4257 
4258 	if (sc->sc_type == WM_T_PCH) {
4259 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
4260 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
4261 			goto release;
4262 	}
4263 
4264 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
4265 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
4266 		goto release;
4267 
4268 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
4269 
4270 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
4271 	if (rv != 0)
4272 		goto release;
4273 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
4274 
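	/*
	 * Mirror the MAC's PHY_CTRL power settings into the PHY's
	 * OEM bits register for the requested power state.
	 */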
4275 	if (d0_state) {
4276 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
4277 			oem_reg |= HV_OEM_BITS_A1KDIS;
4278 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
4279 			oem_reg |= HV_OEM_BITS_LPLU;
4280 	} else {
4281 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
4282 		    != 0)
4283 			oem_reg |= HV_OEM_BITS_A1KDIS;
4284 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
4285 		    != 0)
4286 			oem_reg |= HV_OEM_BITS_LPLU;
4287 	}
4288 
4289 	/* Set Restart auto-neg to activate the bits */
4290 	if ((d0_state || (sc->sc_type != WM_T_PCH))
4291 	    && (wm_phy_resetisblocked(sc) == false))
4292 		oem_reg |= HV_OEM_BITS_ANEGNOW;
4293 
4294 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
4295 
4296 release:
4297 	sc->phy.release(sc);
4298 
4299 	return rv;
4300 }
4301 
4302 /* Init hardware bits */
4303 void
4304 wm_initialize_hardware_bits(struct wm_softc *sc)
4305 {
4306 	uint32_t tarc0, tarc1, reg;
4307 
4308 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4309 		device_xname(sc->sc_dev), __func__));
4310 
4311 	/* For 82571 variant, 80003 and ICHs */
4312 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
4313 	    || (sc->sc_type >= WM_T_80003)) {
4314 
4315 		/* Transmit Descriptor Control 0 */
4316 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
4317 		reg |= TXDCTL_COUNT_DESC;
4318 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
4319 
4320 		/* Transmit Descriptor Control 1 */
4321 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
4322 		reg |= TXDCTL_COUNT_DESC;
4323 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
4324 
4325 		/* TARC0 */
4326 		tarc0 = CSR_READ(sc, WMREG_TARC0);
4327 		switch (sc->sc_type) {
4328 		case WM_T_82571:
4329 		case WM_T_82572:
4330 		case WM_T_82573:
4331 		case WM_T_82574:
4332 		case WM_T_82583:
4333 		case WM_T_80003:
4334 			/* Clear bits 30..27 */
4335 			tarc0 &= ~__BITS(30, 27);
4336 			break;
4337 		default:
4338 			break;
4339 		}
4340 
4341 		switch (sc->sc_type) {
4342 		case WM_T_82571:
4343 		case WM_T_82572:
4344 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
4345 
4346 			tarc1 = CSR_READ(sc, WMREG_TARC1);
4347 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
4348 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
4349 			/* 8257[12] Errata No.7 */
4350 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
4351 
4352 			/* TARC1 bit 28 */
4353 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
4354 				tarc1 &= ~__BIT(28);
4355 			else
4356 				tarc1 |= __BIT(28);
4357 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
4358 
4359 			/*
4360 			 * 8257[12] Errata No.13
4361 			 * Disable Dynamic Clock Gating.
4362 			 */
4363 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4364 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
4365 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4366 			break;
4367 		case WM_T_82573:
4368 		case WM_T_82574:
4369 		case WM_T_82583:
4370 			if ((sc->sc_type == WM_T_82574)
4371 			    || (sc->sc_type == WM_T_82583))
4372 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
4373 
4374 			/* Extended Device Control */
4375 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4376 			reg &= ~__BIT(23);	/* Clear bit 23 */
4377 			reg |= __BIT(22);	/* Set bit 22 */
4378 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4379 
4380 			/* Device Control */
4381 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
4382 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4383 
4384 			/* PCIe Control Register */
4385 			/*
4386 			 * 82573 Errata (unknown).
4387 			 *
4388 			 * 82574 Errata 25 and 82583 Errata 12
4389 			 * "Dropped Rx Packets":
4390 			 *   NVM Image Version 2.1.4 and newer does not have
4390 			 *   this bug.
4391 			 */
4392 			reg = CSR_READ(sc, WMREG_GCR);
4393 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
4394 			CSR_WRITE(sc, WMREG_GCR, reg);
4395 
4396 			if ((sc->sc_type == WM_T_82574)
4397 			    || (sc->sc_type == WM_T_82583)) {
4398 				/*
4399 				 * Document says this bit must be set for
4400 				 * proper operation.
4401 				 */
4402 				reg = CSR_READ(sc, WMREG_GCR);
4403 				reg |= __BIT(22);
4404 				CSR_WRITE(sc, WMREG_GCR, reg);
4405 
4406 				/*
4407 				 * Apply a workaround for a hardware erratum
4408 				 * documented in the errata docs. It fixes an
4409 				 * issue where error-prone or unreliable PCIe
4410 				 * completions occur, particularly with ASPM
4411 				 * enabled. Without the fix, the issue can
4412 				 * cause Tx timeouts.
4413 				 */
4414 				reg = CSR_READ(sc, WMREG_GCR2);
4415 				reg |= __BIT(0);
4416 				CSR_WRITE(sc, WMREG_GCR2, reg);
4417 			}
4418 			break;
4419 		case WM_T_80003:
4420 			/* TARC0 */
4421 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
4422 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
4423 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
4424 
4425 			/* TARC1 bit 28 */
4426 			tarc1 = CSR_READ(sc, WMREG_TARC1);
4427 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
4428 				tarc1 &= ~__BIT(28);
4429 			else
4430 				tarc1 |= __BIT(28);
4431 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
4432 			break;
4433 		case WM_T_ICH8:
4434 		case WM_T_ICH9:
4435 		case WM_T_ICH10:
4436 		case WM_T_PCH:
4437 		case WM_T_PCH2:
4438 		case WM_T_PCH_LPT:
4439 		case WM_T_PCH_SPT:
4440 		case WM_T_PCH_CNP:
4441 			/* TARC0 */
4442 			if (sc->sc_type == WM_T_ICH8) {
4443 				/* Set TARC0 bits 29 and 28 */
4444 				tarc0 |= __BITS(29, 28);
4445 			} else if (sc->sc_type == WM_T_PCH_SPT) {
4446 				tarc0 |= __BIT(29);
4447 				/*
4448 				 * Drop bit 28. From Linux.
4449 				 * See I218/I219 spec update
4450 				 * "5. Buffer Overrun While the I219 is
4451 				 * Processing DMA Transactions"
4452 				 */
4453 				tarc0 &= ~__BIT(28);
4454 			}
4455 			/* Set TARC0 bits 23,24,26,27 */
4456 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
4457 
4458 			/* CTRL_EXT */
4459 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4460 			reg |= __BIT(22);	/* Set bit 22 */
4461 			/*
4462 			 * Enable PHY low-power state when MAC is at D3
4463 			 * w/o WoL
4464 			 */
4465 			if (sc->sc_type >= WM_T_PCH)
4466 				reg |= CTRL_EXT_PHYPDEN;
4467 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4468 
4469 			/* TARC1 */
4470 			tarc1 = CSR_READ(sc, WMREG_TARC1);
4471 			/* bit 28 */
4472 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
4473 				tarc1 &= ~__BIT(28);
4474 			else
4475 				tarc1 |= __BIT(28);
4476 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
4477 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
4478 
4479 			/* Device Status */
4480 			if (sc->sc_type == WM_T_ICH8) {
4481 				reg = CSR_READ(sc, WMREG_STATUS);
4482 				reg &= ~__BIT(31);
4483 				CSR_WRITE(sc, WMREG_STATUS, reg);
4484 
4485 			}
4486 
4487 			/* IOSFPC */
4488 			if (sc->sc_type == WM_T_PCH_SPT) {
4489 				reg = CSR_READ(sc, WMREG_IOSFPC);
4490 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
4491 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
4492 			}
4493 			/*
4494 			 * To work around a descriptor data corruption issue
4495 			 * during NFS v2 UDP traffic, just disable the NFS
4496 			 * filtering capability.
4497 			 */
4498 			reg = CSR_READ(sc, WMREG_RFCTL);
4499 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
4500 			CSR_WRITE(sc, WMREG_RFCTL, reg);
4501 			break;
4502 		default:
4503 			break;
4504 		}
4505 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
4506 
4507 		switch (sc->sc_type) {
4508 		/*
4509 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
4510 		 * Avoid RSS Hash Value bug.
4511 		 */
4512 		case WM_T_82571:
4513 		case WM_T_82572:
4514 		case WM_T_82573:
4515 		case WM_T_80003:
4516 		case WM_T_ICH8:
4517 			reg = CSR_READ(sc, WMREG_RFCTL);
4518 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
4519 			CSR_WRITE(sc, WMREG_RFCTL, reg);
4520 			break;
4521 		case WM_T_82574:
4522 			/* Use extended Rx descriptors. */
4523 			reg = CSR_READ(sc, WMREG_RFCTL);
4524 			reg |= WMREG_RFCTL_EXSTEN;
4525 			CSR_WRITE(sc, WMREG_RFCTL, reg);
4526 			break;
4527 		default:
4528 			break;
4529 		}
4530 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
4531 		/*
4532 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
4533 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
4534 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
4535 		 * Correctly by the Device"
4536 		 *
4537 		 * I354(C2000) Errata AVR53:
4538 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
4539 		 * Hang"
4540 		 */
4541 		reg = CSR_READ(sc, WMREG_RFCTL);
4542 		reg |= WMREG_RFCTL_IPV6EXDIS;
4543 		CSR_WRITE(sc, WMREG_RFCTL, reg);
4544 	}
4545 }
4546 
4547 static uint32_t
4548 wm_rxpbs_adjust_82580(uint32_t val)
4549 {
4550 	uint32_t rv = 0;
4551 
4552 	if (val < __arraycount(wm_82580_rxpbs_table))
4553 		rv = wm_82580_rxpbs_table[val];
4554 
4555 	return rv;
4556 }
4557 
4558 /*
4559  * wm_reset_phy:
4560  *
4561  *	Generic PHY reset function.
4562  *	Same as e1000_phy_hw_reset_generic()
4563  */
4564 static int
4565 wm_reset_phy(struct wm_softc *sc)
4566 {
4567 	uint32_t reg;
4568 
4569 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4570 		device_xname(sc->sc_dev), __func__));
4571 	if (wm_phy_resetisblocked(sc))
4572 		return -1;
4573 
4574 	sc->phy.acquire(sc);
4575 
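	/*
	 * Assert PHY reset, hold it for the PHY-specific delay, then
	 * deassert it and let the PHY settle before releasing the
	 * semaphore.
	 */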
4576 	reg = CSR_READ(sc, WMREG_CTRL);
4577 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
4578 	CSR_WRITE_FLUSH(sc);
4579 
4580 	delay(sc->phy.reset_delay_us);
4581 
4582 	CSR_WRITE(sc, WMREG_CTRL, reg);
4583 	CSR_WRITE_FLUSH(sc);
4584 
4585 	delay(150);
4586 
4587 	sc->phy.release(sc);
4588 
4589 	wm_get_cfg_done(sc);
4590 	wm_phy_post_reset(sc);
4591 
4592 	return 0;
4593 }
4594 
4595 /*
4596  * Only used by WM_T_PCH_SPT, which does not use multiqueue,
4597  * so it is enough to check sc->sc_queue[0] only.
4598  */
4599 static void
4600 wm_flush_desc_rings(struct wm_softc *sc)
4601 {
4602 	pcireg_t preg;
4603 	uint32_t reg;
4604 	struct wm_txqueue *txq;
4605 	wiseman_txdesc_t *txd;
4606 	int nexttx;
4607 	uint32_t rctl;
4608 
4609 	/* First, disable MULR fix in FEXTNVM11 */
4610 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
4611 	reg |= FEXTNVM11_DIS_MULRFIX;
4612 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
4613 
4614 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
4615 	reg = CSR_READ(sc, WMREG_TDLEN(0));
4616 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
4617 		return;
4618 
4619 	/* TX */
4620 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x, len = %u)\n",
4621 	    preg, reg);
4622 	reg = CSR_READ(sc, WMREG_TCTL);
4623 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
4624 
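	/*
	 * Queue a single dummy 512-byte descriptor and advance the tail
	 * pointer so the hardware drains any descriptors still held in
	 * its internal cache.
	 */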
4625 	txq = &sc->sc_queue[0].wmq_txq;
4626 	nexttx = txq->txq_next;
4627 	txd = &txq->txq_descs[nexttx];
4628 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
4629 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
4630 	txd->wtx_fields.wtxu_status = 0;
4631 	txd->wtx_fields.wtxu_options = 0;
4632 	txd->wtx_fields.wtxu_vlan = 0;
4633 
4634 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
4635 	    BUS_SPACE_BARRIER_WRITE);
4636 
4637 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
4638 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
4639 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
4640 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
4641 	delay(250);
4642 
4643 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
4644 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
4645 		return;
4646 
4647 	/* RX */
4648 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
4649 	rctl = CSR_READ(sc, WMREG_RCTL);
4650 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
4651 	CSR_WRITE_FLUSH(sc);
4652 	delay(150);
4653 
4654 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
4655 	/* Zero the lower 14 bits (prefetch and host thresholds) */
4656 	reg &= 0xffffc000;
4657 	/*
4658 	 * Update thresholds: prefetch threshold to 31, host threshold
4659 	 * to 1 and make sure the granularity is "descriptors" and not
4660 	 * "cache lines"
4661 	 */
4662 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
4663 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
4664 
4665 	/* Momentarily enable the RX ring for the changes to take effect */
4666 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
4667 	CSR_WRITE_FLUSH(sc);
4668 	delay(150);
4669 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
4670 }
4671 
4672 /*
4673  * wm_reset:
4674  *
4675  *	Reset the i82542 chip.
4676  */
4677 static void
4678 wm_reset(struct wm_softc *sc)
4679 {
4680 	int phy_reset = 0;
4681 	int i, error = 0;
4682 	uint32_t reg;
4683 	uint16_t kmreg;
4684 	int rv;
4685 
4686 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4687 		device_xname(sc->sc_dev), __func__));
4688 	KASSERT(sc->sc_type != 0);
4689 
4690 	/*
4691 	 * Allocate on-chip memory according to the MTU size.
4692 	 * The Packet Buffer Allocation register must be written
4693 	 * before the chip is reset.
4694 	 */
4695 	switch (sc->sc_type) {
4696 	case WM_T_82547:
4697 	case WM_T_82547_2:
4698 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4699 		    PBA_22K : PBA_30K;
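		/*
		 * The Tx FIFO starts where the Rx allocation ends and
		 * occupies the rest of the 40KB packet buffer.
		 */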
4700 		for (i = 0; i < sc->sc_nqueues; i++) {
4701 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
4702 			txq->txq_fifo_head = 0;
4703 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
4704 			txq->txq_fifo_size =
4705 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
4706 			txq->txq_fifo_stall = 0;
4707 		}
4708 		break;
4709 	case WM_T_82571:
4710 	case WM_T_82572:
4711 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
4712 	case WM_T_80003:
4713 		sc->sc_pba = PBA_32K;
4714 		break;
4715 	case WM_T_82573:
4716 		sc->sc_pba = PBA_12K;
4717 		break;
4718 	case WM_T_82574:
4719 	case WM_T_82583:
4720 		sc->sc_pba = PBA_20K;
4721 		break;
4722 	case WM_T_82576:
4723 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
4724 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
4725 		break;
4726 	case WM_T_82580:
4727 	case WM_T_I350:
4728 	case WM_T_I354:
4729 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
4730 		break;
4731 	case WM_T_I210:
4732 	case WM_T_I211:
4733 		sc->sc_pba = PBA_34K;
4734 		break;
4735 	case WM_T_ICH8:
4736 		/* Workaround for a bit corruption issue in FIFO memory */
4737 		sc->sc_pba = PBA_8K;
4738 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
4739 		break;
4740 	case WM_T_ICH9:
4741 	case WM_T_ICH10:
4742 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
4743 		    PBA_14K : PBA_10K;
4744 		break;
4745 	case WM_T_PCH:
4746 	case WM_T_PCH2:	/* XXX 14K? */
4747 	case WM_T_PCH_LPT:
4748 	case WM_T_PCH_SPT:
4749 	case WM_T_PCH_CNP:
4750 		sc->sc_pba = PBA_26K;
4751 		break;
4752 	default:
4753 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4754 		    PBA_40K : PBA_48K;
4755 		break;
4756 	}
4757 	/*
4758 	 * Only old or non-multiqueue devices have the PBA register.
4759 	 * XXX Need special handling for 82575.
4760 	 */
4761 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4762 	    || (sc->sc_type == WM_T_82575))
4763 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
4764 
4765 	/* Prevent the PCI-E bus from sticking */
4766 	if (sc->sc_flags & WM_F_PCIE) {
4767 		int timeout = 800;
4768 
4769 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
4770 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4771 
4772 		while (timeout--) {
4773 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
4774 			    == 0)
4775 				break;
4776 			delay(100);
4777 		}
4778 		if (timeout == 0)
4779 			device_printf(sc->sc_dev,
4780 			    "failed to disable busmastering\n");
4781 	}
4782 
4783 	/* Set the completion timeout for interface */
4784 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
4785 	    || (sc->sc_type == WM_T_82580)
4786 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4787 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
4788 		wm_set_pcie_completion_timeout(sc);
4789 
4790 	/* Clear interrupt */
4791 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4792 	if (wm_is_using_msix(sc)) {
4793 		if (sc->sc_type != WM_T_82574) {
4794 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4795 			CSR_WRITE(sc, WMREG_EIAC, 0);
4796 		} else
4797 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4798 	}
4799 
4800 	/* Stop the transmit and receive processes. */
4801 	CSR_WRITE(sc, WMREG_RCTL, 0);
4802 	sc->sc_rctl &= ~RCTL_EN;
4803 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
4804 	CSR_WRITE_FLUSH(sc);
4805 
4806 	/* XXX set_tbi_sbp_82543() */
4807 
4808 	delay(10*1000);
4809 
4810 	/* Must acquire the MDIO ownership before MAC reset */
4811 	switch (sc->sc_type) {
4812 	case WM_T_82573:
4813 	case WM_T_82574:
4814 	case WM_T_82583:
4815 		error = wm_get_hw_semaphore_82573(sc);
4816 		break;
4817 	default:
4818 		break;
4819 	}
4820 
4821 	/*
4822 	 * 82541 Errata 29? & 82547 Errata 28?
4823 	 * See also the description about PHY_RST bit in CTRL register
4824 	 * in 8254x_GBe_SDM.pdf.
4825 	 */
4826 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
4827 		CSR_WRITE(sc, WMREG_CTRL,
4828 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
4829 		CSR_WRITE_FLUSH(sc);
4830 		delay(5000);
4831 	}
4832 
4833 	switch (sc->sc_type) {
4834 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
4835 	case WM_T_82541:
4836 	case WM_T_82541_2:
4837 	case WM_T_82547:
4838 	case WM_T_82547_2:
4839 		/*
4840 		 * On some chipsets, a reset through a memory-mapped write
4841 		 * cycle can cause the chip to reset before completing the
4842 		 * write cycle. This causes major headaches that can be avoided
4843 		 * by issuing the reset via indirect register writes through
4844 		 * I/O space.
4845 		 *
4846 		 * So, if we successfully mapped the I/O BAR at attach time,
4847 		 * use that. Otherwise, try our luck with a memory-mapped
4848 		 * reset.
4849 		 */
4850 		if (sc->sc_flags & WM_F_IOH_VALID)
4851 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
4852 		else
4853 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
4854 		break;
4855 	case WM_T_82545_3:
4856 	case WM_T_82546_3:
4857 		/* Use the shadow control register on these chips. */
4858 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
4859 		break;
4860 	case WM_T_80003:
4861 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4862 		sc->phy.acquire(sc);
4863 		CSR_WRITE(sc, WMREG_CTRL, reg);
4864 		sc->phy.release(sc);
4865 		break;
4866 	case WM_T_ICH8:
4867 	case WM_T_ICH9:
4868 	case WM_T_ICH10:
4869 	case WM_T_PCH:
4870 	case WM_T_PCH2:
4871 	case WM_T_PCH_LPT:
4872 	case WM_T_PCH_SPT:
4873 	case WM_T_PCH_CNP:
4874 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4875 		if (wm_phy_resetisblocked(sc) == false) {
4876 			/*
4877 			 * Gate automatic PHY configuration by hardware on
4878 			 * non-managed 82579
4879 			 */
4880 			if ((sc->sc_type == WM_T_PCH2)
4881 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
4882 				== 0))
4883 				wm_gate_hw_phy_config_ich8lan(sc, true);
4884 
4885 			reg |= CTRL_PHY_RESET;
4886 			phy_reset = 1;
4887 		} else
4888 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
4889 		sc->phy.acquire(sc);
4890 		CSR_WRITE(sc, WMREG_CTRL, reg);
4891 		/* Don't insert a completion barrier during reset */
4892 		delay(20*1000);
4893 		mutex_exit(sc->sc_ich_phymtx);
4894 		break;
4895 	case WM_T_82580:
4896 	case WM_T_I350:
4897 	case WM_T_I354:
4898 	case WM_T_I210:
4899 	case WM_T_I211:
4900 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4901 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
4902 			CSR_WRITE_FLUSH(sc);
4903 		delay(5000);
4904 		break;
4905 	case WM_T_82542_2_0:
4906 	case WM_T_82542_2_1:
4907 	case WM_T_82543:
4908 	case WM_T_82540:
4909 	case WM_T_82545:
4910 	case WM_T_82546:
4911 	case WM_T_82571:
4912 	case WM_T_82572:
4913 	case WM_T_82573:
4914 	case WM_T_82574:
4915 	case WM_T_82575:
4916 	case WM_T_82576:
4917 	case WM_T_82583:
4918 	default:
4919 		/* Everything else can safely use the documented method. */
4920 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4921 		break;
4922 	}
4923 
4924 	/* Must release the MDIO ownership after MAC reset */
4925 	switch (sc->sc_type) {
4926 	case WM_T_82573:
4927 	case WM_T_82574:
4928 	case WM_T_82583:
4929 		if (error == 0)
4930 			wm_put_hw_semaphore_82573(sc);
4931 		break;
4932 	default:
4933 		break;
4934 	}
4935 
4936 	/* Set Phy Config Counter to 50msec */
4937 	if (sc->sc_type == WM_T_PCH2) {
4938 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
4939 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
4940 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
4941 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
4942 	}
4943 
4944 	if (phy_reset != 0)
4945 		wm_get_cfg_done(sc);
4946 
4947 	/* Reload EEPROM */
4948 	switch (sc->sc_type) {
4949 	case WM_T_82542_2_0:
4950 	case WM_T_82542_2_1:
4951 	case WM_T_82543:
4952 	case WM_T_82544:
4953 		delay(10);
4954 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4955 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4956 		CSR_WRITE_FLUSH(sc);
4957 		delay(2000);
4958 		break;
4959 	case WM_T_82540:
4960 	case WM_T_82545:
4961 	case WM_T_82545_3:
4962 	case WM_T_82546:
4963 	case WM_T_82546_3:
4964 		delay(5*1000);
4965 		/* XXX Disable HW ARPs on ASF enabled adapters */
4966 		break;
4967 	case WM_T_82541:
4968 	case WM_T_82541_2:
4969 	case WM_T_82547:
4970 	case WM_T_82547_2:
4971 		delay(20000);
4972 		/* XXX Disable HW ARPs on ASF enabled adapters */
4973 		break;
4974 	case WM_T_82571:
4975 	case WM_T_82572:
4976 	case WM_T_82573:
4977 	case WM_T_82574:
4978 	case WM_T_82583:
4979 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
4980 			delay(10);
4981 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4982 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4983 			CSR_WRITE_FLUSH(sc);
4984 		}
4985 		/* check EECD_EE_AUTORD */
4986 		wm_get_auto_rd_done(sc);
4987 		/*
4988 		 * PHY configuration from the NVM starts just after
4989 		 * EECD_AUTO_RD is set.
4990 		 */
4991 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4992 		    || (sc->sc_type == WM_T_82583))
4993 			delay(25*1000);
4994 		break;
4995 	case WM_T_82575:
4996 	case WM_T_82576:
4997 	case WM_T_82580:
4998 	case WM_T_I350:
4999 	case WM_T_I354:
5000 	case WM_T_I210:
5001 	case WM_T_I211:
5002 	case WM_T_80003:
5003 		/* check EECD_EE_AUTORD */
5004 		wm_get_auto_rd_done(sc);
5005 		break;
5006 	case WM_T_ICH8:
5007 	case WM_T_ICH9:
5008 	case WM_T_ICH10:
5009 	case WM_T_PCH:
5010 	case WM_T_PCH2:
5011 	case WM_T_PCH_LPT:
5012 	case WM_T_PCH_SPT:
5013 	case WM_T_PCH_CNP:
5014 		break;
5015 	default:
5016 		panic("%s: unknown type\n", __func__);
5017 	}
5018 
5019 	/* Check whether EEPROM is present or not */
5020 	switch (sc->sc_type) {
5021 	case WM_T_82575:
5022 	case WM_T_82576:
5023 	case WM_T_82580:
5024 	case WM_T_I350:
5025 	case WM_T_I354:
5026 	case WM_T_ICH8:
5027 	case WM_T_ICH9:
5028 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
5029 			/* Not found */
5030 			sc->sc_flags |= WM_F_EEPROM_INVALID;
5031 			if (sc->sc_type == WM_T_82575)
5032 				wm_reset_init_script_82575(sc);
5033 		}
5034 		break;
5035 	default:
5036 		break;
5037 	}
5038 
5039 	if (phy_reset != 0)
5040 		wm_phy_post_reset(sc);
5041 
5042 	if ((sc->sc_type == WM_T_82580)
5043 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
5044 		/* Clear global device reset status bit */
5045 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
5046 	}
5047 
5048 	/* Clear any pending interrupt events. */
5049 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5050 	reg = CSR_READ(sc, WMREG_ICR);
5051 	if (wm_is_using_msix(sc)) {
5052 		if (sc->sc_type != WM_T_82574) {
5053 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5054 			CSR_WRITE(sc, WMREG_EIAC, 0);
5055 		} else
5056 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5057 	}
5058 
5059 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5060 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5061 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
5062 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
5063 		reg = CSR_READ(sc, WMREG_KABGTXD);
5064 		reg |= KABGTXD_BGSQLBIAS;
5065 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
5066 	}
5067 
5068 	/* Reload sc_ctrl */
5069 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5070 
5071 	wm_set_eee(sc);
5072 
5073 	/*
5074 	 * For PCH, this write ensures that any noise is detected as a CRC
5075 	 * error and dropped rather than showing up as a bad packet to the
5076 	 * DMA engine.
5077 	 */
5078 	if (sc->sc_type == WM_T_PCH)
5079 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
5080 
5081 	if (sc->sc_type >= WM_T_82544)
5082 		CSR_WRITE(sc, WMREG_WUC, 0);
5083 
5084 	if (sc->sc_type < WM_T_82575)
5085 		wm_disable_aspm(sc); /* Workaround for some chips */
5086 
5087 	wm_reset_mdicnfg_82580(sc);
5088 
5089 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
5090 		wm_pll_workaround_i210(sc);
5091 
5092 	if (sc->sc_type == WM_T_80003) {
5093 		/* Default to TRUE to enable the MDIC W/A */
5094 		sc->sc_flags |= WM_F_80003_MDIC_WA;
5095 
5096 		rv = wm_kmrn_readreg(sc,
5097 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
5098 		if (rv == 0) {
5099 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
5100 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
5101 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
5102 			else
5103 				sc->sc_flags |= WM_F_80003_MDIC_WA;
5104 		}
5105 	}
5106 }
5107 
5108 /*
5109  * wm_add_rxbuf:
5110  *
5111  *	Add a receive buffer to the indicated descriptor.
5112  */
5113 static int
5114 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
5115 {
5116 	struct wm_softc *sc = rxq->rxq_sc;
5117 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
5118 	struct mbuf *m;
5119 	int error;
5120 
5121 	KASSERT(mutex_owned(rxq->rxq_lock));
5122 
5123 	MGETHDR(m, M_DONTWAIT, MT_DATA);
5124 	if (m == NULL)
5125 		return ENOBUFS;
5126 
5127 	MCLGET(m, M_DONTWAIT);
5128 	if ((m->m_flags & M_EXT) == 0) {
5129 		m_freem(m);
5130 		return ENOBUFS;
5131 	}
5132 
5133 	if (rxs->rxs_mbuf != NULL)
5134 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5135 
5136 	rxs->rxs_mbuf = m;
5137 
5138 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5139 	/*
5140 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
5141 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
5142 	 */
5143 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
5144 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
5145 	if (error) {
5146 		/* XXX XXX XXX */
5147 		aprint_error_dev(sc->sc_dev,
5148 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
5149 		panic("wm_add_rxbuf");
5150 	}
5151 
5152 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5153 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5154 
5155 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5156 		if ((sc->sc_rctl & RCTL_EN) != 0)
5157 			wm_init_rxdesc(rxq, idx);
5158 	} else
5159 		wm_init_rxdesc(rxq, idx);
5160 
5161 	return 0;
5162 }
5163 
5164 /*
5165  * wm_rxdrain:
5166  *
5167  *	Drain the receive queue.
5168  */
5169 static void
5170 wm_rxdrain(struct wm_rxqueue *rxq)
5171 {
5172 	struct wm_softc *sc = rxq->rxq_sc;
5173 	struct wm_rxsoft *rxs;
5174 	int i;
5175 
5176 	KASSERT(mutex_owned(rxq->rxq_lock));
5177 
5178 	for (i = 0; i < WM_NRXDESC; i++) {
5179 		rxs = &rxq->rxq_soft[i];
5180 		if (rxs->rxs_mbuf != NULL) {
5181 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5182 			m_freem(rxs->rxs_mbuf);
5183 			rxs->rxs_mbuf = NULL;
5184 		}
5185 	}
5186 }
5187 
5188 /*
5189  * Setup registers for RSS.
5190  *
5191  * XXX not yet VMDq support
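 *
 * Each redirection-table entry is filled round-robin (i % sc_nqueues),
 * so hashed flows are spread evenly across the configured queues.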
5192  */
5193 static void
5194 wm_init_rss(struct wm_softc *sc)
5195 {
5196 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
5197 	int i;
5198 
5199 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
5200 
5201 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
5202 		unsigned int qid, reta_ent;
5203 
5204 		qid  = i % sc->sc_nqueues;
5205 		switch (sc->sc_type) {
5206 		case WM_T_82574:
5207 			reta_ent = __SHIFTIN(qid,
5208 			    RETA_ENT_QINDEX_MASK_82574);
5209 			break;
5210 		case WM_T_82575:
5211 			reta_ent = __SHIFTIN(qid,
5212 			    RETA_ENT_QINDEX1_MASK_82575);
5213 			break;
5214 		default:
5215 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
5216 			break;
5217 		}
5218 
5219 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
5220 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
5221 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
5222 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
5223 	}
5224 
5225 	rss_getkey((uint8_t *)rss_key);
5226 	for (i = 0; i < RSSRK_NUM_REGS; i++)
5227 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
5228 
5229 	if (sc->sc_type == WM_T_82574)
5230 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
5231 	else
5232 		mrqc = MRQC_ENABLE_RSS_MQ;
5233 
5234 	/*
5235 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
5236 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
5237 	 */
5238 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
5239 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
5240 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
5241 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
5242 
5243 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
5244 }
5245 
5246 /*
5247  * Adjust the TX and RX queue numbers which the system actually uses.
5248  *
5249  * The numbers are affected by the following parameters:
5250  *     - The number of hardware queues
5251  *     - The number of MSI-X vectors (= "nvectors" argument)
5252  *     - ncpu
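 *
 * For example, on an 82576 (16 hardware queues) with 5 MSI-X vectors
 * and 8 CPUs, 4 TX/RX queue pairs are used; the remaining vector
 * serves the link interrupt.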
5253  */
5254 static void
5255 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
5256 {
5257 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
5258 
5259 	if (nvectors < 2) {
5260 		sc->sc_nqueues = 1;
5261 		return;
5262 	}
5263 
5264 	switch (sc->sc_type) {
5265 	case WM_T_82572:
5266 		hw_ntxqueues = 2;
5267 		hw_nrxqueues = 2;
5268 		break;
5269 	case WM_T_82574:
5270 		hw_ntxqueues = 2;
5271 		hw_nrxqueues = 2;
5272 		break;
5273 	case WM_T_82575:
5274 		hw_ntxqueues = 4;
5275 		hw_nrxqueues = 4;
5276 		break;
5277 	case WM_T_82576:
5278 		hw_ntxqueues = 16;
5279 		hw_nrxqueues = 16;
5280 		break;
5281 	case WM_T_82580:
5282 	case WM_T_I350:
5283 	case WM_T_I354:
5284 		hw_ntxqueues = 8;
5285 		hw_nrxqueues = 8;
5286 		break;
5287 	case WM_T_I210:
5288 		hw_ntxqueues = 4;
5289 		hw_nrxqueues = 4;
5290 		break;
5291 	case WM_T_I211:
5292 		hw_ntxqueues = 2;
5293 		hw_nrxqueues = 2;
5294 		break;
5295 		/*
5296 		 * As the Ethernet controllers below do not support MSI-X,
5297 		 * this driver does not use multiqueue on them:
5298 		 *     - WM_T_80003
5299 		 *     - WM_T_ICH8
5300 		 *     - WM_T_ICH9
5301 		 *     - WM_T_ICH10
5302 		 *     - WM_T_PCH
5303 		 *     - WM_T_PCH2
5304 		 *     - WM_T_PCH_LPT
5305 		 */
5306 	default:
5307 		hw_ntxqueues = 1;
5308 		hw_nrxqueues = 1;
5309 		break;
5310 	}
5311 
5312 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
5313 
5314 	/*
5315 	 * Since more queues than MSI-X vectors cannot improve scaling, we
5316 	 * limit the number of queues actually used.
5317 	 */
5318 	if (nvectors < hw_nqueues + 1)
5319 		sc->sc_nqueues = nvectors - 1;
5320 	else
5321 		sc->sc_nqueues = hw_nqueues;
5322 
5323 	/*
5324 	 * Since more queues than CPUs cannot improve scaling, we limit
5325 	 * the number of queues actually used.
5326 	 */
5327 	if (ncpu < sc->sc_nqueues)
5328 		sc->sc_nqueues = ncpu;
5329 }
5330 
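/*
 * wm_is_using_msix:
 *
 *	Return true if more than one interrupt vector is established,
 *	i.e. the device is running with MSI-X.
 */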
5331 static inline bool
5332 wm_is_using_msix(struct wm_softc *sc)
5333 {
5334 
5335 	return (sc->sc_nintrs > 1);
5336 }
5337 
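/*
 * wm_is_using_multiqueue:
 *
 *	Return true if more than one TX/RX queue pair is in use.
 */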
5338 static inline bool
5339 wm_is_using_multiqueue(struct wm_softc *sc)
5340 {
5341 
5342 	return (sc->sc_nqueues > 1);
5343 }
5344 
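/*
 * wm_softint_establish:
 *
 *	Establish the softint handler which defers TX/RX processing for
 *	the given queue.  On failure, disestablish the hardware interrupt
 *	already set up for this queue.
 */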
5345 static int
5346 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
5347 {
5348 	struct wm_queue *wmq = &sc->sc_queue[qidx];
5349 	wmq->wmq_id = qidx;
5350 	wmq->wmq_intr_idx = intr_idx;
5351 	wmq->wmq_si = softint_establish(SOFTINT_NET
5352 #ifdef WM_MPSAFE
5353 	    | SOFTINT_MPSAFE
5354 #endif
5355 	    , wm_handle_queue, wmq);
5356 	if (wmq->wmq_si != NULL)
5357 		return 0;
5358 
5359 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
5360 	    wmq->wmq_id);
5361 
5362 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
5363 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
5364 	return ENOMEM;
5365 }
5366 
5367 /*
5368  * Both single interrupt MSI and INTx can use this function.
5369  */
5370 static int
5371 wm_setup_legacy(struct wm_softc *sc)
5372 {
5373 	pci_chipset_tag_t pc = sc->sc_pc;
5374 	const char *intrstr = NULL;
5375 	char intrbuf[PCI_INTRSTR_LEN];
5376 	int error;
5377 
5378 	error = wm_alloc_txrx_queues(sc);
5379 	if (error) {
5380 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
5381 		    error);
5382 		return ENOMEM;
5383 	}
5384 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
5385 	    sizeof(intrbuf));
5386 #ifdef WM_MPSAFE
5387 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
5388 #endif
5389 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
5390 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
5391 	if (sc->sc_ihs[0] == NULL) {
5392 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
5393 		    (pci_intr_type(pc, sc->sc_intrs[0])
5394 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
5395 		return ENOMEM;
5396 	}
5397 
5398 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
5399 	sc->sc_nintrs = 1;
5400 
5401 	return wm_softint_establish(sc, 0, 0);
5402 }
5403 
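/*
 * wm_setup_msix:
 *
 *	Establish MSI-X handlers: one vector per TX/RX queue pair with
 *	round-robin CPU affinity, plus one vector for link interrupts.
 */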
5404 static int
5405 wm_setup_msix(struct wm_softc *sc)
5406 {
5407 	void *vih;
5408 	kcpuset_t *affinity;
5409 	int qidx, error, intr_idx, txrx_established;
5410 	pci_chipset_tag_t pc = sc->sc_pc;
5411 	const char *intrstr = NULL;
5412 	char intrbuf[PCI_INTRSTR_LEN];
5413 	char intr_xname[INTRDEVNAMEBUF];
5414 
5415 	if (sc->sc_nqueues < ncpu) {
5416 		/*
5417 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
5418 		 * interrupts starts from CPU#1.
5419 		 */
5420 		sc->sc_affinity_offset = 1;
5421 	} else {
5422 		/*
5423 		 * In this case, this device uses all CPUs, so we unify the
5424 		 * affinity cpu_index with the MSI-X vector number for readability.
5425 		 */
5426 		sc->sc_affinity_offset = 0;
5427 	}
5428 
5429 	error = wm_alloc_txrx_queues(sc);
5430 	if (error) {
5431 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
5432 		    error);
5433 		return ENOMEM;
5434 	}
5435 
5436 	kcpuset_create(&affinity, false);
5437 	intr_idx = 0;
5438 
5439 	/*
5440 	 * TX and RX
5441 	 */
5442 	txrx_established = 0;
5443 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5444 		struct wm_queue *wmq = &sc->sc_queue[qidx];
5445 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
5446 
5447 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
5448 		    sizeof(intrbuf));
5449 #ifdef WM_MPSAFE
5450 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
5451 		    PCI_INTR_MPSAFE, true);
5452 #endif
5453 		memset(intr_xname, 0, sizeof(intr_xname));
5454 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
5455 		    device_xname(sc->sc_dev), qidx);
5456 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
5457 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
5458 		if (vih == NULL) {
5459 			aprint_error_dev(sc->sc_dev,
5460 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
5461 			    intrstr ? " at " : "",
5462 			    intrstr ? intrstr : "");
5463 
5464 			goto fail;
5465 		}
5466 		kcpuset_zero(affinity);
5467 		/* Round-robin affinity */
5468 		kcpuset_set(affinity, affinity_to);
5469 		error = interrupt_distribute(vih, affinity, NULL);
5470 		if (error == 0) {
5471 			aprint_normal_dev(sc->sc_dev,
5472 			    "for TX and RX interrupting at %s affinity to %u\n",
5473 			    intrstr, affinity_to);
5474 		} else {
5475 			aprint_normal_dev(sc->sc_dev,
5476 			    "for TX and RX interrupting at %s\n", intrstr);
5477 		}
5478 		sc->sc_ihs[intr_idx] = vih;
5479 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
5480 			goto fail;
5481 		txrx_established++;
5482 		intr_idx++;
5483 	}
5484 
5485 	/* LINK */
5486 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
5487 	    sizeof(intrbuf));
5488 #ifdef WM_MPSAFE
5489 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
5490 #endif
5491 	memset(intr_xname, 0, sizeof(intr_xname));
5492 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
5493 	    device_xname(sc->sc_dev));
5494 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
5495 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
5496 	if (vih == NULL) {
5497 		aprint_error_dev(sc->sc_dev,
5498 		    "unable to establish MSI-X(for LINK)%s%s\n",
5499 		    intrstr ? " at " : "",
5500 		    intrstr ? intrstr : "");
5501 
5502 		goto fail;
5503 	}
5504 	/* Keep default affinity to LINK interrupt */
5505 	aprint_normal_dev(sc->sc_dev,
5506 	    "for LINK interrupting at %s\n", intrstr);
5507 	sc->sc_ihs[intr_idx] = vih;
5508 	sc->sc_link_intr_idx = intr_idx;
5509 
5510 	sc->sc_nintrs = sc->sc_nqueues + 1;
5511 	kcpuset_destroy(affinity);
5512 	return 0;
5513 
5514  fail:
5515 	for (qidx = 0; qidx < txrx_established; qidx++) {
5516 		struct wm_queue *wmq = &sc->sc_queue[qidx];
5517 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
5518 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
5519 	}
5520 
5521 	kcpuset_destroy(affinity);
5522 	return ENOMEM;
5523 }
5524 
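/*
 * wm_unset_stopping_flags:
 *
 *	Clear the stopping flags on the core and on all queues.
 */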
5525 static void
5526 wm_unset_stopping_flags(struct wm_softc *sc)
5527 {
5528 	int i;
5529 
5530 	KASSERT(WM_CORE_LOCKED(sc));
5531 
5532 	/* Must unset stopping flags in ascending order. */
5533 	for (i = 0; i < sc->sc_nqueues; i++) {
5534 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5535 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5536 
5537 		mutex_enter(txq->txq_lock);
5538 		txq->txq_stopping = false;
5539 		mutex_exit(txq->txq_lock);
5540 
5541 		mutex_enter(rxq->rxq_lock);
5542 		rxq->rxq_stopping = false;
5543 		mutex_exit(rxq->rxq_lock);
5544 	}
5545 
5546 	sc->sc_core_stopping = false;
5547 }
5548 
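/*
 * wm_set_stopping_flags:
 *
 *	Set the stopping flags on the core and on all queues; the
 *	interrupt and softint handlers check these flags and back off
 *	while the device is being stopped.
 */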
5549 static void
5550 wm_set_stopping_flags(struct wm_softc *sc)
5551 {
5552 	int i;
5553 
5554 	KASSERT(WM_CORE_LOCKED(sc));
5555 
5556 	sc->sc_core_stopping = true;
5557 
5558 	/* Must set stopping flags in ascending order. */
5559 	for (i = 0; i < sc->sc_nqueues; i++) {
5560 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5561 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5562 
5563 		mutex_enter(rxq->rxq_lock);
5564 		rxq->rxq_stopping = true;
5565 		mutex_exit(rxq->rxq_lock);
5566 
5567 		mutex_enter(txq->txq_lock);
5568 		txq->txq_stopping = true;
5569 		mutex_exit(txq->txq_lock);
5570 	}
5571 }
5572 
5573 /*
5574  * Write interrupt interval value to ITR or EITR
5575  */
5576 static void
5577 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
5578 {
5579 
5580 	if (!wmq->wmq_set_itr)
5581 		return;
5582 
5583 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5584 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
5585 
5586 		/*
5587 		 * The 82575 doesn't have the CNT_INGR field,
5588 		 * so overwrite the counter field in software.
5589 		 */
5590 		if (sc->sc_type == WM_T_82575)
5591 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
5592 		else
5593 			eitr |= EITR_CNT_INGR;
5594 
5595 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
5596 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
5597 		/*
5598 		 * The 82574 has both ITR and EITR. Set EITR when we use
5599 		 * the multiqueue function with MSI-X.
5600 		 */
5601 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
5602 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
5603 	} else {
5604 		KASSERT(wmq->wmq_id == 0);
5605 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
5606 	}
5607 
5608 	wmq->wmq_set_itr = false;
5609 }
5610 
5611 /*
5612  * TODO
5613  * The dynamic itr calculation below is almost the same as Linux igb's;
5614  * however, it does not fit wm(4) well, so AIM is disabled until we
5615  * find an appropriate itr calculation.
5616  */
5617 /*
5618  * Calculate the interrupt interval value to be written to the register
5619  * in wm_itrs_writereg(). This function does not write the ITR/EITR register.
5620  */
5621 static void
5622 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
5623 {
5624 #ifdef NOTYET
5625 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
5626 	struct wm_txqueue *txq = &wmq->wmq_txq;
5627 	uint32_t avg_size = 0;
5628 	uint32_t new_itr;
5629 
5630 	if (rxq->rxq_packets)
5631 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
5632 	if (txq->txq_packets)
5633 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
5634 
5635 	if (avg_size == 0) {
5636 		new_itr = 450; /* restore default value */
5637 		goto out;
5638 	}
5639 
5640 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
5641 	avg_size += 24;
5642 
5643 	/* Don't starve jumbo frames */
5644 	avg_size = uimin(avg_size, 3000);
5645 
5646 	/* Give a little boost to mid-size frames */
5647 	if ((avg_size > 300) && (avg_size < 1200))
5648 		new_itr = avg_size / 3;
5649 	else
5650 		new_itr = avg_size / 2;
5651 
5652 out:
5653 	/*
5654 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
5655 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
5656 	 */
5657 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
5658 		new_itr *= 4;
5659 
5660 	if (new_itr != wmq->wmq_itr) {
5661 		wmq->wmq_itr = new_itr;
5662 		wmq->wmq_set_itr = true;
5663 	} else
5664 		wmq->wmq_set_itr = false;
5665 
5666 	rxq->rxq_packets = 0;
5667 	rxq->rxq_bytes = 0;
5668 	txq->txq_packets = 0;
5669 	txq->txq_bytes = 0;
5670 #endif
5671 }
5672 
5673 /*
5674  * wm_init:		[ifnet interface function]
5675  *
5676  *	Initialize the interface.
5677  */
5678 static int
5679 wm_init(struct ifnet *ifp)
5680 {
5681 	struct wm_softc *sc = ifp->if_softc;
5682 	int ret;
5683 
5684 	WM_CORE_LOCK(sc);
5685 	ret = wm_init_locked(ifp);
5686 	WM_CORE_UNLOCK(sc);
5687 
5688 	return ret;
5689 }
5690 
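/*
 * wm_init_locked:
 *
 *	Initialize the interface.  Must be called with the core lock held.
 */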
5691 static int
5692 wm_init_locked(struct ifnet *ifp)
5693 {
5694 	struct wm_softc *sc = ifp->if_softc;
5695 	struct ethercom *ec = &sc->sc_ethercom;
5696 	int i, j, trynum, error = 0;
5697 	uint32_t reg;
5698 
5699 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5700 		device_xname(sc->sc_dev), __func__));
5701 	KASSERT(WM_CORE_LOCKED(sc));
5702 
5703 	/*
5704 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
5705 	 * There is a small but measurable benefit to avoiding the adjustment
5706 	 * of the descriptor so that the headers are aligned, for normal mtu,
5707 	 * on such platforms.  One possibility is that the DMA itself is
5708 	 * slightly more efficient if the front of the entire packet (instead
5709 	 * of the front of the headers) is aligned.
5710 	 *
5711 	 * Note we must always set align_tweak to 0 if we are using
5712 	 * jumbo frames.
5713 	 */
5714 #ifdef __NO_STRICT_ALIGNMENT
5715 	sc->sc_align_tweak = 0;
5716 #else
5717 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
5718 		sc->sc_align_tweak = 0;
5719 	else
5720 		sc->sc_align_tweak = 2;
5721 #endif /* __NO_STRICT_ALIGNMENT */
5722 
5723 	/* Cancel any pending I/O. */
5724 	wm_stop_locked(ifp, 0);
5725 
5726 	/* Update statistics before reset */
5727 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
5728 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
5729 
5730 	/* PCH_SPT hardware workaround */
5731 	if (sc->sc_type == WM_T_PCH_SPT)
5732 		wm_flush_desc_rings(sc);
5733 
5734 	/* Reset the chip to a known state. */
5735 	wm_reset(sc);
5736 
5737 	/*
5738 	 * AMT based hardware can now take control from firmware
5739 	 * Do this after reset.
5740 	 */
5741 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
5742 		wm_get_hw_control(sc);
5743 
5744 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
5745 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
5746 		wm_legacy_irq_quirk_spt(sc);
5747 
5748 	/* Init hardware bits */
5749 	wm_initialize_hardware_bits(sc);
5750 
5751 	/* Reset the PHY. */
5752 	if (sc->sc_flags & WM_F_HAS_MII)
5753 		wm_gmii_reset(sc);
5754 
5755 	if (sc->sc_type >= WM_T_ICH8) {
5756 		reg = CSR_READ(sc, WMREG_GCR);
5757 		/*
5758 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
5759 		 * default after reset.
5760 		 */
5761 		if (sc->sc_type == WM_T_ICH8)
5762 			reg |= GCR_NO_SNOOP_ALL;
5763 		else
5764 			reg &= ~GCR_NO_SNOOP_ALL;
5765 		CSR_WRITE(sc, WMREG_GCR, reg);
5766 	}
5767 	if ((sc->sc_type >= WM_T_ICH8)
5768 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
5769 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
5770 
5771 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
5772 		reg |= CTRL_EXT_RO_DIS;
5773 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5774 	}
5775 
5776 	/* Calculate (E)ITR value */
5777 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
5778 		/*
5779 		 * For NEWQUEUE's EITR (except for 82575).
5780 		 * The 82575's EITR should be set to the same throttling value
5781 		 * as other old controllers' ITR because the interrupt/sec
5782 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
5783 		 *
5784 		 * The 82574's EITR should be set to the same throttling value as ITR.
5785 		 *
5786 		 * For N interrupts/sec, set this value to:
5787 		 * 1,000,000 / N in contrast to the ITR throttling value.
5788 		 */
5789 		sc->sc_itr_init = 450;
5790 	} else if (sc->sc_type >= WM_T_82543) {
5791 		/*
5792 		 * Set up the interrupt throttling register (units of 256ns)
5793 		 * Note that a footnote in Intel's documentation says this
5794 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
5795 		 * or 10Mbit mode.  Empirically, it appears to be the case
5796 		 * that that is also true for the 1024ns units of the other
5797 		 * interrupt-related timer registers -- so, really, we ought
5798 		 * to divide this value by 4 when the link speed is low.
5799 		 *
5800 		 * XXX implement this division at link speed change!
5801 		 */
5802 
5803 		/*
5804 		 * For N interrupts/sec, set this value to:
5805 		 * 1,000,000,000 / (N * 256).  Note that we set the
5806 		 * absolute and packet timer values to this value
5807 		 * divided by 4 to get "simple timer" behavior.
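		 *
		 * For example, the value 1500 used below gives
		 * 1,000,000,000 / (1500 * 256) =~ 2604 interrupts/sec.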
5808 		 */
5809 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
5810 	}
5811 
5812 	error = wm_init_txrx_queues(sc);
5813 	if (error)
5814 		goto out;
5815 
5816 	/* Clear out the VLAN table -- we don't use it (yet). */
5817 	CSR_WRITE(sc, WMREG_VET, 0);
5818 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
5819 		trynum = 10; /* Due to hw errata */
5820 	else
5821 		trynum = 1;
5822 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
5823 		for (j = 0; j < trynum; j++)
5824 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
5825 
5826 	/*
5827 	 * Set up flow-control parameters.
5828 	 *
5829 	 * XXX Values could probably stand some tuning.
5830 	 */
5831 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
5832 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
5833 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
5834 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
5835 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
5836 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
5837 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
5838 	}
5839 
5840 	sc->sc_fcrtl = FCRTL_DFLT;
5841 	if (sc->sc_type < WM_T_82543) {
5842 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
5843 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
5844 	} else {
5845 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
5846 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
5847 	}
5848 
5849 	if (sc->sc_type == WM_T_80003)
5850 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
5851 	else
5852 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
5853 
5854 	/* Writes the control register. */
5855 	wm_set_vlan(sc);
5856 
5857 	if (sc->sc_flags & WM_F_HAS_MII) {
5858 		uint16_t kmreg;
5859 
5860 		switch (sc->sc_type) {
5861 		case WM_T_80003:
5862 		case WM_T_ICH8:
5863 		case WM_T_ICH9:
5864 		case WM_T_ICH10:
5865 		case WM_T_PCH:
5866 		case WM_T_PCH2:
5867 		case WM_T_PCH_LPT:
5868 		case WM_T_PCH_SPT:
5869 		case WM_T_PCH_CNP:
5870 			/*
5871 			 * Set the mac to wait the maximum time between each
5872 			 * iteration and increase the max iterations when
5873 			 * polling the phy; this fixes erroneous timeouts at
5874 			 * 10Mbps.
5875 			 */
5876 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
5877 			    0xFFFF);
5878 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
5879 			    &kmreg);
5880 			kmreg |= 0x3F;
5881 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
5882 			    kmreg);
5883 			break;
5884 		default:
5885 			break;
5886 		}
5887 
5888 		if (sc->sc_type == WM_T_80003) {
5889 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
5890 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
5891 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5892 
5893 			/* Bypass the RX and TX FIFOs */
5894 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
5895 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
5896 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
5897 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
5898 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
5899 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
5900 		}
5901 	}
5902 #if 0
5903 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
5904 #endif
5905 
5906 	/* Set up checksum offload parameters. */
5907 	reg = CSR_READ(sc, WMREG_RXCSUM);
5908 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
5909 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
5910 		reg |= RXCSUM_IPOFL;
5911 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
5912 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
5913 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
5914 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
5915 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
5916 
5917 	/* Set up the MSI-X related registers */
5918 	if (wm_is_using_msix(sc)) {
5919 		uint32_t ivar, qintr_idx;
5920 		struct wm_queue *wmq;
5921 		unsigned int qid;
5922 
5923 		if (sc->sc_type == WM_T_82575) {
5924 			/* Interrupt control */
5925 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
5926 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
5927 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5928 
5929 			/* TX and RX */
5930 			for (i = 0; i < sc->sc_nqueues; i++) {
5931 				wmq = &sc->sc_queue[i];
5932 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
5933 				    EITR_TX_QUEUE(wmq->wmq_id)
5934 				    | EITR_RX_QUEUE(wmq->wmq_id));
5935 			}
5936 			/* Link status */
5937 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
5938 			    EITR_OTHER);
5939 		} else if (sc->sc_type == WM_T_82574) {
5940 			/* Interrupt control */
5941 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
5942 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
5943 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5944 
5945 			/*
5946 			 * Workaround issue with spurious interrupts
5947 			 * in MSI-X mode.
5948 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
5949 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
5950 			 */
5951 			reg = CSR_READ(sc, WMREG_RFCTL);
5952 			reg |= WMREG_RFCTL_ACKDIS;
5953 			CSR_WRITE(sc, WMREG_RFCTL, reg);
5954 
5955 			ivar = 0;
5956 			/* TX and RX */
5957 			for (i = 0; i < sc->sc_nqueues; i++) {
5958 				wmq = &sc->sc_queue[i];
5959 				qid = wmq->wmq_id;
5960 				qintr_idx = wmq->wmq_intr_idx;
5961 
5962 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
5963 				    IVAR_TX_MASK_Q_82574(qid));
5964 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
5965 				    IVAR_RX_MASK_Q_82574(qid));
5966 			}
5967 			/* Link status */
5968 			ivar |= __SHIFTIN((IVAR_VALID_82574
5969 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
5970 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
5971 		} else {
5972 			/* Interrupt control */
5973 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
5974 			    | GPIE_EIAME | GPIE_PBA);
5975 
5976 			switch (sc->sc_type) {
5977 			case WM_T_82580:
5978 			case WM_T_I350:
5979 			case WM_T_I354:
5980 			case WM_T_I210:
5981 			case WM_T_I211:
5982 				/* TX and RX */
5983 				for (i = 0; i < sc->sc_nqueues; i++) {
5984 					wmq = &sc->sc_queue[i];
5985 					qid = wmq->wmq_id;
5986 					qintr_idx = wmq->wmq_intr_idx;
5987 
5988 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
5989 					ivar &= ~IVAR_TX_MASK_Q(qid);
5990 					ivar |= __SHIFTIN((qintr_idx
5991 						| IVAR_VALID),
5992 					    IVAR_TX_MASK_Q(qid));
5993 					ivar &= ~IVAR_RX_MASK_Q(qid);
5994 					ivar |= __SHIFTIN((qintr_idx
5995 						| IVAR_VALID),
5996 					    IVAR_RX_MASK_Q(qid));
5997 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
5998 				}
5999 				break;
6000 			case WM_T_82576:
6001 				/* TX and RX */
6002 				for (i = 0; i < sc->sc_nqueues; i++) {
6003 					wmq = &sc->sc_queue[i];
6004 					qid = wmq->wmq_id;
6005 					qintr_idx = wmq->wmq_intr_idx;
6006 
6007 					ivar = CSR_READ(sc,
6008 					    WMREG_IVAR_Q_82576(qid));
6009 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
6010 					ivar |= __SHIFTIN((qintr_idx
6011 						| IVAR_VALID),
6012 					    IVAR_TX_MASK_Q_82576(qid));
6013 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
6014 					ivar |= __SHIFTIN((qintr_idx
6015 						| IVAR_VALID),
6016 					    IVAR_RX_MASK_Q_82576(qid));
6017 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
6018 					    ivar);
6019 				}
6020 				break;
6021 			default:
6022 				break;
6023 			}
6024 
6025 			/* Link status */
6026 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
6027 			    IVAR_MISC_OTHER);
6028 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
6029 		}
6030 
6031 		if (wm_is_using_multiqueue(sc)) {
6032 			wm_init_rss(sc);
6033 
6034 			/*
6035 			 * NOTE: Receive Full-Packet Checksum Offload is
6036 			 * mutually exclusive with Multiqueue. However, this
6037 			 * is not the same as the TCP/IP checksums, which
6038 			 * still work.
6039 			 */
6040 			reg = CSR_READ(sc, WMREG_RXCSUM);
6041 			reg |= RXCSUM_PCSD;
6042 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
6043 		}
6044 	}
6045 
6046 	/* Set up the interrupt registers. */
6047 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
6048 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
6049 	    ICR_RXO | ICR_RXT0;
6050 	if (wm_is_using_msix(sc)) {
6051 		uint32_t mask;
6052 		struct wm_queue *wmq;
6053 
6054 		switch (sc->sc_type) {
6055 		case WM_T_82574:
6056 			mask = 0;
6057 			for (i = 0; i < sc->sc_nqueues; i++) {
6058 				wmq = &sc->sc_queue[i];
6059 				mask |= ICR_TXQ(wmq->wmq_id);
6060 				mask |= ICR_RXQ(wmq->wmq_id);
6061 			}
6062 			mask |= ICR_OTHER;
6063 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
6064 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
6065 			break;
6066 		default:
6067 			if (sc->sc_type == WM_T_82575) {
6068 				mask = 0;
6069 				for (i = 0; i < sc->sc_nqueues; i++) {
6070 					wmq = &sc->sc_queue[i];
6071 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
6072 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
6073 				}
6074 				mask |= EITR_OTHER;
6075 			} else {
6076 				mask = 0;
6077 				for (i = 0; i < sc->sc_nqueues; i++) {
6078 					wmq = &sc->sc_queue[i];
6079 					mask |= 1 << wmq->wmq_intr_idx;
6080 				}
6081 				mask |= 1 << sc->sc_link_intr_idx;
6082 			}
6083 			CSR_WRITE(sc, WMREG_EIAC, mask);
6084 			CSR_WRITE(sc, WMREG_EIAM, mask);
6085 			CSR_WRITE(sc, WMREG_EIMS, mask);
6086 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
6087 			break;
6088 		}
6089 	} else
6090 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
6091 
6092 	/* Set up the inter-packet gap. */
6093 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
6094 
6095 	if (sc->sc_type >= WM_T_82543) {
6096 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6097 			struct wm_queue *wmq = &sc->sc_queue[qidx];
6098 			wm_itrs_writereg(sc, wmq);
6099 		}
6100 		/*
6101 		 * Link interrupts occur much less often than TX
6102 		 * and RX interrupts, so we don't tune the
6103 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way
6104 		 * FreeBSD's if_igb does.
6105 		 */
6106 	}
6107 
6108 	/* Set the VLAN ethernetype. */
6109 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
6110 
6111 	/*
6112 	 * Set up the transmit control register; we start out with
6113 	 * a collision distance suitable for FDX, but update it when
6114 	 * we resolve the media type.
6115 	 */
6116 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
6117 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
6118 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6119 	if (sc->sc_type >= WM_T_82571)
6120 		sc->sc_tctl |= TCTL_MULR;
6121 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6122 
6123 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6124 		/* Write TDT after TCTL.EN is set. See the documentation. */
6125 		CSR_WRITE(sc, WMREG_TDT(0), 0);
6126 	}
6127 
6128 	if (sc->sc_type == WM_T_80003) {
6129 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
6130 		reg &= ~TCTL_EXT_GCEX_MASK;
6131 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
6132 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
6133 	}
6134 
6135 	/* Set the media. */
6136 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
6137 		goto out;
6138 
6139 	/* Configure for OS presence */
6140 	wm_init_manageability(sc);
6141 
6142 	/*
6143 	 * Set up the receive control register; we actually program the
6144 	 * register when we set the receive filter. Use multicast address
6145 	 * offset type 0.
6146 	 *
6147 	 * Only the i82544 has the ability to strip the incoming CRC, so we
6148 	 * don't enable that feature.
6149 	 */
6150 	sc->sc_mchash_type = 0;
6151 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
6152 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
6153 
6154 	/* The 82574 uses the one-buffer extended Rx descriptor. */
6155 	if (sc->sc_type == WM_T_82574)
6156 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
6157 
6158 	/*
6159 	 * The I350 has a bug where it always strips the CRC whether
6160 	 * asked to or not. So ask for stripped CRC here and cope in rxeof.
6161 	 */
6162 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
6163 	    || (sc->sc_type == WM_T_I210))
6164 		sc->sc_rctl |= RCTL_SECRC;
6165 
6166 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
6167 	    && (ifp->if_mtu > ETHERMTU)) {
6168 		sc->sc_rctl |= RCTL_LPE;
6169 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6170 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
6171 	}
6172 
6173 	if (MCLBYTES == 2048)
6174 		sc->sc_rctl |= RCTL_2k;
6175 	else {
6176 		if (sc->sc_type >= WM_T_82543) {
6177 			switch (MCLBYTES) {
6178 			case 4096:
6179 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
6180 				break;
6181 			case 8192:
6182 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
6183 				break;
6184 			case 16384:
6185 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
6186 				break;
6187 			default:
6188 				panic("wm_init: MCLBYTES %d unsupported",
6189 				    MCLBYTES);
6190 				break;
6191 			}
6192 		} else
6193 			panic("wm_init: i82542 requires MCLBYTES = 2048");
6194 	}
6195 
6196 	/* Enable ECC */
6197 	switch (sc->sc_type) {
6198 	case WM_T_82571:
6199 		reg = CSR_READ(sc, WMREG_PBA_ECC);
6200 		reg |= PBA_ECC_CORR_EN;
6201 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
6202 		break;
6203 	case WM_T_PCH_LPT:
6204 	case WM_T_PCH_SPT:
6205 	case WM_T_PCH_CNP:
6206 		reg = CSR_READ(sc, WMREG_PBECCSTS);
6207 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
6208 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
6209 
6210 		sc->sc_ctrl |= CTRL_MEHE;
6211 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6212 		break;
6213 	default:
6214 		break;
6215 	}
6216 
6217 	/*
6218 	 * Set the receive filter.
6219 	 *
6220 	 * For 82575 and 82576, the RX descriptors must be initialized after
6221 	 * the setting of RCTL.EN in wm_set_filter()
6222 	 */
6223 	wm_set_filter(sc);
6224 
6225 	/* On 82575 and later, set RDT only if RX is enabled */
6226 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6227 		int qidx;
6228 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6229 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
6230 			for (i = 0; i < WM_NRXDESC; i++) {
6231 				mutex_enter(rxq->rxq_lock);
6232 				wm_init_rxdesc(rxq, i);
6233 				mutex_exit(rxq->rxq_lock);
6234 
6235 			}
6236 		}
6237 	}
6238 
6239 	wm_unset_stopping_flags(sc);
6240 
6241 	/* Start the one second link check clock. */
6242 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
6243 
6244 	/* ...all done! */
6245 	ifp->if_flags |= IFF_RUNNING;
6246 	ifp->if_flags &= ~IFF_OACTIVE;
6247 
6248  out:
6249 	/* Save last flags for the callback */
6250 	sc->sc_if_flags = ifp->if_flags;
6251 	sc->sc_ec_capenable = ec->ec_capenable;
6252 	if (error)
6253 		log(LOG_ERR, "%s: interface not running\n",
6254 		    device_xname(sc->sc_dev));
6255 	return error;
6256 }
6257 
6258 /*
6259  * wm_stop:		[ifnet interface function]
6260  *
6261  *	Stop transmission on the interface.
6262  */
6263 static void
6264 wm_stop(struct ifnet *ifp, int disable)
6265 {
6266 	struct wm_softc *sc = ifp->if_softc;
6267 
6268 	WM_CORE_LOCK(sc);
6269 	wm_stop_locked(ifp, disable);
6270 	WM_CORE_UNLOCK(sc);
6271 }
6272 
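/*
 * wm_stop_locked:
 *
 *	Stop transmission on the interface.  Must be called with the core
 *	lock held.
 */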
6273 static void
6274 wm_stop_locked(struct ifnet *ifp, int disable)
6275 {
6276 	struct wm_softc *sc = ifp->if_softc;
6277 	struct wm_txsoft *txs;
6278 	int i, qidx;
6279 
6280 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
6281 		device_xname(sc->sc_dev), __func__));
6282 	KASSERT(WM_CORE_LOCKED(sc));
6283 
6284 	wm_set_stopping_flags(sc);
6285 
6286 	/* Stop the one second clock. */
6287 	callout_stop(&sc->sc_tick_ch);
6288 
6289 	/* Stop the 82547 Tx FIFO stall check timer. */
6290 	if (sc->sc_type == WM_T_82547)
6291 		callout_stop(&sc->sc_txfifo_ch);
6292 
6293 	if (sc->sc_flags & WM_F_HAS_MII) {
6294 		/* Down the MII. */
6295 		mii_down(&sc->sc_mii);
6296 	} else {
6297 #if 0
6298 		/* Should we clear PHY's status properly? */
6299 		wm_reset(sc);
6300 #endif
6301 	}
6302 
6303 	/* Stop the transmit and receive processes. */
6304 	CSR_WRITE(sc, WMREG_TCTL, 0);
6305 	CSR_WRITE(sc, WMREG_RCTL, 0);
6306 	sc->sc_rctl &= ~RCTL_EN;
6307 
6308 	/*
6309 	 * Clear the interrupt mask to ensure the device cannot assert its
6310 	 * interrupt line.
6311 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
6312 	 * service any currently pending or shared interrupt.
6313 	 */
6314 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
6315 	sc->sc_icr = 0;
6316 	if (wm_is_using_msix(sc)) {
6317 		if (sc->sc_type != WM_T_82574) {
6318 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
6319 			CSR_WRITE(sc, WMREG_EIAC, 0);
6320 		} else
6321 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
6322 	}
6323 
6324 	/* Release any queued transmit buffers. */
6325 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6326 		struct wm_queue *wmq = &sc->sc_queue[qidx];
6327 		struct wm_txqueue *txq = &wmq->wmq_txq;
6328 		mutex_enter(txq->txq_lock);
6329 		txq->txq_sending = false; /* Ensure watchdog disabled */
6330 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6331 			txs = &txq->txq_soft[i];
6332 			if (txs->txs_mbuf != NULL) {
6333 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
6334 				m_freem(txs->txs_mbuf);
6335 				txs->txs_mbuf = NULL;
6336 			}
6337 		}
6338 		mutex_exit(txq->txq_lock);
6339 	}
6340 
6341 	/* Mark the interface as down and cancel the watchdog timer. */
6342 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
6343 
6344 	if (disable) {
6345 		for (i = 0; i < sc->sc_nqueues; i++) {
6346 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6347 			mutex_enter(rxq->rxq_lock);
6348 			wm_rxdrain(rxq);
6349 			mutex_exit(rxq->rxq_lock);
6350 		}
6351 	}
6352 
6353 #if 0 /* notyet */
6354 	if (sc->sc_type >= WM_T_82544)
6355 		CSR_WRITE(sc, WMREG_WUC, 0);
6356 #endif
6357 }
6358 
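/*
 * wm_dump_mbuf_chain:
 *
 *	Debug helper: log the data pointer, length and flags of each mbuf
 *	in a chain.
 */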
6359 static void
6360 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
6361 {
6362 	struct mbuf *m;
6363 	int i;
6364 
6365 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
6366 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
6367 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
6368 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
6369 		    m->m_data, m->m_len, m->m_flags);
6370 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
6371 	    i, i == 1 ? "" : "s");
6372 }
6373 
6374 /*
6375  * wm_82547_txfifo_stall:
6376  *
6377  *	Callout used to wait for the 82547 Tx FIFO to drain,
6378  *	reset the FIFO pointers, and restart packet transmission.
6379  */
6380 static void
6381 wm_82547_txfifo_stall(void *arg)
6382 {
6383 	struct wm_softc *sc = arg;
6384 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6385 
6386 	mutex_enter(txq->txq_lock);
6387 
6388 	if (txq->txq_stopping)
6389 		goto out;
6390 
6391 	if (txq->txq_fifo_stall) {
6392 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
6393 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
6394 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
6395 			/*
6396 			 * Packets have drained.  Stop transmitter, reset
6397 			 * FIFO pointers, restart transmitter, and kick
6398 			 * the packet queue.
6399 			 */
6400 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
6401 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
6402 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
6403 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
6404 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
6405 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
6406 			CSR_WRITE(sc, WMREG_TCTL, tctl);
6407 			CSR_WRITE_FLUSH(sc);
6408 
6409 			txq->txq_fifo_head = 0;
6410 			txq->txq_fifo_stall = 0;
6411 			wm_start_locked(&sc->sc_ethercom.ec_if);
6412 		} else {
6413 			/*
6414 			 * Still waiting for packets to drain; try again in
6415 			 * another tick.
6416 			 */
6417 			callout_schedule(&sc->sc_txfifo_ch, 1);
6418 		}
6419 	}
6420 
6421 out:
6422 	mutex_exit(txq->txq_lock);
6423 }
6424 
6425 /*
6426  * wm_82547_txfifo_bugchk:
6427  *
6428  *	Check for bug condition in the 82547 Tx FIFO.  We need to
6429  *	prevent enqueueing a packet that would wrap around the end
6430  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
6431  *
6432  *	We do this by checking the amount of space before the end
6433  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
6434  *	the Tx FIFO, wait for all remaining packets to drain, reset
6435  *	the internal FIFO pointers to the beginning, and restart
6436  *	transmission on the interface.
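 *
 *	For example, with a (hypothetical) 0x3000-byte FIFO and
 *	txq_fifo_head at 0x2e00, space is 0x200; a 1514-byte packet
 *	rounds up to a 0x600-byte FIFO footprint, which exceeds
 *	WM_82547_PAD_LEN + space (0x5e0), so we stall.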
6437  */
6438 #define	WM_FIFO_HDR		0x10
6439 #define	WM_82547_PAD_LEN	0x3e0
6440 static int
6441 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
6442 {
6443 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6444 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
6445 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
6446 
6447 	/* Just return if already stalled. */
6448 	if (txq->txq_fifo_stall)
6449 		return 1;
6450 
6451 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
6452 		/* Stall only occurs in half-duplex mode. */
6453 		goto send_packet;
6454 	}
6455 
6456 	if (len >= WM_82547_PAD_LEN + space) {
6457 		txq->txq_fifo_stall = 1;
6458 		callout_schedule(&sc->sc_txfifo_ch, 1);
6459 		return 1;
6460 	}
6461 
6462  send_packet:
6463 	txq->txq_fifo_head += len;
6464 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
6465 		txq->txq_fifo_head -= txq->txq_fifo_size;
6466 
6467 	return 0;
6468 }
6469 
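/*
 * wm_alloc_tx_descs:
 *
 *	Allocate and map the DMA memory for the TX descriptor ring and
 *	create and load its DMA map.  The ring size and descriptor format
 *	depend on the chip type.
 */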
6470 static int
6471 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
6472 {
6473 	int error;
6474 
6475 	/*
6476 	 * Allocate the control data structures, and create and load the
6477 	 * DMA map for it.
6478 	 *
6479 	 * NOTE: All Tx descriptors must be in the same 4G segment of
6480 	 * memory.  So must Rx descriptors.  We simplify by allocating
6481 	 * both sets within the same 4G segment.
6482 	 */
6483 	if (sc->sc_type < WM_T_82544)
6484 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
6485 	else
6486 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
6487 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6488 		txq->txq_descsize = sizeof(nq_txdesc_t);
6489 	else
6490 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
6491 
6492 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
6493 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
6494 		    1, &txq->txq_desc_rseg, 0)) != 0) {
6495 		aprint_error_dev(sc->sc_dev,
6496 		    "unable to allocate TX control data, error = %d\n",
6497 		    error);
6498 		goto fail_0;
6499 	}
6500 
6501 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
6502 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
6503 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
6504 		aprint_error_dev(sc->sc_dev,
6505 		    "unable to map TX control data, error = %d\n", error);
6506 		goto fail_1;
6507 	}
6508 
6509 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
6510 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
6511 		aprint_error_dev(sc->sc_dev,
6512 		    "unable to create TX control data DMA map, error = %d\n",
6513 		    error);
6514 		goto fail_2;
6515 	}
6516 
6517 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
6518 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
6519 		aprint_error_dev(sc->sc_dev,
6520 		    "unable to load TX control data DMA map, error = %d\n",
6521 		    error);
6522 		goto fail_3;
6523 	}
6524 
6525 	return 0;
6526 
6527  fail_3:
6528 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
6529  fail_2:
6530 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
6531 	    WM_TXDESCS_SIZE(txq));
6532  fail_1:
6533 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
6534  fail_0:
6535 	return error;
6536 }
6537 
6538 static void
6539 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
6540 {
6541 
6542 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
6543 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
6544 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
6545 	    WM_TXDESCS_SIZE(txq));
6546 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
6547 }
6548 
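/*
 * wm_alloc_rx_descs:
 *
 *	Allocate and map the DMA memory for the RX descriptor ring and
 *	create and load its DMA map.  The descriptor format depends on
 *	the chip type.
 */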
6549 static int
6550 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
6551 {
6552 	int error;
6553 	size_t rxq_descs_size;
6554 
6555 	/*
6556 	 * Allocate the control data structures, and create and load the
6557 	 * DMA map for it.
6558 	 *
6559 	 * NOTE: All Tx descriptors must be in the same 4G segment of
6560 	 * memory.  So must Rx descriptors.  We simplify by allocating
6561 	 * both sets within the same 4G segment.
6562 	 */
6563 	rxq->rxq_ndesc = WM_NRXDESC;
6564 	if (sc->sc_type == WM_T_82574)
6565 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
6566 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6567 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
6568 	else
6569 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
6570 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
6571 
6572 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
6573 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
6574 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
6575 		aprint_error_dev(sc->sc_dev,
6576 		    "unable to allocate RX control data, error = %d\n",
6577 		    error);
6578 		goto fail_0;
6579 	}
6580 
6581 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
6582 		    rxq->rxq_desc_rseg, rxq_descs_size,
6583 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
6584 		aprint_error_dev(sc->sc_dev,
6585 		    "unable to map RX control data, error = %d\n", error);
6586 		goto fail_1;
6587 	}
6588 
6589 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
6590 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
6591 		aprint_error_dev(sc->sc_dev,
6592 		    "unable to create RX control data DMA map, error = %d\n",
6593 		    error);
6594 		goto fail_2;
6595 	}
6596 
6597 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
6598 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
6599 		aprint_error_dev(sc->sc_dev,
6600 		    "unable to load RX control data DMA map, error = %d\n",
6601 		    error);
6602 		goto fail_3;
6603 	}
6604 
6605 	return 0;
6606 
6607  fail_3:
6608 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
6609  fail_2:
6610 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
6611 	    rxq_descs_size);
6612  fail_1:
6613 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
6614  fail_0:
6615 	return error;
6616 }
6617 
6618 static void
6619 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
6620 {
6621 
6622 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
6623 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
6624 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
6625 	    rxq->rxq_descsize * rxq->rxq_ndesc);
6626 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
6627 }
6628 
6629 
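/*
 * wm_alloc_tx_buffer:
 *
 *	Create the DMA maps for the transmit buffers.
 */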
6630 static int
6631 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
6632 {
6633 	int i, error;
6634 
6635 	/* Create the transmit buffer DMA maps. */
6636 	WM_TXQUEUELEN(txq) =
6637 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
6638 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
6639 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6640 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
6641 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
6642 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
6643 			aprint_error_dev(sc->sc_dev,
6644 			    "unable to create Tx DMA map %d, error = %d\n",
6645 			    i, error);
6646 			goto fail;
6647 		}
6648 	}
6649 
6650 	return 0;
6651 
6652  fail:
6653 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6654 		if (txq->txq_soft[i].txs_dmamap != NULL)
6655 			bus_dmamap_destroy(sc->sc_dmat,
6656 			    txq->txq_soft[i].txs_dmamap);
6657 	}
6658 	return error;
6659 }
6660 
6661 static void
6662 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
6663 {
6664 	int i;
6665 
6666 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6667 		if (txq->txq_soft[i].txs_dmamap != NULL)
6668 			bus_dmamap_destroy(sc->sc_dmat,
6669 			    txq->txq_soft[i].txs_dmamap);
6670 	}
6671 }
6672 
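/*
 * wm_alloc_rx_buffer:
 *
 *	Create the DMA maps for the receive buffers.
 */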
6673 static int
6674 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
6675 {
6676 	int i, error;
6677 
6678 	/* Create the receive buffer DMA maps. */
6679 	for (i = 0; i < rxq->rxq_ndesc; i++) {
6680 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
6681 			    MCLBYTES, 0, 0,
6682 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
6683 			aprint_error_dev(sc->sc_dev,
6684 			    "unable to create Rx DMA map %d error = %d\n",
6685 			    i, error);
6686 			goto fail;
6687 		}
6688 		rxq->rxq_soft[i].rxs_mbuf = NULL;
6689 	}
6690 
6691 	return 0;
6692 
6693  fail:
6694 	for (i = 0; i < rxq->rxq_ndesc; i++) {
6695 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
6696 			bus_dmamap_destroy(sc->sc_dmat,
6697 			    rxq->rxq_soft[i].rxs_dmamap);
6698 	}
6699 	return error;
6700 }
6701 
6702 static void
6703 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
6704 {
6705 	int i;
6706 
6707 	for (i = 0; i < rxq->rxq_ndesc; i++) {
6708 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
6709 			bus_dmamap_destroy(sc->sc_dmat,
6710 			    rxq->rxq_soft[i].rxs_dmamap);
6711 	}
6712 }
6713 
6714 /*
6715  * wm_alloc_txrx_queues:
6716  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
6717  */
6718 static int
6719 wm_alloc_txrx_queues(struct wm_softc *sc)
6720 {
6721 	int i, error, tx_done, rx_done;
6722 
6723 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
6724 	    KM_SLEEP);
6725 	if (sc->sc_queue == NULL) {
6726 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
6727 		error = ENOMEM;
6728 		goto fail_0;
6729 	}
6730 
6731 	/* For transmission */
6732 	error = 0;
6733 	tx_done = 0;
6734 	for (i = 0; i < sc->sc_nqueues; i++) {
6735 #ifdef WM_EVENT_COUNTERS
6736 		int j;
6737 		const char *xname;
6738 #endif
6739 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6740 		txq->txq_sc = sc;
6741 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
6742 
6743 		error = wm_alloc_tx_descs(sc, txq);
6744 		if (error)
6745 			break;
6746 		error = wm_alloc_tx_buffer(sc, txq);
6747 		if (error) {
6748 			wm_free_tx_descs(sc, txq);
6749 			break;
6750 		}
6751 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
6752 		if (txq->txq_interq == NULL) {
6753 			wm_free_tx_descs(sc, txq);
6754 			wm_free_tx_buffer(sc, txq);
6755 			error = ENOMEM;
6756 			break;
6757 		}
6758 
6759 #ifdef WM_EVENT_COUNTERS
6760 		xname = device_xname(sc->sc_dev);
6761 
6762 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
6763 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
6764 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
6765 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
6766 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
6767 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
6768 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
6769 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
6770 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
6771 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
6772 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
6773 
6774 		for (j = 0; j < WM_NTXSEGS; j++) {
6775 			snprintf(txq->txq_txseg_evcnt_names[j],
6776 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
6777 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
6778 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
6779 		}
6780 
6781 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
6782 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
6783 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
6784 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
6785 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
6786 #endif /* WM_EVENT_COUNTERS */
6787 
6788 		tx_done++;
6789 	}
6790 	if (error)
6791 		goto fail_1;
6792 
6793 	/* For receive */
6794 	error = 0;
6795 	rx_done = 0;
6796 	for (i = 0; i < sc->sc_nqueues; i++) {
6797 #ifdef WM_EVENT_COUNTERS
6798 		const char *xname;
6799 #endif
6800 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6801 		rxq->rxq_sc = sc;
6802 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
6803 
6804 		error = wm_alloc_rx_descs(sc, rxq);
6805 		if (error)
6806 			break;
6807 
6808 		error = wm_alloc_rx_buffer(sc, rxq);
6809 		if (error) {
6810 			wm_free_rx_descs(sc, rxq);
6811 			break;
6812 		}
6813 
6814 #ifdef WM_EVENT_COUNTERS
6815 		xname = device_xname(sc->sc_dev);
6816 
6817 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
6818 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
6819 
6820 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
6821 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
6822 #endif /* WM_EVENT_COUNTERS */
6823 
6824 		rx_done++;
6825 	}
6826 	if (error)
6827 		goto fail_2;
6828 
6829 	for (i = 0; i < sc->sc_nqueues; i++) {
6830 		char rndname[16];
6831 
6832 		snprintf(rndname, sizeof(rndname), "%sTXRX%d",
6833 		    device_xname(sc->sc_dev), i);
6834 		rnd_attach_source(&sc->sc_queue[i].rnd_source, rndname,
6835 		    RND_TYPE_NET, RND_FLAG_DEFAULT);
6836 	}
6837 
6838 	return 0;
6839 
6840  fail_2:
6841 	for (i = 0; i < rx_done; i++) {
6842 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6843 		wm_free_rx_buffer(sc, rxq);
6844 		wm_free_rx_descs(sc, rxq);
6845 		if (rxq->rxq_lock)
6846 			mutex_obj_free(rxq->rxq_lock);
6847 	}
6848  fail_1:
6849 	for (i = 0; i < tx_done; i++) {
6850 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6851 		pcq_destroy(txq->txq_interq);
6852 		wm_free_tx_buffer(sc, txq);
6853 		wm_free_tx_descs(sc, txq);
6854 		if (txq->txq_lock)
6855 			mutex_obj_free(txq->txq_lock);
6856 	}
6857 
6858 	kmem_free(sc->sc_queue,
6859 	    sizeof(struct wm_queue) * sc->sc_nqueues);
6860  fail_0:
6861 	return error;
6862 }
6863 
6864 /*
 * wm_free_txrx_queues:
 *	Free {tx,rx} descs and {tx,rx} buffers
6867  */
6868 static void
6869 wm_free_txrx_queues(struct wm_softc *sc)
6870 {
6871 	int i;
6872 
6873 	for (i = 0; i < sc->sc_nqueues; i++)
6874 		rnd_detach_source(&sc->sc_queue[i].rnd_source);
6875 
6876 	for (i = 0; i < sc->sc_nqueues; i++) {
6877 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6878 
6879 #ifdef WM_EVENT_COUNTERS
6880 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
6881 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
6882 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
6883 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
6884 #endif /* WM_EVENT_COUNTERS */
6885 
6886 		wm_free_rx_buffer(sc, rxq);
6887 		wm_free_rx_descs(sc, rxq);
6888 		if (rxq->rxq_lock)
6889 			mutex_obj_free(rxq->rxq_lock);
6890 	}
6891 
6892 	for (i = 0; i < sc->sc_nqueues; i++) {
6893 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6894 		struct mbuf *m;
6895 #ifdef WM_EVENT_COUNTERS
6896 		int j;
6897 
6898 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
6899 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
6900 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
6901 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
6902 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
6903 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
6904 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
6905 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
6906 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
6907 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
6908 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
6909 
6910 		for (j = 0; j < WM_NTXSEGS; j++)
6911 			evcnt_detach(&txq->txq_ev_txseg[j]);
6912 
6913 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
6914 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
6915 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
6916 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
6917 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
6918 #endif /* WM_EVENT_COUNTERS */
6919 
6920 		/* Drain txq_interq */
6921 		while ((m = pcq_get(txq->txq_interq)) != NULL)
6922 			m_freem(m);
6923 		pcq_destroy(txq->txq_interq);
6924 
6925 		wm_free_tx_buffer(sc, txq);
6926 		wm_free_tx_descs(sc, txq);
6927 		if (txq->txq_lock)
6928 			mutex_obj_free(txq->txq_lock);
6929 	}
6930 
6931 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
6932 }
6933 
6934 static void
6935 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
6936 {
6937 
6938 	KASSERT(mutex_owned(txq->txq_lock));
6939 
6940 	/* Initialize the transmit descriptor ring. */
6941 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
6942 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
6943 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
6944 	txq->txq_free = WM_NTXDESC(txq);
6945 	txq->txq_next = 0;
6946 }
6947 
6948 static void
6949 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
6950     struct wm_txqueue *txq)
6951 {
6952 
6953 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
6954 		device_xname(sc->sc_dev), __func__));
6955 	KASSERT(mutex_owned(txq->txq_lock));
6956 
6957 	if (sc->sc_type < WM_T_82543) {
6958 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
6959 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
6960 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
6961 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
6962 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
6963 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
6964 	} else {
6965 		int qid = wmq->wmq_id;
6966 
6967 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
6968 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
6969 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
6970 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
6971 
6972 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6973 			/*
6974 			 * Don't write TDT before TCTL.EN is set.
			 * See the documentation.
6976 			 */
6977 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
6978 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
6979 			    | TXDCTL_WTHRESH(0));
6980 		else {
6981 			/* XXX should update with AIM? */
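			/*
			 * The division by 4 below assumes (an assumption,
			 * not confirmed by a datasheet at hand) that
			 * wmq_itr is kept in ITR units of 256 ns, while
			 * TIDV/TADV count in 1.024 us units, i.e. 4 ITR
			 * ticks per TIDV/TADV tick.
			 */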
6982 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
6983 			if (sc->sc_type >= WM_T_82540) {
6984 				/* Should be the same */
6985 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
6986 			}
6987 
6988 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
6989 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
6990 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
6991 		}
6992 	}
6993 }
6994 
6995 static void
6996 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
6997 {
6998 	int i;
6999 
7000 	KASSERT(mutex_owned(txq->txq_lock));
7001 
7002 	/* Initialize the transmit job descriptors. */
7003 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
7004 		txq->txq_soft[i].txs_mbuf = NULL;
7005 	txq->txq_sfree = WM_TXQUEUELEN(txq);
7006 	txq->txq_snext = 0;
7007 	txq->txq_sdirty = 0;
7008 }
7009 
7010 static void
7011 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
7012     struct wm_txqueue *txq)
7013 {
7014 
7015 	KASSERT(mutex_owned(txq->txq_lock));
7016 
7017 	/*
7018 	 * Set up some register offsets that are different between
7019 	 * the i82542 and the i82543 and later chips.
7020 	 */
7021 	if (sc->sc_type < WM_T_82543)
7022 		txq->txq_tdt_reg = WMREG_OLD_TDT;
7023 	else
7024 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
7025 
7026 	wm_init_tx_descs(sc, txq);
7027 	wm_init_tx_regs(sc, wmq, txq);
7028 	wm_init_tx_buffer(sc, txq);
7029 
7030 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
7031 	txq->txq_sending = false;
7032 }
7033 
7034 static void
7035 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
7036     struct wm_rxqueue *rxq)
7037 {
7038 
7039 	KASSERT(mutex_owned(rxq->rxq_lock));
7040 
7041 	/*
7042 	 * Initialize the receive descriptor and receive job
7043 	 * descriptor rings.
7044 	 */
7045 	if (sc->sc_type < WM_T_82543) {
7046 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
7047 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
7048 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
7049 		    rxq->rxq_descsize * rxq->rxq_ndesc);
7050 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
7051 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
7052 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
7053 
7054 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
7055 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
7056 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
7057 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
7058 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
7059 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
7060 	} else {
7061 		int qid = wmq->wmq_id;
7062 
7063 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
7064 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
7065 		CSR_WRITE(sc, WMREG_RDLEN(qid),
7066 		    rxq->rxq_descsize * rxq->rxq_ndesc);
7067 
7068 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
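			/*
			 * The SRRCTL.BSIZEPKT field counts in units of
			 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, so MCLBYTES
			 * must be a multiple of that granularity; e.g.
			 * assuming SRRCTL_BSIZEPKT_SHIFT == 10 (1 KB),
			 * MCLBYTES == 2048 packs as 2048 >> 10 = 2.
			 */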
7069 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
				panic("%s: MCLBYTES %d unsupported for 82575 "
				    "or higher\n", __func__, MCLBYTES);
7071 
			/* Only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
			CSR_WRITE(sc, WMREG_SRRCTL(qid),
			    SRRCTL_DESCTYPE_ADV_ONEBUF
			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
7075 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
7076 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
7077 			    | RXDCTL_WTHRESH(1));
7078 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
7079 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
7080 		} else {
7081 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
7082 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
7083 			/* XXX should update with AIM? */
7084 			CSR_WRITE(sc, WMREG_RDTR,
7085 			    (wmq->wmq_itr / 4) | RDTR_FPD);
			/* MUST be the same */
7087 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
7088 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
7089 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
7090 		}
7091 	}
7092 }
7093 
7094 static int
7095 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7096 {
7097 	struct wm_rxsoft *rxs;
7098 	int error, i;
7099 
7100 	KASSERT(mutex_owned(rxq->rxq_lock));
7101 
7102 	for (i = 0; i < rxq->rxq_ndesc; i++) {
7103 		rxs = &rxq->rxq_soft[i];
7104 		if (rxs->rxs_mbuf == NULL) {
7105 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
7106 				log(LOG_ERR, "%s: unable to allocate or map "
7107 				    "rx buffer %d, error = %d\n",
7108 				    device_xname(sc->sc_dev), i, error);
7109 				/*
7110 				 * XXX Should attempt to run with fewer receive
7111 				 * XXX buffers instead of just failing.
7112 				 */
7113 				wm_rxdrain(rxq);
7114 				return ENOMEM;
7115 			}
7116 		} else {
7117 			/*
7118 			 * For 82575 and 82576, the RX descriptors must be
7119 			 * initialized after the setting of RCTL.EN in
			 * wm_set_filter().
7121 			 */
7122 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
7123 				wm_init_rxdesc(rxq, i);
7124 		}
7125 	}
7126 	rxq->rxq_ptr = 0;
7127 	rxq->rxq_discard = 0;
7128 	WM_RXCHAIN_RESET(rxq);
7129 
7130 	return 0;
7131 }
7132 
7133 static int
7134 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
7135     struct wm_rxqueue *rxq)
7136 {
7137 
7138 	KASSERT(mutex_owned(rxq->rxq_lock));
7139 
7140 	/*
7141 	 * Set up some register offsets that are different between
7142 	 * the i82542 and the i82543 and later chips.
7143 	 */
7144 	if (sc->sc_type < WM_T_82543)
7145 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
7146 	else
7147 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
7148 
7149 	wm_init_rx_regs(sc, wmq, rxq);
7150 	return wm_init_rx_buffer(sc, rxq);
7151 }
7152 
7153 /*
 * wm_init_txrx_queues:
 *	Initialize {tx,rx} descs and {tx,rx} buffers
7156  */
7157 static int
7158 wm_init_txrx_queues(struct wm_softc *sc)
7159 {
7160 	int i, error = 0;
7161 
7162 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
7163 		device_xname(sc->sc_dev), __func__));
7164 
7165 	for (i = 0; i < sc->sc_nqueues; i++) {
7166 		struct wm_queue *wmq = &sc->sc_queue[i];
7167 		struct wm_txqueue *txq = &wmq->wmq_txq;
7168 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
7169 
7170 		/*
7171 		 * TODO
		 * Currently, we use a constant value instead of AIM
		 * (adaptive interrupt moderation). Furthermore, the
		 * interrupt interval used for multiqueue (which runs in
		 * polling mode) is shorter than the default value.
		 * More tuning, and AIM, are required.
7176 		 */
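		/*
		 * Illustrative arithmetic (assuming a 256 ns ITR tick):
		 * wmq_itr = 50 corresponds to an interrupt interval of
		 * about 50 * 256 ns = 12.8 us.
		 */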
7177 		if (wm_is_using_multiqueue(sc))
7178 			wmq->wmq_itr = 50;
7179 		else
7180 			wmq->wmq_itr = sc->sc_itr_init;
7181 		wmq->wmq_set_itr = true;
7182 
7183 		mutex_enter(txq->txq_lock);
7184 		wm_init_tx_queue(sc, wmq, txq);
7185 		mutex_exit(txq->txq_lock);
7186 
7187 		mutex_enter(rxq->rxq_lock);
7188 		error = wm_init_rx_queue(sc, wmq, rxq);
7189 		mutex_exit(rxq->rxq_lock);
7190 		if (error)
7191 			break;
7192 	}
7193 
7194 	return error;
7195 }
7196 
7197 /*
7198  * wm_tx_offload:
7199  *
7200  *	Set up TCP/IP checksumming parameters for the
7201  *	specified packet.
7202  */
7203 static int
7204 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
7205     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
7206 {
7207 	struct mbuf *m0 = txs->txs_mbuf;
7208 	struct livengood_tcpip_ctxdesc *t;
7209 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
7210 	uint32_t ipcse;
7211 	struct ether_header *eh;
7212 	int offset, iphl;
7213 	uint8_t fields;
7214 
7215 	/*
7216 	 * XXX It would be nice if the mbuf pkthdr had offset
7217 	 * fields for the protocol headers.
7218 	 */
7219 
7220 	eh = mtod(m0, struct ether_header *);
7221 	switch (htons(eh->ether_type)) {
7222 	case ETHERTYPE_IP:
7223 	case ETHERTYPE_IPV6:
7224 		offset = ETHER_HDR_LEN;
7225 		break;
7226 
7227 	case ETHERTYPE_VLAN:
7228 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
7229 		break;
7230 
7231 	default:
7232 		/* Don't support this protocol or encapsulation. */
7233 		*fieldsp = 0;
7234 		*cmdp = 0;
7235 		return 0;
7236 	}
7237 
	if ((m0->m_pkthdr.csum_flags &
	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0)
		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
	else
		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
7243 
7244 	ipcse = offset + iphl - 1;
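	/*
	 * Example: for an untagged IPv4 packet with a 20-byte header,
	 * offset = 14 and iphl = 20, so ipcse = 33 -- the offset of the
	 * last byte covered by the IP header checksum.
	 */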
7245 
7246 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
7247 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
7248 	seg = 0;
7249 	fields = 0;
7250 
7251 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
7252 		int hlen = offset + iphl;
7253 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
7254 
7255 		if (__predict_false(m0->m_len <
7256 				    (hlen + sizeof(struct tcphdr)))) {
7257 			/*
7258 			 * TCP/IP headers are not in the first mbuf; we need
7259 			 * to do this the slow and painful way. Let's just
7260 			 * hope this doesn't happen very often.
7261 			 */
7262 			struct tcphdr th;
7263 
7264 			WM_Q_EVCNT_INCR(txq, tsopain);
7265 
7266 			m_copydata(m0, hlen, sizeof(th), &th);
7267 			if (v4) {
7268 				struct ip ip;
7269 
7270 				m_copydata(m0, offset, sizeof(ip), &ip);
7271 				ip.ip_len = 0;
7272 				m_copyback(m0,
7273 				    offset + offsetof(struct ip, ip_len),
7274 				    sizeof(ip.ip_len), &ip.ip_len);
7275 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
7276 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
7277 			} else {
7278 				struct ip6_hdr ip6;
7279 
7280 				m_copydata(m0, offset, sizeof(ip6), &ip6);
7281 				ip6.ip6_plen = 0;
7282 				m_copyback(m0,
7283 				    offset + offsetof(struct ip6_hdr, ip6_plen),
7284 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
7285 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
7286 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
7287 			}
7288 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
7289 			    sizeof(th.th_sum), &th.th_sum);
7290 
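			/*
			 * th_off counts 32-bit words, so "<< 2" converts
			 * it to bytes; e.g. th_off == 5 is a 20-byte TCP
			 * header with no options.
			 */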
7291 			hlen += th.th_off << 2;
7292 		} else {
7293 			/*
7294 			 * TCP/IP headers are in the first mbuf; we can do
7295 			 * this the easy way.
7296 			 */
7297 			struct tcphdr *th;
7298 
7299 			if (v4) {
7300 				struct ip *ip =
7301 				    (void *)(mtod(m0, char *) + offset);
7302 				th = (void *)(mtod(m0, char *) + hlen);
7303 
7304 				ip->ip_len = 0;
7305 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
7306 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
7307 			} else {
7308 				struct ip6_hdr *ip6 =
7309 				    (void *)(mtod(m0, char *) + offset);
7310 				th = (void *)(mtod(m0, char *) + hlen);
7311 
7312 				ip6->ip6_plen = 0;
7313 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
7314 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
7315 			}
7316 			hlen += th->th_off << 2;
7317 		}
7318 
7319 		if (v4) {
7320 			WM_Q_EVCNT_INCR(txq, tso);
7321 			cmdlen |= WTX_TCPIP_CMD_IP;
7322 		} else {
7323 			WM_Q_EVCNT_INCR(txq, tso6);
7324 			ipcse = 0;
7325 		}
7326 		cmd |= WTX_TCPIP_CMD_TSE;
7327 		cmdlen |= WTX_TCPIP_CMD_TSE |
7328 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
7329 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
7330 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
7331 	}
7332 
7333 	/*
7334 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
7335 	 * offload feature, if we load the context descriptor, we
7336 	 * MUST provide valid values for IPCSS and TUCSS fields.
7337 	 */
7338 
7339 	ipcs = WTX_TCPIP_IPCSS(offset) |
7340 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
7341 	    WTX_TCPIP_IPCSE(ipcse);
7342 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
7343 		WM_Q_EVCNT_INCR(txq, ipsum);
7344 		fields |= WTX_IXSM;
7345 	}
7346 
7347 	offset += iphl;
7348 
7349 	if (m0->m_pkthdr.csum_flags &
7350 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
7351 		WM_Q_EVCNT_INCR(txq, tusum);
7352 		fields |= WTX_TXSM;
7353 		tucs = WTX_TCPIP_TUCSS(offset) |
7354 		    WTX_TCPIP_TUCSO(offset +
7355 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
7356 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
7357 	} else if ((m0->m_pkthdr.csum_flags &
7358 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
7359 		WM_Q_EVCNT_INCR(txq, tusum6);
7360 		fields |= WTX_TXSM;
7361 		tucs = WTX_TCPIP_TUCSS(offset) |
7362 		    WTX_TCPIP_TUCSO(offset +
7363 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
7364 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
7365 	} else {
7366 		/* Just initialize it to a valid TCP context. */
7367 		tucs = WTX_TCPIP_TUCSS(offset) |
7368 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
7369 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
7370 	}
7371 
7372 	/*
	 * We don't have to write a context descriptor for every packet,
	 * except on the 82574: the 82574 requires a context descriptor
	 * for every packet when two descriptor queues are used.
	 * Writing a context descriptor for every packet adds overhead,
	 * but it does not cause problems.
7378 	 */
7379 	/* Fill in the context descriptor. */
7380 	t = (struct livengood_tcpip_ctxdesc *)
7381 	    &txq->txq_descs[txq->txq_next];
7382 	t->tcpip_ipcs = htole32(ipcs);
7383 	t->tcpip_tucs = htole32(tucs);
7384 	t->tcpip_cmdlen = htole32(cmdlen);
7385 	t->tcpip_seg = htole32(seg);
7386 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
7387 
7388 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
7389 	txs->txs_ndesc++;
7390 
7391 	*cmdp = cmd;
7392 	*fieldsp = fields;
7393 
7394 	return 0;
7395 }
7396 
7397 static inline int
7398 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
7399 {
7400 	struct wm_softc *sc = ifp->if_softc;
7401 	u_int cpuid = cpu_index(curcpu());
7402 
7403 	/*
	 * Currently, a simple distribution strategy.
	 * TODO:
	 * Distribute by flowid (RSS hash value).
7407 	 */
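	/*
	 * Worked example: with ncpu = 4, sc_affinity_offset = 1 and
	 * sc_nqueues = 2, a packet sent from CPU 2 maps to
	 * ((2 + 4 - 1) % 4) % 2 = queue 1.
	 */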
	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu)
	    % sc->sc_nqueues;
7409 }
7410 
7411 /*
7412  * wm_start:		[ifnet interface function]
7413  *
7414  *	Start packet transmission on the interface.
7415  */
7416 static void
7417 wm_start(struct ifnet *ifp)
7418 {
7419 	struct wm_softc *sc = ifp->if_softc;
7420 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7421 
7422 #ifdef WM_MPSAFE
7423 	KASSERT(if_is_mpsafe(ifp));
7424 #endif
7425 	/*
7426 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
7427 	 */
7428 
7429 	mutex_enter(txq->txq_lock);
7430 	if (!txq->txq_stopping)
7431 		wm_start_locked(ifp);
7432 	mutex_exit(txq->txq_lock);
7433 }
7434 
7435 static void
7436 wm_start_locked(struct ifnet *ifp)
7437 {
7438 	struct wm_softc *sc = ifp->if_softc;
7439 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7440 
7441 	wm_send_common_locked(ifp, txq, false);
7442 }
7443 
7444 static int
7445 wm_transmit(struct ifnet *ifp, struct mbuf *m)
7446 {
7447 	int qid;
7448 	struct wm_softc *sc = ifp->if_softc;
7449 	struct wm_txqueue *txq;
7450 
7451 	qid = wm_select_txqueue(ifp, m);
7452 	txq = &sc->sc_queue[qid].wmq_txq;
7453 
7454 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
7455 		m_freem(m);
7456 		WM_Q_EVCNT_INCR(txq, pcqdrop);
7457 		return ENOBUFS;
7458 	}
7459 
7460 	/* XXX NOMPSAFE: ifp->if_data should be percpu. */
7461 	ifp->if_obytes += m->m_pkthdr.len;
7462 	if (m->m_flags & M_MCAST)
7463 		ifp->if_omcasts++;
7464 
7465 	if (mutex_tryenter(txq->txq_lock)) {
7466 		if (!txq->txq_stopping)
7467 			wm_transmit_locked(ifp, txq);
7468 		mutex_exit(txq->txq_lock);
7469 	}
7470 
7471 	return 0;
7472 }
7473 
7474 static void
7475 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
7476 {
7477 
7478 	wm_send_common_locked(ifp, txq, true);
7479 }
7480 
7481 static void
7482 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
7483     bool is_transmit)
7484 {
7485 	struct wm_softc *sc = ifp->if_softc;
7486 	struct mbuf *m0;
7487 	struct wm_txsoft *txs;
7488 	bus_dmamap_t dmamap;
7489 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
7490 	bus_addr_t curaddr;
7491 	bus_size_t seglen, curlen;
7492 	uint32_t cksumcmd;
7493 	uint8_t cksumfields;
7494 	bool remap = true;
7495 
7496 	KASSERT(mutex_owned(txq->txq_lock));
7497 
7498 	if ((ifp->if_flags & IFF_RUNNING) == 0)
7499 		return;
7500 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
7501 		return;
7502 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
7503 		return;
7504 
7505 	/* Remember the previous number of free descriptors. */
7506 	ofree = txq->txq_free;
7507 
7508 	/*
7509 	 * Loop through the send queue, setting up transmit descriptors
7510 	 * until we drain the queue, or use up all available transmit
7511 	 * descriptors.
7512 	 */
7513 	for (;;) {
7514 		m0 = NULL;
7515 
7516 		/* Get a work queue entry. */
7517 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
7518 			wm_txeof(txq, UINT_MAX);
7519 			if (txq->txq_sfree == 0) {
7520 				DPRINTF(WM_DEBUG_TX,
7521 				    ("%s: TX: no free job descriptors\n",
7522 					device_xname(sc->sc_dev)));
7523 				WM_Q_EVCNT_INCR(txq, txsstall);
7524 				break;
7525 			}
7526 		}
7527 
7528 		/* Grab a packet off the queue. */
7529 		if (is_transmit)
7530 			m0 = pcq_get(txq->txq_interq);
7531 		else
7532 			IFQ_DEQUEUE(&ifp->if_snd, m0);
7533 		if (m0 == NULL)
7534 			break;
7535 
7536 		DPRINTF(WM_DEBUG_TX,
7537 		    ("%s: TX: have packet to transmit: %p\n",
7538 			device_xname(sc->sc_dev), m0));
7539 
7540 		txs = &txq->txq_soft[txq->txq_snext];
7541 		dmamap = txs->txs_dmamap;
7542 
7543 		use_tso = (m0->m_pkthdr.csum_flags &
7544 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
7545 
7546 		/*
7547 		 * So says the Linux driver:
7548 		 * The controller does a simple calculation to make sure
7549 		 * there is enough room in the FIFO before initiating the
7550 		 * DMA for each buffer. The calc is:
7551 		 *	4 = ceil(buffer len / MSS)
7552 		 * To make sure we don't overrun the FIFO, adjust the max
7553 		 * buffer len if the MSS drops.
7554 		 */
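		/*
		 * Illustrative example: with segsz = 536, the cap becomes
		 * 536 << 2 = 2144 bytes per DMA segment (4 * MSS), used
		 * instead of WTX_MAX_LEN when it is the smaller of the two.
		 */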
7555 		dmamap->dm_maxsegsz =
7556 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
7557 		    ? m0->m_pkthdr.segsz << 2
7558 		    : WTX_MAX_LEN;
7559 
7560 		/*
7561 		 * Load the DMA map.  If this fails, the packet either
7562 		 * didn't fit in the allotted number of segments, or we
7563 		 * were short on resources.  For the too-many-segments
7564 		 * case, we simply report an error and drop the packet,
7565 		 * since we can't sanely copy a jumbo packet to a single
7566 		 * buffer.
7567 		 */
7568 retry:
7569 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
7570 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
7571 		if (__predict_false(error)) {
7572 			if (error == EFBIG) {
7573 				if (remap == true) {
7574 					struct mbuf *m;
7575 
7576 					remap = false;
7577 					m = m_defrag(m0, M_NOWAIT);
7578 					if (m != NULL) {
7579 						WM_Q_EVCNT_INCR(txq, defrag);
7580 						m0 = m;
7581 						goto retry;
7582 					}
7583 				}
7584 				WM_Q_EVCNT_INCR(txq, toomanyseg);
7585 				log(LOG_ERR, "%s: Tx packet consumes too many "
7586 				    "DMA segments, dropping...\n",
7587 				    device_xname(sc->sc_dev));
7588 				wm_dump_mbuf_chain(sc, m0);
7589 				m_freem(m0);
7590 				continue;
7591 			}
7592 			/* Short on resources, just stop for now. */
7593 			DPRINTF(WM_DEBUG_TX,
7594 			    ("%s: TX: dmamap load failed: %d\n",
7595 				device_xname(sc->sc_dev), error));
7596 			break;
7597 		}
7598 
7599 		segs_needed = dmamap->dm_nsegs;
7600 		if (use_tso) {
7601 			/* For sentinel descriptor; see below. */
7602 			segs_needed++;
7603 		}
7604 
7605 		/*
7606 		 * Ensure we have enough descriptors free to describe
7607 		 * the packet. Note, we always reserve one descriptor
7608 		 * at the end of the ring due to the semantics of the
7609 		 * TDT register, plus one more in the event we need
7610 		 * to load offload context.
7611 		 */
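		/*
		 * Example: a TSO packet with dm_nsegs = 5 has
		 * segs_needed = 6 (including the sentinel), so it needs
		 * txq_free >= 8: six data slots, the always-reserved TDT
		 * slot and one slot for a possible context descriptor.
		 */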
7612 		if (segs_needed > txq->txq_free - 2) {
7613 			/*
7614 			 * Not enough free descriptors to transmit this
7615 			 * packet.  We haven't committed anything yet,
7616 			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt. Notify the upper
7618 			 * layer that there are no more slots left.
7619 			 */
7620 			DPRINTF(WM_DEBUG_TX,
7621 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
7622 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
7623 				segs_needed, txq->txq_free - 1));
7624 			if (!is_transmit)
7625 				ifp->if_flags |= IFF_OACTIVE;
7626 			txq->txq_flags |= WM_TXQ_NO_SPACE;
7627 			bus_dmamap_unload(sc->sc_dmat, dmamap);
7628 			WM_Q_EVCNT_INCR(txq, txdstall);
7629 			break;
7630 		}
7631 
7632 		/*
7633 		 * Check for 82547 Tx FIFO bug. We need to do this
7634 		 * once we know we can transmit the packet, since we
7635 		 * do some internal FIFO space accounting here.
7636 		 */
7637 		if (sc->sc_type == WM_T_82547 &&
7638 		    wm_82547_txfifo_bugchk(sc, m0)) {
7639 			DPRINTF(WM_DEBUG_TX,
7640 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
7641 				device_xname(sc->sc_dev)));
7642 			if (!is_transmit)
7643 				ifp->if_flags |= IFF_OACTIVE;
7644 			txq->txq_flags |= WM_TXQ_NO_SPACE;
7645 			bus_dmamap_unload(sc->sc_dmat, dmamap);
7646 			WM_Q_EVCNT_INCR(txq, fifo_stall);
7647 			break;
7648 		}
7649 
7650 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
7651 
7652 		DPRINTF(WM_DEBUG_TX,
7653 		    ("%s: TX: packet has %d (%d) DMA segments\n",
7654 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
7655 
7656 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
7657 
7658 		/*
7659 		 * Store a pointer to the packet so that we can free it
7660 		 * later.
7661 		 *
7662 		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments.  This may be
7664 		 * incremented by 1 if we do checksum offload (a descriptor
7665 		 * is used to set the checksum context).
7666 		 */
7667 		txs->txs_mbuf = m0;
7668 		txs->txs_firstdesc = txq->txq_next;
7669 		txs->txs_ndesc = segs_needed;
7670 
7671 		/* Set up offload parameters for this packet. */
7672 		if (m0->m_pkthdr.csum_flags &
7673 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
7674 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7675 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
7676 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
7677 					  &cksumfields) != 0) {
7678 				/* Error message already displayed. */
7679 				bus_dmamap_unload(sc->sc_dmat, dmamap);
7680 				continue;
7681 			}
7682 		} else {
7683 			cksumcmd = 0;
7684 			cksumfields = 0;
7685 		}
7686 
7687 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
7688 
7689 		/* Sync the DMA map. */
7690 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
7691 		    BUS_DMASYNC_PREWRITE);
7692 
7693 		/* Initialize the transmit descriptor. */
7694 		for (nexttx = txq->txq_next, seg = 0;
7695 		     seg < dmamap->dm_nsegs; seg++) {
7696 			for (seglen = dmamap->dm_segs[seg].ds_len,
7697 			     curaddr = dmamap->dm_segs[seg].ds_addr;
7698 			     seglen != 0;
7699 			     curaddr += curlen, seglen -= curlen,
7700 			     nexttx = WM_NEXTTX(txq, nexttx)) {
7701 				curlen = seglen;
7702 
7703 				/*
7704 				 * So says the Linux driver:
7705 				 * Work around for premature descriptor
7706 				 * write-backs in TSO mode.  Append a
7707 				 * 4-byte sentinel descriptor.
7708 				 */
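				/*
				 * Since curlen started equal to seglen,
				 * trimming 4 bytes leaves seglen == 4 after
				 * this pass; the loop then runs once more
				 * and emits the 4-byte sentinel descriptor.
				 */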
7709 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
7710 				    curlen > 8)
7711 					curlen -= 4;
7712 
7713 				wm_set_dma_addr(
7714 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
7715 				txq->txq_descs[nexttx].wtx_cmdlen
7716 				    = htole32(cksumcmd | curlen);
7717 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
7718 				    = 0;
7719 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
7720 				    = cksumfields;
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
7722 				lasttx = nexttx;
7723 
7724 				DPRINTF(WM_DEBUG_TX,
7725 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
7726 					"len %#04zx\n",
7727 					device_xname(sc->sc_dev), nexttx,
7728 					(uint64_t)curaddr, curlen));
7729 			}
7730 		}
7731 
7732 		KASSERT(lasttx != -1);
7733 
7734 		/*
7735 		 * Set up the command byte on the last descriptor of
7736 		 * the packet. If we're in the interrupt delay window,
7737 		 * delay the interrupt.
7738 		 */
7739 		txq->txq_descs[lasttx].wtx_cmdlen |=
7740 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
7741 
7742 		/*
7743 		 * If VLANs are enabled and the packet has a VLAN tag, set
7744 		 * up the descriptor to encapsulate the packet for us.
7745 		 *
7746 		 * This is only valid on the last descriptor of the packet.
7747 		 */
7748 		if (vlan_has_tag(m0)) {
7749 			txq->txq_descs[lasttx].wtx_cmdlen |=
7750 			    htole32(WTX_CMD_VLE);
7751 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
7752 			    = htole16(vlan_get_tag(m0));
7753 		}
7754 
7755 		txs->txs_lastdesc = lasttx;
7756 
7757 		DPRINTF(WM_DEBUG_TX,
7758 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
7759 			device_xname(sc->sc_dev),
7760 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
7761 
7762 		/* Sync the descriptors we're using. */
7763 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
7764 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
7765 
7766 		/* Give the packet to the chip. */
7767 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
7768 
7769 		DPRINTF(WM_DEBUG_TX,
7770 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
7771 
7772 		DPRINTF(WM_DEBUG_TX,
7773 		    ("%s: TX: finished transmitting packet, job %d\n",
7774 			device_xname(sc->sc_dev), txq->txq_snext));
7775 
7776 		/* Advance the tx pointer. */
7777 		txq->txq_free -= txs->txs_ndesc;
7778 		txq->txq_next = nexttx;
7779 
7780 		txq->txq_sfree--;
7781 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
7782 
7783 		/* Pass the packet to any BPF listeners. */
7784 		bpf_mtap(ifp, m0, BPF_D_OUT);
7785 	}
7786 
7787 	if (m0 != NULL) {
7788 		if (!is_transmit)
7789 			ifp->if_flags |= IFF_OACTIVE;
7790 		txq->txq_flags |= WM_TXQ_NO_SPACE;
7791 		WM_Q_EVCNT_INCR(txq, descdrop);
7792 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
7793 			__func__));
7794 		m_freem(m0);
7795 	}
7796 
7797 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
7798 		/* No more slots; notify upper layer. */
7799 		if (!is_transmit)
7800 			ifp->if_flags |= IFF_OACTIVE;
7801 		txq->txq_flags |= WM_TXQ_NO_SPACE;
7802 	}
7803 
7804 	if (txq->txq_free != ofree) {
7805 		/* Set a watchdog timer in case the chip flakes out. */
7806 		txq->txq_lastsent = time_uptime;
7807 		txq->txq_sending = true;
7808 	}
7809 }
7810 
7811 /*
7812  * wm_nq_tx_offload:
7813  *
7814  *	Set up TCP/IP checksumming parameters for the
7815  *	specified packet, for NEWQUEUE devices
7816  */
7817 static int
7818 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
7819     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
7820 {
7821 	struct mbuf *m0 = txs->txs_mbuf;
7822 	uint32_t vl_len, mssidx, cmdc;
7823 	struct ether_header *eh;
7824 	int offset, iphl;
7825 
7826 	/*
7827 	 * XXX It would be nice if the mbuf pkthdr had offset
7828 	 * fields for the protocol headers.
7829 	 */
7830 	*cmdlenp = 0;
7831 	*fieldsp = 0;
7832 
7833 	eh = mtod(m0, struct ether_header *);
7834 	switch (htons(eh->ether_type)) {
7835 	case ETHERTYPE_IP:
7836 	case ETHERTYPE_IPV6:
7837 		offset = ETHER_HDR_LEN;
7838 		break;
7839 
7840 	case ETHERTYPE_VLAN:
7841 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
7842 		break;
7843 
7844 	default:
7845 		/* Don't support this protocol or encapsulation. */
7846 		*do_csum = false;
7847 		return 0;
7848 	}
7849 	*do_csum = true;
7850 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
7851 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
7852 
7853 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
7854 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
7855 
7856 	if ((m0->m_pkthdr.csum_flags &
7857 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
7858 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
7859 	} else {
7860 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
7861 	}
7862 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
7863 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
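	/*
	 * At this point vl_len packs the MAC and IP header lengths; e.g.
	 * for an untagged IPv4 packet with a 20-byte header it is
	 * (14 << NQTXC_VLLEN_MACLEN_SHIFT) | (20 << NQTXC_VLLEN_IPLEN_SHIFT).
	 */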
7864 
7865 	if (vlan_has_tag(m0)) {
7866 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
7867 		    << NQTXC_VLLEN_VLAN_SHIFT);
7868 		*cmdlenp |= NQTX_CMD_VLE;
7869 	}
7870 
7871 	mssidx = 0;
7872 
7873 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
7874 		int hlen = offset + iphl;
7875 		int tcp_hlen;
7876 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
7877 
7878 		if (__predict_false(m0->m_len <
7879 				    (hlen + sizeof(struct tcphdr)))) {
7880 			/*
7881 			 * TCP/IP headers are not in the first mbuf; we need
7882 			 * to do this the slow and painful way. Let's just
7883 			 * hope this doesn't happen very often.
7884 			 */
7885 			struct tcphdr th;
7886 
7887 			WM_Q_EVCNT_INCR(txq, tsopain);
7888 
7889 			m_copydata(m0, hlen, sizeof(th), &th);
7890 			if (v4) {
7891 				struct ip ip;
7892 
7893 				m_copydata(m0, offset, sizeof(ip), &ip);
7894 				ip.ip_len = 0;
7895 				m_copyback(m0,
7896 				    offset + offsetof(struct ip, ip_len),
7897 				    sizeof(ip.ip_len), &ip.ip_len);
7898 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
7899 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
7900 			} else {
7901 				struct ip6_hdr ip6;
7902 
7903 				m_copydata(m0, offset, sizeof(ip6), &ip6);
7904 				ip6.ip6_plen = 0;
7905 				m_copyback(m0,
7906 				    offset + offsetof(struct ip6_hdr, ip6_plen),
7907 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
7908 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
7909 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
7910 			}
7911 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
7912 			    sizeof(th.th_sum), &th.th_sum);
7913 
7914 			tcp_hlen = th.th_off << 2;
7915 		} else {
7916 			/*
7917 			 * TCP/IP headers are in the first mbuf; we can do
7918 			 * this the easy way.
7919 			 */
7920 			struct tcphdr *th;
7921 
7922 			if (v4) {
7923 				struct ip *ip =
7924 				    (void *)(mtod(m0, char *) + offset);
7925 				th = (void *)(mtod(m0, char *) + hlen);
7926 
7927 				ip->ip_len = 0;
7928 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
7929 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
7930 			} else {
7931 				struct ip6_hdr *ip6 =
7932 				    (void *)(mtod(m0, char *) + offset);
7933 				th = (void *)(mtod(m0, char *) + hlen);
7934 
7935 				ip6->ip6_plen = 0;
7936 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
7937 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
7938 			}
7939 			tcp_hlen = th->th_off << 2;
7940 		}
7941 		hlen += tcp_hlen;
7942 		*cmdlenp |= NQTX_CMD_TSE;
7943 
7944 		if (v4) {
7945 			WM_Q_EVCNT_INCR(txq, tso);
7946 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
7947 		} else {
7948 			WM_Q_EVCNT_INCR(txq, tso6);
7949 			*fieldsp |= NQTXD_FIELDS_TUXSM;
7950 		}
		*fieldsp |= ((m0->m_pkthdr.len - hlen)
		    << NQTXD_FIELDS_PAYLEN_SHIFT);
		KASSERT(((m0->m_pkthdr.len - hlen)
		    & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
7953 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
7954 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
7955 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
7956 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
7957 	} else {
7958 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
7959 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
7960 	}
7961 
7962 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
7963 		*fieldsp |= NQTXD_FIELDS_IXSM;
7964 		cmdc |= NQTXC_CMD_IP4;
7965 	}
7966 
7967 	if (m0->m_pkthdr.csum_flags &
7968 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
7969 		WM_Q_EVCNT_INCR(txq, tusum);
7970 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
7971 			cmdc |= NQTXC_CMD_TCP;
7972 		else
7973 			cmdc |= NQTXC_CMD_UDP;
7974 
7975 		cmdc |= NQTXC_CMD_IP4;
7976 		*fieldsp |= NQTXD_FIELDS_TUXSM;
7977 	}
7978 	if (m0->m_pkthdr.csum_flags &
7979 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
7980 		WM_Q_EVCNT_INCR(txq, tusum6);
7981 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
7982 			cmdc |= NQTXC_CMD_TCP;
7983 		else
7984 			cmdc |= NQTXC_CMD_UDP;
7985 
7986 		cmdc |= NQTXC_CMD_IP6;
7987 		*fieldsp |= NQTXD_FIELDS_TUXSM;
7988 	}
7989 
7990 	/*
	 * We don't have to write a context descriptor for every packet on
	 * NEWQUEUE controllers (82575, 82576, 82580, I350, I354, I210 and
	 * I211); writing one per Tx queue is enough for these controllers.
	 * Writing a context descriptor for every packet adds overhead,
	 * but it does not cause problems.
7997 	 */
7998 	/* Fill in the context descriptor. */
7999 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
8000 	    htole32(vl_len);
8001 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
8002 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
8003 	    htole32(cmdc);
8004 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
8005 	    htole32(mssidx);
8006 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
8007 	DPRINTF(WM_DEBUG_TX,
8008 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
8009 		txq->txq_next, 0, vl_len));
8010 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
8011 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
8012 	txs->txs_ndesc++;
8013 	return 0;
8014 }
8015 
8016 /*
8017  * wm_nq_start:		[ifnet interface function]
8018  *
8019  *	Start packet transmission on the interface for NEWQUEUE devices
8020  */
8021 static void
8022 wm_nq_start(struct ifnet *ifp)
8023 {
8024 	struct wm_softc *sc = ifp->if_softc;
8025 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8026 
8027 #ifdef WM_MPSAFE
8028 	KASSERT(if_is_mpsafe(ifp));
8029 #endif
8030 	/*
8031 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
8032 	 */
8033 
8034 	mutex_enter(txq->txq_lock);
8035 	if (!txq->txq_stopping)
8036 		wm_nq_start_locked(ifp);
8037 	mutex_exit(txq->txq_lock);
8038 }
8039 
8040 static void
8041 wm_nq_start_locked(struct ifnet *ifp)
8042 {
8043 	struct wm_softc *sc = ifp->if_softc;
8044 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8045 
8046 	wm_nq_send_common_locked(ifp, txq, false);
8047 }
8048 
8049 static int
8050 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
8051 {
8052 	int qid;
8053 	struct wm_softc *sc = ifp->if_softc;
8054 	struct wm_txqueue *txq;
8055 
8056 	qid = wm_select_txqueue(ifp, m);
8057 	txq = &sc->sc_queue[qid].wmq_txq;
8058 
8059 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
8060 		m_freem(m);
8061 		WM_Q_EVCNT_INCR(txq, pcqdrop);
8062 		return ENOBUFS;
8063 	}
8064 
8065 	/* XXX NOMPSAFE: ifp->if_data should be percpu. */
8066 	ifp->if_obytes += m->m_pkthdr.len;
8067 	if (m->m_flags & M_MCAST)
8068 		ifp->if_omcasts++;
8069 
8070 	/*
	 * There are two situations in which this mutex_tryenter() can
	 * fail at run time:
	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
	 *     (2) contention with the deferred if_start softint
	 *         (wm_handle_queue())
	 * In case (1), the last packet enqueued to txq->txq_interq is
	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
	 * In case (2), the last packet enqueued to txq->txq_interq is also
	 * dequeued by wm_deferred_start_locked(), so it does not get stuck,
	 * either.
8080 	 */
8081 	if (mutex_tryenter(txq->txq_lock)) {
8082 		if (!txq->txq_stopping)
8083 			wm_nq_transmit_locked(ifp, txq);
8084 		mutex_exit(txq->txq_lock);
8085 	}
8086 
8087 	return 0;
8088 }
8089 
8090 static void
8091 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
8092 {
8093 
8094 	wm_nq_send_common_locked(ifp, txq, true);
8095 }
8096 
8097 static void
8098 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
8099     bool is_transmit)
8100 {
8101 	struct wm_softc *sc = ifp->if_softc;
8102 	struct mbuf *m0;
8103 	struct wm_txsoft *txs;
8104 	bus_dmamap_t dmamap;
8105 	int error, nexttx, lasttx = -1, seg, segs_needed;
8106 	bool do_csum, sent;
8107 	bool remap = true;
8108 
8109 	KASSERT(mutex_owned(txq->txq_lock));
8110 
8111 	if ((ifp->if_flags & IFF_RUNNING) == 0)
8112 		return;
8113 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
8114 		return;
8115 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
8116 		return;
8117 
8118 	sent = false;
8119 
8120 	/*
8121 	 * Loop through the send queue, setting up transmit descriptors
8122 	 * until we drain the queue, or use up all available transmit
8123 	 * descriptors.
8124 	 */
8125 	for (;;) {
8126 		m0 = NULL;
8127 
8128 		/* Get a work queue entry. */
8129 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
8130 			wm_txeof(txq, UINT_MAX);
8131 			if (txq->txq_sfree == 0) {
8132 				DPRINTF(WM_DEBUG_TX,
8133 				    ("%s: TX: no free job descriptors\n",
8134 					device_xname(sc->sc_dev)));
8135 				WM_Q_EVCNT_INCR(txq, txsstall);
8136 				break;
8137 			}
8138 		}
8139 
8140 		/* Grab a packet off the queue. */
8141 		if (is_transmit)
8142 			m0 = pcq_get(txq->txq_interq);
8143 		else
8144 			IFQ_DEQUEUE(&ifp->if_snd, m0);
8145 		if (m0 == NULL)
8146 			break;
8147 
8148 		DPRINTF(WM_DEBUG_TX,
8149 		    ("%s: TX: have packet to transmit: %p\n",
8150 		    device_xname(sc->sc_dev), m0));
8151 
8152 		txs = &txq->txq_soft[txq->txq_snext];
8153 		dmamap = txs->txs_dmamap;
8154 
8155 		/*
8156 		 * Load the DMA map.  If this fails, the packet either
8157 		 * didn't fit in the allotted number of segments, or we
8158 		 * were short on resources.  For the too-many-segments
8159 		 * case, we simply report an error and drop the packet,
8160 		 * since we can't sanely copy a jumbo packet to a single
8161 		 * buffer.
8162 		 */
8163 retry:
8164 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
8165 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
8166 		if (__predict_false(error)) {
8167 			if (error == EFBIG) {
8168 				if (remap == true) {
8169 					struct mbuf *m;
8170 
8171 					remap = false;
8172 					m = m_defrag(m0, M_NOWAIT);
8173 					if (m != NULL) {
8174 						WM_Q_EVCNT_INCR(txq, defrag);
8175 						m0 = m;
8176 						goto retry;
8177 					}
8178 				}
8179 				WM_Q_EVCNT_INCR(txq, toomanyseg);
8180 				log(LOG_ERR, "%s: Tx packet consumes too many "
8181 				    "DMA segments, dropping...\n",
8182 				    device_xname(sc->sc_dev));
8183 				wm_dump_mbuf_chain(sc, m0);
8184 				m_freem(m0);
8185 				continue;
8186 			}
8187 			/* Short on resources, just stop for now. */
8188 			DPRINTF(WM_DEBUG_TX,
8189 			    ("%s: TX: dmamap load failed: %d\n",
8190 				device_xname(sc->sc_dev), error));
8191 			break;
8192 		}
8193 
8194 		segs_needed = dmamap->dm_nsegs;
8195 
8196 		/*
8197 		 * Ensure we have enough descriptors free to describe
8198 		 * the packet. Note, we always reserve one descriptor
8199 		 * at the end of the ring due to the semantics of the
8200 		 * TDT register, plus one more in the event we need
8201 		 * to load offload context.
8202 		 */
8203 		if (segs_needed > txq->txq_free - 2) {
8204 			/*
8205 			 * Not enough free descriptors to transmit this
8206 			 * packet.  We haven't committed anything yet,
8207 			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt. Notify the upper
8209 			 * layer that there are no more slots left.
8210 			 */
8211 			DPRINTF(WM_DEBUG_TX,
8212 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
8213 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
8214 				segs_needed, txq->txq_free - 1));
8215 			if (!is_transmit)
8216 				ifp->if_flags |= IFF_OACTIVE;
8217 			txq->txq_flags |= WM_TXQ_NO_SPACE;
8218 			bus_dmamap_unload(sc->sc_dmat, dmamap);
8219 			WM_Q_EVCNT_INCR(txq, txdstall);
8220 			break;
8221 		}
8222 
8223 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
8224 
8225 		DPRINTF(WM_DEBUG_TX,
8226 		    ("%s: TX: packet has %d (%d) DMA segments\n",
8227 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
8228 
8229 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
8230 
8231 		/*
8232 		 * Store a pointer to the packet so that we can free it
8233 		 * later.
8234 		 *
8235 		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments.  This may be
8237 		 * incremented by 1 if we do checksum offload (a descriptor
8238 		 * is used to set the checksum context).
8239 		 */
8240 		txs->txs_mbuf = m0;
8241 		txs->txs_firstdesc = txq->txq_next;
8242 		txs->txs_ndesc = segs_needed;
8243 
8244 		/* Set up offload parameters for this packet. */
8245 		uint32_t cmdlen, fields, dcmdlen;
8246 		if (m0->m_pkthdr.csum_flags &
8247 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
8248 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
8249 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
8250 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
8251 			    &do_csum) != 0) {
8252 				/* Error message already displayed. */
8253 				bus_dmamap_unload(sc->sc_dmat, dmamap);
8254 				continue;
8255 			}
8256 		} else {
8257 			do_csum = false;
8258 			cmdlen = 0;
8259 			fields = 0;
8260 		}
8261 
8262 		/* Sync the DMA map. */
8263 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
8264 		    BUS_DMASYNC_PREWRITE);
8265 
8266 		/* Initialize the first transmit descriptor. */
8267 		nexttx = txq->txq_next;
8268 		if (!do_csum) {
8269 			/* Setup a legacy descriptor */
8270 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
8271 			    dmamap->dm_segs[0].ds_addr);
8272 			txq->txq_descs[nexttx].wtx_cmdlen =
8273 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
8274 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
8275 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
8276 			if (vlan_has_tag(m0)) {
8277 				txq->txq_descs[nexttx].wtx_cmdlen |=
8278 				    htole32(WTX_CMD_VLE);
8279 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
8280 				    htole16(vlan_get_tag(m0));
8281 			} else
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
8283 
8284 			dcmdlen = 0;
8285 		} else {
8286 			/* Setup an advanced data descriptor */
8287 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
8288 			    htole64(dmamap->dm_segs[0].ds_addr);
8289 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
8290 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
8292 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
8293 			    htole32(fields);
8294 			DPRINTF(WM_DEBUG_TX,
8295 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
8296 				device_xname(sc->sc_dev), nexttx,
8297 				(uint64_t)dmamap->dm_segs[0].ds_addr));
8298 			DPRINTF(WM_DEBUG_TX,
8299 			    ("\t 0x%08x%08x\n", fields,
8300 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
8301 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
8302 		}
8303 
8304 		lasttx = nexttx;
8305 		nexttx = WM_NEXTTX(txq, nexttx);
8306 		/*
		 * Fill in the next descriptors. The legacy and advanced
		 * formats are the same from here on.
8309 		 */
8310 		for (seg = 1; seg < dmamap->dm_nsegs;
8311 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
8312 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
8313 			    htole64(dmamap->dm_segs[seg].ds_addr);
8314 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
8315 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
8316 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
8317 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
8318 			lasttx = nexttx;
8319 
8320 			DPRINTF(WM_DEBUG_TX,
8321 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
8322 				device_xname(sc->sc_dev), nexttx,
8323 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
8324 				dmamap->dm_segs[seg].ds_len));
8325 		}
8326 
8327 		KASSERT(lasttx != -1);
8328 
8329 		/*
8330 		 * Set up the command byte on the last descriptor of
8331 		 * the packet. If we're in the interrupt delay window,
8332 		 * delay the interrupt.
8333 		 */
8334 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
8335 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
8336 		txq->txq_descs[lasttx].wtx_cmdlen |=
8337 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
8338 
8339 		txs->txs_lastdesc = lasttx;
8340 
8341 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
8342 		    device_xname(sc->sc_dev),
8343 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
8344 
8345 		/* Sync the descriptors we're using. */
8346 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
8347 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
8348 
8349 		/* Give the packet to the chip. */
8350 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
8351 		sent = true;
8352 
8353 		DPRINTF(WM_DEBUG_TX,
8354 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
8355 
8356 		DPRINTF(WM_DEBUG_TX,
8357 		    ("%s: TX: finished transmitting packet, job %d\n",
8358 			device_xname(sc->sc_dev), txq->txq_snext));
8359 
8360 		/* Advance the tx pointer. */
8361 		txq->txq_free -= txs->txs_ndesc;
8362 		txq->txq_next = nexttx;
8363 
8364 		txq->txq_sfree--;
8365 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
8366 
8367 		/* Pass the packet to any BPF listeners. */
8368 		bpf_mtap(ifp, m0, BPF_D_OUT);
8369 	}
8370 
8371 	if (m0 != NULL) {
8372 		if (!is_transmit)
8373 			ifp->if_flags |= IFF_OACTIVE;
8374 		txq->txq_flags |= WM_TXQ_NO_SPACE;
8375 		WM_Q_EVCNT_INCR(txq, descdrop);
8376 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
8377 			__func__));
8378 		m_freem(m0);
8379 	}
8380 
8381 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
8382 		/* No more slots; notify upper layer. */
8383 		if (!is_transmit)
8384 			ifp->if_flags |= IFF_OACTIVE;
8385 		txq->txq_flags |= WM_TXQ_NO_SPACE;
8386 	}
8387 
8388 	if (sent) {
8389 		/* Set a watchdog timer in case the chip flakes out. */
8390 		txq->txq_lastsent = time_uptime;
8391 		txq->txq_sending = true;
8392 	}
8393 }
8394 
8395 static void
8396 wm_deferred_start_locked(struct wm_txqueue *txq)
8397 {
8398 	struct wm_softc *sc = txq->txq_sc;
8399 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8400 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
8401 	int qid = wmq->wmq_id;
8402 
8403 	KASSERT(mutex_owned(txq->txq_lock));
8404 
	/* The caller owns txq_lock; do not drop it here. */
	if (txq->txq_stopping)
		return;
8409 
8410 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* XXX needed for ALTQ or single-CPU systems */
8412 		if (qid == 0)
8413 			wm_nq_start_locked(ifp);
8414 		wm_nq_transmit_locked(ifp, txq);
8415 	} else {
		/* XXX needed for ALTQ or single-CPU systems */
8417 		if (qid == 0)
8418 			wm_start_locked(ifp);
8419 		wm_transmit_locked(ifp, txq);
8420 	}
8421 }
8422 
8423 /* Interrupt */
8424 
8425 /*
8426  * wm_txeof:
8427  *
8428  *	Helper; handle transmit interrupts.
8429  */
8430 static bool
8431 wm_txeof(struct wm_txqueue *txq, u_int limit)
8432 {
8433 	struct wm_softc *sc = txq->txq_sc;
8434 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8435 	struct wm_txsoft *txs;
8436 	int count = 0;
8437 	int i;
8438 	uint8_t status;
8439 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
8440 	bool more = false;
8441 
8442 	KASSERT(mutex_owned(txq->txq_lock));
8443 
8444 	if (txq->txq_stopping)
8445 		return false;
8446 
8447 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
8449 	if (wmq->wmq_id == 0)
8450 		ifp->if_flags &= ~IFF_OACTIVE;
8451 
8452 	/*
8453 	 * Go through the Tx list and free mbufs for those
8454 	 * frames which have been transmitted.
8455 	 */
8456 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
8457 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
8458 		if (limit-- == 0) {
8459 			more = true;
8460 			DPRINTF(WM_DEBUG_TX,
8461 			    ("%s: TX: loop limited, job %d is not processed\n",
8462 				device_xname(sc->sc_dev), i));
8463 			break;
8464 		}
8465 
8466 		txs = &txq->txq_soft[i];
8467 
8468 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
8469 			device_xname(sc->sc_dev), i));
8470 
8471 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
8472 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
8473 
8474 		status =
8475 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
8476 		if ((status & WTX_ST_DD) == 0) {
8477 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
8478 			    BUS_DMASYNC_PREREAD);
8479 			break;
8480 		}
8481 
8482 		count++;
8483 		DPRINTF(WM_DEBUG_TX,
8484 		    ("%s: TX: job %d done: descs %d..%d\n",
8485 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
8486 		    txs->txs_lastdesc));
8487 
8488 		/*
8489 		 * XXX We should probably be using the statistics
8490 		 * XXX registers, but I don't know if they exist
8491 		 * XXX on chips before the i82544.
8492 		 */
8493 
8494 #ifdef WM_EVENT_COUNTERS
8495 		if (status & WTX_ST_TU)
8496 			WM_Q_EVCNT_INCR(txq, underrun);
8497 #endif /* WM_EVENT_COUNTERS */
8498 
8499 		/*
		 * The documents for the 82574 and newer say that the status
		 * field has neither an EC (Excessive Collision) bit nor an
		 * LC (Late Collision) bit (they are reserved). Refer to the
		 * "PCIe GbE Controller Open Source Software Developer's
		 * Manual", the 82574 datasheet and newer ones.
		 *
		 * XXX I saw the LC bit set on an I218 even though the media
		 * was full duplex, so the bit might have some other
		 * meaning ... (I have no documentation for it).
8508 		 */
8509 
8510 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
8511 		    && ((sc->sc_type < WM_T_82574)
8512 			|| (sc->sc_type == WM_T_80003))) {
8513 			ifp->if_oerrors++;
8514 			if (status & WTX_ST_LC)
8515 				log(LOG_WARNING, "%s: late collision\n",
8516 				    device_xname(sc->sc_dev));
8517 			else if (status & WTX_ST_EC) {
8518 				ifp->if_collisions +=
8519 				    TX_COLLISION_THRESHOLD + 1;
8520 				log(LOG_WARNING, "%s: excessive collisions\n",
8521 				    device_xname(sc->sc_dev));
8522 			}
8523 		} else
8524 			ifp->if_opackets++;
8525 
8526 		txq->txq_packets++;
8527 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
8528 
8529 		txq->txq_free += txs->txs_ndesc;
8530 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
8531 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
8532 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
8533 		m_freem(txs->txs_mbuf);
8534 		txs->txs_mbuf = NULL;
8535 	}
8536 
8537 	/* Update the dirty transmit buffer pointer. */
8538 	txq->txq_sdirty = i;
8539 	DPRINTF(WM_DEBUG_TX,
8540 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
8541 
8542 	/*
8543 	 * If there are no more pending transmissions, cancel the watchdog
8544 	 * timer.
8545 	 */
8546 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
8547 		txq->txq_sending = false;
8548 
8549 	return more;
8550 }
8551 
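/*
 * The accessors below hide the three RX descriptor formats this driver
 * handles: the 82574 uses extended descriptors (erx_ctx), the NEWQUEUE
 * controllers (82575 and later) use advanced descriptors (nqrx_ctx),
 * and everything else uses legacy descriptors.
 */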
8552 static inline uint32_t
8553 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
8554 {
8555 	struct wm_softc *sc = rxq->rxq_sc;
8556 
	if (sc->sc_type == WM_T_82574)
		return EXTRXC_STATUS(
		    rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
		return NQRXC_STATUS(
		    rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
	else
		return rxq->rxq_descs[idx].wrx_status;
8563 }
8564 
8565 static inline uint32_t
8566 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
8567 {
8568 	struct wm_softc *sc = rxq->rxq_sc;
8569 
	if (sc->sc_type == WM_T_82574)
		return EXTRXC_ERROR(
		    rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
		return NQRXC_ERROR(
		    rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
	else
		return rxq->rxq_descs[idx].wrx_errors;
8576 }
8577 
8578 static inline uint16_t
8579 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
8580 {
8581 	struct wm_softc *sc = rxq->rxq_sc;
8582 
8583 	if (sc->sc_type == WM_T_82574)
8584 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
8585 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8586 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
8587 	else
8588 		return rxq->rxq_descs[idx].wrx_special;
8589 }
8590 
8591 static inline int
8592 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
8593 {
8594 	struct wm_softc *sc = rxq->rxq_sc;
8595 
8596 	if (sc->sc_type == WM_T_82574)
8597 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
8598 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8599 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
8600 	else
8601 		return rxq->rxq_descs[idx].wrx_len;
8602 }
8603 
8604 #ifdef WM_DEBUG
8605 static inline uint32_t
8606 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
8607 {
8608 	struct wm_softc *sc = rxq->rxq_sc;
8609 
8610 	if (sc->sc_type == WM_T_82574)
8611 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
8612 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8613 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
8614 	else
8615 		return 0;
8616 }
8617 
8618 static inline uint8_t
8619 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
8620 {
8621 	struct wm_softc *sc = rxq->rxq_sc;
8622 
8623 	if (sc->sc_type == WM_T_82574)
8624 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
8625 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8626 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
8627 	else
8628 		return 0;
8629 }
8630 #endif /* WM_DEBUG */
8631 
8632 static inline bool
8633 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
8634     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
8635 {
8636 
8637 	if (sc->sc_type == WM_T_82574)
8638 		return (status & ext_bit) != 0;
8639 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8640 		return (status & nq_bit) != 0;
8641 	else
8642 		return (status & legacy_bit) != 0;
8643 }
8644 
8645 static inline bool
8646 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
8647     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
8648 {
8649 
8650 	if (sc->sc_type == WM_T_82574)
8651 		return (error & ext_bit) != 0;
8652 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8653 		return (error & nq_bit) != 0;
8654 	else
8655 		return (error & legacy_bit) != 0;
8656 }
8657 
8658 static inline bool
8659 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
8660 {
8661 
8662 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
8663 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
8664 		return true;
8665 	else
8666 		return false;
8667 }
8668 
8669 static inline bool
8670 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
8671 {
8672 	struct wm_softc *sc = rxq->rxq_sc;
8673 
8674 	/* XXX missing error bit for newqueue? */
8675 	if (wm_rxdesc_is_set_error(sc, errors,
8676 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
8677 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
8678 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
8679 		NQRXC_ERROR_RXE)) {
8680 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
8681 		    EXTRXC_ERROR_SE, 0))
8682 			log(LOG_WARNING, "%s: symbol error\n",
8683 			    device_xname(sc->sc_dev));
8684 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
8685 		    EXTRXC_ERROR_SEQ, 0))
8686 			log(LOG_WARNING, "%s: receive sequence error\n",
8687 			    device_xname(sc->sc_dev));
8688 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
8689 		    EXTRXC_ERROR_CE, 0))
8690 			log(LOG_WARNING, "%s: CRC error\n",
8691 			    device_xname(sc->sc_dev));
8692 		return true;
8693 	}
8694 
8695 	return false;
8696 }
8697 
8698 static inline bool
8699 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
8700 {
8701 	struct wm_softc *sc = rxq->rxq_sc;
8702 
8703 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
8704 		NQRXC_STATUS_DD)) {
8705 		/* We have processed all of the receive descriptors. */
8706 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
8707 		return false;
8708 	}
8709 
8710 	return true;
8711 }
8712 
8713 static inline bool
8714 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
8715     uint16_t vlantag, struct mbuf *m)
8716 {
8717 
8718 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
8719 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
8720 		vlan_set_tag(m, le16toh(vlantag));
8721 	}
8722 
8723 	return true;
8724 }
8725 
8726 static inline void
8727 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
8728     uint32_t errors, struct mbuf *m)
8729 {
8730 	struct wm_softc *sc = rxq->rxq_sc;
8731 
8732 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
8733 		if (wm_rxdesc_is_set_status(sc, status,
8734 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
8735 			WM_Q_EVCNT_INCR(rxq, ipsum);
8736 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
8737 			if (wm_rxdesc_is_set_error(sc, errors,
8738 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
8739 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
8740 		}
8741 		if (wm_rxdesc_is_set_status(sc, status,
8742 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
8743 			/*
8744 			 * Note: we don't know if this was TCP or UDP,
8745 			 * so we just set both bits, and expect the
8746 			 * upper layers to deal.
8747 			 */
8748 			WM_Q_EVCNT_INCR(rxq, tusum);
8749 			m->m_pkthdr.csum_flags |=
8750 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
8751 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
8752 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
8753 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
8754 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
8755 		}
8756 	}
8757 }
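
/*
 * A sketch of the result: for a known-good TCP or UDP packet over IPv4,
 * the helper above typically leaves m->m_pkthdr.csum_flags as
 *
 *	M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
 *	M_CSUM_TCPv6 | M_CSUM_UDPv6
 *
 * Whether the packet was actually TCP or UDP (and v4 or v6) is not
 * known at this point, so all four L4 bits are set and the upper
 * layers resolve it; the *_BAD bits are added only when the hardware
 * reported a checksum error.
 */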
8758 
8759 /*
8760  * wm_rxeof:
8761  *
8762  *	Helper; handle receive interrupts.
8763  */
8764 static bool
8765 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
8766 {
8767 	struct wm_softc *sc = rxq->rxq_sc;
8768 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8769 	struct wm_rxsoft *rxs;
8770 	struct mbuf *m;
8771 	int i, len;
8772 	int count = 0;
8773 	uint32_t status, errors;
8774 	uint16_t vlantag;
8775 	bool more = false;
8776 
8777 	KASSERT(mutex_owned(rxq->rxq_lock));
8778 
8779 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
8780 		if (limit-- == 0) {
8781 			rxq->rxq_ptr = i;
8782 			more = true;
8783 			DPRINTF(WM_DEBUG_RX,
8784 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
8785 				device_xname(sc->sc_dev), i));
8786 			break;
8787 		}
8788 
8789 		rxs = &rxq->rxq_soft[i];
8790 
8791 		DPRINTF(WM_DEBUG_RX,
8792 		    ("%s: RX: checking descriptor %d\n",
8793 			device_xname(sc->sc_dev), i));
8794 		wm_cdrxsync(rxq, i,
8795 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
8796 
8797 		status = wm_rxdesc_get_status(rxq, i);
8798 		errors = wm_rxdesc_get_errors(rxq, i);
8799 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
8800 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
8801 #ifdef WM_DEBUG
8802 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
8803 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
8804 #endif
8805 
8806 		if (!wm_rxdesc_dd(rxq, i, status)) {
8807 			/*
8808 			 * Update the receive pointer while holding rxq_lock,
8809 			 * keeping it consistent with the incremented counters.
8810 			 */
8811 			rxq->rxq_ptr = i;
8812 			break;
8813 		}
8814 
8815 		count++;
8816 		if (__predict_false(rxq->rxq_discard)) {
8817 			DPRINTF(WM_DEBUG_RX,
8818 			    ("%s: RX: discarding contents of descriptor %d\n",
8819 				device_xname(sc->sc_dev), i));
8820 			wm_init_rxdesc(rxq, i);
8821 			if (wm_rxdesc_is_eop(rxq, status)) {
8822 				/* Reset our state. */
8823 				DPRINTF(WM_DEBUG_RX,
8824 				    ("%s: RX: resetting rxdiscard -> 0\n",
8825 					device_xname(sc->sc_dev)));
8826 				rxq->rxq_discard = 0;
8827 			}
8828 			continue;
8829 		}
8830 
8831 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
8832 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
8833 
8834 		m = rxs->rxs_mbuf;
8835 
8836 		/*
8837 		 * Add a new receive buffer to the ring, unless of
8838 		 * course the length is zero. Treat the latter as a
8839 		 * failed mapping.
8840 		 */
8841 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
8842 			/*
8843 			 * Failed, throw away what we've done so
8844 			 * far, and discard the rest of the packet.
8845 			 */
8846 			ifp->if_ierrors++;
8847 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
8848 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
8849 			wm_init_rxdesc(rxq, i);
8850 			if (!wm_rxdesc_is_eop(rxq, status))
8851 				rxq->rxq_discard = 1;
8852 			if (rxq->rxq_head != NULL)
8853 				m_freem(rxq->rxq_head);
8854 			WM_RXCHAIN_RESET(rxq);
8855 			DPRINTF(WM_DEBUG_RX,
8856 			    ("%s: RX: Rx buffer allocation failed, "
8857 			    "dropping packet%s\n", device_xname(sc->sc_dev),
8858 				rxq->rxq_discard ? " (discard)" : ""));
8859 			continue;
8860 		}
8861 
8862 		m->m_len = len;
8863 		rxq->rxq_len += len;
8864 		DPRINTF(WM_DEBUG_RX,
8865 		    ("%s: RX: buffer at %p len %d\n",
8866 			device_xname(sc->sc_dev), m->m_data, len));
8867 
8868 		/* If this is not the end of the packet, keep looking. */
8869 		if (!wm_rxdesc_is_eop(rxq, status)) {
8870 			WM_RXCHAIN_LINK(rxq, m);
8871 			DPRINTF(WM_DEBUG_RX,
8872 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
8873 				device_xname(sc->sc_dev), rxq->rxq_len));
8874 			continue;
8875 		}
8876 
8877 		/*
8878 		 * Okay, we have the entire packet now. The chip is configured
8879 		 * to include the FCS except on I350, I354 and I21[01] (not
8880 		 * all chips can be configured to strip it), so we need to
8881 		 * trim it off here. We may also need to adjust the length of
8882 		 * the previous mbuf in the chain if the current mbuf is too
8883 		 * short.
8884 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL register
8885 		 * is always set on I350, so we don't trim the FCS there.
8886 		 */
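		/*
		 * A worked example with hypothetical lengths: if the 4-byte
		 * FCS spans mbufs and the final mbuf holds only 1 of its
		 * bytes, then m->m_len (1) < ETHER_CRC_LEN (4), so the
		 * previous mbuf in the chain is shortened by the remaining
		 * 3 bytes and this mbuf's length is set to 0.
		 */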
8887 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
8888 		    && (sc->sc_type != WM_T_I210)
8889 		    && (sc->sc_type != WM_T_I211)) {
8890 			if (m->m_len < ETHER_CRC_LEN) {
8891 				rxq->rxq_tail->m_len
8892 				    -= (ETHER_CRC_LEN - m->m_len);
8893 				m->m_len = 0;
8894 			} else
8895 				m->m_len -= ETHER_CRC_LEN;
8896 			len = rxq->rxq_len - ETHER_CRC_LEN;
8897 		} else
8898 			len = rxq->rxq_len;
8899 
8900 		WM_RXCHAIN_LINK(rxq, m);
8901 
8902 		*rxq->rxq_tailp = NULL;
8903 		m = rxq->rxq_head;
8904 
8905 		WM_RXCHAIN_RESET(rxq);
8906 
8907 		DPRINTF(WM_DEBUG_RX,
8908 		    ("%s: RX: have entire packet, len -> %d\n",
8909 			device_xname(sc->sc_dev), len));
8910 
8911 		/* If an error occurred, update stats and drop the packet. */
8912 		if (wm_rxdesc_has_errors(rxq, errors)) {
8913 			m_freem(m);
8914 			continue;
8915 		}
8916 
8917 		/* No errors.  Receive the packet. */
8918 		m_set_rcvif(m, ifp);
8919 		m->m_pkthdr.len = len;
8920 		/*
8921 		 * TODO
8922 		 * We should save the rsshash and rsstype in this mbuf.
8923 		 */
8924 		DPRINTF(WM_DEBUG_RX,
8925 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
8926 			device_xname(sc->sc_dev), rsstype, rsshash));
8927 
8928 		/*
8929 		 * If VLANs are enabled, VLAN packets have been unwrapped
8930 		 * for us.  Associate the tag with the packet.
8931 		 */
8932 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
8933 			continue;
8934 
8935 		/* Set up checksum info for this packet. */
8936 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
8937 		/*
8938 		 * Update the receive pointer while holding rxq_lock, keeping
8939 		 * it consistent with the incremented counters.
8940 		 */
8941 		rxq->rxq_ptr = i;
8942 		rxq->rxq_packets++;
8943 		rxq->rxq_bytes += len;
8944 		mutex_exit(rxq->rxq_lock);
8945 
8946 		/* Pass it on. */
8947 		if_percpuq_enqueue(sc->sc_ipq, m);
8948 
8949 		mutex_enter(rxq->rxq_lock);
8950 
8951 		if (rxq->rxq_stopping)
8952 			break;
8953 	}
8954 
8955 	DPRINTF(WM_DEBUG_RX,
8956 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
8957 
8958 	return more;
8959 }
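
/*
 * A usage sketch: callers pass a processing limit and reschedule the
 * per-queue softint when wm_rxeof() reports that work remains, e.g.
 *
 *	rxmore = wm_rxeof(rxq, rxlimit);
 *	...
 *	if (txmore || rxmore)
 *		softint_schedule(wmq->wmq_si);
 *
 * as done in wm_txrxintr_msix() and wm_handle_queue() below.
 */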
8960 
8961 /*
8962  * wm_linkintr_gmii:
8963  *
8964  *	Helper; handle link interrupts for GMII.
8965  */
8966 static void
8967 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
8968 {
8969 	device_t dev = sc->sc_dev;
8970 	uint32_t status, reg;
8971 	bool link;
8972 	int rv;
8973 
8974 	KASSERT(WM_CORE_LOCKED(sc));
8975 
8976 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
8977 		__func__));
8978 
8979 	if ((icr & ICR_LSC) == 0) {
8980 		if (icr & ICR_RXSEQ)
8981 			DPRINTF(WM_DEBUG_LINK,
8982 			    ("%s: LINK Receive sequence error\n",
8983 				device_xname(dev)));
8984 		return;
8985 	}
8986 
8987 	/* Link status changed */
8988 	status = CSR_READ(sc, WMREG_STATUS);
8989 	link = status & STATUS_LU;
8990 	if (link) {
8991 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
8992 			device_xname(dev),
8993 			(status & STATUS_FD) ? "FDX" : "HDX"));
8994 	} else {
8995 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
8996 			device_xname(dev)));
8997 	}
8998 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
8999 		wm_gig_downshift_workaround_ich8lan(sc);
9000 
9001 	if ((sc->sc_type == WM_T_ICH8)
9002 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
9003 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
9004 	}
9005 	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
9006 		device_xname(dev)));
9007 	mii_pollstat(&sc->sc_mii);
9008 	if (sc->sc_type == WM_T_82543) {
9009 		int miistatus, active;
9010 
9011 		/*
9012 		 * With 82543, we need to force speed and
9013 		 * duplex on the MAC equal to what the PHY
9014 		 * speed and duplex configuration is.
9015 		 */
9016 		miistatus = sc->sc_mii.mii_media_status;
9017 
9018 		if (miistatus & IFM_ACTIVE) {
9019 			active = sc->sc_mii.mii_media_active;
9020 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
9021 			switch (IFM_SUBTYPE(active)) {
9022 			case IFM_10_T:
9023 				sc->sc_ctrl |= CTRL_SPEED_10;
9024 				break;
9025 			case IFM_100_TX:
9026 				sc->sc_ctrl |= CTRL_SPEED_100;
9027 				break;
9028 			case IFM_1000_T:
9029 				sc->sc_ctrl |= CTRL_SPEED_1000;
9030 				break;
9031 			default:
9032 				/*
9033 				 * Fiber?
9034 				 * Should not enter here.
9035 				 */
9036 				device_printf(dev, "unknown media (%x)\n",
9037 				    active);
9038 				break;
9039 			}
9040 			if (active & IFM_FDX)
9041 				sc->sc_ctrl |= CTRL_FD;
9042 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9043 		}
9044 	} else if (sc->sc_type == WM_T_PCH) {
9045 		wm_k1_gig_workaround_hv(sc,
9046 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
9047 	}
9048 
9049 	/*
9050 	 * When connected at 10Mbps half-duplex, some parts are excessively
9051 	 * aggressive resulting in many collisions. To avoid this, increase
9052 	 * the IPG and reduce Rx latency in the PHY.
9053 	 */
9054 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
9055 	    && link) {
9056 		uint32_t tipg_reg;
9057 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
9058 		bool fdx;
9059 		uint16_t emi_addr, emi_val;
9060 
9061 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
9062 		tipg_reg &= ~TIPG_IPGT_MASK;
9063 		fdx = status & STATUS_FD;
9064 
9065 		if (!fdx && (speed == STATUS_SPEED_10)) {
9066 			tipg_reg |= 0xff;
9067 			/* Reduce Rx latency in analog PHY */
9068 			emi_val = 0;
9069 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
9070 		    fdx && speed != STATUS_SPEED_1000) {
9071 			tipg_reg |= 0xc;
9072 			emi_val = 1;
9073 		} else {
9074 			/* Roll back the default values */
9075 			tipg_reg |= 0x08;
9076 			emi_val = 1;
9077 		}
9078 
9079 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
9080 
9081 		rv = sc->phy.acquire(sc);
9082 		if (rv)
9083 			return;
9084 
9085 		if (sc->sc_type == WM_T_PCH2)
9086 			emi_addr = I82579_RX_CONFIG;
9087 		else
9088 			emi_addr = I217_RX_CONFIG;
9089 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
9090 
9091 		if (sc->sc_type >= WM_T_PCH_LPT) {
9092 			uint16_t phy_reg;
9093 
9094 			sc->phy.readreg_locked(dev, 2,
9095 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
9096 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
9097 			if (speed == STATUS_SPEED_100
9098 			    || speed == STATUS_SPEED_10)
9099 				phy_reg |= 0x3e8;
9100 			else
9101 				phy_reg |= 0xfa;
9102 			sc->phy.writereg_locked(dev, 2,
9103 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
9104 
9105 			if (speed == STATUS_SPEED_1000) {
9106 				sc->phy.readreg_locked(dev, 2,
9107 				    HV_PM_CTRL, &phy_reg);
9108 
9109 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
9110 
9111 				sc->phy.writereg_locked(dev, 2,
9112 				    HV_PM_CTRL, phy_reg);
9113 			}
9114 		}
9115 		sc->phy.release(sc);
9116 
9117 		if (rv)
9118 			return;
9119 
9120 		if (sc->sc_type >= WM_T_PCH_SPT) {
9121 			uint16_t data, ptr_gap;
9122 
9123 			if (speed == STATUS_SPEED_1000) {
9124 				rv = sc->phy.acquire(sc);
9125 				if (rv)
9126 					return;
9127 
9128 				rv = sc->phy.readreg_locked(dev, 2,
9129 				    I219_UNKNOWN1, &data);
9130 				if (rv) {
9131 					sc->phy.release(sc);
9132 					return;
9133 				}
9134 
9135 				ptr_gap = (data & (0x3ff << 2)) >> 2;
9136 				if (ptr_gap < 0x18) {
9137 					data &= ~(0x3ff << 2);
9138 					data |= (0x18 << 2);
9139 					rv = sc->phy.writereg_locked(dev,
9140 					    2, I219_UNKNOWN1, data);
9141 				}
9142 				sc->phy.release(sc);
9143 				if (rv)
9144 					return;
9145 			} else {
9146 				rv = sc->phy.acquire(sc);
9147 				if (rv)
9148 					return;
9149 
9150 				rv = sc->phy.writereg_locked(dev, 2,
9151 				    I219_UNKNOWN1, 0xc023);
9152 				sc->phy.release(sc);
9153 				if (rv)
9154 					return;
9155 
9156 			}
9157 		}
9158 	}
9159 
9160 	/*
9161 	 * I217 Packet Loss issue:
9162 	 * ensure that FEXTNVM4 Beacon Duration is set correctly
9163 	 * on power up.
9164 	 * Set the Beacon Duration for I217 to 8 usec
9165 	 */
9166 	if (sc->sc_type >= WM_T_PCH_LPT) {
9167 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
9168 		reg &= ~FEXTNVM4_BEACON_DURATION;
9169 		reg |= FEXTNVM4_BEACON_DURATION_8US;
9170 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
9171 	}
9172 
9173 	/* Work-around I218 hang issue */
9174 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
9175 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
9176 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
9177 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
9178 		wm_k1_workaround_lpt_lp(sc, link);
9179 
9180 	if (sc->sc_type >= WM_T_PCH_LPT) {
9181 		/*
9182 		 * Set platform power management values for Latency
9183 		 * Tolerance Reporting (LTR)
9184 		 */
9185 		wm_platform_pm_pch_lpt(sc,
9186 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
9187 	}
9188 
9189 	/* Clear link partner's EEE ability */
9190 	sc->eee_lp_ability = 0;
9191 
9192 	/* FEXTNVM6 K1-off workaround */
9193 	if (sc->sc_type == WM_T_PCH_SPT) {
9194 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
9195 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
9196 			reg |= FEXTNVM6_K1_OFF_ENABLE;
9197 		else
9198 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
9199 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
9200 	}
9201 
9202 	if (!link)
9203 		return;
9204 
9205 	switch (sc->sc_type) {
9206 	case WM_T_PCH2:
9207 		wm_k1_workaround_lv(sc);
9208 		/* FALLTHROUGH */
9209 	case WM_T_PCH:
9210 		if (sc->sc_phytype == WMPHY_82578)
9211 			wm_link_stall_workaround_hv(sc);
9212 		break;
9213 	default:
9214 		break;
9215 	}
9216 
9217 	/* Enable/Disable EEE after link up */
9218 	if (sc->sc_phytype > WMPHY_82579)
9219 		wm_set_eee_pchlan(sc);
9220 }
9221 
9222 /*
9223  * wm_linkintr_tbi:
9224  *
9225  *	Helper; handle link interrupts for TBI mode.
9226  */
9227 static void
9228 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
9229 {
9230 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9231 	uint32_t status;
9232 
9233 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
9234 		__func__));
9235 
9236 	status = CSR_READ(sc, WMREG_STATUS);
9237 	if (icr & ICR_LSC) {
9238 		wm_check_for_link(sc);
9239 		if (status & STATUS_LU) {
9240 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
9241 				device_xname(sc->sc_dev),
9242 				(status & STATUS_FD) ? "FDX" : "HDX"));
9243 			/*
9244 			 * NOTE: CTRL will update TFCE and RFCE automatically,
9245 			 * so we should update sc->sc_ctrl
9246 			 */
9247 
9248 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
9249 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
9250 			sc->sc_fcrtl &= ~FCRTL_XONE;
9251 			if (status & STATUS_FD)
9252 				sc->sc_tctl |=
9253 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
9254 			else
9255 				sc->sc_tctl |=
9256 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
9257 			if (sc->sc_ctrl & CTRL_TFCE)
9258 				sc->sc_fcrtl |= FCRTL_XONE;
9259 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
9260 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
9261 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
9262 			sc->sc_tbi_linkup = 1;
9263 			if_link_state_change(ifp, LINK_STATE_UP);
9264 		} else {
9265 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
9266 				device_xname(sc->sc_dev)));
9267 			sc->sc_tbi_linkup = 0;
9268 			if_link_state_change(ifp, LINK_STATE_DOWN);
9269 		}
9270 		/* Update LED */
9271 		wm_tbi_serdes_set_linkled(sc);
9272 	} else if (icr & ICR_RXSEQ)
9273 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
9274 			device_xname(sc->sc_dev)));
9275 }
9276 
9277 /*
9278  * wm_linkintr_serdes:
9279  *
9280  *	Helper; handle link interrupts for SERDES mode.
9281  */
9282 static void
9283 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
9284 {
9285 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9286 	struct mii_data *mii = &sc->sc_mii;
9287 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9288 	uint32_t pcs_adv, pcs_lpab, reg;
9289 
9290 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
9291 		__func__));
9292 
9293 	if (icr & ICR_LSC) {
9294 		/* Check PCS */
9295 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
9296 		if ((reg & PCS_LSTS_LINKOK) != 0) {
9297 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
9298 				device_xname(sc->sc_dev)));
9299 			mii->mii_media_status |= IFM_ACTIVE;
9300 			sc->sc_tbi_linkup = 1;
9301 			if_link_state_change(ifp, LINK_STATE_UP);
9302 		} else {
9303 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
9304 				device_xname(sc->sc_dev)));
9305 			mii->mii_media_status |= IFM_NONE;
9306 			sc->sc_tbi_linkup = 0;
9307 			if_link_state_change(ifp, LINK_STATE_DOWN);
9308 			wm_tbi_serdes_set_linkled(sc);
9309 			return;
9310 		}
9311 		mii->mii_media_active |= IFM_1000_SX;
9312 		if ((reg & PCS_LSTS_FDX) != 0)
9313 			mii->mii_media_active |= IFM_FDX;
9314 		else
9315 			mii->mii_media_active |= IFM_HDX;
9316 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
9317 			/* Check flow */
9318 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
9319 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
9320 				DPRINTF(WM_DEBUG_LINK,
9321 				    ("XXX LINKOK but not ACOMP\n"));
9322 				return;
9323 			}
9324 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
9325 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
9326 			DPRINTF(WM_DEBUG_LINK,
9327 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
9328 			if ((pcs_adv & TXCW_SYM_PAUSE)
9329 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
9330 				mii->mii_media_active |= IFM_FLOW
9331 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
9332 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
9333 			    && (pcs_adv & TXCW_ASYM_PAUSE)
9334 			    && (pcs_lpab & TXCW_SYM_PAUSE)
9335 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
9336 				mii->mii_media_active |= IFM_FLOW
9337 				    | IFM_ETH_TXPAUSE;
9338 			else if ((pcs_adv & TXCW_SYM_PAUSE)
9339 			    && (pcs_adv & TXCW_ASYM_PAUSE)
9340 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
9341 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
9342 				mii->mii_media_active |= IFM_FLOW
9343 				    | IFM_ETH_RXPAUSE;
9344 		}
9345 		/* Update LED */
9346 		wm_tbi_serdes_set_linkled(sc);
9347 	} else
9348 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
9349 		    device_xname(sc->sc_dev)));
9350 }
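
/*
 * The flow control resolution above follows the usual IEEE 802.3
 * Annex 28B rules, encoded in the TXCW pause bits (a summary, not a
 * normative table):
 *
 *	local SYM/ASYM	partner SYM/ASYM	result
 *	  1    -	  1    -		TX and RX pause
 *	  0    1	  1    1		TX pause only
 *	  1    1	  0    1		RX pause only
 *	  (anything else)			no pause
 */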
9351 
9352 /*
9353  * wm_linkintr:
9354  *
9355  *	Helper; handle link interrupts.
9356  */
9357 static void
9358 wm_linkintr(struct wm_softc *sc, uint32_t icr)
9359 {
9360 
9361 	KASSERT(WM_CORE_LOCKED(sc));
9362 
9363 	if (sc->sc_flags & WM_F_HAS_MII)
9364 		wm_linkintr_gmii(sc, icr);
9365 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
9366 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
9367 		wm_linkintr_serdes(sc, icr);
9368 	else
9369 		wm_linkintr_tbi(sc, icr);
9370 }
9371 
9372 /*
9373  * wm_intr_legacy:
9374  *
9375  *	Interrupt service routine for INTx and MSI.
9376  */
9377 static int
9378 wm_intr_legacy(void *arg)
9379 {
9380 	struct wm_softc *sc = arg;
9381 	struct wm_queue *wmq = &sc->sc_queue[0];
9382 	struct wm_txqueue *txq = &wmq->wmq_txq;
9383 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
9384 	uint32_t icr, rndval = 0;
9385 	int handled = 0;
9386 
9387 	while (1 /* CONSTCOND */) {
9388 		icr = CSR_READ(sc, WMREG_ICR);
9389 		if ((icr & sc->sc_icr) == 0)
9390 			break;
9391 		if (handled == 0)
9392 			DPRINTF(WM_DEBUG_TX,
9393 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
9394 		if (rndval == 0)
9395 			rndval = icr;
9396 
9397 		mutex_enter(rxq->rxq_lock);
9398 
9399 		if (rxq->rxq_stopping) {
9400 			mutex_exit(rxq->rxq_lock);
9401 			break;
9402 		}
9403 
9404 		handled = 1;
9405 
9406 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
9407 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
9408 			DPRINTF(WM_DEBUG_RX,
9409 			    ("%s: RX: got Rx intr 0x%08x\n",
9410 				device_xname(sc->sc_dev),
9411 				icr & (ICR_RXDMT0 | ICR_RXT0)));
9412 			WM_Q_EVCNT_INCR(rxq, intr);
9413 		}
9414 #endif
9415 		/*
9416 		 * wm_rxeof() does *not* call upper layer functions directly,
9417 		 * as if_percpuq_enqueue() just calls softint_schedule().
9418 		 * So, we can call wm_rxeof() in interrupt context.
9419 		 */
9420 		wm_rxeof(rxq, UINT_MAX);
9421 		/* Fill lower bits with RX index. See below for the upper. */
9422 		rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK;
9423 
9424 		mutex_exit(rxq->rxq_lock);
9425 		mutex_enter(txq->txq_lock);
9426 
9427 		if (txq->txq_stopping) {
9428 			mutex_exit(txq->txq_lock);
9429 			break;
9430 		}
9431 
9432 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
9433 		if (icr & ICR_TXDW) {
9434 			DPRINTF(WM_DEBUG_TX,
9435 			    ("%s: TX: got TXDW interrupt\n",
9436 				device_xname(sc->sc_dev)));
9437 			WM_Q_EVCNT_INCR(txq, txdw);
9438 		}
9439 #endif
9440 		wm_txeof(txq, UINT_MAX);
9441 		/* Fill upper bits with TX index. See above for the lower. */
9442 		rndval |= txq->txq_next * WM_NRXDESC;
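		/*
		 * rndval now mixes the initial ICR snapshot, the RX ring
		 * index in its low bits and the TX ring index scaled into
		 * the upper bits; it is handed to rnd_add_uint32() below.
		 */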
9443 
9444 		mutex_exit(txq->txq_lock);
9445 		WM_CORE_LOCK(sc);
9446 
9447 		if (sc->sc_core_stopping) {
9448 			WM_CORE_UNLOCK(sc);
9449 			break;
9450 		}
9451 
9452 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
9453 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
9454 			wm_linkintr(sc, icr);
9455 		}
9456 
9457 		WM_CORE_UNLOCK(sc);
9458 
9459 		if (icr & ICR_RXO) {
9460 #if defined(WM_DEBUG)
9461 			log(LOG_WARNING, "%s: Receive overrun\n",
9462 			    device_xname(sc->sc_dev));
9463 #endif /* defined(WM_DEBUG) */
9464 		}
9465 	}
9466 
9467 	rnd_add_uint32(&sc->sc_queue[0].rnd_source, rndval);
9468 
9469 	if (handled) {
9470 		/* Try to get more packets going. */
9471 		softint_schedule(wmq->wmq_si);
9472 	}
9473 
9474 	return handled;
9475 }
9476 
9477 static inline void
9478 wm_txrxintr_disable(struct wm_queue *wmq)
9479 {
9480 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
9481 
9482 	if (sc->sc_type == WM_T_82574)
9483 		CSR_WRITE(sc, WMREG_IMC,
9484 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
9485 	else if (sc->sc_type == WM_T_82575)
9486 		CSR_WRITE(sc, WMREG_EIMC,
9487 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
9488 	else
9489 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
9490 }
9491 
9492 static inline void
9493 wm_txrxintr_enable(struct wm_queue *wmq)
9494 {
9495 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
9496 
9497 	wm_itrs_calculate(sc, wmq);
9498 
9499 	/*
9500 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled here.
9501 	 * There is no need to care which of RXQ(0) and RXQ(1) enables
9502 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
9503 	 * while its wm_handle_queue(wmq) is running.
9504 	 */
9505 	if (sc->sc_type == WM_T_82574)
9506 		CSR_WRITE(sc, WMREG_IMS,
9507 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
9508 	else if (sc->sc_type == WM_T_82575)
9509 		CSR_WRITE(sc, WMREG_EIMS,
9510 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
9511 	else
9512 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
9513 }
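
/*
 * A note on the chip-specific cases above and in wm_txrxintr_disable():
 * the 82574 masks its per-queue MSI-X sources through the legacy
 * IMS/IMC registers using the ICR_TXQ()/ICR_RXQ() bits, the 82575 uses
 * EIMS/EIMC with the EITR_*_QUEUE() encoding, and later chips use
 * EIMS/EIMC with one bit per MSI-X vector index.
 */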
9514 
9515 static int
9516 wm_txrxintr_msix(void *arg)
9517 {
9518 	struct wm_queue *wmq = arg;
9519 	struct wm_txqueue *txq = &wmq->wmq_txq;
9520 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
9521 	struct wm_softc *sc = txq->txq_sc;
9522 	u_int txlimit = sc->sc_tx_intr_process_limit;
9523 	u_int rxlimit = sc->sc_rx_intr_process_limit;
9524 	uint32_t rndval = 0;
9525 	bool txmore;
9526 	bool rxmore;
9527 
9528 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
9529 
9530 	DPRINTF(WM_DEBUG_TX,
9531 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
9532 
9533 	wm_txrxintr_disable(wmq);
9534 
9535 	mutex_enter(txq->txq_lock);
9536 
9537 	if (txq->txq_stopping) {
9538 		mutex_exit(txq->txq_lock);
9539 		return 0;
9540 	}
9541 
9542 	WM_Q_EVCNT_INCR(txq, txdw);
9543 	txmore = wm_txeof(txq, txlimit);
9544 	/* Fill upper bits with TX index. See below for the lower. */
9545 	rndval = txq->txq_next * WM_NRXDESC;
9546 	/* wm_deferred start() is done in wm_handle_queue(). */
9547 	mutex_exit(txq->txq_lock);
9548 
9549 	DPRINTF(WM_DEBUG_RX,
9550 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
9551 	mutex_enter(rxq->rxq_lock);
9552 
9553 	if (rxq->rxq_stopping) {
9554 		mutex_exit(rxq->rxq_lock);
9555 		return 0;
9556 	}
9557 
9558 	WM_Q_EVCNT_INCR(rxq, intr);
9559 	rxmore = wm_rxeof(rxq, rxlimit);
9560 
9561 	/* Fill lower bits with RX index. See above for the upper. */
9562 	rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK;
9563 	mutex_exit(rxq->rxq_lock);
9564 
9565 	wm_itrs_writereg(sc, wmq);
9566 
9567 	/*
9568 	 * This function is called in hardware interrupt context and runs
9569 	 * per-CPU, so it is not required to take a lock.
9570 	 */
9571 	if (rndval != 0)
9572 		rnd_add_uint32(&sc->sc_queue[wmq->wmq_id].rnd_source, rndval);
9573 
9574 	if (txmore || rxmore)
9575 		softint_schedule(wmq->wmq_si);
9576 	else
9577 		wm_txrxintr_enable(wmq);
9578 
9579 	return 1;
9580 }
9581 
9582 static void
9583 wm_handle_queue(void *arg)
9584 {
9585 	struct wm_queue *wmq = arg;
9586 	struct wm_txqueue *txq = &wmq->wmq_txq;
9587 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
9588 	struct wm_softc *sc = txq->txq_sc;
9589 	u_int txlimit = sc->sc_tx_process_limit;
9590 	u_int rxlimit = sc->sc_rx_process_limit;
9591 	bool txmore;
9592 	bool rxmore;
9593 
9594 	mutex_enter(txq->txq_lock);
9595 	if (txq->txq_stopping) {
9596 		mutex_exit(txq->txq_lock);
9597 		return;
9598 	}
9599 	txmore = wm_txeof(txq, txlimit);
9600 	wm_deferred_start_locked(txq);
9601 	mutex_exit(txq->txq_lock);
9602 
9603 	mutex_enter(rxq->rxq_lock);
9604 	if (rxq->rxq_stopping) {
9605 		mutex_exit(rxq->rxq_lock);
9606 		return;
9607 	}
9608 	WM_Q_EVCNT_INCR(rxq, defer);
9609 	rxmore = wm_rxeof(rxq, rxlimit);
9610 	mutex_exit(rxq->rxq_lock);
9611 
9612 	if (txmore || rxmore)
9613 		softint_schedule(wmq->wmq_si);
9614 	else
9615 		wm_txrxintr_enable(wmq);
9616 }
9617 
9618 /*
9619  * wm_linkintr_msix:
9620  *
9621  *	Interrupt service routine for link status change for MSI-X.
9622  */
9623 static int
9624 wm_linkintr_msix(void *arg)
9625 {
9626 	struct wm_softc *sc = arg;
9627 	uint32_t reg;
9628 	bool has_rxo;
9629 
9630 	DPRINTF(WM_DEBUG_LINK,
9631 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
9632 
9633 	reg = CSR_READ(sc, WMREG_ICR);
9634 	WM_CORE_LOCK(sc);
9635 	if (sc->sc_core_stopping)
9636 		goto out;
9637 
9638 	if ((reg & ICR_LSC) != 0) {
9639 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
9640 		wm_linkintr(sc, ICR_LSC);
9641 	}
9642 
9643 	/*
9644 	 * XXX 82574 MSI-X mode workaround
9645 	 *
9646 	 * 82574 MSI-X mode delivers a receive overrun (RXO) interrupt on the
9647 	 * ICR_OTHER MSI-X vector; furthermore it raises neither the ICR_RXQ(0)
9648 	 * nor the ICR_RXQ(1) vector. So, we generate ICR_RXQ(0) and ICR_RXQ(1)
9649 	 * interrupts by writing WMREG_ICS to process receive packets.
9650 	 */
9651 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
9652 #if defined(WM_DEBUG)
9653 		log(LOG_WARNING, "%s: Receive overrun\n",
9654 		    device_xname(sc->sc_dev));
9655 #endif /* defined(WM_DEBUG) */
9656 
9657 		has_rxo = true;
9658 		/*
9659 		 * The RXO interrupt is very high rate when receive traffic is
9660 		 * high rate. We use polling mode for ICR_OTHER like Tx/Rx
9661 		 * interrupts. ICR_OTHER will be enabled at the end of
9662 		 * wm_txrxintr_msix() which is kicked by both ICR_RXQ(0) and
9663 		 * ICR_RXQ(1) interrupts.
9664 		 */
9665 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
9666 
9667 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
9668 	}
9669 
9670 
9671 
9672 out:
9673 	WM_CORE_UNLOCK(sc);
9674 
9675 	if (sc->sc_type == WM_T_82574) {
9676 		if (!has_rxo)
9677 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
9678 		else
9679 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
9680 	} else if (sc->sc_type == WM_T_82575)
9681 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
9682 	else
9683 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
9684 
9685 	return 1;
9686 }
9687 
9688 /*
9689  * Media related.
9690  * GMII, SGMII, TBI (and SERDES)
9691  */
9692 
9693 /* Common */
9694 
9695 /*
9696  * wm_tbi_serdes_set_linkled:
9697  *
9698  *	Update the link LED on TBI and SERDES devices.
9699  */
9700 static void
9701 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
9702 {
9703 
9704 	if (sc->sc_tbi_linkup)
9705 		sc->sc_ctrl |= CTRL_SWDPIN(0);
9706 	else
9707 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
9708 
9709 	/* 82540 or newer devices are active low */
9710 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
9711 
9712 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9713 }
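
/*
 * A sketch of the XOR above: on 82540 and newer parts the LED pin is
 * active low, so after CTRL_SWDPIN(0) is set according to the link
 * state, the bit is inverted once more:
 *
 *	linkup  -> bit set   -> XOR -> pin low  -> LED on
 *	!linkup -> bit clear -> XOR -> pin high -> LED off
 */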
9714 
9715 /* GMII related */
9716 
9717 /*
9718  * wm_gmii_reset:
9719  *
9720  *	Reset the PHY.
9721  */
9722 static void
9723 wm_gmii_reset(struct wm_softc *sc)
9724 {
9725 	uint32_t reg;
9726 	int rv;
9727 
9728 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
9729 		device_xname(sc->sc_dev), __func__));
9730 
9731 	rv = sc->phy.acquire(sc);
9732 	if (rv != 0) {
9733 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9734 		    __func__);
9735 		return;
9736 	}
9737 
9738 	switch (sc->sc_type) {
9739 	case WM_T_82542_2_0:
9740 	case WM_T_82542_2_1:
9741 		/* null */
9742 		break;
9743 	case WM_T_82543:
9744 		/*
9745 		 * With 82543, we need to force speed and duplex on the MAC
9746 		 * equal to what the PHY speed and duplex configuration is.
9747 		 * In addition, we need to perform a hardware reset on the PHY
9748 		 * to take it out of reset.
9749 		 */
9750 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
9751 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9752 
9753 		/* The PHY reset pin is active-low. */
9754 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
9755 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
9756 		    CTRL_EXT_SWDPIN(4));
9757 		reg |= CTRL_EXT_SWDPIO(4);
9758 
9759 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9760 		CSR_WRITE_FLUSH(sc);
9761 		delay(10*1000);
9762 
9763 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
9764 		CSR_WRITE_FLUSH(sc);
9765 		delay(150);
9766 #if 0
9767 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
9768 #endif
9769 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
9770 		break;
9771 	case WM_T_82544:	/* Reset 10000us */
9772 	case WM_T_82540:
9773 	case WM_T_82545:
9774 	case WM_T_82545_3:
9775 	case WM_T_82546:
9776 	case WM_T_82546_3:
9777 	case WM_T_82541:
9778 	case WM_T_82541_2:
9779 	case WM_T_82547:
9780 	case WM_T_82547_2:
9781 	case WM_T_82571:	/* Reset 100us */
9782 	case WM_T_82572:
9783 	case WM_T_82573:
9784 	case WM_T_82574:
9785 	case WM_T_82575:
9786 	case WM_T_82576:
9787 	case WM_T_82580:
9788 	case WM_T_I350:
9789 	case WM_T_I354:
9790 	case WM_T_I210:
9791 	case WM_T_I211:
9792 	case WM_T_82583:
9793 	case WM_T_80003:
9794 		/* Generic reset */
9795 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
9796 		CSR_WRITE_FLUSH(sc);
9797 		delay(20000);
9798 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9799 		CSR_WRITE_FLUSH(sc);
9800 		delay(20000);
9801 
9802 		if ((sc->sc_type == WM_T_82541)
9803 		    || (sc->sc_type == WM_T_82541_2)
9804 		    || (sc->sc_type == WM_T_82547)
9805 		    || (sc->sc_type == WM_T_82547_2)) {
9806 			/* Workarounds for IGP are done in igp_reset() */
9807 			/* XXX add code to set LED after phy reset */
9808 		}
9809 		break;
9810 	case WM_T_ICH8:
9811 	case WM_T_ICH9:
9812 	case WM_T_ICH10:
9813 	case WM_T_PCH:
9814 	case WM_T_PCH2:
9815 	case WM_T_PCH_LPT:
9816 	case WM_T_PCH_SPT:
9817 	case WM_T_PCH_CNP:
9818 		/* Generic reset */
9819 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
9820 		CSR_WRITE_FLUSH(sc);
9821 		delay(100);
9822 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9823 		CSR_WRITE_FLUSH(sc);
9824 		delay(150);
9825 		break;
9826 	default:
9827 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
9828 		    __func__);
9829 		break;
9830 	}
9831 
9832 	sc->phy.release(sc);
9833 
9834 	/* get_cfg_done */
9835 	wm_get_cfg_done(sc);
9836 
9837 	/* Extra setup */
9838 	switch (sc->sc_type) {
9839 	case WM_T_82542_2_0:
9840 	case WM_T_82542_2_1:
9841 	case WM_T_82543:
9842 	case WM_T_82544:
9843 	case WM_T_82540:
9844 	case WM_T_82545:
9845 	case WM_T_82545_3:
9846 	case WM_T_82546:
9847 	case WM_T_82546_3:
9848 	case WM_T_82541_2:
9849 	case WM_T_82547_2:
9850 	case WM_T_82571:
9851 	case WM_T_82572:
9852 	case WM_T_82573:
9853 	case WM_T_82574:
9854 	case WM_T_82583:
9855 	case WM_T_82575:
9856 	case WM_T_82576:
9857 	case WM_T_82580:
9858 	case WM_T_I350:
9859 	case WM_T_I354:
9860 	case WM_T_I210:
9861 	case WM_T_I211:
9862 	case WM_T_80003:
9863 		/* Null */
9864 		break;
9865 	case WM_T_82541:
9866 	case WM_T_82547:
9867 		/* XXX Actively configure the LED after PHY reset */
9868 		break;
9869 	case WM_T_ICH8:
9870 	case WM_T_ICH9:
9871 	case WM_T_ICH10:
9872 	case WM_T_PCH:
9873 	case WM_T_PCH2:
9874 	case WM_T_PCH_LPT:
9875 	case WM_T_PCH_SPT:
9876 	case WM_T_PCH_CNP:
9877 		wm_phy_post_reset(sc);
9878 		break;
9879 	default:
9880 		panic("%s: unknown type\n", __func__);
9881 		break;
9882 	}
9883 }
9884 
9885 /*
9886  * Set up sc_phytype and mii_{read|write}reg.
9887  *
9888  *  To identify the PHY type, the correct read/write functions must be
9889  * selected; to select the correct read/write functions, the PCI ID or
9890  * MAC type must be used, without accessing the PHY registers.
9891  *
9892  *  On the first call of this function, the PHY ID is not known yet, so
9893  * the PCI ID or MAC type is checked. The list of PCI IDs may not be
9894  * perfect, so the result might be incorrect.
9895  *
9896  *  On the second call, the PHY OUI and model are used to identify the
9897  * PHY type. This might still not be perfect because of missing entries
9898  * in the comparison table, but it is better than the first call.
9899  *
9900  *  If the newly detected result differs from the previous assumption,
9901  * a diagnostic message is printed.
9902  */
9903 static void
9904 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
9905     uint16_t phy_model)
9906 {
9907 	device_t dev = sc->sc_dev;
9908 	struct mii_data *mii = &sc->sc_mii;
9909 	uint16_t new_phytype = WMPHY_UNKNOWN;
9910 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
9911 	mii_readreg_t new_readreg;
9912 	mii_writereg_t new_writereg;
9913 
9914 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
9915 		device_xname(sc->sc_dev), __func__));
9916 
9917 	if (mii->mii_readreg == NULL) {
9918 		/*
9919 		 *  This is the first call of this function. For ICH and PCH
9920 		 * variants, it's difficult to determine the PHY access method
9921 		 * by sc_type, so use the PCI product ID for some devices.
9922 		 */
9923 
9924 		switch (sc->sc_pcidevid) {
9925 		case PCI_PRODUCT_INTEL_PCH_M_LM:
9926 		case PCI_PRODUCT_INTEL_PCH_M_LC:
9927 			/* 82577 */
9928 			new_phytype = WMPHY_82577;
9929 			break;
9930 		case PCI_PRODUCT_INTEL_PCH_D_DM:
9931 		case PCI_PRODUCT_INTEL_PCH_D_DC:
9932 			/* 82578 */
9933 			new_phytype = WMPHY_82578;
9934 			break;
9935 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
9936 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
9937 			/* 82579 */
9938 			new_phytype = WMPHY_82579;
9939 			break;
9940 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
9941 		case PCI_PRODUCT_INTEL_82801I_BM:
9942 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
9943 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
9944 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
9945 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
9946 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
9947 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
9948 			/* ICH8, 9, 10 with 82567 */
9949 			new_phytype = WMPHY_BM;
9950 			break;
9951 		default:
9952 			break;
9953 		}
9954 	} else {
9955 		/* It's not the first call. Use PHY OUI and model */
9956 		switch (phy_oui) {
9957 		case MII_OUI_ATTANSIC: /* XXX ??? */
9958 			switch (phy_model) {
9959 			case 0x0004: /* XXX */
9960 				new_phytype = WMPHY_82578;
9961 				break;
9962 			default:
9963 				break;
9964 			}
9965 			break;
9966 		case MII_OUI_xxMARVELL:
9967 			switch (phy_model) {
9968 			case MII_MODEL_xxMARVELL_I210:
9969 				new_phytype = WMPHY_I210;
9970 				break;
9971 			case MII_MODEL_xxMARVELL_E1011:
9972 			case MII_MODEL_xxMARVELL_E1000_3:
9973 			case MII_MODEL_xxMARVELL_E1000_5:
9974 			case MII_MODEL_xxMARVELL_E1112:
9975 				new_phytype = WMPHY_M88;
9976 				break;
9977 			case MII_MODEL_xxMARVELL_E1149:
9978 				new_phytype = WMPHY_BM;
9979 				break;
9980 			case MII_MODEL_xxMARVELL_E1111:
9981 			case MII_MODEL_xxMARVELL_I347:
9982 			case MII_MODEL_xxMARVELL_E1512:
9983 			case MII_MODEL_xxMARVELL_E1340M:
9984 			case MII_MODEL_xxMARVELL_E1543:
9985 				new_phytype = WMPHY_M88;
9986 				break;
9987 			case MII_MODEL_xxMARVELL_I82563:
9988 				new_phytype = WMPHY_GG82563;
9989 				break;
9990 			default:
9991 				break;
9992 			}
9993 			break;
9994 		case MII_OUI_INTEL:
9995 			switch (phy_model) {
9996 			case MII_MODEL_INTEL_I82577:
9997 				new_phytype = WMPHY_82577;
9998 				break;
9999 			case MII_MODEL_INTEL_I82579:
10000 				new_phytype = WMPHY_82579;
10001 				break;
10002 			case MII_MODEL_INTEL_I217:
10003 				new_phytype = WMPHY_I217;
10004 				break;
10005 			case MII_MODEL_INTEL_I82580:
10006 			case MII_MODEL_INTEL_I350:
10007 				new_phytype = WMPHY_82580;
10008 				break;
10009 			default:
10010 				break;
10011 			}
10012 			break;
10013 		case MII_OUI_yyINTEL:
10014 			switch (phy_model) {
10015 			case MII_MODEL_yyINTEL_I82562G:
10016 			case MII_MODEL_yyINTEL_I82562EM:
10017 			case MII_MODEL_yyINTEL_I82562ET:
10018 				new_phytype = WMPHY_IFE;
10019 				break;
10020 			case MII_MODEL_yyINTEL_IGP01E1000:
10021 				new_phytype = WMPHY_IGP;
10022 				break;
10023 			case MII_MODEL_yyINTEL_I82566:
10024 				new_phytype = WMPHY_IGP_3;
10025 				break;
10026 			default:
10027 				break;
10028 			}
10029 			break;
10030 		default:
10031 			break;
10032 		}
10033 		if (new_phytype == WMPHY_UNKNOWN)
10034 			aprint_verbose_dev(dev,
10035 			    "%s: unknown PHY model. OUI=%06x, model=%04x\n",
10036 			    __func__, phy_oui, phy_model);
10037 
10038 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
10039 		    && (sc->sc_phytype != new_phytype)) {
10040 			aprint_error_dev(dev, "Previously assumed PHY type (%u) "
10041 			    "was incorrect. PHY type from PHY ID = %u\n",
10042 			    sc->sc_phytype, new_phytype);
10043 		}
10044 	}
10045 
10046 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
10047 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
10048 		/* SGMII */
10049 		new_readreg = wm_sgmii_readreg;
10050 		new_writereg = wm_sgmii_writereg;
10051 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
10052 		/* BM2 (phyaddr == 1) */
10053 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
10054 		    && (new_phytype != WMPHY_BM)
10055 		    && (new_phytype != WMPHY_UNKNOWN))
10056 			doubt_phytype = new_phytype;
10057 		new_phytype = WMPHY_BM;
10058 		new_readreg = wm_gmii_bm_readreg;
10059 		new_writereg = wm_gmii_bm_writereg;
10060 	} else if (sc->sc_type >= WM_T_PCH) {
10061 		/* All PCH* use _hv_ */
10062 		new_readreg = wm_gmii_hv_readreg;
10063 		new_writereg = wm_gmii_hv_writereg;
10064 	} else if (sc->sc_type >= WM_T_ICH8) {
10065 		/* non-82567 ICH8, 9 and 10 */
10066 		new_readreg = wm_gmii_i82544_readreg;
10067 		new_writereg = wm_gmii_i82544_writereg;
10068 	} else if (sc->sc_type >= WM_T_80003) {
10069 		/* 80003 */
10070 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
10071 		    && (new_phytype != WMPHY_GG82563)
10072 		    && (new_phytype != WMPHY_UNKNOWN))
10073 			doubt_phytype = new_phytype;
10074 		new_phytype = WMPHY_GG82563;
10075 		new_readreg = wm_gmii_i80003_readreg;
10076 		new_writereg = wm_gmii_i80003_writereg;
10077 	} else if (sc->sc_type >= WM_T_I210) {
10078 		/* I210 and I211 */
10079 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
10080 		    && (new_phytype != WMPHY_I210)
10081 		    && (new_phytype != WMPHY_UNKNOWN))
10082 			doubt_phytype = new_phytype;
10083 		new_phytype = WMPHY_I210;
10084 		new_readreg = wm_gmii_gs40g_readreg;
10085 		new_writereg = wm_gmii_gs40g_writereg;
10086 	} else if (sc->sc_type >= WM_T_82580) {
10087 		/* 82580, I350 and I354 */
10088 		new_readreg = wm_gmii_82580_readreg;
10089 		new_writereg = wm_gmii_82580_writereg;
10090 	} else if (sc->sc_type >= WM_T_82544) {
10091 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
10092 		new_readreg = wm_gmii_i82544_readreg;
10093 		new_writereg = wm_gmii_i82544_writereg;
10094 	} else {
10095 		new_readreg = wm_gmii_i82543_readreg;
10096 		new_writereg = wm_gmii_i82543_writereg;
10097 	}
10098 
10099 	if (new_phytype == WMPHY_BM) {
10100 		/* All BM use _bm_ */
10101 		new_readreg = wm_gmii_bm_readreg;
10102 		new_writereg = wm_gmii_bm_writereg;
10103 	}
10104 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
10105 		/* All PCH* use _hv_ */
10106 		new_readreg = wm_gmii_hv_readreg;
10107 		new_writereg = wm_gmii_hv_writereg;
10108 	}
10109 
10110 	/* Diag output */
10111 	if (doubt_phytype != WMPHY_UNKNOWN)
10112 		aprint_error_dev(dev, "Assumed new PHY type was "
10113 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
10114 		    new_phytype);
10115 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
10116 	    && (sc->sc_phytype != new_phytype))
10117 		aprint_error_dev(dev, "Previously assumed PHY type (%u) "
10118 		    "was incorrect. New PHY type = %u\n",
10119 		    sc->sc_phytype, new_phytype);
10120 
10121 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
10122 		aprint_error_dev(dev, "PHY type is still unknown.\n");
10123 
10124 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
10125 		aprint_error_dev(dev, "Previously assumed PHY read/write "
10126 		    "function was incorrect.\n");
10127 
10128 	/* Update now */
10129 	sc->sc_phytype = new_phytype;
10130 	mii->mii_readreg = new_readreg;
10131 	mii->mii_writereg = new_writereg;
10132 	if (new_readreg == wm_gmii_hv_readreg) {
10133 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
10134 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
10135 	} else if (new_readreg == wm_sgmii_readreg) {
10136 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
10137 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
10138 	} else if (new_readreg == wm_gmii_i82544_readreg) {
10139 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
10140 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
10141 	}
10142 }
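
/*
 * A sketch of the two-call protocol described above (the first call
 * happens at attach time before any PHY register access; the argument
 * values shown here are illustrative, not verbatim):
 *
 *	wm_gmii_setup_phytype(sc, 0, 0);		guess from PCI ID
 *	... mii_attach() probes the bus ...
 *	wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
 *	    child->mii_mpd_model);			confirm from PHY ID
 */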
10143 
10144 /*
10145  * wm_get_phy_id_82575:
10146  *
10147  * Return PHY ID. Return -1 if it failed.
10148  */
10149 static int
10150 wm_get_phy_id_82575(struct wm_softc *sc)
10151 {
10152 	uint32_t reg;
10153 	int phyid = -1;
10154 
10155 	/* XXX */
10156 	if ((sc->sc_flags & WM_F_SGMII) == 0)
10157 		return -1;
10158 
10159 	if (wm_sgmii_uses_mdio(sc)) {
10160 		switch (sc->sc_type) {
10161 		case WM_T_82575:
10162 		case WM_T_82576:
10163 			reg = CSR_READ(sc, WMREG_MDIC);
10164 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
10165 			break;
10166 		case WM_T_82580:
10167 		case WM_T_I350:
10168 		case WM_T_I354:
10169 		case WM_T_I210:
10170 		case WM_T_I211:
10171 			reg = CSR_READ(sc, WMREG_MDICNFG);
10172 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
10173 			break;
10174 		default:
10175 			return -1;
10176 		}
10177 	}
10178 
10179 	return phyid;
10180 }
10181 
10182 
10183 /*
10184  * wm_gmii_mediainit:
10185  *
10186  *	Initialize media for use on 1000BASE-T devices.
10187  */
10188 static void
10189 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
10190 {
10191 	device_t dev = sc->sc_dev;
10192 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10193 	struct mii_data *mii = &sc->sc_mii;
10194 	uint32_t reg;
10195 
10196 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
10197 		device_xname(sc->sc_dev), __func__));
10198 
10199 	/* We have GMII. */
10200 	sc->sc_flags |= WM_F_HAS_MII;
10201 
10202 	if (sc->sc_type == WM_T_80003)
10203 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
10204 	else
10205 		sc->sc_tipg = TIPG_1000T_DFLT;
10206 
10207 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
10208 	if ((sc->sc_type == WM_T_82580)
10209 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
10210 	    || (sc->sc_type == WM_T_I211)) {
10211 		reg = CSR_READ(sc, WMREG_PHPM);
10212 		reg &= ~PHPM_GO_LINK_D;
10213 		CSR_WRITE(sc, WMREG_PHPM, reg);
10214 	}
10215 
10216 	/*
10217 	 * Let the chip set speed/duplex on its own based on
10218 	 * signals from the PHY.
10219 	 * XXXbouyer - I'm not sure this is right for the 80003,
10220 	 * the em driver only sets CTRL_SLU here - but it seems to work.
10221 	 */
10222 	sc->sc_ctrl |= CTRL_SLU;
10223 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10224 
10225 	/* Initialize our media structures and probe the GMII. */
10226 	mii->mii_ifp = ifp;
10227 
10228 	mii->mii_statchg = wm_gmii_statchg;
10229 
10230 	/* Switch PHY control from SMBus to PCIe */
10231 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
10232 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
10233 	    || (sc->sc_type == WM_T_PCH_CNP))
10234 		wm_init_phy_workarounds_pchlan(sc);
10235 
10236 	wm_gmii_reset(sc);
10237 
10238 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
10239 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
10240 	    wm_gmii_mediastatus);
10241 
10242 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
10243 	    || (sc->sc_type == WM_T_82580)
10244 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
10245 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
10246 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
10247 			/* Attach only one port */
10248 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
10249 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
10250 		} else {
10251 			int i, id;
10252 			uint32_t ctrl_ext;
10253 
10254 			id = wm_get_phy_id_82575(sc);
10255 			if (id != -1) {
10256 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
10257 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
10258 			}
10259 			if ((id == -1)
10260 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
10261 				/* Power on sgmii phy if it is disabled */
10262 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
10263 				CSR_WRITE(sc, WMREG_CTRL_EXT,
10264 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
10265 				CSR_WRITE_FLUSH(sc);
10266 				delay(300*1000); /* XXX too long */
10267 
10268 				/* Try PHY addresses from 1 to 7 */
10269 				for (i = 1; i < 8; i++)
10270 					mii_attach(sc->sc_dev, &sc->sc_mii,
10271 					    0xffffffff, i, MII_OFFSET_ANY,
10272 					    MIIF_DOPAUSE);
10273 
10274 				/* Restore previous sfp cage power state */
10275 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
10276 			}
10277 		}
10278 	} else
10279 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
10280 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
10281 
10282 	/*
10283 	 * If the MAC is PCH2 or newer (LPT/SPT/CNP) and failed to detect the
10284 	 * MII PHY, call wm_set_mdio_slow_mode_hv() for a workaround and retry.
10285 	 */
10286 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
10287 		|| (sc->sc_type == WM_T_PCH_SPT)
10288 		|| (sc->sc_type == WM_T_PCH_CNP))
10289 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
10290 		wm_set_mdio_slow_mode_hv(sc);
10291 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
10292 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
10293 	}
10294 
10295 	/*
10296 	 * (For ICH8 variants)
10297 	 * If PHY detection failed, use BM's r/w function and retry.
10298 	 */
10299 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
10300 		/* if failed, retry with *_bm_* */
10301 		aprint_verbose_dev(dev, "Assumed PHY access function "
10302 		    "(type = %d) might be incorrect. Use BM and retry.\n",
10303 		    sc->sc_phytype);
10304 		sc->sc_phytype = WMPHY_BM;
10305 		mii->mii_readreg = wm_gmii_bm_readreg;
10306 		mii->mii_writereg = wm_gmii_bm_writereg;
10307 
10308 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
10309 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
10310 	}
10311 
10312 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
10313 		/* No PHY was found */
10314 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
10315 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
10316 		sc->sc_phytype = WMPHY_NONE;
10317 	} else {
10318 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
10319 
10320 		/*
10321 		 * PHY found! Check the PHY type again via the second call of
10322 		 * wm_gmii_setup_phytype.
10323 		 */
10324 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
10325 		    child->mii_mpd_model);
10326 
10327 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
10328 	}
10329 }
10330 
10331 /*
10332  * wm_gmii_mediachange:	[ifmedia interface function]
10333  *
10334  *	Set hardware to newly-selected media on a 1000BASE-T device.
10335  */
10336 static int
10337 wm_gmii_mediachange(struct ifnet *ifp)
10338 {
10339 	struct wm_softc *sc = ifp->if_softc;
10340 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
10341 	int rc;
10342 
10343 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
10344 		device_xname(sc->sc_dev), __func__));
10345 	if ((ifp->if_flags & IFF_UP) == 0)
10346 		return 0;
10347 
10348 	/* Disable D0 LPLU. */
10349 	wm_lplu_d0_disable(sc);
10350 
10351 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
10352 	sc->sc_ctrl |= CTRL_SLU;
10353 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
10354 	    || (sc->sc_type > WM_T_82543)) {
10355 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
10356 	} else {
10357 		sc->sc_ctrl &= ~CTRL_ASDE;
10358 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
10359 		if (ife->ifm_media & IFM_FDX)
10360 			sc->sc_ctrl |= CTRL_FD;
10361 		switch (IFM_SUBTYPE(ife->ifm_media)) {
10362 		case IFM_10_T:
10363 			sc->sc_ctrl |= CTRL_SPEED_10;
10364 			break;
10365 		case IFM_100_TX:
10366 			sc->sc_ctrl |= CTRL_SPEED_100;
10367 			break;
10368 		case IFM_1000_T:
10369 			sc->sc_ctrl |= CTRL_SPEED_1000;
10370 			break;
10371 		case IFM_NONE:
10372 			/* There is no specific setting for IFM_NONE */
10373 			break;
10374 		default:
10375 			panic("wm_gmii_mediachange: bad media 0x%x",
10376 			    ife->ifm_media);
10377 		}
10378 	}
10379 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10380 	CSR_WRITE_FLUSH(sc);
10381 	if (sc->sc_type <= WM_T_82543)
10382 		wm_gmii_reset(sc);
10383 
10384 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
10385 		return 0;
10386 	return rc;
10387 }
10388 
10389 /*
10390  * wm_gmii_mediastatus:	[ifmedia interface function]
10391  *
10392  *	Get the current interface media status on a 1000BASE-T device.
10393  */
10394 static void
10395 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
10396 {
10397 	struct wm_softc *sc = ifp->if_softc;
10398 
10399 	ether_mediastatus(ifp, ifmr);
10400 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
10401 	    | sc->sc_flowflags;
10402 }
10403 
10404 #define	MDI_IO		CTRL_SWDPIN(2)
10405 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
10406 #define	MDI_CLK		CTRL_SWDPIN(3)
10407 
10408 static void
10409 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
10410 {
10411 	uint32_t i, v;
10412 
10413 	v = CSR_READ(sc, WMREG_CTRL);
10414 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
10415 	v |= MDI_DIR | CTRL_SWDPIO(3);
10416 
10417 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
10418 		if (data & i)
10419 			v |= MDI_IO;
10420 		else
10421 			v &= ~MDI_IO;
10422 		CSR_WRITE(sc, WMREG_CTRL, v);
10423 		CSR_WRITE_FLUSH(sc);
10424 		delay(10);
10425 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
10426 		CSR_WRITE_FLUSH(sc);
10427 		delay(10);
10428 		CSR_WRITE(sc, WMREG_CTRL, v);
10429 		CSR_WRITE_FLUSH(sc);
10430 		delay(10);
10431 	}
10432 }
10433 
10434 static uint16_t
10435 wm_i82543_mii_recvbits(struct wm_softc *sc)
10436 {
10437 	uint32_t v, i;
10438 	uint16_t data = 0;
10439 
10440 	v = CSR_READ(sc, WMREG_CTRL);
10441 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
10442 	v |= CTRL_SWDPIO(3);
10443 
10444 	CSR_WRITE(sc, WMREG_CTRL, v);
10445 	CSR_WRITE_FLUSH(sc);
10446 	delay(10);
10447 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
10448 	CSR_WRITE_FLUSH(sc);
10449 	delay(10);
10450 	CSR_WRITE(sc, WMREG_CTRL, v);
10451 	CSR_WRITE_FLUSH(sc);
10452 	delay(10);
10453 
10454 	for (i = 0; i < 16; i++) {
10455 		data <<= 1;
10456 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
10457 		CSR_WRITE_FLUSH(sc);
10458 		delay(10);
10459 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
10460 			data |= 1;
10461 		CSR_WRITE(sc, WMREG_CTRL, v);
10462 		CSR_WRITE_FLUSH(sc);
10463 		delay(10);
10464 	}
10465 
10466 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
10467 	CSR_WRITE_FLUSH(sc);
10468 	delay(10);
10469 	CSR_WRITE(sc, WMREG_CTRL, v);
10470 	CSR_WRITE_FLUSH(sc);
10471 	delay(10);
10472 
10473 	return data;
10474 }
10475 
10476 #undef MDI_IO
10477 #undef MDI_DIR
10478 #undef MDI_CLK
10479 
10480 /*
10481  * wm_gmii_i82543_readreg:	[mii interface function]
10482  *
10483  *	Read a PHY register on the GMII (i82543 version).
10484  */
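/*
 * The command assembled below is a standard IEEE 802.3 clause 22 read
 * frame, sent MSB first after a 32-bit preamble of ones:
 *
 *	ST(01) OP(10 = read) PHYAD[4:0] REGAD[4:0]	(14 bits)
 *
 * wm_i82543_mii_recvbits() then clocks the turnaround and the 16 data
 * bits back in.
 */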
10485 static int
10486 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
10487 {
10488 	struct wm_softc *sc = device_private(dev);
10489 
10490 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
10491 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
10492 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
10493 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
10494 
10495 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
10496 		device_xname(dev), phy, reg, *val));
10497 
10498 	return 0;
10499 }
10500 
10501 /*
10502  * wm_gmii_i82543_writereg:	[mii interface function]
10503  *
10504  *	Write a PHY register on the GMII (i82543 version).
10505  */
10506 static int
10507 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
10508 {
10509 	struct wm_softc *sc = device_private(dev);
10510 
10511 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
10512 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
10513 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
10514 	    (MII_COMMAND_START << 30), 32);
10515 
10516 	return 0;
10517 }
10518 
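/*
 * Devices newer than the i82543 provide the MDIC register, which runs
 * the MII management frame in hardware: opcode, PHY address, register
 * address (and, for writes, the data) go into a single register write,
 * and software then polls for MDIC_READY.  MDIC_E flags an access
 * error, which is expected when no PHY answers at that address.
 */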
10519 /*
10520  * wm_gmii_mdic_readreg:	[mii interface function]
10521  *
10522  *	Read a PHY register on the GMII.
10523  */
10524 static int
10525 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
10526 {
10527 	struct wm_softc *sc = device_private(dev);
10528 	uint32_t mdic = 0;
10529 	int i;
10530 
10531 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
10532 	    && (reg > MII_ADDRMASK)) {
10533 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
10534 		    __func__, sc->sc_phytype, reg);
10535 		reg &= MII_ADDRMASK;
10536 	}
10537 
10538 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
10539 	    MDIC_REGADD(reg));
10540 
10541 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
10542 		delay(50);
10543 		mdic = CSR_READ(sc, WMREG_MDIC);
10544 		if (mdic & MDIC_READY)
10545 			break;
10546 	}
10547 
10548 	if ((mdic & MDIC_READY) == 0) {
10549 		DPRINTF(WM_DEBUG_GMII,
10550 		    ("%s: MDIC read timed out: phy %d reg %d\n",
10551 			device_xname(dev), phy, reg));
10552 		return ETIMEDOUT;
10553 	} else if (mdic & MDIC_E) {
10554 		/* This is normal if no PHY is present. */
10555 		DPRINTF(WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
10556 			device_xname(sc->sc_dev), phy, reg));
10557 		return -1;
10558 	} else
10559 		*val = MDIC_DATA(mdic);
10560 
10561 	/*
10562 	 * Allow some time after each MDIC transaction to avoid
10563 	 * reading duplicate data in the next MDIC transaction.
10564 	 */
10565 	if (sc->sc_type == WM_T_PCH2)
10566 		delay(100);
10567 
10568 	return 0;
10569 }
10570 
10571 /*
10572  * wm_gmii_mdic_writereg:	[mii interface function]
10573  *
10574  *	Write a PHY register on the GMII.
10575  */
10576 static int
10577 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
10578 {
10579 	struct wm_softc *sc = device_private(dev);
10580 	uint32_t mdic = 0;
10581 	int i;
10582 
10583 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
10584 	    && (reg > MII_ADDRMASK)) {
10585 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
10586 		    __func__, sc->sc_phytype, reg);
10587 		reg &= MII_ADDRMASK;
10588 	}
10589 
10590 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
10591 	    MDIC_REGADD(reg) | MDIC_DATA(val));
10592 
10593 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
10594 		delay(50);
10595 		mdic = CSR_READ(sc, WMREG_MDIC);
10596 		if (mdic & MDIC_READY)
10597 			break;
10598 	}
10599 
10600 	if ((mdic & MDIC_READY) == 0) {
10601 		DPRINTF(WM_DEBUG_GMII,
10602 		    ("%s: MDIC write timed out: phy %d reg %d\n",
10603 			device_xname(dev), phy, reg));
10604 		return ETIMEDOUT;
10605 	} else if (mdic & MDIC_E) {
10606 		DPRINTF(WM_DEBUG_GMII,
10607 		    ("%s: MDIC write error: phy %d reg %d\n",
10608 			device_xname(dev), phy, reg));
10609 		return -1;
10610 	}
10611 
10612 	/*
10613 	 * Allow some time after each MDIC transaction to avoid
10614 	 * reading duplicate data in the next MDIC transaction.
10615 	 */
10616 	if (sc->sc_type == WM_T_PCH2)
10617 		delay(100);
10618 
10619 	return 0;
10620 }
10621 
10622 /*
10623  * wm_gmii_i82544_readreg:	[mii interface function]
10624  *
10625  *	Read a PHY register on the GMII.
10626  */
10627 static int
10628 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
10629 {
10630 	struct wm_softc *sc = device_private(dev);
10631 	int rv;
10632 
10633 	if (sc->phy.acquire(sc)) {
10634 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10635 		return -1;
10636 	}
10637 
10638 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
10639 
10640 	sc->phy.release(sc);
10641 
10642 	return rv;
10643 }
10644 
10645 static int
10646 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
10647 {
10648 	struct wm_softc *sc = device_private(dev);
10649 	int rv;
10650 
10651 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
10652 		switch (sc->sc_phytype) {
10653 		case WMPHY_IGP:
10654 		case WMPHY_IGP_2:
10655 		case WMPHY_IGP_3:
10656 			rv = wm_gmii_mdic_writereg(dev, phy,
10657 			    MII_IGPHY_PAGE_SELECT, reg);
10658 			if (rv != 0)
10659 				return rv;
10660 			break;
10661 		default:
10662 #ifdef WM_DEBUG
10663 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
10664 			    __func__, sc->sc_phytype, reg);
10665 #endif
10666 			break;
10667 		}
10668 	}
10669 
10670 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
10671 }
10672 
10673 /*
10674  * wm_gmii_i82544_writereg:	[mii interface function]
10675  *
10676  *	Write a PHY register on the GMII.
10677  */
10678 static int
10679 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
10680 {
10681 	struct wm_softc *sc = device_private(dev);
10682 	int rv;
10683 
10684 	if (sc->phy.acquire(sc)) {
10685 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10686 		return -1;
10687 	}
10688 
10689 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
10690 	sc->phy.release(sc);
10691 
10692 	return rv;
10693 }
10694 
10695 static int
10696 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
10697 {
10698 	struct wm_softc *sc = device_private(dev);
10699 	int rv;
10700 
10701 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
10702 		switch (sc->sc_phytype) {
10703 		case WMPHY_IGP:
10704 		case WMPHY_IGP_2:
10705 		case WMPHY_IGP_3:
10706 			rv = wm_gmii_mdic_writereg(dev, phy,
10707 			    MII_IGPHY_PAGE_SELECT, reg);
10708 			if (rv != 0)
10709 				return rv;
10710 			break;
10711 		default:
10712 #ifdef WM_DEBUG
10713 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
10714 			    __func__, sc->sc_phytype, reg);
10715 #endif
10716 			break;
10717 		}
10718 	}
10719 
10720 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
10721 }
10722 
10723 /*
10724  * wm_gmii_i80003_readreg:	[mii interface function]
10725  *
10726  *	Read a PHY register on the Kumeran bus (80003).
10727  * This could be handled by the PHY layer if we didn't have to lock the
10728  * resource ...
10729  */
10730 static int
10731 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
10732 {
10733 	struct wm_softc *sc = device_private(dev);
10734 	int page_select;
10735 	uint16_t temp, temp2;
10736 	int rv = 0;
10737 
10738 	if (phy != 1) /* Only one PHY on kumeran bus */
10739 		return -1;
10740 
10741 	if (sc->phy.acquire(sc)) {
10742 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10743 		return -1;
10744 	}
10745 
10746 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
10747 		page_select = GG82563_PHY_PAGE_SELECT;
10748 	else {
10749 		/*
10750 		 * Use Alternative Page Select register to access registers
10751 		 * 30 and 31.
10752 		 */
10753 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
10754 	}
10755 	temp = reg >> GG82563_PAGE_SHIFT;
10756 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
10757 		goto out;
10758 
10759 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
10760 		/*
10761 		 * Wait another 200us to work around a bug in the ready
10762 		 * bit of the MDIC register.
10763 		 */
10764 		delay(200);
10765 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
10766 		if ((rv != 0) || (temp2 != temp)) {
10767 			device_printf(dev, "%s failed\n", __func__);
10768 			rv = -1;
10769 			goto out;
10770 		}
10771 		delay(200);
10772 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
10773 		delay(200);
10774 	} else
10775 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
10776 
10777 out:
10778 	sc->phy.release(sc);
10779 	return rv;
10780 }
10781 
10782 /*
10783  * wm_gmii_i80003_writereg:	[mii interface function]
10784  *
10785  *	Write a PHY register on the Kumeran bus (80003).
10786  * This could be handled by the PHY layer if we didn't have to lock the
10787  * resource ...
10788  */
10789 static int
10790 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
10791 {
10792 	struct wm_softc *sc = device_private(dev);
10793 	int page_select, rv;
10794 	uint16_t temp, temp2;
10795 
10796 	if (phy != 1) /* Only one PHY on kumeran bus */
10797 		return -1;
10798 
10799 	if (sc->phy.acquire(sc)) {
10800 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10801 		return -1;
10802 	}
10803 
10804 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
10805 		page_select = GG82563_PHY_PAGE_SELECT;
10806 	else {
10807 		/*
10808 		 * Use Alternative Page Select register to access registers
10809 		 * 30 and 31.
10810 		 */
10811 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
10812 	}
10813 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
10814 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
10815 		goto out;
10816 
10817 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
10818 		/*
10819 		 * Wait another 200us to work around a bug in the ready
10820 		 * bit of the MDIC register.
10821 		 */
10822 		delay(200);
10823 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
10824 		if ((rv != 0) || (temp2 != temp)) {
10825 			device_printf(dev, "%s failed\n", __func__);
10826 			rv = -1;
10827 			goto out;
10828 		}
10829 		delay(200);
10830 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
10831 		delay(200);
10832 	} else
10833 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
10834 
10835 out:
10836 	sc->phy.release(sc);
10837 	return rv;
10838 }
10839 
10840 /*
10841  * wm_gmii_bm_readreg:	[mii interface function]
10842  *
10843  *	Read a PHY register on the BM PHY.
10844  * This could be handled by the PHY layer if we didn't have to lock the
10845  * resource ...
10846  */
10847 static int
10848 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
10849 {
10850 	struct wm_softc *sc = device_private(dev);
10851 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
10852 	int rv;
10853 
10854 	if (sc->phy.acquire(sc)) {
10855 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10856 		return -1;
10857 	}
10858 
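	/*
	 * On BM parts other than the 82574 and 82583, the page select,
	 * port control and wakeup registers answer only at PHY address 1.
	 */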
10859 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
10860 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
10861 		    || (reg == 31)) ? 1 : phy;
10862 	/* Page 800 works differently than the rest so it has its own func */
10863 	if (page == BM_WUC_PAGE) {
10864 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
10865 		goto release;
10866 	}
10867 
10868 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
10869 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
10870 		    && (sc->sc_type != WM_T_82583))
10871 			rv = wm_gmii_mdic_writereg(dev, phy,
10872 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
10873 		else
10874 			rv = wm_gmii_mdic_writereg(dev, phy,
10875 			    BME1000_PHY_PAGE_SELECT, page);
10876 		if (rv != 0)
10877 			goto release;
10878 	}
10879 
10880 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
10881 
10882 release:
10883 	sc->phy.release(sc);
10884 	return rv;
10885 }
10886 
10887 /*
10888  * wm_gmii_bm_writereg:	[mii interface function]
10889  *
10890  *	Write a PHY register on the BM PHY.
10891  * This could be handled by the PHY layer if we didn't have to lock the
10892  * resource ...
10893  */
10894 static int
10895 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
10896 {
10897 	struct wm_softc *sc = device_private(dev);
10898 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
10899 	int rv;
10900 
10901 	if (sc->phy.acquire(sc)) {
10902 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10903 		return -1;
10904 	}
10905 
10906 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
10907 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
10908 		    || (reg == 31)) ? 1 : phy;
10909 	/* Page 800 works differently than the rest so it has its own func */
10910 	if (page == BM_WUC_PAGE) {
10911 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
10912 		goto release;
10913 	}
10914 
10915 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
10916 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
10917 		    && (sc->sc_type != WM_T_82583))
10918 			rv = wm_gmii_mdic_writereg(dev, phy,
10919 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
10920 		else
10921 			rv = wm_gmii_mdic_writereg(dev, phy,
10922 			    BME1000_PHY_PAGE_SELECT, page);
10923 		if (rv != 0)
10924 			goto release;
10925 	}
10926 
10927 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
10928 
10929 release:
10930 	sc->phy.release(sc);
10931 	return rv;
10932 }
10933 
10934 /*
10935  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
10936  *  @dev: pointer to the HW structure
10937  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
10938  *
10939  *  Assumes semaphore already acquired and phy_reg points to a valid memory
10940  *  address to store contents of the BM_WUC_ENABLE_REG register.
10941  */
10942 static int
10943 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
10944 {
10945 	uint16_t temp;
10946 	int rv;
10947 
10948 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
10949 		device_xname(dev), __func__));
10950 
10951 	if (!phy_regp)
10952 		return -1;
10953 
10954 	/* All page select, port ctrl and wakeup registers use phy address 1 */
10955 
10956 	/* Select Port Control Registers page */
10957 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
10958 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
10959 	if (rv != 0)
10960 		return rv;
10961 
10962 	/* Read WUCE and save it */
10963 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
10964 	if (rv != 0)
10965 		return rv;
10966 
10967 	/* Enable both PHY wakeup mode and Wakeup register page writes.
10968 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
10969 	 */
10970 	temp = *phy_regp;
10971 	temp |= BM_WUC_ENABLE_BIT;
10972 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
10973 
10974 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
10975 		return rv;
10976 
10977 	/* Select Host Wakeup Registers page - caller now able to write
10978 	 * registers on the Wakeup registers page
10979 	 */
10980 	return wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
10981 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
10982 }
10983 
10984 /*
10985  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
10986  *  @dev: pointer to the HW structure
10987  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
10988  *
10989  *  Restore BM_WUC_ENABLE_REG to its original value.
10990  *
10991  *  Assumes semaphore already acquired and *phy_reg is the contents of the
10992  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
10993  *  caller.
10994  */
10995 static int
10996 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
10997 {
10998 
10999 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
11000 		device_xname(dev), __func__));
11001 
11002 	if (!phy_regp)
11003 		return -1;
11004 
11005 	/* Select Port Control Registers page */
11006 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
11007 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
11008 
11009 	/* Restore 769.17 to its original value */
11010 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
11011 
11012 	return 0;
11013 }
11014 
11015 /*
11016  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
11017  *  @dev: pointer to the HW structure
11018  *  @offset: register offset to be read or written
11019  *  @val: pointer to the data to read or write
11020  *  @rd: determines if operation is read or write
11021  *  @page_set: BM_WUC_PAGE already set and access enabled
11022  *
11023  *  Read the PHY register at offset and store the retrieved information in
11024  *  data, or write data to PHY register at offset.  Note the procedure to
11025  *  access the PHY wakeup registers is different than reading the other PHY
11026  *  registers. It works as such:
11027  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
11028  *  2) Set page to 800 for host (801 if we were manageability)
11029  *  3) Write the address using the address opcode (0x11)
11030  *  4) Read or write the data using the data opcode (0x12)
11031  *  5) Restore 769.17.2 to its original value
11032  *
11033  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
11034  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
11035  *
11036  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
11037  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
11038  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
11039  */
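/*
 * For example (a sketch; "offset" encodes both the page and the register
 * number, as decoded by BM_PHY_REG_PAGE() and BM_PHY_REG_NUM()), a caller
 * that has not already set the page could read one wakeup register with:
 *
 *	uint16_t data;
 *	int rv = wm_access_phy_wakeup_reg_bm(dev, offset, &data, true,
 *	    false);
 *
 * which performs the enable (steps 1-2), the read (steps 3-4) and the
 * restore (step 5) in a single call.
 */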
11040 static int
11041 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
11042 	bool page_set)
11043 {
11044 	struct wm_softc *sc = device_private(dev);
11045 	uint16_t regnum = BM_PHY_REG_NUM(offset);
11046 	uint16_t page = BM_PHY_REG_PAGE(offset);
11047 	uint16_t wuce;
11048 	int rv = 0;
11049 
11050 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
11051 		device_xname(dev), __func__));
11052 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
11053 	if ((sc->sc_type == WM_T_PCH)
11054 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
11055 		device_printf(dev,
11056 		    "Attempting to access page %d while gig enabled.\n", page);
11057 	}
11058 
11059 	if (!page_set) {
11060 		/* Enable access to PHY wakeup registers */
11061 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
11062 		if (rv != 0) {
11063 			device_printf(dev,
11064 			    "%s: Could not enable PHY wakeup reg access\n",
11065 			    __func__);
11066 			return rv;
11067 		}
11068 	}
11069 	DPRINTF(WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
11070 		device_xname(sc->sc_dev), __func__, page, regnum));
11071 
11072 	/*
11073 	 * Access the PHY wakeup register: write the register address with
11074 	 * the address opcode, then transfer the data (steps 3 and 4 above).
11075 	 */
11076 
11077 	/* Write the Wakeup register page offset value using opcode 0x11 */
11078 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
11079 	if (rv != 0)
11080 		return rv;
11081 
11082 	if (rd) {
11083 		/* Read the Wakeup register page value using opcode 0x12 */
11084 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
11085 	} else {
11086 		/* Write the Wakeup register page value using opcode 0x12 */
11087 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
11088 	}
11089 	if (rv != 0)
11090 		return rv;
11091 
11092 	if (!page_set)
11093 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
11094 
11095 	return rv;
11096 }
11097 
11098 /*
11099  * wm_gmii_hv_readreg:	[mii interface function]
11100  *
11101  *	Read a PHY register on the HV (PCH and newer) PHY.
11102  * This could be handled by the PHY layer if we didn't have to lock the
11103  * resource ...
11104  */
11105 static int
11106 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
11107 {
11108 	struct wm_softc *sc = device_private(dev);
11109 	int rv;
11110 
11111 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
11112 		device_xname(dev), __func__));
11113 	if (sc->phy.acquire(sc)) {
11114 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11115 		return -1;
11116 	}
11117 
11118 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
11119 	sc->phy.release(sc);
11120 	return rv;
11121 }
11122 
11123 static int
11124 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
11125 {
11126 	uint16_t page = BM_PHY_REG_PAGE(reg);
11127 	uint16_t regnum = BM_PHY_REG_NUM(reg);
11128 	int rv;
11129 
11130 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
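	/* Pages at/above HV_INTC_FC_PAGE_START answer only at PHY address 1. */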
11131 
11132 	/* Page 800 works differently than the rest so it has its own func */
11133 	if (page == BM_WUC_PAGE)
11134 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
11135 
11136 	/*
11137 	 * Pages lower than 768 work differently than the rest and are not
11138 	 * handled here.
11139 	 */
11140 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
11141 		device_printf(dev, "%s: unsupported page %d\n", __func__, page);
11142 		return -1;
11143 	}
11144 
11145 	/*
11146 	 * XXX I21[789] documents say that the SMBus Address register is at
11147 	 * PHY address 01, Page 0 (not 768), Register 26.
11148 	 */
11149 	if (page == HV_INTC_FC_PAGE_START)
11150 		page = 0;
11151 
11152 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
11153 		rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
11154 		    page << BME1000_PAGE_SHIFT);
11155 		if (rv != 0)
11156 			return rv;
11157 	}
11158 
11159 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
11160 }
11161 
11162 /*
11163  * wm_gmii_hv_writereg:	[mii interface function]
11164  *
11165  *	Write a PHY register on the HV (PCH and newer) PHY.
11166  * This could be handled by the PHY layer if we didn't have to lock the
11167  * resource ...
11168  */
11169 static int
11170 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
11171 {
11172 	struct wm_softc *sc = device_private(dev);
11173 	int rv;
11174 
11175 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
11176 		device_xname(dev), __func__));
11177 
11178 	if (sc->phy.acquire(sc)) {
11179 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11180 		return -1;
11181 	}
11182 
11183 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
11184 	sc->phy.release(sc);
11185 
11186 	return rv;
11187 }
11188 
11189 static int
11190 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
11191 {
11192 	struct wm_softc *sc = device_private(dev);
11193 	uint16_t page = BM_PHY_REG_PAGE(reg);
11194 	uint16_t regnum = BM_PHY_REG_NUM(reg);
11195 	int rv;
11196 
11197 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
11198 
11199 	/* Page 800 works differently than the rest so it has its own func */
11200 	if (page == BM_WUC_PAGE)
11201 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
11202 		    false);
11203 
11204 	/*
11205 	 * Pages lower than 768 work differently than the rest and are not
11206 	 * handled here.
11207 	 */
11208 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
11209 		device_printf(dev, "%s: unsupported page %d\n", __func__, page);
11210 		return -1;
11211 	}
11212 
11214 	/*
11215 	 * XXX I21[789] documents say that the SMBus Address register
11216 	 * is at PHY address 01, Page 0 (not 768), Register 26.
11217 	 */
11218 	if (page == HV_INTC_FC_PAGE_START)
11219 		page = 0;
11220 
11221 	/*
11222 	 * XXX Workaround MDIO accesses being disabled after entering
11223 	 * IEEE Power Down (whenever bit 11 of the PHY control
11224 	 * register is set)
11225 	 */
11226 	if (sc->sc_phytype == WMPHY_82578) {
11227 		struct mii_softc *child;
11228 
11229 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
11230 		if ((child != NULL) && (child->mii_mpd_rev >= 1)
11231 		    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
11232 		    && ((val & (1 << 11)) != 0)) {
11233 			device_printf(dev, "XXX need workaround\n");
11234 		}
11235 	}
11236 
11237 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
11238 		rv = wm_gmii_mdic_writereg(dev, 1,
11239 		    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
11240 		if (rv != 0)
11241 			return rv;
11242 	}
11244 
11245 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
11246 }
11247 
11248 /*
11249  * wm_gmii_82580_readreg:	[mii interface function]
11250  *
11251  *	Read a PHY register on the 82580 and I350.
11252  * This could be handled by the PHY layer if we didn't have to lock the
11253  * resource ...
11254  */
11255 static int
11256 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
11257 {
11258 	struct wm_softc *sc = device_private(dev);
11259 	int rv;
11260 
11261 	if (sc->phy.acquire(sc) != 0) {
11262 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11263 		return -1;
11264 	}
11265 
11266 #ifdef DIAGNOSTIC
11267 	if (reg > MII_ADDRMASK) {
11268 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11269 		    __func__, sc->sc_phytype, reg);
11270 		reg &= MII_ADDRMASK;
11271 	}
11272 #endif
11273 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
11274 
11275 	sc->phy.release(sc);
11276 	return rv;
11277 }
11278 
11279 /*
11280  * wm_gmii_82580_writereg:	[mii interface function]
11281  *
11282  *	Write a PHY register on the 82580 and I350.
11283  * This could be handled by the PHY layer if we didn't have to lock the
11284  * resource ...
11285  */
11286 static int
11287 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
11288 {
11289 	struct wm_softc *sc = device_private(dev);
11290 	int rv;
11291 
11292 	if (sc->phy.acquire(sc) != 0) {
11293 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11294 		return -1;
11295 	}
11296 
11297 #ifdef DIAGNOSTIC
11298 	if (reg > MII_ADDRMASK) {
11299 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11300 		    __func__, sc->sc_phytype, reg);
11301 		reg &= MII_ADDRMASK;
11302 	}
11303 #endif
11304 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
11305 
11306 	sc->phy.release(sc);
11307 	return rv;
11308 }
11309 
11310 /*
11311  * wm_gmii_gs40g_readreg:	[mii interface function]
11312  *
11313  *	Read a PHY register on the I210 and I211.
11314  * This could be handled by the PHY layer if we didn't have to lock the
11315  * resource ...
11316  */
11317 static int
11318 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
11319 {
11320 	struct wm_softc *sc = device_private(dev);
11321 	int page, offset;
11322 	int rv;
11323 
11324 	/* Acquire semaphore */
11325 	if (sc->phy.acquire(sc)) {
11326 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11327 		return -1;
11328 	}
11329 
11330 	/* Page select */
11331 	page = reg >> GS40G_PAGE_SHIFT;
11332 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
11333 	if (rv != 0)
11334 		goto release;
11335 
11336 	/* Read reg */
11337 	offset = reg & GS40G_OFFSET_MASK;
11338 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
11339 
11340 release:
11341 	sc->phy.release(sc);
11342 	return rv;
11343 }
11344 
11345 /*
11346  * wm_gmii_gs40g_writereg:	[mii interface function]
11347  *
11348  *	Write a PHY register on the I210 and I211.
11349  * This could be handled by the PHY layer if we didn't have to lock the
11350  * resource ...
11351  */
11352 static int
11353 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
11354 {
11355 	struct wm_softc *sc = device_private(dev);
11356 	uint16_t page;
11357 	int offset, rv;
11358 
11359 	/* Acquire semaphore */
11360 	if (sc->phy.acquire(sc)) {
11361 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11362 		return -1;
11363 	}
11364 
11365 	/* Page select */
11366 	page = reg >> GS40G_PAGE_SHIFT;
11367 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
11368 	if (rv != 0)
11369 		goto release;
11370 
11371 	/* Write reg */
11372 	offset = reg & GS40G_OFFSET_MASK;
11373 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
11374 
11375 release:
11376 	/* Release semaphore */
11377 	sc->phy.release(sc);
11378 	return rv;
11379 }
11380 
11381 /*
11382  * wm_gmii_statchg:	[mii interface function]
11383  *
11384  *	Callback from MII layer when media changes.
11385  */
11386 static void
11387 wm_gmii_statchg(struct ifnet *ifp)
11388 {
11389 	struct wm_softc *sc = ifp->if_softc;
11390 	struct mii_data *mii = &sc->sc_mii;
11391 
11392 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
11393 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
11394 	sc->sc_fcrtl &= ~FCRTL_XONE;
11395 
11396 	/* Get flow control negotiation result. */
11397 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
11398 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
11399 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
11400 		mii->mii_media_active &= ~IFM_ETH_FMASK;
11401 	}
11402 
11403 	if (sc->sc_flowflags & IFM_FLOW) {
11404 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
11405 			sc->sc_ctrl |= CTRL_TFCE;
11406 			sc->sc_fcrtl |= FCRTL_XONE;
11407 		}
11408 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
11409 			sc->sc_ctrl |= CTRL_RFCE;
11410 	}
11411 
11412 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
11413 		DPRINTF(WM_DEBUG_LINK,
11414 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
11415 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
11416 	} else {
11417 		DPRINTF(WM_DEBUG_LINK,
11418 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
11419 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
11420 	}
11421 
11422 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11423 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
11424 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
11425 						 : WMREG_FCRTL, sc->sc_fcrtl);
11426 	if (sc->sc_type == WM_T_80003) {
11427 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
11428 		case IFM_1000_T:
11429 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
11430 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
11431 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
11432 			break;
11433 		default:
11434 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
11435 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
11436 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
11437 			break;
11438 		}
11439 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
11440 	}
11441 }
11442 
11443 /* kumeran related (80003, ICH* and PCH*) */
11444 
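/*
 * Kumeran registers are reached through the single KUMCTRLSTA window:
 * the register offset goes into the OFFSET field, KUMCTRLSTA_REN
 * requests a read, and the data travels through the low 16 bits of the
 * same register.
 */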
11445 /*
11446  * wm_kmrn_readreg:
11447  *
11448  *	Read a kumeran register
11449  */
11450 static int
11451 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
11452 {
11453 	int rv;
11454 
11455 	if (sc->sc_type == WM_T_80003)
11456 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
11457 	else
11458 		rv = sc->phy.acquire(sc);
11459 	if (rv != 0) {
11460 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
11461 		    __func__);
11462 		return rv;
11463 	}
11464 
11465 	rv = wm_kmrn_readreg_locked(sc, reg, val);
11466 
11467 	if (sc->sc_type == WM_T_80003)
11468 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
11469 	else
11470 		sc->phy.release(sc);
11471 
11472 	return rv;
11473 }
11474 
11475 static int
11476 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
11477 {
11478 
11479 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
11480 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
11481 	    KUMCTRLSTA_REN);
11482 	CSR_WRITE_FLUSH(sc);
11483 	delay(2);
11484 
11485 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
11486 
11487 	return 0;
11488 }
11489 
11490 /*
11491  * wm_kmrn_writereg:
11492  *
11493  *	Write a kumeran register
11494  */
11495 static int
11496 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
11497 {
11498 	int rv;
11499 
11500 	if (sc->sc_type == WM_T_80003)
11501 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
11502 	else
11503 		rv = sc->phy.acquire(sc);
11504 	if (rv != 0) {
11505 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
11506 		    __func__);
11507 		return rv;
11508 	}
11509 
11510 	rv = wm_kmrn_writereg_locked(sc, reg, val);
11511 
11512 	if (sc->sc_type == WM_T_80003)
11513 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
11514 	else
11515 		sc->phy.release(sc);
11516 
11517 	return rv;
11518 }
11519 
11520 static int
11521 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
11522 {
11523 
11524 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
11525 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
11526 
11527 	return 0;
11528 }
11529 
11530 /*
11531  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
11532  * This access method is different from IEEE MMD.
11533  */
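/*
 * The EMI space is indirect: the EMI register address is written to
 * I82579_EMI_ADDR and the data is then read from, or written to,
 * I82579_EMI_DATA (both at PHY address 2).
 */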
11534 static int
11535 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
11536 {
11537 	struct wm_softc *sc = device_private(dev);
11538 	int rv;
11539 
11540 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
11541 	if (rv != 0)
11542 		return rv;
11543 
11544 	if (rd)
11545 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
11546 	else
11547 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
11548 	return rv;
11549 }
11550 
11551 static int
11552 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
11553 {
11554 
11555 	return wm_access_emi_reg_locked(dev, reg, val, true);
11556 }
11557 
11558 static int
11559 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
11560 {
11561 
11562 	return wm_access_emi_reg_locked(dev, reg, &val, false);
11563 }
11564 
11565 /* SGMII related */
11566 
11567 /*
11568  * wm_sgmii_uses_mdio
11569  *
11570  * Check whether the transaction is to the internal PHY or the external
11571  * MDIO interface. Return true if it's MDIO.
11572  */
11573 static bool
11574 wm_sgmii_uses_mdio(struct wm_softc *sc)
11575 {
11576 	uint32_t reg;
11577 	bool ismdio = false;
11578 
11579 	switch (sc->sc_type) {
11580 	case WM_T_82575:
11581 	case WM_T_82576:
11582 		reg = CSR_READ(sc, WMREG_MDIC);
11583 		ismdio = ((reg & MDIC_DEST) != 0);
11584 		break;
11585 	case WM_T_82580:
11586 	case WM_T_I350:
11587 	case WM_T_I354:
11588 	case WM_T_I210:
11589 	case WM_T_I211:
11590 		reg = CSR_READ(sc, WMREG_MDICNFG);
11591 		ismdio = ((reg & MDICNFG_DEST) != 0);
11592 		break;
11593 	default:
11594 		break;
11595 	}
11596 
11597 	return ismdio;
11598 }
11599 
11600 /*
11601  * wm_sgmii_readreg:	[mii interface function]
11602  *
11603  *	Read a PHY register on the SGMII.
11604  * This could be handled by the PHY layer if we didn't have to lock the
11605  * resource ...
11606  */
11607 static int
11608 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
11609 {
11610 	struct wm_softc *sc = device_private(dev);
11611 	int rv;
11612 
11613 	if (sc->phy.acquire(sc)) {
11614 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11615 		return -1;
11616 	}
11617 
11618 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
11619 
11620 	sc->phy.release(sc);
11621 	return rv;
11622 }
11623 
11624 static int
11625 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
11626 {
11627 	struct wm_softc *sc = device_private(dev);
11628 	uint32_t i2ccmd;
11629 	int i, rv = 0;
11630 
11631 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
11632 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
11633 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
11634 
11635 	/* Poll the ready bit */
11636 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
11637 		delay(50);
11638 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
11639 		if (i2ccmd & I2CCMD_READY)
11640 			break;
11641 	}
11642 	if ((i2ccmd & I2CCMD_READY) == 0) {
11643 		device_printf(dev, "I2CCMD Read did not complete\n");
11644 		rv = ETIMEDOUT;
11645 	}
11646 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
11647 		device_printf(dev, "I2CCMD Error bit set\n");
11648 		rv = EIO;
11649 	}
11650 
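	/* Swap the data bytes back from I2C (MSB first) order. */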
11651 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
11652 
11653 	return rv;
11654 }
11655 
11656 /*
11657  * wm_sgmii_writereg:	[mii interface function]
11658  *
11659  *	Write a PHY register on the SGMII.
11660  * This could be handled by the PHY layer if we didn't have to lock the
11661  * resource ...
11662  */
11663 static int
11664 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
11665 {
11666 	struct wm_softc *sc = device_private(dev);
11667 	int rv;
11668 
11669 	if (sc->phy.acquire(sc) != 0) {
11670 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11671 		return -1;
11672 	}
11673 
11674 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
11675 
11676 	sc->phy.release(sc);
11677 
11678 	return rv;
11679 }
11680 
11681 static int
11682 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
11683 {
11684 	struct wm_softc *sc = device_private(dev);
11685 	uint32_t i2ccmd;
11686 	uint16_t swapdata;
11687 	int rv = 0;
11688 	int i;
11689 
11690 	/* Swap the data bytes for the I2C interface */
11691 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
11692 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
11693 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
11694 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
11695 
11696 	/* Poll the ready bit */
11697 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
11698 		delay(50);
11699 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
11700 		if (i2ccmd & I2CCMD_READY)
11701 			break;
11702 	}
11703 	if ((i2ccmd & I2CCMD_READY) == 0) {
11704 		device_printf(dev, "I2CCMD Write did not complete\n");
11705 		rv = ETIMEDOUT;
11706 	}
11707 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
11708 		device_printf(dev, "I2CCMD Error bit set\n");
11709 		rv = EIO;
11710 	}
11711 
11712 	return rv;
11713 }
11714 
11715 /* TBI related */
11716 
11717 static bool
11718 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
11719 {
11720 	bool sig;
11721 
11722 	sig = ctrl & CTRL_SWDPIN(1);
11723 
11724 	/*
11725 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
11726 	 * detect a signal, 1 if they don't.
11727 	 */
11728 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
11729 		sig = !sig;
11730 
11731 	return sig;
11732 }
11733 
11734 /*
11735  * wm_tbi_mediainit:
11736  *
11737  *	Initialize media for use on 1000BASE-X devices.
11738  */
11739 static void
11740 wm_tbi_mediainit(struct wm_softc *sc)
11741 {
11742 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
11743 	const char *sep = "";
11744 
11745 	if (sc->sc_type < WM_T_82543)
11746 		sc->sc_tipg = TIPG_WM_DFLT;
11747 	else
11748 		sc->sc_tipg = TIPG_LG_DFLT;
11749 
11750 	sc->sc_tbi_serdes_anegticks = 5;
11751 
11752 	/* Initialize our media structures */
11753 	sc->sc_mii.mii_ifp = ifp;
11754 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
11755 
11756 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
11757 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
11758 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
11759 		    wm_serdes_mediachange, wm_serdes_mediastatus);
11760 	else
11761 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
11762 		    wm_tbi_mediachange, wm_tbi_mediastatus);
11763 
11764 	/*
11765 	 * SWD Pins:
11766 	 *
11767 	 *	0 = Link LED (output)
11768 	 *	1 = Loss Of Signal (input)
11769 	 */
11770 	sc->sc_ctrl |= CTRL_SWDPIO(0);
11771 
11772 	/* XXX Perhaps this is only for TBI */
11773 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
11774 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
11775 
11776 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
11777 		sc->sc_ctrl &= ~CTRL_LRST;
11778 
11779 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11780 
11781 #define	ADD(ss, mm, dd)							\
11782 do {									\
11783 	aprint_normal("%s%s", sep, ss);					\
11784 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
11785 	sep = ", ";							\
11786 } while (/*CONSTCOND*/0)
11787 
11788 	aprint_normal_dev(sc->sc_dev, "");
11789 
11790 	if (sc->sc_type == WM_T_I354) {
11791 		uint32_t status;
11792 
11793 		status = CSR_READ(sc, WMREG_STATUS);
11794 		if (((status & STATUS_2P5_SKU) != 0)
11795 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
11796 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
11797 		} else
11798 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
11799 	} else if (sc->sc_type == WM_T_82545) {
11800 		/* Only 82545 is LX (XXX except SFP) */
11801 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
11802 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
11803 	} else {
11804 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
11805 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
11806 	}
11807 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
11808 	aprint_normal("\n");
11809 
11810 #undef ADD
11811 
11812 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
11813 }
11814 
11815 /*
11816  * wm_tbi_mediachange:	[ifmedia interface function]
11817  *
11818  *	Set hardware to newly-selected media on a 1000BASE-X device.
11819  */
11820 static int
11821 wm_tbi_mediachange(struct ifnet *ifp)
11822 {
11823 	struct wm_softc *sc = ifp->if_softc;
11824 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
11825 	uint32_t status, ctrl;
11826 	bool signal;
11827 	int i;
11828 
11829 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
11830 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
11831 		/* XXX need some work for >= 82571 and < 82575 */
11832 		if (sc->sc_type < WM_T_82575)
11833 			return 0;
11834 	}
11835 
11836 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
11837 	    || (sc->sc_type >= WM_T_82575))
11838 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
11839 
11840 	sc->sc_ctrl &= ~CTRL_LRST;
11841 	sc->sc_txcw = TXCW_ANE;
11842 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
11843 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
11844 	else if (ife->ifm_media & IFM_FDX)
11845 		sc->sc_txcw |= TXCW_FD;
11846 	else
11847 		sc->sc_txcw |= TXCW_HD;
11848 
11849 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
11850 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
11851 
11852 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
11853 		device_xname(sc->sc_dev), sc->sc_txcw));
11854 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
11855 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11856 	CSR_WRITE_FLUSH(sc);
11857 	delay(1000);
11858 
11859 	ctrl = CSR_READ(sc, WMREG_CTRL);
11860 	signal = wm_tbi_havesignal(sc, ctrl);
11861 
11862 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
11863 		signal));
11864 
11865 	if (signal) {
11866 		/* Have signal; wait for the link to come up. */
11867 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
11868 			delay(10000);
11869 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
11870 				break;
11871 		}
11872 
11873 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
11874 			device_xname(sc->sc_dev), i));
11875 
11876 		status = CSR_READ(sc, WMREG_STATUS);
11877 		DPRINTF(WM_DEBUG_LINK,
11878 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
11879 			device_xname(sc->sc_dev), status, STATUS_LU));
11880 		if (status & STATUS_LU) {
11881 			/* Link is up. */
11882 			DPRINTF(WM_DEBUG_LINK,
11883 			    ("%s: LINK: set media -> link up %s\n",
11884 				device_xname(sc->sc_dev),
11885 				(status & STATUS_FD) ? "FDX" : "HDX"));
11886 
11887 			/*
11888 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
11889 			 * automatically, so keep sc->sc_ctrl in sync.
11890 			 */
11891 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
11892 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
11893 			sc->sc_fcrtl &= ~FCRTL_XONE;
11894 			if (status & STATUS_FD)
11895 				sc->sc_tctl |=
11896 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
11897 			else
11898 				sc->sc_tctl |=
11899 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
11900 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
11901 				sc->sc_fcrtl |= FCRTL_XONE;
11902 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
11903 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
11904 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
11905 			sc->sc_tbi_linkup = 1;
11906 		} else {
11907 			if (i == WM_LINKUP_TIMEOUT)
11908 				wm_check_for_link(sc);
11909 			/* Link is down. */
11910 			DPRINTF(WM_DEBUG_LINK,
11911 			    ("%s: LINK: set media -> link down\n",
11912 				device_xname(sc->sc_dev)));
11913 			sc->sc_tbi_linkup = 0;
11914 		}
11915 	} else {
11916 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
11917 			device_xname(sc->sc_dev)));
11918 		sc->sc_tbi_linkup = 0;
11919 	}
11920 
11921 	wm_tbi_serdes_set_linkled(sc);
11922 
11923 	return 0;
11924 }
11925 
11926 /*
11927  * wm_tbi_mediastatus:	[ifmedia interface function]
11928  *
11929  *	Get the current interface media status on a 1000BASE-X device.
11930  */
11931 static void
11932 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
11933 {
11934 	struct wm_softc *sc = ifp->if_softc;
11935 	uint32_t ctrl, status;
11936 
11937 	ifmr->ifm_status = IFM_AVALID;
11938 	ifmr->ifm_active = IFM_ETHER;
11939 
11940 	status = CSR_READ(sc, WMREG_STATUS);
11941 	if ((status & STATUS_LU) == 0) {
11942 		ifmr->ifm_active |= IFM_NONE;
11943 		return;
11944 	}
11945 
11946 	ifmr->ifm_status |= IFM_ACTIVE;
11947 	/* Only 82545 is LX */
11948 	if (sc->sc_type == WM_T_82545)
11949 		ifmr->ifm_active |= IFM_1000_LX;
11950 	else
11951 		ifmr->ifm_active |= IFM_1000_SX;
11952 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
11953 		ifmr->ifm_active |= IFM_FDX;
11954 	else
11955 		ifmr->ifm_active |= IFM_HDX;
11956 	ctrl = CSR_READ(sc, WMREG_CTRL);
11957 	if (ctrl & CTRL_RFCE)
11958 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
11959 	if (ctrl & CTRL_TFCE)
11960 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
11961 }
11962 
11963 /* XXX TBI only */
11964 static int
11965 wm_check_for_link(struct wm_softc *sc)
11966 {
11967 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
11968 	uint32_t rxcw;
11969 	uint32_t ctrl;
11970 	uint32_t status;
11971 	bool signal;
11972 
11973 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
11974 		device_xname(sc->sc_dev), __func__));
11975 
11976 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
11977 		/* XXX need some work for >= 82571 */
11978 		if (sc->sc_type >= WM_T_82571) {
11979 			sc->sc_tbi_linkup = 1;
11980 			return 0;
11981 		}
11982 	}
11983 
11984 	rxcw = CSR_READ(sc, WMREG_RXCW);
11985 	ctrl = CSR_READ(sc, WMREG_CTRL);
11986 	status = CSR_READ(sc, WMREG_STATUS);
11987 	signal = wm_tbi_havesignal(sc, ctrl);
11988 
11989 	DPRINTF(WM_DEBUG_LINK,
11990 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
11991 		device_xname(sc->sc_dev), __func__, signal,
11992 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
11993 
11994 	/*
11995 	 * SWDPIN   LU RXCW
11996 	 *	0    0	  0
11997 	 *	0    0	  1	(should not happen)
11998 	 *	0    1	  0	(should not happen)
11999 	 *	0    1	  1	(should not happen)
12000 	 *	1    0	  0	Disable autonego and force linkup
12001 	 *	1    0	  1	got /C/ but not linkup yet
12002 	 *	1    1	  0	(linkup)
12003 	 *	1    1	  1	If IFM_AUTO, back to autonego
12004 	 *
12005 	 */
12006 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
12007 		DPRINTF(WM_DEBUG_LINK,
12008 		    ("%s: %s: force linkup and fullduplex\n",
12009 			device_xname(sc->sc_dev), __func__));
12010 		sc->sc_tbi_linkup = 0;
12011 		/* Disable auto-negotiation in the TXCW register */
12012 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
12013 
12014 		/*
12015 		 * Force link-up and also force full-duplex.
12016 		 *
12017 		 * NOTE: the hardware updates TFCE and RFCE in CTRL
12018 		 * automatically, so keep sc->sc_ctrl in sync.
12019 		 */
12020 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
12021 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12022 	} else if (((status & STATUS_LU) != 0)
12023 	    && ((rxcw & RXCW_C) != 0)
12024 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
12025 		sc->sc_tbi_linkup = 1;
12026 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
12027 			device_xname(sc->sc_dev),
12028 			__func__));
12029 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
12030 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
12031 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
12032 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/\n",
12033 			device_xname(sc->sc_dev), __func__));
12034 	} else {
12035 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: rxcw,ctrl,status = %08x,%08x,%08x\n",
12036 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
12037 			status));
12038 	}
12039 
12040 	return 0;
12041 }
12042 
12043 /*
12044  * wm_tbi_tick:
12045  *
12046  *	Check the link on TBI devices.
12047  *	This function acts as mii_tick().
12048  */
12049 static void
12050 wm_tbi_tick(struct wm_softc *sc)
12051 {
12052 	struct mii_data *mii = &sc->sc_mii;
12053 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
12054 	uint32_t status;
12055 
12056 	KASSERT(WM_CORE_LOCKED(sc));
12057 
12058 	status = CSR_READ(sc, WMREG_STATUS);
12059 
12060 	/* XXX is this needed? */
12061 	(void)CSR_READ(sc, WMREG_RXCW);
12062 	(void)CSR_READ(sc, WMREG_CTRL);
12063 
12064 	/* set link status */
12065 	if ((status & STATUS_LU) == 0) {
12066 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
12067 			device_xname(sc->sc_dev)));
12068 		sc->sc_tbi_linkup = 0;
12069 	} else if (sc->sc_tbi_linkup == 0) {
12070 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
12071 			device_xname(sc->sc_dev),
12072 			(status & STATUS_FD) ? "FDX" : "HDX"));
12073 		sc->sc_tbi_linkup = 1;
12074 		sc->sc_tbi_serdes_ticks = 0;
12075 	}
12076 
12077 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
12078 		goto setled;
12079 
12080 	if ((status & STATUS_LU) == 0) {
12081 		sc->sc_tbi_linkup = 0;
12082 		/* If the timer expired, retry autonegotiation */
12083 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
12084 		    && (++sc->sc_tbi_serdes_ticks
12085 			>= sc->sc_tbi_serdes_anegticks)) {
12086 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
12087 			sc->sc_tbi_serdes_ticks = 0;
12088 			/*
12089 			 * Reset the link, and let autonegotiation do
12090 			 * its thing
12091 			 */
12092 			sc->sc_ctrl |= CTRL_LRST;
12093 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12094 			CSR_WRITE_FLUSH(sc);
12095 			delay(1000);
12096 			sc->sc_ctrl &= ~CTRL_LRST;
12097 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12098 			CSR_WRITE_FLUSH(sc);
12099 			delay(1000);
12100 			CSR_WRITE(sc, WMREG_TXCW,
12101 			    sc->sc_txcw & ~TXCW_ANE);
12102 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
12103 		}
12104 	}
12105 
12106 setled:
12107 	wm_tbi_serdes_set_linkled(sc);
12108 }
12109 
12110 /* SERDES related */
12111 static void
12112 wm_serdes_power_up_link_82575(struct wm_softc *sc)
12113 {
12114 	uint32_t reg;
12115 
12116 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
12117 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
12118 		return;
12119 
12120 	reg = CSR_READ(sc, WMREG_PCS_CFG);
12121 	reg |= PCS_CFG_PCS_EN;
12122 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
12123 
12124 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
12125 	reg &= ~CTRL_EXT_SWDPIN(3);
12126 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12127 	CSR_WRITE_FLUSH(sc);
12128 }
12129 
12130 static int
12131 wm_serdes_mediachange(struct ifnet *ifp)
12132 {
12133 	struct wm_softc *sc = ifp->if_softc;
12134 	bool pcs_autoneg = true; /* XXX */
12135 	uint32_t ctrl_ext, pcs_lctl, reg;
12136 
12137 	/* XXX Currently, this function is not called on 8257[12] */
12138 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
12139 	    || (sc->sc_type >= WM_T_82575))
12140 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
12141 
12142 	wm_serdes_power_up_link_82575(sc);
12143 
12144 	sc->sc_ctrl |= CTRL_SLU;
12145 
12146 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
12147 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
12148 
12149 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
12150 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
12151 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
12152 	case CTRL_EXT_LINK_MODE_SGMII:
12153 		pcs_autoneg = true;
12154 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
12155 		break;
12156 	case CTRL_EXT_LINK_MODE_1000KX:
12157 		pcs_autoneg = false;
12158 		/* FALLTHROUGH */
12159 	default:
12160 		if ((sc->sc_type == WM_T_82575)
12161 		    || (sc->sc_type == WM_T_82576)) {
12162 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
12163 				pcs_autoneg = false;
12164 		}
12165 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
12166 		    | CTRL_FRCFDX;
12167 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
12168 	}
12169 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12170 
12171 	if (pcs_autoneg) {
12172 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
12173 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
12174 
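		/* Advertise both symmetric and asymmetric pause. */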
12175 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
12176 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
12177 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
12178 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
12179 	} else
12180 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
12181 
12182 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
12183 
12185 	return 0;
12186 }
12187 
12188 static void
12189 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
12190 {
12191 	struct wm_softc *sc = ifp->if_softc;
12192 	struct mii_data *mii = &sc->sc_mii;
12193 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
12194 	uint32_t pcs_adv, pcs_lpab, reg;
12195 
12196 	ifmr->ifm_status = IFM_AVALID;
12197 	ifmr->ifm_active = IFM_ETHER;
12198 
12199 	/* Check PCS */
12200 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
12201 	if ((reg & PCS_LSTS_LINKOK) == 0) {
12202 		ifmr->ifm_active |= IFM_NONE;
12203 		sc->sc_tbi_linkup = 0;
12204 		goto setled;
12205 	}
12206 
12207 	sc->sc_tbi_linkup = 1;
12208 	ifmr->ifm_status |= IFM_ACTIVE;
12209 	if (sc->sc_type == WM_T_I354) {
12210 		uint32_t status;
12211 
12212 		status = CSR_READ(sc, WMREG_STATUS);
12213 		if (((status & STATUS_2P5_SKU) != 0)
12214 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
12215 			ifmr->ifm_active |= IFM_2500_KX;
12216 		} else
12217 			ifmr->ifm_active |= IFM_1000_KX;
12218 	} else {
12219 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
12220 		case PCS_LSTS_SPEED_10:
12221 			ifmr->ifm_active |= IFM_10_T; /* XXX */
12222 			break;
12223 		case PCS_LSTS_SPEED_100:
12224 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
12225 			break;
12226 		case PCS_LSTS_SPEED_1000:
12227 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
12228 			break;
12229 		default:
12230 			device_printf(sc->sc_dev, "Unknown speed\n");
12231 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
12232 			break;
12233 		}
12234 	}
12235 	if ((reg & PCS_LSTS_FDX) != 0)
12236 		ifmr->ifm_active |= IFM_FDX;
12237 	else
12238 		ifmr->ifm_active |= IFM_HDX;
12239 	mii->mii_media_active &= ~IFM_ETH_FMASK;
12240 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
12241 		/* Check flow */
12242 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
12243 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
12244 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
12245 			goto setled;
12246 		}
12247 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
12248 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
12249 		DPRINTF(WM_DEBUG_LINK,
12250 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
12251 		if ((pcs_adv & TXCW_SYM_PAUSE)
12252 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
12253 			mii->mii_media_active |= IFM_FLOW
12254 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
12255 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
12256 		    && (pcs_adv & TXCW_ASYM_PAUSE)
12257 		    && (pcs_lpab & TXCW_SYM_PAUSE)
12258 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
12259 			mii->mii_media_active |= IFM_FLOW
12260 			    | IFM_ETH_TXPAUSE;
12261 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
12262 		    && (pcs_adv & TXCW_ASYM_PAUSE)
12263 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
12264 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
12265 			mii->mii_media_active |= IFM_FLOW
12266 			    | IFM_ETH_RXPAUSE;
12267 		}
12268 	}
12269 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
12270 	    | (mii->mii_media_active & IFM_ETH_FMASK);
12271 setled:
12272 	wm_tbi_serdes_set_linkled(sc);
12273 }
12274 
12275 /*
12276  * wm_serdes_tick:
12277  *
12278  *	Check the link on serdes devices.
12279  */
12280 static void
12281 wm_serdes_tick(struct wm_softc *sc)
12282 {
12283 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
12284 	struct mii_data *mii = &sc->sc_mii;
12285 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
12286 	uint32_t reg;
12287 
12288 	KASSERT(WM_CORE_LOCKED(sc));
12289 
12290 	mii->mii_media_status = IFM_AVALID;
12291 	mii->mii_media_active = IFM_ETHER;
12292 
12293 	/* Check PCS */
12294 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
12295 	if ((reg & PCS_LSTS_LINKOK) != 0) {
12296 		mii->mii_media_status |= IFM_ACTIVE;
12297 		sc->sc_tbi_linkup = 1;
12298 		sc->sc_tbi_serdes_ticks = 0;
12299 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
12300 		if ((reg & PCS_LSTS_FDX) != 0)
12301 			mii->mii_media_active |= IFM_FDX;
12302 		else
12303 			mii->mii_media_active |= IFM_HDX;
12304 	} else {
12305 		mii->mii_media_status |= IFM_NONE;
12306 		sc->sc_tbi_linkup = 0;
12307 		/* If the timer expired, retry autonegotiation */
12308 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
12309 		    && (++sc->sc_tbi_serdes_ticks
12310 			>= sc->sc_tbi_serdes_anegticks)) {
12311 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
12312 			sc->sc_tbi_serdes_ticks = 0;
12313 			/* XXX */
12314 			wm_serdes_mediachange(ifp);
12315 		}
12316 	}
12317 
12318 	wm_tbi_serdes_set_linkled(sc);
12319 }
12320 
12321 /* SFP related */
12322 
12323 static int
12324 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
12325 {
12326 	uint32_t i2ccmd;
12327 	int i;
12328 
12329 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
12330 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
12331 
12332 	/* Poll the ready bit */
12333 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
12334 		delay(50);
12335 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
12336 		if (i2ccmd & I2CCMD_READY)
12337 			break;
12338 	}
12339 	if ((i2ccmd & I2CCMD_READY) == 0)
12340 		return -1;
12341 	if ((i2ccmd & I2CCMD_ERROR) != 0)
12342 		return -1;
12343 
12344 	*data = i2ccmd & 0x00ff;
12345 
12346 	return 0;
12347 }
12348 
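/*
 * wm_sfp_get_media_type:
 *
 *	Identify the media type from the SFP module's serial ID EEPROM.
 *	The offsets used below are assumed to follow SFF-8472: byte 0
 *	(SFF_SFP_ID_OFF) is the physical device identifier and byte 6
 *	(SFF_SFP_ETH_FLAGS_OFF) holds the Ethernet compliance code bits.
 */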
12349 static uint32_t
12350 wm_sfp_get_media_type(struct wm_softc *sc)
12351 {
12352 	uint32_t ctrl_ext;
12353 	uint8_t val = 0;
12354 	int timeout = 3;
12355 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
12356 	int rv = -1;
12357 
12358 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
12359 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
12360 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
12361 	CSR_WRITE_FLUSH(sc);
12362 
12363 	/* Read SFP module data */
12364 	while (timeout) {
12365 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
12366 		if (rv == 0)
12367 			break;
12368 		delay(100*1000); /* XXX too big */
12369 		timeout--;
12370 	}
12371 	if (rv != 0)
12372 		goto out;
12373 	switch (val) {
12374 	case SFF_SFP_ID_SFF:
12375 		aprint_normal_dev(sc->sc_dev,
12376 		    "Module/Connector soldered to board\n");
12377 		break;
12378 	case SFF_SFP_ID_SFP:
12379 		aprint_normal_dev(sc->sc_dev, "SFP\n");
12380 		break;
12381 	case SFF_SFP_ID_UNKNOWN:
12382 		goto out;
12383 	default:
12384 		break;
12385 	}
12386 
12387 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
12388 	if (rv != 0) {
12389 		goto out;
12390 	}
12391 
12392 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
12393 		mediatype = WM_MEDIATYPE_SERDES;
12394 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
12395 		sc->sc_flags |= WM_F_SGMII;
12396 		mediatype = WM_MEDIATYPE_COPPER;
12397 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
12398 		sc->sc_flags |= WM_F_SGMII;
12399 		mediatype = WM_MEDIATYPE_SERDES;
12400 	}
12401 
12402 out:
12403 	/* Restore I2C interface setting */
12404 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
12405 
12406 	return mediatype;
12407 }
12408 
12409 /*
12410  * NVM related.
12411  * Microwire, SPI (w/wo EERD) and Flash.
12412  */
12413 
12414 /* Both spi and uwire */
12415 
12416 /*
12417  * wm_eeprom_sendbits:
12418  *
12419  *	Send a series of bits to the EEPROM.
12420  */
12421 static void
12422 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
12423 {
12424 	uint32_t reg;
12425 	int x;
12426 
12427 	reg = CSR_READ(sc, WMREG_EECD);
12428 
12429 	for (x = nbits; x > 0; x--) {
12430 		if (bits & (1U << (x - 1)))
12431 			reg |= EECD_DI;
12432 		else
12433 			reg &= ~EECD_DI;
12434 		CSR_WRITE(sc, WMREG_EECD, reg);
12435 		CSR_WRITE_FLUSH(sc);
12436 		delay(2);
12437 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
12438 		CSR_WRITE_FLUSH(sc);
12439 		delay(2);
12440 		CSR_WRITE(sc, WMREG_EECD, reg);
12441 		CSR_WRITE_FLUSH(sc);
12442 		delay(2);
12443 	}
12444 }
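
/*
 * The loop above bit-bangs one bit per iteration: DI is set up while SK
 * is low, SK is pulsed high and back low, and the EEPROM samples DI on
 * the rising edge.  The 2us delays are presumably the part's minimum
 * clock half-period.
 */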
12445 
12446 /*
12447  * wm_eeprom_recvbits:
12448  *
12449  *	Receive a series of bits from the EEPROM.
12450  */
12451 static void
12452 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
12453 {
12454 	uint32_t reg, val;
12455 	int x;
12456 
12457 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
12458 
12459 	val = 0;
12460 	for (x = nbits; x > 0; x--) {
12461 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
12462 		CSR_WRITE_FLUSH(sc);
12463 		delay(2);
12464 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
12465 			val |= (1U << (x - 1));
12466 		CSR_WRITE(sc, WMREG_EECD, reg);
12467 		CSR_WRITE_FLUSH(sc);
12468 		delay(2);
12469 	}
12470 	*valp = val;
12471 }
12472 
12473 /* Microwire */
12474 
12475 /*
12476  * wm_nvm_read_uwire:
12477  *
12478  *	Read a word from the EEPROM using the MicroWire protocol.
12479  */
12480 static int
12481 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
12482 {
12483 	uint32_t reg, val;
12484 	int i;
12485 
12486 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
12487 		device_xname(sc->sc_dev), __func__));
12488 
12489 	if (sc->nvm.acquire(sc) != 0)
12490 		return -1;
12491 
12492 	for (i = 0; i < wordcnt; i++) {
12493 		/* Clear SK and DI. */
12494 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
12495 		CSR_WRITE(sc, WMREG_EECD, reg);
12496 
12497 		/*
12498 		 * XXX: workaround for a bug in qemu-0.12.x and earlier,
12499 		 * and in Xen.
12500 		 *
12501 		 * We use this workaround only for 82540 because qemu's
12502 		 * e1000 emulation acts as an 82540.
12503 		 */
12504 		if (sc->sc_type == WM_T_82540) {
12505 			reg |= EECD_SK;
12506 			CSR_WRITE(sc, WMREG_EECD, reg);
12507 			reg &= ~EECD_SK;
12508 			CSR_WRITE(sc, WMREG_EECD, reg);
12509 			CSR_WRITE_FLUSH(sc);
12510 			delay(2);
12511 		}
12512 		/* XXX: end of workaround */
12513 
12514 		/* Set CHIP SELECT. */
12515 		reg |= EECD_CS;
12516 		CSR_WRITE(sc, WMREG_EECD, reg);
12517 		CSR_WRITE_FLUSH(sc);
12518 		delay(2);
12519 
12520 		/* Shift in the READ command. */
12521 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
12522 
12523 		/* Shift in address. */
12524 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
12525 
12526 		/* Shift out the data. */
12527 		wm_eeprom_recvbits(sc, &val, 16);
12528 		data[i] = val & 0xffff;
12529 
12530 		/* Clear CHIP SELECT. */
12531 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
12532 		CSR_WRITE(sc, WMREG_EECD, reg);
12533 		CSR_WRITE_FLUSH(sc);
12534 		delay(2);
12535 	}
12536 
12537 	sc->nvm.release(sc);
12538 	return 0;
12539 }
12540 
12541 /* SPI */
12542 
12543 /*
12544  * Set SPI and FLASH related information from the EECD register.
12545  * For 82541 and 82547, the word size is taken from EEPROM.
12546  */
12547 static int
12548 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
12549 {
12550 	int size;
12551 	uint32_t reg;
12552 	uint16_t data;
12553 
12554 	reg = CSR_READ(sc, WMREG_EECD);
12555 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
12556 
12557 	/* Read the size of NVM from EECD by default */
12558 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
12559 	switch (sc->sc_type) {
12560 	case WM_T_82541:
12561 	case WM_T_82541_2:
12562 	case WM_T_82547:
12563 	case WM_T_82547_2:
12564 		/* Set dummy value to access EEPROM */
12565 		sc->sc_nvm_wordsize = 64;
12566 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
12567 			aprint_error_dev(sc->sc_dev,
12568 			    "%s: failed to read EEPROM size\n", __func__);
12569 		}
12570 		reg = data;
12571 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
12572 		if (size == 0)
12573 			size = 6; /* 64 word size */
12574 		else
12575 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
12576 		break;
12577 	case WM_T_80003:
12578 	case WM_T_82571:
12579 	case WM_T_82572:
12580 	case WM_T_82573: /* SPI case */
12581 	case WM_T_82574: /* SPI case */
12582 	case WM_T_82583: /* SPI case */
12583 		size += NVM_WORD_SIZE_BASE_SHIFT;
12584 		if (size > 14)
12585 			size = 14;
12586 		break;
12587 	case WM_T_82575:
12588 	case WM_T_82576:
12589 	case WM_T_82580:
12590 	case WM_T_I350:
12591 	case WM_T_I354:
12592 	case WM_T_I210:
12593 	case WM_T_I211:
12594 		size += NVM_WORD_SIZE_BASE_SHIFT;
12595 		if (size > 15)
12596 			size = 15;
12597 		break;
12598 	default:
12599 		aprint_error_dev(sc->sc_dev,
12600 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
12601 		return -1;
12603 	}
12604 
12605 	sc->sc_nvm_wordsize = 1 << size;
12606 
12607 	return 0;
12608 }
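
/*
 * Worked example for the sizing above, assuming NVM_WORD_SIZE_BASE_SHIFT
 * is 6 as in the Intel shared code: an EECD_EE_SIZE_EX field of 2 on an
 * 82571 gives size = 2 + 6 = 8, so sc_nvm_wordsize = 1 << 8 = 256 words
 * (512 bytes).
 */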
12609 
12610 /*
12611  * wm_nvm_ready_spi:
12612  *
12613  *	Wait for a SPI EEPROM to be ready for commands.
12614  */
12615 static int
12616 wm_nvm_ready_spi(struct wm_softc *sc)
12617 {
12618 	uint32_t val;
12619 	int usec;
12620 
12621 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
12622 		device_xname(sc->sc_dev), __func__));
12623 
12624 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
12625 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
12626 		wm_eeprom_recvbits(sc, &val, 8);
12627 		if ((val & SPI_SR_RDY) == 0)
12628 			break;
12629 	}
12630 	if (usec >= SPI_MAX_RETRIES) {
12631 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
12632 		return -1;
12633 	}
12634 	return 0;
12635 }
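
/*
 * SPI_OPC_RDSR reads the EEPROM's status register; SPI_SR_RDY is assumed
 * to be the write-in-progress flag found in bit 0 of typical SPI EEPROM
 * parts, so the device is ready for a new command once it reads as 0.
 */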
12636 
12637 /*
12638  * wm_nvm_read_spi:
12639  *
12640  *	Read a word from the EEPROM using the SPI protocol.
12641  */
12642 static int
12643 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
12644 {
12645 	uint32_t reg, val;
12646 	int i;
12647 	uint8_t opc;
12648 	int rv = 0;
12649 
12650 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
12651 		device_xname(sc->sc_dev), __func__));
12652 
12653 	if (sc->nvm.acquire(sc) != 0)
12654 		return -1;
12655 
12656 	/* Clear SK and CS. */
12657 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
12658 	CSR_WRITE(sc, WMREG_EECD, reg);
12659 	CSR_WRITE_FLUSH(sc);
12660 	delay(2);
12661 
12662 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
12663 		goto out;
12664 
12665 	/* Toggle CS to flush commands. */
12666 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
12667 	CSR_WRITE_FLUSH(sc);
12668 	delay(2);
12669 	CSR_WRITE(sc, WMREG_EECD, reg);
12670 	CSR_WRITE_FLUSH(sc);
12671 	delay(2);
12672 
12673 	opc = SPI_OPC_READ;
12674 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
12675 		opc |= SPI_OPC_A8;
12676 
12677 	wm_eeprom_sendbits(sc, opc, 8);
12678 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
12679 
12680 	for (i = 0; i < wordcnt; i++) {
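		/*
		 * Words arrive byte-swapped relative to the NVM word
		 * layout, so swap the two bytes back after receiving.
		 */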
12681 		wm_eeprom_recvbits(sc, &val, 16);
12682 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
12683 	}
12684 
12685 	/* Raise CS and clear SK. */
12686 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
12687 	CSR_WRITE(sc, WMREG_EECD, reg);
12688 	CSR_WRITE_FLUSH(sc);
12689 	delay(2);
12690 
12691 out:
12692 	sc->nvm.release(sc);
12693 	return rv;
12694 }
12695 
12696 /* Reading via EERD */
12697 
12698 static int
12699 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
12700 {
12701 	uint32_t attempts = 100000;
12702 	uint32_t i, reg = 0;
12703 	int32_t done = -1;
12704 
12705 	for (i = 0; i < attempts; i++) {
12706 		reg = CSR_READ(sc, rw);
12707 
12708 		if (reg & EERD_DONE) {
12709 			done = 0;
12710 			break;
12711 		}
12712 		delay(5);
12713 	}
12714 
12715 	return done;
12716 }
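
/*
 * With 100000 attempts and a 5us delay per attempt, the poll above gives
 * an EERD/EEWR operation roughly half a second to complete.
 */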
12717 
12718 static int
12719 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
12720 {
12721 	int i, eerd = 0;
12722 	int rv = 0;
12723 
12724 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
12725 		device_xname(sc->sc_dev), __func__));
12726 
12727 	if (sc->nvm.acquire(sc) != 0)
12728 		return -1;
12729 
12730 	for (i = 0; i < wordcnt; i++) {
12731 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
12732 		CSR_WRITE(sc, WMREG_EERD, eerd);
12733 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
12734 		if (rv != 0) {
12735 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
12736 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
12737 			break;
12738 		}
12739 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
12740 	}
12741 
12742 	sc->nvm.release(sc);
12743 	return rv;
12744 }
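
/*
 * A rough sketch of the EERD handshake used above: software writes the
 * word address plus EERD_START, hardware sets EERD_DONE once the word is
 * available, and the data appears in the upper bits of the same register
 * (EERD_DATA_SHIFT).  The exact field positions differ between device
 * generations, which is why the shifts come from the register header
 * rather than being hard-coded here.
 */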
12745 
12746 /* Flash */
12747 
12748 static int
12749 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
12750 {
12751 	uint32_t eecd;
12752 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
12753 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
12754 	uint32_t nvm_dword = 0;
12755 	uint8_t sig_byte = 0;
12756 	int rv;
12757 
12758 	switch (sc->sc_type) {
12759 	case WM_T_PCH_SPT:
12760 	case WM_T_PCH_CNP:
12761 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
12762 		act_offset = ICH_NVM_SIG_WORD * 2;
12763 
12764 		/* Set bank to 0 in case flash read fails. */
12765 		*bank = 0;
12766 
12767 		/* Check bank 0 */
12768 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
12769 		if (rv != 0)
12770 			return rv;
12771 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
12772 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
12773 			*bank = 0;
12774 			return 0;
12775 		}
12776 
12777 		/* Check bank 1 */
		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
		    &nvm_dword);
		if (rv != 0)
			return rv;
		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
12781 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
12782 			*bank = 1;
12783 			return 0;
12784 		}
12785 		aprint_error_dev(sc->sc_dev,
12786 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
12787 		return -1;
12788 	case WM_T_ICH8:
12789 	case WM_T_ICH9:
12790 		eecd = CSR_READ(sc, WMREG_EECD);
12791 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
12792 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
12793 			return 0;
12794 		}
12795 		/* FALLTHROUGH */
12796 	default:
12797 		/* Default to 0 */
12798 		*bank = 0;
12799 
12800 		/* Check bank 0 */
12801 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
12802 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
12803 			*bank = 0;
12804 			return 0;
12805 		}
12806 
12807 		/* Check bank 1 */
12808 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
12809 		    &sig_byte);
12810 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
12811 			*bank = 1;
12812 			return 0;
12813 		}
12814 	}
12815 
12816 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
12817 		device_xname(sc->sc_dev)));
12818 	return -1;
12819 }
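
/*
 * The bank detection above relies on a signature byte kept in the high
 * byte of word ICH_NVM_SIG_WORD of each bank: a bank is valid when the
 * bits selected by ICH_NVM_VALID_SIG_MASK match ICH_NVM_SIG_VALUE.  On
 * SPT/CNP the flash can only be read as dwords, hence the separate path.
 */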
12820 
12821 /******************************************************************************
12822  * This function does initial flash setup so that a new read/write/erase cycle
12823  * can be started.
12824  *
12825  * sc - The pointer to the hw structure
12826  ****************************************************************************/
12827 static int32_t
12828 wm_ich8_cycle_init(struct wm_softc *sc)
12829 {
12830 	uint16_t hsfsts;
12831 	int32_t error = 1;
12832 	int32_t i     = 0;
12833 
12834 	if (sc->sc_type >= WM_T_PCH_SPT)
12835 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
12836 	else
12837 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
12838 
12839 	/* Check the Flash Descriptor Valid bit in the HW status register */
12840 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
12841 		return error;
12842 
12843 	/* Clear FCERR and DAEL in the HW status register by writing 1s */
12845 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
12846 
12847 	if (sc->sc_type >= WM_T_PCH_SPT)
12848 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
12849 	else
12850 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
12851 
12852 	/*
12853 	 * Either we should have a hardware SPI cycle-in-progress bit to
12854 	 * check against in order to start a new cycle, or the FDONE bit
12855 	 * should be changed in the hardware so that it reads as 1 after a
12856 	 * hardware reset, which could then be used to tell whether a cycle
12857 	 * is in progress or has completed.  We should also have a software
12858 	 * semaphore mechanism guarding FDONE or the cycle-in-progress bit
12859 	 * so that two threads' accesses to those bits are serialized and
12860 	 * two threads cannot start a cycle at the same time.
12861 	 */
12862 
12863 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
12864 		/*
12865 		 * There is no cycle running at present, so we can start a
12866 		 * cycle
12867 		 */
12868 
12869 		/* Begin by setting Flash Cycle Done. */
12870 		hsfsts |= HSFSTS_DONE;
12871 		if (sc->sc_type >= WM_T_PCH_SPT)
12872 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
12873 			    hsfsts & 0xffffUL);
12874 		else
12875 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
12876 		error = 0;
12877 	} else {
12878 		/*
12879 		 * Otherwise poll for some time so the current cycle has a
12880 		 * chance to end before giving up.
12881 		 */
12882 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
12883 			if (sc->sc_type >= WM_T_PCH_SPT)
12884 				hsfsts = ICH8_FLASH_READ32(sc,
12885 				    ICH_FLASH_HSFSTS) & 0xffffUL;
12886 			else
12887 				hsfsts = ICH8_FLASH_READ16(sc,
12888 				    ICH_FLASH_HSFSTS);
12889 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
12890 				error = 0;
12891 				break;
12892 			}
12893 			delay(1);
12894 		}
12895 		if (error == 0) {
12896 			/*
12897 			 * The previous cycle ended within the timeout;
12898 			 * now set the Flash Cycle Done.
12899 			 */
12900 			hsfsts |= HSFSTS_DONE;
12901 			if (sc->sc_type >= WM_T_PCH_SPT)
12902 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
12903 				    hsfsts & 0xffffUL);
12904 			else
12905 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
12906 				    hsfsts);
12907 		}
12908 	}
12909 	return error;
12910 }
12911 
12912 /******************************************************************************
12913  * This function starts a flash cycle and waits for its completion
12914  *
12915  * sc - The pointer to the hw structure
12916  ****************************************************************************/
12917 static int32_t
12918 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
12919 {
12920 	uint16_t hsflctl;
12921 	uint16_t hsfsts;
12922 	int32_t error = 1;
12923 	uint32_t i = 0;
12924 
12925 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
12926 	if (sc->sc_type >= WM_T_PCH_SPT)
12927 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
12928 	else
12929 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
12930 	hsflctl |= HSFCTL_GO;
12931 	if (sc->sc_type >= WM_T_PCH_SPT)
12932 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
12933 		    (uint32_t)hsflctl << 16);
12934 	else
12935 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
12936 
12937 	/* Wait till FDONE bit is set to 1 */
12938 	do {
12939 		if (sc->sc_type >= WM_T_PCH_SPT)
12940 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
12941 			    & 0xffffUL;
12942 		else
12943 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
12944 		if (hsfsts & HSFSTS_DONE)
12945 			break;
12946 		delay(1);
12947 		i++;
12948 	} while (i < timeout);
12949 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
12950 		error = 0;
12951 
12952 	return error;
12953 }
12954 
12955 /******************************************************************************
12956  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
12957  *
12958  * sc - The pointer to the hw structure
12959  * index - The index of the byte or word to read.
12960  * size - Size of data to read: 1=byte, 2=word, 4=dword
12961  * data - Pointer to the word to store the value read.
12962  *****************************************************************************/
12963 static int32_t
12964 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
12965     uint32_t size, uint32_t *data)
12966 {
12967 	uint16_t hsfsts;
12968 	uint16_t hsflctl;
12969 	uint32_t flash_linear_address;
12970 	uint32_t flash_data = 0;
12971 	int32_t error = 1;
12972 	int32_t count = 0;
12973 
12974 	if (size < 1 || size > 4 || data == NULL ||
12975 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
12976 		return error;
12977 
12978 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
12979 	    sc->sc_ich8_flash_base;
12980 
12981 	do {
12982 		delay(1);
12983 		/* Steps */
12984 		error = wm_ich8_cycle_init(sc);
12985 		if (error)
12986 			break;
12987 
12988 		if (sc->sc_type >= WM_T_PCH_SPT)
12989 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
12990 			    >> 16;
12991 		else
12992 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
12993 		/* The byte count field encodes size - 1 (0 = 1 byte ... 3 = 4 bytes). */
12994 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
12995 		    & HSFCTL_BCOUNT_MASK;
12996 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
12997 		if (sc->sc_type >= WM_T_PCH_SPT) {
12998 			/*
12999 			 * In SPT, this register is in LAN memory space, not
13000 			 * flash.  Therefore, only 32-bit access is supported.
13001 			 */
13002 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
13003 			    (uint32_t)hsflctl << 16);
13004 		} else
13005 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
13006 
13007 		/*
13008 		 * Write the last 24 bits of index into Flash Linear address
13009 		 * field in Flash Address
13010 		 */
13011 		/* TODO: maybe check the index against the size of the flash */
13012 
13013 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
13014 
13015 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
13016 
13017 		/*
13018 		 * If FCERR is set, clear it and retry the whole sequence
13019 		 * up to ICH_FLASH_CYCLE_REPEAT_COUNT more times; otherwise
13020 		 * read the result from Flash Data0, least significant byte
13021 		 * first.
13022 		 */
13023 		if (error == 0) {
13024 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
13025 			if (size == 1)
13026 				*data = (uint8_t)(flash_data & 0x000000FF);
13027 			else if (size == 2)
13028 				*data = (uint16_t)(flash_data & 0x0000FFFF);
13029 			else if (size == 4)
13030 				*data = (uint32_t)flash_data;
13031 			break;
13032 		} else {
13033 			/*
13034 			 * If we've gotten here, then things are probably
13035 			 * completely hosed, but if the error condition is
13036 			 * detected, it won't hurt to give it another try...
13037 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
13038 			 */
13039 			if (sc->sc_type >= WM_T_PCH_SPT)
13040 				hsfsts = ICH8_FLASH_READ32(sc,
13041 				    ICH_FLASH_HSFSTS) & 0xffffUL;
13042 			else
13043 				hsfsts = ICH8_FLASH_READ16(sc,
13044 				    ICH_FLASH_HSFSTS);
13045 
13046 			if (hsfsts & HSFSTS_ERR) {
13047 				/* Repeat for some time before giving up. */
13048 				continue;
13049 			} else if ((hsfsts & HSFSTS_DONE) == 0)
13050 				break;
13051 		}
13052 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
13053 
13054 	return error;
13055 }
13056 
13057 /******************************************************************************
13058  * Reads a single byte from the NVM using the ICH8 flash access registers.
13059  *
13060  * sc - pointer to wm_hw structure
13061  * index - The index of the byte to read.
13062  * data - Pointer to a byte to store the value read.
13063  *****************************************************************************/
13064 static int32_t
13065 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
13066 {
13067 	int32_t status;
13068 	uint32_t word = 0;
13069 
13070 	status = wm_read_ich8_data(sc, index, 1, &word);
13071 	if (status == 0)
13072 		*data = (uint8_t)word;
13073 	else
13074 		*data = 0;
13075 
13076 	return status;
13077 }
13078 
13079 /******************************************************************************
13080  * Reads a word from the NVM using the ICH8 flash access registers.
13081  *
13082  * sc - pointer to wm_hw structure
13083  * index - The starting byte index of the word to read.
13084  * data - Pointer to a word to store the value read.
13085  *****************************************************************************/
13086 static int32_t
13087 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
13088 {
13089 	int32_t status;
13090 	uint32_t word = 0;
13091 
13092 	status = wm_read_ich8_data(sc, index, 2, &word);
13093 	if (status == 0)
13094 		*data = (uint16_t)word;
13095 	else
13096 		*data = 0;
13097 
13098 	return status;
13099 }
13100 
13101 /******************************************************************************
13102  * Reads a dword from the NVM using the ICH8 flash access registers.
13103  *
13104  * sc - pointer to wm_hw structure
13105  * index - The starting byte index of the word to read.
13106  * data - Pointer to a word to store the value read.
13107  *****************************************************************************/
13108 static int32_t
13109 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
13110 {
13111 	int32_t status;
13112 
13113 	status = wm_read_ich8_data(sc, index, 4, data);
13114 	return status;
13115 }
13116 
13117 /******************************************************************************
13118  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
13119  * register.
13120  *
13121  * sc - Struct containing variables accessed by shared code
13122  * offset - offset of word in the EEPROM to read
13123  * data - word read from the EEPROM
13124  * words - number of words to read
13125  *****************************************************************************/
13126 static int
13127 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
13128 {
13129 	int32_t	 rv = 0;
13130 	uint32_t flash_bank = 0;
13131 	uint32_t act_offset = 0;
13132 	uint32_t bank_offset = 0;
13133 	uint16_t word = 0;
13134 	uint16_t i = 0;
13135 
13136 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
13137 		device_xname(sc->sc_dev), __func__));
13138 
13139 	if (sc->nvm.acquire(sc) != 0)
13140 		return -1;
13141 
13142 	/*
13143 	 * We need to know which is the valid flash bank.  In the event
13144 	 * that we didn't allocate eeprom_shadow_ram, we may not be
13145 	 * managing flash_bank. So it cannot be trusted and needs
13146 	 * to be updated with each read.
13147 	 */
13148 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
13149 	if (rv) {
13150 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
13151 			device_xname(sc->sc_dev)));
13152 		flash_bank = 0;
13153 	}
13154 
13155 	/*
13156 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
13157 	 * size
13158 	 */
13159 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
13160 
13161 	for (i = 0; i < words; i++) {
13162 		/* The NVM part needs a byte offset, hence * 2 */
13163 		act_offset = bank_offset + ((offset + i) * 2);
13164 		rv = wm_read_ich8_word(sc, act_offset, &word);
13165 		if (rv) {
13166 			aprint_error_dev(sc->sc_dev,
13167 			    "%s: failed to read NVM\n", __func__);
13168 			break;
13169 		}
13170 		data[i] = word;
13171 	}
13172 
13173 	sc->nvm.release(sc);
13174 	return rv;
13175 }
13176 
13177 /******************************************************************************
13178  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
13179  * register.
13180  *
13181  * sc - Struct containing variables accessed by shared code
13182  * offset - offset of word in the EEPROM to read
13183  * data - word read from the EEPROM
13184  * words - number of words to read
13185  *****************************************************************************/
13186 static int
13187 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
13188 {
13189 	int32_t	 rv = 0;
13190 	uint32_t flash_bank = 0;
13191 	uint32_t act_offset = 0;
13192 	uint32_t bank_offset = 0;
13193 	uint32_t dword = 0;
13194 	uint16_t i = 0;
13195 
13196 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
13197 		device_xname(sc->sc_dev), __func__));
13198 
13199 	if (sc->nvm.acquire(sc) != 0)
13200 		return -1;
13201 
13202 	/*
13203 	 * We need to know which is the valid flash bank.  In the event
13204 	 * that we didn't allocate eeprom_shadow_ram, we may not be
13205 	 * managing flash_bank. So it cannot be trusted and needs
13206 	 * to be updated with each read.
13207 	 */
13208 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
13209 	if (rv) {
13210 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
13211 			device_xname(sc->sc_dev)));
13212 		flash_bank = 0;
13213 	}
13214 
13215 	/*
13216 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
13217 	 * size
13218 	 */
13219 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
13220 
13221 	for (i = 0; i < words; i++) {
13222 		/* The NVM part needs a byte offset, hence * 2 */
13223 		act_offset = bank_offset + ((offset + i) * 2);
13224 		/* but we must read dword aligned, so mask ... */
13225 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
13226 		if (rv) {
13227 			aprint_error_dev(sc->sc_dev,
13228 			    "%s: failed to read NVM\n", __func__);
13229 			break;
13230 		}
13231 		/* ... and pick out low or high word */
13232 		if ((act_offset & 0x2) == 0)
13233 			data[i] = (uint16_t)(dword & 0xFFFF);
13234 		else
13235 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
13236 	}
13237 
13238 	sc->nvm.release(sc);
13239 	return rv;
13240 }
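
/*
 * Example of the alignment handling above: reading NVM word 3 gives a
 * byte offset of 6, so the dword read is issued at offset 4 and the word
 * is picked out of the upper half of the returned dword.
 */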
13241 
13242 /* iNVM */
13243 
13244 static int
13245 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
13246 {
13247 	int32_t	 rv = 0;
13248 	uint32_t invm_dword;
13249 	uint16_t i;
13250 	uint8_t record_type, word_address;
13251 
13252 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
13253 		device_xname(sc->sc_dev), __func__));
13254 
13255 	for (i = 0; i < INVM_SIZE; i++) {
13256 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
13257 		/* Get record type */
13258 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
13259 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
13260 			break;
13261 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
13262 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
13263 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
13264 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
13265 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
13266 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
13267 			if (word_address == address) {
13268 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
13269 				rv = 0;
13270 				break;
13271 			}
13272 		}
13273 	}
13274 
13275 	return rv;
13276 }
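
/*
 * The iNVM on I210/I211 is a small one-time-programmable area read as
 * INVM_SIZE dwords.  Each dword begins with a record type: CSR-autoload
 * and RSA-key records are skipped by advancing i past their payload, and
 * word-autoload records pair a word address with its data, which is what
 * the lookup above matches against.
 */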
13277 
13278 static int
13279 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
13280 {
13281 	int rv = 0;
13282 	int i;
13283 
13284 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
13285 		device_xname(sc->sc_dev), __func__));
13286 
13287 	if (sc->nvm.acquire(sc) != 0)
13288 		return -1;
13289 
13290 	for (i = 0; i < words; i++) {
13291 		switch (offset + i) {
13292 		case NVM_OFF_MACADDR:
13293 		case NVM_OFF_MACADDR1:
13294 		case NVM_OFF_MACADDR2:
13295 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
13296 			if (rv != 0) {
13297 				data[i] = 0xffff;
13298 				rv = -1;
13299 			}
13300 			break;
13301 		case NVM_OFF_CFG2:
13302 			rv = wm_nvm_read_word_invm(sc, offset, data);
13303 			if (rv != 0) {
13304 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
13305 				rv = 0;
13306 			}
13307 			break;
13308 		case NVM_OFF_CFG4:
13309 			rv = wm_nvm_read_word_invm(sc, offset, data);
13310 			if (rv != 0) {
13311 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
13312 				rv = 0;
13313 			}
13314 			break;
13315 		case NVM_OFF_LED_1_CFG:
13316 			rv = wm_nvm_read_word_invm(sc, offset, data);
13317 			if (rv != 0) {
13318 				*data = NVM_LED_1_CFG_DEFAULT_I211;
13319 				rv = 0;
13320 			}
13321 			break;
13322 		case NVM_OFF_LED_0_2_CFG:
13323 			rv = wm_nvm_read_word_invm(sc, offset, data);
13324 			if (rv != 0) {
13325 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
13326 				rv = 0;
13327 			}
13328 			break;
13329 		case NVM_OFF_ID_LED_SETTINGS:
13330 			rv = wm_nvm_read_word_invm(sc, offset, data);
13331 			if (rv != 0) {
13332 				*data = ID_LED_RESERVED_FFFF;
13333 				rv = 0;
13334 			}
13335 			break;
13336 		default:
13337 			DPRINTF(WM_DEBUG_NVM,
13338 			    ("NVM word 0x%02x is not mapped.\n", offset));
13339 			*data = NVM_RESERVED_WORD;
13340 			break;
13341 		}
13342 	}
13343 
13344 	sc->nvm.release(sc);
13345 	return rv;
13346 }
13347 
13348 /* Lock, detecting NVM type, validate checksum, version and read */
13349 
13350 static int
13351 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
13352 {
13353 	uint32_t eecd = 0;
13354 
13355 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
13356 	    || sc->sc_type == WM_T_82583) {
13357 		eecd = CSR_READ(sc, WMREG_EECD);
13358 
13359 		/* Isolate bits 15 & 16 */
13360 		eecd = ((eecd >> 15) & 0x03);
13361 
13362 		/* If both bits are set, device is Flash type */
13363 		if (eecd == 0x03)
13364 			return 0;
13365 	}
13366 	return 1;
13367 }
13368 
13369 static int
13370 wm_nvm_flash_presence_i210(struct wm_softc *sc)
13371 {
13372 	uint32_t eec;
13373 
13374 	eec = CSR_READ(sc, WMREG_EEC);
13375 	if ((eec & EEC_FLASH_DETECTED) != 0)
13376 		return 1;
13377 
13378 	return 0;
13379 }
13380 
13381 /*
13382  * wm_nvm_validate_checksum
13383  *
13384  * The checksum is defined as the sum of the first 64 (16 bit) words.
13385  */
13386 static int
13387 wm_nvm_validate_checksum(struct wm_softc *sc)
13388 {
13389 	uint16_t checksum;
13390 	uint16_t eeprom_data;
13391 #ifdef WM_DEBUG
13392 	uint16_t csum_wordaddr, valid_checksum;
13393 #endif
13394 	int i;
13395 
13396 	checksum = 0;
13397 
13398 	/* Don't check for I211 */
13399 	if (sc->sc_type == WM_T_I211)
13400 		return 0;
13401 
13402 #ifdef WM_DEBUG
13403 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
13404 	    || (sc->sc_type == WM_T_PCH_CNP)) {
13405 		csum_wordaddr = NVM_OFF_COMPAT;
13406 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
13407 	} else {
13408 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
13409 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
13410 	}
13411 
13412 	/* Dump EEPROM image for debug */
13413 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
13414 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
13415 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
13416 		/* XXX PCH_SPT? */
13417 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
13418 		if ((eeprom_data & valid_checksum) == 0)
13419 			DPRINTF(WM_DEBUG_NVM,
13420 			    ("%s: NVM need to be updated (%04x != %04x)\n",
13421 				device_xname(sc->sc_dev), eeprom_data,
13422 				    valid_checksum));
13423 	}
13424 
13425 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
13426 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
13427 		for (i = 0; i < NVM_SIZE; i++) {
13428 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
13429 				printf("XXXX ");
13430 			else
13431 				printf("%04hx ", eeprom_data);
13432 			if (i % 8 == 7)
13433 				printf("\n");
13434 		}
13435 	}
13436 
13437 #endif /* WM_DEBUG */
13438 
13439 	for (i = 0; i < NVM_SIZE; i++) {
13440 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
13441 			return 1;
13442 		checksum += eeprom_data;
13443 	}
13444 
13445 	if (checksum != (uint16_t) NVM_CHECKSUM) {
13446 #ifdef WM_DEBUG
13447 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
13448 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
13449 #endif
13450 	}
13451 
13452 	return 0;
13453 }
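
/*
 * Worked example: NVM_CHECKSUM is the Intel-defined constant 0xbaba, so
 * for a valid image the 16-bit sum of words 0x00-0x3f must equal 0xbaba.
 * Note that a mismatch is only logged above and is not treated as fatal,
 * presumably because some boards ship with an invalid checksum.
 */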
13454 
13455 static void
13456 wm_nvm_version_invm(struct wm_softc *sc)
13457 {
13458 	uint32_t dword;
13459 
13460 	/*
13461 	 * Linux's code to decode the version is very strange, so we don't
13462 	 * follow that algorithm and just use word 61 as the document
13463 	 * describes.  Perhaps it's not perfect, though...
13464 	 *
13465 	 * Example:
13466 	 *
13467 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
13468 	 */
13469 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
13470 	dword = __SHIFTOUT(dword, INVM_VER_1);
13471 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
13472 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
13473 }
13474 
13475 static void
13476 wm_nvm_version(struct wm_softc *sc)
13477 {
13478 	uint16_t major, minor, build, patch;
13479 	uint16_t uid0, uid1;
13480 	uint16_t nvm_data;
13481 	uint16_t off;
13482 	bool check_version = false;
13483 	bool check_optionrom = false;
13484 	bool have_build = false;
13485 	bool have_uid = true;
13486 
13487 	/*
13488 	 * Version format:
13489 	 *
13490 	 * XYYZ
13491 	 * X0YZ
13492 	 * X0YY
13493 	 *
13494 	 * Example:
13495 	 *
13496 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
13497 	 *	82571	0x50a6	5.10.6?
13498 	 *	82572	0x506a	5.6.10?
13499 	 *	82572EI	0x5069	5.6.9?
13500 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
13501 	 *		0x2013	2.1.3?
13502 	 *	82583	0x10a0	1.10.0? (document says it's default value)
13503 	 * ICH8+82567	0x0040	0.4.0?
13504 	 * ICH9+82566	0x1040	1.4.0?
13505 	 *ICH10+82567	0x0043	0.4.3?
13506 	 *  PCH+82577	0x00c1	0.12.1?
13507 	 * PCH2+82579	0x00d3	0.13.3?
13508 	 *		0x00d4	0.13.4?
13509 	 *  LPT+I218	0x0023	0.2.3?
13510 	 *  SPT+I219	0x0084	0.8.4?
13511 	 *  CNP+I219	0x0054	0.5.4?
13512 	 */
13513 
13514 	/*
13515 	 * XXX
13516 	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
13517 	 * I've never seen real 82574 hardware with such a small SPI ROM.
13518 	 */
13519 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
13520 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
13521 		have_uid = false;
13522 
13523 	switch (sc->sc_type) {
13524 	case WM_T_82571:
13525 	case WM_T_82572:
13526 	case WM_T_82574:
13527 	case WM_T_82583:
13528 		check_version = true;
13529 		check_optionrom = true;
13530 		have_build = true;
13531 		break;
13532 	case WM_T_ICH8:
13533 	case WM_T_ICH9:
13534 	case WM_T_ICH10:
13535 	case WM_T_PCH:
13536 	case WM_T_PCH2:
13537 	case WM_T_PCH_LPT:
13538 	case WM_T_PCH_SPT:
13539 	case WM_T_PCH_CNP:
13540 		check_version = true;
13541 		have_build = true;
13542 		have_uid = false;
13543 		break;
13544 	case WM_T_82575:
13545 	case WM_T_82576:
13546 	case WM_T_82580:
13547 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
13548 			check_version = true;
13549 		break;
13550 	case WM_T_I211:
13551 		wm_nvm_version_invm(sc);
13552 		have_uid = false;
13553 		goto printver;
13554 	case WM_T_I210:
13555 		if (!wm_nvm_flash_presence_i210(sc)) {
13556 			wm_nvm_version_invm(sc);
13557 			have_uid = false;
13558 			goto printver;
13559 		}
13560 		/* FALLTHROUGH */
13561 	case WM_T_I350:
13562 	case WM_T_I354:
13563 		check_version = true;
13564 		check_optionrom = true;
13565 		break;
13566 	default:
13567 		return;
13568 	}
13569 	if (check_version
13570 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
13571 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
13572 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
13573 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
13574 			build = nvm_data & NVM_BUILD_MASK;
13575 			have_build = true;
13576 		} else
13577 			minor = nvm_data & 0x00ff;
13578 
13579 		/* Decimal */
13580 		minor = (minor / 16) * 10 + (minor % 16);
13581 		sc->sc_nvm_ver_major = major;
13582 		sc->sc_nvm_ver_minor = minor;
13583 
13584 printver:
13585 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
13586 		    sc->sc_nvm_ver_minor);
13587 		if (have_build) {
13588 			sc->sc_nvm_ver_build = build;
13589 			aprint_verbose(".%d", build);
13590 		}
13591 	}
13592 
13593 	/* Assume the option ROM area is above NVM_SIZE */
13594 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
13595 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
13596 		/* Option ROM Version */
13597 		if ((off != 0x0000) && (off != 0xffff)) {
13598 			int rv;
13599 
13600 			off += NVM_COMBO_VER_OFF;
13601 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
13602 			rv |= wm_nvm_read(sc, off, 1, &uid0);
13603 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
13604 			    && (uid1 != 0) && (uid1 != 0xffff)) {
13605 				/* 16bits */
13606 				major = uid0 >> 8;
13607 				build = (uid0 << 8) | (uid1 >> 8);
13608 				patch = uid1 & 0x00ff;
13609 				aprint_verbose(", option ROM Version %d.%d.%d",
13610 				    major, build, patch);
13611 			}
13612 		}
13613 	}
13614 
13615 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
13616 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
13617 }
13618 
13619 /*
13620  * wm_nvm_read:
13621  *
13622  *	Read data from the serial EEPROM.
13623  */
13624 static int
13625 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
13626 {
13627 	int rv;
13628 
13629 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
13630 		device_xname(sc->sc_dev), __func__));
13631 
13632 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
13633 		return -1;
13634 
13635 	rv = sc->nvm.read(sc, word, wordcnt, data);
13636 
13637 	return rv;
13638 }
13639 
13640 /*
13641  * Hardware semaphores.
13642  * Very complex...
13643  */
13644 
13645 static int
13646 wm_get_null(struct wm_softc *sc)
13647 {
13648 
13649 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13650 		device_xname(sc->sc_dev), __func__));
13651 	return 0;
13652 }
13653 
13654 static void
13655 wm_put_null(struct wm_softc *sc)
13656 {
13657 
13658 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13659 		device_xname(sc->sc_dev), __func__));
13660 	return;
13661 }
13662 
13663 static int
13664 wm_get_eecd(struct wm_softc *sc)
13665 {
13666 	uint32_t reg;
13667 	int x;
13668 
13669 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
13670 		device_xname(sc->sc_dev), __func__));
13671 
13672 	reg = CSR_READ(sc, WMREG_EECD);
13673 
13674 	/* Request EEPROM access. */
13675 	reg |= EECD_EE_REQ;
13676 	CSR_WRITE(sc, WMREG_EECD, reg);
13677 
13678 	/* ..and wait for it to be granted. */
13679 	for (x = 0; x < 1000; x++) {
13680 		reg = CSR_READ(sc, WMREG_EECD);
13681 		if (reg & EECD_EE_GNT)
13682 			break;
13683 		delay(5);
13684 	}
13685 	if ((reg & EECD_EE_GNT) == 0) {
13686 		aprint_error_dev(sc->sc_dev,
13687 		    "could not acquire EEPROM GNT\n");
13688 		reg &= ~EECD_EE_REQ;
13689 		CSR_WRITE(sc, WMREG_EECD, reg);
13690 		return -1;
13691 	}
13692 
13693 	return 0;
13694 }
13695 
13696 static void
13697 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
13698 {
13699 
13700 	*eecd |= EECD_SK;
13701 	CSR_WRITE(sc, WMREG_EECD, *eecd);
13702 	CSR_WRITE_FLUSH(sc);
13703 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
13704 		delay(1);
13705 	else
13706 		delay(50);
13707 }
13708 
13709 static void
13710 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
13711 {
13712 
13713 	*eecd &= ~EECD_SK;
13714 	CSR_WRITE(sc, WMREG_EECD, *eecd);
13715 	CSR_WRITE_FLUSH(sc);
13716 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
13717 		delay(1);
13718 	else
13719 		delay(50);
13720 }
13721 
13722 static void
13723 wm_put_eecd(struct wm_softc *sc)
13724 {
13725 	uint32_t reg;
13726 
13727 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13728 		device_xname(sc->sc_dev), __func__));
13729 
13730 	/* Stop nvm */
13731 	reg = CSR_READ(sc, WMREG_EECD);
13732 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
13733 		/* Pull CS high */
13734 		reg |= EECD_CS;
13735 		wm_nvm_eec_clock_lower(sc, &reg);
13736 	} else {
13737 		/* CS on Microwire is active-high */
13738 		reg &= ~(EECD_CS | EECD_DI);
13739 		CSR_WRITE(sc, WMREG_EECD, reg);
13740 		wm_nvm_eec_clock_raise(sc, &reg);
13741 		wm_nvm_eec_clock_lower(sc, &reg);
13742 	}
13743 
13744 	reg = CSR_READ(sc, WMREG_EECD);
13745 	reg &= ~EECD_EE_REQ;
13746 	CSR_WRITE(sc, WMREG_EECD, reg);
13747 
13748 	return;
13749 }
13750 
13751 /*
13752  * Get hardware semaphore.
13753  * Same as e1000_get_hw_semaphore_generic()
13754  */
13755 static int
13756 wm_get_swsm_semaphore(struct wm_softc *sc)
13757 {
13758 	int32_t timeout;
13759 	uint32_t swsm;
13760 
13761 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13762 		device_xname(sc->sc_dev), __func__));
13763 	KASSERT(sc->sc_nvm_wordsize > 0);
13764 
13765 retry:
13766 	/* Get the SW semaphore. */
13767 	timeout = sc->sc_nvm_wordsize + 1;
13768 	while (timeout) {
13769 		swsm = CSR_READ(sc, WMREG_SWSM);
13770 
13771 		if ((swsm & SWSM_SMBI) == 0)
13772 			break;
13773 
13774 		delay(50);
13775 		timeout--;
13776 	}
13777 
13778 	if (timeout == 0) {
13779 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
13780 			/*
13781 			 * In rare circumstances, the SW semaphore may already
13782 			 * be held unintentionally. Clear the semaphore once
13783 			 * before giving up.
13784 			 */
13785 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
13786 			wm_put_swsm_semaphore(sc);
13787 			goto retry;
13788 		}
13789 		aprint_error_dev(sc->sc_dev,
13790 		    "could not acquire SWSM SMBI\n");
13791 		return 1;
13792 	}
13793 
13794 	/* Get the FW semaphore. */
13795 	timeout = sc->sc_nvm_wordsize + 1;
13796 	while (timeout) {
13797 		swsm = CSR_READ(sc, WMREG_SWSM);
13798 		swsm |= SWSM_SWESMBI;
13799 		CSR_WRITE(sc, WMREG_SWSM, swsm);
13800 		/* If we managed to set the bit we got the semaphore. */
13801 		swsm = CSR_READ(sc, WMREG_SWSM);
13802 		if (swsm & SWSM_SWESMBI)
13803 			break;
13804 
13805 		delay(50);
13806 		timeout--;
13807 	}
13808 
13809 	if (timeout == 0) {
13810 		aprint_error_dev(sc->sc_dev,
13811 		    "could not acquire SWSM SWESMBI\n");
13812 		/* Release semaphores */
13813 		wm_put_swsm_semaphore(sc);
13814 		return 1;
13815 	}
13816 	return 0;
13817 }
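
/*
 * The two-stage handshake above follows the e1000 semaphore scheme:
 * SWSM_SMBI arbitrates among software agents, and SWSM_SWESMBI then
 * arbitrates between software and firmware.  Only after both bits are
 * owned may software touch the protected resource.
 */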
13818 
13819 /*
13820  * Put hardware semaphore.
13821  * Same as e1000_put_hw_semaphore_generic()
13822  */
13823 static void
13824 wm_put_swsm_semaphore(struct wm_softc *sc)
13825 {
13826 	uint32_t swsm;
13827 
13828 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13829 		device_xname(sc->sc_dev), __func__));
13830 
13831 	swsm = CSR_READ(sc, WMREG_SWSM);
13832 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
13833 	CSR_WRITE(sc, WMREG_SWSM, swsm);
13834 }
13835 
13836 /*
13837  * Get SW/FW semaphore.
13838  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
13839  */
13840 static int
13841 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
13842 {
13843 	uint32_t swfw_sync;
13844 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
13845 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
13846 	int timeout;
13847 
13848 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13849 		device_xname(sc->sc_dev), __func__));
13850 
13851 	if (sc->sc_type == WM_T_80003)
13852 		timeout = 50;
13853 	else
13854 		timeout = 200;
13855 
13856 	while (timeout) {
13857 		if (wm_get_swsm_semaphore(sc)) {
13858 			aprint_error_dev(sc->sc_dev,
13859 			    "%s: failed to get semaphore\n",
13860 			    __func__);
13861 			return 1;
13862 		}
13863 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
13864 		if ((swfw_sync & (swmask | fwmask)) == 0) {
13865 			swfw_sync |= swmask;
13866 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
13867 			wm_put_swsm_semaphore(sc);
13868 			return 0;
13869 		}
13870 		wm_put_swsm_semaphore(sc);
13871 		delay(5000);
13872 		timeout--;
13873 	}
13874 	device_printf(sc->sc_dev,
13875 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
13876 	    mask, swfw_sync);
13877 	return 1;
13878 }
13879 
13880 static void
13881 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
13882 {
13883 	uint32_t swfw_sync;
13884 
13885 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13886 		device_xname(sc->sc_dev), __func__));
13887 
13888 	while (wm_get_swsm_semaphore(sc) != 0)
13889 		continue;
13890 
13891 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
13892 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
13893 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
13894 
13895 	wm_put_swsm_semaphore(sc);
13896 }
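
/*
 * In SW_FW_SYNC each resource has a software bit (mask << SWFW_SOFT_SHIFT)
 * and a firmware bit (mask << SWFW_FIRM_SHIFT).  Software may claim a
 * resource only while both bits are clear, and the register itself is
 * always accessed under the SWSM semaphore taken above.
 */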
13897 
13898 static int
13899 wm_get_nvm_80003(struct wm_softc *sc)
13900 {
13901 	int rv;
13902 
13903 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
13904 		device_xname(sc->sc_dev), __func__));
13905 
13906 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
13907 		aprint_error_dev(sc->sc_dev,
13908 		    "%s: failed to get semaphore(SWFW)\n", __func__);
13909 		return rv;
13910 	}
13911 
13912 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
13913 	    && (rv = wm_get_eecd(sc)) != 0) {
13914 		aprint_error_dev(sc->sc_dev,
13915 		    "%s: failed to get semaphore(EECD)\n", __func__);
13916 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
13917 		return rv;
13918 	}
13919 
13920 	return 0;
13921 }
13922 
13923 static void
13924 wm_put_nvm_80003(struct wm_softc *sc)
13925 {
13926 
13927 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13928 		device_xname(sc->sc_dev), __func__));
13929 
13930 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
13931 		wm_put_eecd(sc);
13932 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
13933 }
13934 
13935 static int
13936 wm_get_nvm_82571(struct wm_softc *sc)
13937 {
13938 	int rv;
13939 
13940 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13941 		device_xname(sc->sc_dev), __func__));
13942 
13943 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
13944 		return rv;
13945 
13946 	switch (sc->sc_type) {
13947 	case WM_T_82573:
13948 		break;
13949 	default:
13950 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
13951 			rv = wm_get_eecd(sc);
13952 		break;
13953 	}
13954 
13955 	if (rv != 0) {
13956 		aprint_error_dev(sc->sc_dev,
13957 		    "%s: failed to get semaphore\n",
13958 		    __func__);
13959 		wm_put_swsm_semaphore(sc);
13960 	}
13961 
13962 	return rv;
13963 }
13964 
13965 static void
13966 wm_put_nvm_82571(struct wm_softc *sc)
13967 {
13968 
13969 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13970 		device_xname(sc->sc_dev), __func__));
13971 
13972 	switch (sc->sc_type) {
13973 	case WM_T_82573:
13974 		break;
13975 	default:
13976 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
13977 			wm_put_eecd(sc);
13978 		break;
13979 	}
13980 
13981 	wm_put_swsm_semaphore(sc);
13982 }
13983 
13984 static int
13985 wm_get_phy_82575(struct wm_softc *sc)
13986 {
13987 
13988 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13989 		device_xname(sc->sc_dev), __func__));
13990 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
13991 }
13992 
13993 static void
13994 wm_put_phy_82575(struct wm_softc *sc)
13995 {
13996 
13997 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13998 		device_xname(sc->sc_dev), __func__));
13999 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
14000 }
14001 
14002 static int
14003 wm_get_swfwhw_semaphore(struct wm_softc *sc)
14004 {
14005 	uint32_t ext_ctrl;
14006 	int timeout = 200;
14007 
14008 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14009 		device_xname(sc->sc_dev), __func__));
14010 
14011 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
14012 	for (timeout = 0; timeout < 200; timeout++) {
14013 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14014 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
14015 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14016 
14017 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14018 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
14019 			return 0;
14020 		delay(5000);
14021 	}
14022 	device_printf(sc->sc_dev,
14023 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
14024 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
14025 	return 1;
14026 }
14027 
14028 static void
14029 wm_put_swfwhw_semaphore(struct wm_softc *sc)
14030 {
14031 	uint32_t ext_ctrl;
14032 
14033 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14034 		device_xname(sc->sc_dev), __func__));
14035 
14036 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14037 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
14038 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14039 
14040 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
14041 }
14042 
14043 static int
14044 wm_get_swflag_ich8lan(struct wm_softc *sc)
14045 {
14046 	uint32_t ext_ctrl;
14047 	int timeout;
14048 
14049 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14050 		device_xname(sc->sc_dev), __func__));
14051 	mutex_enter(sc->sc_ich_phymtx);
14052 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
14053 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14054 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
14055 			break;
14056 		delay(1000);
14057 	}
14058 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
14059 		device_printf(sc->sc_dev,
14060 		    "SW has already locked the resource\n");
14061 		goto out;
14062 	}
14063 
14064 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
14065 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14066 	for (timeout = 0; timeout < 1000; timeout++) {
14067 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14068 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
14069 			break;
14070 		delay(1000);
14071 	}
14072 	if (timeout >= 1000) {
14073 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
14074 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
14075 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14076 		goto out;
14077 	}
14078 	return 0;
14079 
14080 out:
14081 	mutex_exit(sc->sc_ich_phymtx);
14082 	return 1;
14083 }
14084 
14085 static void
14086 wm_put_swflag_ich8lan(struct wm_softc *sc)
14087 {
14088 	uint32_t ext_ctrl;
14089 
14090 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14091 		device_xname(sc->sc_dev), __func__));
14092 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14093 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
14094 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
14095 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14096 	} else {
14097 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
14098 	}
14099 
14100 	mutex_exit(sc->sc_ich_phymtx);
14101 }
14102 
14103 static int
14104 wm_get_nvm_ich8lan(struct wm_softc *sc)
14105 {
14106 
14107 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14108 		device_xname(sc->sc_dev), __func__));
14109 	mutex_enter(sc->sc_ich_nvmmtx);
14110 
14111 	return 0;
14112 }
14113 
14114 static void
14115 wm_put_nvm_ich8lan(struct wm_softc *sc)
14116 {
14117 
14118 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14119 		device_xname(sc->sc_dev), __func__));
14120 	mutex_exit(sc->sc_ich_nvmmtx);
14121 }
14122 
14123 static int
14124 wm_get_hw_semaphore_82573(struct wm_softc *sc)
14125 {
14126 	int i = 0;
14127 	uint32_t reg;
14128 
14129 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14130 		device_xname(sc->sc_dev), __func__));
14131 
14132 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
14133 	do {
14134 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
14135 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
14136 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
14137 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
14138 			break;
14139 		delay(2*1000);
14140 		i++;
14141 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
14142 
14143 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
14144 		wm_put_hw_semaphore_82573(sc);
14145 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
14146 		    device_xname(sc->sc_dev));
14147 		return -1;
14148 	}
14149 
14150 	return 0;
14151 }
14152 
14153 static void
14154 wm_put_hw_semaphore_82573(struct wm_softc *sc)
14155 {
14156 	uint32_t reg;
14157 
14158 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14159 		device_xname(sc->sc_dev), __func__));
14160 
14161 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
14162 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
14163 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
14164 }
14165 
14166 /*
14167  * Management mode and power management related subroutines.
14168  * BMC, AMT, suspend/resume and EEE.
14169  */
14170 
14171 #ifdef WM_WOL
14172 static int
14173 wm_check_mng_mode(struct wm_softc *sc)
14174 {
14175 	int rv;
14176 
14177 	switch (sc->sc_type) {
14178 	case WM_T_ICH8:
14179 	case WM_T_ICH9:
14180 	case WM_T_ICH10:
14181 	case WM_T_PCH:
14182 	case WM_T_PCH2:
14183 	case WM_T_PCH_LPT:
14184 	case WM_T_PCH_SPT:
14185 	case WM_T_PCH_CNP:
14186 		rv = wm_check_mng_mode_ich8lan(sc);
14187 		break;
14188 	case WM_T_82574:
14189 	case WM_T_82583:
14190 		rv = wm_check_mng_mode_82574(sc);
14191 		break;
14192 	case WM_T_82571:
14193 	case WM_T_82572:
14194 	case WM_T_82573:
14195 	case WM_T_80003:
14196 		rv = wm_check_mng_mode_generic(sc);
14197 		break;
14198 	default:
14199 		/* Nothing to do */
14200 		rv = 0;
14201 		break;
14202 	}
14203 
14204 	return rv;
14205 }
14206 
14207 static int
14208 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
14209 {
14210 	uint32_t fwsm;
14211 
14212 	fwsm = CSR_READ(sc, WMREG_FWSM);
14213 
14214 	if (((fwsm & FWSM_FW_VALID) != 0)
14215 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
14216 		return 1;
14217 
14218 	return 0;
14219 }
14220 
14221 static int
14222 wm_check_mng_mode_82574(struct wm_softc *sc)
14223 {
14224 	uint16_t data;
14225 
14226 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
14227 
14228 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
14229 		return 1;
14230 
14231 	return 0;
14232 }
14233 
14234 static int
14235 wm_check_mng_mode_generic(struct wm_softc *sc)
14236 {
14237 	uint32_t fwsm;
14238 
14239 	fwsm = CSR_READ(sc, WMREG_FWSM);
14240 
14241 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
14242 		return 1;
14243 
14244 	return 0;
14245 }
14246 #endif /* WM_WOL */
14247 
14248 static int
14249 wm_enable_mng_pass_thru(struct wm_softc *sc)
14250 {
14251 	uint32_t manc, fwsm, factps;
14252 
14253 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
14254 		return 0;
14255 
14256 	manc = CSR_READ(sc, WMREG_MANC);
14257 
14258 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
14259 		device_xname(sc->sc_dev), manc));
14260 	if ((manc & MANC_RECV_TCO_EN) == 0)
14261 		return 0;
14262 
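	/*
	 * The pass-through decision differs per family: parts with a valid
	 * ARC subsystem check FACTPS and the FWSM mode, 82574/82583 check
	 * FACTPS and the NVM CFG2 manageability bits, and everything else
	 * falls back to the MANC SMBus/ASF bits.
	 */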
14263 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
14264 		fwsm = CSR_READ(sc, WMREG_FWSM);
14265 		factps = CSR_READ(sc, WMREG_FACTPS);
14266 		if (((factps & FACTPS_MNGCG) == 0)
14267 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
14268 			return 1;
14269 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
14270 		uint16_t data;
14271 
14272 		factps = CSR_READ(sc, WMREG_FACTPS);
14273 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
14274 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
14275 			device_xname(sc->sc_dev), factps, data));
14276 		if (((factps & FACTPS_MNGCG) == 0)
14277 		    && ((data & NVM_CFG2_MNGM_MASK)
14278 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
14279 			return 1;
14280 	} else if (((manc & MANC_SMBUS_EN) != 0)
14281 	    && ((manc & MANC_ASF_EN) == 0))
14282 		return 1;
14283 
14284 	return 0;
14285 }
14286 
14287 static bool
14288 wm_phy_resetisblocked(struct wm_softc *sc)
14289 {
14290 	bool blocked = false;
14291 	uint32_t reg;
14292 	int i = 0;
14293 
14294 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14295 		device_xname(sc->sc_dev), __func__));
14296 
14297 	switch (sc->sc_type) {
14298 	case WM_T_ICH8:
14299 	case WM_T_ICH9:
14300 	case WM_T_ICH10:
14301 	case WM_T_PCH:
14302 	case WM_T_PCH2:
14303 	case WM_T_PCH_LPT:
14304 	case WM_T_PCH_SPT:
14305 	case WM_T_PCH_CNP:
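		/*
		 * FWSM_RSPCIPHY is clear while firmware blocks PHY resets;
		 * poll it for up to ~300ms (30 x 10ms) before reporting the
		 * reset as blocked.
		 */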
14306 		do {
14307 			reg = CSR_READ(sc, WMREG_FWSM);
14308 			if ((reg & FWSM_RSPCIPHY) == 0) {
14309 				blocked = true;
14310 				delay(10*1000);
14311 				continue;
14312 			}
14313 			blocked = false;
14314 		} while (blocked && (i++ < 30));
14315 		return blocked;
14317 	case WM_T_82571:
14318 	case WM_T_82572:
14319 	case WM_T_82573:
14320 	case WM_T_82574:
14321 	case WM_T_82583:
14322 	case WM_T_80003:
14323 		reg = CSR_READ(sc, WMREG_MANC);
14324 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
14325 			return true;
14326 		else
14327 			return false;
14329 	default:
14330 		/* No problem */
14331 		break;
14332 	}
14333 
14334 	return false;
14335 }
14336 
14337 static void
14338 wm_get_hw_control(struct wm_softc *sc)
14339 {
14340 	uint32_t reg;
14341 
14342 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14343 		device_xname(sc->sc_dev), __func__));
14344 
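	/*
	 * Set the DRV_LOAD bit to tell the firmware/BMC that a host driver
	 * is loaded.  82573 signals this through SWSM; 82571 and newer use
	 * CTRL_EXT.
	 */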
14345 	if (sc->sc_type == WM_T_82573) {
14346 		reg = CSR_READ(sc, WMREG_SWSM);
14347 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
14348 	} else if (sc->sc_type >= WM_T_82571) {
14349 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
14350 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
14351 	}
14352 }
14353 
14354 static void
14355 wm_release_hw_control(struct wm_softc *sc)
14356 {
14357 	uint32_t reg;
14358 
14359 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
14360 		device_xname(sc->sc_dev), __func__));
14361 
14362 	if (sc->sc_type == WM_T_82573) {
14363 		reg = CSR_READ(sc, WMREG_SWSM);
14364 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
14365 	} else if (sc->sc_type >= WM_T_82571) {
14366 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
14367 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
14368 	}
14369 }
14370 
14371 static void
14372 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
14373 {
14374 	uint32_t reg;
14375 
14376 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14377 		device_xname(sc->sc_dev), __func__));
14378 
14379 	if (sc->sc_type < WM_T_PCH2)
14380 		return;
14381 
14382 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
14383 
14384 	if (gate)
14385 		reg |= EXTCNFCTR_GATE_PHY_CFG;
14386 	else
14387 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
14388 
14389 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
14390 }
14391 
14392 static int
14393 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
14394 {
14395 	uint32_t fwsm, reg;
14396 	int rv = 0;
14397 
14398 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14399 		device_xname(sc->sc_dev), __func__));
14400 
14401 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
14402 	wm_gate_hw_phy_config_ich8lan(sc, true);
14403 
14404 	/* Disable ULP */
14405 	wm_ulp_disable(sc);
14406 
14407 	/* Acquire PHY semaphore */
14408 	rv = sc->phy.acquire(sc);
14409 	if (rv != 0) {
14410 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
14411 			device_xname(sc->sc_dev), __func__));
14412 		return -1;
14413 	}
14414 
14415 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
14416 	 * inaccessible and resetting the PHY is not blocked, toggle the
14417 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
14418 	 */
14419 	fwsm = CSR_READ(sc, WMREG_FWSM);
14420 	switch (sc->sc_type) {
14421 	case WM_T_PCH_LPT:
14422 	case WM_T_PCH_SPT:
14423 	case WM_T_PCH_CNP:
14424 		if (wm_phy_is_accessible_pchlan(sc))
14425 			break;
14426 
14427 		/* Before toggling LANPHYPC, see if PHY is accessible by
14428 		 * forcing MAC to SMBus mode first.
14429 		 */
14430 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
14431 		reg |= CTRL_EXT_FORCE_SMBUS;
14432 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
14433 #if 0
14434 		/* XXX Isn't this required??? */
14435 		CSR_WRITE_FLUSH(sc);
14436 #endif
14437 		/* Wait 50 milliseconds for MAC to finish any retries
14438 		 * that it might be trying to perform from previous
14439 		 * attempts to acknowledge any phy read requests.
14440 		 */
14441 		delay(50 * 1000);
14442 		/* FALLTHROUGH */
14443 	case WM_T_PCH2:
14444 		if (wm_phy_is_accessible_pchlan(sc) == true)
14445 			break;
14446 		/* FALLTHROUGH */
14447 	case WM_T_PCH:
14448 		if (sc->sc_type == WM_T_PCH)
14449 			if ((fwsm & FWSM_FW_VALID) != 0)
14450 				break;
14451 
14452 		if (wm_phy_resetisblocked(sc) == true) {
14453 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
14454 			break;
14455 		}
14456 
14457 		/* Toggle LANPHYPC Value bit */
14458 		wm_toggle_lanphypc_pch_lpt(sc);
14459 
14460 		if (sc->sc_type >= WM_T_PCH_LPT) {
14461 			if (wm_phy_is_accessible_pchlan(sc) == true)
14462 				break;
14463 
14464 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
14465 			 * so ensure that the MAC is also out of SMBus mode
14466 			 */
14467 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
14468 			reg &= ~CTRL_EXT_FORCE_SMBUS;
14469 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
14470 
14471 			if (wm_phy_is_accessible_pchlan(sc) == true)
14472 				break;
14473 			rv = -1;
14474 		}
14475 		break;
14476 	default:
14477 		break;
14478 	}
14479 
14480 	/* Release semaphore */
14481 	sc->phy.release(sc);
14482 
14483 	if (rv == 0) {
14484 		/* Check to see if able to reset PHY.  Print error if not */
14485 		if (wm_phy_resetisblocked(sc)) {
14486 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
14487 			goto out;
14488 		}
14489 
14490 		/* Reset the PHY before any access to it.  Doing so, ensures
14491 		 * that the PHY is in a known good state before we read/write
14492 		 * PHY registers.  The generic reset is sufficient here,
14493 		 * because we haven't determined the PHY type yet.
14494 		 */
14495 		if (wm_reset_phy(sc) != 0)
14496 			goto out;
14497 
14498 		/* On a successful reset, possibly need to wait for the PHY
14499 		 * to quiesce to an accessible state before returning control
14500 		 * to the calling function.  If the PHY does not quiesce, then
14501 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
14502 		 * the PHY is in.
14503 		 */
14504 		if (wm_phy_resetisblocked(sc))
14505 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
14506 	}
14507 
14508 out:
14509 	/* Ungate automatic PHY configuration on non-managed 82579 */
14510 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
14511 		delay(10*1000);
14512 		wm_gate_hw_phy_config_ich8lan(sc, false);
14513 	}
14514 
14515 	return rv;
14516 }
14517 
14518 static void
14519 wm_init_manageability(struct wm_softc *sc)
14520 {
14521 
14522 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14523 		device_xname(sc->sc_dev), __func__));
14524 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
14525 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
14526 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
14527 
14528 		/* Disable hardware interception of ARP */
14529 		manc &= ~MANC_ARP_EN;
14530 
14531 		/* Enable receiving management packets to the host */
14532 		if (sc->sc_type >= WM_T_82571) {
14533 			manc |= MANC_EN_MNG2HOST;
14534 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
14535 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
14536 		}
14537 
14538 		CSR_WRITE(sc, WMREG_MANC, manc);
14539 	}
14540 }
14541 
14542 static void
14543 wm_release_manageability(struct wm_softc *sc)
14544 {
14545 
14546 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
14547 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
14548 
14549 		manc |= MANC_ARP_EN;
14550 		if (sc->sc_type >= WM_T_82571)
14551 			manc &= ~MANC_EN_MNG2HOST;
14552 
14553 		CSR_WRITE(sc, WMREG_MANC, manc);
14554 	}
14555 }
14556 
14557 static void
14558 wm_get_wakeup(struct wm_softc *sc)
14559 {
14560 
14561 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
14562 	switch (sc->sc_type) {
14563 	case WM_T_82573:
14564 	case WM_T_82583:
14565 		sc->sc_flags |= WM_F_HAS_AMT;
14566 		/* FALLTHROUGH */
14567 	case WM_T_80003:
14568 	case WM_T_82575:
14569 	case WM_T_82576:
14570 	case WM_T_82580:
14571 	case WM_T_I350:
14572 	case WM_T_I354:
14573 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
14574 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
14575 		/* FALLTHROUGH */
14576 	case WM_T_82541:
14577 	case WM_T_82541_2:
14578 	case WM_T_82547:
14579 	case WM_T_82547_2:
14580 	case WM_T_82571:
14581 	case WM_T_82572:
14582 	case WM_T_82574:
14583 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
14584 		break;
14585 	case WM_T_ICH8:
14586 	case WM_T_ICH9:
14587 	case WM_T_ICH10:
14588 	case WM_T_PCH:
14589 	case WM_T_PCH2:
14590 	case WM_T_PCH_LPT:
14591 	case WM_T_PCH_SPT:
14592 	case WM_T_PCH_CNP:
14593 		sc->sc_flags |= WM_F_HAS_AMT;
14594 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
14595 		break;
14596 	default:
14597 		break;
14598 	}
14599 
14600 	/* 1: HAS_MANAGE */
14601 	if (wm_enable_mng_pass_thru(sc) != 0)
14602 		sc->sc_flags |= WM_F_HAS_MANAGE;
14603 
14604 	/*
14605 	 * Note that the WOL flag is set later, after the EEPROM reset
14606 	 * code has run.
14607 	 */
14608 }
14609 
14610 /*
14611  * Unconfigure Ultra Low Power mode.
14612  * Only for I217 and newer (see below).
14613  */
14614 static int
14615 wm_ulp_disable(struct wm_softc *sc)
14616 {
14617 	uint32_t reg;
14618 	uint16_t phyreg;
14619 	int i = 0, rv = 0;
14620 
14621 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14622 		device_xname(sc->sc_dev), __func__));
14623 	/* Exclude old devices */
14624 	if ((sc->sc_type < WM_T_PCH_LPT)
14625 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
14626 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
14627 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
14628 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
14629 		return 0;
14630 
14631 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
14632 		/* Request ME un-configure ULP mode in the PHY */
14633 		reg = CSR_READ(sc, WMREG_H2ME);
14634 		reg &= ~H2ME_ULP;
14635 		reg |= H2ME_ENFORCE_SETTINGS;
14636 		CSR_WRITE(sc, WMREG_H2ME, reg);
14637 
14638 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
14639 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
14640 			if (i++ == 30) {
14641 				device_printf(sc->sc_dev, "%s timed out\n",
14642 				    __func__);
14643 				return -1;
14644 			}
14645 			delay(10 * 1000);
14646 		}
14647 		reg = CSR_READ(sc, WMREG_H2ME);
14648 		reg &= ~H2ME_ENFORCE_SETTINGS;
14649 		CSR_WRITE(sc, WMREG_H2ME, reg);
14650 
14651 		return 0;
14652 	}
14653 
14654 	/* Acquire semaphore */
14655 	rv = sc->phy.acquire(sc);
14656 	if (rv != 0) {
14657 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
14658 			device_xname(sc->sc_dev), __func__));
14659 		return -1;
14660 	}
14661 
14662 	/* Toggle LANPHYPC */
14663 	wm_toggle_lanphypc_pch_lpt(sc);
14664 
14665 	/* Unforce SMBus mode in PHY */
14666 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
14667 	if (rv != 0) {
14668 		uint32_t reg2;
14669 
14670 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
14671 			__func__);
14672 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
14673 		reg2 |= CTRL_EXT_FORCE_SMBUS;
14674 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
14675 		delay(50 * 1000);
14676 
14677 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
14678 		    &phyreg);
14679 		if (rv != 0)
14680 			goto release;
14681 	}
14682 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
14683 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
14684 
14685 	/* Unforce SMBus mode in MAC */
14686 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
14687 	reg &= ~CTRL_EXT_FORCE_SMBUS;
14688 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
14689 
14690 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
14691 	if (rv != 0)
14692 		goto release;
14693 	phyreg |= HV_PM_CTRL_K1_ENA;
14694 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
14695 
14696 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
14697 		&phyreg);
14698 	if (rv != 0)
14699 		goto release;
14700 	phyreg &= ~(I218_ULP_CONFIG1_IND
14701 	    | I218_ULP_CONFIG1_STICKY_ULP
14702 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
14703 	    | I218_ULP_CONFIG1_WOL_HOST
14704 	    | I218_ULP_CONFIG1_INBAND_EXIT
14705 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
14706 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
14707 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
14708 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
14709 	phyreg |= I218_ULP_CONFIG1_START;
14710 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
14711 
14712 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
14713 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
14714 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
14715 
14716 release:
14717 	/* Release semaphore */
14718 	sc->phy.release(sc);
14719 	wm_gmii_reset(sc);
14720 	delay(50 * 1000);
14721 
14722 	return rv;
14723 }
14724 
14725 /* WOL in the newer chipset interfaces (pchlan) */
14726 static int
14727 wm_enable_phy_wakeup(struct wm_softc *sc)
14728 {
14729 	device_t dev = sc->sc_dev;
14730 	uint32_t mreg, moff;
14731 	uint16_t wuce, wuc, wufc, preg;
14732 	int i, rv;
14733 
14734 	KASSERT(sc->sc_type >= WM_T_PCH);
14735 
14736 	/* Copy MAC RARs to PHY RARs */
14737 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
14738 
14739 	/* Activate PHY wakeup */
14740 	rv = sc->phy.acquire(sc);
14741 	if (rv != 0) {
14742 		device_printf(dev, "%s: failed to acquire semaphore\n",
14743 		    __func__);
14744 		return rv;
14745 	}
14746 
14747 	/*
14748 	 * Enable access to PHY wakeup registers.
14749 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
14750 	 */
14751 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
14752 	if (rv != 0) {
14753 		device_printf(dev,
14754 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
14755 		goto release;
14756 	}
14757 
14758 	/* Copy MAC MTA to PHY MTA */
14759 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
14760 		uint16_t lo, hi;
14761 
14762 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
14763 		lo = (uint16_t)(mreg & 0xffff);
14764 		hi = (uint16_t)((mreg >> 16) & 0xffff);
14765 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
14766 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
14767 	}
14768 
14769 	/* Configure PHY Rx Control register */
14770 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
14771 	mreg = CSR_READ(sc, WMREG_RCTL);
14772 	if (mreg & RCTL_UPE)
14773 		preg |= BM_RCTL_UPE;
14774 	if (mreg & RCTL_MPE)
14775 		preg |= BM_RCTL_MPE;
14776 	preg &= ~(BM_RCTL_MO_MASK);
14777 	moff = __SHIFTOUT(mreg, RCTL_MO);
14778 	if (moff != 0)
14779 		preg |= moff << BM_RCTL_MO_SHIFT;
14780 	if (mreg & RCTL_BAM)
14781 		preg |= BM_RCTL_BAM;
14782 	if (mreg & RCTL_PMCF)
14783 		preg |= BM_RCTL_PMCF;
14784 	mreg = CSR_READ(sc, WMREG_CTRL);
14785 	if (mreg & CTRL_RFCE)
14786 		preg |= BM_RCTL_RFCE;
14787 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
14788 
14789 	wuc = WUC_APME | WUC_PME_EN;
14790 	wufc = WUFC_MAG;
14791 	/* Enable PHY wakeup in MAC register */
14792 	CSR_WRITE(sc, WMREG_WUC,
14793 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
14794 	CSR_WRITE(sc, WMREG_WUFC, wufc);
14795 
14796 	/* Configure and enable PHY wakeup in PHY registers */
14797 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
14798 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
14799 
14800 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
14801 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
14802 
14803 release:
14804 	sc->phy.release(sc);
14805 
14806 	return rv;
14807 }
14808 
14809 /* Power down workaround on D3 */
14810 static void
14811 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
14812 {
14813 	uint32_t reg;
14814 	uint16_t phyreg;
14815 	int i;
14816 
14817 	for (i = 0; i < 2; i++) {
14818 		/* Disable link */
14819 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
14820 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
14821 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
14822 
14823 		/*
14824 		 * Call gig speed drop workaround on Gig disable before
14825 		 * accessing any PHY registers
14826 		 */
14827 		if (sc->sc_type == WM_T_ICH8)
14828 			wm_gig_downshift_workaround_ich8lan(sc);
14829 
14830 		/* Write VR power-down enable */
14831 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
14832 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
14833 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
14834 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
14835 
14836 		/* Read it back and test */
14837 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
14838 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
14839 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
14840 			break;
14841 
14842 		/* Issue PHY reset and repeat at most one more time */
14843 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
14844 	}
14845 }
14846 
14847 /*
14848  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
14849  *  @sc: pointer to the HW structure
14850  *
14851  *  During S0 to Sx transition, it is possible the link remains at gig
14852  *  instead of negotiating to a lower speed.  Before going to Sx, set
14853  *  'Gig Disable' to force link speed negotiation to a lower speed based on
14854  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
14855  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
14856  *  needs to be written.
14857  *  Parts that support (and are linked to a partner which support) EEE in
14858  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
14859  *  than 10Mbps w/o EEE.
14860  */
14861 static void
14862 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
14863 {
14864 	device_t dev = sc->sc_dev;
14865 	struct ethercom *ec = &sc->sc_ethercom;
14866 	uint32_t phy_ctrl;
14867 	int rv;
14868 
14869 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
14870 	phy_ctrl |= PHY_CTRL_GBE_DIS;
14871 
14872 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
14873 
14874 	if (sc->sc_phytype == WMPHY_I217) {
14875 		uint16_t devid = sc->sc_pcidevid;
14876 
14877 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
14878 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
14879 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
14880 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
14881 		    (sc->sc_type >= WM_T_PCH_SPT))
14882 			CSR_WRITE(sc, WMREG_FEXTNVM6,
14883 			    CSR_READ(sc, WMREG_FEXTNVM6)
14884 			    & ~FEXTNVM6_REQ_PLL_CLK);
14885 
14886 		if (sc->phy.acquire(sc) != 0)
14887 			goto out;
14888 
14889 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
14890 			uint16_t eee_advert;
14891 
14892 			rv = wm_read_emi_reg_locked(dev,
14893 			    I217_EEE_ADVERTISEMENT, &eee_advert);
14894 			if (rv)
14895 				goto release;
14896 
14897 			/*
14898 			 * Disable LPLU if both link partners support 100BaseT
14899 			 * EEE and 100Full is advertised on both ends of the
14900 			 * link, and enable Auto Enable LPI since there will
14901 			 * be no driver to enable LPI while in Sx.
14902 			 */
14903 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
14904 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
14905 				uint16_t anar, phy_reg;
14906 
14907 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
14908 				    &anar);
14909 				if (anar & ANAR_TX_FD) {
14910 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
14911 					    PHY_CTRL_NOND0A_LPLU);
14912 
14913 					/* Set Auto Enable LPI after link up */
14914 					sc->phy.readreg_locked(dev, 2,
14915 					    I217_LPI_GPIO_CTRL, &phy_reg);
14916 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
14917 					sc->phy.writereg_locked(dev, 2,
14918 					    I217_LPI_GPIO_CTRL, phy_reg);
14919 				}
14920 			}
14921 		}
14922 
14923 		/*
14924 		 * For i217 Intel Rapid Start Technology support,
14925 		 * when the system is going into Sx and no manageability engine
14926 		 * is present, the driver must configure proxy to reset only on
14927 		 * power good.	LPI (Low Power Idle) state must also reset only
14928 		 * on power good, as well as the MTA (Multicast table array).
14929 		 * The SMBus release must also be disabled on LCD reset.
14930 		 */
14931 
14932 		/*
14933 		 * Enable MTA to reset for Intel Rapid Start Technology
14934 		 * Support
14935 		 */
14936 
14937 release:
14938 		sc->phy.release(sc);
14939 	}
14940 out:
14941 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
14942 
14943 	if (sc->sc_type == WM_T_ICH8)
14944 		wm_gig_downshift_workaround_ich8lan(sc);
14945 
14946 	if (sc->sc_type >= WM_T_PCH) {
14947 		wm_oem_bits_config_ich8lan(sc, false);
14948 
14949 		/* Reset PHY to activate OEM bits on 82577/8 */
14950 		if (sc->sc_type == WM_T_PCH)
14951 			wm_reset_phy(sc);
14952 
14953 		if (sc->phy.acquire(sc) != 0)
14954 			return;
14955 		wm_write_smbus_addr(sc);
14956 		sc->phy.release(sc);
14957 	}
14958 }
14959 
14960 /*
14961  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
14962  *  @sc: pointer to the HW structure
14963  *
14964  *  During Sx to S0 transitions on non-managed devices or managed devices
14965  *  on which PHY resets are not blocked, if the PHY registers cannot be
14966  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
14967  *  the PHY.
14968  *  On i217, setup Intel Rapid Start Technology.
14969  */
14970 static int
14971 wm_resume_workarounds_pchlan(struct wm_softc *sc)
14972 {
14973 	device_t dev = sc->sc_dev;
14974 	int rv;
14975 
14976 	if (sc->sc_type < WM_T_PCH2)
14977 		return 0;
14978 
14979 	rv = wm_init_phy_workarounds_pchlan(sc);
14980 	if (rv != 0)
14981 		return -1;
14982 
14983 	/* For i217 Intel Rapid Start Technology support when the system
14984 	 * is transitioning from Sx and no manageability engine is present
14985 	 * configure SMBus to restore on reset, disable proxy, and enable
14986 	 * the reset on MTA (Multicast table array).
14987 	 */
14988 	if (sc->sc_phytype == WMPHY_I217) {
14989 		uint16_t phy_reg;
14990 
14991 		if (sc->phy.acquire(sc) != 0)
14992 			return -1;
14993 
14994 		/* Clear Auto Enable LPI after link up */
14995 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
14996 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
14997 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
14998 
14999 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
15000 			/* Restore clear on SMB if no manageability engine
15001 			 * is present
15002 			 */
15003 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
15004 			    &phy_reg);
15005 			if (rv != 0)
15006 				goto release;
15007 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
15008 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
15009 
15010 			/* Disable Proxy */
15011 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
15012 		}
15013 		/* Enable reset on MTA */
15014 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
15015 		if (rv != 0)
15016 			goto release;
15017 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
15018 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
15019 
15020 release:
15021 		sc->phy.release(sc);
15022 		return rv;
15023 	}
15024 
15025 	return 0;
15026 }
15027 
15028 static void
15029 wm_enable_wakeup(struct wm_softc *sc)
15030 {
15031 	uint32_t reg, pmreg;
15032 	pcireg_t pmode;
15033 	int rv = 0;
15034 
15035 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
15036 		device_xname(sc->sc_dev), __func__));
15037 
15038 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
15039 	    &pmreg, NULL) == 0)
15040 		return;
15041 
15042 	if ((sc->sc_flags & WM_F_WOL) == 0)
15043 		goto pme;
15044 
15045 	/* Advertise the wakeup capability */
15046 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
15047 	    | CTRL_SWDPIN(3));
15048 
15049 	/* Keep the laser running on fiber adapters */
15050 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
15051 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
15052 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
15053 		reg |= CTRL_EXT_SWDPIN(3);
15054 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15055 	}
15056 
15057 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
15058 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
15059 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
15060 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
15061 		wm_suspend_workarounds_ich8lan(sc);
15062 
15063 #if 0	/* For the multicast packet */
15064 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
15065 	reg |= WUFC_MC;
15066 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
15067 #endif
15068 
15069 	if (sc->sc_type >= WM_T_PCH) {
15070 		rv = wm_enable_phy_wakeup(sc);
15071 		if (rv != 0)
15072 			goto pme;
15073 	} else {
15074 		/* Enable wakeup by the MAC */
15075 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
15076 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
15077 	}
15078 
15079 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
15080 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
15081 		|| (sc->sc_type == WM_T_PCH2))
15082 	    && (sc->sc_phytype == WMPHY_IGP_3))
15083 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
15084 
15085 pme:
15086 	/* Request PME */
15087 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
15088 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
15089 		/* For WOL */
15090 		pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
15091 	} else {
15092 		/* Disable WOL */
15093 		pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
15094 	}
15095 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
15096 }
15097 
15098 /* Disable ASPM L0s and/or L1 for workaround */
15099 static void
15100 wm_disable_aspm(struct wm_softc *sc)
15101 {
15102 	pcireg_t reg, mask = 0;
15103 	const char *str = "";
15104 
15105 	/*
15106 	 * Only for PCIe devices that have the PCIe capability in the PCI
15107 	 * config space.
15108 	 */
15109 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
15110 		return;
15111 
15112 	switch (sc->sc_type) {
15113 	case WM_T_82571:
15114 	case WM_T_82572:
15115 		/*
15116 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
15117 		 * State Power management L1 State (ASPM L1).
15118 		 */
15119 		mask = PCIE_LCSR_ASPM_L1;
15120 		str = "L1 is";
15121 		break;
15122 	case WM_T_82573:
15123 	case WM_T_82574:
15124 	case WM_T_82583:
15125 		/*
15126 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
15127 		 *
15128 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
15129 		 * some chipsets.  The documentation for the 82574 and 82583
15130 		 * says that disabling L0s on those specific chipsets is
15131 		 * sufficient, but we do what the Intel em driver does.
15132 		 *
15133 		 * References:
15134 		 * Errata 8 of the Specification Update of i82573.
15135 		 * Errata 20 of the Specification Update of i82574.
15136 		 * Errata 9 of the Specification Update of i82583.
15137 		 */
15138 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
15139 		str = "L0s and L1 are";
15140 		break;
15141 	default:
15142 		return;
15143 	}
15144 
15145 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
15146 	    sc->sc_pcixe_capoff + PCIE_LCSR);
15147 	reg &= ~mask;
15148 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
15149 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
15150 
15151 	/* Print only in wm_attach() */
15152 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
15153 		aprint_verbose_dev(sc->sc_dev,
15154 		    "ASPM %s disabled to workaround the errata.\n", str);
15155 }
15156 
15157 /* LPLU */
15158 
15159 static void
15160 wm_lplu_d0_disable(struct wm_softc *sc)
15161 {
15162 	struct mii_data *mii = &sc->sc_mii;
15163 	uint32_t reg;
15164 	uint16_t phyval;
15165 
15166 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
15167 		device_xname(sc->sc_dev), __func__));
15168 
15169 	if (sc->sc_phytype == WMPHY_IFE)
15170 		return;
15171 
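	/*
	 * Where D0 LPLU (Low Power Link Up) is controlled differs per
	 * family: a PHY power-management register on older parts, PHPM on
	 * 82580/I350/I210/I211, PHY_CTRL on 82574/82583/ICH, and the HV
	 * OEM bits on PCH and newer.
	 */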
15172 	switch (sc->sc_type) {
15173 	case WM_T_82571:
15174 	case WM_T_82572:
15175 	case WM_T_82573:
15176 	case WM_T_82575:
15177 	case WM_T_82576:
15178 		mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, &phyval);
15179 		phyval &= ~PMR_D0_LPLU;
15180 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, phyval);
15181 		break;
15182 	case WM_T_82580:
15183 	case WM_T_I350:
15184 	case WM_T_I210:
15185 	case WM_T_I211:
15186 		reg = CSR_READ(sc, WMREG_PHPM);
15187 		reg &= ~PHPM_D0A_LPLU;
15188 		CSR_WRITE(sc, WMREG_PHPM, reg);
15189 		break;
15190 	case WM_T_82574:
15191 	case WM_T_82583:
15192 	case WM_T_ICH8:
15193 	case WM_T_ICH9:
15194 	case WM_T_ICH10:
15195 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
15196 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
15197 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
15198 		CSR_WRITE_FLUSH(sc);
15199 		break;
15200 	case WM_T_PCH:
15201 	case WM_T_PCH2:
15202 	case WM_T_PCH_LPT:
15203 	case WM_T_PCH_SPT:
15204 	case WM_T_PCH_CNP:
15205 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
15206 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
15207 		if (wm_phy_resetisblocked(sc) == false)
15208 			phyval |= HV_OEM_BITS_ANEGNOW;
15209 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
15210 		break;
15211 	default:
15212 		break;
15213 	}
15214 }
15215 
15216 /* EEE */
15217 
15218 static int
15219 wm_set_eee_i350(struct wm_softc *sc)
15220 {
15221 	struct ethercom *ec = &sc->sc_ethercom;
15222 	uint32_t ipcnfg, eeer;
15223 	uint32_t ipcnfg_mask
15224 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
15225 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
15226 
15227 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
15228 
15229 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
15230 	eeer = CSR_READ(sc, WMREG_EEER);
15231 
15232 	/* Enable or disable per user setting */
15233 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
15234 		ipcnfg |= ipcnfg_mask;
15235 		eeer |= eeer_mask;
15236 	} else {
15237 		ipcnfg &= ~ipcnfg_mask;
15238 		eeer &= ~eeer_mask;
15239 	}
15240 
15241 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
15242 	CSR_WRITE(sc, WMREG_EEER, eeer);
15243 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
15244 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
15245 
15246 	return 0;
15247 }
15248 
15249 static int
15250 wm_set_eee_pchlan(struct wm_softc *sc)
15251 {
15252 	device_t dev = sc->sc_dev;
15253 	struct ethercom *ec = &sc->sc_ethercom;
15254 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
15255 	int rv = 0;
15256 
15257 	switch (sc->sc_phytype) {
15258 	case WMPHY_82579:
15259 		lpa = I82579_EEE_LP_ABILITY;
15260 		pcs_status = I82579_EEE_PCS_STATUS;
15261 		adv_addr = I82579_EEE_ADVERTISEMENT;
15262 		break;
15263 	case WMPHY_I217:
15264 		lpa = I217_EEE_LP_ABILITY;
15265 		pcs_status = I217_EEE_PCS_STATUS;
15266 		adv_addr = I217_EEE_ADVERTISEMENT;
15267 		break;
15268 	default:
15269 		return 0;
15270 	}
15271 
15272 	if (sc->phy.acquire(sc)) {
15273 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
15274 		return 0;
15275 	}
15276 
15277 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
15278 	if (rv != 0)
15279 		goto release;
15280 
15281 	/* Clear bits that enable EEE in various speeds */
15282 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
15283 
15284 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
15285 		/* Save off link partner's EEE ability */
15286 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
15287 		if (rv != 0)
15288 			goto release;
15289 
15290 		/* Read EEE advertisement */
15291 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
15292 			goto release;
15293 
15294 		/*
15295 		 * Enable EEE only for speeds in which the link partner is
15296 		 * EEE capable and for which we advertise EEE.
15297 		 */
15298 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
15299 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
15300 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
15301 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
15302 			if ((data & ANLPAR_TX_FD) != 0)
15303 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
15304 			else {
15305 				/*
15306 				 * EEE is not supported in 100Half, so ignore
15307 				 * partner's EEE in 100 ability if full-duplex
15308 				 * is not advertised.
15309 				 */
15310 				sc->eee_lp_ability
15311 				    &= ~AN_EEEADVERT_100_TX;
15312 			}
15313 		}
15314 	}
15315 
15316 	if (sc->sc_phytype == WMPHY_82579) {
15317 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
15318 		if (rv != 0)
15319 			goto release;
15320 
15321 		data &= ~I82579_LPI_PLL_SHUT_100;
15322 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
15323 	}
15324 
15325 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
15326 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
15327 		goto release;
15328 
15329 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
15330 release:
15331 	sc->phy.release(sc);
15332 
15333 	return rv;
15334 }
15335 
15336 static int
15337 wm_set_eee(struct wm_softc *sc)
15338 {
15339 	struct ethercom *ec = &sc->sc_ethercom;
15340 
15341 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
15342 		return 0;
15343 
15344 	if (sc->sc_type == WM_T_I354) {
15345 		/* I354 uses an external PHY */
15346 		return 0; /* not yet */
15347 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
15348 		return wm_set_eee_i350(sc);
15349 	else if (sc->sc_type >= WM_T_PCH2)
15350 		return wm_set_eee_pchlan(sc);
15351 
15352 	return 0;
15353 }
15354 
15355 /*
15356  * Workarounds (mainly PHY related).
15357  * Basically, PHY's workarounds are in the PHY drivers.
15358  */
15359 
15360 /* Work-around for 82566 Kumeran PCS lock loss */
15361 static int
15362 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
15363 {
15364 	struct mii_data *mii = &sc->sc_mii;
15365 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
15366 	int i, reg, rv;
15367 	uint16_t phyreg;
15368 
15369 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
15370 		device_xname(sc->sc_dev), __func__));
15371 
15372 	/* If the link is not up, do nothing */
15373 	if ((status & STATUS_LU) == 0)
15374 		return 0;
15375 
15376 	/* Nothing to do if the link is other than 1Gbps */
15377 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
15378 		return 0;
15379 
15380 	for (i = 0; i < 10; i++) {
15381 		/* read twice */
15382 		/* Read twice (the first read may return stale state) */
15383 		if (rv != 0)
15384 			return rv;
15385 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
15386 		if (rv != 0)
15387 			return rv;
15388 
15389 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
15390 			goto out;	/* GOOD! */
15391 
15392 		/* Reset the PHY */
15393 		wm_reset_phy(sc);
15394 		delay(5*1000);
15395 	}
15396 
15397 	/* Disable GigE link negotiation */
15398 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
15399 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
15400 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
15401 
15402 	/*
15403 	 * Call gig speed drop workaround on Gig disable before accessing
15404 	 * any PHY registers.
15405 	 */
15406 	wm_gig_downshift_workaround_ich8lan(sc);
15407 
15408 out:
15409 	return 0;
15410 }
15411 
15412 /*
15413  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
15414  *  @sc: pointer to the HW structure
15415  *
15416  *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
15417  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
15418  *    1) Set Kumeran Near-end loopback
15419  *    2) Clear Kumeran Near-end loopback
15420  *  Should only be called for ICH8[m] devices with any 1G Phy.
15421  */
15422 static void
15423 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
15424 {
15425 	uint16_t kmreg;
15426 
15427 	/* Only for igp3 */
15428 	if (sc->sc_phytype == WMPHY_IGP_3) {
15429 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
15430 			return;
15431 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
15432 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
15433 			return;
15434 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
15435 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
15436 	}
15437 }
15438 
15439 /*
15440  * Workaround for pch's PHYs
15441  * XXX should be moved to new PHY driver?
15442  */
15443 static int
15444 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
15445 {
15446 	device_t dev = sc->sc_dev;
15447 	struct mii_data *mii = &sc->sc_mii;
15448 	struct mii_softc *child;
15449 	uint16_t phy_data, phyrev = 0;
15450 	int phytype = sc->sc_phytype;
15451 	int rv;
15452 
15453 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
15454 		device_xname(dev), __func__));
15455 	KASSERT(sc->sc_type == WM_T_PCH);
15456 
15457 	/* Set MDIO slow mode before any other MDIO access */
15458 	if (phytype == WMPHY_82577)
15459 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
15460 			return rv;
15461 
15462 	child = LIST_FIRST(&sc->sc_mii.mii_phys);
15463 	if (child != NULL)
15464 		phyrev = child->mii_mpd_rev;
15465 
15466 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
15467 	if ((child != NULL) &&
15468 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
15469 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
15470 		/* Disable generation of early preamble (0x4431) */
15471 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
15472 		    &phy_data);
15473 		if (rv != 0)
15474 			return rv;
15475 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
15476 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
15477 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
15478 		    phy_data);
15479 		if (rv != 0)
15480 			return rv;
15481 
15482 		/* Preamble tuning for SSC */
15483 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
15484 		if (rv != 0)
15485 			return rv;
15486 	}
15487 
15488 	/* 82578 */
15489 	if (phytype == WMPHY_82578) {
15490 		/*
15491 		 * Return registers to default by doing a soft reset then
15492 		 * writing 0x3140 to the control register
15493 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
15494 		 */
15495 		if ((child != NULL) && (phyrev < 2)) {
15496 			PHY_RESET(child);
15497 			rv = sc->sc_mii.mii_writereg(dev, 2, MII_BMCR,
15498 			    0x3140);
15499 			if (rv != 0)
15500 				return rv;
15501 		}
15502 	}
15503 
15504 	/* Select page 0 */
15505 	if ((rv = sc->phy.acquire(sc)) != 0)
15506 		return rv;
15507 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT, 0);
15508 	sc->phy.release(sc);
15509 	if (rv != 0)
15510 		return rv;
15511 
15512 	/*
15513 	 * Configure the K1 Si workaround during PHY reset assuming there is
15514 	 * link, so that it disables K1 when the link is at 1Gbps.
15515 	 */
15516 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
15517 		return rv;
15518 
15519 	/* Workaround for link disconnects on a busy hub in half duplex */
15520 	rv = sc->phy.acquire(sc);
15521 	if (rv)
15522 		return rv;
15523 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
15524 	if (rv)
15525 		goto release;
15526 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
15527 	    phy_data & 0x00ff);
15528 	if (rv)
15529 		goto release;
15530 
15531 	/* Set MSE higher to enable link to stay up when noise is high */
15532 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
15533 release:
15534 	sc->phy.release(sc);
15535 
15536 	return rv;
15537 }
15538 
15539 /*
15540  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
15541  *  @sc:   pointer to the HW structure
15542  */
15543 static void
15544 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
15545 {
15546 	device_t dev = sc->sc_dev;
15547 	uint32_t mac_reg;
15548 	uint16_t i, wuce;
15549 	int count;
15550 
15551 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
15552 		device_xname(sc->sc_dev), __func__));
15553 
15554 	if (sc->phy.acquire(sc) != 0)
15555 		return;
15556 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
15557 		goto release;
15558 
15559 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
15560 	count = wm_rar_count(sc);
15561 	for (i = 0; i < count; i++) {
15562 		uint16_t lo, hi;
15563 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
15564 		lo = (uint16_t)(mac_reg & 0xffff);
15565 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
15566 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
15567 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
15568 
15569 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
15570 		lo = (uint16_t)(mac_reg & 0xffff);
15571 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
15572 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
15573 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
15574 	}
15575 
15576 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
15577 
15578 release:
15579 	sc->phy.release(sc);
15580 }
15581 
15582 /*
15583  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
15584  *  done after every PHY reset.
15585  */
15586 static int
15587 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
15588 {
15589 	device_t dev = sc->sc_dev;
15590 	int rv;
15591 
15592 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
15593 		device_xname(dev), __func__));
15594 	KASSERT(sc->sc_type == WM_T_PCH2);
15595 
15596 	/* Set MDIO slow mode before any other MDIO access */
15597 	rv = wm_set_mdio_slow_mode_hv(sc);
15598 	if (rv != 0)
15599 		return rv;
15600 
15601 	rv = sc->phy.acquire(sc);
15602 	if (rv != 0)
15603 		return rv;
15604 	/* Set MSE higher to enable link to stay up when noise is high */
15605 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
15606 	if (rv != 0)
15607 		goto release;
15608 	/* Drop link after 5 times MSE threshold was reached */
15609 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
15610 release:
15611 	sc->phy.release(sc);
15612 
15613 	return rv;
15614 }
15615 
15616 /**
15617  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
15618  *  @link: link up bool flag
15619  *
15620  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
15621  *  preventing further DMA write requests.  Workaround the issue by disabling
15622  *  the de-assertion of the clock request when in 1Gbps mode.
15623  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
15624  *  speeds in order to avoid Tx hangs.
15625  **/
15626 static int
15627 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
15628 {
15629 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
15630 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
15631 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
15632 	uint16_t phyreg;
15633 
15634 	if (link && (speed == STATUS_SPEED_1000)) {
15635 		int rv = sc->phy.acquire(sc);
		if (rv != 0)
			return rv;
15636 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
15637 		    &phyreg);
15638 		if (rv != 0)
15639 			goto release;
15640 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
15641 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
15642 		if (rv != 0)
15643 			goto release;
15644 		delay(20);
15645 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
15646 
15647 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
15648 		    &phyreg);
15649 release:
15650 		sc->phy.release(sc);
15651 		return rv;
15652 	}
15653 
15654 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
15655 
15656 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
15657 	if (((child != NULL) && (child->mii_mpd_rev > 5))
15658 	    || !link
15659 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
15660 		goto update_fextnvm6;
15661 
15662 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
15663 
15664 	/* Clear link status transmit timeout */
15665 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
15666 	if (speed == STATUS_SPEED_100) {
15667 		/* Set inband Tx timeout to 5x10us for 100Half */
15668 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
15669 
15670 		/* Do not extend the K1 entry latency for 100Half */
15671 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
15672 	} else {
15673 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
15674 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
15675 
15676 		/* Extend the K1 entry latency for 10 Mbps */
15677 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
15678 	}
15679 
15680 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
15681 
15682 update_fextnvm6:
15683 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
15684 	return 0;
15685 }
15686 
15687 /*
15688  *  wm_k1_gig_workaround_hv - K1 Si workaround
15689  *  @sc:   pointer to the HW structure
15690  *  @link: link up bool flag
15691  *
15692  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
15693  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
15694  *  If link is down, the function will restore the default K1 setting located
15695  *  in the NVM.
15696  */
15697 static int
15698 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
15699 {
15700 	int k1_enable = sc->sc_nvm_k1_enabled;
15701 
15702 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
15703 		device_xname(sc->sc_dev), __func__));
15704 
15705 	if (sc->phy.acquire(sc) != 0)
15706 		return -1;
15707 
15708 	if (link) {
15709 		k1_enable = 0;
15710 
15711 		/* Link stall fix for link up */
15712 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
15713 		    0x0100);
15714 	} else {
15715 		/* Link stall fix for link down */
15716 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
15717 		    0x4100);
15718 	}
15719 
15720 	wm_configure_k1_ich8lan(sc, k1_enable);
15721 	sc->phy.release(sc);
15722 
15723 	return 0;
15724 }
15725 
15726 /*
15727  *  wm_k1_workaround_lv - K1 Si workaround
15728  *  @sc:   pointer to the HW structure
15729  *
15730  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
15731  *  Disable K1 for 1000 and 100 speeds
15732  */
15733 static int
15734 wm_k1_workaround_lv(struct wm_softc *sc)
15735 {
15736 	uint32_t reg;
15737 	uint16_t phyreg;
15738 	int rv;
15739 
15740 	if (sc->sc_type != WM_T_PCH2)
15741 		return 0;
15742 
15743 	/* Set K1 beacon duration based on 10Mbps speed */
15744 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
15745 	if (rv != 0)
15746 		return rv;
15747 
15748 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
15749 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
15750 		if (phyreg &
15751 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
15752 			/* LV 1G/100 packet drop issue workaround */
15753 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
15754 			    &phyreg);
15755 			if (rv != 0)
15756 				return rv;
15757 			phyreg &= ~HV_PM_CTRL_K1_ENA;
15758 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
15759 			    phyreg);
15760 			if (rv != 0)
15761 				return rv;
15762 		} else {
15763 			/* For 10Mbps */
15764 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
15765 			reg &= ~FEXTNVM4_BEACON_DURATION;
15766 			reg |= FEXTNVM4_BEACON_DURATION_16US;
15767 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
15768 		}
15769 	}
15770 
15771 	return 0;
15772 }
15773 
15774 /*
15775  *  wm_link_stall_workaround_hv - Si workaround
15776  *  @sc: pointer to the HW structure
15777  *
15778  *  This function works around a Si bug where the link partner can get
15779  *  a link up indication before the PHY does. If small packets are sent
15780  *  by the link partner they can be placed in the packet buffer without
15781  *  being properly accounted for by the PHY and will stall preventing
15782  *  further packets from being received.  The workaround is to clear the
15783  *  packet buffer after the PHY detects link up.
15784  */
15785 static int
15786 wm_link_stall_workaround_hv(struct wm_softc *sc)
15787 {
15788 	uint16_t phyreg;
15789 
15790 	if (sc->sc_phytype != WMPHY_82578)
15791 		return 0;
15792 
15793 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
15794 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
15795 	if ((phyreg & BMCR_LOOP) != 0)
15796 		return 0;
15797 
15798 	/* Check if link is up and at 1Gbps */
15799 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
15800 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
15801 	    | BM_CS_STATUS_SPEED_MASK;
15802 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
15803 		| BM_CS_STATUS_SPEED_1000))
15804 		return 0;
15805 
15806 	delay(200 * 1000);	/* XXX too big */
15807 
15808 	/* Flush the packets in the fifo buffer */
15809 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
15810 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
15811 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
15812 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
15813 
15814 	return 0;
15815 }
15816 
15817 static int
15818 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
15819 {
15820 	int rv;
15821 	uint16_t reg;
15822 
15823 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
15824 	if (rv != 0)
15825 		return rv;
15826 
15827 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
15828 	    reg | HV_KMRN_MDIO_SLOW);
15829 }
15830 
15831 /*
15832  *  wm_configure_k1_ich8lan - Configure K1 power state
15833  *  @sc: pointer to the HW structure
15834  *  @enable: K1 state to configure
15835  *
15836  *  Configure the K1 power state based on the provided parameter.
15837  *  Assumes semaphore already acquired.
15838  */
15839 static void
15840 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
15841 {
15842 	uint32_t ctrl, ctrl_ext, tmp;
15843 	uint16_t kmreg;
15844 	int rv;
15845 
15846 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
15847 
15848 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
15849 	if (rv != 0)
15850 		return;
15851 
15852 	if (k1_enable)
15853 		kmreg |= KUMCTRLSTA_K1_ENABLE;
15854 	else
15855 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
15856 
15857 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
15858 	if (rv != 0)
15859 		return;
15860 
15861 	delay(20);
15862 
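	/*
	 * Temporarily force the MAC speed configuration (CTRL_FRCSPD plus
	 * CTRL_EXT_SPD_BYPS), then restore the original CTRL/CTRL_EXT
	 * values, with a flush and a 20us pause around each step.
	 */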
15863 	ctrl = CSR_READ(sc, WMREG_CTRL);
15864 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
15865 
15866 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
15867 	tmp |= CTRL_FRCSPD;
15868 
15869 	CSR_WRITE(sc, WMREG_CTRL, tmp);
15870 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
15871 	CSR_WRITE_FLUSH(sc);
15872 	delay(20);
15873 
15874 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
15875 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
15876 	CSR_WRITE_FLUSH(sc);
15877 	delay(20);
15878 
15879 	return;
15880 }
15881 
15882 /* special case - for 82575 - need to do manual init ... */
15883 static void
15884 wm_reset_init_script_82575(struct wm_softc *sc)
15885 {
15886 	/*
15887 	 * Remark: this is untested code - we have no board without EEPROM
15888 	 * with the same setup as mentioned in the FreeBSD driver for the i82575.
15889 	 */
15890 
15891 	/* SerDes configuration via SERDESCTRL */
15892 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
15893 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
15894 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
15895 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
15896 
15897 	/* CCM configuration via CCMCTL register */
15898 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
15899 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
15900 
15901 	/* PCIe lanes configuration */
15902 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
15903 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
15904 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
15905 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
15906 
15907 	/* PCIe PLL Configuration */
15908 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
15909 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
15910 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
15911 }
15912 
15913 static void
15914 wm_reset_mdicnfg_82580(struct wm_softc *sc)
15915 {
15916 	uint32_t reg;
15917 	uint16_t nvmword;
15918 	int rv;
15919 
15920 	if (sc->sc_type != WM_T_82580)
15921 		return;
15922 	if ((sc->sc_flags & WM_F_SGMII) == 0)
15923 		return;
15924 
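	/*
	 * In SGMII mode, the external/shared MDIO settings live in the
	 * per-port CFG3 word of the NVM; mirror them into MDICNFG.
	 */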
15925 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
15926 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
15927 	if (rv != 0) {
15928 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
15929 		    __func__);
15930 		return;
15931 	}
15932 
15933 	reg = CSR_READ(sc, WMREG_MDICNFG);
15934 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
15935 		reg |= MDICNFG_DEST;
15936 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
15937 		reg |= MDICNFG_COM_MDIO;
15938 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
15939 }
15940 
15941 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
15942 
15943 static bool
15944 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
15945 {
15946 	uint32_t reg;
15947 	uint16_t id1, id2;
15948 	int i, rv;
15949 
15950 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
15951 		device_xname(sc->sc_dev), __func__));
15952 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
15953 
15954 	id1 = id2 = 0xffff;
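	/*
	 * Try the ID registers twice; the first access can fail or return
	 * an invalid (all-zeros/all-ones) ID, e.g. while the interconnect
	 * is still settling.
	 */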
15955 	for (i = 0; i < 2; i++) {
15956 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
15957 		    &id1);
15958 		if ((rv != 0) || MII_INVALIDID(id1))
15959 			continue;
15960 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
15961 		    &id2);
15962 		if ((rv != 0) || MII_INVALIDID(id2))
15963 			continue;
15964 		break;
15965 	}
15966 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
15967 		goto out;
15968 
15969 	/*
15970 	 * In case the PHY needs to be in mdio slow mode,
15971 	 * set slow mode and try to get the PHY id again.
15972 	 */
15973 	rv = 0;
15974 	if (sc->sc_type < WM_T_PCH_LPT) {
15975 		sc->phy.release(sc);
15976 		wm_set_mdio_slow_mode_hv(sc);
15977 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
15978 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
15979 		sc->phy.acquire(sc);
15980 	}
15981 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
15982 		device_printf(sc->sc_dev, "XXX return with false\n");
15983 		return false;
15984 	}
15985 out:
15986 	if (sc->sc_type >= WM_T_PCH_LPT) {
15987 		/* Only unforce SMBus if ME is not active */
15988 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
15989 			uint16_t phyreg;
15990 
15991 			/* Unforce SMBus mode in PHY */
15992 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
15993 			    CV_SMB_CTRL, &phyreg);
15994 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
15995 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
15996 			    CV_SMB_CTRL, phyreg);
15997 
15998 			/* Unforce SMBus mode in MAC */
15999 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
16000 			reg &= ~CTRL_EXT_FORCE_SMBUS;
16001 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
16002 		}
16003 	}
16004 	return true;
16005 }
16006 
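/*
 * wm_toggle_lanphypc_pch_lpt:
 *
 *	(Summary comment, derived from the code below.)  Toggle the
 *	LANPHYPC pin (drive the value low while the override bit is set)
 *	to power-cycle the PHY, then wait for it to come back up.
 */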
16007 static void
16008 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
16009 {
16010 	uint32_t reg;
16011 	int i;
16012 
16013 	/* Set PHY Config Counter to 50msec */
16014 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
16015 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
16016 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
16017 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
16018 
16019 	/* Toggle LANPHYPC */
16020 	reg = CSR_READ(sc, WMREG_CTRL);
16021 	reg |= CTRL_LANPHYPC_OVERRIDE;
16022 	reg &= ~CTRL_LANPHYPC_VALUE;
16023 	CSR_WRITE(sc, WMREG_CTRL, reg);
16024 	CSR_WRITE_FLUSH(sc);
16025 	delay(1000);
16026 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
16027 	CSR_WRITE(sc, WMREG_CTRL, reg);
16028 	CSR_WRITE_FLUSH(sc);
16029 
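	/*
	 * Wait for the PHY to return: pre-LPT parts use a fixed 50 ms
	 * delay; LPT and newer poll the power-cycle-done bit
	 * (CTRL_EXT_LPCD) for up to ~100 ms and then allow another
	 * 30 ms to settle.
	 */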
16030 	if (sc->sc_type < WM_T_PCH_LPT)
16031 		delay(50 * 1000);
16032 	else {
16033 		i = 20;
16034 
16035 		do {
16036 			delay(5 * 1000);
16037 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
16038 		    && i--);
16039 
16040 		delay(30 * 1000);
16041 	}
16042 }
16043 
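/*
 * wm_platform_pm_pch_lpt:
 *
 *	(Summary comment, derived from the code below.)  Program Latency
 *	Tolerance Reporting (LTR) from the link speed and the Rx packet
 *	buffer size, clamp it to what the platform advertises, and set
 *	the OBFF high water mark to match.
 */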
16044 static int
16045 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
16046 {
16047 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
16048 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
16049 	uint32_t rxa;
16050 	uint16_t scale = 0, lat_enc = 0;
16051 	int32_t obff_hwm = 0;
16052 	int64_t lat_ns, value;
16053 
16054 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
16055 		device_xname(sc->sc_dev), __func__));
16056 
16057 	if (link) {
16058 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
16059 		uint32_t status;
16060 		uint16_t speed;
16061 		pcireg_t preg;
16062 
16063 		status = CSR_READ(sc, WMREG_STATUS);
16064 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
16065 		case STATUS_SPEED_10:
16066 			speed = 10;
16067 			break;
16068 		case STATUS_SPEED_100:
16069 			speed = 100;
16070 			break;
16071 		case STATUS_SPEED_1000:
16072 			speed = 1000;
16073 			break;
16074 		default:
16075 			device_printf(sc->sc_dev, "Unknown speed "
16076 			    "(status = %08x)\n", status);
16077 			return -1;
16078 		}
16079 
16080 		/* Rx Packet Buffer Allocation size (KB) */
16081 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
16082 
16083 		/*
16084 		 * Determine the maximum latency tolerated by the device.
16085 		 *
16086 		 * Per the PCIe spec, the tolerated latencies are encoded as
16087 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
16088 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
16089 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
16090 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
16091 		 * 1=2^5ns, 2=2^10ns, ..., 5=2^25ns.
		 */
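		/*
		 * Illustrative numbers (not from the spec): with
		 * rxa = 26KB, a 1500 byte MTU and a 1000Mb/s link, the
		 * buffer less two maximum-sized frames is
		 * 26 * 1024 - 2 * 1514 = 23596 bytes, so
		 * lat_ns = 23596 * 8 * 1000 / 1000 = 188768 ns.
		 */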
16092 		lat_ns = ((int64_t)rxa * 1024 -
16093 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
16094 			+ ETHER_HDR_LEN))) * 8 * 1000;
16095 		if (lat_ns < 0)
16096 			lat_ns = 0;
16097 		else
16098 			lat_ns /= speed;
16099 		value = lat_ns;
16100 
16101 		while (value > LTRV_VALUE) {
16102 			scale++;
16103 			value = howmany(value, __BIT(5));
16104 		}
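		/*
		 * Continuing the illustrative example above: 188768 > 1023,
		 * so divide by 2^5 twice: howmany(188768, 32) = 5899, then
		 * howmany(5899, 32) = 185, giving scale = 2, value = 185,
		 * i.e. an encoded latency of 185 * 2^10 = 189440 ns, the
		 * smallest encodable value that is >= lat_ns.
		 */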
16105 		if (scale > LTRV_SCALE_MAX) {
16106 			device_printf(sc->sc_dev,
16107 			    "Invalid LTR latency scale %d\n", scale);
16108 			return -1;
16109 		}
16110 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
16111 
16112 		/* Determine the maximum latency tolerated by the platform */
16113 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
16114 		    WM_PCI_LTR_CAP_LPT);
16115 		max_snoop = preg & 0xffff;
16116 		max_nosnoop = preg >> 16;
16117 
16118 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
16119 
16120 		if (lat_enc > max_ltr_enc) {
16121 			lat_enc = max_ltr_enc;
16122 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
16123 			    * PCI_LTR_SCALETONS(
16124 				    __SHIFTOUT(lat_enc,
16125 					PCI_LTR_MAXSNOOPLAT_SCALE));
16126 		}
16127 
16128 		if (lat_ns) {
16129 			lat_ns *= speed * 1000;
16130 			lat_ns /= 8;
16131 			lat_ns /= 1000000000;
16132 			obff_hwm = (int32_t)(rxa - lat_ns);
16133 		}
16134 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
16135 			device_printf(sc->sc_dev, "Invalid high water mark %d "
16136 			    "(rxa = %d, lat_ns = %d)\n",
16137 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
16138 			return -1;
16139 		}
16140 	}
16141 	/* Snoop and No-Snoop latencies the same */
16142 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
16143 	CSR_WRITE(sc, WMREG_LTRV, reg);
16144 
16145 	/* Set OBFF high water mark */
16146 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
16147 	reg |= obff_hwm;
16148 	CSR_WRITE(sc, WMREG_SVT, reg);
16149 
16150 	/* Enable OBFF */
16151 	reg = CSR_READ(sc, WMREG_SVCR);
16152 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
16153 	CSR_WRITE(sc, WMREG_SVCR, reg);
16154 
16155 	return 0;
16156 }
16157 
16158 /*
16159  * I210 Errata 25 and I211 Errata 10
16160  * Slow System Clock.
16161  */
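/*
 * (Summary comment, derived from the code below.)  If the PHY PLL
 * reports itself unconfigured (the system clock ran too slowly during
 * iNVM auto-load), override the autoload word, bounce the function
 * through D3hot and retry, up to WM_MAX_PLL_TRIES times.
 */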
16162 static int
16163 wm_pll_workaround_i210(struct wm_softc *sc)
16164 {
16165 	uint32_t mdicnfg, wuc;
16166 	uint32_t reg;
16167 	pcireg_t pcireg;
16168 	uint32_t pmreg;
16169 	uint16_t nvmword, tmp_nvmword;
16170 	uint16_t phyval;
16171 	bool wa_done = false;
16172 	int i, rv = 0;
16173 
16174 	/* Get Power Management cap offset */
16175 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
16176 	    &pmreg, NULL) == 0)
16177 		return -1;
16178 
16179 	/* Save WUC and MDICNFG registers */
16180 	wuc = CSR_READ(sc, WMREG_WUC);
16181 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
16182 
16183 	reg = mdicnfg & ~MDICNFG_DEST;
16184 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
16185 
16186 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
16187 		nvmword = INVM_DEFAULT_AL;
16188 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
16189 
16190 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
16191 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
16192 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
16193 
16194 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
16195 			rv = 0;
16196 			break; /* OK */
16197 		} else
16198 			rv = -1;
16199 
16200 		wa_done = true;
16201 		/* Directly reset the internal PHY */
16202 		reg = CSR_READ(sc, WMREG_CTRL);
16203 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
16204 
16205 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
16206 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
16207 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
16208 
16209 		CSR_WRITE(sc, WMREG_WUC, 0);
16210 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
16211 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
16212 
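		/*
		 * Bounce the function through D3hot and back to D0 so
		 * that the overridden autoload word takes effect.
		 */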
16213 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
16214 		    pmreg + PCI_PMCSR);
16215 		pcireg |= PCI_PMCSR_STATE_D3;
16216 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
16217 		    pmreg + PCI_PMCSR, pcireg);
16218 		delay(1000);
16219 		pcireg &= ~PCI_PMCSR_STATE_D3;
16220 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
16221 		    pmreg + PCI_PMCSR, pcireg);
16222 
16223 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
16224 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
16225 
16226 		/* Restore WUC register */
16227 		CSR_WRITE(sc, WMREG_WUC, wuc);
16228 	}
16229 
16230 	/* Restore MDICNFG setting */
16231 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
16232 	if (wa_done)
16233 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
16234 	return rv;
16235 }
16236 
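/*
 * wm_legacy_irq_quirk_spt:
 *
 *	(Summary comment, derived from the code below.)  On PCH_SPT and
 *	PCH_CNP, keep the side clock ungated and disable IOSF SB clock
 *	gating so that interrupts are delivered reliably in legacy
 *	(INTx) interrupt mode.
 */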
16237 static void
16238 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
16239 {
16240 	uint32_t reg;
16241 
16242 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
16243 		device_xname(sc->sc_dev), __func__));
16244 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
16245 	    || (sc->sc_type == WM_T_PCH_CNP));
16246 
16247 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
16248 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
16249 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
16250 
16251 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
16252 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
16253 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
16254 }
16255