1 /*	$NetBSD: if_wm.c,v 1.572 2018/04/13 09:35:10 msaitoh Exp $	*/
2 
3 /*
4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*******************************************************************************
39 
40   Copyright (c) 2001-2005, Intel Corporation
41   All rights reserved.
42 
43   Redistribution and use in source and binary forms, with or without
44   modification, are permitted provided that the following conditions are met:
45 
46    1. Redistributions of source code must retain the above copyright notice,
47       this list of conditions and the following disclaimer.
48 
49    2. Redistributions in binary form must reproduce the above copyright
50       notice, this list of conditions and the following disclaimer in the
51       documentation and/or other materials provided with the distribution.
52 
53    3. Neither the name of the Intel Corporation nor the names of its
54       contributors may be used to endorse or promote products derived from
55       this software without specific prior written permission.
56 
57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67   POSSIBILITY OF SUCH DAMAGE.
68 
69 *******************************************************************************/
70 /*
71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72  *
73  * TODO (in order of importance):
74  *
75  *	- Check XXX'ed comments
76  *	- TX Multi queue improvement (refine queue selection logic)
77  *	- Split header buffer for newer descriptors
78  *	- EEE (Energy Efficient Ethernet)
79  *	- Virtual Function
80  *	- Set LED correctly (based on contents in EEPROM)
81  *	- Rework how parameters are loaded from the EEPROM.
82  *	- Image Unique ID
83  */
84 
85 #include <sys/cdefs.h>
86 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.572 2018/04/13 09:35:10 msaitoh Exp $");
87 
88 #ifdef _KERNEL_OPT
89 #include "opt_net_mpsafe.h"
90 #include "opt_if_wm.h"
91 #endif
92 
93 #include <sys/param.h>
94 #include <sys/systm.h>
95 #include <sys/callout.h>
96 #include <sys/mbuf.h>
97 #include <sys/malloc.h>
98 #include <sys/kmem.h>
99 #include <sys/kernel.h>
100 #include <sys/socket.h>
101 #include <sys/ioctl.h>
102 #include <sys/errno.h>
103 #include <sys/device.h>
104 #include <sys/queue.h>
105 #include <sys/syslog.h>
106 #include <sys/interrupt.h>
107 #include <sys/cpu.h>
108 #include <sys/pcq.h>
109 
110 #include <sys/rndsource.h>
111 
112 #include <net/if.h>
113 #include <net/if_dl.h>
114 #include <net/if_media.h>
115 #include <net/if_ether.h>
116 
117 #include <net/bpf.h>
118 
119 #include <net/rss_config.h>
120 
121 #include <netinet/in.h>			/* XXX for struct ip */
122 #include <netinet/in_systm.h>		/* XXX for struct ip */
123 #include <netinet/ip.h>			/* XXX for struct ip */
124 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
125 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
126 
127 #include <sys/bus.h>
128 #include <sys/intr.h>
129 #include <machine/endian.h>
130 
131 #include <dev/mii/mii.h>
132 #include <dev/mii/miivar.h>
133 #include <dev/mii/miidevs.h>
134 #include <dev/mii/mii_bitbang.h>
135 #include <dev/mii/ikphyreg.h>
136 #include <dev/mii/igphyreg.h>
137 #include <dev/mii/igphyvar.h>
138 #include <dev/mii/inbmphyreg.h>
139 #include <dev/mii/ihphyreg.h>
140 
141 #include <dev/pci/pcireg.h>
142 #include <dev/pci/pcivar.h>
143 #include <dev/pci/pcidevs.h>
144 
145 #include <dev/pci/if_wmreg.h>
146 #include <dev/pci/if_wmvar.h>
147 
148 #ifdef WM_DEBUG
149 #define	WM_DEBUG_LINK		__BIT(0)
150 #define	WM_DEBUG_TX		__BIT(1)
151 #define	WM_DEBUG_RX		__BIT(2)
152 #define	WM_DEBUG_GMII		__BIT(3)
153 #define	WM_DEBUG_MANAGE		__BIT(4)
154 #define	WM_DEBUG_NVM		__BIT(5)
155 #define	WM_DEBUG_INIT		__BIT(6)
156 #define	WM_DEBUG_LOCK		__BIT(7)
157 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
158     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
159 
160 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
161 #else
162 #define	DPRINTF(x, y)	/* nothing */
163 #endif /* WM_DEBUG */
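/*
 * Illustrative use of DPRINTF() (a sketch, not compiled here): the second
 * argument is a complete, parenthesized printf() argument list, which lets
 * the whole call vanish when WM_DEBUG is not defined.
 */
#if 0
	DPRINTF(WM_DEBUG_LINK, ("%s: link state changed\n",
		device_xname(sc->sc_dev)));
#endif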
164 
165 #ifdef NET_MPSAFE
166 #define WM_MPSAFE	1
167 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
168 #else
169 #define CALLOUT_FLAGS	0
170 #endif
171 
172 /*
173  * Maximum number of interrupts: one per queue plus one for the link.
174  */
175 #define WM_MAX_NQUEUEINTR	16
176 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
177 
178 #ifndef WM_DISABLE_MSI
179 #define	WM_DISABLE_MSI 0
180 #endif
181 #ifndef WM_DISABLE_MSIX
182 #define	WM_DISABLE_MSIX 0
183 #endif
184 
185 int wm_disable_msi = WM_DISABLE_MSI;
186 int wm_disable_msix = WM_DISABLE_MSIX;
187 
188 #ifndef WM_WATCHDOG_TIMEOUT
189 #define WM_WATCHDOG_TIMEOUT 5
190 #endif
191 static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
192 
193 /*
194  * Transmit descriptor list size.  Due to errata, we can only have
195  * 256 hardware descriptors in the ring on < 82544, but we use 4096
196  * on >= 82544.  We tell the upper layers that they can queue a lot
197  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
198  * of them at a time.
199  *
200  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
201  * chains containing many small mbufs have been observed in zero-copy
202  * situations with jumbo frames.
203  */
204 #define	WM_NTXSEGS		256
205 #define	WM_IFQUEUELEN		256
206 #define	WM_TXQUEUELEN_MAX	64
207 #define	WM_TXQUEUELEN_MAX_82547	16
208 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
209 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
210 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
211 #define	WM_NTXDESC_82542	256
212 #define	WM_NTXDESC_82544	4096
213 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
214 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
215 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
216 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
217 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
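/*
 * Worked example (illustrative): the descriptor and job-queue sizes above
 * are powers of two, so "advance and wrap" is a mask, not a modulo.  With
 * WM_NTXDESC(txq) == 4096 the mask is 0xfff and WM_NEXTTX(txq, 4095) wraps
 * to 0; with WM_TXQUEUELEN(txq) == 64, WM_NEXTTXS(txq, 63) wraps to 0 the
 * same way.
 */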
218 
219 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
220 
221 #define	WM_TXINTERQSIZE		256
222 
223 #ifndef WM_TX_PROCESS_LIMIT_DEFAULT
224 #define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
225 #endif
226 #ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
227 #define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
228 #endif
229 
230 /*
231  * Receive descriptor list size.  We have one Rx buffer for normal-sized
232  * packets.  Jumbo packets consume 5 Rx buffers for a full-sized
233  * packet.  We allocate 256 receive descriptors, each with a 2k
234  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
235  */
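/*
 * Worked arithmetic (assuming a 9018-byte jumbo frame): each buffer holds
 * MCLBYTES (2048) bytes, so one such frame spans howmany(9018, 2048) == 5
 * buffers, and 256 descriptors / 5 buffers per frame ~= 51, i.e. the
 * "room for 50 jumbo packets" above.
 */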
236 #define	WM_NRXDESC		256
237 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
238 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
239 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
240 
241 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
242 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
243 #endif
244 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
245 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
246 #endif
247 
248 typedef union txdescs {
249 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
250 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
251 } txdescs_t;
252 
253 typedef union rxdescs {
254 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
255 	ext_rxdesc_t      sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
256 	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
257 } rxdescs_t;
258 
259 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
260 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
261 
262 /*
263  * Software state for transmit jobs.
264  */
265 struct wm_txsoft {
266 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
267 	bus_dmamap_t txs_dmamap;	/* our DMA map */
268 	int txs_firstdesc;		/* first descriptor in packet */
269 	int txs_lastdesc;		/* last descriptor in packet */
270 	int txs_ndesc;			/* # of descriptors used */
271 };
272 
273 /*
274  * Software state for receive buffers.  Each descriptor gets a
275  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
276  * more than one buffer, we chain them together.
277  */
278 struct wm_rxsoft {
279 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
280 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
281 };
282 
283 #define WM_LINKUP_TIMEOUT	50
284 
285 static uint16_t swfwphysem[] = {
286 	SWFW_PHY0_SM,
287 	SWFW_PHY1_SM,
288 	SWFW_PHY2_SM,
289 	SWFW_PHY3_SM
290 };
291 
292 static const uint32_t wm_82580_rxpbs_table[] = {
293 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
294 };
295 
296 struct wm_softc;
297 
298 #ifdef WM_EVENT_COUNTERS
299 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
300 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
301 	struct evcnt qname##_ev_##evname;
302 
303 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
304 	do {								\
305 		snprintf((q)->qname##_##evname##_evcnt_name,		\
306 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
307 		    "%s%02d%s", #qname, (qnum), #evname);		\
308 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
309 		    (evtype), NULL, (xname),				\
310 		    (q)->qname##_##evname##_evcnt_name);		\
311 	} while (0)
312 
313 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
314 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
315 
316 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
317 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
318 
319 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
320 	evcnt_detach(&(q)->qname##_ev_##evname);
321 #endif /* WM_EVENT_COUNTERS */
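/*
 * Expansion sketch (illustrative only): within struct wm_txqueue,
 * WM_Q_EVCNT_DEFINE(txq, txdw) declares
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * and WM_Q_EVCNT_ATTACH(txq, txdw, txq, 0, xname, EVCNT_TYPE_INTR) then
 * snprintf()s the name "txq00txdw" into that buffer before attaching the
 * counter.  Note that "##" does not paste inside a string literal; the
 * sizeof() operand is simply a literal long enough to hold any "%s%02d%s"
 * name of this shape.
 */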
322 
323 struct wm_txqueue {
324 	kmutex_t *txq_lock;		/* lock for tx operations */
325 
326 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
327 
328 	/* Software state for the transmit descriptors. */
329 	int txq_num;			/* must be a power of two */
330 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
331 
332 	/* TX control data structures. */
333 	int txq_ndesc;			/* must be a power of two */
334 	size_t txq_descsize;		/* size of a Tx descriptor */
335 	txdescs_t *txq_descs_u;
336 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
337 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
338 	int txq_desc_rseg;		/* real number of control segments */
339 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
340 #define	txq_descs	txq_descs_u->sctxu_txdescs
341 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
342 
343 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
344 
345 	int txq_free;			/* number of free Tx descriptors */
346 	int txq_next;			/* next ready Tx descriptor */
347 
348 	int txq_sfree;			/* number of free Tx jobs */
349 	int txq_snext;			/* next free Tx job */
350 	int txq_sdirty;			/* dirty Tx jobs */
351 
352 	/* These 4 variables are used only on the 82547. */
353 	int txq_fifo_size;		/* Tx FIFO size */
354 	int txq_fifo_head;		/* current head of FIFO */
355 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
356 	int txq_fifo_stall;		/* Tx FIFO is stalled */
357 
358 	/*
359 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
360 	 * CPUs.  This queue mediates between them without blocking.
361 	 */
362 	pcq_t *txq_interq;
363 
364 	/*
365 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
366 	 * to manage the Tx H/W queue's busy flag.
367 	 */
368 	int txq_flags;			/* flags for H/W queue, see below */
369 #define	WM_TXQ_NO_SPACE	0x1
370 
371 	bool txq_stopping;
372 
373 	bool txq_watchdog;
374 	time_t txq_lastsent;
375 
376 	uint32_t txq_packets;		/* for AIM */
377 	uint32_t txq_bytes;		/* for AIM */
378 #ifdef WM_EVENT_COUNTERS
379 	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
380 	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
381 	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
382 	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
383 	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
384 						/* XXX not used? */
385 
386 	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
387 	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
388 	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
389 	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
390 	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
391 	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */
392 
393 	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped(too many segs) */
394 
395 	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */
396 
397 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
398 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
399 #endif /* WM_EVENT_COUNTERS */
400 };
401 
402 struct wm_rxqueue {
403 	kmutex_t *rxq_lock;		/* lock for rx operations */
404 
405 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
406 
407 	/* Software state for the receive descriptors. */
408 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
409 
410 	/* RX control data structures. */
411 	int rxq_ndesc;			/* must be a power of two */
412 	size_t rxq_descsize;		/* size of an Rx descriptor */
413 	rxdescs_t *rxq_descs_u;
414 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
415 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
416 	int rxq_desc_rseg;		/* real number of control segments */
417 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
418 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
419 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
420 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
421 
422 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
423 
424 	int rxq_ptr;			/* next ready Rx desc/queue ent */
425 	int rxq_discard;
426 	int rxq_len;
427 	struct mbuf *rxq_head;
428 	struct mbuf *rxq_tail;
429 	struct mbuf **rxq_tailp;
430 
431 	bool rxq_stopping;
432 
433 	uint32_t rxq_packets;		/* for AIM */
434 	uint32_t rxq_bytes;		/* for AIM */
435 #ifdef WM_EVENT_COUNTERS
436 	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */
437 	WM_Q_EVCNT_DEFINE(rxq, rxdefer);	/* Rx deferred processing */
438 
439 	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
440 	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
441 #endif
442 };
443 
444 struct wm_queue {
445 	int wmq_id;			/* index of transmit and receive queues */
446 	int wmq_intr_idx;		/* index of MSI-X tables */
447 
448 	uint32_t wmq_itr;		/* interrupt interval per queue. */
449 	bool wmq_set_itr;
450 
451 	struct wm_txqueue wmq_txq;
452 	struct wm_rxqueue wmq_rxq;
453 
454 	void *wmq_si;
455 };
456 
457 struct wm_phyop {
458 	int (*acquire)(struct wm_softc *);
459 	void (*release)(struct wm_softc *);
460 	int reset_delay_us;
461 };
462 
463 struct wm_nvmop {
464 	int (*acquire)(struct wm_softc *);
465 	void (*release)(struct wm_softc *);
466 	int (*read)(struct wm_softc *, int, int, uint16_t *);
467 };
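/*
 * These ops vectors are filled in by chip-specific attach code so the
 * common paths stay generic.  A minimal calling sketch (illustrative;
 * "offset", "wordcnt" and "data" are hypothetical locals):
 */
#if 0
	if (sc->nvm.acquire(sc) == 0) {
		rv = sc->nvm.read(sc, offset, wordcnt, data);
		sc->nvm.release(sc);
	}
#endif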
468 
469 /*
470  * Software state per device.
471  */
472 struct wm_softc {
473 	device_t sc_dev;		/* generic device information */
474 	bus_space_tag_t sc_st;		/* bus space tag */
475 	bus_space_handle_t sc_sh;	/* bus space handle */
476 	bus_size_t sc_ss;		/* bus space size */
477 	bus_space_tag_t sc_iot;		/* I/O space tag */
478 	bus_space_handle_t sc_ioh;	/* I/O space handle */
479 	bus_size_t sc_ios;		/* I/O space size */
480 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
481 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
482 	bus_size_t sc_flashs;		/* flash registers space size */
483 	off_t sc_flashreg_offset;	/*
484 					 * offset to flash registers from
485 					 * start of BAR
486 					 */
487 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
488 
489 	struct ethercom sc_ethercom;	/* ethernet common data */
490 	struct mii_data sc_mii;		/* MII/media information */
491 
492 	pci_chipset_tag_t sc_pc;
493 	pcitag_t sc_pcitag;
494 	int sc_bus_speed;		/* PCI/PCIX bus speed */
495 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
496 
497 	uint16_t sc_pcidevid;		/* PCI device ID */
498 	wm_chip_type sc_type;		/* MAC type */
499 	int sc_rev;			/* MAC revision */
500 	wm_phy_type sc_phytype;		/* PHY type */
501 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
502 #define	WM_MEDIATYPE_UNKNOWN		0x00
503 #define	WM_MEDIATYPE_FIBER		0x01
504 #define	WM_MEDIATYPE_COPPER		0x02
505 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
506 	int sc_funcid;			/* unit number of the chip (0 to 3) */
507 	int sc_flags;			/* flags; see below */
508 	int sc_if_flags;		/* last if_flags */
509 	int sc_flowflags;		/* 802.3x flow control flags */
510 	int sc_align_tweak;
511 
512 	void *sc_ihs[WM_MAX_NINTR];	/*
513 					 * interrupt cookie.
514 					 * - legacy and msi use sc_ihs[0] only
515 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
516 					 */
517 	pci_intr_handle_t *sc_intrs;	/*
518 					 * legacy and msi use sc_intrs[0] only
519 					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
520 					 */
521 	int sc_nintrs;			/* number of interrupts */
522 
523 	int sc_link_intr_idx;		/* index of MSI-X tables */
524 
525 	callout_t sc_tick_ch;		/* tick callout */
526 	bool sc_core_stopping;
527 
528 	int sc_nvm_ver_major;
529 	int sc_nvm_ver_minor;
530 	int sc_nvm_ver_build;
531 	int sc_nvm_addrbits;		/* NVM address bits */
532 	unsigned int sc_nvm_wordsize;	/* NVM word size */
533 	int sc_ich8_flash_base;
534 	int sc_ich8_flash_bank_size;
535 	int sc_nvm_k1_enabled;
536 
537 	int sc_nqueues;
538 	struct wm_queue *sc_queue;
539 	u_int sc_tx_process_limit;	/* Tx processing repeat limit in softint */
540 	u_int sc_tx_intr_process_limit;	/* Tx processing repeat limit in H/W intr */
541 	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
542 	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */
543 
544 	int sc_affinity_offset;
545 
546 #ifdef WM_EVENT_COUNTERS
547 	/* Event counters. */
548 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
549 
550 	/* WM_T_82542_2_1 only */
551 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
552 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
553 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
554 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
555 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
556 #endif /* WM_EVENT_COUNTERS */
557 
558 	/* This variable is used only on the 82547. */
559 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
560 
561 	uint32_t sc_ctrl;		/* prototype CTRL register */
562 #if 0
563 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
564 #endif
565 	uint32_t sc_icr;		/* prototype interrupt bits */
566 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
567 	uint32_t sc_tctl;		/* prototype TCTL register */
568 	uint32_t sc_rctl;		/* prototype RCTL register */
569 	uint32_t sc_txcw;		/* prototype TXCW register */
570 	uint32_t sc_tipg;		/* prototype TIPG register */
571 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
572 	uint32_t sc_pba;		/* prototype PBA register */
573 
574 	int sc_tbi_linkup;		/* TBI link status */
575 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
576 	int sc_tbi_serdes_ticks;	/* tbi ticks */
577 
578 	int sc_mchash_type;		/* multicast filter offset */
579 
580 	krndsource_t rnd_source;	/* random source */
581 
582 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
583 
584 	kmutex_t *sc_core_lock;		/* lock for softc operations */
585 	kmutex_t *sc_ich_phymtx;	/*
586 					 * 82574/82583/ICH/PCH specific PHY
587 					 * mutex. For 82574/82583, the mutex
588 					 * is used for both PHY and NVM.
589 					 */
590 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
591 
592 	struct wm_phyop phy;
593 	struct wm_nvmop nvm;
594 };
595 
596 #define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
597 #define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
598 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
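/*
 * Usage sketch (illustrative only): the macros tolerate a NULL
 * sc_core_lock (the non-NET_MPSAFE case), and callees can assert the
 * locking invariant with WM_CORE_LOCKED().
 */
#if 0
	WM_CORE_LOCK(sc);
	KASSERT(WM_CORE_LOCKED(sc));
	/* ... modify softc state ... */
	WM_CORE_UNLOCK(sc);
#endif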
599 
600 #define	WM_RXCHAIN_RESET(rxq)						\
601 do {									\
602 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
603 	*(rxq)->rxq_tailp = NULL;					\
604 	(rxq)->rxq_len = 0;						\
605 } while (/*CONSTCOND*/0)
606 
607 #define	WM_RXCHAIN_LINK(rxq, m)						\
608 do {									\
609 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
610 	(rxq)->rxq_tailp = &(m)->m_next;				\
611 } while (/*CONSTCOND*/0)
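/*
 * Example (illustrative): rxq_tailp always points at the m_next slot of
 * the last mbuf (or at rxq_head when the chain is empty), so
 * WM_RXCHAIN_LINK() appends in O(1) without walking the chain.
 */
#if 0
	WM_RXCHAIN_RESET(rxq);		/* rxq_head = NULL, tailp = &rxq_head */
	WM_RXCHAIN_LINK(rxq, m0);	/* rxq_head = m0, tailp = &m0->m_next */
	WM_RXCHAIN_LINK(rxq, m1);	/* m0->m_next = m1, tailp = &m1->m_next */
#endif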
612 
613 #ifdef WM_EVENT_COUNTERS
614 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
615 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
616 
617 #define WM_Q_EVCNT_INCR(qname, evname)			\
618 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
619 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
620 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
621 #else /* !WM_EVENT_COUNTERS */
622 #define	WM_EVCNT_INCR(ev)	/* nothing */
623 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
624 
625 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
626 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
627 #endif /* !WM_EVENT_COUNTERS */
628 
629 #define	CSR_READ(sc, reg)						\
630 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
631 #define	CSR_WRITE(sc, reg, val)						\
632 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
633 #define	CSR_WRITE_FLUSH(sc)						\
634 	(void) CSR_READ((sc), WMREG_STATUS)
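/*
 * CSR_WRITE_FLUSH() forces posted PCI writes to reach the chip by reading
 * a harmless register (STATUS).  A typical sequence, as used when masking
 * interrupts (sketch only):
 */
#if 0
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	CSR_WRITE_FLUSH(sc);
#endif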
635 
636 #define ICH8_FLASH_READ32(sc, reg)					\
637 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
638 	    (reg) + sc->sc_flashreg_offset)
639 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
640 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
641 	    (reg) + sc->sc_flashreg_offset, (data))
642 
643 #define ICH8_FLASH_READ16(sc, reg)					\
644 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
645 	    (reg) + sc->sc_flashreg_offset)
646 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
647 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
648 	    (reg) + sc->sc_flashreg_offset, (data))
649 
650 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
651 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
652 
653 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
654 #define	WM_CDTXADDR_HI(txq, x)						\
655 	(sizeof(bus_addr_t) == 8 ?					\
656 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
657 
658 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
659 #define	WM_CDRXADDR_HI(rxq, x)						\
660 	(sizeof(bus_addr_t) == 8 ?					\
661 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
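/*
 * Worked example (illustrative): these macros split a descriptor's bus
 * address for the chip's 32-bit low/high register pairs.  With a 64-bit
 * bus_addr_t of 0x123456000, WM_CDTXADDR_LO() yields 0x23456000 and
 * WM_CDTXADDR_HI() yields 0x1; on 32-bit systems the high half is
 * always 0.
 */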
662 
663 /*
664  * Register read/write functions
665  * other than CSR_{READ|WRITE}().
666  */
667 #if 0
668 static inline uint32_t wm_io_read(struct wm_softc *, int);
669 #endif
670 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
671 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
672 	uint32_t, uint32_t);
673 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
674 
675 /*
676  * Descriptor sync/init functions.
677  */
678 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
679 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
680 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
681 
682 /*
683  * Device driver interface functions and commonly used functions.
684  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
685  */
686 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
687 static int	wm_match(device_t, cfdata_t, void *);
688 static void	wm_attach(device_t, device_t, void *);
689 static int	wm_detach(device_t, int);
690 static bool	wm_suspend(device_t, const pmf_qual_t *);
691 static bool	wm_resume(device_t, const pmf_qual_t *);
692 static void	wm_watchdog(struct ifnet *);
693 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *, uint16_t *);
694 static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *, uint16_t *);
695 static void	wm_tick(void *);
696 static int	wm_ifflags_cb(struct ethercom *);
697 static int	wm_ioctl(struct ifnet *, u_long, void *);
698 /* MAC address related */
699 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
700 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
701 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
702 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
703 static void	wm_set_filter(struct wm_softc *);
704 /* Reset and init related */
705 static void	wm_set_vlan(struct wm_softc *);
706 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
707 static void	wm_get_auto_rd_done(struct wm_softc *);
708 static void	wm_lan_init_done(struct wm_softc *);
709 static void	wm_get_cfg_done(struct wm_softc *);
710 static void	wm_phy_post_reset(struct wm_softc *);
711 static void	wm_write_smbus_addr(struct wm_softc *);
712 static void	wm_init_lcd_from_nvm(struct wm_softc *);
713 static void	wm_initialize_hardware_bits(struct wm_softc *);
714 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
715 static void	wm_reset_phy(struct wm_softc *);
716 static void	wm_flush_desc_rings(struct wm_softc *);
717 static void	wm_reset(struct wm_softc *);
718 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
719 static void	wm_rxdrain(struct wm_rxqueue *);
720 static void	wm_init_rss(struct wm_softc *);
721 static void	wm_adjust_qnum(struct wm_softc *, int);
722 static inline bool	wm_is_using_msix(struct wm_softc *);
723 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
724 static int	wm_softint_establish(struct wm_softc *, int, int);
725 static int	wm_setup_legacy(struct wm_softc *);
726 static int	wm_setup_msix(struct wm_softc *);
727 static int	wm_init(struct ifnet *);
728 static int	wm_init_locked(struct ifnet *);
729 static void	wm_unset_stopping_flags(struct wm_softc *);
730 static void	wm_set_stopping_flags(struct wm_softc *);
731 static void	wm_stop(struct ifnet *, int);
732 static void	wm_stop_locked(struct ifnet *, int);
733 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
734 static void	wm_82547_txfifo_stall(void *);
735 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
736 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
737 /* DMA related */
738 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
739 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
740 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
741 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
742     struct wm_txqueue *);
743 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
744 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
745 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
746     struct wm_rxqueue *);
747 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
748 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
749 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
750 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
751 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
752 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
753 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
754     struct wm_txqueue *);
755 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
756     struct wm_rxqueue *);
757 static int	wm_alloc_txrx_queues(struct wm_softc *);
758 static void	wm_free_txrx_queues(struct wm_softc *);
759 static int	wm_init_txrx_queues(struct wm_softc *);
760 /* Start */
761 static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
762     struct wm_txsoft *, uint32_t *, uint8_t *);
763 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
764 static void	wm_start(struct ifnet *);
765 static void	wm_start_locked(struct ifnet *);
766 static int	wm_transmit(struct ifnet *, struct mbuf *);
767 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
768 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
769 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
770     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
771 static void	wm_nq_start(struct ifnet *);
772 static void	wm_nq_start_locked(struct ifnet *);
773 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
774 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
775 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
776 static void	wm_deferred_start_locked(struct wm_txqueue *);
777 static void	wm_handle_queue(void *);
778 /* Interrupt */
779 static bool	wm_txeof(struct wm_txqueue *, u_int);
780 static bool	wm_rxeof(struct wm_rxqueue *, u_int);
781 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
782 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
783 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
784 static void	wm_linkintr(struct wm_softc *, uint32_t);
785 static int	wm_intr_legacy(void *);
786 static inline void	wm_txrxintr_disable(struct wm_queue *);
787 static inline void	wm_txrxintr_enable(struct wm_queue *);
788 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
789 static int	wm_txrxintr_msix(void *);
790 static int	wm_linkintr_msix(void *);
791 
792 /*
793  * Media related.
794  * GMII, SGMII, TBI, SERDES and SFP.
795  */
796 /* Common */
797 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
798 /* GMII related */
799 static void	wm_gmii_reset(struct wm_softc *);
800 static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
801 static int	wm_get_phy_id_82575(struct wm_softc *);
802 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
803 static int	wm_gmii_mediachange(struct ifnet *);
804 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
805 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
806 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
807 static int	wm_gmii_i82543_readreg(device_t, int, int);
808 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
809 static int	wm_gmii_mdic_readreg(device_t, int, int);
810 static void	wm_gmii_mdic_writereg(device_t, int, int, int);
811 static int	wm_gmii_i82544_readreg(device_t, int, int);
812 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
813 static int	wm_gmii_i80003_readreg(device_t, int, int);
814 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
815 static int	wm_gmii_bm_readreg(device_t, int, int);
816 static void	wm_gmii_bm_writereg(device_t, int, int, int);
817 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
818 static int	wm_gmii_hv_readreg(device_t, int, int);
819 static int	wm_gmii_hv_readreg_locked(device_t, int, int);
820 static void	wm_gmii_hv_writereg(device_t, int, int, int);
821 static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
822 static int	wm_gmii_82580_readreg(device_t, int, int);
823 static void	wm_gmii_82580_writereg(device_t, int, int, int);
824 static int	wm_gmii_gs40g_readreg(device_t, int, int);
825 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
826 static void	wm_gmii_statchg(struct ifnet *);
827 /*
828  * Kumeran related (80003, ICH* and PCH*).
829  * These functions are not for accessing MII registers but for accessing
830  * Kumeran-specific registers.
831  */
832 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
833 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
834 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
835 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
836 /* SGMII */
837 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
838 static int	wm_sgmii_readreg(device_t, int, int);
839 static void	wm_sgmii_writereg(device_t, int, int, int);
840 /* TBI related */
841 static void	wm_tbi_mediainit(struct wm_softc *);
842 static int	wm_tbi_mediachange(struct ifnet *);
843 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
844 static int	wm_check_for_link(struct wm_softc *);
845 static void	wm_tbi_tick(struct wm_softc *);
846 /* SERDES related */
847 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
848 static int	wm_serdes_mediachange(struct ifnet *);
849 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
850 static void	wm_serdes_tick(struct wm_softc *);
851 /* SFP related */
852 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
853 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
854 
855 /*
856  * NVM related.
857  * Microwire, SPI (with/without EERD) and Flash.
858  */
859 /* Misc functions */
860 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
861 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
862 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
863 /* Microwire */
864 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
865 /* SPI */
866 static int	wm_nvm_ready_spi(struct wm_softc *);
867 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
868 /* Used with EERD */
869 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
870 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
871 /* Flash */
872 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
873     unsigned int *);
874 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
875 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
876 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
877 	uint32_t *);
878 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
879 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
880 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
881 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
882 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
883 /* iNVM */
884 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
885 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
886 /* Locking, NVM type detection, checksum validation and read */
887 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
888 static int	wm_nvm_flash_presence_i210(struct wm_softc *);
889 static int	wm_nvm_validate_checksum(struct wm_softc *);
890 static void	wm_nvm_version_invm(struct wm_softc *);
891 static void	wm_nvm_version(struct wm_softc *);
892 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
893 
894 /*
895  * Hardware semaphores.
896  * Very complex...
897  */
898 static int	wm_get_null(struct wm_softc *);
899 static void	wm_put_null(struct wm_softc *);
900 static int	wm_get_eecd(struct wm_softc *);
901 static void	wm_put_eecd(struct wm_softc *);
902 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
903 static void	wm_put_swsm_semaphore(struct wm_softc *);
904 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
905 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
906 static int	wm_get_nvm_80003(struct wm_softc *);
907 static void	wm_put_nvm_80003(struct wm_softc *);
908 static int	wm_get_nvm_82571(struct wm_softc *);
909 static void	wm_put_nvm_82571(struct wm_softc *);
910 static int	wm_get_phy_82575(struct wm_softc *);
911 static void	wm_put_phy_82575(struct wm_softc *);
912 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
913 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
914 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
915 static void	wm_put_swflag_ich8lan(struct wm_softc *);
916 static int	wm_get_nvm_ich8lan(struct wm_softc *);
917 static void	wm_put_nvm_ich8lan(struct wm_softc *);
918 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
919 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
920 
921 /*
922  * Management mode and power management related subroutines.
923  * BMC, AMT, suspend/resume and EEE.
924  */
925 #if 0
926 static int	wm_check_mng_mode(struct wm_softc *);
927 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
928 static int	wm_check_mng_mode_82574(struct wm_softc *);
929 static int	wm_check_mng_mode_generic(struct wm_softc *);
930 #endif
931 static int	wm_enable_mng_pass_thru(struct wm_softc *);
932 static bool	wm_phy_resetisblocked(struct wm_softc *);
933 static void	wm_get_hw_control(struct wm_softc *);
934 static void	wm_release_hw_control(struct wm_softc *);
935 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
936 static void	wm_smbustopci(struct wm_softc *);
937 static void	wm_init_manageability(struct wm_softc *);
938 static void	wm_release_manageability(struct wm_softc *);
939 static void	wm_get_wakeup(struct wm_softc *);
940 static void	wm_ulp_disable(struct wm_softc *);
941 static void	wm_enable_phy_wakeup(struct wm_softc *);
942 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
943 static void	wm_enable_wakeup(struct wm_softc *);
944 static void	wm_disable_aspm(struct wm_softc *);
945 /* LPLU (Low Power Link Up) */
946 static void	wm_lplu_d0_disable(struct wm_softc *);
947 /* EEE */
948 static void	wm_set_eee_i350(struct wm_softc *);
949 
950 /*
951  * Workarounds (mainly PHY related).
952  * Basically, PHY workarounds belong in the PHY drivers.
953  */
954 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
955 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
956 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
957 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
958 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
959 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
960 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
961 static void	wm_reset_init_script_82575(struct wm_softc *);
962 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
963 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
964 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
965 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
966 static void	wm_pll_workaround_i210(struct wm_softc *);
967 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
968 
969 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
970     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
971 
972 /*
973  * Devices supported by this driver.
974  */
975 static const struct wm_product {
976 	pci_vendor_id_t		wmp_vendor;
977 	pci_product_id_t	wmp_product;
978 	const char		*wmp_name;
979 	wm_chip_type		wmp_type;
980 	uint32_t		wmp_flags;
981 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
982 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
983 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
984 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
985 #define WMP_MEDIATYPE(x)	((x) & 0x03)
986 } wm_products[] = {
987 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
988 	  "Intel i82542 1000BASE-X Ethernet",
989 	  WM_T_82542_2_1,	WMP_F_FIBER },
990 
991 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
992 	  "Intel i82543GC 1000BASE-X Ethernet",
993 	  WM_T_82543,		WMP_F_FIBER },
994 
995 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
996 	  "Intel i82543GC 1000BASE-T Ethernet",
997 	  WM_T_82543,		WMP_F_COPPER },
998 
999 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
1000 	  "Intel i82544EI 1000BASE-T Ethernet",
1001 	  WM_T_82544,		WMP_F_COPPER },
1002 
1003 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
1004 	  "Intel i82544EI 1000BASE-X Ethernet",
1005 	  WM_T_82544,		WMP_F_FIBER },
1006 
1007 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
1008 	  "Intel i82544GC 1000BASE-T Ethernet",
1009 	  WM_T_82544,		WMP_F_COPPER },
1010 
1011 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
1012 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
1013 	  WM_T_82544,		WMP_F_COPPER },
1014 
1015 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
1016 	  "Intel i82540EM 1000BASE-T Ethernet",
1017 	  WM_T_82540,		WMP_F_COPPER },
1018 
1019 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
1020 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
1021 	  WM_T_82540,		WMP_F_COPPER },
1022 
1023 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
1024 	  "Intel i82540EP 1000BASE-T Ethernet",
1025 	  WM_T_82540,		WMP_F_COPPER },
1026 
1027 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
1028 	  "Intel i82540EP 1000BASE-T Ethernet",
1029 	  WM_T_82540,		WMP_F_COPPER },
1030 
1031 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
1032 	  "Intel i82540EP 1000BASE-T Ethernet",
1033 	  WM_T_82540,		WMP_F_COPPER },
1034 
1035 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
1036 	  "Intel i82545EM 1000BASE-T Ethernet",
1037 	  WM_T_82545,		WMP_F_COPPER },
1038 
1039 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
1040 	  "Intel i82545GM 1000BASE-T Ethernet",
1041 	  WM_T_82545_3,		WMP_F_COPPER },
1042 
1043 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
1044 	  "Intel i82545GM 1000BASE-X Ethernet",
1045 	  WM_T_82545_3,		WMP_F_FIBER },
1046 
1047 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
1048 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
1049 	  WM_T_82545_3,		WMP_F_SERDES },
1050 
1051 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
1052 	  "Intel i82546EB 1000BASE-T Ethernet",
1053 	  WM_T_82546,		WMP_F_COPPER },
1054 
1055 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
1056 	  "Intel i82546EB 1000BASE-T Ethernet",
1057 	  WM_T_82546,		WMP_F_COPPER },
1058 
1059 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
1060 	  "Intel i82545EM 1000BASE-X Ethernet",
1061 	  WM_T_82545,		WMP_F_FIBER },
1062 
1063 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
1064 	  "Intel i82546EB 1000BASE-X Ethernet",
1065 	  WM_T_82546,		WMP_F_FIBER },
1066 
1067 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
1068 	  "Intel i82546GB 1000BASE-T Ethernet",
1069 	  WM_T_82546_3,		WMP_F_COPPER },
1070 
1071 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
1072 	  "Intel i82546GB 1000BASE-X Ethernet",
1073 	  WM_T_82546_3,		WMP_F_FIBER },
1074 
1075 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
1076 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
1077 	  WM_T_82546_3,		WMP_F_SERDES },
1078 
1079 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
1080 	  "i82546GB quad-port Gigabit Ethernet",
1081 	  WM_T_82546_3,		WMP_F_COPPER },
1082 
1083 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
1084 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
1085 	  WM_T_82546_3,		WMP_F_COPPER },
1086 
1087 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
1088 	  "Intel PRO/1000MT (82546GB)",
1089 	  WM_T_82546_3,		WMP_F_COPPER },
1090 
1091 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
1092 	  "Intel i82541EI 1000BASE-T Ethernet",
1093 	  WM_T_82541,		WMP_F_COPPER },
1094 
1095 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
1096 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
1097 	  WM_T_82541,		WMP_F_COPPER },
1098 
1099 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
1100 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
1101 	  WM_T_82541,		WMP_F_COPPER },
1102 
1103 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
1104 	  "Intel i82541ER 1000BASE-T Ethernet",
1105 	  WM_T_82541_2,		WMP_F_COPPER },
1106 
1107 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
1108 	  "Intel i82541GI 1000BASE-T Ethernet",
1109 	  WM_T_82541_2,		WMP_F_COPPER },
1110 
1111 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
1112 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
1113 	  WM_T_82541_2,		WMP_F_COPPER },
1114 
1115 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
1116 	  "Intel i82541PI 1000BASE-T Ethernet",
1117 	  WM_T_82541_2,		WMP_F_COPPER },
1118 
1119 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
1120 	  "Intel i82547EI 1000BASE-T Ethernet",
1121 	  WM_T_82547,		WMP_F_COPPER },
1122 
1123 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
1124 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
1125 	  WM_T_82547,		WMP_F_COPPER },
1126 
1127 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
1128 	  "Intel i82547GI 1000BASE-T Ethernet",
1129 	  WM_T_82547_2,		WMP_F_COPPER },
1130 
1131 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
1132 	  "Intel PRO/1000 PT (82571EB)",
1133 	  WM_T_82571,		WMP_F_COPPER },
1134 
1135 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
1136 	  "Intel PRO/1000 PF (82571EB)",
1137 	  WM_T_82571,		WMP_F_FIBER },
1138 
1139 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
1140 	  "Intel PRO/1000 PB (82571EB)",
1141 	  WM_T_82571,		WMP_F_SERDES },
1142 
1143 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
1144 	  "Intel PRO/1000 QT (82571EB)",
1145 	  WM_T_82571,		WMP_F_COPPER },
1146 
1147 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
1148 	  "Intel PRO/1000 PT Quad Port Server Adapter",
1149 	  WM_T_82571,		WMP_F_COPPER, },
1150 
1151 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
1152 	  "Intel Gigabit PT Quad Port Server ExpressModule",
1153 	  WM_T_82571,		WMP_F_COPPER, },
1154 
1155 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
1156 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
1157 	  WM_T_82571,		WMP_F_SERDES, },
1158 
1159 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
1160 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
1161 	  WM_T_82571,		WMP_F_SERDES, },
1162 
1163 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
1164 	  "Intel 82571EB Quad 1000baseX Ethernet",
1165 	  WM_T_82571,		WMP_F_FIBER, },
1166 
1167 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
1168 	  "Intel i82572EI 1000baseT Ethernet",
1169 	  WM_T_82572,		WMP_F_COPPER },
1170 
1171 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
1172 	  "Intel i82572EI 1000baseX Ethernet",
1173 	  WM_T_82572,		WMP_F_FIBER },
1174 
1175 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
1176 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
1177 	  WM_T_82572,		WMP_F_SERDES },
1178 
1179 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
1180 	  "Intel i82572EI 1000baseT Ethernet",
1181 	  WM_T_82572,		WMP_F_COPPER },
1182 
1183 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
1184 	  "Intel i82573E",
1185 	  WM_T_82573,		WMP_F_COPPER },
1186 
1187 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
1188 	  "Intel i82573E IAMT",
1189 	  WM_T_82573,		WMP_F_COPPER },
1190 
1191 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
1192 	  "Intel i82573L Gigabit Ethernet",
1193 	  WM_T_82573,		WMP_F_COPPER },
1194 
1195 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
1196 	  "Intel i82574L",
1197 	  WM_T_82574,		WMP_F_COPPER },
1198 
1199 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
1200 	  "Intel i82574L",
1201 	  WM_T_82574,		WMP_F_COPPER },
1202 
1203 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
1204 	  "Intel i82583V",
1205 	  WM_T_82583,		WMP_F_COPPER },
1206 
1207 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
1208 	  "i80003 dual 1000baseT Ethernet",
1209 	  WM_T_80003,		WMP_F_COPPER },
1210 
1211 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
1212 	  "i80003 dual 1000baseX Ethernet",
1213 	  WM_T_80003,		WMP_F_COPPER },
1214 
1215 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
1216 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
1217 	  WM_T_80003,		WMP_F_SERDES },
1218 
1219 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
1220 	  "Intel i80003 1000baseT Ethernet",
1221 	  WM_T_80003,		WMP_F_COPPER },
1222 
1223 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
1224 	  "Intel i80003 Gigabit Ethernet (SERDES)",
1225 	  WM_T_80003,		WMP_F_SERDES },
1226 
1227 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
1228 	  "Intel i82801H (M_AMT) LAN Controller",
1229 	  WM_T_ICH8,		WMP_F_COPPER },
1230 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
1231 	  "Intel i82801H (AMT) LAN Controller",
1232 	  WM_T_ICH8,		WMP_F_COPPER },
1233 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
1234 	  "Intel i82801H LAN Controller",
1235 	  WM_T_ICH8,		WMP_F_COPPER },
1236 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
1237 	  "Intel i82801H (IFE) 10/100 LAN Controller",
1238 	  WM_T_ICH8,		WMP_F_COPPER },
1239 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
1240 	  "Intel i82801H (M) LAN Controller",
1241 	  WM_T_ICH8,		WMP_F_COPPER },
1242 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
1243 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
1244 	  WM_T_ICH8,		WMP_F_COPPER },
1245 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
1246 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
1247 	  WM_T_ICH8,		WMP_F_COPPER },
1248 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
1249 	  "82567V-3 LAN Controller",
1250 	  WM_T_ICH8,		WMP_F_COPPER },
1251 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
1252 	  "82801I (AMT) LAN Controller",
1253 	  WM_T_ICH9,		WMP_F_COPPER },
1254 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
1255 	  "82801I 10/100 LAN Controller",
1256 	  WM_T_ICH9,		WMP_F_COPPER },
1257 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
1258 	  "82801I (G) 10/100 LAN Controller",
1259 	  WM_T_ICH9,		WMP_F_COPPER },
1260 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
1261 	  "82801I (GT) 10/100 LAN Controller",
1262 	  WM_T_ICH9,		WMP_F_COPPER },
1263 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
1264 	  "82801I (C) LAN Controller",
1265 	  WM_T_ICH9,		WMP_F_COPPER },
1266 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
1267 	  "82801I mobile LAN Controller",
1268 	  WM_T_ICH9,		WMP_F_COPPER },
1269 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
1270 	  "82801I mobile (V) LAN Controller",
1271 	  WM_T_ICH9,		WMP_F_COPPER },
1272 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
1273 	  "82801I mobile (AMT) LAN Controller",
1274 	  WM_T_ICH9,		WMP_F_COPPER },
1275 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
1276 	  "82567LM-4 LAN Controller",
1277 	  WM_T_ICH9,		WMP_F_COPPER },
1278 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
1279 	  "82567LM-2 LAN Controller",
1280 	  WM_T_ICH10,		WMP_F_COPPER },
1281 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
1282 	  "82567LF-2 LAN Controller",
1283 	  WM_T_ICH10,		WMP_F_COPPER },
1284 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
1285 	  "82567LM-3 LAN Controller",
1286 	  WM_T_ICH10,		WMP_F_COPPER },
1287 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
1288 	  "82567LF-3 LAN Controller",
1289 	  WM_T_ICH10,		WMP_F_COPPER },
1290 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
1291 	  "82567V-2 LAN Controller",
1292 	  WM_T_ICH10,		WMP_F_COPPER },
1293 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
1294 	  "82567V-3? LAN Controller",
1295 	  WM_T_ICH10,		WMP_F_COPPER },
1296 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
1297 	  "HANKSVILLE LAN Controller",
1298 	  WM_T_ICH10,		WMP_F_COPPER },
1299 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
1300 	  "PCH LAN (82577LM) Controller",
1301 	  WM_T_PCH,		WMP_F_COPPER },
1302 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
1303 	  "PCH LAN (82577LC) Controller",
1304 	  WM_T_PCH,		WMP_F_COPPER },
1305 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
1306 	  "PCH LAN (82578DM) Controller",
1307 	  WM_T_PCH,		WMP_F_COPPER },
1308 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
1309 	  "PCH LAN (82578DC) Controller",
1310 	  WM_T_PCH,		WMP_F_COPPER },
1311 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
1312 	  "PCH2 LAN (82579LM) Controller",
1313 	  WM_T_PCH2,		WMP_F_COPPER },
1314 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
1315 	  "PCH2 LAN (82579V) Controller",
1316 	  WM_T_PCH2,		WMP_F_COPPER },
1317 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
1318 	  "82575EB dual-1000baseT Ethernet",
1319 	  WM_T_82575,		WMP_F_COPPER },
1320 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
1321 	  "82575EB dual-1000baseX Ethernet (SERDES)",
1322 	  WM_T_82575,		WMP_F_SERDES },
1323 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
1324 	  "82575GB quad-1000baseT Ethernet",
1325 	  WM_T_82575,		WMP_F_COPPER },
1326 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
1327 	  "82575GB quad-1000baseT Ethernet (PM)",
1328 	  WM_T_82575,		WMP_F_COPPER },
1329 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
1330 	  "82576 1000BaseT Ethernet",
1331 	  WM_T_82576,		WMP_F_COPPER },
1332 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
1333 	  "82576 1000BaseX Ethernet",
1334 	  WM_T_82576,		WMP_F_FIBER },
1335 
1336 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
1337 	  "82576 gigabit Ethernet (SERDES)",
1338 	  WM_T_82576,		WMP_F_SERDES },
1339 
1340 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
1341 	  "82576 quad-1000BaseT Ethernet",
1342 	  WM_T_82576,		WMP_F_COPPER },
1343 
1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
1345 	  "82576 Gigabit ET2 Quad Port Server Adapter",
1346 	  WM_T_82576,		WMP_F_COPPER },
1347 
1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
1349 	  "82576 gigabit Ethernet",
1350 	  WM_T_82576,		WMP_F_COPPER },
1351 
1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
1353 	  "82576 gigabit Ethernet (SERDES)",
1354 	  WM_T_82576,		WMP_F_SERDES },
1355 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
1356 	  "82576 quad-gigabit Ethernet (SERDES)",
1357 	  WM_T_82576,		WMP_F_SERDES },
1358 
1359 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
1360 	  "82580 1000BaseT Ethernet",
1361 	  WM_T_82580,		WMP_F_COPPER },
1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
1363 	  "82580 1000BaseX Ethernet",
1364 	  WM_T_82580,		WMP_F_FIBER },
1365 
1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
1367 	  "82580 1000BaseT Ethernet (SERDES)",
1368 	  WM_T_82580,		WMP_F_SERDES },
1369 
1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
1371 	  "82580 gigabit Ethernet (SGMII)",
1372 	  WM_T_82580,		WMP_F_COPPER },
1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
1374 	  "82580 dual-1000BaseT Ethernet",
1375 	  WM_T_82580,		WMP_F_COPPER },
1376 
1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1378 	  "82580 quad-1000BaseX Ethernet",
1379 	  WM_T_82580,		WMP_F_FIBER },
1380 
1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
1382 	  "DH89XXCC Gigabit Ethernet (SGMII)",
1383 	  WM_T_82580,		WMP_F_COPPER },
1384 
1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
1386 	  "DH89XXCC Gigabit Ethernet (SERDES)",
1387 	  WM_T_82580,		WMP_F_SERDES },
1388 
1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
1390 	  "DH89XXCC 1000BASE-KX Ethernet",
1391 	  WM_T_82580,		WMP_F_SERDES },
1392 
1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
1394 	  "DH89XXCC Gigabit Ethernet (SFP)",
1395 	  WM_T_82580,		WMP_F_SERDES },
1396 
1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
1398 	  "I350 Gigabit Network Connection",
1399 	  WM_T_I350,		WMP_F_COPPER },
1400 
1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
1402 	  "I350 Gigabit Fiber Network Connection",
1403 	  WM_T_I350,		WMP_F_FIBER },
1404 
1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
1406 	  "I350 Gigabit Backplane Connection",
1407 	  WM_T_I350,		WMP_F_SERDES },
1408 
1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
1410 	  "I350 Quad Port Gigabit Ethernet",
1411 	  WM_T_I350,		WMP_F_SERDES },
1412 
1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
1414 	  "I350 Gigabit Connection",
1415 	  WM_T_I350,		WMP_F_COPPER },
1416 
1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
1418 	  "I354 Gigabit Ethernet (KX)",
1419 	  WM_T_I354,		WMP_F_SERDES },
1420 
1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
1422 	  "I354 Gigabit Ethernet (SGMII)",
1423 	  WM_T_I354,		WMP_F_COPPER },
1424 
1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
1426 	  "I354 Gigabit Ethernet (2.5G)",
1427 	  WM_T_I354,		WMP_F_COPPER },
1428 
1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
1430 	  "I210-T1 Ethernet Server Adapter",
1431 	  WM_T_I210,		WMP_F_COPPER },
1432 
1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
1434 	  "I210 Ethernet (Copper OEM)",
1435 	  WM_T_I210,		WMP_F_COPPER },
1436 
1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
1438 	  "I210 Ethernet (Copper IT)",
1439 	  WM_T_I210,		WMP_F_COPPER },
1440 
1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
1442 	  "I210 Ethernet (FLASH less)",
1443 	  WM_T_I210,		WMP_F_COPPER },
1444 
1445 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
1446 	  "I210 Gigabit Ethernet (Fiber)",
1447 	  WM_T_I210,		WMP_F_FIBER },
1448 
1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
1450 	  "I210 Gigabit Ethernet (SERDES)",
1451 	  WM_T_I210,		WMP_F_SERDES },
1452 
1453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
1454 	  "I210 Gigabit Ethernet (FLASH less)",
1455 	  WM_T_I210,		WMP_F_SERDES },
1456 
1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
1458 	  "I210 Gigabit Ethernet (SGMII)",
1459 	  WM_T_I210,		WMP_F_COPPER },
1460 
1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
1462 	  "I211 Ethernet (COPPER)",
1463 	  WM_T_I211,		WMP_F_COPPER },
1464 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
1465 	  "I217 V Ethernet Connection",
1466 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1467 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
1468 	  "I217 LM Ethernet Connection",
1469 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1470 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
1471 	  "I218 V Ethernet Connection",
1472 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1473 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
1474 	  "I218 V Ethernet Connection",
1475 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1476 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
1477 	  "I218 V Ethernet Connection",
1478 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1479 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
1480 	  "I218 LM Ethernet Connection",
1481 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1482 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
1483 	  "I218 LM Ethernet Connection",
1484 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1485 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
1486 	  "I218 LM Ethernet Connection",
1487 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1488 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
1489 	  "I219 V Ethernet Connection",
1490 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1491 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
1492 	  "I219 V Ethernet Connection",
1493 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1494 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
1495 	  "I219 V Ethernet Connection",
1496 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1497 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
1498 	  "I219 V Ethernet Connection",
1499 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1500 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
1501 	  "I219 LM Ethernet Connection",
1502 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1503 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
1504 	  "I219 LM Ethernet Connection",
1505 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1506 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
1507 	  "I219 LM Ethernet Connection",
1508 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1509 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
1510 	  "I219 LM Ethernet Connection",
1511 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1512 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
1513 	  "I219 LM Ethernet Connection",
1514 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1515 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
1516 	  "I219 V Ethernet Connection",
1517 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1518 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
1519 	  "I219 V Ethernet Connection",
1520 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1521 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
1522 	  "I219 LM Ethernet Connection",
1523 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1524 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
1525 	  "I219 LM Ethernet Connection",
1526 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1527 	{ 0,			0,
1528 	  NULL,
1529 	  0,			0 },
1530 };
1531 
1532 /*
1533  * Register read/write functions
1534  * other than CSR_{READ|WRITE}().
1535  */
1536 
1537 #if 0 /* Not currently used */
1538 static inline uint32_t
1539 wm_io_read(struct wm_softc *sc, int reg)
1540 {
1541 
1542 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1543 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1544 }
1545 #endif
1546 
1547 static inline void
1548 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1549 {
1550 
1551 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1552 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1553 }
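
#if 0 /* Illustrative sketch, not used by the driver */
/*
 * The pair of bus_space_write_4() calls above implement an indirect
 * register window: offset 0 latches the register address, offset 4
 * moves the data.  A hedged usage example, assuming the I/O BAR was
 * successfully mapped in wm_attach() (WM_F_IOH_VALID set):
 */
static inline void
wm_io_write_example(struct wm_softc *sc)
{

	if ((sc->sc_flags & WM_F_IOH_VALID) != 0)
		wm_io_write(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL));
}
#endif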
1554 
1555 static inline void
1556 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1557     uint32_t data)
1558 {
1559 	uint32_t regval;
1560 	int i;
1561 
1562 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1563 
1564 	CSR_WRITE(sc, reg, regval);
1565 
1566 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1567 		delay(5);
1568 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1569 			break;
1570 	}
1571 	if (i == SCTL_CTL_POLL_TIMEOUT) {
1572 		aprint_error("%s: WARNING:"
1573 		    " i82575 reg 0x%08x setup did not indicate ready\n",
1574 		    device_xname(sc->sc_dev), reg);
1575 	}
1576 }
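
/*
 * Note on the poll loop above (illustrative): it busy-waits in 5us
 * steps, so the worst case before the "did not indicate ready"
 * warning fires is SCTL_CTL_POLL_TIMEOUT * 5 microseconds.
 */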
1577 
1578 static inline void
1579 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1580 {
1581 	wa->wa_low = htole32(v & 0xffffffffU);
1582 	if (sizeof(bus_addr_t) == 8)
1583 		wa->wa_high = htole32((uint64_t) v >> 32);
1584 	else
1585 		wa->wa_high = 0;
1586 }
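
/*
 * Worked example (illustrative): for a 64-bit bus_addr_t such as
 * v == 0x00000001deadbee0, wm_set_dma_addr() stores
 * wa_low = htole32(0xdeadbee0) and wa_high = htole32(0x1); on a
 * 32-bit bus_addr_t build the high word is simply forced to 0.
 */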
1587 
1588 /*
1589  * Descriptor sync/init functions.
1590  */
1591 static inline void
1592 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1593 {
1594 	struct wm_softc *sc = txq->txq_sc;
1595 
1596 	/* If it will wrap around, sync to the end of the ring. */
1597 	if ((start + num) > WM_NTXDESC(txq)) {
1598 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1599 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
1600 		    (WM_NTXDESC(txq) - start), ops);
1601 		num -= (WM_NTXDESC(txq) - start);
1602 		start = 0;
1603 	}
1604 
1605 	/* Now sync whatever is left. */
1606 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1607 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
1608 }
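
/*
 * Worked example (illustrative): with WM_NTXDESC(txq) == 256, calling
 * wm_cdtxsync(txq, 250, 10, ops) first syncs descriptors 250..255
 * (256 - 250 == 6 entries), then wraps and syncs 0..3 (the remaining
 * 4 entries), so the two bus_dmamap_sync() calls together cover
 * exactly "num" descriptors.
 */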
1609 
1610 static inline void
1611 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1612 {
1613 	struct wm_softc *sc = rxq->rxq_sc;
1614 
1615 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1616 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
1617 }
1618 
1619 static inline void
1620 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1621 {
1622 	struct wm_softc *sc = rxq->rxq_sc;
1623 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1624 	struct mbuf *m = rxs->rxs_mbuf;
1625 
1626 	/*
1627 	 * Note: We scoot the packet forward 2 bytes in the buffer
1628 	 * so that the payload after the Ethernet header is aligned
1629 	 * to a 4-byte boundary.
1630 	 *
1631 	 * XXX BRAINDAMAGE ALERT!
1632 	 * The stupid chip uses the same size for every buffer, which
1633 	 * is set in the Receive Control register.  We are using the 2K
1634 	 * size option, but what we REALLY want is (2K - 2)!  For this
1635 	 * reason, we can't "scoot" packets longer than the standard
1636 	 * Ethernet MTU.  On strict-alignment platforms, if the total
1637 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
1638 	 * the upper layer copy the headers.
1639 	 */
1640 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
1641 
1642 	if (sc->sc_type == WM_T_82574) {
1643 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
1644 		rxd->erx_data.erxd_addr =
1645 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1646 		rxd->erx_data.erxd_dd = 0;
1647 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
1648 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
1649 
1650 		rxd->nqrx_data.nrxd_paddr =
1651 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1652 		/* Currently, split header is not supported. */
1653 		rxd->nqrx_data.nrxd_haddr = 0;
1654 	} else {
1655 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1656 
1657 		wm_set_dma_addr(&rxd->wrx_addr,
1658 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1659 		rxd->wrx_len = 0;
1660 		rxd->wrx_cksum = 0;
1661 		rxd->wrx_status = 0;
1662 		rxd->wrx_errors = 0;
1663 		rxd->wrx_special = 0;
1664 	}
1665 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1666 
1667 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
1668 }
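
/*
 * Why the 2-byte "scoot" above helps (illustrative): the Ethernet
 * header is 14 bytes, so starting the frame at buffer offset 2 puts
 * the IP header at offset 16, a 4-byte boundary, which is what
 * strict-alignment CPUs need for 32-bit loads.
 */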
1669 
1670 /*
1671  * Device driver interface functions and commonly used functions.
1672  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1673  */
1674 
1675 /* Lookup supported device table */
1676 static const struct wm_product *
1677 wm_lookup(const struct pci_attach_args *pa)
1678 {
1679 	const struct wm_product *wmp;
1680 
1681 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1682 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1683 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1684 			return wmp;
1685 	}
1686 	return NULL;
1687 }
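
/*
 * Illustrative note: the wm_products[] walk above relies on the
 * all-zero sentinel entry at the end of the table (wmp_name == NULL)
 * to terminate the scan when no vendor/product pair matches.
 */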
1688 
1689 /* The match function (ca_match) */
1690 static int
1691 wm_match(device_t parent, cfdata_t cf, void *aux)
1692 {
1693 	struct pci_attach_args *pa = aux;
1694 
1695 	if (wm_lookup(pa) != NULL)
1696 		return 1;
1697 
1698 	return 0;
1699 }
1700 
1701 /* The attach function (ca_attach) */
1702 static void
1703 wm_attach(device_t parent, device_t self, void *aux)
1704 {
1705 	struct wm_softc *sc = device_private(self);
1706 	struct pci_attach_args *pa = aux;
1707 	prop_dictionary_t dict;
1708 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1709 	pci_chipset_tag_t pc = pa->pa_pc;
1710 	int counts[PCI_INTR_TYPE_SIZE];
1711 	pci_intr_type_t max_type;
1712 	const char *eetype, *xname;
1713 	bus_space_tag_t memt;
1714 	bus_space_handle_t memh;
1715 	bus_size_t memsize;
1716 	int memh_valid;
1717 	int i, error;
1718 	const struct wm_product *wmp;
1719 	prop_data_t ea;
1720 	prop_number_t pn;
1721 	uint8_t enaddr[ETHER_ADDR_LEN];
1722 	char buf[256];
1723 	uint16_t cfg1, cfg2, swdpin, nvmword;
1724 	pcireg_t preg, memtype;
1725 	uint16_t eeprom_data, apme_mask;
1726 	bool force_clear_smbi;
1727 	uint32_t link_mode;
1728 	uint32_t reg;
1729 
1730 	sc->sc_dev = self;
1731 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
1732 	sc->sc_core_stopping = false;
1733 
1734 	wmp = wm_lookup(pa);
1735 #ifdef DIAGNOSTIC
1736 	if (wmp == NULL) {
1737 		printf("\n");
1738 		panic("wm_attach: impossible");
1739 	}
1740 #endif
1741 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
1742 
1743 	sc->sc_pc = pa->pa_pc;
1744 	sc->sc_pcitag = pa->pa_tag;
1745 
1746 	if (pci_dma64_available(pa))
1747 		sc->sc_dmat = pa->pa_dmat64;
1748 	else
1749 		sc->sc_dmat = pa->pa_dmat;
1750 
1751 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
1752 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
1753 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1754 
1755 	sc->sc_type = wmp->wmp_type;
1756 
1757 	/* Set default function pointers */
1758 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
1759 	sc->phy.release = sc->nvm.release = wm_put_null;
1760 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
1761 
1762 	if (sc->sc_type < WM_T_82543) {
1763 		if (sc->sc_rev < 2) {
1764 			aprint_error_dev(sc->sc_dev,
1765 			    "i82542 must be at least rev. 2\n");
1766 			return;
1767 		}
1768 		if (sc->sc_rev < 3)
1769 			sc->sc_type = WM_T_82542_2_0;
1770 	}
1771 
1772 	/*
1773 	 * Disable MSI for Errata:
1774 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
1775 	 *
1776 	 *  82544: Errata 25
1777 	 *  82540: Errata  6 (easy to reproduce device timeout)
1778 	 *  82545: Errata  4 (easy to reproduce device timeout)
1779 	 *  82546: Errata 26 (easy to reproduce device timeout)
1780 	 *  82541: Errata  7 (easy to reproduce device timeout)
1781 	 *
1782 	 * "Byte Enables 2 and 3 are not set on MSI writes"
1783 	 *
1784 	 *  82571 & 82572: Errata 63
1785 	 */
1786 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
1787 	    || (sc->sc_type == WM_T_82572))
1788 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
1789 
1790 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1791 	    || (sc->sc_type == WM_T_82580)
1792 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
1793 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
1794 		sc->sc_flags |= WM_F_NEWQUEUE;
1795 
1796 	/* Set device properties (mactype) */
1797 	dict = device_properties(sc->sc_dev);
1798 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1799 
1800 	/*
1801 	 * Map the device.  All devices support memory-mapped access,
1802 	 * and it is really required for normal operation.
1803 	 */
1804 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1805 	switch (memtype) {
1806 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1807 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1808 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1809 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1810 		break;
1811 	default:
1812 		memh_valid = 0;
1813 		break;
1814 	}
1815 
1816 	if (memh_valid) {
1817 		sc->sc_st = memt;
1818 		sc->sc_sh = memh;
1819 		sc->sc_ss = memsize;
1820 	} else {
1821 		aprint_error_dev(sc->sc_dev,
1822 		    "unable to map device registers\n");
1823 		return;
1824 	}
1825 
1826 	/*
1827 	 * In addition, i82544 and later support I/O mapped indirect
1828 	 * register access.  It is not desirable (nor supported in
1829 	 * this driver) to use it for normal operation, though it is
1830 	 * required to work around bugs in some chip versions.
1831 	 */
1832 	if (sc->sc_type >= WM_T_82544) {
1833 		/* First we have to find the I/O BAR. */
1834 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1835 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1836 			if (memtype == PCI_MAPREG_TYPE_IO)
1837 				break;
1838 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
1839 			    PCI_MAPREG_MEM_TYPE_64BIT)
1840 				i += 4;	/* skip high bits, too */
1841 		}
1842 		if (i < PCI_MAPREG_END) {
1843 			/*
1844 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
1845 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
1846 			 * That's not a problem, because those newer chips
1847 			 * don't have this bug.
1848 			 *
1849 			 * The i8254x apparently doesn't respond when the
1850 			 * I/O BAR is 0, which looks as if it hasn't been
1851 			 * configured.
1852 			 */
1853 			preg = pci_conf_read(pc, pa->pa_tag, i);
1854 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1855 				aprint_error_dev(sc->sc_dev,
1856 				    "WARNING: I/O BAR at zero.\n");
1857 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1858 					0, &sc->sc_iot, &sc->sc_ioh,
1859 					NULL, &sc->sc_ios) == 0) {
1860 				sc->sc_flags |= WM_F_IOH_VALID;
1861 			} else {
1862 				aprint_error_dev(sc->sc_dev,
1863 				    "WARNING: unable to map I/O space\n");
1864 			}
1865 		}
1866 
1867 	}
1868 
1869 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
1870 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1871 	preg |= PCI_COMMAND_MASTER_ENABLE;
1872 	if (sc->sc_type < WM_T_82542_2_1)
1873 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1874 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1875 
1876 	/* power up chip */
1877 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1878 	    NULL)) && error != EOPNOTSUPP) {
1879 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1880 		return;
1881 	}
1882 
1883 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
1884 	/*
1885 	 * To save interrupt resources, don't use MSI-X if we can use
1886 	 * only one queue.
1887 	 */
1888 	if (sc->sc_nqueues > 1) {
1889 		max_type = PCI_INTR_TYPE_MSIX;
1890 		/*
1891 		 * The 82583 has an MSI-X capability in its PCI configuration
1892 		 * space but doesn't actually support it; at least the
1893 		 * documentation says nothing about MSI-X.
1894 		 */
1895 		counts[PCI_INTR_TYPE_MSIX]
1896 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
1897 	} else {
1898 		max_type = PCI_INTR_TYPE_MSI;
1899 		counts[PCI_INTR_TYPE_MSIX] = 0;
1900 	}
1901 
1902 	/* Allocation settings */
1903 	counts[PCI_INTR_TYPE_MSI] = 1;
1904 	counts[PCI_INTR_TYPE_INTX] = 1;
1905 	/* overridden by disable flags */
1906 	if (wm_disable_msi != 0) {
1907 		counts[PCI_INTR_TYPE_MSI] = 0;
1908 		if (wm_disable_msix != 0) {
1909 			max_type = PCI_INTR_TYPE_INTX;
1910 			counts[PCI_INTR_TYPE_MSIX] = 0;
1911 		}
1912 	} else if (wm_disable_msix != 0) {
1913 		max_type = PCI_INTR_TYPE_MSI;
1914 		counts[PCI_INTR_TYPE_MSIX] = 0;
1915 	}
1916 
1917 alloc_retry:
1918 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
1919 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
1920 		return;
1921 	}
1922 
1923 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
1924 		error = wm_setup_msix(sc);
1925 		if (error) {
1926 			pci_intr_release(pc, sc->sc_intrs,
1927 			    counts[PCI_INTR_TYPE_MSIX]);
1928 
1929 			/* Setup for MSI: Disable MSI-X */
1930 			max_type = PCI_INTR_TYPE_MSI;
1931 			counts[PCI_INTR_TYPE_MSI] = 1;
1932 			counts[PCI_INTR_TYPE_INTX] = 1;
1933 			goto alloc_retry;
1934 		}
1935 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
1936 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
1937 		error = wm_setup_legacy(sc);
1938 		if (error) {
1939 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
1940 			    counts[PCI_INTR_TYPE_MSI]);
1941 
1942 			/* The next try is for INTx: Disable MSI */
1943 			max_type = PCI_INTR_TYPE_INTX;
1944 			counts[PCI_INTR_TYPE_INTX] = 1;
1945 			goto alloc_retry;
1946 		}
1947 	} else {
1948 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
1949 		error = wm_setup_legacy(sc);
1950 		if (error) {
1951 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
1952 			    counts[PCI_INTR_TYPE_INTX]);
1953 			return;
1954 		}
1955 	}
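
	/*
	 * Illustrative summary of the alloc_retry logic above: interrupt
	 * allocation degrades gracefully, releasing the vectors of a
	 * failed attempt before each retry:
	 *
	 *	MSI-X, wm_setup_msix() fails  -> retry with MSI
	 *	MSI, wm_setup_legacy() fails  -> retry with INTx
	 *	INTx, wm_setup_legacy() fails -> give up
	 */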
1956 
1957 	/*
1958 	 * Check the function ID (unit number of the chip).
1959 	 */
1960 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1961 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
1962 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1963 	    || (sc->sc_type == WM_T_82580)
1964 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
1965 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1966 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1967 	else
1968 		sc->sc_funcid = 0;
1969 
1970 	/*
1971 	 * Determine a few things about the bus we're connected to.
1972 	 */
1973 	if (sc->sc_type < WM_T_82543) {
1974 		/* We don't really know the bus characteristics here. */
1975 		sc->sc_bus_speed = 33;
1976 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1977 		/*
1978 		 * CSA (Communication Streaming Architecture) is about as fast
1979 		 * as a 32-bit 66MHz PCI bus.
1980 		 */
1981 		sc->sc_flags |= WM_F_CSA;
1982 		sc->sc_bus_speed = 66;
1983 		aprint_verbose_dev(sc->sc_dev,
1984 		    "Communication Streaming Architecture\n");
1985 		if (sc->sc_type == WM_T_82547) {
1986 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1987 			callout_setfunc(&sc->sc_txfifo_ch,
1988 					wm_82547_txfifo_stall, sc);
1989 			aprint_verbose_dev(sc->sc_dev,
1990 			    "using 82547 Tx FIFO stall work-around\n");
1991 		}
1992 	} else if (sc->sc_type >= WM_T_82571) {
1993 		sc->sc_flags |= WM_F_PCIE;
1994 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1995 		    && (sc->sc_type != WM_T_ICH10)
1996 		    && (sc->sc_type != WM_T_PCH)
1997 		    && (sc->sc_type != WM_T_PCH2)
1998 		    && (sc->sc_type != WM_T_PCH_LPT)
1999 		    && (sc->sc_type != WM_T_PCH_SPT)
2000 		    && (sc->sc_type != WM_T_PCH_CNP)) {
2001 			/* ICH* and PCH* have no PCIe capability registers */
2002 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2003 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
2004 				NULL) == 0)
2005 				aprint_error_dev(sc->sc_dev,
2006 				    "unable to find PCIe capability\n");
2007 		}
2008 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
2009 	} else {
2010 		reg = CSR_READ(sc, WMREG_STATUS);
2011 		if (reg & STATUS_BUS64)
2012 			sc->sc_flags |= WM_F_BUS64;
2013 		if ((reg & STATUS_PCIX_MODE) != 0) {
2014 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
2015 
2016 			sc->sc_flags |= WM_F_PCIX;
2017 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2018 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
2019 				aprint_error_dev(sc->sc_dev,
2020 				    "unable to find PCIX capability\n");
2021 			else if (sc->sc_type != WM_T_82545_3 &&
2022 				 sc->sc_type != WM_T_82546_3) {
2023 				/*
2024 				 * Work around a problem caused by the BIOS
2025 				 * setting the max memory read byte count
2026 				 * incorrectly.
2027 				 */
2028 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
2029 				    sc->sc_pcixe_capoff + PCIX_CMD);
2030 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
2031 				    sc->sc_pcixe_capoff + PCIX_STATUS);
2032 
2033 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
2034 				    PCIX_CMD_BYTECNT_SHIFT;
2035 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
2036 				    PCIX_STATUS_MAXB_SHIFT;
2037 				if (bytecnt > maxb) {
2038 					aprint_verbose_dev(sc->sc_dev,
2039 					    "resetting PCI-X MMRBC: %d -> %d\n",
2040 					    512 << bytecnt, 512 << maxb);
2041 					pcix_cmd = (pcix_cmd &
2042 					    ~PCIX_CMD_BYTECNT_MASK) |
2043 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
2044 					pci_conf_write(pa->pa_pc, pa->pa_tag,
2045 					    sc->sc_pcixe_capoff + PCIX_CMD,
2046 					    pcix_cmd);
2047 				}
2048 			}
2049 		}
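		/*
		 * Worked example for the MMRBC fix above (illustrative):
		 * bytecnt == 3 encodes a 4096-byte max read (512 << 3)
		 * while maxb == 2 allows only 2048 (512 << 2), so the
		 * command register is rewritten with the smaller value.
		 */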
2050 		/*
2051 		 * The quad port adapter is special; it has a PCIX-PCIX
2052 		 * bridge on the board, and can run the secondary bus at
2053 		 * a higher speed.
2054 		 */
2055 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
2056 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
2057 								      : 66;
2058 		} else if (sc->sc_flags & WM_F_PCIX) {
2059 			switch (reg & STATUS_PCIXSPD_MASK) {
2060 			case STATUS_PCIXSPD_50_66:
2061 				sc->sc_bus_speed = 66;
2062 				break;
2063 			case STATUS_PCIXSPD_66_100:
2064 				sc->sc_bus_speed = 100;
2065 				break;
2066 			case STATUS_PCIXSPD_100_133:
2067 				sc->sc_bus_speed = 133;
2068 				break;
2069 			default:
2070 				aprint_error_dev(sc->sc_dev,
2071 				    "unknown PCIXSPD %d; assuming 66MHz\n",
2072 				    reg & STATUS_PCIXSPD_MASK);
2073 				sc->sc_bus_speed = 66;
2074 				break;
2075 			}
2076 		} else
2077 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
2078 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
2079 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
2080 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
2081 	}
2082 
2083 	/* Disable ASPM L0s and/or L1 for workaround */
2084 	wm_disable_aspm(sc);
2085 
2086 	/* clear interesting stat counters */
2087 	CSR_READ(sc, WMREG_COLC);
2088 	CSR_READ(sc, WMREG_RXERRC);
2089 
2090 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
2091 	    || (sc->sc_type >= WM_T_ICH8))
2092 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2093 	if (sc->sc_type >= WM_T_ICH8)
2094 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2095 
2096 	/* Set PHY, NVM mutex related stuff */
2097 	switch (sc->sc_type) {
2098 	case WM_T_82542_2_0:
2099 	case WM_T_82542_2_1:
2100 	case WM_T_82543:
2101 	case WM_T_82544:
2102 		/* Microwire */
2103 		sc->nvm.read = wm_nvm_read_uwire;
2104 		sc->sc_nvm_wordsize = 64;
2105 		sc->sc_nvm_addrbits = 6;
2106 		break;
2107 	case WM_T_82540:
2108 	case WM_T_82545:
2109 	case WM_T_82545_3:
2110 	case WM_T_82546:
2111 	case WM_T_82546_3:
2112 		/* Microwire */
2113 		sc->nvm.read = wm_nvm_read_uwire;
2114 		reg = CSR_READ(sc, WMREG_EECD);
2115 		if (reg & EECD_EE_SIZE) {
2116 			sc->sc_nvm_wordsize = 256;
2117 			sc->sc_nvm_addrbits = 8;
2118 		} else {
2119 			sc->sc_nvm_wordsize = 64;
2120 			sc->sc_nvm_addrbits = 6;
2121 		}
2122 		sc->sc_flags |= WM_F_LOCK_EECD;
2123 		sc->nvm.acquire = wm_get_eecd;
2124 		sc->nvm.release = wm_put_eecd;
2125 		break;
2126 	case WM_T_82541:
2127 	case WM_T_82541_2:
2128 	case WM_T_82547:
2129 	case WM_T_82547_2:
2130 		reg = CSR_READ(sc, WMREG_EECD);
2131 		/*
2132 		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI only on
2133 		 * 8254[17], so set the flags and functions before calling it.
2134 		 */
2135 		sc->sc_flags |= WM_F_LOCK_EECD;
2136 		sc->nvm.acquire = wm_get_eecd;
2137 		sc->nvm.release = wm_put_eecd;
2138 		if (reg & EECD_EE_TYPE) {
2139 			/* SPI */
2140 			sc->nvm.read = wm_nvm_read_spi;
2141 			sc->sc_flags |= WM_F_EEPROM_SPI;
2142 			wm_nvm_set_addrbits_size_eecd(sc);
2143 		} else {
2144 			/* Microwire */
2145 			sc->nvm.read = wm_nvm_read_uwire;
2146 			if ((reg & EECD_EE_ABITS) != 0) {
2147 				sc->sc_nvm_wordsize = 256;
2148 				sc->sc_nvm_addrbits = 8;
2149 			} else {
2150 				sc->sc_nvm_wordsize = 64;
2151 				sc->sc_nvm_addrbits = 6;
2152 			}
2153 		}
2154 		break;
2155 	case WM_T_82571:
2156 	case WM_T_82572:
2157 		/* SPI */
2158 		sc->nvm.read = wm_nvm_read_eerd;
2159 		/* Don't use WM_F_LOCK_EECD because we use EERD */
2160 		sc->sc_flags |= WM_F_EEPROM_SPI;
2161 		wm_nvm_set_addrbits_size_eecd(sc);
2162 		sc->phy.acquire = wm_get_swsm_semaphore;
2163 		sc->phy.release = wm_put_swsm_semaphore;
2164 		sc->nvm.acquire = wm_get_nvm_82571;
2165 		sc->nvm.release = wm_put_nvm_82571;
2166 		break;
2167 	case WM_T_82573:
2168 	case WM_T_82574:
2169 	case WM_T_82583:
2170 		sc->nvm.read = wm_nvm_read_eerd;
2171 		/* Don't use WM_F_LOCK_EECD because we use EERD */
2172 		if (sc->sc_type == WM_T_82573) {
2173 			sc->phy.acquire = wm_get_swsm_semaphore;
2174 			sc->phy.release = wm_put_swsm_semaphore;
2175 			sc->nvm.acquire = wm_get_nvm_82571;
2176 			sc->nvm.release = wm_put_nvm_82571;
2177 		} else {
2178 			/* Both PHY and NVM use the same semaphore. */
2179 			sc->phy.acquire = sc->nvm.acquire
2180 			    = wm_get_swfwhw_semaphore;
2181 			sc->phy.release = sc->nvm.release
2182 			    = wm_put_swfwhw_semaphore;
2183 		}
2184 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
2185 			sc->sc_flags |= WM_F_EEPROM_FLASH;
2186 			sc->sc_nvm_wordsize = 2048;
2187 		} else {
2188 			/* SPI */
2189 			sc->sc_flags |= WM_F_EEPROM_SPI;
2190 			wm_nvm_set_addrbits_size_eecd(sc);
2191 		}
2192 		break;
2193 	case WM_T_82575:
2194 	case WM_T_82576:
2195 	case WM_T_82580:
2196 	case WM_T_I350:
2197 	case WM_T_I354:
2198 	case WM_T_80003:
2199 		/* SPI */
2200 		sc->sc_flags |= WM_F_EEPROM_SPI;
2201 		wm_nvm_set_addrbits_size_eecd(sc);
2202 		if ((sc->sc_type == WM_T_80003)
2203 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
2204 			sc->nvm.read = wm_nvm_read_eerd;
2205 			/* Don't use WM_F_LOCK_EECD because we use EERD */
2206 		} else {
2207 			sc->nvm.read = wm_nvm_read_spi;
2208 			sc->sc_flags |= WM_F_LOCK_EECD;
2209 		}
2210 		sc->phy.acquire = wm_get_phy_82575;
2211 		sc->phy.release = wm_put_phy_82575;
2212 		sc->nvm.acquire = wm_get_nvm_80003;
2213 		sc->nvm.release = wm_put_nvm_80003;
2214 		break;
2215 	case WM_T_ICH8:
2216 	case WM_T_ICH9:
2217 	case WM_T_ICH10:
2218 	case WM_T_PCH:
2219 	case WM_T_PCH2:
2220 	case WM_T_PCH_LPT:
2221 		sc->nvm.read = wm_nvm_read_ich8;
2222 		/* FLASH */
2223 		sc->sc_flags |= WM_F_EEPROM_FLASH;
2224 		sc->sc_nvm_wordsize = 2048;
2225 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
2226 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2227 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2228 			aprint_error_dev(sc->sc_dev,
2229 			    "can't map FLASH registers\n");
2230 			goto out;
2231 		}
2232 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2233 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2234 		    ICH_FLASH_SECTOR_SIZE;
2235 		sc->sc_ich8_flash_bank_size =
2236 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2237 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
2238 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2239 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
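		/*
		 * Reading of the arithmetic above (illustrative): GFPREG
		 * holds the first and last sector numbers of the flash
		 * region; (limit + 1 - base) sectors of
		 * ICH_FLASH_SECTOR_SIZE bytes give the region size, and
		 * dividing by 2 * sizeof(uint16_t) both converts bytes to
		 * 16-bit words and splits the region into its two banks.
		 */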
2240 		sc->sc_flashreg_offset = 0;
2241 		sc->phy.acquire = wm_get_swflag_ich8lan;
2242 		sc->phy.release = wm_put_swflag_ich8lan;
2243 		sc->nvm.acquire = wm_get_nvm_ich8lan;
2244 		sc->nvm.release = wm_put_nvm_ich8lan;
2245 		break;
2246 	case WM_T_PCH_SPT:
2247 	case WM_T_PCH_CNP:
2248 		sc->nvm.read = wm_nvm_read_spt;
2249 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
2250 		sc->sc_flags |= WM_F_EEPROM_FLASH;
2251 		sc->sc_flasht = sc->sc_st;
2252 		sc->sc_flashh = sc->sc_sh;
2253 		sc->sc_ich8_flash_base = 0;
2254 		sc->sc_nvm_wordsize =
2255 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
2256 			* NVM_SIZE_MULTIPLIER;
2257 		/* It is the size in bytes; we want words */
2258 		sc->sc_nvm_wordsize /= 2;
2259 		/* assume 2 banks */
2260 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
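		/*
		 * Reading of the sizing above (illustrative): bits 5:1 of
		 * WMREG_STRAP, plus one, give the NVM size in units of
		 * NVM_SIZE_MULTIPLIER bytes; halving once converts bytes
		 * to 16-bit words, and the bank size halves again under
		 * the two-bank assumption.
		 */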
2261 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
2262 		sc->phy.acquire = wm_get_swflag_ich8lan;
2263 		sc->phy.release = wm_put_swflag_ich8lan;
2264 		sc->nvm.acquire = wm_get_nvm_ich8lan;
2265 		sc->nvm.release = wm_put_nvm_ich8lan;
2266 		break;
2267 	case WM_T_I210:
2268 	case WM_T_I211:
2269 		/* Allow a single clear of the SW semaphore on I210 and newer */
2270 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
2271 		if (wm_nvm_flash_presence_i210(sc)) {
2272 			sc->nvm.read = wm_nvm_read_eerd;
2273 			/* Don't use WM_F_LOCK_EECD because we use EERD */
2274 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2275 			wm_nvm_set_addrbits_size_eecd(sc);
2276 		} else {
2277 			sc->nvm.read = wm_nvm_read_invm;
2278 			sc->sc_flags |= WM_F_EEPROM_INVM;
2279 			sc->sc_nvm_wordsize = INVM_SIZE;
2280 		}
2281 		sc->phy.acquire = wm_get_phy_82575;
2282 		sc->phy.release = wm_put_phy_82575;
2283 		sc->nvm.acquire = wm_get_nvm_80003;
2284 		sc->nvm.release = wm_put_nvm_80003;
2285 		break;
2286 	default:
2287 		break;
2288 	}
2289 
2290 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
2291 	switch (sc->sc_type) {
2292 	case WM_T_82571:
2293 	case WM_T_82572:
2294 		reg = CSR_READ(sc, WMREG_SWSM2);
2295 		if ((reg & SWSM2_LOCK) == 0) {
2296 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2297 			force_clear_smbi = true;
2298 		} else
2299 			force_clear_smbi = false;
2300 		break;
2301 	case WM_T_82573:
2302 	case WM_T_82574:
2303 	case WM_T_82583:
2304 		force_clear_smbi = true;
2305 		break;
2306 	default:
2307 		force_clear_smbi = false;
2308 		break;
2309 	}
2310 	if (force_clear_smbi) {
2311 		reg = CSR_READ(sc, WMREG_SWSM);
2312 		if ((reg & SWSM_SMBI) != 0)
2313 			aprint_error_dev(sc->sc_dev,
2314 			    "Please update the Bootagent\n");
2315 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2316 	}
2317 
2318 	/*
2319 	 * Defer printing the EEPROM type until after verifying the checksum.
2320 	 * This allows the EEPROM type to be printed correctly in the case
2321 	 * that no EEPROM is attached.
2322 	 */
2323 	/*
2324 	 * Validate the EEPROM checksum. If the checksum fails, flag
2325 	 * this for later, so we can fail future reads from the EEPROM.
2326 	 */
2327 	if (wm_nvm_validate_checksum(sc)) {
2328 		/*
2329 		 * Check again, because some PCI-e parts fail the
2330 		 * first check due to the link being in a sleep state.
2331 		 */
2332 		if (wm_nvm_validate_checksum(sc))
2333 			sc->sc_flags |= WM_F_EEPROM_INVALID;
2334 	}
2335 
2336 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
2337 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2338 	else {
2339 		aprint_verbose_dev(sc->sc_dev, "%u words ",
2340 		    sc->sc_nvm_wordsize);
2341 		if (sc->sc_flags & WM_F_EEPROM_INVM)
2342 			aprint_verbose("iNVM");
2343 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2344 			aprint_verbose("FLASH(HW)");
2345 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2346 			aprint_verbose("FLASH");
2347 		else {
2348 			if (sc->sc_flags & WM_F_EEPROM_SPI)
2349 				eetype = "SPI";
2350 			else
2351 				eetype = "MicroWire";
2352 			aprint_verbose("(%d address bits) %s EEPROM",
2353 			    sc->sc_nvm_addrbits, eetype);
2354 		}
2355 	}
2356 	wm_nvm_version(sc);
2357 	aprint_verbose("\n");
2358 
2359 	/*
2360 	 * XXX This is the first call to wm_gmii_setup_phytype; the result
2361 	 * might be incorrect.
2362 	 */
2363 	wm_gmii_setup_phytype(sc, 0, 0);
2364 
2365 	/* Reset the chip to a known state. */
2366 	wm_reset(sc);
2367 
2368 	/*
2369 	 * Check for I21[01] PLL workaround.
2370 	 *
2371 	 * Three cases:
2372 	 * a) Chip is I211.
2373 	 * b) Chip is I210 and it uses INVM (not FLASH).
2374 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
2375 	 */
2376 	if (sc->sc_type == WM_T_I211)
2377 		sc->sc_flags |= WM_F_PLL_WA_I210;
2378 	if (sc->sc_type == WM_T_I210) {
2379 		if (!wm_nvm_flash_presence_i210(sc))
2380 			sc->sc_flags |= WM_F_PLL_WA_I210;
2381 		else if ((sc->sc_nvm_ver_major < 3)
2382 		    || ((sc->sc_nvm_ver_major == 3)
2383 			&& (sc->sc_nvm_ver_minor < 25))) {
2384 			aprint_verbose_dev(sc->sc_dev,
2385 			    "ROM image version %d.%d is older than 3.25\n",
2386 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2387 			sc->sc_flags |= WM_F_PLL_WA_I210;
2388 		}
2389 	}
2390 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2391 		wm_pll_workaround_i210(sc);
2392 
2393 	wm_get_wakeup(sc);
2394 
2395 	/* Non-AMT based hardware can now take control from firmware */
2396 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2397 		wm_get_hw_control(sc);
2398 
2399 	/*
2400 	 * Read the Ethernet address from the EEPROM, if not first found
2401 	 * in device properties.
2402 	 */
2403 	ea = prop_dictionary_get(dict, "mac-address");
2404 	if (ea != NULL) {
2405 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2406 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2407 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
2408 	} else {
2409 		if (wm_read_mac_addr(sc, enaddr) != 0) {
2410 			aprint_error_dev(sc->sc_dev,
2411 			    "unable to read Ethernet address\n");
2412 			goto out;
2413 		}
2414 	}
2415 
2416 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2417 	    ether_sprintf(enaddr));
2418 
2419 	/*
2420 	 * Read the config info from the EEPROM, and set up various
2421 	 * bits in the control registers based on their contents.
2422 	 */
2423 	pn = prop_dictionary_get(dict, "i82543-cfg1");
2424 	if (pn != NULL) {
2425 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2426 		cfg1 = (uint16_t) prop_number_integer_value(pn);
2427 	} else {
2428 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2429 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2430 			goto out;
2431 		}
2432 	}
2433 
2434 	pn = prop_dictionary_get(dict, "i82543-cfg2");
2435 	if (pn != NULL) {
2436 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2437 		cfg2 = (uint16_t) prop_number_integer_value(pn);
2438 	} else {
2439 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2440 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2441 			goto out;
2442 		}
2443 	}
2444 
2445 	/* check for WM_F_WOL */
2446 	switch (sc->sc_type) {
2447 	case WM_T_82542_2_0:
2448 	case WM_T_82542_2_1:
2449 	case WM_T_82543:
2450 		/* dummy? */
2451 		eeprom_data = 0;
2452 		apme_mask = NVM_CFG3_APME;
2453 		break;
2454 	case WM_T_82544:
2455 		apme_mask = NVM_CFG2_82544_APM_EN;
2456 		eeprom_data = cfg2;
2457 		break;
2458 	case WM_T_82546:
2459 	case WM_T_82546_3:
2460 	case WM_T_82571:
2461 	case WM_T_82572:
2462 	case WM_T_82573:
2463 	case WM_T_82574:
2464 	case WM_T_82583:
2465 	case WM_T_80003:
2466 	default:
2467 		apme_mask = NVM_CFG3_APME;
2468 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2469 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2470 		break;
2471 	case WM_T_82575:
2472 	case WM_T_82576:
2473 	case WM_T_82580:
2474 	case WM_T_I350:
2475 	case WM_T_I354: /* XXX ok? */
2476 	case WM_T_ICH8:
2477 	case WM_T_ICH9:
2478 	case WM_T_ICH10:
2479 	case WM_T_PCH:
2480 	case WM_T_PCH2:
2481 	case WM_T_PCH_LPT:
2482 	case WM_T_PCH_SPT:
2483 	case WM_T_PCH_CNP:
2484 		/* XXX The funcid should be checked on some devices */
2485 		apme_mask = WUC_APME;
2486 		eeprom_data = CSR_READ(sc, WMREG_WUC);
2487 		break;
2488 	}
2489 
2490 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2491 	if ((eeprom_data & apme_mask) != 0)
2492 		sc->sc_flags |= WM_F_WOL;
2493 
2494 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
2495 		/* Check NVM for autonegotiation */
2496 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2497 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
2498 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2499 		}
2500 	}
2501 
2502 	/*
2503 	 * XXX need special handling for some multiple-port cards
2504 	 * to disable a particular port.
2505 	 */
2506 
2507 	if (sc->sc_type >= WM_T_82544) {
2508 		pn = prop_dictionary_get(dict, "i82543-swdpin");
2509 		if (pn != NULL) {
2510 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2511 			swdpin = (uint16_t) prop_number_integer_value(pn);
2512 		} else {
2513 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2514 				aprint_error_dev(sc->sc_dev,
2515 				    "unable to read SWDPIN\n");
2516 				goto out;
2517 			}
2518 		}
2519 	}
2520 
2521 	if (cfg1 & NVM_CFG1_ILOS)
2522 		sc->sc_ctrl |= CTRL_ILOS;
2523 
2524 	/*
2525 	 * XXX
2526 	 * This code isn't correct because pins 2 and 3 are located
2527 	 * in different positions on newer chips. Check all datasheets.
2528 	 *
2529 	 * Until this problem is resolved, only do this on chips < 82580.
2530 	 */
2531 	if (sc->sc_type <= WM_T_82580) {
2532 		if (sc->sc_type >= WM_T_82544) {
2533 			sc->sc_ctrl |=
2534 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2535 			    CTRL_SWDPIO_SHIFT;
2536 			sc->sc_ctrl |=
2537 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2538 			    CTRL_SWDPINS_SHIFT;
2539 		} else {
2540 			sc->sc_ctrl |=
2541 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2542 			    CTRL_SWDPIO_SHIFT;
2543 		}
2544 	}
2545 
2546 	/* XXX What about chips other than the 82580? */
2547 	if (sc->sc_type == WM_T_82580) {
2548 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
2549 		if (nvmword & __BIT(13))
2550 			sc->sc_ctrl |= CTRL_ILOS;
2551 	}
2552 
2553 #if 0
2554 	if (sc->sc_type >= WM_T_82544) {
2555 		if (cfg1 & NVM_CFG1_IPS0)
2556 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2557 		if (cfg1 & NVM_CFG1_IPS1)
2558 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2559 		sc->sc_ctrl_ext |=
2560 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2561 		    CTRL_EXT_SWDPIO_SHIFT;
2562 		sc->sc_ctrl_ext |=
2563 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2564 		    CTRL_EXT_SWDPINS_SHIFT;
2565 	} else {
2566 		sc->sc_ctrl_ext |=
2567 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2568 		    CTRL_EXT_SWDPIO_SHIFT;
2569 	}
2570 #endif
2571 
2572 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2573 #if 0
2574 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2575 #endif
2576 
2577 	if (sc->sc_type == WM_T_PCH) {
2578 		uint16_t val;
2579 
2580 		/* Save the NVM K1 bit setting */
2581 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2582 
2583 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2584 			sc->sc_nvm_k1_enabled = 1;
2585 		else
2586 			sc->sc_nvm_k1_enabled = 0;
2587 	}
2588 
2589 	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
2590 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2591 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2592 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2593 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
2594 	    || sc->sc_type == WM_T_82573
2595 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2596 		/* Copper only */
2597 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2598 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
2599 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
2600 	    || (sc->sc_type == WM_T_I211)) {
2601 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
2602 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2603 		switch (link_mode) {
2604 		case CTRL_EXT_LINK_MODE_1000KX:
2605 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2606 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2607 			break;
2608 		case CTRL_EXT_LINK_MODE_SGMII:
2609 			if (wm_sgmii_uses_mdio(sc)) {
2610 				aprint_verbose_dev(sc->sc_dev,
2611 				    "SGMII(MDIO)\n");
2612 				sc->sc_flags |= WM_F_SGMII;
2613 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2614 				break;
2615 			}
2616 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2617 			/*FALLTHROUGH*/
2618 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2619 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
2620 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2621 				if (link_mode
2622 				    == CTRL_EXT_LINK_MODE_SGMII) {
2623 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2624 					sc->sc_flags |= WM_F_SGMII;
2625 				} else {
2626 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2627 					aprint_verbose_dev(sc->sc_dev,
2628 					    "SERDES\n");
2629 				}
2630 				break;
2631 			}
2632 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2633 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
2634 
2635 			/* Change current link mode setting */
2636 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
2637 			switch (sc->sc_mediatype) {
2638 			case WM_MEDIATYPE_COPPER:
2639 				reg |= CTRL_EXT_LINK_MODE_SGMII;
2640 				break;
2641 			case WM_MEDIATYPE_SERDES:
2642 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2643 				break;
2644 			default:
2645 				break;
2646 			}
2647 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2648 			break;
2649 		case CTRL_EXT_LINK_MODE_GMII:
2650 		default:
2651 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
2652 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2653 			break;
2654 		}
2655 
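		/*
		 * Illustrative summary of the link_mode decode above:
		 *	1000KX                 -> SERDES
		 *	SGMII over MDIO        -> COPPER + WM_F_SGMII
		 *	SGMII over I2C, SERDES -> ask the SFP module
		 *	GMII and others        -> COPPER
		 */
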
2656 		/* Enable the I2C interface only in SGMII mode */
2657 		if ((sc->sc_flags & WM_F_SGMII) != 0)
2658 			reg |= CTRL_EXT_I2C_ENA;
2659 		else
2660 			reg &= ~CTRL_EXT_I2C_ENA;
2661 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2662 	} else if (sc->sc_type < WM_T_82543 ||
2663 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2664 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2665 			aprint_error_dev(sc->sc_dev,
2666 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
2667 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2668 		}
2669 	} else {
2670 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
2671 			aprint_error_dev(sc->sc_dev,
2672 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2673 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2674 		}
2675 	}
2676 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
2677 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
2678 
2679 	/* Set device properties (macflags) */
2680 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
2681 
2682 	/* Initialize the media structures accordingly. */
2683 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2684 		wm_gmii_mediainit(sc, wmp->wmp_product);
2685 	else
2686 		wm_tbi_mediainit(sc); /* All others */
2687 
2688 	ifp = &sc->sc_ethercom.ec_if;
2689 	xname = device_xname(sc->sc_dev);
2690 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2691 	ifp->if_softc = sc;
2692 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2693 #ifdef WM_MPSAFE
2694 	ifp->if_extflags = IFEF_MPSAFE;
2695 #endif
2696 	ifp->if_ioctl = wm_ioctl;
2697 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
2698 		ifp->if_start = wm_nq_start;
2699 		/*
2700 		 * When the number of CPUs is one and the controller can use
2701 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
2702 		 * That is, wm(4) uses two interrupts: one for Tx/Rx
2703 		 * and the other for link status changes.
2704 		 * In this situation, wm_nq_transmit() is disadvantageous
2705 		 * because of wm_select_txqueue() and pcq(9) overhead.
2706 		 */
2707 		if (wm_is_using_multiqueue(sc))
2708 			ifp->if_transmit = wm_nq_transmit;
2709 	} else {
2710 		ifp->if_start = wm_start;
2711 		/*
2712 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
2713 		 */
2714 		if (wm_is_using_multiqueue(sc))
2715 			ifp->if_transmit = wm_transmit;
2716 	}
2717 	/* wm(4) doesn't use ifp->if_watchdog; wm_tick acts as the watchdog. */
2718 	ifp->if_init = wm_init;
2719 	ifp->if_stop = wm_stop;
2720 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2721 	IFQ_SET_READY(&ifp->if_snd);
2722 
2723 	/* Check for jumbo frame */
2724 	switch (sc->sc_type) {
2725 	case WM_T_82573:
2726 		/* XXX limited to 9234 if ASPM is disabled */
2727 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2728 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2729 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2730 		break;
2731 	case WM_T_82571:
2732 	case WM_T_82572:
2733 	case WM_T_82574:
2734 	case WM_T_82583:
2735 	case WM_T_82575:
2736 	case WM_T_82576:
2737 	case WM_T_82580:
2738 	case WM_T_I350:
2739 	case WM_T_I354:
2740 	case WM_T_I210:
2741 	case WM_T_I211:
2742 	case WM_T_80003:
2743 	case WM_T_ICH9:
2744 	case WM_T_ICH10:
2745 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
2746 	case WM_T_PCH_LPT:
2747 	case WM_T_PCH_SPT:
2748 	case WM_T_PCH_CNP:
2749 		/* XXX limited to 9234 */
2750 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2751 		break;
2752 	case WM_T_PCH:
2753 		/* XXX limited to 4096 */
2754 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2755 		break;
2756 	case WM_T_82542_2_0:
2757 	case WM_T_82542_2_1:
2758 	case WM_T_ICH8:
2759 		/* No support for jumbo frame */
2760 		break;
2761 	default:
2762 		/* ETHER_MAX_LEN_JUMBO */
2763 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2764 		break;
2765 	}
2766 
2767 	/* If we're an i82543 or greater, we can support VLANs. */
2768 	if (sc->sc_type >= WM_T_82543)
2769 		sc->sc_ethercom.ec_capabilities |=
2770 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2771 
2772 	/*
2773 	 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
2774 	 * on the i82543 and later.
2775 	 */
2776 	if (sc->sc_type >= WM_T_82543) {
2777 		ifp->if_capabilities |=
2778 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2779 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2780 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2781 		    IFCAP_CSUM_TCPv6_Tx |
2782 		    IFCAP_CSUM_UDPv6_Tx;
2783 	}
2784 
2785 	/*
2786 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2787 	 *
2788 	 *	82541GI (8086:1076) ... no
2789 	 *	82572EI (8086:10b9) ... yes
2790 	 */
2791 	if (sc->sc_type >= WM_T_82571) {
2792 		ifp->if_capabilities |=
2793 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2794 	}
2795 
2796 	/*
2797 	 * If we're an i82544 or greater (except the i82547), we can do
2798 	 * TCP segmentation offload.
2799 	 */
2800 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2801 		ifp->if_capabilities |= IFCAP_TSOv4;
2802 	}
2803 
2804 	if (sc->sc_type >= WM_T_82571) {
2805 		ifp->if_capabilities |= IFCAP_TSOv6;
2806 	}
2807 
2808 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
2809 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
2810 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
2811 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
2812 
2813 #ifdef WM_MPSAFE
2814 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2815 #else
2816 	sc->sc_core_lock = NULL;
2817 #endif
2818 
2819 	/* Attach the interface. */
2820 	error = if_initialize(ifp);
2821 	if (error != 0) {
2822 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
2823 		    error);
2824 		return; /* Error */
2825 	}
2826 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
2827 	ether_ifattach(ifp, enaddr);
2828 	if_register(ifp);
2829 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2830 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2831 			  RND_FLAG_DEFAULT);
2832 
2833 #ifdef WM_EVENT_COUNTERS
2834 	/* Attach event counters. */
2835 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2836 	    NULL, xname, "linkintr");
2837 
2838 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2839 	    NULL, xname, "tx_xoff");
2840 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2841 	    NULL, xname, "tx_xon");
2842 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2843 	    NULL, xname, "rx_xoff");
2844 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2845 	    NULL, xname, "rx_xon");
2846 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2847 	    NULL, xname, "rx_macctl");
2848 #endif /* WM_EVENT_COUNTERS */
2849 
2850 	if (pmf_device_register(self, wm_suspend, wm_resume))
2851 		pmf_class_network_register(self, ifp);
2852 	else
2853 		aprint_error_dev(self, "couldn't establish power handler\n");
2854 
2855 	sc->sc_flags |= WM_F_ATTACHED;
2856  out:
2857 	return;
2858 }
2859 
2860 /* The detach function (ca_detach) */
2861 static int
2862 wm_detach(device_t self, int flags __unused)
2863 {
2864 	struct wm_softc *sc = device_private(self);
2865 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2866 	int i;
2867 
2868 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2869 		return 0;
2870 
2871 	/* Stop the interface; callouts are stopped within it. */
2872 	wm_stop(ifp, 1);
2873 
2874 	pmf_device_deregister(self);
2875 
2876 #ifdef WM_EVENT_COUNTERS
2877 	evcnt_detach(&sc->sc_ev_linkintr);
2878 
2879 	evcnt_detach(&sc->sc_ev_tx_xoff);
2880 	evcnt_detach(&sc->sc_ev_tx_xon);
2881 	evcnt_detach(&sc->sc_ev_rx_xoff);
2882 	evcnt_detach(&sc->sc_ev_rx_xon);
2883 	evcnt_detach(&sc->sc_ev_rx_macctl);
2884 #endif /* WM_EVENT_COUNTERS */
2885 
2886 	/* Tell the firmware about the release */
2887 	WM_CORE_LOCK(sc);
2888 	wm_release_manageability(sc);
2889 	wm_release_hw_control(sc);
2890 	wm_enable_wakeup(sc);
2891 	WM_CORE_UNLOCK(sc);
2892 
2893 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2894 
2895 	/* Delete all remaining media. */
2896 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2897 
2898 	ether_ifdetach(ifp);
2899 	if_detach(ifp);
2900 	if_percpuq_destroy(sc->sc_ipq);
2901 
2902 	/* Unload RX dmamaps and free mbufs */
2903 	for (i = 0; i < sc->sc_nqueues; i++) {
2904 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
2905 		mutex_enter(rxq->rxq_lock);
2906 		wm_rxdrain(rxq);
2907 		mutex_exit(rxq->rxq_lock);
2908 	}
2909 	/* Must unlock here */
2910 
2911 	/* Disestablish the interrupt handler */
2912 	for (i = 0; i < sc->sc_nintrs; i++) {
2913 		if (sc->sc_ihs[i] != NULL) {
2914 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
2915 			sc->sc_ihs[i] = NULL;
2916 		}
2917 	}
2918 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
2919 
2920 	wm_free_txrx_queues(sc);
2921 
2922 	/* Unmap the registers */
2923 	if (sc->sc_ss) {
2924 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2925 		sc->sc_ss = 0;
2926 	}
2927 	if (sc->sc_ios) {
2928 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2929 		sc->sc_ios = 0;
2930 	}
2931 	if (sc->sc_flashs) {
2932 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
2933 		sc->sc_flashs = 0;
2934 	}
2935 
2936 	if (sc->sc_core_lock)
2937 		mutex_obj_free(sc->sc_core_lock);
2938 	if (sc->sc_ich_phymtx)
2939 		mutex_obj_free(sc->sc_ich_phymtx);
2940 	if (sc->sc_ich_nvmmtx)
2941 		mutex_obj_free(sc->sc_ich_nvmmtx);
2942 
2943 	return 0;
2944 }
2945 
2946 static bool
2947 wm_suspend(device_t self, const pmf_qual_t *qual)
2948 {
2949 	struct wm_softc *sc = device_private(self);
2950 
2951 	wm_release_manageability(sc);
2952 	wm_release_hw_control(sc);
2953 	wm_enable_wakeup(sc);
2954 
2955 	return true;
2956 }
2957 
2958 static bool
2959 wm_resume(device_t self, const pmf_qual_t *qual)
2960 {
2961 	struct wm_softc *sc = device_private(self);
2962 
2963 	/* Disable ASPM L0s and/or L1 for workaround */
2964 	wm_disable_aspm(sc);
2965 	wm_init_manageability(sc);
2966 
2967 	return true;
2968 }
2969 
2970 /*
2971  * wm_watchdog:		[ifnet interface function]
2972  *
2973  *	Watchdog timer handler.
2974  */
2975 static void
2976 wm_watchdog(struct ifnet *ifp)
2977 {
2978 	int qid;
2979 	struct wm_softc *sc = ifp->if_softc;
2980 	uint16_t hang_queue = 0; /* Max queue count in wm(4) is 16 (82576) */
2981 
2982 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
2983 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
2984 
2985 		wm_watchdog_txq(ifp, txq, &hang_queue);
2986 	}
2987 
2988 	/*
2989 	 * If any queue hung up, reset the interface.
2990 	 */
2991 	if (hang_queue != 0) {
2992 		(void) wm_init(ifp);
2993 
2994 		/*
2995 		 * There is still some upper-layer processing which calls
2996 		 * ifp->if_start(), e.g. ALTQ or a single-CPU system.
2997 		 */
2998 		/* Try to get more packets going. */
2999 		ifp->if_start(ifp);
3000 	}
3001 }
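
/*
 * Illustrative note: each hung queue sets its own bit in the 16-bit
 * "hang" bitmap via __BIT(wmq_id) in wm_watchdog_txq_locked(), so a
 * single wm_init() covers however many queues timed out at once.
 */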
3002 
3003 
3004 static void
3005 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
3006 {
3007 
3008 	mutex_enter(txq->txq_lock);
3009 	if (txq->txq_watchdog &&
3010 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout) {
3011 		wm_watchdog_txq_locked(ifp, txq, hang);
3012 	}
3013 	mutex_exit(txq->txq_lock);
3014 }
3015 
3016 static void
3017 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
3018 {
3019 	struct wm_softc *sc = ifp->if_softc;
3020 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
3021 
3022 	KASSERT(mutex_owned(txq->txq_lock));
3023 
3024 	/*
3025 	 * Since we're using delayed interrupts, sweep up
3026 	 * before we report an error.
3027 	 */
3028 	wm_txeof(txq, UINT_MAX);
3029 	if (txq->txq_watchdog)
3030 		*hang |= __BIT(wmq->wmq_id);
3031 
3032 	if (txq->txq_free != WM_NTXDESC(txq)) {
3033 #ifdef WM_DEBUG
3034 		int i, j;
3035 		struct wm_txsoft *txs;
3036 #endif
3037 		log(LOG_ERR,
3038 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3039 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
3040 		    txq->txq_next);
3041 		ifp->if_oerrors++;
3042 #ifdef WM_DEBUG
3043 		for (i = txq->txq_sdirty; i != txq->txq_snext ;
3044 		    i = WM_NEXTTXS(txq, i)) {
3045 		    txs = &txq->txq_soft[i];
3046 		    printf("txs %d tx %d -> %d\n",
3047 			i, txs->txs_firstdesc, txs->txs_lastdesc);
3048 		    for (j = txs->txs_firstdesc; ;
3049 			j = WM_NEXTTX(txq, j)) {
3050 			    if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3051 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3052 					txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
3053 				    printf("\t %#08x%08x\n",
3054 					txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
3055 					txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
3056 			    } else {
3057 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3058 					(uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
3059 					txq->txq_descs[j].wtx_addr.wa_low);
3060 				    printf("\t %#04x%02x%02x%08x\n",
3061 					txq->txq_descs[j].wtx_fields.wtxu_vlan,
3062 					txq->txq_descs[j].wtx_fields.wtxu_options,
3063 					txq->txq_descs[j].wtx_fields.wtxu_status,
3064 					txq->txq_descs[j].wtx_cmdlen);
3065 			    }
3066 			if (j == txs->txs_lastdesc)
3067 				break;
3068 			}
3069 		}
3070 #endif
3071 	}
3072 }
3073 
3074 /*
3075  * wm_tick:
3076  *
3077  *	One second timer, used to check link status, sweep up
3078  *	completed transmit jobs, etc.
3079  */
3080 static void
3081 wm_tick(void *arg)
3082 {
3083 	struct wm_softc *sc = arg;
3084 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3085 #ifndef WM_MPSAFE
3086 	int s = splnet();
3087 #endif
3088 
3089 	WM_CORE_LOCK(sc);
3090 
3091 	if (sc->sc_core_stopping) {
3092 		WM_CORE_UNLOCK(sc);
3093 #ifndef WM_MPSAFE
3094 		splx(s);
3095 #endif
3096 		return;
3097 	}
3098 
3099 	if (sc->sc_type >= WM_T_82542_2_1) {
3100 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3101 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3102 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3103 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3104 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3105 	}
3106 
3107 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3108 	ifp->if_ierrors += 0ULL /* ensure quad_t */
3109 	    + CSR_READ(sc, WMREG_CRCERRS)
3110 	    + CSR_READ(sc, WMREG_ALGNERRC)
3111 	    + CSR_READ(sc, WMREG_SYMERRC)
3112 	    + CSR_READ(sc, WMREG_RXERRC)
3113 	    + CSR_READ(sc, WMREG_SEC)
3114 	    + CSR_READ(sc, WMREG_CEXTERR)
3115 	    + CSR_READ(sc, WMREG_RLEC);
3116 	/*
3117 	 * WMREG_RNBC is incremented when there are no available buffers
3118 	 * in host memory. It does not count dropped packets, because the
3119 	 * ethernet controller can still receive packets in that case as
3120 	 * long as there is space in the PHY's FIFO.
3121 	 *
3122 	 * If you want to count WMREG_RNBC events, use your own EVCNT
3123 	 * instead of if_iqdrops.
3124 	 */
3125 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
3126 
3127 	if (sc->sc_flags & WM_F_HAS_MII)
3128 		mii_tick(&sc->sc_mii);
3129 	else if ((sc->sc_type >= WM_T_82575)
3130 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3131 		wm_serdes_tick(sc);
3132 	else
3133 		wm_tbi_tick(sc);
3134 
3135 	WM_CORE_UNLOCK(sc);
3136 
3137 	wm_watchdog(ifp);
3138 
3139 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3140 }
3141 
3142 static int
3143 wm_ifflags_cb(struct ethercom *ec)
3144 {
3145 	struct ifnet *ifp = &ec->ec_if;
3146 	struct wm_softc *sc = ifp->if_softc;
3147 	int rc = 0;
3148 
3149 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3150 		device_xname(sc->sc_dev), __func__));
3151 
3152 	WM_CORE_LOCK(sc);
3153 
3154 	int change = ifp->if_flags ^ sc->sc_if_flags;
3155 	sc->sc_if_flags = ifp->if_flags;
3156 
3157 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
3158 		rc = ENETRESET;
3159 		goto out;
3160 	}
3161 
3162 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
3163 		wm_set_filter(sc);
3164 
3165 	wm_set_vlan(sc);
3166 
3167 out:
3168 	WM_CORE_UNLOCK(sc);
3169 
3170 	return rc;
3171 }
3172 
3173 /*
3174  * wm_ioctl:		[ifnet interface function]
3175  *
3176  *	Handle control requests from the operator.
3177  */
3178 static int
3179 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3180 {
3181 	struct wm_softc *sc = ifp->if_softc;
3182 	struct ifreq *ifr = (struct ifreq *) data;
3183 	struct ifaddr *ifa = (struct ifaddr *)data;
3184 	struct sockaddr_dl *sdl;
3185 	int s, error;
3186 
3187 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3188 		device_xname(sc->sc_dev), __func__));
3189 
3190 #ifndef WM_MPSAFE
3191 	s = splnet();
3192 #endif
3193 	switch (cmd) {
3194 	case SIOCSIFMEDIA:
3195 	case SIOCGIFMEDIA:
3196 		WM_CORE_LOCK(sc);
3197 		/* Flow control requires full-duplex mode. */
3198 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3199 		    (ifr->ifr_media & IFM_FDX) == 0)
3200 			ifr->ifr_media &= ~IFM_ETH_FMASK;
3201 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3202 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3203 				/* We can do both TXPAUSE and RXPAUSE. */
3204 				ifr->ifr_media |=
3205 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3206 			}
3207 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3208 		}
3209 		WM_CORE_UNLOCK(sc);
3210 #ifdef WM_MPSAFE
3211 		s = splnet();
3212 #endif
3213 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
3214 #ifdef WM_MPSAFE
3215 		splx(s);
3216 #endif
3217 		break;
3218 	case SIOCINITIFADDR:
3219 		WM_CORE_LOCK(sc);
3220 		if (ifa->ifa_addr->sa_family == AF_LINK) {
3221 			sdl = satosdl(ifp->if_dl->ifa_addr);
3222 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3223 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3224 			/* unicast address is first multicast entry */
3225 			wm_set_filter(sc);
3226 			error = 0;
3227 			WM_CORE_UNLOCK(sc);
3228 			break;
3229 		}
3230 		WM_CORE_UNLOCK(sc);
3231 		/*FALLTHROUGH*/
3232 	default:
3233 #ifdef WM_MPSAFE
3234 		s = splnet();
3235 #endif
3236 		/* It may call wm_start, so unlock here */
3237 		error = ether_ioctl(ifp, cmd, data);
3238 #ifdef WM_MPSAFE
3239 		splx(s);
3240 #endif
3241 		if (error != ENETRESET)
3242 			break;
3243 
3244 		error = 0;
3245 
3246 		if (cmd == SIOCSIFCAP) {
3247 			error = (*ifp->if_init)(ifp);
3248 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3249 			;
3250 		else if (ifp->if_flags & IFF_RUNNING) {
3251 			/*
3252 			 * Multicast list has changed; set the hardware filter
3253 			 * accordingly.
3254 			 */
3255 			WM_CORE_LOCK(sc);
3256 			wm_set_filter(sc);
3257 			WM_CORE_UNLOCK(sc);
3258 		}
3259 		break;
3260 	}
3261 
3262 #ifndef WM_MPSAFE
3263 	splx(s);
3264 #endif
3265 	return error;
3266 }
3267 
3268 /* MAC address related */
3269 
3270 /*
3271  * Get the offset of the MAC address and return it.
3272  * If an error occurs, return offset 0.
3273  */
3274 static uint16_t
3275 wm_check_alt_mac_addr(struct wm_softc *sc)
3276 {
3277 	uint16_t myea[ETHER_ADDR_LEN / 2];
3278 	uint16_t offset = NVM_OFF_MACADDR;
3279 
3280 	/* Try to read alternative MAC address pointer */
3281 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
3282 		return 0;
3283 
3284 	/* Check whether the pointer is valid. */
3285 	if ((offset == 0x0000) || (offset == 0xffff))
3286 		return 0;
3287 
3288 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
3289 	/*
3290 	 * Check whether the alternative MAC address is valid. Some cards
3291 	 * have a non-0xffff pointer but don't actually use an alternative
3292 	 * MAC address. A valid address must have the multicast (group)
3293 	 * bit of the first octet (the low byte of the first NVM word)
3294 	 * clear.
3295 	 */
3296 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
3297 		if (((myea[0] & 0xff) & 0x01) == 0)
3298 			return offset; /* Found */
3299 
3300 	/* Not found */
3301 	return 0;
3302 }
3303 
3304 static int
3305 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
3306 {
3307 	uint16_t myea[ETHER_ADDR_LEN / 2];
3308 	uint16_t offset = NVM_OFF_MACADDR;
3309 	int do_invert = 0;
3310 
3311 	switch (sc->sc_type) {
3312 	case WM_T_82580:
3313 	case WM_T_I350:
3314 	case WM_T_I354:
3315 		/* EEPROM Top Level Partitioning */
3316 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
3317 		break;
3318 	case WM_T_82571:
3319 	case WM_T_82575:
3320 	case WM_T_82576:
3321 	case WM_T_80003:
3322 	case WM_T_I210:
3323 	case WM_T_I211:
3324 		offset = wm_check_alt_mac_addr(sc);
3325 		if (offset == 0)
3326 			if ((sc->sc_funcid & 0x01) == 1)
3327 				do_invert = 1;
3328 		break;
3329 	default:
3330 		if ((sc->sc_funcid & 0x01) == 1)
3331 			do_invert = 1;
3332 		break;
3333 	}
3334 
3335 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
3336 		goto bad;
3337 
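	/* Each 16-bit NVM word holds two octets of the address, low byte first. */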
3338 	enaddr[0] = myea[0] & 0xff;
3339 	enaddr[1] = myea[0] >> 8;
3340 	enaddr[2] = myea[1] & 0xff;
3341 	enaddr[3] = myea[1] >> 8;
3342 	enaddr[4] = myea[2] & 0xff;
3343 	enaddr[5] = myea[2] >> 8;
3344 
3345 	/*
3346 	 * Toggle the LSB of the MAC address on the second port
3347 	 * of some dual port cards.
3348 	 */
3349 	if (do_invert != 0)
3350 		enaddr[5] ^= 1;
3351 
3352 	return 0;
3353 
3354  bad:
3355 	return -1;
3356 }
3357 
3358 /*
3359  * wm_set_ral:
3360  *
3361  *	Set an entry in the receive address list.
3362  */
3363 static void
3364 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3365 {
3366 	uint32_t ral_lo, ral_hi, addrl, addrh;
3367 	uint32_t wlock_mac;
3368 	int rv;
3369 
3370 	if (enaddr != NULL) {
3371 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3372 		    (enaddr[3] << 24);
3373 		ral_hi = enaddr[4] | (enaddr[5] << 8);
3374 		ral_hi |= RAL_AV;
3375 	} else {
3376 		ral_lo = 0;
3377 		ral_hi = 0;
3378 	}
3379 
3380 	switch (sc->sc_type) {
3381 	case WM_T_82542_2_0:
3382 	case WM_T_82542_2_1:
3383 	case WM_T_82543:
3384 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
3385 		CSR_WRITE_FLUSH(sc);
3386 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
3387 		CSR_WRITE_FLUSH(sc);
3388 		break;
3389 	case WM_T_PCH2:
3390 	case WM_T_PCH_LPT:
3391 	case WM_T_PCH_SPT:
3392 	case WM_T_PCH_CNP:
3393 		if (idx == 0) {
3394 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
3395 			CSR_WRITE_FLUSH(sc);
3396 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
3397 			CSR_WRITE_FLUSH(sc);
3398 			return;
3399 		}
3400 		if (sc->sc_type != WM_T_PCH2) {
3401 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
3402 			    FWSM_WLOCK_MAC);
3403 			addrl = WMREG_SHRAL(idx - 1);
3404 			addrh = WMREG_SHRAH(idx - 1);
3405 		} else {
3406 			wlock_mac = 0;
3407 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
3408 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
3409 		}
3410 
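		/*
		 * FWSM_WLOCK_MAC is the number of shared receive address
		 * registers the firmware lets software write: 0 means all
		 * of them, otherwise only SHRA[0..wlock_mac-1] (RAR[0] was
		 * handled above).
		 */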
3411 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
3412 			rv = wm_get_swflag_ich8lan(sc);
3413 			if (rv != 0)
3414 				return;
3415 			CSR_WRITE(sc, addrl, ral_lo);
3416 			CSR_WRITE_FLUSH(sc);
3417 			CSR_WRITE(sc, addrh, ral_hi);
3418 			CSR_WRITE_FLUSH(sc);
3419 			wm_put_swflag_ich8lan(sc);
3420 		}
3421 
3422 		break;
3423 	default:
3424 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
3425 		CSR_WRITE_FLUSH(sc);
3426 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
3427 		CSR_WRITE_FLUSH(sc);
3428 		break;
3429 	}
3430 }
3431 
3432 /*
3433  * wm_mchash:
3434  *
3435  *	Compute the hash of the multicast address for the 4096-bit
3436  *	multicast filter.
3437  */
3438 static uint32_t
3439 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3440 {
3441 	static const int lo_shift[4] = { 4, 3, 2, 0 };
3442 	static const int hi_shift[4] = { 4, 5, 6, 8 };
3443 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3444 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3445 	uint32_t hash;
3446 
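	/*
	 * The hash is taken from the last two octets of the address;
	 * sc_mchash_type selects which bits are used.  E.g. with type 0
	 * on a non-ICH part, 01:00:5e:00:00:01 hashes to
	 * (0x00 >> 4) | (0x01 << 4) = 0x010.
	 */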
3447 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3448 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3449 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3450 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
3451 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3452 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3453 		return (hash & 0x3ff);
3454 	}
3455 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3456 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3457 
3458 	return (hash & 0xfff);
3459 }
3460 
3461 /*
3462  * wm_set_filter:
3463  *
3464  *	Set up the receive filter.
3465  */
3466 static void
3467 wm_set_filter(struct wm_softc *sc)
3468 {
3469 	struct ethercom *ec = &sc->sc_ethercom;
3470 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3471 	struct ether_multi *enm;
3472 	struct ether_multistep step;
3473 	bus_addr_t mta_reg;
3474 	uint32_t hash, reg, bit;
3475 	int i, size, ralmax;
3476 
3477 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3478 		device_xname(sc->sc_dev), __func__));
3479 
3480 	if (sc->sc_type >= WM_T_82544)
3481 		mta_reg = WMREG_CORDOVA_MTA;
3482 	else
3483 		mta_reg = WMREG_MTA;
3484 
3485 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3486 
3487 	if (ifp->if_flags & IFF_BROADCAST)
3488 		sc->sc_rctl |= RCTL_BAM;
3489 	if (ifp->if_flags & IFF_PROMISC) {
3490 		sc->sc_rctl |= RCTL_UPE;
3491 		goto allmulti;
3492 	}
3493 
3494 	/*
3495 	 * Set the station address in the first RAL slot, and
3496 	 * clear the remaining slots.
3497 	 */
3498 	if (sc->sc_type == WM_T_ICH8)
3499 		size = WM_RAL_TABSIZE_ICH8 - 1;
3500 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
3501 	    || (sc->sc_type == WM_T_PCH))
3502 		size = WM_RAL_TABSIZE_ICH8;
3503 	else if (sc->sc_type == WM_T_PCH2)
3504 		size = WM_RAL_TABSIZE_PCH2;
3505 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
3506 	    || (sc->sc_type == WM_T_PCH_CNP))
3507 		size = WM_RAL_TABSIZE_PCH_LPT;
3508 	else if (sc->sc_type == WM_T_82575)
3509 		size = WM_RAL_TABSIZE_82575;
3510 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
3511 		size = WM_RAL_TABSIZE_82576;
3512 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3513 		size = WM_RAL_TABSIZE_I350;
3514 	else
3515 		size = WM_RAL_TABSIZE;
3516 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3517 
3518 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
3519 	    || (sc->sc_type == WM_T_PCH_CNP)) {
3520 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
3521 		switch (i) {
3522 		case 0:
3523 			/* We can use all entries */
3524 			ralmax = size;
3525 			break;
3526 		case 1:
3527 			/* Only RAR[0] */
3528 			ralmax = 1;
3529 			break;
3530 		default:
3531 			/* available SHRA + RAR[0] */
3532 			ralmax = i + 1;
3533 		}
3534 	} else
3535 		ralmax = size;
3536 	for (i = 1; i < size; i++) {
3537 		if (i < ralmax)
3538 			wm_set_ral(sc, NULL, i);
3539 	}
3540 
3541 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3542 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3543 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3544 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
3545 		size = WM_ICH8_MC_TABSIZE;
3546 	else
3547 		size = WM_MC_TABSIZE;
3548 	/* Clear out the multicast table. */
3549 	for (i = 0; i < size; i++) {
3550 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
3551 		CSR_WRITE_FLUSH(sc);
3552 	}
3553 
3554 	ETHER_LOCK(ec);
3555 	ETHER_FIRST_MULTI(step, ec, enm);
3556 	while (enm != NULL) {
3557 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3558 			ETHER_UNLOCK(ec);
3559 			/*
3560 			 * We must listen to a range of multicast addresses.
3561 			 * For now, just accept all multicasts, rather than
3562 			 * trying to set only those filter bits needed to match
3563 			 * the range.  (At this time, the only use of address
3564 			 * ranges is for IP multicast routing, for which the
3565 			 * range is big enough to require all bits set.)
3566 			 */
3567 			goto allmulti;
3568 		}
3569 
3570 		hash = wm_mchash(sc, enm->enm_addrlo);
3571 
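		/*
		 * The upper hash bits select one of the 32-bit MTA
		 * registers (128 on most parts, 32 on ICH/PCH) and the
		 * low 5 bits select a bit within it.
		 */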
3572 		reg = (hash >> 5);
3573 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3574 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3575 		    || (sc->sc_type == WM_T_PCH2)
3576 		    || (sc->sc_type == WM_T_PCH_LPT)
3577 		    || (sc->sc_type == WM_T_PCH_SPT)
3578 		    || (sc->sc_type == WM_T_PCH_CNP))
3579 			reg &= 0x1f;
3580 		else
3581 			reg &= 0x7f;
3582 		bit = hash & 0x1f;
3583 
3584 		hash = CSR_READ(sc, mta_reg + (reg << 2));
3585 		hash |= 1U << bit;
3586 
3587 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
3588 			/*
3589 			 * 82544 Errata 9: Certain register cannot be written
3590 			 * with particular alignments in PCI-X bus operation
3591 			 * (FCAH, MTA and VFTA).
3592 			 */
3593 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3594 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3595 			CSR_WRITE_FLUSH(sc);
3596 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3597 			CSR_WRITE_FLUSH(sc);
3598 		} else {
3599 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3600 			CSR_WRITE_FLUSH(sc);
3601 		}
3602 
3603 		ETHER_NEXT_MULTI(step, enm);
3604 	}
3605 	ETHER_UNLOCK(ec);
3606 
3607 	ifp->if_flags &= ~IFF_ALLMULTI;
3608 	goto setit;
3609 
3610  allmulti:
3611 	ifp->if_flags |= IFF_ALLMULTI;
3612 	sc->sc_rctl |= RCTL_MPE;
3613 
3614  setit:
3615 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3616 }
3617 
3618 /* Reset and init related */
3619 
3620 static void
3621 wm_set_vlan(struct wm_softc *sc)
3622 {
3623 
3624 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3625 		device_xname(sc->sc_dev), __func__));
3626 
3627 	/* Deal with VLAN enables. */
3628 	if (VLAN_ATTACHED(&sc->sc_ethercom))
3629 		sc->sc_ctrl |= CTRL_VME;
3630 	else
3631 		sc->sc_ctrl &= ~CTRL_VME;
3632 
3633 	/* Write the control registers. */
3634 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3635 }
3636 
3637 static void
3638 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3639 {
3640 	uint32_t gcr;
3641 	pcireg_t ctrl2;
3642 
3643 	gcr = CSR_READ(sc, WMREG_GCR);
3644 
3645 	/* Only take action if the timeout value is at its default of 0 */
3646 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3647 		goto out;
3648 
3649 	if ((gcr & GCR_CAP_VER2) == 0) {
3650 		gcr |= GCR_CMPL_TMOUT_10MS;
3651 		goto out;
3652 	}
3653 
3654 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3655 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
3656 	ctrl2 |= WM_PCIE_DCSR2_16MS;
3657 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3658 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3659 
3660 out:
3661 	/* Disable completion timeout resend */
3662 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
3663 
3664 	CSR_WRITE(sc, WMREG_GCR, gcr);
3665 }
3666 
3667 void
3668 wm_get_auto_rd_done(struct wm_softc *sc)
3669 {
3670 	int i;
3671 
3672 	/* wait for eeprom to reload */
3673 	switch (sc->sc_type) {
3674 	case WM_T_82571:
3675 	case WM_T_82572:
3676 	case WM_T_82573:
3677 	case WM_T_82574:
3678 	case WM_T_82583:
3679 	case WM_T_82575:
3680 	case WM_T_82576:
3681 	case WM_T_82580:
3682 	case WM_T_I350:
3683 	case WM_T_I354:
3684 	case WM_T_I210:
3685 	case WM_T_I211:
3686 	case WM_T_80003:
3687 	case WM_T_ICH8:
3688 	case WM_T_ICH9:
3689 		for (i = 0; i < 10; i++) {
3690 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3691 				break;
3692 			delay(1000);
3693 		}
3694 		if (i == 10) {
3695 			log(LOG_ERR, "%s: auto read from eeprom failed to "
3696 			    "complete\n", device_xname(sc->sc_dev));
3697 		}
3698 		break;
3699 	default:
3700 		break;
3701 	}
3702 }
3703 
3704 void
3705 wm_lan_init_done(struct wm_softc *sc)
3706 {
3707 	uint32_t reg = 0;
3708 	int i;
3709 
3710 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3711 		device_xname(sc->sc_dev), __func__));
3712 
3713 	/* Wait for eeprom to reload */
3714 	switch (sc->sc_type) {
3715 	case WM_T_ICH10:
3716 	case WM_T_PCH:
3717 	case WM_T_PCH2:
3718 	case WM_T_PCH_LPT:
3719 	case WM_T_PCH_SPT:
3720 	case WM_T_PCH_CNP:
3721 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3722 			reg = CSR_READ(sc, WMREG_STATUS);
3723 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
3724 				break;
3725 			delay(100);
3726 		}
3727 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3728 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
3729 			    "complete\n", device_xname(sc->sc_dev), __func__);
3730 		}
3731 		break;
3732 	default:
3733 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3734 		    __func__);
3735 		break;
3736 	}
3737 
3738 	reg &= ~STATUS_LAN_INIT_DONE;
3739 	CSR_WRITE(sc, WMREG_STATUS, reg);
3740 }
3741 
3742 void
3743 wm_get_cfg_done(struct wm_softc *sc)
3744 {
3745 	int mask;
3746 	uint32_t reg;
3747 	int i;
3748 
3749 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3750 		device_xname(sc->sc_dev), __func__));
3751 
3752 	/* Wait for eeprom to reload */
3753 	switch (sc->sc_type) {
3754 	case WM_T_82542_2_0:
3755 	case WM_T_82542_2_1:
3756 		/* null */
3757 		break;
3758 	case WM_T_82543:
3759 	case WM_T_82544:
3760 	case WM_T_82540:
3761 	case WM_T_82545:
3762 	case WM_T_82545_3:
3763 	case WM_T_82546:
3764 	case WM_T_82546_3:
3765 	case WM_T_82541:
3766 	case WM_T_82541_2:
3767 	case WM_T_82547:
3768 	case WM_T_82547_2:
3769 	case WM_T_82573:
3770 	case WM_T_82574:
3771 	case WM_T_82583:
3772 		/* generic */
3773 		delay(10*1000);
3774 		break;
3775 	case WM_T_80003:
3776 	case WM_T_82571:
3777 	case WM_T_82572:
3778 	case WM_T_82575:
3779 	case WM_T_82576:
3780 	case WM_T_82580:
3781 	case WM_T_I350:
3782 	case WM_T_I354:
3783 	case WM_T_I210:
3784 	case WM_T_I211:
3785 		if (sc->sc_type == WM_T_82571) {
3786 			/* Only 82571 shares port 0 */
3787 			mask = EEMNGCTL_CFGDONE_0;
3788 		} else
3789 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3790 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3791 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3792 				break;
3793 			delay(1000);
3794 		}
3795 		if (i >= WM_PHY_CFG_TIMEOUT) {
3796 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3797 				device_xname(sc->sc_dev), __func__));
3798 		}
3799 		break;
3800 	case WM_T_ICH8:
3801 	case WM_T_ICH9:
3802 	case WM_T_ICH10:
3803 	case WM_T_PCH:
3804 	case WM_T_PCH2:
3805 	case WM_T_PCH_LPT:
3806 	case WM_T_PCH_SPT:
3807 	case WM_T_PCH_CNP:
3808 		delay(10*1000);
3809 		if (sc->sc_type >= WM_T_ICH10)
3810 			wm_lan_init_done(sc);
3811 		else
3812 			wm_get_auto_rd_done(sc);
3813 
3814 		reg = CSR_READ(sc, WMREG_STATUS);
3815 		if ((reg & STATUS_PHYRA) != 0)
3816 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3817 		break;
3818 	default:
3819 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3820 		    __func__);
3821 		break;
3822 	}
3823 }
3824 
3825 void
3826 wm_phy_post_reset(struct wm_softc *sc)
3827 {
3828 	uint32_t reg;
3829 
3830 	/* This function is only for ICH8 and newer. */
3831 	if (sc->sc_type < WM_T_ICH8)
3832 		return;
3833 
3834 	if (wm_phy_resetisblocked(sc)) {
3835 		/* XXX */
3836 		device_printf(sc->sc_dev, "PHY is blocked\n");
3837 		return;
3838 	}
3839 
3840 	/* Allow time for h/w to get to quiescent state after reset */
3841 	delay(10*1000);
3842 
3843 	/* Perform any necessary post-reset workarounds */
3844 	if (sc->sc_type == WM_T_PCH)
3845 		wm_hv_phy_workaround_ich8lan(sc);
3846 	if (sc->sc_type == WM_T_PCH2)
3847 		wm_lv_phy_workaround_ich8lan(sc);
3848 
3849 	/* Clear the host wakeup bit after lcd reset */
3850 	if (sc->sc_type >= WM_T_PCH) {
3851 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
3852 		    BM_PORT_GEN_CFG);
3853 		reg &= ~BM_WUC_HOST_WU_BIT;
3854 		wm_gmii_hv_writereg(sc->sc_dev, 2,
3855 		    BM_PORT_GEN_CFG, reg);
3856 	}
3857 
3858 	/* Configure the LCD with the extended configuration region in NVM */
3859 	wm_init_lcd_from_nvm(sc);
3860 
3861 	/* Configure the LCD with the OEM bits in NVM */
3862 }
3863 
3864 /* Only for PCH and newer */
3865 static void
3866 wm_write_smbus_addr(struct wm_softc *sc)
3867 {
3868 	uint32_t strap, freq;
3869 	uint32_t phy_data;
3870 
3871 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3872 		device_xname(sc->sc_dev), __func__));
3873 
3874 	strap = CSR_READ(sc, WMREG_STRAP);
3875 	freq = __SHIFTOUT(strap, STRAP_FREQ);
3876 
3877 	phy_data = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR);
3878 
3879 	phy_data &= ~HV_SMB_ADDR_ADDR;
3880 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
3881 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
3882 
3883 	if (sc->sc_phytype == WMPHY_I217) {
3884 		/* Restore SMBus frequency */
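		/* A strap value of 0 means the frequency is unsupported. */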
3885 		if (freq--) {
3886 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
3887 			    | HV_SMB_ADDR_FREQ_HIGH);
3888 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
3889 			    HV_SMB_ADDR_FREQ_LOW);
3890 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
3891 			    HV_SMB_ADDR_FREQ_HIGH);
3892 		} else {
3893 			DPRINTF(WM_DEBUG_INIT,
3894 			    ("%s: %s Unsupported SMB frequency in PHY\n",
3895 				device_xname(sc->sc_dev), __func__));
3896 		}
3897 	}
3898 
3899 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR, phy_data);
3900 }
3901 
3902 void
3903 wm_init_lcd_from_nvm(struct wm_softc *sc)
3904 {
3905 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
3906 	uint16_t phy_page = 0;
3907 
3908 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3909 		device_xname(sc->sc_dev), __func__));
3910 
3911 	switch (sc->sc_type) {
3912 	case WM_T_ICH8:
3913 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
3914 		    || (sc->sc_phytype != WMPHY_IGP_3))
3915 			return;
3916 
3917 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
3918 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
3919 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
3920 			break;
3921 		}
3922 		/* FALLTHROUGH */
3923 	case WM_T_PCH:
3924 	case WM_T_PCH2:
3925 	case WM_T_PCH_LPT:
3926 	case WM_T_PCH_SPT:
3927 	case WM_T_PCH_CNP:
3928 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
3929 		break;
3930 	default:
3931 		return;
3932 	}
3933 
3934 	sc->phy.acquire(sc);
3935 
3936 	reg = CSR_READ(sc, WMREG_FEXTNVM);
3937 	if ((reg & sw_cfg_mask) == 0)
3938 		goto release;
3939 
3940 	/*
3941 	 * Make sure HW does not configure LCD from PHY extended configuration
3942 	 * before SW configuration
3943 	 */
3944 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
3945 	if ((sc->sc_type < WM_T_PCH2)
3946 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
3947 		goto release;
3948 
3949 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
3950 		device_xname(sc->sc_dev), __func__));
3951 	/* The extended config pointer is in DWORDs; convert to NVM words. */
3952 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
3953 
3954 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
3955 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
3956 	if (cnf_size == 0)
3957 		goto release;
3958 
3959 	if (((sc->sc_type == WM_T_PCH)
3960 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
3961 	    || (sc->sc_type > WM_T_PCH)) {
3962 		/*
3963 		 * HW configures the SMBus address and LEDs when the OEM and
3964 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
3965 		 * are cleared, SW will configure them instead.
3966 		 */
3967 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
3968 			device_xname(sc->sc_dev), __func__));
3969 		wm_write_smbus_addr(sc);
3970 
3971 		reg = CSR_READ(sc, WMREG_LEDCTL);
3972 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg);
3973 	}
3974 
3975 	/* Configure LCD from extended configuration region. */
3976 	for (i = 0; i < cnf_size; i++) {
3977 		uint16_t reg_data, reg_addr;
3978 
3979 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
3980 			goto release;
3981 
3982 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
3983 			goto release;
3984 
3985 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
3986 			phy_page = reg_data;
3987 
3988 		reg_addr &= IGPHY_MAXREGADDR;
3989 		reg_addr |= phy_page;
3990 
3991 		sc->phy.release(sc); /* XXX */
3992 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, reg_addr, reg_data);
3993 		sc->phy.acquire(sc); /* XXX */
3994 	}
3995 
3996 release:
3997 	sc->phy.release(sc);
3998 	return;
3999 }
4000 
4001 
4002 /* Init hardware bits */
4003 void
4004 wm_initialize_hardware_bits(struct wm_softc *sc)
4005 {
4006 	uint32_t tarc0, tarc1, reg;
4007 
4008 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4009 		device_xname(sc->sc_dev), __func__));
4010 
4011 	/* For 82571 variant, 80003 and ICHs */
4012 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
4013 	    || (sc->sc_type >= WM_T_80003)) {
4014 
4015 		/* Transmit Descriptor Control 0 */
4016 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
4017 		reg |= TXDCTL_COUNT_DESC;
4018 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
4019 
4020 		/* Transmit Descriptor Control 1 */
4021 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
4022 		reg |= TXDCTL_COUNT_DESC;
4023 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
4024 
4025 		/* TARC0 */
4026 		tarc0 = CSR_READ(sc, WMREG_TARC0);
4027 		switch (sc->sc_type) {
4028 		case WM_T_82571:
4029 		case WM_T_82572:
4030 		case WM_T_82573:
4031 		case WM_T_82574:
4032 		case WM_T_82583:
4033 		case WM_T_80003:
4034 			/* Clear bits 30..27 */
4035 			tarc0 &= ~__BITS(30, 27);
4036 			break;
4037 		default:
4038 			break;
4039 		}
4040 
4041 		switch (sc->sc_type) {
4042 		case WM_T_82571:
4043 		case WM_T_82572:
4044 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
4045 
4046 			tarc1 = CSR_READ(sc, WMREG_TARC1);
4047 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
4048 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
4049 			/* 8257[12] Errata No.7 */
4050 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
4051 
4052 			/* TARC1 bit 28 */
4053 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
4054 				tarc1 &= ~__BIT(28);
4055 			else
4056 				tarc1 |= __BIT(28);
4057 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
4058 
4059 			/*
4060 			 * 8257[12] Errata No.13
4061 			 * Disable Dynamic Clock Gating.
4062 			 */
4063 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4064 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
4065 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4066 			break;
4067 		case WM_T_82573:
4068 		case WM_T_82574:
4069 		case WM_T_82583:
4070 			if ((sc->sc_type == WM_T_82574)
4071 			    || (sc->sc_type == WM_T_82583))
4072 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
4073 
4074 			/* Extended Device Control */
4075 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4076 			reg &= ~__BIT(23);	/* Clear bit 23 */
4077 			reg |= __BIT(22);	/* Set bit 22 */
4078 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4079 
4080 			/* Device Control */
4081 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
4082 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4083 
4084 			/* PCIe Control Register */
4085 			/*
4086 			 * 82573 Errata (unknown).
4087 			 *
4088 			 * 82574 Errata 25 and 82583 Errata 12
4089 			 * "Dropped Rx Packets":
4090 			 *   NVM image version 2.1.4 and newer doesn't have this bug.
4091 			 */
4092 			reg = CSR_READ(sc, WMREG_GCR);
4093 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
4094 			CSR_WRITE(sc, WMREG_GCR, reg);
4095 
4096 			if ((sc->sc_type == WM_T_82574)
4097 			    || (sc->sc_type == WM_T_82583)) {
4098 				/*
4099 				 * Document says this bit must be set for
4100 				 * proper operation.
4101 				 */
4102 				reg = CSR_READ(sc, WMREG_GCR);
4103 				reg |= __BIT(22);
4104 				CSR_WRITE(sc, WMREG_GCR, reg);
4105 
4106 				/*
4107 				 * Apply a workaround for a documented
4108 				 * hardware erratum: some error-prone or
4109 				 * unreliable PCIe completions occur,
4110 				 * particularly with ASPM enabled. Without
4111 				 * the fix, the issue can cause Tx
4112 				 * timeouts.
4113 				 */
4114 				reg = CSR_READ(sc, WMREG_GCR2);
4115 				reg |= __BIT(0);
4116 				CSR_WRITE(sc, WMREG_GCR2, reg);
4117 			}
4118 			break;
4119 		case WM_T_80003:
4120 			/* TARC0 */
4121 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
4122 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
4123 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
4124 
4125 			/* TARC1 bit 28 */
4126 			tarc1 = CSR_READ(sc, WMREG_TARC1);
4127 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
4128 				tarc1 &= ~__BIT(28);
4129 			else
4130 				tarc1 |= __BIT(28);
4131 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
4132 			break;
4133 		case WM_T_ICH8:
4134 		case WM_T_ICH9:
4135 		case WM_T_ICH10:
4136 		case WM_T_PCH:
4137 		case WM_T_PCH2:
4138 		case WM_T_PCH_LPT:
4139 		case WM_T_PCH_SPT:
4140 		case WM_T_PCH_CNP:
4141 			/* TARC0 */
4142 			if (sc->sc_type == WM_T_ICH8) {
4143 				/* Set TARC0 bits 29 and 28 */
4144 				tarc0 |= __BITS(29, 28);
4145 			} else if (sc->sc_type == WM_T_PCH_SPT) {
4146 				tarc0 |= __BIT(29);
4147 				/*
4148 				 *  Drop bit 28. From Linux.
4149 				 * See I218/I219 spec update
4150 				 * "5. Buffer Overrun While the I219 is
4151 				 * Processing DMA Transactions"
4152 				 */
4153 				tarc0 &= ~__BIT(28);
4154 			}
4155 			/* Set TARC0 bits 23,24,26,27 */
4156 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
4157 
4158 			/* CTRL_EXT */
4159 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4160 			reg |= __BIT(22);	/* Set bit 22 */
4161 			/*
4162 			 * Enable PHY low-power state when MAC is at D3
4163 			 * w/o WoL
4164 			 */
4165 			if (sc->sc_type >= WM_T_PCH)
4166 				reg |= CTRL_EXT_PHYPDEN;
4167 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4168 
4169 			/* TARC1 */
4170 			tarc1 = CSR_READ(sc, WMREG_TARC1);
4171 			/* bit 28 */
4172 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
4173 				tarc1 &= ~__BIT(28);
4174 			else
4175 				tarc1 |= __BIT(28);
4176 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
4177 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
4178 
4179 			/* Device Status */
4180 			if (sc->sc_type == WM_T_ICH8) {
4181 				reg = CSR_READ(sc, WMREG_STATUS);
4182 				reg &= ~__BIT(31);
4183 				CSR_WRITE(sc, WMREG_STATUS, reg);
4184 
4185 			}
4186 
4187 			/* IOSFPC */
4188 			if (sc->sc_type == WM_T_PCH_SPT) {
4189 				reg = CSR_READ(sc, WMREG_IOSFPC);
4190 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
4191 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
4192 			}
4193 			/*
4194 			 * Work-around descriptor data corruption issue during
4195 			 * NFS v2 UDP traffic, just disable the NFS filtering
4196 			 * capability.
4197 			 */
4198 			reg = CSR_READ(sc, WMREG_RFCTL);
4199 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
4200 			CSR_WRITE(sc, WMREG_RFCTL, reg);
4201 			break;
4202 		default:
4203 			break;
4204 		}
4205 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
4206 
4207 		switch (sc->sc_type) {
4208 		/*
4209 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
4210 		 * Avoid RSS Hash Value bug.
4211 		 */
4212 		case WM_T_82571:
4213 		case WM_T_82572:
4214 		case WM_T_82573:
4215 		case WM_T_80003:
4216 		case WM_T_ICH8:
4217 			reg = CSR_READ(sc, WMREG_RFCTL);
4218 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
4219 			CSR_WRITE(sc, WMREG_RFCTL, reg);
4220 			break;
4221 		case WM_T_82574:
4222 			/* Use extended Rx descriptors. */
4223 			reg = CSR_READ(sc, WMREG_RFCTL);
4224 			reg |= WMREG_RFCTL_EXSTEN;
4225 			CSR_WRITE(sc, WMREG_RFCTL, reg);
4226 			break;
4227 		default:
4228 			break;
4229 		}
4230 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
4231 		/*
4232 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
4233 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
4234 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
4235 		 * Correctly by the Device"
4236 		 *
4237 		 * I354(C2000) Errata AVR53:
4238 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
4239 		 * Hang"
4240 		 */
4241 		reg = CSR_READ(sc, WMREG_RFCTL);
4242 		reg |= WMREG_RFCTL_IPV6EXDIS;
4243 		CSR_WRITE(sc, WMREG_RFCTL, reg);
4244 	}
4245 }
4246 
4247 static uint32_t
4248 wm_rxpbs_adjust_82580(uint32_t val)
4249 {
4250 	uint32_t rv = 0;
4251 
4252 	if (val < __arraycount(wm_82580_rxpbs_table))
4253 		rv = wm_82580_rxpbs_table[val];
4254 
4255 	return rv;
4256 }
4257 
4258 /*
4259  * wm_reset_phy:
4260  *
4261  *	generic PHY reset function.
4262  *	Same as e1000_phy_hw_reset_generic()
4263  */
4264 static void
4265 wm_reset_phy(struct wm_softc *sc)
4266 {
4267 	uint32_t reg;
4268 
4269 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4270 		device_xname(sc->sc_dev), __func__));
4271 	if (wm_phy_resetisblocked(sc))
4272 		return;
4273 
4274 	sc->phy.acquire(sc);
4275 
4276 	reg = CSR_READ(sc, WMREG_CTRL);
4277 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
4278 	CSR_WRITE_FLUSH(sc);
4279 
4280 	delay(sc->phy.reset_delay_us);
4281 
4282 	CSR_WRITE(sc, WMREG_CTRL, reg);
4283 	CSR_WRITE_FLUSH(sc);
4284 
4285 	delay(150);
4286 
4287 	sc->phy.release(sc);
4288 
4289 	wm_get_cfg_done(sc);
4290 	wm_phy_post_reset(sc);
4291 }
4292 
4293 /*
4294  * Only used by WM_T_PCH_SPT which does not use multiqueue,
4295  * so it is enough to check sc->sc_queue[0] only.
4296  */
4297 static void
4298 wm_flush_desc_rings(struct wm_softc *sc)
4299 {
4300 	pcireg_t preg;
4301 	uint32_t reg;
4302 	struct wm_txqueue *txq;
4303 	wiseman_txdesc_t *txd;
4304 	int nexttx;
4305 	uint32_t rctl;
4306 
4307 	/* First, disable MULR fix in FEXTNVM11 */
4308 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
4309 	reg |= FEXTNVM11_DIS_MULRFIX;
4310 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
4311 
4312 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
4313 	reg = CSR_READ(sc, WMREG_TDLEN(0));
4314 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
4315 		return;
4316 
4317 	/* TX */
4318 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
4319 	    device_xname(sc->sc_dev), preg, reg);
4320 	reg = CSR_READ(sc, WMREG_TCTL);
4321 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
4322 
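	/*
	 * Queue a single dummy 512-byte descriptor and advance the tail
	 * pointer so the hardware makes forward progress and drains the
	 * stuck ring.
	 */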
4323 	txq = &sc->sc_queue[0].wmq_txq;
4324 	nexttx = txq->txq_next;
4325 	txd = &txq->txq_descs[nexttx];
4326 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
4327 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS| 512);
4328 	txd->wtx_fields.wtxu_status = 0;
4329 	txd->wtx_fields.wtxu_options = 0;
4330 	txd->wtx_fields.wtxu_vlan = 0;
4331 
4332 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
4333 	    BUS_SPACE_BARRIER_WRITE);
4334 
4335 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
4336 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
4337 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
4338 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
4339 	delay(250);
4340 
4341 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
4342 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
4343 		return;
4344 
4345 	/* RX */
4346 	printf("%s: Need RX flush (reg = %08x)\n",
4347 	    device_xname(sc->sc_dev), preg);
4348 	rctl = CSR_READ(sc, WMREG_RCTL);
4349 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
4350 	CSR_WRITE_FLUSH(sc);
4351 	delay(150);
4352 
4353 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
4354 	/* zero the lower 14 bits (prefetch and host thresholds) */
4355 	reg &= 0xffffc000;
4356 	/*
4357 	 * update thresholds: prefetch threshold to 31, host threshold
4358 	 * to 1 and make sure the granularity is "descriptors" and not
4359 	 * "cache lines"
4360 	 */
4361 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
4362 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
4363 
4364 	/*
4365 	 * momentarily enable the RX ring for the changes to take
4366 	 * effect
4367 	 */
4368 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
4369 	CSR_WRITE_FLUSH(sc);
4370 	delay(150);
4371 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
4372 }
4373 
4374 /*
4375  * wm_reset:
4376  *
4377  *	Reset the i82542 chip.
4378  */
4379 static void
4380 wm_reset(struct wm_softc *sc)
4381 {
4382 	int phy_reset = 0;
4383 	int i, error = 0;
4384 	uint32_t reg;
4385 	uint16_t kmreg;
4386 	int rv;
4387 
4388 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4389 		device_xname(sc->sc_dev), __func__));
4390 	KASSERT(sc->sc_type != 0);
4391 
4392 	/*
4393 	 * Allocate on-chip memory according to the MTU size.
4394 	 * The Packet Buffer Allocation register must be written
4395 	 * before the chip is reset.
4396 	 */
4397 	switch (sc->sc_type) {
4398 	case WM_T_82547:
4399 	case WM_T_82547_2:
4400 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4401 		    PBA_22K : PBA_30K;
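		/*
		 * The chip's 40KB packet buffer is split: the receive
		 * side gets sc_pba KB and the remainder becomes the Tx
		 * FIFO that the Tx stall workaround tracks in software.
		 */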
4402 		for (i = 0; i < sc->sc_nqueues; i++) {
4403 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
4404 			txq->txq_fifo_head = 0;
4405 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
4406 			txq->txq_fifo_size =
4407 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
4408 			txq->txq_fifo_stall = 0;
4409 		}
4410 		break;
4411 	case WM_T_82571:
4412 	case WM_T_82572:
4413 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
4414 	case WM_T_80003:
4415 		sc->sc_pba = PBA_32K;
4416 		break;
4417 	case WM_T_82573:
4418 		sc->sc_pba = PBA_12K;
4419 		break;
4420 	case WM_T_82574:
4421 	case WM_T_82583:
4422 		sc->sc_pba = PBA_20K;
4423 		break;
4424 	case WM_T_82576:
4425 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
4426 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
4427 		break;
4428 	case WM_T_82580:
4429 	case WM_T_I350:
4430 	case WM_T_I354:
4431 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
4432 		break;
4433 	case WM_T_I210:
4434 	case WM_T_I211:
4435 		sc->sc_pba = PBA_34K;
4436 		break;
4437 	case WM_T_ICH8:
4438 		/* Workaround for a bit corruption issue in FIFO memory */
4439 		sc->sc_pba = PBA_8K;
4440 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
4441 		break;
4442 	case WM_T_ICH9:
4443 	case WM_T_ICH10:
4444 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
4445 		    PBA_14K : PBA_10K;
4446 		break;
4447 	case WM_T_PCH:
4448 	case WM_T_PCH2:	/* XXX 14K? */
4449 	case WM_T_PCH_LPT:
4450 	case WM_T_PCH_SPT:
4451 	case WM_T_PCH_CNP:
4452 		sc->sc_pba = PBA_26K;
4453 		break;
4454 	default:
4455 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4456 		    PBA_40K : PBA_48K;
4457 		break;
4458 	}
4459 	/*
4460 	 * Only old or non-multiqueue devices have the PBA register
4461 	 * XXX Need special handling for 82575.
4462 	 */
4463 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4464 	    || (sc->sc_type == WM_T_82575))
4465 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
4466 
4467 	/* Prevent the PCI-E bus from sticking */
4468 	if (sc->sc_flags & WM_F_PCIE) {
4469 		int timeout = 800;
4470 
4471 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
4472 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4473 
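		/*
		 * Poll for up to 80ms (800 * 100us) for outstanding
		 * master requests to drain before resetting the chip.
		 */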
4474 		while (timeout--) {
4475 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
4476 			    == 0)
4477 				break;
4478 			delay(100);
4479 		}
4480 		if (timeout == 0)
4481 			device_printf(sc->sc_dev,
4482 			    "failed to disable busmastering\n");
4483 	}
4484 
4485 	/* Set the completion timeout for interface */
4486 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
4487 	    || (sc->sc_type == WM_T_82580)
4488 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4489 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
4490 		wm_set_pcie_completion_timeout(sc);
4491 
4492 	/* Clear interrupt */
4493 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4494 	if (wm_is_using_msix(sc)) {
4495 		if (sc->sc_type != WM_T_82574) {
4496 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4497 			CSR_WRITE(sc, WMREG_EIAC, 0);
4498 		} else {
4499 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4500 		}
4501 	}
4502 
4503 	/* Stop the transmit and receive processes. */
4504 	CSR_WRITE(sc, WMREG_RCTL, 0);
4505 	sc->sc_rctl &= ~RCTL_EN;
4506 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
4507 	CSR_WRITE_FLUSH(sc);
4508 
4509 	/* XXX set_tbi_sbp_82543() */
4510 
4511 	delay(10*1000);
4512 
4513 	/* Must acquire the MDIO ownership before MAC reset */
4514 	switch (sc->sc_type) {
4515 	case WM_T_82573:
4516 	case WM_T_82574:
4517 	case WM_T_82583:
4518 		error = wm_get_hw_semaphore_82573(sc);
4519 		break;
4520 	default:
4521 		break;
4522 	}
4523 
4524 	/*
4525 	 * 82541 Errata 29? & 82547 Errata 28?
4526 	 * See also the description about PHY_RST bit in CTRL register
4527 	 * in 8254x_GBe_SDM.pdf.
4528 	 */
4529 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
4530 		CSR_WRITE(sc, WMREG_CTRL,
4531 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
4532 		CSR_WRITE_FLUSH(sc);
4533 		delay(5000);
4534 	}
4535 
4536 	switch (sc->sc_type) {
4537 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
4538 	case WM_T_82541:
4539 	case WM_T_82541_2:
4540 	case WM_T_82547:
4541 	case WM_T_82547_2:
4542 		/*
4543 		 * On some chipsets, a reset through a memory-mapped write
4544 		 * cycle can cause the chip to reset before completing the
4545 		 * write cycle.  This causes major headache that can be
4546 		 * avoided by issuing the reset via indirect register writes
4547 		 * through I/O space.
4548 		 *
4549 		 * So, if we successfully mapped the I/O BAR at attach time,
4550 		 * use that.  Otherwise, try our luck with a memory-mapped
4551 		 * reset.
4552 		 */
4553 		if (sc->sc_flags & WM_F_IOH_VALID)
4554 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
4555 		else
4556 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
4557 		break;
4558 	case WM_T_82545_3:
4559 	case WM_T_82546_3:
4560 		/* Use the shadow control register on these chips. */
4561 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
4562 		break;
4563 	case WM_T_80003:
4564 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4565 		sc->phy.acquire(sc);
4566 		CSR_WRITE(sc, WMREG_CTRL, reg);
4567 		sc->phy.release(sc);
4568 		break;
4569 	case WM_T_ICH8:
4570 	case WM_T_ICH9:
4571 	case WM_T_ICH10:
4572 	case WM_T_PCH:
4573 	case WM_T_PCH2:
4574 	case WM_T_PCH_LPT:
4575 	case WM_T_PCH_SPT:
4576 	case WM_T_PCH_CNP:
4577 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4578 		if (wm_phy_resetisblocked(sc) == false) {
4579 			/*
4580 			 * Gate automatic PHY configuration by hardware on
4581 			 * non-managed 82579
4582 			 */
4583 			if ((sc->sc_type == WM_T_PCH2)
4584 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
4585 				== 0))
4586 				wm_gate_hw_phy_config_ich8lan(sc, true);
4587 
4588 			reg |= CTRL_PHY_RESET;
4589 			phy_reset = 1;
4590 		} else
4591 			printf("XXX reset is blocked!!!\n");
4592 		sc->phy.acquire(sc);
4593 		CSR_WRITE(sc, WMREG_CTRL, reg);
4594 		/* Don't insert a completion barrier when reset */
4595 		delay(20*1000);
4596 		mutex_exit(sc->sc_ich_phymtx);
4597 		break;
4598 	case WM_T_82580:
4599 	case WM_T_I350:
4600 	case WM_T_I354:
4601 	case WM_T_I210:
4602 	case WM_T_I211:
4603 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4604 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
4605 			CSR_WRITE_FLUSH(sc);
4606 		delay(5000);
4607 		break;
4608 	case WM_T_82542_2_0:
4609 	case WM_T_82542_2_1:
4610 	case WM_T_82543:
4611 	case WM_T_82540:
4612 	case WM_T_82545:
4613 	case WM_T_82546:
4614 	case WM_T_82571:
4615 	case WM_T_82572:
4616 	case WM_T_82573:
4617 	case WM_T_82574:
4618 	case WM_T_82575:
4619 	case WM_T_82576:
4620 	case WM_T_82583:
4621 	default:
4622 		/* Everything else can safely use the documented method. */
4623 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4624 		break;
4625 	}
4626 
4627 	/* Must release the MDIO ownership after MAC reset */
4628 	switch (sc->sc_type) {
4629 	case WM_T_82573:
4630 	case WM_T_82574:
4631 	case WM_T_82583:
4632 		if (error == 0)
4633 			wm_put_hw_semaphore_82573(sc);
4634 		break;
4635 	default:
4636 		break;
4637 	}
4638 
4639 	if (phy_reset != 0)
4640 		wm_get_cfg_done(sc);
4641 
4642 	/* reload EEPROM */
4643 	switch (sc->sc_type) {
4644 	case WM_T_82542_2_0:
4645 	case WM_T_82542_2_1:
4646 	case WM_T_82543:
4647 	case WM_T_82544:
4648 		delay(10);
4649 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4650 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4651 		CSR_WRITE_FLUSH(sc);
4652 		delay(2000);
4653 		break;
4654 	case WM_T_82540:
4655 	case WM_T_82545:
4656 	case WM_T_82545_3:
4657 	case WM_T_82546:
4658 	case WM_T_82546_3:
4659 		delay(5*1000);
4660 		/* XXX Disable HW ARPs on ASF enabled adapters */
4661 		break;
4662 	case WM_T_82541:
4663 	case WM_T_82541_2:
4664 	case WM_T_82547:
4665 	case WM_T_82547_2:
4666 		delay(20000);
4667 		/* XXX Disable HW ARPs on ASF enabled adapters */
4668 		break;
4669 	case WM_T_82571:
4670 	case WM_T_82572:
4671 	case WM_T_82573:
4672 	case WM_T_82574:
4673 	case WM_T_82583:
4674 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
4675 			delay(10);
4676 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4677 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4678 			CSR_WRITE_FLUSH(sc);
4679 		}
4680 		/* check EECD_EE_AUTORD */
4681 		wm_get_auto_rd_done(sc);
4682 		/*
4683 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
4684 		 * is set.
4685 		 */
4686 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4687 		    || (sc->sc_type == WM_T_82583))
4688 			delay(25*1000);
4689 		break;
4690 	case WM_T_82575:
4691 	case WM_T_82576:
4692 	case WM_T_82580:
4693 	case WM_T_I350:
4694 	case WM_T_I354:
4695 	case WM_T_I210:
4696 	case WM_T_I211:
4697 	case WM_T_80003:
4698 		/* check EECD_EE_AUTORD */
4699 		wm_get_auto_rd_done(sc);
4700 		break;
4701 	case WM_T_ICH8:
4702 	case WM_T_ICH9:
4703 	case WM_T_ICH10:
4704 	case WM_T_PCH:
4705 	case WM_T_PCH2:
4706 	case WM_T_PCH_LPT:
4707 	case WM_T_PCH_SPT:
4708 	case WM_T_PCH_CNP:
4709 		break;
4710 	default:
4711 		panic("%s: unknown type\n", __func__);
4712 	}
4713 
4714 	/* Check whether EEPROM is present or not */
4715 	switch (sc->sc_type) {
4716 	case WM_T_82575:
4717 	case WM_T_82576:
4718 	case WM_T_82580:
4719 	case WM_T_I350:
4720 	case WM_T_I354:
4721 	case WM_T_ICH8:
4722 	case WM_T_ICH9:
4723 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4724 			/* Not found */
4725 			sc->sc_flags |= WM_F_EEPROM_INVALID;
4726 			if (sc->sc_type == WM_T_82575)
4727 				wm_reset_init_script_82575(sc);
4728 		}
4729 		break;
4730 	default:
4731 		break;
4732 	}
4733 
4734 	if (phy_reset != 0)
4735 		wm_phy_post_reset(sc);
4736 
4737 	if ((sc->sc_type == WM_T_82580)
4738 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
4739 		/* clear global device reset status bit */
4740 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4741 	}
4742 
4743 	/* Clear any pending interrupt events. */
4744 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4745 	reg = CSR_READ(sc, WMREG_ICR);
4746 	if (wm_is_using_msix(sc)) {
4747 		if (sc->sc_type != WM_T_82574) {
4748 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4749 			CSR_WRITE(sc, WMREG_EIAC, 0);
4750 		} else
4751 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4752 	}
4753 
4754 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4755 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4756 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
4757 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
4758 		reg = CSR_READ(sc, WMREG_KABGTXD);
4759 		reg |= KABGTXD_BGSQLBIAS;
4760 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
4761 	}
4762 
4763 	/* reload sc_ctrl */
4764 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4765 
4766 	if (sc->sc_type == WM_T_I354) {
4767 #if 0
4768 		/* I354 uses an external PHY */
4769 		wm_set_eee_i354(sc);
4770 #endif
4771 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
4772 		wm_set_eee_i350(sc);
4773 
4774 	/*
4775 	 * For PCH, this write will make sure that any noise will be detected
4776 	 * as a CRC error and be dropped rather than show up as a bad packet
4777 	 * to the DMA engine
4778 	 */
4779 	if (sc->sc_type == WM_T_PCH)
4780 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4781 
4782 	if (sc->sc_type >= WM_T_82544)
4783 		CSR_WRITE(sc, WMREG_WUC, 0);
4784 
4785 	wm_reset_mdicnfg_82580(sc);
4786 
4787 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
4788 		wm_pll_workaround_i210(sc);
4789 
4790 	if (sc->sc_type == WM_T_80003) {
4791 		/* default to TRUE to enable the MDIC W/A */
4792 		sc->sc_flags |= WM_F_80003_MDIC_WA;
4793 
4794 		rv = wm_kmrn_readreg(sc,
4795 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
4796 		if (rv == 0) {
4797 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
4798 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
4799 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
4800 			else
4801 				sc->sc_flags |= WM_F_80003_MDIC_WA;
4802 		}
4803 	}
4804 }
4805 
4806 /*
4807  * wm_add_rxbuf:
4808  *
4809  *	Add a receive buffer to the indicated descriptor.
4810  */
4811 static int
4812 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
4813 {
4814 	struct wm_softc *sc = rxq->rxq_sc;
4815 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
4816 	struct mbuf *m;
4817 	int error;
4818 
4819 	KASSERT(mutex_owned(rxq->rxq_lock));
4820 
4821 	MGETHDR(m, M_DONTWAIT, MT_DATA);
4822 	if (m == NULL)
4823 		return ENOBUFS;
4824 
4825 	MCLGET(m, M_DONTWAIT);
4826 	if ((m->m_flags & M_EXT) == 0) {
4827 		m_freem(m);
4828 		return ENOBUFS;
4829 	}
4830 
4831 	if (rxs->rxs_mbuf != NULL)
4832 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4833 
4834 	rxs->rxs_mbuf = m;
4835 
4836 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4837 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4838 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
4839 	if (error) {
4840 		/* XXX XXX XXX */
4841 		aprint_error_dev(sc->sc_dev,
4842 		    "unable to load rx DMA map %d, error = %d\n",
4843 		    idx, error);
4844 		panic("wm_add_rxbuf");
4845 	}
4846 
4847 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4848 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4849 
4850 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4851 		if ((sc->sc_rctl & RCTL_EN) != 0)
4852 			wm_init_rxdesc(rxq, idx);
4853 	} else
4854 		wm_init_rxdesc(rxq, idx);
4855 
4856 	return 0;
4857 }
4858 
4859 /*
4860  * wm_rxdrain:
4861  *
4862  *	Drain the receive queue.
4863  */
4864 static void
4865 wm_rxdrain(struct wm_rxqueue *rxq)
4866 {
4867 	struct wm_softc *sc = rxq->rxq_sc;
4868 	struct wm_rxsoft *rxs;
4869 	int i;
4870 
4871 	KASSERT(mutex_owned(rxq->rxq_lock));
4872 
4873 	for (i = 0; i < WM_NRXDESC; i++) {
4874 		rxs = &rxq->rxq_soft[i];
4875 		if (rxs->rxs_mbuf != NULL) {
4876 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4877 			m_freem(rxs->rxs_mbuf);
4878 			rxs->rxs_mbuf = NULL;
4879 		}
4880 	}
4881 }
4882 
4883 /*
4884  * Setup registers for RSS.
4885  *
4886  * XXX VMDq is not yet supported
4887  */
4888 static void
4889 wm_init_rss(struct wm_softc *sc)
4890 {
4891 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
4892 	int i;
4893 
4894 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
4895 
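	/*
	 * Fill the redirection table round-robin, so entry i maps to
	 * queue (i % sc_nqueues); the low bits of a received packet's
	 * RSS hash index this table to pick its RX queue.
	 */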
4896 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
4897 		int qid, reta_ent;
4898 
4899 		qid  = i % sc->sc_nqueues;
4900 		switch(sc->sc_type) {
4901 		case WM_T_82574:
4902 			reta_ent = __SHIFTIN(qid,
4903 			    RETA_ENT_QINDEX_MASK_82574);
4904 			break;
4905 		case WM_T_82575:
4906 			reta_ent = __SHIFTIN(qid,
4907 			    RETA_ENT_QINDEX1_MASK_82575);
4908 			break;
4909 		default:
4910 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
4911 			break;
4912 		}
4913 
4914 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
4915 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
4916 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
4917 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
4918 	}
4919 
4920 	rss_getkey((uint8_t *)rss_key);
4921 	for (i = 0; i < RSSRK_NUM_REGS; i++)
4922 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
4923 
4924 	if (sc->sc_type == WM_T_82574)
4925 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
4926 	else
4927 		mrqc = MRQC_ENABLE_RSS_MQ;
4928 
4929 	/*
4930 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
4931 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
4932 	 */
4933 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
4934 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
4935 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
4936 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
4937 
4938 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
4939 }
4940 
4941 /*
4942  * Adjust the TX and RX queue numbers which the system actually uses.
4943  *
4944  * The numbers are affected by the following parameters:
4945  *     - The number of hardware queues
4946  *     - The number of MSI-X vectors (= "nvectors" argument)
4947  *     - ncpu
4948  */
4949 static void
4950 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
4951 {
4952 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
4953 
4954 	if (nvectors < 2) {
4955 		sc->sc_nqueues = 1;
4956 		return;
4957 	}
4958 
4959 	switch(sc->sc_type) {
4960 	case WM_T_82572:
4961 		hw_ntxqueues = 2;
4962 		hw_nrxqueues = 2;
4963 		break;
4964 	case WM_T_82574:
4965 		hw_ntxqueues = 2;
4966 		hw_nrxqueues = 2;
4967 		break;
4968 	case WM_T_82575:
4969 		hw_ntxqueues = 4;
4970 		hw_nrxqueues = 4;
4971 		break;
4972 	case WM_T_82576:
4973 		hw_ntxqueues = 16;
4974 		hw_nrxqueues = 16;
4975 		break;
4976 	case WM_T_82580:
4977 	case WM_T_I350:
4978 	case WM_T_I354:
4979 		hw_ntxqueues = 8;
4980 		hw_nrxqueues = 8;
4981 		break;
4982 	case WM_T_I210:
4983 		hw_ntxqueues = 4;
4984 		hw_nrxqueues = 4;
4985 		break;
4986 	case WM_T_I211:
4987 		hw_ntxqueues = 2;
4988 		hw_nrxqueues = 2;
4989 		break;
4990 		/*
4991 		 * As the ethernet controllers below do not support MSI-X,
4992 		 * this driver does not use multiqueue on them:
4993 		 *     - WM_T_80003
4994 		 *     - WM_T_ICH8
4995 		 *     - WM_T_ICH9
4996 		 *     - WM_T_ICH10
4997 		 *     - WM_T_PCH
4998 		 *     - WM_T_PCH2
4999 		 *     - WM_T_PCH_LPT
5000 		 */
5001 	default:
5002 		hw_ntxqueues = 1;
5003 		hw_nrxqueues = 1;
5004 		break;
5005 	}
5006 
5007 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
5008 
5009 	/*
5010 	 * Since more queues than MSI-X vectors cannot improve scaling, we
5011 	 * limit the number of queues actually used.
5012 	 */
5013 	if (nvectors < hw_nqueues + 1) {
5014 		sc->sc_nqueues = nvectors - 1;
5015 	} else {
5016 		sc->sc_nqueues = hw_nqueues;
5017 	}
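	/*
	 * E.g. an 82576 (16 hardware queues) attached with 5 MSI-X vectors
	 * gets sc_nqueues = 5 - 1 = 4; the remaining vector is used for the
	 * link interrupt.  The clamp to ncpu below may reduce this further.
	 */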
5018 
5019 	/*
5020 	 * Since more queues than CPUs cannot improve scaling, we limit
5021 	 * the number of queues actually used.
5022 	 */
5023 	if (ncpu < sc->sc_nqueues)
5024 		sc->sc_nqueues = ncpu;
5025 }
5026 
5027 static inline bool
5028 wm_is_using_msix(struct wm_softc *sc)
5029 {
5030 
5031 	return (sc->sc_nintrs > 1);
5032 }
5033 
5034 static inline bool
5035 wm_is_using_multiqueue(struct wm_softc *sc)
5036 {
5037 
5038 	return (sc->sc_nqueues > 1);
5039 }
5040 
5041 static int
5042 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
5043 {
5044 	struct wm_queue *wmq = &sc->sc_queue[qidx];
5045 	wmq->wmq_id = qidx;
5046 	wmq->wmq_intr_idx = intr_idx;
5047 	wmq->wmq_si = softint_establish(SOFTINT_NET
5048 #ifdef WM_MPSAFE
5049 	    | SOFTINT_MPSAFE
5050 #endif
5051 	    , wm_handle_queue, wmq);
5052 	if (wmq->wmq_si != NULL)
5053 		return 0;
5054 
5055 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
5056 	    wmq->wmq_id);
5057 
5058 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
5059 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
5060 	return ENOMEM;
5061 }
5062 
5063 /*
5064  * Both single interrupt MSI and INTx can use this function.
5065  */
5066 static int
5067 wm_setup_legacy(struct wm_softc *sc)
5068 {
5069 	pci_chipset_tag_t pc = sc->sc_pc;
5070 	const char *intrstr = NULL;
5071 	char intrbuf[PCI_INTRSTR_LEN];
5072 	int error;
5073 
5074 	error = wm_alloc_txrx_queues(sc);
5075 	if (error) {
5076 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
5077 		    error);
5078 		return ENOMEM;
5079 	}
5080 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
5081 	    sizeof(intrbuf));
5082 #ifdef WM_MPSAFE
5083 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
5084 #endif
5085 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
5086 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
5087 	if (sc->sc_ihs[0] == NULL) {
5088 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
5089 		    (pci_intr_type(pc, sc->sc_intrs[0])
5090 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
5091 		return ENOMEM;
5092 	}
5093 
5094 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
5095 	sc->sc_nintrs = 1;
5096 
5097 	return wm_softint_establish(sc, 0, 0);
5098 }
5099 
5100 static int
5101 wm_setup_msix(struct wm_softc *sc)
5102 {
5103 	void *vih;
5104 	kcpuset_t *affinity;
5105 	int qidx, error, intr_idx, txrx_established;
5106 	pci_chipset_tag_t pc = sc->sc_pc;
5107 	const char *intrstr = NULL;
5108 	char intrbuf[PCI_INTRSTR_LEN];
5109 	char intr_xname[INTRDEVNAMEBUF];
5110 
5111 	if (sc->sc_nqueues < ncpu) {
5112 		/*
5113 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
5114 		 * interrupts starts from CPU#1.
5115 		 */
5116 		sc->sc_affinity_offset = 1;
5117 	} else {
5118 		/*
5119 		 * In this case, this device uses all CPUs, so for readability
5120 		 * we align the affinity cpu_index with the MSI-X vector number.
5121 		 */
5122 		sc->sc_affinity_offset = 0;
5123 	}
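	/*
	 * For example, with 4 queues on an 8-CPU system the TX/RX vectors
	 * are bound round-robin starting at CPU#1 (CPU#1..CPU#4), while
	 * the LINK vector keeps its default affinity.
	 */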
5124 
5125 	error = wm_alloc_txrx_queues(sc);
5126 	if (error) {
5127 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
5128 		    error);
5129 		return ENOMEM;
5130 	}
5131 
5132 	kcpuset_create(&affinity, false);
5133 	intr_idx = 0;
5134 
5135 	/*
5136 	 * TX and RX
5137 	 */
5138 	txrx_established = 0;
5139 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5140 		struct wm_queue *wmq = &sc->sc_queue[qidx];
5141 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
5142 
5143 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
5144 		    sizeof(intrbuf));
5145 #ifdef WM_MPSAFE
5146 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
5147 		    PCI_INTR_MPSAFE, true);
5148 #endif
5149 		memset(intr_xname, 0, sizeof(intr_xname));
5150 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
5151 		    device_xname(sc->sc_dev), qidx);
5152 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
5153 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
5154 		if (vih == NULL) {
5155 			aprint_error_dev(sc->sc_dev,
5156 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
5157 			    intrstr ? " at " : "",
5158 			    intrstr ? intrstr : "");
5159 
5160 			goto fail;
5161 		}
5162 		kcpuset_zero(affinity);
5163 		/* Round-robin affinity */
5164 		kcpuset_set(affinity, affinity_to);
5165 		error = interrupt_distribute(vih, affinity, NULL);
5166 		if (error == 0) {
5167 			aprint_normal_dev(sc->sc_dev,
5168 			    "for TX and RX interrupting at %s affinity to %u\n",
5169 			    intrstr, affinity_to);
5170 		} else {
5171 			aprint_normal_dev(sc->sc_dev,
5172 			    "for TX and RX interrupting at %s\n", intrstr);
5173 		}
5174 		sc->sc_ihs[intr_idx] = vih;
5175 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
5176 			goto fail;
5177 		txrx_established++;
5178 		intr_idx++;
5179 	}
5180 
5181 	/*
5182 	 * LINK
5183 	 */
5184 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
5185 	    sizeof(intrbuf));
5186 #ifdef WM_MPSAFE
5187 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
5188 #endif
5189 	memset(intr_xname, 0, sizeof(intr_xname));
5190 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
5191 	    device_xname(sc->sc_dev));
5192 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
5193 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
5194 	if (vih == NULL) {
5195 		aprint_error_dev(sc->sc_dev,
5196 		    "unable to establish MSI-X(for LINK)%s%s\n",
5197 		    intrstr ? " at " : "",
5198 		    intrstr ? intrstr : "");
5199 
5200 		goto fail;
5201 	}
5202 	/* Keep the default affinity for the LINK interrupt. */
5203 	aprint_normal_dev(sc->sc_dev,
5204 	    "for LINK interrupting at %s\n", intrstr);
5205 	sc->sc_ihs[intr_idx] = vih;
5206 	sc->sc_link_intr_idx = intr_idx;
5207 
5208 	sc->sc_nintrs = sc->sc_nqueues + 1;
5209 	kcpuset_destroy(affinity);
5210 	return 0;
5211 
5212  fail:
5213 	for (qidx = 0; qidx < txrx_established; qidx++) {
5214 		struct wm_queue *wmq = &sc->sc_queue[qidx];
5215 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
5216 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
5217 	}
5218 
5219 	kcpuset_destroy(affinity);
5220 	return ENOMEM;
5221 }
5222 
5223 static void
5224 wm_unset_stopping_flags(struct wm_softc *sc)
5225 {
5226 	int i;
5227 
5228 	KASSERT(WM_CORE_LOCKED(sc));
5229 
5230 	/*
5231 	 * Must unset the stopping flags in ascending order.
5232 	 */
5233 	for (i = 0; i < sc->sc_nqueues; i++) {
5234 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5235 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5236 
5237 		mutex_enter(txq->txq_lock);
5238 		txq->txq_stopping = false;
5239 		mutex_exit(txq->txq_lock);
5240 
5241 		mutex_enter(rxq->rxq_lock);
5242 		rxq->rxq_stopping = false;
5243 		mutex_exit(rxq->rxq_lock);
5244 	}
5245 
5246 	sc->sc_core_stopping = false;
5247 }
5248 
5249 static void
5250 wm_set_stopping_flags(struct wm_softc *sc)
5251 {
5252 	int i;
5253 
5254 	KASSERT(WM_CORE_LOCKED(sc));
5255 
5256 	sc->sc_core_stopping = true;
5257 
5258 	/*
5259 	 * Must set the stopping flags in ascending order.
5260 	 */
5261 	for (i = 0; i < sc->sc_nqueues; i++) {
5262 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5263 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5264 
5265 		mutex_enter(rxq->rxq_lock);
5266 		rxq->rxq_stopping = true;
5267 		mutex_exit(rxq->rxq_lock);
5268 
5269 		mutex_enter(txq->txq_lock);
5270 		txq->txq_stopping = true;
5271 		mutex_exit(txq->txq_lock);
5272 	}
5273 }
5274 
5275 /*
5276  * Write the interrupt interval value to the ITR or EITR register.
5277  */
5278 static void
5279 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
5280 {
5281 
5282 	if (!wmq->wmq_set_itr)
5283 		return;
5284 
5285 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5286 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
5287 
5288 		/*
5289 		 * The 82575 doesn't have the CNT_INGR field,
5290 		 * so overwrite the counter field in software.
5291 		 */
5292 		if (sc->sc_type == WM_T_82575)
5293 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
5294 		else
5295 			eitr |= EITR_CNT_INGR;
5296 
5297 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
5298 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
5299 		/*
5300 		 * The 82574 has both ITR and EITR. Set EITR when we use
5301 		 * the multiqueue function with MSI-X.
5302 		 */
5303 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
5304 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
5305 	} else {
5306 		KASSERT(wmq->wmq_id == 0);
5307 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
5308 	}
5309 
5310 	wmq->wmq_set_itr = false;
5311 }
5312 
5313 /*
5314  * TODO
5315  * The dynamic ITR calculation below is almost the same as Linux igb's;
5316  * however, it does not fit wm(4), so AIM is disabled until we find an
5317  * appropriate ITR calculation.
5318  */
5319 /*
5320  * Calculate the interrupt interval value to be written to the register
5321  * by wm_itrs_writereg(). This function itself does not write ITR/EITR.
5322  */
5323 static void
5324 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
5325 {
5326 #ifdef NOTYET
5327 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
5328 	struct wm_txqueue *txq = &wmq->wmq_txq;
5329 	uint32_t avg_size = 0;
5330 	uint32_t new_itr;
5331 
5332 	if (rxq->rxq_packets)
5333 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
5334 	if (txq->txq_packets)
5335 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
5336 
5337 	if (avg_size == 0) {
5338 		new_itr = 450; /* restore default value */
5339 		goto out;
5340 	}
5341 
5342 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
5343 	avg_size += 24;
5344 
5345 	/* Don't starve jumbo frames */
5346 	avg_size = min(avg_size, 3000);
5347 
5348 	/* Give a little boost to mid-size frames */
5349 	if ((avg_size > 300) && (avg_size < 1200))
5350 		new_itr = avg_size / 3;
5351 	else
5352 		new_itr = avg_size / 2;
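	/*
	 * E.g. an 800-byte average payload gives avg_size = 824 and
	 * new_itr = 824 / 3 = 274 before the scaling applied below.
	 */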
5353 
5354 out:
5355 	/*
5356 	 * The usage of the 82574 and 82575 EITR differs from other NEWQUEUE
5357 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
5358 	 */
5359 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
5360 		new_itr *= 4;
5361 
5362 	if (new_itr != wmq->wmq_itr) {
5363 		wmq->wmq_itr = new_itr;
5364 		wmq->wmq_set_itr = true;
5365 	} else
5366 		wmq->wmq_set_itr = false;
5367 
5368 	rxq->rxq_packets = 0;
5369 	rxq->rxq_bytes = 0;
5370 	txq->txq_packets = 0;
5371 	txq->txq_bytes = 0;
5372 #endif
5373 }
5374 
5375 /*
5376  * wm_init:		[ifnet interface function]
5377  *
5378  *	Initialize the interface.
5379  */
5380 static int
5381 wm_init(struct ifnet *ifp)
5382 {
5383 	struct wm_softc *sc = ifp->if_softc;
5384 	int ret;
5385 
5386 	WM_CORE_LOCK(sc);
5387 	ret = wm_init_locked(ifp);
5388 	WM_CORE_UNLOCK(sc);
5389 
5390 	return ret;
5391 }
5392 
5393 static int
5394 wm_init_locked(struct ifnet *ifp)
5395 {
5396 	struct wm_softc *sc = ifp->if_softc;
5397 	int i, j, trynum, error = 0;
5398 	uint32_t reg;
5399 
5400 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5401 		device_xname(sc->sc_dev), __func__));
5402 	KASSERT(WM_CORE_LOCKED(sc));
5403 
5404 	/*
5405 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
5406 	 * There is a small but measurable benefit to avoiding the adjustment
5407 	 * of the descriptor so that the headers are aligned, for normal mtu,
5408 	 * on such platforms.  One possibility is that the DMA itself is
5409 	 * slightly more efficient if the front of the entire packet (instead
5410 	 * of the front of the headers) is aligned.
5411 	 *
5412 	 * Note we must always set align_tweak to 0 if we are using
5413 	 * jumbo frames.
5414 	 */
5415 #ifdef __NO_STRICT_ALIGNMENT
5416 	sc->sc_align_tweak = 0;
5417 #else
5418 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
5419 		sc->sc_align_tweak = 0;
5420 	else
5421 		sc->sc_align_tweak = 2;
5422 #endif /* __NO_STRICT_ALIGNMENT */
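	/*
	 * The 2-byte tweak, when used, offsets the receive buffer so that
	 * the 14-byte Ethernet header ends on a 4-byte boundary and the
	 * IP header that follows it is aligned.
	 */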
5423 
5424 	/* Cancel any pending I/O. */
5425 	wm_stop_locked(ifp, 0);
5426 
5427 	/* update statistics before reset */
5428 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
5429 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
5430 
5431 	/* PCH_SPT hardware workaround */
5432 	if (sc->sc_type == WM_T_PCH_SPT)
5433 		wm_flush_desc_rings(sc);
5434 
5435 	/* Reset the chip to a known state. */
5436 	wm_reset(sc);
5437 
5438 	/*
5439 	 * AMT based hardware can now take control from firmware
5440 	 * Do this after reset.
5441 	 */
5442 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
5443 		wm_get_hw_control(sc);
5444 
5445 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
5446 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
5447 		wm_legacy_irq_quirk_spt(sc);
5448 
5449 	/* Init hardware bits */
5450 	wm_initialize_hardware_bits(sc);
5451 
5452 	/* Reset the PHY. */
5453 	if (sc->sc_flags & WM_F_HAS_MII)
5454 		wm_gmii_reset(sc);
5455 
5456 	/* Calculate (E)ITR value */
5457 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
5458 		/*
5459 		 * For NEWQUEUE's EITR (except for the 82575).
5460 		 * The 82575's EITR should be set to the same throttling
5461 		 * value as other old controllers' ITR because the
5462 		 * interrupts/sec calculation is the same, that is,
5463 		 * 1,000,000,000 / (N * 256).
5464 		 *
5465 		 * The 82574's EITR should be set to the same throttling
5466 		 * value as its ITR.  For N interrupts/sec, set this value
5467 		 * to 1,000,000 / N, in contrast to the ITR throttling value.
5468 		 */
5469 		sc->sc_itr_init = 450;
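		/* I.e. 1,000,000 / 450 ~= 2222 interrupts/sec. */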
5470 	} else if (sc->sc_type >= WM_T_82543) {
5471 		/*
5472 		 * Set up the interrupt throttling register (units of 256ns)
5473 		 * Note that a footnote in Intel's documentation says this
5474 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
5475 		 * or 10Mbit mode.  Empirically, this appears to be true also
5476 		 * for the 1024ns units of the other
5477 		 * interrupt-related timer registers -- so, really, we ought
5478 		 * to divide this value by 4 when the link speed is low.
5479 		 *
5480 		 * XXX implement this division at link speed change!
5481 		 */
5482 
5483 		/*
5484 		 * For N interrupts/sec, set this value to:
5485 		 * 1,000,000,000 / (N * 256).  Note that we set the
5486 		 * absolute and packet timer values to this value
5487 		 * divided by 4 to get "simple timer" behavior.
5488 		 */
5489 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
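		/* I.e. 1,000,000,000 / (1500 * 256) ~= 2604. */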
5490 	}
5491 
5492 	error = wm_init_txrx_queues(sc);
5493 	if (error)
5494 		goto out;
5495 
5496 	/*
5497 	 * Clear out the VLAN table -- we don't use it (yet).
5498 	 */
5499 	CSR_WRITE(sc, WMREG_VET, 0);
5500 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
5501 		trynum = 10; /* Due to hw errata */
5502 	else
5503 		trynum = 1;
5504 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
5505 		for (j = 0; j < trynum; j++)
5506 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
5507 
5508 	/*
5509 	 * Set up flow-control parameters.
5510 	 *
5511 	 * XXX Values could probably stand some tuning.
5512 	 */
5513 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
5514 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
5515 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
5516 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
5517 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
5518 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
5519 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
5520 	}
5521 
5522 	sc->sc_fcrtl = FCRTL_DFLT;
5523 	if (sc->sc_type < WM_T_82543) {
5524 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
5525 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
5526 	} else {
5527 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
5528 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
5529 	}
5530 
5531 	if (sc->sc_type == WM_T_80003)
5532 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
5533 	else
5534 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
5535 
5536 	/* Writes the control register. */
5537 	wm_set_vlan(sc);
5538 
5539 	if (sc->sc_flags & WM_F_HAS_MII) {
5540 		uint16_t kmreg;
5541 
5542 		switch (sc->sc_type) {
5543 		case WM_T_80003:
5544 		case WM_T_ICH8:
5545 		case WM_T_ICH9:
5546 		case WM_T_ICH10:
5547 		case WM_T_PCH:
5548 		case WM_T_PCH2:
5549 		case WM_T_PCH_LPT:
5550 		case WM_T_PCH_SPT:
5551 		case WM_T_PCH_CNP:
5552 			/*
5553 			 * Set the mac to wait the maximum time between each
5554 			 * iteration and increase the max iterations when
5555 			 * polling the phy; this fixes erroneous timeouts at
5556 			 * 10Mbps.
5557 			 */
5558 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
5559 			    0xFFFF);
5560 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
5561 			    &kmreg);
5562 			kmreg |= 0x3F;
5563 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
5564 			    kmreg);
5565 			break;
5566 		default:
5567 			break;
5568 		}
5569 
5570 		if (sc->sc_type == WM_T_80003) {
5571 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
5572 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
5573 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5574 
5575 			/* Bypass RX and TX FIFO's */
5576 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
5577 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
5578 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
5579 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
5580 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
5581 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
5582 		}
5583 	}
5584 #if 0
5585 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
5586 #endif
5587 
5588 	/* Set up checksum offload parameters. */
5589 	reg = CSR_READ(sc, WMREG_RXCSUM);
5590 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
5591 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
5592 		reg |= RXCSUM_IPOFL;
5593 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
5594 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
5595 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
5596 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
5597 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
5598 
5599 	/* Set registers about MSI-X */
5600 	if (wm_is_using_msix(sc)) {
5601 		uint32_t ivar;
5602 		struct wm_queue *wmq;
5603 		int qid, qintr_idx;
5604 
5605 		if (sc->sc_type == WM_T_82575) {
5606 			/* Interrupt control */
5607 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
5608 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
5609 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5610 
5611 			/* TX and RX */
5612 			for (i = 0; i < sc->sc_nqueues; i++) {
5613 				wmq = &sc->sc_queue[i];
5614 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
5615 				    EITR_TX_QUEUE(wmq->wmq_id)
5616 				    | EITR_RX_QUEUE(wmq->wmq_id));
5617 			}
5618 			/* Link status */
5619 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
5620 			    EITR_OTHER);
5621 		} else if (sc->sc_type == WM_T_82574) {
5622 			/* Interrupt control */
5623 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
5624 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
5625 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5626 
5627 			/*
5628 			 * Work around an issue with spurious interrupts
5629 			 * in MSI-X mode.
5630 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
5631 			 * been initialized yet; reinitialize WMREG_RFCTL here.
5632 			 */
5633 			reg = CSR_READ(sc, WMREG_RFCTL);
5634 			reg |= WMREG_RFCTL_ACKDIS;
5635 			CSR_WRITE(sc, WMREG_RFCTL, reg);
5636 
5637 			ivar = 0;
5638 			/* TX and RX */
5639 			for (i = 0; i < sc->sc_nqueues; i++) {
5640 				wmq = &sc->sc_queue[i];
5641 				qid = wmq->wmq_id;
5642 				qintr_idx = wmq->wmq_intr_idx;
5643 
5644 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
5645 				    IVAR_TX_MASK_Q_82574(qid));
5646 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
5647 				    IVAR_RX_MASK_Q_82574(qid));
5648 			}
5649 			/* Link status */
5650 			ivar |= __SHIFTIN((IVAR_VALID_82574
5651 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
5652 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
5653 		} else {
5654 			/* Interrupt control */
5655 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
5656 			    | GPIE_EIAME | GPIE_PBA);
5657 
5658 			switch (sc->sc_type) {
5659 			case WM_T_82580:
5660 			case WM_T_I350:
5661 			case WM_T_I354:
5662 			case WM_T_I210:
5663 			case WM_T_I211:
5664 				/* TX and RX */
5665 				for (i = 0; i < sc->sc_nqueues; i++) {
5666 					wmq = &sc->sc_queue[i];
5667 					qid = wmq->wmq_id;
5668 					qintr_idx = wmq->wmq_intr_idx;
5669 
5670 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
5671 					ivar &= ~IVAR_TX_MASK_Q(qid);
5672 					ivar |= __SHIFTIN((qintr_idx
5673 						| IVAR_VALID),
5674 					    IVAR_TX_MASK_Q(qid));
5675 					ivar &= ~IVAR_RX_MASK_Q(qid);
5676 					ivar |= __SHIFTIN((qintr_idx
5677 						| IVAR_VALID),
5678 					    IVAR_RX_MASK_Q(qid));
5679 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
5680 				}
5681 				break;
5682 			case WM_T_82576:
5683 				/* TX and RX */
5684 				for (i = 0; i < sc->sc_nqueues; i++) {
5685 					wmq = &sc->sc_queue[i];
5686 					qid = wmq->wmq_id;
5687 					qintr_idx = wmq->wmq_intr_idx;
5688 
5689 					ivar = CSR_READ(sc,
5690 					    WMREG_IVAR_Q_82576(qid));
5691 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
5692 					ivar |= __SHIFTIN((qintr_idx
5693 						| IVAR_VALID),
5694 					    IVAR_TX_MASK_Q_82576(qid));
5695 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
5696 					ivar |= __SHIFTIN((qintr_idx
5697 						| IVAR_VALID),
5698 					    IVAR_RX_MASK_Q_82576(qid));
5699 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
5700 					    ivar);
5701 				}
5702 				break;
5703 			default:
5704 				break;
5705 			}
5706 
5707 			/* Link status */
5708 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
5709 			    IVAR_MISC_OTHER);
5710 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
5711 		}
5712 
5713 		if (wm_is_using_multiqueue(sc)) {
5714 			wm_init_rss(sc);
5715 
5716 			/*
5717 			 * NOTE: Receive Full-Packet Checksum Offload
5718 			 * is mutually exclusive with Multiqueue; however,
5719 			 * this is not the same as TCP/IP checksumming,
5720 			 * which still works.
5721 			 */
5722 			reg = CSR_READ(sc, WMREG_RXCSUM);
5723 			reg |= RXCSUM_PCSD;
5724 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
5725 		}
5726 	}
5727 
5728 	/* Set up the interrupt registers. */
5729 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5730 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
5731 	    ICR_RXO | ICR_RXT0;
5732 	if (wm_is_using_msix(sc)) {
5733 		uint32_t mask;
5734 		struct wm_queue *wmq;
5735 
5736 		switch (sc->sc_type) {
5737 		case WM_T_82574:
5738 			mask = 0;
5739 			for (i = 0; i < sc->sc_nqueues; i++) {
5740 				wmq = &sc->sc_queue[i];
5741 				mask |= ICR_TXQ(wmq->wmq_id);
5742 				mask |= ICR_RXQ(wmq->wmq_id);
5743 			}
5744 			mask |= ICR_OTHER;
5745 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
5746 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
5747 			break;
5748 		default:
5749 			if (sc->sc_type == WM_T_82575) {
5750 				mask = 0;
5751 				for (i = 0; i < sc->sc_nqueues; i++) {
5752 					wmq = &sc->sc_queue[i];
5753 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
5754 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
5755 				}
5756 				mask |= EITR_OTHER;
5757 			} else {
5758 				mask = 0;
5759 				for (i = 0; i < sc->sc_nqueues; i++) {
5760 					wmq = &sc->sc_queue[i];
5761 					mask |= 1 << wmq->wmq_intr_idx;
5762 				}
5763 				mask |= 1 << sc->sc_link_intr_idx;
5764 			}
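			/*
			 * Program auto-clear (EIAC) and auto-mask (EIAM)
			 * for the queue and link vectors, then unmask them
			 * via EIMS.
			 */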
5765 			CSR_WRITE(sc, WMREG_EIAC, mask);
5766 			CSR_WRITE(sc, WMREG_EIAM, mask);
5767 			CSR_WRITE(sc, WMREG_EIMS, mask);
5768 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
5769 			break;
5770 		}
5771 	} else
5772 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
5773 
5774 	/* Set up the inter-packet gap. */
5775 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
5776 
5777 	if (sc->sc_type >= WM_T_82543) {
5778 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5779 			struct wm_queue *wmq = &sc->sc_queue[qidx];
5780 			wm_itrs_writereg(sc, wmq);
5781 		}
5782 		/*
5783 		 * Link interrupts occur much less frequently than TX
5784 		 * and RX interrupts, so we don't tune the
5785 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way
5786 		 * FreeBSD's if_igb does.
5787 		 */
5788 	}
5789 
5790 	/* Set the VLAN ethernetype. */
5791 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
5792 
5793 	/*
5794 	 * Set up the transmit control register; we start out with
5795  * a collision distance suitable for FDX, but update it when
5796 	 * we resolve the media type.
5797 	 */
5798 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
5799 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
5800 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5801 	if (sc->sc_type >= WM_T_82571)
5802 		sc->sc_tctl |= TCTL_MULR;
5803 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5804 
5805 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5806 		/* Write TDT after TCTL.EN is set. See the documentation. */
5807 		CSR_WRITE(sc, WMREG_TDT(0), 0);
5808 	}
5809 
5810 	if (sc->sc_type == WM_T_80003) {
5811 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
5812 		reg &= ~TCTL_EXT_GCEX_MASK;
5813 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
5814 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
5815 	}
5816 
5817 	/* Set the media. */
5818 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
5819 		goto out;
5820 
5821 	/* Configure for OS presence */
5822 	wm_init_manageability(sc);
5823 
5824 	/*
5825 	 * Set up the receive control register; we actually program
5826 	 * the register when we set the receive filter.  Use multicast
5827 	 * address offset type 0.
5828 	 *
5829 	 * Only the i82544 has the ability to strip the incoming
5830 	 * CRC, so we don't enable that feature.
5831 	 */
5832 	sc->sc_mchash_type = 0;
5833 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
5834 	    | RCTL_MO(sc->sc_mchash_type);
5835 
5836 	/*
5837 	 * The 82574 uses the one-buffer extended Rx descriptor.
5838 	 */
5839 	if (sc->sc_type == WM_T_82574)
5840 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
5841 
5842 	/*
5843 	 * The I350 has a bug where it always strips the CRC whether
5844 	 * asked to or not, so ask for a stripped CRC here and cope in rxeof.
5845 	 */
5846 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
5847 	    || (sc->sc_type == WM_T_I210))
5848 		sc->sc_rctl |= RCTL_SECRC;
5849 
5850 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
5851 	    && (ifp->if_mtu > ETHERMTU)) {
5852 		sc->sc_rctl |= RCTL_LPE;
5853 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5854 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
5855 	}
5856 
5857 	if (MCLBYTES == 2048) {
5858 		sc->sc_rctl |= RCTL_2k;
5859 	} else {
5860 		if (sc->sc_type >= WM_T_82543) {
5861 			switch (MCLBYTES) {
5862 			case 4096:
5863 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
5864 				break;
5865 			case 8192:
5866 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
5867 				break;
5868 			case 16384:
5869 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
5870 				break;
5871 			default:
5872 				panic("wm_init: MCLBYTES %d unsupported",
5873 				    MCLBYTES);
5874 				break;
5875 			}
5876 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
5877 	}
5878 
5879 	/* Enable ECC */
5880 	switch (sc->sc_type) {
5881 	case WM_T_82571:
5882 		reg = CSR_READ(sc, WMREG_PBA_ECC);
5883 		reg |= PBA_ECC_CORR_EN;
5884 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
5885 		break;
5886 	case WM_T_PCH_LPT:
5887 	case WM_T_PCH_SPT:
5888 	case WM_T_PCH_CNP:
5889 		reg = CSR_READ(sc, WMREG_PBECCSTS);
5890 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
5891 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
5892 
5893 		sc->sc_ctrl |= CTRL_MEHE;
5894 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5895 		break;
5896 	default:
5897 		break;
5898 	}
5899 
5900 	/*
5901 	 * Set the receive filter.
5902 	 *
5903 	 * For 82575 and 82576, the RX descriptors must be initialized after
5904 	 * the setting of RCTL.EN in wm_set_filter()
5905 	 */
5906 	wm_set_filter(sc);
5907 
5908 	/* On 575 and later set RDT only if RX enabled */
5909 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5910 		int qidx;
5911 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5912 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
5913 			for (i = 0; i < WM_NRXDESC; i++) {
5914 				mutex_enter(rxq->rxq_lock);
5915 				wm_init_rxdesc(rxq, i);
5916 				mutex_exit(rxq->rxq_lock);
5917 
5918 			}
5919 		}
5920 	}
5921 
5922 	wm_unset_stopping_flags(sc);
5923 
5924 	/* Start the one second link check clock. */
5925 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
5926 
5927 	/* ...all done! */
5928 	ifp->if_flags |= IFF_RUNNING;
5929 	ifp->if_flags &= ~IFF_OACTIVE;
5930 
5931  out:
5932 	sc->sc_if_flags = ifp->if_flags;
5933 	if (error)
5934 		log(LOG_ERR, "%s: interface not running\n",
5935 		    device_xname(sc->sc_dev));
5936 	return error;
5937 }
5938 
5939 /*
5940  * wm_stop:		[ifnet interface function]
5941  *
5942  *	Stop transmission on the interface.
5943  */
5944 static void
5945 wm_stop(struct ifnet *ifp, int disable)
5946 {
5947 	struct wm_softc *sc = ifp->if_softc;
5948 
5949 	WM_CORE_LOCK(sc);
5950 	wm_stop_locked(ifp, disable);
5951 	WM_CORE_UNLOCK(sc);
5952 }
5953 
5954 static void
5955 wm_stop_locked(struct ifnet *ifp, int disable)
5956 {
5957 	struct wm_softc *sc = ifp->if_softc;
5958 	struct wm_txsoft *txs;
5959 	int i, qidx;
5960 
5961 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5962 		device_xname(sc->sc_dev), __func__));
5963 	KASSERT(WM_CORE_LOCKED(sc));
5964 
5965 	wm_set_stopping_flags(sc);
5966 
5967 	/* Stop the one second clock. */
5968 	callout_stop(&sc->sc_tick_ch);
5969 
5970 	/* Stop the 82547 Tx FIFO stall check timer. */
5971 	if (sc->sc_type == WM_T_82547)
5972 		callout_stop(&sc->sc_txfifo_ch);
5973 
5974 	if (sc->sc_flags & WM_F_HAS_MII) {
5975 		/* Down the MII. */
5976 		mii_down(&sc->sc_mii);
5977 	} else {
5978 #if 0
5979 		/* Should we clear PHY's status properly? */
5980 		wm_reset(sc);
5981 #endif
5982 	}
5983 
5984 	/* Stop the transmit and receive processes. */
5985 	CSR_WRITE(sc, WMREG_TCTL, 0);
5986 	CSR_WRITE(sc, WMREG_RCTL, 0);
5987 	sc->sc_rctl &= ~RCTL_EN;
5988 
5989 	/*
5990 	 * Clear the interrupt mask to ensure the device cannot assert its
5991 	 * interrupt line.
5992 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
5993 	 * service any currently pending or shared interrupt.
5994 	 */
5995 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5996 	sc->sc_icr = 0;
5997 	if (wm_is_using_msix(sc)) {
5998 		if (sc->sc_type != WM_T_82574) {
5999 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
6000 			CSR_WRITE(sc, WMREG_EIAC, 0);
6001 		} else
6002 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
6003 	}
6004 
6005 	/* Release any queued transmit buffers. */
6006 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6007 		struct wm_queue *wmq = &sc->sc_queue[qidx];
6008 		struct wm_txqueue *txq = &wmq->wmq_txq;
6009 		mutex_enter(txq->txq_lock);
6010 		txq->txq_watchdog = false; /* ensure watchdog disabled */
6011 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6012 			txs = &txq->txq_soft[i];
6013 			if (txs->txs_mbuf != NULL) {
6014 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
6015 				m_freem(txs->txs_mbuf);
6016 				txs->txs_mbuf = NULL;
6017 			}
6018 		}
6019 		mutex_exit(txq->txq_lock);
6020 	}
6021 
6022 	/* Mark the interface as down and cancel the watchdog timer. */
6023 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
6024 
6025 	if (disable) {
6026 		for (i = 0; i < sc->sc_nqueues; i++) {
6027 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6028 			mutex_enter(rxq->rxq_lock);
6029 			wm_rxdrain(rxq);
6030 			mutex_exit(rxq->rxq_lock);
6031 		}
6032 	}
6033 
6034 #if 0 /* notyet */
6035 	if (sc->sc_type >= WM_T_82544)
6036 		CSR_WRITE(sc, WMREG_WUC, 0);
6037 #endif
6038 }
6039 
6040 static void
6041 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
6042 {
6043 	struct mbuf *m;
6044 	int i;
6045 
6046 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
6047 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
6048 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
6049 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
6050 		    m->m_data, m->m_len, m->m_flags);
6051 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
6052 	    i, i == 1 ? "" : "s");
6053 }
6054 
6055 /*
6056  * wm_82547_txfifo_stall:
6057  *
6058  *	Callout used to wait for the 82547 Tx FIFO to drain,
6059  *	reset the FIFO pointers, and restart packet transmission.
6060  */
6061 static void
6062 wm_82547_txfifo_stall(void *arg)
6063 {
6064 	struct wm_softc *sc = arg;
6065 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6066 
6067 	mutex_enter(txq->txq_lock);
6068 
6069 	if (txq->txq_stopping)
6070 		goto out;
6071 
6072 	if (txq->txq_fifo_stall) {
6073 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
6074 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
6075 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
6076 			/*
6077 			 * Packets have drained.  Stop transmitter, reset
6078 			 * FIFO pointers, restart transmitter, and kick
6079 			 * the packet queue.
6080 			 */
6081 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
6082 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
6083 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
6084 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
6085 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
6086 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
6087 			CSR_WRITE(sc, WMREG_TCTL, tctl);
6088 			CSR_WRITE_FLUSH(sc);
6089 
6090 			txq->txq_fifo_head = 0;
6091 			txq->txq_fifo_stall = 0;
6092 			wm_start_locked(&sc->sc_ethercom.ec_if);
6093 		} else {
6094 			/*
6095 			 * Still waiting for packets to drain; try again in
6096 			 * another tick.
6097 			 */
6098 			callout_schedule(&sc->sc_txfifo_ch, 1);
6099 		}
6100 	}
6101 
6102 out:
6103 	mutex_exit(txq->txq_lock);
6104 }
6105 
6106 /*
6107  * wm_82547_txfifo_bugchk:
6108  *
6109  *	Check for bug condition in the 82547 Tx FIFO.  We need to
6110  *	prevent enqueueing a packet that would wrap around the end
6111  *	of the Tx FIFO ring buffer; otherwise the chip will croak.
6112  *
6113  *	We do this by checking the amount of space before the end
6114  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
6115  *	the Tx FIFO, wait for all remaining packets to drain, reset
6116  *	the internal FIFO pointers to the beginning, and restart
6117  *	transmission on the interface.
6118  */
6119 #define	WM_FIFO_HDR		0x10
6120 #define	WM_82547_PAD_LEN	0x3e0
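/*
 * For example, a 1514-byte frame occupies roundup(1514 + WM_FIFO_HDR,
 * WM_FIFO_HDR) = 0x600 bytes of FIFO space, so in half-duplex a stall
 * is triggered when no more than 0x600 - WM_82547_PAD_LEN = 0x220 bytes
 * remain before the end of the FIFO.
 */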
6121 static int
6122 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
6123 {
6124 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6125 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
6126 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
6127 
6128 	/* Just return if already stalled. */
6129 	if (txq->txq_fifo_stall)
6130 		return 1;
6131 
6132 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
6133 		/* Stall only occurs in half-duplex mode. */
6134 		goto send_packet;
6135 	}
6136 
6137 	if (len >= WM_82547_PAD_LEN + space) {
6138 		txq->txq_fifo_stall = 1;
6139 		callout_schedule(&sc->sc_txfifo_ch, 1);
6140 		return 1;
6141 	}
6142 
6143  send_packet:
6144 	txq->txq_fifo_head += len;
6145 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
6146 		txq->txq_fifo_head -= txq->txq_fifo_size;
6147 
6148 	return 0;
6149 }
6150 
6151 static int
6152 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
6153 {
6154 	int error;
6155 
6156 	/*
6157 	 * Allocate the control data structures, and create and load the
6158 	 * DMA map for it.
6159 	 *
6160 	 * NOTE: All Tx descriptors must be in the same 4G segment of
6161 	 * memory.  So must Rx descriptors.  We simplify by allocating
6162 	 * both sets within the same 4G segment.
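	 *
	 * The 4G constraint is enforced by the 4GB boundary argument
	 * passed to bus_dmamem_alloc() below.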
6163 	 */
6164 	if (sc->sc_type < WM_T_82544)
6165 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
6166 	else
6167 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
6168 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6169 		txq->txq_descsize = sizeof(nq_txdesc_t);
6170 	else
6171 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
6172 
6173 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
6174 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
6175 		    1, &txq->txq_desc_rseg, 0)) != 0) {
6176 		aprint_error_dev(sc->sc_dev,
6177 		    "unable to allocate TX control data, error = %d\n",
6178 		    error);
6179 		goto fail_0;
6180 	}
6181 
6182 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
6183 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
6184 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
6185 		aprint_error_dev(sc->sc_dev,
6186 		    "unable to map TX control data, error = %d\n", error);
6187 		goto fail_1;
6188 	}
6189 
6190 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
6191 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
6192 		aprint_error_dev(sc->sc_dev,
6193 		    "unable to create TX control data DMA map, error = %d\n",
6194 		    error);
6195 		goto fail_2;
6196 	}
6197 
6198 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
6199 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
6200 		aprint_error_dev(sc->sc_dev,
6201 		    "unable to load TX control data DMA map, error = %d\n",
6202 		    error);
6203 		goto fail_3;
6204 	}
6205 
6206 	return 0;
6207 
6208  fail_3:
6209 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
6210  fail_2:
6211 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
6212 	    WM_TXDESCS_SIZE(txq));
6213  fail_1:
6214 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
6215  fail_0:
6216 	return error;
6217 }
6218 
6219 static void
6220 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
6221 {
6222 
6223 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
6224 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
6225 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
6226 	    WM_TXDESCS_SIZE(txq));
6227 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
6228 }
6229 
6230 static int
6231 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
6232 {
6233 	int error;
6234 	size_t rxq_descs_size;
6235 
6236 	/*
6237 	 * Allocate the control data structures, and create and load the
6238 	 * DMA map for it.
6239 	 *
6240 	 * NOTE: All Tx descriptors must be in the same 4G segment of
6241 	 * memory.  So must Rx descriptors.  We simplify by allocating
6242 	 * both sets within the same 4G segment.
6243 	 */
6244 	rxq->rxq_ndesc = WM_NRXDESC;
6245 	if (sc->sc_type == WM_T_82574)
6246 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
6247 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6248 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
6249 	else
6250 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
6251 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
6252 
6253 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
6254 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
6255 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
6256 		aprint_error_dev(sc->sc_dev,
6257 		    "unable to allocate RX control data, error = %d\n",
6258 		    error);
6259 		goto fail_0;
6260 	}
6261 
6262 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
6263 		    rxq->rxq_desc_rseg, rxq_descs_size,
6264 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
6265 		aprint_error_dev(sc->sc_dev,
6266 		    "unable to map RX control data, error = %d\n", error);
6267 		goto fail_1;
6268 	}
6269 
6270 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
6271 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
6272 		aprint_error_dev(sc->sc_dev,
6273 		    "unable to create RX control data DMA map, error = %d\n",
6274 		    error);
6275 		goto fail_2;
6276 	}
6277 
6278 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
6279 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
6280 		aprint_error_dev(sc->sc_dev,
6281 		    "unable to load RX control data DMA map, error = %d\n",
6282 		    error);
6283 		goto fail_3;
6284 	}
6285 
6286 	return 0;
6287 
6288  fail_3:
6289 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
6290  fail_2:
6291 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
6292 	    rxq_descs_size);
6293  fail_1:
6294 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
6295  fail_0:
6296 	return error;
6297 }
6298 
6299 static void
6300 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
6301 {
6302 
6303 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
6304 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
6305 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
6306 	    rxq->rxq_descsize * rxq->rxq_ndesc);
6307 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
6308 }
6309 
6310 
6311 static int
6312 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
6313 {
6314 	int i, error;
6315 
6316 	/* Create the transmit buffer DMA maps. */
6317 	WM_TXQUEUELEN(txq) =
6318 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
6319 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
6320 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6321 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
6322 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
6323 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
6324 			aprint_error_dev(sc->sc_dev,
6325 			    "unable to create Tx DMA map %d, error = %d\n",
6326 			    i, error);
6327 			goto fail;
6328 		}
6329 	}
6330 
6331 	return 0;
6332 
6333  fail:
6334 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6335 		if (txq->txq_soft[i].txs_dmamap != NULL)
6336 			bus_dmamap_destroy(sc->sc_dmat,
6337 			    txq->txq_soft[i].txs_dmamap);
6338 	}
6339 	return error;
6340 }
6341 
6342 static void
6343 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
6344 {
6345 	int i;
6346 
6347 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6348 		if (txq->txq_soft[i].txs_dmamap != NULL)
6349 			bus_dmamap_destroy(sc->sc_dmat,
6350 			    txq->txq_soft[i].txs_dmamap);
6351 	}
6352 }
6353 
6354 static int
6355 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
6356 {
6357 	int i, error;
6358 
6359 	/* Create the receive buffer DMA maps. */
6360 	for (i = 0; i < rxq->rxq_ndesc; i++) {
6361 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
6362 			    MCLBYTES, 0, 0,
6363 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
6364 			aprint_error_dev(sc->sc_dev,
6365 			    "unable to create Rx DMA map %d error = %d\n",
6366 			    i, error);
6367 			goto fail;
6368 		}
6369 		rxq->rxq_soft[i].rxs_mbuf = NULL;
6370 	}
6371 
6372 	return 0;
6373 
6374  fail:
6375 	for (i = 0; i < rxq->rxq_ndesc; i++) {
6376 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
6377 			bus_dmamap_destroy(sc->sc_dmat,
6378 			    rxq->rxq_soft[i].rxs_dmamap);
6379 	}
6380 	return error;
6381 }
6382 
6383 static void
6384 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
6385 {
6386 	int i;
6387 
6388 	for (i = 0; i < rxq->rxq_ndesc; i++) {
6389 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
6390 			bus_dmamap_destroy(sc->sc_dmat,
6391 			    rxq->rxq_soft[i].rxs_dmamap);
6392 	}
6393 }
6394 
6395 /*
6396  * wm_alloc_txrx_queues:
6397  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
6398  */
6399 static int
6400 wm_alloc_txrx_queues(struct wm_softc *sc)
6401 {
6402 	int i, error, tx_done, rx_done;
6403 
6404 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
6405 	    KM_SLEEP);
6406 	if (sc->sc_queue == NULL) {
6407 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
6408 		error = ENOMEM;
6409 		goto fail_0;
6410 	}
6411 
6412 	/*
6413 	 * For transmission
6414 	 */
6415 	error = 0;
6416 	tx_done = 0;
6417 	for (i = 0; i < sc->sc_nqueues; i++) {
6418 #ifdef WM_EVENT_COUNTERS
6419 		int j;
6420 		const char *xname;
6421 #endif
6422 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6423 		txq->txq_sc = sc;
6424 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
6425 
6426 		error = wm_alloc_tx_descs(sc, txq);
6427 		if (error)
6428 			break;
6429 		error = wm_alloc_tx_buffer(sc, txq);
6430 		if (error) {
6431 			wm_free_tx_descs(sc, txq);
6432 			break;
6433 		}
6434 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
6435 		if (txq->txq_interq == NULL) {
6436 			wm_free_tx_descs(sc, txq);
6437 			wm_free_tx_buffer(sc, txq);
6438 			error = ENOMEM;
6439 			break;
6440 		}
6441 
6442 #ifdef WM_EVENT_COUNTERS
6443 		xname = device_xname(sc->sc_dev);
6444 
6445 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
6446 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
6447 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
6448 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
6449 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
6450 
6451 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
6452 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
6453 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
6454 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
6455 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
6456 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
6457 
6458 		for (j = 0; j < WM_NTXSEGS; j++) {
6459 			snprintf(txq->txq_txseg_evcnt_names[j],
6460 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
6461 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
6462 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
6463 		}
6464 
6465 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
6466 
6467 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
6468 #endif /* WM_EVENT_COUNTERS */
6469 
6470 		tx_done++;
6471 	}
6472 	if (error)
6473 		goto fail_1;
6474 
6475 	/*
6476 	 * For receive
6477 	 */
6478 	error = 0;
6479 	rx_done = 0;
6480 	for (i = 0; i < sc->sc_nqueues; i++) {
6481 #ifdef WM_EVENT_COUNTERS
6482 		const char *xname;
6483 #endif
6484 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6485 		rxq->rxq_sc = sc;
6486 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
6487 
6488 		error = wm_alloc_rx_descs(sc, rxq);
6489 		if (error)
6490 			break;
6491 
6492 		error = wm_alloc_rx_buffer(sc, rxq);
6493 		if (error) {
6494 			wm_free_rx_descs(sc, rxq);
6495 			break;
6496 		}
6497 
6498 #ifdef WM_EVENT_COUNTERS
6499 		xname = device_xname(sc->sc_dev);
6500 
6501 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
6502 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxdefer, rxq, i, xname);
6503 
6504 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
6505 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
6506 #endif /* WM_EVENT_COUNTERS */
6507 
6508 		rx_done++;
6509 	}
6510 	if (error)
6511 		goto fail_2;
6512 
6513 	return 0;
6514 
6515  fail_2:
6516 	for (i = 0; i < rx_done; i++) {
6517 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6518 		wm_free_rx_buffer(sc, rxq);
6519 		wm_free_rx_descs(sc, rxq);
6520 		if (rxq->rxq_lock)
6521 			mutex_obj_free(rxq->rxq_lock);
6522 	}
6523  fail_1:
6524 	for (i = 0; i < tx_done; i++) {
6525 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6526 		pcq_destroy(txq->txq_interq);
6527 		wm_free_tx_buffer(sc, txq);
6528 		wm_free_tx_descs(sc, txq);
6529 		if (txq->txq_lock)
6530 			mutex_obj_free(txq->txq_lock);
6531 	}
6532 
6533 	kmem_free(sc->sc_queue,
6534 	    sizeof(struct wm_queue) * sc->sc_nqueues);
6535  fail_0:
6536 	return error;
6537 }
6538 
6539 /*
6540  * wm_free_txrx_queues:
6541  *	Free {tx,rx} descriptors and {tx,rx} buffers.
6542  */
6543 static void
6544 wm_free_txrx_queues(struct wm_softc *sc)
6545 {
6546 	int i;
6547 
6548 	for (i = 0; i < sc->sc_nqueues; i++) {
6549 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6550 
6551 #ifdef WM_EVENT_COUNTERS
6552 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
6553 		WM_Q_EVCNT_DETACH(rxq, rxdefer, rxq, i);
6554 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
6555 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
6556 #endif /* WM_EVENT_COUNTERS */
6557 
6558 		wm_free_rx_buffer(sc, rxq);
6559 		wm_free_rx_descs(sc, rxq);
6560 		if (rxq->rxq_lock)
6561 			mutex_obj_free(rxq->rxq_lock);
6562 	}
6563 
6564 	for (i = 0; i < sc->sc_nqueues; i++) {
6565 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6566 		struct mbuf *m;
6567 #ifdef WM_EVENT_COUNTERS
6568 		int j;
6569 
6570 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
6571 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
6572 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
6573 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
6574 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
6575 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
6576 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
6577 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
6578 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
6579 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
6580 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
6581 
6582 		for (j = 0; j < WM_NTXSEGS; j++)
6583 			evcnt_detach(&txq->txq_ev_txseg[j]);
6584 
6585 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
6586 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
6587 #endif /* WM_EVENT_COUNTERS */
6588 
6589 		/* drain txq_interq */
6590 		while ((m = pcq_get(txq->txq_interq)) != NULL)
6591 			m_freem(m);
6592 		pcq_destroy(txq->txq_interq);
6593 
6594 		wm_free_tx_buffer(sc, txq);
6595 		wm_free_tx_descs(sc, txq);
6596 		if (txq->txq_lock)
6597 			mutex_obj_free(txq->txq_lock);
6598 	}
6599 
6600 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
6601 }
6602 
6603 static void
6604 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
6605 {
6606 
6607 	KASSERT(mutex_owned(txq->txq_lock));
6608 
6609 	/* Initialize the transmit descriptor ring. */
6610 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
6611 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
6612 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
6613 	txq->txq_free = WM_NTXDESC(txq);
6614 	txq->txq_next = 0;
6615 }
6616 
6617 static void
6618 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
6619     struct wm_txqueue *txq)
6620 {
6621 
6622 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
6623 		device_xname(sc->sc_dev), __func__));
6624 	KASSERT(mutex_owned(txq->txq_lock));
6625 
6626 	if (sc->sc_type < WM_T_82543) {
6627 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
6628 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
6629 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
6630 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
6631 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
6632 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
6633 	} else {
6634 		int qid = wmq->wmq_id;
6635 
6636 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
6637 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
6638 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
6639 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
6640 
6641 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6642 			/*
6643 			 * Don't write TDT before TCTL.EN is set.
6644 			 * See the documentation.
6645 			 */
6646 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
6647 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
6648 			    | TXDCTL_WTHRESH(0));
6649 		else {
6650 			/* XXX should update with AIM? */
6651 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
6652 			if (sc->sc_type >= WM_T_82540) {
6653 				/* should be same */
6654 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
6655 			}
6656 
6657 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
6658 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
6659 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
6660 		}
6661 	}
6662 }
6663 
6664 static void
6665 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
6666 {
6667 	int i;
6668 
6669 	KASSERT(mutex_owned(txq->txq_lock));
6670 
6671 	/* Initialize the transmit job descriptors. */
6672 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
6673 		txq->txq_soft[i].txs_mbuf = NULL;
6674 	txq->txq_sfree = WM_TXQUEUELEN(txq);
6675 	txq->txq_snext = 0;
6676 	txq->txq_sdirty = 0;
6677 }
6678 
6679 static void
6680 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
6681     struct wm_txqueue *txq)
6682 {
6683 
6684 	KASSERT(mutex_owned(txq->txq_lock));
6685 
6686 	/*
6687 	 * Set up some register offsets that are different between
6688 	 * the i82542 and the i82543 and later chips.
6689 	 */
6690 	if (sc->sc_type < WM_T_82543)
6691 		txq->txq_tdt_reg = WMREG_OLD_TDT;
6692 	else
6693 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
6694 
6695 	wm_init_tx_descs(sc, txq);
6696 	wm_init_tx_regs(sc, wmq, txq);
6697 	wm_init_tx_buffer(sc, txq);
6698 
6699 	txq->txq_watchdog = false;
6700 }
6701 
6702 static void
6703 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
6704     struct wm_rxqueue *rxq)
6705 {
6706 
6707 	KASSERT(mutex_owned(rxq->rxq_lock));
6708 
6709 	/*
6710 	 * Initialize the receive descriptor and receive job
6711 	 * descriptor rings.
6712 	 */
6713 	if (sc->sc_type < WM_T_82543) {
6714 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
6715 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
6716 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
6717 		    rxq->rxq_descsize * rxq->rxq_ndesc);
6718 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
6719 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
6720 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
6721 
6722 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
6723 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
6724 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
6725 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
6726 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
6727 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
6728 	} else {
6729 		int qid = wmq->wmq_id;
6730 
6731 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
6732 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
6733 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_descsize * rxq->rxq_ndesc);
6734 
6735 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6736 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
6737 				panic("%s: MCLBYTES %d unsupported for "
				    "82575 or higher\n", __func__, MCLBYTES);
6738 
6739 			/* Currently only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
6740 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
6741 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
6742 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
6743 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
6744 			    | RXDCTL_WTHRESH(1));
6745 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
6746 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
6747 		} else {
6748 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
6749 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
6750 			/* XXX should update with AIM? */
6751 			CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
6752 			/* MUST be same */
6753 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
6754 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
6755 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
6756 		}
6757 	}
6758 }
6759 
6760 static int
6761 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
6762 {
6763 	struct wm_rxsoft *rxs;
6764 	int error, i;
6765 
6766 	KASSERT(mutex_owned(rxq->rxq_lock));
6767 
6768 	for (i = 0; i < rxq->rxq_ndesc; i++) {
6769 		rxs = &rxq->rxq_soft[i];
6770 		if (rxs->rxs_mbuf == NULL) {
6771 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
6772 				log(LOG_ERR, "%s: unable to allocate or map "
6773 				    "rx buffer %d, error = %d\n",
6774 				    device_xname(sc->sc_dev), i, error);
6775 				/*
6776 				 * XXX Should attempt to run with fewer receive
6777 				 * XXX buffers instead of just failing.
6778 				 */
6779 				wm_rxdrain(rxq);
6780 				return ENOMEM;
6781 			}
6782 		} else {
6783 			/*
6784 			 * For 82575 and 82576, the RX descriptors must be
6785 			 * initialized after the setting of RCTL.EN in
6786 			 * wm_set_filter()
6787 			 */
6788 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
6789 				wm_init_rxdesc(rxq, i);
6790 		}
6791 	}
6792 	rxq->rxq_ptr = 0;
6793 	rxq->rxq_discard = 0;
6794 	WM_RXCHAIN_RESET(rxq);
6795 
6796 	return 0;
6797 }
6798 
6799 static int
6800 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
6801     struct wm_rxqueue *rxq)
6802 {
6803 
6804 	KASSERT(mutex_owned(rxq->rxq_lock));
6805 
6806 	/*
6807 	 * Set up some register offsets that are different between
6808 	 * the i82542 and the i82543 and later chips.
6809 	 */
6810 	if (sc->sc_type < WM_T_82543)
6811 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
6812 	else
6813 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
6814 
6815 	wm_init_rx_regs(sc, wmq, rxq);
6816 	return wm_init_rx_buffer(sc, rxq);
6817 }
6818 
6819 /*
6820  * wm_init_txrx_queues:
6821  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
6822  */
6823 static int
6824 wm_init_txrx_queues(struct wm_softc *sc)
6825 {
6826 	int i, error = 0;
6827 
6828 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
6829 		device_xname(sc->sc_dev), __func__));
6830 
6831 	for (i = 0; i < sc->sc_nqueues; i++) {
6832 		struct wm_queue *wmq = &sc->sc_queue[i];
6833 		struct wm_txqueue *txq = &wmq->wmq_txq;
6834 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
6835 
6836 		/*
6837 		 * TODO
6838 		 * Currently, a constant value is used instead of AIM.
6839 		 * Furthermore, the interrupt interval for multiqueue (which
6840 		 * uses polling mode) is less than the default value.
6841 		 * More tuning and AIM are required.
6842 		 */
6843 		if (wm_is_using_multiqueue(sc))
6844 			wmq->wmq_itr = 50;
6845 		else
6846 			wmq->wmq_itr = sc->sc_itr_init;
6847 		wmq->wmq_set_itr = true;
6848 
6849 		mutex_enter(txq->txq_lock);
6850 		wm_init_tx_queue(sc, wmq, txq);
6851 		mutex_exit(txq->txq_lock);
6852 
6853 		mutex_enter(rxq->rxq_lock);
6854 		error = wm_init_rx_queue(sc, wmq, rxq);
6855 		mutex_exit(rxq->rxq_lock);
6856 		if (error)
6857 			break;
6858 	}
6859 
6860 	return error;
6861 }
6862 
6863 /*
6864  * wm_tx_offload:
6865  *
6866  *	Set up TCP/IP checksumming parameters for the
6867  *	specified packet.
6868  */
6869 static int
6870 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
6871     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
6872 {
6873 	struct mbuf *m0 = txs->txs_mbuf;
6874 	struct livengood_tcpip_ctxdesc *t;
6875 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
6876 	uint32_t ipcse;
6877 	struct ether_header *eh;
6878 	int offset, iphl;
6879 	uint8_t fields;
6880 
6881 	/*
6882 	 * XXX It would be nice if the mbuf pkthdr had offset
6883 	 * fields for the protocol headers.
6884 	 */
6885 
6886 	eh = mtod(m0, struct ether_header *);
6887 	switch (htons(eh->ether_type)) {
6888 	case ETHERTYPE_IP:
6889 	case ETHERTYPE_IPV6:
6890 		offset = ETHER_HDR_LEN;
6891 		break;
6892 
6893 	case ETHERTYPE_VLAN:
6894 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
6895 		break;
6896 
6897 	default:
6898 		/*
6899 		 * Don't support this protocol or encapsulation.
6900 		 */
6901 		*fieldsp = 0;
6902 		*cmdp = 0;
6903 		return 0;
6904 	}
6905 
6906 	if ((m0->m_pkthdr.csum_flags &
6907 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
6908 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
6909 	} else {
6910 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
6911 	}
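	/*
	 * Note: ipcse is the inclusive offset of the last byte of the IP
	 * header.  For example, an untagged IPv4 frame with a 20-byte IP
	 * header has offset = 14 and iphl = 20, so ipcse = 33.
	 */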
6912 	ipcse = offset + iphl - 1;
6913 
6914 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
6915 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
6916 	seg = 0;
6917 	fields = 0;
6918 
6919 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
6920 		int hlen = offset + iphl;
6921 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
6922 
6923 		if (__predict_false(m0->m_len <
6924 				    (hlen + sizeof(struct tcphdr)))) {
6925 			/*
6926 			 * TCP/IP headers are not in the first mbuf; we need
6927 			 * to do this the slow and painful way.  Let's just
6928 			 * hope this doesn't happen very often.
6929 			 */
6930 			struct tcphdr th;
6931 
6932 			WM_Q_EVCNT_INCR(txq, txtsopain);
6933 
6934 			m_copydata(m0, hlen, sizeof(th), &th);
6935 			if (v4) {
6936 				struct ip ip;
6937 
6938 				m_copydata(m0, offset, sizeof(ip), &ip);
6939 				ip.ip_len = 0;
6940 				m_copyback(m0,
6941 				    offset + offsetof(struct ip, ip_len),
6942 				    sizeof(ip.ip_len), &ip.ip_len);
6943 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
6944 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
6945 			} else {
6946 				struct ip6_hdr ip6;
6947 
6948 				m_copydata(m0, offset, sizeof(ip6), &ip6);
6949 				ip6.ip6_plen = 0;
6950 				m_copyback(m0,
6951 				    offset + offsetof(struct ip6_hdr, ip6_plen),
6952 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
6953 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
6954 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
6955 			}
6956 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
6957 			    sizeof(th.th_sum), &th.th_sum);
6958 
6959 			hlen += th.th_off << 2;
6960 		} else {
6961 			/*
6962 			 * TCP/IP headers are in the first mbuf; we can do
6963 			 * this the easy way.
6964 			 */
6965 			struct tcphdr *th;
6966 
6967 			if (v4) {
6968 				struct ip *ip =
6969 				    (void *)(mtod(m0, char *) + offset);
6970 				th = (void *)(mtod(m0, char *) + hlen);
6971 
6972 				ip->ip_len = 0;
6973 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
6974 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
6975 			} else {
6976 				struct ip6_hdr *ip6 =
6977 				    (void *)(mtod(m0, char *) + offset);
6978 				th = (void *)(mtod(m0, char *) + hlen);
6979 
6980 				ip6->ip6_plen = 0;
6981 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
6982 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
6983 			}
6984 			hlen += th->th_off << 2;
6985 		}
6986 
6987 		if (v4) {
6988 			WM_Q_EVCNT_INCR(txq, txtso);
6989 			cmdlen |= WTX_TCPIP_CMD_IP;
6990 		} else {
6991 			WM_Q_EVCNT_INCR(txq, txtso6);
6992 			ipcse = 0;
6993 		}
6994 		cmd |= WTX_TCPIP_CMD_TSE;
6995 		cmdlen |= WTX_TCPIP_CMD_TSE |
6996 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
6997 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
6998 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
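		/*
		 * At this point hlen covers the Ethernet, IP and TCP
		 * headers, so the payload length stored in cmdlen above
		 * counts only the TCP payload that will be segmented into
		 * segsz-sized chunks.
		 */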
6999 	}
7000 
7001 	/*
7002 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
7003 	 * offload feature, if we load the context descriptor, we
7004 	 * MUST provide valid values for IPCSS and TUCSS fields.
7005 	 */
7006 
7007 	ipcs = WTX_TCPIP_IPCSS(offset) |
7008 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
7009 	    WTX_TCPIP_IPCSE(ipcse);
7010 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
7011 		WM_Q_EVCNT_INCR(txq, txipsum);
7012 		fields |= WTX_IXSM;
7013 	}
7014 
7015 	offset += iphl;
7016 
7017 	if (m0->m_pkthdr.csum_flags &
7018 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
7019 		WM_Q_EVCNT_INCR(txq, txtusum);
7020 		fields |= WTX_TXSM;
7021 		tucs = WTX_TCPIP_TUCSS(offset) |
7022 		    WTX_TCPIP_TUCSO(offset +
7023 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
7024 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
7025 	} else if ((m0->m_pkthdr.csum_flags &
7026 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
7027 		WM_Q_EVCNT_INCR(txq, txtusum6);
7028 		fields |= WTX_TXSM;
7029 		tucs = WTX_TCPIP_TUCSS(offset) |
7030 		    WTX_TCPIP_TUCSO(offset +
7031 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
7032 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
7033 	} else {
7034 		/* Just initialize it to a valid TCP context. */
7035 		tucs = WTX_TCPIP_TUCSS(offset) |
7036 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
7037 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
7038 	}
7039 
7040 	/*
7041 	 * We don't have to write a context descriptor for every packet,
7042 	 * except on the 82574: it requires a context descriptor for
7043 	 * every packet when two descriptor queues are used.
7044 	 * Writing a context descriptor for every packet adds overhead,
7045 	 * but it does not cause problems.
7046 	 */
7047 	/* Fill in the context descriptor. */
7048 	t = (struct livengood_tcpip_ctxdesc *)
7049 	    &txq->txq_descs[txq->txq_next];
7050 	t->tcpip_ipcs = htole32(ipcs);
7051 	t->tcpip_tucs = htole32(tucs);
7052 	t->tcpip_cmdlen = htole32(cmdlen);
7053 	t->tcpip_seg = htole32(seg);
7054 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
7055 
7056 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
7057 	txs->txs_ndesc++;
7058 
7059 	*cmdp = cmd;
7060 	*fieldsp = fields;
7061 
7062 	return 0;
7063 }
7064 
7065 static inline int
7066 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
7067 {
7068 	struct wm_softc *sc = ifp->if_softc;
7069 	u_int cpuid = cpu_index(curcpu());
7070 
7071 	/*
7072 	 * Currently, a simple distribution strategy.
7073 	 * TODO:
7074 	 * Distribute by flowid (RSS hash value).
7075 	 */
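	/*
	 * For example, with sc_nqueues = 4, ncpu = 8 and
	 * sc_affinity_offset = 2, a packet sent from CPU 3 is mapped to
	 * queue (3 + 8 - 2) % 4 = 1.
	 */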
7076 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
7077 }
7078 
7079 /*
7080  * wm_start:		[ifnet interface function]
7081  *
7082  *	Start packet transmission on the interface.
7083  */
7084 static void
7085 wm_start(struct ifnet *ifp)
7086 {
7087 	struct wm_softc *sc = ifp->if_softc;
7088 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7089 
7090 #ifdef WM_MPSAFE
7091 	KASSERT(if_is_mpsafe(ifp));
7092 #endif
7093 	/*
7094 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
7095 	 */
7096 
7097 	mutex_enter(txq->txq_lock);
7098 	if (!txq->txq_stopping)
7099 		wm_start_locked(ifp);
7100 	mutex_exit(txq->txq_lock);
7101 }
7102 
7103 static void
7104 wm_start_locked(struct ifnet *ifp)
7105 {
7106 	struct wm_softc *sc = ifp->if_softc;
7107 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7108 
7109 	wm_send_common_locked(ifp, txq, false);
7110 }
7111 
7112 static int
7113 wm_transmit(struct ifnet *ifp, struct mbuf *m)
7114 {
7115 	int qid;
7116 	struct wm_softc *sc = ifp->if_softc;
7117 	struct wm_txqueue *txq;
7118 
7119 	qid = wm_select_txqueue(ifp, m);
7120 	txq = &sc->sc_queue[qid].wmq_txq;
7121 
7122 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
7123 		m_freem(m);
7124 		WM_Q_EVCNT_INCR(txq, txdrop);
7125 		return ENOBUFS;
7126 	}
7127 
7128 	/*
7129 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
7130 	 */
7131 	ifp->if_obytes += m->m_pkthdr.len;
7132 	if (m->m_flags & M_MCAST)
7133 		ifp->if_omcasts++;
7134 
7135 	if (mutex_tryenter(txq->txq_lock)) {
7136 		if (!txq->txq_stopping)
7137 			wm_transmit_locked(ifp, txq);
7138 		mutex_exit(txq->txq_lock);
7139 	}
7140 
7141 	return 0;
7142 }
7143 
7144 static void
7145 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
7146 {
7147 
7148 	wm_send_common_locked(ifp, txq, true);
7149 }
7150 
7151 static void
7152 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
7153     bool is_transmit)
7154 {
7155 	struct wm_softc *sc = ifp->if_softc;
7156 	struct mbuf *m0;
7157 	struct wm_txsoft *txs;
7158 	bus_dmamap_t dmamap;
7159 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
7160 	bus_addr_t curaddr;
7161 	bus_size_t seglen, curlen;
7162 	uint32_t cksumcmd;
7163 	uint8_t cksumfields;
7164 
7165 	KASSERT(mutex_owned(txq->txq_lock));
7166 
7167 	if ((ifp->if_flags & IFF_RUNNING) == 0)
7168 		return;
7169 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
7170 		return;
7171 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
7172 		return;
7173 
7174 	/* Remember the previous number of free descriptors. */
7175 	ofree = txq->txq_free;
7176 
7177 	/*
7178 	 * Loop through the send queue, setting up transmit descriptors
7179 	 * until we drain the queue, or use up all available transmit
7180 	 * descriptors.
7181 	 */
7182 	for (;;) {
7183 		m0 = NULL;
7184 
7185 		/* Get a work queue entry. */
7186 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
7187 			wm_txeof(txq, UINT_MAX);
7188 			if (txq->txq_sfree == 0) {
7189 				DPRINTF(WM_DEBUG_TX,
7190 				    ("%s: TX: no free job descriptors\n",
7191 					device_xname(sc->sc_dev)));
7192 				WM_Q_EVCNT_INCR(txq, txsstall);
7193 				break;
7194 			}
7195 		}
7196 
7197 		/* Grab a packet off the queue. */
7198 		if (is_transmit)
7199 			m0 = pcq_get(txq->txq_interq);
7200 		else
7201 			IFQ_DEQUEUE(&ifp->if_snd, m0);
7202 		if (m0 == NULL)
7203 			break;
7204 
7205 		DPRINTF(WM_DEBUG_TX,
7206 		    ("%s: TX: have packet to transmit: %p\n",
7207 		    device_xname(sc->sc_dev), m0));
7208 
7209 		txs = &txq->txq_soft[txq->txq_snext];
7210 		dmamap = txs->txs_dmamap;
7211 
7212 		use_tso = (m0->m_pkthdr.csum_flags &
7213 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
7214 
7215 		/*
7216 		 * So says the Linux driver:
7217 		 * The controller does a simple calculation to make sure
7218 		 * there is enough room in the FIFO before initiating the
7219 		 * DMA for each buffer.  The calc is:
7220 		 *	4 = ceil(buffer len / MSS)
7221 		 * To make sure we don't overrun the FIFO, adjust the max
7222 		 * buffer len if the MSS drops.
7223 		 */
7224 		dmamap->dm_maxsegsz =
7225 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
7226 		    ? m0->m_pkthdr.segsz << 2
7227 		    : WTX_MAX_LEN;
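		/*
		 * For example, with TSO and an MSS of 1460, each DMA
		 * segment is capped at 4 * 1460 = 5840 bytes; without TSO
		 * (or with a very large MSS) the limit is WTX_MAX_LEN.
		 */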
7228 
7229 		/*
7230 		 * Load the DMA map.  If this fails, the packet either
7231 		 * didn't fit in the allotted number of segments, or we
7232 		 * were short on resources.  For the too-many-segments
7233 		 * case, we simply report an error and drop the packet,
7234 		 * since we can't sanely copy a jumbo packet to a single
7235 		 * buffer.
7236 		 */
7237 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
7238 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
7239 		if (error) {
7240 			if (error == EFBIG) {
7241 				WM_Q_EVCNT_INCR(txq, txdrop);
7242 				log(LOG_ERR, "%s: Tx packet consumes too many "
7243 				    "DMA segments, dropping...\n",
7244 				    device_xname(sc->sc_dev));
7245 				wm_dump_mbuf_chain(sc, m0);
7246 				m_freem(m0);
7247 				continue;
7248 			}
7249 			/* Short on resources, just stop for now. */
7250 			DPRINTF(WM_DEBUG_TX,
7251 			    ("%s: TX: dmamap load failed: %d\n",
7252 			    device_xname(sc->sc_dev), error));
7253 			break;
7254 		}
7255 
7256 		segs_needed = dmamap->dm_nsegs;
7257 		if (use_tso) {
7258 			/* For sentinel descriptor; see below. */
7259 			segs_needed++;
7260 		}
7261 
7262 		/*
7263 		 * Ensure we have enough descriptors free to describe
7264 		 * the packet.  Note, we always reserve one descriptor
7265 		 * at the end of the ring due to the semantics of the
7266 		 * TDT register, plus one more in the event we need
7267 		 * to load offload context.
7268 		 */
7269 		if (segs_needed > txq->txq_free - 2) {
7270 			/*
7271 			 * Not enough free descriptors to transmit this
7272 			 * packet.  We haven't committed anything yet,
7273 			 * so just unload the DMA map, put the packet
7274 			 * back on the queue, and punt.  Notify the upper
7275 			 * layer that there are no more slots left.
7276 			 */
7277 			DPRINTF(WM_DEBUG_TX,
7278 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
7279 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
7280 			    segs_needed, txq->txq_free - 1));
7281 			if (!is_transmit)
7282 				ifp->if_flags |= IFF_OACTIVE;
7283 			txq->txq_flags |= WM_TXQ_NO_SPACE;
7284 			bus_dmamap_unload(sc->sc_dmat, dmamap);
7285 			WM_Q_EVCNT_INCR(txq, txdstall);
7286 			break;
7287 		}
7288 
7289 		/*
7290 		 * Check for 82547 Tx FIFO bug.  We need to do this
7291 		 * once we know we can transmit the packet, since we
7292 		 * do some internal FIFO space accounting here.
7293 		 */
7294 		if (sc->sc_type == WM_T_82547 &&
7295 		    wm_82547_txfifo_bugchk(sc, m0)) {
7296 			DPRINTF(WM_DEBUG_TX,
7297 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
7298 			    device_xname(sc->sc_dev)));
7299 			if (!is_transmit)
7300 				ifp->if_flags |= IFF_OACTIVE;
7301 			txq->txq_flags |= WM_TXQ_NO_SPACE;
7302 			bus_dmamap_unload(sc->sc_dmat, dmamap);
7303 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
7304 			break;
7305 		}
7306 
7307 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
7308 
7309 		DPRINTF(WM_DEBUG_TX,
7310 		    ("%s: TX: packet has %d (%d) DMA segments\n",
7311 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
7312 
7313 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
7314 
7315 		/*
7316 		 * Store a pointer to the packet so that we can free it
7317 		 * later.
7318 		 *
7319 		 * Initially, we consider the number of descriptors the
7320 		 * packet uses the number of DMA segments.  This may be
7321 		 * incremented by 1 if we do checksum offload (a descriptor
7322 		 * is used to set the checksum context).
7323 		 */
7324 		txs->txs_mbuf = m0;
7325 		txs->txs_firstdesc = txq->txq_next;
7326 		txs->txs_ndesc = segs_needed;
7327 
7328 		/* Set up offload parameters for this packet. */
7329 		if (m0->m_pkthdr.csum_flags &
7330 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
7331 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7332 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
7333 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
7334 					  &cksumfields) != 0) {
7335 				/* Error message already displayed. */
7336 				bus_dmamap_unload(sc->sc_dmat, dmamap);
7337 				continue;
7338 			}
7339 		} else {
7340 			cksumcmd = 0;
7341 			cksumfields = 0;
7342 		}
7343 
7344 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
7345 
7346 		/* Sync the DMA map. */
7347 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
7348 		    BUS_DMASYNC_PREWRITE);
7349 
7350 		/* Initialize the transmit descriptor. */
7351 		for (nexttx = txq->txq_next, seg = 0;
7352 		     seg < dmamap->dm_nsegs; seg++) {
7353 			for (seglen = dmamap->dm_segs[seg].ds_len,
7354 			     curaddr = dmamap->dm_segs[seg].ds_addr;
7355 			     seglen != 0;
7356 			     curaddr += curlen, seglen -= curlen,
7357 			     nexttx = WM_NEXTTX(txq, nexttx)) {
7358 				curlen = seglen;
7359 
7360 				/*
7361 				 * So says the Linux driver:
7362 				 * Work around for premature descriptor
7363 				 * write-backs in TSO mode.  Append a
7364 				 * 4-byte sentinel descriptor.
7365 				 */
7366 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
7367 				    curlen > 8)
7368 					curlen -= 4;
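				/*
				 * The 4 bytes trimmed off here come back
				 * around the loop as a short final
				 * descriptor: the sentinel accounted for
				 * in segs_needed above.
				 */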
7369 
7370 				wm_set_dma_addr(
7371 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
7372 				txq->txq_descs[nexttx].wtx_cmdlen
7373 				    = htole32(cksumcmd | curlen);
7374 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
7375 				    = 0;
7376 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
7377 				    = cksumfields;
7378 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
7379 				lasttx = nexttx;
7380 
7381 				DPRINTF(WM_DEBUG_TX,
7382 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
7383 				     "len %#04zx\n",
7384 				    device_xname(sc->sc_dev), nexttx,
7385 				    (uint64_t)curaddr, curlen));
7386 			}
7387 		}
7388 
7389 		KASSERT(lasttx != -1);
7390 
7391 		/*
7392 		 * Set up the command byte on the last descriptor of
7393 		 * the packet.  If we're in the interrupt delay window,
7394 		 * delay the interrupt.
7395 		 */
7396 		txq->txq_descs[lasttx].wtx_cmdlen |=
7397 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
7398 
7399 		/*
7400 		 * If VLANs are enabled and the packet has a VLAN tag, set
7401 		 * up the descriptor to encapsulate the packet for us.
7402 		 *
7403 		 * This is only valid on the last descriptor of the packet.
7404 		 */
7405 		if (vlan_has_tag(m0)) {
7406 			txq->txq_descs[lasttx].wtx_cmdlen |=
7407 			    htole32(WTX_CMD_VLE);
7408 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
7409 			    = htole16(vlan_get_tag(m0));
7410 		}
7411 
7412 		txs->txs_lastdesc = lasttx;
7413 
7414 		DPRINTF(WM_DEBUG_TX,
7415 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
7416 		    device_xname(sc->sc_dev),
7417 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
7418 
7419 		/* Sync the descriptors we're using. */
7420 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
7421 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
7422 
7423 		/* Give the packet to the chip. */
7424 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
7425 
7426 		DPRINTF(WM_DEBUG_TX,
7427 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
7428 
7429 		DPRINTF(WM_DEBUG_TX,
7430 		    ("%s: TX: finished transmitting packet, job %d\n",
7431 		    device_xname(sc->sc_dev), txq->txq_snext));
7432 
7433 		/* Advance the tx pointer. */
7434 		txq->txq_free -= txs->txs_ndesc;
7435 		txq->txq_next = nexttx;
7436 
7437 		txq->txq_sfree--;
7438 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
7439 
7440 		/* Pass the packet to any BPF listeners. */
7441 		bpf_mtap(ifp, m0);
7442 	}
7443 
7444 	if (m0 != NULL) {
7445 		if (!is_transmit)
7446 			ifp->if_flags |= IFF_OACTIVE;
7447 		txq->txq_flags |= WM_TXQ_NO_SPACE;
7448 		WM_Q_EVCNT_INCR(txq, txdrop);
7449 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
7450 			__func__));
7451 		m_freem(m0);
7452 	}
7453 
7454 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
7455 		/* No more slots; notify upper layer. */
7456 		if (!is_transmit)
7457 			ifp->if_flags |= IFF_OACTIVE;
7458 		txq->txq_flags |= WM_TXQ_NO_SPACE;
7459 	}
7460 
7461 	if (txq->txq_free != ofree) {
7462 		/* Set a watchdog timer in case the chip flakes out. */
7463 		txq->txq_lastsent = time_uptime;
7464 		txq->txq_watchdog = true;
7465 	}
7466 }
7467 
7468 /*
7469  * wm_nq_tx_offload:
7470  *
7471  *	Set up TCP/IP checksumming parameters for the
7472  *	specified packet, for NEWQUEUE devices
7473  */
7474 static int
7475 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
7476     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
7477 {
7478 	struct mbuf *m0 = txs->txs_mbuf;
7479 	uint32_t vl_len, mssidx, cmdc;
7480 	struct ether_header *eh;
7481 	int offset, iphl;
7482 
7483 	/*
7484 	 * XXX It would be nice if the mbuf pkthdr had offset
7485 	 * fields for the protocol headers.
7486 	 */
7487 	*cmdlenp = 0;
7488 	*fieldsp = 0;
7489 
7490 	eh = mtod(m0, struct ether_header *);
7491 	switch (htons(eh->ether_type)) {
7492 	case ETHERTYPE_IP:
7493 	case ETHERTYPE_IPV6:
7494 		offset = ETHER_HDR_LEN;
7495 		break;
7496 
7497 	case ETHERTYPE_VLAN:
7498 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
7499 		break;
7500 
7501 	default:
7502 		/* Don't support this protocol or encapsulation. */
7503 		*do_csum = false;
7504 		return 0;
7505 	}
7506 	*do_csum = true;
7507 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
7508 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
7509 
7510 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
7511 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
7512 
7513 	if ((m0->m_pkthdr.csum_flags &
7514 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
7515 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
7516 	} else {
7517 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
7518 	}
7519 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
7520 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
7521 
7522 	if (vlan_has_tag(m0)) {
7523 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
7524 		     << NQTXC_VLLEN_VLAN_SHIFT);
7525 		*cmdlenp |= NQTX_CMD_VLE;
7526 	}
7527 
7528 	mssidx = 0;
7529 
7530 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
7531 		int hlen = offset + iphl;
7532 		int tcp_hlen;
7533 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
7534 
7535 		if (__predict_false(m0->m_len <
7536 				    (hlen + sizeof(struct tcphdr)))) {
7537 			/*
7538 			 * TCP/IP headers are not in the first mbuf; we need
7539 			 * to do this the slow and painful way.  Let's just
7540 			 * hope this doesn't happen very often.
7541 			 */
7542 			struct tcphdr th;
7543 
7544 			WM_Q_EVCNT_INCR(txq, txtsopain);
7545 
7546 			m_copydata(m0, hlen, sizeof(th), &th);
7547 			if (v4) {
7548 				struct ip ip;
7549 
7550 				m_copydata(m0, offset, sizeof(ip), &ip);
7551 				ip.ip_len = 0;
7552 				m_copyback(m0,
7553 				    offset + offsetof(struct ip, ip_len),
7554 				    sizeof(ip.ip_len), &ip.ip_len);
7555 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
7556 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
7557 			} else {
7558 				struct ip6_hdr ip6;
7559 
7560 				m_copydata(m0, offset, sizeof(ip6), &ip6);
7561 				ip6.ip6_plen = 0;
7562 				m_copyback(m0,
7563 				    offset + offsetof(struct ip6_hdr, ip6_plen),
7564 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
7565 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
7566 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
7567 			}
7568 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
7569 			    sizeof(th.th_sum), &th.th_sum);
7570 
7571 			tcp_hlen = th.th_off << 2;
7572 		} else {
7573 			/*
7574 			 * TCP/IP headers are in the first mbuf; we can do
7575 			 * this the easy way.
7576 			 */
7577 			struct tcphdr *th;
7578 
7579 			if (v4) {
7580 				struct ip *ip =
7581 				    (void *)(mtod(m0, char *) + offset);
7582 				th = (void *)(mtod(m0, char *) + hlen);
7583 
7584 				ip->ip_len = 0;
7585 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
7586 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
7587 			} else {
7588 				struct ip6_hdr *ip6 =
7589 				    (void *)(mtod(m0, char *) + offset);
7590 				th = (void *)(mtod(m0, char *) + hlen);
7591 
7592 				ip6->ip6_plen = 0;
7593 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
7594 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
7595 			}
7596 			tcp_hlen = th->th_off << 2;
7597 		}
7598 		hlen += tcp_hlen;
7599 		*cmdlenp |= NQTX_CMD_TSE;
7600 
7601 		if (v4) {
7602 			WM_Q_EVCNT_INCR(txq, txtso);
7603 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
7604 		} else {
7605 			WM_Q_EVCNT_INCR(txq, txtso6);
7606 			*fieldsp |= NQTXD_FIELDS_TUXSM;
7607 		}
7608 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
7609 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
7610 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
7611 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
7612 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
7613 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
7614 	} else {
7615 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
7616 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
7617 	}
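	/*
	 * PAYLEN therefore counts only the TCP payload for TSO (the total
	 * length minus the Ethernet/IP/TCP headers), and the whole frame
	 * otherwise.
	 */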
7618 
7619 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
7620 		*fieldsp |= NQTXD_FIELDS_IXSM;
7621 		cmdc |= NQTXC_CMD_IP4;
7622 	}
7623 
7624 	if (m0->m_pkthdr.csum_flags &
7625 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
7626 		WM_Q_EVCNT_INCR(txq, txtusum);
7627 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
7628 			cmdc |= NQTXC_CMD_TCP;
7629 		} else {
7630 			cmdc |= NQTXC_CMD_UDP;
7631 		}
7632 		cmdc |= NQTXC_CMD_IP4;
7633 		*fieldsp |= NQTXD_FIELDS_TUXSM;
7634 	}
7635 	if (m0->m_pkthdr.csum_flags &
7636 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
7637 		WM_Q_EVCNT_INCR(txq, txtusum6);
7638 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
7639 			cmdc |= NQTXC_CMD_TCP;
7640 		} else {
7641 			cmdc |= NQTXC_CMD_UDP;
7642 		}
7643 		cmdc |= NQTXC_CMD_IP6;
7644 		*fieldsp |= NQTXD_FIELDS_TUXSM;
7645 	}
7646 
7647 	/*
7648 	 * We don't have to write a context descriptor for every packet on
7649 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
7650 	 * I210 and I211. It is enough to write one per Tx queue for these
7651 	 * controllers.
7652 	 * Writing a context descriptor for every packet adds overhead,
7653 	 * but it does not cause problems.
7654 	 */
7655 	/* Fill in the context descriptor. */
7656 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
7657 	    htole32(vl_len);
7658 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
7659 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
7660 	    htole32(cmdc);
7661 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
7662 	    htole32(mssidx);
7663 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
7664 	DPRINTF(WM_DEBUG_TX,
7665 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
7666 	    txq->txq_next, 0, vl_len));
7667 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
7668 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
7669 	txs->txs_ndesc++;
7670 	return 0;
7671 }
7672 
7673 /*
7674  * wm_nq_start:		[ifnet interface function]
7675  *
7676  *	Start packet transmission on the interface for NEWQUEUE devices
7677  */
7678 static void
7679 wm_nq_start(struct ifnet *ifp)
7680 {
7681 	struct wm_softc *sc = ifp->if_softc;
7682 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7683 
7684 #ifdef WM_MPSAFE
7685 	KASSERT(if_is_mpsafe(ifp));
7686 #endif
7687 	/*
7688 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
7689 	 */
7690 
7691 	mutex_enter(txq->txq_lock);
7692 	if (!txq->txq_stopping)
7693 		wm_nq_start_locked(ifp);
7694 	mutex_exit(txq->txq_lock);
7695 }
7696 
7697 static void
7698 wm_nq_start_locked(struct ifnet *ifp)
7699 {
7700 	struct wm_softc *sc = ifp->if_softc;
7701 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7702 
7703 	wm_nq_send_common_locked(ifp, txq, false);
7704 }
7705 
7706 static int
7707 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
7708 {
7709 	int qid;
7710 	struct wm_softc *sc = ifp->if_softc;
7711 	struct wm_txqueue *txq;
7712 
7713 	qid = wm_select_txqueue(ifp, m);
7714 	txq = &sc->sc_queue[qid].wmq_txq;
7715 
7716 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
7717 		m_freem(m);
7718 		WM_Q_EVCNT_INCR(txq, txdrop);
7719 		return ENOBUFS;
7720 	}
7721 
7722 	/*
7723 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
7724 	 */
7725 	ifp->if_obytes += m->m_pkthdr.len;
7726 	if (m->m_flags & M_MCAST)
7727 		ifp->if_omcasts++;
7728 
7729 	/*
7730 	 * There are two situations in which this mutex_tryenter() can
7731 	 * fail at run time:
7732 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
7733 	 *     (2) contention with the deferred if_start softint
7734 	 *         (wm_handle_queue())
7735 	 * In either case, the last packet enqueued to txq->txq_interq is
7736 	 * eventually dequeued by wm_deferred_start_locked(), so it does
7737 	 * not get stuck.
7738 	 */
7739 	if (mutex_tryenter(txq->txq_lock)) {
7740 		if (!txq->txq_stopping)
7741 			wm_nq_transmit_locked(ifp, txq);
7742 		mutex_exit(txq->txq_lock);
7743 	}
7744 
7745 	return 0;
7746 }
7747 
7748 static void
7749 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
7750 {
7751 
7752 	wm_nq_send_common_locked(ifp, txq, true);
7753 }
7754 
7755 static void
7756 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
7757     bool is_transmit)
7758 {
7759 	struct wm_softc *sc = ifp->if_softc;
7760 	struct mbuf *m0;
7761 	struct wm_txsoft *txs;
7762 	bus_dmamap_t dmamap;
7763 	int error, nexttx, lasttx = -1, seg, segs_needed;
7764 	bool do_csum, sent;
7765 
7766 	KASSERT(mutex_owned(txq->txq_lock));
7767 
7768 	if ((ifp->if_flags & IFF_RUNNING) == 0)
7769 		return;
7770 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
7771 		return;
7772 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
7773 		return;
7774 
7775 	sent = false;
7776 
7777 	/*
7778 	 * Loop through the send queue, setting up transmit descriptors
7779 	 * until we drain the queue, or use up all available transmit
7780 	 * descriptors.
7781 	 */
7782 	for (;;) {
7783 		m0 = NULL;
7784 
7785 		/* Get a work queue entry. */
7786 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
7787 			wm_txeof(txq, UINT_MAX);
7788 			if (txq->txq_sfree == 0) {
7789 				DPRINTF(WM_DEBUG_TX,
7790 				    ("%s: TX: no free job descriptors\n",
7791 					device_xname(sc->sc_dev)));
7792 				WM_Q_EVCNT_INCR(txq, txsstall);
7793 				break;
7794 			}
7795 		}
7796 
7797 		/* Grab a packet off the queue. */
7798 		if (is_transmit)
7799 			m0 = pcq_get(txq->txq_interq);
7800 		else
7801 			IFQ_DEQUEUE(&ifp->if_snd, m0);
7802 		if (m0 == NULL)
7803 			break;
7804 
7805 		DPRINTF(WM_DEBUG_TX,
7806 		    ("%s: TX: have packet to transmit: %p\n",
7807 		    device_xname(sc->sc_dev), m0));
7808 
7809 		txs = &txq->txq_soft[txq->txq_snext];
7810 		dmamap = txs->txs_dmamap;
7811 
7812 		/*
7813 		 * Load the DMA map.  If this fails, the packet either
7814 		 * didn't fit in the allotted number of segments, or we
7815 		 * were short on resources.  For the too-many-segments
7816 		 * case, we simply report an error and drop the packet,
7817 		 * since we can't sanely copy a jumbo packet to a single
7818 		 * buffer.
7819 		 */
7820 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
7821 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
7822 		if (error) {
7823 			if (error == EFBIG) {
7824 				WM_Q_EVCNT_INCR(txq, txdrop);
7825 				log(LOG_ERR, "%s: Tx packet consumes too many "
7826 				    "DMA segments, dropping...\n",
7827 				    device_xname(sc->sc_dev));
7828 				wm_dump_mbuf_chain(sc, m0);
7829 				m_freem(m0);
7830 				continue;
7831 			}
7832 			/* Short on resources, just stop for now. */
7833 			DPRINTF(WM_DEBUG_TX,
7834 			    ("%s: TX: dmamap load failed: %d\n",
7835 			    device_xname(sc->sc_dev), error));
7836 			break;
7837 		}
7838 
7839 		segs_needed = dmamap->dm_nsegs;
7840 
7841 		/*
7842 		 * Ensure we have enough descriptors free to describe
7843 		 * the packet.  Note, we always reserve one descriptor
7844 		 * at the end of the ring due to the semantics of the
7845 		 * TDT register, plus one more in the event we need
7846 		 * to load offload context.
7847 		 */
7848 		if (segs_needed > txq->txq_free - 2) {
7849 			/*
7850 			 * Not enough free descriptors to transmit this
7851 			 * packet.  We haven't committed anything yet,
7852 			 * so just unload the DMA map, put the packet
7853 			 * back on the queue, and punt.  Notify the upper
7854 			 * layer that there are no more slots left.
7855 			 */
7856 			DPRINTF(WM_DEBUG_TX,
7857 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
7858 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
7859 			    segs_needed, txq->txq_free - 1));
7860 			if (!is_transmit)
7861 				ifp->if_flags |= IFF_OACTIVE;
7862 			txq->txq_flags |= WM_TXQ_NO_SPACE;
7863 			bus_dmamap_unload(sc->sc_dmat, dmamap);
7864 			WM_Q_EVCNT_INCR(txq, txdstall);
7865 			break;
7866 		}
7867 
7868 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
7869 
7870 		DPRINTF(WM_DEBUG_TX,
7871 		    ("%s: TX: packet has %d (%d) DMA segments\n",
7872 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
7873 
7874 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
7875 
7876 		/*
7877 		 * Store a pointer to the packet so that we can free it
7878 		 * later.
7879 		 *
7880 		 * Initially, we consider the number of descriptors the
7881 		 * packet uses the number of DMA segments.  This may be
7882 		 * incremented by 1 if we do checksum offload (a descriptor
7883 		 * is used to set the checksum context).
7884 		 */
7885 		txs->txs_mbuf = m0;
7886 		txs->txs_firstdesc = txq->txq_next;
7887 		txs->txs_ndesc = segs_needed;
7888 
7889 		/* Set up offload parameters for this packet. */
7890 		uint32_t cmdlen, fields, dcmdlen;
7891 		if (m0->m_pkthdr.csum_flags &
7892 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
7893 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7894 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
7895 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
7896 			    &do_csum) != 0) {
7897 				/* Error message already displayed. */
7898 				bus_dmamap_unload(sc->sc_dmat, dmamap);
7899 				continue;
7900 			}
7901 		} else {
7902 			do_csum = false;
7903 			cmdlen = 0;
7904 			fields = 0;
7905 		}
7906 
7907 		/* Sync the DMA map. */
7908 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
7909 		    BUS_DMASYNC_PREWRITE);
7910 
7911 		/* Initialize the first transmit descriptor. */
7912 		nexttx = txq->txq_next;
7913 		if (!do_csum) {
7914 			/* setup a legacy descriptor */
7915 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
7916 			    dmamap->dm_segs[0].ds_addr);
7917 			txq->txq_descs[nexttx].wtx_cmdlen =
7918 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
7919 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
7920 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
7921 			if (vlan_has_tag(m0)) {
7922 				txq->txq_descs[nexttx].wtx_cmdlen |=
7923 				    htole32(WTX_CMD_VLE);
7924 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
7925 				    htole16(vlan_get_tag(m0));
7926 			} else {
7927 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
7928 			}
7929 			dcmdlen = 0;
7930 		} else {
7931 			/* setup an advanced data descriptor */
7932 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
7933 			    htole64(dmamap->dm_segs[0].ds_addr);
7934 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
7935 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
7936 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen );
7937 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
7938 			    htole32(fields);
7939 			DPRINTF(WM_DEBUG_TX,
7940 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
7941 			    device_xname(sc->sc_dev), nexttx,
7942 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
7943 			DPRINTF(WM_DEBUG_TX,
7944 			    ("\t 0x%08x%08x\n", fields,
7945 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
7946 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
7947 		}
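		/*
		 * dcmdlen is OR'd into each following descriptor: zero
		 * keeps them in the legacy layout, while NQTX_DTYP_D |
		 * NQTX_CMD_DEXT marks them as advanced data descriptors
		 * matching the first one.
		 */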
7948 
7949 		lasttx = nexttx;
7950 		nexttx = WM_NEXTTX(txq, nexttx);
7951 		/*
7952 		 * Fill in the next descriptors.  The legacy and advanced
7953 		 * formats are the same from here on.
7954 		 */
7955 		for (seg = 1; seg < dmamap->dm_nsegs;
7956 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
7957 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
7958 			    htole64(dmamap->dm_segs[seg].ds_addr);
7959 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
7960 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
7961 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
7962 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
7963 			lasttx = nexttx;
7964 
7965 			DPRINTF(WM_DEBUG_TX,
7966 			    ("%s: TX: desc %d: %#" PRIx64 ", "
7967 			     "len %#04zx\n",
7968 			    device_xname(sc->sc_dev), nexttx,
7969 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
7970 			    dmamap->dm_segs[seg].ds_len));
7971 		}
7972 
7973 		KASSERT(lasttx != -1);
7974 
7975 		/*
7976 		 * Set up the command byte on the last descriptor of
7977 		 * the packet.  If we're in the interrupt delay window,
7978 		 * delay the interrupt.
7979 		 */
7980 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
7981 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
7982 		txq->txq_descs[lasttx].wtx_cmdlen |=
7983 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
7984 
7985 		txs->txs_lastdesc = lasttx;
7986 
7987 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
7988 		    device_xname(sc->sc_dev),
7989 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
7990 
7991 		/* Sync the descriptors we're using. */
7992 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
7993 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
7994 
7995 		/* Give the packet to the chip. */
7996 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
7997 		sent = true;
7998 
7999 		DPRINTF(WM_DEBUG_TX,
8000 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
8001 
8002 		DPRINTF(WM_DEBUG_TX,
8003 		    ("%s: TX: finished transmitting packet, job %d\n",
8004 		    device_xname(sc->sc_dev), txq->txq_snext));
8005 
8006 		/* Advance the tx pointer. */
8007 		txq->txq_free -= txs->txs_ndesc;
8008 		txq->txq_next = nexttx;
8009 
8010 		txq->txq_sfree--;
8011 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
8012 
8013 		/* Pass the packet to any BPF listeners. */
8014 		bpf_mtap(ifp, m0);
8015 	}
8016 
8017 	if (m0 != NULL) {
8018 		if (!is_transmit)
8019 			ifp->if_flags |= IFF_OACTIVE;
8020 		txq->txq_flags |= WM_TXQ_NO_SPACE;
8021 		WM_Q_EVCNT_INCR(txq, txdrop);
8022 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
8023 			__func__));
8024 		m_freem(m0);
8025 	}
8026 
8027 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
8028 		/* No more slots; notify upper layer. */
8029 		if (!is_transmit)
8030 			ifp->if_flags |= IFF_OACTIVE;
8031 		txq->txq_flags |= WM_TXQ_NO_SPACE;
8032 	}
8033 
8034 	if (sent) {
8035 		/* Set a watchdog timer in case the chip flakes out. */
8036 		txq->txq_lastsent = time_uptime;
8037 		txq->txq_watchdog = true;
8038 	}
8039 }
8040 
8041 static void
8042 wm_deferred_start_locked(struct wm_txqueue *txq)
8043 {
8044 	struct wm_softc *sc = txq->txq_sc;
8045 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8046 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
8047 	int qid = wmq->wmq_id;
8048 
8049 	KASSERT(mutex_owned(txq->txq_lock));
8050 
8051 	if (txq->txq_stopping) {
8052 		mutex_exit(txq->txq_lock);
8053 		return;
8054 	}
8055 
8056 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
8057 		/* XXX needed for ALTQ or single-CPU systems */
8058 		if (qid == 0)
8059 			wm_nq_start_locked(ifp);
8060 		wm_nq_transmit_locked(ifp, txq);
8061 	} else {
8062 		/* XXX needed for ALTQ or single-CPU systems */
8063 		if (qid == 0)
8064 			wm_start_locked(ifp);
8065 		wm_transmit_locked(ifp, txq);
8066 	}
8067 }
8068 
8069 /* Interrupt */
8070 
8071 /*
8072  * wm_txeof:
8073  *
8074  *	Helper; handle transmit interrupts.
8075  */
8076 static bool
8077 wm_txeof(struct wm_txqueue *txq, u_int limit)
8078 {
8079 	struct wm_softc *sc = txq->txq_sc;
8080 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8081 	struct wm_txsoft *txs;
8082 	int count = 0;
8083 	int i;
8084 	uint8_t status;
8085 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
8086 	bool more = false;
8087 
8088 	KASSERT(mutex_owned(txq->txq_lock));
8089 
8090 	if (txq->txq_stopping)
8091 		return false;
8092 
8093 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
8094 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
8095 	if (wmq->wmq_id == 0)
8096 		ifp->if_flags &= ~IFF_OACTIVE;
8097 
8098 	/*
8099 	 * Go through the Tx list and free mbufs for those
8100 	 * frames which have been transmitted.
8101 	 */
8102 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
8103 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
8104 		if (limit-- == 0) {
8105 			more = true;
8106 			DPRINTF(WM_DEBUG_TX,
8107 			    ("%s: TX: loop limited, job %d is not processed\n",
8108 				device_xname(sc->sc_dev), i));
8109 			break;
8110 		}
8111 
8112 		txs = &txq->txq_soft[i];
8113 
8114 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
8115 			device_xname(sc->sc_dev), i));
8116 
8117 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
8118 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
8119 
8120 		status =
8121 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
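		/*
		 * If the descriptor-done bit is not set, the chip is still
		 * transmitting this job; stop scanning here and leave the
		 * descriptor prepared for the next read.
		 */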
8122 		if ((status & WTX_ST_DD) == 0) {
8123 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
8124 			    BUS_DMASYNC_PREREAD);
8125 			break;
8126 		}
8127 
8128 		count++;
8129 		DPRINTF(WM_DEBUG_TX,
8130 		    ("%s: TX: job %d done: descs %d..%d\n",
8131 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
8132 		    txs->txs_lastdesc));
8133 
8134 		/*
8135 		 * XXX We should probably be using the statistics
8136 		 * XXX registers, but I don't know if they exist
8137 		 * XXX on chips before the i82544.
8138 		 */
8139 
8140 #ifdef WM_EVENT_COUNTERS
8141 		if (status & WTX_ST_TU)
8142 			WM_Q_EVCNT_INCR(txq, tu);
8143 #endif /* WM_EVENT_COUNTERS */
8144 
8145 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
8146 			ifp->if_oerrors++;
8147 			if (status & WTX_ST_LC)
8148 				log(LOG_WARNING, "%s: late collision\n",
8149 				    device_xname(sc->sc_dev));
8150 			else if (status & WTX_ST_EC) {
8151 				ifp->if_collisions += 16;
8152 				log(LOG_WARNING, "%s: excessive collisions\n",
8153 				    device_xname(sc->sc_dev));
8154 			}
8155 		} else
8156 			ifp->if_opackets++;
8157 
8158 		txq->txq_packets++;
8159 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
8160 
8161 		txq->txq_free += txs->txs_ndesc;
8162 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
8163 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
8164 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
8165 		m_freem(txs->txs_mbuf);
8166 		txs->txs_mbuf = NULL;
8167 	}
8168 
8169 	/* Update the dirty transmit buffer pointer. */
8170 	txq->txq_sdirty = i;
8171 	DPRINTF(WM_DEBUG_TX,
8172 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
8173 
8174 	if (count != 0)
8175 		rnd_add_uint32(&sc->rnd_source, count);
8176 
8177 	/*
8178 	 * If there are no more pending transmissions, cancel the watchdog
8179 	 * timer.
8180 	 */
8181 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
8182 		txq->txq_watchdog = false;
8183 
8184 	return more;
8185 }
8186 
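/*
 * Receive descriptor accessors.  The driver handles three descriptor
 * layouts: the legacy format, the 82574 extended format and the
 * NEWQUEUE (82575 and newer) advanced format.  The inline helpers
 * below hide those differences from wm_rxeof().
 */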
8187 static inline uint32_t
8188 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
8189 {
8190 	struct wm_softc *sc = rxq->rxq_sc;
8191 
8192 	if (sc->sc_type == WM_T_82574)
8193 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
8194 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8195 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
8196 	else
8197 		return rxq->rxq_descs[idx].wrx_status;
8198 }
8199 
8200 static inline uint32_t
8201 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
8202 {
8203 	struct wm_softc *sc = rxq->rxq_sc;
8204 
8205 	if (sc->sc_type == WM_T_82574)
8206 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
8207 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8208 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
8209 	else
8210 		return rxq->rxq_descs[idx].wrx_errors;
8211 }
8212 
8213 static inline uint16_t
8214 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
8215 {
8216 	struct wm_softc *sc = rxq->rxq_sc;
8217 
8218 	if (sc->sc_type == WM_T_82574)
8219 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
8220 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8221 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
8222 	else
8223 		return rxq->rxq_descs[idx].wrx_special;
8224 }
8225 
8226 static inline int
8227 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
8228 {
8229 	struct wm_softc *sc = rxq->rxq_sc;
8230 
8231 	if (sc->sc_type == WM_T_82574)
8232 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
8233 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8234 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
8235 	else
8236 		return rxq->rxq_descs[idx].wrx_len;
8237 }
8238 
8239 #ifdef WM_DEBUG
8240 static inline uint32_t
8241 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
8242 {
8243 	struct wm_softc *sc = rxq->rxq_sc;
8244 
8245 	if (sc->sc_type == WM_T_82574)
8246 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
8247 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8248 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
8249 	else
8250 		return 0;
8251 }
8252 
8253 static inline uint8_t
8254 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
8255 {
8256 	struct wm_softc *sc = rxq->rxq_sc;
8257 
8258 	if (sc->sc_type == WM_T_82574)
8259 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
8260 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8261 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
8262 	else
8263 		return 0;
8264 }
8265 #endif /* WM_DEBUG */
8266 
8267 static inline bool
8268 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
8269     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
8270 {
8271 
8272 	if (sc->sc_type == WM_T_82574)
8273 		return (status & ext_bit) != 0;
8274 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8275 		return (status & nq_bit) != 0;
8276 	else
8277 		return (status & legacy_bit) != 0;
8278 }
8279 
8280 static inline bool
8281 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
8282     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
8283 {
8284 
8285 	if (sc->sc_type == WM_T_82574)
8286 		return (error & ext_bit) != 0;
8287 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8288 		return (error & nq_bit) != 0;
8289 	else
8290 		return (error & legacy_bit) != 0;
8291 }
8292 
8293 static inline bool
8294 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
8295 {
8296 
8297 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
8298 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
8299 		return true;
8300 	else
8301 		return false;
8302 }
8303 
8304 static inline bool
8305 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
8306 {
8307 	struct wm_softc *sc = rxq->rxq_sc;
8308 
8309 	/* XXXX missing error bit for newqueue? */
8310 	if (wm_rxdesc_is_set_error(sc, errors,
8311 		WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE,
8312 		EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE,
8313 		NQRXC_ERROR_RXE)) {
8314 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
8315 			log(LOG_WARNING, "%s: symbol error\n",
8316 			    device_xname(sc->sc_dev));
8317 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
8318 			log(LOG_WARNING, "%s: receive sequence error\n",
8319 			    device_xname(sc->sc_dev));
8320 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
8321 			log(LOG_WARNING, "%s: CRC error\n",
8322 			    device_xname(sc->sc_dev));
8323 		return true;
8324 	}
8325 
8326 	return false;
8327 }
8328 
8329 static inline bool
8330 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
8331 {
8332 	struct wm_softc *sc = rxq->rxq_sc;
8333 
8334 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
8335 		NQRXC_STATUS_DD)) {
8336 		/* We have processed all of the receive descriptors. */
8337 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
8338 		return false;
8339 	}
8340 
8341 	return true;
8342 }
8343 
8344 static inline bool
8345 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
8346     struct mbuf *m)
8347 {
8348 
8349 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
8350 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
8351 		vlan_set_tag(m, le16toh(vlantag));
8352 	}
8353 
8354 	return true;
8355 }
8356 
8357 static inline void
8358 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
8359     uint32_t errors, struct mbuf *m)
8360 {
8361 	struct wm_softc *sc = rxq->rxq_sc;
8362 
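
	/*
	 * Note: the IXSM (checksum-ignored) bit exists only in the legacy
	 * descriptor format; the extended and NEWQUEUE arguments here are
	 * 0, so for those formats this test never skips the checksum
	 * handling below.
	 */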
8363 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
8364 		if (wm_rxdesc_is_set_status(sc, status,
8365 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
8366 			WM_Q_EVCNT_INCR(rxq, rxipsum);
8367 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
8368 			if (wm_rxdesc_is_set_error(sc, errors,
8369 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
8370 				m->m_pkthdr.csum_flags |=
8371 					M_CSUM_IPv4_BAD;
8372 		}
8373 		if (wm_rxdesc_is_set_status(sc, status,
8374 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
8375 			/*
8376 			 * Note: we don't know if this was TCP or UDP,
8377 			 * so we just set both bits, and expect the
8378 			 * upper layers to deal.
8379 			 */
8380 			WM_Q_EVCNT_INCR(rxq, rxtusum);
8381 			m->m_pkthdr.csum_flags |=
8382 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
8383 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
8384 			if (wm_rxdesc_is_set_error(sc, errors,
8385 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
8386 				m->m_pkthdr.csum_flags |=
8387 					M_CSUM_TCP_UDP_BAD;
8388 		}
8389 	}
8390 }
8391 
8392 /*
8393  * wm_rxeof:
8394  *
8395  *	Helper; handle receive interrupts.
8396  */
8397 static bool
8398 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
8399 {
8400 	struct wm_softc *sc = rxq->rxq_sc;
8401 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8402 	struct wm_rxsoft *rxs;
8403 	struct mbuf *m;
8404 	int i, len;
8405 	int count = 0;
8406 	uint32_t status, errors;
8407 	uint16_t vlantag;
8408 	bool more = false;
8409 
8410 	KASSERT(mutex_owned(rxq->rxq_lock));
8411 
8412 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
8413 		if (limit-- == 0) {
8414 			rxq->rxq_ptr = i;
8415 			more = true;
8416 			DPRINTF(WM_DEBUG_RX,
8417 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
8418 				device_xname(sc->sc_dev), i));
8419 			break;
8420 		}
8421 
8422 		rxs = &rxq->rxq_soft[i];
8423 
8424 		DPRINTF(WM_DEBUG_RX,
8425 		    ("%s: RX: checking descriptor %d\n",
8426 		    device_xname(sc->sc_dev), i));
8427 		wm_cdrxsync(rxq, i,BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
8428 
8429 		status = wm_rxdesc_get_status(rxq, i);
8430 		errors = wm_rxdesc_get_errors(rxq, i);
8431 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
8432 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
8433 #ifdef WM_DEBUG
8434 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
8435 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
8436 #endif
8437 
8438 		if (!wm_rxdesc_dd(rxq, i, status)) {
8439 			/*
8440 			 * Update the receive pointer while holding
8441 			 * rxq_lock, consistent with the counter increment.
8442 			 */
8443 			rxq->rxq_ptr = i;
8444 			break;
8445 		}
8446 
8447 		count++;
8448 		if (__predict_false(rxq->rxq_discard)) {
8449 			DPRINTF(WM_DEBUG_RX,
8450 			    ("%s: RX: discarding contents of descriptor %d\n",
8451 			    device_xname(sc->sc_dev), i));
8452 			wm_init_rxdesc(rxq, i);
8453 			if (wm_rxdesc_is_eop(rxq, status)) {
8454 				/* Reset our state. */
8455 				DPRINTF(WM_DEBUG_RX,
8456 				    ("%s: RX: resetting rxdiscard -> 0\n",
8457 				    device_xname(sc->sc_dev)));
8458 				rxq->rxq_discard = 0;
8459 			}
8460 			continue;
8461 		}
8462 
8463 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
8464 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
8465 
8466 		m = rxs->rxs_mbuf;
8467 
8468 		/*
8469 		 * Add a new receive buffer to the ring, unless of
8470 		 * course the length is zero. Treat the latter as a
8471 		 * failed mapping.
8472 		 */
8473 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
8474 			/*
8475 			 * Failed, throw away what we've done so
8476 			 * far, and discard the rest of the packet.
8477 			 */
8478 			ifp->if_ierrors++;
8479 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
8480 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
8481 			wm_init_rxdesc(rxq, i);
8482 			if (!wm_rxdesc_is_eop(rxq, status))
8483 				rxq->rxq_discard = 1;
8484 			if (rxq->rxq_head != NULL)
8485 				m_freem(rxq->rxq_head);
8486 			WM_RXCHAIN_RESET(rxq);
8487 			DPRINTF(WM_DEBUG_RX,
8488 			    ("%s: RX: Rx buffer allocation failed, "
8489 			    "dropping packet%s\n", device_xname(sc->sc_dev),
8490 			    rxq->rxq_discard ? " (discard)" : ""));
8491 			continue;
8492 		}
8493 
8494 		m->m_len = len;
8495 		rxq->rxq_len += len;
8496 		DPRINTF(WM_DEBUG_RX,
8497 		    ("%s: RX: buffer at %p len %d\n",
8498 		    device_xname(sc->sc_dev), m->m_data, len));
8499 
8500 		/* If this is not the end of the packet, keep looking. */
8501 		if (!wm_rxdesc_is_eop(rxq, status)) {
8502 			WM_RXCHAIN_LINK(rxq, m);
8503 			DPRINTF(WM_DEBUG_RX,
8504 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
8505 			    device_xname(sc->sc_dev), rxq->rxq_len));
8506 			continue;
8507 		}
8508 
8509 		/*
8510 		 * Okay, we have the entire packet now.  The chip is
8511 		 * configured to include the FCS except on I350 and I21[01]
8512 		 * (not all chips can be configured to strip it),
8513 		 * so we need to trim it.
8514 		 * We may need to adjust the length of the previous mbuf in
8515 		 * the chain if the current mbuf is too short.
8516 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
8517 		 * register is always set on I350, so we don't trim it there.
8518 		 */
8519 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
8520 		    && (sc->sc_type != WM_T_I210)
8521 		    && (sc->sc_type != WM_T_I211)) {
8522 			if (m->m_len < ETHER_CRC_LEN) {
8523 				rxq->rxq_tail->m_len
8524 				    -= (ETHER_CRC_LEN - m->m_len);
8525 				m->m_len = 0;
8526 			} else
8527 				m->m_len -= ETHER_CRC_LEN;
8528 			len = rxq->rxq_len - ETHER_CRC_LEN;
8529 		} else
8530 			len = rxq->rxq_len;
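		/*
		 * Example of the adjustment above: if the 4-byte FCS
		 * straddles mbufs and the final mbuf holds only 2 of its
		 * bytes, that mbuf is emptied and the previous one is
		 * shortened by the remaining 2 bytes.
		 */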
8531 
8532 		WM_RXCHAIN_LINK(rxq, m);
8533 
8534 		*rxq->rxq_tailp = NULL;
8535 		m = rxq->rxq_head;
8536 
8537 		WM_RXCHAIN_RESET(rxq);
8538 
8539 		DPRINTF(WM_DEBUG_RX,
8540 		    ("%s: RX: have entire packet, len -> %d\n",
8541 		    device_xname(sc->sc_dev), len));
8542 
8543 		/* If an error occurred, update stats and drop the packet. */
8544 		if (wm_rxdesc_has_errors(rxq, errors)) {
8545 			m_freem(m);
8546 			continue;
8547 		}
8548 
8549 		/* No errors.  Receive the packet. */
8550 		m_set_rcvif(m, ifp);
8551 		m->m_pkthdr.len = len;
8552 		/*
8553 		 * TODO
8554 		 * The rsshash and rsstype should be saved in this mbuf.
8555 		 */
8556 		DPRINTF(WM_DEBUG_RX,
8557 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
8558 			device_xname(sc->sc_dev), rsstype, rsshash));
8559 
8560 		/*
8561 		 * If VLANs are enabled, VLAN packets have been unwrapped
8562 		 * for us.  Associate the tag with the packet.
8563 		 */
8564 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
8565 			continue;
8566 
8567 		/* Set up checksum info for this packet. */
8568 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
8569 		/*
8570 		 * Update the receive pointer while holding rxq_lock,
8571 		 * consistent with the counter increment.
8572 		 */
8573 		rxq->rxq_ptr = i;
8574 		rxq->rxq_packets++;
8575 		rxq->rxq_bytes += len;
8576 		mutex_exit(rxq->rxq_lock);
8577 
8578 		/* Pass it on. */
8579 		if_percpuq_enqueue(sc->sc_ipq, m);
8580 
8581 		mutex_enter(rxq->rxq_lock);
8582 
8583 		if (rxq->rxq_stopping)
8584 			break;
8585 	}
8586 
8587 	if (count != 0)
8588 		rnd_add_uint32(&sc->rnd_source, count);
8589 
8590 	DPRINTF(WM_DEBUG_RX,
8591 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
8592 
8593 	return more;
8594 }
8595 
8596 /*
8597  * wm_linkintr_gmii:
8598  *
8599  *	Helper; handle link interrupts for GMII.
8600  */
8601 static void
8602 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
8603 {
8604 
8605 	KASSERT(WM_CORE_LOCKED(sc));
8606 
8607 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
8608 		__func__));
8609 
8610 	if (icr & ICR_LSC) {
8611 		uint32_t reg;
8612 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
8613 
8614 		if ((status & STATUS_LU) != 0) {
8615 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
8616 				device_xname(sc->sc_dev),
8617 				(status & STATUS_FD) ? "FDX" : "HDX"));
8618 		} else {
8619 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
8620 				device_xname(sc->sc_dev)));
8621 		}
8622 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
8623 			wm_gig_downshift_workaround_ich8lan(sc);
8624 
8625 		if ((sc->sc_type == WM_T_ICH8)
8626 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
8627 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
8628 		}
8629 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
8630 			device_xname(sc->sc_dev)));
8631 		mii_pollstat(&sc->sc_mii);
8632 		if (sc->sc_type == WM_T_82543) {
8633 			int miistatus, active;
8634 
8635 			/*
			 * On the 82543, we must force the MAC's speed
			 * and duplex to match the PHY's speed and
			 * duplex configuration.
8639 			 */
8640 			miistatus = sc->sc_mii.mii_media_status;
8641 
8642 			if (miistatus & IFM_ACTIVE) {
8643 				active = sc->sc_mii.mii_media_active;
8644 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
8645 				switch (IFM_SUBTYPE(active)) {
8646 				case IFM_10_T:
8647 					sc->sc_ctrl |= CTRL_SPEED_10;
8648 					break;
8649 				case IFM_100_TX:
8650 					sc->sc_ctrl |= CTRL_SPEED_100;
8651 					break;
8652 				case IFM_1000_T:
8653 					sc->sc_ctrl |= CTRL_SPEED_1000;
8654 					break;
8655 				default:
8656 					/*
8657 					 * fiber?
					 * Should not enter here.
8659 					 */
8660 					printf("unknown media (%x)\n", active);
8661 					break;
8662 				}
8663 				if (active & IFM_FDX)
8664 					sc->sc_ctrl |= CTRL_FD;
8665 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8666 			}
8667 		} else if (sc->sc_type == WM_T_PCH) {
8668 			wm_k1_gig_workaround_hv(sc,
8669 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
8670 		}
8671 
8672 		if ((sc->sc_phytype == WMPHY_82578)
8673 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
8674 			== IFM_1000_T)) {
8675 
8676 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
8677 				delay(200*1000); /* XXX too big */
8678 
8679 				/* Link stall fix for link up */
8680 				wm_gmii_hv_writereg(sc->sc_dev, 1,
8681 				    HV_MUX_DATA_CTRL,
8682 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
8683 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
8684 				wm_gmii_hv_writereg(sc->sc_dev, 1,
8685 				    HV_MUX_DATA_CTRL,
8686 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
8687 			}
8688 		}
8689 		/*
8690 		 * I217 Packet Loss issue:
8691 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
8692 		 * on power up.
8693 		 * Set the Beacon Duration for I217 to 8 usec
8694 		 */
8695 		if (sc->sc_type >= WM_T_PCH_LPT) {
8696 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
8697 			reg &= ~FEXTNVM4_BEACON_DURATION;
8698 			reg |= FEXTNVM4_BEACON_DURATION_8US;
8699 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
8700 		}
8701 
8702 		/* XXX Work-around I218 hang issue */
8703 		/* e1000_k1_workaround_lpt_lp() */
8704 
8705 		if (sc->sc_type >= WM_T_PCH_LPT) {
8706 			/*
8707 			 * Set platform power management values for Latency
8708 			 * Tolerance Reporting (LTR)
8709 			 */
8710 			wm_platform_pm_pch_lpt(sc,
8711 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
8712 				    != 0));
8713 		}
8714 
8715 		/* FEXTNVM6 K1-off workaround */
8716 		if (sc->sc_type == WM_T_PCH_SPT) {
8717 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
8718 			if (CSR_READ(sc, WMREG_PCIEANACFG)
8719 			    & FEXTNVM6_K1_OFF_ENABLE)
8720 				reg |= FEXTNVM6_K1_OFF_ENABLE;
8721 			else
8722 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
8723 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
8724 		}
8725 	} else if (icr & ICR_RXSEQ) {
8726 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
8727 			device_xname(sc->sc_dev)));
8728 	}
8729 }
8730 
8731 /*
8732  * wm_linkintr_tbi:
8733  *
8734  *	Helper; handle link interrupts for TBI mode.
8735  */
8736 static void
8737 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
8738 {
8739 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8740 	uint32_t status;
8741 
8742 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
8743 		__func__));
8744 
8745 	status = CSR_READ(sc, WMREG_STATUS);
8746 	if (icr & ICR_LSC) {
8747 		if (status & STATUS_LU) {
8748 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
8749 			    device_xname(sc->sc_dev),
8750 			    (status & STATUS_FD) ? "FDX" : "HDX"));
8751 			/*
8752 			 * NOTE: CTRL will update TFCE and RFCE automatically,
8753 			 * so we should update sc->sc_ctrl
8754 			 */
8755 
8756 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
8757 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
8758 			sc->sc_fcrtl &= ~FCRTL_XONE;
8759 			if (status & STATUS_FD)
8760 				sc->sc_tctl |=
8761 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
8762 			else
8763 				sc->sc_tctl |=
8764 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
8765 			if (sc->sc_ctrl & CTRL_TFCE)
8766 				sc->sc_fcrtl |= FCRTL_XONE;
8767 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
8768 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
8769 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
8770 				      sc->sc_fcrtl);
8771 			sc->sc_tbi_linkup = 1;
8772 			if_link_state_change(ifp, LINK_STATE_UP);
8773 		} else {
8774 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
8775 			    device_xname(sc->sc_dev)));
8776 			sc->sc_tbi_linkup = 0;
8777 			if_link_state_change(ifp, LINK_STATE_DOWN);
8778 		}
8779 		/* Update LED */
8780 		wm_tbi_serdes_set_linkled(sc);
8781 	} else if (icr & ICR_RXSEQ) {
8782 		DPRINTF(WM_DEBUG_LINK,
8783 		    ("%s: LINK: Receive sequence error\n",
8784 		    device_xname(sc->sc_dev)));
8785 	}
8786 }
8787 
8788 /*
8789  * wm_linkintr_serdes:
8790  *
 *	Helper; handle link interrupts for SERDES mode.
8792  */
8793 static void
8794 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
8795 {
8796 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8797 	struct mii_data *mii = &sc->sc_mii;
8798 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8799 	uint32_t pcs_adv, pcs_lpab, reg;
8800 
8801 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
8802 		__func__));
8803 
8804 	if (icr & ICR_LSC) {
8805 		/* Check PCS */
8806 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
8807 		if ((reg & PCS_LSTS_LINKOK) != 0) {
8808 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
8809 				device_xname(sc->sc_dev)));
8810 			mii->mii_media_status |= IFM_ACTIVE;
8811 			sc->sc_tbi_linkup = 1;
8812 			if_link_state_change(ifp, LINK_STATE_UP);
8813 		} else {
8814 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
8815 				device_xname(sc->sc_dev)));
8816 			mii->mii_media_status |= IFM_NONE;
8817 			sc->sc_tbi_linkup = 0;
8818 			if_link_state_change(ifp, LINK_STATE_DOWN);
8819 			wm_tbi_serdes_set_linkled(sc);
8820 			return;
8821 		}
8822 		mii->mii_media_active |= IFM_1000_SX;
8823 		if ((reg & PCS_LSTS_FDX) != 0)
8824 			mii->mii_media_active |= IFM_FDX;
8825 		else
8826 			mii->mii_media_active |= IFM_HDX;
8827 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
8828 			/* Check flow */
8829 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
8830 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
8831 				DPRINTF(WM_DEBUG_LINK,
8832 				    ("XXX LINKOK but not ACOMP\n"));
8833 				return;
8834 			}
8835 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
8836 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
8837 			DPRINTF(WM_DEBUG_LINK,
8838 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
8839 			if ((pcs_adv & TXCW_SYM_PAUSE)
8840 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
8841 				mii->mii_media_active |= IFM_FLOW
8842 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
8843 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
8844 			    && (pcs_adv & TXCW_ASYM_PAUSE)
8845 			    && (pcs_lpab & TXCW_SYM_PAUSE)
8846 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
8847 				mii->mii_media_active |= IFM_FLOW
8848 				    | IFM_ETH_TXPAUSE;
8849 			else if ((pcs_adv & TXCW_SYM_PAUSE)
8850 			    && (pcs_adv & TXCW_ASYM_PAUSE)
8851 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
8852 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
8853 				mii->mii_media_active |= IFM_FLOW
8854 				    | IFM_ETH_RXPAUSE;
8855 		}
8856 		/* Update LED */
8857 		wm_tbi_serdes_set_linkled(sc);
8858 	} else {
8859 		DPRINTF(WM_DEBUG_LINK,
8860 		    ("%s: LINK: Receive sequence error\n",
8861 		    device_xname(sc->sc_dev)));
8862 	}
8863 }
8864 
8865 /*
8866  * wm_linkintr:
8867  *
8868  *	Helper; handle link interrupts.
8869  */
8870 static void
8871 wm_linkintr(struct wm_softc *sc, uint32_t icr)
8872 {
8873 
8874 	KASSERT(WM_CORE_LOCKED(sc));
8875 
8876 	if (sc->sc_flags & WM_F_HAS_MII)
8877 		wm_linkintr_gmii(sc, icr);
8878 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
8879 	    && (sc->sc_type >= WM_T_82575))
8880 		wm_linkintr_serdes(sc, icr);
8881 	else
8882 		wm_linkintr_tbi(sc, icr);
8883 }
8884 
8885 /*
8886  * wm_intr_legacy:
8887  *
8888  *	Interrupt service routine for INTx and MSI.
8889  */
8890 static int
8891 wm_intr_legacy(void *arg)
8892 {
8893 	struct wm_softc *sc = arg;
8894 	struct wm_queue *wmq = &sc->sc_queue[0];
8895 	struct wm_txqueue *txq = &wmq->wmq_txq;
8896 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
8897 	uint32_t icr, rndval = 0;
8898 	int handled = 0;
8899 
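	/*
	 * Reading ICR acknowledges the asserted interrupt causes, so each
	 * pass handles whatever was pending at the time of the read; stop
	 * once none of the causes we enabled remain.
	 */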
8900 	while (1 /* CONSTCOND */) {
8901 		icr = CSR_READ(sc, WMREG_ICR);
8902 		if ((icr & sc->sc_icr) == 0)
8903 			break;
8904 		if (handled == 0) {
8905 			DPRINTF(WM_DEBUG_TX,
8906 			    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
8907 		}
8908 		if (rndval == 0)
8909 			rndval = icr;
8910 
8911 		mutex_enter(rxq->rxq_lock);
8912 
8913 		if (rxq->rxq_stopping) {
8914 			mutex_exit(rxq->rxq_lock);
8915 			break;
8916 		}
8917 
8918 		handled = 1;
8919 
8920 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
8921 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
8922 			DPRINTF(WM_DEBUG_RX,
8923 			    ("%s: RX: got Rx intr 0x%08x\n",
8924 			    device_xname(sc->sc_dev),
8925 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
8926 			WM_Q_EVCNT_INCR(rxq, rxintr);
8927 		}
8928 #endif
8929 		/*
8930 		 * wm_rxeof() does *not* call upper layer functions directly,
		 * as if_percpuq_enqueue() just calls softint_schedule().
8932 		 * So, we can call wm_rxeof() in interrupt context.
8933 		 */
8934 		wm_rxeof(rxq, UINT_MAX);
8935 
8936 		mutex_exit(rxq->rxq_lock);
8937 		mutex_enter(txq->txq_lock);
8938 
8939 		if (txq->txq_stopping) {
8940 			mutex_exit(txq->txq_lock);
8941 			break;
8942 		}
8943 
8944 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
8945 		if (icr & ICR_TXDW) {
8946 			DPRINTF(WM_DEBUG_TX,
8947 			    ("%s: TX: got TXDW interrupt\n",
8948 			    device_xname(sc->sc_dev)));
8949 			WM_Q_EVCNT_INCR(txq, txdw);
8950 		}
8951 #endif
8952 		wm_txeof(txq, UINT_MAX);
8953 
8954 		mutex_exit(txq->txq_lock);
8955 		WM_CORE_LOCK(sc);
8956 
8957 		if (sc->sc_core_stopping) {
8958 			WM_CORE_UNLOCK(sc);
8959 			break;
8960 		}
8961 
8962 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
8963 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
8964 			wm_linkintr(sc, icr);
8965 		}
8966 
8967 		WM_CORE_UNLOCK(sc);
8968 
8969 		if (icr & ICR_RXO) {
8970 #if defined(WM_DEBUG)
8971 			log(LOG_WARNING, "%s: Receive overrun\n",
8972 			    device_xname(sc->sc_dev));
8973 #endif /* defined(WM_DEBUG) */
8974 		}
8975 	}
8976 
8977 	rnd_add_uint32(&sc->rnd_source, rndval);
8978 
8979 	if (handled) {
8980 		/* Try to get more packets going. */
8981 		softint_schedule(wmq->wmq_si);
8982 	}
8983 
8984 	return handled;
8985 }
8986 
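/*
 * wm_txrxintr_disable:
 *
 *	Mask the Tx/Rx interrupts of the given queue while it is being
 *	serviced.
 */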
8987 static inline void
8988 wm_txrxintr_disable(struct wm_queue *wmq)
8989 {
8990 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
8991 
	if (sc->sc_type == WM_T_82574)
		CSR_WRITE(sc, WMREG_IMC,
		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
	else if (sc->sc_type == WM_T_82575)
		CSR_WRITE(sc, WMREG_EIMC,
		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
8996 	else
8997 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
8998 }
8999 
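/*
 * wm_txrxintr_enable:
 *
 *	Recalculate the interrupt throttling rate and unmask the Tx/Rx
 *	interrupts of the given queue.
 */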
9000 static inline void
9001 wm_txrxintr_enable(struct wm_queue *wmq)
9002 {
9003 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
9004 
9005 	wm_itrs_calculate(sc, wmq);
9006 
9007 	/*
	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
	 * here.  There is no need to care which of RXQ(0) and RXQ(1) enables
	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled while
	 * its wm_handle_queue(wmq) is running.
9012 	 */
	if (sc->sc_type == WM_T_82574)
		CSR_WRITE(sc, WMREG_IMS,
		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
	else if (sc->sc_type == WM_T_82575)
		CSR_WRITE(sc, WMREG_EIMS,
		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
9017 	else
9018 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
9019 }
9020 
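/*
 * wm_txrxintr_msix:
 *
 *	Interrupt service routine for a queue's Tx/Rx MSI-X vector.
 */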
9021 static int
9022 wm_txrxintr_msix(void *arg)
9023 {
9024 	struct wm_queue *wmq = arg;
9025 	struct wm_txqueue *txq = &wmq->wmq_txq;
9026 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
9027 	struct wm_softc *sc = txq->txq_sc;
9028 	u_int txlimit = sc->sc_tx_intr_process_limit;
9029 	u_int rxlimit = sc->sc_rx_intr_process_limit;
9030 	bool txmore;
9031 	bool rxmore;
9032 
9033 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
9034 
9035 	DPRINTF(WM_DEBUG_TX,
9036 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
9037 
9038 	wm_txrxintr_disable(wmq);
9039 
9040 	mutex_enter(txq->txq_lock);
9041 
9042 	if (txq->txq_stopping) {
9043 		mutex_exit(txq->txq_lock);
9044 		return 0;
9045 	}
9046 
9047 	WM_Q_EVCNT_INCR(txq, txdw);
9048 	txmore = wm_txeof(txq, txlimit);
9049 	/* wm_deferred start() is done in wm_handle_queue(). */
9050 	mutex_exit(txq->txq_lock);
9051 
9052 	DPRINTF(WM_DEBUG_RX,
9053 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
9054 	mutex_enter(rxq->rxq_lock);
9055 
9056 	if (rxq->rxq_stopping) {
9057 		mutex_exit(rxq->rxq_lock);
9058 		return 0;
9059 	}
9060 
9061 	WM_Q_EVCNT_INCR(rxq, rxintr);
9062 	rxmore = wm_rxeof(rxq, rxlimit);
9063 	mutex_exit(rxq->rxq_lock);
9064 
9065 	wm_itrs_writereg(sc, wmq);
9066 
9067 	if (txmore || rxmore)
9068 		softint_schedule(wmq->wmq_si);
9069 	else
9070 		wm_txrxintr_enable(wmq);
9071 
9072 	return 1;
9073 }
9074 
9075 static void
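/*
 * wm_handle_queue:
 *
 *	Softint handler for a queue; continue Tx/Rx processing deferred
 *	from the interrupt handlers and kick any deferred transmission.
 */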
9076 wm_handle_queue(void *arg)
9077 {
9078 	struct wm_queue *wmq = arg;
9079 	struct wm_txqueue *txq = &wmq->wmq_txq;
9080 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
9081 	struct wm_softc *sc = txq->txq_sc;
9082 	u_int txlimit = sc->sc_tx_process_limit;
9083 	u_int rxlimit = sc->sc_rx_process_limit;
9084 	bool txmore;
9085 	bool rxmore;
9086 
9087 	mutex_enter(txq->txq_lock);
9088 	if (txq->txq_stopping) {
9089 		mutex_exit(txq->txq_lock);
9090 		return;
9091 	}
9092 	txmore = wm_txeof(txq, txlimit);
9093 	wm_deferred_start_locked(txq);
9094 	mutex_exit(txq->txq_lock);
9095 
9096 	mutex_enter(rxq->rxq_lock);
9097 	if (rxq->rxq_stopping) {
9098 		mutex_exit(rxq->rxq_lock);
9099 		return;
9100 	}
9101 	WM_Q_EVCNT_INCR(rxq, rxdefer);
9102 	rxmore = wm_rxeof(rxq, rxlimit);
9103 	mutex_exit(rxq->rxq_lock);
9104 
9105 	if (txmore || rxmore)
9106 		softint_schedule(wmq->wmq_si);
9107 	else
9108 		wm_txrxintr_enable(wmq);
9109 }
9110 
9111 /*
9112  * wm_linkintr_msix:
9113  *
9114  *	Interrupt service routine for link status change for MSI-X.
9115  */
9116 static int
9117 wm_linkintr_msix(void *arg)
9118 {
9119 	struct wm_softc *sc = arg;
9120 	uint32_t reg;
	bool has_rxo = false;
9122 
9123 	DPRINTF(WM_DEBUG_LINK,
9124 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
9125 
9126 	reg = CSR_READ(sc, WMREG_ICR);
9127 	WM_CORE_LOCK(sc);
9128 	if (sc->sc_core_stopping)
9129 		goto out;
9130 
	if ((reg & ICR_LSC) != 0) {
9132 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
9133 		wm_linkintr(sc, ICR_LSC);
9134 	}
9135 
9136 	/*
9137 	 * XXX 82574 MSI-X mode workaround
9138 	 *
	 * In 82574 MSI-X mode, a receive overrun (RXO) raises the ICR_OTHER
	 * MSI-X vector; furthermore, it raises neither the ICR_RXQ(0) nor
	 * the ICR_RXQ(1) vector. So, we generate ICR_RXQ(0) and ICR_RXQ(1)
9142 	 * interrupts by writing WMREG_ICS to process receive packets.
9143 	 */
9144 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
9145 #if defined(WM_DEBUG)
9146 		log(LOG_WARNING, "%s: Receive overrun\n",
9147 		    device_xname(sc->sc_dev));
9148 #endif /* defined(WM_DEBUG) */
9149 
9150 		has_rxo = true;
9151 		/*
		 * The RXO interrupt fires at a very high rate when receive
		 * traffic is heavy, so we use polling mode for ICR_OTHER as
		 * we do for the Tx/Rx interrupts. ICR_OTHER will be enabled
		 * at the end of wm_txrxintr_msix(), which is kicked by both
		 * the ICR_RXQ(0) and ICR_RXQ(1) interrupts.
9157 		 */
9158 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
9159 
9160 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
9161 	}
9162 
9163 
9164 
9165 out:
9166 	WM_CORE_UNLOCK(sc);
9167 
9168 	if (sc->sc_type == WM_T_82574) {
9169 		if (!has_rxo)
9170 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
9171 		else
9172 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
9173 	} else if (sc->sc_type == WM_T_82575)
9174 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
9175 	else
9176 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
9177 
9178 	return 1;
9179 }
9180 
9181 /*
9182  * Media related.
9183  * GMII, SGMII, TBI (and SERDES)
9184  */
9185 
9186 /* Common */
9187 
9188 /*
9189  * wm_tbi_serdes_set_linkled:
9190  *
9191  *	Update the link LED on TBI and SERDES devices.
9192  */
9193 static void
9194 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
9195 {
9196 
9197 	if (sc->sc_tbi_linkup)
9198 		sc->sc_ctrl |= CTRL_SWDPIN(0);
9199 	else
9200 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
9201 
9202 	/* 82540 or newer devices are active low */
9203 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
9204 
9205 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9206 }
9207 
9208 /* GMII related */
9209 
9210 /*
9211  * wm_gmii_reset:
9212  *
9213  *	Reset the PHY.
9214  */
9215 static void
9216 wm_gmii_reset(struct wm_softc *sc)
9217 {
9218 	uint32_t reg;
9219 	int rv;
9220 
9221 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
9222 		device_xname(sc->sc_dev), __func__));
9223 
9224 	rv = sc->phy.acquire(sc);
9225 	if (rv != 0) {
9226 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9227 		    __func__);
9228 		return;
9229 	}
9230 
9231 	switch (sc->sc_type) {
9232 	case WM_T_82542_2_0:
9233 	case WM_T_82542_2_1:
9234 		/* null */
9235 		break;
9236 	case WM_T_82543:
9237 		/*
		 * On the 82543, we must force the MAC's speed and duplex to
		 * match the PHY's speed and duplex configuration.
9240 		 * In addition, we need to perform a hardware reset on the PHY
9241 		 * to take it out of reset.
9242 		 */
9243 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
9244 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9245 
9246 		/* The PHY reset pin is active-low. */
9247 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
9248 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
9249 		    CTRL_EXT_SWDPIN(4));
9250 		reg |= CTRL_EXT_SWDPIO(4);
9251 
9252 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9253 		CSR_WRITE_FLUSH(sc);
9254 		delay(10*1000);
9255 
9256 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
9257 		CSR_WRITE_FLUSH(sc);
9258 		delay(150);
9259 #if 0
9260 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
9261 #endif
9262 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
9263 		break;
9264 	case WM_T_82544:	/* reset 10000us */
9265 	case WM_T_82540:
9266 	case WM_T_82545:
9267 	case WM_T_82545_3:
9268 	case WM_T_82546:
9269 	case WM_T_82546_3:
9270 	case WM_T_82541:
9271 	case WM_T_82541_2:
9272 	case WM_T_82547:
9273 	case WM_T_82547_2:
9274 	case WM_T_82571:	/* reset 100us */
9275 	case WM_T_82572:
9276 	case WM_T_82573:
9277 	case WM_T_82574:
9278 	case WM_T_82575:
9279 	case WM_T_82576:
9280 	case WM_T_82580:
9281 	case WM_T_I350:
9282 	case WM_T_I354:
9283 	case WM_T_I210:
9284 	case WM_T_I211:
9285 	case WM_T_82583:
9286 	case WM_T_80003:
9287 		/* generic reset */
9288 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
9289 		CSR_WRITE_FLUSH(sc);
9290 		delay(20000);
9291 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9292 		CSR_WRITE_FLUSH(sc);
9293 		delay(20000);
9294 
9295 		if ((sc->sc_type == WM_T_82541)
9296 		    || (sc->sc_type == WM_T_82541_2)
9297 		    || (sc->sc_type == WM_T_82547)
9298 		    || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP are done in igp_reset() */
9300 			/* XXX add code to set LED after phy reset */
9301 		}
9302 		break;
9303 	case WM_T_ICH8:
9304 	case WM_T_ICH9:
9305 	case WM_T_ICH10:
9306 	case WM_T_PCH:
9307 	case WM_T_PCH2:
9308 	case WM_T_PCH_LPT:
9309 	case WM_T_PCH_SPT:
9310 	case WM_T_PCH_CNP:
9311 		/* generic reset */
9312 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
9313 		CSR_WRITE_FLUSH(sc);
9314 		delay(100);
9315 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9316 		CSR_WRITE_FLUSH(sc);
9317 		delay(150);
9318 		break;
9319 	default:
9320 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
9321 		    __func__);
9322 		break;
9323 	}
9324 
9325 	sc->phy.release(sc);
9326 
9327 	/* get_cfg_done */
9328 	wm_get_cfg_done(sc);
9329 
9330 	/* extra setup */
9331 	switch (sc->sc_type) {
9332 	case WM_T_82542_2_0:
9333 	case WM_T_82542_2_1:
9334 	case WM_T_82543:
9335 	case WM_T_82544:
9336 	case WM_T_82540:
9337 	case WM_T_82545:
9338 	case WM_T_82545_3:
9339 	case WM_T_82546:
9340 	case WM_T_82546_3:
9341 	case WM_T_82541_2:
9342 	case WM_T_82547_2:
9343 	case WM_T_82571:
9344 	case WM_T_82572:
9345 	case WM_T_82573:
9346 	case WM_T_82574:
9347 	case WM_T_82583:
9348 	case WM_T_82575:
9349 	case WM_T_82576:
9350 	case WM_T_82580:
9351 	case WM_T_I350:
9352 	case WM_T_I354:
9353 	case WM_T_I210:
9354 	case WM_T_I211:
9355 	case WM_T_80003:
9356 		/* null */
9357 		break;
9358 	case WM_T_82541:
9359 	case WM_T_82547:
		/* XXX Actively configure the LED after PHY reset */
9361 		break;
9362 	case WM_T_ICH8:
9363 	case WM_T_ICH9:
9364 	case WM_T_ICH10:
9365 	case WM_T_PCH:
9366 	case WM_T_PCH2:
9367 	case WM_T_PCH_LPT:
9368 	case WM_T_PCH_SPT:
9369 	case WM_T_PCH_CNP:
9370 		wm_phy_post_reset(sc);
9371 		break;
9372 	default:
9373 		panic("%s: unknown type\n", __func__);
9374 		break;
9375 	}
9376 }
9377 
9378 /*
9379  * Setup sc_phytype and mii_{read|write}reg.
9380  *
 *  To identify the PHY type, the correct read/write functions must be
 * selected, and selecting them requires the PCI ID or MAC type, since
 * the PHY registers cannot be accessed yet.
 *
 *  On the first call of this function, the PHY ID is not known yet, so
 * the PCI ID or MAC type is checked. The list of PCI IDs may not be
 * complete, so the result might be incorrect.
 *
 *  On the second call, the PHY OUI and model are used to identify the
 * PHY type. This may still be imperfect because of missing table
 * entries, but it is more reliable than the first call.
 *
 *  If the newly detected result differs from the previous assumption,
 * a diagnostic message is printed.
9395  */
9396 static void
9397 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
9398     uint16_t phy_model)
9399 {
9400 	device_t dev = sc->sc_dev;
9401 	struct mii_data *mii = &sc->sc_mii;
9402 	uint16_t new_phytype = WMPHY_UNKNOWN;
9403 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
9404 	mii_readreg_t new_readreg;
9405 	mii_writereg_t new_writereg;
9406 
9407 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
9408 		device_xname(sc->sc_dev), __func__));
9409 
9410 	if (mii->mii_readreg == NULL) {
9411 		/*
9412 		 *  This is the first call of this function. For ICH and PCH
9413 		 * variants, it's difficult to determine the PHY access method
9414 		 * by sc_type, so use the PCI product ID for some devices.
9415 		 */
9416 
9417 		switch (sc->sc_pcidevid) {
9418 		case PCI_PRODUCT_INTEL_PCH_M_LM:
9419 		case PCI_PRODUCT_INTEL_PCH_M_LC:
9420 			/* 82577 */
9421 			new_phytype = WMPHY_82577;
9422 			break;
9423 		case PCI_PRODUCT_INTEL_PCH_D_DM:
9424 		case PCI_PRODUCT_INTEL_PCH_D_DC:
9425 			/* 82578 */
9426 			new_phytype = WMPHY_82578;
9427 			break;
9428 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
9429 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
9430 			/* 82579 */
9431 			new_phytype = WMPHY_82579;
9432 			break;
9433 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
9434 		case PCI_PRODUCT_INTEL_82801I_BM:
9435 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
9436 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
9437 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
9438 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
9439 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
9440 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
9441 			/* ICH8, 9, 10 with 82567 */
9442 			new_phytype = WMPHY_BM;
9443 			break;
9444 		default:
9445 			break;
9446 		}
9447 	} else {
9448 		/* It's not the first call. Use PHY OUI and model */
9449 		switch (phy_oui) {
9450 		case MII_OUI_ATHEROS: /* XXX ??? */
9451 			switch (phy_model) {
9452 			case 0x0004: /* XXX */
9453 				new_phytype = WMPHY_82578;
9454 				break;
9455 			default:
9456 				break;
9457 			}
9458 			break;
9459 		case MII_OUI_xxMARVELL:
9460 			switch (phy_model) {
9461 			case MII_MODEL_xxMARVELL_I210:
9462 				new_phytype = WMPHY_I210;
9463 				break;
9464 			case MII_MODEL_xxMARVELL_E1011:
9465 			case MII_MODEL_xxMARVELL_E1000_3:
9466 			case MII_MODEL_xxMARVELL_E1000_5:
9467 			case MII_MODEL_xxMARVELL_E1112:
9468 				new_phytype = WMPHY_M88;
9469 				break;
9470 			case MII_MODEL_xxMARVELL_E1149:
9471 				new_phytype = WMPHY_BM;
9472 				break;
9473 			case MII_MODEL_xxMARVELL_E1111:
9474 			case MII_MODEL_xxMARVELL_I347:
9475 			case MII_MODEL_xxMARVELL_E1512:
9476 			case MII_MODEL_xxMARVELL_E1340M:
9477 			case MII_MODEL_xxMARVELL_E1543:
9478 				new_phytype = WMPHY_M88;
9479 				break;
9480 			case MII_MODEL_xxMARVELL_I82563:
9481 				new_phytype = WMPHY_GG82563;
9482 				break;
9483 			default:
9484 				break;
9485 			}
9486 			break;
9487 		case MII_OUI_INTEL:
9488 			switch (phy_model) {
9489 			case MII_MODEL_INTEL_I82577:
9490 				new_phytype = WMPHY_82577;
9491 				break;
9492 			case MII_MODEL_INTEL_I82579:
9493 				new_phytype = WMPHY_82579;
9494 				break;
9495 			case MII_MODEL_INTEL_I217:
9496 				new_phytype = WMPHY_I217;
9497 				break;
9498 			case MII_MODEL_INTEL_I82580:
9499 			case MII_MODEL_INTEL_I350:
9500 				new_phytype = WMPHY_82580;
9501 				break;
9502 			default:
9503 				break;
9504 			}
9505 			break;
9506 		case MII_OUI_yyINTEL:
9507 			switch (phy_model) {
9508 			case MII_MODEL_yyINTEL_I82562G:
9509 			case MII_MODEL_yyINTEL_I82562EM:
9510 			case MII_MODEL_yyINTEL_I82562ET:
9511 				new_phytype = WMPHY_IFE;
9512 				break;
9513 			case MII_MODEL_yyINTEL_IGP01E1000:
9514 				new_phytype = WMPHY_IGP;
9515 				break;
9516 			case MII_MODEL_yyINTEL_I82566:
9517 				new_phytype = WMPHY_IGP_3;
9518 				break;
9519 			default:
9520 				break;
9521 			}
9522 			break;
9523 		default:
9524 			break;
9525 		}
9526 		if (new_phytype == WMPHY_UNKNOWN)
9527 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
9528 			    __func__);
9529 
9530 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
9531 		    && (sc->sc_phytype != new_phytype )) {
9532 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
9533 			    "was incorrect. PHY type from PHY ID = %u\n",
9534 			    sc->sc_phytype, new_phytype);
9535 		}
9536 	}
9537 
9538 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
9539 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
9540 		/* SGMII */
9541 		new_readreg = wm_sgmii_readreg;
9542 		new_writereg = wm_sgmii_writereg;
	} else if ((sc->sc_type == WM_T_82574)
	    || (sc->sc_type == WM_T_82583)) {
9544 		/* BM2 (phyaddr == 1) */
9545 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
9546 		    && (new_phytype != WMPHY_BM)
9547 		    && (new_phytype != WMPHY_UNKNOWN))
9548 			doubt_phytype = new_phytype;
9549 		new_phytype = WMPHY_BM;
9550 		new_readreg = wm_gmii_bm_readreg;
9551 		new_writereg = wm_gmii_bm_writereg;
9552 	} else if (sc->sc_type >= WM_T_PCH) {
9553 		/* All PCH* use _hv_ */
9554 		new_readreg = wm_gmii_hv_readreg;
9555 		new_writereg = wm_gmii_hv_writereg;
9556 	} else if (sc->sc_type >= WM_T_ICH8) {
9557 		/* non-82567 ICH8, 9 and 10 */
9558 		new_readreg = wm_gmii_i82544_readreg;
9559 		new_writereg = wm_gmii_i82544_writereg;
9560 	} else if (sc->sc_type >= WM_T_80003) {
9561 		/* 80003 */
9562 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
9563 		    && (new_phytype != WMPHY_GG82563)
9564 		    && (new_phytype != WMPHY_UNKNOWN))
9565 			doubt_phytype = new_phytype;
9566 		new_phytype = WMPHY_GG82563;
9567 		new_readreg = wm_gmii_i80003_readreg;
9568 		new_writereg = wm_gmii_i80003_writereg;
9569 	} else if (sc->sc_type >= WM_T_I210) {
9570 		/* I210 and I211 */
9571 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
9572 		    && (new_phytype != WMPHY_I210)
9573 		    && (new_phytype != WMPHY_UNKNOWN))
9574 			doubt_phytype = new_phytype;
9575 		new_phytype = WMPHY_I210;
9576 		new_readreg = wm_gmii_gs40g_readreg;
9577 		new_writereg = wm_gmii_gs40g_writereg;
9578 	} else if (sc->sc_type >= WM_T_82580) {
9579 		/* 82580, I350 and I354 */
9580 		new_readreg = wm_gmii_82580_readreg;
9581 		new_writereg = wm_gmii_82580_writereg;
9582 	} else if (sc->sc_type >= WM_T_82544) {
9583 		/* 82544, 0, [56], [17], 8257[1234] and 82583 */
9584 		new_readreg = wm_gmii_i82544_readreg;
9585 		new_writereg = wm_gmii_i82544_writereg;
9586 	} else {
9587 		new_readreg = wm_gmii_i82543_readreg;
9588 		new_writereg = wm_gmii_i82543_writereg;
9589 	}
9590 
9591 	if (new_phytype == WMPHY_BM) {
9592 		/* All BM use _bm_ */
9593 		new_readreg = wm_gmii_bm_readreg;
9594 		new_writereg = wm_gmii_bm_writereg;
9595 	}
9596 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
9597 		/* All PCH* use _hv_ */
9598 		new_readreg = wm_gmii_hv_readreg;
9599 		new_writereg = wm_gmii_hv_writereg;
9600 	}
9601 
9602 	/* Diag output */
9603 	if (doubt_phytype != WMPHY_UNKNOWN)
9604 		aprint_error_dev(dev, "Assumed new PHY type was "
9605 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
9606 		    new_phytype);
9607 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
9608 	    && (sc->sc_phytype != new_phytype ))
9609 		aprint_error_dev(dev, "Previously assumed PHY type(%u)"
9610 		    "was incorrect. New PHY type = %u\n",
9611 		    sc->sc_phytype, new_phytype);
9612 
9613 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
9614 		aprint_error_dev(dev, "PHY type is still unknown.\n");
9615 
9616 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
9617 		aprint_error_dev(dev, "Previously assumed PHY read/write "
9618 		    "function was incorrect.\n");
9619 
9620 	/* Update now */
9621 	sc->sc_phytype = new_phytype;
9622 	mii->mii_readreg = new_readreg;
9623 	mii->mii_writereg = new_writereg;
9624 }
9625 
9626 /*
9627  * wm_get_phy_id_82575:
9628  *
9629  * Return PHY ID. Return -1 if it failed.
9630  */
9631 static int
9632 wm_get_phy_id_82575(struct wm_softc *sc)
9633 {
9634 	uint32_t reg;
9635 	int phyid = -1;
9636 
9637 	/* XXX */
9638 	if ((sc->sc_flags & WM_F_SGMII) == 0)
9639 		return -1;
9640 
9641 	if (wm_sgmii_uses_mdio(sc)) {
9642 		switch (sc->sc_type) {
9643 		case WM_T_82575:
9644 		case WM_T_82576:
9645 			reg = CSR_READ(sc, WMREG_MDIC);
9646 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
9647 			break;
9648 		case WM_T_82580:
9649 		case WM_T_I350:
9650 		case WM_T_I354:
9651 		case WM_T_I210:
9652 		case WM_T_I211:
9653 			reg = CSR_READ(sc, WMREG_MDICNFG);
9654 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
9655 			break;
9656 		default:
9657 			return -1;
9658 		}
9659 	}
9660 
9661 	return phyid;
9662 }
9663 
9664 
9665 /*
9666  * wm_gmii_mediainit:
9667  *
9668  *	Initialize media for use on 1000BASE-T devices.
9669  */
9670 static void
9671 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
9672 {
9673 	device_t dev = sc->sc_dev;
9674 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9675 	struct mii_data *mii = &sc->sc_mii;
9676 	uint32_t reg;
9677 
9678 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
9679 		device_xname(sc->sc_dev), __func__));
9680 
9681 	/* We have GMII. */
9682 	sc->sc_flags |= WM_F_HAS_MII;
9683 
9684 	if (sc->sc_type == WM_T_80003)
		sc->sc_tipg = TIPG_1000T_80003_DFLT;
9686 	else
9687 		sc->sc_tipg = TIPG_1000T_DFLT;
9688 
9689 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
9690 	if ((sc->sc_type == WM_T_82580)
9691 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
9692 	    || (sc->sc_type == WM_T_I211)) {
9693 		reg = CSR_READ(sc, WMREG_PHPM);
9694 		reg &= ~PHPM_GO_LINK_D;
9695 		CSR_WRITE(sc, WMREG_PHPM, reg);
9696 	}
9697 
9698 	/*
9699 	 * Let the chip set speed/duplex on its own based on
9700 	 * signals from the PHY.
9701 	 * XXXbouyer - I'm not sure this is right for the 80003,
9702 	 * the em driver only sets CTRL_SLU here - but it seems to work.
9703 	 */
9704 	sc->sc_ctrl |= CTRL_SLU;
9705 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9706 
9707 	/* Initialize our media structures and probe the GMII. */
9708 	mii->mii_ifp = ifp;
9709 
9710 	mii->mii_statchg = wm_gmii_statchg;
9711 
9712 	/* get PHY control from SMBus to PCIe */
9713 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
9714 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
9715 	    || (sc->sc_type == WM_T_PCH_CNP))
9716 		wm_smbustopci(sc);
9717 
9718 	wm_gmii_reset(sc);
9719 
9720 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
9721 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
9722 	    wm_gmii_mediastatus);
9723 
9724 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
9725 	    || (sc->sc_type == WM_T_82580)
9726 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
9727 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
9728 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
9729 			/* Attach only one port */
9730 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
9731 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
9732 		} else {
9733 			int i, id;
9734 			uint32_t ctrl_ext;
9735 
9736 			id = wm_get_phy_id_82575(sc);
9737 			if (id != -1) {
9738 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
9739 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
9740 			}
9741 			if ((id == -1)
9742 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
				/* Power on the SGMII PHY if it is disabled */
9744 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9745 				CSR_WRITE(sc, WMREG_CTRL_EXT,
9746 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
9747 				CSR_WRITE_FLUSH(sc);
9748 				delay(300*1000); /* XXX too long */
9749 
				/* Try PHY addresses 1 to 7 */
9751 				for (i = 1; i < 8; i++)
9752 					mii_attach(sc->sc_dev, &sc->sc_mii,
9753 					    0xffffffff, i, MII_OFFSET_ANY,
9754 					    MIIF_DOPAUSE);
9755 
9756 				/* restore previous sfp cage power state */
9757 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
9758 			}
9759 		}
9760 	} else {
9761 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
9762 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
9763 	}
9764 
9765 	/*
	 * If the MAC is a PCH2 or newer variant and no MII PHY was
	 * detected, call wm_set_mdio_slow_mode_hv() as a workaround and
	 * retry.
9768 	 */
9769 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
9770 		|| (sc->sc_type == WM_T_PCH_SPT)
9771 		|| (sc->sc_type == WM_T_PCH_CNP))
9772 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
9773 		wm_set_mdio_slow_mode_hv(sc);
9774 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
9775 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
9776 	}
9777 
9778 	/*
9779 	 * (For ICH8 variants)
9780 	 * If PHY detection failed, use BM's r/w function and retry.
9781 	 */
9782 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
9783 		/* if failed, retry with *_bm_* */
9784 		aprint_verbose_dev(dev, "Assumed PHY access function "
9785 		    "(type = %d) might be incorrect. Use BM and retry.\n",
9786 		    sc->sc_phytype);
9787 		sc->sc_phytype = WMPHY_BM;
9788 		mii->mii_readreg = wm_gmii_bm_readreg;
9789 		mii->mii_writereg = wm_gmii_bm_writereg;
9790 
9791 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
9792 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
9793 	}
9794 
9795 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
9797 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
9798 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
9799 		sc->sc_phytype = WMPHY_NONE;
9800 	} else {
9801 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
9802 
9803 		/*
		 * PHY found! Check the PHY type again via the second call of
9805 		 * wm_gmii_setup_phytype.
9806 		 */
9807 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
9808 		    child->mii_mpd_model);
9809 
9810 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
9811 	}
9812 }
9813 
9814 /*
9815  * wm_gmii_mediachange:	[ifmedia interface function]
9816  *
9817  *	Set hardware to newly-selected media on a 1000BASE-T device.
9818  */
9819 static int
9820 wm_gmii_mediachange(struct ifnet *ifp)
9821 {
9822 	struct wm_softc *sc = ifp->if_softc;
9823 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9824 	int rc;
9825 
9826 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
9827 		device_xname(sc->sc_dev), __func__));
9828 	if ((ifp->if_flags & IFF_UP) == 0)
9829 		return 0;
9830 
9831 	/* Disable D0 LPLU. */
9832 	wm_lplu_d0_disable(sc);
9833 
9834 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
9835 	sc->sc_ctrl |= CTRL_SLU;
9836 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9837 	    || (sc->sc_type > WM_T_82543)) {
9838 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
9839 	} else {
9840 		sc->sc_ctrl &= ~CTRL_ASDE;
9841 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
9842 		if (ife->ifm_media & IFM_FDX)
9843 			sc->sc_ctrl |= CTRL_FD;
9844 		switch (IFM_SUBTYPE(ife->ifm_media)) {
9845 		case IFM_10_T:
9846 			sc->sc_ctrl |= CTRL_SPEED_10;
9847 			break;
9848 		case IFM_100_TX:
9849 			sc->sc_ctrl |= CTRL_SPEED_100;
9850 			break;
9851 		case IFM_1000_T:
9852 			sc->sc_ctrl |= CTRL_SPEED_1000;
9853 			break;
9854 		default:
9855 			panic("wm_gmii_mediachange: bad media 0x%x",
9856 			    ife->ifm_media);
9857 		}
9858 	}
9859 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9860 	CSR_WRITE_FLUSH(sc);
9861 	if (sc->sc_type <= WM_T_82543)
9862 		wm_gmii_reset(sc);
9863 
9864 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
9865 		return 0;
9866 	return rc;
9867 }
9868 
9869 /*
9870  * wm_gmii_mediastatus:	[ifmedia interface function]
9871  *
9872  *	Get the current interface media status on a 1000BASE-T device.
9873  */
9874 static void
9875 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9876 {
9877 	struct wm_softc *sc = ifp->if_softc;
9878 
9879 	ether_mediastatus(ifp, ifmr);
9880 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
9881 	    | sc->sc_flowflags;
9882 }
9883 
9884 #define	MDI_IO		CTRL_SWDPIN(2)
9885 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
9886 #define	MDI_CLK		CTRL_SWDPIN(3)
9887 
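/*
 * wm_i82543_mii_sendbits:
 *
 *	Bit-bang the given value out the i82543's software-defined MDIO
 *	pins, most significant bit first, toggling MDI_CLK for each bit.
 */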
9888 static void
9889 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
9890 {
9891 	uint32_t i, v;
9892 
9893 	v = CSR_READ(sc, WMREG_CTRL);
9894 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
9895 	v |= MDI_DIR | CTRL_SWDPIO(3);
9896 
9897 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
9898 		if (data & i)
9899 			v |= MDI_IO;
9900 		else
9901 			v &= ~MDI_IO;
9902 		CSR_WRITE(sc, WMREG_CTRL, v);
9903 		CSR_WRITE_FLUSH(sc);
9904 		delay(10);
9905 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
9906 		CSR_WRITE_FLUSH(sc);
9907 		delay(10);
9908 		CSR_WRITE(sc, WMREG_CTRL, v);
9909 		CSR_WRITE_FLUSH(sc);
9910 		delay(10);
9911 	}
9912 }
9913 
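/*
 * wm_i82543_mii_recvbits:
 *
 *	Bit-bang a 16-bit value in from the i82543's software-defined
 *	MDIO pins, including the turnaround clock cycles.
 */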
9914 static uint32_t
9915 wm_i82543_mii_recvbits(struct wm_softc *sc)
9916 {
9917 	uint32_t v, i, data = 0;
9918 
9919 	v = CSR_READ(sc, WMREG_CTRL);
9920 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
9921 	v |= CTRL_SWDPIO(3);
9922 
9923 	CSR_WRITE(sc, WMREG_CTRL, v);
9924 	CSR_WRITE_FLUSH(sc);
9925 	delay(10);
9926 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
9927 	CSR_WRITE_FLUSH(sc);
9928 	delay(10);
9929 	CSR_WRITE(sc, WMREG_CTRL, v);
9930 	CSR_WRITE_FLUSH(sc);
9931 	delay(10);
9932 
9933 	for (i = 0; i < 16; i++) {
9934 		data <<= 1;
9935 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
9936 		CSR_WRITE_FLUSH(sc);
9937 		delay(10);
9938 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
9939 			data |= 1;
9940 		CSR_WRITE(sc, WMREG_CTRL, v);
9941 		CSR_WRITE_FLUSH(sc);
9942 		delay(10);
9943 	}
9944 
9945 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
9946 	CSR_WRITE_FLUSH(sc);
9947 	delay(10);
9948 	CSR_WRITE(sc, WMREG_CTRL, v);
9949 	CSR_WRITE_FLUSH(sc);
9950 	delay(10);
9951 
9952 	return data;
9953 }
9954 
9955 #undef MDI_IO
9956 #undef MDI_DIR
9957 #undef MDI_CLK
9958 
9959 /*
9960  * wm_gmii_i82543_readreg:	[mii interface function]
9961  *
9962  *	Read a PHY register on the GMII (i82543 version).
9963  */
9964 static int
9965 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
9966 {
9967 	struct wm_softc *sc = device_private(dev);
9968 	int rv;
9969 
9970 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
9971 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
9972 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
9973 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
9974 
9975 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
9976 	    device_xname(dev), phy, reg, rv));
9977 
9978 	return rv;
9979 }
9980 
9981 /*
9982  * wm_gmii_i82543_writereg:	[mii interface function]
9983  *
9984  *	Write a PHY register on the GMII (i82543 version).
9985  */
9986 static void
9987 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
9988 {
9989 	struct wm_softc *sc = device_private(dev);
9990 
9991 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
9992 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
9993 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
9994 	    (MII_COMMAND_START << 30), 32);
9995 }
9996 
9997 /*
9998  * wm_gmii_mdic_readreg:	[mii interface function]
9999  *
10000  *	Read a PHY register on the GMII.
10001  */
10002 static int
10003 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
10004 {
10005 	struct wm_softc *sc = device_private(dev);
10006 	uint32_t mdic = 0;
10007 	int i, rv;
10008 
10009 	if (reg > MII_ADDRMASK) {
10010 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
10011 		    __func__, sc->sc_phytype, reg);
10012 		reg &= MII_ADDRMASK;
10013 	}
10014 
10015 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
10016 	    MDIC_REGADD(reg));
10017 
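	/* Poll for MDIC_READY, delaying 50us between checks. */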
10018 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
10019 		mdic = CSR_READ(sc, WMREG_MDIC);
10020 		if (mdic & MDIC_READY)
10021 			break;
10022 		delay(50);
10023 	}
10024 
10025 	if ((mdic & MDIC_READY) == 0) {
10026 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
10027 		    device_xname(dev), phy, reg);
10028 		rv = 0;
10029 	} else if (mdic & MDIC_E) {
10030 #if 0 /* This is normal if no PHY is present. */
10031 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
10032 		    device_xname(dev), phy, reg);
10033 #endif
10034 		rv = 0;
10035 	} else {
10036 		rv = MDIC_DATA(mdic);
10037 		if (rv == 0xffff)
10038 			rv = 0;
10039 	}
10040 
10041 	return rv;
10042 }
10043 
10044 /*
10045  * wm_gmii_mdic_writereg:	[mii interface function]
10046  *
10047  *	Write a PHY register on the GMII.
10048  */
10049 static void
10050 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
10051 {
10052 	struct wm_softc *sc = device_private(dev);
10053 	uint32_t mdic = 0;
10054 	int i;
10055 
10056 	if (reg > MII_ADDRMASK) {
10057 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
10058 		    __func__, sc->sc_phytype, reg);
10059 		reg &= MII_ADDRMASK;
10060 	}
10061 
10062 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
10063 	    MDIC_REGADD(reg) | MDIC_DATA(val));
10064 
10065 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
10066 		mdic = CSR_READ(sc, WMREG_MDIC);
10067 		if (mdic & MDIC_READY)
10068 			break;
10069 		delay(50);
10070 	}
10071 
10072 	if ((mdic & MDIC_READY) == 0)
10073 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
10074 		    device_xname(dev), phy, reg);
10075 	else if (mdic & MDIC_E)
10076 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
10077 		    device_xname(dev), phy, reg);
10078 }
10079 
10080 /*
10081  * wm_gmii_i82544_readreg:	[mii interface function]
10082  *
10083  *	Read a PHY register on the GMII.
10084  */
10085 static int
10086 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
10087 {
10088 	struct wm_softc *sc = device_private(dev);
10089 	int rv;
10090 
10091 	if (sc->phy.acquire(sc)) {
10092 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10093 		return 0;
10094 	}
10095 
10096 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
10097 		switch (sc->sc_phytype) {
10098 		case WMPHY_IGP:
10099 		case WMPHY_IGP_2:
10100 		case WMPHY_IGP_3:
			wm_gmii_mdic_writereg(dev, phy,
			    MII_IGPHY_PAGE_SELECT, reg);
10102 			break;
10103 		default:
10104 #ifdef WM_DEBUG
10105 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
10106 			    __func__, sc->sc_phytype, reg);
10107 #endif
10108 			break;
10109 		}
10110 	}
10111 
10112 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
10113 	sc->phy.release(sc);
10114 
10115 	return rv;
10116 }
10117 
10118 /*
10119  * wm_gmii_i82544_writereg:	[mii interface function]
10120  *
10121  *	Write a PHY register on the GMII.
10122  */
10123 static void
10124 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
10125 {
10126 	struct wm_softc *sc = device_private(dev);
10127 
10128 	if (sc->phy.acquire(sc)) {
10129 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10130 		return;
10131 	}
10132 
10133 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
10134 		switch (sc->sc_phytype) {
10135 		case WMPHY_IGP:
10136 		case WMPHY_IGP_2:
10137 		case WMPHY_IGP_3:
			wm_gmii_mdic_writereg(dev, phy,
			    MII_IGPHY_PAGE_SELECT, reg);
10139 			break;
10140 		default:
10141 #ifdef WM_DEBUG
10142 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
10143 			    __func__, sc->sc_phytype, reg);
10144 #endif
10145 			break;
10146 		}
10147 	}
10148 
10149 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
10150 	sc->phy.release(sc);
10151 }
10152 
10153 /*
10154  * wm_gmii_i80003_readreg:	[mii interface function]
10155  *
 *	Read a PHY register on the Kumeran bus.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
10159  */
10160 static int
10161 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
10162 {
10163 	struct wm_softc *sc = device_private(dev);
10164 	int page_select, temp;
10165 	int rv;
10166 
10167 	if (phy != 1) /* only one PHY on kumeran bus */
10168 		return 0;
10169 
10170 	if (sc->phy.acquire(sc)) {
10171 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10172 		return 0;
10173 	}
10174 
10175 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
10176 		page_select = GG82563_PHY_PAGE_SELECT;
10177 	else {
10178 		/*
10179 		 * Use Alternative Page Select register to access registers
10180 		 * 30 and 31.
10181 		 */
10182 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
10183 	}
10184 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
10185 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
10186 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
10187 		/*
		 * Wait another 200us to work around a bug in the ready bit
		 * of the MDIC register.
10190 		 */
10191 		delay(200);
10192 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
10193 			device_printf(dev, "%s failed\n", __func__);
10194 			rv = 0; /* XXX */
10195 			goto out;
10196 		}
10197 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
10198 		delay(200);
10199 	} else
10200 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
10201 
10202 out:
10203 	sc->phy.release(sc);
10204 	return rv;
10205 }
10206 
10207 /*
10208  * wm_gmii_i80003_writereg:	[mii interface function]
10209  *
 *	Write a PHY register on the Kumeran bus.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
10213  */
10214 static void
10215 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
10216 {
10217 	struct wm_softc *sc = device_private(dev);
10218 	int page_select, temp;
10219 
10220 	if (phy != 1) /* only one PHY on kumeran bus */
10221 		return;
10222 
10223 	if (sc->phy.acquire(sc)) {
10224 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10225 		return;
10226 	}
10227 
10228 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
10229 		page_select = GG82563_PHY_PAGE_SELECT;
10230 	else {
10231 		/*
10232 		 * Use Alternative Page Select register to access registers
10233 		 * 30 and 31.
10234 		 */
10235 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
10236 	}
10237 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
10238 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
10239 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
10240 		/*
		 * Wait another 200us to work around a bug in the ready bit
		 * of the MDIC register.
10243 		 */
10244 		delay(200);
10245 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
10246 			device_printf(dev, "%s failed\n", __func__);
10247 			goto out;
10248 		}
10249 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
10250 		delay(200);
10251 	} else
10252 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
10253 
10254 out:
10255 	sc->phy.release(sc);
10256 }
10257 
10258 /*
10259  * wm_gmii_bm_readreg:	[mii interface function]
10260  *
 *	Read a PHY register on the BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
10264  */
10265 static int
10266 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
10267 {
10268 	struct wm_softc *sc = device_private(dev);
10269 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
10270 	uint16_t val;
10271 	int rv;
10272 
10273 	if (sc->phy.acquire(sc)) {
10274 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10275 		return 0;
10276 	}
10277 
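	/*
	 * On BM PHYs other than the 82574/82583 ones, registers on page
	 * 768 and above (and a few special registers) are only reachable
	 * at PHY address 1.
	 */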
10278 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
10279 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
10280 		    || (reg == 31)) ? 1 : phy;
10281 	/* Page 800 works differently than the rest so it has its own func */
10282 	if (page == BM_WUC_PAGE) {
10283 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
10284 		rv = val;
10285 		goto release;
10286 	}
10287 
10288 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
10289 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
10290 		    && (sc->sc_type != WM_T_82583))
10291 			wm_gmii_mdic_writereg(dev, phy,
10292 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
10293 		else
10294 			wm_gmii_mdic_writereg(dev, phy,
10295 			    BME1000_PHY_PAGE_SELECT, page);
10296 	}
10297 
10298 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
10299 
10300 release:
10301 	sc->phy.release(sc);
10302 	return rv;
10303 }
10304 
10305 /*
10306  * wm_gmii_bm_writereg:	[mii interface function]
10307  *
 *	Write a PHY register on the BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
10311  */
10312 static void
10313 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
10314 {
10315 	struct wm_softc *sc = device_private(dev);
10316 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
10317 
10318 	if (sc->phy.acquire(sc)) {
10319 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10320 		return;
10321 	}
10322 
10323 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
10324 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
10325 		    || (reg == 31)) ? 1 : phy;
10326 	/* Page 800 works differently than the rest so it has its own func */
10327 	if (page == BM_WUC_PAGE) {
10328 		uint16_t tmp;
10329 
10330 		tmp = val;
10331 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
10332 		goto release;
10333 	}
10334 
10335 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
10336 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
10337 		    && (sc->sc_type != WM_T_82583))
10338 			wm_gmii_mdic_writereg(dev, phy,
10339 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
10340 		else
10341 			wm_gmii_mdic_writereg(dev, phy,
10342 			    BME1000_PHY_PAGE_SELECT, page);
10343 	}
10344 
10345 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
10346 
10347 release:
10348 	sc->phy.release(sc);
10349 }
10350 
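/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read (rd != 0) or write (rd == 0) a BM PHY wakeup register on
 *	page 800.  Access is enabled through the WUC enable bit on page
 *	769, and the original WUCE value is restored afterwards.
 */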
10351 static void
10352 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd)
10353 {
10354 	struct wm_softc *sc = device_private(dev);
10355 	uint16_t regnum = BM_PHY_REG_NUM(offset);
10356 	uint16_t wuce, reg;
10357 
10358 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
10359 		device_xname(dev), __func__));
10360 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
10361 	if (sc->sc_type == WM_T_PCH) {
		/* XXX The e1000 driver does nothing here... why? */
10363 	}
10364 
10365 	/*
10366 	 * 1) Enable PHY wakeup register first.
10367 	 * See e1000_enable_phy_wakeup_reg_access_bm().
10368 	 */
10369 
10370 	/* Set page 769 */
10371 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
10372 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
10373 
10374 	/* Read WUCE and save it */
10375 	wuce = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
10376 
10377 	reg = wuce | BM_WUC_ENABLE_BIT;
10378 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
10379 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, reg);
10380 
10381 	/* Select page 800 */
10382 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
10383 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
10384 
10385 	/*
10386 	 * 2) Access PHY wakeup register.
10387 	 * See e1000_access_phy_wakeup_reg_bm.
10388 	 */
10389 
10390 	/* Write page 800 */
10391 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
10392 
10393 	if (rd)
10394 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
10395 	else
10396 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
10397 
10398 	/*
10399 	 * 3) Disable PHY wakeup register.
10400 	 * See e1000_disable_phy_wakeup_reg_access_bm().
10401 	 */
10402 	/* Set page 769 */
10403 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
10404 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
10405 
10406 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, wuce);
10407 }
10408 
10409 /*
10410  * wm_gmii_hv_readreg:	[mii interface function]
10411  *
 *	Read a PHY register on the HV (PCH and newer) PHYs.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
10415  */
10416 static int
10417 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
10418 {
10419 	struct wm_softc *sc = device_private(dev);
10420 	int rv;
10421 
10422 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
10423 		device_xname(dev), __func__));
10424 	if (sc->phy.acquire(sc)) {
10425 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10426 		return 0;
10427 	}
10428 
10429 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg);
10430 	sc->phy.release(sc);
10431 	return rv;
10432 }
10433 
10434 static int
10435 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg)
10436 {
10437 	uint16_t page = BM_PHY_REG_PAGE(reg);
10438 	uint16_t regnum = BM_PHY_REG_NUM(reg);
10439 	uint16_t val;
10440 	int rv;
10441 
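	/*
	 * Registers on pages at or above HV_INTC_FC_PAGE_START are only
	 * accessible through PHY address 1.
	 */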
10442 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
10443 
10444 	/* Page 800 works differently than the rest so it has its own func */
10445 	if (page == BM_WUC_PAGE) {
10446 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
10447 		return val;
10448 	}
10449 
10450 	/*
	 * Pages lower than 768 work differently from the rest, so they
	 * would need their own function (not implemented)
10453 	 */
10454 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
10455 		printf("gmii_hv_readreg!!!\n");
10456 		return 0;
10457 	}
10458 
10459 	/*
10460 	 * XXX I21[789] documents say that the SMBus Address register is at
10461 	 * PHY address 01, Page 0 (not 768), Register 26.
10462 	 */
10463 	if (page == HV_INTC_FC_PAGE_START)
10464 		page = 0;
10465 
10466 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
10467 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
10468 		    page << BME1000_PAGE_SHIFT);
10469 	}
10470 
10471 	rv = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
10472 	return rv;
10473 }
10474 
10475 /*
10476  * wm_gmii_hv_writereg:	[mii interface function]
10477  *
 *	Write a PHY register on the HV (PCH and newer) PHYs.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
10481  */
10482 static void
10483 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
10484 {
10485 	struct wm_softc *sc = device_private(dev);
10486 
10487 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
10488 		device_xname(dev), __func__));
10489 
10490 	if (sc->phy.acquire(sc)) {
10491 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10492 		return;
10493 	}
10494 
10495 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
10496 	sc->phy.release(sc);
10497 }
10498 
10499 static void
10500 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, int val)
10501 {
10502 	struct wm_softc *sc = device_private(dev);
10503 	uint16_t page = BM_PHY_REG_PAGE(reg);
10504 	uint16_t regnum = BM_PHY_REG_NUM(reg);
10505 
10506 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
10507 
10508 	/* Page 800 works differently than the rest so it has its own func */
10509 	if (page == BM_WUC_PAGE) {
10510 		uint16_t tmp;
10511 
10512 		tmp = val;
10513 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
10514 		return;
10515 	}
10516 
10517 	/*
10518 	 * Pages lower than 768 work differently from the rest, so they have
10519 	 * their own handling.
10520 	 */
10521 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
10522 		printf("gmii_hv_writereg!!!\n");
10523 		return;
10524 	}
10525 
10526 	{
10527 		/*
10528 		 * XXX I21[789] documents say that the SMBus Address register
10529 		 * is at PHY address 01, Page 0 (not 768), Register 26.
10530 		 */
10531 		if (page == HV_INTC_FC_PAGE_START)
10532 			page = 0;
10533 
10534 		/*
10535 		 * XXX Work around MDIO accesses being disabled after entering
10536 		 * IEEE Power Down (whenever bit 11 of the PHY control
10537 		 * register is set)
10538 		 */
10539 		if (sc->sc_phytype == WMPHY_82578) {
10540 			struct mii_softc *child;
10541 
10542 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
10543 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
10544 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
10545 			    && ((val & (1 << 11)) != 0)) {
10546 				printf("XXX need workaround\n");
10547 			}
10548 		}
10549 
10550 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
10551 			wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
10552 			    page << BME1000_PAGE_SHIFT);
10553 		}
10554 	}
10555 
10556 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
10557 }
10558 
10559 /*
10560  * wm_gmii_82580_readreg:	[mii interface function]
10561  *
10562  *	Read a PHY register on the 82580 and I350.
10563  * This could be handled by the PHY layer if we didn't have to lock the
10564  * resource ...
10565  */
10566 static int
10567 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
10568 {
10569 	struct wm_softc *sc = device_private(dev);
10570 	int rv;
10571 
10572 	if (sc->phy.acquire(sc) != 0) {
10573 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10574 		return 0;
10575 	}
10576 
10577 #ifdef DIAGNOSTIC
10578 	if (reg > MII_ADDRMASK) {
10579 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
10580 		    __func__, sc->sc_phytype, reg);
10581 		reg &= MII_ADDRMASK;
10582 	}
10583 #endif
10584 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
10585 
10586 	sc->phy.release(sc);
10587 	return rv;
10588 }
10589 
10590 /*
10591  * wm_gmii_82580_writereg:	[mii interface function]
10592  *
10593  *	Write a PHY register on the 82580 and I350.
10594  * This could be handled by the PHY layer if we didn't have to lock the
10595  * resource ...
10596  */
10597 static void
10598 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
10599 {
10600 	struct wm_softc *sc = device_private(dev);
10601 
10602 	if (sc->phy.acquire(sc) != 0) {
10603 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10604 		return;
10605 	}
10606 
10607 #ifdef DIAGNOSTIC
10608 	if (reg > MII_ADDRMASK) {
10609 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
10610 		    __func__, sc->sc_phytype, reg);
10611 		reg &= MII_ADDRMASK;
10612 	}
10613 #endif
10614 	wm_gmii_mdic_writereg(dev, phy, reg, val);
10615 
10616 	sc->phy.release(sc);
10617 }
10618 
10619 /*
10620  * wm_gmii_gs40g_readreg:	[mii interface function]
10621  *
10622  *	Read a PHY register on the I210 and I211.
10623  * This could be handled by the PHY layer if we didn't have to lock the
10624  * resource ...
10625  */
10626 static int
10627 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
10628 {
10629 	struct wm_softc *sc = device_private(dev);
10630 	int page, offset;
10631 	int rv;
10632 
10633 	/* Acquire semaphore */
10634 	if (sc->phy.acquire(sc)) {
10635 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10636 		return 0;
10637 	}
10638 
10639 	/* Page select */
10640 	page = reg >> GS40G_PAGE_SHIFT;
10641 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
10642 
10643 	/* Read reg */
10644 	offset = reg & GS40G_OFFSET_MASK;
10645 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
10646 
10647 	sc->phy.release(sc);
10648 	return rv;
10649 }
10650 
10651 /*
10652  * wm_gmii_gs40g_writereg:	[mii interface function]
10653  *
10654  *	Write a PHY register on the I210 and I211.
10655  * This could be handled by the PHY layer if we didn't have to lock the
10656  * resource ...
10657  */
10658 static void
10659 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
10660 {
10661 	struct wm_softc *sc = device_private(dev);
10662 	int page, offset;
10663 
10664 	/* Acquire semaphore */
10665 	if (sc->phy.acquire(sc)) {
10666 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10667 		return;
10668 	}
10669 
10670 	/* Page select */
10671 	page = reg >> GS40G_PAGE_SHIFT;
10672 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
10673 
10674 	/* Write reg */
10675 	offset = reg & GS40G_OFFSET_MASK;
10676 	wm_gmii_mdic_writereg(dev, phy, offset, val);
10677 
10678 	/* Release semaphore */
10679 	sc->phy.release(sc);
10680 }
10681 
10682 /*
10683  * wm_gmii_statchg:	[mii interface function]
10684  *
10685  *	Callback from MII layer when media changes.
10686  */
10687 static void
10688 wm_gmii_statchg(struct ifnet *ifp)
10689 {
10690 	struct wm_softc *sc = ifp->if_softc;
10691 	struct mii_data *mii = &sc->sc_mii;
10692 
10693 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
10694 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
10695 	sc->sc_fcrtl &= ~FCRTL_XONE;
10696 
10697 	/*
10698 	 * Get flow control negotiation result.
10699 	 */
10700 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
10701 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
10702 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
10703 		mii->mii_media_active &= ~IFM_ETH_FMASK;
10704 	}
10705 
10706 	if (sc->sc_flowflags & IFM_FLOW) {
10707 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
10708 			sc->sc_ctrl |= CTRL_TFCE;
10709 			sc->sc_fcrtl |= FCRTL_XONE;
10710 		}
10711 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
10712 			sc->sc_ctrl |= CTRL_RFCE;
10713 	}
10714 
10715 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
10716 		DPRINTF(WM_DEBUG_LINK,
10717 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
10718 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
10719 	} else {
10720 		DPRINTF(WM_DEBUG_LINK,
10721 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
10722 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
10723 	}
10724 
10725 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10726 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
10727 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
10728 						 : WMREG_FCRTL, sc->sc_fcrtl);
10729 	if (sc->sc_type == WM_T_80003) {
10730 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
10731 		case IFM_1000_T:
10732 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
10733 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
10734 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
10735 			break;
10736 		default:
10737 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
10738 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
10739 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
10740 			break;
10741 		}
10742 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
10743 	}
10744 }
10745 
10746 /* kumeran related (80003, ICH* and PCH*) */
10747 
10748 /*
10749  * wm_kmrn_readreg:
10750  *
10751  *	Read a kumeran register
10752  */
10753 static int
10754 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
10755 {
10756 	int rv;
10757 
10758 	if (sc->sc_type == WM_T_80003)
10759 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
10760 	else
10761 		rv = sc->phy.acquire(sc);
10762 	if (rv != 0) {
10763 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
10764 		    __func__);
10765 		return rv;
10766 	}
10767 
10768 	rv = wm_kmrn_readreg_locked(sc, reg, val);
10769 
10770 	if (sc->sc_type == WM_T_80003)
10771 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
10772 	else
10773 		sc->phy.release(sc);
10774 
10775 	return rv;
10776 }
10777 
10778 static int
10779 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
10780 {
10781 
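	/*
	 * KUMCTRLSTA multiplexes address and data: write the register offset
	 * into the OFFSET field with REN set to request a read; after a
	 * short delay the result appears in the data field (KUMCTRLSTA_MASK).
	 */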
10782 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
10783 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
10784 	    KUMCTRLSTA_REN);
10785 	CSR_WRITE_FLUSH(sc);
10786 	delay(2);
10787 
10788 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
10789 
10790 	return 0;
10791 }
10792 
10793 /*
10794  * wm_kmrn_writereg:
10795  *
10796  *	Write a kumeran register
10797  */
10798 static int
10799 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
10800 {
10801 	int rv;
10802 
10803 	if (sc->sc_type == WM_T_80003)
10804 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
10805 	else
10806 		rv = sc->phy.acquire(sc);
10807 	if (rv != 0) {
10808 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
10809 		    __func__);
10810 		return rv;
10811 	}
10812 
10813 	rv = wm_kmrn_writereg_locked(sc, reg, val);
10814 
10815 	if (sc->sc_type == WM_T_80003)
10816 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
10817 	else
10818 		sc->phy.release(sc);
10819 
10820 	return rv;
10821 }
10822 
10823 static int
10824 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
10825 {
10826 
10827 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
10828 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
10829 
10830 	return 0;
10831 }
10832 
10833 /* SGMII related */
10834 
10835 /*
10836  * wm_sgmii_uses_mdio
10837  *
10838  * Check whether the transaction is to the internal PHY or the external
10839  * MDIO interface. Return true if it's MDIO.
10840  */
10841 static bool
10842 wm_sgmii_uses_mdio(struct wm_softc *sc)
10843 {
10844 	uint32_t reg;
10845 	bool ismdio = false;
10846 
10847 	switch (sc->sc_type) {
10848 	case WM_T_82575:
10849 	case WM_T_82576:
10850 		reg = CSR_READ(sc, WMREG_MDIC);
10851 		ismdio = ((reg & MDIC_DEST) != 0);
10852 		break;
10853 	case WM_T_82580:
10854 	case WM_T_I350:
10855 	case WM_T_I354:
10856 	case WM_T_I210:
10857 	case WM_T_I211:
10858 		reg = CSR_READ(sc, WMREG_MDICNFG);
10859 		ismdio = ((reg & MDICNFG_DEST) != 0);
10860 		break;
10861 	default:
10862 		break;
10863 	}
10864 
10865 	return ismdio;
10866 }
10867 
10868 /*
10869  * wm_sgmii_readreg:	[mii interface function]
10870  *
10871  *	Read a PHY register on the SGMII
10872  * This could be handled by the PHY layer if we didn't have to lock the
10873  * resource ...
10874  */
10875 static int
10876 wm_sgmii_readreg(device_t dev, int phy, int reg)
10877 {
10878 	struct wm_softc *sc = device_private(dev);
10879 	uint32_t i2ccmd;
10880 	int i, rv;
10881 
10882 	if (sc->phy.acquire(sc)) {
10883 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10884 		return 0;
10885 	}
10886 
10887 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
10888 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
10889 	    | I2CCMD_OPCODE_READ;
10890 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
10891 
10892 	/* Poll the ready bit */
10893 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
10894 		delay(50);
10895 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
10896 		if (i2ccmd & I2CCMD_READY)
10897 			break;
10898 	}
10899 	if ((i2ccmd & I2CCMD_READY) == 0)
10900 		device_printf(dev, "I2CCMD Read did not complete\n");
10901 	if ((i2ccmd & I2CCMD_ERROR) != 0)
10902 		device_printf(dev, "I2CCMD Error bit set\n");
10903 
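	/*
	 * Data arrives byte-swapped on the I2C interface, so swap the two
	 * data bytes back; e.g. a raw value of 0x3412 yields 0x1234.
	 */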
10904 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
10905 
10906 	sc->phy.release(sc);
10907 	return rv;
10908 }
10909 
10910 /*
10911  * wm_sgmii_writereg:	[mii interface function]
10912  *
10913  *	Write a PHY register on the SGMII.
10914  * This could be handled by the PHY layer if we didn't have to lock the
10915  * resource ...
10916  */
10917 static void
10918 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
10919 {
10920 	struct wm_softc *sc = device_private(dev);
10921 	uint32_t i2ccmd;
10922 	int i;
10923 	int val_swapped;
10924 
10925 	if (sc->phy.acquire(sc) != 0) {
10926 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10927 		return;
10928 	}
10929 	/* Swap the data bytes for the I2C interface */
10930 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
10931 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
10932 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
10933 	    | I2CCMD_OPCODE_WRITE | val_swapped;
10934 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
10935 
10936 	/* Poll the ready bit */
10937 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
10938 		delay(50);
10939 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
10940 		if (i2ccmd & I2CCMD_READY)
10941 			break;
10942 	}
10943 	if ((i2ccmd & I2CCMD_READY) == 0)
10944 		device_printf(dev, "I2CCMD Write did not complete\n");
10945 	if ((i2ccmd & I2CCMD_ERROR) != 0)
10946 		device_printf(dev, "I2CCMD Error bit set\n");
10947 
10948 	sc->phy.release(sc);
10949 }
10950 
10951 /* TBI related */
10952 
10953 /*
10954  * wm_tbi_mediainit:
10955  *
10956  *	Initialize media for use on 1000BASE-X devices.
10957  */
10958 static void
10959 wm_tbi_mediainit(struct wm_softc *sc)
10960 {
10961 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10962 	const char *sep = "";
10963 
10964 	if (sc->sc_type < WM_T_82543)
10965 		sc->sc_tipg = TIPG_WM_DFLT;
10966 	else
10967 		sc->sc_tipg = TIPG_LG_DFLT;
10968 
10969 	sc->sc_tbi_serdes_anegticks = 5;
10970 
10971 	/* Initialize our media structures */
10972 	sc->sc_mii.mii_ifp = ifp;
10973 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
10974 
10975 	if ((sc->sc_type >= WM_T_82575)
10976 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
10977 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
10978 		    wm_serdes_mediachange, wm_serdes_mediastatus);
10979 	else
10980 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
10981 		    wm_tbi_mediachange, wm_tbi_mediastatus);
10982 
10983 	/*
10984 	 * SWD Pins:
10985 	 *
10986 	 *	0 = Link LED (output)
10987 	 *	1 = Loss Of Signal (input)
10988 	 */
10989 	sc->sc_ctrl |= CTRL_SWDPIO(0);
10990 
10991 	/* XXX Perhaps this is only for TBI */
10992 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
10993 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
10994 
10995 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
10996 		sc->sc_ctrl &= ~CTRL_LRST;
10997 
10998 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10999 
11000 #define	ADD(ss, mm, dd)							\
11001 do {									\
11002 	aprint_normal("%s%s", sep, ss);					\
11003 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
11004 	sep = ", ";							\
11005 } while (/*CONSTCOND*/0)
11006 
11007 	aprint_normal_dev(sc->sc_dev, "");
11008 
11009 	if (sc->sc_type == WM_T_I354) {
11010 		uint32_t status;
11011 
11012 		status = CSR_READ(sc, WMREG_STATUS);
11013 		if (((status & STATUS_2P5_SKU) != 0)
11014 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
11015 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
11016 		} else
11017 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
11018 	} else if (sc->sc_type == WM_T_82545) {
11019 		/* Only 82545 is LX (XXX except SFP) */
11020 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
11021 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
11022 	} else {
11023 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
11024 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
11025 	}
11026 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
11027 	aprint_normal("\n");
11028 
11029 #undef ADD
11030 
11031 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
11032 }
11033 
11034 /*
11035  * wm_tbi_mediachange:	[ifmedia interface function]
11036  *
11037  *	Set hardware to newly-selected media on a 1000BASE-X device.
11038  */
11039 static int
11040 wm_tbi_mediachange(struct ifnet *ifp)
11041 {
11042 	struct wm_softc *sc = ifp->if_softc;
11043 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
11044 	uint32_t status;
11045 	int i;
11046 
11047 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
11048 		/* XXX need some work for >= 82571 and < 82575 */
11049 		if (sc->sc_type < WM_T_82575)
11050 			return 0;
11051 	}
11052 
11053 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
11054 	    || (sc->sc_type >= WM_T_82575))
11055 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
11056 
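	/*
	 * TXCW is the 802.3z transmit configuration word: ANE enables
	 * autonegotiation, and the FD/HD and pause bits below form the
	 * advertised ability field.
	 */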
11057 	sc->sc_ctrl &= ~CTRL_LRST;
11058 	sc->sc_txcw = TXCW_ANE;
11059 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
11060 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
11061 	else if (ife->ifm_media & IFM_FDX)
11062 		sc->sc_txcw |= TXCW_FD;
11063 	else
11064 		sc->sc_txcw |= TXCW_HD;
11065 
11066 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
11067 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
11068 
11069 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
11070 		    device_xname(sc->sc_dev), sc->sc_txcw));
11071 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
11072 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11073 	CSR_WRITE_FLUSH(sc);
11074 	delay(1000);
11075 
11076 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
11077 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
11078 
11079 	/*
11080 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
11081 	 * if the optics detect a signal, and clear if they don't.
11082 	 */
11083 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
11084 		/* Have signal; wait for the link to come up. */
11085 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
11086 			delay(10000);
11087 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
11088 				break;
11089 		}
11090 
11091 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
11092 			    device_xname(sc->sc_dev),i));
11093 
11094 		status = CSR_READ(sc, WMREG_STATUS);
11095 		DPRINTF(WM_DEBUG_LINK,
11096 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
11097 			device_xname(sc->sc_dev),status, STATUS_LU));
11098 		if (status & STATUS_LU) {
11099 			/* Link is up. */
11100 			DPRINTF(WM_DEBUG_LINK,
11101 			    ("%s: LINK: set media -> link up %s\n",
11102 			    device_xname(sc->sc_dev),
11103 			    (status & STATUS_FD) ? "FDX" : "HDX"));
11104 
11105 			/*
11106 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
11107 			 * automatically, so re-read it into sc->sc_ctrl.
11108 			 */
11109 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
11110 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
11111 			sc->sc_fcrtl &= ~FCRTL_XONE;
11112 			if (status & STATUS_FD)
11113 				sc->sc_tctl |=
11114 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
11115 			else
11116 				sc->sc_tctl |=
11117 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
11118 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
11119 				sc->sc_fcrtl |= FCRTL_XONE;
11120 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
11121 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
11122 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
11123 				      sc->sc_fcrtl);
11124 			sc->sc_tbi_linkup = 1;
11125 		} else {
11126 			if (i == WM_LINKUP_TIMEOUT)
11127 				wm_check_for_link(sc);
11128 			/* Link is down. */
11129 			DPRINTF(WM_DEBUG_LINK,
11130 			    ("%s: LINK: set media -> link down\n",
11131 			    device_xname(sc->sc_dev)));
11132 			sc->sc_tbi_linkup = 0;
11133 		}
11134 	} else {
11135 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
11136 		    device_xname(sc->sc_dev)));
11137 		sc->sc_tbi_linkup = 0;
11138 	}
11139 
11140 	wm_tbi_serdes_set_linkled(sc);
11141 
11142 	return 0;
11143 }
11144 
11145 /*
11146  * wm_tbi_mediastatus:	[ifmedia interface function]
11147  *
11148  *	Get the current interface media status on a 1000BASE-X device.
11149  */
11150 static void
11151 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
11152 {
11153 	struct wm_softc *sc = ifp->if_softc;
11154 	uint32_t ctrl, status;
11155 
11156 	ifmr->ifm_status = IFM_AVALID;
11157 	ifmr->ifm_active = IFM_ETHER;
11158 
11159 	status = CSR_READ(sc, WMREG_STATUS);
11160 	if ((status & STATUS_LU) == 0) {
11161 		ifmr->ifm_active |= IFM_NONE;
11162 		return;
11163 	}
11164 
11165 	ifmr->ifm_status |= IFM_ACTIVE;
11166 	/* Only 82545 is LX */
11167 	if (sc->sc_type == WM_T_82545)
11168 		ifmr->ifm_active |= IFM_1000_LX;
11169 	else
11170 		ifmr->ifm_active |= IFM_1000_SX;
11171 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
11172 		ifmr->ifm_active |= IFM_FDX;
11173 	else
11174 		ifmr->ifm_active |= IFM_HDX;
11175 	ctrl = CSR_READ(sc, WMREG_CTRL);
11176 	if (ctrl & CTRL_RFCE)
11177 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
11178 	if (ctrl & CTRL_TFCE)
11179 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
11180 }
11181 
11182 /* XXX TBI only */
11183 static int
11184 wm_check_for_link(struct wm_softc *sc)
11185 {
11186 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
11187 	uint32_t rxcw;
11188 	uint32_t ctrl;
11189 	uint32_t status;
11190 	uint32_t sig;
11191 
11192 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
11193 		/* XXX need some work for >= 82571 */
11194 		if (sc->sc_type >= WM_T_82571) {
11195 			sc->sc_tbi_linkup = 1;
11196 			return 0;
11197 		}
11198 	}
11199 
11200 	rxcw = CSR_READ(sc, WMREG_RXCW);
11201 	ctrl = CSR_READ(sc, WMREG_CTRL);
11202 	status = CSR_READ(sc, WMREG_STATUS);
11203 
11204 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
11205 
11206 	DPRINTF(WM_DEBUG_LINK,
11207 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
11208 		device_xname(sc->sc_dev), __func__,
11209 		((ctrl & CTRL_SWDPIN(1)) == sig),
11210 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
11211 
11212 	/*
11213 	 * SWDPIN   LU RXCW
11214 	 *      0    0    0
11215 	 *      0    0    1	(should not happen)
11216 	 *      0    1    0	(should not happen)
11217 	 *      0    1    1	(should not happen)
11218 	 *      1    0    0	Disable autonegotiation and force link up
11219 	 *      1    0    1	Got /C/ but no link yet
11220 	 *      1    1    0	(link up)
11221 	 *      1    1    1	If IFM_AUTO, restart autonegotiation
11222 	 *
11223 	 */
11224 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
11225 	    && ((status & STATUS_LU) == 0)
11226 	    && ((rxcw & RXCW_C) == 0)) {
11227 		DPRINTF(WM_DEBUG_LINK, ("%s: force link up and full duplex\n",
11228 			__func__));
11229 		sc->sc_tbi_linkup = 0;
11230 		/* Disable auto-negotiation in the TXCW register */
11231 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
11232 
11233 		/*
11234 		 * Force link-up and also force full-duplex.
11235 		 *
11236 		 * NOTE: TFCE and RFCE in CTRL were updated by the hardware
11237 		 * automatically, so update sc->sc_ctrl from the value read.
11238 		 */
11239 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
11240 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11241 	} else if (((status & STATUS_LU) != 0)
11242 	    && ((rxcw & RXCW_C) != 0)
11243 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
11244 		sc->sc_tbi_linkup = 1;
11245 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonegotiation\n",
11246 			__func__));
11247 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
11248 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
11249 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
11250 	    && ((rxcw & RXCW_C) != 0)) {
11251 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
11252 	} else {
11253 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
11254 			status));
11255 	}
11256 
11257 	return 0;
11258 }
11259 
11260 /*
11261  * wm_tbi_tick:
11262  *
11263  *	Check the link on TBI devices.
11264  *	This function acts as mii_tick().
11265  */
11266 static void
11267 wm_tbi_tick(struct wm_softc *sc)
11268 {
11269 	struct mii_data *mii = &sc->sc_mii;
11270 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
11271 	uint32_t status;
11272 
11273 	KASSERT(WM_CORE_LOCKED(sc));
11274 
11275 	status = CSR_READ(sc, WMREG_STATUS);
11276 
11277 	/* XXX is this needed? */
11278 	(void)CSR_READ(sc, WMREG_RXCW);
11279 	(void)CSR_READ(sc, WMREG_CTRL);
11280 
11281 	/* set link status */
11282 	if ((status & STATUS_LU) == 0) {
11283 		DPRINTF(WM_DEBUG_LINK,
11284 		    ("%s: LINK: checklink -> down\n",
11285 			device_xname(sc->sc_dev)));
11286 		sc->sc_tbi_linkup = 0;
11287 	} else if (sc->sc_tbi_linkup == 0) {
11288 		DPRINTF(WM_DEBUG_LINK,
11289 		    ("%s: LINK: checklink -> up %s\n",
11290 			device_xname(sc->sc_dev),
11291 			(status & STATUS_FD) ? "FDX" : "HDX"));
11292 		sc->sc_tbi_linkup = 1;
11293 		sc->sc_tbi_serdes_ticks = 0;
11294 	}
11295 
11296 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
11297 		goto setled;
11298 
11299 	if ((status & STATUS_LU) == 0) {
11300 		sc->sc_tbi_linkup = 0;
11301 		/* If the timer expired, retry autonegotiation */
11302 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
11303 		    && (++sc->sc_tbi_serdes_ticks
11304 			>= sc->sc_tbi_serdes_anegticks)) {
11305 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
11306 			sc->sc_tbi_serdes_ticks = 0;
11307 			/*
11308 			 * Reset the link, and let autonegotiation do
11309 			 * its thing
11310 			 */
11311 			sc->sc_ctrl |= CTRL_LRST;
11312 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11313 			CSR_WRITE_FLUSH(sc);
11314 			delay(1000);
11315 			sc->sc_ctrl &= ~CTRL_LRST;
11316 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11317 			CSR_WRITE_FLUSH(sc);
11318 			delay(1000);
11319 			CSR_WRITE(sc, WMREG_TXCW,
11320 			    sc->sc_txcw & ~TXCW_ANE);
11321 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
11322 		}
11323 	}
11324 
11325 setled:
11326 	wm_tbi_serdes_set_linkled(sc);
11327 }
11328 
11329 /* SERDES related */
11330 static void
11331 wm_serdes_power_up_link_82575(struct wm_softc *sc)
11332 {
11333 	uint32_t reg;
11334 
11335 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
11336 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
11337 		return;
11338 
11339 	reg = CSR_READ(sc, WMREG_PCS_CFG);
11340 	reg |= PCS_CFG_PCS_EN;
11341 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
11342 
11343 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
11344 	reg &= ~CTRL_EXT_SWDPIN(3);
11345 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11346 	CSR_WRITE_FLUSH(sc);
11347 }
11348 
11349 static int
11350 wm_serdes_mediachange(struct ifnet *ifp)
11351 {
11352 	struct wm_softc *sc = ifp->if_softc;
11353 	bool pcs_autoneg = true; /* XXX */
11354 	uint32_t ctrl_ext, pcs_lctl, reg;
11355 
11356 	/* XXX Currently, this function is not called on 8257[12] */
11357 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
11358 	    || (sc->sc_type >= WM_T_82575))
11359 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
11360 
11361 	wm_serdes_power_up_link_82575(sc);
11362 
11363 	sc->sc_ctrl |= CTRL_SLU;
11364 
11365 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
11366 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
11367 
11368 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
11369 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
11370 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
11371 	case CTRL_EXT_LINK_MODE_SGMII:
11372 		pcs_autoneg = true;
11373 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
11374 		break;
11375 	case CTRL_EXT_LINK_MODE_1000KX:
11376 		pcs_autoneg = false;
11377 		/* FALLTHROUGH */
11378 	default:
11379 		if ((sc->sc_type == WM_T_82575)
11380 		    || (sc->sc_type == WM_T_82576)) {
11381 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
11382 				pcs_autoneg = false;
11383 		}
11384 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
11385 		    | CTRL_FRCFDX;
11386 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
11387 	}
11388 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11389 
11390 	if (pcs_autoneg) {
11391 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
11392 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
11393 
11394 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
11395 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
11396 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
11397 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
11398 	} else
11399 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
11400 
11401 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
11402 
11403 
11404 	return 0;
11405 }
11406 
11407 static void
11408 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
11409 {
11410 	struct wm_softc *sc = ifp->if_softc;
11411 	struct mii_data *mii = &sc->sc_mii;
11412 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
11413 	uint32_t pcs_adv, pcs_lpab, reg;
11414 
11415 	ifmr->ifm_status = IFM_AVALID;
11416 	ifmr->ifm_active = IFM_ETHER;
11417 
11418 	/* Check PCS */
11419 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
11420 	if ((reg & PCS_LSTS_LINKOK) == 0) {
11421 		ifmr->ifm_active |= IFM_NONE;
11422 		sc->sc_tbi_linkup = 0;
11423 		goto setled;
11424 	}
11425 
11426 	sc->sc_tbi_linkup = 1;
11427 	ifmr->ifm_status |= IFM_ACTIVE;
11428 	if (sc->sc_type == WM_T_I354) {
11429 		uint32_t status;
11430 
11431 		status = CSR_READ(sc, WMREG_STATUS);
11432 		if (((status & STATUS_2P5_SKU) != 0)
11433 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
11434 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
11435 		} else
11436 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
11437 	} else {
11438 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
11439 		case PCS_LSTS_SPEED_10:
11440 			ifmr->ifm_active |= IFM_10_T; /* XXX */
11441 			break;
11442 		case PCS_LSTS_SPEED_100:
11443 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
11444 			break;
11445 		case PCS_LSTS_SPEED_1000:
11446 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
11447 			break;
11448 		default:
11449 			device_printf(sc->sc_dev, "Unknown speed\n");
11450 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
11451 			break;
11452 		}
11453 	}
11454 	if ((reg & PCS_LSTS_FDX) != 0)
11455 		ifmr->ifm_active |= IFM_FDX;
11456 	else
11457 		ifmr->ifm_active |= IFM_HDX;
11458 	mii->mii_media_active &= ~IFM_ETH_FMASK;
11459 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
11460 		/* Check flow */
11461 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
11462 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
11463 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
11464 			goto setled;
11465 		}
11466 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
11467 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
11468 		DPRINTF(WM_DEBUG_LINK,
11469 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
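		/*
		 * Resolve pause per the 802.3 priority rules: both sides
		 * symmetric -> TX and RX pause; we advertise asymmetric only
		 * while the partner advertises both -> TX pause; we advertise
		 * both while the partner is asymmetric only -> RX pause.
		 */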
11470 		if ((pcs_adv & TXCW_SYM_PAUSE)
11471 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
11472 			mii->mii_media_active |= IFM_FLOW
11473 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
11474 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
11475 		    && (pcs_adv & TXCW_ASYM_PAUSE)
11476 		    && (pcs_lpab & TXCW_SYM_PAUSE)
11477 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
11478 			mii->mii_media_active |= IFM_FLOW
11479 			    | IFM_ETH_TXPAUSE;
11480 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
11481 		    && (pcs_adv & TXCW_ASYM_PAUSE)
11482 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
11483 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
11484 			mii->mii_media_active |= IFM_FLOW
11485 			    | IFM_ETH_RXPAUSE;
11486 		}
11487 	}
11488 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
11489 	    | (mii->mii_media_active & IFM_ETH_FMASK);
11490 setled:
11491 	wm_tbi_serdes_set_linkled(sc);
11492 }
11493 
11494 /*
11495  * wm_serdes_tick:
11496  *
11497  *	Check the link on serdes devices.
11498  */
11499 static void
11500 wm_serdes_tick(struct wm_softc *sc)
11501 {
11502 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
11503 	struct mii_data *mii = &sc->sc_mii;
11504 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
11505 	uint32_t reg;
11506 
11507 	KASSERT(WM_CORE_LOCKED(sc));
11508 
11509 	mii->mii_media_status = IFM_AVALID;
11510 	mii->mii_media_active = IFM_ETHER;
11511 
11512 	/* Check PCS */
11513 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
11514 	if ((reg & PCS_LSTS_LINKOK) != 0) {
11515 		mii->mii_media_status |= IFM_ACTIVE;
11516 		sc->sc_tbi_linkup = 1;
11517 		sc->sc_tbi_serdes_ticks = 0;
11518 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
11519 		if ((reg & PCS_LSTS_FDX) != 0)
11520 			mii->mii_media_active |= IFM_FDX;
11521 		else
11522 			mii->mii_media_active |= IFM_HDX;
11523 	} else {
11524 		mii->mii_media_status |= IFM_NONE;
11525 		sc->sc_tbi_linkup = 0;
11526 		/* If the timer expired, retry autonegotiation */
11527 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
11528 		    && (++sc->sc_tbi_serdes_ticks
11529 			>= sc->sc_tbi_serdes_anegticks)) {
11530 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
11531 			sc->sc_tbi_serdes_ticks = 0;
11532 			/* XXX */
11533 			wm_serdes_mediachange(ifp);
11534 		}
11535 	}
11536 
11537 	wm_tbi_serdes_set_linkled(sc);
11538 }
11539 
11540 /* SFP related */
11541 
11542 static int
11543 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
11544 {
11545 	uint32_t i2ccmd;
11546 	int i;
11547 
11548 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
11549 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
11550 
11551 	/* Poll the ready bit */
11552 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
11553 		delay(50);
11554 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
11555 		if (i2ccmd & I2CCMD_READY)
11556 			break;
11557 	}
11558 	if ((i2ccmd & I2CCMD_READY) == 0)
11559 		return -1;
11560 	if ((i2ccmd & I2CCMD_ERROR) != 0)
11561 		return -1;
11562 
11563 	*data = i2ccmd & 0x00ff;
11564 
11565 	return 0;
11566 }
11567 
11568 static uint32_t
11569 wm_sfp_get_media_type(struct wm_softc *sc)
11570 {
11571 	uint32_t ctrl_ext;
11572 	uint8_t val = 0;
11573 	int timeout = 3;
11574 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
11575 	int rv = -1;
11576 
11577 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
11578 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
11579 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
11580 	CSR_WRITE_FLUSH(sc);
11581 
11582 	/* Read SFP module data */
11583 	while (timeout) {
11584 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
11585 		if (rv == 0)
11586 			break;
11587 		delay(100*1000); /* XXX too big */
11588 		timeout--;
11589 	}
11590 	if (rv != 0)
11591 		goto out;
11592 	switch (val) {
11593 	case SFF_SFP_ID_SFF:
11594 		aprint_normal_dev(sc->sc_dev,
11595 		    "Module/Connector soldered to board\n");
11596 		break;
11597 	case SFF_SFP_ID_SFP:
11598 		aprint_normal_dev(sc->sc_dev, "SFP\n");
11599 		break;
11600 	case SFF_SFP_ID_UNKNOWN:
11601 		goto out;
11602 	default:
11603 		break;
11604 	}
11605 
11606 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
11607 	if (rv != 0) {
11608 		goto out;
11609 	}
11610 
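	/*
	 * Map the module's Ethernet compliance codes to a wm media type:
	 * 1000BASE-SX/LX modules are driven as SERDES, while 1000BASE-T and
	 * 100BASE-FX modules go through the SGMII interface.
	 */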
11611 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
11612 		mediatype = WM_MEDIATYPE_SERDES;
11613 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
11614 		sc->sc_flags |= WM_F_SGMII;
11615 		mediatype = WM_MEDIATYPE_COPPER;
11616 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
11617 		sc->sc_flags |= WM_F_SGMII;
11618 		mediatype = WM_MEDIATYPE_SERDES;
11619 	}
11620 
11621 out:
11622 	/* Restore I2C interface setting */
11623 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
11624 
11625 	return mediatype;
11626 }
11627 
11628 /*
11629  * NVM related.
11630  * Microwire, SPI (w/wo EERD) and Flash.
11631  */
11632 
11633 /* Both spi and uwire */
11634 
11635 /*
11636  * wm_eeprom_sendbits:
11637  *
11638  *	Send a series of bits to the EEPROM.
11639  */
11640 static void
11641 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
11642 {
11643 	uint32_t reg;
11644 	int x;
11645 
11646 	reg = CSR_READ(sc, WMREG_EECD);
11647 
11648 	for (x = nbits; x > 0; x--) {
11649 		if (bits & (1U << (x - 1)))
11650 			reg |= EECD_DI;
11651 		else
11652 			reg &= ~EECD_DI;
11653 		CSR_WRITE(sc, WMREG_EECD, reg);
11654 		CSR_WRITE_FLUSH(sc);
11655 		delay(2);
11656 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
11657 		CSR_WRITE_FLUSH(sc);
11658 		delay(2);
11659 		CSR_WRITE(sc, WMREG_EECD, reg);
11660 		CSR_WRITE_FLUSH(sc);
11661 		delay(2);
11662 	}
11663 }
11664 
11665 /*
11666  * wm_eeprom_recvbits:
11667  *
11668  *	Receive a series of bits from the EEPROM.
11669  */
11670 static void
11671 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
11672 {
11673 	uint32_t reg, val;
11674 	int x;
11675 
11676 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
11677 
11678 	val = 0;
11679 	for (x = nbits; x > 0; x--) {
11680 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
11681 		CSR_WRITE_FLUSH(sc);
11682 		delay(2);
11683 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
11684 			val |= (1U << (x - 1));
11685 		CSR_WRITE(sc, WMREG_EECD, reg);
11686 		CSR_WRITE_FLUSH(sc);
11687 		delay(2);
11688 	}
11689 	*valp = val;
11690 }
11691 
11692 /* Microwire */
11693 
11694 /*
11695  * wm_nvm_read_uwire:
11696  *
11697  *	Read a word from the EEPROM using the MicroWire protocol.
11698  */
11699 static int
11700 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
11701 {
11702 	uint32_t reg, val;
11703 	int i;
11704 
11705 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11706 		device_xname(sc->sc_dev), __func__));
11707 
11708 	if (sc->nvm.acquire(sc) != 0)
11709 		return -1;
11710 
11711 	for (i = 0; i < wordcnt; i++) {
11712 		/* Clear SK and DI. */
11713 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
11714 		CSR_WRITE(sc, WMREG_EECD, reg);
11715 
11716 		/*
11717 		 * XXX: workaround for a bug in qemu-0.12.x and prior
11718 		 * and Xen.
11719 		 *
11720 		 * We use this workaround only for 82540 because qemu's
11721 		 * e1000 acts as an 82540.
11722 		 */
11723 		if (sc->sc_type == WM_T_82540) {
11724 			reg |= EECD_SK;
11725 			CSR_WRITE(sc, WMREG_EECD, reg);
11726 			reg &= ~EECD_SK;
11727 			CSR_WRITE(sc, WMREG_EECD, reg);
11728 			CSR_WRITE_FLUSH(sc);
11729 			delay(2);
11730 		}
11731 		/* XXX: end of workaround */
11732 
11733 		/* Set CHIP SELECT. */
11734 		reg |= EECD_CS;
11735 		CSR_WRITE(sc, WMREG_EECD, reg);
11736 		CSR_WRITE_FLUSH(sc);
11737 		delay(2);
11738 
11739 		/* Shift in the READ command. */
11740 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
11741 
11742 		/* Shift in address. */
11743 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
11744 
11745 		/* Shift out the data. */
11746 		wm_eeprom_recvbits(sc, &val, 16);
11747 		data[i] = val & 0xffff;
11748 
11749 		/* Clear CHIP SELECT. */
11750 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
11751 		CSR_WRITE(sc, WMREG_EECD, reg);
11752 		CSR_WRITE_FLUSH(sc);
11753 		delay(2);
11754 	}
11755 
11756 	sc->nvm.release(sc);
11757 	return 0;
11758 }
11759 
11760 /* SPI */
11761 
11762 /*
11763  * Set SPI and FLASH related information from the EECD register.
11764  * For 82541 and 82547, the word size is taken from EEPROM.
11765  */
11766 static int
11767 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
11768 {
11769 	int size;
11770 	uint32_t reg;
11771 	uint16_t data;
11772 
11773 	reg = CSR_READ(sc, WMREG_EECD);
11774 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
11775 
11776 	/* Read the size of NVM from EECD by default */
11777 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
11778 	switch (sc->sc_type) {
11779 	case WM_T_82541:
11780 	case WM_T_82541_2:
11781 	case WM_T_82547:
11782 	case WM_T_82547_2:
11783 		/* Set dummy value to access EEPROM */
11784 		sc->sc_nvm_wordsize = 64;
11785 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
11786 			aprint_error_dev(sc->sc_dev,
11787 			    "%s: failed to read EEPROM size\n", __func__);
11788 		}
11789 		reg = data;
11790 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
11791 		if (size == 0)
11792 			size = 6; /* 64 word size */
11793 		else
11794 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
11795 		break;
11796 	case WM_T_80003:
11797 	case WM_T_82571:
11798 	case WM_T_82572:
11799 	case WM_T_82573: /* SPI case */
11800 	case WM_T_82574: /* SPI case */
11801 	case WM_T_82583: /* SPI case */
11802 		size += NVM_WORD_SIZE_BASE_SHIFT;
11803 		if (size > 14)
11804 			size = 14;
11805 		break;
11806 	case WM_T_82575:
11807 	case WM_T_82576:
11808 	case WM_T_82580:
11809 	case WM_T_I350:
11810 	case WM_T_I354:
11811 	case WM_T_I210:
11812 	case WM_T_I211:
11813 		size += NVM_WORD_SIZE_BASE_SHIFT;
11814 		if (size > 15)
11815 			size = 15;
11816 		break;
11817 	default:
11818 		aprint_error_dev(sc->sc_dev,
11819 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
11820 		return -1;
11822 	}
11823 
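	/*
	 * The size value is an exponent; e.g. a final value of 8 yields a
	 * word size of 1 << 8 = 256 16-bit words.
	 */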
11824 	sc->sc_nvm_wordsize = 1 << size;
11825 
11826 	return 0;
11827 }
11828 
11829 /*
11830  * wm_nvm_ready_spi:
11831  *
11832  *	Wait for a SPI EEPROM to be ready for commands.
11833  */
11834 static int
11835 wm_nvm_ready_spi(struct wm_softc *sc)
11836 {
11837 	uint32_t val;
11838 	int usec;
11839 
11840 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11841 		device_xname(sc->sc_dev), __func__));
11842 
11843 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
11844 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
11845 		wm_eeprom_recvbits(sc, &val, 8);
11846 		if ((val & SPI_SR_RDY) == 0)
11847 			break;
11848 	}
11849 	if (usec >= SPI_MAX_RETRIES) {
11850 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
11851 		return -1;
11852 	}
11853 	return 0;
11854 }
11855 
11856 /*
11857  * wm_nvm_read_spi:
11858  *
11859  *	Read a word from the EEPROM using the SPI protocol.
11860  */
11861 static int
11862 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
11863 {
11864 	uint32_t reg, val;
11865 	int i;
11866 	uint8_t opc;
11867 	int rv = 0;
11868 
11869 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11870 		device_xname(sc->sc_dev), __func__));
11871 
11872 	if (sc->nvm.acquire(sc) != 0)
11873 		return -1;
11874 
11875 	/* Clear SK and CS. */
11876 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
11877 	CSR_WRITE(sc, WMREG_EECD, reg);
11878 	CSR_WRITE_FLUSH(sc);
11879 	delay(2);
11880 
11881 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
11882 		goto out;
11883 
11884 	/* Toggle CS to flush commands. */
11885 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
11886 	CSR_WRITE_FLUSH(sc);
11887 	delay(2);
11888 	CSR_WRITE(sc, WMREG_EECD, reg);
11889 	CSR_WRITE_FLUSH(sc);
11890 	delay(2);
11891 
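	/*
	 * Small SPI parts with 8-bit addressing encode the ninth address
	 * bit (A8) in the opcode, so set it when reading at or beyond byte
	 * address 256 (word 128).
	 */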
11892 	opc = SPI_OPC_READ;
11893 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
11894 		opc |= SPI_OPC_A8;
11895 
11896 	wm_eeprom_sendbits(sc, opc, 8);
11897 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
11898 
11899 	for (i = 0; i < wordcnt; i++) {
11900 		wm_eeprom_recvbits(sc, &val, 16);
11901 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
11902 	}
11903 
11904 	/* Raise CS and clear SK. */
11905 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
11906 	CSR_WRITE(sc, WMREG_EECD, reg);
11907 	CSR_WRITE_FLUSH(sc);
11908 	delay(2);
11909 
11910 out:
11911 	sc->nvm.release(sc);
11912 	return rv;
11913 }
11914 
11915 /* Reading via EERD */
11916 
11917 static int
11918 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
11919 {
11920 	uint32_t attempts = 100000;
11921 	uint32_t i, reg = 0;
11922 	int32_t done = -1;
11923 
11924 	for (i = 0; i < attempts; i++) {
11925 		reg = CSR_READ(sc, rw);
11926 
11927 		if (reg & EERD_DONE) {
11928 			done = 0;
11929 			break;
11930 		}
11931 		delay(5);
11932 	}
11933 
11934 	return done;
11935 }
11936 
11937 static int
11938 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
11939     uint16_t *data)
11940 {
11941 	int i, eerd = 0;
11942 	int rv = 0;
11943 
11944 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11945 		device_xname(sc->sc_dev), __func__));
11946 
11947 	if (sc->nvm.acquire(sc) != 0)
11948 		return -1;
11949 
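	/*
	 * Each word is read by writing its address together with EERD_START,
	 * polling for EERD_DONE, and then extracting the data field from
	 * the register.
	 */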
11950 	for (i = 0; i < wordcnt; i++) {
11951 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
11952 		CSR_WRITE(sc, WMREG_EERD, eerd);
11953 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
11954 		if (rv != 0) {
11955 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
11956 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
11957 			break;
11958 		}
11959 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
11960 	}
11961 
11962 	sc->nvm.release(sc);
11963 	return rv;
11964 }
11965 
11966 /* Flash */
11967 
11968 static int
11969 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
11970 {
11971 	uint32_t eecd;
11972 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
11973 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
11974 	uint32_t nvm_dword = 0;
11975 	uint8_t sig_byte = 0;
11976 	int rv;
11977 
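	/*
	 * A bank is considered valid when the signature bits in the high
	 * byte of its signature word match ICH_NVM_SIG_VALUE; bank 1 starts
	 * sc_ich8_flash_bank_size words after bank 0.
	 */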
11978 	switch (sc->sc_type) {
11979 	case WM_T_PCH_SPT:
11980 	case WM_T_PCH_CNP:
11981 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
11982 		act_offset = ICH_NVM_SIG_WORD * 2;
11983 
11984 		/* set bank to 0 in case flash read fails. */
11985 		*bank = 0;
11986 
11987 		/* Check bank 0 */
11988 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
11989 		if (rv != 0)
11990 			return rv;
11991 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
11992 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
11993 			*bank = 0;
11994 			return 0;
11995 		}
11996 
11997 		/* Check bank 1 */
11998 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
11999 		    &nvm_dword);
		if (rv != 0)
			return rv;
12000 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
12001 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
12002 			*bank = 1;
12003 			return 0;
12004 		}
12005 		aprint_error_dev(sc->sc_dev,
12006 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
12007 		return -1;
12008 	case WM_T_ICH8:
12009 	case WM_T_ICH9:
12010 		eecd = CSR_READ(sc, WMREG_EECD);
12011 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
12012 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
12013 			return 0;
12014 		}
12015 		/* FALLTHROUGH */
12016 	default:
12017 		/* Default to 0 */
12018 		*bank = 0;
12019 
12020 		/* Check bank 0 */
12021 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
12022 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
12023 			*bank = 0;
12024 			return 0;
12025 		}
12026 
12027 		/* Check bank 1 */
12028 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
12029 		    &sig_byte);
12030 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
12031 			*bank = 1;
12032 			return 0;
12033 		}
12034 	}
12035 
12036 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
12037 		device_xname(sc->sc_dev)));
12038 	return -1;
12039 }
12040 
12041 /******************************************************************************
12042  * This function does initial flash setup so that a new read/write/erase cycle
12043  * can be started.
12044  *
12045  * sc - The pointer to the hw structure
12046  ****************************************************************************/
12047 static int32_t
12048 wm_ich8_cycle_init(struct wm_softc *sc)
12049 {
12050 	uint16_t hsfsts;
12051 	int32_t error = 1;
12052 	int32_t i     = 0;
12053 
12054 	if (sc->sc_type >= WM_T_PCH_SPT)
12055 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
12056 	else
12057 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
12058 
12059 	/* Check the Flash Descriptor Valid bit in the HW status register */
12060 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
12061 		return error;
12062 	}
12063 
12064 	/* Clear FCERR and DAEL in HW status by writing a 1 to each */
12066 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
12067 
12068 	if (sc->sc_type >= WM_T_PCH_SPT)
12069 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
12070 	else
12071 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
12072 
12073 	/*
12074 	 * Either we should have a hardware SPI cycle-in-progress bit to
12075 	 * check against in order to start a new cycle, or the FDONE bit
12076 	 * should be changed in the hardware so that it is 1 after hardware
12077 	 * reset, which could then be used to tell whether a cycle is in
12078 	 * progress or has completed.  We should also have some software
12079 	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit
12080 	 * so that accesses to those bits by two threads are serialized,
12081 	 * i.e. so that two threads don't start a cycle at the same time.
12082 	 */
12083 
12084 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
12085 		/*
12086 		 * There is no cycle running at present, so we can start a
12087 		 * cycle
12088 		 */
12089 
12090 		/* Begin by setting Flash Cycle Done. */
12091 		hsfsts |= HSFSTS_DONE;
12092 		if (sc->sc_type >= WM_T_PCH_SPT)
12093 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
12094 			    hsfsts & 0xffffUL);
12095 		else
12096 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
12097 		error = 0;
12098 	} else {
12099 		/*
12100 		 * Otherwise poll for some time so the current cycle has a
12101 		 * chance to end before giving up.
12102 		 */
12103 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
12104 			if (sc->sc_type >= WM_T_PCH_SPT)
12105 				hsfsts = ICH8_FLASH_READ32(sc,
12106 				    ICH_FLASH_HSFSTS) & 0xffffUL;
12107 			else
12108 				hsfsts = ICH8_FLASH_READ16(sc,
12109 				    ICH_FLASH_HSFSTS);
12110 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
12111 				error = 0;
12112 				break;
12113 			}
12114 			delay(1);
12115 		}
12116 		if (error == 0) {
12117 			/*
12118 			 * The previous cycle ended within the timeout, so
12119 			 * now set the Flash Cycle Done bit.
12120 			 */
12121 			hsfsts |= HSFSTS_DONE;
12122 			if (sc->sc_type >= WM_T_PCH_SPT)
12123 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
12124 				    hsfsts & 0xffffUL);
12125 			else
12126 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
12127 				    hsfsts);
12128 		}
12129 	}
12130 	return error;
12131 }
12132 
12133 /******************************************************************************
12134  * This function starts a flash cycle and waits for its completion
12135  *
12136  * sc - The pointer to the hw structure
12137  ****************************************************************************/
12138 static int32_t
12139 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
12140 {
12141 	uint16_t hsflctl;
12142 	uint16_t hsfsts;
12143 	int32_t error = 1;
12144 	uint32_t i = 0;
12145 
12146 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
12147 	if (sc->sc_type >= WM_T_PCH_SPT)
12148 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
12149 	else
12150 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
12151 	hsflctl |= HSFCTL_GO;
12152 	if (sc->sc_type >= WM_T_PCH_SPT)
12153 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
12154 		    (uint32_t)hsflctl << 16);
12155 	else
12156 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
12157 
12158 	/* Wait till FDONE bit is set to 1 */
12159 	do {
12160 		if (sc->sc_type >= WM_T_PCH_SPT)
12161 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
12162 			    & 0xffffUL;
12163 		else
12164 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
12165 		if (hsfsts & HSFSTS_DONE)
12166 			break;
12167 		delay(1);
12168 		i++;
12169 	} while (i < timeout);
12170 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
12171 		error = 0;
12172 
12173 	return error;
12174 }
12175 
12176 /******************************************************************************
12177  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
12178  *
12179  * sc - The pointer to the hw structure
12180  * index - The index of the byte or word to read.
12181  * size - Size of data to read, 1=byte 2=word, 4=dword
12182  * data - Pointer to the word to store the value read.
12183  *****************************************************************************/
12184 static int32_t
12185 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
12186     uint32_t size, uint32_t *data)
12187 {
12188 	uint16_t hsfsts;
12189 	uint16_t hsflctl;
12190 	uint32_t flash_linear_address;
12191 	uint32_t flash_data = 0;
12192 	int32_t error = 1;
12193 	int32_t count = 0;
12194 
12195 	if (size < 1 || size > 4 || data == NULL ||
12196 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
12197 		return error;
12198 
12199 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
12200 	    sc->sc_ich8_flash_base;
12201 
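	/*
	 * Read cycle, per the loop below: initialize the cycle, program the
	 * byte count and READ cycle type into HSFCTL, write the linear
	 * address into FADDR, kick off the cycle and poll for completion,
	 * then pick up the result from FDATA0, retrying on FCERR.
	 */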
12202 	do {
12203 		delay(1);
12204 		/* Steps */
12205 		error = wm_ich8_cycle_init(sc);
12206 		if (error)
12207 			break;
12208 
12209 		if (sc->sc_type >= WM_T_PCH_SPT)
12210 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
12211 			    >> 16;
12212 		else
12213 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
12214 		/* Byte count field = size - 1 (0 = 1 byte, 3 = 4 bytes) */
12215 		hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
12216 		    & HSFCTL_BCOUNT_MASK;
12217 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
12218 		if (sc->sc_type >= WM_T_PCH_SPT) {
12219 			/*
12220 			 * In SPT, this register is in LAN memory space, not
12221 			 * flash, so only 32-bit access is supported.
12222 			 */
12223 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
12224 			    (uint32_t)hsflctl << 16);
12225 		} else
12226 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
12227 
12228 		/*
12229 		 * Write the last 24 bits of index into Flash Linear address
12230 		 * field in Flash Address
12231 		 */
12232 		/* TODO: TBD maybe check the index against the size of flash */
12233 
12234 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
12235 
12236 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
12237 
12238 		/*
12239 		 * If FCERR is set, clear it and retry the whole sequence a
12240 		 * few more times; otherwise read the result from Flash
12241 		 * Data0 (FDATA0), least significant byte first.
12243 		 */
12244 		if (error == 0) {
12245 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
12246 			if (size == 1)
12247 				*data = (uint8_t)(flash_data & 0x000000FF);
12248 			else if (size == 2)
12249 				*data = (uint16_t)(flash_data & 0x0000FFFF);
12250 			else if (size == 4)
12251 				*data = (uint32_t)flash_data;
12252 			break;
12253 		} else {
12254 			/*
12255 			 * If we've gotten here, then things are probably
12256 			 * completely hosed, but if the error condition is
12257 			 * detected, it won't hurt to give it another try...
12258 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
12259 			 */
12260 			if (sc->sc_type >= WM_T_PCH_SPT)
12261 				hsfsts = ICH8_FLASH_READ32(sc,
12262 				    ICH_FLASH_HSFSTS) & 0xffffUL;
12263 			else
12264 				hsfsts = ICH8_FLASH_READ16(sc,
12265 				    ICH_FLASH_HSFSTS);
12266 
12267 			if (hsfsts & HSFSTS_ERR) {
12268 				/* Repeat for some time before giving up. */
12269 				continue;
12270 			} else if ((hsfsts & HSFSTS_DONE) == 0)
12271 				break;
12272 		}
12273 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
12274 
12275 	return error;
12276 }
12277 
12278 /******************************************************************************
12279  * Reads a single byte from the NVM using the ICH8 flash access registers.
12280  *
12281  * sc - pointer to wm_hw structure
12282  * index - The index of the byte to read.
12283  * data - Pointer to a byte to store the value read.
12284  *****************************************************************************/
12285 static int32_t
12286 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
12287 {
12288 	int32_t status;
12289 	uint32_t word = 0;
12290 
12291 	status = wm_read_ich8_data(sc, index, 1, &word);
12292 	if (status == 0)
12293 		*data = (uint8_t)word;
12294 	else
12295 		*data = 0;
12296 
12297 	return status;
12298 }
12299 
12300 /******************************************************************************
12301  * Reads a word from the NVM using the ICH8 flash access registers.
12302  *
12303  * sc - pointer to wm_hw structure
12304  * index - The starting byte index of the word to read.
12305  * data - Pointer to a word to store the value read.
12306  *****************************************************************************/
12307 static int32_t
12308 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
12309 {
12310 	int32_t status;
12311 	uint32_t word = 0;
12312 
12313 	status = wm_read_ich8_data(sc, index, 2, &word);
12314 	if (status == 0)
12315 		*data = (uint16_t)word;
12316 	else
12317 		*data = 0;
12318 
12319 	return status;
12320 }
12321 
12322 /******************************************************************************
12323  * Reads a dword from the NVM using the ICH8 flash access registers.
12324  *
12325  * sc - pointer to wm_hw structure
12326  * index - The starting byte index of the word to read.
12327  * data - Pointer to a word to store the value read.
12328  *****************************************************************************/
12329 static int32_t
12330 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
12331 {
12332 	int32_t status;
12333 
12334 	status = wm_read_ich8_data(sc, index, 4, data);
12335 	return status;
12336 }
12337 
12338 /******************************************************************************
12339  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
12340  * register.
12341  *
12342  * sc - Struct containing variables accessed by shared code
12343  * offset - offset of word in the EEPROM to read
12344  * data - word read from the EEPROM
12345  * words - number of words to read
12346  *****************************************************************************/
12347 static int
12348 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
12349 {
12350 	int32_t  rv = 0;
12351 	uint32_t flash_bank = 0;
12352 	uint32_t act_offset = 0;
12353 	uint32_t bank_offset = 0;
12354 	uint16_t word = 0;
12355 	uint16_t i = 0;
12356 
12357 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
12358 		device_xname(sc->sc_dev), __func__));
12359 
12360 	if (sc->nvm.acquire(sc) != 0)
12361 		return -1;
12362 
12363 	/*
12364 	 * We need to know which is the valid flash bank.  In the event
12365 	 * that we didn't allocate eeprom_shadow_ram, we may not be
12366 	 * managing flash_bank.  So it cannot be trusted and needs
12367 	 * to be updated with each read.
12368 	 */
12369 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
12370 	if (rv) {
12371 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
12372 			device_xname(sc->sc_dev)));
12373 		flash_bank = 0;
12374 	}
12375 
	/*
	 * Adjust the offset if we're reading from bank 1; the bank size
	 * is counted in words, so multiply by 2 to get a byte offset.
	 */
12380 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
12381 
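	/*
	 * e.g. with a 2048-word bank, word 3 of bank 1 would live at
	 * byte offset 1 * (2048 * 2) + 3 * 2 = 4102.
	 */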
12382 	for (i = 0; i < words; i++) {
12383 		/* The NVM part needs a byte offset, hence * 2 */
12384 		act_offset = bank_offset + ((offset + i) * 2);
12385 		rv = wm_read_ich8_word(sc, act_offset, &word);
12386 		if (rv) {
12387 			aprint_error_dev(sc->sc_dev,
12388 			    "%s: failed to read NVM\n", __func__);
12389 			break;
12390 		}
12391 		data[i] = word;
12392 	}
12393 
12394 	sc->nvm.release(sc);
12395 	return rv;
12396 }
12397 
12398 /******************************************************************************
12399  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
12400  * register.
12401  *
12402  * sc - Struct containing variables accessed by shared code
12403  * offset - offset of word in the EEPROM to read
12404  * data - word read from the EEPROM
12405  * words - number of words to read
12406  *****************************************************************************/
12407 static int
12408 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
12409 {
12410 	int32_t  rv = 0;
12411 	uint32_t flash_bank = 0;
12412 	uint32_t act_offset = 0;
12413 	uint32_t bank_offset = 0;
12414 	uint32_t dword = 0;
12415 	uint16_t i = 0;
12416 
12417 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
12418 		device_xname(sc->sc_dev), __func__));
12419 
12420 	if (sc->nvm.acquire(sc) != 0)
12421 		return -1;
12422 
12423 	/*
12424 	 * We need to know which is the valid flash bank.  In the event
12425 	 * that we didn't allocate eeprom_shadow_ram, we may not be
12426 	 * managing flash_bank.  So it cannot be trusted and needs
12427 	 * to be updated with each read.
12428 	 */
12429 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
12430 	if (rv) {
12431 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
12432 			device_xname(sc->sc_dev)));
12433 		flash_bank = 0;
12434 	}
12435 
	/*
	 * Adjust the offset if we're reading from bank 1; the bank size
	 * is counted in words, so multiply by 2 to get a byte offset.
	 */
12440 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
12441 
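	/*
	 * A sketch of the masking done below: a word at byte offset 0x0a
	 * is fetched by reading the dword at 0x08 and taking its high 16
	 * bits, while a word at 0x08 takes the low 16 bits.
	 */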
12442 	for (i = 0; i < words; i++) {
12443 		/* The NVM part needs a byte offset, hence * 2 */
12444 		act_offset = bank_offset + ((offset + i) * 2);
12445 		/* but we must read dword aligned, so mask ... */
12446 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
12447 		if (rv) {
12448 			aprint_error_dev(sc->sc_dev,
12449 			    "%s: failed to read NVM\n", __func__);
12450 			break;
12451 		}
12452 		/* ... and pick out low or high word */
12453 		if ((act_offset & 0x2) == 0)
12454 			data[i] = (uint16_t)(dword & 0xFFFF);
12455 		else
12456 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
12457 	}
12458 
12459 	sc->nvm.release(sc);
12460 	return rv;
12461 }
12462 
12463 /* iNVM */
12464 
12465 static int
12466 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
12467 {
	int32_t  rv = -1;	/* -1 if the address is not found */
12469 	uint32_t invm_dword;
12470 	uint16_t i;
12471 	uint8_t record_type, word_address;
12472 
12473 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
12474 		device_xname(sc->sc_dev), __func__));
12475 
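	/*
	 * Walk the iNVM dwords; each one starts a record.  CSR and RSA
	 * autoload records span extra dwords which are skipped, and a
	 * word-autoload record whose address matches carries our data.
	 */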
12476 	for (i = 0; i < INVM_SIZE; i++) {
12477 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
12478 		/* Get record type */
12479 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
12480 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
12481 			break;
12482 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
12483 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
12484 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
12485 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
12486 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
12487 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
12488 			if (word_address == address) {
12489 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
12490 				rv = 0;
12491 				break;
12492 			}
12493 		}
12494 	}
12495 
12496 	return rv;
12497 }
12498 
12499 static int
12500 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
12501 {
12502 	int rv = 0;
12503 	int i;
12504 
12505 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
12506 		device_xname(sc->sc_dev), __func__));
12507 
12508 	if (sc->nvm.acquire(sc) != 0)
12509 		return -1;
12510 
12511 	for (i = 0; i < words; i++) {
12512 		switch (offset + i) {
12513 		case NVM_OFF_MACADDR:
12514 		case NVM_OFF_MACADDR1:
12515 		case NVM_OFF_MACADDR2:
12516 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
12517 			if (rv != 0) {
12518 				data[i] = 0xffff;
12519 				rv = -1;
12520 			}
12521 			break;
		case NVM_OFF_CFG2:
			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
			if (rv != 0) {
				data[i] = NVM_INIT_CTRL_2_DEFAULT_I211;
				rv = 0;
			}
			break;
		case NVM_OFF_CFG4:
			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
			if (rv != 0) {
				data[i] = NVM_INIT_CTRL_4_DEFAULT_I211;
				rv = 0;
			}
			break;
		case NVM_OFF_LED_1_CFG:
			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
			if (rv != 0) {
				data[i] = NVM_LED_1_CFG_DEFAULT_I211;
				rv = 0;
			}
			break;
		case NVM_OFF_LED_0_2_CFG:
			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
			if (rv != 0) {
				data[i] = NVM_LED_0_2_CFG_DEFAULT_I211;
				rv = 0;
			}
			break;
		case NVM_OFF_ID_LED_SETTINGS:
			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
			if (rv != 0) {
				data[i] = ID_LED_RESERVED_FFFF;
				rv = 0;
			}
			break;
		default:
			DPRINTF(WM_DEBUG_NVM,
			    ("NVM word 0x%02x is not mapped.\n", offset + i));
			data[i] = NVM_RESERVED_WORD;
			break;
12562 		}
12563 	}
12564 
12565 	sc->nvm.release(sc);
12566 	return rv;
12567 }
12568 
12569 /* Lock, detecting NVM type, validate checksum, version and read */
12570 
12571 static int
12572 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
12573 {
12574 	uint32_t eecd = 0;
12575 
12576 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
12577 	    || sc->sc_type == WM_T_82583) {
12578 		eecd = CSR_READ(sc, WMREG_EECD);
12579 
12580 		/* Isolate bits 15 & 16 */
12581 		eecd = ((eecd >> 15) & 0x03);
12582 
12583 		/* If both bits are set, device is Flash type */
12584 		if (eecd == 0x03)
12585 			return 0;
12586 	}
12587 	return 1;
12588 }
12589 
12590 static int
12591 wm_nvm_flash_presence_i210(struct wm_softc *sc)
12592 {
12593 	uint32_t eec;
12594 
12595 	eec = CSR_READ(sc, WMREG_EEC);
12596 	if ((eec & EEC_FLASH_DETECTED) != 0)
12597 		return 1;
12598 
12599 	return 0;
12600 }
12601 
12602 /*
12603  * wm_nvm_validate_checksum
12604  *
12605  * The checksum is defined as the sum of the first 64 (16 bit) words.
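 * The sum, truncated to 16 bits, should equal NVM_CHECKSUM (0xbaba on
 * these parts); note that the code below only warns on a mismatch.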
12606  */
12607 static int
12608 wm_nvm_validate_checksum(struct wm_softc *sc)
12609 {
12610 	uint16_t checksum;
12611 	uint16_t eeprom_data;
12612 #ifdef WM_DEBUG
12613 	uint16_t csum_wordaddr, valid_checksum;
12614 #endif
12615 	int i;
12616 
12617 	checksum = 0;
12618 
12619 	/* Don't check for I211 */
12620 	if (sc->sc_type == WM_T_I211)
12621 		return 0;
12622 
12623 #ifdef WM_DEBUG
12624 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
12625 	    || (sc->sc_type == WM_T_PCH_CNP)) {
12626 		csum_wordaddr = NVM_OFF_COMPAT;
12627 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
12628 	} else {
12629 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
12630 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
12631 	}
12632 
12633 	/* Dump EEPROM image for debug */
12634 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
12635 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
12636 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
12637 		/* XXX PCH_SPT? */
12638 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
12639 		if ((eeprom_data & valid_checksum) == 0) {
12640 			DPRINTF(WM_DEBUG_NVM,
12641 			    ("%s: NVM need to be updated (%04x != %04x)\n",
12642 				device_xname(sc->sc_dev), eeprom_data,
12643 				    valid_checksum));
12644 		}
12645 	}
12646 
12647 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
12648 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
12649 		for (i = 0; i < NVM_SIZE; i++) {
12650 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
12651 				printf("XXXX ");
12652 			else
12653 				printf("%04hx ", eeprom_data);
12654 			if (i % 8 == 7)
12655 				printf("\n");
12656 		}
12657 	}
12658 
12659 #endif /* WM_DEBUG */
12660 
12661 	for (i = 0; i < NVM_SIZE; i++) {
12662 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
12663 			return 1;
12664 		checksum += eeprom_data;
12665 	}
12666 
12667 	if (checksum != (uint16_t) NVM_CHECKSUM) {
12668 #ifdef WM_DEBUG
12669 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
12670 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
12671 #endif
12672 	}
12673 
12674 	return 0;
12675 }
12676 
12677 static void
12678 wm_nvm_version_invm(struct wm_softc *sc)
12679 {
12680 	uint32_t dword;
12681 
12682 	/*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm and simply use word 61 as the document
	 * describes.  It may not be perfect, though...
12686 	 *
12687 	 * Example:
12688 	 *
12689 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
12690 	 */
12691 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
12692 	dword = __SHIFTOUT(dword, INVM_VER_1);
12693 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
12694 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
12695 }
12696 
12697 static void
12698 wm_nvm_version(struct wm_softc *sc)
12699 {
12700 	uint16_t major, minor, build, patch;
12701 	uint16_t uid0, uid1;
12702 	uint16_t nvm_data;
12703 	uint16_t off;
12704 	bool check_version = false;
12705 	bool check_optionrom = false;
12706 	bool have_build = false;
12707 	bool have_uid = true;
12708 
12709 	/*
12710 	 * Version format:
12711 	 *
12712 	 * XYYZ
12713 	 * X0YZ
12714 	 * X0YY
12715 	 *
12716 	 * Example:
12717 	 *
12718 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
12719 	 *	82571	0x50a6	5.10.6?
12720 	 *	82572	0x506a	5.6.10?
12721 	 *	82572EI	0x5069	5.6.9?
12722 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
12723 	 *		0x2013	2.1.3?
	 *	82583	0x10a0	1.10.0? (document says it's the default value)
12725 	 */
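
	/*
	 * A sketch of the decoding done below: 0x5069 splits into major
	 * 0x5, minor 0x06 and build 0x9; the minor is then read as BCD
	 * ((0x06 / 16) * 10 + (0x06 % 16) = 6), giving 5.6.9.
	 */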
12726 
12727 	/*
12728 	 * XXX
12729 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
12730 	 * I've never seen on real 82574 hardware with such small SPI ROM.
	 * I've never seen real 82574 hardware with such a small SPI ROM.
12732 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
12733 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
12734 		have_uid = false;
12735 
12736 	switch (sc->sc_type) {
12737 	case WM_T_82571:
12738 	case WM_T_82572:
12739 	case WM_T_82574:
12740 	case WM_T_82583:
12741 		check_version = true;
12742 		check_optionrom = true;
12743 		have_build = true;
12744 		break;
12745 	case WM_T_82575:
12746 	case WM_T_82576:
12747 	case WM_T_82580:
12748 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
12749 			check_version = true;
12750 		break;
12751 	case WM_T_I211:
12752 		wm_nvm_version_invm(sc);
12753 		have_uid = false;
12754 		goto printver;
12755 	case WM_T_I210:
12756 		if (!wm_nvm_flash_presence_i210(sc)) {
12757 			wm_nvm_version_invm(sc);
12758 			have_uid = false;
12759 			goto printver;
12760 		}
12761 		/* FALLTHROUGH */
12762 	case WM_T_I350:
12763 	case WM_T_I354:
12764 		check_version = true;
12765 		check_optionrom = true;
12766 		break;
12767 	default:
12768 		return;
12769 	}
12770 	if (check_version
12771 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
12772 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
12773 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
12774 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
12775 			build = nvm_data & NVM_BUILD_MASK;
12776 			have_build = true;
12777 		} else
12778 			minor = nvm_data & 0x00ff;
12779 
12780 		/* Decimal */
12781 		minor = (minor / 16) * 10 + (minor % 16);
12782 		sc->sc_nvm_ver_major = major;
12783 		sc->sc_nvm_ver_minor = minor;
12784 
12785 printver:
12786 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
12787 		    sc->sc_nvm_ver_minor);
12788 		if (have_build) {
12789 			sc->sc_nvm_ver_build = build;
12790 			aprint_verbose(".%d", build);
12791 		}
12792 	}
12793 
	/* Assume the Option ROM area is above NVM_SIZE */
12795 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
12796 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
12797 		/* Option ROM Version */
12798 		if ((off != 0x0000) && (off != 0xffff)) {
12799 			int rv;
12800 
12801 			off += NVM_COMBO_VER_OFF;
12802 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
12803 			rv |= wm_nvm_read(sc, off, 1, &uid0);
12804 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
12805 			    && (uid1 != 0) && (uid1 != 0xffff)) {
12806 				/* 16bits */
12807 				major = uid0 >> 8;
12808 				build = (uid0 << 8) | (uid1 >> 8);
12809 				patch = uid1 & 0x00ff;
12810 				aprint_verbose(", option ROM Version %d.%d.%d",
12811 				    major, build, patch);
12812 			}
12813 		}
12814 	}
12815 
12816 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
12817 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
12818 }
12819 
12820 /*
12821  * wm_nvm_read:
12822  *
12823  *	Read data from the serial EEPROM.
12824  */
12825 static int
12826 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
12827 {
12828 	int rv;
12829 
12830 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
12831 		device_xname(sc->sc_dev), __func__));
12832 
12833 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
12834 		return -1;
12835 
12836 	rv = sc->nvm.read(sc, word, wordcnt, data);
12837 
12838 	return rv;
12839 }
12840 
12841 /*
12842  * Hardware semaphores.
 * Very complex...
12844  */
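
/*
 * Roughly, the layering implemented below: SWSM_SMBI serializes
 * software access, SWSM_SWESMBI arbitrates software against firmware,
 * and SW_FW_SYNC holds the per-resource ownership bits; the ICH/PCH
 * parts instead use EXTCNFCTR_MDIO_SW_OWNERSHIP plus driver mutexes.
 */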
12845 
12846 static int
12847 wm_get_null(struct wm_softc *sc)
12848 {
12849 
12850 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12851 		device_xname(sc->sc_dev), __func__));
12852 	return 0;
12853 }
12854 
12855 static void
12856 wm_put_null(struct wm_softc *sc)
12857 {
12858 
12859 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12860 		device_xname(sc->sc_dev), __func__));
12861 	return;
12862 }
12863 
12864 static int
12865 wm_get_eecd(struct wm_softc *sc)
12866 {
12867 	uint32_t reg;
12868 	int x;
12869 
12870 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
12871 		device_xname(sc->sc_dev), __func__));
12872 
12873 	reg = CSR_READ(sc, WMREG_EECD);
12874 
12875 	/* Request EEPROM access. */
12876 	reg |= EECD_EE_REQ;
12877 	CSR_WRITE(sc, WMREG_EECD, reg);
12878 
12879 	/* ..and wait for it to be granted. */
12880 	for (x = 0; x < 1000; x++) {
12881 		reg = CSR_READ(sc, WMREG_EECD);
12882 		if (reg & EECD_EE_GNT)
12883 			break;
12884 		delay(5);
12885 	}
12886 	if ((reg & EECD_EE_GNT) == 0) {
12887 		aprint_error_dev(sc->sc_dev,
12888 		    "could not acquire EEPROM GNT\n");
12889 		reg &= ~EECD_EE_REQ;
12890 		CSR_WRITE(sc, WMREG_EECD, reg);
12891 		return -1;
12892 	}
12893 
12894 	return 0;
12895 }
12896 
12897 static void
12898 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
12899 {
12900 
12901 	*eecd |= EECD_SK;
12902 	CSR_WRITE(sc, WMREG_EECD, *eecd);
12903 	CSR_WRITE_FLUSH(sc);
12904 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
12905 		delay(1);
12906 	else
12907 		delay(50);
12908 }
12909 
12910 static void
12911 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
12912 {
12913 
12914 	*eecd &= ~EECD_SK;
12915 	CSR_WRITE(sc, WMREG_EECD, *eecd);
12916 	CSR_WRITE_FLUSH(sc);
12917 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
12918 		delay(1);
12919 	else
12920 		delay(50);
12921 }
12922 
12923 static void
12924 wm_put_eecd(struct wm_softc *sc)
12925 {
12926 	uint32_t reg;
12927 
12928 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12929 		device_xname(sc->sc_dev), __func__));
12930 
12931 	/* Stop nvm */
12932 	reg = CSR_READ(sc, WMREG_EECD);
12933 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
12934 		/* Pull CS high */
12935 		reg |= EECD_CS;
12936 		wm_nvm_eec_clock_lower(sc, &reg);
12937 	} else {
12938 		/* CS on Microwire is active-high */
12939 		reg &= ~(EECD_CS | EECD_DI);
12940 		CSR_WRITE(sc, WMREG_EECD, reg);
12941 		wm_nvm_eec_clock_raise(sc, &reg);
12942 		wm_nvm_eec_clock_lower(sc, &reg);
12943 	}
12944 
12945 	reg = CSR_READ(sc, WMREG_EECD);
12946 	reg &= ~EECD_EE_REQ;
12947 	CSR_WRITE(sc, WMREG_EECD, reg);
12948 
12949 	return;
12950 }
12951 
12952 /*
12953  * Get hardware semaphore.
12954  * Same as e1000_get_hw_semaphore_generic()
12955  */
12956 static int
12957 wm_get_swsm_semaphore(struct wm_softc *sc)
12958 {
12959 	int32_t timeout;
12960 	uint32_t swsm;
12961 
12962 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12963 		device_xname(sc->sc_dev), __func__));
12964 	KASSERT(sc->sc_nvm_wordsize > 0);
12965 
12966 retry:
12967 	/* Get the SW semaphore. */
12968 	timeout = sc->sc_nvm_wordsize + 1;
12969 	while (timeout) {
12970 		swsm = CSR_READ(sc, WMREG_SWSM);
12971 
12972 		if ((swsm & SWSM_SMBI) == 0)
12973 			break;
12974 
12975 		delay(50);
12976 		timeout--;
12977 	}
12978 
12979 	if (timeout == 0) {
12980 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
12981 			/*
12982 			 * In rare circumstances, the SW semaphore may already
12983 			 * be held unintentionally. Clear the semaphore once
12984 			 * before giving up.
12985 			 */
12986 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
12987 			wm_put_swsm_semaphore(sc);
12988 			goto retry;
12989 		}
12990 		aprint_error_dev(sc->sc_dev,
12991 		    "could not acquire SWSM SMBI\n");
12992 		return 1;
12993 	}
12994 
12995 	/* Get the FW semaphore. */
12996 	timeout = sc->sc_nvm_wordsize + 1;
12997 	while (timeout) {
12998 		swsm = CSR_READ(sc, WMREG_SWSM);
12999 		swsm |= SWSM_SWESMBI;
13000 		CSR_WRITE(sc, WMREG_SWSM, swsm);
13001 		/* If we managed to set the bit we got the semaphore. */
13002 		swsm = CSR_READ(sc, WMREG_SWSM);
13003 		if (swsm & SWSM_SWESMBI)
13004 			break;
13005 
13006 		delay(50);
13007 		timeout--;
13008 	}
13009 
13010 	if (timeout == 0) {
13011 		aprint_error_dev(sc->sc_dev,
13012 		    "could not acquire SWSM SWESMBI\n");
13013 		/* Release semaphores */
13014 		wm_put_swsm_semaphore(sc);
13015 		return 1;
13016 	}
13017 	return 0;
13018 }
13019 
13020 /*
13021  * Put hardware semaphore.
13022  * Same as e1000_put_hw_semaphore_generic()
13023  */
13024 static void
13025 wm_put_swsm_semaphore(struct wm_softc *sc)
13026 {
13027 	uint32_t swsm;
13028 
13029 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13030 		device_xname(sc->sc_dev), __func__));
13031 
13032 	swsm = CSR_READ(sc, WMREG_SWSM);
13033 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
13034 	CSR_WRITE(sc, WMREG_SWSM, swsm);
13035 }
13036 
13037 /*
13038  * Get SW/FW semaphore.
13039  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
13040  */
13041 static int
13042 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
13043 {
13044 	uint32_t swfw_sync;
13045 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
13046 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int i, timeout;
13048 
13049 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13050 		device_xname(sc->sc_dev), __func__));
13051 
13052 	if (sc->sc_type == WM_T_80003)
13053 		timeout = 50;
13054 	else
13055 		timeout = 200;
13056 
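	/*
	 * SW_FW_SYNC itself is guarded by the SWSM semaphore: take SWSM,
	 * try to set our software bit while neither the software nor the
	 * firmware bit for this resource is set, then drop SWSM again.
	 */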
	for (i = 0; i < timeout; i++) {
13058 		if (wm_get_swsm_semaphore(sc)) {
13059 			aprint_error_dev(sc->sc_dev,
13060 			    "%s: failed to get semaphore\n",
13061 			    __func__);
13062 			return 1;
13063 		}
13064 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
13065 		if ((swfw_sync & (swmask | fwmask)) == 0) {
13066 			swfw_sync |= swmask;
13067 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
13068 			wm_put_swsm_semaphore(sc);
13069 			return 0;
13070 		}
13071 		wm_put_swsm_semaphore(sc);
13072 		delay(5000);
13073 	}
13074 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
13075 	    device_xname(sc->sc_dev), mask, swfw_sync);
13076 	return 1;
13077 }
13078 
13079 static void
13080 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
13081 {
13082 	uint32_t swfw_sync;
13083 
13084 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13085 		device_xname(sc->sc_dev), __func__));
13086 
13087 	while (wm_get_swsm_semaphore(sc) != 0)
13088 		continue;
13089 
13090 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
13091 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
13092 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
13093 
13094 	wm_put_swsm_semaphore(sc);
13095 }
13096 
13097 static int
13098 wm_get_nvm_80003(struct wm_softc *sc)
13099 {
13100 	int rv;
13101 
13102 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
13103 		device_xname(sc->sc_dev), __func__));
13104 
13105 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
13106 		aprint_error_dev(sc->sc_dev,
13107 		    "%s: failed to get semaphore(SWFW)\n",
13108 		    __func__);
13109 		return rv;
13110 	}
13111 
13112 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
13113 	    && (rv = wm_get_eecd(sc)) != 0) {
13114 		aprint_error_dev(sc->sc_dev,
13115 		    "%s: failed to get semaphore(EECD)\n",
13116 		    __func__);
13117 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
13118 		return rv;
13119 	}
13120 
13121 	return 0;
13122 }
13123 
13124 static void
13125 wm_put_nvm_80003(struct wm_softc *sc)
13126 {
13127 
13128 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13129 		device_xname(sc->sc_dev), __func__));
13130 
13131 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
13132 		wm_put_eecd(sc);
13133 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
13134 }
13135 
13136 static int
13137 wm_get_nvm_82571(struct wm_softc *sc)
13138 {
13139 	int rv;
13140 
13141 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13142 		device_xname(sc->sc_dev), __func__));
13143 
13144 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
13145 		return rv;
13146 
13147 	switch (sc->sc_type) {
13148 	case WM_T_82573:
13149 		break;
13150 	default:
13151 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
13152 			rv = wm_get_eecd(sc);
13153 		break;
13154 	}
13155 
13156 	if (rv != 0) {
13157 		aprint_error_dev(sc->sc_dev,
13158 		    "%s: failed to get semaphore\n",
13159 		    __func__);
13160 		wm_put_swsm_semaphore(sc);
13161 	}
13162 
13163 	return rv;
13164 }
13165 
13166 static void
13167 wm_put_nvm_82571(struct wm_softc *sc)
13168 {
13169 
13170 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13171 		device_xname(sc->sc_dev), __func__));
13172 
13173 	switch (sc->sc_type) {
13174 	case WM_T_82573:
13175 		break;
13176 	default:
13177 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
13178 			wm_put_eecd(sc);
13179 		break;
13180 	}
13181 
13182 	wm_put_swsm_semaphore(sc);
13183 }
13184 
13185 static int
13186 wm_get_phy_82575(struct wm_softc *sc)
13187 {
13188 
13189 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13190 		device_xname(sc->sc_dev), __func__));
13191 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
13192 }
13193 
13194 static void
13195 wm_put_phy_82575(struct wm_softc *sc)
13196 {
13197 
13198 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13199 		device_xname(sc->sc_dev), __func__));
13200 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
13201 }
13202 
13203 static int
13204 wm_get_swfwhw_semaphore(struct wm_softc *sc)
13205 {
13206 	uint32_t ext_ctrl;
	int timeout;
13208 
13209 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13210 		device_xname(sc->sc_dev), __func__));
13211 
13212 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
13213 	for (timeout = 0; timeout < 200; timeout++) {
13214 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
13215 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
13216 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
13217 
13218 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
13219 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
13220 			return 0;
13221 		delay(5000);
13222 	}
13223 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
13224 	    device_xname(sc->sc_dev), ext_ctrl);
13225 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
13226 	return 1;
13227 }
13228 
13229 static void
13230 wm_put_swfwhw_semaphore(struct wm_softc *sc)
13231 {
13232 	uint32_t ext_ctrl;
13233 
13234 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13235 		device_xname(sc->sc_dev), __func__));
13236 
13237 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
13238 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
13239 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
13240 
13241 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
13242 }
13243 
13244 static int
13245 wm_get_swflag_ich8lan(struct wm_softc *sc)
13246 {
13247 	uint32_t ext_ctrl;
13248 	int timeout;
13249 
13250 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13251 		device_xname(sc->sc_dev), __func__));
13252 	mutex_enter(sc->sc_ich_phymtx);
13253 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
13254 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
13255 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
13256 			break;
13257 		delay(1000);
13258 	}
13259 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
13260 		printf("%s: SW has already locked the resource\n",
13261 		    device_xname(sc->sc_dev));
13262 		goto out;
13263 	}
13264 
13265 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
13266 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
13267 	for (timeout = 0; timeout < 1000; timeout++) {
13268 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
13269 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
13270 			break;
13271 		delay(1000);
13272 	}
13273 	if (timeout >= 1000) {
13274 		printf("%s: failed to acquire semaphore\n",
13275 		    device_xname(sc->sc_dev));
13276 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
13277 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
13278 		goto out;
13279 	}
13280 	return 0;
13281 
13282 out:
13283 	mutex_exit(sc->sc_ich_phymtx);
13284 	return 1;
13285 }
13286 
13287 static void
13288 wm_put_swflag_ich8lan(struct wm_softc *sc)
13289 {
13290 	uint32_t ext_ctrl;
13291 
13292 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13293 		device_xname(sc->sc_dev), __func__));
13294 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
13295 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
13296 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
13297 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
13298 	} else {
13299 		printf("%s: Semaphore unexpectedly released\n",
13300 		    device_xname(sc->sc_dev));
13301 	}
13302 
13303 	mutex_exit(sc->sc_ich_phymtx);
13304 }
13305 
13306 static int
13307 wm_get_nvm_ich8lan(struct wm_softc *sc)
13308 {
13309 
13310 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13311 		device_xname(sc->sc_dev), __func__));
13312 	mutex_enter(sc->sc_ich_nvmmtx);
13313 
13314 	return 0;
13315 }
13316 
13317 static void
13318 wm_put_nvm_ich8lan(struct wm_softc *sc)
13319 {
13320 
13321 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13322 		device_xname(sc->sc_dev), __func__));
13323 	mutex_exit(sc->sc_ich_nvmmtx);
13324 }
13325 
13326 static int
13327 wm_get_hw_semaphore_82573(struct wm_softc *sc)
13328 {
13329 	int i = 0;
13330 	uint32_t reg;
13331 
13332 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13333 		device_xname(sc->sc_dev), __func__));
13334 
13335 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
13336 	do {
13337 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
13338 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
13339 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
13340 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
13341 			break;
13342 		delay(2*1000);
13343 		i++;
13344 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
13345 
13346 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
13347 		wm_put_hw_semaphore_82573(sc);
13348 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
13349 		    device_xname(sc->sc_dev));
13350 		return -1;
13351 	}
13352 
13353 	return 0;
13354 }
13355 
13356 static void
13357 wm_put_hw_semaphore_82573(struct wm_softc *sc)
13358 {
13359 	uint32_t reg;
13360 
13361 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13362 		device_xname(sc->sc_dev), __func__));
13363 
13364 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
13365 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
13366 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
13367 }
13368 
13369 /*
13370  * Management mode and power management related subroutines.
13371  * BMC, AMT, suspend/resume and EEE.
13372  */
13373 
13374 #ifdef WM_WOL
13375 static int
13376 wm_check_mng_mode(struct wm_softc *sc)
13377 {
13378 	int rv;
13379 
13380 	switch (sc->sc_type) {
13381 	case WM_T_ICH8:
13382 	case WM_T_ICH9:
13383 	case WM_T_ICH10:
13384 	case WM_T_PCH:
13385 	case WM_T_PCH2:
13386 	case WM_T_PCH_LPT:
13387 	case WM_T_PCH_SPT:
13388 	case WM_T_PCH_CNP:
13389 		rv = wm_check_mng_mode_ich8lan(sc);
13390 		break;
13391 	case WM_T_82574:
13392 	case WM_T_82583:
13393 		rv = wm_check_mng_mode_82574(sc);
13394 		break;
13395 	case WM_T_82571:
13396 	case WM_T_82572:
13397 	case WM_T_82573:
13398 	case WM_T_80003:
13399 		rv = wm_check_mng_mode_generic(sc);
13400 		break;
13401 	default:
		/* nothing to do */
13403 		rv = 0;
13404 		break;
13405 	}
13406 
13407 	return rv;
13408 }
13409 
13410 static int
13411 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
13412 {
13413 	uint32_t fwsm;
13414 
13415 	fwsm = CSR_READ(sc, WMREG_FWSM);
13416 
13417 	if (((fwsm & FWSM_FW_VALID) != 0)
13418 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
13419 		return 1;
13420 
13421 	return 0;
13422 }
13423 
13424 static int
13425 wm_check_mng_mode_82574(struct wm_softc *sc)
13426 {
13427 	uint16_t data;
13428 
13429 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
13430 
13431 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
13432 		return 1;
13433 
13434 	return 0;
13435 }
13436 
13437 static int
13438 wm_check_mng_mode_generic(struct wm_softc *sc)
13439 {
13440 	uint32_t fwsm;
13441 
13442 	fwsm = CSR_READ(sc, WMREG_FWSM);
13443 
13444 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
13445 		return 1;
13446 
13447 	return 0;
13448 }
13449 #endif /* WM_WOL */
13450 
13451 static int
13452 wm_enable_mng_pass_thru(struct wm_softc *sc)
13453 {
13454 	uint32_t manc, fwsm, factps;
13455 
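	/*
	 * Pass-through requires ASF firmware and TCO packet reception;
	 * beyond that, each family signals it differently (FWSM/FACTPS,
	 * the NVM CFG2 MNGM field, or the MANC SMBus/ASF bits), as
	 * checked below.
	 */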
13456 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
13457 		return 0;
13458 
13459 	manc = CSR_READ(sc, WMREG_MANC);
13460 
13461 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
13462 		device_xname(sc->sc_dev), manc));
13463 	if ((manc & MANC_RECV_TCO_EN) == 0)
13464 		return 0;
13465 
13466 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
13467 		fwsm = CSR_READ(sc, WMREG_FWSM);
13468 		factps = CSR_READ(sc, WMREG_FACTPS);
13469 		if (((factps & FACTPS_MNGCG) == 0)
13470 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
13471 			return 1;
13472 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
13473 		uint16_t data;
13474 
13475 		factps = CSR_READ(sc, WMREG_FACTPS);
13476 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
13477 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
13478 			device_xname(sc->sc_dev), factps, data));
13479 		if (((factps & FACTPS_MNGCG) == 0)
13480 		    && ((data & NVM_CFG2_MNGM_MASK)
13481 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
13482 			return 1;
13483 	} else if (((manc & MANC_SMBUS_EN) != 0)
13484 	    && ((manc & MANC_ASF_EN) == 0))
13485 		return 1;
13486 
13487 	return 0;
13488 }
13489 
13490 static bool
13491 wm_phy_resetisblocked(struct wm_softc *sc)
13492 {
13493 	bool blocked = false;
13494 	uint32_t reg;
13495 	int i = 0;
13496 
13497 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13498 		device_xname(sc->sc_dev), __func__));
13499 
13500 	switch (sc->sc_type) {
13501 	case WM_T_ICH8:
13502 	case WM_T_ICH9:
13503 	case WM_T_ICH10:
13504 	case WM_T_PCH:
13505 	case WM_T_PCH2:
13506 	case WM_T_PCH_LPT:
13507 	case WM_T_PCH_SPT:
13508 	case WM_T_PCH_CNP:
13509 		do {
13510 			reg = CSR_READ(sc, WMREG_FWSM);
13511 			if ((reg & FWSM_RSPCIPHY) == 0) {
13512 				blocked = true;
13513 				delay(10*1000);
13514 				continue;
13515 			}
13516 			blocked = false;
13517 		} while (blocked && (i++ < 30));
13518 		return blocked;
13520 	case WM_T_82571:
13521 	case WM_T_82572:
13522 	case WM_T_82573:
13523 	case WM_T_82574:
13524 	case WM_T_82583:
13525 	case WM_T_80003:
13526 		reg = CSR_READ(sc, WMREG_MANC);
13527 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
13528 			return true;
13529 		else
13530 			return false;
13532 	default:
13533 		/* no problem */
13534 		break;
13535 	}
13536 
13537 	return false;
13538 }
13539 
13540 static void
13541 wm_get_hw_control(struct wm_softc *sc)
13542 {
13543 	uint32_t reg;
13544 
13545 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13546 		device_xname(sc->sc_dev), __func__));
13547 
13548 	if (sc->sc_type == WM_T_82573) {
13549 		reg = CSR_READ(sc, WMREG_SWSM);
13550 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
13551 	} else if (sc->sc_type >= WM_T_82571) {
13552 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
13553 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
13554 	}
13555 }
13556 
13557 static void
13558 wm_release_hw_control(struct wm_softc *sc)
13559 {
13560 	uint32_t reg;
13561 
13562 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13563 		device_xname(sc->sc_dev), __func__));
13564 
13565 	if (sc->sc_type == WM_T_82573) {
13566 		reg = CSR_READ(sc, WMREG_SWSM);
13567 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
13568 	} else if (sc->sc_type >= WM_T_82571) {
13569 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
13570 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
13571 	}
13572 }
13573 
13574 static void
13575 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
13576 {
13577 	uint32_t reg;
13578 
13579 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13580 		device_xname(sc->sc_dev), __func__));
13581 
13582 	if (sc->sc_type < WM_T_PCH2)
13583 		return;
13584 
13585 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
13586 
13587 	if (gate)
13588 		reg |= EXTCNFCTR_GATE_PHY_CFG;
13589 	else
13590 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
13591 
13592 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
13593 }
13594 
13595 static void
13596 wm_smbustopci(struct wm_softc *sc)
13597 {
13598 	uint32_t fwsm, reg;
13599 	int rv = 0;
13600 
13601 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13602 		device_xname(sc->sc_dev), __func__));
13603 
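	/*
	 * The PHY may still only be reachable over SMBus (its low power
	 * access mode); the dance below tries to make it accessible over
	 * MDIO, forcing SMBus mode and toggling LANPHYPC as needed, and
	 * finally resets the PHY.
	 */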
13604 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
13605 	wm_gate_hw_phy_config_ich8lan(sc, true);
13606 
13607 	/* Disable ULP */
13608 	wm_ulp_disable(sc);
13609 
13610 	/* Acquire PHY semaphore */
13611 	sc->phy.acquire(sc);
13612 
13613 	fwsm = CSR_READ(sc, WMREG_FWSM);
13614 	switch (sc->sc_type) {
13615 	case WM_T_PCH_LPT:
13616 	case WM_T_PCH_SPT:
13617 	case WM_T_PCH_CNP:
13618 		if (wm_phy_is_accessible_pchlan(sc))
13619 			break;
13620 
13621 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
13622 		reg |= CTRL_EXT_FORCE_SMBUS;
13623 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
13624 #if 0
13625 		/* XXX Isn't this required??? */
13626 		CSR_WRITE_FLUSH(sc);
13627 #endif
13628 		delay(50 * 1000);
13629 		/* FALLTHROUGH */
13630 	case WM_T_PCH2:
13631 		if (wm_phy_is_accessible_pchlan(sc) == true)
13632 			break;
13633 		/* FALLTHROUGH */
13634 	case WM_T_PCH:
13635 		if (sc->sc_type == WM_T_PCH)
13636 			if ((fwsm & FWSM_FW_VALID) != 0)
13637 				break;
13638 
13639 		if (wm_phy_resetisblocked(sc) == true) {
13640 			printf("XXX reset is blocked(3)\n");
13641 			break;
13642 		}
13643 
13644 		wm_toggle_lanphypc_pch_lpt(sc);
13645 
13646 		if (sc->sc_type >= WM_T_PCH_LPT) {
13647 			if (wm_phy_is_accessible_pchlan(sc) == true)
13648 				break;
13649 
13650 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
13651 			reg &= ~CTRL_EXT_FORCE_SMBUS;
13652 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
13653 
13654 			if (wm_phy_is_accessible_pchlan(sc) == true)
13655 				break;
13656 			rv = -1;
13657 		}
13658 		break;
13659 	default:
13660 		break;
13661 	}
13662 
13663 	/* Release semaphore */
13664 	sc->phy.release(sc);
13665 
13666 	if (rv == 0) {
13667 		if (wm_phy_resetisblocked(sc)) {
13668 			printf("XXX reset is blocked(4)\n");
13669 			goto out;
13670 		}
13671 		wm_reset_phy(sc);
13672 		if (wm_phy_resetisblocked(sc))
13673 			printf("XXX reset is blocked(4)\n");
13674 	}
13675 
13676 out:
13677 	/*
13678 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
13679 	 */
13680 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
13681 		delay(10*1000);
13682 		wm_gate_hw_phy_config_ich8lan(sc, false);
13683 	}
13684 }
13685 
13686 static void
13687 wm_init_manageability(struct wm_softc *sc)
13688 {
13689 
13690 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13691 		device_xname(sc->sc_dev), __func__));
13692 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
13693 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
13694 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
13695 
13696 		/* Disable hardware interception of ARP */
13697 		manc &= ~MANC_ARP_EN;
13698 
13699 		/* Enable receiving management packets to the host */
13700 		if (sc->sc_type >= WM_T_82571) {
13701 			manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
13703 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
13704 		}
13705 
13706 		CSR_WRITE(sc, WMREG_MANC, manc);
13707 	}
13708 }
13709 
13710 static void
13711 wm_release_manageability(struct wm_softc *sc)
13712 {
13713 
13714 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
13715 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
13716 
13717 		manc |= MANC_ARP_EN;
13718 		if (sc->sc_type >= WM_T_82571)
13719 			manc &= ~MANC_EN_MNG2HOST;
13720 
13721 		CSR_WRITE(sc, WMREG_MANC, manc);
13722 	}
13723 }
13724 
13725 static void
13726 wm_get_wakeup(struct wm_softc *sc)
13727 {
13728 
13729 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
13730 	switch (sc->sc_type) {
13731 	case WM_T_82573:
13732 	case WM_T_82583:
13733 		sc->sc_flags |= WM_F_HAS_AMT;
13734 		/* FALLTHROUGH */
13735 	case WM_T_80003:
13736 	case WM_T_82575:
13737 	case WM_T_82576:
13738 	case WM_T_82580:
13739 	case WM_T_I350:
13740 	case WM_T_I354:
13741 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
13742 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
13743 		/* FALLTHROUGH */
13744 	case WM_T_82541:
13745 	case WM_T_82541_2:
13746 	case WM_T_82547:
13747 	case WM_T_82547_2:
13748 	case WM_T_82571:
13749 	case WM_T_82572:
13750 	case WM_T_82574:
13751 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
13752 		break;
13753 	case WM_T_ICH8:
13754 	case WM_T_ICH9:
13755 	case WM_T_ICH10:
13756 	case WM_T_PCH:
13757 	case WM_T_PCH2:
13758 	case WM_T_PCH_LPT:
13759 	case WM_T_PCH_SPT:
13760 	case WM_T_PCH_CNP:
13761 		sc->sc_flags |= WM_F_HAS_AMT;
13762 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
13763 		break;
13764 	default:
13765 		break;
13766 	}
13767 
13768 	/* 1: HAS_MANAGE */
13769 	if (wm_enable_mng_pass_thru(sc) != 0)
13770 		sc->sc_flags |= WM_F_HAS_MANAGE;
13771 
13772 	/*
	 * Note that the WOL flags are set after the EEPROM-related
	 * reset code has run.
13775 	 */
13776 }
13777 
13778 /*
13779  * Unconfigure Ultra Low Power mode.
13780  * Only for I217 and newer (see below).
13781  */
13782 static void
13783 wm_ulp_disable(struct wm_softc *sc)
13784 {
13785 	uint32_t reg;
13786 	int i = 0;
13787 
13788 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13789 		device_xname(sc->sc_dev), __func__));
13790 	/* Exclude old devices */
13791 	if ((sc->sc_type < WM_T_PCH_LPT)
13792 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
13793 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
13794 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
13795 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
13796 		return;
13797 
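	/*
	 * If ME firmware is valid, ask the ME to leave ULP via H2ME and
	 * poll FWSM for completion; otherwise exit ULP by hand below
	 * (toggle LANPHYPC, unforce SMBus mode and rewrite
	 * I218_ULP_CONFIG1).
	 */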
13798 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
13799 		/* Request ME un-configure ULP mode in the PHY */
13800 		reg = CSR_READ(sc, WMREG_H2ME);
13801 		reg &= ~H2ME_ULP;
13802 		reg |= H2ME_ENFORCE_SETTINGS;
13803 		CSR_WRITE(sc, WMREG_H2ME, reg);
13804 
13805 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
13806 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
13807 			if (i++ == 30) {
13808 				printf("%s timed out\n", __func__);
13809 				return;
13810 			}
13811 			delay(10 * 1000);
13812 		}
13813 		reg = CSR_READ(sc, WMREG_H2ME);
13814 		reg &= ~H2ME_ENFORCE_SETTINGS;
13815 		CSR_WRITE(sc, WMREG_H2ME, reg);
13816 
13817 		return;
13818 	}
13819 
13820 	/* Acquire semaphore */
13821 	sc->phy.acquire(sc);
13822 
13823 	/* Toggle LANPHYPC */
13824 	wm_toggle_lanphypc_pch_lpt(sc);
13825 
13826 	/* Unforce SMBus mode in PHY */
13827 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
13828 	if (reg == 0x0000 || reg == 0xffff) {
13829 		uint32_t reg2;
13830 
13831 		printf("%s: Force SMBus first.\n", __func__);
13832 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
13833 		reg2 |= CTRL_EXT_FORCE_SMBUS;
13834 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
13835 		delay(50 * 1000);
13836 
13837 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
13838 	}
13839 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
13840 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
13841 
13842 	/* Unforce SMBus mode in MAC */
13843 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
13844 	reg &= ~CTRL_EXT_FORCE_SMBUS;
13845 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
13846 
13847 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
13848 	reg |= HV_PM_CTRL_K1_ENA;
13849 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
13850 
13851 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
13852 	reg &= ~(I218_ULP_CONFIG1_IND
13853 	    | I218_ULP_CONFIG1_STICKY_ULP
13854 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
13855 	    | I218_ULP_CONFIG1_WOL_HOST
13856 	    | I218_ULP_CONFIG1_INBAND_EXIT
13857 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
13858 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
13859 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
13860 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
13861 	reg |= I218_ULP_CONFIG1_START;
13862 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
13863 
13864 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
13865 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
13866 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
13867 
13868 	/* Release semaphore */
13869 	sc->phy.release(sc);
13870 	wm_gmii_reset(sc);
13871 	delay(50 * 1000);
13872 }
13873 
13874 /* WOL in the newer chipset interfaces (pchlan) */
13875 static void
13876 wm_enable_phy_wakeup(struct wm_softc *sc)
13877 {
13878 #if 0
13879 	uint16_t preg;
13880 
13881 	/* Copy MAC RARs to PHY RARs */
13882 
13883 	/* Copy MAC MTA to PHY MTA */
13884 
13885 	/* Configure PHY Rx Control register */
13886 
13887 	/* Enable PHY wakeup in MAC register */
13888 
13889 	/* Configure and enable PHY wakeup in PHY registers */
13890 
13891 	/* Activate PHY wakeup */
13892 
13893 	/* XXX */
13894 #endif
13895 }
13896 
13897 /* Power down workaround on D3 */
13898 static void
13899 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
13900 {
13901 	uint32_t reg;
13902 	int i;
13903 
13904 	for (i = 0; i < 2; i++) {
13905 		/* Disable link */
13906 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
13907 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
13908 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
13909 
13910 		/*
13911 		 * Call gig speed drop workaround on Gig disable before
13912 		 * accessing any PHY registers
13913 		 */
13914 		if (sc->sc_type == WM_T_ICH8)
13915 			wm_gig_downshift_workaround_ich8lan(sc);
13916 
13917 		/* Write VR power-down enable */
13918 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
13919 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
13920 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
13921 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
13922 
13923 		/* Read it back and test */
13924 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
13925 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
13926 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
13927 			break;
13928 
13929 		/* Issue PHY reset and repeat at most one more time */
13930 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
13931 	}
13932 }
13933 
13934 static void
13935 wm_enable_wakeup(struct wm_softc *sc)
13936 {
13937 	uint32_t reg, pmreg;
13938 	pcireg_t pmode;
13939 
13940 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13941 		device_xname(sc->sc_dev), __func__));
13942 
13943 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
13944 		&pmreg, NULL) == 0)
13945 		return;
13946 
13947 	/* Advertise the wakeup capability */
13948 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
13949 	    | CTRL_SWDPIN(3));
13950 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
13951 
13952 	/* ICH workaround */
13953 	switch (sc->sc_type) {
13954 	case WM_T_ICH8:
13955 	case WM_T_ICH9:
13956 	case WM_T_ICH10:
13957 	case WM_T_PCH:
13958 	case WM_T_PCH2:
13959 	case WM_T_PCH_LPT:
13960 	case WM_T_PCH_SPT:
13961 	case WM_T_PCH_CNP:
13962 		/* Disable gig during WOL */
13963 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
13964 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
13965 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
13966 		if (sc->sc_type == WM_T_PCH)
13967 			wm_gmii_reset(sc);
13968 
13969 		/* Power down workaround */
13970 		if (sc->sc_phytype == WMPHY_82577) {
13971 			struct mii_softc *child;
13972 
13973 			/* Assume that the PHY is copper */
13974 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
13975 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
13976 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
13977 				    (768 << 5) | 25, 0x0444); /* magic num */
13978 		}
13979 		break;
13980 	default:
13981 		break;
13982 	}
13983 
13984 	/* Keep the laser running on fiber adapters */
13985 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
13986 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
13987 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
13988 		reg |= CTRL_EXT_SWDPIN(3);
13989 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
13990 	}
13991 
13992 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
13993 #if 0	/* for the multicast packet */
13994 	reg |= WUFC_MC;
13995 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
13996 #endif
13997 
13998 	if (sc->sc_type >= WM_T_PCH)
13999 		wm_enable_phy_wakeup(sc);
14000 	else {
14001 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
14002 		CSR_WRITE(sc, WMREG_WUFC, reg);
14003 	}
14004 
14005 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
14006 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
14007 		|| (sc->sc_type == WM_T_PCH2))
14008 		    && (sc->sc_phytype == WMPHY_IGP_3))
14009 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
14010 
14011 	/* Request PME */
14012 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
14013 #if 0
14014 	/* Disable WOL */
14015 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
14016 #else
14017 	/* For WOL */
14018 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
14019 #endif
14020 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
14021 }
14022 
14023 /* Disable ASPM L0s and/or L1 for workaround */
14024 static void
14025 wm_disable_aspm(struct wm_softc *sc)
14026 {
14027 	pcireg_t reg, mask = 0;
	const char *str = "";
14029 
14030 	/*
	 * Only for PCIe devices which have the PCIe capability in their
	 * PCI config space.
14033 	 */
14034 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
14035 		return;
14036 
14037 	switch (sc->sc_type) {
14038 	case WM_T_82571:
14039 	case WM_T_82572:
14040 		/*
14041 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
14042 		 * State Power management L1 State (ASPM L1).
14043 		 */
14044 		mask = PCIE_LCSR_ASPM_L1;
14045 		str = "L1 is";
14046 		break;
14047 	case WM_T_82573:
14048 	case WM_T_82574:
14049 	case WM_T_82583:
14050 		/*
14051 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
14052 		 *
	 * The 82574 and 82583 do not support PCIe ASPM L0s with
	 * some chipsets.  The 82574 and 82583 documents say that
	 * disabling L0s on those specific chipsets is sufficient,
	 * but we follow what the Intel em driver does.
14057 		 *
14058 		 * References:
14059 		 * Errata 8 of the Specification Update of i82573.
14060 		 * Errata 20 of the Specification Update of i82574.
14061 		 * Errata 9 of the Specification Update of i82583.
14062 		 */
14063 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
14064 		str = "L0s and L1 are";
14065 		break;
14066 	default:
14067 		return;
14068 	}
14069 
14070 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
14071 	    sc->sc_pcixe_capoff + PCIE_LCSR);
14072 	reg &= ~mask;
14073 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
14074 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
14075 
14076 	/* Print only in wm_attach() */
14077 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
14078 		aprint_verbose_dev(sc->sc_dev,
		    "ASPM %s disabled to work around the errata.\n", str);
14081 }
14082 
/* LPLU (Low Power Link Up) */
14084 
14085 static void
14086 wm_lplu_d0_disable(struct wm_softc *sc)
14087 {
14088 	struct mii_data *mii = &sc->sc_mii;
14089 	uint32_t reg;
14090 
14091 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14092 		device_xname(sc->sc_dev), __func__));
14093 
14094 	if (sc->sc_phytype == WMPHY_IFE)
14095 		return;
14096 
14097 	switch (sc->sc_type) {
14098 	case WM_T_82571:
14099 	case WM_T_82572:
14100 	case WM_T_82573:
14101 	case WM_T_82575:
14102 	case WM_T_82576:
14103 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
14104 		reg &= ~PMR_D0_LPLU;
14105 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
14106 		break;
14107 	case WM_T_82580:
14108 	case WM_T_I350:
14109 	case WM_T_I210:
14110 	case WM_T_I211:
14111 		reg = CSR_READ(sc, WMREG_PHPM);
14112 		reg &= ~PHPM_D0A_LPLU;
14113 		CSR_WRITE(sc, WMREG_PHPM, reg);
14114 		break;
14115 	case WM_T_82574:
14116 	case WM_T_82583:
14117 	case WM_T_ICH8:
14118 	case WM_T_ICH9:
14119 	case WM_T_ICH10:
14120 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
14121 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
14122 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
14123 		CSR_WRITE_FLUSH(sc);
14124 		break;
14125 	case WM_T_PCH:
14126 	case WM_T_PCH2:
14127 	case WM_T_PCH_LPT:
14128 	case WM_T_PCH_SPT:
14129 	case WM_T_PCH_CNP:
14130 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
14131 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
14132 		if (wm_phy_resetisblocked(sc) == false)
14133 			reg |= HV_OEM_BITS_ANEGNOW;
14134 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
14135 		break;
14136 	default:
14137 		break;
14138 	}
14139 }
14140 
/* EEE (Energy Efficient Ethernet) */
14142 
14143 static void
14144 wm_set_eee_i350(struct wm_softc *sc)
14145 {
14146 	uint32_t ipcnfg, eeer;
14147 
14148 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
14149 	eeer = CSR_READ(sc, WMREG_EEER);
14150 
14151 	if ((sc->sc_flags & WM_F_EEE) != 0) {
14152 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
14153 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
14154 		    | EEER_LPI_FC);
14155 	} else {
14156 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
14157 		ipcnfg &= ~IPCNFG_10BASE_TE;
14158 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
14159 		    | EEER_LPI_FC);
14160 	}
14161 
14162 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
14163 	CSR_WRITE(sc, WMREG_EEER, eeer);
14164 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
14165 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
14166 }
14167 
14168 /*
14169  * Workarounds (mainly PHY related).
14170  * Basically, PHY's workarounds are in the PHY drivers.
14171  */
14172 
14173 /* Work-around for 82566 Kumeran PCS lock loss */
14174 static void
14175 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
14176 {
14177 	struct mii_data *mii = &sc->sc_mii;
14178 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
14179 	int i;
14180 	int reg;
14181 
14182 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14183 		device_xname(sc->sc_dev), __func__));
14184 
14185 	/* If the link is not up, do nothing */
14186 	if ((status & STATUS_LU) == 0)
14187 		return;
14188 
14189 	/* Nothing to do if the link is other than 1Gbps */
14190 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
14191 		return;
14192 
14193 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
14194 	for (i = 0; i < 10; i++) {
14195 		/* read twice */
14196 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
14197 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
14198 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
14199 			goto out;	/* GOOD! */
14200 
14201 		/* Reset the PHY */
14202 		wm_reset_phy(sc);
14203 		delay(5*1000);
14204 	}
14205 
14206 	/* Disable GigE link negotiation */
14207 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
14208 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
14209 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
14210 
14211 	/*
14212 	 * Call gig speed drop workaround on Gig disable before accessing
14213 	 * any PHY registers.
14214 	 */
14215 	wm_gig_downshift_workaround_ich8lan(sc);
14216 
14217 out:
14218 	return;
14219 }
14220 
14221 /* WOL from S5 stops working */
14222 static void
14223 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
14224 {
14225 	uint16_t kmreg;
14226 
14227 	/* Only for igp3 */
14228 	if (sc->sc_phytype == WMPHY_IGP_3) {
14229 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
14230 			return;
14231 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
14232 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
14233 			return;
14234 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
14235 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
14236 	}
14237 }
14238 
14239 /*
14240  * Workaround for pch's PHYs
14241  * XXX should be moved to new PHY driver?
14242  */
14243 static void
14244 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
14245 {
14246 
14247 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14248 		device_xname(sc->sc_dev), __func__));
14249 	KASSERT(sc->sc_type == WM_T_PCH);
14250 
14251 	if (sc->sc_phytype == WMPHY_82577)
14252 		wm_set_mdio_slow_mode_hv(sc);
14253 
14254 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
14255 
14256 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
14257 
14258 	/* 82578 */
14259 	if (sc->sc_phytype == WMPHY_82578) {
14260 		struct mii_softc *child;
14261 
14262 		/*
14263 		 * Return registers to default by doing a soft reset then
14264 		 * writing 0x3140 to the control register
14265 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
14266 		 */
14267 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
14268 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
14269 			PHY_RESET(child);
14270 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
14271 			    0x3140);
14272 		}
14273 	}
14274 
14275 	/* Select page 0 */
14276 	sc->phy.acquire(sc);
14277 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
14278 	sc->phy.release(sc);
14279 
14280 	/*
14281 	 * Configure the K1 Si workaround during phy reset assuming there is
14282 	 * link so that it disables K1 if link is in 1Gbps.
14283 	 */
14284 	wm_k1_gig_workaround_hv(sc, 1);
14285 }
14286 
14287 static void
14288 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
14289 {
14290 
14291 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14292 		device_xname(sc->sc_dev), __func__));
14293 	KASSERT(sc->sc_type == WM_T_PCH2);
14294 
14295 	wm_set_mdio_slow_mode_hv(sc);
14296 }
14297 
14298 static int
14299 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
14300 {
14301 	int k1_enable = sc->sc_nvm_k1_enabled;
14302 
14303 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14304 		device_xname(sc->sc_dev), __func__));
14305 
14306 	if (sc->phy.acquire(sc) != 0)
14307 		return -1;
14308 
14309 	if (link) {
14310 		k1_enable = 0;
14311 
14312 		/* Link stall fix for link up */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x0100);
14314 	} else {
14315 		/* Link stall fix for link down */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x4100);
14317 	}
14318 
14319 	wm_configure_k1_ich8lan(sc, k1_enable);
14320 	sc->phy.release(sc);
14321 
14322 	return 0;
14323 }
14324 
14325 static void
14326 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
14327 {
14328 	uint32_t reg;
14329 
14330 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
14331 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
14332 	    reg | HV_KMRN_MDIO_SLOW);
14333 }
14334 
14335 static void
14336 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
14337 {
14338 	uint32_t ctrl, ctrl_ext, tmp;
14339 	uint16_t kmreg;
14340 	int rv;
14341 
14342 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
14343 	if (rv != 0)
14344 		return;
14345 
14346 	if (k1_enable)
14347 		kmreg |= KUMCTRLSTA_K1_ENABLE;
14348 	else
14349 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
14350 
14351 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
14352 	if (rv != 0)
14353 		return;
14354 
14355 	delay(20);
14356 
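	/*
	 * Briefly force the MAC speed configuration (with the speed
	 * bypass set) so that the K1 change takes effect, then restore
	 * the original CTRL and CTRL_EXT values.
	 */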
14357 	ctrl = CSR_READ(sc, WMREG_CTRL);
14358 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
14359 
14360 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
14361 	tmp |= CTRL_FRCSPD;
14362 
14363 	CSR_WRITE(sc, WMREG_CTRL, tmp);
14364 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
14365 	CSR_WRITE_FLUSH(sc);
14366 	delay(20);
14367 
14368 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
14369 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
14370 	CSR_WRITE_FLUSH(sc);
	delay(20);
14374 }
14375 
/* Special case for the 82575: manual init is needed ... */
14377 static void
14378 wm_reset_init_script_82575(struct wm_softc *sc)
14379 {
	/*
	 * Remark: this is untested code; we have no board without an EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
	 */
14384 
14385 	/* SerDes configuration via SERDESCTRL */
14386 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
14387 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
14388 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
14389 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
14390 
14391 	/* CCM configuration via CCMCTL register */
14392 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
14393 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
14394 
14395 	/* PCIe lanes configuration */
14396 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
14397 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
14398 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
14399 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
14400 
14401 	/* PCIe PLL Configuration */
14402 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
14403 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
14404 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
14405 }
14406 
14407 static void
14408 wm_reset_mdicnfg_82580(struct wm_softc *sc)
14409 {
14410 	uint32_t reg;
14411 	uint16_t nvmword;
14412 	int rv;
14413 
14414 	if (sc->sc_type != WM_T_82580)
14415 		return;
14416 	if ((sc->sc_flags & WM_F_SGMII) == 0)
14417 		return;
14418 
14419 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
14420 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
14421 	if (rv != 0) {
14422 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
14423 		    __func__);
14424 		return;
14425 	}
14426 
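	/* Mirror the NVM's MDIO routing configuration into MDICNFG */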
14427 	reg = CSR_READ(sc, WMREG_MDICNFG);
14428 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
14429 		reg |= MDICNFG_DEST;
14430 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
14431 		reg |= MDICNFG_COM_MDIO;
14432 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
14433 }
14434 
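/* A PHY ID of all zeros or all ones means the PHY did not respond */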
14435 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
14436 
14437 static bool
14438 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
14439 {
14440 	int i;
14441 	uint32_t reg;
14442 	uint16_t id1, id2;
14443 
14444 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14445 		device_xname(sc->sc_dev), __func__));
14446 	id1 = id2 = 0xffff;
14447 	for (i = 0; i < 2; i++) {
14448 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
14449 		if (MII_INVALIDID(id1))
14450 			continue;
14451 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
14452 		if (MII_INVALIDID(id2))
14453 			continue;
14454 		break;
14455 	}
	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
		goto out;
14459 
14460 	if (sc->sc_type < WM_T_PCH_LPT) {
14461 		sc->phy.release(sc);
14462 		wm_set_mdio_slow_mode_hv(sc);
14463 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
14464 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
14465 		sc->phy.acquire(sc);
14466 	}
14467 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
		printf("%s: XXX PHY ID is invalid, returning false\n",
		    device_xname(sc->sc_dev));
14469 		return false;
14470 	}
14471 out:
14472 	if (sc->sc_type >= WM_T_PCH_LPT) {
14473 		/* Only unforce SMBus if ME is not active */
14474 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
14475 			/* Unforce SMBus mode in PHY */
14476 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
14477 			    CV_SMB_CTRL);
14478 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
14479 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
14480 			    CV_SMB_CTRL, reg);
14481 
14482 			/* Unforce SMBus mode in MAC */
14483 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
14484 			reg &= ~CTRL_EXT_FORCE_SMBUS;
14485 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
14486 		}
14487 	}
14488 	return true;
14489 }
14490 
14491 static void
14492 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
14493 {
14494 	uint32_t reg;
14495 	int i;
14496 
14497 	/* Set PHY Config Counter to 50msec */
14498 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
14499 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
14500 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
14501 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
14502 
14503 	/* Toggle LANPHYPC */
14504 	reg = CSR_READ(sc, WMREG_CTRL);
14505 	reg |= CTRL_LANPHYPC_OVERRIDE;
14506 	reg &= ~CTRL_LANPHYPC_VALUE;
14507 	CSR_WRITE(sc, WMREG_CTRL, reg);
14508 	CSR_WRITE_FLUSH(sc);
14509 	delay(1000);
14510 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
14511 	CSR_WRITE(sc, WMREG_CTRL, reg);
14512 	CSR_WRITE_FLUSH(sc);
14513 
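	/*
	 * Wait out the LANPHYPC cycle: a fixed 50ms on parts older than
	 * PCH_LPT; otherwise poll CTRL_EXT_LPCD for up to 100ms and then
	 * allow 30ms to settle.
	 */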
14514 	if (sc->sc_type < WM_T_PCH_LPT)
14515 		delay(50 * 1000);
14516 	else {
14517 		i = 20;
14518 
14519 		do {
14520 			delay(5 * 1000);
14521 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
14522 		    && i--);
14523 
14524 		delay(30 * 1000);
14525 	}
14526 }
14527 
14528 static int
14529 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
14530 {
14531 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
14532 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
14533 	uint32_t rxa;
14534 	uint16_t scale = 0, lat_enc = 0;
14535 	int32_t obff_hwm = 0;
14536 	int64_t lat_ns, value;
14537 
14538 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14539 		device_xname(sc->sc_dev), __func__));
14540 
14541 	if (link) {
14542 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
14543 		uint32_t status;
14544 		uint16_t speed;
14545 		pcireg_t preg;
14546 
14547 		status = CSR_READ(sc, WMREG_STATUS);
14548 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
14549 		case STATUS_SPEED_10:
14550 			speed = 10;
14551 			break;
14552 		case STATUS_SPEED_100:
14553 			speed = 100;
14554 			break;
14555 		case STATUS_SPEED_1000:
14556 			speed = 1000;
14557 			break;
14558 		default:
14559 			device_printf(sc->sc_dev, "Unknown speed "
14560 			    "(status = %08x)\n", status);
14561 			return -1;
14562 		}
14563 
14564 		/* Rx Packet Buffer Allocation size (KB) */
14565 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
14566 
14567 		/*
14568 		 * Determine the maximum latency tolerated by the device.
14569 		 *
14570 		 * Per the PCIe spec, the tolerated latencies are encoded as
14571 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
14572 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
14573 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
14574 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
14575 		 */
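		/*
		 * Worked example with hypothetical figures: a 24KB Rx
		 * buffer and a 1500 byte MTU at 1000Mb/s give
		 * lat_ns = (24 * 1024 - 2 * 1514) * 8 * 1000 / 1000
		 * = 172384, which the loop below encodes as value 169
		 * with scale 2, i.e. 169 * 2^10ns ~= 173us.
		 */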
14576 		lat_ns = ((int64_t)rxa * 1024 -
14577 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
14578 			+ ETHER_HDR_LEN))) * 8 * 1000;
14579 		if (lat_ns < 0)
14580 			lat_ns = 0;
14581 		else
14582 			lat_ns /= speed;
14583 		value = lat_ns;
14584 
14585 		while (value > LTRV_VALUE) {
			scale++;
14587 			value = howmany(value, __BIT(5));
14588 		}
14589 		if (scale > LTRV_SCALE_MAX) {
14590 			printf("%s: Invalid LTR latency scale %d\n",
14591 			    device_xname(sc->sc_dev), scale);
14592 			return -1;
14593 		}
14594 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
14595 
14596 		/* Determine the maximum latency tolerated by the platform */
14597 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
14598 		    WM_PCI_LTR_CAP_LPT);
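		/* Low 16 bits: max snoop latency; high 16: max no-snoop */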
14599 		max_snoop = preg & 0xffff;
14600 		max_nosnoop = preg >> 16;
14601 
14602 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
14603 
14604 		if (lat_enc > max_ltr_enc) {
14605 			lat_enc = max_ltr_enc;
14606 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
14607 			    * PCI_LTR_SCALETONS(
14608 				    __SHIFTOUT(lat_enc,
14609 					PCI_LTR_MAXSNOOPLAT_SCALE));
14610 		}
14611 
14612 		if (lat_ns) {
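			/*
			 * Convert the latency back into the amount of Rx
			 * buffer (in KB) that drains at line rate in that
			 * time; the OBFF high water mark is the remainder.
			 */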
14613 			lat_ns *= speed * 1000;
14614 			lat_ns /= 8;
14615 			lat_ns /= 1000000000;
14616 			obff_hwm = (int32_t)(rxa - lat_ns);
14617 		}
14618 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
			device_printf(sc->sc_dev, "Invalid high water mark %d"
			    " (rxa = %d, lat_ns = %d)\n",
14621 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
14622 			return -1;
14623 		}
14624 	}
	/* Snoop and No-Snoop latencies are set to the same value */
14626 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
14627 	CSR_WRITE(sc, WMREG_LTRV, reg);
14628 
14629 	/* Set OBFF high water mark */
14630 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
14631 	reg |= obff_hwm;
14632 	CSR_WRITE(sc, WMREG_SVT, reg);
14633 
14634 	/* Enable OBFF */
14635 	reg = CSR_READ(sc, WMREG_SVCR);
14636 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
14637 	CSR_WRITE(sc, WMREG_SVCR, reg);
14638 
14639 	return 0;
14640 }
14641 
14642 /*
14643  * I210 Errata 25 and I211 Errata 10
14644  * Slow System Clock.
14645  */
14646 static void
14647 wm_pll_workaround_i210(struct wm_softc *sc)
14648 {
14649 	uint32_t mdicnfg, wuc;
14650 	uint32_t reg;
14651 	pcireg_t pcireg;
14652 	uint32_t pmreg;
14653 	uint16_t nvmword, tmp_nvmword;
14654 	int phyval;
14655 	bool wa_done = false;
14656 	int i;
14657 
14658 	/* Save WUC and MDICNFG registers */
14659 	wuc = CSR_READ(sc, WMREG_WUC);
14660 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
14661 
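	/* Temporarily route MDIO to the internal PHY */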
14662 	reg = mdicnfg & ~MDICNFG_DEST;
14663 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
14664 
14665 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
14666 		nvmword = INVM_DEFAULT_AL;
14667 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
14668 
14669 	/* Get Power Management cap offset */
14670 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
14671 		&pmreg, NULL) == 0)
14672 		return;
14673 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
14674 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
14675 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
14676 
		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF)
			break; /* OK */
14680 
14681 		wa_done = true;
14682 		/* Directly reset the internal PHY */
14683 		reg = CSR_READ(sc, WMREG_CTRL);
14684 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
14685 
14686 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
14687 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
14688 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
14689 
14690 		CSR_WRITE(sc, WMREG_WUC, 0);
14691 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
14692 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
14693 
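		/*
		 * Bounce the function through D3hot and back to D0 so
		 * that the autoload value written above takes effect.
		 */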
14694 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
14695 		    pmreg + PCI_PMCSR);
14696 		pcireg |= PCI_PMCSR_STATE_D3;
14697 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
14698 		    pmreg + PCI_PMCSR, pcireg);
14699 		delay(1000);
14700 		pcireg &= ~PCI_PMCSR_STATE_D3;
14701 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
14702 		    pmreg + PCI_PMCSR, pcireg);
14703 
14704 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
14705 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
14706 
14707 		/* Restore WUC register */
14708 		CSR_WRITE(sc, WMREG_WUC, wuc);
14709 	}
14710 
14711 	/* Restore MDICNFG setting */
14712 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
14713 	if (wa_done)
14714 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
14715 }
14716 
14717 static void
14718 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
14719 {
14720 	uint32_t reg;
14721 
14722 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14723 		device_xname(sc->sc_dev), __func__));
14724 	KASSERT(sc->sc_type == WM_T_PCH_SPT);
14725 
14726 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
14727 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
14728 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
14729 
14730 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
14731 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
14732 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
14733 }
14734