xref: /netbsd-src/sys/dev/pci/if_wm.c (revision 87d689fb734c654d2486f87f7be32f1b53ecdbec)
1 /*	$NetBSD: if_wm.c,v 1.552 2018/01/04 09:43:27 msaitoh Exp $	*/
2 
3 /*
4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*******************************************************************************
39 
40   Copyright (c) 2001-2005, Intel Corporation
41   All rights reserved.
42 
43   Redistribution and use in source and binary forms, with or without
44   modification, are permitted provided that the following conditions are met:
45 
46    1. Redistributions of source code must retain the above copyright notice,
47       this list of conditions and the following disclaimer.
48 
49    2. Redistributions in binary form must reproduce the above copyright
50       notice, this list of conditions and the following disclaimer in the
51       documentation and/or other materials provided with the distribution.
52 
53    3. Neither the name of the Intel Corporation nor the names of its
54       contributors may be used to endorse or promote products derived from
55       this software without specific prior written permission.
56 
57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67   POSSIBILITY OF SUCH DAMAGE.
68 
69 *******************************************************************************/
70 /*
71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72  *
73  * TODO (in order of importance):
74  *
75  *	- Check XXX'ed comments
76  *	- TX Multi queue improvement (refine queue selection logic)
77  *	- Split header buffer for newer descriptors
78  *	- EEE (Energy Efficiency Ethernet)
79  *	- Virtual Function
80  *	- Set LED correctly (based on contents in EEPROM)
81  *	- Rework how parameters are loaded from the EEPROM.
82  *	- Image Unique ID
83  */
84 
85 #include <sys/cdefs.h>
86 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.552 2018/01/04 09:43:27 msaitoh Exp $");
87 
88 #ifdef _KERNEL_OPT
89 #include "opt_net_mpsafe.h"
90 #include "opt_if_wm.h"
91 #endif
92 
93 #include <sys/param.h>
94 #include <sys/systm.h>
95 #include <sys/callout.h>
96 #include <sys/mbuf.h>
97 #include <sys/malloc.h>
98 #include <sys/kmem.h>
99 #include <sys/kernel.h>
100 #include <sys/socket.h>
101 #include <sys/ioctl.h>
102 #include <sys/errno.h>
103 #include <sys/device.h>
104 #include <sys/queue.h>
105 #include <sys/syslog.h>
106 #include <sys/interrupt.h>
107 #include <sys/cpu.h>
108 #include <sys/pcq.h>
109 
110 #include <sys/rndsource.h>
111 
112 #include <net/if.h>
113 #include <net/if_dl.h>
114 #include <net/if_media.h>
115 #include <net/if_ether.h>
116 
117 #include <net/bpf.h>
118 
119 #include <netinet/in.h>			/* XXX for struct ip */
120 #include <netinet/in_systm.h>		/* XXX for struct ip */
121 #include <netinet/ip.h>			/* XXX for struct ip */
122 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
123 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
124 
125 #include <sys/bus.h>
126 #include <sys/intr.h>
127 #include <machine/endian.h>
128 
129 #include <dev/mii/mii.h>
130 #include <dev/mii/miivar.h>
131 #include <dev/mii/miidevs.h>
132 #include <dev/mii/mii_bitbang.h>
133 #include <dev/mii/ikphyreg.h>
134 #include <dev/mii/igphyreg.h>
135 #include <dev/mii/igphyvar.h>
136 #include <dev/mii/inbmphyreg.h>
137 #include <dev/mii/ihphyreg.h>
138 
139 #include <dev/pci/pcireg.h>
140 #include <dev/pci/pcivar.h>
141 #include <dev/pci/pcidevs.h>
142 
143 #include <dev/pci/if_wmreg.h>
144 #include <dev/pci/if_wmvar.h>
145 
146 #ifdef WM_DEBUG
147 #define	WM_DEBUG_LINK		__BIT(0)
148 #define	WM_DEBUG_TX		__BIT(1)
149 #define	WM_DEBUG_RX		__BIT(2)
150 #define	WM_DEBUG_GMII		__BIT(3)
151 #define	WM_DEBUG_MANAGE		__BIT(4)
152 #define	WM_DEBUG_NVM		__BIT(5)
153 #define	WM_DEBUG_INIT		__BIT(6)
154 #define	WM_DEBUG_LOCK		__BIT(7)
155 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
156     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
157 
158 #define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
159 #else
160 #define	DPRINTF(x, y)	/* nothing */
161 #endif /* WM_DEBUG */
162 
163 #ifdef NET_MPSAFE
164 #define WM_MPSAFE	1
165 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
166 #else
167 #define CALLOUT_FLAGS	0
168 #endif
169 
170 /*
171  * This device driver's max interrupt numbers.
172  */
173 #define WM_MAX_NQUEUEINTR	16
174 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
175 
176 #ifndef WM_DISABLE_MSI
177 #define	WM_DISABLE_MSI 0
178 #endif
179 #ifndef WM_DISABLE_MSIX
180 #define	WM_DISABLE_MSIX 0
181 #endif
182 
183 int wm_disable_msi = WM_DISABLE_MSI;
184 int wm_disable_msix = WM_DISABLE_MSIX;
185 
186 /*
187  * Transmit descriptor list size.  Due to errata, we can only have
188  * 256 hardware descriptors in the ring on < 82544, but we use 4096
189  * on >= 82544.  We tell the upper layers that they can queue a lot
190  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
191  * of them at a time.
192  *
193  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
194  * chains containing many small mbufs have been observed in zero-copy
195  * situations with jumbo frames.
196  */
197 #define	WM_NTXSEGS		256
198 #define	WM_IFQUEUELEN		256
199 #define	WM_TXQUEUELEN_MAX	64
200 #define	WM_TXQUEUELEN_MAX_82547	16
201 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
202 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
203 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
204 #define	WM_NTXDESC_82542	256
205 #define	WM_NTXDESC_82544	4096
206 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
207 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
208 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
209 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
210 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
211 
212 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
213 
214 #define	WM_TXINTERQSIZE		256
215 
216 /*
217  * Receive descriptor list size.  We have one Rx buffer for normal
218  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
219  * packet.  We allocate 256 receive descriptors, each with a 2k
220  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
221  */
222 #define	WM_NRXDESC		256
223 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
224 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
225 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
226 
227 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
228 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
229 #endif
230 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
231 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
232 #endif
233 
234 typedef union txdescs {
235 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
236 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
237 } txdescs_t;
238 
239 typedef union rxdescs {
240 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
241 	ext_rxdesc_t      sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
242 	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
243 } rxdescs_t;
244 
245 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
246 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
247 
248 /*
249  * Software state for transmit jobs.
250  */
251 struct wm_txsoft {
252 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
253 	bus_dmamap_t txs_dmamap;	/* our DMA map */
254 	int txs_firstdesc;		/* first descriptor in packet */
255 	int txs_lastdesc;		/* last descriptor in packet */
256 	int txs_ndesc;			/* # of descriptors used */
257 };
258 
259 /*
260  * Software state for receive buffers.  Each descriptor gets a
261  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
262  * more than one buffer, we chain them together.
263  */
264 struct wm_rxsoft {
265 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
266 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
267 };
268 
269 #define WM_LINKUP_TIMEOUT	50
270 
271 static uint16_t swfwphysem[] = {
272 	SWFW_PHY0_SM,
273 	SWFW_PHY1_SM,
274 	SWFW_PHY2_SM,
275 	SWFW_PHY3_SM
276 };
277 
278 static const uint32_t wm_82580_rxpbs_table[] = {
279 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
280 };
281 
282 struct wm_softc;
283 
284 #ifdef WM_EVENT_COUNTERS
285 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
286 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
287 	struct evcnt qname##_ev_##evname;
288 
289 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
290 	do{								\
291 		snprintf((q)->qname##_##evname##_evcnt_name,		\
292 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
293 		    "%s%02d%s", #qname, (qnum), #evname);		\
294 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
295 		    (evtype), NULL, (xname),				\
296 		    (q)->qname##_##evname##_evcnt_name);		\
297 	}while(0)
298 
299 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
300 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
301 
302 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
303 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
304 
305 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
306 	evcnt_detach(&(q)->qname##_ev_##evname);
307 #endif /* WM_EVENT_COUNTERS */
308 
309 struct wm_txqueue {
310 	kmutex_t *txq_lock;		/* lock for tx operations */
311 
312 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
313 
314 	/* Software state for the transmit descriptors. */
315 	int txq_num;			/* must be a power of two */
316 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
317 
318 	/* TX control data structures. */
319 	int txq_ndesc;			/* must be a power of two */
320 	size_t txq_descsize;		/* a tx descriptor size */
321 	txdescs_t *txq_descs_u;
322         bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
323 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
324 	int txq_desc_rseg;		/* real number of control segment */
325 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
326 #define	txq_descs	txq_descs_u->sctxu_txdescs
327 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
328 
329 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
330 
331 	int txq_free;			/* number of free Tx descriptors */
332 	int txq_next;			/* next ready Tx descriptor */
333 
334 	int txq_sfree;			/* number of free Tx jobs */
335 	int txq_snext;			/* next free Tx job */
336 	int txq_sdirty;			/* dirty Tx jobs */
337 
338 	/* These 4 variables are used only on the 82547. */
339 	int txq_fifo_size;		/* Tx FIFO size */
340 	int txq_fifo_head;		/* current head of FIFO */
341 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
342 	int txq_fifo_stall;		/* Tx FIFO is stalled */
343 
344 	/*
345 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
346 	 * CPUs. This queue intermediate them without block.
347 	 */
348 	pcq_t *txq_interq;
349 
350 	/*
351 	 * NEWQUEUE devices must use not ifp->if_flags but txq->txq_flags
352 	 * to manage Tx H/W queue's busy flag.
353 	 */
354 	int txq_flags;			/* flags for H/W queue, see below */
355 #define	WM_TXQ_NO_SPACE	0x1
356 
357 	bool txq_stopping;
358 
359 	uint32_t txq_packets;		/* for AIM */
360 	uint32_t txq_bytes;		/* for AIM */
361 #ifdef WM_EVENT_COUNTERS
362 	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
363 	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
364 	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
365 	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
366 	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
367 						/* XXX not used? */
368 
369 	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
370 	WM_Q_EVCNT_DEFINE(txq,txtusum)		/* TCP/UDP cksums comp. out-bound */
371 	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
372 	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
373 	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
374 	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */
375 
376 	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped(too many segs) */
377 
378 	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */
379 
380 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
381 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
382 #endif /* WM_EVENT_COUNTERS */
383 };
384 
385 struct wm_rxqueue {
386 	kmutex_t *rxq_lock;		/* lock for rx operations */
387 
388 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
389 
390 	/* Software state for the receive descriptors. */
391 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
392 
393 	/* RX control data structures. */
394 	int rxq_ndesc;			/* must be a power of two */
395 	size_t rxq_descsize;		/* a rx descriptor size */
396 	rxdescs_t *rxq_descs_u;
397 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
398 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
399 	int rxq_desc_rseg;		/* real number of control segment */
400 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
401 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
402 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
403 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
404 
405 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
406 
407 	int rxq_ptr;			/* next ready Rx desc/queue ent */
408 	int rxq_discard;
409 	int rxq_len;
410 	struct mbuf *rxq_head;
411 	struct mbuf *rxq_tail;
412 	struct mbuf **rxq_tailp;
413 
414 	bool rxq_stopping;
415 
416 	uint32_t rxq_packets;		/* for AIM */
417 	uint32_t rxq_bytes;		/* for AIM */
418 #ifdef WM_EVENT_COUNTERS
419 	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */
420 
421 	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
422 	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
423 #endif
424 };
425 
426 struct wm_queue {
427 	int wmq_id;			/* index of transmit and receive queues */
428 	int wmq_intr_idx;		/* index of MSI-X tables */
429 
430 	uint32_t wmq_itr;		/* interrupt interval per queue. */
431 	bool wmq_set_itr;
432 
433 	struct wm_txqueue wmq_txq;
434 	struct wm_rxqueue wmq_rxq;
435 
436 	void *wmq_si;
437 };
438 
439 struct wm_phyop {
440 	int (*acquire)(struct wm_softc *);
441 	void (*release)(struct wm_softc *);
442 	int reset_delay_us;
443 };
444 
445 struct wm_nvmop {
446 	int (*acquire)(struct wm_softc *);
447 	void (*release)(struct wm_softc *);
448 	int (*read)(struct wm_softc *, int, int, uint16_t *);
449 };
450 
451 /*
452  * Software state per device.
453  */
454 struct wm_softc {
455 	device_t sc_dev;		/* generic device information */
456 	bus_space_tag_t sc_st;		/* bus space tag */
457 	bus_space_handle_t sc_sh;	/* bus space handle */
458 	bus_size_t sc_ss;		/* bus space size */
459 	bus_space_tag_t sc_iot;		/* I/O space tag */
460 	bus_space_handle_t sc_ioh;	/* I/O space handle */
461 	bus_size_t sc_ios;		/* I/O space size */
462 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
463 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
464 	bus_size_t sc_flashs;		/* flash registers space size */
465 	off_t sc_flashreg_offset;	/*
466 					 * offset to flash registers from
467 					 * start of BAR
468 					 */
469 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
470 
471 	struct ethercom sc_ethercom;	/* ethernet common data */
472 	struct mii_data sc_mii;		/* MII/media information */
473 
474 	pci_chipset_tag_t sc_pc;
475 	pcitag_t sc_pcitag;
476 	int sc_bus_speed;		/* PCI/PCIX bus speed */
477 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
478 
479 	uint16_t sc_pcidevid;		/* PCI device ID */
480 	wm_chip_type sc_type;		/* MAC type */
481 	int sc_rev;			/* MAC revision */
482 	wm_phy_type sc_phytype;		/* PHY type */
483 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
484 #define	WM_MEDIATYPE_UNKNOWN		0x00
485 #define	WM_MEDIATYPE_FIBER		0x01
486 #define	WM_MEDIATYPE_COPPER		0x02
487 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
488 	int sc_funcid;			/* unit number of the chip (0 to 3) */
489 	int sc_flags;			/* flags; see below */
490 	int sc_if_flags;		/* last if_flags */
491 	int sc_flowflags;		/* 802.3x flow control flags */
492 	int sc_align_tweak;
493 
494 	void *sc_ihs[WM_MAX_NINTR];	/*
495 					 * interrupt cookie.
496 					 * - legacy and msi use sc_ihs[0] only
497 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
498 					 */
499 	pci_intr_handle_t *sc_intrs;	/*
500 					 * legacy and msi use sc_intrs[0] only
501 					 * msix use sc_intrs[0] to sc_ihs[nintrs-1]
502 					 */
503 	int sc_nintrs;			/* number of interrupts */
504 
505 	int sc_link_intr_idx;		/* index of MSI-X tables */
506 
507 	callout_t sc_tick_ch;		/* tick callout */
508 	bool sc_core_stopping;
509 
510 	int sc_nvm_ver_major;
511 	int sc_nvm_ver_minor;
512 	int sc_nvm_ver_build;
513 	int sc_nvm_addrbits;		/* NVM address bits */
514 	unsigned int sc_nvm_wordsize;	/* NVM word size */
515 	int sc_ich8_flash_base;
516 	int sc_ich8_flash_bank_size;
517 	int sc_nvm_k1_enabled;
518 
519 	int sc_nqueues;
520 	struct wm_queue *sc_queue;
521 	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
522 	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */
523 
524 	int sc_affinity_offset;
525 
526 #ifdef WM_EVENT_COUNTERS
527 	/* Event counters. */
528 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
529 
530         /* WM_T_82542_2_1 only */
531 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
532 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
533 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
534 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
535 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
536 #endif /* WM_EVENT_COUNTERS */
537 
538 	/* This variable are used only on the 82547. */
539 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
540 
541 	uint32_t sc_ctrl;		/* prototype CTRL register */
542 #if 0
543 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
544 #endif
545 	uint32_t sc_icr;		/* prototype interrupt bits */
546 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
547 	uint32_t sc_tctl;		/* prototype TCTL register */
548 	uint32_t sc_rctl;		/* prototype RCTL register */
549 	uint32_t sc_txcw;		/* prototype TXCW register */
550 	uint32_t sc_tipg;		/* prototype TIPG register */
551 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
552 	uint32_t sc_pba;		/* prototype PBA register */
553 
554 	int sc_tbi_linkup;		/* TBI link status */
555 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
556 	int sc_tbi_serdes_ticks;	/* tbi ticks */
557 
558 	int sc_mchash_type;		/* multicast filter offset */
559 
560 	krndsource_t rnd_source;	/* random source */
561 
562 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
563 
564 	kmutex_t *sc_core_lock;		/* lock for softc operations */
565 	kmutex_t *sc_ich_phymtx;	/*
566 					 * 82574/82583/ICH/PCH specific PHY
567 					 * mutex. For 82574/82583, the mutex
568 					 * is used for both PHY and NVM.
569 					 */
570 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
571 
572 	struct wm_phyop phy;
573 	struct wm_nvmop nvm;
574 };
575 
576 #define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
577 #define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
578 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
579 
580 #define	WM_RXCHAIN_RESET(rxq)						\
581 do {									\
582 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
583 	*(rxq)->rxq_tailp = NULL;					\
584 	(rxq)->rxq_len = 0;						\
585 } while (/*CONSTCOND*/0)
586 
587 #define	WM_RXCHAIN_LINK(rxq, m)						\
588 do {									\
589 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
590 	(rxq)->rxq_tailp = &(m)->m_next;				\
591 } while (/*CONSTCOND*/0)
592 
593 #ifdef WM_EVENT_COUNTERS
594 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
595 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
596 
597 #define WM_Q_EVCNT_INCR(qname, evname)			\
598 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
599 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
600 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
601 #else /* !WM_EVENT_COUNTERS */
602 #define	WM_EVCNT_INCR(ev)	/* nothing */
603 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
604 
605 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
606 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
607 #endif /* !WM_EVENT_COUNTERS */
608 
609 #define	CSR_READ(sc, reg)						\
610 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
611 #define	CSR_WRITE(sc, reg, val)						\
612 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
613 #define	CSR_WRITE_FLUSH(sc)						\
614 	(void) CSR_READ((sc), WMREG_STATUS)
615 
616 #define ICH8_FLASH_READ32(sc, reg)					\
617 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
618 	    (reg) + sc->sc_flashreg_offset)
619 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
620 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
621 	    (reg) + sc->sc_flashreg_offset, (data))
622 
623 #define ICH8_FLASH_READ16(sc, reg)					\
624 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
625 	    (reg) + sc->sc_flashreg_offset)
626 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
627 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
628 	    (reg) + sc->sc_flashreg_offset, (data))
629 
630 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
631 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
632 
633 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
634 #define	WM_CDTXADDR_HI(txq, x)						\
635 	(sizeof(bus_addr_t) == 8 ?					\
636 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
637 
638 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
639 #define	WM_CDRXADDR_HI(rxq, x)						\
640 	(sizeof(bus_addr_t) == 8 ?					\
641 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
642 
643 /*
644  * Register read/write functions.
645  * Other than CSR_{READ|WRITE}().
646  */
647 #if 0
648 static inline uint32_t wm_io_read(struct wm_softc *, int);
649 #endif
650 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
651 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
652 	uint32_t, uint32_t);
653 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
654 
655 /*
656  * Descriptor sync/init functions.
657  */
658 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
659 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
660 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
661 
662 /*
663  * Device driver interface functions and commonly used functions.
664  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
665  */
666 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
667 static int	wm_match(device_t, cfdata_t, void *);
668 static void	wm_attach(device_t, device_t, void *);
669 static int	wm_detach(device_t, int);
670 static bool	wm_suspend(device_t, const pmf_qual_t *);
671 static bool	wm_resume(device_t, const pmf_qual_t *);
672 static void	wm_watchdog(struct ifnet *);
673 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
674 static void	wm_tick(void *);
675 static int	wm_ifflags_cb(struct ethercom *);
676 static int	wm_ioctl(struct ifnet *, u_long, void *);
677 /* MAC address related */
678 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
679 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
680 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
681 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
682 static void	wm_set_filter(struct wm_softc *);
683 /* Reset and init related */
684 static void	wm_set_vlan(struct wm_softc *);
685 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
686 static void	wm_get_auto_rd_done(struct wm_softc *);
687 static void	wm_lan_init_done(struct wm_softc *);
688 static void	wm_get_cfg_done(struct wm_softc *);
689 static void	wm_phy_post_reset(struct wm_softc *);
690 static void	wm_write_smbus_addr(struct wm_softc *);
691 static void	wm_init_lcd_from_nvm(struct wm_softc *);
692 static void	wm_initialize_hardware_bits(struct wm_softc *);
693 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
694 static void	wm_reset_phy(struct wm_softc *);
695 static void	wm_flush_desc_rings(struct wm_softc *);
696 static void	wm_reset(struct wm_softc *);
697 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
698 static void	wm_rxdrain(struct wm_rxqueue *);
699 static void	wm_rss_getkey(uint8_t *);
700 static void	wm_init_rss(struct wm_softc *);
701 static void	wm_adjust_qnum(struct wm_softc *, int);
702 static inline bool	wm_is_using_msix(struct wm_softc *);
703 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
704 static int	wm_softint_establish(struct wm_softc *, int, int);
705 static int	wm_setup_legacy(struct wm_softc *);
706 static int	wm_setup_msix(struct wm_softc *);
707 static int	wm_init(struct ifnet *);
708 static int	wm_init_locked(struct ifnet *);
709 static void	wm_unset_stopping_flags(struct wm_softc *);
710 static void	wm_set_stopping_flags(struct wm_softc *);
711 static void	wm_stop(struct ifnet *, int);
712 static void	wm_stop_locked(struct ifnet *, int);
713 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
714 static void	wm_82547_txfifo_stall(void *);
715 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
716 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
717 /* DMA related */
718 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
719 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
720 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
721 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
722     struct wm_txqueue *);
723 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
724 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
725 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
726     struct wm_rxqueue *);
727 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
728 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
729 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
730 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
731 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
732 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
733 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
734     struct wm_txqueue *);
735 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
736     struct wm_rxqueue *);
737 static int	wm_alloc_txrx_queues(struct wm_softc *);
738 static void	wm_free_txrx_queues(struct wm_softc *);
739 static int	wm_init_txrx_queues(struct wm_softc *);
740 /* Start */
741 static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
742     struct wm_txsoft *, uint32_t *, uint8_t *);
743 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
744 static void	wm_start(struct ifnet *);
745 static void	wm_start_locked(struct ifnet *);
746 static int	wm_transmit(struct ifnet *, struct mbuf *);
747 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
748 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
749 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
750     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
751 static void	wm_nq_start(struct ifnet *);
752 static void	wm_nq_start_locked(struct ifnet *);
753 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
754 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
755 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
756 static void	wm_deferred_start_locked(struct wm_txqueue *);
757 static void	wm_handle_queue(void *);
758 /* Interrupt */
759 static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
760 static void	wm_rxeof(struct wm_rxqueue *, u_int);
761 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
762 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
763 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
764 static void	wm_linkintr(struct wm_softc *, uint32_t);
765 static int	wm_intr_legacy(void *);
766 static inline void	wm_txrxintr_disable(struct wm_queue *);
767 static inline void	wm_txrxintr_enable(struct wm_queue *);
768 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
769 static int	wm_txrxintr_msix(void *);
770 static int	wm_linkintr_msix(void *);
771 
772 /*
773  * Media related.
774  * GMII, SGMII, TBI, SERDES and SFP.
775  */
776 /* Common */
777 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
778 /* GMII related */
779 static void	wm_gmii_reset(struct wm_softc *);
780 static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
781 static int	wm_get_phy_id_82575(struct wm_softc *);
782 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
783 static int	wm_gmii_mediachange(struct ifnet *);
784 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
785 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
786 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
787 static int	wm_gmii_i82543_readreg(device_t, int, int);
788 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
789 static int	wm_gmii_mdic_readreg(device_t, int, int);
790 static void	wm_gmii_mdic_writereg(device_t, int, int, int);
791 static int	wm_gmii_i82544_readreg(device_t, int, int);
792 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
793 static int	wm_gmii_i80003_readreg(device_t, int, int);
794 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
795 static int	wm_gmii_bm_readreg(device_t, int, int);
796 static void	wm_gmii_bm_writereg(device_t, int, int, int);
797 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
798 static int	wm_gmii_hv_readreg(device_t, int, int);
799 static int	wm_gmii_hv_readreg_locked(device_t, int, int);
800 static void	wm_gmii_hv_writereg(device_t, int, int, int);
801 static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
802 static int	wm_gmii_82580_readreg(device_t, int, int);
803 static void	wm_gmii_82580_writereg(device_t, int, int, int);
804 static int	wm_gmii_gs40g_readreg(device_t, int, int);
805 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
806 static void	wm_gmii_statchg(struct ifnet *);
807 /*
808  * kumeran related (80003, ICH* and PCH*).
809  * These functions are not for accessing MII registers but for accessing
810  * kumeran specific registers.
811  */
812 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
813 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
814 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
815 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
816 /* SGMII */
817 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
818 static int	wm_sgmii_readreg(device_t, int, int);
819 static void	wm_sgmii_writereg(device_t, int, int, int);
820 /* TBI related */
821 static void	wm_tbi_mediainit(struct wm_softc *);
822 static int	wm_tbi_mediachange(struct ifnet *);
823 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
824 static int	wm_check_for_link(struct wm_softc *);
825 static void	wm_tbi_tick(struct wm_softc *);
826 /* SERDES related */
827 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
828 static int	wm_serdes_mediachange(struct ifnet *);
829 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
830 static void	wm_serdes_tick(struct wm_softc *);
831 /* SFP related */
832 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
833 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
834 
835 /*
836  * NVM related.
837  * Microwire, SPI (w/wo EERD) and Flash.
838  */
839 /* Misc functions */
840 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
841 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
842 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
843 /* Microwire */
844 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
845 /* SPI */
846 static int	wm_nvm_ready_spi(struct wm_softc *);
847 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
848 /* Using with EERD */
849 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
850 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
851 /* Flash */
852 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
853     unsigned int *);
854 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
855 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
856 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
857 	uint32_t *);
858 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
859 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
860 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
861 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
862 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
863 /* iNVM */
864 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
865 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
866 /* Lock, detecting NVM type, validate checksum and read */
867 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
868 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
869 static int	wm_nvm_validate_checksum(struct wm_softc *);
870 static void	wm_nvm_version_invm(struct wm_softc *);
871 static void	wm_nvm_version(struct wm_softc *);
872 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
873 
874 /*
875  * Hardware semaphores.
876  * Very complexed...
877  */
878 static int	wm_get_null(struct wm_softc *);
879 static void	wm_put_null(struct wm_softc *);
880 static int	wm_get_eecd(struct wm_softc *);
881 static void	wm_put_eecd(struct wm_softc *);
882 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
883 static void	wm_put_swsm_semaphore(struct wm_softc *);
884 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
885 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
886 static int	wm_get_nvm_80003(struct wm_softc *);
887 static void	wm_put_nvm_80003(struct wm_softc *);
888 static int	wm_get_nvm_82571(struct wm_softc *);
889 static void	wm_put_nvm_82571(struct wm_softc *);
890 static int	wm_get_phy_82575(struct wm_softc *);
891 static void	wm_put_phy_82575(struct wm_softc *);
892 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
893 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
894 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
895 static void	wm_put_swflag_ich8lan(struct wm_softc *);
896 static int	wm_get_nvm_ich8lan(struct wm_softc *);
897 static void	wm_put_nvm_ich8lan(struct wm_softc *);
898 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
899 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
900 
901 /*
902  * Management mode and power management related subroutines.
903  * BMC, AMT, suspend/resume and EEE.
904  */
905 #if 0
906 static int	wm_check_mng_mode(struct wm_softc *);
907 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
908 static int	wm_check_mng_mode_82574(struct wm_softc *);
909 static int	wm_check_mng_mode_generic(struct wm_softc *);
910 #endif
911 static int	wm_enable_mng_pass_thru(struct wm_softc *);
912 static bool	wm_phy_resetisblocked(struct wm_softc *);
913 static void	wm_get_hw_control(struct wm_softc *);
914 static void	wm_release_hw_control(struct wm_softc *);
915 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
916 static void	wm_smbustopci(struct wm_softc *);
917 static void	wm_init_manageability(struct wm_softc *);
918 static void	wm_release_manageability(struct wm_softc *);
919 static void	wm_get_wakeup(struct wm_softc *);
920 static void	wm_ulp_disable(struct wm_softc *);
921 static void	wm_enable_phy_wakeup(struct wm_softc *);
922 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
923 static void	wm_enable_wakeup(struct wm_softc *);
924 static void	wm_disable_aspm(struct wm_softc *);
925 /* LPLU (Low Power Link Up) */
926 static void	wm_lplu_d0_disable(struct wm_softc *);
927 /* EEE */
928 static void	wm_set_eee_i350(struct wm_softc *);
929 
930 /*
931  * Workarounds (mainly PHY related).
932  * Basically, PHY's workarounds are in the PHY drivers.
933  */
934 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
935 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
936 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
937 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
938 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
939 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
940 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
941 static void	wm_reset_init_script_82575(struct wm_softc *);
942 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
943 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
944 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
945 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
946 static void	wm_pll_workaround_i210(struct wm_softc *);
947 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
948 
949 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
950     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
951 
952 /*
953  * Devices supported by this driver.
954  */
955 static const struct wm_product {
956 	pci_vendor_id_t		wmp_vendor;
957 	pci_product_id_t	wmp_product;
958 	const char		*wmp_name;
959 	wm_chip_type		wmp_type;
960 	uint32_t		wmp_flags;
961 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
962 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
963 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
964 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
965 #define WMP_MEDIATYPE(x)	((x) & 0x03)
966 } wm_products[] = {
967 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
968 	  "Intel i82542 1000BASE-X Ethernet",
969 	  WM_T_82542_2_1,	WMP_F_FIBER },
970 
971 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
972 	  "Intel i82543GC 1000BASE-X Ethernet",
973 	  WM_T_82543,		WMP_F_FIBER },
974 
975 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
976 	  "Intel i82543GC 1000BASE-T Ethernet",
977 	  WM_T_82543,		WMP_F_COPPER },
978 
979 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
980 	  "Intel i82544EI 1000BASE-T Ethernet",
981 	  WM_T_82544,		WMP_F_COPPER },
982 
983 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
984 	  "Intel i82544EI 1000BASE-X Ethernet",
985 	  WM_T_82544,		WMP_F_FIBER },
986 
987 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
988 	  "Intel i82544GC 1000BASE-T Ethernet",
989 	  WM_T_82544,		WMP_F_COPPER },
990 
991 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
992 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
993 	  WM_T_82544,		WMP_F_COPPER },
994 
995 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
996 	  "Intel i82540EM 1000BASE-T Ethernet",
997 	  WM_T_82540,		WMP_F_COPPER },
998 
999 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
1000 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
1001 	  WM_T_82540,		WMP_F_COPPER },
1002 
1003 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
1004 	  "Intel i82540EP 1000BASE-T Ethernet",
1005 	  WM_T_82540,		WMP_F_COPPER },
1006 
1007 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
1008 	  "Intel i82540EP 1000BASE-T Ethernet",
1009 	  WM_T_82540,		WMP_F_COPPER },
1010 
1011 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
1012 	  "Intel i82540EP 1000BASE-T Ethernet",
1013 	  WM_T_82540,		WMP_F_COPPER },
1014 
1015 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
1016 	  "Intel i82545EM 1000BASE-T Ethernet",
1017 	  WM_T_82545,		WMP_F_COPPER },
1018 
1019 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
1020 	  "Intel i82545GM 1000BASE-T Ethernet",
1021 	  WM_T_82545_3,		WMP_F_COPPER },
1022 
1023 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
1024 	  "Intel i82545GM 1000BASE-X Ethernet",
1025 	  WM_T_82545_3,		WMP_F_FIBER },
1026 
1027 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
1028 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
1029 	  WM_T_82545_3,		WMP_F_SERDES },
1030 
1031 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
1032 	  "Intel i82546EB 1000BASE-T Ethernet",
1033 	  WM_T_82546,		WMP_F_COPPER },
1034 
1035 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
1036 	  "Intel i82546EB 1000BASE-T Ethernet",
1037 	  WM_T_82546,		WMP_F_COPPER },
1038 
1039 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
1040 	  "Intel i82545EM 1000BASE-X Ethernet",
1041 	  WM_T_82545,		WMP_F_FIBER },
1042 
1043 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
1044 	  "Intel i82546EB 1000BASE-X Ethernet",
1045 	  WM_T_82546,		WMP_F_FIBER },
1046 
1047 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
1048 	  "Intel i82546GB 1000BASE-T Ethernet",
1049 	  WM_T_82546_3,		WMP_F_COPPER },
1050 
1051 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
1052 	  "Intel i82546GB 1000BASE-X Ethernet",
1053 	  WM_T_82546_3,		WMP_F_FIBER },
1054 
1055 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
1056 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
1057 	  WM_T_82546_3,		WMP_F_SERDES },
1058 
1059 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
1060 	  "i82546GB quad-port Gigabit Ethernet",
1061 	  WM_T_82546_3,		WMP_F_COPPER },
1062 
1063 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
1064 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
1065 	  WM_T_82546_3,		WMP_F_COPPER },
1066 
1067 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
1068 	  "Intel PRO/1000MT (82546GB)",
1069 	  WM_T_82546_3,		WMP_F_COPPER },
1070 
1071 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
1072 	  "Intel i82541EI 1000BASE-T Ethernet",
1073 	  WM_T_82541,		WMP_F_COPPER },
1074 
1075 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
1076 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
1077 	  WM_T_82541,		WMP_F_COPPER },
1078 
1079 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
1080 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
1081 	  WM_T_82541,		WMP_F_COPPER },
1082 
1083 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
1084 	  "Intel i82541ER 1000BASE-T Ethernet",
1085 	  WM_T_82541_2,		WMP_F_COPPER },
1086 
1087 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
1088 	  "Intel i82541GI 1000BASE-T Ethernet",
1089 	  WM_T_82541_2,		WMP_F_COPPER },
1090 
1091 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
1092 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
1093 	  WM_T_82541_2,		WMP_F_COPPER },
1094 
1095 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
1096 	  "Intel i82541PI 1000BASE-T Ethernet",
1097 	  WM_T_82541_2,		WMP_F_COPPER },
1098 
1099 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
1100 	  "Intel i82547EI 1000BASE-T Ethernet",
1101 	  WM_T_82547,		WMP_F_COPPER },
1102 
1103 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
1104 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
1105 	  WM_T_82547,		WMP_F_COPPER },
1106 
1107 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
1108 	  "Intel i82547GI 1000BASE-T Ethernet",
1109 	  WM_T_82547_2,		WMP_F_COPPER },
1110 
1111 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
1112 	  "Intel PRO/1000 PT (82571EB)",
1113 	  WM_T_82571,		WMP_F_COPPER },
1114 
1115 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
1116 	  "Intel PRO/1000 PF (82571EB)",
1117 	  WM_T_82571,		WMP_F_FIBER },
1118 
1119 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
1120 	  "Intel PRO/1000 PB (82571EB)",
1121 	  WM_T_82571,		WMP_F_SERDES },
1122 
1123 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
1124 	  "Intel PRO/1000 QT (82571EB)",
1125 	  WM_T_82571,		WMP_F_COPPER },
1126 
1127 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
1128 	  "Intel PRO/1000 PT Quad Port Server Adapter",
1129 	  WM_T_82571,		WMP_F_COPPER, },
1130 
1131 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
1132 	  "Intel Gigabit PT Quad Port Server ExpressModule",
1133 	  WM_T_82571,		WMP_F_COPPER, },
1134 
1135 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
1136 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
1137 	  WM_T_82571,		WMP_F_SERDES, },
1138 
1139 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
1140 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
1141 	  WM_T_82571,		WMP_F_SERDES, },
1142 
1143 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
1144 	  "Intel 82571EB Quad 1000baseX Ethernet",
1145 	  WM_T_82571,		WMP_F_FIBER, },
1146 
1147 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
1148 	  "Intel i82572EI 1000baseT Ethernet",
1149 	  WM_T_82572,		WMP_F_COPPER },
1150 
1151 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
1152 	  "Intel i82572EI 1000baseX Ethernet",
1153 	  WM_T_82572,		WMP_F_FIBER },
1154 
1155 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
1156 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
1157 	  WM_T_82572,		WMP_F_SERDES },
1158 
1159 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
1160 	  "Intel i82572EI 1000baseT Ethernet",
1161 	  WM_T_82572,		WMP_F_COPPER },
1162 
1163 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
1164 	  "Intel i82573E",
1165 	  WM_T_82573,		WMP_F_COPPER },
1166 
1167 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
1168 	  "Intel i82573E IAMT",
1169 	  WM_T_82573,		WMP_F_COPPER },
1170 
1171 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
1172 	  "Intel i82573L Gigabit Ethernet",
1173 	  WM_T_82573,		WMP_F_COPPER },
1174 
1175 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
1176 	  "Intel i82574L",
1177 	  WM_T_82574,		WMP_F_COPPER },
1178 
1179 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
1180 	  "Intel i82574L",
1181 	  WM_T_82574,		WMP_F_COPPER },
1182 
1183 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
1184 	  "Intel i82583V",
1185 	  WM_T_82583,		WMP_F_COPPER },
1186 
1187 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
1188 	  "i80003 dual 1000baseT Ethernet",
1189 	  WM_T_80003,		WMP_F_COPPER },
1190 
1191 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
1192 	  "i80003 dual 1000baseX Ethernet",
1193 	  WM_T_80003,		WMP_F_COPPER },
1194 
1195 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
1196 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
1197 	  WM_T_80003,		WMP_F_SERDES },
1198 
1199 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
1200 	  "Intel i80003 1000baseT Ethernet",
1201 	  WM_T_80003,		WMP_F_COPPER },
1202 
1203 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
1204 	  "Intel i80003 Gigabit Ethernet (SERDES)",
1205 	  WM_T_80003,		WMP_F_SERDES },
1206 
1207 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
1208 	  "Intel i82801H (M_AMT) LAN Controller",
1209 	  WM_T_ICH8,		WMP_F_COPPER },
1210 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
1211 	  "Intel i82801H (AMT) LAN Controller",
1212 	  WM_T_ICH8,		WMP_F_COPPER },
1213 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
1214 	  "Intel i82801H LAN Controller",
1215 	  WM_T_ICH8,		WMP_F_COPPER },
1216 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
1217 	  "Intel i82801H (IFE) 10/100 LAN Controller",
1218 	  WM_T_ICH8,		WMP_F_COPPER },
1219 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
1220 	  "Intel i82801H (M) LAN Controller",
1221 	  WM_T_ICH8,		WMP_F_COPPER },
1222 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
1223 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
1224 	  WM_T_ICH8,		WMP_F_COPPER },
1225 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
1226 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
1227 	  WM_T_ICH8,		WMP_F_COPPER },
1228 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
1229 	  "82567V-3 LAN Controller",
1230 	  WM_T_ICH8,		WMP_F_COPPER },
1231 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
1232 	  "82801I (AMT) LAN Controller",
1233 	  WM_T_ICH9,		WMP_F_COPPER },
1234 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
1235 	  "82801I 10/100 LAN Controller",
1236 	  WM_T_ICH9,		WMP_F_COPPER },
1237 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
1238 	  "82801I (G) 10/100 LAN Controller",
1239 	  WM_T_ICH9,		WMP_F_COPPER },
1240 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
1241 	  "82801I (GT) 10/100 LAN Controller",
1242 	  WM_T_ICH9,		WMP_F_COPPER },
1243 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
1244 	  "82801I (C) LAN Controller",
1245 	  WM_T_ICH9,		WMP_F_COPPER },
1246 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
1247 	  "82801I mobile LAN Controller",
1248 	  WM_T_ICH9,		WMP_F_COPPER },
1249 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
1250 	  "82801I mobile (V) LAN Controller",
1251 	  WM_T_ICH9,		WMP_F_COPPER },
1252 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
1253 	  "82801I mobile (AMT) LAN Controller",
1254 	  WM_T_ICH9,		WMP_F_COPPER },
1255 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
1256 	  "82567LM-4 LAN Controller",
1257 	  WM_T_ICH9,		WMP_F_COPPER },
1258 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
1259 	  "82567LM-2 LAN Controller",
1260 	  WM_T_ICH10,		WMP_F_COPPER },
1261 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
1262 	  "82567LF-2 LAN Controller",
1263 	  WM_T_ICH10,		WMP_F_COPPER },
1264 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
1265 	  "82567LM-3 LAN Controller",
1266 	  WM_T_ICH10,		WMP_F_COPPER },
1267 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
1268 	  "82567LF-3 LAN Controller",
1269 	  WM_T_ICH10,		WMP_F_COPPER },
1270 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
1271 	  "82567V-2 LAN Controller",
1272 	  WM_T_ICH10,		WMP_F_COPPER },
1273 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
1274 	  "82567V-3? LAN Controller",
1275 	  WM_T_ICH10,		WMP_F_COPPER },
1276 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
1277 	  "HANKSVILLE LAN Controller",
1278 	  WM_T_ICH10,		WMP_F_COPPER },
1279 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
1280 	  "PCH LAN (82577LM) Controller",
1281 	  WM_T_PCH,		WMP_F_COPPER },
1282 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
1283 	  "PCH LAN (82577LC) Controller",
1284 	  WM_T_PCH,		WMP_F_COPPER },
1285 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
1286 	  "PCH LAN (82578DM) Controller",
1287 	  WM_T_PCH,		WMP_F_COPPER },
1288 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
1289 	  "PCH LAN (82578DC) Controller",
1290 	  WM_T_PCH,		WMP_F_COPPER },
1291 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
1292 	  "PCH2 LAN (82579LM) Controller",
1293 	  WM_T_PCH2,		WMP_F_COPPER },
1294 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
1295 	  "PCH2 LAN (82579V) Controller",
1296 	  WM_T_PCH2,		WMP_F_COPPER },
1297 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
1298 	  "82575EB dual-1000baseT Ethernet",
1299 	  WM_T_82575,		WMP_F_COPPER },
1300 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
1301 	  "82575EB dual-1000baseX Ethernet (SERDES)",
1302 	  WM_T_82575,		WMP_F_SERDES },
1303 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
1304 	  "82575GB quad-1000baseT Ethernet",
1305 	  WM_T_82575,		WMP_F_COPPER },
1306 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
1307 	  "82575GB quad-1000baseT Ethernet (PM)",
1308 	  WM_T_82575,		WMP_F_COPPER },
1309 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
1310 	  "82576 1000BaseT Ethernet",
1311 	  WM_T_82576,		WMP_F_COPPER },
1312 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
1313 	  "82576 1000BaseX Ethernet",
1314 	  WM_T_82576,		WMP_F_FIBER },
1315 
1316 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
1317 	  "82576 gigabit Ethernet (SERDES)",
1318 	  WM_T_82576,		WMP_F_SERDES },
1319 
1320 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
1321 	  "82576 quad-1000BaseT Ethernet",
1322 	  WM_T_82576,		WMP_F_COPPER },
1323 
1324 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
1325 	  "82576 Gigabit ET2 Quad Port Server Adapter",
1326 	  WM_T_82576,		WMP_F_COPPER },
1327 
1328 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
1329 	  "82576 gigabit Ethernet",
1330 	  WM_T_82576,		WMP_F_COPPER },
1331 
1332 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
1333 	  "82576 gigabit Ethernet (SERDES)",
1334 	  WM_T_82576,		WMP_F_SERDES },
1335 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
1336 	  "82576 quad-gigabit Ethernet (SERDES)",
1337 	  WM_T_82576,		WMP_F_SERDES },
1338 
1339 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
1340 	  "82580 1000BaseT Ethernet",
1341 	  WM_T_82580,		WMP_F_COPPER },
1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
1343 	  "82580 1000BaseX Ethernet",
1344 	  WM_T_82580,		WMP_F_FIBER },
1345 
1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
1347 	  "82580 1000BaseT Ethernet (SERDES)",
1348 	  WM_T_82580,		WMP_F_SERDES },
1349 
1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
1351 	  "82580 gigabit Ethernet (SGMII)",
1352 	  WM_T_82580,		WMP_F_COPPER },
1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
1354 	  "82580 dual-1000BaseT Ethernet",
1355 	  WM_T_82580,		WMP_F_COPPER },
1356 
1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1358 	  "82580 quad-1000BaseX Ethernet",
1359 	  WM_T_82580,		WMP_F_FIBER },
1360 
1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
1362 	  "DH89XXCC Gigabit Ethernet (SGMII)",
1363 	  WM_T_82580,		WMP_F_COPPER },
1364 
1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
1366 	  "DH89XXCC Gigabit Ethernet (SERDES)",
1367 	  WM_T_82580,		WMP_F_SERDES },
1368 
1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
1370 	  "DH89XXCC 1000BASE-KX Ethernet",
1371 	  WM_T_82580,		WMP_F_SERDES },
1372 
1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
1374 	  "DH89XXCC Gigabit Ethernet (SFP)",
1375 	  WM_T_82580,		WMP_F_SERDES },
1376 
1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
1378 	  "I350 Gigabit Network Connection",
1379 	  WM_T_I350,		WMP_F_COPPER },
1380 
1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
1382 	  "I350 Gigabit Fiber Network Connection",
1383 	  WM_T_I350,		WMP_F_FIBER },
1384 
1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
1386 	  "I350 Gigabit Backplane Connection",
1387 	  WM_T_I350,		WMP_F_SERDES },
1388 
1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
1390 	  "I350 Quad Port Gigabit Ethernet",
1391 	  WM_T_I350,		WMP_F_SERDES },
1392 
1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
1394 	  "I350 Gigabit Connection",
1395 	  WM_T_I350,		WMP_F_COPPER },
1396 
1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
1398 	  "I354 Gigabit Ethernet (KX)",
1399 	  WM_T_I354,		WMP_F_SERDES },
1400 
1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
1402 	  "I354 Gigabit Ethernet (SGMII)",
1403 	  WM_T_I354,		WMP_F_COPPER },
1404 
1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
1406 	  "I354 Gigabit Ethernet (2.5G)",
1407 	  WM_T_I354,		WMP_F_COPPER },
1408 
1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
1410 	  "I210-T1 Ethernet Server Adapter",
1411 	  WM_T_I210,		WMP_F_COPPER },
1412 
1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
1414 	  "I210 Ethernet (Copper OEM)",
1415 	  WM_T_I210,		WMP_F_COPPER },
1416 
1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
1418 	  "I210 Ethernet (Copper IT)",
1419 	  WM_T_I210,		WMP_F_COPPER },
1420 
1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
1422 	  "I210 Ethernet (FLASH less)",
1423 	  WM_T_I210,		WMP_F_COPPER },
1424 
1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
1426 	  "I210 Gigabit Ethernet (Fiber)",
1427 	  WM_T_I210,		WMP_F_FIBER },
1428 
1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
1430 	  "I210 Gigabit Ethernet (SERDES)",
1431 	  WM_T_I210,		WMP_F_SERDES },
1432 
1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
1434 	  "I210 Gigabit Ethernet (FLASH less)",
1435 	  WM_T_I210,		WMP_F_SERDES },
1436 
1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
1438 	  "I210 Gigabit Ethernet (SGMII)",
1439 	  WM_T_I210,		WMP_F_COPPER },
1440 
1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
1442 	  "I211 Ethernet (COPPER)",
1443 	  WM_T_I211,		WMP_F_COPPER },
1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
1445 	  "I217 V Ethernet Connection",
1446 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1447 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
1448 	  "I217 LM Ethernet Connection",
1449 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
1451 	  "I218 V Ethernet Connection",
1452 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
1454 	  "I218 V Ethernet Connection",
1455 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1456 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
1457 	  "I218 V Ethernet Connection",
1458 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1459 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
1460 	  "I218 LM Ethernet Connection",
1461 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1462 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
1463 	  "I218 LM Ethernet Connection",
1464 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
1466 	  "I218 LM Ethernet Connection",
1467 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1468 #if 0
1469 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
1470 	  "I219 V Ethernet Connection",
1471 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
1473 	  "I219 V Ethernet Connection",
1474 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1475 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
1476 	  "I219 V Ethernet Connection",
1477 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
1479 	  "I219 V Ethernet Connection",
1480 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1481 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
1482 	  "I219 LM Ethernet Connection",
1483 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1484 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
1485 	  "I219 LM Ethernet Connection",
1486 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1487 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
1488 	  "I219 LM Ethernet Connection",
1489 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1490 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
1491 	  "I219 LM Ethernet Connection",
1492 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1493 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
1494 	  "I219 LM Ethernet Connection",
1495 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1496 #endif
1497 	{ 0,			0,
1498 	  NULL,
1499 	  0,			0 },
1500 };
1501 
1502 /*
1503  * Register read/write functions.
1504  * Other than CSR_{READ|WRITE}().
1505  */
1506 
1507 #if 0 /* Not currently used */
1508 static inline uint32_t
1509 wm_io_read(struct wm_softc *sc, int reg)
1510 {
1511 
1512 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1513 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1514 }
1515 #endif
1516 
1517 static inline void
1518 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1519 {
1520 
1521 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1522 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1523 }
1524 
1525 static inline void
1526 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1527     uint32_t data)
1528 {
1529 	uint32_t regval;
1530 	int i;
1531 
1532 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1533 
1534 	CSR_WRITE(sc, reg, regval);
1535 
1536 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1537 		delay(5);
1538 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1539 			break;
1540 	}
1541 	if (i == SCTL_CTL_POLL_TIMEOUT) {
1542 		aprint_error("%s: WARNING:"
1543 		    " i82575 reg 0x%08x setup did not indicate ready\n",
1544 		    device_xname(sc->sc_dev), reg);
1545 	}
1546 }
1547 
1548 static inline void
1549 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1550 {
1551 	wa->wa_low = htole32(v & 0xffffffffU);
1552 	if (sizeof(bus_addr_t) == 8)
1553 		wa->wa_high = htole32((uint64_t) v >> 32);
1554 	else
1555 		wa->wa_high = 0;
1556 }
1557 
1558 /*
1559  * Descriptor sync/init functions.
1560  */
1561 static inline void
1562 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1563 {
1564 	struct wm_softc *sc = txq->txq_sc;
1565 
1566 	/* If it will wrap around, sync to the end of the ring. */
1567 	if ((start + num) > WM_NTXDESC(txq)) {
1568 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1569 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
1570 		    (WM_NTXDESC(txq) - start), ops);
1571 		num -= (WM_NTXDESC(txq) - start);
1572 		start = 0;
1573 	}
1574 
1575 	/* Now sync whatever is left. */
1576 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1577 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
1578 }
1579 
1580 static inline void
1581 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1582 {
1583 	struct wm_softc *sc = rxq->rxq_sc;
1584 
1585 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1586 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
1587 }
1588 
1589 static inline void
1590 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1591 {
1592 	struct wm_softc *sc = rxq->rxq_sc;
1593 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1594 	struct mbuf *m = rxs->rxs_mbuf;
1595 
1596 	/*
1597 	 * Note: We scoot the packet forward 2 bytes in the buffer
1598 	 * so that the payload after the Ethernet header is aligned
1599 	 * to a 4-byte boundary.
1600 
1601 	 * XXX BRAINDAMAGE ALERT!
1602 	 * The stupid chip uses the same size for every buffer, which
1603 	 * is set in the Receive Control register.  We are using the 2K
1604 	 * size option, but what we REALLY want is (2K - 2)!  For this
1605 	 * reason, we can't "scoot" packets longer than the standard
1606 	 * Ethernet MTU.  On strict-alignment platforms, if the total
1607 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
1608 	 * the upper layer copy the headers.
1609 	 */
1610 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
1611 
1612 	if (sc->sc_type == WM_T_82574) {
1613 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
1614 		rxd->erx_data.erxd_addr =
1615 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1616 		rxd->erx_data.erxd_dd = 0;
1617 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
1618 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
1619 
1620 		rxd->nqrx_data.nrxd_paddr =
1621 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1622 		/* Currently, split header is not supported. */
1623 		rxd->nqrx_data.nrxd_haddr = 0;
1624 	} else {
1625 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1626 
1627 		wm_set_dma_addr(&rxd->wrx_addr,
1628 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1629 		rxd->wrx_len = 0;
1630 		rxd->wrx_cksum = 0;
1631 		rxd->wrx_status = 0;
1632 		rxd->wrx_errors = 0;
1633 		rxd->wrx_special = 0;
1634 	}
1635 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1636 
1637 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
1638 }
1639 
1640 /*
1641  * Device driver interface functions and commonly used functions.
1642  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1643  */
1644 
1645 /* Lookup supported device table */
1646 static const struct wm_product *
1647 wm_lookup(const struct pci_attach_args *pa)
1648 {
1649 	const struct wm_product *wmp;
1650 
1651 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1652 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1653 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1654 			return wmp;
1655 	}
1656 	return NULL;
1657 }
1658 
1659 /* The match function (ca_match) */
1660 static int
1661 wm_match(device_t parent, cfdata_t cf, void *aux)
1662 {
1663 	struct pci_attach_args *pa = aux;
1664 
1665 	if (wm_lookup(pa) != NULL)
1666 		return 1;
1667 
1668 	return 0;
1669 }
1670 
1671 /* The attach function (ca_attach) */
1672 static void
1673 wm_attach(device_t parent, device_t self, void *aux)
1674 {
1675 	struct wm_softc *sc = device_private(self);
1676 	struct pci_attach_args *pa = aux;
1677 	prop_dictionary_t dict;
1678 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1679 	pci_chipset_tag_t pc = pa->pa_pc;
1680 	int counts[PCI_INTR_TYPE_SIZE];
1681 	pci_intr_type_t max_type;
1682 	const char *eetype, *xname;
1683 	bus_space_tag_t memt;
1684 	bus_space_handle_t memh;
1685 	bus_size_t memsize;
1686 	int memh_valid;
1687 	int i, error;
1688 	const struct wm_product *wmp;
1689 	prop_data_t ea;
1690 	prop_number_t pn;
1691 	uint8_t enaddr[ETHER_ADDR_LEN];
1692 	char buf[256];
1693 	uint16_t cfg1, cfg2, swdpin, nvmword;
1694 	pcireg_t preg, memtype;
1695 	uint16_t eeprom_data, apme_mask;
1696 	bool force_clear_smbi;
1697 	uint32_t link_mode;
1698 	uint32_t reg;
1699 
1700 	sc->sc_dev = self;
1701 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
1702 	sc->sc_core_stopping = false;
1703 
1704 	wmp = wm_lookup(pa);
1705 #ifdef DIAGNOSTIC
1706 	if (wmp == NULL) {
1707 		printf("\n");
1708 		panic("wm_attach: impossible");
1709 	}
1710 #endif
1711 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
1712 
1713 	sc->sc_pc = pa->pa_pc;
1714 	sc->sc_pcitag = pa->pa_tag;
1715 
1716 	if (pci_dma64_available(pa))
1717 		sc->sc_dmat = pa->pa_dmat64;
1718 	else
1719 		sc->sc_dmat = pa->pa_dmat;
1720 
1721 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
1722 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
1723 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1724 
1725 	sc->sc_type = wmp->wmp_type;
1726 
1727 	/* Set default function pointers */
1728 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
1729 	sc->phy.release = sc->nvm.release = wm_put_null;
1730 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
1731 
1732 	if (sc->sc_type < WM_T_82543) {
1733 		if (sc->sc_rev < 2) {
1734 			aprint_error_dev(sc->sc_dev,
1735 			    "i82542 must be at least rev. 2\n");
1736 			return;
1737 		}
1738 		if (sc->sc_rev < 3)
1739 			sc->sc_type = WM_T_82542_2_0;
1740 	}
1741 
1742 	/*
1743 	 * Disable MSI for Errata:
1744 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
1745 	 *
1746 	 *  82544: Errata 25
1747 	 *  82540: Errata  6 (easy to reproduce device timeout)
1748 	 *  82545: Errata  4 (easy to reproduce device timeout)
1749 	 *  82546: Errata 26 (easy to reproduce device timeout)
1750 	 *  82541: Errata  7 (easy to reproduce device timeout)
1751 	 *
1752 	 * "Byte Enables 2 and 3 are not set on MSI writes"
1753 	 *
1754 	 *  82571 & 82572: Errata 63
1755 	 */
1756 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
1757 	    || (sc->sc_type == WM_T_82572))
1758 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
1759 
1760 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1761 	    || (sc->sc_type == WM_T_82580)
1762 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
1763 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
1764 		sc->sc_flags |= WM_F_NEWQUEUE;
1765 
1766 	/* Set device properties (mactype) */
1767 	dict = device_properties(sc->sc_dev);
1768 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1769 
1770 	/*
1771 	 * Map the device.  All devices support memory-mapped acccess,
1772 	 * and it is really required for normal operation.
1773 	 */
1774 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1775 	switch (memtype) {
1776 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1777 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1778 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1779 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1780 		break;
1781 	default:
1782 		memh_valid = 0;
1783 		break;
1784 	}
1785 
1786 	if (memh_valid) {
1787 		sc->sc_st = memt;
1788 		sc->sc_sh = memh;
1789 		sc->sc_ss = memsize;
1790 	} else {
1791 		aprint_error_dev(sc->sc_dev,
1792 		    "unable to map device registers\n");
1793 		return;
1794 	}
1795 
1796 	/*
1797 	 * In addition, i82544 and later support I/O mapped indirect
1798 	 * register access.  It is not desirable (nor supported in
1799 	 * this driver) to use it for normal operation, though it is
1800 	 * required to work around bugs in some chip versions.
1801 	 */
1802 	if (sc->sc_type >= WM_T_82544) {
1803 		/* First we have to find the I/O BAR. */
1804 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1805 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1806 			if (memtype == PCI_MAPREG_TYPE_IO)
1807 				break;
1808 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
1809 			    PCI_MAPREG_MEM_TYPE_64BIT)
1810 				i += 4;	/* skip high bits, too */
1811 		}
1812 		if (i < PCI_MAPREG_END) {
1813 			/*
1814 			 * We found PCI_MAPREG_TYPE_IO. Note that 82580
1815 			 * (and newer?) chip has no PCI_MAPREG_TYPE_IO.
1816 			 * It's no problem because newer chips has no this
1817 			 * bug.
1818 			 *
1819 			 * The i8254x doesn't apparently respond when the
1820 			 * I/O BAR is 0, which looks somewhat like it's not
1821 			 * been configured.
1822 			 */
1823 			preg = pci_conf_read(pc, pa->pa_tag, i);
1824 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1825 				aprint_error_dev(sc->sc_dev,
1826 				    "WARNING: I/O BAR at zero.\n");
1827 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1828 					0, &sc->sc_iot, &sc->sc_ioh,
1829 					NULL, &sc->sc_ios) == 0) {
1830 				sc->sc_flags |= WM_F_IOH_VALID;
1831 			} else {
1832 				aprint_error_dev(sc->sc_dev,
1833 				    "WARNING: unable to map I/O space\n");
1834 			}
1835 		}
1836 
1837 	}
1838 
1839 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
1840 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1841 	preg |= PCI_COMMAND_MASTER_ENABLE;
1842 	if (sc->sc_type < WM_T_82542_2_1)
1843 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1844 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1845 
1846 	/* power up chip */
1847 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1848 	    NULL)) && error != EOPNOTSUPP) {
1849 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1850 		return;
1851 	}
1852 
1853 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
1854 	/*
1855 	 *  Don't use MSI-X if we can use only one queue to save interrupt
1856 	 * resource.
1857 	 */
1858 	if (sc->sc_nqueues > 1) {
1859 		max_type = PCI_INTR_TYPE_MSIX;
1860 		/*
1861 		 *  82583 has a MSI-X capability in the PCI configuration space
1862 		 * but it doesn't support it. At least the document doesn't
1863 		 * say anything about MSI-X.
1864 		 */
1865 		counts[PCI_INTR_TYPE_MSIX]
1866 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
1867 	} else {
1868 		max_type = PCI_INTR_TYPE_MSI;
1869 		counts[PCI_INTR_TYPE_MSIX] = 0;
1870 	}
1871 
1872 	/* Allocation settings */
1873 	counts[PCI_INTR_TYPE_MSI] = 1;
1874 	counts[PCI_INTR_TYPE_INTX] = 1;
1875 	/* overridden by disable flags */
1876 	if (wm_disable_msi != 0) {
1877 		counts[PCI_INTR_TYPE_MSI] = 0;
1878 		if (wm_disable_msix != 0) {
1879 			max_type = PCI_INTR_TYPE_INTX;
1880 			counts[PCI_INTR_TYPE_MSIX] = 0;
1881 		}
1882 	} else if (wm_disable_msix != 0) {
1883 		max_type = PCI_INTR_TYPE_MSI;
1884 		counts[PCI_INTR_TYPE_MSIX] = 0;
1885 	}
1886 
1887 alloc_retry:
1888 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
1889 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
1890 		return;
1891 	}
1892 
1893 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
1894 		error = wm_setup_msix(sc);
1895 		if (error) {
1896 			pci_intr_release(pc, sc->sc_intrs,
1897 			    counts[PCI_INTR_TYPE_MSIX]);
1898 
1899 			/* Setup for MSI: Disable MSI-X */
1900 			max_type = PCI_INTR_TYPE_MSI;
1901 			counts[PCI_INTR_TYPE_MSI] = 1;
1902 			counts[PCI_INTR_TYPE_INTX] = 1;
1903 			goto alloc_retry;
1904 		}
1905 	} else 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
1906 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
1907 		error = wm_setup_legacy(sc);
1908 		if (error) {
1909 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
1910 			    counts[PCI_INTR_TYPE_MSI]);
1911 
1912 			/* The next try is for INTx: Disable MSI */
1913 			max_type = PCI_INTR_TYPE_INTX;
1914 			counts[PCI_INTR_TYPE_INTX] = 1;
1915 			goto alloc_retry;
1916 		}
1917 	} else {
1918 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
1919 		error = wm_setup_legacy(sc);
1920 		if (error) {
1921 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
1922 			    counts[PCI_INTR_TYPE_INTX]);
1923 			return;
1924 		}
1925 	}
1926 
1927 	/*
1928 	 * Check the function ID (unit number of the chip).
1929 	 */
1930 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1931 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
1932 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1933 	    || (sc->sc_type == WM_T_82580)
1934 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
1935 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1936 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1937 	else
1938 		sc->sc_funcid = 0;
1939 
1940 	/*
1941 	 * Determine a few things about the bus we're connected to.
1942 	 */
1943 	if (sc->sc_type < WM_T_82543) {
1944 		/* We don't really know the bus characteristics here. */
1945 		sc->sc_bus_speed = 33;
1946 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1947 		/*
1948 		 * CSA (Communication Streaming Architecture) is about as fast
1949 		 * a 32-bit 66MHz PCI Bus.
1950 		 */
1951 		sc->sc_flags |= WM_F_CSA;
1952 		sc->sc_bus_speed = 66;
1953 		aprint_verbose_dev(sc->sc_dev,
1954 		    "Communication Streaming Architecture\n");
1955 		if (sc->sc_type == WM_T_82547) {
1956 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1957 			callout_setfunc(&sc->sc_txfifo_ch,
1958 					wm_82547_txfifo_stall, sc);
1959 			aprint_verbose_dev(sc->sc_dev,
1960 			    "using 82547 Tx FIFO stall work-around\n");
1961 		}
1962 	} else if (sc->sc_type >= WM_T_82571) {
1963 		sc->sc_flags |= WM_F_PCIE;
1964 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1965 		    && (sc->sc_type != WM_T_ICH10)
1966 		    && (sc->sc_type != WM_T_PCH)
1967 		    && (sc->sc_type != WM_T_PCH2)
1968 		    && (sc->sc_type != WM_T_PCH_LPT)
1969 		    && (sc->sc_type != WM_T_PCH_SPT)) {
1970 			/* ICH* and PCH* have no PCIe capability registers */
1971 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1972 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1973 				NULL) == 0)
1974 				aprint_error_dev(sc->sc_dev,
1975 				    "unable to find PCIe capability\n");
1976 		}
1977 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1978 	} else {
1979 		reg = CSR_READ(sc, WMREG_STATUS);
1980 		if (reg & STATUS_BUS64)
1981 			sc->sc_flags |= WM_F_BUS64;
1982 		if ((reg & STATUS_PCIX_MODE) != 0) {
1983 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1984 
1985 			sc->sc_flags |= WM_F_PCIX;
1986 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1987 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1988 				aprint_error_dev(sc->sc_dev,
1989 				    "unable to find PCIX capability\n");
1990 			else if (sc->sc_type != WM_T_82545_3 &&
1991 				 sc->sc_type != WM_T_82546_3) {
1992 				/*
1993 				 * Work around a problem caused by the BIOS
1994 				 * setting the max memory read byte count
1995 				 * incorrectly.
1996 				 */
1997 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1998 				    sc->sc_pcixe_capoff + PCIX_CMD);
1999 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
2000 				    sc->sc_pcixe_capoff + PCIX_STATUS);
2001 
2002 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
2003 				    PCIX_CMD_BYTECNT_SHIFT;
2004 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
2005 				    PCIX_STATUS_MAXB_SHIFT;
2006 				if (bytecnt > maxb) {
2007 					aprint_verbose_dev(sc->sc_dev,
2008 					    "resetting PCI-X MMRBC: %d -> %d\n",
2009 					    512 << bytecnt, 512 << maxb);
2010 					pcix_cmd = (pcix_cmd &
2011 					    ~PCIX_CMD_BYTECNT_MASK) |
2012 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
2013 					pci_conf_write(pa->pa_pc, pa->pa_tag,
2014 					    sc->sc_pcixe_capoff + PCIX_CMD,
2015 					    pcix_cmd);
2016 				}
2017 			}
2018 		}
2019 		/*
2020 		 * The quad port adapter is special; it has a PCIX-PCIX
2021 		 * bridge on the board, and can run the secondary bus at
2022 		 * a higher speed.
2023 		 */
2024 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
2025 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
2026 								      : 66;
2027 		} else if (sc->sc_flags & WM_F_PCIX) {
2028 			switch (reg & STATUS_PCIXSPD_MASK) {
2029 			case STATUS_PCIXSPD_50_66:
2030 				sc->sc_bus_speed = 66;
2031 				break;
2032 			case STATUS_PCIXSPD_66_100:
2033 				sc->sc_bus_speed = 100;
2034 				break;
2035 			case STATUS_PCIXSPD_100_133:
2036 				sc->sc_bus_speed = 133;
2037 				break;
2038 			default:
2039 				aprint_error_dev(sc->sc_dev,
2040 				    "unknown PCIXSPD %d; assuming 66MHz\n",
2041 				    reg & STATUS_PCIXSPD_MASK);
2042 				sc->sc_bus_speed = 66;
2043 				break;
2044 			}
2045 		} else
2046 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
2047 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
2048 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
2049 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
2050 	}
2051 
2052 	/* Disable ASPM L0s and/or L1 for workaround */
2053 	wm_disable_aspm(sc);
2054 
2055 	/* clear interesting stat counters */
2056 	CSR_READ(sc, WMREG_COLC);
2057 	CSR_READ(sc, WMREG_RXERRC);
2058 
2059 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
2060 	    || (sc->sc_type >= WM_T_ICH8))
2061 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2062 	if (sc->sc_type >= WM_T_ICH8)
2063 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2064 
2065 	/* Set PHY, NVM mutex related stuff */
2066 	switch (sc->sc_type) {
2067 	case WM_T_82542_2_0:
2068 	case WM_T_82542_2_1:
2069 	case WM_T_82543:
2070 	case WM_T_82544:
2071 		/* Microwire */
2072 		sc->nvm.read = wm_nvm_read_uwire;
2073 		sc->sc_nvm_wordsize = 64;
2074 		sc->sc_nvm_addrbits = 6;
2075 		break;
2076 	case WM_T_82540:
2077 	case WM_T_82545:
2078 	case WM_T_82545_3:
2079 	case WM_T_82546:
2080 	case WM_T_82546_3:
2081 		/* Microwire */
2082 		sc->nvm.read = wm_nvm_read_uwire;
2083 		reg = CSR_READ(sc, WMREG_EECD);
2084 		if (reg & EECD_EE_SIZE) {
2085 			sc->sc_nvm_wordsize = 256;
2086 			sc->sc_nvm_addrbits = 8;
2087 		} else {
2088 			sc->sc_nvm_wordsize = 64;
2089 			sc->sc_nvm_addrbits = 6;
2090 		}
2091 		sc->sc_flags |= WM_F_LOCK_EECD;
2092 		sc->nvm.acquire = wm_get_eecd;
2093 		sc->nvm.release = wm_put_eecd;
2094 		break;
2095 	case WM_T_82541:
2096 	case WM_T_82541_2:
2097 	case WM_T_82547:
2098 	case WM_T_82547_2:
2099 		reg = CSR_READ(sc, WMREG_EECD);
2100 		/*
2101 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI in it only
2102 		 * on 8254[17], so set flags and functios before calling it.
2103 		 */
2104 		sc->sc_flags |= WM_F_LOCK_EECD;
2105 		sc->nvm.acquire = wm_get_eecd;
2106 		sc->nvm.release = wm_put_eecd;
2107 		if (reg & EECD_EE_TYPE) {
2108 			/* SPI */
2109 			sc->nvm.read = wm_nvm_read_spi;
2110 			sc->sc_flags |= WM_F_EEPROM_SPI;
2111 			wm_nvm_set_addrbits_size_eecd(sc);
2112 		} else {
2113 			/* Microwire */
2114 			sc->nvm.read = wm_nvm_read_uwire;
2115 			if ((reg & EECD_EE_ABITS) != 0) {
2116 				sc->sc_nvm_wordsize = 256;
2117 				sc->sc_nvm_addrbits = 8;
2118 			} else {
2119 				sc->sc_nvm_wordsize = 64;
2120 				sc->sc_nvm_addrbits = 6;
2121 			}
2122 		}
2123 		break;
2124 	case WM_T_82571:
2125 	case WM_T_82572:
2126 		/* SPI */
2127 		sc->nvm.read = wm_nvm_read_eerd;
2128 		/* Not use WM_F_LOCK_EECD because we use EERD */
2129 		sc->sc_flags |= WM_F_EEPROM_SPI;
2130 		wm_nvm_set_addrbits_size_eecd(sc);
2131 		sc->phy.acquire = wm_get_swsm_semaphore;
2132 		sc->phy.release = wm_put_swsm_semaphore;
2133 		sc->nvm.acquire = wm_get_nvm_82571;
2134 		sc->nvm.release = wm_put_nvm_82571;
2135 		break;
2136 	case WM_T_82573:
2137 	case WM_T_82574:
2138 	case WM_T_82583:
2139 		sc->nvm.read = wm_nvm_read_eerd;
2140 		/* Not use WM_F_LOCK_EECD because we use EERD */
2141 		if (sc->sc_type == WM_T_82573) {
2142 			sc->phy.acquire = wm_get_swsm_semaphore;
2143 			sc->phy.release = wm_put_swsm_semaphore;
2144 			sc->nvm.acquire = wm_get_nvm_82571;
2145 			sc->nvm.release = wm_put_nvm_82571;
2146 		} else {
2147 			/* Both PHY and NVM use the same semaphore. */
2148 			sc->phy.acquire = sc->nvm.acquire
2149 			    = wm_get_swfwhw_semaphore;
2150 			sc->phy.release = sc->nvm.release
2151 			    = wm_put_swfwhw_semaphore;
2152 		}
2153 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
2154 			sc->sc_flags |= WM_F_EEPROM_FLASH;
2155 			sc->sc_nvm_wordsize = 2048;
2156 		} else {
2157 			/* SPI */
2158 			sc->sc_flags |= WM_F_EEPROM_SPI;
2159 			wm_nvm_set_addrbits_size_eecd(sc);
2160 		}
2161 		break;
2162 	case WM_T_82575:
2163 	case WM_T_82576:
2164 	case WM_T_82580:
2165 	case WM_T_I350:
2166 	case WM_T_I354:
2167 	case WM_T_80003:
2168 		/* SPI */
2169 		sc->sc_flags |= WM_F_EEPROM_SPI;
2170 		wm_nvm_set_addrbits_size_eecd(sc);
2171 		if((sc->sc_type == WM_T_80003)
2172 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
2173 			sc->nvm.read = wm_nvm_read_eerd;
2174 			/* Don't use WM_F_LOCK_EECD because we use EERD */
2175 		} else {
2176 			sc->nvm.read = wm_nvm_read_spi;
2177 			sc->sc_flags |= WM_F_LOCK_EECD;
2178 		}
2179 		sc->phy.acquire = wm_get_phy_82575;
2180 		sc->phy.release = wm_put_phy_82575;
2181 		sc->nvm.acquire = wm_get_nvm_80003;
2182 		sc->nvm.release = wm_put_nvm_80003;
2183 		break;
2184 	case WM_T_ICH8:
2185 	case WM_T_ICH9:
2186 	case WM_T_ICH10:
2187 	case WM_T_PCH:
2188 	case WM_T_PCH2:
2189 	case WM_T_PCH_LPT:
2190 		sc->nvm.read = wm_nvm_read_ich8;
2191 		/* FLASH */
2192 		sc->sc_flags |= WM_F_EEPROM_FLASH;
2193 		sc->sc_nvm_wordsize = 2048;
2194 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
2195 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2196 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2197 			aprint_error_dev(sc->sc_dev,
2198 			    "can't map FLASH registers\n");
2199 			goto out;
2200 		}
2201 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2202 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2203 		    ICH_FLASH_SECTOR_SIZE;
2204 		sc->sc_ich8_flash_bank_size =
2205 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2206 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
2207 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2208 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
2209 		sc->sc_flashreg_offset = 0;
2210 		sc->phy.acquire = wm_get_swflag_ich8lan;
2211 		sc->phy.release = wm_put_swflag_ich8lan;
2212 		sc->nvm.acquire = wm_get_nvm_ich8lan;
2213 		sc->nvm.release = wm_put_nvm_ich8lan;
2214 		break;
2215 	case WM_T_PCH_SPT:
2216 		sc->nvm.read = wm_nvm_read_spt;
2217 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
2218 		sc->sc_flags |= WM_F_EEPROM_FLASH;
2219 		sc->sc_flasht = sc->sc_st;
2220 		sc->sc_flashh = sc->sc_sh;
2221 		sc->sc_ich8_flash_base = 0;
2222 		sc->sc_nvm_wordsize =
2223 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
2224 			* NVM_SIZE_MULTIPLIER;
2225 		/* It is size in bytes, we want words */
2226 		sc->sc_nvm_wordsize /= 2;
2227 		/* assume 2 banks */
2228 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
2229 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
2230 		sc->phy.acquire = wm_get_swflag_ich8lan;
2231 		sc->phy.release = wm_put_swflag_ich8lan;
2232 		sc->nvm.acquire = wm_get_nvm_ich8lan;
2233 		sc->nvm.release = wm_put_nvm_ich8lan;
2234 		break;
2235 	case WM_T_I210:
2236 	case WM_T_I211:
2237 		/* Allow a single clear of the SW semaphore on I210 and newer*/
2238 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
2239 		if (wm_nvm_get_flash_presence_i210(sc)) {
2240 			sc->nvm.read = wm_nvm_read_eerd;
2241 			/* Don't use WM_F_LOCK_EECD because we use EERD */
2242 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2243 			wm_nvm_set_addrbits_size_eecd(sc);
2244 		} else {
2245 			sc->nvm.read = wm_nvm_read_invm;
2246 			sc->sc_flags |= WM_F_EEPROM_INVM;
2247 			sc->sc_nvm_wordsize = INVM_SIZE;
2248 		}
2249 		sc->phy.acquire = wm_get_phy_82575;
2250 		sc->phy.release = wm_put_phy_82575;
2251 		sc->nvm.acquire = wm_get_nvm_80003;
2252 		sc->nvm.release = wm_put_nvm_80003;
2253 		break;
2254 	default:
2255 		break;
2256 	}
2257 
2258 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
2259 	switch (sc->sc_type) {
2260 	case WM_T_82571:
2261 	case WM_T_82572:
2262 		reg = CSR_READ(sc, WMREG_SWSM2);
2263 		if ((reg & SWSM2_LOCK) == 0) {
2264 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2265 			force_clear_smbi = true;
2266 		} else
2267 			force_clear_smbi = false;
2268 		break;
2269 	case WM_T_82573:
2270 	case WM_T_82574:
2271 	case WM_T_82583:
2272 		force_clear_smbi = true;
2273 		break;
2274 	default:
2275 		force_clear_smbi = false;
2276 		break;
2277 	}
2278 	if (force_clear_smbi) {
2279 		reg = CSR_READ(sc, WMREG_SWSM);
2280 		if ((reg & SWSM_SMBI) != 0)
2281 			aprint_error_dev(sc->sc_dev,
2282 			    "Please update the Bootagent\n");
2283 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2284 	}
2285 
2286 	/*
2287 	 * Defer printing the EEPROM type until after verifying the checksum
2288 	 * This allows the EEPROM type to be printed correctly in the case
2289 	 * that no EEPROM is attached.
2290 	 */
2291 	/*
2292 	 * Validate the EEPROM checksum. If the checksum fails, flag
2293 	 * this for later, so we can fail future reads from the EEPROM.
2294 	 */
2295 	if (wm_nvm_validate_checksum(sc)) {
2296 		/*
2297 		 * Read twice again because some PCI-e parts fail the
2298 		 * first check due to the link being in sleep state.
2299 		 */
2300 		if (wm_nvm_validate_checksum(sc))
2301 			sc->sc_flags |= WM_F_EEPROM_INVALID;
2302 	}
2303 
2304 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
2305 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2306 	else {
2307 		aprint_verbose_dev(sc->sc_dev, "%u words ",
2308 		    sc->sc_nvm_wordsize);
2309 		if (sc->sc_flags & WM_F_EEPROM_INVM)
2310 			aprint_verbose("iNVM");
2311 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2312 			aprint_verbose("FLASH(HW)");
2313 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2314 			aprint_verbose("FLASH");
2315 		else {
2316 			if (sc->sc_flags & WM_F_EEPROM_SPI)
2317 				eetype = "SPI";
2318 			else
2319 				eetype = "MicroWire";
2320 			aprint_verbose("(%d address bits) %s EEPROM",
2321 			    sc->sc_nvm_addrbits, eetype);
2322 		}
2323 	}
2324 	wm_nvm_version(sc);
2325 	aprint_verbose("\n");
2326 
2327 	/*
2328 	 * XXX The first call of wm_gmii_setup_phytype. The result might be
2329 	 * incorrect.
2330 	 */
2331 	wm_gmii_setup_phytype(sc, 0, 0);
2332 
2333 	/* Reset the chip to a known state. */
2334 	wm_reset(sc);
2335 
2336 	/* Check for I21[01] PLL workaround */
2337 	if (sc->sc_type == WM_T_I210)
2338 		sc->sc_flags |= WM_F_PLL_WA_I210;
2339 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
2340 		/* NVM image release 3.25 has a workaround */
2341 		if ((sc->sc_nvm_ver_major < 3)
2342 		    || ((sc->sc_nvm_ver_major == 3)
2343 			&& (sc->sc_nvm_ver_minor < 25))) {
2344 			aprint_verbose_dev(sc->sc_dev,
2345 			    "ROM image version %d.%d is older than 3.25\n",
2346 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2347 			sc->sc_flags |= WM_F_PLL_WA_I210;
2348 		}
2349 	}
2350 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2351 		wm_pll_workaround_i210(sc);
2352 
2353 	wm_get_wakeup(sc);
2354 
2355 	/* Non-AMT based hardware can now take control from firmware */
2356 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2357 		wm_get_hw_control(sc);
2358 
2359 	/*
2360 	 * Read the Ethernet address from the EEPROM, if not first found
2361 	 * in device properties.
2362 	 */
2363 	ea = prop_dictionary_get(dict, "mac-address");
2364 	if (ea != NULL) {
2365 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2366 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2367 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
2368 	} else {
2369 		if (wm_read_mac_addr(sc, enaddr) != 0) {
2370 			aprint_error_dev(sc->sc_dev,
2371 			    "unable to read Ethernet address\n");
2372 			goto out;
2373 		}
2374 	}
2375 
2376 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2377 	    ether_sprintf(enaddr));
2378 
2379 	/*
2380 	 * Read the config info from the EEPROM, and set up various
2381 	 * bits in the control registers based on their contents.
2382 	 */
2383 	pn = prop_dictionary_get(dict, "i82543-cfg1");
2384 	if (pn != NULL) {
2385 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2386 		cfg1 = (uint16_t) prop_number_integer_value(pn);
2387 	} else {
2388 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2389 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2390 			goto out;
2391 		}
2392 	}
2393 
2394 	pn = prop_dictionary_get(dict, "i82543-cfg2");
2395 	if (pn != NULL) {
2396 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2397 		cfg2 = (uint16_t) prop_number_integer_value(pn);
2398 	} else {
2399 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2400 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2401 			goto out;
2402 		}
2403 	}
2404 
2405 	/* check for WM_F_WOL */
2406 	switch (sc->sc_type) {
2407 	case WM_T_82542_2_0:
2408 	case WM_T_82542_2_1:
2409 	case WM_T_82543:
2410 		/* dummy? */
2411 		eeprom_data = 0;
2412 		apme_mask = NVM_CFG3_APME;
2413 		break;
2414 	case WM_T_82544:
2415 		apme_mask = NVM_CFG2_82544_APM_EN;
2416 		eeprom_data = cfg2;
2417 		break;
2418 	case WM_T_82546:
2419 	case WM_T_82546_3:
2420 	case WM_T_82571:
2421 	case WM_T_82572:
2422 	case WM_T_82573:
2423 	case WM_T_82574:
2424 	case WM_T_82583:
2425 	case WM_T_80003:
2426 	default:
2427 		apme_mask = NVM_CFG3_APME;
2428 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2429 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2430 		break;
2431 	case WM_T_82575:
2432 	case WM_T_82576:
2433 	case WM_T_82580:
2434 	case WM_T_I350:
2435 	case WM_T_I354: /* XXX ok? */
2436 	case WM_T_ICH8:
2437 	case WM_T_ICH9:
2438 	case WM_T_ICH10:
2439 	case WM_T_PCH:
2440 	case WM_T_PCH2:
2441 	case WM_T_PCH_LPT:
2442 	case WM_T_PCH_SPT:
2443 		/* XXX The funcid should be checked on some devices */
2444 		apme_mask = WUC_APME;
2445 		eeprom_data = CSR_READ(sc, WMREG_WUC);
2446 		break;
2447 	}
2448 
2449 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2450 	if ((eeprom_data & apme_mask) != 0)
2451 		sc->sc_flags |= WM_F_WOL;
2452 
2453 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
2454 		/* Check NVM for autonegotiation */
2455 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2456 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
2457 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2458 		}
2459 	}
2460 
2461 	/*
2462 	 * XXX need special handling for some multiple port cards
2463 	 * to disable a paticular port.
2464 	 */
2465 
2466 	if (sc->sc_type >= WM_T_82544) {
2467 		pn = prop_dictionary_get(dict, "i82543-swdpin");
2468 		if (pn != NULL) {
2469 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2470 			swdpin = (uint16_t) prop_number_integer_value(pn);
2471 		} else {
2472 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2473 				aprint_error_dev(sc->sc_dev,
2474 				    "unable to read SWDPIN\n");
2475 				goto out;
2476 			}
2477 		}
2478 	}
2479 
2480 	if (cfg1 & NVM_CFG1_ILOS)
2481 		sc->sc_ctrl |= CTRL_ILOS;
2482 
2483 	/*
2484 	 * XXX
2485 	 * This code isn't correct because pin 2 and 3 are located
2486 	 * in different position on newer chips. Check all datasheet.
2487 	 *
2488 	 * Until resolve this problem, check if a chip < 82580
2489 	 */
2490 	if (sc->sc_type <= WM_T_82580) {
2491 		if (sc->sc_type >= WM_T_82544) {
2492 			sc->sc_ctrl |=
2493 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2494 			    CTRL_SWDPIO_SHIFT;
2495 			sc->sc_ctrl |=
2496 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2497 			    CTRL_SWDPINS_SHIFT;
2498 		} else {
2499 			sc->sc_ctrl |=
2500 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2501 			    CTRL_SWDPIO_SHIFT;
2502 		}
2503 	}
2504 
2505 	/* XXX For other than 82580? */
2506 	if (sc->sc_type == WM_T_82580) {
2507 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
2508 		if (nvmword & __BIT(13))
2509 			sc->sc_ctrl |= CTRL_ILOS;
2510 	}
2511 
2512 #if 0
2513 	if (sc->sc_type >= WM_T_82544) {
2514 		if (cfg1 & NVM_CFG1_IPS0)
2515 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2516 		if (cfg1 & NVM_CFG1_IPS1)
2517 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2518 		sc->sc_ctrl_ext |=
2519 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2520 		    CTRL_EXT_SWDPIO_SHIFT;
2521 		sc->sc_ctrl_ext |=
2522 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2523 		    CTRL_EXT_SWDPINS_SHIFT;
2524 	} else {
2525 		sc->sc_ctrl_ext |=
2526 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2527 		    CTRL_EXT_SWDPIO_SHIFT;
2528 	}
2529 #endif
2530 
2531 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2532 #if 0
2533 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2534 #endif
2535 
2536 	if (sc->sc_type == WM_T_PCH) {
2537 		uint16_t val;
2538 
2539 		/* Save the NVM K1 bit setting */
2540 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2541 
2542 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2543 			sc->sc_nvm_k1_enabled = 1;
2544 		else
2545 			sc->sc_nvm_k1_enabled = 0;
2546 	}
2547 
2548 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
2549 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2550 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2551 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2552 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
2553 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2554 		/* Copper only */
2555 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2556 	    || (sc->sc_type ==WM_T_82580) || (sc->sc_type ==WM_T_I350)
2557 	    || (sc->sc_type ==WM_T_I354) || (sc->sc_type ==WM_T_I210)
2558 	    || (sc->sc_type ==WM_T_I211)) {
2559 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
2560 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2561 		switch (link_mode) {
2562 		case CTRL_EXT_LINK_MODE_1000KX:
2563 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2564 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2565 			break;
2566 		case CTRL_EXT_LINK_MODE_SGMII:
2567 			if (wm_sgmii_uses_mdio(sc)) {
2568 				aprint_verbose_dev(sc->sc_dev,
2569 				    "SGMII(MDIO)\n");
2570 				sc->sc_flags |= WM_F_SGMII;
2571 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2572 				break;
2573 			}
2574 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2575 			/*FALLTHROUGH*/
2576 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2577 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
2578 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2579 				if (link_mode
2580 				    == CTRL_EXT_LINK_MODE_SGMII) {
2581 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2582 					sc->sc_flags |= WM_F_SGMII;
2583 				} else {
2584 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2585 					aprint_verbose_dev(sc->sc_dev,
2586 					    "SERDES\n");
2587 				}
2588 				break;
2589 			}
2590 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2591 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
2592 
2593 			/* Change current link mode setting */
2594 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
2595 			switch (sc->sc_mediatype) {
2596 			case WM_MEDIATYPE_COPPER:
2597 				reg |= CTRL_EXT_LINK_MODE_SGMII;
2598 				break;
2599 			case WM_MEDIATYPE_SERDES:
2600 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2601 				break;
2602 			default:
2603 				break;
2604 			}
2605 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2606 			break;
2607 		case CTRL_EXT_LINK_MODE_GMII:
2608 		default:
2609 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
2610 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2611 			break;
2612 		}
2613 
2614 		reg &= ~CTRL_EXT_I2C_ENA;
2615 		if ((sc->sc_flags & WM_F_SGMII) != 0)
2616 			reg |= CTRL_EXT_I2C_ENA;
2617 		else
2618 			reg &= ~CTRL_EXT_I2C_ENA;
2619 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2620 	} else if (sc->sc_type < WM_T_82543 ||
2621 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2622 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2623 			aprint_error_dev(sc->sc_dev,
2624 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
2625 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2626 		}
2627 	} else {
2628 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
2629 			aprint_error_dev(sc->sc_dev,
2630 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2631 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2632 		}
2633 	}
2634 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
2635 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
2636 
2637 	/* Set device properties (macflags) */
2638 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
2639 
2640 	/* Initialize the media structures accordingly. */
2641 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2642 		wm_gmii_mediainit(sc, wmp->wmp_product);
2643 	else
2644 		wm_tbi_mediainit(sc); /* All others */
2645 
2646 	ifp = &sc->sc_ethercom.ec_if;
2647 	xname = device_xname(sc->sc_dev);
2648 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2649 	ifp->if_softc = sc;
2650 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2651 #ifdef WM_MPSAFE
2652 	ifp->if_extflags = IFEF_MPSAFE;
2653 #endif
2654 	ifp->if_ioctl = wm_ioctl;
2655 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
2656 		ifp->if_start = wm_nq_start;
2657 		/*
2658 		 * When the number of CPUs is one and the controller can use
2659 		 * MSI-X, wm(4) use MSI-X but *does not* use multiqueue.
2660 		 * That is, wm(4) use two interrupts, one is used for Tx/Rx
2661 		 * and the other is used for link status changing.
2662 		 * In this situation, wm_nq_transmit() is disadvantageous
2663 		 * because of wm_select_txqueue() and pcq(9) overhead.
2664 		 */
2665 		if (wm_is_using_multiqueue(sc))
2666 			ifp->if_transmit = wm_nq_transmit;
2667 	} else {
2668 		ifp->if_start = wm_start;
2669 		/*
2670 		 * wm_transmit() has the same disadvantage as wm_transmit().
2671 		 */
2672 		if (wm_is_using_multiqueue(sc))
2673 			ifp->if_transmit = wm_transmit;
2674 	}
2675 	ifp->if_watchdog = wm_watchdog;
2676 	ifp->if_init = wm_init;
2677 	ifp->if_stop = wm_stop;
2678 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2679 	IFQ_SET_READY(&ifp->if_snd);
2680 
2681 	/* Check for jumbo frame */
2682 	switch (sc->sc_type) {
2683 	case WM_T_82573:
2684 		/* XXX limited to 9234 if ASPM is disabled */
2685 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2686 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2687 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2688 		break;
2689 	case WM_T_82571:
2690 	case WM_T_82572:
2691 	case WM_T_82574:
2692 	case WM_T_82583:
2693 	case WM_T_82575:
2694 	case WM_T_82576:
2695 	case WM_T_82580:
2696 	case WM_T_I350:
2697 	case WM_T_I354:
2698 	case WM_T_I210:
2699 	case WM_T_I211:
2700 	case WM_T_80003:
2701 	case WM_T_ICH9:
2702 	case WM_T_ICH10:
2703 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
2704 	case WM_T_PCH_LPT:
2705 	case WM_T_PCH_SPT:
2706 		/* XXX limited to 9234 */
2707 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2708 		break;
2709 	case WM_T_PCH:
2710 		/* XXX limited to 4096 */
2711 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2712 		break;
2713 	case WM_T_82542_2_0:
2714 	case WM_T_82542_2_1:
2715 	case WM_T_ICH8:
2716 		/* No support for jumbo frame */
2717 		break;
2718 	default:
2719 		/* ETHER_MAX_LEN_JUMBO */
2720 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2721 		break;
2722 	}
2723 
2724 	/* If we're a i82543 or greater, we can support VLANs. */
2725 	if (sc->sc_type >= WM_T_82543)
2726 		sc->sc_ethercom.ec_capabilities |=
2727 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2728 
2729 	/*
2730 	 * We can perform TCPv4 and UDPv4 checkums in-bound.  Only
2731 	 * on i82543 and later.
2732 	 */
2733 	if (sc->sc_type >= WM_T_82543) {
2734 		ifp->if_capabilities |=
2735 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2736 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2737 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2738 		    IFCAP_CSUM_TCPv6_Tx |
2739 		    IFCAP_CSUM_UDPv6_Tx;
2740 	}
2741 
2742 	/*
2743 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2744 	 *
2745 	 *	82541GI (8086:1076) ... no
2746 	 *	82572EI (8086:10b9) ... yes
2747 	 */
2748 	if (sc->sc_type >= WM_T_82571) {
2749 		ifp->if_capabilities |=
2750 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2751 	}
2752 
2753 	/*
2754 	 * If we're a i82544 or greater (except i82547), we can do
2755 	 * TCP segmentation offload.
2756 	 */
2757 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2758 		ifp->if_capabilities |= IFCAP_TSOv4;
2759 	}
2760 
2761 	if (sc->sc_type >= WM_T_82571) {
2762 		ifp->if_capabilities |= IFCAP_TSOv6;
2763 	}
2764 
2765 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
2766 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
2767 
2768 #ifdef WM_MPSAFE
2769 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2770 #else
2771 	sc->sc_core_lock = NULL;
2772 #endif
2773 
2774 	/* Attach the interface. */
2775 	error = if_initialize(ifp);
2776 	if (error != 0) {
2777 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
2778 		    error);
2779 		return; /* Error */
2780 	}
2781 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
2782 	ether_ifattach(ifp, enaddr);
2783 	if_register(ifp);
2784 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2785 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2786 			  RND_FLAG_DEFAULT);
2787 
2788 #ifdef WM_EVENT_COUNTERS
2789 	/* Attach event counters. */
2790 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2791 	    NULL, xname, "linkintr");
2792 
2793 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2794 	    NULL, xname, "tx_xoff");
2795 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2796 	    NULL, xname, "tx_xon");
2797 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2798 	    NULL, xname, "rx_xoff");
2799 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2800 	    NULL, xname, "rx_xon");
2801 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2802 	    NULL, xname, "rx_macctl");
2803 #endif /* WM_EVENT_COUNTERS */
2804 
2805 	if (pmf_device_register(self, wm_suspend, wm_resume))
2806 		pmf_class_network_register(self, ifp);
2807 	else
2808 		aprint_error_dev(self, "couldn't establish power handler\n");
2809 
2810 	sc->sc_flags |= WM_F_ATTACHED;
2811  out:
2812 	return;
2813 }
2814 
2815 /* The detach function (ca_detach) */
2816 static int
2817 wm_detach(device_t self, int flags __unused)
2818 {
2819 	struct wm_softc *sc = device_private(self);
2820 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2821 	int i;
2822 
2823 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2824 		return 0;
2825 
2826 	/* Stop the interface. Callouts are stopped in it. */
2827 	wm_stop(ifp, 1);
2828 
2829 	pmf_device_deregister(self);
2830 
2831 #ifdef WM_EVENT_COUNTERS
2832 	evcnt_detach(&sc->sc_ev_linkintr);
2833 
2834 	evcnt_detach(&sc->sc_ev_tx_xoff);
2835 	evcnt_detach(&sc->sc_ev_tx_xon);
2836 	evcnt_detach(&sc->sc_ev_rx_xoff);
2837 	evcnt_detach(&sc->sc_ev_rx_xon);
2838 	evcnt_detach(&sc->sc_ev_rx_macctl);
2839 #endif /* WM_EVENT_COUNTERS */
2840 
2841 	/* Tell the firmware about the release */
2842 	WM_CORE_LOCK(sc);
2843 	wm_release_manageability(sc);
2844 	wm_release_hw_control(sc);
2845 	wm_enable_wakeup(sc);
2846 	WM_CORE_UNLOCK(sc);
2847 
2848 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2849 
2850 	/* Delete all remaining media. */
2851 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2852 
2853 	ether_ifdetach(ifp);
2854 	if_detach(ifp);
2855 	if_percpuq_destroy(sc->sc_ipq);
2856 
2857 	/* Unload RX dmamaps and free mbufs */
2858 	for (i = 0; i < sc->sc_nqueues; i++) {
2859 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
2860 		mutex_enter(rxq->rxq_lock);
2861 		wm_rxdrain(rxq);
2862 		mutex_exit(rxq->rxq_lock);
2863 	}
2864 	/* Must unlock here */
2865 
2866 	/* Disestablish the interrupt handler */
2867 	for (i = 0; i < sc->sc_nintrs; i++) {
2868 		if (sc->sc_ihs[i] != NULL) {
2869 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
2870 			sc->sc_ihs[i] = NULL;
2871 		}
2872 	}
2873 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
2874 
2875 	wm_free_txrx_queues(sc);
2876 
2877 	/* Unmap the registers */
2878 	if (sc->sc_ss) {
2879 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2880 		sc->sc_ss = 0;
2881 	}
2882 	if (sc->sc_ios) {
2883 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2884 		sc->sc_ios = 0;
2885 	}
2886 	if (sc->sc_flashs) {
2887 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
2888 		sc->sc_flashs = 0;
2889 	}
2890 
2891 	if (sc->sc_core_lock)
2892 		mutex_obj_free(sc->sc_core_lock);
2893 	if (sc->sc_ich_phymtx)
2894 		mutex_obj_free(sc->sc_ich_phymtx);
2895 	if (sc->sc_ich_nvmmtx)
2896 		mutex_obj_free(sc->sc_ich_nvmmtx);
2897 
2898 	return 0;
2899 }
2900 
2901 static bool
2902 wm_suspend(device_t self, const pmf_qual_t *qual)
2903 {
2904 	struct wm_softc *sc = device_private(self);
2905 
2906 	wm_release_manageability(sc);
2907 	wm_release_hw_control(sc);
2908 	wm_enable_wakeup(sc);
2909 
2910 	return true;
2911 }
2912 
2913 static bool
2914 wm_resume(device_t self, const pmf_qual_t *qual)
2915 {
2916 	struct wm_softc *sc = device_private(self);
2917 
2918 	/* Disable ASPM L0s and/or L1 for workaround */
2919 	wm_disable_aspm(sc);
2920 	wm_init_manageability(sc);
2921 
2922 	return true;
2923 }
2924 
2925 /*
2926  * wm_watchdog:		[ifnet interface function]
2927  *
2928  *	Watchdog timer handler.
2929  */
2930 static void
2931 wm_watchdog(struct ifnet *ifp)
2932 {
2933 	int qid;
2934 	struct wm_softc *sc = ifp->if_softc;
2935 
2936 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
2937 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
2938 
2939 		wm_watchdog_txq(ifp, txq);
2940 	}
2941 
2942 	/* Reset the interface. */
2943 	(void) wm_init(ifp);
2944 
2945 	/*
2946 	 * There are still some upper layer processing which call
2947 	 * ifp->if_start(). e.g. ALTQ or one CPU system
2948 	 */
2949 	/* Try to get more packets going. */
2950 	ifp->if_start(ifp);
2951 }
2952 
2953 static void
2954 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
2955 {
2956 	struct wm_softc *sc = ifp->if_softc;
2957 
2958 	/*
2959 	 * Since we're using delayed interrupts, sweep up
2960 	 * before we report an error.
2961 	 */
2962 	mutex_enter(txq->txq_lock);
2963 	wm_txeof(sc, txq);
2964 	mutex_exit(txq->txq_lock);
2965 
2966 	if (txq->txq_free != WM_NTXDESC(txq)) {
2967 #ifdef WM_DEBUG
2968 		int i, j;
2969 		struct wm_txsoft *txs;
2970 #endif
2971 		log(LOG_ERR,
2972 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2973 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
2974 		    txq->txq_next);
2975 		ifp->if_oerrors++;
2976 #ifdef WM_DEBUG
2977 		for (i = txq->txq_sdirty; i != txq->txq_snext ;
2978 		    i = WM_NEXTTXS(txq, i)) {
2979 		    txs = &txq->txq_soft[i];
2980 		    printf("txs %d tx %d -> %d\n",
2981 			i, txs->txs_firstdesc, txs->txs_lastdesc);
2982 		    for (j = txs->txs_firstdesc; ;
2983 			j = WM_NEXTTX(txq, j)) {
2984 			printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2985 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
2986 			printf("\t %#08x%08x\n",
2987 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
2988 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
2989 			if (j == txs->txs_lastdesc)
2990 				break;
2991 			}
2992 		}
2993 #endif
2994 	}
2995 }
2996 
2997 /*
2998  * wm_tick:
2999  *
3000  *	One second timer, used to check link status, sweep up
3001  *	completed transmit jobs, etc.
3002  */
3003 static void
3004 wm_tick(void *arg)
3005 {
3006 	struct wm_softc *sc = arg;
3007 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3008 #ifndef WM_MPSAFE
3009 	int s = splnet();
3010 #endif
3011 
3012 	WM_CORE_LOCK(sc);
3013 
3014 	if (sc->sc_core_stopping)
3015 		goto out;
3016 
3017 	if (sc->sc_type >= WM_T_82542_2_1) {
3018 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3019 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3020 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3021 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3022 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3023 	}
3024 
3025 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3026 	ifp->if_ierrors += 0ULL /* ensure quad_t */
3027 	    + CSR_READ(sc, WMREG_CRCERRS)
3028 	    + CSR_READ(sc, WMREG_ALGNERRC)
3029 	    + CSR_READ(sc, WMREG_SYMERRC)
3030 	    + CSR_READ(sc, WMREG_RXERRC)
3031 	    + CSR_READ(sc, WMREG_SEC)
3032 	    + CSR_READ(sc, WMREG_CEXTERR)
3033 	    + CSR_READ(sc, WMREG_RLEC);
3034 	/*
3035 	 * WMREG_RNBC is incremented when there is no available buffers in host
3036 	 * memory. It does not mean the number of dropped packet. Because
3037 	 * ethernet controller can receive packets in such case if there is
3038 	 * space in phy's FIFO.
3039 	 *
3040 	 * If you want to know the nubmer of WMREG_RMBC, you should use such as
3041 	 * own EVCNT instead of if_iqdrops.
3042 	 */
3043 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
3044 
3045 	if (sc->sc_flags & WM_F_HAS_MII)
3046 		mii_tick(&sc->sc_mii);
3047 	else if ((sc->sc_type >= WM_T_82575)
3048 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3049 		wm_serdes_tick(sc);
3050 	else
3051 		wm_tbi_tick(sc);
3052 
3053 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3054 out:
3055 	WM_CORE_UNLOCK(sc);
3056 #ifndef WM_MPSAFE
3057 	splx(s);
3058 #endif
3059 }
3060 
3061 static int
3062 wm_ifflags_cb(struct ethercom *ec)
3063 {
3064 	struct ifnet *ifp = &ec->ec_if;
3065 	struct wm_softc *sc = ifp->if_softc;
3066 	int rc = 0;
3067 
3068 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3069 		device_xname(sc->sc_dev), __func__));
3070 
3071 	WM_CORE_LOCK(sc);
3072 
3073 	int change = ifp->if_flags ^ sc->sc_if_flags;
3074 	sc->sc_if_flags = ifp->if_flags;
3075 
3076 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
3077 		rc = ENETRESET;
3078 		goto out;
3079 	}
3080 
3081 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
3082 		wm_set_filter(sc);
3083 
3084 	wm_set_vlan(sc);
3085 
3086 out:
3087 	WM_CORE_UNLOCK(sc);
3088 
3089 	return rc;
3090 }
3091 
3092 /*
3093  * wm_ioctl:		[ifnet interface function]
3094  *
3095  *	Handle control requests from the operator.
3096  */
3097 static int
3098 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3099 {
3100 	struct wm_softc *sc = ifp->if_softc;
3101 	struct ifreq *ifr = (struct ifreq *) data;
3102 	struct ifaddr *ifa = (struct ifaddr *)data;
3103 	struct sockaddr_dl *sdl;
3104 	int s, error;
3105 
3106 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3107 		device_xname(sc->sc_dev), __func__));
3108 
3109 #ifndef WM_MPSAFE
3110 	s = splnet();
3111 #endif
3112 	switch (cmd) {
3113 	case SIOCSIFMEDIA:
3114 	case SIOCGIFMEDIA:
3115 		WM_CORE_LOCK(sc);
3116 		/* Flow control requires full-duplex mode. */
3117 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3118 		    (ifr->ifr_media & IFM_FDX) == 0)
3119 			ifr->ifr_media &= ~IFM_ETH_FMASK;
3120 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3121 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3122 				/* We can do both TXPAUSE and RXPAUSE. */
3123 				ifr->ifr_media |=
3124 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3125 			}
3126 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3127 		}
3128 		WM_CORE_UNLOCK(sc);
3129 #ifdef WM_MPSAFE
3130 		s = splnet();
3131 #endif
3132 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
3133 #ifdef WM_MPSAFE
3134 		splx(s);
3135 #endif
3136 		break;
3137 	case SIOCINITIFADDR:
3138 		WM_CORE_LOCK(sc);
3139 		if (ifa->ifa_addr->sa_family == AF_LINK) {
3140 			sdl = satosdl(ifp->if_dl->ifa_addr);
3141 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3142 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3143 			/* unicast address is first multicast entry */
3144 			wm_set_filter(sc);
3145 			error = 0;
3146 			WM_CORE_UNLOCK(sc);
3147 			break;
3148 		}
3149 		WM_CORE_UNLOCK(sc);
3150 		/*FALLTHROUGH*/
3151 	default:
3152 #ifdef WM_MPSAFE
3153 		s = splnet();
3154 #endif
3155 		/* It may call wm_start, so unlock here */
3156 		error = ether_ioctl(ifp, cmd, data);
3157 #ifdef WM_MPSAFE
3158 		splx(s);
3159 #endif
3160 		if (error != ENETRESET)
3161 			break;
3162 
3163 		error = 0;
3164 
3165 		if (cmd == SIOCSIFCAP) {
3166 			error = (*ifp->if_init)(ifp);
3167 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3168 			;
3169 		else if (ifp->if_flags & IFF_RUNNING) {
3170 			/*
3171 			 * Multicast list has changed; set the hardware filter
3172 			 * accordingly.
3173 			 */
3174 			WM_CORE_LOCK(sc);
3175 			wm_set_filter(sc);
3176 			WM_CORE_UNLOCK(sc);
3177 		}
3178 		break;
3179 	}
3180 
3181 #ifndef WM_MPSAFE
3182 	splx(s);
3183 #endif
3184 	return error;
3185 }
3186 
3187 /* MAC address related */
3188 
3189 /*
3190  * Get the offset of MAC address and return it.
3191  * If error occured, use offset 0.
3192  */
3193 static uint16_t
3194 wm_check_alt_mac_addr(struct wm_softc *sc)
3195 {
3196 	uint16_t myea[ETHER_ADDR_LEN / 2];
3197 	uint16_t offset = NVM_OFF_MACADDR;
3198 
3199 	/* Try to read alternative MAC address pointer */
3200 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
3201 		return 0;
3202 
3203 	/* Check pointer if it's valid or not. */
3204 	if ((offset == 0x0000) || (offset == 0xffff))
3205 		return 0;
3206 
3207 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
3208 	/*
3209 	 * Check whether alternative MAC address is valid or not.
3210 	 * Some cards have non 0xffff pointer but those don't use
3211 	 * alternative MAC address in reality.
3212 	 *
3213 	 * Check whether the broadcast bit is set or not.
3214 	 */
3215 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
3216 		if (((myea[0] & 0xff) & 0x01) == 0)
3217 			return offset; /* Found */
3218 
3219 	/* Not found */
3220 	return 0;
3221 }
3222 
3223 static int
3224 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
3225 {
3226 	uint16_t myea[ETHER_ADDR_LEN / 2];
3227 	uint16_t offset = NVM_OFF_MACADDR;
3228 	int do_invert = 0;
3229 
3230 	switch (sc->sc_type) {
3231 	case WM_T_82580:
3232 	case WM_T_I350:
3233 	case WM_T_I354:
3234 		/* EEPROM Top Level Partitioning */
3235 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
3236 		break;
3237 	case WM_T_82571:
3238 	case WM_T_82575:
3239 	case WM_T_82576:
3240 	case WM_T_80003:
3241 	case WM_T_I210:
3242 	case WM_T_I211:
3243 		offset = wm_check_alt_mac_addr(sc);
3244 		if (offset == 0)
3245 			if ((sc->sc_funcid & 0x01) == 1)
3246 				do_invert = 1;
3247 		break;
3248 	default:
3249 		if ((sc->sc_funcid & 0x01) == 1)
3250 			do_invert = 1;
3251 		break;
3252 	}
3253 
3254 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
3255 		goto bad;
3256 
3257 	enaddr[0] = myea[0] & 0xff;
3258 	enaddr[1] = myea[0] >> 8;
3259 	enaddr[2] = myea[1] & 0xff;
3260 	enaddr[3] = myea[1] >> 8;
3261 	enaddr[4] = myea[2] & 0xff;
3262 	enaddr[5] = myea[2] >> 8;
3263 
3264 	/*
3265 	 * Toggle the LSB of the MAC address on the second port
3266 	 * of some dual port cards.
3267 	 */
3268 	if (do_invert != 0)
3269 		enaddr[5] ^= 1;
3270 
3271 	return 0;
3272 
3273  bad:
3274 	return -1;
3275 }
3276 
3277 /*
3278  * wm_set_ral:
3279  *
3280  *	Set an entery in the receive address list.
3281  */
3282 static void
3283 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3284 {
3285 	uint32_t ral_lo, ral_hi, addrl, addrh;
3286 	uint32_t wlock_mac;
3287 	int rv;
3288 
3289 	if (enaddr != NULL) {
3290 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3291 		    (enaddr[3] << 24);
3292 		ral_hi = enaddr[4] | (enaddr[5] << 8);
3293 		ral_hi |= RAL_AV;
3294 	} else {
3295 		ral_lo = 0;
3296 		ral_hi = 0;
3297 	}
3298 
3299 	switch (sc->sc_type) {
3300 	case WM_T_82542_2_0:
3301 	case WM_T_82542_2_1:
3302 	case WM_T_82543:
3303 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
3304 		CSR_WRITE_FLUSH(sc);
3305 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
3306 		CSR_WRITE_FLUSH(sc);
3307 		break;
3308 	case WM_T_PCH2:
3309 	case WM_T_PCH_LPT:
3310 	case WM_T_PCH_SPT:
3311 		if (idx == 0) {
3312 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
3313 			CSR_WRITE_FLUSH(sc);
3314 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
3315 			CSR_WRITE_FLUSH(sc);
3316 			return;
3317 		}
3318 		if (sc->sc_type != WM_T_PCH2) {
3319 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
3320 			    FWSM_WLOCK_MAC);
3321 			addrl = WMREG_SHRAL(idx - 1);
3322 			addrh = WMREG_SHRAH(idx - 1);
3323 		} else {
3324 			wlock_mac = 0;
3325 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
3326 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
3327 		}
3328 
3329 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
3330 			rv = wm_get_swflag_ich8lan(sc);
3331 			if (rv != 0)
3332 				return;
3333 			CSR_WRITE(sc, addrl, ral_lo);
3334 			CSR_WRITE_FLUSH(sc);
3335 			CSR_WRITE(sc, addrh, ral_hi);
3336 			CSR_WRITE_FLUSH(sc);
3337 			wm_put_swflag_ich8lan(sc);
3338 		}
3339 
3340 		break;
3341 	default:
3342 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
3343 		CSR_WRITE_FLUSH(sc);
3344 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
3345 		CSR_WRITE_FLUSH(sc);
3346 		break;
3347 	}
3348 }
3349 
3350 /*
3351  * wm_mchash:
3352  *
3353  *	Compute the hash of the multicast address for the 4096-bit
3354  *	multicast filter.
3355  */
3356 static uint32_t
3357 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3358 {
3359 	static const int lo_shift[4] = { 4, 3, 2, 0 };
3360 	static const int hi_shift[4] = { 4, 5, 6, 8 };
3361 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3362 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3363 	uint32_t hash;
3364 
3365 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3366 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3367 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3368 	    || (sc->sc_type == WM_T_PCH_SPT)) {
3369 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3370 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3371 		return (hash & 0x3ff);
3372 	}
3373 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3374 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3375 
3376 	return (hash & 0xfff);
3377 }
3378 
3379 /*
3380  * wm_set_filter:
3381  *
3382  *	Set up the receive filter.
3383  */
3384 static void
3385 wm_set_filter(struct wm_softc *sc)
3386 {
3387 	struct ethercom *ec = &sc->sc_ethercom;
3388 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3389 	struct ether_multi *enm;
3390 	struct ether_multistep step;
3391 	bus_addr_t mta_reg;
3392 	uint32_t hash, reg, bit;
3393 	int i, size, ralmax;
3394 
3395 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3396 		device_xname(sc->sc_dev), __func__));
3397 
3398 	if (sc->sc_type >= WM_T_82544)
3399 		mta_reg = WMREG_CORDOVA_MTA;
3400 	else
3401 		mta_reg = WMREG_MTA;
3402 
3403 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3404 
3405 	if (ifp->if_flags & IFF_BROADCAST)
3406 		sc->sc_rctl |= RCTL_BAM;
3407 	if (ifp->if_flags & IFF_PROMISC) {
3408 		sc->sc_rctl |= RCTL_UPE;
3409 		goto allmulti;
3410 	}
3411 
3412 	/*
3413 	 * Set the station address in the first RAL slot, and
3414 	 * clear the remaining slots.
3415 	 */
3416 	if (sc->sc_type == WM_T_ICH8)
3417 		size = WM_RAL_TABSIZE_ICH8 -1;
3418 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
3419 	    || (sc->sc_type == WM_T_PCH))
3420 		size = WM_RAL_TABSIZE_ICH8;
3421 	else if (sc->sc_type == WM_T_PCH2)
3422 		size = WM_RAL_TABSIZE_PCH2;
3423 	else if ((sc->sc_type == WM_T_PCH_LPT) ||(sc->sc_type == WM_T_PCH_SPT))
3424 		size = WM_RAL_TABSIZE_PCH_LPT;
3425 	else if (sc->sc_type == WM_T_82575)
3426 		size = WM_RAL_TABSIZE_82575;
3427 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
3428 		size = WM_RAL_TABSIZE_82576;
3429 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3430 		size = WM_RAL_TABSIZE_I350;
3431 	else
3432 		size = WM_RAL_TABSIZE;
3433 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3434 
3435 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
3436 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
3437 		switch (i) {
3438 		case 0:
3439 			/* We can use all entries */
3440 			ralmax = size;
3441 			break;
3442 		case 1:
3443 			/* Only RAR[0] */
3444 			ralmax = 1;
3445 			break;
3446 		default:
3447 			/* available SHRA + RAR[0] */
3448 			ralmax = i + 1;
3449 		}
3450 	} else
3451 		ralmax = size;
3452 	for (i = 1; i < size; i++) {
3453 		if (i < ralmax)
3454 			wm_set_ral(sc, NULL, i);
3455 	}
3456 
3457 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3458 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3459 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3460 	    || (sc->sc_type == WM_T_PCH_SPT))
3461 		size = WM_ICH8_MC_TABSIZE;
3462 	else
3463 		size = WM_MC_TABSIZE;
3464 	/* Clear out the multicast table. */
3465 	for (i = 0; i < size; i++) {
3466 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
3467 		CSR_WRITE_FLUSH(sc);
3468 	}
3469 
3470 	ETHER_LOCK(ec);
3471 	ETHER_FIRST_MULTI(step, ec, enm);
3472 	while (enm != NULL) {
3473 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3474 			ETHER_UNLOCK(ec);
3475 			/*
3476 			 * We must listen to a range of multicast addresses.
3477 			 * For now, just accept all multicasts, rather than
3478 			 * trying to set only those filter bits needed to match
3479 			 * the range.  (At this time, the only use of address
3480 			 * ranges is for IP multicast routing, for which the
3481 			 * range is big enough to require all bits set.)
3482 			 */
3483 			goto allmulti;
3484 		}
3485 
3486 		hash = wm_mchash(sc, enm->enm_addrlo);
3487 
3488 		reg = (hash >> 5);
3489 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3490 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3491 		    || (sc->sc_type == WM_T_PCH2)
3492 		    || (sc->sc_type == WM_T_PCH_LPT)
3493 		    || (sc->sc_type == WM_T_PCH_SPT))
3494 			reg &= 0x1f;
3495 		else
3496 			reg &= 0x7f;
3497 		bit = hash & 0x1f;
3498 
3499 		hash = CSR_READ(sc, mta_reg + (reg << 2));
3500 		hash |= 1U << bit;
3501 
3502 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
3503 			/*
3504 			 * 82544 Errata 9: Certain register cannot be written
3505 			 * with particular alignments in PCI-X bus operation
3506 			 * (FCAH, MTA and VFTA).
3507 			 */
3508 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3509 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3510 			CSR_WRITE_FLUSH(sc);
3511 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3512 			CSR_WRITE_FLUSH(sc);
3513 		} else {
3514 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3515 			CSR_WRITE_FLUSH(sc);
3516 		}
3517 
3518 		ETHER_NEXT_MULTI(step, enm);
3519 	}
3520 	ETHER_UNLOCK(ec);
3521 
3522 	ifp->if_flags &= ~IFF_ALLMULTI;
3523 	goto setit;
3524 
3525  allmulti:
3526 	ifp->if_flags |= IFF_ALLMULTI;
3527 	sc->sc_rctl |= RCTL_MPE;
3528 
3529  setit:
3530 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3531 }
3532 
3533 /* Reset and init related */
3534 
3535 static void
3536 wm_set_vlan(struct wm_softc *sc)
3537 {
3538 
3539 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3540 		device_xname(sc->sc_dev), __func__));
3541 
3542 	/* Deal with VLAN enables. */
3543 	if (VLAN_ATTACHED(&sc->sc_ethercom))
3544 		sc->sc_ctrl |= CTRL_VME;
3545 	else
3546 		sc->sc_ctrl &= ~CTRL_VME;
3547 
3548 	/* Write the control registers. */
3549 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3550 }
3551 
3552 static void
3553 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3554 {
3555 	uint32_t gcr;
3556 	pcireg_t ctrl2;
3557 
3558 	gcr = CSR_READ(sc, WMREG_GCR);
3559 
3560 	/* Only take action if timeout value is defaulted to 0 */
3561 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3562 		goto out;
3563 
3564 	if ((gcr & GCR_CAP_VER2) == 0) {
3565 		gcr |= GCR_CMPL_TMOUT_10MS;
3566 		goto out;
3567 	}
3568 
3569 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3570 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
3571 	ctrl2 |= WM_PCIE_DCSR2_16MS;
3572 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3573 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3574 
3575 out:
3576 	/* Disable completion timeout resend */
3577 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
3578 
3579 	CSR_WRITE(sc, WMREG_GCR, gcr);
3580 }
3581 
3582 void
3583 wm_get_auto_rd_done(struct wm_softc *sc)
3584 {
3585 	int i;
3586 
3587 	/* wait for eeprom to reload */
3588 	switch (sc->sc_type) {
3589 	case WM_T_82571:
3590 	case WM_T_82572:
3591 	case WM_T_82573:
3592 	case WM_T_82574:
3593 	case WM_T_82583:
3594 	case WM_T_82575:
3595 	case WM_T_82576:
3596 	case WM_T_82580:
3597 	case WM_T_I350:
3598 	case WM_T_I354:
3599 	case WM_T_I210:
3600 	case WM_T_I211:
3601 	case WM_T_80003:
3602 	case WM_T_ICH8:
3603 	case WM_T_ICH9:
3604 		for (i = 0; i < 10; i++) {
3605 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3606 				break;
3607 			delay(1000);
3608 		}
3609 		if (i == 10) {
3610 			log(LOG_ERR, "%s: auto read from eeprom failed to "
3611 			    "complete\n", device_xname(sc->sc_dev));
3612 		}
3613 		break;
3614 	default:
3615 		break;
3616 	}
3617 }
3618 
3619 void
3620 wm_lan_init_done(struct wm_softc *sc)
3621 {
3622 	uint32_t reg = 0;
3623 	int i;
3624 
3625 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3626 		device_xname(sc->sc_dev), __func__));
3627 
3628 	/* Wait for eeprom to reload */
3629 	switch (sc->sc_type) {
3630 	case WM_T_ICH10:
3631 	case WM_T_PCH:
3632 	case WM_T_PCH2:
3633 	case WM_T_PCH_LPT:
3634 	case WM_T_PCH_SPT:
3635 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3636 			reg = CSR_READ(sc, WMREG_STATUS);
3637 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
3638 				break;
3639 			delay(100);
3640 		}
3641 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3642 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
3643 			    "complete\n", device_xname(sc->sc_dev), __func__);
3644 		}
3645 		break;
3646 	default:
3647 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3648 		    __func__);
3649 		break;
3650 	}
3651 
3652 	reg &= ~STATUS_LAN_INIT_DONE;
3653 	CSR_WRITE(sc, WMREG_STATUS, reg);
3654 }
3655 
3656 void
3657 wm_get_cfg_done(struct wm_softc *sc)
3658 {
3659 	int mask;
3660 	uint32_t reg;
3661 	int i;
3662 
3663 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3664 		device_xname(sc->sc_dev), __func__));
3665 
3666 	/* Wait for eeprom to reload */
3667 	switch (sc->sc_type) {
3668 	case WM_T_82542_2_0:
3669 	case WM_T_82542_2_1:
3670 		/* null */
3671 		break;
3672 	case WM_T_82543:
3673 	case WM_T_82544:
3674 	case WM_T_82540:
3675 	case WM_T_82545:
3676 	case WM_T_82545_3:
3677 	case WM_T_82546:
3678 	case WM_T_82546_3:
3679 	case WM_T_82541:
3680 	case WM_T_82541_2:
3681 	case WM_T_82547:
3682 	case WM_T_82547_2:
3683 	case WM_T_82573:
3684 	case WM_T_82574:
3685 	case WM_T_82583:
3686 		/* generic */
3687 		delay(10*1000);
3688 		break;
3689 	case WM_T_80003:
3690 	case WM_T_82571:
3691 	case WM_T_82572:
3692 	case WM_T_82575:
3693 	case WM_T_82576:
3694 	case WM_T_82580:
3695 	case WM_T_I350:
3696 	case WM_T_I354:
3697 	case WM_T_I210:
3698 	case WM_T_I211:
3699 		if (sc->sc_type == WM_T_82571) {
3700 			/* Only 82571 shares port 0 */
3701 			mask = EEMNGCTL_CFGDONE_0;
3702 		} else
3703 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3704 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3705 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3706 				break;
3707 			delay(1000);
3708 		}
3709 		if (i >= WM_PHY_CFG_TIMEOUT) {
3710 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3711 				device_xname(sc->sc_dev), __func__));
3712 		}
3713 		break;
3714 	case WM_T_ICH8:
3715 	case WM_T_ICH9:
3716 	case WM_T_ICH10:
3717 	case WM_T_PCH:
3718 	case WM_T_PCH2:
3719 	case WM_T_PCH_LPT:
3720 	case WM_T_PCH_SPT:
3721 		delay(10*1000);
3722 		if (sc->sc_type >= WM_T_ICH10)
3723 			wm_lan_init_done(sc);
3724 		else
3725 			wm_get_auto_rd_done(sc);
3726 
3727 		reg = CSR_READ(sc, WMREG_STATUS);
3728 		if ((reg & STATUS_PHYRA) != 0)
3729 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3730 		break;
3731 	default:
3732 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3733 		    __func__);
3734 		break;
3735 	}
3736 }
3737 
3738 void
3739 wm_phy_post_reset(struct wm_softc *sc)
3740 {
3741 	uint32_t reg;
3742 
3743 	/* This function is only for ICH8 and newer. */
3744 	if (sc->sc_type < WM_T_ICH8)
3745 		return;
3746 
3747 	if (wm_phy_resetisblocked(sc)) {
3748 		/* XXX */
3749 		device_printf(sc->sc_dev, "PHY is blocked\n");
3750 		return;
3751 	}
3752 
3753 	/* Allow time for h/w to get to quiescent state after reset */
3754 	delay(10*1000);
3755 
3756 	/* Perform any necessary post-reset workarounds */
3757 	if (sc->sc_type == WM_T_PCH)
3758 		wm_hv_phy_workaround_ich8lan(sc);
3759 	if (sc->sc_type == WM_T_PCH2)
3760 		wm_lv_phy_workaround_ich8lan(sc);
3761 
3762 	/* Clear the host wakeup bit after lcd reset */
3763 	if (sc->sc_type >= WM_T_PCH) {
3764 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
3765 		    BM_PORT_GEN_CFG);
3766 		reg &= ~BM_WUC_HOST_WU_BIT;
3767 		wm_gmii_hv_writereg(sc->sc_dev, 2,
3768 		    BM_PORT_GEN_CFG, reg);
3769 	}
3770 
3771 	/* Configure the LCD with the extended configuration region in NVM */
3772 	wm_init_lcd_from_nvm(sc);
3773 
3774 	/* Configure the LCD with the OEM bits in NVM */
3775 }
3776 
3777 /* Only for PCH and newer */
3778 static void
3779 wm_write_smbus_addr(struct wm_softc *sc)
3780 {
3781 	uint32_t strap, freq;
3782 	uint32_t phy_data;
3783 
3784 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3785 		device_xname(sc->sc_dev), __func__));
3786 
3787 	strap = CSR_READ(sc, WMREG_STRAP);
3788 	freq = __SHIFTOUT(strap, STRAP_FREQ);
3789 
3790 	phy_data = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR);
3791 
3792 	phy_data &= ~HV_SMB_ADDR_ADDR;
3793 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
3794 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
3795 
3796 	if (sc->sc_phytype == WMPHY_I217) {
3797 		/* Restore SMBus frequency */
3798 		if (freq --) {
3799 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
3800 			    | HV_SMB_ADDR_FREQ_HIGH);
3801 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
3802 			    HV_SMB_ADDR_FREQ_LOW);
3803 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
3804 			    HV_SMB_ADDR_FREQ_HIGH);
3805 		} else {
3806 			DPRINTF(WM_DEBUG_INIT,
3807 			    ("%s: %s Unsupported SMB frequency in PHY\n",
3808 				device_xname(sc->sc_dev), __func__));
3809 		}
3810 	}
3811 
3812 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR, phy_data);
3813 }
3814 
3815 void
3816 wm_init_lcd_from_nvm(struct wm_softc *sc)
3817 {
3818 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
3819 	uint16_t phy_page = 0;
3820 
3821 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3822 		device_xname(sc->sc_dev), __func__));
3823 
3824 	switch (sc->sc_type) {
3825 	case WM_T_ICH8:
3826 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
3827 		    || (sc->sc_phytype != WMPHY_IGP_3))
3828 			return;
3829 
3830 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
3831 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
3832 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
3833 			break;
3834 		}
3835 		/* FALLTHROUGH */
3836 	case WM_T_PCH:
3837 	case WM_T_PCH2:
3838 	case WM_T_PCH_LPT:
3839 	case WM_T_PCH_SPT:
3840 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
3841 		break;
3842 	default:
3843 		return;
3844 	}
3845 
3846 	sc->phy.acquire(sc);
3847 
3848 	reg = CSR_READ(sc, WMREG_FEXTNVM);
3849 	if ((reg & sw_cfg_mask) == 0)
3850 		goto release;
3851 
3852 	/*
3853 	 * Make sure HW does not configure LCD from PHY extended configuration
3854 	 * before SW configuration
3855 	 */
3856 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
3857 	if ((sc->sc_type < WM_T_PCH2)
3858 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
3859 		goto release;
3860 
3861 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
3862 		device_xname(sc->sc_dev), __func__));
3863 	/* word_addr is in DWORD */
3864 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
3865 
3866 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
3867 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
3868 
3869 	if (((sc->sc_type == WM_T_PCH)
3870 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
3871 	    || (sc->sc_type > WM_T_PCH)) {
3872 		/*
3873 		 * HW configures the SMBus address and LEDs when the OEM and
3874 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
3875 		 * are cleared, SW will configure them instead.
3876 		 */
3877 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
3878 			device_xname(sc->sc_dev), __func__));
3879 		wm_write_smbus_addr(sc);
3880 
3881 		reg = CSR_READ(sc, WMREG_LEDCTL);
3882 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg);
3883 	}
3884 
3885 	/* Configure LCD from extended configuration region. */
3886 	for (i = 0; i < cnf_size; i++) {
3887 		uint16_t reg_data, reg_addr;
3888 
3889 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
3890 			goto release;
3891 
3892 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
3893 			goto release;
3894 
3895 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
3896 			phy_page = reg_data;
3897 
3898 		reg_addr &= IGPHY_MAXREGADDR;
3899 		reg_addr |= phy_page;
3900 
3901 		sc->phy.release(sc); /* XXX */
3902 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, reg_addr, reg_data);
3903 		sc->phy.acquire(sc); /* XXX */
3904 	}
3905 
3906 release:
3907 	sc->phy.release(sc);
3908 	return;
3909 }
3910 
3911 
3912 /* Init hardware bits */
3913 void
3914 wm_initialize_hardware_bits(struct wm_softc *sc)
3915 {
3916 	uint32_t tarc0, tarc1, reg;
3917 
3918 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3919 		device_xname(sc->sc_dev), __func__));
3920 
3921 	/* For 82571 variant, 80003 and ICHs */
3922 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
3923 	    || (sc->sc_type >= WM_T_80003)) {
3924 
3925 		/* Transmit Descriptor Control 0 */
3926 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
3927 		reg |= TXDCTL_COUNT_DESC;
3928 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
3929 
3930 		/* Transmit Descriptor Control 1 */
3931 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
3932 		reg |= TXDCTL_COUNT_DESC;
3933 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
3934 
3935 		/* TARC0 */
3936 		tarc0 = CSR_READ(sc, WMREG_TARC0);
3937 		switch (sc->sc_type) {
3938 		case WM_T_82571:
3939 		case WM_T_82572:
3940 		case WM_T_82573:
3941 		case WM_T_82574:
3942 		case WM_T_82583:
3943 		case WM_T_80003:
3944 			/* Clear bits 30..27 */
3945 			tarc0 &= ~__BITS(30, 27);
3946 			break;
3947 		default:
3948 			break;
3949 		}
3950 
3951 		switch (sc->sc_type) {
3952 		case WM_T_82571:
3953 		case WM_T_82572:
3954 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
3955 
3956 			tarc1 = CSR_READ(sc, WMREG_TARC1);
3957 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
3958 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
3959 			/* 8257[12] Errata No.7 */
3960 			tarc1 |= __BIT(22); /* TARC1 bits 22 */
3961 
3962 			/* TARC1 bit 28 */
3963 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3964 				tarc1 &= ~__BIT(28);
3965 			else
3966 				tarc1 |= __BIT(28);
3967 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
3968 
3969 			/*
3970 			 * 8257[12] Errata No.13
3971 			 * Disable Dyamic Clock Gating.
3972 			 */
3973 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
3974 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
3975 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3976 			break;
3977 		case WM_T_82573:
3978 		case WM_T_82574:
3979 		case WM_T_82583:
3980 			if ((sc->sc_type == WM_T_82574)
3981 			    || (sc->sc_type == WM_T_82583))
3982 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
3983 
3984 			/* Extended Device Control */
3985 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
3986 			reg &= ~__BIT(23);	/* Clear bit 23 */
3987 			reg |= __BIT(22);	/* Set bit 22 */
3988 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3989 
3990 			/* Device Control */
3991 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
3992 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3993 
3994 			/* PCIe Control Register */
3995 			/*
3996 			 * 82573 Errata (unknown).
3997 			 *
3998 			 * 82574 Errata 25 and 82583 Errata 12
3999 			 * "Dropped Rx Packets":
4000 			 *   NVM Image Version 2.1.4 and newer has no this bug.
4001 			 */
4002 			reg = CSR_READ(sc, WMREG_GCR);
4003 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
4004 			CSR_WRITE(sc, WMREG_GCR, reg);
4005 
4006 			if ((sc->sc_type == WM_T_82574)
4007 			    || (sc->sc_type == WM_T_82583)) {
4008 				/*
4009 				 * Document says this bit must be set for
4010 				 * proper operation.
4011 				 */
4012 				reg = CSR_READ(sc, WMREG_GCR);
4013 				reg |= __BIT(22);
4014 				CSR_WRITE(sc, WMREG_GCR, reg);
4015 
4016 				/*
4017 				 * Apply workaround for hardware errata
4018 				 * documented in errata docs Fixes issue where
4019 				 * some error prone or unreliable PCIe
4020 				 * completions are occurring, particularly
4021 				 * with ASPM enabled. Without fix, issue can
4022 				 * cause Tx timeouts.
4023 				 */
4024 				reg = CSR_READ(sc, WMREG_GCR2);
4025 				reg |= __BIT(0);
4026 				CSR_WRITE(sc, WMREG_GCR2, reg);
4027 			}
4028 			break;
4029 		case WM_T_80003:
4030 			/* TARC0 */
4031 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
4032 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
4033 				tarc0 &= ~__BIT(20); /* Clear bits 20 */
4034 
4035 			/* TARC1 bit 28 */
4036 			tarc1 = CSR_READ(sc, WMREG_TARC1);
4037 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
4038 				tarc1 &= ~__BIT(28);
4039 			else
4040 				tarc1 |= __BIT(28);
4041 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
4042 			break;
4043 		case WM_T_ICH8:
4044 		case WM_T_ICH9:
4045 		case WM_T_ICH10:
4046 		case WM_T_PCH:
4047 		case WM_T_PCH2:
4048 		case WM_T_PCH_LPT:
4049 		case WM_T_PCH_SPT:
4050 			/* TARC0 */
4051 			if (sc->sc_type == WM_T_ICH8) {
4052 				/* Set TARC0 bits 29 and 28 */
4053 				tarc0 |= __BITS(29, 28);
4054 			} else if (sc->sc_type == WM_T_PCH_SPT) {
4055 				tarc0 |= __BIT(29);
4056 				/*
4057 				 *  Drop bit 28. From Linux.
4058 				 * See I218/I219 spec update
4059 				 * "5. Buffer Overrun While the I219 is
4060 				 * Processing DMA Transactions"
4061 				 */
4062 				tarc0 &= ~__BIT(28);
4063 			}
4064 			/* Set TARC0 bits 23,24,26,27 */
4065 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
4066 
4067 			/* CTRL_EXT */
4068 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4069 			reg |= __BIT(22);	/* Set bit 22 */
4070 			/*
4071 			 * Enable PHY low-power state when MAC is at D3
4072 			 * w/o WoL
4073 			 */
4074 			if (sc->sc_type >= WM_T_PCH)
4075 				reg |= CTRL_EXT_PHYPDEN;
4076 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4077 
4078 			/* TARC1 */
4079 			tarc1 = CSR_READ(sc, WMREG_TARC1);
4080 			/* bit 28 */
4081 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
4082 				tarc1 &= ~__BIT(28);
4083 			else
4084 				tarc1 |= __BIT(28);
4085 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
4086 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
4087 
4088 			/* Device Status */
4089 			if (sc->sc_type == WM_T_ICH8) {
4090 				reg = CSR_READ(sc, WMREG_STATUS);
4091 				reg &= ~__BIT(31);
4092 				CSR_WRITE(sc, WMREG_STATUS, reg);
4093 
4094 			}
4095 
4096 			/* IOSFPC */
4097 			if (sc->sc_type == WM_T_PCH_SPT) {
4098 				reg = CSR_READ(sc, WMREG_IOSFPC);
4099 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
4100 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
4101 			}
4102 			/*
4103 			 * Work-around descriptor data corruption issue during
4104 			 * NFS v2 UDP traffic, just disable the NFS filtering
4105 			 * capability.
4106 			 */
4107 			reg = CSR_READ(sc, WMREG_RFCTL);
4108 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
4109 			CSR_WRITE(sc, WMREG_RFCTL, reg);
4110 			break;
4111 		default:
4112 			break;
4113 		}
4114 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
4115 
4116 		switch (sc->sc_type) {
4117 		/*
4118 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
4119 		 * Avoid RSS Hash Value bug.
4120 		 */
4121 		case WM_T_82571:
4122 		case WM_T_82572:
4123 		case WM_T_82573:
4124 		case WM_T_80003:
4125 		case WM_T_ICH8:
4126 			reg = CSR_READ(sc, WMREG_RFCTL);
4127 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
4128 			CSR_WRITE(sc, WMREG_RFCTL, reg);
4129 			break;
4130 		case WM_T_82574:
4131 			/* use extened Rx descriptor. */
4132 			reg = CSR_READ(sc, WMREG_RFCTL);
4133 			reg |= WMREG_RFCTL_EXSTEN;
4134 			CSR_WRITE(sc, WMREG_RFCTL, reg);
4135 			break;
4136 		default:
4137 			break;
4138 		}
4139 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
4140 		/*
4141 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
4142 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
4143 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
4144 		 * Correctly by the Device"
4145 		 *
4146 		 * I354(C2000) Errata AVR53:
4147 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
4148 		 * Hang"
4149 		 */
4150 		reg = CSR_READ(sc, WMREG_RFCTL);
4151 		reg |= WMREG_RFCTL_IPV6EXDIS;
4152 		CSR_WRITE(sc, WMREG_RFCTL, reg);
4153 	}
4154 }
4155 
4156 static uint32_t
4157 wm_rxpbs_adjust_82580(uint32_t val)
4158 {
4159 	uint32_t rv = 0;
4160 
4161 	if (val < __arraycount(wm_82580_rxpbs_table))
4162 		rv = wm_82580_rxpbs_table[val];
4163 
4164 	return rv;
4165 }
4166 
4167 /*
4168  * wm_reset_phy:
4169  *
4170  *	generic PHY reset function.
4171  *	Same as e1000_phy_hw_reset_generic()
4172  */
4173 static void
4174 wm_reset_phy(struct wm_softc *sc)
4175 {
4176 	uint32_t reg;
4177 
4178 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4179 		device_xname(sc->sc_dev), __func__));
4180 	if (wm_phy_resetisblocked(sc))
4181 		return;
4182 
4183 	sc->phy.acquire(sc);
4184 
4185 	reg = CSR_READ(sc, WMREG_CTRL);
4186 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
4187 	CSR_WRITE_FLUSH(sc);
4188 
4189 	delay(sc->phy.reset_delay_us);
4190 
4191 	CSR_WRITE(sc, WMREG_CTRL, reg);
4192 	CSR_WRITE_FLUSH(sc);
4193 
4194 	delay(150);
4195 
4196 	sc->phy.release(sc);
4197 
4198 	wm_get_cfg_done(sc);
4199 	wm_phy_post_reset(sc);
4200 }
4201 
4202 static void
4203 wm_flush_desc_rings(struct wm_softc *sc)
4204 {
4205 	pcireg_t preg;
4206 	uint32_t reg;
4207 	struct wm_txqueue *txq;
4208 	wiseman_txdesc_t *txd;
4209 	int nexttx;
4210 	uint32_t rctl;
4211 
4212 	/* First, disable MULR fix in FEXTNVM11 */
4213 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
4214 	reg |= FEXTNVM11_DIS_MULRFIX;
4215 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
4216 
4217 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
4218 	reg = CSR_READ(sc, WMREG_TDLEN(0));
4219 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
4220 		return;
4221 
4222 	/* TX */
4223 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
4224 	    device_xname(sc->sc_dev), preg, reg);
4225 	reg = CSR_READ(sc, WMREG_TCTL);
4226 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
4227 
4228 	txq = &sc->sc_queue[0].wmq_txq;
4229 	nexttx = txq->txq_next;
4230 	txd = &txq->txq_descs[nexttx];
4231 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
4232 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS| 512);
4233 	txd->wtx_fields.wtxu_status = 0;
4234 	txd->wtx_fields.wtxu_options = 0;
4235 	txd->wtx_fields.wtxu_vlan = 0;
4236 
4237 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
4238 	    BUS_SPACE_BARRIER_WRITE);
4239 
4240 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
4241 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
4242 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
4243 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
4244 	delay(250);
4245 
4246 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
4247 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
4248 		return;
4249 
4250 	/* RX */
4251 	printf("%s: Need RX flush (reg = %08x)\n",
4252 	    device_xname(sc->sc_dev), preg);
4253 	rctl = CSR_READ(sc, WMREG_RCTL);
4254 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
4255 	CSR_WRITE_FLUSH(sc);
4256 	delay(150);
4257 
4258 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
4259 	/* zero the lower 14 bits (prefetch and host thresholds) */
4260 	reg &= 0xffffc000;
4261 	/*
4262 	 * update thresholds: prefetch threshold to 31, host threshold
4263 	 * to 1 and make sure the granularity is "descriptors" and not
4264 	 * "cache lines"
4265 	 */
4266 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
4267 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
4268 
4269 	/*
4270 	 * momentarily enable the RX ring for the changes to take
4271 	 * effect
4272 	 */
4273 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
4274 	CSR_WRITE_FLUSH(sc);
4275 	delay(150);
4276 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
4277 }
4278 
4279 /*
4280  * wm_reset:
4281  *
4282  *	Reset the i82542 chip.
4283  */
4284 static void
4285 wm_reset(struct wm_softc *sc)
4286 {
4287 	int phy_reset = 0;
4288 	int i, error = 0;
4289 	uint32_t reg;
4290 	uint16_t kmreg;
4291 	int rv;
4292 
4293 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4294 		device_xname(sc->sc_dev), __func__));
4295 	KASSERT(sc->sc_type != 0);
4296 
4297 	/*
4298 	 * Allocate on-chip memory according to the MTU size.
4299 	 * The Packet Buffer Allocation register must be written
4300 	 * before the chip is reset.
4301 	 */
4302 	switch (sc->sc_type) {
4303 	case WM_T_82547:
4304 	case WM_T_82547_2:
4305 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4306 		    PBA_22K : PBA_30K;
4307 		for (i = 0; i < sc->sc_nqueues; i++) {
4308 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
4309 			txq->txq_fifo_head = 0;
4310 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
4311 			txq->txq_fifo_size =
4312 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
4313 			txq->txq_fifo_stall = 0;
4314 		}
4315 		break;
4316 	case WM_T_82571:
4317 	case WM_T_82572:
4318 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
4319 	case WM_T_80003:
4320 		sc->sc_pba = PBA_32K;
4321 		break;
4322 	case WM_T_82573:
4323 		sc->sc_pba = PBA_12K;
4324 		break;
4325 	case WM_T_82574:
4326 	case WM_T_82583:
4327 		sc->sc_pba = PBA_20K;
4328 		break;
4329 	case WM_T_82576:
4330 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
4331 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
4332 		break;
4333 	case WM_T_82580:
4334 	case WM_T_I350:
4335 	case WM_T_I354:
4336 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
4337 		break;
4338 	case WM_T_I210:
4339 	case WM_T_I211:
4340 		sc->sc_pba = PBA_34K;
4341 		break;
4342 	case WM_T_ICH8:
4343 		/* Workaround for a bit corruption issue in FIFO memory */
4344 		sc->sc_pba = PBA_8K;
4345 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
4346 		break;
4347 	case WM_T_ICH9:
4348 	case WM_T_ICH10:
4349 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
4350 		    PBA_14K : PBA_10K;
4351 		break;
4352 	case WM_T_PCH:
4353 	case WM_T_PCH2:
4354 	case WM_T_PCH_LPT:
4355 	case WM_T_PCH_SPT:
4356 		sc->sc_pba = PBA_26K;
4357 		break;
4358 	default:
4359 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4360 		    PBA_40K : PBA_48K;
4361 		break;
4362 	}
4363 	/*
4364 	 * Only old or non-multiqueue devices have the PBA register
4365 	 * XXX Need special handling for 82575.
4366 	 */
4367 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4368 	    || (sc->sc_type == WM_T_82575))
4369 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
4370 
4371 	/* Prevent the PCI-E bus from sticking */
4372 	if (sc->sc_flags & WM_F_PCIE) {
4373 		int timeout = 800;
4374 
4375 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
4376 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4377 
4378 		while (timeout--) {
4379 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
4380 			    == 0)
4381 				break;
4382 			delay(100);
4383 		}
4384 		if (timeout == 0)
4385 			device_printf(sc->sc_dev,
4386 			    "failed to disable busmastering\n");
4387 	}
4388 
4389 	/* Set the completion timeout for interface */
4390 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
4391 	    || (sc->sc_type == WM_T_82580)
4392 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4393 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
4394 		wm_set_pcie_completion_timeout(sc);
4395 
4396 	/* Clear interrupt */
4397 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4398 	if (wm_is_using_msix(sc)) {
4399 		if (sc->sc_type != WM_T_82574) {
4400 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4401 			CSR_WRITE(sc, WMREG_EIAC, 0);
4402 		} else {
4403 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4404 		}
4405 	}
4406 
4407 	/* Stop the transmit and receive processes. */
4408 	CSR_WRITE(sc, WMREG_RCTL, 0);
4409 	sc->sc_rctl &= ~RCTL_EN;
4410 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
4411 	CSR_WRITE_FLUSH(sc);
4412 
4413 	/* XXX set_tbi_sbp_82543() */
4414 
4415 	delay(10*1000);
4416 
4417 	/* Must acquire the MDIO ownership before MAC reset */
4418 	switch (sc->sc_type) {
4419 	case WM_T_82573:
4420 	case WM_T_82574:
4421 	case WM_T_82583:
4422 		error = wm_get_hw_semaphore_82573(sc);
4423 		break;
4424 	default:
4425 		break;
4426 	}
4427 
4428 	/*
4429 	 * 82541 Errata 29? & 82547 Errata 28?
4430 	 * See also the description about PHY_RST bit in CTRL register
4431 	 * in 8254x_GBe_SDM.pdf.
4432 	 */
4433 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
4434 		CSR_WRITE(sc, WMREG_CTRL,
4435 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
4436 		CSR_WRITE_FLUSH(sc);
4437 		delay(5000);
4438 	}
4439 
4440 	switch (sc->sc_type) {
4441 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
4442 	case WM_T_82541:
4443 	case WM_T_82541_2:
4444 	case WM_T_82547:
4445 	case WM_T_82547_2:
4446 		/*
4447 		 * On some chipsets, a reset through a memory-mapped write
4448 		 * cycle can cause the chip to reset before completing the
4449 		 * write cycle.  This causes major headache that can be
4450 		 * avoided by issuing the reset via indirect register writes
4451 		 * through I/O space.
4452 		 *
4453 		 * So, if we successfully mapped the I/O BAR at attach time,
4454 		 * use that.  Otherwise, try our luck with a memory-mapped
4455 		 * reset.
4456 		 */
4457 		if (sc->sc_flags & WM_F_IOH_VALID)
4458 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
4459 		else
4460 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
4461 		break;
4462 	case WM_T_82545_3:
4463 	case WM_T_82546_3:
4464 		/* Use the shadow control register on these chips. */
4465 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
4466 		break;
4467 	case WM_T_80003:
4468 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4469 		sc->phy.acquire(sc);
4470 		CSR_WRITE(sc, WMREG_CTRL, reg);
4471 		sc->phy.release(sc);
4472 		break;
4473 	case WM_T_ICH8:
4474 	case WM_T_ICH9:
4475 	case WM_T_ICH10:
4476 	case WM_T_PCH:
4477 	case WM_T_PCH2:
4478 	case WM_T_PCH_LPT:
4479 	case WM_T_PCH_SPT:
4480 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4481 		if (wm_phy_resetisblocked(sc) == false) {
4482 			/*
4483 			 * Gate automatic PHY configuration by hardware on
4484 			 * non-managed 82579
4485 			 */
4486 			if ((sc->sc_type == WM_T_PCH2)
4487 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
4488 				== 0))
4489 				wm_gate_hw_phy_config_ich8lan(sc, true);
4490 
4491 			reg |= CTRL_PHY_RESET;
4492 			phy_reset = 1;
4493 		} else
4494 			printf("XXX reset is blocked!!!\n");
4495 		sc->phy.acquire(sc);
4496 		CSR_WRITE(sc, WMREG_CTRL, reg);
4497 		/* Don't insert a completion barrier when reset */
4498 		delay(20*1000);
4499 		mutex_exit(sc->sc_ich_phymtx);
4500 		break;
4501 	case WM_T_82580:
4502 	case WM_T_I350:
4503 	case WM_T_I354:
4504 	case WM_T_I210:
4505 	case WM_T_I211:
4506 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4507 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
4508 			CSR_WRITE_FLUSH(sc);
4509 		delay(5000);
4510 		break;
4511 	case WM_T_82542_2_0:
4512 	case WM_T_82542_2_1:
4513 	case WM_T_82543:
4514 	case WM_T_82540:
4515 	case WM_T_82545:
4516 	case WM_T_82546:
4517 	case WM_T_82571:
4518 	case WM_T_82572:
4519 	case WM_T_82573:
4520 	case WM_T_82574:
4521 	case WM_T_82575:
4522 	case WM_T_82576:
4523 	case WM_T_82583:
4524 	default:
4525 		/* Everything else can safely use the documented method. */
4526 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4527 		break;
4528 	}
4529 
4530 	/* Must release the MDIO ownership after MAC reset */
4531 	switch (sc->sc_type) {
4532 	case WM_T_82573:
4533 	case WM_T_82574:
4534 	case WM_T_82583:
4535 		if (error == 0)
4536 			wm_put_hw_semaphore_82573(sc);
4537 		break;
4538 	default:
4539 		break;
4540 	}
4541 
4542 	if (phy_reset != 0)
4543 		wm_get_cfg_done(sc);
4544 
4545 	/* reload EEPROM */
4546 	switch (sc->sc_type) {
4547 	case WM_T_82542_2_0:
4548 	case WM_T_82542_2_1:
4549 	case WM_T_82543:
4550 	case WM_T_82544:
4551 		delay(10);
4552 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4553 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4554 		CSR_WRITE_FLUSH(sc);
4555 		delay(2000);
4556 		break;
4557 	case WM_T_82540:
4558 	case WM_T_82545:
4559 	case WM_T_82545_3:
4560 	case WM_T_82546:
4561 	case WM_T_82546_3:
4562 		delay(5*1000);
4563 		/* XXX Disable HW ARPs on ASF enabled adapters */
4564 		break;
4565 	case WM_T_82541:
4566 	case WM_T_82541_2:
4567 	case WM_T_82547:
4568 	case WM_T_82547_2:
4569 		delay(20000);
4570 		/* XXX Disable HW ARPs on ASF enabled adapters */
4571 		break;
4572 	case WM_T_82571:
4573 	case WM_T_82572:
4574 	case WM_T_82573:
4575 	case WM_T_82574:
4576 	case WM_T_82583:
4577 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
4578 			delay(10);
4579 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4580 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4581 			CSR_WRITE_FLUSH(sc);
4582 		}
4583 		/* check EECD_EE_AUTORD */
4584 		wm_get_auto_rd_done(sc);
4585 		/*
4586 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
4587 		 * is set.
4588 		 */
4589 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4590 		    || (sc->sc_type == WM_T_82583))
4591 			delay(25*1000);
4592 		break;
4593 	case WM_T_82575:
4594 	case WM_T_82576:
4595 	case WM_T_82580:
4596 	case WM_T_I350:
4597 	case WM_T_I354:
4598 	case WM_T_I210:
4599 	case WM_T_I211:
4600 	case WM_T_80003:
4601 		/* check EECD_EE_AUTORD */
4602 		wm_get_auto_rd_done(sc);
4603 		break;
4604 	case WM_T_ICH8:
4605 	case WM_T_ICH9:
4606 	case WM_T_ICH10:
4607 	case WM_T_PCH:
4608 	case WM_T_PCH2:
4609 	case WM_T_PCH_LPT:
4610 	case WM_T_PCH_SPT:
4611 		break;
4612 	default:
4613 		panic("%s: unknown type\n", __func__);
4614 	}
4615 
4616 	/* Check whether EEPROM is present or not */
4617 	switch (sc->sc_type) {
4618 	case WM_T_82575:
4619 	case WM_T_82576:
4620 	case WM_T_82580:
4621 	case WM_T_I350:
4622 	case WM_T_I354:
4623 	case WM_T_ICH8:
4624 	case WM_T_ICH9:
4625 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4626 			/* Not found */
4627 			sc->sc_flags |= WM_F_EEPROM_INVALID;
4628 			if (sc->sc_type == WM_T_82575)
4629 				wm_reset_init_script_82575(sc);
4630 		}
4631 		break;
4632 	default:
4633 		break;
4634 	}
4635 
4636 	if (phy_reset != 0)
4637 		wm_phy_post_reset(sc);
4638 
4639 	if ((sc->sc_type == WM_T_82580)
4640 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
4641 		/* clear global device reset status bit */
4642 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4643 	}
4644 
4645 	/* Clear any pending interrupt events. */
4646 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4647 	reg = CSR_READ(sc, WMREG_ICR);
4648 	if (wm_is_using_msix(sc)) {
4649 		if (sc->sc_type != WM_T_82574) {
4650 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4651 			CSR_WRITE(sc, WMREG_EIAC, 0);
4652 		} else
4653 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4654 	}
4655 
4656 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4657 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4658 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
4659 	    || (sc->sc_type == WM_T_PCH_SPT)) {
4660 		reg = CSR_READ(sc, WMREG_KABGTXD);
4661 		reg |= KABGTXD_BGSQLBIAS;
4662 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
4663 	}
4664 
4665 	/* reload sc_ctrl */
4666 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4667 
4668 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
4669 		wm_set_eee_i350(sc);
4670 
4671 	/*
4672 	 * For PCH, this write will make sure that any noise will be detected
4673 	 * as a CRC error and be dropped rather than show up as a bad packet
4674 	 * to the DMA engine
4675 	 */
4676 	if (sc->sc_type == WM_T_PCH)
4677 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4678 
4679 	if (sc->sc_type >= WM_T_82544)
4680 		CSR_WRITE(sc, WMREG_WUC, 0);
4681 
4682 	wm_reset_mdicnfg_82580(sc);
4683 
4684 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
4685 		wm_pll_workaround_i210(sc);
4686 
4687 	if (sc->sc_type == WM_T_80003) {
4688 		/* default to TRUE to enable the MDIC W/A */
4689 		sc->sc_flags |= WM_F_80003_MDIC_WA;
4690 
4691 		rv = wm_kmrn_readreg(sc,
4692 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
4693 		if (rv == 0) {
4694 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
4695 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
4696 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
4697 			else
4698 				sc->sc_flags |= WM_F_80003_MDIC_WA;
4699 		}
4700 	}
4701 }
4702 
4703 /*
4704  * wm_add_rxbuf:
4705  *
4706  *	Add a receive buffer to the indiciated descriptor.
4707  */
4708 static int
4709 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
4710 {
4711 	struct wm_softc *sc = rxq->rxq_sc;
4712 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
4713 	struct mbuf *m;
4714 	int error;
4715 
4716 	KASSERT(mutex_owned(rxq->rxq_lock));
4717 
4718 	MGETHDR(m, M_DONTWAIT, MT_DATA);
4719 	if (m == NULL)
4720 		return ENOBUFS;
4721 
4722 	MCLGET(m, M_DONTWAIT);
4723 	if ((m->m_flags & M_EXT) == 0) {
4724 		m_freem(m);
4725 		return ENOBUFS;
4726 	}
4727 
4728 	if (rxs->rxs_mbuf != NULL)
4729 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4730 
4731 	rxs->rxs_mbuf = m;
4732 
4733 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4734 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4735 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
4736 	if (error) {
4737 		/* XXX XXX XXX */
4738 		aprint_error_dev(sc->sc_dev,
4739 		    "unable to load rx DMA map %d, error = %d\n",
4740 		    idx, error);
4741 		panic("wm_add_rxbuf");
4742 	}
4743 
4744 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4745 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4746 
4747 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4748 		if ((sc->sc_rctl & RCTL_EN) != 0)
4749 			wm_init_rxdesc(rxq, idx);
4750 	} else
4751 		wm_init_rxdesc(rxq, idx);
4752 
4753 	return 0;
4754 }
4755 
4756 /*
4757  * wm_rxdrain:
4758  *
4759  *	Drain the receive queue.
4760  */
4761 static void
4762 wm_rxdrain(struct wm_rxqueue *rxq)
4763 {
4764 	struct wm_softc *sc = rxq->rxq_sc;
4765 	struct wm_rxsoft *rxs;
4766 	int i;
4767 
4768 	KASSERT(mutex_owned(rxq->rxq_lock));
4769 
4770 	for (i = 0; i < WM_NRXDESC; i++) {
4771 		rxs = &rxq->rxq_soft[i];
4772 		if (rxs->rxs_mbuf != NULL) {
4773 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4774 			m_freem(rxs->rxs_mbuf);
4775 			rxs->rxs_mbuf = NULL;
4776 		}
4777 	}
4778 }
4779 
4780 
4781 /*
4782  * XXX copy from FreeBSD's sys/net/rss_config.c
4783  */
4784 /*
4785  * RSS secret key, intended to prevent attacks on load-balancing.  Its
4786  * effectiveness may be limited by algorithm choice and available entropy
4787  * during the boot.
4788  *
4789  * XXXRW: And that we don't randomize it yet!
4790  *
4791  * This is the default Microsoft RSS specification key which is also
4792  * the Chelsio T5 firmware default key.
4793  */
4794 #define RSS_KEYSIZE 40
4795 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
4796 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
4797 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
4798 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
4799 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
4800 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
4801 };
4802 
4803 /*
4804  * Caller must pass an array of size sizeof(rss_key).
4805  *
4806  * XXX
4807  * As if_ixgbe may use this function, this function should not be
4808  * if_wm specific function.
4809  */
4810 static void
4811 wm_rss_getkey(uint8_t *key)
4812 {
4813 
4814 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
4815 }
4816 
4817 /*
4818  * Setup registers for RSS.
4819  *
4820  * XXX not yet VMDq support
4821  */
4822 static void
4823 wm_init_rss(struct wm_softc *sc)
4824 {
4825 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
4826 	int i;
4827 
4828 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
4829 
4830 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
4831 		int qid, reta_ent;
4832 
4833 		qid  = i % sc->sc_nqueues;
4834 		switch(sc->sc_type) {
4835 		case WM_T_82574:
4836 			reta_ent = __SHIFTIN(qid,
4837 			    RETA_ENT_QINDEX_MASK_82574);
4838 			break;
4839 		case WM_T_82575:
4840 			reta_ent = __SHIFTIN(qid,
4841 			    RETA_ENT_QINDEX1_MASK_82575);
4842 			break;
4843 		default:
4844 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
4845 			break;
4846 		}
4847 
4848 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
4849 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
4850 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
4851 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
4852 	}
4853 
4854 	wm_rss_getkey((uint8_t *)rss_key);
4855 	for (i = 0; i < RSSRK_NUM_REGS; i++)
4856 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
4857 
4858 	if (sc->sc_type == WM_T_82574)
4859 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
4860 	else
4861 		mrqc = MRQC_ENABLE_RSS_MQ;
4862 
4863 	/*
4864 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
4865 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
4866 	 */
4867 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
4868 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
4869 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
4870 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
4871 
4872 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
4873 }
4874 
4875 /*
4876  * Adjust TX and RX queue numbers which the system actulally uses.
4877  *
4878  * The numbers are affected by below parameters.
4879  *     - The nubmer of hardware queues
4880  *     - The number of MSI-X vectors (= "nvectors" argument)
4881  *     - ncpu
4882  */
4883 static void
4884 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
4885 {
4886 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
4887 
4888 	if (nvectors < 2) {
4889 		sc->sc_nqueues = 1;
4890 		return;
4891 	}
4892 
4893 	switch(sc->sc_type) {
4894 	case WM_T_82572:
4895 		hw_ntxqueues = 2;
4896 		hw_nrxqueues = 2;
4897 		break;
4898 	case WM_T_82574:
4899 		hw_ntxqueues = 2;
4900 		hw_nrxqueues = 2;
4901 		break;
4902 	case WM_T_82575:
4903 		hw_ntxqueues = 4;
4904 		hw_nrxqueues = 4;
4905 		break;
4906 	case WM_T_82576:
4907 		hw_ntxqueues = 16;
4908 		hw_nrxqueues = 16;
4909 		break;
4910 	case WM_T_82580:
4911 	case WM_T_I350:
4912 	case WM_T_I354:
4913 		hw_ntxqueues = 8;
4914 		hw_nrxqueues = 8;
4915 		break;
4916 	case WM_T_I210:
4917 		hw_ntxqueues = 4;
4918 		hw_nrxqueues = 4;
4919 		break;
4920 	case WM_T_I211:
4921 		hw_ntxqueues = 2;
4922 		hw_nrxqueues = 2;
4923 		break;
4924 		/*
4925 		 * As below ethernet controllers does not support MSI-X,
4926 		 * this driver let them not use multiqueue.
4927 		 *     - WM_T_80003
4928 		 *     - WM_T_ICH8
4929 		 *     - WM_T_ICH9
4930 		 *     - WM_T_ICH10
4931 		 *     - WM_T_PCH
4932 		 *     - WM_T_PCH2
4933 		 *     - WM_T_PCH_LPT
4934 		 */
4935 	default:
4936 		hw_ntxqueues = 1;
4937 		hw_nrxqueues = 1;
4938 		break;
4939 	}
4940 
4941 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
4942 
4943 	/*
4944 	 * As queues more than MSI-X vectors cannot improve scaling, we limit
4945 	 * the number of queues used actually.
4946 	 */
4947 	if (nvectors < hw_nqueues + 1) {
4948 		sc->sc_nqueues = nvectors - 1;
4949 	} else {
4950 		sc->sc_nqueues = hw_nqueues;
4951 	}
4952 
4953 	/*
4954 	 * As queues more then cpus cannot improve scaling, we limit
4955 	 * the number of queues used actually.
4956 	 */
4957 	if (ncpu < sc->sc_nqueues)
4958 		sc->sc_nqueues = ncpu;
4959 }
4960 
4961 static inline bool
4962 wm_is_using_msix(struct wm_softc *sc)
4963 {
4964 
4965 	return (sc->sc_nintrs > 1);
4966 }
4967 
4968 static inline bool
4969 wm_is_using_multiqueue(struct wm_softc *sc)
4970 {
4971 
4972 	return (sc->sc_nqueues > 1);
4973 }
4974 
4975 static int
4976 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
4977 {
4978 	struct wm_queue *wmq = &sc->sc_queue[qidx];
4979 	wmq->wmq_id = qidx;
4980 	wmq->wmq_intr_idx = intr_idx;
4981 	wmq->wmq_si = softint_establish(SOFTINT_NET
4982 #ifdef WM_MPSAFE
4983 	    | SOFTINT_MPSAFE
4984 #endif
4985 	    , wm_handle_queue, wmq);
4986 	if (wmq->wmq_si != NULL)
4987 		return 0;
4988 
4989 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
4990 	    wmq->wmq_id);
4991 
4992 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
4993 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
4994 	return ENOMEM;
4995 }
4996 
4997 /*
4998  * Both single interrupt MSI and INTx can use this function.
4999  */
5000 static int
5001 wm_setup_legacy(struct wm_softc *sc)
5002 {
5003 	pci_chipset_tag_t pc = sc->sc_pc;
5004 	const char *intrstr = NULL;
5005 	char intrbuf[PCI_INTRSTR_LEN];
5006 	int error;
5007 
5008 	error = wm_alloc_txrx_queues(sc);
5009 	if (error) {
5010 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
5011 		    error);
5012 		return ENOMEM;
5013 	}
5014 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
5015 	    sizeof(intrbuf));
5016 #ifdef WM_MPSAFE
5017 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
5018 #endif
5019 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
5020 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
5021 	if (sc->sc_ihs[0] == NULL) {
5022 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
5023 		    (pci_intr_type(pc, sc->sc_intrs[0])
5024 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
5025 		return ENOMEM;
5026 	}
5027 
5028 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
5029 	sc->sc_nintrs = 1;
5030 
5031 	return wm_softint_establish(sc, 0, 0);
5032 }
5033 
5034 static int
5035 wm_setup_msix(struct wm_softc *sc)
5036 {
5037 	void *vih;
5038 	kcpuset_t *affinity;
5039 	int qidx, error, intr_idx, txrx_established;
5040 	pci_chipset_tag_t pc = sc->sc_pc;
5041 	const char *intrstr = NULL;
5042 	char intrbuf[PCI_INTRSTR_LEN];
5043 	char intr_xname[INTRDEVNAMEBUF];
5044 
5045 	if (sc->sc_nqueues < ncpu) {
5046 		/*
5047 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
5048 		 * interrupts start from CPU#1.
5049 		 */
5050 		sc->sc_affinity_offset = 1;
5051 	} else {
5052 		/*
5053 		 * In this case, this device use all CPUs. So, we unify
5054 		 * affinitied cpu_index to msix vector number for readability.
5055 		 */
5056 		sc->sc_affinity_offset = 0;
5057 	}
5058 
5059 	error = wm_alloc_txrx_queues(sc);
5060 	if (error) {
5061 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
5062 		    error);
5063 		return ENOMEM;
5064 	}
5065 
5066 	kcpuset_create(&affinity, false);
5067 	intr_idx = 0;
5068 
5069 	/*
5070 	 * TX and RX
5071 	 */
5072 	txrx_established = 0;
5073 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5074 		struct wm_queue *wmq = &sc->sc_queue[qidx];
5075 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
5076 
5077 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
5078 		    sizeof(intrbuf));
5079 #ifdef WM_MPSAFE
5080 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
5081 		    PCI_INTR_MPSAFE, true);
5082 #endif
5083 		memset(intr_xname, 0, sizeof(intr_xname));
5084 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
5085 		    device_xname(sc->sc_dev), qidx);
5086 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
5087 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
5088 		if (vih == NULL) {
5089 			aprint_error_dev(sc->sc_dev,
5090 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
5091 			    intrstr ? " at " : "",
5092 			    intrstr ? intrstr : "");
5093 
5094 			goto fail;
5095 		}
5096 		kcpuset_zero(affinity);
5097 		/* Round-robin affinity */
5098 		kcpuset_set(affinity, affinity_to);
5099 		error = interrupt_distribute(vih, affinity, NULL);
5100 		if (error == 0) {
5101 			aprint_normal_dev(sc->sc_dev,
5102 			    "for TX and RX interrupting at %s affinity to %u\n",
5103 			    intrstr, affinity_to);
5104 		} else {
5105 			aprint_normal_dev(sc->sc_dev,
5106 			    "for TX and RX interrupting at %s\n", intrstr);
5107 		}
5108 		sc->sc_ihs[intr_idx] = vih;
5109 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
5110 			goto fail;
5111 		txrx_established++;
5112 		intr_idx++;
5113 	}
5114 
5115 	/*
5116 	 * LINK
5117 	 */
5118 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
5119 	    sizeof(intrbuf));
5120 #ifdef WM_MPSAFE
5121 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
5122 #endif
5123 	memset(intr_xname, 0, sizeof(intr_xname));
5124 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
5125 	    device_xname(sc->sc_dev));
5126 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
5127 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
5128 	if (vih == NULL) {
5129 		aprint_error_dev(sc->sc_dev,
5130 		    "unable to establish MSI-X(for LINK)%s%s\n",
5131 		    intrstr ? " at " : "",
5132 		    intrstr ? intrstr : "");
5133 
5134 		goto fail;
5135 	}
5136 	/* keep default affinity to LINK interrupt */
5137 	aprint_normal_dev(sc->sc_dev,
5138 	    "for LINK interrupting at %s\n", intrstr);
5139 	sc->sc_ihs[intr_idx] = vih;
5140 	sc->sc_link_intr_idx = intr_idx;
5141 
5142 	sc->sc_nintrs = sc->sc_nqueues + 1;
5143 	kcpuset_destroy(affinity);
5144 	return 0;
5145 
5146  fail:
5147 	for (qidx = 0; qidx < txrx_established; qidx++) {
5148 		struct wm_queue *wmq = &sc->sc_queue[qidx];
5149 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
5150 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
5151 	}
5152 
5153 	kcpuset_destroy(affinity);
5154 	return ENOMEM;
5155 }
5156 
5157 static void
5158 wm_unset_stopping_flags(struct wm_softc *sc)
5159 {
5160 	int i;
5161 
5162 	KASSERT(WM_CORE_LOCKED(sc));
5163 
5164 	/*
5165 	 * must unset stopping flags in ascending order.
5166 	 */
5167 	for(i = 0; i < sc->sc_nqueues; i++) {
5168 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5169 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5170 
5171 		mutex_enter(txq->txq_lock);
5172 		txq->txq_stopping = false;
5173 		mutex_exit(txq->txq_lock);
5174 
5175 		mutex_enter(rxq->rxq_lock);
5176 		rxq->rxq_stopping = false;
5177 		mutex_exit(rxq->rxq_lock);
5178 	}
5179 
5180 	sc->sc_core_stopping = false;
5181 }
5182 
5183 static void
5184 wm_set_stopping_flags(struct wm_softc *sc)
5185 {
5186 	int i;
5187 
5188 	KASSERT(WM_CORE_LOCKED(sc));
5189 
5190 	sc->sc_core_stopping = true;
5191 
5192 	/*
5193 	 * must set stopping flags in ascending order.
5194 	 */
5195 	for(i = 0; i < sc->sc_nqueues; i++) {
5196 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5197 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5198 
5199 		mutex_enter(rxq->rxq_lock);
5200 		rxq->rxq_stopping = true;
5201 		mutex_exit(rxq->rxq_lock);
5202 
5203 		mutex_enter(txq->txq_lock);
5204 		txq->txq_stopping = true;
5205 		mutex_exit(txq->txq_lock);
5206 	}
5207 }
5208 
5209 /*
5210  * write interrupt interval value to ITR or EITR
5211  */
5212 static void
5213 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
5214 {
5215 
5216 	if (!wmq->wmq_set_itr)
5217 		return;
5218 
5219 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5220 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
5221 
5222 		/*
5223 		 * 82575 doesn't have CNT_INGR field.
5224 		 * So, overwrite counter field by software.
5225 		 */
5226 		if (sc->sc_type == WM_T_82575)
5227 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
5228 		else
5229 			eitr |= EITR_CNT_INGR;
5230 
5231 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
5232 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
5233 		/*
5234 		 * 82574 has both ITR and EITR. SET EITR when we use
5235 		 * the multi queue function with MSI-X.
5236 		 */
5237 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
5238 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
5239 	} else {
5240 		KASSERT(wmq->wmq_id == 0);
5241 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
5242 	}
5243 
5244 	wmq->wmq_set_itr = false;
5245 }
5246 
5247 /*
5248  * TODO
5249  * Below dynamic calculation of itr is almost the same as linux igb,
5250  * however it does not fit to wm(4). So, we will have been disable AIM
5251  * until we will find appropriate calculation of itr.
5252  */
5253 /*
5254  * calculate interrupt interval value to be going to write register in
5255  * wm_itrs_writereg(). This function does not write ITR/EITR register.
5256  */
5257 static void
5258 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
5259 {
5260 #ifdef NOTYET
5261 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
5262 	struct wm_txqueue *txq = &wmq->wmq_txq;
5263 	uint32_t avg_size = 0;
5264 	uint32_t new_itr;
5265 
5266 	if (rxq->rxq_packets)
5267 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
5268 	if (txq->txq_packets)
5269 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
5270 
5271 	if (avg_size == 0) {
5272 		new_itr = 450; /* restore default value */
5273 		goto out;
5274 	}
5275 
5276 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
5277 	avg_size += 24;
5278 
5279 	/* Don't starve jumbo frames */
5280 	avg_size = min(avg_size, 3000);
5281 
5282 	/* Give a little boost to mid-size frames */
5283 	if ((avg_size > 300) && (avg_size < 1200))
5284 		new_itr = avg_size / 3;
5285 	else
5286 		new_itr = avg_size / 2;
5287 
5288 out:
5289 	/*
5290 	 * The usage of 82574 and 82575 EITR is different from otther NEWQUEUE
5291 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
5292 	 */
5293 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
5294 		new_itr *= 4;
5295 
5296 	if (new_itr != wmq->wmq_itr) {
5297 		wmq->wmq_itr = new_itr;
5298 		wmq->wmq_set_itr = true;
5299 	} else
5300 		wmq->wmq_set_itr = false;
5301 
5302 	rxq->rxq_packets = 0;
5303 	rxq->rxq_bytes = 0;
5304 	txq->txq_packets = 0;
5305 	txq->txq_bytes = 0;
5306 #endif
5307 }
5308 
5309 /*
5310  * wm_init:		[ifnet interface function]
5311  *
5312  *	Initialize the interface.
5313  */
5314 static int
5315 wm_init(struct ifnet *ifp)
5316 {
5317 	struct wm_softc *sc = ifp->if_softc;
5318 	int ret;
5319 
5320 	WM_CORE_LOCK(sc);
5321 	ret = wm_init_locked(ifp);
5322 	WM_CORE_UNLOCK(sc);
5323 
5324 	return ret;
5325 }
5326 
5327 static int
5328 wm_init_locked(struct ifnet *ifp)
5329 {
5330 	struct wm_softc *sc = ifp->if_softc;
5331 	int i, j, trynum, error = 0;
5332 	uint32_t reg;
5333 
5334 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5335 		device_xname(sc->sc_dev), __func__));
5336 	KASSERT(WM_CORE_LOCKED(sc));
5337 
5338 	/*
5339 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGMENT is set.
5340 	 * There is a small but measurable benefit to avoiding the adjusment
5341 	 * of the descriptor so that the headers are aligned, for normal mtu,
5342 	 * on such platforms.  One possibility is that the DMA itself is
5343 	 * slightly more efficient if the front of the entire packet (instead
5344 	 * of the front of the headers) is aligned.
5345 	 *
5346 	 * Note we must always set align_tweak to 0 if we are using
5347 	 * jumbo frames.
5348 	 */
5349 #ifdef __NO_STRICT_ALIGNMENT
5350 	sc->sc_align_tweak = 0;
5351 #else
5352 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
5353 		sc->sc_align_tweak = 0;
5354 	else
5355 		sc->sc_align_tweak = 2;
5356 #endif /* __NO_STRICT_ALIGNMENT */
5357 
5358 	/* Cancel any pending I/O. */
5359 	wm_stop_locked(ifp, 0);
5360 
5361 	/* update statistics before reset */
5362 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
5363 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
5364 
5365 	/* PCH_SPT hardware workaround */
5366 	if (sc->sc_type == WM_T_PCH_SPT)
5367 		wm_flush_desc_rings(sc);
5368 
5369 	/* Reset the chip to a known state. */
5370 	wm_reset(sc);
5371 
5372 	/*
5373 	 * AMT based hardware can now take control from firmware
5374 	 * Do this after reset.
5375 	 */
5376 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
5377 		wm_get_hw_control(sc);
5378 
5379 	if ((sc->sc_type == WM_T_PCH_SPT) &&
5380 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
5381 		wm_legacy_irq_quirk_spt(sc);
5382 
5383 	/* Init hardware bits */
5384 	wm_initialize_hardware_bits(sc);
5385 
5386 	/* Reset the PHY. */
5387 	if (sc->sc_flags & WM_F_HAS_MII)
5388 		wm_gmii_reset(sc);
5389 
5390 	/* Calculate (E)ITR value */
5391 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
5392 		/*
5393 		 * For NEWQUEUE's EITR (except for 82575).
5394 		 * 82575's EITR should be set same throttling value as other
5395 		 * old controllers' ITR because the interrupt/sec calculation
5396 		 * is the same, that is, 1,000,000,000 / (N * 256).
5397 		 *
5398 		 * 82574's EITR should be set same throttling value as ITR.
5399 		 *
5400 		 * For N interrupts/sec, set this value to:
5401 		 * 1,000,000 / N in contrast to ITR throttoling value.
5402 		 */
5403 		sc->sc_itr_init = 450;
5404 	} else if (sc->sc_type >= WM_T_82543) {
5405 		/*
5406 		 * Set up the interrupt throttling register (units of 256ns)
5407 		 * Note that a footnote in Intel's documentation says this
5408 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
5409 		 * or 10Mbit mode.  Empirically, it appears to be the case
5410 		 * that that is also true for the 1024ns units of the other
5411 		 * interrupt-related timer registers -- so, really, we ought
5412 		 * to divide this value by 4 when the link speed is low.
5413 		 *
5414 		 * XXX implement this division at link speed change!
5415 		 */
5416 
5417 		/*
5418 		 * For N interrupts/sec, set this value to:
5419 		 * 1,000,000,000 / (N * 256).  Note that we set the
5420 		 * absolute and packet timer values to this value
5421 		 * divided by 4 to get "simple timer" behavior.
5422 		 */
5423 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
5424 	}
5425 
5426 	error = wm_init_txrx_queues(sc);
5427 	if (error)
5428 		goto out;
5429 
5430 	/*
5431 	 * Clear out the VLAN table -- we don't use it (yet).
5432 	 */
5433 	CSR_WRITE(sc, WMREG_VET, 0);
5434 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
5435 		trynum = 10; /* Due to hw errata */
5436 	else
5437 		trynum = 1;
5438 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
5439 		for (j = 0; j < trynum; j++)
5440 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
5441 
5442 	/*
5443 	 * Set up flow-control parameters.
5444 	 *
5445 	 * XXX Values could probably stand some tuning.
5446 	 */
5447 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
5448 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
5449 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
5450 	    && (sc->sc_type != WM_T_PCH_SPT)) {
5451 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
5452 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
5453 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
5454 	}
5455 
5456 	sc->sc_fcrtl = FCRTL_DFLT;
5457 	if (sc->sc_type < WM_T_82543) {
5458 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
5459 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
5460 	} else {
5461 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
5462 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
5463 	}
5464 
5465 	if (sc->sc_type == WM_T_80003)
5466 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
5467 	else
5468 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
5469 
5470 	/* Writes the control register. */
5471 	wm_set_vlan(sc);
5472 
5473 	if (sc->sc_flags & WM_F_HAS_MII) {
5474 		uint16_t kmreg;
5475 
5476 		switch (sc->sc_type) {
5477 		case WM_T_80003:
5478 		case WM_T_ICH8:
5479 		case WM_T_ICH9:
5480 		case WM_T_ICH10:
5481 		case WM_T_PCH:
5482 		case WM_T_PCH2:
5483 		case WM_T_PCH_LPT:
5484 		case WM_T_PCH_SPT:
5485 			/*
5486 			 * Set the mac to wait the maximum time between each
5487 			 * iteration and increase the max iterations when
5488 			 * polling the phy; this fixes erroneous timeouts at
5489 			 * 10Mbps.
5490 			 */
5491 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
5492 			    0xFFFF);
5493 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
5494 			    &kmreg);
5495 			kmreg |= 0x3F;
5496 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
5497 			    kmreg);
5498 			break;
5499 		default:
5500 			break;
5501 		}
5502 
5503 		if (sc->sc_type == WM_T_80003) {
5504 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
5505 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
5506 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5507 
5508 			/* Bypass RX and TX FIFO's */
5509 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
5510 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
5511 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
5512 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
5513 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
5514 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
5515 		}
5516 	}
5517 #if 0
5518 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
5519 #endif
5520 
5521 	/* Set up checksum offload parameters. */
5522 	reg = CSR_READ(sc, WMREG_RXCSUM);
5523 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
5524 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
5525 		reg |= RXCSUM_IPOFL;
5526 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
5527 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
5528 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
5529 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
5530 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
5531 
5532 	/* Set registers about MSI-X */
5533 	if (wm_is_using_msix(sc)) {
5534 		uint32_t ivar;
5535 		struct wm_queue *wmq;
5536 		int qid, qintr_idx;
5537 
5538 		if (sc->sc_type == WM_T_82575) {
5539 			/* Interrupt control */
5540 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
5541 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
5542 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5543 
5544 			/* TX and RX */
5545 			for (i = 0; i < sc->sc_nqueues; i++) {
5546 				wmq = &sc->sc_queue[i];
5547 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
5548 				    EITR_TX_QUEUE(wmq->wmq_id)
5549 				    | EITR_RX_QUEUE(wmq->wmq_id));
5550 			}
5551 			/* Link status */
5552 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
5553 			    EITR_OTHER);
5554 		} else if (sc->sc_type == WM_T_82574) {
5555 			/* Interrupt control */
5556 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
5557 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
5558 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5559 
5560 			/*
5561 			 * workaround issue with spurious interrupts
5562 			 * in MSI-X mode.
5563 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
5564 			 * initialized yet. So re-initialize WMREG_RFCTL here.
5565 			 */
5566 			reg = CSR_READ(sc, WMREG_RFCTL);
5567 			reg |= WMREG_RFCTL_ACKDIS;
5568 			CSR_WRITE(sc, WMREG_RFCTL, reg);
5569 
5570 			ivar = 0;
5571 			/* TX and RX */
5572 			for (i = 0; i < sc->sc_nqueues; i++) {
5573 				wmq = &sc->sc_queue[i];
5574 				qid = wmq->wmq_id;
5575 				qintr_idx = wmq->wmq_intr_idx;
5576 
5577 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
5578 				    IVAR_TX_MASK_Q_82574(qid));
5579 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
5580 				    IVAR_RX_MASK_Q_82574(qid));
5581 			}
5582 			/* Link status */
5583 			ivar |= __SHIFTIN((IVAR_VALID_82574
5584 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
5585 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
5586 		} else {
5587 			/* Interrupt control */
5588 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
5589 			    | GPIE_EIAME | GPIE_PBA);
5590 
5591 			switch (sc->sc_type) {
5592 			case WM_T_82580:
5593 			case WM_T_I350:
5594 			case WM_T_I354:
5595 			case WM_T_I210:
5596 			case WM_T_I211:
5597 				/* TX and RX */
5598 				for (i = 0; i < sc->sc_nqueues; i++) {
5599 					wmq = &sc->sc_queue[i];
5600 					qid = wmq->wmq_id;
5601 					qintr_idx = wmq->wmq_intr_idx;
5602 
5603 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
5604 					ivar &= ~IVAR_TX_MASK_Q(qid);
5605 					ivar |= __SHIFTIN((qintr_idx
5606 						| IVAR_VALID),
5607 					    IVAR_TX_MASK_Q(qid));
5608 					ivar &= ~IVAR_RX_MASK_Q(qid);
5609 					ivar |= __SHIFTIN((qintr_idx
5610 						| IVAR_VALID),
5611 					    IVAR_RX_MASK_Q(qid));
5612 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
5613 				}
5614 				break;
5615 			case WM_T_82576:
5616 				/* TX and RX */
5617 				for (i = 0; i < sc->sc_nqueues; i++) {
5618 					wmq = &sc->sc_queue[i];
5619 					qid = wmq->wmq_id;
5620 					qintr_idx = wmq->wmq_intr_idx;
5621 
5622 					ivar = CSR_READ(sc,
5623 					    WMREG_IVAR_Q_82576(qid));
5624 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
5625 					ivar |= __SHIFTIN((qintr_idx
5626 						| IVAR_VALID),
5627 					    IVAR_TX_MASK_Q_82576(qid));
5628 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
5629 					ivar |= __SHIFTIN((qintr_idx
5630 						| IVAR_VALID),
5631 					    IVAR_RX_MASK_Q_82576(qid));
5632 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
5633 					    ivar);
5634 				}
5635 				break;
5636 			default:
5637 				break;
5638 			}
5639 
5640 			/* Link status */
5641 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
5642 			    IVAR_MISC_OTHER);
5643 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
5644 		}
5645 
5646 		if (wm_is_using_multiqueue(sc)) {
5647 			wm_init_rss(sc);
5648 
5649 			/*
5650 			** NOTE: Receive Full-Packet Checksum Offload
5651 			** is mutually exclusive with Multiqueue. However
5652 			** this is not the same as TCP/IP checksums which
5653 			** still work.
5654 			*/
5655 			reg = CSR_READ(sc, WMREG_RXCSUM);
5656 			reg |= RXCSUM_PCSD;
5657 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
5658 		}
5659 	}
5660 
5661 	/* Set up the interrupt registers. */
5662 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5663 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
5664 	    ICR_RXO | ICR_RXT0;
5665 	if (wm_is_using_msix(sc)) {
5666 		uint32_t mask;
5667 		struct wm_queue *wmq;
5668 
5669 		switch (sc->sc_type) {
5670 		case WM_T_82574:
5671 			mask = 0;
5672 			for (i = 0; i < sc->sc_nqueues; i++) {
5673 				wmq = &sc->sc_queue[i];
5674 				mask |= ICR_TXQ(wmq->wmq_id);
5675 				mask |= ICR_RXQ(wmq->wmq_id);
5676 			}
5677 			mask |= ICR_OTHER;
5678 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
5679 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
5680 			break;
5681 		default:
5682 			if (sc->sc_type == WM_T_82575) {
5683 				mask = 0;
5684 				for (i = 0; i < sc->sc_nqueues; i++) {
5685 					wmq = &sc->sc_queue[i];
5686 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
5687 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
5688 				}
5689 				mask |= EITR_OTHER;
5690 			} else {
5691 				mask = 0;
5692 				for (i = 0; i < sc->sc_nqueues; i++) {
5693 					wmq = &sc->sc_queue[i];
5694 					mask |= 1 << wmq->wmq_intr_idx;
5695 				}
5696 				mask |= 1 << sc->sc_link_intr_idx;
5697 			}
5698 			CSR_WRITE(sc, WMREG_EIAC, mask);
5699 			CSR_WRITE(sc, WMREG_EIAM, mask);
5700 			CSR_WRITE(sc, WMREG_EIMS, mask);
5701 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
5702 			break;
5703 		}
5704 	} else
5705 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
5706 
5707 	/* Set up the inter-packet gap. */
5708 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
5709 
5710 	if (sc->sc_type >= WM_T_82543) {
5711 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5712 			struct wm_queue *wmq = &sc->sc_queue[qidx];
5713 			wm_itrs_writereg(sc, wmq);
5714 		}
5715 		/*
5716 		 * Link interrupts occur much less than TX
5717 		 * interrupts and RX interrupts. So, we don't
5718 		 * tune EINTR(WM_MSIX_LINKINTR_IDX) value like
5719 		 * FreeBSD's if_igb.
5720 		 */
5721 	}
5722 
5723 	/* Set the VLAN ethernetype. */
5724 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
5725 
5726 	/*
5727 	 * Set up the transmit control register; we start out with
5728 	 * a collision distance suitable for FDX, but update it whe
5729 	 * we resolve the media type.
5730 	 */
5731 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
5732 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
5733 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5734 	if (sc->sc_type >= WM_T_82571)
5735 		sc->sc_tctl |= TCTL_MULR;
5736 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5737 
5738 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5739 		/* Write TDT after TCTL.EN is set. See the document. */
5740 		CSR_WRITE(sc, WMREG_TDT(0), 0);
5741 	}
5742 
5743 	if (sc->sc_type == WM_T_80003) {
5744 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
5745 		reg &= ~TCTL_EXT_GCEX_MASK;
5746 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
5747 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
5748 	}
5749 
5750 	/* Set the media. */
5751 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
5752 		goto out;
5753 
5754 	/* Configure for OS presence */
5755 	wm_init_manageability(sc);
5756 
5757 	/*
5758 	 * Set up the receive control register; we actually program
5759 	 * the register when we set the receive filter.  Use multicast
5760 	 * address offset type 0.
5761 	 *
5762 	 * Only the i82544 has the ability to strip the incoming
5763 	 * CRC, so we don't enable that feature.
5764 	 */
5765 	sc->sc_mchash_type = 0;
5766 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
5767 	    | RCTL_MO(sc->sc_mchash_type);
5768 
5769 	/*
5770 	 * 82574 use one buffer extended Rx descriptor.
5771 	 */
5772 	if (sc->sc_type == WM_T_82574)
5773 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
5774 
5775 	/*
5776 	 * The I350 has a bug where it always strips the CRC whether
5777 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
5778 	 */
5779 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
5780 	    || (sc->sc_type == WM_T_I210))
5781 		sc->sc_rctl |= RCTL_SECRC;
5782 
5783 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
5784 	    && (ifp->if_mtu > ETHERMTU)) {
5785 		sc->sc_rctl |= RCTL_LPE;
5786 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5787 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
5788 	}
5789 
5790 	if (MCLBYTES == 2048) {
5791 		sc->sc_rctl |= RCTL_2k;
5792 	} else {
5793 		if (sc->sc_type >= WM_T_82543) {
5794 			switch (MCLBYTES) {
5795 			case 4096:
5796 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
5797 				break;
5798 			case 8192:
5799 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
5800 				break;
5801 			case 16384:
5802 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
5803 				break;
5804 			default:
5805 				panic("wm_init: MCLBYTES %d unsupported",
5806 				    MCLBYTES);
5807 				break;
5808 			}
5809 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
5810 	}
5811 
5812 	/* Enable ECC */
5813 	switch (sc->sc_type) {
5814 	case WM_T_82571:
5815 		reg = CSR_READ(sc, WMREG_PBA_ECC);
5816 		reg |= PBA_ECC_CORR_EN;
5817 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
5818 		break;
5819 	case WM_T_PCH_LPT:
5820 	case WM_T_PCH_SPT:
5821 		reg = CSR_READ(sc, WMREG_PBECCSTS);
5822 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
5823 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
5824 
5825 		sc->sc_ctrl |= CTRL_MEHE;
5826 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5827 		break;
5828 	default:
5829 		break;
5830 	}
5831 
5832 	/*
5833 	 * Set the receive filter.
5834 	 *
5835 	 * For 82575 and 82576, the RX descriptors must be initialized after
5836 	 * the setting of RCTL.EN in wm_set_filter()
5837 	 */
5838 	wm_set_filter(sc);
5839 
5840 	/* On 575 and later set RDT only if RX enabled */
5841 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5842 		int qidx;
5843 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5844 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
5845 			for (i = 0; i < WM_NRXDESC; i++) {
5846 				mutex_enter(rxq->rxq_lock);
5847 				wm_init_rxdesc(rxq, i);
5848 				mutex_exit(rxq->rxq_lock);
5849 
5850 			}
5851 		}
5852 	}
5853 
5854 	wm_unset_stopping_flags(sc);
5855 
5856 	/* Start the one second link check clock. */
5857 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
5858 
5859 	/* ...all done! */
5860 	ifp->if_flags |= IFF_RUNNING;
5861 	ifp->if_flags &= ~IFF_OACTIVE;
5862 
5863  out:
5864 	sc->sc_if_flags = ifp->if_flags;
5865 	if (error)
5866 		log(LOG_ERR, "%s: interface not running\n",
5867 		    device_xname(sc->sc_dev));
5868 	return error;
5869 }
5870 
5871 /*
5872  * wm_stop:		[ifnet interface function]
5873  *
5874  *	Stop transmission on the interface.
5875  */
5876 static void
5877 wm_stop(struct ifnet *ifp, int disable)
5878 {
5879 	struct wm_softc *sc = ifp->if_softc;
5880 
5881 	WM_CORE_LOCK(sc);
5882 	wm_stop_locked(ifp, disable);
5883 	WM_CORE_UNLOCK(sc);
5884 }
5885 
5886 static void
5887 wm_stop_locked(struct ifnet *ifp, int disable)
5888 {
5889 	struct wm_softc *sc = ifp->if_softc;
5890 	struct wm_txsoft *txs;
5891 	int i, qidx;
5892 
5893 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5894 		device_xname(sc->sc_dev), __func__));
5895 	KASSERT(WM_CORE_LOCKED(sc));
5896 
5897 	wm_set_stopping_flags(sc);
5898 
5899 	/* Stop the one second clock. */
5900 	callout_stop(&sc->sc_tick_ch);
5901 
5902 	/* Stop the 82547 Tx FIFO stall check timer. */
5903 	if (sc->sc_type == WM_T_82547)
5904 		callout_stop(&sc->sc_txfifo_ch);
5905 
5906 	if (sc->sc_flags & WM_F_HAS_MII) {
5907 		/* Down the MII. */
5908 		mii_down(&sc->sc_mii);
5909 	} else {
5910 #if 0
5911 		/* Should we clear PHY's status properly? */
5912 		wm_reset(sc);
5913 #endif
5914 	}
5915 
5916 	/* Stop the transmit and receive processes. */
5917 	CSR_WRITE(sc, WMREG_TCTL, 0);
5918 	CSR_WRITE(sc, WMREG_RCTL, 0);
5919 	sc->sc_rctl &= ~RCTL_EN;
5920 
5921 	/*
5922 	 * Clear the interrupt mask to ensure the device cannot assert its
5923 	 * interrupt line.
5924 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
5925 	 * service any currently pending or shared interrupt.
5926 	 */
5927 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5928 	sc->sc_icr = 0;
5929 	if (wm_is_using_msix(sc)) {
5930 		if (sc->sc_type != WM_T_82574) {
5931 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5932 			CSR_WRITE(sc, WMREG_EIAC, 0);
5933 		} else
5934 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5935 	}
5936 
5937 	/* Release any queued transmit buffers. */
5938 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5939 		struct wm_queue *wmq = &sc->sc_queue[qidx];
5940 		struct wm_txqueue *txq = &wmq->wmq_txq;
5941 		mutex_enter(txq->txq_lock);
5942 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5943 			txs = &txq->txq_soft[i];
5944 			if (txs->txs_mbuf != NULL) {
5945 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
5946 				m_freem(txs->txs_mbuf);
5947 				txs->txs_mbuf = NULL;
5948 			}
5949 		}
5950 		mutex_exit(txq->txq_lock);
5951 	}
5952 
5953 	/* Mark the interface as down and cancel the watchdog timer. */
5954 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
5955 	ifp->if_timer = 0;
5956 
5957 	if (disable) {
5958 		for (i = 0; i < sc->sc_nqueues; i++) {
5959 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5960 			mutex_enter(rxq->rxq_lock);
5961 			wm_rxdrain(rxq);
5962 			mutex_exit(rxq->rxq_lock);
5963 		}
5964 	}
5965 
5966 #if 0 /* notyet */
5967 	if (sc->sc_type >= WM_T_82544)
5968 		CSR_WRITE(sc, WMREG_WUC, 0);
5969 #endif
5970 }
5971 
5972 static void
5973 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
5974 {
5975 	struct mbuf *m;
5976 	int i;
5977 
5978 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
5979 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
5980 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
5981 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
5982 		    m->m_data, m->m_len, m->m_flags);
5983 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
5984 	    i, i == 1 ? "" : "s");
5985 }
5986 
5987 /*
5988  * wm_82547_txfifo_stall:
5989  *
5990  *	Callout used to wait for the 82547 Tx FIFO to drain,
5991  *	reset the FIFO pointers, and restart packet transmission.
5992  */
5993 static void
5994 wm_82547_txfifo_stall(void *arg)
5995 {
5996 	struct wm_softc *sc = arg;
5997 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
5998 
5999 	mutex_enter(txq->txq_lock);
6000 
6001 	if (txq->txq_stopping)
6002 		goto out;
6003 
6004 	if (txq->txq_fifo_stall) {
6005 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
6006 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
6007 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
6008 			/*
6009 			 * Packets have drained.  Stop transmitter, reset
6010 			 * FIFO pointers, restart transmitter, and kick
6011 			 * the packet queue.
6012 			 */
6013 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
6014 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
6015 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
6016 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
6017 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
6018 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
6019 			CSR_WRITE(sc, WMREG_TCTL, tctl);
6020 			CSR_WRITE_FLUSH(sc);
6021 
6022 			txq->txq_fifo_head = 0;
6023 			txq->txq_fifo_stall = 0;
6024 			wm_start_locked(&sc->sc_ethercom.ec_if);
6025 		} else {
6026 			/*
6027 			 * Still waiting for packets to drain; try again in
6028 			 * another tick.
6029 			 */
6030 			callout_schedule(&sc->sc_txfifo_ch, 1);
6031 		}
6032 	}
6033 
6034 out:
6035 	mutex_exit(txq->txq_lock);
6036 }
6037 
6038 /*
6039  * wm_82547_txfifo_bugchk:
6040  *
6041  *	Check for bug condition in the 82547 Tx FIFO.  We need to
6042  *	prevent enqueueing a packet that would wrap around the end
6043  *	if the Tx FIFO ring buffer, otherwise the chip will croak.
6044  *
6045  *	We do this by checking the amount of space before the end
6046  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
6047  *	the Tx FIFO, wait for all remaining packets to drain, reset
6048  *	the internal FIFO pointers to the beginning, and restart
6049  *	transmission on the interface.
6050  */
6051 #define	WM_FIFO_HDR		0x10
6052 #define	WM_82547_PAD_LEN	0x3e0
6053 static int
6054 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
6055 {
6056 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6057 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
6058 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
6059 
6060 	/* Just return if already stalled. */
6061 	if (txq->txq_fifo_stall)
6062 		return 1;
6063 
6064 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
6065 		/* Stall only occurs in half-duplex mode. */
6066 		goto send_packet;
6067 	}
6068 
6069 	if (len >= WM_82547_PAD_LEN + space) {
6070 		txq->txq_fifo_stall = 1;
6071 		callout_schedule(&sc->sc_txfifo_ch, 1);
6072 		return 1;
6073 	}
6074 
6075  send_packet:
6076 	txq->txq_fifo_head += len;
6077 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
6078 		txq->txq_fifo_head -= txq->txq_fifo_size;
6079 
6080 	return 0;
6081 }
6082 
6083 static int
6084 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
6085 {
6086 	int error;
6087 
6088 	/*
6089 	 * Allocate the control data structures, and create and load the
6090 	 * DMA map for it.
6091 	 *
6092 	 * NOTE: All Tx descriptors must be in the same 4G segment of
6093 	 * memory.  So must Rx descriptors.  We simplify by allocating
6094 	 * both sets within the same 4G segment.
6095 	 */
6096 	if (sc->sc_type < WM_T_82544)
6097 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
6098 	else
6099 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
6100 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6101 		txq->txq_descsize = sizeof(nq_txdesc_t);
6102 	else
6103 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
6104 
6105 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
6106 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
6107 		    1, &txq->txq_desc_rseg, 0)) != 0) {
6108 		aprint_error_dev(sc->sc_dev,
6109 		    "unable to allocate TX control data, error = %d\n",
6110 		    error);
6111 		goto fail_0;
6112 	}
6113 
6114 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
6115 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
6116 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
6117 		aprint_error_dev(sc->sc_dev,
6118 		    "unable to map TX control data, error = %d\n", error);
6119 		goto fail_1;
6120 	}
6121 
6122 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
6123 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
6124 		aprint_error_dev(sc->sc_dev,
6125 		    "unable to create TX control data DMA map, error = %d\n",
6126 		    error);
6127 		goto fail_2;
6128 	}
6129 
6130 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
6131 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
6132 		aprint_error_dev(sc->sc_dev,
6133 		    "unable to load TX control data DMA map, error = %d\n",
6134 		    error);
6135 		goto fail_3;
6136 	}
6137 
6138 	return 0;
6139 
6140  fail_3:
6141 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
6142  fail_2:
6143 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
6144 	    WM_TXDESCS_SIZE(txq));
6145  fail_1:
6146 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
6147  fail_0:
6148 	return error;
6149 }
6150 
6151 static void
6152 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
6153 {
6154 
6155 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
6156 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
6157 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
6158 	    WM_TXDESCS_SIZE(txq));
6159 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
6160 }
6161 
6162 static int
6163 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
6164 {
6165 	int error;
6166 	size_t rxq_descs_size;
6167 
6168 	/*
6169 	 * Allocate the control data structures, and create and load the
6170 	 * DMA map for it.
6171 	 *
6172 	 * NOTE: All Tx descriptors must be in the same 4G segment of
6173 	 * memory.  So must Rx descriptors.  We simplify by allocating
6174 	 * both sets within the same 4G segment.
6175 	 */
6176 	rxq->rxq_ndesc = WM_NRXDESC;
6177 	if (sc->sc_type == WM_T_82574)
6178 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
6179 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6180 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
6181 	else
6182 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
6183 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
6184 
6185 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
6186 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
6187 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
6188 		aprint_error_dev(sc->sc_dev,
6189 		    "unable to allocate RX control data, error = %d\n",
6190 		    error);
6191 		goto fail_0;
6192 	}
6193 
6194 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
6195 		    rxq->rxq_desc_rseg, rxq_descs_size,
6196 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
6197 		aprint_error_dev(sc->sc_dev,
6198 		    "unable to map RX control data, error = %d\n", error);
6199 		goto fail_1;
6200 	}
6201 
6202 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
6203 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
6204 		aprint_error_dev(sc->sc_dev,
6205 		    "unable to create RX control data DMA map, error = %d\n",
6206 		    error);
6207 		goto fail_2;
6208 	}
6209 
6210 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
6211 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
6212 		aprint_error_dev(sc->sc_dev,
6213 		    "unable to load RX control data DMA map, error = %d\n",
6214 		    error);
6215 		goto fail_3;
6216 	}
6217 
6218 	return 0;
6219 
6220  fail_3:
6221 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
6222  fail_2:
6223 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
6224 	    rxq_descs_size);
6225  fail_1:
6226 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
6227  fail_0:
6228 	return error;
6229 }
6230 
6231 static void
6232 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
6233 {
6234 
6235 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
6236 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
6237 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
6238 	    rxq->rxq_descsize * rxq->rxq_ndesc);
6239 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
6240 }
6241 
6242 
6243 static int
6244 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
6245 {
6246 	int i, error;
6247 
6248 	/* Create the transmit buffer DMA maps. */
6249 	WM_TXQUEUELEN(txq) =
6250 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
6251 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
6252 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6253 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
6254 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
6255 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
6256 			aprint_error_dev(sc->sc_dev,
6257 			    "unable to create Tx DMA map %d, error = %d\n",
6258 			    i, error);
6259 			goto fail;
6260 		}
6261 	}
6262 
6263 	return 0;
6264 
6265  fail:
6266 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6267 		if (txq->txq_soft[i].txs_dmamap != NULL)
6268 			bus_dmamap_destroy(sc->sc_dmat,
6269 			    txq->txq_soft[i].txs_dmamap);
6270 	}
6271 	return error;
6272 }
6273 
6274 static void
6275 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
6276 {
6277 	int i;
6278 
6279 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6280 		if (txq->txq_soft[i].txs_dmamap != NULL)
6281 			bus_dmamap_destroy(sc->sc_dmat,
6282 			    txq->txq_soft[i].txs_dmamap);
6283 	}
6284 }
6285 
6286 static int
6287 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
6288 {
6289 	int i, error;
6290 
6291 	/* Create the receive buffer DMA maps. */
6292 	for (i = 0; i < rxq->rxq_ndesc; i++) {
6293 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
6294 			    MCLBYTES, 0, 0,
6295 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
6296 			aprint_error_dev(sc->sc_dev,
6297 			    "unable to create Rx DMA map %d error = %d\n",
6298 			    i, error);
6299 			goto fail;
6300 		}
6301 		rxq->rxq_soft[i].rxs_mbuf = NULL;
6302 	}
6303 
6304 	return 0;
6305 
6306  fail:
6307 	for (i = 0; i < rxq->rxq_ndesc; i++) {
6308 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
6309 			bus_dmamap_destroy(sc->sc_dmat,
6310 			    rxq->rxq_soft[i].rxs_dmamap);
6311 	}
6312 	return error;
6313 }
6314 
6315 static void
6316 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
6317 {
6318 	int i;
6319 
6320 	for (i = 0; i < rxq->rxq_ndesc; i++) {
6321 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
6322 			bus_dmamap_destroy(sc->sc_dmat,
6323 			    rxq->rxq_soft[i].rxs_dmamap);
6324 	}
6325 }
6326 
6327 /*
6328  * wm_alloc_quques:
6329  *	Allocate {tx,rx}descs and {tx,rx} buffers
6330  */
6331 static int
6332 wm_alloc_txrx_queues(struct wm_softc *sc)
6333 {
6334 	int i, error, tx_done, rx_done;
6335 
6336 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
6337 	    KM_SLEEP);
6338 	if (sc->sc_queue == NULL) {
6339 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
6340 		error = ENOMEM;
6341 		goto fail_0;
6342 	}
6343 
6344 	/*
6345 	 * For transmission
6346 	 */
6347 	error = 0;
6348 	tx_done = 0;
6349 	for (i = 0; i < sc->sc_nqueues; i++) {
6350 #ifdef WM_EVENT_COUNTERS
6351 		int j;
6352 		const char *xname;
6353 #endif
6354 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6355 		txq->txq_sc = sc;
6356 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
6357 
6358 		error = wm_alloc_tx_descs(sc, txq);
6359 		if (error)
6360 			break;
6361 		error = wm_alloc_tx_buffer(sc, txq);
6362 		if (error) {
6363 			wm_free_tx_descs(sc, txq);
6364 			break;
6365 		}
6366 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
6367 		if (txq->txq_interq == NULL) {
6368 			wm_free_tx_descs(sc, txq);
6369 			wm_free_tx_buffer(sc, txq);
6370 			error = ENOMEM;
6371 			break;
6372 		}
6373 
6374 #ifdef WM_EVENT_COUNTERS
6375 		xname = device_xname(sc->sc_dev);
6376 
6377 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
6378 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
6379 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
6380 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
6381 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
6382 
6383 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
6384 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
6385 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
6386 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
6387 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
6388 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
6389 
6390 		for (j = 0; j < WM_NTXSEGS; j++) {
6391 			snprintf(txq->txq_txseg_evcnt_names[j],
6392 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
6393 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
6394 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
6395 		}
6396 
6397 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
6398 
6399 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
6400 #endif /* WM_EVENT_COUNTERS */
6401 
6402 		tx_done++;
6403 	}
6404 	if (error)
6405 		goto fail_1;
6406 
6407 	/*
6408 	 * For recieve
6409 	 */
6410 	error = 0;
6411 	rx_done = 0;
6412 	for (i = 0; i < sc->sc_nqueues; i++) {
6413 #ifdef WM_EVENT_COUNTERS
6414 		const char *xname;
6415 #endif
6416 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6417 		rxq->rxq_sc = sc;
6418 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
6419 
6420 		error = wm_alloc_rx_descs(sc, rxq);
6421 		if (error)
6422 			break;
6423 
6424 		error = wm_alloc_rx_buffer(sc, rxq);
6425 		if (error) {
6426 			wm_free_rx_descs(sc, rxq);
6427 			break;
6428 		}
6429 
6430 #ifdef WM_EVENT_COUNTERS
6431 		xname = device_xname(sc->sc_dev);
6432 
6433 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
6434 
6435 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
6436 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
6437 #endif /* WM_EVENT_COUNTERS */
6438 
6439 		rx_done++;
6440 	}
6441 	if (error)
6442 		goto fail_2;
6443 
6444 	return 0;
6445 
6446  fail_2:
6447 	for (i = 0; i < rx_done; i++) {
6448 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6449 		wm_free_rx_buffer(sc, rxq);
6450 		wm_free_rx_descs(sc, rxq);
6451 		if (rxq->rxq_lock)
6452 			mutex_obj_free(rxq->rxq_lock);
6453 	}
6454  fail_1:
6455 	for (i = 0; i < tx_done; i++) {
6456 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6457 		pcq_destroy(txq->txq_interq);
6458 		wm_free_tx_buffer(sc, txq);
6459 		wm_free_tx_descs(sc, txq);
6460 		if (txq->txq_lock)
6461 			mutex_obj_free(txq->txq_lock);
6462 	}
6463 
6464 	kmem_free(sc->sc_queue,
6465 	    sizeof(struct wm_queue) * sc->sc_nqueues);
6466  fail_0:
6467 	return error;
6468 }
6469 
6470 /*
6471  * wm_free_quques:
6472  *	Free {tx,rx}descs and {tx,rx} buffers
6473  */
6474 static void
6475 wm_free_txrx_queues(struct wm_softc *sc)
6476 {
6477 	int i;
6478 
6479 	for (i = 0; i < sc->sc_nqueues; i++) {
6480 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6481 
6482 #ifdef WM_EVENT_COUNTERS
6483 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
6484 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
6485 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
6486 #endif /* WM_EVENT_COUNTERS */
6487 
6488 		wm_free_rx_buffer(sc, rxq);
6489 		wm_free_rx_descs(sc, rxq);
6490 		if (rxq->rxq_lock)
6491 			mutex_obj_free(rxq->rxq_lock);
6492 	}
6493 
6494 	for (i = 0; i < sc->sc_nqueues; i++) {
6495 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6496 		struct mbuf *m;
6497 #ifdef WM_EVENT_COUNTERS
6498 		int j;
6499 
6500 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
6501 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
6502 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
6503 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
6504 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
6505 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
6506 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
6507 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
6508 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
6509 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
6510 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
6511 
6512 		for (j = 0; j < WM_NTXSEGS; j++)
6513 			evcnt_detach(&txq->txq_ev_txseg[j]);
6514 
6515 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
6516 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
6517 #endif /* WM_EVENT_COUNTERS */
6518 
6519 		/* drain txq_interq */
6520 		while ((m = pcq_get(txq->txq_interq)) != NULL)
6521 			m_freem(m);
6522 		pcq_destroy(txq->txq_interq);
6523 
6524 		wm_free_tx_buffer(sc, txq);
6525 		wm_free_tx_descs(sc, txq);
6526 		if (txq->txq_lock)
6527 			mutex_obj_free(txq->txq_lock);
6528 	}
6529 
6530 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
6531 }
6532 
6533 static void
6534 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
6535 {
6536 
6537 	KASSERT(mutex_owned(txq->txq_lock));
6538 
6539 	/* Initialize the transmit descriptor ring. */
6540 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
6541 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
6542 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
6543 	txq->txq_free = WM_NTXDESC(txq);
6544 	txq->txq_next = 0;
6545 }
6546 
6547 static void
6548 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
6549     struct wm_txqueue *txq)
6550 {
6551 
6552 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
6553 		device_xname(sc->sc_dev), __func__));
6554 	KASSERT(mutex_owned(txq->txq_lock));
6555 
6556 	if (sc->sc_type < WM_T_82543) {
6557 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
6558 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
6559 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
6560 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
6561 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
6562 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
6563 	} else {
6564 		int qid = wmq->wmq_id;
6565 
6566 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
6567 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
6568 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
6569 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
6570 
6571 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6572 			/*
6573 			 * Don't write TDT before TCTL.EN is set.
6574 			 * See the document.
6575 			 */
6576 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
6577 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
6578 			    | TXDCTL_WTHRESH(0));
6579 		else {
6580 			/* XXX should update with AIM? */
6581 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
6582 			if (sc->sc_type >= WM_T_82540) {
6583 				/* should be same */
6584 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
6585 			}
6586 
6587 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
6588 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
6589 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
6590 		}
6591 	}
6592 }
6593 
6594 static void
6595 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
6596 {
6597 	int i;
6598 
6599 	KASSERT(mutex_owned(txq->txq_lock));
6600 
6601 	/* Initialize the transmit job descriptors. */
6602 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
6603 		txq->txq_soft[i].txs_mbuf = NULL;
6604 	txq->txq_sfree = WM_TXQUEUELEN(txq);
6605 	txq->txq_snext = 0;
6606 	txq->txq_sdirty = 0;
6607 }
6608 
6609 static void
6610 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
6611     struct wm_txqueue *txq)
6612 {
6613 
6614 	KASSERT(mutex_owned(txq->txq_lock));
6615 
6616 	/*
6617 	 * Set up some register offsets that are different between
6618 	 * the i82542 and the i82543 and later chips.
6619 	 */
6620 	if (sc->sc_type < WM_T_82543)
6621 		txq->txq_tdt_reg = WMREG_OLD_TDT;
6622 	else
6623 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
6624 
6625 	wm_init_tx_descs(sc, txq);
6626 	wm_init_tx_regs(sc, wmq, txq);
6627 	wm_init_tx_buffer(sc, txq);
6628 }
6629 
6630 static void
6631 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
6632     struct wm_rxqueue *rxq)
6633 {
6634 
6635 	KASSERT(mutex_owned(rxq->rxq_lock));
6636 
6637 	/*
6638 	 * Initialize the receive descriptor and receive job
6639 	 * descriptor rings.
6640 	 */
6641 	if (sc->sc_type < WM_T_82543) {
6642 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
6643 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
6644 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
6645 		    rxq->rxq_descsize * rxq->rxq_ndesc);
6646 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
6647 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
6648 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
6649 
6650 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
6651 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
6652 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
6653 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
6654 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
6655 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
6656 	} else {
6657 		int qid = wmq->wmq_id;
6658 
6659 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
6660 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
6661 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_descsize * rxq->rxq_ndesc);
6662 
6663 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6664 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
6665 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
6666 
6667 			/* Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF only. */
6668 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
6669 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
6670 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
6671 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
6672 			    | RXDCTL_WTHRESH(1));
6673 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
6674 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
6675 		} else {
6676 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
6677 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
6678 			/* XXX should update with AIM? */
6679 			CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
6680 			/* MUST be same */
6681 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
6682 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
6683 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
6684 		}
6685 	}
6686 }
6687 
6688 static int
6689 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
6690 {
6691 	struct wm_rxsoft *rxs;
6692 	int error, i;
6693 
6694 	KASSERT(mutex_owned(rxq->rxq_lock));
6695 
6696 	for (i = 0; i < rxq->rxq_ndesc; i++) {
6697 		rxs = &rxq->rxq_soft[i];
6698 		if (rxs->rxs_mbuf == NULL) {
6699 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
6700 				log(LOG_ERR, "%s: unable to allocate or map "
6701 				    "rx buffer %d, error = %d\n",
6702 				    device_xname(sc->sc_dev), i, error);
6703 				/*
6704 				 * XXX Should attempt to run with fewer receive
6705 				 * XXX buffers instead of just failing.
6706 				 */
6707 				wm_rxdrain(rxq);
6708 				return ENOMEM;
6709 			}
6710 		} else {
6711 			/*
6712 			 * For 82575 and 82576, the RX descriptors must be
6713 			 * initialized after the setting of RCTL.EN in
6714 			 * wm_set_filter()
6715 			 */
6716 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
6717 				wm_init_rxdesc(rxq, i);
6718 		}
6719 	}
6720 	rxq->rxq_ptr = 0;
6721 	rxq->rxq_discard = 0;
6722 	WM_RXCHAIN_RESET(rxq);
6723 
6724 	return 0;
6725 }
6726 
6727 static int
6728 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
6729     struct wm_rxqueue *rxq)
6730 {
6731 
6732 	KASSERT(mutex_owned(rxq->rxq_lock));
6733 
6734 	/*
6735 	 * Set up some register offsets that are different between
6736 	 * the i82542 and the i82543 and later chips.
6737 	 */
6738 	if (sc->sc_type < WM_T_82543)
6739 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
6740 	else
6741 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
6742 
6743 	wm_init_rx_regs(sc, wmq, rxq);
6744 	return wm_init_rx_buffer(sc, rxq);
6745 }
6746 
6747 /*
6748  * wm_init_quques:
6749  *	Initialize {tx,rx}descs and {tx,rx} buffers
6750  */
6751 static int
6752 wm_init_txrx_queues(struct wm_softc *sc)
6753 {
6754 	int i, error = 0;
6755 
6756 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
6757 		device_xname(sc->sc_dev), __func__));
6758 
6759 	for (i = 0; i < sc->sc_nqueues; i++) {
6760 		struct wm_queue *wmq = &sc->sc_queue[i];
6761 		struct wm_txqueue *txq = &wmq->wmq_txq;
6762 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
6763 
6764 		/*
6765 		 * TODO
6766 		 * Currently, use constant variable instead of AIM.
6767 		 * Furthermore, the interrupt interval of multiqueue which use
6768 		 * polling mode is less than default value.
6769 		 * More tuning and AIM are required.
6770 		 */
6771 		if (wm_is_using_multiqueue(sc))
6772 			wmq->wmq_itr = 50;
6773 		else
6774 			wmq->wmq_itr = sc->sc_itr_init;
6775 		wmq->wmq_set_itr = true;
6776 
6777 		mutex_enter(txq->txq_lock);
6778 		wm_init_tx_queue(sc, wmq, txq);
6779 		mutex_exit(txq->txq_lock);
6780 
6781 		mutex_enter(rxq->rxq_lock);
6782 		error = wm_init_rx_queue(sc, wmq, rxq);
6783 		mutex_exit(rxq->rxq_lock);
6784 		if (error)
6785 			break;
6786 	}
6787 
6788 	return error;
6789 }
6790 
6791 /*
6792  * wm_tx_offload:
6793  *
6794  *	Set up TCP/IP checksumming parameters for the
6795  *	specified packet.
6796  */
6797 static int
6798 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
6799     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
6800 {
6801 	struct mbuf *m0 = txs->txs_mbuf;
6802 	struct livengood_tcpip_ctxdesc *t;
6803 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
6804 	uint32_t ipcse;
6805 	struct ether_header *eh;
6806 	int offset, iphl;
6807 	uint8_t fields;
6808 
6809 	/*
6810 	 * XXX It would be nice if the mbuf pkthdr had offset
6811 	 * fields for the protocol headers.
6812 	 */
6813 
6814 	eh = mtod(m0, struct ether_header *);
6815 	switch (htons(eh->ether_type)) {
6816 	case ETHERTYPE_IP:
6817 	case ETHERTYPE_IPV6:
6818 		offset = ETHER_HDR_LEN;
6819 		break;
6820 
6821 	case ETHERTYPE_VLAN:
6822 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
6823 		break;
6824 
6825 	default:
6826 		/*
6827 		 * Don't support this protocol or encapsulation.
6828 		 */
6829 		*fieldsp = 0;
6830 		*cmdp = 0;
6831 		return 0;
6832 	}
6833 
6834 	if ((m0->m_pkthdr.csum_flags &
6835 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
6836 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
6837 	} else {
6838 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
6839 	}
6840 	ipcse = offset + iphl - 1;
6841 
6842 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
6843 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
6844 	seg = 0;
6845 	fields = 0;
6846 
6847 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
6848 		int hlen = offset + iphl;
6849 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
6850 
6851 		if (__predict_false(m0->m_len <
6852 				    (hlen + sizeof(struct tcphdr)))) {
6853 			/*
6854 			 * TCP/IP headers are not in the first mbuf; we need
6855 			 * to do this the slow and painful way.  Let's just
6856 			 * hope this doesn't happen very often.
6857 			 */
6858 			struct tcphdr th;
6859 
6860 			WM_Q_EVCNT_INCR(txq, txtsopain);
6861 
6862 			m_copydata(m0, hlen, sizeof(th), &th);
6863 			if (v4) {
6864 				struct ip ip;
6865 
6866 				m_copydata(m0, offset, sizeof(ip), &ip);
6867 				ip.ip_len = 0;
6868 				m_copyback(m0,
6869 				    offset + offsetof(struct ip, ip_len),
6870 				    sizeof(ip.ip_len), &ip.ip_len);
6871 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
6872 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
6873 			} else {
6874 				struct ip6_hdr ip6;
6875 
6876 				m_copydata(m0, offset, sizeof(ip6), &ip6);
6877 				ip6.ip6_plen = 0;
6878 				m_copyback(m0,
6879 				    offset + offsetof(struct ip6_hdr, ip6_plen),
6880 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
6881 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
6882 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
6883 			}
6884 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
6885 			    sizeof(th.th_sum), &th.th_sum);
6886 
6887 			hlen += th.th_off << 2;
6888 		} else {
6889 			/*
6890 			 * TCP/IP headers are in the first mbuf; we can do
6891 			 * this the easy way.
6892 			 */
6893 			struct tcphdr *th;
6894 
6895 			if (v4) {
6896 				struct ip *ip =
6897 				    (void *)(mtod(m0, char *) + offset);
6898 				th = (void *)(mtod(m0, char *) + hlen);
6899 
6900 				ip->ip_len = 0;
6901 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
6902 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
6903 			} else {
6904 				struct ip6_hdr *ip6 =
6905 				    (void *)(mtod(m0, char *) + offset);
6906 				th = (void *)(mtod(m0, char *) + hlen);
6907 
6908 				ip6->ip6_plen = 0;
6909 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
6910 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
6911 			}
6912 			hlen += th->th_off << 2;
6913 		}
6914 
6915 		if (v4) {
6916 			WM_Q_EVCNT_INCR(txq, txtso);
6917 			cmdlen |= WTX_TCPIP_CMD_IP;
6918 		} else {
6919 			WM_Q_EVCNT_INCR(txq, txtso6);
6920 			ipcse = 0;
6921 		}
6922 		cmd |= WTX_TCPIP_CMD_TSE;
6923 		cmdlen |= WTX_TCPIP_CMD_TSE |
6924 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
6925 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
6926 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
6927 	}
6928 
6929 	/*
6930 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
6931 	 * offload feature, if we load the context descriptor, we
6932 	 * MUST provide valid values for IPCSS and TUCSS fields.
6933 	 */
6934 
6935 	ipcs = WTX_TCPIP_IPCSS(offset) |
6936 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
6937 	    WTX_TCPIP_IPCSE(ipcse);
6938 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
6939 		WM_Q_EVCNT_INCR(txq, txipsum);
6940 		fields |= WTX_IXSM;
6941 	}
6942 
6943 	offset += iphl;
6944 
6945 	if (m0->m_pkthdr.csum_flags &
6946 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
6947 		WM_Q_EVCNT_INCR(txq, txtusum);
6948 		fields |= WTX_TXSM;
6949 		tucs = WTX_TCPIP_TUCSS(offset) |
6950 		    WTX_TCPIP_TUCSO(offset +
6951 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
6952 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
6953 	} else if ((m0->m_pkthdr.csum_flags &
6954 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
6955 		WM_Q_EVCNT_INCR(txq, txtusum6);
6956 		fields |= WTX_TXSM;
6957 		tucs = WTX_TCPIP_TUCSS(offset) |
6958 		    WTX_TCPIP_TUCSO(offset +
6959 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
6960 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
6961 	} else {
6962 		/* Just initialize it to a valid TCP context. */
6963 		tucs = WTX_TCPIP_TUCSS(offset) |
6964 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
6965 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
6966 	}
6967 
6968 	/*
6969 	 * We don't have to write context descriptor for every packet
6970 	 * except for 82574. For 82574, we must write context descriptor
6971 	 * for every packet when we use two descriptor queues.
6972 	 * It would be overhead to write context descriptor for every packet,
6973 	 * however it does not cause problems.
6974 	 */
6975 	/* Fill in the context descriptor. */
6976 	t = (struct livengood_tcpip_ctxdesc *)
6977 	    &txq->txq_descs[txq->txq_next];
6978 	t->tcpip_ipcs = htole32(ipcs);
6979 	t->tcpip_tucs = htole32(tucs);
6980 	t->tcpip_cmdlen = htole32(cmdlen);
6981 	t->tcpip_seg = htole32(seg);
6982 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6983 
6984 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6985 	txs->txs_ndesc++;
6986 
6987 	*cmdp = cmd;
6988 	*fieldsp = fields;
6989 
6990 	return 0;
6991 }
6992 
6993 static inline int
6994 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
6995 {
6996 	struct wm_softc *sc = ifp->if_softc;
6997 	u_int cpuid = cpu_index(curcpu());
6998 
6999 	/*
7000 	 * Currently, simple distribute strategy.
7001 	 * TODO:
7002 	 * distribute by flowid(RSS has value).
7003 	 */
7004         return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
7005 }
7006 
7007 /*
7008  * wm_start:		[ifnet interface function]
7009  *
7010  *	Start packet transmission on the interface.
7011  */
7012 static void
7013 wm_start(struct ifnet *ifp)
7014 {
7015 	struct wm_softc *sc = ifp->if_softc;
7016 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7017 
7018 #ifdef WM_MPSAFE
7019 	KASSERT(if_is_mpsafe(ifp));
7020 #endif
7021 	/*
7022 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
7023 	 */
7024 
7025 	mutex_enter(txq->txq_lock);
7026 	if (!txq->txq_stopping)
7027 		wm_start_locked(ifp);
7028 	mutex_exit(txq->txq_lock);
7029 }
7030 
7031 static void
7032 wm_start_locked(struct ifnet *ifp)
7033 {
7034 	struct wm_softc *sc = ifp->if_softc;
7035 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7036 
7037 	wm_send_common_locked(ifp, txq, false);
7038 }
7039 
7040 static int
7041 wm_transmit(struct ifnet *ifp, struct mbuf *m)
7042 {
7043 	int qid;
7044 	struct wm_softc *sc = ifp->if_softc;
7045 	struct wm_txqueue *txq;
7046 
7047 	qid = wm_select_txqueue(ifp, m);
7048 	txq = &sc->sc_queue[qid].wmq_txq;
7049 
7050 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
7051 		m_freem(m);
7052 		WM_Q_EVCNT_INCR(txq, txdrop);
7053 		return ENOBUFS;
7054 	}
7055 
7056 	/*
7057 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
7058 	 */
7059 	ifp->if_obytes += m->m_pkthdr.len;
7060 	if (m->m_flags & M_MCAST)
7061 		ifp->if_omcasts++;
7062 
7063 	if (mutex_tryenter(txq->txq_lock)) {
7064 		if (!txq->txq_stopping)
7065 			wm_transmit_locked(ifp, txq);
7066 		mutex_exit(txq->txq_lock);
7067 	}
7068 
7069 	return 0;
7070 }
7071 
7072 static void
7073 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
7074 {
7075 
7076 	wm_send_common_locked(ifp, txq, true);
7077 }
7078 
7079 static void
7080 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
7081     bool is_transmit)
7082 {
7083 	struct wm_softc *sc = ifp->if_softc;
7084 	struct mbuf *m0;
7085 	struct wm_txsoft *txs;
7086 	bus_dmamap_t dmamap;
7087 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
7088 	bus_addr_t curaddr;
7089 	bus_size_t seglen, curlen;
7090 	uint32_t cksumcmd;
7091 	uint8_t cksumfields;
7092 
7093 	KASSERT(mutex_owned(txq->txq_lock));
7094 
7095 	if ((ifp->if_flags & IFF_RUNNING) == 0)
7096 		return;
7097 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
7098 		return;
7099 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
7100 		return;
7101 
7102 	/* Remember the previous number of free descriptors. */
7103 	ofree = txq->txq_free;
7104 
7105 	/*
7106 	 * Loop through the send queue, setting up transmit descriptors
7107 	 * until we drain the queue, or use up all available transmit
7108 	 * descriptors.
7109 	 */
7110 	for (;;) {
7111 		m0 = NULL;
7112 
7113 		/* Get a work queue entry. */
7114 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
7115 			wm_txeof(sc, txq);
7116 			if (txq->txq_sfree == 0) {
7117 				DPRINTF(WM_DEBUG_TX,
7118 				    ("%s: TX: no free job descriptors\n",
7119 					device_xname(sc->sc_dev)));
7120 				WM_Q_EVCNT_INCR(txq, txsstall);
7121 				break;
7122 			}
7123 		}
7124 
7125 		/* Grab a packet off the queue. */
7126 		if (is_transmit)
7127 			m0 = pcq_get(txq->txq_interq);
7128 		else
7129 			IFQ_DEQUEUE(&ifp->if_snd, m0);
7130 		if (m0 == NULL)
7131 			break;
7132 
7133 		DPRINTF(WM_DEBUG_TX,
7134 		    ("%s: TX: have packet to transmit: %p\n",
7135 		    device_xname(sc->sc_dev), m0));
7136 
7137 		txs = &txq->txq_soft[txq->txq_snext];
7138 		dmamap = txs->txs_dmamap;
7139 
7140 		use_tso = (m0->m_pkthdr.csum_flags &
7141 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
7142 
7143 		/*
7144 		 * So says the Linux driver:
7145 		 * The controller does a simple calculation to make sure
7146 		 * there is enough room in the FIFO before initiating the
7147 		 * DMA for each buffer.  The calc is:
7148 		 *	4 = ceil(buffer len / MSS)
7149 		 * To make sure we don't overrun the FIFO, adjust the max
7150 		 * buffer len if the MSS drops.
7151 		 */
7152 		dmamap->dm_maxsegsz =
7153 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
7154 		    ? m0->m_pkthdr.segsz << 2
7155 		    : WTX_MAX_LEN;
7156 
7157 		/*
7158 		 * Load the DMA map.  If this fails, the packet either
7159 		 * didn't fit in the allotted number of segments, or we
7160 		 * were short on resources.  For the too-many-segments
7161 		 * case, we simply report an error and drop the packet,
7162 		 * since we can't sanely copy a jumbo packet to a single
7163 		 * buffer.
7164 		 */
7165 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
7166 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
7167 		if (error) {
7168 			if (error == EFBIG) {
7169 				WM_Q_EVCNT_INCR(txq, txdrop);
7170 				log(LOG_ERR, "%s: Tx packet consumes too many "
7171 				    "DMA segments, dropping...\n",
7172 				    device_xname(sc->sc_dev));
7173 				wm_dump_mbuf_chain(sc, m0);
7174 				m_freem(m0);
7175 				continue;
7176 			}
7177 			/*  Short on resources, just stop for now. */
7178 			DPRINTF(WM_DEBUG_TX,
7179 			    ("%s: TX: dmamap load failed: %d\n",
7180 			    device_xname(sc->sc_dev), error));
7181 			break;
7182 		}
7183 
7184 		segs_needed = dmamap->dm_nsegs;
7185 		if (use_tso) {
7186 			/* For sentinel descriptor; see below. */
7187 			segs_needed++;
7188 		}
7189 
7190 		/*
7191 		 * Ensure we have enough descriptors free to describe
7192 		 * the packet.  Note, we always reserve one descriptor
7193 		 * at the end of the ring due to the semantics of the
7194 		 * TDT register, plus one more in the event we need
7195 		 * to load offload context.
7196 		 */
7197 		if (segs_needed > txq->txq_free - 2) {
7198 			/*
7199 			 * Not enough free descriptors to transmit this
7200 			 * packet.  We haven't committed anything yet,
7201 			 * so just unload the DMA map, put the packet
7202 			 * pack on the queue, and punt.  Notify the upper
7203 			 * layer that there are no more slots left.
7204 			 */
7205 			DPRINTF(WM_DEBUG_TX,
7206 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
7207 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
7208 			    segs_needed, txq->txq_free - 1));
7209 			if (!is_transmit)
7210 				ifp->if_flags |= IFF_OACTIVE;
7211 			txq->txq_flags |= WM_TXQ_NO_SPACE;
7212 			bus_dmamap_unload(sc->sc_dmat, dmamap);
7213 			WM_Q_EVCNT_INCR(txq, txdstall);
7214 			break;
7215 		}
7216 
7217 		/*
7218 		 * Check for 82547 Tx FIFO bug.  We need to do this
7219 		 * once we know we can transmit the packet, since we
7220 		 * do some internal FIFO space accounting here.
7221 		 */
7222 		if (sc->sc_type == WM_T_82547 &&
7223 		    wm_82547_txfifo_bugchk(sc, m0)) {
7224 			DPRINTF(WM_DEBUG_TX,
7225 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
7226 			    device_xname(sc->sc_dev)));
7227 			if (!is_transmit)
7228 				ifp->if_flags |= IFF_OACTIVE;
7229 			txq->txq_flags |= WM_TXQ_NO_SPACE;
7230 			bus_dmamap_unload(sc->sc_dmat, dmamap);
7231 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
7232 			break;
7233 		}
7234 
7235 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
7236 
7237 		DPRINTF(WM_DEBUG_TX,
7238 		    ("%s: TX: packet has %d (%d) DMA segments\n",
7239 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
7240 
7241 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
7242 
7243 		/*
7244 		 * Store a pointer to the packet so that we can free it
7245 		 * later.
7246 		 *
7247 		 * Initially, we consider the number of descriptors the
7248 		 * packet uses the number of DMA segments.  This may be
7249 		 * incremented by 1 if we do checksum offload (a descriptor
7250 		 * is used to set the checksum context).
7251 		 */
7252 		txs->txs_mbuf = m0;
7253 		txs->txs_firstdesc = txq->txq_next;
7254 		txs->txs_ndesc = segs_needed;
7255 
7256 		/* Set up offload parameters for this packet. */
7257 		if (m0->m_pkthdr.csum_flags &
7258 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
7259 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7260 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
7261 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
7262 					  &cksumfields) != 0) {
7263 				/* Error message already displayed. */
7264 				bus_dmamap_unload(sc->sc_dmat, dmamap);
7265 				continue;
7266 			}
7267 		} else {
7268 			cksumcmd = 0;
7269 			cksumfields = 0;
7270 		}
7271 
7272 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
7273 
7274 		/* Sync the DMA map. */
7275 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
7276 		    BUS_DMASYNC_PREWRITE);
7277 
7278 		/* Initialize the transmit descriptor. */
7279 		for (nexttx = txq->txq_next, seg = 0;
7280 		     seg < dmamap->dm_nsegs; seg++) {
7281 			for (seglen = dmamap->dm_segs[seg].ds_len,
7282 			     curaddr = dmamap->dm_segs[seg].ds_addr;
7283 			     seglen != 0;
7284 			     curaddr += curlen, seglen -= curlen,
7285 			     nexttx = WM_NEXTTX(txq, nexttx)) {
7286 				curlen = seglen;
7287 
7288 				/*
7289 				 * So says the Linux driver:
7290 				 * Work around for premature descriptor
7291 				 * write-backs in TSO mode.  Append a
7292 				 * 4-byte sentinel descriptor.
7293 				 */
7294 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
7295 				    curlen > 8)
7296 					curlen -= 4;
7297 
7298 				wm_set_dma_addr(
7299 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
7300 				txq->txq_descs[nexttx].wtx_cmdlen
7301 				    = htole32(cksumcmd | curlen);
7302 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
7303 				    = 0;
7304 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
7305 				    = cksumfields;
7306 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
7307 				lasttx = nexttx;
7308 
7309 				DPRINTF(WM_DEBUG_TX,
7310 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
7311 				     "len %#04zx\n",
7312 				    device_xname(sc->sc_dev), nexttx,
7313 				    (uint64_t)curaddr, curlen));
7314 			}
7315 		}
7316 
7317 		KASSERT(lasttx != -1);
7318 
7319 		/*
7320 		 * Set up the command byte on the last descriptor of
7321 		 * the packet.  If we're in the interrupt delay window,
7322 		 * delay the interrupt.
7323 		 */
7324 		txq->txq_descs[lasttx].wtx_cmdlen |=
7325 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
7326 
7327 		/*
7328 		 * If VLANs are enabled and the packet has a VLAN tag, set
7329 		 * up the descriptor to encapsulate the packet for us.
7330 		 *
7331 		 * This is only valid on the last descriptor of the packet.
7332 		 */
7333 		if (vlan_has_tag(m0)) {
7334 			txq->txq_descs[lasttx].wtx_cmdlen |=
7335 			    htole32(WTX_CMD_VLE);
7336 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
7337 			    = htole16(vlan_get_tag(m0));
7338 		}
7339 
7340 		txs->txs_lastdesc = lasttx;
7341 
7342 		DPRINTF(WM_DEBUG_TX,
7343 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
7344 		    device_xname(sc->sc_dev),
7345 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
7346 
7347 		/* Sync the descriptors we're using. */
7348 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
7349 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
7350 
7351 		/* Give the packet to the chip. */
7352 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
7353 
7354 		DPRINTF(WM_DEBUG_TX,
7355 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
7356 
7357 		DPRINTF(WM_DEBUG_TX,
7358 		    ("%s: TX: finished transmitting packet, job %d\n",
7359 		    device_xname(sc->sc_dev), txq->txq_snext));
7360 
7361 		/* Advance the tx pointer. */
7362 		txq->txq_free -= txs->txs_ndesc;
7363 		txq->txq_next = nexttx;
7364 
7365 		txq->txq_sfree--;
7366 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
7367 
7368 		/* Pass the packet to any BPF listeners. */
7369 		bpf_mtap(ifp, m0);
7370 	}
7371 
7372 	if (m0 != NULL) {
7373 		if (!is_transmit)
7374 			ifp->if_flags |= IFF_OACTIVE;
7375 		txq->txq_flags |= WM_TXQ_NO_SPACE;
7376 		WM_Q_EVCNT_INCR(txq, txdrop);
7377 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
7378 			__func__));
7379 		m_freem(m0);
7380 	}
7381 
7382 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
7383 		/* No more slots; notify upper layer. */
7384 		if (!is_transmit)
7385 			ifp->if_flags |= IFF_OACTIVE;
7386 		txq->txq_flags |= WM_TXQ_NO_SPACE;
7387 	}
7388 
7389 	if (txq->txq_free != ofree) {
7390 		/* Set a watchdog timer in case the chip flakes out. */
7391 		ifp->if_timer = 5;
7392 	}
7393 }
7394 
7395 /*
7396  * wm_nq_tx_offload:
7397  *
7398  *	Set up TCP/IP checksumming parameters for the
7399  *	specified packet, for NEWQUEUE devices
7400  */
7401 static int
7402 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
7403     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
7404 {
7405 	struct mbuf *m0 = txs->txs_mbuf;
7406 	uint32_t vl_len, mssidx, cmdc;
7407 	struct ether_header *eh;
7408 	int offset, iphl;
7409 
7410 	/*
7411 	 * XXX It would be nice if the mbuf pkthdr had offset
7412 	 * fields for the protocol headers.
7413 	 */
7414 	*cmdlenp = 0;
7415 	*fieldsp = 0;
7416 
7417 	eh = mtod(m0, struct ether_header *);
7418 	switch (htons(eh->ether_type)) {
7419 	case ETHERTYPE_IP:
7420 	case ETHERTYPE_IPV6:
7421 		offset = ETHER_HDR_LEN;
7422 		break;
7423 
7424 	case ETHERTYPE_VLAN:
7425 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
7426 		break;
7427 
7428 	default:
7429 		/* Don't support this protocol or encapsulation. */
7430 		*do_csum = false;
7431 		return 0;
7432 	}
7433 	*do_csum = true;
7434 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
7435 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
7436 
7437 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
7438 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
7439 
7440 	if ((m0->m_pkthdr.csum_flags &
7441 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
7442 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
7443 	} else {
7444 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
7445 	}
7446 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
7447 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
7448 
7449 	if (vlan_has_tag(m0)) {
7450 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
7451 		     << NQTXC_VLLEN_VLAN_SHIFT);
7452 		*cmdlenp |= NQTX_CMD_VLE;
7453 	}
7454 
7455 	mssidx = 0;
7456 
7457 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
7458 		int hlen = offset + iphl;
7459 		int tcp_hlen;
7460 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
7461 
7462 		if (__predict_false(m0->m_len <
7463 				    (hlen + sizeof(struct tcphdr)))) {
7464 			/*
7465 			 * TCP/IP headers are not in the first mbuf; we need
7466 			 * to do this the slow and painful way.  Let's just
7467 			 * hope this doesn't happen very often.
7468 			 */
7469 			struct tcphdr th;
7470 
7471 			WM_Q_EVCNT_INCR(txq, txtsopain);
7472 
7473 			m_copydata(m0, hlen, sizeof(th), &th);
7474 			if (v4) {
7475 				struct ip ip;
7476 
7477 				m_copydata(m0, offset, sizeof(ip), &ip);
7478 				ip.ip_len = 0;
7479 				m_copyback(m0,
7480 				    offset + offsetof(struct ip, ip_len),
7481 				    sizeof(ip.ip_len), &ip.ip_len);
7482 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
7483 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
7484 			} else {
7485 				struct ip6_hdr ip6;
7486 
7487 				m_copydata(m0, offset, sizeof(ip6), &ip6);
7488 				ip6.ip6_plen = 0;
7489 				m_copyback(m0,
7490 				    offset + offsetof(struct ip6_hdr, ip6_plen),
7491 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
7492 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
7493 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
7494 			}
7495 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
7496 			    sizeof(th.th_sum), &th.th_sum);
7497 
7498 			tcp_hlen = th.th_off << 2;
7499 		} else {
7500 			/*
7501 			 * TCP/IP headers are in the first mbuf; we can do
7502 			 * this the easy way.
7503 			 */
7504 			struct tcphdr *th;
7505 
7506 			if (v4) {
7507 				struct ip *ip =
7508 				    (void *)(mtod(m0, char *) + offset);
7509 				th = (void *)(mtod(m0, char *) + hlen);
7510 
7511 				ip->ip_len = 0;
7512 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
7513 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
7514 			} else {
7515 				struct ip6_hdr *ip6 =
7516 				    (void *)(mtod(m0, char *) + offset);
7517 				th = (void *)(mtod(m0, char *) + hlen);
7518 
7519 				ip6->ip6_plen = 0;
7520 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
7521 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
7522 			}
7523 			tcp_hlen = th->th_off << 2;
7524 		}
7525 		hlen += tcp_hlen;
7526 		*cmdlenp |= NQTX_CMD_TSE;
7527 
7528 		if (v4) {
7529 			WM_Q_EVCNT_INCR(txq, txtso);
7530 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
7531 		} else {
7532 			WM_Q_EVCNT_INCR(txq, txtso6);
7533 			*fieldsp |= NQTXD_FIELDS_TUXSM;
7534 		}
7535 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
7536 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
7537 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
7538 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
7539 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
7540 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
7541 	} else {
7542 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
7543 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
7544 	}
7545 
7546 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
7547 		*fieldsp |= NQTXD_FIELDS_IXSM;
7548 		cmdc |= NQTXC_CMD_IP4;
7549 	}
7550 
7551 	if (m0->m_pkthdr.csum_flags &
7552 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
7553 		WM_Q_EVCNT_INCR(txq, txtusum);
7554 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
7555 			cmdc |= NQTXC_CMD_TCP;
7556 		} else {
7557 			cmdc |= NQTXC_CMD_UDP;
7558 		}
7559 		cmdc |= NQTXC_CMD_IP4;
7560 		*fieldsp |= NQTXD_FIELDS_TUXSM;
7561 	}
7562 	if (m0->m_pkthdr.csum_flags &
7563 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
7564 		WM_Q_EVCNT_INCR(txq, txtusum6);
7565 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
7566 			cmdc |= NQTXC_CMD_TCP;
7567 		} else {
7568 			cmdc |= NQTXC_CMD_UDP;
7569 		}
7570 		cmdc |= NQTXC_CMD_IP6;
7571 		*fieldsp |= NQTXD_FIELDS_TUXSM;
7572 	}
7573 
7574 	/*
7575 	 * We don't have to write context descriptor for every packet to
7576 	 * NEWQUEUE controllers, that is 82575, 82576, 82580, I350, I354,
7577 	 * I210 and I211. It is enough to write once per a Tx queue for these
7578 	 * controllers.
7579 	 * It would be overhead to write context descriptor for every packet,
7580 	 * however it does not cause problems.
7581 	 */
7582 	/* Fill in the context descriptor. */
7583 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
7584 	    htole32(vl_len);
7585 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
7586 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
7587 	    htole32(cmdc);
7588 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
7589 	    htole32(mssidx);
7590 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
7591 	DPRINTF(WM_DEBUG_TX,
7592 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
7593 	    txq->txq_next, 0, vl_len));
7594 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
7595 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
7596 	txs->txs_ndesc++;
7597 	return 0;
7598 }
7599 
7600 /*
7601  * wm_nq_start:		[ifnet interface function]
7602  *
7603  *	Start packet transmission on the interface for NEWQUEUE devices
7604  */
7605 static void
7606 wm_nq_start(struct ifnet *ifp)
7607 {
7608 	struct wm_softc *sc = ifp->if_softc;
7609 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7610 
7611 #ifdef WM_MPSAFE
7612 	KASSERT(if_is_mpsafe(ifp));
7613 #endif
7614 	/*
7615 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
7616 	 */
7617 
7618 	mutex_enter(txq->txq_lock);
7619 	if (!txq->txq_stopping)
7620 		wm_nq_start_locked(ifp);
7621 	mutex_exit(txq->txq_lock);
7622 }
7623 
7624 static void
7625 wm_nq_start_locked(struct ifnet *ifp)
7626 {
7627 	struct wm_softc *sc = ifp->if_softc;
7628 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7629 
7630 	wm_nq_send_common_locked(ifp, txq, false);
7631 }
7632 
7633 static int
7634 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
7635 {
7636 	int qid;
7637 	struct wm_softc *sc = ifp->if_softc;
7638 	struct wm_txqueue *txq;
7639 
7640 	qid = wm_select_txqueue(ifp, m);
7641 	txq = &sc->sc_queue[qid].wmq_txq;
7642 
7643 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
7644 		m_freem(m);
7645 		WM_Q_EVCNT_INCR(txq, txdrop);
7646 		return ENOBUFS;
7647 	}
7648 
7649 	/*
7650 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
7651 	 */
7652 	ifp->if_obytes += m->m_pkthdr.len;
7653 	if (m->m_flags & M_MCAST)
7654 		ifp->if_omcasts++;
7655 
7656 	/*
7657 	 * The situations which this mutex_tryenter() fails at running time
7658 	 * are below two patterns.
7659 	 *     (1) contention with interrupt handler(wm_txrxintr_msix())
7660 	 *     (2) contention with deferred if_start softint(wm_handle_queue())
7661 	 * In the case of (1), the last packet enqueued to txq->txq_interq is
7662 	 * dequeued by wm_deferred_start_locked(). So, it does not get stuck.
7663 	 * In the case of (2), the last packet enqueued to txq->txq_interq is also
7664 	 * dequeued by wm_deferred_start_locked(). So, it does not get stuck, either.
7665 	 */
7666 	if (mutex_tryenter(txq->txq_lock)) {
7667 		if (!txq->txq_stopping)
7668 			wm_nq_transmit_locked(ifp, txq);
7669 		mutex_exit(txq->txq_lock);
7670 	}
7671 
7672 	return 0;
7673 }
7674 
7675 static void
7676 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
7677 {
7678 
7679 	wm_nq_send_common_locked(ifp, txq, true);
7680 }
7681 
7682 static void
7683 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
7684     bool is_transmit)
7685 {
7686 	struct wm_softc *sc = ifp->if_softc;
7687 	struct mbuf *m0;
7688 	struct wm_txsoft *txs;
7689 	bus_dmamap_t dmamap;
7690 	int error, nexttx, lasttx = -1, seg, segs_needed;
7691 	bool do_csum, sent;
7692 
7693 	KASSERT(mutex_owned(txq->txq_lock));
7694 
7695 	if ((ifp->if_flags & IFF_RUNNING) == 0)
7696 		return;
7697 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
7698 		return;
7699 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
7700 		return;
7701 
7702 	sent = false;
7703 
7704 	/*
7705 	 * Loop through the send queue, setting up transmit descriptors
7706 	 * until we drain the queue, or use up all available transmit
7707 	 * descriptors.
7708 	 */
7709 	for (;;) {
7710 		m0 = NULL;
7711 
7712 		/* Get a work queue entry. */
7713 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
7714 			wm_txeof(sc, txq);
7715 			if (txq->txq_sfree == 0) {
7716 				DPRINTF(WM_DEBUG_TX,
7717 				    ("%s: TX: no free job descriptors\n",
7718 					device_xname(sc->sc_dev)));
7719 				WM_Q_EVCNT_INCR(txq, txsstall);
7720 				break;
7721 			}
7722 		}
7723 
7724 		/* Grab a packet off the queue. */
7725 		if (is_transmit)
7726 			m0 = pcq_get(txq->txq_interq);
7727 		else
7728 			IFQ_DEQUEUE(&ifp->if_snd, m0);
7729 		if (m0 == NULL)
7730 			break;
7731 
7732 		DPRINTF(WM_DEBUG_TX,
7733 		    ("%s: TX: have packet to transmit: %p\n",
7734 		    device_xname(sc->sc_dev), m0));
7735 
7736 		txs = &txq->txq_soft[txq->txq_snext];
7737 		dmamap = txs->txs_dmamap;
7738 
7739 		/*
7740 		 * Load the DMA map.  If this fails, the packet either
7741 		 * didn't fit in the allotted number of segments, or we
7742 		 * were short on resources.  For the too-many-segments
7743 		 * case, we simply report an error and drop the packet,
7744 		 * since we can't sanely copy a jumbo packet to a single
7745 		 * buffer.
7746 		 */
7747 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
7748 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
7749 		if (error) {
7750 			if (error == EFBIG) {
7751 				WM_Q_EVCNT_INCR(txq, txdrop);
7752 				log(LOG_ERR, "%s: Tx packet consumes too many "
7753 				    "DMA segments, dropping...\n",
7754 				    device_xname(sc->sc_dev));
7755 				wm_dump_mbuf_chain(sc, m0);
7756 				m_freem(m0);
7757 				continue;
7758 			}
7759 			/* Short on resources, just stop for now. */
7760 			DPRINTF(WM_DEBUG_TX,
7761 			    ("%s: TX: dmamap load failed: %d\n",
7762 			    device_xname(sc->sc_dev), error));
7763 			break;
7764 		}
7765 
7766 		segs_needed = dmamap->dm_nsegs;
7767 
7768 		/*
7769 		 * Ensure we have enough descriptors free to describe
7770 		 * the packet.  Note, we always reserve one descriptor
7771 		 * at the end of the ring due to the semantics of the
7772 		 * TDT register, plus one more in the event we need
7773 		 * to load offload context.
7774 		 */
7775 		if (segs_needed > txq->txq_free - 2) {
7776 			/*
7777 			 * Not enough free descriptors to transmit this
7778 			 * packet.  We haven't committed anything yet,
7779 			 * so just unload the DMA map, put the packet
7780 			 * pack on the queue, and punt.  Notify the upper
7781 			 * layer that there are no more slots left.
7782 			 */
7783 			DPRINTF(WM_DEBUG_TX,
7784 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
7785 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
7786 			    segs_needed, txq->txq_free - 1));
7787 			if (!is_transmit)
7788 				ifp->if_flags |= IFF_OACTIVE;
7789 			txq->txq_flags |= WM_TXQ_NO_SPACE;
7790 			bus_dmamap_unload(sc->sc_dmat, dmamap);
7791 			WM_Q_EVCNT_INCR(txq, txdstall);
7792 			break;
7793 		}
7794 
7795 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
7796 
7797 		DPRINTF(WM_DEBUG_TX,
7798 		    ("%s: TX: packet has %d (%d) DMA segments\n",
7799 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
7800 
7801 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
7802 
7803 		/*
7804 		 * Store a pointer to the packet so that we can free it
7805 		 * later.
7806 		 *
7807 		 * Initially, we consider the number of descriptors the
7808 		 * packet uses the number of DMA segments.  This may be
7809 		 * incremented by 1 if we do checksum offload (a descriptor
7810 		 * is used to set the checksum context).
7811 		 */
7812 		txs->txs_mbuf = m0;
7813 		txs->txs_firstdesc = txq->txq_next;
7814 		txs->txs_ndesc = segs_needed;
7815 
7816 		/* Set up offload parameters for this packet. */
7817 		uint32_t cmdlen, fields, dcmdlen;
7818 		if (m0->m_pkthdr.csum_flags &
7819 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
7820 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7821 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
7822 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
7823 			    &do_csum) != 0) {
7824 				/* Error message already displayed. */
7825 				bus_dmamap_unload(sc->sc_dmat, dmamap);
7826 				continue;
7827 			}
7828 		} else {
7829 			do_csum = false;
7830 			cmdlen = 0;
7831 			fields = 0;
7832 		}
7833 
7834 		/* Sync the DMA map. */
7835 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
7836 		    BUS_DMASYNC_PREWRITE);
7837 
7838 		/* Initialize the first transmit descriptor. */
7839 		nexttx = txq->txq_next;
7840 		if (!do_csum) {
7841 			/* setup a legacy descriptor */
7842 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
7843 			    dmamap->dm_segs[0].ds_addr);
7844 			txq->txq_descs[nexttx].wtx_cmdlen =
7845 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
7846 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
7847 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
7848 			if (vlan_has_tag(m0)) {
7849 				txq->txq_descs[nexttx].wtx_cmdlen |=
7850 				    htole32(WTX_CMD_VLE);
7851 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
7852 				    htole16(vlan_get_tag(m0));
7853 			} else {
7854 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
7855 			}
7856 			dcmdlen = 0;
7857 		} else {
7858 			/* setup an advanced data descriptor */
7859 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
7860 			    htole64(dmamap->dm_segs[0].ds_addr);
7861 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
7862 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
7863 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen );
7864 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
7865 			    htole32(fields);
7866 			DPRINTF(WM_DEBUG_TX,
7867 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
7868 			    device_xname(sc->sc_dev), nexttx,
7869 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
7870 			DPRINTF(WM_DEBUG_TX,
7871 			    ("\t 0x%08x%08x\n", fields,
7872 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
7873 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
7874 		}
7875 
7876 		lasttx = nexttx;
7877 		nexttx = WM_NEXTTX(txq, nexttx);
7878 		/*
7879 		 * fill in the next descriptors. legacy or advanced format
7880 		 * is the same here
7881 		 */
7882 		for (seg = 1; seg < dmamap->dm_nsegs;
7883 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
7884 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
7885 			    htole64(dmamap->dm_segs[seg].ds_addr);
7886 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
7887 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
7888 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
7889 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
7890 			lasttx = nexttx;
7891 
7892 			DPRINTF(WM_DEBUG_TX,
7893 			    ("%s: TX: desc %d: %#" PRIx64 ", "
7894 			     "len %#04zx\n",
7895 			    device_xname(sc->sc_dev), nexttx,
7896 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
7897 			    dmamap->dm_segs[seg].ds_len));
7898 		}
7899 
7900 		KASSERT(lasttx != -1);
7901 
7902 		/*
7903 		 * Set up the command byte on the last descriptor of
7904 		 * the packet.  If we're in the interrupt delay window,
7905 		 * delay the interrupt.
7906 		 */
7907 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
7908 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
7909 		txq->txq_descs[lasttx].wtx_cmdlen |=
7910 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
7911 
7912 		txs->txs_lastdesc = lasttx;
7913 
7914 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
7915 		    device_xname(sc->sc_dev),
7916 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
7917 
7918 		/* Sync the descriptors we're using. */
7919 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
7920 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
7921 
7922 		/* Give the packet to the chip. */
7923 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
7924 		sent = true;
7925 
7926 		DPRINTF(WM_DEBUG_TX,
7927 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
7928 
7929 		DPRINTF(WM_DEBUG_TX,
7930 		    ("%s: TX: finished transmitting packet, job %d\n",
7931 		    device_xname(sc->sc_dev), txq->txq_snext));
7932 
7933 		/* Advance the tx pointer. */
7934 		txq->txq_free -= txs->txs_ndesc;
7935 		txq->txq_next = nexttx;
7936 
7937 		txq->txq_sfree--;
7938 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
7939 
7940 		/* Pass the packet to any BPF listeners. */
7941 		bpf_mtap(ifp, m0);
7942 	}
7943 
7944 	if (m0 != NULL) {
7945 		if (!is_transmit)
7946 			ifp->if_flags |= IFF_OACTIVE;
7947 		txq->txq_flags |= WM_TXQ_NO_SPACE;
7948 		WM_Q_EVCNT_INCR(txq, txdrop);
7949 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
7950 			__func__));
7951 		m_freem(m0);
7952 	}
7953 
7954 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
7955 		/* No more slots; notify upper layer. */
7956 		if (!is_transmit)
7957 			ifp->if_flags |= IFF_OACTIVE;
7958 		txq->txq_flags |= WM_TXQ_NO_SPACE;
7959 	}
7960 
7961 	if (sent) {
7962 		/* Set a watchdog timer in case the chip flakes out. */
7963 		ifp->if_timer = 5;
7964 	}
7965 }
7966 
7967 static void
7968 wm_deferred_start_locked(struct wm_txqueue *txq)
7969 {
7970 	struct wm_softc *sc = txq->txq_sc;
7971 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7972 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
7973 	int qid = wmq->wmq_id;
7974 
7975 	KASSERT(mutex_owned(txq->txq_lock));
7976 
7977 	if (txq->txq_stopping) {
7978 		mutex_exit(txq->txq_lock);
7979 		return;
7980 	}
7981 
7982 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7983 		/* XXX need for ALTQ or one CPU system */
7984 		if (qid == 0)
7985 			wm_nq_start_locked(ifp);
7986 		wm_nq_transmit_locked(ifp, txq);
7987 	} else {
7988 		/* XXX need for ALTQ or one CPU system */
7989 		if (qid == 0)
7990 			wm_start_locked(ifp);
7991 		wm_transmit_locked(ifp, txq);
7992 	}
7993 }
7994 
7995 /* Interrupt */
7996 
7997 /*
7998  * wm_txeof:
7999  *
8000  *	Helper; handle transmit interrupts.
8001  */
8002 static int
8003 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
8004 {
8005 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8006 	struct wm_txsoft *txs;
8007 	bool processed = false;
8008 	int count = 0;
8009 	int i;
8010 	uint8_t status;
8011 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
8012 
8013 	KASSERT(mutex_owned(txq->txq_lock));
8014 
8015 	if (txq->txq_stopping)
8016 		return 0;
8017 
8018 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
8019 	/* for ALTQ and legacy(not use multiqueue) ethernet controller */
8020 	if (wmq->wmq_id == 0)
8021 		ifp->if_flags &= ~IFF_OACTIVE;
8022 
8023 	/*
8024 	 * Go through the Tx list and free mbufs for those
8025 	 * frames which have been transmitted.
8026 	 */
8027 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
8028 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
8029 		txs = &txq->txq_soft[i];
8030 
8031 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
8032 			device_xname(sc->sc_dev), i));
8033 
8034 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
8035 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
8036 
8037 		status =
8038 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
8039 		if ((status & WTX_ST_DD) == 0) {
8040 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
8041 			    BUS_DMASYNC_PREREAD);
8042 			break;
8043 		}
8044 
8045 		processed = true;
8046 		count++;
8047 		DPRINTF(WM_DEBUG_TX,
8048 		    ("%s: TX: job %d done: descs %d..%d\n",
8049 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
8050 		    txs->txs_lastdesc));
8051 
8052 		/*
8053 		 * XXX We should probably be using the statistics
8054 		 * XXX registers, but I don't know if they exist
8055 		 * XXX on chips before the i82544.
8056 		 */
8057 
8058 #ifdef WM_EVENT_COUNTERS
8059 		if (status & WTX_ST_TU)
8060 			WM_Q_EVCNT_INCR(txq, tu);
8061 #endif /* WM_EVENT_COUNTERS */
8062 
8063 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
8064 			ifp->if_oerrors++;
8065 			if (status & WTX_ST_LC)
8066 				log(LOG_WARNING, "%s: late collision\n",
8067 				    device_xname(sc->sc_dev));
8068 			else if (status & WTX_ST_EC) {
8069 				ifp->if_collisions += 16;
8070 				log(LOG_WARNING, "%s: excessive collisions\n",
8071 				    device_xname(sc->sc_dev));
8072 			}
8073 		} else
8074 			ifp->if_opackets++;
8075 
8076 		txq->txq_packets++;
8077 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
8078 
8079 		txq->txq_free += txs->txs_ndesc;
8080 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
8081 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
8082 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
8083 		m_freem(txs->txs_mbuf);
8084 		txs->txs_mbuf = NULL;
8085 	}
8086 
8087 	/* Update the dirty transmit buffer pointer. */
8088 	txq->txq_sdirty = i;
8089 	DPRINTF(WM_DEBUG_TX,
8090 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
8091 
8092 	if (count != 0)
8093 		rnd_add_uint32(&sc->rnd_source, count);
8094 
8095 	/*
8096 	 * If there are no more pending transmissions, cancel the watchdog
8097 	 * timer.
8098 	 */
8099 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
8100 		ifp->if_timer = 0;
8101 
8102 	return processed;
8103 }
8104 
8105 static inline uint32_t
8106 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
8107 {
8108 	struct wm_softc *sc = rxq->rxq_sc;
8109 
8110 	if (sc->sc_type == WM_T_82574)
8111 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
8112 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8113 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
8114 	else
8115 		return rxq->rxq_descs[idx].wrx_status;
8116 }
8117 
8118 static inline uint32_t
8119 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
8120 {
8121 	struct wm_softc *sc = rxq->rxq_sc;
8122 
8123 	if (sc->sc_type == WM_T_82574)
8124 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
8125 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8126 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
8127 	else
8128 		return rxq->rxq_descs[idx].wrx_errors;
8129 }
8130 
8131 static inline uint16_t
8132 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
8133 {
8134 	struct wm_softc *sc = rxq->rxq_sc;
8135 
8136 	if (sc->sc_type == WM_T_82574)
8137 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
8138 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8139 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
8140 	else
8141 		return rxq->rxq_descs[idx].wrx_special;
8142 }
8143 
8144 static inline int
8145 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
8146 {
8147 	struct wm_softc *sc = rxq->rxq_sc;
8148 
8149 	if (sc->sc_type == WM_T_82574)
8150 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
8151 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8152 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
8153 	else
8154 		return rxq->rxq_descs[idx].wrx_len;
8155 }
8156 
8157 #ifdef WM_DEBUG
8158 static inline uint32_t
8159 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
8160 {
8161 	struct wm_softc *sc = rxq->rxq_sc;
8162 
8163 	if (sc->sc_type == WM_T_82574)
8164 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
8165 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8166 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
8167 	else
8168 		return 0;
8169 }
8170 
8171 static inline uint8_t
8172 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
8173 {
8174 	struct wm_softc *sc = rxq->rxq_sc;
8175 
8176 	if (sc->sc_type == WM_T_82574)
8177 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
8178 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8179 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
8180 	else
8181 		return 0;
8182 }
8183 #endif /* WM_DEBUG */
8184 
8185 static inline bool
8186 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
8187     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
8188 {
8189 
8190 	if (sc->sc_type == WM_T_82574)
8191 		return (status & ext_bit) != 0;
8192 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8193 		return (status & nq_bit) != 0;
8194 	else
8195 		return (status & legacy_bit) != 0;
8196 }
8197 
8198 static inline bool
8199 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
8200     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
8201 {
8202 
8203 	if (sc->sc_type == WM_T_82574)
8204 		return (error & ext_bit) != 0;
8205 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8206 		return (error & nq_bit) != 0;
8207 	else
8208 		return (error & legacy_bit) != 0;
8209 }
8210 
8211 static inline bool
8212 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
8213 {
8214 
8215 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
8216 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
8217 		return true;
8218 	else
8219 		return false;
8220 }
8221 
8222 static inline bool
8223 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
8224 {
8225 	struct wm_softc *sc = rxq->rxq_sc;
8226 
8227 	/* XXXX missing error bit for newqueue? */
8228 	if (wm_rxdesc_is_set_error(sc, errors,
8229 		WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE,
8230 		EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE,
8231 		NQRXC_ERROR_RXE)) {
8232 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
8233 			log(LOG_WARNING, "%s: symbol error\n",
8234 			    device_xname(sc->sc_dev));
8235 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
8236 			log(LOG_WARNING, "%s: receive sequence error\n",
8237 			    device_xname(sc->sc_dev));
8238 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
8239 			log(LOG_WARNING, "%s: CRC error\n",
8240 			    device_xname(sc->sc_dev));
8241 		return true;
8242 	}
8243 
8244 	return false;
8245 }
8246 
8247 static inline bool
8248 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
8249 {
8250 	struct wm_softc *sc = rxq->rxq_sc;
8251 
8252 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
8253 		NQRXC_STATUS_DD)) {
8254 		/* We have processed all of the receive descriptors. */
8255 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
8256 		return false;
8257 	}
8258 
8259 	return true;
8260 }
8261 
8262 static inline bool
8263 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
8264     struct mbuf *m)
8265 {
8266 
8267 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
8268 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
8269 		vlan_set_tag(m, le16toh(vlantag));
8270 	}
8271 
8272 	return true;
8273 }
8274 
8275 static inline void
8276 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
8277     uint32_t errors, struct mbuf *m)
8278 {
8279 	struct wm_softc *sc = rxq->rxq_sc;
8280 
8281 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
8282 		if (wm_rxdesc_is_set_status(sc, status,
8283 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
8284 			WM_Q_EVCNT_INCR(rxq, rxipsum);
8285 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
8286 			if (wm_rxdesc_is_set_error(sc, errors,
8287 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
8288 				m->m_pkthdr.csum_flags |=
8289 					M_CSUM_IPv4_BAD;
8290 		}
8291 		if (wm_rxdesc_is_set_status(sc, status,
8292 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
8293 			/*
8294 			 * Note: we don't know if this was TCP or UDP,
8295 			 * so we just set both bits, and expect the
8296 			 * upper layers to deal.
8297 			 */
8298 			WM_Q_EVCNT_INCR(rxq, rxtusum);
8299 			m->m_pkthdr.csum_flags |=
8300 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
8301 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
8302 			if (wm_rxdesc_is_set_error(sc, errors,
8303 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
8304 				m->m_pkthdr.csum_flags |=
8305 					M_CSUM_TCP_UDP_BAD;
8306 		}
8307 	}
8308 }
8309 
8310 /*
8311  * wm_rxeof:
8312  *
8313  *	Helper; handle receive interrupts.
8314  */
8315 static void
8316 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
8317 {
8318 	struct wm_softc *sc = rxq->rxq_sc;
8319 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8320 	struct wm_rxsoft *rxs;
8321 	struct mbuf *m;
8322 	int i, len;
8323 	int count = 0;
8324 	uint32_t status, errors;
8325 	uint16_t vlantag;
8326 
8327 	KASSERT(mutex_owned(rxq->rxq_lock));
8328 
8329 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
8330 		if (limit-- == 0) {
8331 			rxq->rxq_ptr = i;
8332 			break;
8333 		}
8334 
8335 		rxs = &rxq->rxq_soft[i];
8336 
8337 		DPRINTF(WM_DEBUG_RX,
8338 		    ("%s: RX: checking descriptor %d\n",
8339 		    device_xname(sc->sc_dev), i));
8340 		wm_cdrxsync(rxq, i,BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
8341 
8342 		status = wm_rxdesc_get_status(rxq, i);
8343 		errors = wm_rxdesc_get_errors(rxq, i);
8344 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
8345 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
8346 #ifdef WM_DEBUG
8347 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
8348 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
8349 #endif
8350 
8351 		if (!wm_rxdesc_dd(rxq, i, status)) {
8352 			/*
8353 			 * Update the receive pointer holding rxq_lock
8354 			 * consistent with increment counter.
8355 			 */
8356 			rxq->rxq_ptr = i;
8357 			break;
8358 		}
8359 
8360 		count++;
8361 		if (__predict_false(rxq->rxq_discard)) {
8362 			DPRINTF(WM_DEBUG_RX,
8363 			    ("%s: RX: discarding contents of descriptor %d\n",
8364 			    device_xname(sc->sc_dev), i));
8365 			wm_init_rxdesc(rxq, i);
8366 			if (wm_rxdesc_is_eop(rxq, status)) {
8367 				/* Reset our state. */
8368 				DPRINTF(WM_DEBUG_RX,
8369 				    ("%s: RX: resetting rxdiscard -> 0\n",
8370 				    device_xname(sc->sc_dev)));
8371 				rxq->rxq_discard = 0;
8372 			}
8373 			continue;
8374 		}
8375 
8376 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
8377 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
8378 
8379 		m = rxs->rxs_mbuf;
8380 
8381 		/*
8382 		 * Add a new receive buffer to the ring, unless of
8383 		 * course the length is zero. Treat the latter as a
8384 		 * failed mapping.
8385 		 */
8386 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
8387 			/*
8388 			 * Failed, throw away what we've done so
8389 			 * far, and discard the rest of the packet.
8390 			 */
8391 			ifp->if_ierrors++;
8392 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
8393 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
8394 			wm_init_rxdesc(rxq, i);
8395 			if (!wm_rxdesc_is_eop(rxq, status))
8396 				rxq->rxq_discard = 1;
8397 			if (rxq->rxq_head != NULL)
8398 				m_freem(rxq->rxq_head);
8399 			WM_RXCHAIN_RESET(rxq);
8400 			DPRINTF(WM_DEBUG_RX,
8401 			    ("%s: RX: Rx buffer allocation failed, "
8402 			    "dropping packet%s\n", device_xname(sc->sc_dev),
8403 			    rxq->rxq_discard ? " (discard)" : ""));
8404 			continue;
8405 		}
8406 
8407 		m->m_len = len;
8408 		rxq->rxq_len += len;
8409 		DPRINTF(WM_DEBUG_RX,
8410 		    ("%s: RX: buffer at %p len %d\n",
8411 		    device_xname(sc->sc_dev), m->m_data, len));
8412 
8413 		/* If this is not the end of the packet, keep looking. */
8414 		if (!wm_rxdesc_is_eop(rxq, status)) {
8415 			WM_RXCHAIN_LINK(rxq, m);
8416 			DPRINTF(WM_DEBUG_RX,
8417 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
8418 			    device_xname(sc->sc_dev), rxq->rxq_len));
8419 			continue;
8420 		}
8421 
8422 		/*
8423 		 * Okay, we have the entire packet now.  The chip is
8424 		 * configured to include the FCS except I350 and I21[01]
8425 		 * (not all chips can be configured to strip it),
8426 		 * so we need to trim it.
8427 		 * May need to adjust length of previous mbuf in the
8428 		 * chain if the current mbuf is too short.
8429 		 * For an eratta, the RCTL_SECRC bit in RCTL register
8430 		 * is always set in I350, so we don't trim it.
8431 		 */
8432 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
8433 		    && (sc->sc_type != WM_T_I210)
8434 		    && (sc->sc_type != WM_T_I211)) {
8435 			if (m->m_len < ETHER_CRC_LEN) {
8436 				rxq->rxq_tail->m_len
8437 				    -= (ETHER_CRC_LEN - m->m_len);
8438 				m->m_len = 0;
8439 			} else
8440 				m->m_len -= ETHER_CRC_LEN;
8441 			len = rxq->rxq_len - ETHER_CRC_LEN;
8442 		} else
8443 			len = rxq->rxq_len;
8444 
8445 		WM_RXCHAIN_LINK(rxq, m);
8446 
8447 		*rxq->rxq_tailp = NULL;
8448 		m = rxq->rxq_head;
8449 
8450 		WM_RXCHAIN_RESET(rxq);
8451 
8452 		DPRINTF(WM_DEBUG_RX,
8453 		    ("%s: RX: have entire packet, len -> %d\n",
8454 		    device_xname(sc->sc_dev), len));
8455 
8456 		/* If an error occurred, update stats and drop the packet. */
8457 		if (wm_rxdesc_has_errors(rxq, errors)) {
8458 			m_freem(m);
8459 			continue;
8460 		}
8461 
8462 		/* No errors.  Receive the packet. */
8463 		m_set_rcvif(m, ifp);
8464 		m->m_pkthdr.len = len;
8465 		/*
8466 		 * TODO
8467 		 * should be save rsshash and rsstype to this mbuf.
8468 		 */
8469 		DPRINTF(WM_DEBUG_RX,
8470 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
8471 			device_xname(sc->sc_dev), rsstype, rsshash));
8472 
8473 		/*
8474 		 * If VLANs are enabled, VLAN packets have been unwrapped
8475 		 * for us.  Associate the tag with the packet.
8476 		 */
8477 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
8478 			continue;
8479 
8480 		/* Set up checksum info for this packet. */
8481 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
8482 		/*
8483 		 * Update the receive pointer holding rxq_lock consistent with
8484 		 * increment counter.
8485 		 */
8486 		rxq->rxq_ptr = i;
8487 		rxq->rxq_packets++;
8488 		rxq->rxq_bytes += len;
8489 		mutex_exit(rxq->rxq_lock);
8490 
8491 		/* Pass it on. */
8492 		if_percpuq_enqueue(sc->sc_ipq, m);
8493 
8494 		mutex_enter(rxq->rxq_lock);
8495 
8496 		if (rxq->rxq_stopping)
8497 			break;
8498 	}
8499 
8500 	if (count != 0)
8501 		rnd_add_uint32(&sc->rnd_source, count);
8502 
8503 	DPRINTF(WM_DEBUG_RX,
8504 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
8505 }
8506 
8507 /*
8508  * wm_linkintr_gmii:
8509  *
8510  *	Helper; handle link interrupts for GMII.
8511  */
8512 static void
8513 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
8514 {
8515 
8516 	KASSERT(WM_CORE_LOCKED(sc));
8517 
8518 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
8519 		__func__));
8520 
8521 	if (icr & ICR_LSC) {
8522 		uint32_t reg;
8523 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
8524 
8525 		if ((status & STATUS_LU) != 0) {
8526 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
8527 				device_xname(sc->sc_dev),
8528 				(status & STATUS_FD) ? "FDX" : "HDX"));
8529 		} else {
8530 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
8531 				device_xname(sc->sc_dev)));
8532 		}
8533 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
8534 			wm_gig_downshift_workaround_ich8lan(sc);
8535 
8536 		if ((sc->sc_type == WM_T_ICH8)
8537 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
8538 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
8539 		}
8540 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
8541 			device_xname(sc->sc_dev)));
8542 		mii_pollstat(&sc->sc_mii);
8543 		if (sc->sc_type == WM_T_82543) {
8544 			int miistatus, active;
8545 
8546 			/*
8547 			 * With 82543, we need to force speed and
8548 			 * duplex on the MAC equal to what the PHY
8549 			 * speed and duplex configuration is.
8550 			 */
8551 			miistatus = sc->sc_mii.mii_media_status;
8552 
8553 			if (miistatus & IFM_ACTIVE) {
8554 				active = sc->sc_mii.mii_media_active;
8555 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
8556 				switch (IFM_SUBTYPE(active)) {
8557 				case IFM_10_T:
8558 					sc->sc_ctrl |= CTRL_SPEED_10;
8559 					break;
8560 				case IFM_100_TX:
8561 					sc->sc_ctrl |= CTRL_SPEED_100;
8562 					break;
8563 				case IFM_1000_T:
8564 					sc->sc_ctrl |= CTRL_SPEED_1000;
8565 					break;
8566 				default:
8567 					/*
8568 					 * fiber?
8569 					 * Shoud not enter here.
8570 					 */
8571 					printf("unknown media (%x)\n", active);
8572 					break;
8573 				}
8574 				if (active & IFM_FDX)
8575 					sc->sc_ctrl |= CTRL_FD;
8576 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8577 			}
8578 		} else if (sc->sc_type == WM_T_PCH) {
8579 			wm_k1_gig_workaround_hv(sc,
8580 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
8581 		}
8582 
8583 		if ((sc->sc_phytype == WMPHY_82578)
8584 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
8585 			== IFM_1000_T)) {
8586 
8587 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
8588 				delay(200*1000); /* XXX too big */
8589 
8590 				/* Link stall fix for link up */
8591 				wm_gmii_hv_writereg(sc->sc_dev, 1,
8592 				    HV_MUX_DATA_CTRL,
8593 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
8594 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
8595 				wm_gmii_hv_writereg(sc->sc_dev, 1,
8596 				    HV_MUX_DATA_CTRL,
8597 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
8598 			}
8599 		}
8600 		/*
8601 		 * I217 Packet Loss issue:
8602 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
8603 		 * on power up.
8604 		 * Set the Beacon Duration for I217 to 8 usec
8605 		 */
8606 		if ((sc->sc_type == WM_T_PCH_LPT)
8607 		    || (sc->sc_type == WM_T_PCH_SPT)) {
8608 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
8609 			reg &= ~FEXTNVM4_BEACON_DURATION;
8610 			reg |= FEXTNVM4_BEACON_DURATION_8US;
8611 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
8612 		}
8613 
8614 		/* XXX Work-around I218 hang issue */
8615 		/* e1000_k1_workaround_lpt_lp() */
8616 
8617 		if ((sc->sc_type == WM_T_PCH_LPT)
8618 		    || (sc->sc_type == WM_T_PCH_SPT)) {
8619 			/*
8620 			 * Set platform power management values for Latency
8621 			 * Tolerance Reporting (LTR)
8622 			 */
8623 			wm_platform_pm_pch_lpt(sc,
8624 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
8625 				    != 0));
8626 		}
8627 
8628 		/* FEXTNVM6 K1-off workaround */
8629 		if (sc->sc_type == WM_T_PCH_SPT) {
8630 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
8631 			if (CSR_READ(sc, WMREG_PCIEANACFG)
8632 			    & FEXTNVM6_K1_OFF_ENABLE)
8633 				reg |= FEXTNVM6_K1_OFF_ENABLE;
8634 			else
8635 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
8636 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
8637 		}
8638 	} else if (icr & ICR_RXSEQ) {
8639 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
8640 			device_xname(sc->sc_dev)));
8641 	}
8642 }
8643 
8644 /*
8645  * wm_linkintr_tbi:
8646  *
8647  *	Helper; handle link interrupts for TBI mode.
8648  */
8649 static void
8650 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
8651 {
8652 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8653 	uint32_t status;
8654 
8655 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
8656 		__func__));
8657 
8658 	status = CSR_READ(sc, WMREG_STATUS);
8659 	if (icr & ICR_LSC) {
8660 		if (status & STATUS_LU) {
8661 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
8662 			    device_xname(sc->sc_dev),
8663 			    (status & STATUS_FD) ? "FDX" : "HDX"));
8664 			/*
8665 			 * NOTE: CTRL will update TFCE and RFCE automatically,
8666 			 * so we should update sc->sc_ctrl
8667 			 */
8668 
8669 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
8670 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
8671 			sc->sc_fcrtl &= ~FCRTL_XONE;
8672 			if (status & STATUS_FD)
8673 				sc->sc_tctl |=
8674 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
8675 			else
8676 				sc->sc_tctl |=
8677 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
8678 			if (sc->sc_ctrl & CTRL_TFCE)
8679 				sc->sc_fcrtl |= FCRTL_XONE;
8680 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
8681 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
8682 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
8683 				      sc->sc_fcrtl);
8684 			sc->sc_tbi_linkup = 1;
8685 			if_link_state_change(ifp, LINK_STATE_UP);
8686 		} else {
8687 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
8688 			    device_xname(sc->sc_dev)));
8689 			sc->sc_tbi_linkup = 0;
8690 			if_link_state_change(ifp, LINK_STATE_DOWN);
8691 		}
8692 		/* Update LED */
8693 		wm_tbi_serdes_set_linkled(sc);
8694 	} else if (icr & ICR_RXSEQ) {
8695 		DPRINTF(WM_DEBUG_LINK,
8696 		    ("%s: LINK: Receive sequence error\n",
8697 		    device_xname(sc->sc_dev)));
8698 	}
8699 }
8700 
8701 /*
8702  * wm_linkintr_serdes:
8703  *
8704  *	Helper; handle link interrupts for TBI mode.
8705  */
8706 static void
8707 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
8708 {
8709 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8710 	struct mii_data *mii = &sc->sc_mii;
8711 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8712 	uint32_t pcs_adv, pcs_lpab, reg;
8713 
8714 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
8715 		__func__));
8716 
8717 	if (icr & ICR_LSC) {
8718 		/* Check PCS */
8719 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
8720 		if ((reg & PCS_LSTS_LINKOK) != 0) {
8721 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
8722 				device_xname(sc->sc_dev)));
8723 			mii->mii_media_status |= IFM_ACTIVE;
8724 			sc->sc_tbi_linkup = 1;
8725 			if_link_state_change(ifp, LINK_STATE_UP);
8726 		} else {
8727 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
8728 				device_xname(sc->sc_dev)));
8729 			mii->mii_media_status |= IFM_NONE;
8730 			sc->sc_tbi_linkup = 0;
8731 			if_link_state_change(ifp, LINK_STATE_DOWN);
8732 			wm_tbi_serdes_set_linkled(sc);
8733 			return;
8734 		}
8735 		mii->mii_media_active |= IFM_1000_SX;
8736 		if ((reg & PCS_LSTS_FDX) != 0)
8737 			mii->mii_media_active |= IFM_FDX;
8738 		else
8739 			mii->mii_media_active |= IFM_HDX;
8740 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
8741 			/* Check flow */
8742 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
8743 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
8744 				DPRINTF(WM_DEBUG_LINK,
8745 				    ("XXX LINKOK but not ACOMP\n"));
8746 				return;
8747 			}
8748 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
8749 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
8750 			DPRINTF(WM_DEBUG_LINK,
8751 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
8752 			if ((pcs_adv & TXCW_SYM_PAUSE)
8753 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
8754 				mii->mii_media_active |= IFM_FLOW
8755 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
8756 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
8757 			    && (pcs_adv & TXCW_ASYM_PAUSE)
8758 			    && (pcs_lpab & TXCW_SYM_PAUSE)
8759 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
8760 				mii->mii_media_active |= IFM_FLOW
8761 				    | IFM_ETH_TXPAUSE;
8762 			else if ((pcs_adv & TXCW_SYM_PAUSE)
8763 			    && (pcs_adv & TXCW_ASYM_PAUSE)
8764 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
8765 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
8766 				mii->mii_media_active |= IFM_FLOW
8767 				    | IFM_ETH_RXPAUSE;
8768 		}
8769 		/* Update LED */
8770 		wm_tbi_serdes_set_linkled(sc);
8771 	} else {
8772 		DPRINTF(WM_DEBUG_LINK,
8773 		    ("%s: LINK: Receive sequence error\n",
8774 		    device_xname(sc->sc_dev)));
8775 	}
8776 }
8777 
8778 /*
8779  * wm_linkintr:
8780  *
8781  *	Helper; handle link interrupts.
8782  */
8783 static void
8784 wm_linkintr(struct wm_softc *sc, uint32_t icr)
8785 {
8786 
8787 	KASSERT(WM_CORE_LOCKED(sc));
8788 
8789 	if (sc->sc_flags & WM_F_HAS_MII)
8790 		wm_linkintr_gmii(sc, icr);
8791 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
8792 	    && (sc->sc_type >= WM_T_82575))
8793 		wm_linkintr_serdes(sc, icr);
8794 	else
8795 		wm_linkintr_tbi(sc, icr);
8796 }
8797 
8798 /*
8799  * wm_intr_legacy:
8800  *
8801  *	Interrupt service routine for INTx and MSI.
8802  */
8803 static int
8804 wm_intr_legacy(void *arg)
8805 {
8806 	struct wm_softc *sc = arg;
8807 	struct wm_queue *wmq = &sc->sc_queue[0];
8808 	struct wm_txqueue *txq = &wmq->wmq_txq;
8809 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
8810 	uint32_t icr, rndval = 0;
8811 	int handled = 0;
8812 
8813 	while (1 /* CONSTCOND */) {
8814 		icr = CSR_READ(sc, WMREG_ICR);
8815 		if ((icr & sc->sc_icr) == 0)
8816 			break;
8817 		if (handled == 0) {
8818 			DPRINTF(WM_DEBUG_TX,
8819 			    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
8820 		}
8821 		if (rndval == 0)
8822 			rndval = icr;
8823 
8824 		mutex_enter(rxq->rxq_lock);
8825 
8826 		if (rxq->rxq_stopping) {
8827 			mutex_exit(rxq->rxq_lock);
8828 			break;
8829 		}
8830 
8831 		handled = 1;
8832 
8833 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
8834 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
8835 			DPRINTF(WM_DEBUG_RX,
8836 			    ("%s: RX: got Rx intr 0x%08x\n",
8837 			    device_xname(sc->sc_dev),
8838 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
8839 			WM_Q_EVCNT_INCR(rxq, rxintr);
8840 		}
8841 #endif
8842 		/*
8843 		 * wm_rxeof() does *not* call upper layer functions directly,
8844 		 * as if_percpuq_enqueue() just call softint_schedule().
8845 		 * So, we can call wm_rxeof() in interrupt context.
8846 		 */
8847 		wm_rxeof(rxq, UINT_MAX);
8848 
8849 		mutex_exit(rxq->rxq_lock);
8850 		mutex_enter(txq->txq_lock);
8851 
8852 		if (txq->txq_stopping) {
8853 			mutex_exit(txq->txq_lock);
8854 			break;
8855 		}
8856 
8857 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
8858 		if (icr & ICR_TXDW) {
8859 			DPRINTF(WM_DEBUG_TX,
8860 			    ("%s: TX: got TXDW interrupt\n",
8861 			    device_xname(sc->sc_dev)));
8862 			WM_Q_EVCNT_INCR(txq, txdw);
8863 		}
8864 #endif
8865 		wm_txeof(sc, txq);
8866 
8867 		mutex_exit(txq->txq_lock);
8868 		WM_CORE_LOCK(sc);
8869 
8870 		if (sc->sc_core_stopping) {
8871 			WM_CORE_UNLOCK(sc);
8872 			break;
8873 		}
8874 
8875 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
8876 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
8877 			wm_linkintr(sc, icr);
8878 		}
8879 
8880 		WM_CORE_UNLOCK(sc);
8881 
8882 		if (icr & ICR_RXO) {
8883 #if defined(WM_DEBUG)
8884 			log(LOG_WARNING, "%s: Receive overrun\n",
8885 			    device_xname(sc->sc_dev));
8886 #endif /* defined(WM_DEBUG) */
8887 		}
8888 	}
8889 
8890 	rnd_add_uint32(&sc->rnd_source, rndval);
8891 
8892 	if (handled) {
8893 		/* Try to get more packets going. */
8894 		softint_schedule(wmq->wmq_si);
8895 	}
8896 
8897 	return handled;
8898 }
8899 
8900 static inline void
8901 wm_txrxintr_disable(struct wm_queue *wmq)
8902 {
8903 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
8904 
8905 	if (sc->sc_type == WM_T_82574)
8906 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
8907 	else if (sc->sc_type == WM_T_82575)
8908 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
8909 	else
8910 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
8911 }
8912 
8913 static inline void
8914 wm_txrxintr_enable(struct wm_queue *wmq)
8915 {
8916 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
8917 
8918 	wm_itrs_calculate(sc, wmq);
8919 
8920 	if (sc->sc_type == WM_T_82574)
8921 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
8922 	else if (sc->sc_type == WM_T_82575)
8923 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
8924 	else
8925 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
8926 }
8927 
8928 static int
8929 wm_txrxintr_msix(void *arg)
8930 {
8931 	struct wm_queue *wmq = arg;
8932 	struct wm_txqueue *txq = &wmq->wmq_txq;
8933 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
8934 	struct wm_softc *sc = txq->txq_sc;
8935 	u_int limit = sc->sc_rx_intr_process_limit;
8936 
8937 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
8938 
8939 	DPRINTF(WM_DEBUG_TX,
8940 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
8941 
8942 	wm_txrxintr_disable(wmq);
8943 
8944 	mutex_enter(txq->txq_lock);
8945 
8946 	if (txq->txq_stopping) {
8947 		mutex_exit(txq->txq_lock);
8948 		return 0;
8949 	}
8950 
8951 	WM_Q_EVCNT_INCR(txq, txdw);
8952 	wm_txeof(sc, txq);
8953 	/* wm_deferred start() is done in wm_handle_queue(). */
8954 	mutex_exit(txq->txq_lock);
8955 
8956 	DPRINTF(WM_DEBUG_RX,
8957 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
8958 	mutex_enter(rxq->rxq_lock);
8959 
8960 	if (rxq->rxq_stopping) {
8961 		mutex_exit(rxq->rxq_lock);
8962 		return 0;
8963 	}
8964 
8965 	WM_Q_EVCNT_INCR(rxq, rxintr);
8966 	wm_rxeof(rxq, limit);
8967 	mutex_exit(rxq->rxq_lock);
8968 
8969 	wm_itrs_writereg(sc, wmq);
8970 
8971 	softint_schedule(wmq->wmq_si);
8972 
8973 	return 1;
8974 }
8975 
8976 static void
8977 wm_handle_queue(void *arg)
8978 {
8979 	struct wm_queue *wmq = arg;
8980 	struct wm_txqueue *txq = &wmq->wmq_txq;
8981 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
8982 	struct wm_softc *sc = txq->txq_sc;
8983 	u_int limit = sc->sc_rx_process_limit;
8984 
8985 	mutex_enter(txq->txq_lock);
8986 	if (txq->txq_stopping) {
8987 		mutex_exit(txq->txq_lock);
8988 		return;
8989 	}
8990 	wm_txeof(sc, txq);
8991 	wm_deferred_start_locked(txq);
8992 	mutex_exit(txq->txq_lock);
8993 
8994 	mutex_enter(rxq->rxq_lock);
8995 	if (rxq->rxq_stopping) {
8996 		mutex_exit(rxq->rxq_lock);
8997 		return;
8998 	}
8999 	WM_Q_EVCNT_INCR(rxq, rxintr);
9000 	wm_rxeof(rxq, limit);
9001 	mutex_exit(rxq->rxq_lock);
9002 
9003 	wm_txrxintr_enable(wmq);
9004 }
9005 
9006 /*
9007  * wm_linkintr_msix:
9008  *
9009  *	Interrupt service routine for link status change for MSI-X.
9010  */
9011 static int
9012 wm_linkintr_msix(void *arg)
9013 {
9014 	struct wm_softc *sc = arg;
9015 	uint32_t reg;
9016 
9017 	DPRINTF(WM_DEBUG_LINK,
9018 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
9019 
9020 	reg = CSR_READ(sc, WMREG_ICR);
9021 	WM_CORE_LOCK(sc);
9022 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
9023 		goto out;
9024 
9025 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
9026 	wm_linkintr(sc, ICR_LSC);
9027 
9028 out:
9029 	WM_CORE_UNLOCK(sc);
9030 
9031 	if (sc->sc_type == WM_T_82574)
9032 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
9033 	else if (sc->sc_type == WM_T_82575)
9034 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
9035 	else
9036 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
9037 
9038 	return 1;
9039 }
9040 
9041 /*
9042  * Media related.
9043  * GMII, SGMII, TBI (and SERDES)
9044  */
9045 
9046 /* Common */
9047 
9048 /*
9049  * wm_tbi_serdes_set_linkled:
9050  *
9051  *	Update the link LED on TBI and SERDES devices.
9052  */
9053 static void
9054 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
9055 {
9056 
9057 	if (sc->sc_tbi_linkup)
9058 		sc->sc_ctrl |= CTRL_SWDPIN(0);
9059 	else
9060 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
9061 
9062 	/* 82540 or newer devices are active low */
9063 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
9064 
9065 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9066 }
9067 
9068 /* GMII related */
9069 
9070 /*
9071  * wm_gmii_reset:
9072  *
9073  *	Reset the PHY.
9074  */
9075 static void
9076 wm_gmii_reset(struct wm_softc *sc)
9077 {
9078 	uint32_t reg;
9079 	int rv;
9080 
9081 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
9082 		device_xname(sc->sc_dev), __func__));
9083 
9084 	rv = sc->phy.acquire(sc);
9085 	if (rv != 0) {
9086 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9087 		    __func__);
9088 		return;
9089 	}
9090 
9091 	switch (sc->sc_type) {
9092 	case WM_T_82542_2_0:
9093 	case WM_T_82542_2_1:
9094 		/* null */
9095 		break;
9096 	case WM_T_82543:
9097 		/*
9098 		 * With 82543, we need to force speed and duplex on the MAC
9099 		 * equal to what the PHY speed and duplex configuration is.
9100 		 * In addition, we need to perform a hardware reset on the PHY
9101 		 * to take it out of reset.
9102 		 */
9103 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
9104 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9105 
9106 		/* The PHY reset pin is active-low. */
9107 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
9108 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
9109 		    CTRL_EXT_SWDPIN(4));
9110 		reg |= CTRL_EXT_SWDPIO(4);
9111 
9112 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9113 		CSR_WRITE_FLUSH(sc);
9114 		delay(10*1000);
9115 
9116 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
9117 		CSR_WRITE_FLUSH(sc);
9118 		delay(150);
9119 #if 0
9120 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
9121 #endif
9122 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
9123 		break;
9124 	case WM_T_82544:	/* reset 10000us */
9125 	case WM_T_82540:
9126 	case WM_T_82545:
9127 	case WM_T_82545_3:
9128 	case WM_T_82546:
9129 	case WM_T_82546_3:
9130 	case WM_T_82541:
9131 	case WM_T_82541_2:
9132 	case WM_T_82547:
9133 	case WM_T_82547_2:
9134 	case WM_T_82571:	/* reset 100us */
9135 	case WM_T_82572:
9136 	case WM_T_82573:
9137 	case WM_T_82574:
9138 	case WM_T_82575:
9139 	case WM_T_82576:
9140 	case WM_T_82580:
9141 	case WM_T_I350:
9142 	case WM_T_I354:
9143 	case WM_T_I210:
9144 	case WM_T_I211:
9145 	case WM_T_82583:
9146 	case WM_T_80003:
9147 		/* generic reset */
9148 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
9149 		CSR_WRITE_FLUSH(sc);
9150 		delay(20000);
9151 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9152 		CSR_WRITE_FLUSH(sc);
9153 		delay(20000);
9154 
9155 		if ((sc->sc_type == WM_T_82541)
9156 		    || (sc->sc_type == WM_T_82541_2)
9157 		    || (sc->sc_type == WM_T_82547)
9158 		    || (sc->sc_type == WM_T_82547_2)) {
9159 			/* workaround for igp are done in igp_reset() */
9160 			/* XXX add code to set LED after phy reset */
9161 		}
9162 		break;
9163 	case WM_T_ICH8:
9164 	case WM_T_ICH9:
9165 	case WM_T_ICH10:
9166 	case WM_T_PCH:
9167 	case WM_T_PCH2:
9168 	case WM_T_PCH_LPT:
9169 	case WM_T_PCH_SPT:
9170 		/* generic reset */
9171 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
9172 		CSR_WRITE_FLUSH(sc);
9173 		delay(100);
9174 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9175 		CSR_WRITE_FLUSH(sc);
9176 		delay(150);
9177 		break;
9178 	default:
9179 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
9180 		    __func__);
9181 		break;
9182 	}
9183 
9184 	sc->phy.release(sc);
9185 
9186 	/* get_cfg_done */
9187 	wm_get_cfg_done(sc);
9188 
9189 	/* extra setup */
9190 	switch (sc->sc_type) {
9191 	case WM_T_82542_2_0:
9192 	case WM_T_82542_2_1:
9193 	case WM_T_82543:
9194 	case WM_T_82544:
9195 	case WM_T_82540:
9196 	case WM_T_82545:
9197 	case WM_T_82545_3:
9198 	case WM_T_82546:
9199 	case WM_T_82546_3:
9200 	case WM_T_82541_2:
9201 	case WM_T_82547_2:
9202 	case WM_T_82571:
9203 	case WM_T_82572:
9204 	case WM_T_82573:
9205 	case WM_T_82574:
9206 	case WM_T_82583:
9207 	case WM_T_82575:
9208 	case WM_T_82576:
9209 	case WM_T_82580:
9210 	case WM_T_I350:
9211 	case WM_T_I354:
9212 	case WM_T_I210:
9213 	case WM_T_I211:
9214 	case WM_T_80003:
9215 		/* null */
9216 		break;
9217 	case WM_T_82541:
9218 	case WM_T_82547:
9219 		/* XXX Configure actively LED after PHY reset */
9220 		break;
9221 	case WM_T_ICH8:
9222 	case WM_T_ICH9:
9223 	case WM_T_ICH10:
9224 	case WM_T_PCH:
9225 	case WM_T_PCH2:
9226 	case WM_T_PCH_LPT:
9227 	case WM_T_PCH_SPT:
9228 		wm_phy_post_reset(sc);
9229 		break;
9230 	default:
9231 		panic("%s: unknown type\n", __func__);
9232 		break;
9233 	}
9234 }
9235 
9236 /*
9237  * Setup sc_phytype and mii_{read|write}reg.
9238  *
9239  *  To identify PHY type, correct read/write function should be selected.
9240  * To select correct read/write function, PCI ID or MAC type are required
9241  * without accessing PHY registers.
9242  *
9243  *  On the first call of this function, PHY ID is not known yet. Check
9244  * PCI ID or MAC type. The list of the PCI ID may not be perfect, so the
9245  * result might be incorrect.
9246  *
9247  *  In the second call, PHY OUI and model is used to identify PHY type.
9248  * It might not be perfpect because of the lack of compared entry, but it
9249  * would be better than the first call.
9250  *
9251  *  If the detected new result and previous assumption is different,
9252  * diagnous message will be printed.
9253  */
9254 static void
9255 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
9256     uint16_t phy_model)
9257 {
9258 	device_t dev = sc->sc_dev;
9259 	struct mii_data *mii = &sc->sc_mii;
9260 	uint16_t new_phytype = WMPHY_UNKNOWN;
9261 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
9262 	mii_readreg_t new_readreg;
9263 	mii_writereg_t new_writereg;
9264 
9265 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
9266 		device_xname(sc->sc_dev), __func__));
9267 
9268 	if (mii->mii_readreg == NULL) {
9269 		/*
9270 		 *  This is the first call of this function. For ICH and PCH
9271 		 * variants, it's difficult to determine the PHY access method
9272 		 * by sc_type, so use the PCI product ID for some devices.
9273 		 */
9274 
9275 		switch (sc->sc_pcidevid) {
9276 		case PCI_PRODUCT_INTEL_PCH_M_LM:
9277 		case PCI_PRODUCT_INTEL_PCH_M_LC:
9278 			/* 82577 */
9279 			new_phytype = WMPHY_82577;
9280 			break;
9281 		case PCI_PRODUCT_INTEL_PCH_D_DM:
9282 		case PCI_PRODUCT_INTEL_PCH_D_DC:
9283 			/* 82578 */
9284 			new_phytype = WMPHY_82578;
9285 			break;
9286 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
9287 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
9288 			/* 82579 */
9289 			new_phytype = WMPHY_82579;
9290 			break;
9291 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
9292 		case PCI_PRODUCT_INTEL_82801I_BM:
9293 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
9294 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
9295 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
9296 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
9297 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
9298 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
9299 			/* ICH8, 9, 10 with 82567 */
9300 			new_phytype = WMPHY_BM;
9301 			break;
9302 		default:
9303 			break;
9304 		}
9305 	} else {
9306 		/* It's not the first call. Use PHY OUI and model */
9307 		switch (phy_oui) {
9308 		case MII_OUI_ATHEROS: /* XXX ??? */
9309 			switch (phy_model) {
9310 			case 0x0004: /* XXX */
9311 				new_phytype = WMPHY_82578;
9312 				break;
9313 			default:
9314 				break;
9315 			}
9316 			break;
9317 		case MII_OUI_xxMARVELL:
9318 			switch (phy_model) {
9319 			case MII_MODEL_xxMARVELL_I210:
9320 				new_phytype = WMPHY_I210;
9321 				break;
9322 			case MII_MODEL_xxMARVELL_E1011:
9323 			case MII_MODEL_xxMARVELL_E1000_3:
9324 			case MII_MODEL_xxMARVELL_E1000_5:
9325 			case MII_MODEL_xxMARVELL_E1112:
9326 				new_phytype = WMPHY_M88;
9327 				break;
9328 			case MII_MODEL_xxMARVELL_E1149:
9329 				new_phytype = WMPHY_BM;
9330 				break;
9331 			case MII_MODEL_xxMARVELL_E1111:
9332 			case MII_MODEL_xxMARVELL_I347:
9333 			case MII_MODEL_xxMARVELL_E1512:
9334 			case MII_MODEL_xxMARVELL_E1340M:
9335 			case MII_MODEL_xxMARVELL_E1543:
9336 				new_phytype = WMPHY_M88;
9337 				break;
9338 			case MII_MODEL_xxMARVELL_I82563:
9339 				new_phytype = WMPHY_GG82563;
9340 				break;
9341 			default:
9342 				break;
9343 			}
9344 			break;
9345 		case MII_OUI_INTEL:
9346 			switch (phy_model) {
9347 			case MII_MODEL_INTEL_I82577:
9348 				new_phytype = WMPHY_82577;
9349 				break;
9350 			case MII_MODEL_INTEL_I82579:
9351 				new_phytype = WMPHY_82579;
9352 				break;
9353 			case MII_MODEL_INTEL_I217:
9354 				new_phytype = WMPHY_I217;
9355 				break;
9356 			case MII_MODEL_INTEL_I82580:
9357 			case MII_MODEL_INTEL_I350:
9358 				new_phytype = WMPHY_82580;
9359 				break;
9360 			default:
9361 				break;
9362 			}
9363 			break;
9364 		case MII_OUI_yyINTEL:
9365 			switch (phy_model) {
9366 			case MII_MODEL_yyINTEL_I82562G:
9367 			case MII_MODEL_yyINTEL_I82562EM:
9368 			case MII_MODEL_yyINTEL_I82562ET:
9369 				new_phytype = WMPHY_IFE;
9370 				break;
9371 			case MII_MODEL_yyINTEL_IGP01E1000:
9372 				new_phytype = WMPHY_IGP;
9373 				break;
9374 			case MII_MODEL_yyINTEL_I82566:
9375 				new_phytype = WMPHY_IGP_3;
9376 				break;
9377 			default:
9378 				break;
9379 			}
9380 			break;
9381 		default:
9382 			break;
9383 		}
9384 		if (new_phytype == WMPHY_UNKNOWN)
9385 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
9386 			    __func__);
9387 
9388 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
9389 		    && (sc->sc_phytype != new_phytype )) {
9390 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
9391 			    "was incorrect. PHY type from PHY ID = %u\n",
9392 			    sc->sc_phytype, new_phytype);
9393 		}
9394 	}
9395 
9396 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
9397 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
9398 		/* SGMII */
9399 		new_readreg = wm_sgmii_readreg;
9400 		new_writereg = wm_sgmii_writereg;
9401 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
9402 		/* BM2 (phyaddr == 1) */
9403 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
9404 		    && (new_phytype != WMPHY_BM)
9405 		    && (new_phytype != WMPHY_UNKNOWN))
9406 			doubt_phytype = new_phytype;
9407 		new_phytype = WMPHY_BM;
9408 		new_readreg = wm_gmii_bm_readreg;
9409 		new_writereg = wm_gmii_bm_writereg;
9410 	} else if (sc->sc_type >= WM_T_PCH) {
9411 		/* All PCH* use _hv_ */
9412 		new_readreg = wm_gmii_hv_readreg;
9413 		new_writereg = wm_gmii_hv_writereg;
9414 	} else if (sc->sc_type >= WM_T_ICH8) {
9415 		/* non-82567 ICH8, 9 and 10 */
9416 		new_readreg = wm_gmii_i82544_readreg;
9417 		new_writereg = wm_gmii_i82544_writereg;
9418 	} else if (sc->sc_type >= WM_T_80003) {
9419 		/* 80003 */
9420 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
9421 		    && (new_phytype != WMPHY_GG82563)
9422 		    && (new_phytype != WMPHY_UNKNOWN))
9423 			doubt_phytype = new_phytype;
9424 		new_phytype = WMPHY_GG82563;
9425 		new_readreg = wm_gmii_i80003_readreg;
9426 		new_writereg = wm_gmii_i80003_writereg;
9427 	} else if (sc->sc_type >= WM_T_I210) {
9428 		/* I210 and I211 */
9429 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
9430 		    && (new_phytype != WMPHY_I210)
9431 		    && (new_phytype != WMPHY_UNKNOWN))
9432 			doubt_phytype = new_phytype;
9433 		new_phytype = WMPHY_I210;
9434 		new_readreg = wm_gmii_gs40g_readreg;
9435 		new_writereg = wm_gmii_gs40g_writereg;
9436 	} else if (sc->sc_type >= WM_T_82580) {
9437 		/* 82580, I350 and I354 */
9438 		new_readreg = wm_gmii_82580_readreg;
9439 		new_writereg = wm_gmii_82580_writereg;
9440 	} else if (sc->sc_type >= WM_T_82544) {
9441 		/* 82544, 0, [56], [17], 8257[1234] and 82583 */
9442 		new_readreg = wm_gmii_i82544_readreg;
9443 		new_writereg = wm_gmii_i82544_writereg;
9444 	} else {
9445 		new_readreg = wm_gmii_i82543_readreg;
9446 		new_writereg = wm_gmii_i82543_writereg;
9447 	}
9448 
9449 	if (new_phytype == WMPHY_BM) {
9450 		/* All BM use _bm_ */
9451 		new_readreg = wm_gmii_bm_readreg;
9452 		new_writereg = wm_gmii_bm_writereg;
9453 	}
9454 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
9455 		/* All PCH* use _hv_ */
9456 		new_readreg = wm_gmii_hv_readreg;
9457 		new_writereg = wm_gmii_hv_writereg;
9458 	}
9459 
9460 	/* Diag output */
9461 	if (doubt_phytype != WMPHY_UNKNOWN)
9462 		aprint_error_dev(dev, "Assumed new PHY type was "
9463 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
9464 		    new_phytype);
9465 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
9466 	    && (sc->sc_phytype != new_phytype ))
9467 		aprint_error_dev(dev, "Previously assumed PHY type(%u)"
9468 		    "was incorrect. New PHY type = %u\n",
9469 		    sc->sc_phytype, new_phytype);
9470 
9471 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
9472 		aprint_error_dev(dev, "PHY type is still unknown.\n");
9473 
9474 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
9475 		aprint_error_dev(dev, "Previously assumed PHY read/write "
9476 		    "function was incorrect.\n");
9477 
9478 	/* Update now */
9479 	sc->sc_phytype = new_phytype;
9480 	mii->mii_readreg = new_readreg;
9481 	mii->mii_writereg = new_writereg;
9482 }
9483 
9484 /*
9485  * wm_get_phy_id_82575:
9486  *
9487  * Return PHY ID. Return -1 if it failed.
9488  */
9489 static int
9490 wm_get_phy_id_82575(struct wm_softc *sc)
9491 {
9492 	uint32_t reg;
9493 	int phyid = -1;
9494 
9495 	/* XXX */
9496 	if ((sc->sc_flags & WM_F_SGMII) == 0)
9497 		return -1;
9498 
9499 	if (wm_sgmii_uses_mdio(sc)) {
9500 		switch (sc->sc_type) {
9501 		case WM_T_82575:
9502 		case WM_T_82576:
9503 			reg = CSR_READ(sc, WMREG_MDIC);
9504 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
9505 			break;
9506 		case WM_T_82580:
9507 		case WM_T_I350:
9508 		case WM_T_I354:
9509 		case WM_T_I210:
9510 		case WM_T_I211:
9511 			reg = CSR_READ(sc, WMREG_MDICNFG);
9512 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
9513 			break;
9514 		default:
9515 			return -1;
9516 		}
9517 	}
9518 
9519 	return phyid;
9520 }
9521 
9522 
9523 /*
9524  * wm_gmii_mediainit:
9525  *
9526  *	Initialize media for use on 1000BASE-T devices.
9527  */
9528 static void
9529 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
9530 {
9531 	device_t dev = sc->sc_dev;
9532 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9533 	struct mii_data *mii = &sc->sc_mii;
9534 	uint32_t reg;
9535 
9536 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
9537 		device_xname(sc->sc_dev), __func__));
9538 
9539 	/* We have GMII. */
9540 	sc->sc_flags |= WM_F_HAS_MII;
9541 
9542 	if (sc->sc_type == WM_T_80003)
9543 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
9544 	else
9545 		sc->sc_tipg = TIPG_1000T_DFLT;
9546 
9547 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
9548 	if ((sc->sc_type == WM_T_82580)
9549 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
9550 	    || (sc->sc_type == WM_T_I211)) {
9551 		reg = CSR_READ(sc, WMREG_PHPM);
9552 		reg &= ~PHPM_GO_LINK_D;
9553 		CSR_WRITE(sc, WMREG_PHPM, reg);
9554 	}
9555 
9556 	/*
9557 	 * Let the chip set speed/duplex on its own based on
9558 	 * signals from the PHY.
9559 	 * XXXbouyer - I'm not sure this is right for the 80003,
9560 	 * the em driver only sets CTRL_SLU here - but it seems to work.
9561 	 */
9562 	sc->sc_ctrl |= CTRL_SLU;
9563 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9564 
9565 	/* Initialize our media structures and probe the GMII. */
9566 	mii->mii_ifp = ifp;
9567 
9568 	mii->mii_statchg = wm_gmii_statchg;
9569 
9570 	/* get PHY control from SMBus to PCIe */
9571 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
9572 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
9573 		wm_smbustopci(sc);
9574 
9575 	wm_gmii_reset(sc);
9576 
9577 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
9578 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
9579 	    wm_gmii_mediastatus);
9580 
9581 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
9582 	    || (sc->sc_type == WM_T_82580)
9583 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
9584 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
9585 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
9586 			/* Attach only one port */
9587 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
9588 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
9589 		} else {
9590 			int i, id;
9591 			uint32_t ctrl_ext;
9592 
9593 			id = wm_get_phy_id_82575(sc);
9594 			if (id != -1) {
9595 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
9596 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
9597 			}
9598 			if ((id == -1)
9599 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
9600 				/* Power on sgmii phy if it is disabled */
9601 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9602 				CSR_WRITE(sc, WMREG_CTRL_EXT,
9603 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
9604 				CSR_WRITE_FLUSH(sc);
9605 				delay(300*1000); /* XXX too long */
9606 
9607 				/* from 1 to 8 */
9608 				for (i = 1; i < 8; i++)
9609 					mii_attach(sc->sc_dev, &sc->sc_mii,
9610 					    0xffffffff, i, MII_OFFSET_ANY,
9611 					    MIIF_DOPAUSE);
9612 
9613 				/* restore previous sfp cage power state */
9614 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
9615 			}
9616 		}
9617 	} else {
9618 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
9619 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
9620 	}
9621 
9622 	/*
9623 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
9624 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
9625 	 */
9626 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
9627 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
9628 		wm_set_mdio_slow_mode_hv(sc);
9629 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
9630 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
9631 	}
9632 
9633 	/*
9634 	 * (For ICH8 variants)
9635 	 * If PHY detection failed, use BM's r/w function and retry.
9636 	 */
9637 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
9638 		/* if failed, retry with *_bm_* */
9639 		aprint_verbose_dev(dev, "Assumed PHY access function "
9640 		    "(type = %d) might be incorrect. Use BM and retry.\n",
9641 		    sc->sc_phytype);
9642 		sc->sc_phytype = WMPHY_BM;
9643 		mii->mii_readreg = wm_gmii_bm_readreg;
9644 		mii->mii_writereg = wm_gmii_bm_writereg;
9645 
9646 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
9647 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
9648 	}
9649 
9650 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
9651 		/* Any PHY wasn't find */
9652 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
9653 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
9654 		sc->sc_phytype = WMPHY_NONE;
9655 	} else {
9656 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
9657 
9658 		/*
9659 		 * PHY Found! Check PHY type again by the second call of
9660 		 * wm_gmii_setup_phytype.
9661 		 */
9662 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
9663 		    child->mii_mpd_model);
9664 
9665 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
9666 	}
9667 }
9668 
9669 /*
9670  * wm_gmii_mediachange:	[ifmedia interface function]
9671  *
9672  *	Set hardware to newly-selected media on a 1000BASE-T device.
9673  */
9674 static int
9675 wm_gmii_mediachange(struct ifnet *ifp)
9676 {
9677 	struct wm_softc *sc = ifp->if_softc;
9678 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9679 	int rc;
9680 
9681 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
9682 		device_xname(sc->sc_dev), __func__));
9683 	if ((ifp->if_flags & IFF_UP) == 0)
9684 		return 0;
9685 
9686 	/* Disable D0 LPLU. */
9687 	wm_lplu_d0_disable(sc);
9688 
9689 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
9690 	sc->sc_ctrl |= CTRL_SLU;
9691 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9692 	    || (sc->sc_type > WM_T_82543)) {
9693 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
9694 	} else {
9695 		sc->sc_ctrl &= ~CTRL_ASDE;
9696 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
9697 		if (ife->ifm_media & IFM_FDX)
9698 			sc->sc_ctrl |= CTRL_FD;
9699 		switch (IFM_SUBTYPE(ife->ifm_media)) {
9700 		case IFM_10_T:
9701 			sc->sc_ctrl |= CTRL_SPEED_10;
9702 			break;
9703 		case IFM_100_TX:
9704 			sc->sc_ctrl |= CTRL_SPEED_100;
9705 			break;
9706 		case IFM_1000_T:
9707 			sc->sc_ctrl |= CTRL_SPEED_1000;
9708 			break;
9709 		default:
9710 			panic("wm_gmii_mediachange: bad media 0x%x",
9711 			    ife->ifm_media);
9712 		}
9713 	}
9714 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9715 	CSR_WRITE_FLUSH(sc);
9716 	if (sc->sc_type <= WM_T_82543)
9717 		wm_gmii_reset(sc);
9718 
9719 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
9720 		return 0;
9721 	return rc;
9722 }
9723 
9724 /*
9725  * wm_gmii_mediastatus:	[ifmedia interface function]
9726  *
9727  *	Get the current interface media status on a 1000BASE-T device.
9728  */
9729 static void
9730 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9731 {
9732 	struct wm_softc *sc = ifp->if_softc;
9733 
9734 	ether_mediastatus(ifp, ifmr);
9735 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
9736 	    | sc->sc_flowflags;
9737 }
9738 
9739 #define	MDI_IO		CTRL_SWDPIN(2)
9740 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
9741 #define	MDI_CLK		CTRL_SWDPIN(3)
9742 
9743 static void
9744 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
9745 {
9746 	uint32_t i, v;
9747 
9748 	v = CSR_READ(sc, WMREG_CTRL);
9749 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
9750 	v |= MDI_DIR | CTRL_SWDPIO(3);
9751 
9752 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
9753 		if (data & i)
9754 			v |= MDI_IO;
9755 		else
9756 			v &= ~MDI_IO;
9757 		CSR_WRITE(sc, WMREG_CTRL, v);
9758 		CSR_WRITE_FLUSH(sc);
9759 		delay(10);
9760 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
9761 		CSR_WRITE_FLUSH(sc);
9762 		delay(10);
9763 		CSR_WRITE(sc, WMREG_CTRL, v);
9764 		CSR_WRITE_FLUSH(sc);
9765 		delay(10);
9766 	}
9767 }
9768 
9769 static uint32_t
9770 wm_i82543_mii_recvbits(struct wm_softc *sc)
9771 {
9772 	uint32_t v, i, data = 0;
9773 
9774 	v = CSR_READ(sc, WMREG_CTRL);
9775 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
9776 	v |= CTRL_SWDPIO(3);
9777 
9778 	CSR_WRITE(sc, WMREG_CTRL, v);
9779 	CSR_WRITE_FLUSH(sc);
9780 	delay(10);
9781 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
9782 	CSR_WRITE_FLUSH(sc);
9783 	delay(10);
9784 	CSR_WRITE(sc, WMREG_CTRL, v);
9785 	CSR_WRITE_FLUSH(sc);
9786 	delay(10);
9787 
9788 	for (i = 0; i < 16; i++) {
9789 		data <<= 1;
9790 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
9791 		CSR_WRITE_FLUSH(sc);
9792 		delay(10);
9793 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
9794 			data |= 1;
9795 		CSR_WRITE(sc, WMREG_CTRL, v);
9796 		CSR_WRITE_FLUSH(sc);
9797 		delay(10);
9798 	}
9799 
9800 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
9801 	CSR_WRITE_FLUSH(sc);
9802 	delay(10);
9803 	CSR_WRITE(sc, WMREG_CTRL, v);
9804 	CSR_WRITE_FLUSH(sc);
9805 	delay(10);
9806 
9807 	return data;
9808 }
9809 
9810 #undef MDI_IO
9811 #undef MDI_DIR
9812 #undef MDI_CLK
9813 
9814 /*
9815  * wm_gmii_i82543_readreg:	[mii interface function]
9816  *
9817  *	Read a PHY register on the GMII (i82543 version).
9818  */
9819 static int
9820 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
9821 {
9822 	struct wm_softc *sc = device_private(dev);
9823 	int rv;
9824 
9825 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
9826 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
9827 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
9828 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
9829 
9830 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
9831 	    device_xname(dev), phy, reg, rv));
9832 
9833 	return rv;
9834 }
9835 
9836 /*
9837  * wm_gmii_i82543_writereg:	[mii interface function]
9838  *
9839  *	Write a PHY register on the GMII (i82543 version).
9840  */
9841 static void
9842 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
9843 {
9844 	struct wm_softc *sc = device_private(dev);
9845 
9846 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
9847 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
9848 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
9849 	    (MII_COMMAND_START << 30), 32);
9850 }
9851 
9852 /*
9853  * wm_gmii_mdic_readreg:	[mii interface function]
9854  *
9855  *	Read a PHY register on the GMII.
9856  */
9857 static int
9858 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
9859 {
9860 	struct wm_softc *sc = device_private(dev);
9861 	uint32_t mdic = 0;
9862 	int i, rv;
9863 
9864 	if (reg > MII_ADDRMASK) {
9865 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
9866 		    __func__, sc->sc_phytype, reg);
9867 		reg &= MII_ADDRMASK;
9868 	}
9869 
9870 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
9871 	    MDIC_REGADD(reg));
9872 
9873 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
9874 		mdic = CSR_READ(sc, WMREG_MDIC);
9875 		if (mdic & MDIC_READY)
9876 			break;
9877 		delay(50);
9878 	}
9879 
9880 	if ((mdic & MDIC_READY) == 0) {
9881 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
9882 		    device_xname(dev), phy, reg);
9883 		rv = 0;
9884 	} else if (mdic & MDIC_E) {
9885 #if 0 /* This is normal if no PHY is present. */
9886 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
9887 		    device_xname(dev), phy, reg);
9888 #endif
9889 		rv = 0;
9890 	} else {
9891 		rv = MDIC_DATA(mdic);
9892 		if (rv == 0xffff)
9893 			rv = 0;
9894 	}
9895 
9896 	return rv;
9897 }
9898 
9899 /*
9900  * wm_gmii_mdic_writereg:	[mii interface function]
9901  *
9902  *	Write a PHY register on the GMII.
9903  */
9904 static void
9905 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
9906 {
9907 	struct wm_softc *sc = device_private(dev);
9908 	uint32_t mdic = 0;
9909 	int i;
9910 
9911 	if (reg > MII_ADDRMASK) {
9912 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
9913 		    __func__, sc->sc_phytype, reg);
9914 		reg &= MII_ADDRMASK;
9915 	}
9916 
9917 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
9918 	    MDIC_REGADD(reg) | MDIC_DATA(val));
9919 
9920 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
9921 		mdic = CSR_READ(sc, WMREG_MDIC);
9922 		if (mdic & MDIC_READY)
9923 			break;
9924 		delay(50);
9925 	}
9926 
9927 	if ((mdic & MDIC_READY) == 0)
9928 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
9929 		    device_xname(dev), phy, reg);
9930 	else if (mdic & MDIC_E)
9931 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
9932 		    device_xname(dev), phy, reg);
9933 }
9934 
9935 /*
9936  * wm_gmii_i82544_readreg:	[mii interface function]
9937  *
9938  *	Read a PHY register on the GMII.
9939  */
9940 static int
9941 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
9942 {
9943 	struct wm_softc *sc = device_private(dev);
9944 	int rv;
9945 
9946 	if (sc->phy.acquire(sc)) {
9947 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
9948 		return 0;
9949 	}
9950 
9951 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
9952 		switch (sc->sc_phytype) {
9953 		case WMPHY_IGP:
9954 		case WMPHY_IGP_2:
9955 		case WMPHY_IGP_3:
9956 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
9957 			break;
9958 		default:
9959 #ifdef WM_DEBUG
9960 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
9961 			    __func__, sc->sc_phytype, reg);
9962 #endif
9963 			break;
9964 		}
9965 	}
9966 
9967 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
9968 	sc->phy.release(sc);
9969 
9970 	return rv;
9971 }
9972 
9973 /*
9974  * wm_gmii_i82544_writereg:	[mii interface function]
9975  *
9976  *	Write a PHY register on the GMII.
9977  */
9978 static void
9979 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
9980 {
9981 	struct wm_softc *sc = device_private(dev);
9982 
9983 	if (sc->phy.acquire(sc)) {
9984 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
9985 		return;
9986 	}
9987 
9988 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
9989 		switch (sc->sc_phytype) {
9990 		case WMPHY_IGP:
9991 		case WMPHY_IGP_2:
9992 		case WMPHY_IGP_3:
9993 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
9994 			break;
9995 		default:
9996 #ifdef WM_DEBUG
9997 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
9998 			    __func__, sc->sc_phytype, reg);
9999 #endif
10000 			break;
10001 		}
10002 	}
10003 
10004 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
10005 	sc->phy.release(sc);
10006 }
10007 
10008 /*
10009  * wm_gmii_i80003_readreg:	[mii interface function]
10010  *
10011  *	Read a PHY register on the kumeran
10012  * This could be handled by the PHY layer if we didn't have to lock the
10013  * ressource ...
10014  */
10015 static int
10016 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
10017 {
10018 	struct wm_softc *sc = device_private(dev);
10019 	int page_select, temp;
10020 	int rv;
10021 
10022 	if (phy != 1) /* only one PHY on kumeran bus */
10023 		return 0;
10024 
10025 	if (sc->phy.acquire(sc)) {
10026 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10027 		return 0;
10028 	}
10029 
10030 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
10031 		page_select = GG82563_PHY_PAGE_SELECT;
10032 	else {
10033 		/*
10034 		 * Use Alternative Page Select register to access registers
10035 		 * 30 and 31.
10036 		 */
10037 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
10038 	}
10039 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
10040 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
10041 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
10042 		/*
10043 		 * Wait more 200us for a bug of the ready bit in the MDIC
10044 		 * register.
10045 		 */
10046 		delay(200);
10047 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
10048 			device_printf(dev, "%s failed\n", __func__);
10049 			rv = 0; /* XXX */
10050 			goto out;
10051 		}
10052 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
10053 		delay(200);
10054 	} else
10055 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
10056 
10057 out:
10058 	sc->phy.release(sc);
10059 	return rv;
10060 }
10061 
10062 /*
10063  * wm_gmii_i80003_writereg:	[mii interface function]
10064  *
10065  *	Write a PHY register on the kumeran.
10066  * This could be handled by the PHY layer if we didn't have to lock the
10067  * ressource ...
10068  */
10069 static void
10070 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
10071 {
10072 	struct wm_softc *sc = device_private(dev);
10073 	int page_select, temp;
10074 
10075 	if (phy != 1) /* only one PHY on kumeran bus */
10076 		return;
10077 
10078 	if (sc->phy.acquire(sc)) {
10079 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10080 		return;
10081 	}
10082 
10083 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
10084 		page_select = GG82563_PHY_PAGE_SELECT;
10085 	else {
10086 		/*
10087 		 * Use Alternative Page Select register to access registers
10088 		 * 30 and 31.
10089 		 */
10090 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
10091 	}
10092 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
10093 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
10094 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
10095 		/*
10096 		 * Wait more 200us for a bug of the ready bit in the MDIC
10097 		 * register.
10098 		 */
10099 		delay(200);
10100 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
10101 			device_printf(dev, "%s failed\n", __func__);
10102 			goto out;
10103 		}
10104 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
10105 		delay(200);
10106 	} else
10107 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
10108 
10109 out:
10110 	sc->phy.release(sc);
10111 }
10112 
10113 /*
10114  * wm_gmii_bm_readreg:	[mii interface function]
10115  *
10116  *	Read a PHY register on the kumeran
10117  * This could be handled by the PHY layer if we didn't have to lock the
10118  * ressource ...
10119  */
10120 static int
10121 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
10122 {
10123 	struct wm_softc *sc = device_private(dev);
10124 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
10125 	uint16_t val;
10126 	int rv;
10127 
10128 	if (sc->phy.acquire(sc)) {
10129 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10130 		return 0;
10131 	}
10132 
10133 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
10134 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
10135 		    || (reg == 31)) ? 1 : phy;
10136 	/* Page 800 works differently than the rest so it has its own func */
10137 	if (page == BM_WUC_PAGE) {
10138 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
10139 		rv = val;
10140 		goto release;
10141 	}
10142 
10143 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
10144 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
10145 		    && (sc->sc_type != WM_T_82583))
10146 			wm_gmii_mdic_writereg(dev, phy,
10147 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
10148 		else
10149 			wm_gmii_mdic_writereg(dev, phy,
10150 			    BME1000_PHY_PAGE_SELECT, page);
10151 	}
10152 
10153 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
10154 
10155 release:
10156 	sc->phy.release(sc);
10157 	return rv;
10158 }
10159 
10160 /*
10161  * wm_gmii_bm_writereg:	[mii interface function]
10162  *
10163  *	Write a PHY register on the kumeran.
10164  * This could be handled by the PHY layer if we didn't have to lock the
10165  * ressource ...
10166  */
10167 static void
10168 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
10169 {
10170 	struct wm_softc *sc = device_private(dev);
10171 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
10172 
10173 	if (sc->phy.acquire(sc)) {
10174 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10175 		return;
10176 	}
10177 
10178 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
10179 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
10180 		    || (reg == 31)) ? 1 : phy;
10181 	/* Page 800 works differently than the rest so it has its own func */
10182 	if (page == BM_WUC_PAGE) {
10183 		uint16_t tmp;
10184 
10185 		tmp = val;
10186 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
10187 		goto release;
10188 	}
10189 
10190 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
10191 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
10192 		    && (sc->sc_type != WM_T_82583))
10193 			wm_gmii_mdic_writereg(dev, phy,
10194 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
10195 		else
10196 			wm_gmii_mdic_writereg(dev, phy,
10197 			    BME1000_PHY_PAGE_SELECT, page);
10198 	}
10199 
10200 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
10201 
10202 release:
10203 	sc->phy.release(sc);
10204 }
10205 
10206 static void
10207 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd)
10208 {
10209 	struct wm_softc *sc = device_private(dev);
10210 	uint16_t regnum = BM_PHY_REG_NUM(offset);
10211 	uint16_t wuce, reg;
10212 
10213 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
10214 		device_xname(dev), __func__));
10215 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
10216 	if (sc->sc_type == WM_T_PCH) {
10217 		/* XXX e1000 driver do nothing... why? */
10218 	}
10219 
10220 	/*
10221 	 * 1) Enable PHY wakeup register first.
10222 	 * See e1000_enable_phy_wakeup_reg_access_bm().
10223 	 */
10224 
10225 	/* Set page 769 */
10226 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
10227 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
10228 
10229 	/* Read WUCE and save it */
10230 	wuce = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
10231 
10232 	reg = wuce | BM_WUC_ENABLE_BIT;
10233 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
10234 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, reg);
10235 
10236 	/* Select page 800 */
10237 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
10238 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
10239 
10240 	/*
10241 	 * 2) Access PHY wakeup register.
10242 	 * See e1000_access_phy_wakeup_reg_bm.
10243 	 */
10244 
10245 	/* Write page 800 */
10246 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
10247 
10248 	if (rd)
10249 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
10250 	else
10251 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
10252 
10253 	/*
10254 	 * 3) Disable PHY wakeup register.
10255 	 * See e1000_disable_phy_wakeup_reg_access_bm().
10256 	 */
10257 	/* Set page 769 */
10258 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
10259 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
10260 
10261 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, wuce);
10262 }
10263 
10264 /*
10265  * wm_gmii_hv_readreg:	[mii interface function]
10266  *
10267  *	Read a PHY register on the kumeran
10268  * This could be handled by the PHY layer if we didn't have to lock the
10269  * ressource ...
10270  */
10271 static int
10272 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
10273 {
10274 	struct wm_softc *sc = device_private(dev);
10275 	int rv;
10276 
10277 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
10278 		device_xname(dev), __func__));
10279 	if (sc->phy.acquire(sc)) {
10280 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10281 		return 0;
10282 	}
10283 
10284 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg);
10285 	sc->phy.release(sc);
10286 	return rv;
10287 }
10288 
10289 static int
10290 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg)
10291 {
10292 	uint16_t page = BM_PHY_REG_PAGE(reg);
10293 	uint16_t regnum = BM_PHY_REG_NUM(reg);
10294 	uint16_t val;
10295 	int rv;
10296 
10297 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
10298 
10299 	/* Page 800 works differently than the rest so it has its own func */
10300 	if (page == BM_WUC_PAGE) {
10301 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
10302 		return val;
10303 	}
10304 
10305 	/*
10306 	 * Lower than page 768 works differently than the rest so it has its
10307 	 * own func
10308 	 */
10309 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
10310 		printf("gmii_hv_readreg!!!\n");
10311 		return 0;
10312 	}
10313 
10314 	/*
10315 	 * XXX I21[789] documents say that the SMBus Address register is at
10316 	 * PHY address 01, Page 0 (not 768), Register 26.
10317 	 */
10318 	if (page == HV_INTC_FC_PAGE_START)
10319 		page = 0;
10320 
10321 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
10322 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
10323 		    page << BME1000_PAGE_SHIFT);
10324 	}
10325 
10326 	rv = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
10327 	return rv;
10328 }
10329 
10330 /*
10331  * wm_gmii_hv_writereg:	[mii interface function]
10332  *
10333  *	Write a PHY register on the kumeran.
10334  * This could be handled by the PHY layer if we didn't have to lock the
10335  * ressource ...
10336  */
10337 static void
10338 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
10339 {
10340 	struct wm_softc *sc = device_private(dev);
10341 
10342 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
10343 		device_xname(dev), __func__));
10344 
10345 	if (sc->phy.acquire(sc)) {
10346 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10347 		return;
10348 	}
10349 
10350 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
10351 	sc->phy.release(sc);
10352 }
10353 
10354 static void
10355 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, int val)
10356 {
10357 	struct wm_softc *sc = device_private(dev);
10358 	uint16_t page = BM_PHY_REG_PAGE(reg);
10359 	uint16_t regnum = BM_PHY_REG_NUM(reg);
10360 
10361 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
10362 
10363 	/* Page 800 works differently than the rest so it has its own func */
10364 	if (page == BM_WUC_PAGE) {
10365 		uint16_t tmp;
10366 
10367 		tmp = val;
10368 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
10369 		return;
10370 	}
10371 
10372 	/*
10373 	 * Lower than page 768 works differently than the rest so it has its
10374 	 * own func
10375 	 */
10376 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
10377 		printf("gmii_hv_writereg!!!\n");
10378 		return;
10379 	}
10380 
10381 	{
10382 		/*
10383 		 * XXX I21[789] documents say that the SMBus Address register
10384 		 * is at PHY address 01, Page 0 (not 768), Register 26.
10385 		 */
10386 		if (page == HV_INTC_FC_PAGE_START)
10387 			page = 0;
10388 
10389 		/*
10390 		 * XXX Workaround MDIO accesses being disabled after entering
10391 		 * IEEE Power Down (whenever bit 11 of the PHY control
10392 		 * register is set)
10393 		 */
10394 		if (sc->sc_phytype == WMPHY_82578) {
10395 			struct mii_softc *child;
10396 
10397 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
10398 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
10399 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
10400 			    && ((val & (1 << 11)) != 0)) {
10401 				printf("XXX need workaround\n");
10402 			}
10403 		}
10404 
10405 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
10406 			wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
10407 			    page << BME1000_PAGE_SHIFT);
10408 		}
10409 	}
10410 
10411 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
10412 }
10413 
10414 /*
10415  * wm_gmii_82580_readreg:	[mii interface function]
10416  *
10417  *	Read a PHY register on the 82580 and I350.
10418  * This could be handled by the PHY layer if we didn't have to lock the
10419  * ressource ...
10420  */
10421 static int
10422 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
10423 {
10424 	struct wm_softc *sc = device_private(dev);
10425 	int rv;
10426 
10427 	if (sc->phy.acquire(sc) != 0) {
10428 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10429 		return 0;
10430 	}
10431 
10432 #ifdef DIAGNOSTIC
10433 	if (reg > MII_ADDRMASK) {
10434 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
10435 		    __func__, sc->sc_phytype, reg);
10436 		reg &= MII_ADDRMASK;
10437 	}
10438 #endif
10439 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
10440 
10441 	sc->phy.release(sc);
10442 	return rv;
10443 }
10444 
10445 /*
10446  * wm_gmii_82580_writereg:	[mii interface function]
10447  *
10448  *	Write a PHY register on the 82580 and I350.
10449  * This could be handled by the PHY layer if we didn't have to lock the
10450  * ressource ...
10451  */
10452 static void
10453 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
10454 {
10455 	struct wm_softc *sc = device_private(dev);
10456 
10457 	if (sc->phy.acquire(sc) != 0) {
10458 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10459 		return;
10460 	}
10461 
10462 #ifdef DIAGNOSTIC
10463 	if (reg > MII_ADDRMASK) {
10464 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
10465 		    __func__, sc->sc_phytype, reg);
10466 		reg &= MII_ADDRMASK;
10467 	}
10468 #endif
10469 	wm_gmii_mdic_writereg(dev, phy, reg, val);
10470 
10471 	sc->phy.release(sc);
10472 }
10473 
10474 /*
10475  * wm_gmii_gs40g_readreg:	[mii interface function]
10476  *
10477  *	Read a PHY register on the I2100 and I211.
10478  * This could be handled by the PHY layer if we didn't have to lock the
10479  * ressource ...
10480  */
10481 static int
10482 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
10483 {
10484 	struct wm_softc *sc = device_private(dev);
10485 	int page, offset;
10486 	int rv;
10487 
10488 	/* Acquire semaphore */
10489 	if (sc->phy.acquire(sc)) {
10490 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10491 		return 0;
10492 	}
10493 
10494 	/* Page select */
10495 	page = reg >> GS40G_PAGE_SHIFT;
10496 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
10497 
10498 	/* Read reg */
10499 	offset = reg & GS40G_OFFSET_MASK;
10500 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
10501 
10502 	sc->phy.release(sc);
10503 	return rv;
10504 }
10505 
10506 /*
10507  * wm_gmii_gs40g_writereg:	[mii interface function]
10508  *
10509  *	Write a PHY register on the I210 and I211.
10510  * This could be handled by the PHY layer if we didn't have to lock the
10511  * ressource ...
10512  */
10513 static void
10514 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
10515 {
10516 	struct wm_softc *sc = device_private(dev);
10517 	int page, offset;
10518 
10519 	/* Acquire semaphore */
10520 	if (sc->phy.acquire(sc)) {
10521 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10522 		return;
10523 	}
10524 
10525 	/* Page select */
10526 	page = reg >> GS40G_PAGE_SHIFT;
10527 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
10528 
10529 	/* Write reg */
10530 	offset = reg & GS40G_OFFSET_MASK;
10531 	wm_gmii_mdic_writereg(dev, phy, offset, val);
10532 
10533 	/* Release semaphore */
10534 	sc->phy.release(sc);
10535 }
10536 
10537 /*
10538  * wm_gmii_statchg:	[mii interface function]
10539  *
10540  *	Callback from MII layer when media changes.
10541  */
10542 static void
10543 wm_gmii_statchg(struct ifnet *ifp)
10544 {
10545 	struct wm_softc *sc = ifp->if_softc;
10546 	struct mii_data *mii = &sc->sc_mii;
10547 
10548 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
10549 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
10550 	sc->sc_fcrtl &= ~FCRTL_XONE;
10551 
10552 	/*
10553 	 * Get flow control negotiation result.
10554 	 */
10555 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
10556 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
10557 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
10558 		mii->mii_media_active &= ~IFM_ETH_FMASK;
10559 	}
10560 
10561 	if (sc->sc_flowflags & IFM_FLOW) {
10562 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
10563 			sc->sc_ctrl |= CTRL_TFCE;
10564 			sc->sc_fcrtl |= FCRTL_XONE;
10565 		}
10566 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
10567 			sc->sc_ctrl |= CTRL_RFCE;
10568 	}
10569 
10570 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
10571 		DPRINTF(WM_DEBUG_LINK,
10572 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
10573 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
10574 	} else {
10575 		DPRINTF(WM_DEBUG_LINK,
10576 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
10577 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
10578 	}
10579 
10580 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10581 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
10582 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
10583 						 : WMREG_FCRTL, sc->sc_fcrtl);
10584 	if (sc->sc_type == WM_T_80003) {
10585 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
10586 		case IFM_1000_T:
10587 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
10588 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
10589 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
10590 			break;
10591 		default:
10592 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
10593 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
10594 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
10595 			break;
10596 		}
10597 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
10598 	}
10599 }
10600 
10601 /* kumeran related (80003, ICH* and PCH*) */
10602 
10603 /*
10604  * wm_kmrn_readreg:
10605  *
10606  *	Read a kumeran register
10607  */
10608 static int
10609 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
10610 {
10611 	int rv;
10612 
10613 	if (sc->sc_type == WM_T_80003)
10614 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
10615 	else
10616 		rv = sc->phy.acquire(sc);
10617 	if (rv != 0) {
10618 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
10619 		    __func__);
10620 		return rv;
10621 	}
10622 
10623 	rv = wm_kmrn_readreg_locked(sc, reg, val);
10624 
10625 	if (sc->sc_type == WM_T_80003)
10626 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
10627 	else
10628 		sc->phy.release(sc);
10629 
10630 	return rv;
10631 }
10632 
10633 static int
10634 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
10635 {
10636 
10637 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
10638 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
10639 	    KUMCTRLSTA_REN);
10640 	CSR_WRITE_FLUSH(sc);
10641 	delay(2);
10642 
10643 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
10644 
10645 	return 0;
10646 }
10647 
10648 /*
10649  * wm_kmrn_writereg:
10650  *
10651  *	Write a kumeran register
10652  */
10653 static int
10654 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
10655 {
10656 	int rv;
10657 
10658 	if (sc->sc_type == WM_T_80003)
10659 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
10660 	else
10661 		rv = sc->phy.acquire(sc);
10662 	if (rv != 0) {
10663 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
10664 		    __func__);
10665 		return rv;
10666 	}
10667 
10668 	rv = wm_kmrn_writereg_locked(sc, reg, val);
10669 
10670 	if (sc->sc_type == WM_T_80003)
10671 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
10672 	else
10673 		sc->phy.release(sc);
10674 
10675 	return rv;
10676 }
10677 
10678 static int
10679 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
10680 {
10681 
10682 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
10683 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
10684 
10685 	return 0;
10686 }
10687 
10688 /* SGMII related */
10689 
10690 /*
10691  * wm_sgmii_uses_mdio
10692  *
10693  * Check whether the transaction is to the internal PHY or the external
10694  * MDIO interface. Return true if it's MDIO.
10695  */
10696 static bool
10697 wm_sgmii_uses_mdio(struct wm_softc *sc)
10698 {
10699 	uint32_t reg;
10700 	bool ismdio = false;
10701 
10702 	switch (sc->sc_type) {
10703 	case WM_T_82575:
10704 	case WM_T_82576:
10705 		reg = CSR_READ(sc, WMREG_MDIC);
10706 		ismdio = ((reg & MDIC_DEST) != 0);
10707 		break;
10708 	case WM_T_82580:
10709 	case WM_T_I350:
10710 	case WM_T_I354:
10711 	case WM_T_I210:
10712 	case WM_T_I211:
10713 		reg = CSR_READ(sc, WMREG_MDICNFG);
10714 		ismdio = ((reg & MDICNFG_DEST) != 0);
10715 		break;
10716 	default:
10717 		break;
10718 	}
10719 
10720 	return ismdio;
10721 }
10722 
10723 /*
10724  * wm_sgmii_readreg:	[mii interface function]
10725  *
10726  *	Read a PHY register on the SGMII
10727  * This could be handled by the PHY layer if we didn't have to lock the
10728  * ressource ...
10729  */
10730 static int
10731 wm_sgmii_readreg(device_t dev, int phy, int reg)
10732 {
10733 	struct wm_softc *sc = device_private(dev);
10734 	uint32_t i2ccmd;
10735 	int i, rv;
10736 
10737 	if (sc->phy.acquire(sc)) {
10738 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10739 		return 0;
10740 	}
10741 
10742 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
10743 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
10744 	    | I2CCMD_OPCODE_READ;
10745 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
10746 
10747 	/* Poll the ready bit */
10748 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
10749 		delay(50);
10750 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
10751 		if (i2ccmd & I2CCMD_READY)
10752 			break;
10753 	}
10754 	if ((i2ccmd & I2CCMD_READY) == 0)
10755 		device_printf(dev, "I2CCMD Read did not complete\n");
10756 	if ((i2ccmd & I2CCMD_ERROR) != 0)
10757 		device_printf(dev, "I2CCMD Error bit set\n");
10758 
10759 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
10760 
10761 	sc->phy.release(sc);
10762 	return rv;
10763 }
10764 
10765 /*
10766  * wm_sgmii_writereg:	[mii interface function]
10767  *
10768  *	Write a PHY register on the SGMII.
10769  * This could be handled by the PHY layer if we didn't have to lock the
10770  * ressource ...
10771  */
10772 static void
10773 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
10774 {
10775 	struct wm_softc *sc = device_private(dev);
10776 	uint32_t i2ccmd;
10777 	int i;
10778 	int val_swapped;
10779 
10780 	if (sc->phy.acquire(sc) != 0) {
10781 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10782 		return;
10783 	}
10784 	/* Swap the data bytes for the I2C interface */
10785 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
10786 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
10787 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
10788 	    | I2CCMD_OPCODE_WRITE | val_swapped;
10789 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
10790 
10791 	/* Poll the ready bit */
10792 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
10793 		delay(50);
10794 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
10795 		if (i2ccmd & I2CCMD_READY)
10796 			break;
10797 	}
10798 	if ((i2ccmd & I2CCMD_READY) == 0)
10799 		device_printf(dev, "I2CCMD Write did not complete\n");
10800 	if ((i2ccmd & I2CCMD_ERROR) != 0)
10801 		device_printf(dev, "I2CCMD Error bit set\n");
10802 
10803 	sc->phy.release(sc);
10804 }
10805 
10806 /* TBI related */
10807 
10808 /*
10809  * wm_tbi_mediainit:
10810  *
10811  *	Initialize media for use on 1000BASE-X devices.
10812  */
10813 static void
10814 wm_tbi_mediainit(struct wm_softc *sc)
10815 {
10816 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10817 	const char *sep = "";
10818 
10819 	if (sc->sc_type < WM_T_82543)
10820 		sc->sc_tipg = TIPG_WM_DFLT;
10821 	else
10822 		sc->sc_tipg = TIPG_LG_DFLT;
10823 
10824 	sc->sc_tbi_serdes_anegticks = 5;
10825 
10826 	/* Initialize our media structures */
10827 	sc->sc_mii.mii_ifp = ifp;
10828 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
10829 
10830 	if ((sc->sc_type >= WM_T_82575)
10831 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
10832 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
10833 		    wm_serdes_mediachange, wm_serdes_mediastatus);
10834 	else
10835 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
10836 		    wm_tbi_mediachange, wm_tbi_mediastatus);
10837 
10838 	/*
10839 	 * SWD Pins:
10840 	 *
10841 	 *	0 = Link LED (output)
10842 	 *	1 = Loss Of Signal (input)
10843 	 */
10844 	sc->sc_ctrl |= CTRL_SWDPIO(0);
10845 
10846 	/* XXX Perhaps this is only for TBI */
10847 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
10848 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
10849 
10850 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
10851 		sc->sc_ctrl &= ~CTRL_LRST;
10852 
10853 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10854 
10855 #define	ADD(ss, mm, dd)							\
10856 do {									\
10857 	aprint_normal("%s%s", sep, ss);					\
10858 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
10859 	sep = ", ";							\
10860 } while (/*CONSTCOND*/0)
10861 
10862 	aprint_normal_dev(sc->sc_dev, "");
10863 
10864 	if (sc->sc_type == WM_T_I354) {
10865 		uint32_t status;
10866 
10867 		status = CSR_READ(sc, WMREG_STATUS);
10868 		if (((status & STATUS_2P5_SKU) != 0)
10869 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
10870 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
10871 		} else
10872 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
10873 	} else if (sc->sc_type == WM_T_82545) {
10874 		/* Only 82545 is LX (XXX except SFP) */
10875 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
10876 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
10877 	} else {
10878 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
10879 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
10880 	}
10881 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
10882 	aprint_normal("\n");
10883 
10884 #undef ADD
10885 
10886 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
10887 }
10888 
10889 /*
10890  * wm_tbi_mediachange:	[ifmedia interface function]
10891  *
10892  *	Set hardware to newly-selected media on a 1000BASE-X device.
10893  */
10894 static int
10895 wm_tbi_mediachange(struct ifnet *ifp)
10896 {
10897 	struct wm_softc *sc = ifp->if_softc;
10898 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
10899 	uint32_t status;
10900 	int i;
10901 
10902 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
10903 		/* XXX need some work for >= 82571 and < 82575 */
10904 		if (sc->sc_type < WM_T_82575)
10905 			return 0;
10906 	}
10907 
10908 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
10909 	    || (sc->sc_type >= WM_T_82575))
10910 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
10911 
10912 	sc->sc_ctrl &= ~CTRL_LRST;
10913 	sc->sc_txcw = TXCW_ANE;
10914 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
10915 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
10916 	else if (ife->ifm_media & IFM_FDX)
10917 		sc->sc_txcw |= TXCW_FD;
10918 	else
10919 		sc->sc_txcw |= TXCW_HD;
10920 
10921 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
10922 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
10923 
10924 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
10925 		    device_xname(sc->sc_dev), sc->sc_txcw));
10926 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
10927 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10928 	CSR_WRITE_FLUSH(sc);
10929 	delay(1000);
10930 
10931 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
10932 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
10933 
10934 	/*
10935 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
10936 	 * optics detect a signal, 0 if they don't.
10937 	 */
10938 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
10939 		/* Have signal; wait for the link to come up. */
10940 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
10941 			delay(10000);
10942 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
10943 				break;
10944 		}
10945 
10946 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
10947 			    device_xname(sc->sc_dev),i));
10948 
10949 		status = CSR_READ(sc, WMREG_STATUS);
10950 		DPRINTF(WM_DEBUG_LINK,
10951 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
10952 			device_xname(sc->sc_dev),status, STATUS_LU));
10953 		if (status & STATUS_LU) {
10954 			/* Link is up. */
10955 			DPRINTF(WM_DEBUG_LINK,
10956 			    ("%s: LINK: set media -> link up %s\n",
10957 			    device_xname(sc->sc_dev),
10958 			    (status & STATUS_FD) ? "FDX" : "HDX"));
10959 
10960 			/*
10961 			 * NOTE: CTRL will update TFCE and RFCE automatically,
10962 			 * so we should update sc->sc_ctrl
10963 			 */
10964 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
10965 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
10966 			sc->sc_fcrtl &= ~FCRTL_XONE;
10967 			if (status & STATUS_FD)
10968 				sc->sc_tctl |=
10969 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
10970 			else
10971 				sc->sc_tctl |=
10972 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
10973 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
10974 				sc->sc_fcrtl |= FCRTL_XONE;
10975 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
10976 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
10977 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
10978 				      sc->sc_fcrtl);
10979 			sc->sc_tbi_linkup = 1;
10980 		} else {
10981 			if (i == WM_LINKUP_TIMEOUT)
10982 				wm_check_for_link(sc);
10983 			/* Link is down. */
10984 			DPRINTF(WM_DEBUG_LINK,
10985 			    ("%s: LINK: set media -> link down\n",
10986 			    device_xname(sc->sc_dev)));
10987 			sc->sc_tbi_linkup = 0;
10988 		}
10989 	} else {
10990 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
10991 		    device_xname(sc->sc_dev)));
10992 		sc->sc_tbi_linkup = 0;
10993 	}
10994 
10995 	wm_tbi_serdes_set_linkled(sc);
10996 
10997 	return 0;
10998 }
10999 
11000 /*
11001  * wm_tbi_mediastatus:	[ifmedia interface function]
11002  *
11003  *	Get the current interface media status on a 1000BASE-X device.
11004  */
11005 static void
11006 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
11007 {
11008 	struct wm_softc *sc = ifp->if_softc;
11009 	uint32_t ctrl, status;
11010 
11011 	ifmr->ifm_status = IFM_AVALID;
11012 	ifmr->ifm_active = IFM_ETHER;
11013 
11014 	status = CSR_READ(sc, WMREG_STATUS);
11015 	if ((status & STATUS_LU) == 0) {
11016 		ifmr->ifm_active |= IFM_NONE;
11017 		return;
11018 	}
11019 
11020 	ifmr->ifm_status |= IFM_ACTIVE;
11021 	/* Only 82545 is LX */
11022 	if (sc->sc_type == WM_T_82545)
11023 		ifmr->ifm_active |= IFM_1000_LX;
11024 	else
11025 		ifmr->ifm_active |= IFM_1000_SX;
11026 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
11027 		ifmr->ifm_active |= IFM_FDX;
11028 	else
11029 		ifmr->ifm_active |= IFM_HDX;
11030 	ctrl = CSR_READ(sc, WMREG_CTRL);
11031 	if (ctrl & CTRL_RFCE)
11032 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
11033 	if (ctrl & CTRL_TFCE)
11034 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
11035 }
11036 
11037 /* XXX TBI only */
11038 static int
11039 wm_check_for_link(struct wm_softc *sc)
11040 {
11041 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
11042 	uint32_t rxcw;
11043 	uint32_t ctrl;
11044 	uint32_t status;
11045 	uint32_t sig;
11046 
11047 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
11048 		/* XXX need some work for >= 82571 */
11049 		if (sc->sc_type >= WM_T_82571) {
11050 			sc->sc_tbi_linkup = 1;
11051 			return 0;
11052 		}
11053 	}
11054 
11055 	rxcw = CSR_READ(sc, WMREG_RXCW);
11056 	ctrl = CSR_READ(sc, WMREG_CTRL);
11057 	status = CSR_READ(sc, WMREG_STATUS);
11058 
11059 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
11060 
11061 	DPRINTF(WM_DEBUG_LINK,
11062 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
11063 		device_xname(sc->sc_dev), __func__,
11064 		((ctrl & CTRL_SWDPIN(1)) == sig),
11065 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
11066 
11067 	/*
11068 	 * SWDPIN   LU RXCW
11069 	 *      0    0    0
11070 	 *      0    0    1	(should not happen)
11071 	 *      0    1    0	(should not happen)
11072 	 *      0    1    1	(should not happen)
11073 	 *      1    0    0	Disable autonego and force linkup
11074 	 *      1    0    1	got /C/ but not linkup yet
11075 	 *      1    1    0	(linkup)
11076 	 *      1    1    1	If IFM_AUTO, back to autonego
11077 	 *
11078 	 */
11079 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
11080 	    && ((status & STATUS_LU) == 0)
11081 	    && ((rxcw & RXCW_C) == 0)) {
11082 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
11083 			__func__));
11084 		sc->sc_tbi_linkup = 0;
11085 		/* Disable auto-negotiation in the TXCW register */
11086 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
11087 
11088 		/*
11089 		 * Force link-up and also force full-duplex.
11090 		 *
11091 		 * NOTE: CTRL was updated TFCE and RFCE automatically,
11092 		 * so we should update sc->sc_ctrl
11093 		 */
11094 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
11095 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11096 	} else if (((status & STATUS_LU) != 0)
11097 	    && ((rxcw & RXCW_C) != 0)
11098 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
11099 		sc->sc_tbi_linkup = 1;
11100 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
11101 			__func__));
11102 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
11103 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
11104 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
11105 	    && ((rxcw & RXCW_C) != 0)) {
11106 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
11107 	} else {
11108 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
11109 			status));
11110 	}
11111 
11112 	return 0;
11113 }
11114 
11115 /*
11116  * wm_tbi_tick:
11117  *
11118  *	Check the link on TBI devices.
11119  *	This function acts as mii_tick().
11120  */
11121 static void
11122 wm_tbi_tick(struct wm_softc *sc)
11123 {
11124 	struct mii_data *mii = &sc->sc_mii;
11125 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
11126 	uint32_t status;
11127 
11128 	KASSERT(WM_CORE_LOCKED(sc));
11129 
11130 	status = CSR_READ(sc, WMREG_STATUS);
11131 
11132 	/* XXX is this needed? */
11133 	(void)CSR_READ(sc, WMREG_RXCW);
11134 	(void)CSR_READ(sc, WMREG_CTRL);
11135 
11136 	/* set link status */
11137 	if ((status & STATUS_LU) == 0) {
11138 		DPRINTF(WM_DEBUG_LINK,
11139 		    ("%s: LINK: checklink -> down\n",
11140 			device_xname(sc->sc_dev)));
11141 		sc->sc_tbi_linkup = 0;
11142 	} else if (sc->sc_tbi_linkup == 0) {
11143 		DPRINTF(WM_DEBUG_LINK,
11144 		    ("%s: LINK: checklink -> up %s\n",
11145 			device_xname(sc->sc_dev),
11146 			(status & STATUS_FD) ? "FDX" : "HDX"));
11147 		sc->sc_tbi_linkup = 1;
11148 		sc->sc_tbi_serdes_ticks = 0;
11149 	}
11150 
11151 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
11152 		goto setled;
11153 
11154 	if ((status & STATUS_LU) == 0) {
11155 		sc->sc_tbi_linkup = 0;
11156 		/* If the timer expired, retry autonegotiation */
11157 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
11158 		    && (++sc->sc_tbi_serdes_ticks
11159 			>= sc->sc_tbi_serdes_anegticks)) {
11160 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
11161 			sc->sc_tbi_serdes_ticks = 0;
11162 			/*
11163 			 * Reset the link, and let autonegotiation do
11164 			 * its thing
11165 			 */
11166 			sc->sc_ctrl |= CTRL_LRST;
11167 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11168 			CSR_WRITE_FLUSH(sc);
11169 			delay(1000);
11170 			sc->sc_ctrl &= ~CTRL_LRST;
11171 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11172 			CSR_WRITE_FLUSH(sc);
11173 			delay(1000);
11174 			CSR_WRITE(sc, WMREG_TXCW,
11175 			    sc->sc_txcw & ~TXCW_ANE);
11176 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
11177 		}
11178 	}
11179 
11180 setled:
11181 	wm_tbi_serdes_set_linkled(sc);
11182 }
11183 
11184 /* SERDES related */
11185 static void
11186 wm_serdes_power_up_link_82575(struct wm_softc *sc)
11187 {
11188 	uint32_t reg;
11189 
11190 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
11191 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
11192 		return;
11193 
11194 	reg = CSR_READ(sc, WMREG_PCS_CFG);
11195 	reg |= PCS_CFG_PCS_EN;
11196 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
11197 
11198 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
11199 	reg &= ~CTRL_EXT_SWDPIN(3);
11200 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11201 	CSR_WRITE_FLUSH(sc);
11202 }
11203 
11204 static int
11205 wm_serdes_mediachange(struct ifnet *ifp)
11206 {
11207 	struct wm_softc *sc = ifp->if_softc;
11208 	bool pcs_autoneg = true; /* XXX */
11209 	uint32_t ctrl_ext, pcs_lctl, reg;
11210 
11211 	/* XXX Currently, this function is not called on 8257[12] */
11212 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
11213 	    || (sc->sc_type >= WM_T_82575))
11214 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
11215 
11216 	wm_serdes_power_up_link_82575(sc);
11217 
11218 	sc->sc_ctrl |= CTRL_SLU;
11219 
11220 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
11221 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
11222 
11223 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
11224 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
11225 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
11226 	case CTRL_EXT_LINK_MODE_SGMII:
11227 		pcs_autoneg = true;
11228 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
11229 		break;
11230 	case CTRL_EXT_LINK_MODE_1000KX:
11231 		pcs_autoneg = false;
11232 		/* FALLTHROUGH */
11233 	default:
11234 		if ((sc->sc_type == WM_T_82575)
11235 		    || (sc->sc_type == WM_T_82576)) {
11236 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
11237 				pcs_autoneg = false;
11238 		}
11239 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
11240 		    | CTRL_FRCFDX;
11241 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
11242 	}
11243 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11244 
11245 	if (pcs_autoneg) {
11246 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
11247 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
11248 
11249 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
11250 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
11251 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
11252 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
11253 	} else
11254 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
11255 
11256 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
11257 
11258 
11259 	return 0;
11260 }
11261 
11262 static void
11263 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
11264 {
11265 	struct wm_softc *sc = ifp->if_softc;
11266 	struct mii_data *mii = &sc->sc_mii;
11267 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
11268 	uint32_t pcs_adv, pcs_lpab, reg;
11269 
11270 	ifmr->ifm_status = IFM_AVALID;
11271 	ifmr->ifm_active = IFM_ETHER;
11272 
11273 	/* Check PCS */
11274 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
11275 	if ((reg & PCS_LSTS_LINKOK) == 0) {
11276 		ifmr->ifm_active |= IFM_NONE;
11277 		sc->sc_tbi_linkup = 0;
11278 		goto setled;
11279 	}
11280 
11281 	sc->sc_tbi_linkup = 1;
11282 	ifmr->ifm_status |= IFM_ACTIVE;
11283 	if (sc->sc_type == WM_T_I354) {
11284 		uint32_t status;
11285 
11286 		status = CSR_READ(sc, WMREG_STATUS);
11287 		if (((status & STATUS_2P5_SKU) != 0)
11288 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
11289 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
11290 		} else
11291 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
11292 	} else {
11293 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
11294 		case PCS_LSTS_SPEED_10:
11295 			ifmr->ifm_active |= IFM_10_T; /* XXX */
11296 			break;
11297 		case PCS_LSTS_SPEED_100:
11298 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
11299 			break;
11300 		case PCS_LSTS_SPEED_1000:
11301 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
11302 			break;
11303 		default:
11304 			device_printf(sc->sc_dev, "Unknown speed\n");
11305 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
11306 			break;
11307 		}
11308 	}
11309 	if ((reg & PCS_LSTS_FDX) != 0)
11310 		ifmr->ifm_active |= IFM_FDX;
11311 	else
11312 		ifmr->ifm_active |= IFM_HDX;
11313 	mii->mii_media_active &= ~IFM_ETH_FMASK;
11314 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
11315 		/* Check flow */
11316 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
11317 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
11318 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
11319 			goto setled;
11320 		}
11321 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
11322 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
11323 		DPRINTF(WM_DEBUG_LINK,
11324 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
11325 		if ((pcs_adv & TXCW_SYM_PAUSE)
11326 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
11327 			mii->mii_media_active |= IFM_FLOW
11328 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
11329 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
11330 		    && (pcs_adv & TXCW_ASYM_PAUSE)
11331 		    && (pcs_lpab & TXCW_SYM_PAUSE)
11332 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
11333 			mii->mii_media_active |= IFM_FLOW
11334 			    | IFM_ETH_TXPAUSE;
11335 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
11336 		    && (pcs_adv & TXCW_ASYM_PAUSE)
11337 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
11338 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
11339 			mii->mii_media_active |= IFM_FLOW
11340 			    | IFM_ETH_RXPAUSE;
11341 		}
11342 	}
11343 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
11344 	    | (mii->mii_media_active & IFM_ETH_FMASK);
11345 setled:
11346 	wm_tbi_serdes_set_linkled(sc);
11347 }
11348 
11349 /*
11350  * wm_serdes_tick:
11351  *
11352  *	Check the link on serdes devices.
11353  */
11354 static void
11355 wm_serdes_tick(struct wm_softc *sc)
11356 {
11357 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
11358 	struct mii_data *mii = &sc->sc_mii;
11359 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
11360 	uint32_t reg;
11361 
11362 	KASSERT(WM_CORE_LOCKED(sc));
11363 
11364 	mii->mii_media_status = IFM_AVALID;
11365 	mii->mii_media_active = IFM_ETHER;
11366 
11367 	/* Check PCS */
11368 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
11369 	if ((reg & PCS_LSTS_LINKOK) != 0) {
11370 		mii->mii_media_status |= IFM_ACTIVE;
11371 		sc->sc_tbi_linkup = 1;
11372 		sc->sc_tbi_serdes_ticks = 0;
11373 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
11374 		if ((reg & PCS_LSTS_FDX) != 0)
11375 			mii->mii_media_active |= IFM_FDX;
11376 		else
11377 			mii->mii_media_active |= IFM_HDX;
11378 	} else {
11379 		mii->mii_media_status |= IFM_NONE;
11380 		sc->sc_tbi_linkup = 0;
11381 		/* If the timer expired, retry autonegotiation */
11382 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
11383 		    && (++sc->sc_tbi_serdes_ticks
11384 			>= sc->sc_tbi_serdes_anegticks)) {
11385 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
11386 			sc->sc_tbi_serdes_ticks = 0;
11387 			/* XXX */
11388 			wm_serdes_mediachange(ifp);
11389 		}
11390 	}
11391 
11392 	wm_tbi_serdes_set_linkled(sc);
11393 }
11394 
11395 /* SFP related */
11396 
11397 static int
11398 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
11399 {
11400 	uint32_t i2ccmd;
11401 	int i;
11402 
11403 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
11404 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
11405 
11406 	/* Poll the ready bit */
11407 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
11408 		delay(50);
11409 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
11410 		if (i2ccmd & I2CCMD_READY)
11411 			break;
11412 	}
11413 	if ((i2ccmd & I2CCMD_READY) == 0)
11414 		return -1;
11415 	if ((i2ccmd & I2CCMD_ERROR) != 0)
11416 		return -1;
11417 
11418 	*data = i2ccmd & 0x00ff;
11419 
11420 	return 0;
11421 }
11422 
11423 static uint32_t
11424 wm_sfp_get_media_type(struct wm_softc *sc)
11425 {
11426 	uint32_t ctrl_ext;
11427 	uint8_t val = 0;
11428 	int timeout = 3;
11429 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
11430 	int rv = -1;
11431 
11432 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
11433 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
11434 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
11435 	CSR_WRITE_FLUSH(sc);
11436 
11437 	/* Read SFP module data */
11438 	while (timeout) {
11439 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
11440 		if (rv == 0)
11441 			break;
11442 		delay(100*1000); /* XXX too big */
11443 		timeout--;
11444 	}
11445 	if (rv != 0)
11446 		goto out;
11447 	switch (val) {
11448 	case SFF_SFP_ID_SFF:
11449 		aprint_normal_dev(sc->sc_dev,
11450 		    "Module/Connector soldered to board\n");
11451 		break;
11452 	case SFF_SFP_ID_SFP:
11453 		aprint_normal_dev(sc->sc_dev, "SFP\n");
11454 		break;
11455 	case SFF_SFP_ID_UNKNOWN:
11456 		goto out;
11457 	default:
11458 		break;
11459 	}
11460 
11461 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
11462 	if (rv != 0) {
11463 		goto out;
11464 	}
11465 
11466 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
11467 		mediatype = WM_MEDIATYPE_SERDES;
11468 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
11469 		sc->sc_flags |= WM_F_SGMII;
11470 		mediatype = WM_MEDIATYPE_COPPER;
11471 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
11472 		sc->sc_flags |= WM_F_SGMII;
11473 		mediatype = WM_MEDIATYPE_SERDES;
11474 	}
11475 
11476 out:
11477 	/* Restore I2C interface setting */
11478 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
11479 
11480 	return mediatype;
11481 }
11482 
11483 /*
11484  * NVM related.
11485  * Microwire, SPI (w/wo EERD) and Flash.
11486  */
11487 
11488 /* Both spi and uwire */
11489 
11490 /*
11491  * wm_eeprom_sendbits:
11492  *
11493  *	Send a series of bits to the EEPROM.
11494  */
11495 static void
11496 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
11497 {
11498 	uint32_t reg;
11499 	int x;
11500 
11501 	reg = CSR_READ(sc, WMREG_EECD);
11502 
11503 	for (x = nbits; x > 0; x--) {
11504 		if (bits & (1U << (x - 1)))
11505 			reg |= EECD_DI;
11506 		else
11507 			reg &= ~EECD_DI;
11508 		CSR_WRITE(sc, WMREG_EECD, reg);
11509 		CSR_WRITE_FLUSH(sc);
11510 		delay(2);
11511 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
11512 		CSR_WRITE_FLUSH(sc);
11513 		delay(2);
11514 		CSR_WRITE(sc, WMREG_EECD, reg);
11515 		CSR_WRITE_FLUSH(sc);
11516 		delay(2);
11517 	}
11518 }
11519 
11520 /*
11521  * wm_eeprom_recvbits:
11522  *
11523  *	Receive a series of bits from the EEPROM.
11524  */
11525 static void
11526 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
11527 {
11528 	uint32_t reg, val;
11529 	int x;
11530 
11531 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
11532 
11533 	val = 0;
11534 	for (x = nbits; x > 0; x--) {
11535 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
11536 		CSR_WRITE_FLUSH(sc);
11537 		delay(2);
11538 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
11539 			val |= (1U << (x - 1));
11540 		CSR_WRITE(sc, WMREG_EECD, reg);
11541 		CSR_WRITE_FLUSH(sc);
11542 		delay(2);
11543 	}
11544 	*valp = val;
11545 }
11546 
11547 /* Microwire */
11548 
11549 /*
11550  * wm_nvm_read_uwire:
11551  *
11552  *	Read a word from the EEPROM using the MicroWire protocol.
11553  */
11554 static int
11555 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
11556 {
11557 	uint32_t reg, val;
11558 	int i;
11559 
11560 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11561 		device_xname(sc->sc_dev), __func__));
11562 
11563 	if (sc->nvm.acquire(sc) != 0)
11564 		return -1;
11565 
11566 	for (i = 0; i < wordcnt; i++) {
11567 		/* Clear SK and DI. */
11568 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
11569 		CSR_WRITE(sc, WMREG_EECD, reg);
11570 
11571 		/*
11572 		 * XXX: workaround for a bug in qemu-0.12.x and prior
11573 		 * and Xen.
11574 		 *
11575 		 * We use this workaround only for 82540 because qemu's
11576 		 * e1000 act as 82540.
11577 		 */
11578 		if (sc->sc_type == WM_T_82540) {
11579 			reg |= EECD_SK;
11580 			CSR_WRITE(sc, WMREG_EECD, reg);
11581 			reg &= ~EECD_SK;
11582 			CSR_WRITE(sc, WMREG_EECD, reg);
11583 			CSR_WRITE_FLUSH(sc);
11584 			delay(2);
11585 		}
11586 		/* XXX: end of workaround */
11587 
11588 		/* Set CHIP SELECT. */
11589 		reg |= EECD_CS;
11590 		CSR_WRITE(sc, WMREG_EECD, reg);
11591 		CSR_WRITE_FLUSH(sc);
11592 		delay(2);
11593 
11594 		/* Shift in the READ command. */
11595 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
11596 
11597 		/* Shift in address. */
11598 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
11599 
11600 		/* Shift out the data. */
11601 		wm_eeprom_recvbits(sc, &val, 16);
11602 		data[i] = val & 0xffff;
11603 
11604 		/* Clear CHIP SELECT. */
11605 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
11606 		CSR_WRITE(sc, WMREG_EECD, reg);
11607 		CSR_WRITE_FLUSH(sc);
11608 		delay(2);
11609 	}
11610 
11611 	sc->nvm.release(sc);
11612 	return 0;
11613 }
11614 
11615 /* SPI */
11616 
11617 /*
11618  * Set SPI and FLASH related information from the EECD register.
11619  * For 82541 and 82547, the word size is taken from EEPROM.
11620  */
11621 static int
11622 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
11623 {
11624 	int size;
11625 	uint32_t reg;
11626 	uint16_t data;
11627 
11628 	reg = CSR_READ(sc, WMREG_EECD);
11629 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
11630 
11631 	/* Read the size of NVM from EECD by default */
11632 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
11633 	switch (sc->sc_type) {
11634 	case WM_T_82541:
11635 	case WM_T_82541_2:
11636 	case WM_T_82547:
11637 	case WM_T_82547_2:
11638 		/* Set dummy value to access EEPROM */
11639 		sc->sc_nvm_wordsize = 64;
11640 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
11641 			aprint_error_dev(sc->sc_dev,
11642 			    "%s: failed to read EEPROM size\n", __func__);
11643 		}
11644 		reg = data;
11645 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
11646 		if (size == 0)
11647 			size = 6; /* 64 word size */
11648 		else
11649 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
11650 		break;
11651 	case WM_T_80003:
11652 	case WM_T_82571:
11653 	case WM_T_82572:
11654 	case WM_T_82573: /* SPI case */
11655 	case WM_T_82574: /* SPI case */
11656 	case WM_T_82583: /* SPI case */
11657 		size += NVM_WORD_SIZE_BASE_SHIFT;
11658 		if (size > 14)
11659 			size = 14;
11660 		break;
11661 	case WM_T_82575:
11662 	case WM_T_82576:
11663 	case WM_T_82580:
11664 	case WM_T_I350:
11665 	case WM_T_I354:
11666 	case WM_T_I210:
11667 	case WM_T_I211:
11668 		size += NVM_WORD_SIZE_BASE_SHIFT;
11669 		if (size > 15)
11670 			size = 15;
11671 		break;
11672 	default:
11673 		aprint_error_dev(sc->sc_dev,
11674 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
11675 		return -1;
11676 		break;
11677 	}
11678 
11679 	sc->sc_nvm_wordsize = 1 << size;
11680 
11681 	return 0;
11682 }
11683 
11684 /*
11685  * wm_nvm_ready_spi:
11686  *
11687  *	Wait for a SPI EEPROM to be ready for commands.
11688  */
11689 static int
11690 wm_nvm_ready_spi(struct wm_softc *sc)
11691 {
11692 	uint32_t val;
11693 	int usec;
11694 
11695 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11696 		device_xname(sc->sc_dev), __func__));
11697 
11698 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
11699 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
11700 		wm_eeprom_recvbits(sc, &val, 8);
11701 		if ((val & SPI_SR_RDY) == 0)
11702 			break;
11703 	}
11704 	if (usec >= SPI_MAX_RETRIES) {
11705 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
11706 		return -1;
11707 	}
11708 	return 0;
11709 }
11710 
11711 /*
11712  * wm_nvm_read_spi:
11713  *
11714  *	Read a work from the EEPROM using the SPI protocol.
11715  */
11716 static int
11717 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
11718 {
11719 	uint32_t reg, val;
11720 	int i;
11721 	uint8_t opc;
11722 	int rv = 0;
11723 
11724 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11725 		device_xname(sc->sc_dev), __func__));
11726 
11727 	if (sc->nvm.acquire(sc) != 0)
11728 		return -1;
11729 
11730 	/* Clear SK and CS. */
11731 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
11732 	CSR_WRITE(sc, WMREG_EECD, reg);
11733 	CSR_WRITE_FLUSH(sc);
11734 	delay(2);
11735 
11736 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
11737 		goto out;
11738 
11739 	/* Toggle CS to flush commands. */
11740 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
11741 	CSR_WRITE_FLUSH(sc);
11742 	delay(2);
11743 	CSR_WRITE(sc, WMREG_EECD, reg);
11744 	CSR_WRITE_FLUSH(sc);
11745 	delay(2);
11746 
11747 	opc = SPI_OPC_READ;
11748 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
11749 		opc |= SPI_OPC_A8;
11750 
11751 	wm_eeprom_sendbits(sc, opc, 8);
11752 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
11753 
11754 	for (i = 0; i < wordcnt; i++) {
11755 		wm_eeprom_recvbits(sc, &val, 16);
11756 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
11757 	}
11758 
11759 	/* Raise CS and clear SK. */
11760 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
11761 	CSR_WRITE(sc, WMREG_EECD, reg);
11762 	CSR_WRITE_FLUSH(sc);
11763 	delay(2);
11764 
11765 out:
11766 	sc->nvm.release(sc);
11767 	return rv;
11768 }
11769 
11770 /* Using with EERD */
11771 
11772 static int
11773 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
11774 {
11775 	uint32_t attempts = 100000;
11776 	uint32_t i, reg = 0;
11777 	int32_t done = -1;
11778 
11779 	for (i = 0; i < attempts; i++) {
11780 		reg = CSR_READ(sc, rw);
11781 
11782 		if (reg & EERD_DONE) {
11783 			done = 0;
11784 			break;
11785 		}
11786 		delay(5);
11787 	}
11788 
11789 	return done;
11790 }
11791 
11792 static int
11793 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
11794     uint16_t *data)
11795 {
11796 	int i, eerd = 0;
11797 	int rv = 0;
11798 
11799 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11800 		device_xname(sc->sc_dev), __func__));
11801 
11802 	if (sc->nvm.acquire(sc) != 0)
11803 		return -1;
11804 
11805 	for (i = 0; i < wordcnt; i++) {
11806 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
11807 		CSR_WRITE(sc, WMREG_EERD, eerd);
11808 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
11809 		if (rv != 0) {
11810 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
11811 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
11812 			break;
11813 		}
11814 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
11815 	}
11816 
11817 	sc->nvm.release(sc);
11818 	return rv;
11819 }
11820 
11821 /* Flash */
11822 
11823 static int
11824 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
11825 {
11826 	uint32_t eecd;
11827 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
11828 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
11829 	uint8_t sig_byte = 0;
11830 
11831 	switch (sc->sc_type) {
11832 	case WM_T_PCH_SPT:
11833 		/*
11834 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
11835 		 * sector valid bits from the NVM.
11836 		 */
11837 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
11838 		if ((*bank == 0) || (*bank == 1)) {
11839 			aprint_error_dev(sc->sc_dev,
11840 			    "%s: no valid NVM bank present (%u)\n", __func__,
11841 				*bank);
11842 			return -1;
11843 		} else {
11844 			*bank = *bank - 2;
11845 			return 0;
11846 		}
11847 	case WM_T_ICH8:
11848 	case WM_T_ICH9:
11849 		eecd = CSR_READ(sc, WMREG_EECD);
11850 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
11851 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
11852 			return 0;
11853 		}
11854 		/* FALLTHROUGH */
11855 	default:
11856 		/* Default to 0 */
11857 		*bank = 0;
11858 
11859 		/* Check bank 0 */
11860 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
11861 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
11862 			*bank = 0;
11863 			return 0;
11864 		}
11865 
11866 		/* Check bank 1 */
11867 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
11868 		    &sig_byte);
11869 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
11870 			*bank = 1;
11871 			return 0;
11872 		}
11873 	}
11874 
11875 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
11876 		device_xname(sc->sc_dev)));
11877 	return -1;
11878 }
11879 
11880 /******************************************************************************
11881  * This function does initial flash setup so that a new read/write/erase cycle
11882  * can be started.
11883  *
11884  * sc - The pointer to the hw structure
11885  ****************************************************************************/
11886 static int32_t
11887 wm_ich8_cycle_init(struct wm_softc *sc)
11888 {
11889 	uint16_t hsfsts;
11890 	int32_t error = 1;
11891 	int32_t i     = 0;
11892 
11893 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
11894 
11895 	/* May be check the Flash Des Valid bit in Hw status */
11896 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
11897 		return error;
11898 	}
11899 
11900 	/* Clear FCERR in Hw status by writing 1 */
11901 	/* Clear DAEL in Hw status by writing a 1 */
11902 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
11903 
11904 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
11905 
11906 	/*
11907 	 * Either we should have a hardware SPI cycle in progress bit to check
11908 	 * against, in order to start a new cycle or FDONE bit should be
11909 	 * changed in the hardware so that it is 1 after harware reset, which
11910 	 * can then be used as an indication whether a cycle is in progress or
11911 	 * has been completed .. we should also have some software semaphore
11912 	 * mechanism to guard FDONE or the cycle in progress bit so that two
11913 	 * threads access to those bits can be sequentiallized or a way so that
11914 	 * 2 threads dont start the cycle at the same time
11915 	 */
11916 
11917 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
11918 		/*
11919 		 * There is no cycle running at present, so we can start a
11920 		 * cycle
11921 		 */
11922 
11923 		/* Begin by setting Flash Cycle Done. */
11924 		hsfsts |= HSFSTS_DONE;
11925 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
11926 		error = 0;
11927 	} else {
11928 		/*
11929 		 * otherwise poll for sometime so the current cycle has a
11930 		 * chance to end before giving up.
11931 		 */
11932 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
11933 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
11934 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
11935 				error = 0;
11936 				break;
11937 			}
11938 			delay(1);
11939 		}
11940 		if (error == 0) {
11941 			/*
11942 			 * Successful in waiting for previous cycle to timeout,
11943 			 * now set the Flash Cycle Done.
11944 			 */
11945 			hsfsts |= HSFSTS_DONE;
11946 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
11947 		}
11948 	}
11949 	return error;
11950 }
11951 
11952 /******************************************************************************
11953  * This function starts a flash cycle and waits for its completion
11954  *
11955  * sc - The pointer to the hw structure
11956  ****************************************************************************/
11957 static int32_t
11958 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
11959 {
11960 	uint16_t hsflctl;
11961 	uint16_t hsfsts;
11962 	int32_t error = 1;
11963 	uint32_t i = 0;
11964 
11965 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
11966 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
11967 	hsflctl |= HSFCTL_GO;
11968 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
11969 
11970 	/* Wait till FDONE bit is set to 1 */
11971 	do {
11972 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
11973 		if (hsfsts & HSFSTS_DONE)
11974 			break;
11975 		delay(1);
11976 		i++;
11977 	} while (i < timeout);
11978 	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
11979 		error = 0;
11980 
11981 	return error;
11982 }
11983 
11984 /******************************************************************************
11985  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
11986  *
11987  * sc - The pointer to the hw structure
11988  * index - The index of the byte or word to read.
11989  * size - Size of data to read, 1=byte 2=word, 4=dword
11990  * data - Pointer to the word to store the value read.
11991  *****************************************************************************/
11992 static int32_t
11993 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
11994     uint32_t size, uint32_t *data)
11995 {
11996 	uint16_t hsfsts;
11997 	uint16_t hsflctl;
11998 	uint32_t flash_linear_address;
11999 	uint32_t flash_data = 0;
12000 	int32_t error = 1;
12001 	int32_t count = 0;
12002 
12003 	if (size < 1  || size > 4 || data == 0x0 ||
12004 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
12005 		return error;
12006 
12007 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
12008 	    sc->sc_ich8_flash_base;
12009 
12010 	do {
12011 		delay(1);
12012 		/* Steps */
12013 		error = wm_ich8_cycle_init(sc);
12014 		if (error)
12015 			break;
12016 
12017 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
12018 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
12019 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
12020 		    & HSFCTL_BCOUNT_MASK;
12021 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
12022 		if (sc->sc_type == WM_T_PCH_SPT) {
12023 			/*
12024 			 * In SPT, This register is in Lan memory space, not
12025 			 * flash. Therefore, only 32 bit access is supported.
12026 			 */
12027 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
12028 			    (uint32_t)hsflctl);
12029 		} else
12030 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
12031 
12032 		/*
12033 		 * Write the last 24 bits of index into Flash Linear address
12034 		 * field in Flash Address
12035 		 */
12036 		/* TODO: TBD maybe check the index against the size of flash */
12037 
12038 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
12039 
12040 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
12041 
12042 		/*
12043 		 * Check if FCERR is set to 1, if set to 1, clear it and try
12044 		 * the whole sequence a few more times, else read in (shift in)
12045 		 * the Flash Data0, the order is least significant byte first
12046 		 * msb to lsb
12047 		 */
12048 		if (error == 0) {
12049 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
12050 			if (size == 1)
12051 				*data = (uint8_t)(flash_data & 0x000000FF);
12052 			else if (size == 2)
12053 				*data = (uint16_t)(flash_data & 0x0000FFFF);
12054 			else if (size == 4)
12055 				*data = (uint32_t)flash_data;
12056 			break;
12057 		} else {
12058 			/*
12059 			 * If we've gotten here, then things are probably
12060 			 * completely hosed, but if the error condition is
12061 			 * detected, it won't hurt to give it another try...
12062 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
12063 			 */
12064 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
12065 			if (hsfsts & HSFSTS_ERR) {
12066 				/* Repeat for some time before giving up. */
12067 				continue;
12068 			} else if ((hsfsts & HSFSTS_DONE) == 0)
12069 				break;
12070 		}
12071 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
12072 
12073 	return error;
12074 }
12075 
12076 /******************************************************************************
12077  * Reads a single byte from the NVM using the ICH8 flash access registers.
12078  *
12079  * sc - pointer to wm_hw structure
12080  * index - The index of the byte to read.
12081  * data - Pointer to a byte to store the value read.
12082  *****************************************************************************/
12083 static int32_t
12084 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
12085 {
12086 	int32_t status;
12087 	uint32_t word = 0;
12088 
12089 	status = wm_read_ich8_data(sc, index, 1, &word);
12090 	if (status == 0)
12091 		*data = (uint8_t)word;
12092 	else
12093 		*data = 0;
12094 
12095 	return status;
12096 }
12097 
12098 /******************************************************************************
12099  * Reads a word from the NVM using the ICH8 flash access registers.
12100  *
12101  * sc - pointer to wm_hw structure
12102  * index - The starting byte index of the word to read.
12103  * data - Pointer to a word to store the value read.
12104  *****************************************************************************/
12105 static int32_t
12106 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
12107 {
12108 	int32_t status;
12109 	uint32_t word = 0;
12110 
12111 	status = wm_read_ich8_data(sc, index, 2, &word);
12112 	if (status == 0)
12113 		*data = (uint16_t)word;
12114 	else
12115 		*data = 0;
12116 
12117 	return status;
12118 }
12119 
12120 /******************************************************************************
12121  * Reads a dword from the NVM using the ICH8 flash access registers.
12122  *
12123  * sc - pointer to wm_hw structure
12124  * index - The starting byte index of the word to read.
12125  * data - Pointer to a word to store the value read.
12126  *****************************************************************************/
12127 static int32_t
12128 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
12129 {
12130 	int32_t status;
12131 
12132 	status = wm_read_ich8_data(sc, index, 4, data);
12133 	return status;
12134 }
12135 
12136 /******************************************************************************
12137  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
12138  * register.
12139  *
12140  * sc - Struct containing variables accessed by shared code
12141  * offset - offset of word in the EEPROM to read
12142  * data - word read from the EEPROM
12143  * words - number of words to read
12144  *****************************************************************************/
12145 static int
12146 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
12147 {
12148 	int32_t  rv = 0;
12149 	uint32_t flash_bank = 0;
12150 	uint32_t act_offset = 0;
12151 	uint32_t bank_offset = 0;
12152 	uint16_t word = 0;
12153 	uint16_t i = 0;
12154 
12155 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
12156 		device_xname(sc->sc_dev), __func__));
12157 
12158 	if (sc->nvm.acquire(sc) != 0)
12159 		return -1;
12160 
12161 	/*
12162 	 * We need to know which is the valid flash bank.  In the event
12163 	 * that we didn't allocate eeprom_shadow_ram, we may not be
12164 	 * managing flash_bank.  So it cannot be trusted and needs
12165 	 * to be updated with each read.
12166 	 */
12167 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
12168 	if (rv) {
12169 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
12170 			device_xname(sc->sc_dev)));
12171 		flash_bank = 0;
12172 	}
12173 
12174 	/*
12175 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
12176 	 * size
12177 	 */
12178 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
12179 
12180 	for (i = 0; i < words; i++) {
12181 		/* The NVM part needs a byte offset, hence * 2 */
12182 		act_offset = bank_offset + ((offset + i) * 2);
12183 		rv = wm_read_ich8_word(sc, act_offset, &word);
12184 		if (rv) {
12185 			aprint_error_dev(sc->sc_dev,
12186 			    "%s: failed to read NVM\n", __func__);
12187 			break;
12188 		}
12189 		data[i] = word;
12190 	}
12191 
12192 	sc->nvm.release(sc);
12193 	return rv;
12194 }
12195 
12196 /******************************************************************************
12197  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
12198  * register.
12199  *
12200  * sc - Struct containing variables accessed by shared code
12201  * offset - offset of word in the EEPROM to read
12202  * data - word read from the EEPROM
12203  * words - number of words to read
12204  *****************************************************************************/
12205 static int
12206 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
12207 {
12208 	int32_t  rv = 0;
12209 	uint32_t flash_bank = 0;
12210 	uint32_t act_offset = 0;
12211 	uint32_t bank_offset = 0;
12212 	uint32_t dword = 0;
12213 	uint16_t i = 0;
12214 
12215 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
12216 		device_xname(sc->sc_dev), __func__));
12217 
12218 	if (sc->nvm.acquire(sc) != 0)
12219 		return -1;
12220 
12221 	/*
12222 	 * We need to know which is the valid flash bank.  In the event
12223 	 * that we didn't allocate eeprom_shadow_ram, we may not be
12224 	 * managing flash_bank.  So it cannot be trusted and needs
12225 	 * to be updated with each read.
12226 	 */
12227 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
12228 	if (rv) {
12229 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
12230 			device_xname(sc->sc_dev)));
12231 		flash_bank = 0;
12232 	}
12233 
12234 	/*
12235 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
12236 	 * size
12237 	 */
12238 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
12239 
12240 	for (i = 0; i < words; i++) {
12241 		/* The NVM part needs a byte offset, hence * 2 */
12242 		act_offset = bank_offset + ((offset + i) * 2);
12243 		/* but we must read dword aligned, so mask ... */
12244 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
12245 		if (rv) {
12246 			aprint_error_dev(sc->sc_dev,
12247 			    "%s: failed to read NVM\n", __func__);
12248 			break;
12249 		}
12250 		/* ... and pick out low or high word */
12251 		if ((act_offset & 0x2) == 0)
12252 			data[i] = (uint16_t)(dword & 0xFFFF);
12253 		else
12254 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
12255 	}
12256 
12257 	sc->nvm.release(sc);
12258 	return rv;
12259 }
12260 
12261 /* iNVM */
12262 
12263 static int
12264 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
12265 {
12266 	int32_t  rv = 0;
12267 	uint32_t invm_dword;
12268 	uint16_t i;
12269 	uint8_t record_type, word_address;
12270 
12271 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
12272 		device_xname(sc->sc_dev), __func__));
12273 
12274 	for (i = 0; i < INVM_SIZE; i++) {
12275 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
12276 		/* Get record type */
12277 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
12278 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
12279 			break;
12280 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
12281 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
12282 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
12283 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
12284 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
12285 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
12286 			if (word_address == address) {
12287 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
12288 				rv = 0;
12289 				break;
12290 			}
12291 		}
12292 	}
12293 
12294 	return rv;
12295 }
12296 
12297 static int
12298 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
12299 {
12300 	int rv = 0;
12301 	int i;
12302 
12303 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
12304 		device_xname(sc->sc_dev), __func__));
12305 
12306 	if (sc->nvm.acquire(sc) != 0)
12307 		return -1;
12308 
12309 	for (i = 0; i < words; i++) {
12310 		switch (offset + i) {
12311 		case NVM_OFF_MACADDR:
12312 		case NVM_OFF_MACADDR1:
12313 		case NVM_OFF_MACADDR2:
12314 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
12315 			if (rv != 0) {
12316 				data[i] = 0xffff;
12317 				rv = -1;
12318 			}
12319 			break;
12320 		case NVM_OFF_CFG2:
12321 			rv = wm_nvm_read_word_invm(sc, offset, data);
12322 			if (rv != 0) {
12323 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
12324 				rv = 0;
12325 			}
12326 			break;
12327 		case NVM_OFF_CFG4:
12328 			rv = wm_nvm_read_word_invm(sc, offset, data);
12329 			if (rv != 0) {
12330 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
12331 				rv = 0;
12332 			}
12333 			break;
12334 		case NVM_OFF_LED_1_CFG:
12335 			rv = wm_nvm_read_word_invm(sc, offset, data);
12336 			if (rv != 0) {
12337 				*data = NVM_LED_1_CFG_DEFAULT_I211;
12338 				rv = 0;
12339 			}
12340 			break;
12341 		case NVM_OFF_LED_0_2_CFG:
12342 			rv = wm_nvm_read_word_invm(sc, offset, data);
12343 			if (rv != 0) {
12344 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
12345 				rv = 0;
12346 			}
12347 			break;
12348 		case NVM_OFF_ID_LED_SETTINGS:
12349 			rv = wm_nvm_read_word_invm(sc, offset, data);
12350 			if (rv != 0) {
12351 				*data = ID_LED_RESERVED_FFFF;
12352 				rv = 0;
12353 			}
12354 			break;
12355 		default:
12356 			DPRINTF(WM_DEBUG_NVM,
12357 			    ("NVM word 0x%02x is not mapped.\n", offset));
12358 			*data = NVM_RESERVED_WORD;
12359 			break;
12360 		}
12361 	}
12362 
12363 	sc->nvm.release(sc);
12364 	return rv;
12365 }
12366 
12367 /* Lock, detecting NVM type, validate checksum, version and read */
12368 
12369 static int
12370 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
12371 {
12372 	uint32_t eecd = 0;
12373 
12374 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
12375 	    || sc->sc_type == WM_T_82583) {
12376 		eecd = CSR_READ(sc, WMREG_EECD);
12377 
12378 		/* Isolate bits 15 & 16 */
12379 		eecd = ((eecd >> 15) & 0x03);
12380 
12381 		/* If both bits are set, device is Flash type */
12382 		if (eecd == 0x03)
12383 			return 0;
12384 	}
12385 	return 1;
12386 }
12387 
12388 static int
12389 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
12390 {
12391 	uint32_t eec;
12392 
12393 	eec = CSR_READ(sc, WMREG_EEC);
12394 	if ((eec & EEC_FLASH_DETECTED) != 0)
12395 		return 1;
12396 
12397 	return 0;
12398 }
12399 
12400 /*
12401  * wm_nvm_validate_checksum
12402  *
12403  * The checksum is defined as the sum of the first 64 (16 bit) words.
12404  */
12405 static int
12406 wm_nvm_validate_checksum(struct wm_softc *sc)
12407 {
12408 	uint16_t checksum;
12409 	uint16_t eeprom_data;
12410 #ifdef WM_DEBUG
12411 	uint16_t csum_wordaddr, valid_checksum;
12412 #endif
12413 	int i;
12414 
12415 	checksum = 0;
12416 
12417 	/* Don't check for I211 */
12418 	if (sc->sc_type == WM_T_I211)
12419 		return 0;
12420 
12421 #ifdef WM_DEBUG
12422 	if (sc->sc_type == WM_T_PCH_LPT) {
12423 		csum_wordaddr = NVM_OFF_COMPAT;
12424 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
12425 	} else {
12426 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
12427 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
12428 	}
12429 
12430 	/* Dump EEPROM image for debug */
12431 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
12432 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
12433 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
12434 		/* XXX PCH_SPT? */
12435 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
12436 		if ((eeprom_data & valid_checksum) == 0) {
12437 			DPRINTF(WM_DEBUG_NVM,
12438 			    ("%s: NVM need to be updated (%04x != %04x)\n",
12439 				device_xname(sc->sc_dev), eeprom_data,
12440 				    valid_checksum));
12441 		}
12442 	}
12443 
12444 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
12445 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
12446 		for (i = 0; i < NVM_SIZE; i++) {
12447 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
12448 				printf("XXXX ");
12449 			else
12450 				printf("%04hx ", eeprom_data);
12451 			if (i % 8 == 7)
12452 				printf("\n");
12453 		}
12454 	}
12455 
12456 #endif /* WM_DEBUG */
12457 
12458 	for (i = 0; i < NVM_SIZE; i++) {
12459 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
12460 			return 1;
12461 		checksum += eeprom_data;
12462 	}
12463 
12464 	if (checksum != (uint16_t) NVM_CHECKSUM) {
12465 #ifdef WM_DEBUG
12466 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
12467 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
12468 #endif
12469 	}
12470 
12471 	return 0;
12472 }
12473 
12474 static void
12475 wm_nvm_version_invm(struct wm_softc *sc)
12476 {
12477 	uint32_t dword;
12478 
12479 	/*
12480 	 * Linux's code to decode version is very strange, so we don't
12481 	 * obey that algorithm and just use word 61 as the document.
12482 	 * Perhaps it's not perfect though...
12483 	 *
12484 	 * Example:
12485 	 *
12486 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
12487 	 */
12488 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
12489 	dword = __SHIFTOUT(dword, INVM_VER_1);
12490 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
12491 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
12492 }
12493 
12494 static void
12495 wm_nvm_version(struct wm_softc *sc)
12496 {
12497 	uint16_t major, minor, build, patch;
12498 	uint16_t uid0, uid1;
12499 	uint16_t nvm_data;
12500 	uint16_t off;
12501 	bool check_version = false;
12502 	bool check_optionrom = false;
12503 	bool have_build = false;
12504 	bool have_uid = true;
12505 
12506 	/*
12507 	 * Version format:
12508 	 *
12509 	 * XYYZ
12510 	 * X0YZ
12511 	 * X0YY
12512 	 *
12513 	 * Example:
12514 	 *
12515 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
12516 	 *	82571	0x50a6	5.10.6?
12517 	 *	82572	0x506a	5.6.10?
12518 	 *	82572EI	0x5069	5.6.9?
12519 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
12520 	 *		0x2013	2.1.3?
12521 	 *	82583	0x10a0	1.10.0? (document says it's default vaule)
12522 	 */
12523 
12524 	/*
12525 	 * XXX
12526 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
12527 	 * I've never seen on real 82574 hardware with such small SPI ROM.
12528 	 */
12529 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
12530 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
12531 		have_uid = false;
12532 
12533 	switch (sc->sc_type) {
12534 	case WM_T_82571:
12535 	case WM_T_82572:
12536 	case WM_T_82574:
12537 	case WM_T_82583:
12538 		check_version = true;
12539 		check_optionrom = true;
12540 		have_build = true;
12541 		break;
12542 	case WM_T_82575:
12543 	case WM_T_82576:
12544 	case WM_T_82580:
12545 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
12546 			check_version = true;
12547 		break;
12548 	case WM_T_I211:
12549 		wm_nvm_version_invm(sc);
12550 		have_uid = false;
12551 		goto printver;
12552 	case WM_T_I210:
12553 		if (!wm_nvm_get_flash_presence_i210(sc)) {
12554 			wm_nvm_version_invm(sc);
12555 			have_uid = false;
12556 			goto printver;
12557 		}
12558 		/* FALLTHROUGH */
12559 	case WM_T_I350:
12560 	case WM_T_I354:
12561 		check_version = true;
12562 		check_optionrom = true;
12563 		break;
12564 	default:
12565 		return;
12566 	}
12567 	if (check_version
12568 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
12569 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
12570 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
12571 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
12572 			build = nvm_data & NVM_BUILD_MASK;
12573 			have_build = true;
12574 		} else
12575 			minor = nvm_data & 0x00ff;
12576 
12577 		/* Decimal */
12578 		minor = (minor / 16) * 10 + (minor % 16);
12579 		sc->sc_nvm_ver_major = major;
12580 		sc->sc_nvm_ver_minor = minor;
12581 
12582 printver:
12583 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
12584 		    sc->sc_nvm_ver_minor);
12585 		if (have_build) {
12586 			sc->sc_nvm_ver_build = build;
12587 			aprint_verbose(".%d", build);
12588 		}
12589 	}
12590 
12591 	/* Assume the Option ROM area is at avove NVM_SIZE */
12592 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
12593 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
12594 		/* Option ROM Version */
12595 		if ((off != 0x0000) && (off != 0xffff)) {
12596 			int rv;
12597 
12598 			off += NVM_COMBO_VER_OFF;
12599 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
12600 			rv |= wm_nvm_read(sc, off, 1, &uid0);
12601 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
12602 			    && (uid1 != 0) && (uid1 != 0xffff)) {
12603 				/* 16bits */
12604 				major = uid0 >> 8;
12605 				build = (uid0 << 8) | (uid1 >> 8);
12606 				patch = uid1 & 0x00ff;
12607 				aprint_verbose(", option ROM Version %d.%d.%d",
12608 				    major, build, patch);
12609 			}
12610 		}
12611 	}
12612 
12613 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
12614 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
12615 }
12616 
12617 /*
12618  * wm_nvm_read:
12619  *
12620  *	Read data from the serial EEPROM.
12621  */
12622 static int
12623 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
12624 {
12625 	int rv;
12626 
12627 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
12628 		device_xname(sc->sc_dev), __func__));
12629 
12630 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
12631 		return -1;
12632 
12633 	rv = sc->nvm.read(sc, word, wordcnt, data);
12634 
12635 	return rv;
12636 }
12637 
12638 /*
12639  * Hardware semaphores.
12640  * Very complexed...
12641  */
12642 
12643 static int
12644 wm_get_null(struct wm_softc *sc)
12645 {
12646 
12647 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12648 		device_xname(sc->sc_dev), __func__));
12649 	return 0;
12650 }
12651 
12652 static void
12653 wm_put_null(struct wm_softc *sc)
12654 {
12655 
12656 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12657 		device_xname(sc->sc_dev), __func__));
12658 	return;
12659 }
12660 
12661 static int
12662 wm_get_eecd(struct wm_softc *sc)
12663 {
12664 	uint32_t reg;
12665 	int x;
12666 
12667 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
12668 		device_xname(sc->sc_dev), __func__));
12669 
12670 	reg = CSR_READ(sc, WMREG_EECD);
12671 
12672 	/* Request EEPROM access. */
12673 	reg |= EECD_EE_REQ;
12674 	CSR_WRITE(sc, WMREG_EECD, reg);
12675 
12676 	/* ..and wait for it to be granted. */
12677 	for (x = 0; x < 1000; x++) {
12678 		reg = CSR_READ(sc, WMREG_EECD);
12679 		if (reg & EECD_EE_GNT)
12680 			break;
12681 		delay(5);
12682 	}
12683 	if ((reg & EECD_EE_GNT) == 0) {
12684 		aprint_error_dev(sc->sc_dev,
12685 		    "could not acquire EEPROM GNT\n");
12686 		reg &= ~EECD_EE_REQ;
12687 		CSR_WRITE(sc, WMREG_EECD, reg);
12688 		return -1;
12689 	}
12690 
12691 	return 0;
12692 }
12693 
12694 static void
12695 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
12696 {
12697 
12698 	*eecd |= EECD_SK;
12699 	CSR_WRITE(sc, WMREG_EECD, *eecd);
12700 	CSR_WRITE_FLUSH(sc);
12701 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
12702 		delay(1);
12703 	else
12704 		delay(50);
12705 }
12706 
12707 static void
12708 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
12709 {
12710 
12711 	*eecd &= ~EECD_SK;
12712 	CSR_WRITE(sc, WMREG_EECD, *eecd);
12713 	CSR_WRITE_FLUSH(sc);
12714 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
12715 		delay(1);
12716 	else
12717 		delay(50);
12718 }
12719 
12720 static void
12721 wm_put_eecd(struct wm_softc *sc)
12722 {
12723 	uint32_t reg;
12724 
12725 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12726 		device_xname(sc->sc_dev), __func__));
12727 
12728 	/* Stop nvm */
12729 	reg = CSR_READ(sc, WMREG_EECD);
12730 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
12731 		/* Pull CS high */
12732 		reg |= EECD_CS;
12733 		wm_nvm_eec_clock_lower(sc, &reg);
12734 	} else {
12735 		/* CS on Microwire is active-high */
12736 		reg &= ~(EECD_CS | EECD_DI);
12737 		CSR_WRITE(sc, WMREG_EECD, reg);
12738 		wm_nvm_eec_clock_raise(sc, &reg);
12739 		wm_nvm_eec_clock_lower(sc, &reg);
12740 	}
12741 
12742 	reg = CSR_READ(sc, WMREG_EECD);
12743 	reg &= ~EECD_EE_REQ;
12744 	CSR_WRITE(sc, WMREG_EECD, reg);
12745 
12746 	return;
12747 }
12748 
12749 /*
12750  * Get hardware semaphore.
12751  * Same as e1000_get_hw_semaphore_generic()
12752  */
12753 static int
12754 wm_get_swsm_semaphore(struct wm_softc *sc)
12755 {
12756 	int32_t timeout;
12757 	uint32_t swsm;
12758 
12759 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12760 		device_xname(sc->sc_dev), __func__));
12761 	KASSERT(sc->sc_nvm_wordsize > 0);
12762 
12763 retry:
12764 	/* Get the SW semaphore. */
12765 	timeout = sc->sc_nvm_wordsize + 1;
12766 	while (timeout) {
12767 		swsm = CSR_READ(sc, WMREG_SWSM);
12768 
12769 		if ((swsm & SWSM_SMBI) == 0)
12770 			break;
12771 
12772 		delay(50);
12773 		timeout--;
12774 	}
12775 
12776 	if (timeout == 0) {
12777 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
12778 			/*
12779 			 * In rare circumstances, the SW semaphore may already
12780 			 * be held unintentionally. Clear the semaphore once
12781 			 * before giving up.
12782 			 */
12783 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
12784 			wm_put_swsm_semaphore(sc);
12785 			goto retry;
12786 		}
12787 		aprint_error_dev(sc->sc_dev,
12788 		    "could not acquire SWSM SMBI\n");
12789 		return 1;
12790 	}
12791 
12792 	/* Get the FW semaphore. */
12793 	timeout = sc->sc_nvm_wordsize + 1;
12794 	while (timeout) {
12795 		swsm = CSR_READ(sc, WMREG_SWSM);
12796 		swsm |= SWSM_SWESMBI;
12797 		CSR_WRITE(sc, WMREG_SWSM, swsm);
12798 		/* If we managed to set the bit we got the semaphore. */
12799 		swsm = CSR_READ(sc, WMREG_SWSM);
12800 		if (swsm & SWSM_SWESMBI)
12801 			break;
12802 
12803 		delay(50);
12804 		timeout--;
12805 	}
12806 
12807 	if (timeout == 0) {
12808 		aprint_error_dev(sc->sc_dev,
12809 		    "could not acquire SWSM SWESMBI\n");
12810 		/* Release semaphores */
12811 		wm_put_swsm_semaphore(sc);
12812 		return 1;
12813 	}
12814 	return 0;
12815 }
12816 
12817 /*
12818  * Put hardware semaphore.
12819  * Same as e1000_put_hw_semaphore_generic()
12820  */
12821 static void
12822 wm_put_swsm_semaphore(struct wm_softc *sc)
12823 {
12824 	uint32_t swsm;
12825 
12826 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12827 		device_xname(sc->sc_dev), __func__));
12828 
12829 	swsm = CSR_READ(sc, WMREG_SWSM);
12830 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
12831 	CSR_WRITE(sc, WMREG_SWSM, swsm);
12832 }
12833 
12834 /*
12835  * Get SW/FW semaphore.
12836  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
12837  */
12838 static int
12839 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
12840 {
12841 	uint32_t swfw_sync;
12842 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
12843 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
12844 	int timeout;
12845 
12846 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12847 		device_xname(sc->sc_dev), __func__));
12848 
12849 	if (sc->sc_type == WM_T_80003)
12850 		timeout = 50;
12851 	else
12852 		timeout = 200;
12853 
12854 	for (timeout = 0; timeout < 200; timeout++) {
12855 		if (wm_get_swsm_semaphore(sc)) {
12856 			aprint_error_dev(sc->sc_dev,
12857 			    "%s: failed to get semaphore\n",
12858 			    __func__);
12859 			return 1;
12860 		}
12861 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
12862 		if ((swfw_sync & (swmask | fwmask)) == 0) {
12863 			swfw_sync |= swmask;
12864 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
12865 			wm_put_swsm_semaphore(sc);
12866 			return 0;
12867 		}
12868 		wm_put_swsm_semaphore(sc);
12869 		delay(5000);
12870 	}
12871 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
12872 	    device_xname(sc->sc_dev), mask, swfw_sync);
12873 	return 1;
12874 }
12875 
12876 static void
12877 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
12878 {
12879 	uint32_t swfw_sync;
12880 
12881 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12882 		device_xname(sc->sc_dev), __func__));
12883 
12884 	while (wm_get_swsm_semaphore(sc) != 0)
12885 		continue;
12886 
12887 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
12888 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
12889 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
12890 
12891 	wm_put_swsm_semaphore(sc);
12892 }
12893 
12894 static int
12895 wm_get_nvm_80003(struct wm_softc *sc)
12896 {
12897 	int rv;
12898 
12899 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
12900 		device_xname(sc->sc_dev), __func__));
12901 
12902 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
12903 		aprint_error_dev(sc->sc_dev,
12904 		    "%s: failed to get semaphore(SWFW)\n",
12905 		    __func__);
12906 		return rv;
12907 	}
12908 
12909 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
12910 	    && (rv = wm_get_eecd(sc)) != 0) {
12911 		aprint_error_dev(sc->sc_dev,
12912 		    "%s: failed to get semaphore(EECD)\n",
12913 		    __func__);
12914 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
12915 		return rv;
12916 	}
12917 
12918 	return 0;
12919 }
12920 
12921 static void
12922 wm_put_nvm_80003(struct wm_softc *sc)
12923 {
12924 
12925 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12926 		device_xname(sc->sc_dev), __func__));
12927 
12928 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
12929 		wm_put_eecd(sc);
12930 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
12931 }
12932 
12933 static int
12934 wm_get_nvm_82571(struct wm_softc *sc)
12935 {
12936 	int rv;
12937 
12938 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12939 		device_xname(sc->sc_dev), __func__));
12940 
12941 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
12942 		return rv;
12943 
12944 	switch (sc->sc_type) {
12945 	case WM_T_82573:
12946 		break;
12947 	default:
12948 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
12949 			rv = wm_get_eecd(sc);
12950 		break;
12951 	}
12952 
12953 	if (rv != 0) {
12954 		aprint_error_dev(sc->sc_dev,
12955 		    "%s: failed to get semaphore\n",
12956 		    __func__);
12957 		wm_put_swsm_semaphore(sc);
12958 	}
12959 
12960 	return rv;
12961 }
12962 
12963 static void
12964 wm_put_nvm_82571(struct wm_softc *sc)
12965 {
12966 
12967 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12968 		device_xname(sc->sc_dev), __func__));
12969 
12970 	switch (sc->sc_type) {
12971 	case WM_T_82573:
12972 		break;
12973 	default:
12974 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
12975 			wm_put_eecd(sc);
12976 		break;
12977 	}
12978 
12979 	wm_put_swsm_semaphore(sc);
12980 }
12981 
12982 static int
12983 wm_get_phy_82575(struct wm_softc *sc)
12984 {
12985 
12986 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12987 		device_xname(sc->sc_dev), __func__));
12988 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
12989 }
12990 
12991 static void
12992 wm_put_phy_82575(struct wm_softc *sc)
12993 {
12994 
12995 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12996 		device_xname(sc->sc_dev), __func__));
12997 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
12998 }
12999 
13000 static int
13001 wm_get_swfwhw_semaphore(struct wm_softc *sc)
13002 {
13003 	uint32_t ext_ctrl;
13004 	int timeout = 200;
13005 
13006 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13007 		device_xname(sc->sc_dev), __func__));
13008 
13009 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
13010 	for (timeout = 0; timeout < 200; timeout++) {
13011 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
13012 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
13013 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
13014 
13015 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
13016 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
13017 			return 0;
13018 		delay(5000);
13019 	}
13020 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
13021 	    device_xname(sc->sc_dev), ext_ctrl);
13022 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
13023 	return 1;
13024 }
13025 
13026 static void
13027 wm_put_swfwhw_semaphore(struct wm_softc *sc)
13028 {
13029 	uint32_t ext_ctrl;
13030 
13031 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13032 		device_xname(sc->sc_dev), __func__));
13033 
13034 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
13035 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
13036 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
13037 
13038 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
13039 }
13040 
13041 static int
13042 wm_get_swflag_ich8lan(struct wm_softc *sc)
13043 {
13044 	uint32_t ext_ctrl;
13045 	int timeout;
13046 
13047 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13048 		device_xname(sc->sc_dev), __func__));
13049 	mutex_enter(sc->sc_ich_phymtx);
13050 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
13051 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
13052 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
13053 			break;
13054 		delay(1000);
13055 	}
13056 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
13057 		printf("%s: SW has already locked the resource\n",
13058 		    device_xname(sc->sc_dev));
13059 		goto out;
13060 	}
13061 
13062 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
13063 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
13064 	for (timeout = 0; timeout < 1000; timeout++) {
13065 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
13066 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
13067 			break;
13068 		delay(1000);
13069 	}
13070 	if (timeout >= 1000) {
13071 		printf("%s: failed to acquire semaphore\n",
13072 		    device_xname(sc->sc_dev));
13073 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
13074 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
13075 		goto out;
13076 	}
13077 	return 0;
13078 
13079 out:
13080 	mutex_exit(sc->sc_ich_phymtx);
13081 	return 1;
13082 }
13083 
13084 static void
13085 wm_put_swflag_ich8lan(struct wm_softc *sc)
13086 {
13087 	uint32_t ext_ctrl;
13088 
13089 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13090 		device_xname(sc->sc_dev), __func__));
13091 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
13092 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
13093 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
13094 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
13095 	} else {
13096 		printf("%s: Semaphore unexpectedly released\n",
13097 		    device_xname(sc->sc_dev));
13098 	}
13099 
13100 	mutex_exit(sc->sc_ich_phymtx);
13101 }
13102 
13103 static int
13104 wm_get_nvm_ich8lan(struct wm_softc *sc)
13105 {
13106 
13107 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13108 		device_xname(sc->sc_dev), __func__));
13109 	mutex_enter(sc->sc_ich_nvmmtx);
13110 
13111 	return 0;
13112 }
13113 
13114 static void
13115 wm_put_nvm_ich8lan(struct wm_softc *sc)
13116 {
13117 
13118 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13119 		device_xname(sc->sc_dev), __func__));
13120 	mutex_exit(sc->sc_ich_nvmmtx);
13121 }
13122 
13123 static int
13124 wm_get_hw_semaphore_82573(struct wm_softc *sc)
13125 {
13126 	int i = 0;
13127 	uint32_t reg;
13128 
13129 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13130 		device_xname(sc->sc_dev), __func__));
13131 
13132 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
13133 	do {
13134 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
13135 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
13136 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
13137 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
13138 			break;
13139 		delay(2*1000);
13140 		i++;
13141 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
13142 
13143 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
13144 		wm_put_hw_semaphore_82573(sc);
13145 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
13146 		    device_xname(sc->sc_dev));
13147 		return -1;
13148 	}
13149 
13150 	return 0;
13151 }
13152 
13153 static void
13154 wm_put_hw_semaphore_82573(struct wm_softc *sc)
13155 {
13156 	uint32_t reg;
13157 
13158 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13159 		device_xname(sc->sc_dev), __func__));
13160 
13161 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
13162 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
13163 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
13164 }
13165 
13166 /*
13167  * Management mode and power management related subroutines.
13168  * BMC, AMT, suspend/resume and EEE.
13169  */
13170 
13171 #ifdef WM_WOL
13172 static int
13173 wm_check_mng_mode(struct wm_softc *sc)
13174 {
13175 	int rv;
13176 
13177 	switch (sc->sc_type) {
13178 	case WM_T_ICH8:
13179 	case WM_T_ICH9:
13180 	case WM_T_ICH10:
13181 	case WM_T_PCH:
13182 	case WM_T_PCH2:
13183 	case WM_T_PCH_LPT:
13184 	case WM_T_PCH_SPT:
13185 		rv = wm_check_mng_mode_ich8lan(sc);
13186 		break;
13187 	case WM_T_82574:
13188 	case WM_T_82583:
13189 		rv = wm_check_mng_mode_82574(sc);
13190 		break;
13191 	case WM_T_82571:
13192 	case WM_T_82572:
13193 	case WM_T_82573:
13194 	case WM_T_80003:
13195 		rv = wm_check_mng_mode_generic(sc);
13196 		break;
13197 	default:
13198 		/* noting to do */
13199 		rv = 0;
13200 		break;
13201 	}
13202 
13203 	return rv;
13204 }
13205 
13206 static int
13207 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
13208 {
13209 	uint32_t fwsm;
13210 
13211 	fwsm = CSR_READ(sc, WMREG_FWSM);
13212 
13213 	if (((fwsm & FWSM_FW_VALID) != 0)
13214 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
13215 		return 1;
13216 
13217 	return 0;
13218 }
13219 
13220 static int
13221 wm_check_mng_mode_82574(struct wm_softc *sc)
13222 {
13223 	uint16_t data;
13224 
13225 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
13226 
13227 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
13228 		return 1;
13229 
13230 	return 0;
13231 }
13232 
13233 static int
13234 wm_check_mng_mode_generic(struct wm_softc *sc)
13235 {
13236 	uint32_t fwsm;
13237 
13238 	fwsm = CSR_READ(sc, WMREG_FWSM);
13239 
13240 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
13241 		return 1;
13242 
13243 	return 0;
13244 }
13245 #endif /* WM_WOL */
13246 
13247 static int
13248 wm_enable_mng_pass_thru(struct wm_softc *sc)
13249 {
13250 	uint32_t manc, fwsm, factps;
13251 
13252 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
13253 		return 0;
13254 
13255 	manc = CSR_READ(sc, WMREG_MANC);
13256 
13257 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
13258 		device_xname(sc->sc_dev), manc));
13259 	if ((manc & MANC_RECV_TCO_EN) == 0)
13260 		return 0;
13261 
13262 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
13263 		fwsm = CSR_READ(sc, WMREG_FWSM);
13264 		factps = CSR_READ(sc, WMREG_FACTPS);
13265 		if (((factps & FACTPS_MNGCG) == 0)
13266 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
13267 			return 1;
13268 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
13269 		uint16_t data;
13270 
13271 		factps = CSR_READ(sc, WMREG_FACTPS);
13272 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
13273 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
13274 			device_xname(sc->sc_dev), factps, data));
13275 		if (((factps & FACTPS_MNGCG) == 0)
13276 		    && ((data & NVM_CFG2_MNGM_MASK)
13277 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
13278 			return 1;
13279 	} else if (((manc & MANC_SMBUS_EN) != 0)
13280 	    && ((manc & MANC_ASF_EN) == 0))
13281 		return 1;
13282 
13283 	return 0;
13284 }
13285 
13286 static bool
13287 wm_phy_resetisblocked(struct wm_softc *sc)
13288 {
13289 	bool blocked = false;
13290 	uint32_t reg;
13291 	int i = 0;
13292 
13293 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13294 		device_xname(sc->sc_dev), __func__));
13295 
13296 	switch (sc->sc_type) {
13297 	case WM_T_ICH8:
13298 	case WM_T_ICH9:
13299 	case WM_T_ICH10:
13300 	case WM_T_PCH:
13301 	case WM_T_PCH2:
13302 	case WM_T_PCH_LPT:
13303 	case WM_T_PCH_SPT:
13304 		do {
13305 			reg = CSR_READ(sc, WMREG_FWSM);
13306 			if ((reg & FWSM_RSPCIPHY) == 0) {
13307 				blocked = true;
13308 				delay(10*1000);
13309 				continue;
13310 			}
13311 			blocked = false;
13312 		} while (blocked && (i++ < 30));
13313 		return blocked;
13314 		break;
13315 	case WM_T_82571:
13316 	case WM_T_82572:
13317 	case WM_T_82573:
13318 	case WM_T_82574:
13319 	case WM_T_82583:
13320 	case WM_T_80003:
13321 		reg = CSR_READ(sc, WMREG_MANC);
13322 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
13323 			return true;
13324 		else
13325 			return false;
13326 		break;
13327 	default:
13328 		/* no problem */
13329 		break;
13330 	}
13331 
13332 	return false;
13333 }
13334 
13335 static void
13336 wm_get_hw_control(struct wm_softc *sc)
13337 {
13338 	uint32_t reg;
13339 
13340 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13341 		device_xname(sc->sc_dev), __func__));
13342 
13343 	if (sc->sc_type == WM_T_82573) {
13344 		reg = CSR_READ(sc, WMREG_SWSM);
13345 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
13346 	} else if (sc->sc_type >= WM_T_82571) {
13347 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
13348 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
13349 	}
13350 }
13351 
13352 static void
13353 wm_release_hw_control(struct wm_softc *sc)
13354 {
13355 	uint32_t reg;
13356 
13357 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13358 		device_xname(sc->sc_dev), __func__));
13359 
13360 	if (sc->sc_type == WM_T_82573) {
13361 		reg = CSR_READ(sc, WMREG_SWSM);
13362 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
13363 	} else if (sc->sc_type >= WM_T_82571) {
13364 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
13365 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
13366 	}
13367 }
13368 
13369 static void
13370 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
13371 {
13372 	uint32_t reg;
13373 
13374 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13375 		device_xname(sc->sc_dev), __func__));
13376 
13377 	if (sc->sc_type < WM_T_PCH2)
13378 		return;
13379 
13380 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
13381 
13382 	if (gate)
13383 		reg |= EXTCNFCTR_GATE_PHY_CFG;
13384 	else
13385 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
13386 
13387 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
13388 }
13389 
13390 static void
13391 wm_smbustopci(struct wm_softc *sc)
13392 {
13393 	uint32_t fwsm, reg;
13394 	int rv = 0;
13395 
13396 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13397 		device_xname(sc->sc_dev), __func__));
13398 
13399 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
13400 	wm_gate_hw_phy_config_ich8lan(sc, true);
13401 
13402 	/* Disable ULP */
13403 	wm_ulp_disable(sc);
13404 
13405 	/* Acquire PHY semaphore */
13406 	sc->phy.acquire(sc);
13407 
13408 	fwsm = CSR_READ(sc, WMREG_FWSM);
13409 	switch (sc->sc_type) {
13410 	case WM_T_PCH_LPT:
13411 	case WM_T_PCH_SPT:
13412 		if (wm_phy_is_accessible_pchlan(sc))
13413 			break;
13414 
13415 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
13416 		reg |= CTRL_EXT_FORCE_SMBUS;
13417 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
13418 #if 0
13419 		/* XXX Isn't this required??? */
13420 		CSR_WRITE_FLUSH(sc);
13421 #endif
13422 		delay(50 * 1000);
13423 		/* FALLTHROUGH */
13424 	case WM_T_PCH2:
13425 		if (wm_phy_is_accessible_pchlan(sc) == true)
13426 			break;
13427 		/* FALLTHROUGH */
13428 	case WM_T_PCH:
13429 		if (sc->sc_type == WM_T_PCH)
13430 			if ((fwsm & FWSM_FW_VALID) != 0)
13431 				break;
13432 
13433 		if (wm_phy_resetisblocked(sc) == true) {
13434 			printf("XXX reset is blocked(3)\n");
13435 			break;
13436 		}
13437 
13438 		wm_toggle_lanphypc_pch_lpt(sc);
13439 
13440 		if (sc->sc_type >= WM_T_PCH_LPT) {
13441 			if (wm_phy_is_accessible_pchlan(sc) == true)
13442 				break;
13443 
13444 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
13445 			reg &= ~CTRL_EXT_FORCE_SMBUS;
13446 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
13447 
13448 			if (wm_phy_is_accessible_pchlan(sc) == true)
13449 				break;
13450 			rv = -1;
13451 		}
13452 		break;
13453 	default:
13454 		break;
13455 	}
13456 
13457 	/* Release semaphore */
13458 	sc->phy.release(sc);
13459 
13460 	if (rv == 0) {
13461 		if (wm_phy_resetisblocked(sc)) {
13462 			printf("XXX reset is blocked(4)\n");
13463 			goto out;
13464 		}
13465 		wm_reset_phy(sc);
13466 		if (wm_phy_resetisblocked(sc))
13467 			printf("XXX reset is blocked(4)\n");
13468 	}
13469 
13470 out:
13471 	/*
13472 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
13473 	 */
13474 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
13475 		delay(10*1000);
13476 		wm_gate_hw_phy_config_ich8lan(sc, false);
13477 	}
13478 }
13479 
13480 static void
13481 wm_init_manageability(struct wm_softc *sc)
13482 {
13483 
13484 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13485 		device_xname(sc->sc_dev), __func__));
13486 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
13487 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
13488 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
13489 
13490 		/* Disable hardware interception of ARP */
13491 		manc &= ~MANC_ARP_EN;
13492 
13493 		/* Enable receiving management packets to the host */
13494 		if (sc->sc_type >= WM_T_82571) {
13495 			manc |= MANC_EN_MNG2HOST;
13496 			manc2h |= MANC2H_PORT_623| MANC2H_PORT_624;
13497 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
13498 		}
13499 
13500 		CSR_WRITE(sc, WMREG_MANC, manc);
13501 	}
13502 }
13503 
13504 static void
13505 wm_release_manageability(struct wm_softc *sc)
13506 {
13507 
13508 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
13509 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
13510 
13511 		manc |= MANC_ARP_EN;
13512 		if (sc->sc_type >= WM_T_82571)
13513 			manc &= ~MANC_EN_MNG2HOST;
13514 
13515 		CSR_WRITE(sc, WMREG_MANC, manc);
13516 	}
13517 }
13518 
13519 static void
13520 wm_get_wakeup(struct wm_softc *sc)
13521 {
13522 
13523 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
13524 	switch (sc->sc_type) {
13525 	case WM_T_82573:
13526 	case WM_T_82583:
13527 		sc->sc_flags |= WM_F_HAS_AMT;
13528 		/* FALLTHROUGH */
13529 	case WM_T_80003:
13530 	case WM_T_82575:
13531 	case WM_T_82576:
13532 	case WM_T_82580:
13533 	case WM_T_I350:
13534 	case WM_T_I354:
13535 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
13536 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
13537 		/* FALLTHROUGH */
13538 	case WM_T_82541:
13539 	case WM_T_82541_2:
13540 	case WM_T_82547:
13541 	case WM_T_82547_2:
13542 	case WM_T_82571:
13543 	case WM_T_82572:
13544 	case WM_T_82574:
13545 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
13546 		break;
13547 	case WM_T_ICH8:
13548 	case WM_T_ICH9:
13549 	case WM_T_ICH10:
13550 	case WM_T_PCH:
13551 	case WM_T_PCH2:
13552 	case WM_T_PCH_LPT:
13553 	case WM_T_PCH_SPT:
13554 		sc->sc_flags |= WM_F_HAS_AMT;
13555 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
13556 		break;
13557 	default:
13558 		break;
13559 	}
13560 
13561 	/* 1: HAS_MANAGE */
13562 	if (wm_enable_mng_pass_thru(sc) != 0)
13563 		sc->sc_flags |= WM_F_HAS_MANAGE;
13564 
13565 	/*
13566 	 * Note that the WOL flags is set after the resetting of the eeprom
13567 	 * stuff
13568 	 */
13569 }
13570 
13571 /*
13572  * Unconfigure Ultra Low Power mode.
13573  * Only for I217 and newer (see below).
13574  */
13575 static void
13576 wm_ulp_disable(struct wm_softc *sc)
13577 {
13578 	uint32_t reg;
13579 	int i = 0;
13580 
13581 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13582 		device_xname(sc->sc_dev), __func__));
13583 	/* Exclude old devices */
13584 	if ((sc->sc_type < WM_T_PCH_LPT)
13585 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
13586 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
13587 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
13588 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
13589 		return;
13590 
13591 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
13592 		/* Request ME un-configure ULP mode in the PHY */
13593 		reg = CSR_READ(sc, WMREG_H2ME);
13594 		reg &= ~H2ME_ULP;
13595 		reg |= H2ME_ENFORCE_SETTINGS;
13596 		CSR_WRITE(sc, WMREG_H2ME, reg);
13597 
13598 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
13599 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
13600 			if (i++ == 30) {
13601 				printf("%s timed out\n", __func__);
13602 				return;
13603 			}
13604 			delay(10 * 1000);
13605 		}
13606 		reg = CSR_READ(sc, WMREG_H2ME);
13607 		reg &= ~H2ME_ENFORCE_SETTINGS;
13608 		CSR_WRITE(sc, WMREG_H2ME, reg);
13609 
13610 		return;
13611 	}
13612 
13613 	/* Acquire semaphore */
13614 	sc->phy.acquire(sc);
13615 
13616 	/* Toggle LANPHYPC */
13617 	wm_toggle_lanphypc_pch_lpt(sc);
13618 
13619 	/* Unforce SMBus mode in PHY */
13620 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
13621 	if (reg == 0x0000 || reg == 0xffff) {
13622 		uint32_t reg2;
13623 
13624 		printf("%s: Force SMBus first.\n", __func__);
13625 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
13626 		reg2 |= CTRL_EXT_FORCE_SMBUS;
13627 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
13628 		delay(50 * 1000);
13629 
13630 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
13631 	}
13632 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
13633 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
13634 
13635 	/* Unforce SMBus mode in MAC */
13636 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
13637 	reg &= ~CTRL_EXT_FORCE_SMBUS;
13638 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
13639 
13640 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
13641 	reg |= HV_PM_CTRL_K1_ENA;
13642 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
13643 
13644 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
13645 	reg &= ~(I218_ULP_CONFIG1_IND
13646 	    | I218_ULP_CONFIG1_STICKY_ULP
13647 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
13648 	    | I218_ULP_CONFIG1_WOL_HOST
13649 	    | I218_ULP_CONFIG1_INBAND_EXIT
13650 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
13651 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
13652 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
13653 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
13654 	reg |= I218_ULP_CONFIG1_START;
13655 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
13656 
13657 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
13658 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
13659 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
13660 
13661 	/* Release semaphore */
13662 	sc->phy.release(sc);
13663 	wm_gmii_reset(sc);
13664 	delay(50 * 1000);
13665 }
13666 
13667 /* WOL in the newer chipset interfaces (pchlan) */
13668 static void
13669 wm_enable_phy_wakeup(struct wm_softc *sc)
13670 {
13671 #if 0
13672 	uint16_t preg;
13673 
13674 	/* Copy MAC RARs to PHY RARs */
13675 
13676 	/* Copy MAC MTA to PHY MTA */
13677 
13678 	/* Configure PHY Rx Control register */
13679 
13680 	/* Enable PHY wakeup in MAC register */
13681 
13682 	/* Configure and enable PHY wakeup in PHY registers */
13683 
13684 	/* Activate PHY wakeup */
13685 
13686 	/* XXX */
13687 #endif
13688 }
13689 
13690 /* Power down workaround on D3 */
13691 static void
13692 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
13693 {
13694 	uint32_t reg;
13695 	int i;
13696 
13697 	for (i = 0; i < 2; i++) {
13698 		/* Disable link */
13699 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
13700 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
13701 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
13702 
13703 		/*
13704 		 * Call gig speed drop workaround on Gig disable before
13705 		 * accessing any PHY registers
13706 		 */
13707 		if (sc->sc_type == WM_T_ICH8)
13708 			wm_gig_downshift_workaround_ich8lan(sc);
13709 
13710 		/* Write VR power-down enable */
13711 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
13712 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
13713 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
13714 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
13715 
13716 		/* Read it back and test */
13717 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
13718 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
13719 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
13720 			break;
13721 
13722 		/* Issue PHY reset and repeat at most one more time */
13723 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
13724 	}
13725 }
13726 
13727 static void
13728 wm_enable_wakeup(struct wm_softc *sc)
13729 {
13730 	uint32_t reg, pmreg;
13731 	pcireg_t pmode;
13732 
13733 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13734 		device_xname(sc->sc_dev), __func__));
13735 
13736 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
13737 		&pmreg, NULL) == 0)
13738 		return;
13739 
13740 	/* Advertise the wakeup capability */
13741 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
13742 	    | CTRL_SWDPIN(3));
13743 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
13744 
13745 	/* ICH workaround */
13746 	switch (sc->sc_type) {
13747 	case WM_T_ICH8:
13748 	case WM_T_ICH9:
13749 	case WM_T_ICH10:
13750 	case WM_T_PCH:
13751 	case WM_T_PCH2:
13752 	case WM_T_PCH_LPT:
13753 	case WM_T_PCH_SPT:
13754 		/* Disable gig during WOL */
13755 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
13756 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
13757 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
13758 		if (sc->sc_type == WM_T_PCH)
13759 			wm_gmii_reset(sc);
13760 
13761 		/* Power down workaround */
13762 		if (sc->sc_phytype == WMPHY_82577) {
13763 			struct mii_softc *child;
13764 
13765 			/* Assume that the PHY is copper */
13766 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
13767 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
13768 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
13769 				    (768 << 5) | 25, 0x0444); /* magic num */
13770 		}
13771 		break;
13772 	default:
13773 		break;
13774 	}
13775 
13776 	/* Keep the laser running on fiber adapters */
13777 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
13778 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
13779 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
13780 		reg |= CTRL_EXT_SWDPIN(3);
13781 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
13782 	}
13783 
13784 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
13785 #if 0	/* for the multicast packet */
13786 	reg |= WUFC_MC;
13787 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
13788 #endif
13789 
13790 	if (sc->sc_type >= WM_T_PCH)
13791 		wm_enable_phy_wakeup(sc);
13792 	else {
13793 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
13794 		CSR_WRITE(sc, WMREG_WUFC, reg);
13795 	}
13796 
13797 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
13798 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
13799 		|| (sc->sc_type == WM_T_PCH2))
13800 		    && (sc->sc_phytype == WMPHY_IGP_3))
13801 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
13802 
13803 	/* Request PME */
13804 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
13805 #if 0
13806 	/* Disable WOL */
13807 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
13808 #else
13809 	/* For WOL */
13810 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
13811 #endif
13812 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
13813 }
13814 
13815 /* Disable ASPM L0s and/or L1 for workaround */
13816 static void
13817 wm_disable_aspm(struct wm_softc *sc)
13818 {
13819 	pcireg_t reg, mask = 0;
13820 	unsigned const char *str = "";
13821 
13822 	/*
13823 	 *  Only for PCIe device which has PCIe capability in the PCI config
13824 	 * space.
13825 	 */
13826 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
13827 		return;
13828 
13829 	switch (sc->sc_type) {
13830 	case WM_T_82571:
13831 	case WM_T_82572:
13832 		/*
13833 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
13834 		 * State Power management L1 State (ASPM L1).
13835 		 */
13836 		mask = PCIE_LCSR_ASPM_L1;
13837 		str = "L1 is";
13838 		break;
13839 	case WM_T_82573:
13840 	case WM_T_82574:
13841 	case WM_T_82583:
13842 		/*
13843 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
13844 		 *
13845 		 * The 82574 and 82583 does not support PCIe ASPM L0s with
13846 		 * some chipset.  The document of 82574 and 82583 says that
13847 		 * disabling L0s with some specific chipset is sufficient,
13848 		 * but we follow as of the Intel em driver does.
13849 		 *
13850 		 * References:
13851 		 * Errata 8 of the Specification Update of i82573.
13852 		 * Errata 20 of the Specification Update of i82574.
13853 		 * Errata 9 of the Specification Update of i82583.
13854 		 */
13855 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
13856 		str = "L0s and L1 are";
13857 		break;
13858 	default:
13859 		return;
13860 	}
13861 
13862 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
13863 	    sc->sc_pcixe_capoff + PCIE_LCSR);
13864 	reg &= ~mask;
13865 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
13866 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
13867 
13868 	/* Print only in wm_attach() */
13869 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
13870 		aprint_verbose_dev(sc->sc_dev,
13871 		    "ASPM %s disabled to workaround the errata.\n",
13872 			str);
13873 }
13874 
13875 /* LPLU */
13876 
13877 static void
13878 wm_lplu_d0_disable(struct wm_softc *sc)
13879 {
13880 	struct mii_data *mii = &sc->sc_mii;
13881 	uint32_t reg;
13882 
13883 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13884 		device_xname(sc->sc_dev), __func__));
13885 
13886 	if (sc->sc_phytype == WMPHY_IFE)
13887 		return;
13888 
13889 	switch (sc->sc_type) {
13890 	case WM_T_82571:
13891 	case WM_T_82572:
13892 	case WM_T_82573:
13893 	case WM_T_82575:
13894 	case WM_T_82576:
13895 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
13896 		reg &= ~PMR_D0_LPLU;
13897 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
13898 		break;
13899 	case WM_T_82580:
13900 	case WM_T_I350:
13901 	case WM_T_I210:
13902 	case WM_T_I211:
13903 		reg = CSR_READ(sc, WMREG_PHPM);
13904 		reg &= ~PHPM_D0A_LPLU;
13905 		CSR_WRITE(sc, WMREG_PHPM, reg);
13906 		break;
13907 	case WM_T_82574:
13908 	case WM_T_82583:
13909 	case WM_T_ICH8:
13910 	case WM_T_ICH9:
13911 	case WM_T_ICH10:
13912 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
13913 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
13914 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
13915 		CSR_WRITE_FLUSH(sc);
13916 		break;
13917 	case WM_T_PCH:
13918 	case WM_T_PCH2:
13919 	case WM_T_PCH_LPT:
13920 	case WM_T_PCH_SPT:
13921 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
13922 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
13923 		if (wm_phy_resetisblocked(sc) == false)
13924 			reg |= HV_OEM_BITS_ANEGNOW;
13925 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
13926 		break;
13927 	default:
13928 		break;
13929 	}
13930 }
13931 
13932 /* EEE */
13933 
13934 static void
13935 wm_set_eee_i350(struct wm_softc *sc)
13936 {
13937 	uint32_t ipcnfg, eeer;
13938 
13939 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
13940 	eeer = CSR_READ(sc, WMREG_EEER);
13941 
13942 	if ((sc->sc_flags & WM_F_EEE) != 0) {
13943 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
13944 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
13945 		    | EEER_LPI_FC);
13946 	} else {
13947 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
13948 		ipcnfg &= ~IPCNFG_10BASE_TE;
13949 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
13950 		    | EEER_LPI_FC);
13951 	}
13952 
13953 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
13954 	CSR_WRITE(sc, WMREG_EEER, eeer);
13955 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
13956 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
13957 }
13958 
13959 /*
13960  * Workarounds (mainly PHY related).
13961  * Basically, PHY's workarounds are in the PHY drivers.
13962  */
13963 
13964 /* Work-around for 82566 Kumeran PCS lock loss */
13965 static void
13966 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
13967 {
13968 	struct mii_data *mii = &sc->sc_mii;
13969 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
13970 	int i;
13971 	int reg;
13972 
13973 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13974 		device_xname(sc->sc_dev), __func__));
13975 
13976 	/* If the link is not up, do nothing */
13977 	if ((status & STATUS_LU) == 0)
13978 		return;
13979 
13980 	/* Nothing to do if the link is other than 1Gbps */
13981 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
13982 		return;
13983 
13984 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
13985 	for (i = 0; i < 10; i++) {
13986 		/* read twice */
13987 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
13988 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
13989 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
13990 			goto out;	/* GOOD! */
13991 
13992 		/* Reset the PHY */
13993 		wm_reset_phy(sc);
13994 		delay(5*1000);
13995 	}
13996 
13997 	/* Disable GigE link negotiation */
13998 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
13999 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
14000 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
14001 
14002 	/*
14003 	 * Call gig speed drop workaround on Gig disable before accessing
14004 	 * any PHY registers.
14005 	 */
14006 	wm_gig_downshift_workaround_ich8lan(sc);
14007 
14008 out:
14009 	return;
14010 }
14011 
14012 /* WOL from S5 stops working */
14013 static void
14014 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
14015 {
14016 	uint16_t kmreg;
14017 
14018 	/* Only for igp3 */
14019 	if (sc->sc_phytype == WMPHY_IGP_3) {
14020 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
14021 			return;
14022 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
14023 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
14024 			return;
14025 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
14026 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
14027 	}
14028 }
14029 
14030 /*
14031  * Workaround for pch's PHYs
14032  * XXX should be moved to new PHY driver?
14033  */
14034 static void
14035 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
14036 {
14037 
14038 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14039 		device_xname(sc->sc_dev), __func__));
14040 	KASSERT(sc->sc_type == WM_T_PCH);
14041 
14042 	if (sc->sc_phytype == WMPHY_82577)
14043 		wm_set_mdio_slow_mode_hv(sc);
14044 
14045 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
14046 
14047 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
14048 
14049 	/* 82578 */
14050 	if (sc->sc_phytype == WMPHY_82578) {
14051 		struct mii_softc *child;
14052 
14053 		/*
14054 		 * Return registers to default by doing a soft reset then
14055 		 * writing 0x3140 to the control register
14056 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
14057 		 */
14058 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
14059 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
14060 			PHY_RESET(child);
14061 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
14062 			    0x3140);
14063 		}
14064 	}
14065 
14066 	/* Select page 0 */
14067 	sc->phy.acquire(sc);
14068 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
14069 	sc->phy.release(sc);
14070 
14071 	/*
14072 	 * Configure the K1 Si workaround during phy reset assuming there is
14073 	 * link so that it disables K1 if link is in 1Gbps.
14074 	 */
14075 	wm_k1_gig_workaround_hv(sc, 1);
14076 }
14077 
14078 static void
14079 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
14080 {
14081 
14082 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14083 		device_xname(sc->sc_dev), __func__));
14084 	KASSERT(sc->sc_type == WM_T_PCH2);
14085 
14086 	wm_set_mdio_slow_mode_hv(sc);
14087 }
14088 
14089 static int
14090 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
14091 {
14092 	int k1_enable = sc->sc_nvm_k1_enabled;
14093 
14094 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14095 		device_xname(sc->sc_dev), __func__));
14096 
14097 	if (sc->phy.acquire(sc) != 0)
14098 		return -1;
14099 
14100 	if (link) {
14101 		k1_enable = 0;
14102 
14103 		/* Link stall fix for link up */
14104 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
14105 	} else {
14106 		/* Link stall fix for link down */
14107 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
14108 	}
14109 
14110 	wm_configure_k1_ich8lan(sc, k1_enable);
14111 	sc->phy.release(sc);
14112 
14113 	return 0;
14114 }
14115 
14116 static void
14117 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
14118 {
14119 	uint32_t reg;
14120 
14121 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
14122 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
14123 	    reg | HV_KMRN_MDIO_SLOW);
14124 }
14125 
14126 static void
14127 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
14128 {
14129 	uint32_t ctrl, ctrl_ext, tmp;
14130 	uint16_t kmreg;
14131 	int rv;
14132 
14133 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
14134 	if (rv != 0)
14135 		return;
14136 
14137 	if (k1_enable)
14138 		kmreg |= KUMCTRLSTA_K1_ENABLE;
14139 	else
14140 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
14141 
14142 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
14143 	if (rv != 0)
14144 		return;
14145 
14146 	delay(20);
14147 
14148 	ctrl = CSR_READ(sc, WMREG_CTRL);
14149 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
14150 
14151 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
14152 	tmp |= CTRL_FRCSPD;
14153 
14154 	CSR_WRITE(sc, WMREG_CTRL, tmp);
14155 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
14156 	CSR_WRITE_FLUSH(sc);
14157 	delay(20);
14158 
14159 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
14160 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
14161 	CSR_WRITE_FLUSH(sc);
14162 	delay(20);
14163 
14164 	return;
14165 }
14166 
14167 /* special case - for 82575 - need to do manual init ... */
14168 static void
14169 wm_reset_init_script_82575(struct wm_softc *sc)
14170 {
14171 	/*
14172 	 * remark: this is untested code - we have no board without EEPROM
14173 	 *  same setup as mentioned int the FreeBSD driver for the i82575
14174 	 */
14175 
14176 	/* SerDes configuration via SERDESCTRL */
14177 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
14178 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
14179 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
14180 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
14181 
14182 	/* CCM configuration via CCMCTL register */
14183 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
14184 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
14185 
14186 	/* PCIe lanes configuration */
14187 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
14188 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
14189 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
14190 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
14191 
14192 	/* PCIe PLL Configuration */
14193 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
14194 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
14195 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
14196 }
14197 
14198 static void
14199 wm_reset_mdicnfg_82580(struct wm_softc *sc)
14200 {
14201 	uint32_t reg;
14202 	uint16_t nvmword;
14203 	int rv;
14204 
14205 	if ((sc->sc_flags & WM_F_SGMII) == 0)
14206 		return;
14207 
14208 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
14209 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
14210 	if (rv != 0) {
14211 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
14212 		    __func__);
14213 		return;
14214 	}
14215 
14216 	reg = CSR_READ(sc, WMREG_MDICNFG);
14217 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
14218 		reg |= MDICNFG_DEST;
14219 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
14220 		reg |= MDICNFG_COM_MDIO;
14221 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
14222 }
14223 
14224 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
14225 
14226 static bool
14227 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
14228 {
14229 	int i;
14230 	uint32_t reg;
14231 	uint16_t id1, id2;
14232 
14233 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14234 		device_xname(sc->sc_dev), __func__));
14235 	id1 = id2 = 0xffff;
14236 	for (i = 0; i < 2; i++) {
14237 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
14238 		if (MII_INVALIDID(id1))
14239 			continue;
14240 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
14241 		if (MII_INVALIDID(id2))
14242 			continue;
14243 		break;
14244 	}
14245 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2)) {
14246 		goto out;
14247 	}
14248 
14249 	if (sc->sc_type < WM_T_PCH_LPT) {
14250 		sc->phy.release(sc);
14251 		wm_set_mdio_slow_mode_hv(sc);
14252 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
14253 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
14254 		sc->phy.acquire(sc);
14255 	}
14256 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
14257 		printf("XXX return with false\n");
14258 		return false;
14259 	}
14260 out:
14261 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
14262 		/* Only unforce SMBus if ME is not active */
14263 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
14264 			/* Unforce SMBus mode in PHY */
14265 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
14266 			    CV_SMB_CTRL);
14267 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
14268 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
14269 			    CV_SMB_CTRL, reg);
14270 
14271 			/* Unforce SMBus mode in MAC */
14272 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
14273 			reg &= ~CTRL_EXT_FORCE_SMBUS;
14274 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
14275 		}
14276 	}
14277 	return true;
14278 }
14279 
14280 static void
14281 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
14282 {
14283 	uint32_t reg;
14284 	int i;
14285 
14286 	/* Set PHY Config Counter to 50msec */
14287 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
14288 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
14289 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
14290 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
14291 
14292 	/* Toggle LANPHYPC */
14293 	reg = CSR_READ(sc, WMREG_CTRL);
14294 	reg |= CTRL_LANPHYPC_OVERRIDE;
14295 	reg &= ~CTRL_LANPHYPC_VALUE;
14296 	CSR_WRITE(sc, WMREG_CTRL, reg);
14297 	CSR_WRITE_FLUSH(sc);
14298 	delay(1000);
14299 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
14300 	CSR_WRITE(sc, WMREG_CTRL, reg);
14301 	CSR_WRITE_FLUSH(sc);
14302 
14303 	if (sc->sc_type < WM_T_PCH_LPT)
14304 		delay(50 * 1000);
14305 	else {
14306 		i = 20;
14307 
14308 		do {
14309 			delay(5 * 1000);
14310 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
14311 		    && i--);
14312 
14313 		delay(30 * 1000);
14314 	}
14315 }
14316 
14317 static int
14318 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
14319 {
14320 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
14321 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
14322 	uint32_t rxa;
14323 	uint16_t scale = 0, lat_enc = 0;
14324 	int32_t obff_hwm = 0;
14325 	int64_t lat_ns, value;
14326 
14327 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14328 		device_xname(sc->sc_dev), __func__));
14329 
14330 	if (link) {
14331 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
14332 		uint32_t status;
14333 		uint16_t speed;
14334 		pcireg_t preg;
14335 
14336 		status = CSR_READ(sc, WMREG_STATUS);
14337 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
14338 		case STATUS_SPEED_10:
14339 			speed = 10;
14340 			break;
14341 		case STATUS_SPEED_100:
14342 			speed = 100;
14343 			break;
14344 		case STATUS_SPEED_1000:
14345 			speed = 1000;
14346 			break;
14347 		default:
14348 			device_printf(sc->sc_dev, "Unknown speed "
14349 			    "(status = %08x)\n", status);
14350 			return -1;
14351 		}
14352 
14353 		/* Rx Packet Buffer Allocation size (KB) */
14354 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
14355 
14356 		/*
14357 		 * Determine the maximum latency tolerated by the device.
14358 		 *
14359 		 * Per the PCIe spec, the tolerated latencies are encoded as
14360 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
14361 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
14362 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
14363 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
14364 		 */
14365 		lat_ns = ((int64_t)rxa * 1024 -
14366 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
14367 			+ ETHER_HDR_LEN))) * 8 * 1000;
14368 		if (lat_ns < 0)
14369 			lat_ns = 0;
14370 		else
14371 			lat_ns /= speed;
14372 		value = lat_ns;
14373 
14374 		while (value > LTRV_VALUE) {
14375 			scale ++;
14376 			value = howmany(value, __BIT(5));
14377 		}
14378 		if (scale > LTRV_SCALE_MAX) {
14379 			printf("%s: Invalid LTR latency scale %d\n",
14380 			    device_xname(sc->sc_dev), scale);
14381 			return -1;
14382 		}
14383 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
14384 
14385 		/* Determine the maximum latency tolerated by the platform */
14386 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
14387 		    WM_PCI_LTR_CAP_LPT);
14388 		max_snoop = preg & 0xffff;
14389 		max_nosnoop = preg >> 16;
14390 
14391 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
14392 
14393 		if (lat_enc > max_ltr_enc) {
14394 			lat_enc = max_ltr_enc;
14395 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
14396 			    * PCI_LTR_SCALETONS(
14397 				    __SHIFTOUT(lat_enc,
14398 					PCI_LTR_MAXSNOOPLAT_SCALE));
14399 		}
14400 
14401 		if (lat_ns) {
14402 			lat_ns *= speed * 1000;
14403 			lat_ns /= 8;
14404 			lat_ns /= 1000000000;
14405 			obff_hwm = (int32_t)(rxa - lat_ns);
14406 		}
14407 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
14408 			device_printf(sc->sc_dev, "Invalid high water mark %d"
14409 			    "(rxa = %d, lat_ns = %d)\n",
14410 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
14411 			return -1;
14412 		}
14413 	}
14414 	/* Snoop and No-Snoop latencies the same */
14415 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
14416 	CSR_WRITE(sc, WMREG_LTRV, reg);
14417 
14418 	/* Set OBFF high water mark */
14419 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
14420 	reg |= obff_hwm;
14421 	CSR_WRITE(sc, WMREG_SVT, reg);
14422 
14423 	/* Enable OBFF */
14424 	reg = CSR_READ(sc, WMREG_SVCR);
14425 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
14426 	CSR_WRITE(sc, WMREG_SVCR, reg);
14427 
14428 	return 0;
14429 }
14430 
14431 /*
14432  * I210 Errata 25 and I211 Errata 10
14433  * Slow System Clock.
14434  */
14435 static void
14436 wm_pll_workaround_i210(struct wm_softc *sc)
14437 {
14438 	uint32_t mdicnfg, wuc;
14439 	uint32_t reg;
14440 	pcireg_t pcireg;
14441 	uint32_t pmreg;
14442 	uint16_t nvmword, tmp_nvmword;
14443 	int phyval;
14444 	bool wa_done = false;
14445 	int i;
14446 
14447 	/* Save WUC and MDICNFG registers */
14448 	wuc = CSR_READ(sc, WMREG_WUC);
14449 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
14450 
14451 	reg = mdicnfg & ~MDICNFG_DEST;
14452 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
14453 
14454 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
14455 		nvmword = INVM_DEFAULT_AL;
14456 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
14457 
14458 	/* Get Power Management cap offset */
14459 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
14460 		&pmreg, NULL) == 0)
14461 		return;
14462 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
14463 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
14464 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
14465 
14466 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
14467 			break; /* OK */
14468 		}
14469 
14470 		wa_done = true;
14471 		/* Directly reset the internal PHY */
14472 		reg = CSR_READ(sc, WMREG_CTRL);
14473 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
14474 
14475 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
14476 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
14477 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
14478 
14479 		CSR_WRITE(sc, WMREG_WUC, 0);
14480 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
14481 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
14482 
14483 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
14484 		    pmreg + PCI_PMCSR);
14485 		pcireg |= PCI_PMCSR_STATE_D3;
14486 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
14487 		    pmreg + PCI_PMCSR, pcireg);
14488 		delay(1000);
14489 		pcireg &= ~PCI_PMCSR_STATE_D3;
14490 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
14491 		    pmreg + PCI_PMCSR, pcireg);
14492 
14493 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
14494 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
14495 
14496 		/* Restore WUC register */
14497 		CSR_WRITE(sc, WMREG_WUC, wuc);
14498 	}
14499 
14500 	/* Restore MDICNFG setting */
14501 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
14502 	if (wa_done)
14503 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
14504 }
14505 
14506 static void
14507 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
14508 {
14509 	uint32_t reg;
14510 
14511 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14512 		device_xname(sc->sc_dev), __func__));
14513 	KASSERT(sc->sc_type == WM_T_PCH_SPT);
14514 
14515 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
14516 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
14517 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
14518 
14519 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
14520 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
14521 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
14522 }
14523