/*	$NetBSD: if_wm.c,v 1.478 2017/02/13 05:02:21 knakahara Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.478 2017/02/13 05:02:21 knakahara Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#endif

/*
 * The maximum number of interrupts that this device driver supports.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
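
/*
 * Illustrative sketch (not part of the driver): because txq_ndesc is
 * a power of two, the AND with WM_NTXDESC_MASK() is a cheap modulo, so
 * the ring index wraps without a division.  E.g. with
 * txq_ndesc == WM_NTXDESC_82544 (4096):
 *
 *	WM_NEXTTX(txq, 100)  == 101
 *	WM_NEXTTX(txq, 4095) == 0	(wraps to the ring head)
 *
 * WM_NEXTTXS() applies the same idiom to the Tx job array.
 */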

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t     sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
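
/*
 * For example (illustrative values only): with the 16-byte legacy Tx
 * descriptor, WM_CDTXOFF(txq, 3) == 48, i.e. descriptor N starts
 * N * txq_descsize bytes into the queue's control-data DMA area.
 */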

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
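
/*
 * Sketch of how the event-counter macros expand (illustrative,
 * assuming queue 0 of a device named "wm0"):
 *
 *	WM_Q_EVCNT_DEFINE(txq, txdw)
 *
 * declares txq_txdw_evcnt_name[] and "struct evcnt txq_ev_txdw", and
 *
 *	WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, 0, "wm0");
 *
 * attaches that counter under the generated name "txq00txdw".
 */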

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segment */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
						/* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */

	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */

	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segment */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */

	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of transmit and receive queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int reset_delay_us;
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * legacy and msi use sc_ihs[0].
					 */
	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
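
/*
 * Typical usage sketch (not a verbatim excerpt): the core lock only
 * exists in the MPSAFE configuration, so these macros degrade to
 * no-ops (and WM_CORE_LOCKED() to true) otherwise:
 *
 *	WM_CORE_LOCK(sc);
 *	... modify softc state, e.g. sc->sc_if_flags ...
 *	WM_CORE_UNLOCK(sc);
 *
 * Assertions can use KASSERT(WM_CORE_LOCKED(sc)).
 */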

#ifdef WM_MPSAFE
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
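
/*
 * Illustration (not part of the driver): rxq_tailp always points at
 * the m_next slot the next fragment should fill, so a packet that
 * spans several 2k buffers is assembled as:
 *
 *	WM_RXCHAIN_RESET(rxq);		-> rxq_head == NULL, rxq_len == 0
 *	WM_RXCHAIN_LINK(rxq, m0);	-> rxq_head == m0
 *	WM_RXCHAIN_LINK(rxq, m1);	-> m0->m_next == m1, rxq_tail == m1
 */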

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
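
/*
 * Sketch (illustrative register names): a 64-bit descriptor ring base
 * address is programmed as a low/high pair of 32-bit writes, e.g.
 *
 *	CSR_WRITE(sc, tdbal_reg, WM_CDTXADDR_LO(txq, 0));
 *	CSR_WRITE(sc, tdbah_reg, WM_CDTXADDR_HI(txq, 0));
 *
 * where tdbal_reg/tdbah_reg stand for the chip's TDBAL/TDBAH offsets.
 * On systems with a 32-bit bus_addr_t the _HI half is simply 0.
 */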

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_turnon(struct wm_softc *);
static void	wm_turnoff(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static void	wm_deferred_start(struct ifnet *);
/* Interrupt */
static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
static void	wm_rxeof(struct wm_rxqueue *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * kumeran specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static void	wm_ulp_disable(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
static void	wm_lplu_d0_disable_pch(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
	  "I354 Gigabit Ethernet (KX)",
	  WM_T_I354,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Ethernet (SGMII)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
	  "I354 Gigabit Ethernet (2.5G)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
	  "I210 Gigabit Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
	  "I219 V Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
	  "I219 V Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
	  "I219 V Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
	  "I219 V Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
	  "I219 LM Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
	  "I219 LM Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
	  "I219 LM Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
	  "I219 LM Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
	  "I219 LM Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
#endif
	{ 0,			0,
	  NULL,
	  0,			0 },
};
1448 
1449 /*
1450  * Register read/write functions,
1451  * other than CSR_{READ|WRITE}().
1452  */
1453 
1454 #if 0 /* Not currently used */
1455 static inline uint32_t
1456 wm_io_read(struct wm_softc *sc, int reg)
1457 {
1458 
1459 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1460 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1461 }
1462 #endif
1463 
1464 static inline void
1465 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1466 {
1467 
1468 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1469 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1470 }
1471 
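/*
 * wm_82575_write_8bit_ctlr_reg:
 *
 *	Write one of the 8-bit controller registers reached through an
 *	SCTL-style index register: the byte offset and the data are
 *	packed into a single register write, after which the READY bit
 *	is polled (5us per try, up to SCTL_CTL_POLL_TIMEOUT tries).
 */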
1472 static inline void
1473 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1474     uint32_t data)
1475 {
1476 	uint32_t regval;
1477 	int i;
1478 
1479 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1480 
1481 	CSR_WRITE(sc, reg, regval);
1482 
1483 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1484 		delay(5);
1485 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1486 			break;
1487 	}
1488 	if (i == SCTL_CTL_POLL_TIMEOUT) {
1489 		aprint_error("%s: WARNING:"
1490 		    " i82575 reg 0x%08x setup did not indicate ready\n",
1491 		    device_xname(sc->sc_dev), reg);
1492 	}
1493 }
1494 
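/*
 * wm_set_dma_addr:
 *
 *	Store a bus address into the low/high 32-bit halves of a
 *	descriptor address field, little-endian.  Illustrative example:
 *	with a 64-bit bus_addr_t, v == 0x123456789 stores
 *	wa_low == htole32(0x23456789) and wa_high == htole32(0x1);
 *	on 32-bit platforms wa_high is always 0.
 */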
1495 static inline void
1496 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1497 {
1498 	wa->wa_low = htole32(v & 0xffffffffU);
1499 	if (sizeof(bus_addr_t) == 8)
1500 		wa->wa_high = htole32((uint64_t) v >> 32);
1501 	else
1502 		wa->wa_high = 0;
1503 }
1504 
1505 /*
1506  * Descriptor sync/init functions.
1507  */
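/*
 * wm_cdtxsync:
 *
 *	Sync the control data for a range of transmit descriptors,
 *	handling ring wrap.  Illustrative example: with a
 *	256-descriptor ring, start == 250 and num == 10 issue one
 *	bus_dmamap_sync() for descriptors 250..255 and a second one
 *	for descriptors 0..3.
 */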
1508 static inline void
1509 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1510 {
1511 	struct wm_softc *sc = txq->txq_sc;
1512 
1513 	/* If it will wrap around, sync to the end of the ring. */
1514 	if ((start + num) > WM_NTXDESC(txq)) {
1515 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1516 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
1517 		    (WM_NTXDESC(txq) - start), ops);
1518 		num -= (WM_NTXDESC(txq) - start);
1519 		start = 0;
1520 	}
1521 
1522 	/* Now sync whatever is left. */
1523 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1524 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
1525 }
1526 
1527 static inline void
1528 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1529 {
1530 	struct wm_softc *sc = rxq->rxq_sc;
1531 
1532 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1533 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
1534 }
1535 
1536 static inline void
1537 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1538 {
1539 	struct wm_softc *sc = rxq->rxq_sc;
1540 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1541 	struct mbuf *m = rxs->rxs_mbuf;
1542 
1543 	/*
1544 	 * Note: We scoot the packet forward 2 bytes in the buffer
1545 	 * so that the payload after the Ethernet header is aligned
1546 	 * to a 4-byte boundary.
1547 	 *
1548 	 * XXX BRAINDAMAGE ALERT!
1549 	 * The stupid chip uses the same size for every buffer, which
1550 	 * is set in the Receive Control register.  We are using the 2K
1551 	 * size option, but what we REALLY want is (2K - 2)!  For this
1552 	 * reason, we can't "scoot" packets longer than the standard
1553 	 * Ethernet MTU.  On strict-alignment platforms, if the total
1554 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
1555 	 * the upper layer copy the headers.
1556 	 */
1557 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
1558 
1559 	if (sc->sc_type == WM_T_82574) {
1560 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
1561 		rxd->erx_data.erxd_addr =
1562 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1563 		rxd->erx_data.erxd_dd = 0;
1564 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
1565 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
1566 
1567 		rxd->nqrx_data.nrxd_paddr =
1568 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1569 		/* Currently, split header is not supported. */
1570 		rxd->nqrx_data.nrxd_haddr = 0;
1571 	} else {
1572 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1573 
1574 		wm_set_dma_addr(&rxd->wrx_addr,
1575 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1576 		rxd->wrx_len = 0;
1577 		rxd->wrx_cksum = 0;
1578 		rxd->wrx_status = 0;
1579 		rxd->wrx_errors = 0;
1580 		rxd->wrx_special = 0;
1581 	}
1582 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1583 
1584 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
1585 }
1586 
1587 /*
1588  * Device driver interface functions and commonly used functions.
1589  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1590  */
1591 
1592 /* Lookup supported device table */
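/* The wm_products[] table ends with an entry whose wmp_name is NULL. */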
1593 static const struct wm_product *
1594 wm_lookup(const struct pci_attach_args *pa)
1595 {
1596 	const struct wm_product *wmp;
1597 
1598 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1599 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1600 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1601 			return wmp;
1602 	}
1603 	return NULL;
1604 }
1605 
1606 /* The match function (ca_match) */
1607 static int
1608 wm_match(device_t parent, cfdata_t cf, void *aux)
1609 {
1610 	struct pci_attach_args *pa = aux;
1611 
1612 	if (wm_lookup(pa) != NULL)
1613 		return 1;
1614 
1615 	return 0;
1616 }
1617 
1618 /* The attach function (ca_attach) */
1619 static void
1620 wm_attach(device_t parent, device_t self, void *aux)
1621 {
1622 	struct wm_softc *sc = device_private(self);
1623 	struct pci_attach_args *pa = aux;
1624 	prop_dictionary_t dict;
1625 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1626 	pci_chipset_tag_t pc = pa->pa_pc;
1627 	int counts[PCI_INTR_TYPE_SIZE];
1628 	pci_intr_type_t max_type;
1629 	const char *eetype, *xname;
1630 	bus_space_tag_t memt;
1631 	bus_space_handle_t memh;
1632 	bus_size_t memsize;
1633 	int memh_valid;
1634 	int i, error;
1635 	const struct wm_product *wmp;
1636 	prop_data_t ea;
1637 	prop_number_t pn;
1638 	uint8_t enaddr[ETHER_ADDR_LEN];
1639 	uint16_t cfg1, cfg2, swdpin, nvmword;
1640 	pcireg_t preg, memtype;
1641 	uint16_t eeprom_data, apme_mask;
1642 	bool force_clear_smbi;
1643 	uint32_t link_mode;
1644 	uint32_t reg;
1645 	void (*deferred_start_func)(struct ifnet *) = NULL;
1646 
1647 	sc->sc_dev = self;
1648 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
1649 	sc->sc_core_stopping = false;
1650 
1651 	wmp = wm_lookup(pa);
1652 #ifdef DIAGNOSTIC
1653 	if (wmp == NULL) {
1654 		printf("\n");
1655 		panic("wm_attach: impossible");
1656 	}
1657 #endif
1658 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
1659 
1660 	sc->sc_pc = pa->pa_pc;
1661 	sc->sc_pcitag = pa->pa_tag;
1662 
1663 	if (pci_dma64_available(pa))
1664 		sc->sc_dmat = pa->pa_dmat64;
1665 	else
1666 		sc->sc_dmat = pa->pa_dmat;
1667 
1668 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
1669 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
1670 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1671 
1672 	sc->sc_type = wmp->wmp_type;
1673 
1674 	/* Set default function pointers */
1675 	sc->phy.acquire = wm_get_null;
1676 	sc->phy.release = wm_put_null;
1677 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
1678 
1679 	if (sc->sc_type < WM_T_82543) {
1680 		if (sc->sc_rev < 2) {
1681 			aprint_error_dev(sc->sc_dev,
1682 			    "i82542 must be at least rev. 2\n");
1683 			return;
1684 		}
1685 		if (sc->sc_rev < 3)
1686 			sc->sc_type = WM_T_82542_2_0;
1687 	}
1688 
1689 	/*
1690 	 * Disable MSI for Errata:
1691 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
1692 	 *
1693 	 *  82544: Errata 25
1694 	 *  82540: Errata  6 (easy to reproduce device timeout)
1695 	 *  82545: Errata  4 (easy to reproduce device timeout)
1696 	 *  82546: Errata 26 (easy to reproduce device timeout)
1697 	 *  82541: Errata  7 (easy to reproduce device timeout)
1698 	 *
1699 	 * "Byte Enables 2 and 3 are not set on MSI writes"
1700 	 *
1701 	 *  82571 & 82572: Errata 63
1702 	 */
1703 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
1704 	    || (sc->sc_type == WM_T_82572))
1705 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
1706 
1707 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1708 	    || (sc->sc_type == WM_T_82580)
1709 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
1710 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
1711 		sc->sc_flags |= WM_F_NEWQUEUE;
1712 
1713 	/* Set device properties (mactype) */
1714 	dict = device_properties(sc->sc_dev);
1715 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1716 
1717 	/*
1718 	 * Map the device.  All devices support memory-mapped access,
1719 	 * and it is really required for normal operation.
1720 	 */
1721 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1722 	switch (memtype) {
1723 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1724 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1725 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1726 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1727 		break;
1728 	default:
1729 		memh_valid = 0;
1730 		break;
1731 	}
1732 
1733 	if (memh_valid) {
1734 		sc->sc_st = memt;
1735 		sc->sc_sh = memh;
1736 		sc->sc_ss = memsize;
1737 	} else {
1738 		aprint_error_dev(sc->sc_dev,
1739 		    "unable to map device registers\n");
1740 		return;
1741 	}
1742 
1743 	/*
1744 	 * In addition, i82544 and later support I/O mapped indirect
1745 	 * register access.  It is not desirable (nor supported in
1746 	 * this driver) to use it for normal operation, though it is
1747 	 * required to work around bugs in some chip versions.
1748 	 */
1749 	if (sc->sc_type >= WM_T_82544) {
1750 		/* First we have to find the I/O BAR. */
1751 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1752 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1753 			if (memtype == PCI_MAPREG_TYPE_IO)
1754 				break;
1755 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
1756 			    PCI_MAPREG_MEM_TYPE_64BIT)
1757 				i += 4;	/* skip high bits, too */
1758 		}
1759 		if (i < PCI_MAPREG_END) {
1760 			/*
1761 			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
1762 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO; that
1763 			 * is harmless, since those newer chips don't have
1764 			 * this bug anyway.
1765 			 *
1766 			 * The i8254x apparently doesn't respond when the
1767 			 * I/O BAR is 0, which looks as if it hasn't been
1768 			 * configured.
1769 			 */
1770 			preg = pci_conf_read(pc, pa->pa_tag, i);
1771 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1772 				aprint_error_dev(sc->sc_dev,
1773 				    "WARNING: I/O BAR at zero.\n");
1774 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1775 					0, &sc->sc_iot, &sc->sc_ioh,
1776 					NULL, &sc->sc_ios) == 0) {
1777 				sc->sc_flags |= WM_F_IOH_VALID;
1778 			} else {
1779 				aprint_error_dev(sc->sc_dev,
1780 				    "WARNING: unable to map I/O space\n");
1781 			}
1782 		}
1783 
1784 	}
1785 
1786 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
1787 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1788 	preg |= PCI_COMMAND_MASTER_ENABLE;
1789 	if (sc->sc_type < WM_T_82542_2_1)
1790 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1791 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1792 
1793 	/* power up chip */
1794 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1795 	    NULL)) && error != EOPNOTSUPP) {
1796 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1797 		return;
1798 	}
1799 
1800 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
1801 
1802 	/* Allocation settings */
1803 	max_type = PCI_INTR_TYPE_MSIX;
1804 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
1805 	counts[PCI_INTR_TYPE_MSI] = 1;
1806 	counts[PCI_INTR_TYPE_INTX] = 1;
1807 
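	/*
	 * Try interrupt types in decreasing order of preference: MSI-X
	 * (one vector per queue plus one more, typically for link
	 * interrupts), then MSI, then a single INTx line.  Each failed
	 * setup releases its vectors and retries with the next type.
	 */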
1808 alloc_retry:
1809 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
1810 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
1811 		return;
1812 	}
1813 
1814 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
1815 		error = wm_setup_msix(sc);
1816 		if (error) {
1817 			pci_intr_release(pc, sc->sc_intrs,
1818 			    counts[PCI_INTR_TYPE_MSIX]);
1819 
1820 			/* Setup for MSI: Disable MSI-X */
1821 			max_type = PCI_INTR_TYPE_MSI;
1822 			counts[PCI_INTR_TYPE_MSI] = 1;
1823 			counts[PCI_INTR_TYPE_INTX] = 1;
1824 			goto alloc_retry;
1825 		}
1826 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
1827 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
1828 		error = wm_setup_legacy(sc);
1829 		if (error) {
1830 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
1831 			    counts[PCI_INTR_TYPE_MSI]);
1832 
1833 			/* The next try is for INTx: Disable MSI */
1834 			max_type = PCI_INTR_TYPE_INTX;
1835 			counts[PCI_INTR_TYPE_INTX] = 1;
1836 			goto alloc_retry;
1837 		}
1838 	} else {
1839 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
1840 		error = wm_setup_legacy(sc);
1841 		if (error) {
1842 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
1843 			    counts[PCI_INTR_TYPE_INTX]);
1844 			return;
1845 		}
1846 	}
1847 
1848 	/*
1849 	 * Check the function ID (unit number of the chip).
1850 	 */
1851 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1852 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
1853 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1854 	    || (sc->sc_type == WM_T_82580)
1855 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
1856 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1857 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1858 	else
1859 		sc->sc_funcid = 0;
1860 
1861 	/*
1862 	 * Determine a few things about the bus we're connected to.
1863 	 */
1864 	if (sc->sc_type < WM_T_82543) {
1865 		/* We don't really know the bus characteristics here. */
1866 		sc->sc_bus_speed = 33;
1867 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1868 		/*
1869 		 * CSA (Communication Streaming Architecture) is about as
1870 		 * fast as a 32-bit, 66MHz PCI bus.
1871 		 */
1872 		sc->sc_flags |= WM_F_CSA;
1873 		sc->sc_bus_speed = 66;
1874 		aprint_verbose_dev(sc->sc_dev,
1875 		    "Communication Streaming Architecture\n");
1876 		if (sc->sc_type == WM_T_82547) {
1877 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1878 			callout_setfunc(&sc->sc_txfifo_ch,
1879 					wm_82547_txfifo_stall, sc);
1880 			aprint_verbose_dev(sc->sc_dev,
1881 			    "using 82547 Tx FIFO stall work-around\n");
1882 		}
1883 	} else if (sc->sc_type >= WM_T_82571) {
1884 		sc->sc_flags |= WM_F_PCIE;
1885 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1886 		    && (sc->sc_type != WM_T_ICH10)
1887 		    && (sc->sc_type != WM_T_PCH)
1888 		    && (sc->sc_type != WM_T_PCH2)
1889 		    && (sc->sc_type != WM_T_PCH_LPT)
1890 		    && (sc->sc_type != WM_T_PCH_SPT)) {
1891 			/* ICH* and PCH* have no PCIe capability registers */
1892 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1893 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1894 				NULL) == 0)
1895 				aprint_error_dev(sc->sc_dev,
1896 				    "unable to find PCIe capability\n");
1897 		}
1898 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1899 	} else {
1900 		reg = CSR_READ(sc, WMREG_STATUS);
1901 		if (reg & STATUS_BUS64)
1902 			sc->sc_flags |= WM_F_BUS64;
1903 		if ((reg & STATUS_PCIX_MODE) != 0) {
1904 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1905 
1906 			sc->sc_flags |= WM_F_PCIX;
1907 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1908 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1909 				aprint_error_dev(sc->sc_dev,
1910 				    "unable to find PCIX capability\n");
1911 			else if (sc->sc_type != WM_T_82545_3 &&
1912 				 sc->sc_type != WM_T_82546_3) {
1913 				/*
1914 				 * Work around a problem caused by the BIOS
1915 				 * setting the max memory read byte count
1916 				 * incorrectly.
1917 				 */
1918 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1919 				    sc->sc_pcixe_capoff + PCIX_CMD);
1920 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1921 				    sc->sc_pcixe_capoff + PCIX_STATUS);
1922 
1923 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1924 				    PCIX_CMD_BYTECNT_SHIFT;
1925 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1926 				    PCIX_STATUS_MAXB_SHIFT;
1927 				if (bytecnt > maxb) {
1928 					aprint_verbose_dev(sc->sc_dev,
1929 					    "resetting PCI-X MMRBC: %d -> %d\n",
1930 					    512 << bytecnt, 512 << maxb);
1931 					pcix_cmd = (pcix_cmd &
1932 					    ~PCIX_CMD_BYTECNT_MASK) |
1933 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
1934 					pci_conf_write(pa->pa_pc, pa->pa_tag,
1935 					    sc->sc_pcixe_capoff + PCIX_CMD,
1936 					    pcix_cmd);
1937 				}
1938 			}
1939 		}
1940 		/*
1941 		 * The quad port adapter is special; it has a PCIX-PCIX
1942 		 * bridge on the board, and can run the secondary bus at
1943 		 * a higher speed.
1944 		 */
1945 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1946 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1947 								      : 66;
1948 		} else if (sc->sc_flags & WM_F_PCIX) {
1949 			switch (reg & STATUS_PCIXSPD_MASK) {
1950 			case STATUS_PCIXSPD_50_66:
1951 				sc->sc_bus_speed = 66;
1952 				break;
1953 			case STATUS_PCIXSPD_66_100:
1954 				sc->sc_bus_speed = 100;
1955 				break;
1956 			case STATUS_PCIXSPD_100_133:
1957 				sc->sc_bus_speed = 133;
1958 				break;
1959 			default:
1960 				aprint_error_dev(sc->sc_dev,
1961 				    "unknown PCIXSPD %d; assuming 66MHz\n",
1962 				    reg & STATUS_PCIXSPD_MASK);
1963 				sc->sc_bus_speed = 66;
1964 				break;
1965 			}
1966 		} else
1967 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1968 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1969 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1970 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1971 	}
1972 
1973 	/* clear interesting stat counters */
1974 	CSR_READ(sc, WMREG_COLC);
1975 	CSR_READ(sc, WMREG_RXERRC);
1976 
1977 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
1978 	    || (sc->sc_type >= WM_T_ICH8))
1979 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
1980 	if (sc->sc_type >= WM_T_ICH8)
1981 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
1982 
1983 	/* Set PHY, NVM mutex related stuff */
1984 	switch (sc->sc_type) {
1985 	case WM_T_82542_2_0:
1986 	case WM_T_82542_2_1:
1987 	case WM_T_82543:
1988 	case WM_T_82544:
1989 		/* Microwire */
1990 		sc->sc_nvm_wordsize = 64;
1991 		sc->sc_nvm_addrbits = 6;
1992 		break;
1993 	case WM_T_82540:
1994 	case WM_T_82545:
1995 	case WM_T_82545_3:
1996 	case WM_T_82546:
1997 	case WM_T_82546_3:
1998 		/* Microwire */
1999 		reg = CSR_READ(sc, WMREG_EECD);
2000 		if (reg & EECD_EE_SIZE) {
2001 			sc->sc_nvm_wordsize = 256;
2002 			sc->sc_nvm_addrbits = 8;
2003 		} else {
2004 			sc->sc_nvm_wordsize = 64;
2005 			sc->sc_nvm_addrbits = 6;
2006 		}
2007 		sc->sc_flags |= WM_F_LOCK_EECD;
2008 		break;
2009 	case WM_T_82541:
2010 	case WM_T_82541_2:
2011 	case WM_T_82547:
2012 	case WM_T_82547_2:
2013 		sc->sc_flags |= WM_F_LOCK_EECD;
2014 		reg = CSR_READ(sc, WMREG_EECD);
2015 		if (reg & EECD_EE_TYPE) {
2016 			/* SPI */
2017 			sc->sc_flags |= WM_F_EEPROM_SPI;
2018 			wm_nvm_set_addrbits_size_eecd(sc);
2019 		} else {
2020 			/* Microwire */
2021 			if ((reg & EECD_EE_ABITS) != 0) {
2022 				sc->sc_nvm_wordsize = 256;
2023 				sc->sc_nvm_addrbits = 8;
2024 			} else {
2025 				sc->sc_nvm_wordsize = 64;
2026 				sc->sc_nvm_addrbits = 6;
2027 			}
2028 		}
2029 		break;
2030 	case WM_T_82571:
2031 	case WM_T_82572:
2032 		/* SPI */
2033 		sc->sc_flags |= WM_F_EEPROM_SPI;
2034 		wm_nvm_set_addrbits_size_eecd(sc);
2035 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
2036 		sc->phy.acquire = wm_get_swsm_semaphore;
2037 		sc->phy.release = wm_put_swsm_semaphore;
2038 		break;
2039 	case WM_T_82573:
2040 	case WM_T_82574:
2041 	case WM_T_82583:
2042 		if (sc->sc_type == WM_T_82573) {
2043 			sc->sc_flags |= WM_F_LOCK_SWSM;
2044 			sc->phy.acquire = wm_get_swsm_semaphore;
2045 			sc->phy.release = wm_put_swsm_semaphore;
2046 		} else {
2047 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
2048 			/* Both PHY and NVM use the same semaphore. */
2049 			sc->phy.acquire
2050 			    = wm_get_swfwhw_semaphore;
2051 			sc->phy.release
2052 			    = wm_put_swfwhw_semaphore;
2053 		}
2054 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
2055 			sc->sc_flags |= WM_F_EEPROM_FLASH;
2056 			sc->sc_nvm_wordsize = 2048;
2057 		} else {
2058 			/* SPI */
2059 			sc->sc_flags |= WM_F_EEPROM_SPI;
2060 			wm_nvm_set_addrbits_size_eecd(sc);
2061 		}
2062 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
2063 		break;
2064 	case WM_T_82575:
2065 	case WM_T_82576:
2066 	case WM_T_82580:
2067 	case WM_T_I350:
2068 	case WM_T_I354:
2069 	case WM_T_80003:
2070 		/* SPI */
2071 		sc->sc_flags |= WM_F_EEPROM_SPI;
2072 		wm_nvm_set_addrbits_size_eecd(sc);
2073 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
2074 		    | WM_F_LOCK_SWSM;
2075 		sc->phy.acquire = wm_get_phy_82575;
2076 		sc->phy.release = wm_put_phy_82575;
2077 		break;
2078 	case WM_T_ICH8:
2079 	case WM_T_ICH9:
2080 	case WM_T_ICH10:
2081 	case WM_T_PCH:
2082 	case WM_T_PCH2:
2083 	case WM_T_PCH_LPT:
2084 		/* FLASH */
2085 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
2086 		sc->sc_nvm_wordsize = 2048;
2087 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
2088 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2089 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2090 			aprint_error_dev(sc->sc_dev,
2091 			    "can't map FLASH registers\n");
2092 			goto out;
2093 		}
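		/*
		 * GFPREG encodes the flash region in sector units: the
		 * low bits hold the base and the bits at offset 16 hold
		 * the limit.  Below, the base is converted to a byte
		 * offset and the per-bank size is derived in 16-bit
		 * words, assuming two banks.
		 */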
2094 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2095 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2096 		    ICH_FLASH_SECTOR_SIZE;
2097 		sc->sc_ich8_flash_bank_size =
2098 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2099 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
2100 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2101 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
2102 		sc->sc_flashreg_offset = 0;
2103 		sc->phy.acquire = wm_get_swflag_ich8lan;
2104 		sc->phy.release = wm_put_swflag_ich8lan;
2105 		break;
2106 	case WM_T_PCH_SPT:
2107 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
2108 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
2109 		sc->sc_flasht = sc->sc_st;
2110 		sc->sc_flashh = sc->sc_sh;
2111 		sc->sc_ich8_flash_base = 0;
2112 		sc->sc_nvm_wordsize =
2113 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
2114 			* NVM_SIZE_MULTIPLIER;
2115 		/* It is size in bytes, we want words */
2116 		sc->sc_nvm_wordsize /= 2;
2117 		/* assume 2 banks */
2118 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
2119 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
2120 		sc->phy.acquire = wm_get_swflag_ich8lan;
2121 		sc->phy.release = wm_put_swflag_ich8lan;
2122 		break;
2123 	case WM_T_I210:
2124 	case WM_T_I211:
2125 		if (wm_nvm_get_flash_presence_i210(sc)) {
2126 			wm_nvm_set_addrbits_size_eecd(sc);
2127 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2128 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
2129 		} else {
2130 			sc->sc_nvm_wordsize = INVM_SIZE;
2131 			sc->sc_flags |= WM_F_EEPROM_INVM;
2132 		}
2133 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
2134 		sc->phy.acquire = wm_get_phy_82575;
2135 		sc->phy.release = wm_put_phy_82575;
2136 		break;
2137 	default:
2138 		break;
2139 	}
2140 
2141 	/* Reset the chip to a known state. */
2142 	wm_reset(sc);
2143 
2144 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
2145 	switch (sc->sc_type) {
2146 	case WM_T_82571:
2147 	case WM_T_82572:
2148 		reg = CSR_READ(sc, WMREG_SWSM2);
2149 		if ((reg & SWSM2_LOCK) == 0) {
2150 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2151 			force_clear_smbi = true;
2152 		} else
2153 			force_clear_smbi = false;
2154 		break;
2155 	case WM_T_82573:
2156 	case WM_T_82574:
2157 	case WM_T_82583:
2158 		force_clear_smbi = true;
2159 		break;
2160 	default:
2161 		force_clear_smbi = false;
2162 		break;
2163 	}
2164 	if (force_clear_smbi) {
2165 		reg = CSR_READ(sc, WMREG_SWSM);
2166 		if ((reg & SWSM_SMBI) != 0)
2167 			aprint_error_dev(sc->sc_dev,
2168 			    "Please update the Bootagent\n");
2169 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2170 	}
2171 
2172 	/*
2173 	 * Defer printing the EEPROM type until after verifying the checksum.
2174 	 * This allows the EEPROM type to be printed correctly in the case
2175 	 * that no EEPROM is attached.
2176 	 */
2177 	/*
2178 	 * Validate the EEPROM checksum. If the checksum fails, flag
2179 	 * this for later, so we can fail future reads from the EEPROM.
2180 	 */
2181 	if (wm_nvm_validate_checksum(sc)) {
2182 		/*
2183 		 * Read twice again because some PCI-e parts fail the
2184 		 * first check due to the link being in sleep state.
2185 		 */
2186 		if (wm_nvm_validate_checksum(sc))
2187 			sc->sc_flags |= WM_F_EEPROM_INVALID;
2188 	}
2189 
2190 	/* Set device properties (macflags) */
2191 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
2192 
2193 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
2194 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2195 	else {
2196 		aprint_verbose_dev(sc->sc_dev, "%u words ",
2197 		    sc->sc_nvm_wordsize);
2198 		if (sc->sc_flags & WM_F_EEPROM_INVM)
2199 			aprint_verbose("iNVM");
2200 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2201 			aprint_verbose("FLASH(HW)");
2202 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2203 			aprint_verbose("FLASH");
2204 		else {
2205 			if (sc->sc_flags & WM_F_EEPROM_SPI)
2206 				eetype = "SPI";
2207 			else
2208 				eetype = "MicroWire";
2209 			aprint_verbose("(%d address bits) %s EEPROM",
2210 			    sc->sc_nvm_addrbits, eetype);
2211 		}
2212 	}
2213 	wm_nvm_version(sc);
2214 	aprint_verbose("\n");
2215 
2216 	/* Check for I21[01] PLL workaround */
2217 	if (sc->sc_type == WM_T_I210)
2218 		sc->sc_flags |= WM_F_PLL_WA_I210;
2219 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
2220 		/* NVM image release 3.25 has a workaround */
2221 		if ((sc->sc_nvm_ver_major < 3)
2222 		    || ((sc->sc_nvm_ver_major == 3)
2223 			&& (sc->sc_nvm_ver_minor < 25))) {
2224 			aprint_verbose_dev(sc->sc_dev,
2225 			    "ROM image version %d.%d is older than 3.25\n",
2226 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2227 			sc->sc_flags |= WM_F_PLL_WA_I210;
2228 		}
2229 	}
2230 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2231 		wm_pll_workaround_i210(sc);
2232 
2233 	wm_get_wakeup(sc);
2234 
2235 	/* Non-AMT based hardware can now take control from firmware */
2236 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2237 		wm_get_hw_control(sc);
2238 
2239 	/*
2240 	 * Read the Ethernet address from the EEPROM, if not first found
2241 	 * in device properties.
2242 	 */
2243 	ea = prop_dictionary_get(dict, "mac-address");
2244 	if (ea != NULL) {
2245 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2246 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2247 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
2248 	} else {
2249 		if (wm_read_mac_addr(sc, enaddr) != 0) {
2250 			aprint_error_dev(sc->sc_dev,
2251 			    "unable to read Ethernet address\n");
2252 			goto out;
2253 		}
2254 	}
2255 
2256 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2257 	    ether_sprintf(enaddr));
2258 
2259 	/*
2260 	 * Read the config info from the EEPROM, and set up various
2261 	 * bits in the control registers based on their contents.
2262 	 */
2263 	pn = prop_dictionary_get(dict, "i82543-cfg1");
2264 	if (pn != NULL) {
2265 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2266 		cfg1 = (uint16_t) prop_number_integer_value(pn);
2267 	} else {
2268 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2269 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2270 			goto out;
2271 		}
2272 	}
2273 
2274 	pn = prop_dictionary_get(dict, "i82543-cfg2");
2275 	if (pn != NULL) {
2276 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2277 		cfg2 = (uint16_t) prop_number_integer_value(pn);
2278 	} else {
2279 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2280 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2281 			goto out;
2282 		}
2283 	}
2284 
2285 	/* check for WM_F_WOL */
2286 	switch (sc->sc_type) {
2287 	case WM_T_82542_2_0:
2288 	case WM_T_82542_2_1:
2289 	case WM_T_82543:
2290 		/* dummy? */
2291 		eeprom_data = 0;
2292 		apme_mask = NVM_CFG3_APME;
2293 		break;
2294 	case WM_T_82544:
2295 		apme_mask = NVM_CFG2_82544_APM_EN;
2296 		eeprom_data = cfg2;
2297 		break;
2298 	case WM_T_82546:
2299 	case WM_T_82546_3:
2300 	case WM_T_82571:
2301 	case WM_T_82572:
2302 	case WM_T_82573:
2303 	case WM_T_82574:
2304 	case WM_T_82583:
2305 	case WM_T_80003:
2306 	default:
2307 		apme_mask = NVM_CFG3_APME;
2308 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2309 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2310 		break;
2311 	case WM_T_82575:
2312 	case WM_T_82576:
2313 	case WM_T_82580:
2314 	case WM_T_I350:
2315 	case WM_T_I354: /* XXX ok? */
2316 	case WM_T_ICH8:
2317 	case WM_T_ICH9:
2318 	case WM_T_ICH10:
2319 	case WM_T_PCH:
2320 	case WM_T_PCH2:
2321 	case WM_T_PCH_LPT:
2322 	case WM_T_PCH_SPT:
2323 		/* XXX The funcid should be checked on some devices */
2324 		apme_mask = WUC_APME;
2325 		eeprom_data = CSR_READ(sc, WMREG_WUC);
2326 		break;
2327 	}
2328 
2329 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2330 	if ((eeprom_data & apme_mask) != 0)
2331 		sc->sc_flags |= WM_F_WOL;
2332 #ifdef WM_DEBUG
2333 	if ((sc->sc_flags & WM_F_WOL) != 0)
2334 		printf("WOL\n");
2335 #endif
2336 
2337 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
2338 		/* Check NVM for autonegotiation */
2339 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2340 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
2341 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2342 		}
2343 	}
2344 
2345 	/*
2346 	 * XXX need special handling for some multiple-port cards
2347 	 * to disable a particular port.
2348 	 */
2349 
2350 	if (sc->sc_type >= WM_T_82544) {
2351 		pn = prop_dictionary_get(dict, "i82543-swdpin");
2352 		if (pn != NULL) {
2353 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2354 			swdpin = (uint16_t) prop_number_integer_value(pn);
2355 		} else {
2356 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2357 				aprint_error_dev(sc->sc_dev,
2358 				    "unable to read SWDPIN\n");
2359 				goto out;
2360 			}
2361 		}
2362 	}
2363 
2364 	if (cfg1 & NVM_CFG1_ILOS)
2365 		sc->sc_ctrl |= CTRL_ILOS;
2366 
2367 	/*
2368 	 * XXX
2369 	 * This code isn't correct because pins 2 and 3 are located
2370 	 * at different positions on newer chips.  Check all datasheets.
2371 	 *
2372 	 * Until that is resolved, only handle chips up to the 82580.
2373 	 */
2374 	if (sc->sc_type <= WM_T_82580) {
2375 		if (sc->sc_type >= WM_T_82544) {
2376 			sc->sc_ctrl |=
2377 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2378 			    CTRL_SWDPIO_SHIFT;
2379 			sc->sc_ctrl |=
2380 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2381 			    CTRL_SWDPINS_SHIFT;
2382 		} else {
2383 			sc->sc_ctrl |=
2384 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2385 			    CTRL_SWDPIO_SHIFT;
2386 		}
2387 	}
2388 
2389 	/* XXX For other than 82580? */
2390 	if (sc->sc_type == WM_T_82580) {
2391 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
2392 		if (nvmword & __BIT(13))
2393 			sc->sc_ctrl |= CTRL_ILOS;
2394 	}
2395 
2396 #if 0
2397 	if (sc->sc_type >= WM_T_82544) {
2398 		if (cfg1 & NVM_CFG1_IPS0)
2399 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2400 		if (cfg1 & NVM_CFG1_IPS1)
2401 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2402 		sc->sc_ctrl_ext |=
2403 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2404 		    CTRL_EXT_SWDPIO_SHIFT;
2405 		sc->sc_ctrl_ext |=
2406 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2407 		    CTRL_EXT_SWDPINS_SHIFT;
2408 	} else {
2409 		sc->sc_ctrl_ext |=
2410 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2411 		    CTRL_EXT_SWDPIO_SHIFT;
2412 	}
2413 #endif
2414 
2415 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2416 #if 0
2417 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2418 #endif
2419 
2420 	if (sc->sc_type == WM_T_PCH) {
2421 		uint16_t val;
2422 
2423 		/* Save the NVM K1 bit setting */
2424 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2425 
2426 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2427 			sc->sc_nvm_k1_enabled = 1;
2428 		else
2429 			sc->sc_nvm_k1_enabled = 0;
2430 	}
2431 
2432 	/*
2433 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
2434 	 * media structures accordingly.
2435 	 */
2436 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2437 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2438 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2439 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
2440 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2441 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
2442 		wm_gmii_mediainit(sc, wmp->wmp_product);
2443 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2444 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
2445 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
2446 	    || (sc->sc_type == WM_T_I211)) {
2447 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
2448 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2449 		switch (link_mode) {
2450 		case CTRL_EXT_LINK_MODE_1000KX:
2451 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2452 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2453 			break;
2454 		case CTRL_EXT_LINK_MODE_SGMII:
2455 			if (wm_sgmii_uses_mdio(sc)) {
2456 				aprint_verbose_dev(sc->sc_dev,
2457 				    "SGMII(MDIO)\n");
2458 				sc->sc_flags |= WM_F_SGMII;
2459 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2460 				break;
2461 			}
2462 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2463 			/*FALLTHROUGH*/
2464 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2465 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
2466 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2467 				if (link_mode
2468 				    == CTRL_EXT_LINK_MODE_SGMII) {
2469 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2470 					sc->sc_flags |= WM_F_SGMII;
2471 				} else {
2472 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2473 					aprint_verbose_dev(sc->sc_dev,
2474 					    "SERDES\n");
2475 				}
2476 				break;
2477 			}
2478 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2479 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
2480 
2481 			/* Change current link mode setting */
2482 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
2483 			switch (sc->sc_mediatype) {
2484 			case WM_MEDIATYPE_COPPER:
2485 				reg |= CTRL_EXT_LINK_MODE_SGMII;
2486 				break;
2487 			case WM_MEDIATYPE_SERDES:
2488 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2489 				break;
2490 			default:
2491 				break;
2492 			}
2493 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2494 			break;
2495 		case CTRL_EXT_LINK_MODE_GMII:
2496 		default:
2497 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
2498 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2499 			break;
2500 		}
2501 
2502 		if ((sc->sc_flags & WM_F_SGMII) != 0)
2503 			reg |= CTRL_EXT_I2C_ENA;
2504 		else
2505 			reg &= ~CTRL_EXT_I2C_ENA;
2507 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2508 
2509 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2510 			wm_gmii_mediainit(sc, wmp->wmp_product);
2511 		else
2512 			wm_tbi_mediainit(sc);
2513 	} else if (sc->sc_type < WM_T_82543 ||
2514 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2515 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2516 			aprint_error_dev(sc->sc_dev,
2517 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
2518 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2519 		}
2520 		wm_tbi_mediainit(sc);
2521 	} else {
2522 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
2523 			aprint_error_dev(sc->sc_dev,
2524 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2525 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2526 		}
2527 		wm_gmii_mediainit(sc, wmp->wmp_product);
2528 	}
2529 
2530 	ifp = &sc->sc_ethercom.ec_if;
2531 	xname = device_xname(sc->sc_dev);
2532 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2533 	ifp->if_softc = sc;
2534 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2535 	ifp->if_extflags = IFEF_START_MPSAFE;
2536 	ifp->if_ioctl = wm_ioctl;
2537 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
2538 		ifp->if_start = wm_nq_start;
2539 		if (sc->sc_nqueues > 1) {
2540 			ifp->if_transmit = wm_nq_transmit;
2541 			deferred_start_func = wm_deferred_start;
2542 		}
2543 	} else {
2544 		ifp->if_start = wm_start;
2545 		if (sc->sc_nqueues > 1) {
2546 			ifp->if_transmit = wm_transmit;
2547 			deferred_start_func = wm_deferred_start;
2548 		}
2549 	}
2550 	ifp->if_watchdog = wm_watchdog;
2551 	ifp->if_init = wm_init;
2552 	ifp->if_stop = wm_stop;
2553 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2554 	IFQ_SET_READY(&ifp->if_snd);
2555 
2556 	/* Check for jumbo frame */
2557 	switch (sc->sc_type) {
2558 	case WM_T_82573:
2559 		/* XXX limited to 9234 if ASPM is disabled */
2560 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2561 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2562 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2563 		break;
2564 	case WM_T_82571:
2565 	case WM_T_82572:
2566 	case WM_T_82574:
2567 	case WM_T_82575:
2568 	case WM_T_82576:
2569 	case WM_T_82580:
2570 	case WM_T_I350:
2571 	case WM_T_I354: /* XXX ok? */
2572 	case WM_T_I210:
2573 	case WM_T_I211:
2574 	case WM_T_80003:
2575 	case WM_T_ICH9:
2576 	case WM_T_ICH10:
2577 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
2578 	case WM_T_PCH_LPT:
2579 	case WM_T_PCH_SPT:
2580 		/* XXX limited to 9234 */
2581 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2582 		break;
2583 	case WM_T_PCH:
2584 		/* XXX limited to 4096 */
2585 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2586 		break;
2587 	case WM_T_82542_2_0:
2588 	case WM_T_82542_2_1:
2589 	case WM_T_82583:
2590 	case WM_T_ICH8:
2591 		/* No support for jumbo frame */
2592 		break;
2593 	default:
2594 		/* ETHER_MAX_LEN_JUMBO */
2595 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2596 		break;
2597 	}
2598 
2599 	/* If we're a i82543 or greater, we can support VLANs. */
2600 	if (sc->sc_type >= WM_T_82543)
2601 		sc->sc_ethercom.ec_capabilities |=
2602 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2603 
2604 	/*
2605 	 * We can perform TCPv4 and UDPv4 checksums inbound.  Only
2606 	 * on i82543 and later.
2607 	 */
2608 	if (sc->sc_type >= WM_T_82543) {
2609 		ifp->if_capabilities |=
2610 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2611 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2612 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2613 		    IFCAP_CSUM_TCPv6_Tx |
2614 		    IFCAP_CSUM_UDPv6_Tx;
2615 	}
2616 
2617 	/*
2618 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2619 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
2620 	 *	82541GI (8086:1076) ... no
2621 	 *	82572EI (8086:10b9) ... yes
2622 	 */
2623 	if (sc->sc_type >= WM_T_82571) {
2624 		ifp->if_capabilities |=
2625 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2626 	}
2627 
2628 	/*
2629 	 * If we're a i82544 or greater (except i82547), we can do
2630 	 * TCP segmentation offload.
2631 	 */
2632 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2633 		ifp->if_capabilities |= IFCAP_TSOv4;
2634 	}
2635 
2636 	if (sc->sc_type >= WM_T_82571) {
2637 		ifp->if_capabilities |= IFCAP_TSOv6;
2638 	}
2639 
2640 #ifdef WM_MPSAFE
2641 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2642 #else
2643 	sc->sc_core_lock = NULL;
2644 #endif
2645 
2646 	/* Attach the interface. */
2647 	if_initialize(ifp);
2648 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
2649 	if_deferred_start_init(ifp, deferred_start_func);
2650 	ether_ifattach(ifp, enaddr);
2651 	if_register(ifp);
2652 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2653 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2654 			  RND_FLAG_DEFAULT);
2655 
2656 #ifdef WM_EVENT_COUNTERS
2657 	/* Attach event counters. */
2658 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2659 	    NULL, xname, "linkintr");
2660 
2661 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2662 	    NULL, xname, "tx_xoff");
2663 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2664 	    NULL, xname, "tx_xon");
2665 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2666 	    NULL, xname, "rx_xoff");
2667 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2668 	    NULL, xname, "rx_xon");
2669 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2670 	    NULL, xname, "rx_macctl");
2671 #endif /* WM_EVENT_COUNTERS */
2672 
2673 	if (pmf_device_register(self, wm_suspend, wm_resume))
2674 		pmf_class_network_register(self, ifp);
2675 	else
2676 		aprint_error_dev(self, "couldn't establish power handler\n");
2677 
2678 	sc->sc_flags |= WM_F_ATTACHED;
2679  out:
2680 	return;
2681 }
2682 
2683 /* The detach function (ca_detach) */
2684 static int
2685 wm_detach(device_t self, int flags __unused)
2686 {
2687 	struct wm_softc *sc = device_private(self);
2688 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2689 	int i;
2690 
2691 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2692 		return 0;
2693 
2694 	/* Stop the interface. Callouts are stopped in it. */
2695 	wm_stop(ifp, 1);
2696 
2697 	pmf_device_deregister(self);
2698 
2699 #ifdef WM_EVENT_COUNTERS
2700 	evcnt_detach(&sc->sc_ev_linkintr);
2701 
2702 	evcnt_detach(&sc->sc_ev_tx_xoff);
2703 	evcnt_detach(&sc->sc_ev_tx_xon);
2704 	evcnt_detach(&sc->sc_ev_rx_xoff);
2705 	evcnt_detach(&sc->sc_ev_rx_xon);
2706 	evcnt_detach(&sc->sc_ev_rx_macctl);
2707 #endif /* WM_EVENT_COUNTERS */
2708 
2709 	/* Tell the firmware about the release */
2710 	WM_CORE_LOCK(sc);
2711 	wm_release_manageability(sc);
2712 	wm_release_hw_control(sc);
2713 	wm_enable_wakeup(sc);
2714 	WM_CORE_UNLOCK(sc);
2715 
2716 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2717 
2718 	/* Delete all remaining media. */
2719 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2720 
2721 	ether_ifdetach(ifp);
2722 	if_detach(ifp);
2723 	if_percpuq_destroy(sc->sc_ipq);
2724 
2725 	/* Unload RX dmamaps and free mbufs */
2726 	for (i = 0; i < sc->sc_nqueues; i++) {
2727 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
2728 		mutex_enter(rxq->rxq_lock);
2729 		wm_rxdrain(rxq);
2730 		mutex_exit(rxq->rxq_lock);
2731 	}
2732 	/* Must unlock here */
2733 
2734 	/* Disestablish the interrupt handler */
2735 	for (i = 0; i < sc->sc_nintrs; i++) {
2736 		if (sc->sc_ihs[i] != NULL) {
2737 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
2738 			sc->sc_ihs[i] = NULL;
2739 		}
2740 	}
2741 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
2742 
2743 	wm_free_txrx_queues(sc);
2744 
2745 	/* Unmap the registers */
2746 	if (sc->sc_ss) {
2747 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2748 		sc->sc_ss = 0;
2749 	}
2750 	if (sc->sc_ios) {
2751 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2752 		sc->sc_ios = 0;
2753 	}
2754 	if (sc->sc_flashs) {
2755 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
2756 		sc->sc_flashs = 0;
2757 	}
2758 
2759 	if (sc->sc_core_lock)
2760 		mutex_obj_free(sc->sc_core_lock);
2761 	if (sc->sc_ich_phymtx)
2762 		mutex_obj_free(sc->sc_ich_phymtx);
2763 	if (sc->sc_ich_nvmmtx)
2764 		mutex_obj_free(sc->sc_ich_nvmmtx);
2765 
2766 	return 0;
2767 }
2768 
2769 static bool
2770 wm_suspend(device_t self, const pmf_qual_t *qual)
2771 {
2772 	struct wm_softc *sc = device_private(self);
2773 
2774 	wm_release_manageability(sc);
2775 	wm_release_hw_control(sc);
2776 	wm_enable_wakeup(sc);
2777 
2778 	return true;
2779 }
2780 
2781 static bool
2782 wm_resume(device_t self, const pmf_qual_t *qual)
2783 {
2784 	struct wm_softc *sc = device_private(self);
2785 
2786 	wm_init_manageability(sc);
2787 
2788 	return true;
2789 }
2790 
2791 /*
2792  * wm_watchdog:		[ifnet interface function]
2793  *
2794  *	Watchdog timer handler.
2795  */
2796 static void
2797 wm_watchdog(struct ifnet *ifp)
2798 {
2799 	int qid;
2800 	struct wm_softc *sc = ifp->if_softc;
2801 
2802 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
2803 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
2804 
2805 		wm_watchdog_txq(ifp, txq);
2806 	}
2807 
2808 	/* Reset the interface. */
2809 	(void) wm_init(ifp);
2810 
2811 	/*
2812 	 * Some upper layer processing (e.g. ALTQ) still calls
2813 	 * ifp->if_start() directly.
2814 	 */
2815 	/* Try to get more packets going. */
2816 	ifp->if_start(ifp);
2817 }
2818 
2819 static void
2820 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
2821 {
2822 	struct wm_softc *sc = ifp->if_softc;
2823 
2824 	/*
2825 	 * Since we're using delayed interrupts, sweep up
2826 	 * before we report an error.
2827 	 */
2828 	mutex_enter(txq->txq_lock);
2829 	wm_txeof(sc, txq);
2830 	mutex_exit(txq->txq_lock);
2831 
2832 	if (txq->txq_free != WM_NTXDESC(txq)) {
2833 #ifdef WM_DEBUG
2834 		int i, j;
2835 		struct wm_txsoft *txs;
2836 #endif
2837 		log(LOG_ERR,
2838 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2839 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
2840 		    txq->txq_next);
2841 		ifp->if_oerrors++;
2842 #ifdef WM_DEBUG
2843 		for (i = txq->txq_sdirty; i != txq->txq_snext;
2844 		    i = WM_NEXTTXS(txq, i)) {
2845 			txs = &txq->txq_soft[i];
2846 			printf("txs %d tx %d -> %d\n",
2847 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
2848 			for (j = txs->txs_firstdesc; ;
2849 			    j = WM_NEXTTX(txq, j)) {
2850 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2851 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
2852 				printf("\t %#08x%08x\n",
2853 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
2854 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
2855 				if (j == txs->txs_lastdesc)
2856 					break;
2857 			}
2858 		}
2859 #endif
2860 	}
2861 }
2862 
2863 /*
2864  * wm_tick:
2865  *
2866  *	One second timer, used to check link status, sweep up
2867  *	completed transmit jobs, etc.
2868  */
2869 static void
2870 wm_tick(void *arg)
2871 {
2872 	struct wm_softc *sc = arg;
2873 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2874 #ifndef WM_MPSAFE
2875 	int s = splnet();
2876 #endif
2877 
2878 	WM_CORE_LOCK(sc);
2879 
2880 	if (sc->sc_core_stopping)
2881 		goto out;
2882 
2883 	if (sc->sc_type >= WM_T_82542_2_1) {
2884 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2885 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2886 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2887 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2888 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2889 	}
2890 
2891 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2892 	ifp->if_ierrors += 0ULL /* ensure quad_t */
2893 	    + CSR_READ(sc, WMREG_CRCERRS)
2894 	    + CSR_READ(sc, WMREG_ALGNERRC)
2895 	    + CSR_READ(sc, WMREG_SYMERRC)
2896 	    + CSR_READ(sc, WMREG_RXERRC)
2897 	    + CSR_READ(sc, WMREG_SEC)
2898 	    + CSR_READ(sc, WMREG_CEXTERR)
2899 	    + CSR_READ(sc, WMREG_RLEC);
2900 	/*
2901 	 * WMREG_RNBC is incremented when there are no buffers available
2902 	 * in host memory.  It does not count dropped packets, because the
2903 	 * Ethernet controller can still receive packets in that case as
2904 	 * long as there is space in the PHY's FIFO.
2905 	 *
2906 	 * If you want to track WMREG_RNBC, use a dedicated EVCNT
2907 	 * instead of if_iqdrops.
2908 	 */
2909 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
2910 
2911 	if (sc->sc_flags & WM_F_HAS_MII)
2912 		mii_tick(&sc->sc_mii);
2913 	else if ((sc->sc_type >= WM_T_82575)
2914 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
2915 		wm_serdes_tick(sc);
2916 	else
2917 		wm_tbi_tick(sc);
2918 
2919 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2920 out:
2921 	WM_CORE_UNLOCK(sc);
2922 #ifndef WM_MPSAFE
2923 	splx(s);
2924 #endif
2925 }
2926 
2927 static int
2928 wm_ifflags_cb(struct ethercom *ec)
2929 {
2930 	struct ifnet *ifp = &ec->ec_if;
2931 	struct wm_softc *sc = ifp->if_softc;
2932 	int rc = 0;
2933 
2934 	WM_CORE_LOCK(sc);
2935 
2936 	int change = ifp->if_flags ^ sc->sc_if_flags;
2937 	sc->sc_if_flags = ifp->if_flags;
2938 
2939 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
2940 		rc = ENETRESET;
2941 		goto out;
2942 	}
2943 
2944 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2945 		wm_set_filter(sc);
2946 
2947 	wm_set_vlan(sc);
2948 
2949 out:
2950 	WM_CORE_UNLOCK(sc);
2951 
2952 	return rc;
2953 }
2954 
2955 /*
2956  * wm_ioctl:		[ifnet interface function]
2957  *
2958  *	Handle control requests from the operator.
2959  */
2960 static int
2961 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2962 {
2963 	struct wm_softc *sc = ifp->if_softc;
2964 	struct ifreq *ifr = (struct ifreq *) data;
2965 	struct ifaddr *ifa = (struct ifaddr *)data;
2966 	struct sockaddr_dl *sdl;
2967 	int s, error;
2968 
2969 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
2970 		device_xname(sc->sc_dev), __func__));
2971 
2972 #ifndef WM_MPSAFE
2973 	s = splnet();
2974 #endif
2975 	switch (cmd) {
2976 	case SIOCSIFMEDIA:
2977 	case SIOCGIFMEDIA:
2978 		WM_CORE_LOCK(sc);
2979 		/* Flow control requires full-duplex mode. */
2980 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2981 		    (ifr->ifr_media & IFM_FDX) == 0)
2982 			ifr->ifr_media &= ~IFM_ETH_FMASK;
2983 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2984 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2985 				/* We can do both TXPAUSE and RXPAUSE. */
2986 				ifr->ifr_media |=
2987 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2988 			}
2989 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2990 		}
2991 		WM_CORE_UNLOCK(sc);
2992 #ifdef WM_MPSAFE
2993 		s = splnet();
2994 #endif
2995 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2996 #ifdef WM_MPSAFE
2997 		splx(s);
2998 #endif
2999 		break;
3000 	case SIOCINITIFADDR:
3001 		WM_CORE_LOCK(sc);
3002 		if (ifa->ifa_addr->sa_family == AF_LINK) {
3003 			sdl = satosdl(ifp->if_dl->ifa_addr);
3004 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3005 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3006 			/* unicast address is first multicast entry */
3007 			wm_set_filter(sc);
3008 			error = 0;
3009 			WM_CORE_UNLOCK(sc);
3010 			break;
3011 		}
3012 		WM_CORE_UNLOCK(sc);
3013 		/*FALLTHROUGH*/
3014 	default:
3015 #ifdef WM_MPSAFE
3016 		s = splnet();
3017 #endif
3018 		/* It may call wm_start, so unlock here */
3019 		error = ether_ioctl(ifp, cmd, data);
3020 #ifdef WM_MPSAFE
3021 		splx(s);
3022 #endif
3023 		if (error != ENETRESET)
3024 			break;
3025 
3026 		error = 0;
3027 
3028 		if (cmd == SIOCSIFCAP) {
3029 			error = (*ifp->if_init)(ifp);
3030 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3031 			;
3032 		else if (ifp->if_flags & IFF_RUNNING) {
3033 			/*
3034 			 * Multicast list has changed; set the hardware filter
3035 			 * accordingly.
3036 			 */
3037 			WM_CORE_LOCK(sc);
3038 			wm_set_filter(sc);
3039 			WM_CORE_UNLOCK(sc);
3040 		}
3041 		break;
3042 	}
3043 
3044 #ifndef WM_MPSAFE
3045 	splx(s);
3046 #endif
3047 	return error;
3048 }
3049 
3050 /* MAC address related */
3051 
3052 /*
3053  * Get the offset of the MAC address and return it.
3054  * If an error occurs, use offset 0.
3055  */
3056 static uint16_t
3057 wm_check_alt_mac_addr(struct wm_softc *sc)
3058 {
3059 	uint16_t myea[ETHER_ADDR_LEN / 2];
3060 	uint16_t offset = NVM_OFF_MACADDR;
3061 
3062 	/* Try to read alternative MAC address pointer */
3063 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
3064 		return 0;
3065 
3066 	/* Check whether the pointer is valid. */
3067 	if ((offset == 0x0000) || (offset == 0xffff))
3068 		return 0;
3069 
3070 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
3071 	/*
3072 	 * Check whether the alternative MAC address is valid.  Some
3073 	 * cards have a non-0xffff pointer but don't actually use an
3074 	 * alternative MAC address.
3075 	 *
3076 	 * Do this by checking whether the broadcast bit is set.
3077 	 */
3078 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
3079 		if (((myea[0] & 0xff) & 0x01) == 0)
3080 			return offset; /* Found */
3081 
3082 	/* Not found */
3083 	return 0;
3084 }
3085 
3086 static int
3087 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
3088 {
3089 	uint16_t myea[ETHER_ADDR_LEN / 2];
3090 	uint16_t offset = NVM_OFF_MACADDR;
3091 	int do_invert = 0;
3092 
3093 	switch (sc->sc_type) {
3094 	case WM_T_82580:
3095 	case WM_T_I350:
3096 	case WM_T_I354:
3097 		/* EEPROM Top Level Partitioning */
3098 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
3099 		break;
3100 	case WM_T_82571:
3101 	case WM_T_82575:
3102 	case WM_T_82576:
3103 	case WM_T_80003:
3104 	case WM_T_I210:
3105 	case WM_T_I211:
3106 		offset = wm_check_alt_mac_addr(sc);
3107 		if (offset == 0)
3108 			if ((sc->sc_funcid & 0x01) == 1)
3109 				do_invert = 1;
3110 		break;
3111 	default:
3112 		if ((sc->sc_funcid & 0x01) == 1)
3113 			do_invert = 1;
3114 		break;
3115 	}
3116 
3117 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
3118 		goto bad;
3119 
3120 	enaddr[0] = myea[0] & 0xff;
3121 	enaddr[1] = myea[0] >> 8;
3122 	enaddr[2] = myea[1] & 0xff;
3123 	enaddr[3] = myea[1] >> 8;
3124 	enaddr[4] = myea[2] & 0xff;
3125 	enaddr[5] = myea[2] >> 8;
3126 
3127 	/*
3128 	 * Toggle the LSB of the MAC address on the second port
3129 	 * of some dual port cards.
3130 	 */
3131 	if (do_invert != 0)
3132 		enaddr[5] ^= 1;
3133 
3134 	return 0;
3135 
3136  bad:
3137 	return -1;
3138 }
3139 
3140 /*
3141  * wm_set_ral:
3142  *
3143  *	Set an entry in the receive address list.
3144  */
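/*
 * Illustrative example: for enaddr 00:11:22:33:44:55, ral_lo becomes
 * 0x33221100 and ral_hi becomes 0x5544 | RAL_AV.
 */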
3145 static void
3146 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3147 {
3148 	uint32_t ral_lo, ral_hi;
3149 
3150 	if (enaddr != NULL) {
3151 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3152 		    (enaddr[3] << 24);
3153 		ral_hi = enaddr[4] | (enaddr[5] << 8);
3154 		ral_hi |= RAL_AV;
3155 	} else {
3156 		ral_lo = 0;
3157 		ral_hi = 0;
3158 	}
3159 
3160 	if (sc->sc_type >= WM_T_82544) {
3161 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
3162 		    ral_lo);
3163 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
3164 		    ral_hi);
3165 	} else {
3166 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
3167 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
3168 	}
3169 }
3170 
3171 /*
3172  * wm_mchash:
3173  *
3174  *	Compute the hash of the multicast address for the 4096-bit
3175  *	multicast filter.
3176  */
3177 static uint32_t
3178 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3179 {
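	/*
	 * The filter hash is taken from the top bits of the destination
	 * address: sc_mchash_type selects which 12-bit window (10-bit on
	 * ICH/PCH variants) of enaddr[4]/enaddr[5] is used, hence the
	 * per-type shift tables below.
	 */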
3180 	static const int lo_shift[4] = { 4, 3, 2, 0 };
3181 	static const int hi_shift[4] = { 4, 5, 6, 8 };
3182 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3183 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3184 	uint32_t hash;
3185 
3186 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3187 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3188 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3189 	    || (sc->sc_type == WM_T_PCH_SPT)) {
3190 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3191 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3192 		return (hash & 0x3ff);
3193 	}
3194 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3195 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3196 
3197 	return (hash & 0xfff);
3198 }
3199 
3200 /*
3201  * wm_set_filter:
3202  *
3203  *	Set up the receive filter.
3204  */
3205 static void
3206 wm_set_filter(struct wm_softc *sc)
3207 {
3208 	struct ethercom *ec = &sc->sc_ethercom;
3209 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3210 	struct ether_multi *enm;
3211 	struct ether_multistep step;
3212 	bus_addr_t mta_reg;
3213 	uint32_t hash, reg, bit;
3214 	int i, size, ralmax;
3215 
3216 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3217 		device_xname(sc->sc_dev), __func__));
3218 
3219 	if (sc->sc_type >= WM_T_82544)
3220 		mta_reg = WMREG_CORDOVA_MTA;
3221 	else
3222 		mta_reg = WMREG_MTA;
3223 
3224 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3225 
3226 	if (ifp->if_flags & IFF_BROADCAST)
3227 		sc->sc_rctl |= RCTL_BAM;
3228 	if (ifp->if_flags & IFF_PROMISC) {
3229 		sc->sc_rctl |= RCTL_UPE;
3230 		goto allmulti;
3231 	}
3232 
3233 	/*
3234 	 * Set the station address in the first RAL slot, and
3235 	 * clear the remaining slots.
3236 	 */
3237 	if (sc->sc_type == WM_T_ICH8)
3238 		size = WM_RAL_TABSIZE_ICH8 - 1;
3239 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
3240 	    || (sc->sc_type == WM_T_PCH))
3241 		size = WM_RAL_TABSIZE_ICH8;
3242 	else if (sc->sc_type == WM_T_PCH2)
3243 		size = WM_RAL_TABSIZE_PCH2;
3244 	else if ((sc->sc_type == WM_T_PCH_LPT) ||(sc->sc_type == WM_T_PCH_SPT))
3245 		size = WM_RAL_TABSIZE_PCH_LPT;
3246 	else if (sc->sc_type == WM_T_82575)
3247 		size = WM_RAL_TABSIZE_82575;
3248 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
3249 		size = WM_RAL_TABSIZE_82576;
3250 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3251 		size = WM_RAL_TABSIZE_I350;
3252 	else
3253 		size = WM_RAL_TABSIZE;
3254 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3255 
3256 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
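		/*
		 * The FWSM_WLOCK_MAC field reports how many receive address
		 * entries the firmware allows the driver to modify.
		 */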
3257 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
3258 		switch (i) {
3259 		case 0:
3260 			/* We can use all entries */
3261 			ralmax = size;
3262 			break;
3263 		case 1:
3264 			/* Only RAR[0] */
3265 			ralmax = 1;
3266 			break;
3267 		default:
3268 			/* available SHRA + RAR[0] */
3269 			ralmax = i + 1;
3270 		}
3271 	} else
3272 		ralmax = size;
3273 	for (i = 1; i < size; i++) {
3274 		if (i < ralmax)
3275 			wm_set_ral(sc, NULL, i);
3276 	}
3277 
3278 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3279 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3280 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3281 	    || (sc->sc_type == WM_T_PCH_SPT))
3282 		size = WM_ICH8_MC_TABSIZE;
3283 	else
3284 		size = WM_MC_TABSIZE;
3285 	/* Clear out the multicast table. */
3286 	for (i = 0; i < size; i++)
3287 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
3288 
3289 	ETHER_LOCK(ec);
3290 	ETHER_FIRST_MULTI(step, ec, enm);
3291 	while (enm != NULL) {
3292 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3293 			ETHER_UNLOCK(ec);
3294 			/*
3295 			 * We must listen to a range of multicast addresses.
3296 			 * For now, just accept all multicasts, rather than
3297 			 * trying to set only those filter bits needed to match
3298 			 * the range.  (At this time, the only use of address
3299 			 * ranges is for IP multicast routing, for which the
3300 			 * range is big enough to require all bits set.)
3301 			 */
3302 			goto allmulti;
3303 		}
3304 
3305 		hash = wm_mchash(sc, enm->enm_addrlo);
3306 
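		/*
		 * The upper bits of the hash select the 32-bit MTA word and
		 * the low five bits select the bit within that word.
		 */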
3307 		reg = (hash >> 5);
3308 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3309 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3310 		    || (sc->sc_type == WM_T_PCH2)
3311 		    || (sc->sc_type == WM_T_PCH_LPT)
3312 		    || (sc->sc_type == WM_T_PCH_SPT))
3313 			reg &= 0x1f;
3314 		else
3315 			reg &= 0x7f;
3316 		bit = hash & 0x1f;
3317 
3318 		hash = CSR_READ(sc, mta_reg + (reg << 2));
3319 		hash |= 1U << bit;
3320 
3321 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
3322 			/*
3323 			 * 82544 Errata 9: Certain registers (FCAH, MTA and
3324 			 * VFTA) cannot be written with particular alignments
3325 			 * in PCI-X bus operation.
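			 *
			 * Work around it by reading back the adjacent
			 * even-numbered register and rewriting it after the
			 * target write, so the register pair is updated
			 * together.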
3326 			 */
3327 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3328 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3329 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3330 		} else
3331 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3332 
3333 		ETHER_NEXT_MULTI(step, enm);
3334 	}
3335 	ETHER_UNLOCK(ec);
3336 
3337 	ifp->if_flags &= ~IFF_ALLMULTI;
3338 	goto setit;
3339 
3340  allmulti:
3341 	ifp->if_flags |= IFF_ALLMULTI;
3342 	sc->sc_rctl |= RCTL_MPE;
3343 
3344  setit:
3345 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3346 }
3347 
3348 /* Reset and init related */
3349 
3350 static void
3351 wm_set_vlan(struct wm_softc *sc)
3352 {
3353 
3354 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3355 		device_xname(sc->sc_dev), __func__));
3356 
3357 	/* Deal with VLAN enables. */
3358 	if (VLAN_ATTACHED(&sc->sc_ethercom))
3359 		sc->sc_ctrl |= CTRL_VME;
3360 	else
3361 		sc->sc_ctrl &= ~CTRL_VME;
3362 
3363 	/* Write the control registers. */
3364 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3365 }
3366 
3367 static void
3368 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3369 {
3370 	uint32_t gcr;
3371 	pcireg_t ctrl2;
3372 
3373 	gcr = CSR_READ(sc, WMREG_GCR);
3374 
3375 	/* Only take action if the timeout value is still the default of 0 */
3376 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3377 		goto out;
3378 
3379 	if ((gcr & GCR_CAP_VER2) == 0) {
3380 		gcr |= GCR_CMPL_TMOUT_10MS;
3381 		goto out;
3382 	}
3383 
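	/*
	 * Devices with PCIe capability version 2 expose the completion
	 * timeout in the Device Control 2 register instead; request a
	 * timeout in the 16ms range there.
	 */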
3384 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3385 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
3386 	ctrl2 |= WM_PCIE_DCSR2_16MS;
3387 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3388 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3389 
3390 out:
3391 	/* Disable completion timeout resend */
3392 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
3393 
3394 	CSR_WRITE(sc, WMREG_GCR, gcr);
3395 }
3396 
3397 void
3398 wm_get_auto_rd_done(struct wm_softc *sc)
3399 {
3400 	int i;
3401 
3402 	/* wait for eeprom to reload */
3403 	switch (sc->sc_type) {
3404 	case WM_T_82571:
3405 	case WM_T_82572:
3406 	case WM_T_82573:
3407 	case WM_T_82574:
3408 	case WM_T_82583:
3409 	case WM_T_82575:
3410 	case WM_T_82576:
3411 	case WM_T_82580:
3412 	case WM_T_I350:
3413 	case WM_T_I354:
3414 	case WM_T_I210:
3415 	case WM_T_I211:
3416 	case WM_T_80003:
3417 	case WM_T_ICH8:
3418 	case WM_T_ICH9:
3419 		for (i = 0; i < 10; i++) {
3420 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3421 				break;
3422 			delay(1000);
3423 		}
3424 		if (i == 10) {
3425 			log(LOG_ERR, "%s: auto read from eeprom failed to "
3426 			    "complete\n", device_xname(sc->sc_dev));
3427 		}
3428 		break;
3429 	default:
3430 		break;
3431 	}
3432 }
3433 
3434 void
3435 wm_lan_init_done(struct wm_softc *sc)
3436 {
3437 	uint32_t reg = 0;
3438 	int i;
3439 
3440 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3441 		device_xname(sc->sc_dev), __func__));
3442 
3443 	/* Wait for eeprom to reload */
3444 	switch (sc->sc_type) {
3445 	case WM_T_ICH10:
3446 	case WM_T_PCH:
3447 	case WM_T_PCH2:
3448 	case WM_T_PCH_LPT:
3449 	case WM_T_PCH_SPT:
3450 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3451 			reg = CSR_READ(sc, WMREG_STATUS);
3452 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
3453 				break;
3454 			delay(100);
3455 		}
3456 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3457 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
3458 			    "complete\n", device_xname(sc->sc_dev), __func__);
3459 		}
3460 		break;
3461 	default:
3462 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3463 		    __func__);
3464 		break;
3465 	}
3466 
3467 	reg &= ~STATUS_LAN_INIT_DONE;
3468 	CSR_WRITE(sc, WMREG_STATUS, reg);
3469 }
3470 
3471 void
3472 wm_get_cfg_done(struct wm_softc *sc)
3473 {
3474 	int mask;
3475 	uint32_t reg;
3476 	int i;
3477 
3478 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3479 		device_xname(sc->sc_dev), __func__));
3480 
3481 	/* Wait for eeprom to reload */
3482 	switch (sc->sc_type) {
3483 	case WM_T_82542_2_0:
3484 	case WM_T_82542_2_1:
3485 		/* null */
3486 		break;
3487 	case WM_T_82543:
3488 	case WM_T_82544:
3489 	case WM_T_82540:
3490 	case WM_T_82545:
3491 	case WM_T_82545_3:
3492 	case WM_T_82546:
3493 	case WM_T_82546_3:
3494 	case WM_T_82541:
3495 	case WM_T_82541_2:
3496 	case WM_T_82547:
3497 	case WM_T_82547_2:
3498 	case WM_T_82573:
3499 	case WM_T_82574:
3500 	case WM_T_82583:
3501 		/* generic */
3502 		delay(10*1000);
3503 		break;
3504 	case WM_T_80003:
3505 	case WM_T_82571:
3506 	case WM_T_82572:
3507 	case WM_T_82575:
3508 	case WM_T_82576:
3509 	case WM_T_82580:
3510 	case WM_T_I350:
3511 	case WM_T_I354:
3512 	case WM_T_I210:
3513 	case WM_T_I211:
3514 		if (sc->sc_type == WM_T_82571) {
3515 			/* Only 82571 shares port 0 */
3516 			mask = EEMNGCTL_CFGDONE_0;
3517 		} else
3518 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3519 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3520 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3521 				break;
3522 			delay(1000);
3523 		}
3524 		if (i >= WM_PHY_CFG_TIMEOUT) {
3525 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3526 				device_xname(sc->sc_dev), __func__));
3527 		}
3528 		break;
3529 	case WM_T_ICH8:
3530 	case WM_T_ICH9:
3531 	case WM_T_ICH10:
3532 	case WM_T_PCH:
3533 	case WM_T_PCH2:
3534 	case WM_T_PCH_LPT:
3535 	case WM_T_PCH_SPT:
3536 		delay(10*1000);
3537 		if (sc->sc_type >= WM_T_ICH10)
3538 			wm_lan_init_done(sc);
3539 		else
3540 			wm_get_auto_rd_done(sc);
3541 
3542 		reg = CSR_READ(sc, WMREG_STATUS);
3543 		if ((reg & STATUS_PHYRA) != 0)
3544 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3545 		break;
3546 	default:
3547 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3548 		    __func__);
3549 		break;
3550 	}
3551 }
3552 
3553 /* Init hardware bits */
3554 void
3555 wm_initialize_hardware_bits(struct wm_softc *sc)
3556 {
3557 	uint32_t tarc0, tarc1, reg;
3558 
3559 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3560 		device_xname(sc->sc_dev), __func__));
3561 
3562 	/* For 82571 variant, 80003 and ICHs */
3563 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
3564 	    || (sc->sc_type >= WM_T_80003)) {
3565 
3566 		/* Transmit Descriptor Control 0 */
3567 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
3568 		reg |= TXDCTL_COUNT_DESC;
3569 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
3570 
3571 		/* Transmit Descriptor Control 1 */
3572 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
3573 		reg |= TXDCTL_COUNT_DESC;
3574 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
3575 
3576 		/* TARC0 */
3577 		tarc0 = CSR_READ(sc, WMREG_TARC0);
3578 		switch (sc->sc_type) {
3579 		case WM_T_82571:
3580 		case WM_T_82572:
3581 		case WM_T_82573:
3582 		case WM_T_82574:
3583 		case WM_T_82583:
3584 		case WM_T_80003:
3585 			/* Clear bits 30..27 */
3586 			tarc0 &= ~__BITS(30, 27);
3587 			break;
3588 		default:
3589 			break;
3590 		}
3591 
3592 		switch (sc->sc_type) {
3593 		case WM_T_82571:
3594 		case WM_T_82572:
3595 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
3596 
3597 			tarc1 = CSR_READ(sc, WMREG_TARC1);
3598 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
3599 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
3600 			/* 8257[12] Errata No.7 */
3601 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
3602 
3603 			/* TARC1 bit 28 */
3604 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3605 				tarc1 &= ~__BIT(28);
3606 			else
3607 				tarc1 |= __BIT(28);
3608 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
3609 
3610 			/*
3611 			 * 8257[12] Errata No.13
3612 			 * Disable Dynamic Clock Gating.
3613 			 */
3614 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
3615 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
3616 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3617 			break;
3618 		case WM_T_82573:
3619 		case WM_T_82574:
3620 		case WM_T_82583:
3621 			if ((sc->sc_type == WM_T_82574)
3622 			    || (sc->sc_type == WM_T_82583))
3623 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
3624 
3625 			/* Extended Device Control */
3626 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
3627 			reg &= ~__BIT(23);	/* Clear bit 23 */
3628 			reg |= __BIT(22);	/* Set bit 22 */
3629 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3630 
3631 			/* Device Control */
3632 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
3633 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3634 
3635 			/* PCIe Control Register */
3636 			/*
3637 			 * 82573 Errata (unknown).
3638 			 *
3639 			 * 82574 Errata 25 and 82583 Errata 12
3640 			 * "Dropped Rx Packets":
3641 			 *   NVM Image Version 2.1.4 and newer has no this bug.
3642 			 */
3643 			reg = CSR_READ(sc, WMREG_GCR);
3644 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
3645 			CSR_WRITE(sc, WMREG_GCR, reg);
3646 
3647 			if ((sc->sc_type == WM_T_82574)
3648 			    || (sc->sc_type == WM_T_82583)) {
3649 				/*
3650 				 * Document says this bit must be set for
3651 				 * proper operation.
3652 				 */
3653 				reg = CSR_READ(sc, WMREG_GCR);
3654 				reg |= __BIT(22);
3655 				CSR_WRITE(sc, WMREG_GCR, reg);
3656 
3657 				/*
3658 				 * Apply a workaround for the hardware
3659 				 * errata documented in the errata docs.
3660 				 * It fixes an issue where error-prone or
3661 				 * unreliable PCIe completions occur,
3662 				 * particularly with ASPM enabled. Without
3663 				 * the fix, the issue can cause Tx timeouts.
3664 				 */
3665 				reg = CSR_READ(sc, WMREG_GCR2);
3666 				reg |= __BIT(0);
3667 				CSR_WRITE(sc, WMREG_GCR2, reg);
3668 			}
3669 			break;
3670 		case WM_T_80003:
3671 			/* TARC0 */
3672 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
3673 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3674 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
3675 
3676 			/* TARC1 bit 28 */
3677 			tarc1 = CSR_READ(sc, WMREG_TARC1);
3678 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3679 				tarc1 &= ~__BIT(28);
3680 			else
3681 				tarc1 |= __BIT(28);
3682 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
3683 			break;
3684 		case WM_T_ICH8:
3685 		case WM_T_ICH9:
3686 		case WM_T_ICH10:
3687 		case WM_T_PCH:
3688 		case WM_T_PCH2:
3689 		case WM_T_PCH_LPT:
3690 		case WM_T_PCH_SPT:
3691 			/* TARC0 */
3692 			if ((sc->sc_type == WM_T_ICH8)
3693 			    || (sc->sc_type == WM_T_PCH_SPT)) {
3694 				/* Set TARC0 bits 29 and 28 */
3695 				tarc0 |= __BITS(29, 28);
3696 			}
3697 			/* Set TARC0 bits 23, 24, 26 and 27 */
3698 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
3699 
3700 			/* CTRL_EXT */
3701 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
3702 			reg |= __BIT(22);	/* Set bit 22 */
3703 			/*
3704 			 * Enable PHY low-power state when MAC is at D3
3705 			 * w/o WoL
3706 			 */
3707 			if (sc->sc_type >= WM_T_PCH)
3708 				reg |= CTRL_EXT_PHYPDEN;
3709 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3710 
3711 			/* TARC1 */
3712 			tarc1 = CSR_READ(sc, WMREG_TARC1);
3713 			/* bit 28 */
3714 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3715 				tarc1 &= ~__BIT(28);
3716 			else
3717 				tarc1 |= __BIT(28);
3718 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
3719 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
3720 
3721 			/* Device Status */
3722 			if (sc->sc_type == WM_T_ICH8) {
3723 				reg = CSR_READ(sc, WMREG_STATUS);
3724 				reg &= ~__BIT(31);
3725 				CSR_WRITE(sc, WMREG_STATUS, reg);
3726 
3727 			}
3728 
3729 			/* IOSFPC */
3730 			if (sc->sc_type == WM_T_PCH_SPT) {
3731 				reg = CSR_READ(sc, WMREG_IOSFPC);
3732 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
3733 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
3734 			}
3735 			/*
3736 			 * To work around a descriptor data corruption issue
3737 			 * with NFS v2 UDP traffic, just disable the NFS
3738 			 * filtering capability.
3739 			 */
3740 			reg = CSR_READ(sc, WMREG_RFCTL);
3741 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
3742 			CSR_WRITE(sc, WMREG_RFCTL, reg);
3743 			break;
3744 		default:
3745 			break;
3746 		}
3747 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
3748 
3749 		switch (sc->sc_type) {
3750 		/*
3751 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
3752 		 * Avoid RSS Hash Value bug.
3753 		 */
3754 		case WM_T_82571:
3755 		case WM_T_82572:
3756 		case WM_T_82573:
3757 		case WM_T_80003:
3758 		case WM_T_ICH8:
3759 			reg = CSR_READ(sc, WMREG_RFCTL);
3760 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
3761 			CSR_WRITE(sc, WMREG_RFCTL, reg);
3762 			break;
3763 		case WM_T_82574:
3764 			/* Use extended Rx descriptors. */
3765 			reg = CSR_READ(sc, WMREG_RFCTL);
3766 			reg |= WMREG_RFCTL_EXSTEN;
3767 			CSR_WRITE(sc, WMREG_RFCTL, reg);
3768 			break;
3769 		default:
3770 			break;
3771 		}
3772 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
3773 		/*
3774 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
3775 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
3776 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
3777 		 * Correctly by the Device"
3778 		 *
3779 		 * I354(C2000) Errata AVR53:
3780 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
3781 		 * Hang"
3782 		 */
3783 		reg = CSR_READ(sc, WMREG_RFCTL);
3784 		reg |= WMREG_RFCTL_IPV6EXDIS;
3785 		CSR_WRITE(sc, WMREG_RFCTL, reg);
3786 	}
3787 }
3788 
3789 static uint32_t
3790 wm_rxpbs_adjust_82580(uint32_t val)
3791 {
3792 	uint32_t rv = 0;
3793 
3794 	if (val < __arraycount(wm_82580_rxpbs_table))
3795 		rv = wm_82580_rxpbs_table[val];
3796 
3797 	return rv;
3798 }
3799 
3800 /*
3801  * wm_reset_phy:
3802  *
3803  *	generic PHY reset function.
3804  *	Same as e1000_phy_hw_reset_generic()
3805  */
3806 static void
3807 wm_reset_phy(struct wm_softc *sc)
3808 {
3809 	uint32_t reg;
3810 
3811 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3812 		device_xname(sc->sc_dev), __func__));
3813 	if (wm_phy_resetisblocked(sc))
3814 		return;
3815 
3816 	sc->phy.acquire(sc);
3817 
3818 	reg = CSR_READ(sc, WMREG_CTRL);
3819 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
3820 	CSR_WRITE_FLUSH(sc);
3821 
3822 	delay(sc->phy.reset_delay_us);
3823 
3824 	CSR_WRITE(sc, WMREG_CTRL, reg);
3825 	CSR_WRITE_FLUSH(sc);
3826 
3827 	delay(150);
3828 
3829 	sc->phy.release(sc);
3830 
3831 	wm_get_cfg_done(sc);
3832 }
3833 
3834 static void
3835 wm_flush_desc_rings(struct wm_softc *sc)
3836 {
3837 	pcireg_t preg;
3838 	uint32_t reg;
3839 	int nexttx;
3840 
3841 	/* First, disable MULR fix in FEXTNVM11 */
3842 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
3843 	reg |= FEXTNVM11_DIS_MULRFIX;
3844 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
3845 
3846 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
3847 	reg = CSR_READ(sc, WMREG_TDLEN(0));
3848 	if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0) && (reg != 0)) {
3849 		struct wm_txqueue *txq;
3850 		wiseman_txdesc_t *txd;
3851 
3852 		/* TX */
3853 		printf("%s: Need TX flush (reg = %08x, len = %u)\n",
3854 		    device_xname(sc->sc_dev), preg, reg);
3855 		reg = CSR_READ(sc, WMREG_TCTL);
3856 		CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
3857 
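		/*
		 * Queue a single dummy 512-byte descriptor and advance the
		 * tail pointer so the hardware drains any descriptor writes
		 * still pending in its internal cache.
		 */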
3858 		txq = &sc->sc_queue[0].wmq_txq;
3859 		nexttx = txq->txq_next;
3860 		txd = &txq->txq_descs[nexttx];
3861 		wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
3862 		txd->wtx_cmdlen = htole32(WTX_CMD_IFCS| 512);
3863 		txd->wtx_fields.wtxu_status = 0;
3864 		txd->wtx_fields.wtxu_options = 0;
3865 		txd->wtx_fields.wtxu_vlan = 0;
3866 
3867 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
3868 			BUS_SPACE_BARRIER_WRITE);
3869 
3870 		txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
3871 		CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
3872 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
3873 			BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
3874 		delay(250);
3875 	}
3876 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
3877 	if (preg & DESCRING_STATUS_FLUSH_REQ) {
3878 		uint32_t rctl;
3879 
3880 		/* RX */
3881 		printf("%s: Need RX flush (reg = %08x)\n",
3882 		    device_xname(sc->sc_dev), preg);
3883 		rctl = CSR_READ(sc, WMREG_RCTL);
3884 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
3885 		CSR_WRITE_FLUSH(sc);
3886 		delay(150);
3887 
3888 		reg = CSR_READ(sc, WMREG_RXDCTL(0));
3889 		/* zero the lower 14 bits (prefetch and host thresholds) */
3890 		reg &= 0xffffc000;
3891 		/*
3892 		 * update thresholds: prefetch threshold to 31, host threshold
3893 		 * to 1 and make sure the granularity is "descriptors" and not
3894 		 * "cache lines"
3895 		 */
3896 		reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
3897 		CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
3898 
3899 		/*
3900 		 * momentarily enable the RX ring for the changes to take
3901 		 * effect
3902 		 */
3903 		CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
3904 		CSR_WRITE_FLUSH(sc);
3905 		delay(150);
3906 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
3907 	}
3908 }
3909 
3910 /*
3911  * wm_reset:
3912  *
3913  *	Reset the chip to a known state.
3914  */
3915 static void
3916 wm_reset(struct wm_softc *sc)
3917 {
3918 	int phy_reset = 0;
3919 	int i, error = 0;
3920 	uint32_t reg;
3921 
3922 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3923 		device_xname(sc->sc_dev), __func__));
3924 	KASSERT(sc->sc_type != 0);
3925 
3926 	/*
3927 	 * Allocate on-chip memory according to the MTU size.
3928 	 * The Packet Buffer Allocation register must be written
3929 	 * before the chip is reset.
3930 	 */
3931 	switch (sc->sc_type) {
3932 	case WM_T_82547:
3933 	case WM_T_82547_2:
3934 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3935 		    PBA_22K : PBA_30K;
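		/*
		 * Whatever remains of the 40K packet buffer above the
		 * receive allocation is used as the transmit FIFO.
		 */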
3936 		for (i = 0; i < sc->sc_nqueues; i++) {
3937 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
3938 			txq->txq_fifo_head = 0;
3939 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3940 			txq->txq_fifo_size =
3941 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3942 			txq->txq_fifo_stall = 0;
3943 		}
3944 		break;
3945 	case WM_T_82571:
3946 	case WM_T_82572:
3947 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
3948 	case WM_T_80003:
3949 		sc->sc_pba = PBA_32K;
3950 		break;
3951 	case WM_T_82573:
3952 		sc->sc_pba = PBA_12K;
3953 		break;
3954 	case WM_T_82574:
3955 	case WM_T_82583:
3956 		sc->sc_pba = PBA_20K;
3957 		break;
3958 	case WM_T_82576:
3959 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
3960 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
3961 		break;
3962 	case WM_T_82580:
3963 	case WM_T_I350:
3964 	case WM_T_I354:
3965 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
3966 		break;
3967 	case WM_T_I210:
3968 	case WM_T_I211:
3969 		sc->sc_pba = PBA_34K;
3970 		break;
3971 	case WM_T_ICH8:
3972 		/* Workaround for a bit corruption issue in FIFO memory */
3973 		sc->sc_pba = PBA_8K;
3974 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3975 		break;
3976 	case WM_T_ICH9:
3977 	case WM_T_ICH10:
3978 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
3979 		    PBA_14K : PBA_10K;
3980 		break;
3981 	case WM_T_PCH:
3982 	case WM_T_PCH2:
3983 	case WM_T_PCH_LPT:
3984 	case WM_T_PCH_SPT:
3985 		sc->sc_pba = PBA_26K;
3986 		break;
3987 	default:
3988 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3989 		    PBA_40K : PBA_48K;
3990 		break;
3991 	}
3992 	/*
3993 	 * Only old or non-multiqueue devices have the PBA register.
3994 	 * XXX Need special handling for 82575.
3995 	 */
3996 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3997 	    || (sc->sc_type == WM_T_82575))
3998 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3999 
4000 	/* Prevent the PCI-E bus from sticking */
4001 	if (sc->sc_flags & WM_F_PCIE) {
4002 		int timeout = 800;
4003 
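		/*
		 * Disable new GIO (PCIe) master requests and poll for up to
		 * 800 * 100us = 80ms until the hardware reports that no
		 * master requests are pending, so the reset is safe.
		 */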
4004 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
4005 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4006 
4007 		while (timeout--) {
4008 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
4009 			    == 0)
4010 				break;
4011 			delay(100);
4012 		}
4013 	}
4014 
4015 	/* Set the completion timeout for interface */
4016 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
4017 	    || (sc->sc_type == WM_T_82580)
4018 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4019 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
4020 		wm_set_pcie_completion_timeout(sc);
4021 
4022 	/* Clear interrupt */
4023 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4024 	if (sc->sc_nintrs > 1) {
4025 		if (sc->sc_type != WM_T_82574) {
4026 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4027 			CSR_WRITE(sc, WMREG_EIAC, 0);
4028 		} else {
4029 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4030 		}
4031 	}
4032 
4033 	/* Stop the transmit and receive processes. */
4034 	CSR_WRITE(sc, WMREG_RCTL, 0);
4035 	sc->sc_rctl &= ~RCTL_EN;
4036 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
4037 	CSR_WRITE_FLUSH(sc);
4038 
4039 	/* XXX set_tbi_sbp_82543() */
4040 
4041 	delay(10*1000);
4042 
4043 	/* Must acquire the MDIO ownership before MAC reset */
4044 	switch (sc->sc_type) {
4045 	case WM_T_82573:
4046 	case WM_T_82574:
4047 	case WM_T_82583:
4048 		error = wm_get_hw_semaphore_82573(sc);
4049 		break;
4050 	default:
4051 		break;
4052 	}
4053 
4054 	/*
4055 	 * 82541 Errata 29? & 82547 Errata 28?
4056 	 * See also the description about PHY_RST bit in CTRL register
4057 	 * in 8254x_GBe_SDM.pdf.
4058 	 */
4059 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
4060 		CSR_WRITE(sc, WMREG_CTRL,
4061 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
4062 		CSR_WRITE_FLUSH(sc);
4063 		delay(5000);
4064 	}
4065 
4066 	switch (sc->sc_type) {
4067 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
4068 	case WM_T_82541:
4069 	case WM_T_82541_2:
4070 	case WM_T_82547:
4071 	case WM_T_82547_2:
4072 		/*
4073 		 * On some chipsets, a reset through a memory-mapped write
4074 		 * cycle can cause the chip to reset before completing the
4075 		 * write cycle.  This causes major headache that can be
4076 		 * avoided by issuing the reset via indirect register writes
4077 		 * through I/O space.
4078 		 *
4079 		 * So, if we successfully mapped the I/O BAR at attach time,
4080 		 * use that.  Otherwise, try our luck with a memory-mapped
4081 		 * reset.
4082 		 */
4083 		if (sc->sc_flags & WM_F_IOH_VALID)
4084 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
4085 		else
4086 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
4087 		break;
4088 	case WM_T_82545_3:
4089 	case WM_T_82546_3:
4090 		/* Use the shadow control register on these chips. */
4091 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
4092 		break;
4093 	case WM_T_80003:
4094 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4095 		sc->phy.acquire(sc);
4096 		CSR_WRITE(sc, WMREG_CTRL, reg);
4097 		sc->phy.release(sc);
4098 		break;
4099 	case WM_T_ICH8:
4100 	case WM_T_ICH9:
4101 	case WM_T_ICH10:
4102 	case WM_T_PCH:
4103 	case WM_T_PCH2:
4104 	case WM_T_PCH_LPT:
4105 	case WM_T_PCH_SPT:
4106 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4107 		if (wm_phy_resetisblocked(sc) == false) {
4108 			/*
4109 			 * Gate automatic PHY configuration by hardware on
4110 			 * non-managed 82579
4111 			 */
4112 			if ((sc->sc_type == WM_T_PCH2)
4113 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
4114 				== 0))
4115 				wm_gate_hw_phy_config_ich8lan(sc, true);
4116 
4117 			reg |= CTRL_PHY_RESET;
4118 			phy_reset = 1;
4119 		} else
4120 			printf("XXX reset is blocked!!!\n");
4121 		sc->phy.acquire(sc);
4122 		CSR_WRITE(sc, WMREG_CTRL, reg);
4123 		/* Don't insert a completion barrier during reset */
4124 		delay(20*1000);
4125 		mutex_exit(sc->sc_ich_phymtx);
4126 		break;
4127 	case WM_T_82580:
4128 	case WM_T_I350:
4129 	case WM_T_I354:
4130 	case WM_T_I210:
4131 	case WM_T_I211:
4132 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4133 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
4134 			CSR_WRITE_FLUSH(sc);
4135 		delay(5000);
4136 		break;
4137 	case WM_T_82542_2_0:
4138 	case WM_T_82542_2_1:
4139 	case WM_T_82543:
4140 	case WM_T_82540:
4141 	case WM_T_82545:
4142 	case WM_T_82546:
4143 	case WM_T_82571:
4144 	case WM_T_82572:
4145 	case WM_T_82573:
4146 	case WM_T_82574:
4147 	case WM_T_82575:
4148 	case WM_T_82576:
4149 	case WM_T_82583:
4150 	default:
4151 		/* Everything else can safely use the documented method. */
4152 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4153 		break;
4154 	}
4155 
4156 	/* Must release the MDIO ownership after MAC reset */
4157 	switch (sc->sc_type) {
4158 	case WM_T_82573:
4159 	case WM_T_82574:
4160 	case WM_T_82583:
4161 		if (error == 0)
4162 			wm_put_hw_semaphore_82573(sc);
4163 		break;
4164 	default:
4165 		break;
4166 	}
4167 
4168 	if (phy_reset != 0)
4169 		wm_get_cfg_done(sc);
4170 
4171 	/* reload EEPROM */
4172 	switch (sc->sc_type) {
4173 	case WM_T_82542_2_0:
4174 	case WM_T_82542_2_1:
4175 	case WM_T_82543:
4176 	case WM_T_82544:
4177 		delay(10);
4178 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4179 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4180 		CSR_WRITE_FLUSH(sc);
4181 		delay(2000);
4182 		break;
4183 	case WM_T_82540:
4184 	case WM_T_82545:
4185 	case WM_T_82545_3:
4186 	case WM_T_82546:
4187 	case WM_T_82546_3:
4188 		delay(5*1000);
4189 		/* XXX Disable HW ARPs on ASF enabled adapters */
4190 		break;
4191 	case WM_T_82541:
4192 	case WM_T_82541_2:
4193 	case WM_T_82547:
4194 	case WM_T_82547_2:
4195 		delay(20000);
4196 		/* XXX Disable HW ARPs on ASF enabled adapters */
4197 		break;
4198 	case WM_T_82571:
4199 	case WM_T_82572:
4200 	case WM_T_82573:
4201 	case WM_T_82574:
4202 	case WM_T_82583:
4203 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
4204 			delay(10);
4205 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4206 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4207 			CSR_WRITE_FLUSH(sc);
4208 		}
4209 		/* check EECD_EE_AUTORD */
4210 		wm_get_auto_rd_done(sc);
4211 		/*
4212 		 * PHY configuration from the NVM starts just after
4213 		 * EECD_AUTO_RD is set.
4214 		 */
4215 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4216 		    || (sc->sc_type == WM_T_82583))
4217 			delay(25*1000);
4218 		break;
4219 	case WM_T_82575:
4220 	case WM_T_82576:
4221 	case WM_T_82580:
4222 	case WM_T_I350:
4223 	case WM_T_I354:
4224 	case WM_T_I210:
4225 	case WM_T_I211:
4226 	case WM_T_80003:
4227 		/* check EECD_EE_AUTORD */
4228 		wm_get_auto_rd_done(sc);
4229 		break;
4230 	case WM_T_ICH8:
4231 	case WM_T_ICH9:
4232 	case WM_T_ICH10:
4233 	case WM_T_PCH:
4234 	case WM_T_PCH2:
4235 	case WM_T_PCH_LPT:
4236 	case WM_T_PCH_SPT:
4237 		break;
4238 	default:
4239 		panic("%s: unknown type\n", __func__);
4240 	}
4241 
4242 	/* Check whether EEPROM is present or not */
4243 	switch (sc->sc_type) {
4244 	case WM_T_82575:
4245 	case WM_T_82576:
4246 	case WM_T_82580:
4247 	case WM_T_I350:
4248 	case WM_T_I354:
4249 	case WM_T_ICH8:
4250 	case WM_T_ICH9:
4251 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4252 			/* Not found */
4253 			sc->sc_flags |= WM_F_EEPROM_INVALID;
4254 			if (sc->sc_type == WM_T_82575)
4255 				wm_reset_init_script_82575(sc);
4256 		}
4257 		break;
4258 	default:
4259 		break;
4260 	}
4261 
4262 	if ((sc->sc_type == WM_T_82580)
4263 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
4264 		/* clear global device reset status bit */
4265 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4266 	}
4267 
4268 	/* Clear any pending interrupt events. */
4269 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4270 	reg = CSR_READ(sc, WMREG_ICR);
4271 	if (sc->sc_nintrs > 1) {
4272 		if (sc->sc_type != WM_T_82574) {
4273 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4274 			CSR_WRITE(sc, WMREG_EIAC, 0);
4275 		} else
4276 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4277 	}
4278 
4279 	/* reload sc_ctrl */
4280 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4281 
4282 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
4283 		wm_set_eee_i350(sc);
4284 
4285 	/* Clear the host wakeup bit after lcd reset */
4286 	if (sc->sc_type >= WM_T_PCH) {
4287 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
4288 		    BM_PORT_GEN_CFG);
4289 		reg &= ~BM_WUC_HOST_WU_BIT;
4290 		wm_gmii_hv_writereg(sc->sc_dev, 2,
4291 		    BM_PORT_GEN_CFG, reg);
4292 	}
4293 
4294 	/*
4295 	 * For PCH, this write will make sure that any noise will be detected
4296 	 * as a CRC error and be dropped rather than show up as a bad packet
4297 	 * to the DMA engine.
4298 	 */
4299 	if (sc->sc_type == WM_T_PCH)
4300 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4301 
4302 	if (sc->sc_type >= WM_T_82544)
4303 		CSR_WRITE(sc, WMREG_WUC, 0);
4304 
4305 	wm_reset_mdicnfg_82580(sc);
4306 
4307 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
4308 		wm_pll_workaround_i210(sc);
4309 }
4310 
4311 /*
4312  * wm_add_rxbuf:
4313  *
4314  *	Add a receive buffer to the indicated descriptor.
4315  */
4316 static int
4317 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
4318 {
4319 	struct wm_softc *sc = rxq->rxq_sc;
4320 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
4321 	struct mbuf *m;
4322 	int error;
4323 
4324 	KASSERT(mutex_owned(rxq->rxq_lock));
4325 
4326 	MGETHDR(m, M_DONTWAIT, MT_DATA);
4327 	if (m == NULL)
4328 		return ENOBUFS;
4329 
4330 	MCLGET(m, M_DONTWAIT);
4331 	if ((m->m_flags & M_EXT) == 0) {
4332 		m_freem(m);
4333 		return ENOBUFS;
4334 	}
4335 
4336 	if (rxs->rxs_mbuf != NULL)
4337 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4338 
4339 	rxs->rxs_mbuf = m;
4340 
4341 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4342 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4343 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
4344 	if (error) {
4345 		/* XXX XXX XXX */
4346 		aprint_error_dev(sc->sc_dev,
4347 		    "unable to load rx DMA map %d, error = %d\n",
4348 		    idx, error);
4349 		panic("wm_add_rxbuf");
4350 	}
4351 
4352 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4353 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4354 
4355 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4356 		if ((sc->sc_rctl & RCTL_EN) != 0)
4357 			wm_init_rxdesc(rxq, idx);
4358 	} else
4359 		wm_init_rxdesc(rxq, idx);
4360 
4361 	return 0;
4362 }
4363 
4364 /*
4365  * wm_rxdrain:
4366  *
4367  *	Drain the receive queue.
4368  */
4369 static void
4370 wm_rxdrain(struct wm_rxqueue *rxq)
4371 {
4372 	struct wm_softc *sc = rxq->rxq_sc;
4373 	struct wm_rxsoft *rxs;
4374 	int i;
4375 
4376 	KASSERT(mutex_owned(rxq->rxq_lock));
4377 
4378 	for (i = 0; i < WM_NRXDESC; i++) {
4379 		rxs = &rxq->rxq_soft[i];
4380 		if (rxs->rxs_mbuf != NULL) {
4381 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4382 			m_freem(rxs->rxs_mbuf);
4383 			rxs->rxs_mbuf = NULL;
4384 		}
4385 	}
4386 }
4387 
4388 
4389 /*
4390  * XXX copy from FreeBSD's sys/net/rss_config.c
4391  */
4392 /*
4393  * RSS secret key, intended to prevent attacks on load-balancing.  Its
4394  * effectiveness may be limited by algorithm choice and available entropy
4395  * during the boot.
4396  *
4397  * XXXRW: And that we don't randomize it yet!
4398  *
4399  * This is the default Microsoft RSS specification key which is also
4400  * the Chelsio T5 firmware default key.
4401  */
4402 #define RSS_KEYSIZE 40
4403 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
4404 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
4405 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
4406 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
4407 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
4408 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
4409 };
4410 
4411 /*
4412  * Caller must pass an array of size sizeof(rss_key).
4413  *
4414  * XXX
4415  * As if_ixgbe may use this function, it should not be an
4416  * if_wm-specific function.
4417  */
4418 static void
4419 wm_rss_getkey(uint8_t *key)
4420 {
4421 
4422 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
4423 }
4424 
4425 /*
4426  * Set up registers for RSS.
4427  *
4428  * XXX no VMDq support yet
4429  */
4430 static void
4431 wm_init_rss(struct wm_softc *sc)
4432 {
4433 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
4434 	int i;
4435 
4436 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
4437 
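	/*
	 * Populate the redirection table round-robin: RETA entry i points
	 * at queue (i % sc_nqueues), so RSS hash values are spread evenly
	 * across the active queues.
	 */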
4438 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
4439 		int qid, reta_ent;
4440 
4441 		qid  = i % sc->sc_nqueues;
4442 		switch (sc->sc_type) {
4443 		case WM_T_82574:
4444 			reta_ent = __SHIFTIN(qid,
4445 			    RETA_ENT_QINDEX_MASK_82574);
4446 			break;
4447 		case WM_T_82575:
4448 			reta_ent = __SHIFTIN(qid,
4449 			    RETA_ENT_QINDEX1_MASK_82575);
4450 			break;
4451 		default:
4452 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
4453 			break;
4454 		}
4455 
4456 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
4457 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
4458 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
4459 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
4460 	}
4461 
4462 	wm_rss_getkey((uint8_t *)rss_key);
4463 	for (i = 0; i < RSSRK_NUM_REGS; i++)
4464 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
4465 
4466 	if (sc->sc_type == WM_T_82574)
4467 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
4468 	else
4469 		mrqc = MRQC_ENABLE_RSS_MQ;
4470 
4471 	/*
4472 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
4473 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
4474 	 */
4475 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
4476 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
4477 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
4478 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
4479 
4480 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
4481 }
4482 
4483 /*
4484  * Adjust the TX and RX queue numbers which the system actually uses.
4485  *
4486  * The numbers are affected by the following parameters:
4487  *     - The number of hardware queues
4488  *     - The number of MSI-X vectors (= "nvectors" argument)
4489  *     - ncpu
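 *
 * Example: an 82576 (16 hardware queues) probed with 5 MSI-X vectors
 * on a 4-CPU machine ends up with min(16, 5 - 1, 4) = 4 queues.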
4490  */
4491 static void
4492 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
4493 {
4494 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
4495 
4496 	if (nvectors < 2) {
4497 		sc->sc_nqueues = 1;
4498 		return;
4499 	}
4500 
4501 	switch (sc->sc_type) {
4502 	case WM_T_82572:
4503 		hw_ntxqueues = 2;
4504 		hw_nrxqueues = 2;
4505 		break;
4506 	case WM_T_82574:
4507 		hw_ntxqueues = 2;
4508 		hw_nrxqueues = 2;
4509 		break;
4510 	case WM_T_82575:
4511 		hw_ntxqueues = 4;
4512 		hw_nrxqueues = 4;
4513 		break;
4514 	case WM_T_82576:
4515 		hw_ntxqueues = 16;
4516 		hw_nrxqueues = 16;
4517 		break;
4518 	case WM_T_82580:
4519 	case WM_T_I350:
4520 	case WM_T_I354:
4521 		hw_ntxqueues = 8;
4522 		hw_nrxqueues = 8;
4523 		break;
4524 	case WM_T_I210:
4525 		hw_ntxqueues = 4;
4526 		hw_nrxqueues = 4;
4527 		break;
4528 	case WM_T_I211:
4529 		hw_ntxqueues = 2;
4530 		hw_nrxqueues = 2;
4531 		break;
4532 		/*
4533 		 * As the Ethernet controllers below do not support MSI-X,
4534 		 * this driver does not use multiqueue for them.
4535 		 *     - WM_T_80003
4536 		 *     - WM_T_ICH8
4537 		 *     - WM_T_ICH9
4538 		 *     - WM_T_ICH10
4539 		 *     - WM_T_PCH
4540 		 *     - WM_T_PCH2
4541 		 *     - WM_T_PCH_LPT
4542 		 */
4543 	default:
4544 		hw_ntxqueues = 1;
4545 		hw_nrxqueues = 1;
4546 		break;
4547 	}
4548 
4549 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
4550 
4551 	/*
4552 	 * As more queues than MSI-X vectors cannot improve scaling, we
4553 	 * limit the number of queues actually used.
4554 	 */
4555 	if (nvectors < hw_nqueues + 1) {
4556 		sc->sc_nqueues = nvectors - 1;
4557 	} else {
4558 		sc->sc_nqueues = hw_nqueues;
4559 	}
4560 
4561 	/*
4562 	 * As more queues than CPUs cannot improve scaling, we limit
4563 	 * the number of queues actually used.
4564 	 */
4565 	if (ncpu < sc->sc_nqueues)
4566 		sc->sc_nqueues = ncpu;
4567 }
4568 
4569 /*
4570  * Both single-interrupt MSI and INTx can use this function.
4571  */
4572 static int
4573 wm_setup_legacy(struct wm_softc *sc)
4574 {
4575 	pci_chipset_tag_t pc = sc->sc_pc;
4576 	const char *intrstr = NULL;
4577 	char intrbuf[PCI_INTRSTR_LEN];
4578 	int error;
4579 
4580 	error = wm_alloc_txrx_queues(sc);
4581 	if (error) {
4582 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
4583 		    error);
4584 		return ENOMEM;
4585 	}
4586 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
4587 	    sizeof(intrbuf));
4588 #ifdef WM_MPSAFE
4589 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
4590 #endif
4591 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
4592 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
4593 	if (sc->sc_ihs[0] == NULL) {
4594 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
4595 		    (pci_intr_type(pc, sc->sc_intrs[0])
4596 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
4597 		return ENOMEM;
4598 	}
4599 
4600 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
4601 	sc->sc_nintrs = 1;
4602 	return 0;
4603 }
4604 
4605 static int
4606 wm_setup_msix(struct wm_softc *sc)
4607 {
4608 	void *vih;
4609 	kcpuset_t *affinity;
4610 	int qidx, error, intr_idx, txrx_established;
4611 	pci_chipset_tag_t pc = sc->sc_pc;
4612 	const char *intrstr = NULL;
4613 	char intrbuf[PCI_INTRSTR_LEN];
4614 	char intr_xname[INTRDEVNAMEBUF];
4615 
4616 	if (sc->sc_nqueues < ncpu) {
4617 		/*
4618 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
4619 		 * interrupts starts at CPU#1.
4620 		 */
4621 		sc->sc_affinity_offset = 1;
4622 	} else {
4623 		/*
4624 		 * In this case, this device uses all CPUs. So, we unify the
4625 		 * affinity cpu_index and MSI-X vector number for readability.
4626 		 */
4627 		sc->sc_affinity_offset = 0;
4628 	}
4629 
4630 	error = wm_alloc_txrx_queues(sc);
4631 	if (error) {
4632 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
4633 		    error);
4634 		return ENOMEM;
4635 	}
4636 
4637 	kcpuset_create(&affinity, false);
4638 	intr_idx = 0;
4639 
4640 	/*
4641 	 * TX and RX
4642 	 */
4643 	txrx_established = 0;
4644 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
4645 		struct wm_queue *wmq = &sc->sc_queue[qidx];
4646 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
4647 
4648 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4649 		    sizeof(intrbuf));
4650 #ifdef WM_MPSAFE
4651 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
4652 		    PCI_INTR_MPSAFE, true);
4653 #endif
4654 		memset(intr_xname, 0, sizeof(intr_xname));
4655 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
4656 		    device_xname(sc->sc_dev), qidx);
4657 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4658 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
4659 		if (vih == NULL) {
4660 			aprint_error_dev(sc->sc_dev,
4661 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
4662 			    intrstr ? " at " : "",
4663 			    intrstr ? intrstr : "");
4664 
4665 			goto fail;
4666 		}
4667 		kcpuset_zero(affinity);
4668 		/* Round-robin affinity */
4669 		kcpuset_set(affinity, affinity_to);
4670 		error = interrupt_distribute(vih, affinity, NULL);
4671 		if (error == 0) {
4672 			aprint_normal_dev(sc->sc_dev,
4673 			    "for TX and RX interrupting at %s affinity to %u\n",
4674 			    intrstr, affinity_to);
4675 		} else {
4676 			aprint_normal_dev(sc->sc_dev,
4677 			    "for TX and RX interrupting at %s\n", intrstr);
4678 		}
4679 		sc->sc_ihs[intr_idx] = vih;
4680 		wmq->wmq_id = qidx;
4681 		wmq->wmq_intr_idx = intr_idx;
4682 
4683 		txrx_established++;
4684 		intr_idx++;
4685 	}
4686 
4687 	/*
4688 	 * LINK
4689 	 */
4690 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4691 	    sizeof(intrbuf));
4692 #ifdef WM_MPSAFE
4693 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
4694 #endif
4695 	memset(intr_xname, 0, sizeof(intr_xname));
4696 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
4697 	    device_xname(sc->sc_dev));
4698 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4699 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
4700 	if (vih == NULL) {
4701 		aprint_error_dev(sc->sc_dev,
4702 		    "unable to establish MSI-X(for LINK)%s%s\n",
4703 		    intrstr ? " at " : "",
4704 		    intrstr ? intrstr : "");
4705 
4706 		goto fail;
4707 	}
4708 	/* Keep the default affinity for the LINK interrupt */
4709 	aprint_normal_dev(sc->sc_dev,
4710 	    "for LINK interrupting at %s\n", intrstr);
4711 	sc->sc_ihs[intr_idx] = vih;
4712 	sc->sc_link_intr_idx = intr_idx;
4713 
4714 	sc->sc_nintrs = sc->sc_nqueues + 1;
4715 	kcpuset_destroy(affinity);
4716 	return 0;
4717 
4718  fail:
4719 	for (qidx = 0; qidx < txrx_established; qidx++) {
4720 		struct wm_queue *wmq = &sc->sc_queue[qidx];
4721 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
4722 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
4723 	}
4724 
4725 	kcpuset_destroy(affinity);
4726 	return ENOMEM;
4727 }
4728 
4729 static void
4730 wm_turnon(struct wm_softc *sc)
4731 {
4732 	int i;
4733 
4734 	KASSERT(WM_CORE_LOCKED(sc));
4735 
4736 	/*
4737 	 * Must unset the stopping flags in ascending order.
4738 	 */
4739 	for (i = 0; i < sc->sc_nqueues; i++) {
4740 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
4741 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
4742 
4743 		mutex_enter(txq->txq_lock);
4744 		txq->txq_stopping = false;
4745 		mutex_exit(txq->txq_lock);
4746 
4747 		mutex_enter(rxq->rxq_lock);
4748 		rxq->rxq_stopping = false;
4749 		mutex_exit(rxq->rxq_lock);
4750 	}
4751 
4752 	sc->sc_core_stopping = false;
4753 }
4754 
4755 static void
4756 wm_turnoff(struct wm_softc *sc)
4757 {
4758 	int i;
4759 
4760 	KASSERT(WM_CORE_LOCKED(sc));
4761 
4762 	sc->sc_core_stopping = true;
4763 
4764 	/*
4765 	 * Must set the stopping flags in ascending order.
4766 	 */
4767 	for (i = 0; i < sc->sc_nqueues; i++) {
4768 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
4769 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
4770 
4771 		mutex_enter(rxq->rxq_lock);
4772 		rxq->rxq_stopping = true;
4773 		mutex_exit(rxq->rxq_lock);
4774 
4775 		mutex_enter(txq->txq_lock);
4776 		txq->txq_stopping = true;
4777 		mutex_exit(txq->txq_lock);
4778 	}
4779 }
4780 
4781 /*
4782  * wm_init:		[ifnet interface function]
4783  *
4784  *	Initialize the interface.
4785  */
4786 static int
4787 wm_init(struct ifnet *ifp)
4788 {
4789 	struct wm_softc *sc = ifp->if_softc;
4790 	int ret;
4791 
4792 	WM_CORE_LOCK(sc);
4793 	ret = wm_init_locked(ifp);
4794 	WM_CORE_UNLOCK(sc);
4795 
4796 	return ret;
4797 }
4798 
4799 static int
4800 wm_init_locked(struct ifnet *ifp)
4801 {
4802 	struct wm_softc *sc = ifp->if_softc;
4803 	int i, j, trynum, error = 0;
4804 	uint32_t reg;
4805 
4806 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4807 		device_xname(sc->sc_dev), __func__));
4808 	KASSERT(WM_CORE_LOCKED(sc));
4809 
4810 	/*
4811 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4812 	 * There is a small but measurable benefit to avoiding the adjustment
4813 	 * of the descriptor so that the headers are aligned, for normal mtu,
4814 	 * on such platforms.  One possibility is that the DMA itself is
4815 	 * slightly more efficient if the front of the entire packet (instead
4816 	 * of the front of the headers) is aligned.
4817 	 *
4818 	 * Note we must always set align_tweak to 0 if we are using
4819 	 * jumbo frames.
4820 	 */
4821 #ifdef __NO_STRICT_ALIGNMENT
4822 	sc->sc_align_tweak = 0;
4823 #else
4824 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4825 		sc->sc_align_tweak = 0;
4826 	else
4827 		sc->sc_align_tweak = 2;
4828 #endif /* __NO_STRICT_ALIGNMENT */
4829 
4830 	/* Cancel any pending I/O. */
4831 	wm_stop_locked(ifp, 0);
4832 
4833 	/* update statistics before reset */
4834 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4835 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4836 
4837 	/* PCH_SPT hardware workaround */
4838 	if (sc->sc_type == WM_T_PCH_SPT)
4839 		wm_flush_desc_rings(sc);
4840 
4841 	/* Reset the chip to a known state. */
4842 	wm_reset(sc);
4843 
4844 	/* AMT based hardware can now take control from firmware */
4845 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
4846 		wm_get_hw_control(sc);
4847 
4848 	/* Init hardware bits */
4849 	wm_initialize_hardware_bits(sc);
4850 
4851 	/* Reset the PHY. */
4852 	if (sc->sc_flags & WM_F_HAS_MII)
4853 		wm_gmii_reset(sc);
4854 
4855 	/* Calculate (E)ITR value */
4856 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4857 		sc->sc_itr = 450;	/* For EITR */
4858 	} else if (sc->sc_type >= WM_T_82543) {
4859 		/*
4860 		 * Set up the interrupt throttling register (units of 256ns)
4861 		 * Note that a footnote in Intel's documentation says this
4862 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4863 		 * or 10Mbit mode.  Empirically, this also appears to be
4864 		 * true for the 1024ns units of the other
4865 		 * interrupt-related timer registers -- so, really, we ought
4866 		 * to divide this value by 4 when the link speed is low.
4867 		 *
4868 		 * XXX implement this division at link speed change!
4869 		 */
4870 
4871 		/*
4872 		 * For N interrupts/sec, set this value to:
4873 		 * 1000000000 / (N * 256).  Note that we set the
4874 		 * absolute and packet timer values to this value
4875 		 * divided by 4 to get "simple timer" behavior.
4876 		 */
4877 
4878 		sc->sc_itr = 1500;		/* 2604 ints/sec */
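		/* 1000000000 / (1500 * 256) =~ 2604 interrupts/sec. */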
4879 	}
4880 
4881 	error = wm_init_txrx_queues(sc);
4882 	if (error)
4883 		goto out;
4884 
4885 	/*
4886 	 * Clear out the VLAN table -- we don't use it (yet).
4887 	 */
4888 	CSR_WRITE(sc, WMREG_VET, 0);
4889 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4890 		trynum = 10; /* Due to hw errata */
4891 	else
4892 		trynum = 1;
4893 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
4894 		for (j = 0; j < trynum; j++)
4895 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4896 
4897 	/*
4898 	 * Set up flow-control parameters.
4899 	 *
4900 	 * XXX Values could probably stand some tuning.
4901 	 */
4902 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4903 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4904 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
4905 	    && (sc->sc_type != WM_T_PCH_SPT)) {
4906 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4907 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4908 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4909 	}
4910 
4911 	sc->sc_fcrtl = FCRTL_DFLT;
4912 	if (sc->sc_type < WM_T_82543) {
4913 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4914 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4915 	} else {
4916 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4917 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4918 	}
4919 
4920 	if (sc->sc_type == WM_T_80003)
4921 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4922 	else
4923 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4924 
4925 	/* Write the control register. */
4926 	wm_set_vlan(sc);
4927 
4928 	if (sc->sc_flags & WM_F_HAS_MII) {
4929 		int val;
4930 
4931 		switch (sc->sc_type) {
4932 		case WM_T_80003:
4933 		case WM_T_ICH8:
4934 		case WM_T_ICH9:
4935 		case WM_T_ICH10:
4936 		case WM_T_PCH:
4937 		case WM_T_PCH2:
4938 		case WM_T_PCH_LPT:
4939 		case WM_T_PCH_SPT:
4940 			/*
4941 			 * Set the MAC to wait the maximum time between each
4942 			 * iteration and increase the max iterations when
4943 			 * polling the PHY; this fixes erroneous timeouts at
4944 			 * 10Mbps.
4945 			 */
4946 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4947 			    0xFFFF);
4948 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
4949 			val |= 0x3F;
4950 			wm_kmrn_writereg(sc,
4951 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
4952 			break;
4953 		default:
4954 			break;
4955 		}
4956 
4957 		if (sc->sc_type == WM_T_80003) {
4958 			val = CSR_READ(sc, WMREG_CTRL_EXT);
4959 			val &= ~CTRL_EXT_LINK_MODE_MASK;
4960 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4961 
4962 			/* Bypass RX and TX FIFO's */
4963 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4964 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4965 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4966 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4967 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4968 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4969 		}
4970 	}
4971 #if 0
4972 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4973 #endif
4974 
4975 	/* Set up checksum offload parameters. */
4976 	reg = CSR_READ(sc, WMREG_RXCSUM);
4977 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4978 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4979 		reg |= RXCSUM_IPOFL;
4980 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4981 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4982 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4983 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4984 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
4985 
4986 	/* Set up MSI-X */
4987 	if (sc->sc_nintrs > 1) {
4988 		uint32_t ivar;
4989 		struct wm_queue *wmq;
4990 		int qid, qintr_idx;
4991 
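		/*
		 * Route queue and link interrupts to MSI-X vectors: the
		 * 82575 uses simple MSIXBM bitmaps, the 82574 packs the
		 * routing into a single IVAR register, and newer devices
		 * use per-queue IVAR registers.
		 */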
4992 		if (sc->sc_type == WM_T_82575) {
4993 			/* Interrupt control */
4994 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4995 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
4996 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4997 
4998 			/* TX and RX */
4999 			for (i = 0; i < sc->sc_nqueues; i++) {
5000 				wmq = &sc->sc_queue[i];
5001 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
5002 				    EITR_TX_QUEUE(wmq->wmq_id)
5003 				    | EITR_RX_QUEUE(wmq->wmq_id));
5004 			}
5005 			/* Link status */
5006 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
5007 			    EITR_OTHER);
5008 		} else if (sc->sc_type == WM_T_82574) {
5009 			/* Interrupt control */
5010 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
5011 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
5012 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5013 
5014 			ivar = 0;
5015 			/* TX and RX */
5016 			for (i = 0; i < sc->sc_nqueues; i++) {
5017 				wmq = &sc->sc_queue[i];
5018 				qid = wmq->wmq_id;
5019 				qintr_idx = wmq->wmq_intr_idx;
5020 
5021 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
5022 				    IVAR_TX_MASK_Q_82574(qid));
5023 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
5024 				    IVAR_RX_MASK_Q_82574(qid));
5025 			}
5026 			/* Link status */
5027 			ivar |= __SHIFTIN((IVAR_VALID_82574
5028 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
5029 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
5030 		} else {
5031 			/* Interrupt control */
5032 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
5033 			    | GPIE_EIAME | GPIE_PBA);
5034 
5035 			switch (sc->sc_type) {
5036 			case WM_T_82580:
5037 			case WM_T_I350:
5038 			case WM_T_I354:
5039 			case WM_T_I210:
5040 			case WM_T_I211:
5041 				/* TX and RX */
5042 				for (i = 0; i < sc->sc_nqueues; i++) {
5043 					wmq = &sc->sc_queue[i];
5044 					qid = wmq->wmq_id;
5045 					qintr_idx = wmq->wmq_intr_idx;
5046 
5047 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
5048 					ivar &= ~IVAR_TX_MASK_Q(qid);
5049 					ivar |= __SHIFTIN((qintr_idx
5050 						| IVAR_VALID),
5051 					    IVAR_TX_MASK_Q(qid));
5052 					ivar &= ~IVAR_RX_MASK_Q(qid);
5053 					ivar |= __SHIFTIN((qintr_idx
5054 						| IVAR_VALID),
5055 					    IVAR_RX_MASK_Q(qid));
5056 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
5057 				}
5058 				break;
5059 			case WM_T_82576:
5060 				/* TX and RX */
5061 				for (i = 0; i < sc->sc_nqueues; i++) {
5062 					wmq = &sc->sc_queue[i];
5063 					qid = wmq->wmq_id;
5064 					qintr_idx = wmq->wmq_intr_idx;
5065 
5066 					ivar = CSR_READ(sc,
5067 					    WMREG_IVAR_Q_82576(qid));
5068 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
5069 					ivar |= __SHIFTIN((qintr_idx
5070 						| IVAR_VALID),
5071 					    IVAR_TX_MASK_Q_82576(qid));
5072 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
5073 					ivar |= __SHIFTIN((qintr_idx
5074 						| IVAR_VALID),
5075 					    IVAR_RX_MASK_Q_82576(qid));
5076 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
5077 					    ivar);
5078 				}
5079 				break;
5080 			default:
5081 				break;
5082 			}
5083 
5084 			/* Link status */
5085 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
5086 			    IVAR_MISC_OTHER);
5087 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
5088 		}
5089 
5090 		if (sc->sc_nqueues > 1) {
5091 			wm_init_rss(sc);
5092 
5093 			/*
5094 			 * NOTE: Receive Full-Packet Checksum Offload
5095 			 * is mutually exclusive with Multiqueue.  This
5096 			 * does not affect TCP/IP checksum offload, which
5097 			 * still works.
5098 			 */
5099 			reg = CSR_READ(sc, WMREG_RXCSUM);
5100 			reg |= RXCSUM_PCSD;
5101 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
5102 		}
5103 	}
5104 
5105 	/* Set up the interrupt registers. */
5106 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5107 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
5108 	    ICR_RXO | ICR_RXT0;
5109 	if (sc->sc_nintrs > 1) {
5110 		uint32_t mask;
5111 		struct wm_queue *wmq;
5112 
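		/*
		 * Unmask the interrupt causes in use: the 82574 uses its
		 * own EIAC register, the 82575 uses EITR_* cause bits, and
		 * newer chips use one bit per MSI-X vector.
		 */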
5113 		switch (sc->sc_type) {
5114 		case WM_T_82574:
5115 			CSR_WRITE(sc, WMREG_EIAC_82574,
5116 			    WMREG_EIAC_82574_MSIX_MASK);
5117 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
5118 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
5119 			break;
5120 		default:
5121 			if (sc->sc_type == WM_T_82575) {
5122 				mask = 0;
5123 				for (i = 0; i < sc->sc_nqueues; i++) {
5124 					wmq = &sc->sc_queue[i];
5125 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
5126 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
5127 				}
5128 				mask |= EITR_OTHER;
5129 			} else {
5130 				mask = 0;
5131 				for (i = 0; i < sc->sc_nqueues; i++) {
5132 					wmq = &sc->sc_queue[i];
5133 					mask |= 1 << wmq->wmq_intr_idx;
5134 				}
5135 				mask |= 1 << sc->sc_link_intr_idx;
5136 			}
5137 			CSR_WRITE(sc, WMREG_EIAC, mask);
5138 			CSR_WRITE(sc, WMREG_EIAM, mask);
5139 			CSR_WRITE(sc, WMREG_EIMS, mask);
5140 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
5141 			break;
5142 		}
5143 	} else
5144 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
5145 
5146 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5147 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5148 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
5149 	    || (sc->sc_type == WM_T_PCH_SPT)) {
5150 		reg = CSR_READ(sc, WMREG_KABGTXD);
5151 		reg |= KABGTXD_BGSQLBIAS;
5152 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
5153 	}
5154 
5155 	/* Set up the inter-packet gap. */
5156 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
5157 
5158 	if (sc->sc_type >= WM_T_82543) {
5159 		/*
5160 		 * XXX The 82574 has both ITR and EITR.  Set EITR when we
5161 		 * use the multiqueue function with MSI-X.
5162 		 */
5163 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5164 			int qidx;
5165 			for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5166 				struct wm_queue *wmq = &sc->sc_queue[qidx];
5167 				CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx),
5168 				    sc->sc_itr);
5169 			}
5170 			/*
5171 			 * Link interrupts occur much less frequently than
5172 			 * TX and RX interrupts, so we don't tune the
5173 			 * EITR(WM_MSIX_LINKINTR_IDX) value the way
5174 			 * FreeBSD's if_igb does.
5175 			 */
5176 		} else
5177 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
5178 	}
5179 
5180 	/* Set the VLAN ethernetype. */
5181 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
5182 
5183 	/*
5184 	 * Set up the transmit control register; we start out with
5185 	 * a collision distance suitable for FDX, but update it when
5186 	 * we resolve the media type.
5187 	 */
5188 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
5189 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
5190 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5191 	if (sc->sc_type >= WM_T_82571)
5192 		sc->sc_tctl |= TCTL_MULR;
5193 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5194 
5195 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5196 		/* Write TDT after TCTL.EN is set.  See the documentation. */
5197 		CSR_WRITE(sc, WMREG_TDT(0), 0);
5198 	}
5199 
5200 	if (sc->sc_type == WM_T_80003) {
5201 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
5202 		reg &= ~TCTL_EXT_GCEX_MASK;
5203 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
5204 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
5205 	}
5206 
5207 	/* Set the media. */
5208 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
5209 		goto out;
5210 
5211 	/* Configure for OS presence */
5212 	wm_init_manageability(sc);
5213 
5214 	/*
5215 	 * Set up the receive control register; we actually program
5216 	 * the register when we set the receive filter.  Use multicast
5217 	 * address offset type 0.
5218 	 *
5219 	 * Only the i82544 has the ability to strip the incoming
5220 	 * CRC, so we don't enable that feature.
5221 	 */
5222 	sc->sc_mchash_type = 0;
5223 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
5224 	    | RCTL_MO(sc->sc_mchash_type);
5225 
5226 	/*
5227 	 * The 82574 uses the one-buffer extended Rx descriptor format.
5228 	 */
5229 	if (sc->sc_type == WM_T_82574)
5230 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
5231 
5232 	/*
5233 	 * The I350 has a bug where it always strips the CRC whether
5234 	 * asked to or not.  So ask for stripped CRC here and cope in rxeof().
5235 	 */
5236 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
5237 	    || (sc->sc_type == WM_T_I210))
5238 		sc->sc_rctl |= RCTL_SECRC;
5239 
5240 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
5241 	    && (ifp->if_mtu > ETHERMTU)) {
5242 		sc->sc_rctl |= RCTL_LPE;
5243 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5244 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
5245 	}
5246 
5247 	if (MCLBYTES == 2048) {
5248 		sc->sc_rctl |= RCTL_2k;
5249 	} else {
5250 		if (sc->sc_type >= WM_T_82543) {
5251 			switch (MCLBYTES) {
5252 			case 4096:
5253 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
5254 				break;
5255 			case 8192:
5256 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
5257 				break;
5258 			case 16384:
5259 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
5260 				break;
5261 			default:
5262 				panic("wm_init: MCLBYTES %d unsupported",
5263 				    MCLBYTES);
5264 				break;
5265 			}
5266 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
5267 	}
5268 
5269 	/* Set the receive filter. */
5270 	wm_set_filter(sc);
5271 
5272 	/* Enable ECC */
5273 	switch (sc->sc_type) {
5274 	case WM_T_82571:
5275 		reg = CSR_READ(sc, WMREG_PBA_ECC);
5276 		reg |= PBA_ECC_CORR_EN;
5277 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
5278 		break;
5279 	case WM_T_PCH_LPT:
5280 	case WM_T_PCH_SPT:
5281 		reg = CSR_READ(sc, WMREG_PBECCSTS);
5282 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
5283 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
5284 
5285 		sc->sc_ctrl |= CTRL_MEHE;
5286 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5287 		break;
5288 	default:
5289 		break;
5290 	}
5291 
5292 	/* On 575 and later set RDT only if RX enabled */
5293 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5294 		int qidx;
5295 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5296 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
5297 			for (i = 0; i < WM_NRXDESC; i++) {
5298 				mutex_enter(rxq->rxq_lock);
5299 				wm_init_rxdesc(rxq, i);
5300 				mutex_exit(rxq->rxq_lock);
5302 			}
5303 		}
5304 	}
5305 
5306 	wm_turnon(sc);
5307 
5308 	/* Start the one second link check clock. */
5309 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
5310 
5311 	/* ...all done! */
5312 	ifp->if_flags |= IFF_RUNNING;
5313 	ifp->if_flags &= ~IFF_OACTIVE;
5314 
5315  out:
5316 	sc->sc_if_flags = ifp->if_flags;
5317 	if (error)
5318 		log(LOG_ERR, "%s: interface not running\n",
5319 		    device_xname(sc->sc_dev));
5320 	return error;
5321 }
5322 
5323 /*
5324  * wm_stop:		[ifnet interface function]
5325  *
5326  *	Stop transmission on the interface.
5327  */
5328 static void
5329 wm_stop(struct ifnet *ifp, int disable)
5330 {
5331 	struct wm_softc *sc = ifp->if_softc;
5332 
5333 	WM_CORE_LOCK(sc);
5334 	wm_stop_locked(ifp, disable);
5335 	WM_CORE_UNLOCK(sc);
5336 }
5337 
5338 static void
5339 wm_stop_locked(struct ifnet *ifp, int disable)
5340 {
5341 	struct wm_softc *sc = ifp->if_softc;
5342 	struct wm_txsoft *txs;
5343 	int i, qidx;
5344 
5345 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5346 		device_xname(sc->sc_dev), __func__));
5347 	KASSERT(WM_CORE_LOCKED(sc));
5348 
5349 	wm_turnoff(sc);
5350 
5351 	/* Stop the one second clock. */
5352 	callout_stop(&sc->sc_tick_ch);
5353 
5354 	/* Stop the 82547 Tx FIFO stall check timer. */
5355 	if (sc->sc_type == WM_T_82547)
5356 		callout_stop(&sc->sc_txfifo_ch);
5357 
5358 	if (sc->sc_flags & WM_F_HAS_MII) {
5359 		/* Down the MII. */
5360 		mii_down(&sc->sc_mii);
5361 	} else {
5362 #if 0
5363 		/* Should we clear PHY's status properly? */
5364 		wm_reset(sc);
5365 #endif
5366 	}
5367 
5368 	/* Stop the transmit and receive processes. */
5369 	CSR_WRITE(sc, WMREG_TCTL, 0);
5370 	CSR_WRITE(sc, WMREG_RCTL, 0);
5371 	sc->sc_rctl &= ~RCTL_EN;
5372 
5373 	/*
5374 	 * Clear the interrupt mask to ensure the device cannot assert its
5375 	 * interrupt line.
5376 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
5377 	 * service any currently pending or shared interrupt.
5378 	 */
5379 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5380 	sc->sc_icr = 0;
5381 	if (sc->sc_nintrs > 1) {
5382 		if (sc->sc_type != WM_T_82574) {
5383 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5384 			CSR_WRITE(sc, WMREG_EIAC, 0);
5385 		} else
5386 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5387 	}
5388 
5389 	/* Release any queued transmit buffers. */
5390 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5391 		struct wm_queue *wmq = &sc->sc_queue[qidx];
5392 		struct wm_txqueue *txq = &wmq->wmq_txq;
5393 		mutex_enter(txq->txq_lock);
5394 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5395 			txs = &txq->txq_soft[i];
5396 			if (txs->txs_mbuf != NULL) {
5397 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
5398 				m_freem(txs->txs_mbuf);
5399 				txs->txs_mbuf = NULL;
5400 			}
5401 		}
5402 		mutex_exit(txq->txq_lock);
5403 	}
5404 
5405 	/* Mark the interface as down and cancel the watchdog timer. */
5406 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
5407 	ifp->if_timer = 0;
5408 
5409 	if (disable) {
5410 		for (i = 0; i < sc->sc_nqueues; i++) {
5411 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5412 			mutex_enter(rxq->rxq_lock);
5413 			wm_rxdrain(rxq);
5414 			mutex_exit(rxq->rxq_lock);
5415 		}
5416 	}
5417 
5418 #if 0 /* notyet */
5419 	if (sc->sc_type >= WM_T_82544)
5420 		CSR_WRITE(sc, WMREG_WUC, 0);
5421 #endif
5422 }
5423 
5424 static void
5425 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
5426 {
5427 	struct mbuf *m;
5428 	int i;
5429 
5430 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
5431 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
5432 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
5433 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
5434 		    m->m_data, m->m_len, m->m_flags);
5435 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
5436 	    i, i == 1 ? "" : "s");
5437 }
5438 
5439 /*
5440  * wm_82547_txfifo_stall:
5441  *
5442  *	Callout used to wait for the 82547 Tx FIFO to drain,
5443  *	reset the FIFO pointers, and restart packet transmission.
5444  */
5445 static void
5446 wm_82547_txfifo_stall(void *arg)
5447 {
5448 	struct wm_softc *sc = arg;
5449 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
5450 
5451 	mutex_enter(txq->txq_lock);
5452 
5453 	if (txq->txq_stopping)
5454 		goto out;
5455 
5456 	if (txq->txq_fifo_stall) {
5457 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
5458 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
5459 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
5460 			/*
5461 			 * Packets have drained.  Stop transmitter, reset
5462 			 * FIFO pointers, restart transmitter, and kick
5463 			 * the packet queue.
5464 			 */
5465 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
5466 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
5467 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
5468 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
5469 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
5470 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
5471 			CSR_WRITE(sc, WMREG_TCTL, tctl);
5472 			CSR_WRITE_FLUSH(sc);
5473 
5474 			txq->txq_fifo_head = 0;
5475 			txq->txq_fifo_stall = 0;
5476 			wm_start_locked(&sc->sc_ethercom.ec_if);
5477 		} else {
5478 			/*
5479 			 * Still waiting for packets to drain; try again in
5480 			 * another tick.
5481 			 */
5482 			callout_schedule(&sc->sc_txfifo_ch, 1);
5483 		}
5484 	}
5485 
5486 out:
5487 	mutex_exit(txq->txq_lock);
5488 }
5489 
5490 /*
5491  * wm_82547_txfifo_bugchk:
5492  *
5493  *	Check for bug condition in the 82547 Tx FIFO.  We need to
5494  *	prevent enqueueing a packet that would wrap around the end
5495  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
5496  *
5497  *	We do this by checking the amount of space before the end
5498  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
5499  *	the Tx FIFO, wait for all remaining packets to drain, reset
5500  *	the internal FIFO pointers to the beginning, and restart
5501  *	transmission on the interface.
5502  */
5503 #define	WM_FIFO_HDR		0x10
5504 #define	WM_82547_PAD_LEN	0x3e0
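/*
 * WM_FIFO_HDR is the size of the per-packet Tx FIFO header, and packet
 * lengths are accounted for in WM_FIFO_HDR units.  WM_82547_PAD_LEN is
 * the extra slack applied by the wraparound check below.
 */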
5505 static int
5506 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
5507 {
5508 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
5509 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
5510 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
5511 
5512 	/* Just return if already stalled. */
5513 	if (txq->txq_fifo_stall)
5514 		return 1;
5515 
5516 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
5517 		/* Stall only occurs in half-duplex mode. */
5518 		goto send_packet;
5519 	}
5520 
5521 	if (len >= WM_82547_PAD_LEN + space) {
5522 		txq->txq_fifo_stall = 1;
5523 		callout_schedule(&sc->sc_txfifo_ch, 1);
5524 		return 1;
5525 	}
5526 
5527  send_packet:
5528 	txq->txq_fifo_head += len;
5529 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
5530 		txq->txq_fifo_head -= txq->txq_fifo_size;
5531 
5532 	return 0;
5533 }
5534 
5535 static int
5536 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5537 {
5538 	int error;
5539 
5540 	/*
5541 	 * Allocate the control data structures, and create and load the
5542 	 * DMA map for it.
5543 	 *
5544 	 * NOTE: All Tx descriptors must be in the same 4G segment of
5545 	 * memory.  So must Rx descriptors.  We simplify by allocating
5546 	 * both sets within the same 4G segment.
5547 	 */
5548 	if (sc->sc_type < WM_T_82544)
5549 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
5550 	else
5551 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
5552 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5553 		txq->txq_descsize = sizeof(nq_txdesc_t);
5554 	else
5555 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
5556 
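	/*
	 * The 0x100000000 boundary argument to bus_dmamem_alloc() keeps
	 * the allocation from crossing a 4G boundary, satisfying the
	 * NOTE above.
	 */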
5557 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
5558 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
5559 		    1, &txq->txq_desc_rseg, 0)) != 0) {
5560 		aprint_error_dev(sc->sc_dev,
5561 		    "unable to allocate TX control data, error = %d\n",
5562 		    error);
5563 		goto fail_0;
5564 	}
5565 
5566 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
5567 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
5568 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
5569 		aprint_error_dev(sc->sc_dev,
5570 		    "unable to map TX control data, error = %d\n", error);
5571 		goto fail_1;
5572 	}
5573 
5574 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
5575 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
5576 		aprint_error_dev(sc->sc_dev,
5577 		    "unable to create TX control data DMA map, error = %d\n",
5578 		    error);
5579 		goto fail_2;
5580 	}
5581 
5582 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
5583 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
5584 		aprint_error_dev(sc->sc_dev,
5585 		    "unable to load TX control data DMA map, error = %d\n",
5586 		    error);
5587 		goto fail_3;
5588 	}
5589 
5590 	return 0;
5591 
5592  fail_3:
5593 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5594  fail_2:
5595 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5596 	    WM_TXDESCS_SIZE(txq));
5597  fail_1:
5598 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5599  fail_0:
5600 	return error;
5601 }
5602 
5603 static void
5604 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5605 {
5606 
5607 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
5608 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5609 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5610 	    WM_TXDESCS_SIZE(txq));
5611 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5612 }
5613 
5614 static int
5615 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5616 {
5617 	int error;
5618 	size_t rxq_descs_size;
5619 
5620 	/*
5621 	 * Allocate the control data structures, and create and load the
5622 	 * DMA map for it.
5623 	 *
5624 	 * NOTE: All Tx descriptors must be in the same 4G segment of
5625 	 * memory.  So must Rx descriptors.  We simplify by allocating
5626 	 * both sets within the same 4G segment.
5627 	 */
5628 	rxq->rxq_ndesc = WM_NRXDESC;
5629 	if (sc->sc_type == WM_T_82574)
5630 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
5631 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5632 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
5633 	else
5634 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
5635 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
5636 
5637 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
5638 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
5639 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
5640 		aprint_error_dev(sc->sc_dev,
5641 		    "unable to allocate RX control data, error = %d\n",
5642 		    error);
5643 		goto fail_0;
5644 	}
5645 
5646 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
5647 		    rxq->rxq_desc_rseg, rxq_descs_size,
5648 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
5649 		aprint_error_dev(sc->sc_dev,
5650 		    "unable to map RX control data, error = %d\n", error);
5651 		goto fail_1;
5652 	}
5653 
5654 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
5655 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
5656 		aprint_error_dev(sc->sc_dev,
5657 		    "unable to create RX control data DMA map, error = %d\n",
5658 		    error);
5659 		goto fail_2;
5660 	}
5661 
5662 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
5663 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
5664 		aprint_error_dev(sc->sc_dev,
5665 		    "unable to load RX control data DMA map, error = %d\n",
5666 		    error);
5667 		goto fail_3;
5668 	}
5669 
5670 	return 0;
5671 
5672  fail_3:
5673 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5674  fail_2:
5675 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
5676 	    rxq_descs_size);
5677  fail_1:
5678 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5679  fail_0:
5680 	return error;
5681 }
5682 
5683 static void
5684 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5685 {
5686 
5687 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
5688 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5689 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
5690 	    rxq->rxq_descsize * rxq->rxq_ndesc);
5691 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5692 }
5693 
5694 
5695 static int
5696 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5697 {
5698 	int i, error;
5699 
5700 	/* Create the transmit buffer DMA maps. */
5701 	WM_TXQUEUELEN(txq) =
5702 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
5703 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
5704 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5705 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
5706 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
5707 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
5708 			aprint_error_dev(sc->sc_dev,
5709 			    "unable to create Tx DMA map %d, error = %d\n",
5710 			    i, error);
5711 			goto fail;
5712 		}
5713 	}
5714 
5715 	return 0;
5716 
5717  fail:
5718 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5719 		if (txq->txq_soft[i].txs_dmamap != NULL)
5720 			bus_dmamap_destroy(sc->sc_dmat,
5721 			    txq->txq_soft[i].txs_dmamap);
5722 	}
5723 	return error;
5724 }
5725 
5726 static void
5727 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5728 {
5729 	int i;
5730 
5731 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5732 		if (txq->txq_soft[i].txs_dmamap != NULL)
5733 			bus_dmamap_destroy(sc->sc_dmat,
5734 			    txq->txq_soft[i].txs_dmamap);
5735 	}
5736 }
5737 
5738 static int
5739 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5740 {
5741 	int i, error;
5742 
5743 	/* Create the receive buffer DMA maps. */
5744 	for (i = 0; i < rxq->rxq_ndesc; i++) {
5745 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
5746 			    MCLBYTES, 0, 0,
5747 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
5748 			aprint_error_dev(sc->sc_dev,
5749 			    "unable to create Rx DMA map %d error = %d\n",
5750 			    i, error);
5751 			goto fail;
5752 		}
5753 		rxq->rxq_soft[i].rxs_mbuf = NULL;
5754 	}
5755 
5756 	return 0;
5757 
5758  fail:
5759 	for (i = 0; i < rxq->rxq_ndesc; i++) {
5760 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5761 			bus_dmamap_destroy(sc->sc_dmat,
5762 			    rxq->rxq_soft[i].rxs_dmamap);
5763 	}
5764 	return error;
5765 }
5766 
5767 static void
5768 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5769 {
5770 	int i;
5771 
5772 	for (i = 0; i < rxq->rxq_ndesc; i++) {
5773 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5774 			bus_dmamap_destroy(sc->sc_dmat,
5775 			    rxq->rxq_soft[i].rxs_dmamap);
5776 	}
5777 }
5778 
5779 /*
5780  * wm_alloc_txrx_queues:
5781  *	Allocate {tx,rx} descriptors and {tx,rx} buffers
5782  */
5783 static int
5784 wm_alloc_txrx_queues(struct wm_softc *sc)
5785 {
5786 	int i, error, tx_done, rx_done;
5787 
5788 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
5789 	    KM_SLEEP);
5790 	if (sc->sc_queue == NULL) {
5791 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
5792 		error = ENOMEM;
5793 		goto fail_0;
5794 	}
5795 
5796 	/*
5797 	 * For transmission
5798 	 */
5799 	error = 0;
5800 	tx_done = 0;
5801 	for (i = 0; i < sc->sc_nqueues; i++) {
5802 #ifdef WM_EVENT_COUNTERS
5803 		int j;
5804 		const char *xname;
5805 #endif
5806 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5807 		txq->txq_sc = sc;
5808 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5809 
5810 		error = wm_alloc_tx_descs(sc, txq);
5811 		if (error)
5812 			break;
5813 		error = wm_alloc_tx_buffer(sc, txq);
5814 		if (error) {
5815 			wm_free_tx_descs(sc, txq);
5816 			break;
5817 		}
5818 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
5819 		if (txq->txq_interq == NULL) {
5820 			wm_free_tx_descs(sc, txq);
5821 			wm_free_tx_buffer(sc, txq);
5822 			error = ENOMEM;
5823 			break;
5824 		}
5825 
5826 #ifdef WM_EVENT_COUNTERS
5827 		xname = device_xname(sc->sc_dev);
5828 
5829 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
5830 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
5831 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
5832 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
5833 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
5834 
5835 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
5836 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
5837 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
5838 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
5839 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
5840 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
5841 
5842 		for (j = 0; j < WM_NTXSEGS; j++) {
5843 			snprintf(txq->txq_txseg_evcnt_names[j],
5844 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
5845 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
5846 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
5847 		}
5848 
5849 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
5850 
5851 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
5852 #endif /* WM_EVENT_COUNTERS */
5853 
5854 		tx_done++;
5855 	}
5856 	if (error)
5857 		goto fail_1;
5858 
5859 	/*
5860 	 * For receive
5861 	 */
5862 	error = 0;
5863 	rx_done = 0;
5864 	for (i = 0; i < sc->sc_nqueues; i++) {
5865 #ifdef WM_EVENT_COUNTERS
5866 		const char *xname;
5867 #endif
5868 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5869 		rxq->rxq_sc = sc;
5870 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5871 
5872 		error = wm_alloc_rx_descs(sc, rxq);
5873 		if (error)
5874 			break;
5875 
5876 		error = wm_alloc_rx_buffer(sc, rxq);
5877 		if (error) {
5878 			wm_free_rx_descs(sc, rxq);
5879 			break;
5880 		}
5881 
5882 #ifdef WM_EVENT_COUNTERS
5883 		xname = device_xname(sc->sc_dev);
5884 
5885 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
5886 
5887 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
5888 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
5889 #endif /* WM_EVENT_COUNTERS */
5890 
5891 		rx_done++;
5892 	}
5893 	if (error)
5894 		goto fail_2;
5895 
5896 	return 0;
5897 
5898  fail_2:
5899 	for (i = 0; i < rx_done; i++) {
5900 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5901 		wm_free_rx_buffer(sc, rxq);
5902 		wm_free_rx_descs(sc, rxq);
5903 		if (rxq->rxq_lock)
5904 			mutex_obj_free(rxq->rxq_lock);
5905 	}
5906  fail_1:
5907 	for (i = 0; i < tx_done; i++) {
5908 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5909 		pcq_destroy(txq->txq_interq);
5910 		wm_free_tx_buffer(sc, txq);
5911 		wm_free_tx_descs(sc, txq);
5912 		if (txq->txq_lock)
5913 			mutex_obj_free(txq->txq_lock);
5914 	}
5915 
5916 	kmem_free(sc->sc_queue,
5917 	    sizeof(struct wm_queue) * sc->sc_nqueues);
5918  fail_0:
5919 	return error;
5920 }
5921 
5922 /*
5923  * wm_free_txrx_queues:
5924  *	Free {tx,rx} descriptors and {tx,rx} buffers
5925  */
5926 static void
5927 wm_free_txrx_queues(struct wm_softc *sc)
5928 {
5929 	int i;
5930 
5931 	for (i = 0; i < sc->sc_nqueues; i++) {
5932 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5933 
5934 #ifdef WM_EVENT_COUNTERS
5935 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
5936 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
5937 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
5938 #endif /* WM_EVENT_COUNTERS */
5939 
5940 		wm_free_rx_buffer(sc, rxq);
5941 		wm_free_rx_descs(sc, rxq);
5942 		if (rxq->rxq_lock)
5943 			mutex_obj_free(rxq->rxq_lock);
5944 	}
5945 
5946 	for (i = 0; i < sc->sc_nqueues; i++) {
5947 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5948 		struct mbuf *m;
5949 #ifdef WM_EVENT_COUNTERS
5950 		int j;
5951 
5952 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
5953 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
5954 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
5955 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
5956 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
5957 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
5958 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
5959 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
5960 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
5961 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
5962 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
5963 
5964 		for (j = 0; j < WM_NTXSEGS; j++)
5965 			evcnt_detach(&txq->txq_ev_txseg[j]);
5966 
5967 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
5968 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
5969 #endif /* WM_EVENT_COUNTERS */
5970 
5971 		/* drain txq_interq */
5972 		while ((m = pcq_get(txq->txq_interq)) != NULL)
5973 			m_freem(m);
5974 		pcq_destroy(txq->txq_interq);
5975 
5976 		wm_free_tx_buffer(sc, txq);
5977 		wm_free_tx_descs(sc, txq);
5978 		if (txq->txq_lock)
5979 			mutex_obj_free(txq->txq_lock);
5980 	}
5981 
5982 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
5983 }
5984 
5985 static void
5986 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5987 {
5988 
5989 	KASSERT(mutex_owned(txq->txq_lock));
5990 
5991 	/* Initialize the transmit descriptor ring. */
5992 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
5993 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
5994 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5995 	txq->txq_free = WM_NTXDESC(txq);
5996 	txq->txq_next = 0;
5997 }
5998 
5999 static void
6000 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
6001     struct wm_txqueue *txq)
6002 {
6003 
6004 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
6005 		device_xname(sc->sc_dev), __func__));
6006 	KASSERT(mutex_owned(txq->txq_lock));
6007 
6008 	if (sc->sc_type < WM_T_82543) {
6009 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
6010 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
6011 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
6012 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
6013 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
6014 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
6015 	} else {
6016 		int qid = wmq->wmq_id;
6017 
6018 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
6019 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
6020 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
6021 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
6022 
6023 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6024 			/*
6025 			 * Don't write TDT before TCTL.EN is set.
6026 			 * See the documentation.
6027 			 */
6028 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
6029 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
6030 			    | TXDCTL_WTHRESH(0));
6031 		else {
6032 			/* ITR / 4 */
6033 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
6034 			if (sc->sc_type >= WM_T_82540) {
6035 				/* Should be the same value as TIDV. */
6036 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
6037 			}
6038 
6039 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
6040 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
6041 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
6042 		}
6043 	}
6044 }
6045 
6046 static void
6047 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
6048 {
6049 	int i;
6050 
6051 	KASSERT(mutex_owned(txq->txq_lock));
6052 
6053 	/* Initialize the transmit job descriptors. */
6054 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
6055 		txq->txq_soft[i].txs_mbuf = NULL;
6056 	txq->txq_sfree = WM_TXQUEUELEN(txq);
6057 	txq->txq_snext = 0;
6058 	txq->txq_sdirty = 0;
6059 }
6060 
6061 static void
6062 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
6063     struct wm_txqueue *txq)
6064 {
6065 
6066 	KASSERT(mutex_owned(txq->txq_lock));
6067 
6068 	/*
6069 	 * Set up some register offsets that are different between
6070 	 * the i82542 and the i82543 and later chips.
6071 	 */
6072 	if (sc->sc_type < WM_T_82543)
6073 		txq->txq_tdt_reg = WMREG_OLD_TDT;
6074 	else
6075 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
6076 
6077 	wm_init_tx_descs(sc, txq);
6078 	wm_init_tx_regs(sc, wmq, txq);
6079 	wm_init_tx_buffer(sc, txq);
6080 }
6081 
6082 static void
6083 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
6084     struct wm_rxqueue *rxq)
6085 {
6086 
6087 	KASSERT(mutex_owned(rxq->rxq_lock));
6088 
6089 	/*
6090 	 * Initialize the receive descriptor and receive job
6091 	 * descriptor rings.
6092 	 */
6093 	if (sc->sc_type < WM_T_82543) {
6094 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
6095 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
6096 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
6097 		    rxq->rxq_descsize * rxq->rxq_ndesc);
6098 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
6099 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
6100 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
6101 
6102 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
6103 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
6104 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
6105 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
6106 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
6107 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
6108 	} else {
6109 		int qid = wmq->wmq_id;
6110 
6111 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
6112 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
6113 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_descsize * rxq->rxq_ndesc);
6114 
6115 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
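			/*
			 * SRRCTL's BSIZEPKT field is in units of
			 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, so MCLBYTES
			 * must be a multiple of that unit.
			 */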
6116 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
6117 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
6118 
6119 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
6120 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
6121 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
6122 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
6123 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
6124 			    | RXDCTL_WTHRESH(1));
6125 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
6126 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
6127 		} else {
6128 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
6129 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
6130 			/* ITR / 4 */
6131 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
6132 			/* MUST be the same value as RDTR. */
6133 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
6134 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
6135 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
6136 		}
6137 	}
6138 }
6139 
6140 static int
6141 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
6142 {
6143 	struct wm_rxsoft *rxs;
6144 	int error, i;
6145 
6146 	KASSERT(mutex_owned(rxq->rxq_lock));
6147 
6148 	for (i = 0; i < rxq->rxq_ndesc; i++) {
6149 		rxs = &rxq->rxq_soft[i];
6150 		if (rxs->rxs_mbuf == NULL) {
6151 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
6152 				log(LOG_ERR, "%s: unable to allocate or map "
6153 				    "rx buffer %d, error = %d\n",
6154 				    device_xname(sc->sc_dev), i, error);
6155 				/*
6156 				 * XXX Should attempt to run with fewer receive
6157 				 * XXX buffers instead of just failing.
6158 				 */
6159 				wm_rxdrain(rxq);
6160 				return ENOMEM;
6161 			}
6162 		} else {
6163 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
6164 				wm_init_rxdesc(rxq, i);
6165 			/*
6166 			 * For 82575 and newer devices, the RX descriptors
6167 			 * must be initialized after RCTL.EN is set in
6168 			 * wm_set_filter().
6169 			 */
6170 		}
6171 	}
6172 	rxq->rxq_ptr = 0;
6173 	rxq->rxq_discard = 0;
6174 	WM_RXCHAIN_RESET(rxq);
6175 
6176 	return 0;
6177 }
6178 
6179 static int
6180 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
6181     struct wm_rxqueue *rxq)
6182 {
6183 
6184 	KASSERT(mutex_owned(rxq->rxq_lock));
6185 
6186 	/*
6187 	 * Set up some register offsets that are different between
6188 	 * the i82542 and the i82543 and later chips.
6189 	 */
6190 	if (sc->sc_type < WM_T_82543)
6191 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
6192 	else
6193 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
6194 
6195 	wm_init_rx_regs(sc, wmq, rxq);
6196 	return wm_init_rx_buffer(sc, rxq);
6197 }
6198 
6199 /*
6200  * wm_init_txrx_queues:
6201  *	Initialize {tx,rx} descriptors and {tx,rx} buffers
6202  */
6203 static int
6204 wm_init_txrx_queues(struct wm_softc *sc)
6205 {
6206 	int i, error = 0;
6207 
6208 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
6209 		device_xname(sc->sc_dev), __func__));
6210 
6211 	for (i = 0; i < sc->sc_nqueues; i++) {
6212 		struct wm_queue *wmq = &sc->sc_queue[i];
6213 		struct wm_txqueue *txq = &wmq->wmq_txq;
6214 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
6215 
6216 		mutex_enter(txq->txq_lock);
6217 		wm_init_tx_queue(sc, wmq, txq);
6218 		mutex_exit(txq->txq_lock);
6219 
6220 		mutex_enter(rxq->rxq_lock);
6221 		error = wm_init_rx_queue(sc, wmq, rxq);
6222 		mutex_exit(rxq->rxq_lock);
6223 		if (error)
6224 			break;
6225 	}
6226 
6227 	return error;
6228 }
6229 
6230 /*
6231  * wm_tx_offload:
6232  *
6233  *	Set up TCP/IP checksumming parameters for the
6234  *	specified packet.
6235  */
6236 static int
6237 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
6238     uint8_t *fieldsp)
6239 {
6240 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6241 	struct mbuf *m0 = txs->txs_mbuf;
6242 	struct livengood_tcpip_ctxdesc *t;
6243 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
6244 	uint32_t ipcse;
6245 	struct ether_header *eh;
6246 	int offset, iphl;
6247 	uint8_t fields;
6248 
6249 	/*
6250 	 * XXX It would be nice if the mbuf pkthdr had offset
6251 	 * fields for the protocol headers.
6252 	 */
6253 
6254 	eh = mtod(m0, struct ether_header *);
6255 	switch (htons(eh->ether_type)) {
6256 	case ETHERTYPE_IP:
6257 	case ETHERTYPE_IPV6:
6258 		offset = ETHER_HDR_LEN;
6259 		break;
6260 
6261 	case ETHERTYPE_VLAN:
6262 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
6263 		break;
6264 
6265 	default:
6266 		/*
6267 		 * Don't support this protocol or encapsulation.
6268 		 */
6269 		*fieldsp = 0;
6270 		*cmdp = 0;
6271 		return 0;
6272 	}
6273 
6274 	if ((m0->m_pkthdr.csum_flags &
6275 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
6276 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
6277 	} else {
6278 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
6279 	}
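	/* IPCSE is the inclusive offset of the last byte of the IP header. */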
6280 	ipcse = offset + iphl - 1;
6281 
6282 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
6283 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
6284 	seg = 0;
6285 	fields = 0;
6286 
6287 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
6288 		int hlen = offset + iphl;
6289 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
6290 
6291 		if (__predict_false(m0->m_len <
6292 				    (hlen + sizeof(struct tcphdr)))) {
6293 			/*
6294 			 * TCP/IP headers are not in the first mbuf; we need
6295 			 * to do this the slow and painful way.  Let's just
6296 			 * hope this doesn't happen very often.
6297 			 */
6298 			struct tcphdr th;
6299 
6300 			WM_Q_EVCNT_INCR(txq, txtsopain);
6301 
6302 			m_copydata(m0, hlen, sizeof(th), &th);
6303 			if (v4) {
6304 				struct ip ip;
6305 
6306 				m_copydata(m0, offset, sizeof(ip), &ip);
6307 				ip.ip_len = 0;
6308 				m_copyback(m0,
6309 				    offset + offsetof(struct ip, ip_len),
6310 				    sizeof(ip.ip_len), &ip.ip_len);
6311 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
6312 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
6313 			} else {
6314 				struct ip6_hdr ip6;
6315 
6316 				m_copydata(m0, offset, sizeof(ip6), &ip6);
6317 				ip6.ip6_plen = 0;
6318 				m_copyback(m0,
6319 				    offset + offsetof(struct ip6_hdr, ip6_plen),
6320 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
6321 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
6322 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
6323 			}
6324 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
6325 			    sizeof(th.th_sum), &th.th_sum);
6326 
6327 			hlen += th.th_off << 2;
6328 		} else {
6329 			/*
6330 			 * TCP/IP headers are in the first mbuf; we can do
6331 			 * this the easy way.
6332 			 */
6333 			struct tcphdr *th;
6334 
6335 			if (v4) {
6336 				struct ip *ip =
6337 				    (void *)(mtod(m0, char *) + offset);
6338 				th = (void *)(mtod(m0, char *) + hlen);
6339 
6340 				ip->ip_len = 0;
6341 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
6342 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
6343 			} else {
6344 				struct ip6_hdr *ip6 =
6345 				    (void *)(mtod(m0, char *) + offset);
6346 				th = (void *)(mtod(m0, char *) + hlen);
6347 
6348 				ip6->ip6_plen = 0;
6349 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
6350 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
6351 			}
6352 			hlen += th->th_off << 2;
6353 		}
6354 
6355 		if (v4) {
6356 			WM_Q_EVCNT_INCR(txq, txtso);
6357 			cmdlen |= WTX_TCPIP_CMD_IP;
6358 		} else {
6359 			WM_Q_EVCNT_INCR(txq, txtso6);
6360 			ipcse = 0;
6361 		}
6362 		cmd |= WTX_TCPIP_CMD_TSE;
6363 		cmdlen |= WTX_TCPIP_CMD_TSE |
6364 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
6365 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
6366 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
6367 	}
6368 
6369 	/*
6370 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
6371 	 * offload feature, if we load the context descriptor, we
6372 	 * MUST provide valid values for IPCSS and TUCSS fields.
6373 	 */
6374 
6375 	ipcs = WTX_TCPIP_IPCSS(offset) |
6376 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
6377 	    WTX_TCPIP_IPCSE(ipcse);
6378 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
6379 		WM_Q_EVCNT_INCR(txq, txipsum);
6380 		fields |= WTX_IXSM;
6381 	}
6382 
6383 	offset += iphl;
6384 
6385 	if (m0->m_pkthdr.csum_flags &
6386 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
6387 		WM_Q_EVCNT_INCR(txq, txtusum);
6388 		fields |= WTX_TXSM;
6389 		tucs = WTX_TCPIP_TUCSS(offset) |
6390 		    WTX_TCPIP_TUCSO(offset +
6391 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
6392 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
6393 	} else if ((m0->m_pkthdr.csum_flags &
6394 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
6395 		WM_Q_EVCNT_INCR(txq, txtusum6);
6396 		fields |= WTX_TXSM;
6397 		tucs = WTX_TCPIP_TUCSS(offset) |
6398 		    WTX_TCPIP_TUCSO(offset +
6399 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
6400 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
6401 	} else {
6402 		/* Just initialize it to a valid TCP context. */
6403 		tucs = WTX_TCPIP_TUCSS(offset) |
6404 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
6405 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
6406 	}
6407 
6408 	/* Fill in the context descriptor. */
6409 	t = (struct livengood_tcpip_ctxdesc *)
6410 	    &txq->txq_descs[txq->txq_next];
6411 	t->tcpip_ipcs = htole32(ipcs);
6412 	t->tcpip_tucs = htole32(tucs);
6413 	t->tcpip_cmdlen = htole32(cmdlen);
6414 	t->tcpip_seg = htole32(seg);
6415 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6416 
6417 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6418 	txs->txs_ndesc++;
6419 
6420 	*cmdp = cmd;
6421 	*fieldsp = fields;
6422 
6423 	return 0;
6424 }
6425 
6426 static inline int
6427 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
6428 {
6429 	struct wm_softc *sc = ifp->if_softc;
6430 	u_int cpuid = cpu_index(curcpu());
6431 
6432 	/*
6433 	 * Currently, a simple distribution strategy.
6434 	 * TODO:
6435 	 * distribute by flowid (RSS hash value).
6436 	 */
6437 	return (cpuid + sc->sc_affinity_offset) % sc->sc_nqueues;
6438 }
6439 
6440 /*
6441  * wm_start:		[ifnet interface function]
6442  *
6443  *	Start packet transmission on the interface.
6444  */
6445 static void
6446 wm_start(struct ifnet *ifp)
6447 {
6448 	struct wm_softc *sc = ifp->if_softc;
6449 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6450 
6451 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
6452 
6453 	/*
6454 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
6455 	 */
6456 
6457 	mutex_enter(txq->txq_lock);
6458 	if (!txq->txq_stopping)
6459 		wm_start_locked(ifp);
6460 	mutex_exit(txq->txq_lock);
6461 }
6462 
6463 static void
6464 wm_start_locked(struct ifnet *ifp)
6465 {
6466 	struct wm_softc *sc = ifp->if_softc;
6467 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6468 
6469 	wm_send_common_locked(ifp, txq, false);
6470 }
6471 
6472 static int
6473 wm_transmit(struct ifnet *ifp, struct mbuf *m)
6474 {
6475 	int qid;
6476 	struct wm_softc *sc = ifp->if_softc;
6477 	struct wm_txqueue *txq;
6478 
6479 	qid = wm_select_txqueue(ifp, m);
6480 	txq = &sc->sc_queue[qid].wmq_txq;
6481 
6482 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
6483 		m_freem(m);
6484 		WM_Q_EVCNT_INCR(txq, txdrop);
6485 		return ENOBUFS;
6486 	}
6487 
6488 	/*
6489 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
6490 	 */
6491 	ifp->if_obytes += m->m_pkthdr.len;
6492 	if (m->m_flags & M_MCAST)
6493 		ifp->if_omcasts++;
6494 
6495 	if (mutex_tryenter(txq->txq_lock)) {
6496 		if (!txq->txq_stopping)
6497 			wm_transmit_locked(ifp, txq);
6498 		mutex_exit(txq->txq_lock);
6499 	}
6500 
6501 	return 0;
6502 }
6503 
6504 static void
6505 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
6506 {
6507 
6508 	wm_send_common_locked(ifp, txq, true);
6509 }
6510 
6511 static void
6512 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
6513     bool is_transmit)
6514 {
6515 	struct wm_softc *sc = ifp->if_softc;
6516 	struct mbuf *m0;
6517 	struct m_tag *mtag;
6518 	struct wm_txsoft *txs;
6519 	bus_dmamap_t dmamap;
6520 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
6521 	bus_addr_t curaddr;
6522 	bus_size_t seglen, curlen;
6523 	uint32_t cksumcmd;
6524 	uint8_t cksumfields;
6525 
6526 	KASSERT(mutex_owned(txq->txq_lock));
6527 
6528 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
6529 		return;
6530 
6531 	/* Remember the previous number of free descriptors. */
6532 	ofree = txq->txq_free;
6533 
6534 	/*
6535 	 * Loop through the send queue, setting up transmit descriptors
6536 	 * until we drain the queue, or use up all available transmit
6537 	 * descriptors.
6538 	 */
6539 	for (;;) {
6540 		m0 = NULL;
6541 
6542 		/* Get a work queue entry. */
6543 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
6544 			wm_txeof(sc, txq);
6545 			if (txq->txq_sfree == 0) {
6546 				DPRINTF(WM_DEBUG_TX,
6547 				    ("%s: TX: no free job descriptors\n",
6548 					device_xname(sc->sc_dev)));
6549 				WM_Q_EVCNT_INCR(txq, txsstall);
6550 				break;
6551 			}
6552 		}
6553 
6554 		/* Grab a packet off the queue. */
6555 		if (is_transmit)
6556 			m0 = pcq_get(txq->txq_interq);
6557 		else
6558 			IFQ_DEQUEUE(&ifp->if_snd, m0);
6559 		if (m0 == NULL)
6560 			break;
6561 
6562 		DPRINTF(WM_DEBUG_TX,
6563 		    ("%s: TX: have packet to transmit: %p\n",
6564 		    device_xname(sc->sc_dev), m0));
6565 
6566 		txs = &txq->txq_soft[txq->txq_snext];
6567 		dmamap = txs->txs_dmamap;
6568 
6569 		use_tso = (m0->m_pkthdr.csum_flags &
6570 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
6571 
6572 		/*
6573 		 * So says the Linux driver:
6574 		 * The controller does a simple calculation to make sure
6575 		 * there is enough room in the FIFO before initiating the
6576 		 * DMA for each buffer.  The calc is:
6577 		 *	4 = ceil(buffer len / MSS)
6578 		 * To make sure we don't overrun the FIFO, adjust the max
6579 		 * buffer len if the MSS drops.
6580 		 */
6581 		dmamap->dm_maxsegsz =
6582 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
6583 		    ? m0->m_pkthdr.segsz << 2
6584 		    : WTX_MAX_LEN;
6585 
6586 		/*
6587 		 * Load the DMA map.  If this fails, the packet either
6588 		 * didn't fit in the allotted number of segments, or we
6589 		 * were short on resources.  For the too-many-segments
6590 		 * case, we simply report an error and drop the packet,
6591 		 * since we can't sanely copy a jumbo packet to a single
6592 		 * buffer.
6593 		 */
6594 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
6595 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
6596 		if (error) {
6597 			if (error == EFBIG) {
6598 				WM_Q_EVCNT_INCR(txq, txdrop);
6599 				log(LOG_ERR, "%s: Tx packet consumes too many "
6600 				    "DMA segments, dropping...\n",
6601 				    device_xname(sc->sc_dev));
6602 				wm_dump_mbuf_chain(sc, m0);
6603 				m_freem(m0);
6604 				continue;
6605 			}
6606 			/* Short on resources, just stop for now. */
6607 			DPRINTF(WM_DEBUG_TX,
6608 			    ("%s: TX: dmamap load failed: %d\n",
6609 			    device_xname(sc->sc_dev), error));
6610 			break;
6611 		}
6612 
6613 		segs_needed = dmamap->dm_nsegs;
6614 		if (use_tso) {
6615 			/* For sentinel descriptor; see below. */
6616 			segs_needed++;
6617 		}
6618 
6619 		/*
6620 		 * Ensure we have enough descriptors free to describe
6621 		 * the packet.  Note, we always reserve one descriptor
6622 		 * at the end of the ring due to the semantics of the
6623 		 * TDT register, plus one more in the event we need
6624 		 * to load offload context.
6625 		 */
6626 		if (segs_needed > txq->txq_free - 2) {
6627 			/*
6628 			 * Not enough free descriptors to transmit this
6629 			 * packet.  We haven't committed anything yet,
6630 			 * so just unload the DMA map, put the packet
6631 			 * back on the queue, and punt.  Notify the upper
6632 			 * layer that there are no more slots left.
6633 			 */
6634 			DPRINTF(WM_DEBUG_TX,
6635 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
6636 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
6637 			    segs_needed, txq->txq_free - 1));
6638 			ifp->if_flags |= IFF_OACTIVE;
6639 			bus_dmamap_unload(sc->sc_dmat, dmamap);
6640 			WM_Q_EVCNT_INCR(txq, txdstall);
6641 			break;
6642 		}
6643 
6644 		/*
6645 		 * Check for 82547 Tx FIFO bug.  We need to do this
6646 		 * once we know we can transmit the packet, since we
6647 		 * do some internal FIFO space accounting here.
6648 		 */
6649 		if (sc->sc_type == WM_T_82547 &&
6650 		    wm_82547_txfifo_bugchk(sc, m0)) {
6651 			DPRINTF(WM_DEBUG_TX,
6652 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
6653 			    device_xname(sc->sc_dev)));
6654 			ifp->if_flags |= IFF_OACTIVE;
6655 			bus_dmamap_unload(sc->sc_dmat, dmamap);
6656 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
6657 			break;
6658 		}
6659 
6660 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
6661 
6662 		DPRINTF(WM_DEBUG_TX,
6663 		    ("%s: TX: packet has %d (%d) DMA segments\n",
6664 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
6665 
6666 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
6667 
6668 		/*
6669 		 * Store a pointer to the packet so that we can free it
6670 		 * later.
6671 		 *
6672 		 * Initially, we consider the number of descriptors the
6673 		 * packet uses the number of DMA segments.  This may be
6674 		 * incremented by 1 if we do checksum offload (a descriptor
6675 		 * is used to set the checksum context).
6676 		 */
6677 		txs->txs_mbuf = m0;
6678 		txs->txs_firstdesc = txq->txq_next;
6679 		txs->txs_ndesc = segs_needed;
6680 
6681 		/* Set up offload parameters for this packet. */
6682 		if (m0->m_pkthdr.csum_flags &
6683 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
6684 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
6685 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
6686 			if (wm_tx_offload(sc, txs, &cksumcmd,
6687 					  &cksumfields) != 0) {
6688 				/* Error message already displayed. */
6689 				bus_dmamap_unload(sc->sc_dmat, dmamap);
6690 				continue;
6691 			}
6692 		} else {
6693 			cksumcmd = 0;
6694 			cksumfields = 0;
6695 		}
6696 
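		/*
		 * Request interrupt delay (IDE) and FCS insertion (IFCS)
		 * on every data descriptor.
		 */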
6697 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
6698 
6699 		/* Sync the DMA map. */
6700 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
6701 		    BUS_DMASYNC_PREWRITE);
6702 
6703 		/* Initialize the transmit descriptor. */
6704 		for (nexttx = txq->txq_next, seg = 0;
6705 		     seg < dmamap->dm_nsegs; seg++) {
6706 			for (seglen = dmamap->dm_segs[seg].ds_len,
6707 			     curaddr = dmamap->dm_segs[seg].ds_addr;
6708 			     seglen != 0;
6709 			     curaddr += curlen, seglen -= curlen,
6710 			     nexttx = WM_NEXTTX(txq, nexttx)) {
6711 				curlen = seglen;
6712 
6713 				/*
6714 				 * So says the Linux driver:
6715 				 * Work around for premature descriptor
6716 				 * write-backs in TSO mode.  Append a
6717 				 * 4-byte sentinel descriptor.
6718 				 */
6719 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
6720 				    curlen > 8)
6721 					curlen -= 4;
6722 
6723 				wm_set_dma_addr(
6724 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
6725 				txq->txq_descs[nexttx].wtx_cmdlen
6726 				    = htole32(cksumcmd | curlen);
6727 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
6728 				    = 0;
6729 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
6730 				    = cksumfields;
6731 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
6732 				lasttx = nexttx;
6733 
6734 				DPRINTF(WM_DEBUG_TX,
6735 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
6736 				     "len %#04zx\n",
6737 				    device_xname(sc->sc_dev), nexttx,
6738 				    (uint64_t)curaddr, curlen));
6739 			}
6740 		}
6741 
6742 		KASSERT(lasttx != -1);
6743 
6744 		/*
6745 		 * Set up the command byte on the last descriptor of
6746 		 * the packet.  If we're in the interrupt delay window,
6747 		 * delay the interrupt.
6748 		 */
6749 		txq->txq_descs[lasttx].wtx_cmdlen |=
6750 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
6751 
6752 		/*
6753 		 * If VLANs are enabled and the packet has a VLAN tag, set
6754 		 * up the descriptor to encapsulate the packet for us.
6755 		 *
6756 		 * This is only valid on the last descriptor of the packet.
6757 		 */
6758 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6759 			txq->txq_descs[lasttx].wtx_cmdlen |=
6760 			    htole32(WTX_CMD_VLE);
6761 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
6762 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
6763 		}
6764 
6765 		txs->txs_lastdesc = lasttx;
6766 
6767 		DPRINTF(WM_DEBUG_TX,
6768 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
6769 		    device_xname(sc->sc_dev),
6770 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
6771 
6772 		/* Sync the descriptors we're using. */
6773 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
6774 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
6775 
6776 		/* Give the packet to the chip. */
6777 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
6778 
6779 		DPRINTF(WM_DEBUG_TX,
6780 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
6781 
6782 		DPRINTF(WM_DEBUG_TX,
6783 		    ("%s: TX: finished transmitting packet, job %d\n",
6784 		    device_xname(sc->sc_dev), txq->txq_snext));
6785 
6786 		/* Advance the tx pointer. */
6787 		txq->txq_free -= txs->txs_ndesc;
6788 		txq->txq_next = nexttx;
6789 
6790 		txq->txq_sfree--;
6791 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
6792 
6793 		/* Pass the packet to any BPF listeners. */
6794 		bpf_mtap(ifp, m0);
6795 	}
6796 
6797 	if (m0 != NULL) {
6798 		ifp->if_flags |= IFF_OACTIVE;
6799 		WM_Q_EVCNT_INCR(txq, txdrop);
6800 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
6801 			__func__));
6802 		m_freem(m0);
6803 	}
6804 
6805 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
6806 		/* No more slots; notify upper layer. */
6807 		ifp->if_flags |= IFF_OACTIVE;
6808 	}
6809 
6810 	if (txq->txq_free != ofree) {
6811 		/* Set a watchdog timer in case the chip flakes out. */
6812 		ifp->if_timer = 5;
6813 	}
6814 }
6815 
6816 /*
6817  * wm_nq_tx_offload:
6818  *
6819  *	Set up TCP/IP checksumming parameters for the
6820  *	specified packet, for NEWQUEUE devices
6821  */
6822 static int
6823 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
6824     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
6825 {
6826 	struct mbuf *m0 = txs->txs_mbuf;
6827 	struct m_tag *mtag;
6828 	uint32_t vl_len, mssidx, cmdc;
6829 	struct ether_header *eh;
6830 	int offset, iphl;
6831 
6832 	/*
6833 	 * XXX It would be nice if the mbuf pkthdr had offset
6834 	 * fields for the protocol headers.
6835 	 */
6836 	*cmdlenp = 0;
6837 	*fieldsp = 0;
6838 
6839 	eh = mtod(m0, struct ether_header *);
6840 	switch (htons(eh->ether_type)) {
6841 	case ETHERTYPE_IP:
6842 	case ETHERTYPE_IPV6:
6843 		offset = ETHER_HDR_LEN;
6844 		break;
6845 
6846 	case ETHERTYPE_VLAN:
6847 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
6848 		break;
6849 
6850 	default:
6851 		/* Don't support this protocol or encapsulation. */
6852 		*do_csum = false;
6853 		return 0;
6854 	}
6855 	*do_csum = true;
6856 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
6857 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
6858 
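	/*
	 * Build the context descriptor's VLAN/length word: the MAC
	 * header length, IP header length and (if present) the VLAN
	 * tag are packed into "vl_len".
	 */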
6859 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
6860 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
6861 
6862 	if ((m0->m_pkthdr.csum_flags &
6863 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
6864 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
6865 	} else {
6866 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
6867 	}
6868 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
6869 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
6870 
6871 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6872 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
6873 		     << NQTXC_VLLEN_VLAN_SHIFT);
6874 		*cmdlenp |= NQTX_CMD_VLE;
6875 	}
6876 
6877 	mssidx = 0;
6878 
6879 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
6880 		int hlen = offset + iphl;
6881 		int tcp_hlen;
6882 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
6883 
6884 		if (__predict_false(m0->m_len <
6885 				    (hlen + sizeof(struct tcphdr)))) {
6886 			/*
6887 			 * TCP/IP headers are not in the first mbuf; we need
6888 			 * to do this the slow and painful way.  Let's just
6889 			 * hope this doesn't happen very often.
6890 			 */
6891 			struct tcphdr th;
6892 
6893 			WM_Q_EVCNT_INCR(txq, txtsopain);
6894 
6895 			m_copydata(m0, hlen, sizeof(th), &th);
6896 			if (v4) {
6897 				struct ip ip;
6898 
6899 				m_copydata(m0, offset, sizeof(ip), &ip);
6900 				ip.ip_len = 0;
6901 				m_copyback(m0,
6902 				    offset + offsetof(struct ip, ip_len),
6903 				    sizeof(ip.ip_len), &ip.ip_len);
6904 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
6905 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
6906 			} else {
6907 				struct ip6_hdr ip6;
6908 
6909 				m_copydata(m0, offset, sizeof(ip6), &ip6);
6910 				ip6.ip6_plen = 0;
6911 				m_copyback(m0,
6912 				    offset + offsetof(struct ip6_hdr, ip6_plen),
6913 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
6914 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
6915 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
6916 			}
6917 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
6918 			    sizeof(th.th_sum), &th.th_sum);
6919 
6920 			tcp_hlen = th.th_off << 2;
6921 		} else {
6922 			/*
6923 			 * TCP/IP headers are in the first mbuf; we can do
6924 			 * this the easy way.
6925 			 */
6926 			struct tcphdr *th;
6927 
6928 			if (v4) {
6929 				struct ip *ip =
6930 				    (void *)(mtod(m0, char *) + offset);
6931 				th = (void *)(mtod(m0, char *) + hlen);
6932 
6933 				ip->ip_len = 0;
6934 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
6935 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
6936 			} else {
6937 				struct ip6_hdr *ip6 =
6938 				    (void *)(mtod(m0, char *) + offset);
6939 				th = (void *)(mtod(m0, char *) + hlen);
6940 
6941 				ip6->ip6_plen = 0;
6942 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
6943 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
6944 			}
6945 			tcp_hlen = th->th_off << 2;
6946 		}
6947 		hlen += tcp_hlen;
6948 		*cmdlenp |= NQTX_CMD_TSE;
6949 
6950 		if (v4) {
6951 			WM_Q_EVCNT_INCR(txq, txtso);
6952 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
6953 		} else {
6954 			WM_Q_EVCNT_INCR(txq, txtso6);
6955 			*fieldsp |= NQTXD_FIELDS_TUXSM;
6956 		}
6957 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
6958 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6959 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
6960 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
6961 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
6962 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
6963 	} else {
6964 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
6965 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6966 	}
6967 
6968 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
6969 		*fieldsp |= NQTXD_FIELDS_IXSM;
6970 		cmdc |= NQTXC_CMD_IP4;
6971 	}
6972 
6973 	if (m0->m_pkthdr.csum_flags &
6974 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6975 		WM_Q_EVCNT_INCR(txq, txtusum);
6976 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6977 			cmdc |= NQTXC_CMD_TCP;
6978 		} else {
6979 			cmdc |= NQTXC_CMD_UDP;
6980 		}
6981 		cmdc |= NQTXC_CMD_IP4;
6982 		*fieldsp |= NQTXD_FIELDS_TUXSM;
6983 	}
6984 	if (m0->m_pkthdr.csum_flags &
6985 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6986 		WM_Q_EVCNT_INCR(txq, txtusum6);
6987 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6988 			cmdc |= NQTXC_CMD_TCP;
6989 		} else {
6990 			cmdc |= NQTXC_CMD_UDP;
6991 		}
6992 		cmdc |= NQTXC_CMD_IP6;
6993 		*fieldsp |= NQTXD_FIELDS_TUXSM;
6994 	}
6995 
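	/*
	 * A rough sketch of how the values assembled above are packed into
	 * the context descriptor (as implied by the shift/mask macros used):
	 *	vl_len: VLAN tag | IP header length | MAC header length
	 *	mssidx: MSS (for TSO) | L4 (TCP) header length
	 * The context descriptor occupies one slot in the same ring as the
	 * data descriptors and applies to the packet that follows it.
	 */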
6996 	/* Fill in the context descriptor. */
6997 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
6998 	    htole32(vl_len);
6999 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
7000 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
7001 	    htole32(cmdc);
7002 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
7003 	    htole32(mssidx);
7004 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
7005 	DPRINTF(WM_DEBUG_TX,
7006 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
7007 	    txq->txq_next, 0, vl_len));
7008 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
7009 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
7010 	txs->txs_ndesc++;
7011 	return 0;
7012 }
7013 
7014 /*
7015  * wm_nq_start:		[ifnet interface function]
7016  *
7017  *	Start packet transmission on the interface for NEWQUEUE devices
7018  */
7019 static void
7020 wm_nq_start(struct ifnet *ifp)
7021 {
7022 	struct wm_softc *sc = ifp->if_softc;
7023 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7024 
7025 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
7026 
7027 	/*
7028 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
7029 	 */
7030 
7031 	mutex_enter(txq->txq_lock);
7032 	if (!txq->txq_stopping)
7033 		wm_nq_start_locked(ifp);
7034 	mutex_exit(txq->txq_lock);
7035 }
7036 
7037 static void
7038 wm_nq_start_locked(struct ifnet *ifp)
7039 {
7040 	struct wm_softc *sc = ifp->if_softc;
7041 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7042 
7043 	wm_nq_send_common_locked(ifp, txq, false);
7044 }
7045 
7046 static int
7047 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
7048 {
7049 	int qid;
7050 	struct wm_softc *sc = ifp->if_softc;
7051 	struct wm_txqueue *txq;
7052 
7053 	qid = wm_select_txqueue(ifp, m);
7054 	txq = &sc->sc_queue[qid].wmq_txq;
7055 
7056 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
7057 		m_freem(m);
7058 		WM_Q_EVCNT_INCR(txq, txdrop);
7059 		return ENOBUFS;
7060 	}
7061 
7062 	/*
7063 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
7064 	 */
7065 	ifp->if_obytes += m->m_pkthdr.len;
7066 	if (m->m_flags & M_MCAST)
7067 		ifp->if_omcasts++;
7068 
7069 	/*
7070 	 * There are two situations in which this mutex_tryenter() can fail
7071 	 * at run time:
7072 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
7073 	 *     (2) contention with the deferred if_start softint (wm_deferred_start())
7074 	 * In case (1), the last packet enqueued to txq->txq_interq is
7075 	 * dequeued by wm_deferred_start(), so it does not get stuck.
7076 	 * In case (2), the last packet enqueued to txq->txq_interq is likewise
7077 	 * dequeued by wm_deferred_start(), so it does not get stuck either.
7078 	 */
7079 	if (mutex_tryenter(txq->txq_lock)) {
7080 		if (!txq->txq_stopping)
7081 			wm_nq_transmit_locked(ifp, txq);
7082 		mutex_exit(txq->txq_lock);
7083 	}
7084 
7085 	return 0;
7086 }
7087 
7088 static void
7089 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
7090 {
7091 
7092 	wm_nq_send_common_locked(ifp, txq, true);
7093 }
7094 
7095 static void
7096 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
7097     bool is_transmit)
7098 {
7099 	struct wm_softc *sc = ifp->if_softc;
7100 	struct mbuf *m0;
7101 	struct m_tag *mtag;
7102 	struct wm_txsoft *txs;
7103 	bus_dmamap_t dmamap;
7104 	int error, nexttx, lasttx = -1, seg, segs_needed;
7105 	bool do_csum, sent;
7106 
7107 	KASSERT(mutex_owned(txq->txq_lock));
7108 
7109 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
7110 		return;
7111 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
7112 		return;
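	/*
	 * Note: WM_TXQ_NO_SPACE is cleared again by wm_txeof() once the
	 * chip has retired descriptors and space has been reclaimed.
	 */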
7113 
7114 	sent = false;
7115 
7116 	/*
7117 	 * Loop through the send queue, setting up transmit descriptors
7118 	 * until we drain the queue, or use up all available transmit
7119 	 * descriptors.
7120 	 */
7121 	for (;;) {
7122 		m0 = NULL;
7123 
7124 		/* Get a work queue entry. */
7125 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
7126 			wm_txeof(sc, txq);
7127 			if (txq->txq_sfree == 0) {
7128 				DPRINTF(WM_DEBUG_TX,
7129 				    ("%s: TX: no free job descriptors\n",
7130 					device_xname(sc->sc_dev)));
7131 				WM_Q_EVCNT_INCR(txq, txsstall);
7132 				break;
7133 			}
7134 		}
7135 
7136 		/* Grab a packet off the queue. */
7137 		if (is_transmit)
7138 			m0 = pcq_get(txq->txq_interq);
7139 		else
7140 			IFQ_DEQUEUE(&ifp->if_snd, m0);
7141 		if (m0 == NULL)
7142 			break;
7143 
7144 		DPRINTF(WM_DEBUG_TX,
7145 		    ("%s: TX: have packet to transmit: %p\n",
7146 		    device_xname(sc->sc_dev), m0));
7147 
7148 		txs = &txq->txq_soft[txq->txq_snext];
7149 		dmamap = txs->txs_dmamap;
7150 
7151 		/*
7152 		 * Load the DMA map.  If this fails, the packet either
7153 		 * didn't fit in the allotted number of segments, or we
7154 		 * were short on resources.  For the too-many-segments
7155 		 * case, we simply report an error and drop the packet,
7156 		 * since we can't sanely copy a jumbo packet to a single
7157 		 * buffer.
7158 		 */
7159 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
7160 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
7161 		if (error) {
7162 			if (error == EFBIG) {
7163 				WM_Q_EVCNT_INCR(txq, txdrop);
7164 				log(LOG_ERR, "%s: Tx packet consumes too many "
7165 				    "DMA segments, dropping...\n",
7166 				    device_xname(sc->sc_dev));
7167 				wm_dump_mbuf_chain(sc, m0);
7168 				m_freem(m0);
7169 				continue;
7170 			}
7171 			/* Short on resources, just stop for now. */
7172 			DPRINTF(WM_DEBUG_TX,
7173 			    ("%s: TX: dmamap load failed: %d\n",
7174 			    device_xname(sc->sc_dev), error));
7175 			break;
7176 		}
7177 
7178 		segs_needed = dmamap->dm_nsegs;
7179 
7180 		/*
7181 		 * Ensure we have enough descriptors free to describe
7182 		 * the packet.  Note, we always reserve one descriptor
7183 		 * at the end of the ring due to the semantics of the
7184 		 * TDT register, plus one more in the event we need
7185 		 * to load offload context.
7186 		 */
7187 		if (segs_needed > txq->txq_free - 2) {
7188 			/*
7189 			 * Not enough free descriptors to transmit this
7190 			 * packet.  We haven't committed anything yet,
7191 			 * so just unload the DMA map, put the packet
7192 			 * pack on the queue, and punt.  Notify the upper
7193 			 * back on the queue, and punt.  Notify the upper
7194 			 */
7195 			DPRINTF(WM_DEBUG_TX,
7196 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
7197 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
7198 			    segs_needed, txq->txq_free - 1));
7199 			txq->txq_flags |= WM_TXQ_NO_SPACE;
7200 			bus_dmamap_unload(sc->sc_dmat, dmamap);
7201 			WM_Q_EVCNT_INCR(txq, txdstall);
7202 			break;
7203 		}
7204 
7205 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
7206 
7207 		DPRINTF(WM_DEBUG_TX,
7208 		    ("%s: TX: packet has %d (%d) DMA segments\n",
7209 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
7210 
7211 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
7212 
7213 		/*
7214 		 * Store a pointer to the packet so that we can free it
7215 		 * later.
7216 		 *
7217 		 * Initially, we take the number of descriptors the packet
7218 		 * uses to be the number of DMA segments.  This may be
7219 		 * incremented by 1 if we do checksum offload (a descriptor
7220 		 * is used to set the checksum context).
7221 		 */
7222 		txs->txs_mbuf = m0;
7223 		txs->txs_firstdesc = txq->txq_next;
7224 		txs->txs_ndesc = segs_needed;
7225 
7226 		/* Set up offload parameters for this packet. */
7227 		uint32_t cmdlen, fields, dcmdlen;
7228 		if (m0->m_pkthdr.csum_flags &
7229 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
7230 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7231 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
7232 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
7233 			    &do_csum) != 0) {
7234 				/* Error message already displayed. */
7235 				bus_dmamap_unload(sc->sc_dmat, dmamap);
7236 				continue;
7237 			}
7238 		} else {
7239 			do_csum = false;
7240 			cmdlen = 0;
7241 			fields = 0;
7242 		}
7243 
7244 		/* Sync the DMA map. */
7245 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
7246 		    BUS_DMASYNC_PREWRITE);
7247 
7248 		/* Initialize the first transmit descriptor. */
7249 		nexttx = txq->txq_next;
7250 		if (!do_csum) {
7251 			/* setup a legacy descriptor */
7252 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
7253 			    dmamap->dm_segs[0].ds_addr);
7254 			txq->txq_descs[nexttx].wtx_cmdlen =
7255 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
7256 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
7257 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
7258 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
7259 			    NULL) {
7260 				txq->txq_descs[nexttx].wtx_cmdlen |=
7261 				    htole32(WTX_CMD_VLE);
7262 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
7263 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
7264 			} else {
7265 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
7266 			}
7267 			dcmdlen = 0;
7268 		} else {
7269 			/* setup an advanced data descriptor */
7270 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
7271 			    htole64(dmamap->dm_segs[0].ds_addr);
7272 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
7273 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
7274 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
7275 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
7276 			    htole32(fields);
7277 			DPRINTF(WM_DEBUG_TX,
7278 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
7279 			    device_xname(sc->sc_dev), nexttx,
7280 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
7281 			DPRINTF(WM_DEBUG_TX,
7282 			    ("\t 0x%08x%08x\n", fields,
7283 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
7284 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
7285 		}
7286 
7287 		lasttx = nexttx;
7288 		nexttx = WM_NEXTTX(txq, nexttx);
7289 		/*
7290 		 * Fill in the next descriptors.  The legacy and advanced
7291 		 * formats are the same here.
7292 		 */
7293 		for (seg = 1; seg < dmamap->dm_nsegs;
7294 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
7295 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
7296 			    htole64(dmamap->dm_segs[seg].ds_addr);
7297 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
7298 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
7299 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
7300 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
7301 			lasttx = nexttx;
7302 
7303 			DPRINTF(WM_DEBUG_TX,
7304 			    ("%s: TX: desc %d: %#" PRIx64 ", "
7305 			     "len %#04zx\n",
7306 			    device_xname(sc->sc_dev), nexttx,
7307 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
7308 			    dmamap->dm_segs[seg].ds_len));
7309 		}
7310 
7311 		KASSERT(lasttx != -1);
7312 
7313 		/*
7314 		 * Set up the command byte on the last descriptor of
7315 		 * the packet.  If we're in the interrupt delay window,
7316 		 * delay the interrupt.
7317 		 */
7318 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
7319 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
7320 		txq->txq_descs[lasttx].wtx_cmdlen |=
7321 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
7322 
7323 		txs->txs_lastdesc = lasttx;
7324 
7325 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
7326 		    device_xname(sc->sc_dev),
7327 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
7328 
7329 		/* Sync the descriptors we're using. */
7330 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
7331 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
7332 
7333 		/* Give the packet to the chip. */
7334 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
7335 		sent = true;
7336 
7337 		DPRINTF(WM_DEBUG_TX,
7338 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
7339 
7340 		DPRINTF(WM_DEBUG_TX,
7341 		    ("%s: TX: finished transmitting packet, job %d\n",
7342 		    device_xname(sc->sc_dev), txq->txq_snext));
7343 
7344 		/* Advance the tx pointer. */
7345 		txq->txq_free -= txs->txs_ndesc;
7346 		txq->txq_next = nexttx;
7347 
7348 		txq->txq_sfree--;
7349 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
7350 
7351 		/* Pass the packet to any BPF listeners. */
7352 		bpf_mtap(ifp, m0);
7353 	}
7354 
7355 	if (m0 != NULL) {
7356 		txq->txq_flags |= WM_TXQ_NO_SPACE;
7357 		WM_Q_EVCNT_INCR(txq, txdrop);
7358 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
7359 			__func__));
7360 		m_freem(m0);
7361 	}
7362 
7363 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
7364 		/* No more slots; notify upper layer. */
7365 		txq->txq_flags |= WM_TXQ_NO_SPACE;
7366 	}
7367 
7368 	if (sent) {
7369 		/* Set a watchdog timer in case the chip flakes out. */
7370 		ifp->if_timer = 5;
7371 	}
7372 }
7373 
7374 static void
7375 wm_deferred_start(struct ifnet *ifp)
7376 {
7377 	struct wm_softc *sc = ifp->if_softc;
7378 	int qid = 0;
7379 
7380 	/*
7381 	 * Try to transmit on all Tx queues.  Passing the relevant txq in
7382 	 * somehow and transmitting only on that queue might be better.
7383 	 */
7384 	for (; qid < sc->sc_nqueues; qid++) {
7385 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
7386 
7387 		/*
7388 		 * We must mutex_enter(txq->txq_lock) instead of
7389 		 * mutex_tryenter(txq->txq_lock) here.
7390 		 * mutex_tryenter(txq->txq_lock) can fail while this txq's
7391 		 * txq_stopping flag is being set.  In that case the device is
7392 		 * beginning to stop, so we must not start any Tx processing;
7393 		 * however, with mutex_tryenter() we could still start Tx
7394 		 * processing for sc_queue[qid+1].
7395 		 */
7396 		mutex_enter(txq->txq_lock);
7397 		if (txq->txq_stopping) {
7398 			mutex_exit(txq->txq_lock);
7399 			return;
7400 		}
7401 
7402 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7403 			/* XXX needed for ALTQ */
7404 			if (qid == 0)
7405 				wm_nq_start_locked(ifp);
7406 			wm_nq_transmit_locked(ifp, txq);
7407 		} else {
7408 			/* XXX needed for ALTQ */
7409 			if (qid == 0)
7410 				wm_start_locked(ifp);
7411 			wm_transmit_locked(ifp, txq);
7412 		}
7413 		mutex_exit(txq->txq_lock);
7414 	}
7415 }
7416 
7417 /* Interrupt */
7418 
7419 /*
7420  * wm_txeof:
7421  *
7422  *	Helper; handle transmit interrupts.
7423  */
7424 static int
7425 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
7426 {
7427 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7428 	struct wm_txsoft *txs;
7429 	bool processed = false;
7430 	int count = 0;
7431 	int i;
7432 	uint8_t status;
7433 
7434 	KASSERT(mutex_owned(txq->txq_lock));
7435 
7436 	if (txq->txq_stopping)
7437 		return 0;
7438 
7439 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7440 		txq->txq_flags &= ~WM_TXQ_NO_SPACE;
7441 	else
7442 		ifp->if_flags &= ~IFF_OACTIVE;
7443 
7444 	/*
7445 	 * Go through the Tx list and free mbufs for those
7446 	 * frames which have been transmitted.
7447 	 */
7448 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
7449 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
7450 		txs = &txq->txq_soft[i];
7451 
7452 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
7453 			device_xname(sc->sc_dev), i));
7454 
7455 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
7456 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
7457 
7458 		status =
7459 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
7460 		if ((status & WTX_ST_DD) == 0) {
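			/*
			 * The descriptor-done bit is not set yet; re-sync
			 * for PREREAD so we observe the DD bit once the
			 * chip sets it, and stop scanning here.
			 */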
7461 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
7462 			    BUS_DMASYNC_PREREAD);
7463 			break;
7464 		}
7465 
7466 		processed = true;
7467 		count++;
7468 		DPRINTF(WM_DEBUG_TX,
7469 		    ("%s: TX: job %d done: descs %d..%d\n",
7470 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
7471 		    txs->txs_lastdesc));
7472 
7473 		/*
7474 		 * XXX We should probably be using the statistics
7475 		 * XXX registers, but I don't know if they exist
7476 		 * XXX on chips before the i82544.
7477 		 */
7478 
7479 #ifdef WM_EVENT_COUNTERS
7480 		if (status & WTX_ST_TU)
7481 			WM_Q_EVCNT_INCR(txq, tu);
7482 #endif /* WM_EVENT_COUNTERS */
7483 
7484 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
7485 			ifp->if_oerrors++;
7486 			if (status & WTX_ST_LC)
7487 				log(LOG_WARNING, "%s: late collision\n",
7488 				    device_xname(sc->sc_dev));
7489 			else if (status & WTX_ST_EC) {
7490 				ifp->if_collisions += 16;
7491 				log(LOG_WARNING, "%s: excessive collisions\n",
7492 				    device_xname(sc->sc_dev));
7493 			}
7494 		} else
7495 			ifp->if_opackets++;
7496 
7497 		txq->txq_free += txs->txs_ndesc;
7498 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
7499 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
7500 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
7501 		m_freem(txs->txs_mbuf);
7502 		txs->txs_mbuf = NULL;
7503 	}
7504 
7505 	/* Update the dirty transmit buffer pointer. */
7506 	txq->txq_sdirty = i;
7507 	DPRINTF(WM_DEBUG_TX,
7508 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
7509 
7510 	if (count != 0)
7511 		rnd_add_uint32(&sc->rnd_source, count);
7512 
7513 	/*
7514 	 * If there are no more pending transmissions, cancel the watchdog
7515 	 * timer.
7516 	 */
7517 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
7518 		ifp->if_timer = 0;
7519 
7520 	return processed;
7521 }
7522 
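/*
 * The rx descriptor accessors below dispatch on the three descriptor
 * formats this driver handles: extended descriptors (82574), advanced
 * descriptors (NEWQUEUE devices, 82575 and newer) and legacy descriptors
 * (everything else).
 */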
7523 static inline uint32_t
7524 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
7525 {
7526 	struct wm_softc *sc = rxq->rxq_sc;
7527 
7528 	if (sc->sc_type == WM_T_82574)
7529 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
7530 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7531 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
7532 	else
7533 		return rxq->rxq_descs[idx].wrx_status;
7534 }
7535 
7536 static inline uint32_t
7537 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
7538 {
7539 	struct wm_softc *sc = rxq->rxq_sc;
7540 
7541 	if (sc->sc_type == WM_T_82574)
7542 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
7543 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7544 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
7545 	else
7546 		return rxq->rxq_descs[idx].wrx_errors;
7547 }
7548 
7549 static inline uint16_t
7550 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
7551 {
7552 	struct wm_softc *sc = rxq->rxq_sc;
7553 
7554 	if (sc->sc_type == WM_T_82574)
7555 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
7556 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7557 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
7558 	else
7559 		return rxq->rxq_descs[idx].wrx_special;
7560 }
7561 
7562 static inline int
7563 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
7564 {
7565 	struct wm_softc *sc = rxq->rxq_sc;
7566 
7567 	if (sc->sc_type == WM_T_82574)
7568 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
7569 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7570 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
7571 	else
7572 		return rxq->rxq_descs[idx].wrx_len;
7573 }
7574 
7575 #ifdef WM_DEBUG
7576 static inline uint32_t
7577 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
7578 {
7579 	struct wm_softc *sc = rxq->rxq_sc;
7580 
7581 	if (sc->sc_type == WM_T_82574)
7582 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
7583 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7584 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
7585 	else
7586 		return 0;
7587 }
7588 
7589 static inline uint8_t
7590 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
7591 {
7592 	struct wm_softc *sc = rxq->rxq_sc;
7593 
7594 	if (sc->sc_type == WM_T_82574)
7595 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
7596 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7597 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
7598 	else
7599 		return 0;
7600 }
7601 #endif /* WM_DEBUG */
7602 
7603 static inline bool
7604 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
7605     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
7606 {
7607 
7608 	if (sc->sc_type == WM_T_82574)
7609 		return (status & ext_bit) != 0;
7610 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7611 		return (status & nq_bit) != 0;
7612 	else
7613 		return (status & legacy_bit) != 0;
7614 }
7615 
7616 static inline bool
7617 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
7618     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
7619 {
7620 
7621 	if (sc->sc_type == WM_T_82574)
7622 		return (error & ext_bit) != 0;
7623 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7624 		return (error & nq_bit) != 0;
7625 	else
7626 		return (error & legacy_bit) != 0;
7627 }
7628 
7629 static inline bool
7630 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
7631 {
7632 
7633 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
7634 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
7635 		return true;
7636 	else
7637 		return false;
7638 }
7639 
7640 static inline bool
7641 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
7642 {
7643 	struct wm_softc *sc = rxq->rxq_sc;
7644 
7645 	/* XXXX missing error bit for newqueue? */
7646 	if (wm_rxdesc_is_set_error(sc, errors,
7647 		WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE,
7648 		EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE,
7649 		NQRXC_ERROR_RXE)) {
7650 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
7651 			log(LOG_WARNING, "%s: symbol error\n",
7652 			    device_xname(sc->sc_dev));
7653 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
7654 			log(LOG_WARNING, "%s: receive sequence error\n",
7655 			    device_xname(sc->sc_dev));
7656 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
7657 			log(LOG_WARNING, "%s: CRC error\n",
7658 			    device_xname(sc->sc_dev));
7659 		return true;
7660 	}
7661 
7662 	return false;
7663 }
7664 
7665 static inline bool
7666 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
7667 {
7668 	struct wm_softc *sc = rxq->rxq_sc;
7669 
7670 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
7671 		NQRXC_STATUS_DD)) {
7672 		/* We have processed all of the receive descriptors. */
7673 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
7674 		return false;
7675 	}
7676 
7677 	return true;
7678 }
7679 
7680 static inline bool
7681 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
7682     struct mbuf *m)
7683 {
7684 	struct ifnet *ifp = &rxq->rxq_sc->sc_ethercom.ec_if;
7685 
7686 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
7687 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
7688 		VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), return false);
7689 	}
7690 
7691 	return true;
7692 }
7693 
7694 static inline void
7695 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
7696     uint32_t errors, struct mbuf *m)
7697 {
7698 	struct wm_softc *sc = rxq->rxq_sc;
7699 
7700 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
7701 		if (wm_rxdesc_is_set_status(sc, status,
7702 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
7703 			WM_Q_EVCNT_INCR(rxq, rxipsum);
7704 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
7705 			if (wm_rxdesc_is_set_error(sc, errors,
7706 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
7707 				m->m_pkthdr.csum_flags |=
7708 					M_CSUM_IPv4_BAD;
7709 		}
7710 		if (wm_rxdesc_is_set_status(sc, status,
7711 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
7712 			/*
7713 			 * Note: we don't know if this was TCP or UDP,
7714 			 * so we just set both bits, and expect the
7715 			 * upper layers to deal.
7716 			 */
7717 			WM_Q_EVCNT_INCR(rxq, rxtusum);
7718 			m->m_pkthdr.csum_flags |=
7719 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7720 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
7721 			if (wm_rxdesc_is_set_error(sc, errors,
7722 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
7723 				m->m_pkthdr.csum_flags |=
7724 					M_CSUM_TCP_UDP_BAD;
7725 		}
7726 	}
7727 }
7728 
7729 /*
7730  * wm_rxeof:
7731  *
7732  *	Helper; handle receive interrupts.
7733  */
7734 static void
7735 wm_rxeof(struct wm_rxqueue *rxq)
7736 {
7737 	struct wm_softc *sc = rxq->rxq_sc;
7738 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7739 	struct wm_rxsoft *rxs;
7740 	struct mbuf *m;
7741 	int i, len;
7742 	int count = 0;
7743 	uint32_t status, errors;
7744 	uint16_t vlantag;
7745 
7746 	KASSERT(mutex_owned(rxq->rxq_lock));
7747 
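	/*
	 * A received packet may span several descriptors; fragments are
	 * accumulated on the rxq_head/rxq_tailp chain (WM_RXCHAIN_LINK)
	 * until a descriptor with EOP set completes the packet.
	 */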
7748 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
7749 		rxs = &rxq->rxq_soft[i];
7750 
7751 		DPRINTF(WM_DEBUG_RX,
7752 		    ("%s: RX: checking descriptor %d\n",
7753 		    device_xname(sc->sc_dev), i));
7754 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
7755 
7756 		status = wm_rxdesc_get_status(rxq, i);
7757 		errors = wm_rxdesc_get_errors(rxq, i);
7758 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
7759 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
7760 #ifdef WM_DEBUG
7761 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
7762 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
7763 #endif
7764 
7765 		if (!wm_rxdesc_dd(rxq, i, status))
7766 			break;
7767 
7768 		count++;
7769 		if (__predict_false(rxq->rxq_discard)) {
7770 			DPRINTF(WM_DEBUG_RX,
7771 			    ("%s: RX: discarding contents of descriptor %d\n",
7772 			    device_xname(sc->sc_dev), i));
7773 			wm_init_rxdesc(rxq, i);
7774 			if (wm_rxdesc_is_eop(rxq, status)) {
7775 				/* Reset our state. */
7776 				DPRINTF(WM_DEBUG_RX,
7777 				    ("%s: RX: resetting rxdiscard -> 0\n",
7778 				    device_xname(sc->sc_dev)));
7779 				rxq->rxq_discard = 0;
7780 			}
7781 			continue;
7782 		}
7783 
7784 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
7785 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
7786 
7787 		m = rxs->rxs_mbuf;
7788 
7789 		/*
7790 		 * Add a new receive buffer to the ring, unless of
7791 		 * course the length is zero. Treat the latter as a
7792 		 * failed mapping.
7793 		 */
7794 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
7795 			/*
7796 			 * Failed, throw away what we've done so
7797 			 * far, and discard the rest of the packet.
7798 			 */
7799 			ifp->if_ierrors++;
7800 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
7801 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
7802 			wm_init_rxdesc(rxq, i);
7803 			if (!wm_rxdesc_is_eop(rxq, status))
7804 				rxq->rxq_discard = 1;
7805 			if (rxq->rxq_head != NULL)
7806 				m_freem(rxq->rxq_head);
7807 			WM_RXCHAIN_RESET(rxq);
7808 			DPRINTF(WM_DEBUG_RX,
7809 			    ("%s: RX: Rx buffer allocation failed, "
7810 			    "dropping packet%s\n", device_xname(sc->sc_dev),
7811 			    rxq->rxq_discard ? " (discard)" : ""));
7812 			continue;
7813 		}
7814 
7815 		m->m_len = len;
7816 		rxq->rxq_len += len;
7817 		DPRINTF(WM_DEBUG_RX,
7818 		    ("%s: RX: buffer at %p len %d\n",
7819 		    device_xname(sc->sc_dev), m->m_data, len));
7820 
7821 		/* If this is not the end of the packet, keep looking. */
7822 		if (!wm_rxdesc_is_eop(rxq, status)) {
7823 			WM_RXCHAIN_LINK(rxq, m);
7824 			DPRINTF(WM_DEBUG_RX,
7825 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
7826 			    device_xname(sc->sc_dev), rxq->rxq_len));
7827 			continue;
7828 		}
7829 
7830 		/*
7831 		 * Okay, we have the entire packet now.  Except on the I350,
7832 		 * I354 and I21[01], the chip is configured to include the FCS
7833 		 * (not all chips can be configured to strip it), so we need
7834 		 * to trim it.  We may also need to adjust the length of the
7835 		 * previous mbuf in the chain if the current mbuf is too
7836 		 * short.  Due to an erratum, the RCTL_SECRC bit in the RCTL
7837 		 * register is always set on the I350, so we don't trim the
7838 		 * FCS there.
7839 		 */
7840 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
7841 		    && (sc->sc_type != WM_T_I210)
7842 		    && (sc->sc_type != WM_T_I211)) {
7843 			if (m->m_len < ETHER_CRC_LEN) {
7844 				rxq->rxq_tail->m_len
7845 				    -= (ETHER_CRC_LEN - m->m_len);
7846 				m->m_len = 0;
7847 			} else
7848 				m->m_len -= ETHER_CRC_LEN;
7849 			len = rxq->rxq_len - ETHER_CRC_LEN;
7850 		} else
7851 			len = rxq->rxq_len;
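		/*
		 * Example: if the final mbuf held only 2 of the 4 FCS bytes,
		 * the branch above sets its length to 0 and trims the other
		 * 2 bytes from the previous mbuf in the chain.
		 */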
7852 
7853 		WM_RXCHAIN_LINK(rxq, m);
7854 
7855 		*rxq->rxq_tailp = NULL;
7856 		m = rxq->rxq_head;
7857 
7858 		WM_RXCHAIN_RESET(rxq);
7859 
7860 		DPRINTF(WM_DEBUG_RX,
7861 		    ("%s: RX: have entire packet, len -> %d\n",
7862 		    device_xname(sc->sc_dev), len));
7863 
7864 		/* If an error occurred, update stats and drop the packet. */
7865 		if (wm_rxdesc_has_errors(rxq, errors)) {
7866 			m_freem(m);
7867 			continue;
7868 		}
7869 
7870 		/* No errors.  Receive the packet. */
7871 		m_set_rcvif(m, ifp);
7872 		m->m_pkthdr.len = len;
7873 		/*
7874 		 * TODO
7875 		 * We should save the rsshash and rsstype in this mbuf.
7876 		 */
7877 		DPRINTF(WM_DEBUG_RX,
7878 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
7879 			device_xname(sc->sc_dev), rsstype, rsshash));
7880 
7881 		/*
7882 		 * If VLANs are enabled, VLAN packets have been unwrapped
7883 		 * for us.  Associate the tag with the packet.
7884 		 */
7885 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
7886 			continue;
7887 
7888 		/* Set up checksum info for this packet. */
7889 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
7890 
7891 		mutex_exit(rxq->rxq_lock);
7892 
7893 		/* Pass it on. */
7894 		if_percpuq_enqueue(sc->sc_ipq, m);
7895 
7896 		mutex_enter(rxq->rxq_lock);
7897 
7898 		if (rxq->rxq_stopping)
7899 			break;
7900 	}
7901 
7902 	/* Update the receive pointer. */
7903 	rxq->rxq_ptr = i;
7904 	if (count != 0)
7905 		rnd_add_uint32(&sc->rnd_source, count);
7906 
7907 	DPRINTF(WM_DEBUG_RX,
7908 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
7909 }
7910 
7911 /*
7912  * wm_linkintr_gmii:
7913  *
7914  *	Helper; handle link interrupts for GMII.
7915  */
7916 static void
7917 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
7918 {
7919 
7920 	KASSERT(WM_CORE_LOCKED(sc));
7921 
7922 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7923 		__func__));
7924 
7925 	if (icr & ICR_LSC) {
7926 		uint32_t reg;
7927 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
7928 
7929 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
7930 			wm_gig_downshift_workaround_ich8lan(sc);
7931 
7932 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
7933 			device_xname(sc->sc_dev)));
7934 		mii_pollstat(&sc->sc_mii);
7935 		if (sc->sc_type == WM_T_82543) {
7936 			int miistatus, active;
7937 
7938 			/*
7939 			 * With 82543, we need to force speed and
7940 			 * duplex on the MAC equal to what the PHY
7941 			 * speed and duplex configuration is.
7942 			 */
7943 			miistatus = sc->sc_mii.mii_media_status;
7944 
7945 			if (miistatus & IFM_ACTIVE) {
7946 				active = sc->sc_mii.mii_media_active;
7947 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
7948 				switch (IFM_SUBTYPE(active)) {
7949 				case IFM_10_T:
7950 					sc->sc_ctrl |= CTRL_SPEED_10;
7951 					break;
7952 				case IFM_100_TX:
7953 					sc->sc_ctrl |= CTRL_SPEED_100;
7954 					break;
7955 				case IFM_1000_T:
7956 					sc->sc_ctrl |= CTRL_SPEED_1000;
7957 					break;
7958 				default:
7959 					/*
7960 					 * fiber?
7961 					 * Should not enter here.
7962 					 */
7963 					printf("unknown media (%x)\n", active);
7964 					break;
7965 				}
7966 				if (active & IFM_FDX)
7967 					sc->sc_ctrl |= CTRL_FD;
7968 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7969 			}
7970 		} else if ((sc->sc_type == WM_T_ICH8)
7971 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
7972 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
7973 		} else if (sc->sc_type == WM_T_PCH) {
7974 			wm_k1_gig_workaround_hv(sc,
7975 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
7976 		}
7977 
7978 		if ((sc->sc_phytype == WMPHY_82578)
7979 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
7980 			== IFM_1000_T)) {
7981 
7982 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
7983 				delay(200*1000); /* XXX too big */
7984 
7985 				/* Link stall fix for link up */
7986 				wm_gmii_hv_writereg(sc->sc_dev, 1,
7987 				    HV_MUX_DATA_CTRL,
7988 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
7989 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
7990 				wm_gmii_hv_writereg(sc->sc_dev, 1,
7991 				    HV_MUX_DATA_CTRL,
7992 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
7993 			}
7994 		}
7995 		/*
7996 		 * I217 packet loss issue:
7997 		 * Ensure that the FEXTNVM4 Beacon Duration is set correctly
7998 		 * on power up.
7999 		 * Set the Beacon Duration for I217 to 8 usec.
8000 		 */
8001 		if ((sc->sc_type == WM_T_PCH_LPT)
8002 		    || (sc->sc_type == WM_T_PCH_SPT)) {
8003 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
8004 			reg &= ~FEXTNVM4_BEACON_DURATION;
8005 			reg |= FEXTNVM4_BEACON_DURATION_8US;
8006 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
8007 		}
8008 
8009 		/* XXX Work-around I218 hang issue */
8010 		/* e1000_k1_workaround_lpt_lp() */
8011 
8012 		if ((sc->sc_type == WM_T_PCH_LPT)
8013 		    || (sc->sc_type == WM_T_PCH_SPT)) {
8014 			/*
8015 			 * Set platform power management values for Latency
8016 			 * Tolerance Reporting (LTR)
8017 			 */
8018 			wm_platform_pm_pch_lpt(sc,
8019 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
8020 				    != 0));
8021 		}
8022 
8023 		/* FEXTNVM6 K1-off workaround */
8024 		if (sc->sc_type == WM_T_PCH_SPT) {
8025 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
8026 			if (CSR_READ(sc, WMREG_PCIEANACFG)
8027 			    & FEXTNVM6_K1_OFF_ENABLE)
8028 				reg |= FEXTNVM6_K1_OFF_ENABLE;
8029 			else
8030 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
8031 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
8032 		}
8033 	} else if (icr & ICR_RXSEQ) {
8034 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
8035 			device_xname(sc->sc_dev)));
8036 	}
8037 }
8038 
8039 /*
8040  * wm_linkintr_tbi:
8041  *
8042  *	Helper; handle link interrupts for TBI mode.
8043  */
8044 static void
8045 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
8046 {
8047 	uint32_t status;
8048 
8049 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
8050 		__func__));
8051 
8052 	status = CSR_READ(sc, WMREG_STATUS);
8053 	if (icr & ICR_LSC) {
8054 		if (status & STATUS_LU) {
8055 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
8056 			    device_xname(sc->sc_dev),
8057 			    (status & STATUS_FD) ? "FDX" : "HDX"));
8058 			/*
8059 			 * NOTE: CTRL will update TFCE and RFCE automatically,
8060 			 * so we should update sc->sc_ctrl
8061 			 */
8062 
8063 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
8064 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
8065 			sc->sc_fcrtl &= ~FCRTL_XONE;
8066 			if (status & STATUS_FD)
8067 				sc->sc_tctl |=
8068 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
8069 			else
8070 				sc->sc_tctl |=
8071 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
8072 			if (sc->sc_ctrl & CTRL_TFCE)
8073 				sc->sc_fcrtl |= FCRTL_XONE;
8074 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
8075 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
8076 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
8077 				      sc->sc_fcrtl);
8078 			sc->sc_tbi_linkup = 1;
8079 		} else {
8080 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
8081 			    device_xname(sc->sc_dev)));
8082 			sc->sc_tbi_linkup = 0;
8083 		}
8084 		/* Update LED */
8085 		wm_tbi_serdes_set_linkled(sc);
8086 	} else if (icr & ICR_RXSEQ) {
8087 		DPRINTF(WM_DEBUG_LINK,
8088 		    ("%s: LINK: Receive sequence error\n",
8089 		    device_xname(sc->sc_dev)));
8090 	}
8091 }
8092 
8093 /*
8094  * wm_linkintr_serdes:
8095  *
8096  *	Helper; handle link interrupts for SERDES mode.
8097  */
8098 static void
8099 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
8100 {
8101 	struct mii_data *mii = &sc->sc_mii;
8102 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8103 	uint32_t pcs_adv, pcs_lpab, reg;
8104 
8105 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
8106 		__func__));
8107 
8108 	if (icr & ICR_LSC) {
8109 		/* Check PCS */
8110 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
8111 		if ((reg & PCS_LSTS_LINKOK) != 0) {
8112 			mii->mii_media_status |= IFM_ACTIVE;
8113 			sc->sc_tbi_linkup = 1;
8114 		} else {
8115 			mii->mii_media_status |= IFM_NONE;
8116 			sc->sc_tbi_linkup = 0;
8117 			wm_tbi_serdes_set_linkled(sc);
8118 			return;
8119 		}
8120 		mii->mii_media_active |= IFM_1000_SX;
8121 		if ((reg & PCS_LSTS_FDX) != 0)
8122 			mii->mii_media_active |= IFM_FDX;
8123 		else
8124 			mii->mii_media_active |= IFM_HDX;
8125 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
8126 			/* Check flow */
8127 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
8128 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
8129 				DPRINTF(WM_DEBUG_LINK,
8130 				    ("XXX LINKOK but not ACOMP\n"));
8131 				return;
8132 			}
8133 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
8134 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
8135 			DPRINTF(WM_DEBUG_LINK,
8136 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
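			/*
			 * Resolve flow control from the advertised (adv) and
			 * link partner (lpab) pause bits, in the style of the
			 * standard 802.3 symmetric/asymmetric resolution:
			 *	SYM + SYM		-> TX and RX pause
			 *	ASYM only + SYM/ASYM	-> TX pause only
			 *	SYM/ASYM + ASYM only	-> RX pause only
			 */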
8137 			if ((pcs_adv & TXCW_SYM_PAUSE)
8138 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
8139 				mii->mii_media_active |= IFM_FLOW
8140 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
8141 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
8142 			    && (pcs_adv & TXCW_ASYM_PAUSE)
8143 			    && (pcs_lpab & TXCW_SYM_PAUSE)
8144 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
8145 				mii->mii_media_active |= IFM_FLOW
8146 				    | IFM_ETH_TXPAUSE;
8147 			else if ((pcs_adv & TXCW_SYM_PAUSE)
8148 			    && (pcs_adv & TXCW_ASYM_PAUSE)
8149 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
8150 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
8151 				mii->mii_media_active |= IFM_FLOW
8152 				    | IFM_ETH_RXPAUSE;
8153 		}
8154 		/* Update LED */
8155 		wm_tbi_serdes_set_linkled(sc);
8156 	} else {
8157 		DPRINTF(WM_DEBUG_LINK,
8158 		    ("%s: LINK: Receive sequence error\n",
8159 		    device_xname(sc->sc_dev)));
8160 	}
8161 }
8162 
8163 /*
8164  * wm_linkintr:
8165  *
8166  *	Helper; handle link interrupts.
8167  */
8168 static void
8169 wm_linkintr(struct wm_softc *sc, uint32_t icr)
8170 {
8171 
8172 	KASSERT(WM_CORE_LOCKED(sc));
8173 
8174 	if (sc->sc_flags & WM_F_HAS_MII)
8175 		wm_linkintr_gmii(sc, icr);
8176 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
8177 	    && (sc->sc_type >= WM_T_82575))
8178 		wm_linkintr_serdes(sc, icr);
8179 	else
8180 		wm_linkintr_tbi(sc, icr);
8181 }
8182 
8183 /*
8184  * wm_intr_legacy:
8185  *
8186  *	Interrupt service routine for INTx and MSI.
8187  */
8188 static int
8189 wm_intr_legacy(void *arg)
8190 {
8191 	struct wm_softc *sc = arg;
8192 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8193 	struct wm_rxqueue *rxq = &sc->sc_queue[0].wmq_rxq;
8194 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8195 	uint32_t icr, rndval = 0;
8196 	int handled = 0;
8197 
8198 	DPRINTF(WM_DEBUG_TX,
8199 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
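	/*
	 * Reading ICR acknowledges (clears) the asserted causes, so keep
	 * servicing until no enabled cause bits remain set.
	 */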
8200 	while (1 /* CONSTCOND */) {
8201 		icr = CSR_READ(sc, WMREG_ICR);
8202 		if ((icr & sc->sc_icr) == 0)
8203 			break;
8204 		if (rndval == 0)
8205 			rndval = icr;
8206 
8207 		mutex_enter(rxq->rxq_lock);
8208 
8209 		if (rxq->rxq_stopping) {
8210 			mutex_exit(rxq->rxq_lock);
8211 			break;
8212 		}
8213 
8214 		handled = 1;
8215 
8216 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
8217 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
8218 			DPRINTF(WM_DEBUG_RX,
8219 			    ("%s: RX: got Rx intr 0x%08x\n",
8220 			    device_xname(sc->sc_dev),
8221 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
8222 			WM_Q_EVCNT_INCR(rxq, rxintr);
8223 		}
8224 #endif
8225 		wm_rxeof(rxq);
8226 
8227 		mutex_exit(rxq->rxq_lock);
8228 		mutex_enter(txq->txq_lock);
8229 
8230 		if (txq->txq_stopping) {
8231 			mutex_exit(txq->txq_lock);
8232 			break;
8233 		}
8234 
8235 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
8236 		if (icr & ICR_TXDW) {
8237 			DPRINTF(WM_DEBUG_TX,
8238 			    ("%s: TX: got TXDW interrupt\n",
8239 			    device_xname(sc->sc_dev)));
8240 			WM_Q_EVCNT_INCR(txq, txdw);
8241 		}
8242 #endif
8243 		wm_txeof(sc, txq);
8244 
8245 		mutex_exit(txq->txq_lock);
8246 		WM_CORE_LOCK(sc);
8247 
8248 		if (sc->sc_core_stopping) {
8249 			WM_CORE_UNLOCK(sc);
8250 			break;
8251 		}
8252 
8253 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
8254 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
8255 			wm_linkintr(sc, icr);
8256 		}
8257 
8258 		WM_CORE_UNLOCK(sc);
8259 
8260 		if (icr & ICR_RXO) {
8261 #if defined(WM_DEBUG)
8262 			log(LOG_WARNING, "%s: Receive overrun\n",
8263 			    device_xname(sc->sc_dev));
8264 #endif /* defined(WM_DEBUG) */
8265 		}
8266 	}
8267 
8268 	rnd_add_uint32(&sc->rnd_source, rndval);
8269 
8270 	if (handled) {
8271 		/* Try to get more packets going. */
8272 		if_schedule_deferred_start(ifp);
8273 	}
8274 
8275 	return handled;
8276 }
8277 
8278 static int
8279 wm_txrxintr_msix(void *arg)
8280 {
8281 	struct wm_queue *wmq = arg;
8282 	struct wm_txqueue *txq = &wmq->wmq_txq;
8283 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
8284 	struct wm_softc *sc = txq->txq_sc;
8285 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8286 
8287 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
8288 
8289 	DPRINTF(WM_DEBUG_TX,
8290 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
8291 
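	/*
	 * Mask this queue's Tx/Rx interrupt while we service it; the
	 * matching IMS/EIMS write at the end of this handler re-enables it.
	 */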
8292 	if (sc->sc_type == WM_T_82574)
8293 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
8294 	else if (sc->sc_type == WM_T_82575)
8295 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
8296 	else
8297 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
8298 
8299 	mutex_enter(txq->txq_lock);
8300 
8301 	if (txq->txq_stopping) {
8302 		mutex_exit(txq->txq_lock);
8303 		return 0;
8304 	}
8305 
8306 	WM_Q_EVCNT_INCR(txq, txdw);
8307 	wm_txeof(sc, txq);
8308 
8309 	/* Try to get more packets going. */
8310 	if (pcq_peek(txq->txq_interq) != NULL)
8311 		if_schedule_deferred_start(ifp);
8312 	/*
8313 	 * There are still some upper layer processing which call
8314 	 * ifp->if_start(). e.g. ALTQ
8315 	 */
8316 	if (wmq->wmq_id == 0)
8317 		if_schedule_deferred_start(ifp);
8318 
8319 	mutex_exit(txq->txq_lock);
8320 
8321 	DPRINTF(WM_DEBUG_RX,
8322 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
8323 	mutex_enter(rxq->rxq_lock);
8324 
8325 	if (rxq->rxq_stopping) {
8326 		mutex_exit(rxq->rxq_lock);
8327 		return 0;
8328 	}
8329 
8330 	WM_Q_EVCNT_INCR(rxq, rxintr);
8331 	wm_rxeof(rxq);
8332 	mutex_exit(rxq->rxq_lock);
8333 
8334 	if (sc->sc_type == WM_T_82574)
8335 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
8336 	else if (sc->sc_type == WM_T_82575)
8337 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
8338 	else
8339 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
8340 
8341 	return 1;
8342 }
8343 
8344 /*
8345  * wm_linkintr_msix:
8346  *
8347  *	Interrupt service routine for link status change for MSI-X.
8348  */
8349 static int
8350 wm_linkintr_msix(void *arg)
8351 {
8352 	struct wm_softc *sc = arg;
8353 	uint32_t reg;
8354 
8355 	DPRINTF(WM_DEBUG_LINK,
8356 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
8357 
8358 	reg = CSR_READ(sc, WMREG_ICR);
8359 	WM_CORE_LOCK(sc);
8360 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
8361 		goto out;
8362 
8363 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
8364 	wm_linkintr(sc, ICR_LSC);
8365 
8366 out:
8367 	WM_CORE_UNLOCK(sc);
8368 
8369 	if (sc->sc_type == WM_T_82574)
8370 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
8371 	else if (sc->sc_type == WM_T_82575)
8372 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
8373 	else
8374 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
8375 
8376 	return 1;
8377 }
8378 
8379 /*
8380  * Media related.
8381  * GMII, SGMII, TBI (and SERDES)
8382  */
8383 
8384 /* Common */
8385 
8386 /*
8387  * wm_tbi_serdes_set_linkled:
8388  *
8389  *	Update the link LED on TBI and SERDES devices.
8390  */
8391 static void
8392 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
8393 {
8394 
8395 	if (sc->sc_tbi_linkup)
8396 		sc->sc_ctrl |= CTRL_SWDPIN(0);
8397 	else
8398 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
8399 
8400 	/* 82540 or newer devices are active low */
8401 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
8402 
8403 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8404 }
8405 
8406 /* GMII related */
8407 
8408 /*
8409  * wm_gmii_reset:
8410  *
8411  *	Reset the PHY.
8412  */
8413 static void
8414 wm_gmii_reset(struct wm_softc *sc)
8415 {
8416 	uint32_t reg;
8417 	int rv;
8418 
8419 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
8420 		device_xname(sc->sc_dev), __func__));
8421 
8422 	rv = sc->phy.acquire(sc);
8423 	if (rv != 0) {
8424 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8425 		    __func__);
8426 		return;
8427 	}
8428 
8429 	switch (sc->sc_type) {
8430 	case WM_T_82542_2_0:
8431 	case WM_T_82542_2_1:
8432 		/* null */
8433 		break;
8434 	case WM_T_82543:
8435 		/*
8436 		 * With 82543, we need to force speed and duplex on the MAC
8437 		 * equal to what the PHY speed and duplex configuration is.
8438 		 * In addition, we need to perform a hardware reset on the PHY
8439 		 * to take it out of reset.
8440 		 */
8441 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
8442 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8443 
8444 		/* The PHY reset pin is active-low. */
8445 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
8446 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
8447 		    CTRL_EXT_SWDPIN(4));
8448 		reg |= CTRL_EXT_SWDPIO(4);
8449 
8450 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
8451 		CSR_WRITE_FLUSH(sc);
8452 		delay(10*1000);
8453 
8454 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
8455 		CSR_WRITE_FLUSH(sc);
8456 		delay(150);
8457 #if 0
8458 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
8459 #endif
8460 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
8461 		break;
8462 	case WM_T_82544:	/* reset 10000us */
8463 	case WM_T_82540:
8464 	case WM_T_82545:
8465 	case WM_T_82545_3:
8466 	case WM_T_82546:
8467 	case WM_T_82546_3:
8468 	case WM_T_82541:
8469 	case WM_T_82541_2:
8470 	case WM_T_82547:
8471 	case WM_T_82547_2:
8472 	case WM_T_82571:	/* reset 100us */
8473 	case WM_T_82572:
8474 	case WM_T_82573:
8475 	case WM_T_82574:
8476 	case WM_T_82575:
8477 	case WM_T_82576:
8478 	case WM_T_82580:
8479 	case WM_T_I350:
8480 	case WM_T_I354:
8481 	case WM_T_I210:
8482 	case WM_T_I211:
8483 	case WM_T_82583:
8484 	case WM_T_80003:
8485 		/* generic reset */
8486 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
8487 		CSR_WRITE_FLUSH(sc);
8488 		delay(20000);
8489 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8490 		CSR_WRITE_FLUSH(sc);
8491 		delay(20000);
8492 
8493 		if ((sc->sc_type == WM_T_82541)
8494 		    || (sc->sc_type == WM_T_82541_2)
8495 		    || (sc->sc_type == WM_T_82547)
8496 		    || (sc->sc_type == WM_T_82547_2)) {
8497 			/* workarounds for IGP are done in igp_reset() */
8498 			/* XXX add code to set LED after phy reset */
8499 		}
8500 		break;
8501 	case WM_T_ICH8:
8502 	case WM_T_ICH9:
8503 	case WM_T_ICH10:
8504 	case WM_T_PCH:
8505 	case WM_T_PCH2:
8506 	case WM_T_PCH_LPT:
8507 	case WM_T_PCH_SPT:
8508 		/* generic reset */
8509 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
8510 		CSR_WRITE_FLUSH(sc);
8511 		delay(100);
8512 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8513 		CSR_WRITE_FLUSH(sc);
8514 		delay(150);
8515 		break;
8516 	default:
8517 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
8518 		    __func__);
8519 		break;
8520 	}
8521 
8522 	sc->phy.release(sc);
8523 
8524 	/* get_cfg_done */
8525 	wm_get_cfg_done(sc);
8526 
8527 	/* extra setup */
8528 	switch (sc->sc_type) {
8529 	case WM_T_82542_2_0:
8530 	case WM_T_82542_2_1:
8531 	case WM_T_82543:
8532 	case WM_T_82544:
8533 	case WM_T_82540:
8534 	case WM_T_82545:
8535 	case WM_T_82545_3:
8536 	case WM_T_82546:
8537 	case WM_T_82546_3:
8538 	case WM_T_82541_2:
8539 	case WM_T_82547_2:
8540 	case WM_T_82571:
8541 	case WM_T_82572:
8542 	case WM_T_82573:
8543 	case WM_T_82575:
8544 	case WM_T_82576:
8545 	case WM_T_82580:
8546 	case WM_T_I350:
8547 	case WM_T_I354:
8548 	case WM_T_I210:
8549 	case WM_T_I211:
8550 	case WM_T_80003:
8551 		/* null */
8552 		break;
8553 	case WM_T_82574:
8554 	case WM_T_82583:
8555 		wm_lplu_d0_disable(sc);
8556 		break;
8557 	case WM_T_82541:
8558 	case WM_T_82547:
8559 		/* XXX Actively configure the LED after PHY reset */
8560 		break;
8561 	case WM_T_ICH8:
8562 	case WM_T_ICH9:
8563 	case WM_T_ICH10:
8564 	case WM_T_PCH:
8565 	case WM_T_PCH2:
8566 	case WM_T_PCH_LPT:
8567 	case WM_T_PCH_SPT:
8568 		/* Allow time for h/w to get to a quiescent state after reset */
8569 		delay(10*1000);
8570 
8571 		if (sc->sc_type == WM_T_PCH)
8572 			wm_hv_phy_workaround_ich8lan(sc);
8573 
8574 		if (sc->sc_type == WM_T_PCH2)
8575 			wm_lv_phy_workaround_ich8lan(sc);
8576 
8577 		/* Clear the host wakeup bit after lcd reset */
8578 		if (sc->sc_type >= WM_T_PCH) {
8579 			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
8580 			    BM_PORT_GEN_CFG);
8581 			reg &= ~BM_WUC_HOST_WU_BIT;
8582 			wm_gmii_hv_writereg(sc->sc_dev, 2,
8583 			    BM_PORT_GEN_CFG, reg);
8584 		}
8585 
8586 		/*
8587 		 * XXX Configure the LCD with the extended configuration region
8588 		 * in NVM
8589 		 */
8590 
8591 		/* Disable D0 LPLU. */
8592 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
8593 			wm_lplu_d0_disable_pch(sc);
8594 		else
8595 			wm_lplu_d0_disable(sc);	/* ICH* */
8596 		break;
8597 	default:
8598 		panic("%s: unknown type\n", __func__);
8599 		break;
8600 	}
8601 }
8602 
8603 /*
8604  * Setup sc_phytype and mii_{read|write}reg.
8605  *
8606  *  To identify the PHY type, the correct read/write functions must be
8607  * selected.  To select them, the PCI ID or MAC type is required, since
8608  * the PHY registers cannot be accessed yet.
8609  *
8610  *  On the first call of this function, the PHY ID is not known yet, so
8611  * check the PCI ID or MAC type.  The list of PCI IDs may not be
8612  * complete, so the result might be incorrect.
8613  *
8614  *  On the second call, the PHY OUI and model are used to identify the
8615  * PHY type.  This might not be perfect either, because the comparison
8616  * table may lack entries, but it is better than the first call.
8617  *
8618  *  If the newly detected result differs from the previous assumption,
8619  * a diagnostic message is printed.
8620  */
8621 static void
8622 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
8623     uint16_t phy_model)
8624 {
8625 	device_t dev = sc->sc_dev;
8626 	struct mii_data *mii = &sc->sc_mii;
8627 	uint16_t new_phytype = WMPHY_UNKNOWN;
8628 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
8629 	mii_readreg_t new_readreg;
8630 	mii_writereg_t new_writereg;
8631 
8632 	if (mii->mii_readreg == NULL) {
8633 		/*
8634 		 *  This is the first call of this function. For ICH and PCH
8635 		 * variants, it's difficult to determine the PHY access method
8636 		 * by sc_type, so use the PCI product ID for some devices.
8637 		 */
8638 
8639 		switch (sc->sc_pcidevid) {
8640 		case PCI_PRODUCT_INTEL_PCH_M_LM:
8641 		case PCI_PRODUCT_INTEL_PCH_M_LC:
8642 			/* 82577 */
8643 			new_phytype = WMPHY_82577;
8644 			break;
8645 		case PCI_PRODUCT_INTEL_PCH_D_DM:
8646 		case PCI_PRODUCT_INTEL_PCH_D_DC:
8647 			/* 82578 */
8648 			new_phytype = WMPHY_82578;
8649 			break;
8650 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
8651 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
8652 			/* 82579 */
8653 			new_phytype = WMPHY_82579;
8654 			break;
8655 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
8656 		case PCI_PRODUCT_INTEL_82801I_BM:
8657 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
8658 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
8659 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
8660 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
8661 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
8662 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
8663 			/* ICH8, 9, 10 with 82567 */
8664 			new_phytype = WMPHY_BM;
8665 			break;
8666 		default:
8667 			break;
8668 		}
8669 	} else {
8670 		/* It's not the first call. Use PHY OUI and model */
8671 		switch (phy_oui) {
8672 		case MII_OUI_ATHEROS: /* XXX ??? */
8673 			switch (phy_model) {
8674 			case 0x0004: /* XXX */
8675 				new_phytype = WMPHY_82578;
8676 				break;
8677 			default:
8678 				break;
8679 			}
8680 			break;
8681 		case MII_OUI_xxMARVELL:
8682 			switch (phy_model) {
8683 			case MII_MODEL_xxMARVELL_I210:
8684 				new_phytype = WMPHY_I210;
8685 				break;
8686 			case MII_MODEL_xxMARVELL_E1011:
8687 			case MII_MODEL_xxMARVELL_E1000_3:
8688 			case MII_MODEL_xxMARVELL_E1000_5:
8689 			case MII_MODEL_xxMARVELL_E1112:
8690 				new_phytype = WMPHY_M88;
8691 				break;
8692 			case MII_MODEL_xxMARVELL_E1149:
8693 				new_phytype = WMPHY_BM;
8694 				break;
8695 			case MII_MODEL_xxMARVELL_E1111:
8696 			case MII_MODEL_xxMARVELL_I347:
8697 			case MII_MODEL_xxMARVELL_E1512:
8698 			case MII_MODEL_xxMARVELL_E1340M:
8699 			case MII_MODEL_xxMARVELL_E1543:
8700 				new_phytype = WMPHY_M88;
8701 				break;
8702 			case MII_MODEL_xxMARVELL_I82563:
8703 				new_phytype = WMPHY_GG82563;
8704 				break;
8705 			default:
8706 				break;
8707 			}
8708 			break;
8709 		case MII_OUI_INTEL:
8710 			switch (phy_model) {
8711 			case MII_MODEL_INTEL_I82577:
8712 				new_phytype = WMPHY_82577;
8713 				break;
8714 			case MII_MODEL_INTEL_I82579:
8715 				new_phytype = WMPHY_82579;
8716 				break;
8717 			case MII_MODEL_INTEL_I217:
8718 				new_phytype = WMPHY_I217;
8719 				break;
8720 			case MII_MODEL_INTEL_I82580:
8721 			case MII_MODEL_INTEL_I350:
8722 				new_phytype = WMPHY_82580;
8723 				break;
8724 			default:
8725 				break;
8726 			}
8727 			break;
8728 		case MII_OUI_yyINTEL:
8729 			switch (phy_model) {
8730 			case MII_MODEL_yyINTEL_I82562G:
8731 			case MII_MODEL_yyINTEL_I82562EM:
8732 			case MII_MODEL_yyINTEL_I82562ET:
8733 				new_phytype = WMPHY_IFE;
8734 				break;
8735 			case MII_MODEL_yyINTEL_IGP01E1000:
8736 				new_phytype = WMPHY_IGP;
8737 				break;
8738 			case MII_MODEL_yyINTEL_I82566:
8739 				new_phytype = WMPHY_IGP_3;
8740 				break;
8741 			default:
8742 				break;
8743 			}
8744 			break;
8745 		default:
8746 			break;
8747 		}
8748 		if (new_phytype == WMPHY_UNKNOWN)
8749 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
8750 			    __func__);
8751 
8752 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
8753 		    && (sc->sc_phytype != new_phytype)) {
8754 			aprint_error_dev(dev, "Previously assumed PHY type(%u) "
8755 			    "was incorrect. PHY type from PHY ID = %u\n",
8756 			    sc->sc_phytype, new_phytype);
8757 		}
8758 	}
8759 
8760 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
8761 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
8762 		/* SGMII */
8763 		new_readreg = wm_sgmii_readreg;
8764 		new_writereg = wm_sgmii_writereg;
8765 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
8766 		/* BM2 (phyaddr == 1) */
8767 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
8768 		    && (new_phytype != WMPHY_BM)
8769 		    && (new_phytype != WMPHY_UNKNOWN))
8770 			doubt_phytype = new_phytype;
8771 		new_phytype = WMPHY_BM;
8772 		new_readreg = wm_gmii_bm_readreg;
8773 		new_writereg = wm_gmii_bm_writereg;
8774 	} else if (sc->sc_type >= WM_T_PCH) {
8775 		/* All PCH* use _hv_ */
8776 		new_readreg = wm_gmii_hv_readreg;
8777 		new_writereg = wm_gmii_hv_writereg;
8778 	} else if (sc->sc_type >= WM_T_ICH8) {
8779 		/* non-82567 ICH8, 9 and 10 */
8780 		new_readreg = wm_gmii_i82544_readreg;
8781 		new_writereg = wm_gmii_i82544_writereg;
8782 	} else if (sc->sc_type >= WM_T_80003) {
8783 		/* 80003 */
8784 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
8785 		    && (new_phytype != WMPHY_GG82563)
8786 		    && (new_phytype != WMPHY_UNKNOWN))
8787 			doubt_phytype = new_phytype;
8788 		new_phytype = WMPHY_GG82563;
8789 		new_readreg = wm_gmii_i80003_readreg;
8790 		new_writereg = wm_gmii_i80003_writereg;
8791 	} else if (sc->sc_type >= WM_T_I210) {
8792 		/* I210 and I211 */
8793 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
8794 		    && (new_phytype != WMPHY_I210)
8795 		    && (new_phytype != WMPHY_UNKNOWN))
8796 			doubt_phytype = new_phytype;
8797 		new_phytype = WMPHY_I210;
8798 		new_readreg = wm_gmii_gs40g_readreg;
8799 		new_writereg = wm_gmii_gs40g_writereg;
8800 	} else if (sc->sc_type >= WM_T_82580) {
8801 		/* 82580, I350 and I354 */
8802 		new_readreg = wm_gmii_82580_readreg;
8803 		new_writereg = wm_gmii_82580_writereg;
8804 	} else if (sc->sc_type >= WM_T_82544) {
8805 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
8806 		new_readreg = wm_gmii_i82544_readreg;
8807 		new_writereg = wm_gmii_i82544_writereg;
8808 	} else {
8809 		new_readreg = wm_gmii_i82543_readreg;
8810 		new_writereg = wm_gmii_i82543_writereg;
8811 	}
8812 
8813 	if (new_phytype == WMPHY_BM) {
8814 		/* All BM use _bm_ */
8815 		new_readreg = wm_gmii_bm_readreg;
8816 		new_writereg = wm_gmii_bm_writereg;
8817 	}
8818 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
8819 		/* All PCH* use _hv_ */
8820 		new_readreg = wm_gmii_hv_readreg;
8821 		new_writereg = wm_gmii_hv_writereg;
8822 	}
8823 
8824 	/* Diag output */
8825 	if (doubt_phytype != WMPHY_UNKNOWN)
8826 		aprint_error_dev(dev, "Assumed new PHY type was "
8827 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
8828 		    new_phytype);
8829 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
8830 	    && (sc->sc_phytype != new_phytype))
8831 		aprint_error_dev(dev, "Previously assumed PHY type(%u) "
8832 		    "was incorrect. New PHY type = %u\n",
8833 		    sc->sc_phytype, new_phytype);
8834 
8835 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
8836 		aprint_error_dev(dev, "PHY type is still unknown.\n");
8837 
8838 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
8839 		aprint_error_dev(dev, "Previously assumed PHY read/write "
8840 		    "function was incorrect.\n");
8841 
8842 	/* Update now */
8843 	sc->sc_phytype = new_phytype;
8844 	mii->mii_readreg = new_readreg;
8845 	mii->mii_writereg = new_writereg;
8846 }
8847 
8848 /*
8849  * wm_get_phy_id_82575:
8850  *
8851  * Return the PHY ID, or -1 on failure.
8852  */
8853 static int
8854 wm_get_phy_id_82575(struct wm_softc *sc)
8855 {
8856 	uint32_t reg;
8857 	int phyid = -1;
8858 
8859 	/* XXX */
8860 	if ((sc->sc_flags & WM_F_SGMII) == 0)
8861 		return -1;
8862 
8863 	if (wm_sgmii_uses_mdio(sc)) {
8864 		switch (sc->sc_type) {
8865 		case WM_T_82575:
8866 		case WM_T_82576:
8867 			reg = CSR_READ(sc, WMREG_MDIC);
8868 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
8869 			break;
8870 		case WM_T_82580:
8871 		case WM_T_I350:
8872 		case WM_T_I354:
8873 		case WM_T_I210:
8874 		case WM_T_I211:
8875 			reg = CSR_READ(sc, WMREG_MDICNFG);
8876 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
8877 			break;
8878 		default:
8879 			return -1;
8880 		}
8881 	}
8882 
8883 	return phyid;
8884 }
8885 
8886 
8887 /*
8888  * wm_gmii_mediainit:
8889  *
8890  *	Initialize media for use on 1000BASE-T devices.
8891  */
8892 static void
8893 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
8894 {
8895 	device_t dev = sc->sc_dev;
8896 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8897 	struct mii_data *mii = &sc->sc_mii;
8898 	uint32_t reg;
8899 
8900 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
8901 		device_xname(sc->sc_dev), __func__));
8902 
8903 	/* We have GMII. */
8904 	sc->sc_flags |= WM_F_HAS_MII;
8905 
8906 	if (sc->sc_type == WM_T_80003)
8907 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
8908 	else
8909 		sc->sc_tipg = TIPG_1000T_DFLT;
8910 
8911 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
8912 	if ((sc->sc_type == WM_T_82580)
8913 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
8914 	    || (sc->sc_type == WM_T_I211)) {
8915 		reg = CSR_READ(sc, WMREG_PHPM);
8916 		reg &= ~PHPM_GO_LINK_D;
8917 		CSR_WRITE(sc, WMREG_PHPM, reg);
8918 	}
8919 
8920 	/*
8921 	 * Let the chip set speed/duplex on its own based on
8922 	 * signals from the PHY.
8923 	 * XXXbouyer - I'm not sure this is right for the 80003,
8924 	 * the em driver only sets CTRL_SLU here - but it seems to work.
8925 	 */
8926 	sc->sc_ctrl |= CTRL_SLU;
8927 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8928 
8929 	/* Initialize our media structures and probe the GMII. */
8930 	mii->mii_ifp = ifp;
8931 
8932 	/*
8933 	 * The first call to wm_gmii_setup_phytype. The result might be
8934 	 * incorrect.
8935 	 */
8936 	wm_gmii_setup_phytype(sc, 0, 0);
8937 
8938 	mii->mii_statchg = wm_gmii_statchg;
8939 
8940 	/* Switch PHY control from SMBus to PCIe */
8941 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
8942 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
8943 		wm_smbustopci(sc);
8944 
8945 	wm_gmii_reset(sc);
8946 
8947 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
8948 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
8949 	    wm_gmii_mediastatus);
8950 
8951 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
8952 	    || (sc->sc_type == WM_T_82580)
8953 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
8954 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
8955 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
8956 			/* Attach only one port */
8957 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
8958 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
8959 		} else {
8960 			int i, id;
8961 			uint32_t ctrl_ext;
8962 
8963 			id = wm_get_phy_id_82575(sc);
8964 			if (id != -1) {
8965 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
8966 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
8967 			}
8968 			if ((id == -1)
8969 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
8970 				/* Power on sgmii phy if it is disabled */
8971 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8972 				CSR_WRITE(sc, WMREG_CTRL_EXT,
8973 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
8974 				CSR_WRITE_FLUSH(sc);
8975 				delay(300*1000); /* XXX too long */
8976 
8977 				/* Try PHY addresses 1 through 7 */
8978 				for (i = 1; i < 8; i++)
8979 					mii_attach(sc->sc_dev, &sc->sc_mii,
8980 					    0xffffffff, i, MII_OFFSET_ANY,
8981 					    MIIF_DOPAUSE);
8982 
8983 				/* restore previous sfp cage power state */
8984 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8985 			}
8986 		}
8987 	} else {
8988 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8989 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
8990 	}
8991 
8992 	/*
8993 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
8994 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
8995 	 */
8996 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
8997 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
8998 		wm_set_mdio_slow_mode_hv(sc);
8999 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
9000 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
9001 	}
9002 
9003 	/*
9004 	 * (For ICH8 variants)
9005 	 * If PHY detection failed, use BM's r/w function and retry.
9006 	 */
9007 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
9008 		/* if failed, retry with *_bm_* */
9009 		aprint_verbose_dev(dev, "Assumed PHY access function "
9010 		    "(type = %d) might be incorrect. Use BM and retry.\n",
9011 		    sc->sc_phytype);
9012 		sc->sc_phytype = WMPHY_BM;
9013 		mii->mii_readreg = wm_gmii_bm_readreg;
9014 		mii->mii_writereg = wm_gmii_bm_writereg;
9015 
9016 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
9017 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
9018 	}
9019 
9020 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
9021 		/* No PHY was found */
9022 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
9023 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
9024 		sc->sc_phytype = WMPHY_NONE;
9025 	} else {
9026 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
9027 
9028 		/*
9029 		 * PHY found! Check the PHY type again with the second call
9030 		 * to wm_gmii_setup_phytype.
9031 		 */
9032 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
9033 		    child->mii_mpd_model);
9034 
9035 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
9036 	}
9037 }
9038 
9039 /*
9040  * wm_gmii_mediachange:	[ifmedia interface function]
9041  *
9042  *	Set hardware to newly-selected media on a 1000BASE-T device.
9043  */
9044 static int
9045 wm_gmii_mediachange(struct ifnet *ifp)
9046 {
9047 	struct wm_softc *sc = ifp->if_softc;
9048 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9049 	int rc;
9050 
9051 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
9052 		device_xname(sc->sc_dev), __func__));
9053 	if ((ifp->if_flags & IFF_UP) == 0)
9054 		return 0;
9055 
9056 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
9057 	sc->sc_ctrl |= CTRL_SLU;
9058 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9059 	    || (sc->sc_type > WM_T_82543)) {
9060 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
9061 	} else {
9062 		sc->sc_ctrl &= ~CTRL_ASDE;
9063 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
9064 		if (ife->ifm_media & IFM_FDX)
9065 			sc->sc_ctrl |= CTRL_FD;
9066 		switch (IFM_SUBTYPE(ife->ifm_media)) {
9067 		case IFM_10_T:
9068 			sc->sc_ctrl |= CTRL_SPEED_10;
9069 			break;
9070 		case IFM_100_TX:
9071 			sc->sc_ctrl |= CTRL_SPEED_100;
9072 			break;
9073 		case IFM_1000_T:
9074 			sc->sc_ctrl |= CTRL_SPEED_1000;
9075 			break;
9076 		default:
9077 			panic("wm_gmii_mediachange: bad media 0x%x",
9078 			    ife->ifm_media);
9079 		}
9080 	}
9081 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9082 	if (sc->sc_type <= WM_T_82543)
9083 		wm_gmii_reset(sc);
9084 
9085 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
9086 		return 0;
9087 	return rc;
9088 }
9089 
9090 /*
9091  * wm_gmii_mediastatus:	[ifmedia interface function]
9092  *
9093  *	Get the current interface media status on a 1000BASE-T device.
9094  */
9095 static void
9096 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9097 {
9098 	struct wm_softc *sc = ifp->if_softc;
9099 
9100 	ether_mediastatus(ifp, ifmr);
9101 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
9102 	    | sc->sc_flowflags;
9103 }
9104 
9105 #define	MDI_IO		CTRL_SWDPIN(2)
9106 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
9107 #define	MDI_CLK		CTRL_SWDPIN(3)
9108 
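/*
 * The two helpers below bit-bang a standard IEEE 802.3 clause 22 MDIO
 * frame over the software-definable pins, one bit per MDI_CLK cycle:
 *
 *	<preamble: 32 ones> <ST> <OP> <5-bit phy> <5-bit reg> <TA> <16 data>
 *
 * wm_i82543_mii_sendbits() shifts bits out MSB first; for reads, the
 * turnaround and data bits are sampled by wm_i82543_mii_recvbits().
 */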
9109 static void
9110 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
9111 {
9112 	uint32_t i, v;
9113 
9114 	v = CSR_READ(sc, WMREG_CTRL);
9115 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
9116 	v |= MDI_DIR | CTRL_SWDPIO(3);
9117 
9118 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
9119 		if (data & i)
9120 			v |= MDI_IO;
9121 		else
9122 			v &= ~MDI_IO;
9123 		CSR_WRITE(sc, WMREG_CTRL, v);
9124 		CSR_WRITE_FLUSH(sc);
9125 		delay(10);
9126 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
9127 		CSR_WRITE_FLUSH(sc);
9128 		delay(10);
9129 		CSR_WRITE(sc, WMREG_CTRL, v);
9130 		CSR_WRITE_FLUSH(sc);
9131 		delay(10);
9132 	}
9133 }
9134 
9135 static uint32_t
9136 wm_i82543_mii_recvbits(struct wm_softc *sc)
9137 {
9138 	uint32_t v, i, data = 0;
9139 
9140 	v = CSR_READ(sc, WMREG_CTRL);
9141 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
9142 	v |= CTRL_SWDPIO(3);
9143 
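	/*
	 * MDI_DIR was deliberately left clear above, so the IO pin is an
	 * input and the PHY drives the data line; the extra clock cycle
	 * below (and the one after the loop) presumably covers the
	 * turnaround bits around the 16 data bits.
	 */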
9144 	CSR_WRITE(sc, WMREG_CTRL, v);
9145 	CSR_WRITE_FLUSH(sc);
9146 	delay(10);
9147 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
9148 	CSR_WRITE_FLUSH(sc);
9149 	delay(10);
9150 	CSR_WRITE(sc, WMREG_CTRL, v);
9151 	CSR_WRITE_FLUSH(sc);
9152 	delay(10);
9153 
9154 	for (i = 0; i < 16; i++) {
9155 		data <<= 1;
9156 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
9157 		CSR_WRITE_FLUSH(sc);
9158 		delay(10);
9159 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
9160 			data |= 1;
9161 		CSR_WRITE(sc, WMREG_CTRL, v);
9162 		CSR_WRITE_FLUSH(sc);
9163 		delay(10);
9164 	}
9165 
9166 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
9167 	CSR_WRITE_FLUSH(sc);
9168 	delay(10);
9169 	CSR_WRITE(sc, WMREG_CTRL, v);
9170 	CSR_WRITE_FLUSH(sc);
9171 	delay(10);
9172 
9173 	return data;
9174 }
9175 
9176 #undef MDI_IO
9177 #undef MDI_DIR
9178 #undef MDI_CLK
9179 
9180 /*
9181  * wm_gmii_i82543_readreg:	[mii interface function]
9182  *
9183  *	Read a PHY register on the GMII (i82543 version).
9184  */
9185 static int
9186 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
9187 {
9188 	struct wm_softc *sc = device_private(self);
9189 	int rv;
9190 
9191 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
9192 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
9193 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
9194 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
9195 
9196 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
9197 	    device_xname(sc->sc_dev), phy, reg, rv));
9198 
9199 	return rv;
9200 }
9201 
9202 /*
9203  * wm_gmii_i82543_writereg:	[mii interface function]
9204  *
9205  *	Write a PHY register on the GMII (i82543 version).
9206  */
9207 static void
9208 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
9209 {
9210 	struct wm_softc *sc = device_private(self);
9211 
9212 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
9213 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
9214 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
9215 	    (MII_COMMAND_START << 30), 32);
9216 }
9217 
9218 /*
9219  * wm_gmii_mdic_readreg:	[mii interface function]
9220  *
9221  *	Read a PHY register on the GMII.
9222  */
9223 static int
9224 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
9225 {
9226 	struct wm_softc *sc = device_private(self);
9227 	uint32_t mdic = 0;
9228 	int i, rv;
9229 
9230 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
9231 	    MDIC_REGADD(reg));
9232 
9233 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
9234 		mdic = CSR_READ(sc, WMREG_MDIC);
9235 		if (mdic & MDIC_READY)
9236 			break;
9237 		delay(50);
9238 	}
9239 
9240 	if ((mdic & MDIC_READY) == 0) {
9241 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
9242 		    device_xname(sc->sc_dev), phy, reg);
9243 		rv = 0;
9244 	} else if (mdic & MDIC_E) {
9245 #if 0 /* This is normal if no PHY is present. */
9246 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
9247 		    device_xname(sc->sc_dev), phy, reg);
9248 #endif
9249 		rv = 0;
9250 	} else {
9251 		rv = MDIC_DATA(mdic);
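		/*
		 * All-ones usually means there is no device at this PHY
		 * address, so treat it as a failed read.
		 */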
9252 		if (rv == 0xffff)
9253 			rv = 0;
9254 	}
9255 
9256 	return rv;
9257 }
9258 
9259 /*
9260  * wm_gmii_mdic_writereg:	[mii interface function]
9261  *
9262  *	Write a PHY register on the GMII.
9263  */
9264 static void
9265 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
9266 {
9267 	struct wm_softc *sc = device_private(self);
9268 	uint32_t mdic = 0;
9269 	int i;
9270 
9271 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
9272 	    MDIC_REGADD(reg) | MDIC_DATA(val));
9273 
9274 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
9275 		mdic = CSR_READ(sc, WMREG_MDIC);
9276 		if (mdic & MDIC_READY)
9277 			break;
9278 		delay(50);
9279 	}
9280 
9281 	if ((mdic & MDIC_READY) == 0)
9282 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
9283 		    device_xname(sc->sc_dev), phy, reg);
9284 	else if (mdic & MDIC_E)
9285 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
9286 		    device_xname(sc->sc_dev), phy, reg);
9287 }
9288 
9289 /*
9290  * wm_gmii_i82544_readreg:	[mii interface function]
9291  *
9292  *	Read a PHY register on the GMII.
9293  */
9294 static int
9295 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
9296 {
9297 	struct wm_softc *sc = device_private(self);
9298 	int rv;
9299 
9300 	if (sc->phy.acquire(sc)) {
9301 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9302 		    __func__);
9303 		return 0;
9304 	}
9305 	rv = wm_gmii_mdic_readreg(self, phy, reg);
9306 	sc->phy.release(sc);
9307 
9308 	return rv;
9309 }
9310 
9311 /*
9312  * wm_gmii_i82544_writereg:	[mii interface function]
9313  *
9314  *	Write a PHY register on the GMII.
9315  */
9316 static void
9317 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
9318 {
9319 	struct wm_softc *sc = device_private(self);
9320 
9321 	if (sc->phy.acquire(sc)) {
9322 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9323 		    __func__);
9324 	}
9325 	wm_gmii_mdic_writereg(self, phy, reg, val);
9326 	sc->phy.release(sc);
9327 }
9328 
9329 /*
9330  * wm_gmii_i80003_readreg:	[mii interface function]
9331  *
9332  *	Read a PHY register on the Kumeran bus.
9333  * This could be handled by the PHY layer if we didn't have to lock the
9334  * resource ...
9335  */
9336 static int
9337 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
9338 {
9339 	struct wm_softc *sc = device_private(self);
9340 	int rv;
9341 
9342 	if (phy != 1) /* only one PHY on kumeran bus */
9343 		return 0;
9344 
9345 	if (sc->phy.acquire(sc)) {
9346 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9347 		    __func__);
9348 		return 0;
9349 	}
9350 
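	/*
	 * Select the page: registers below GG82563_MIN_ALT_REG go through
	 * the normal page select register, higher ones through the
	 * alternate page select register.
	 */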
9351 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
9352 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
9353 		    reg >> GG82563_PAGE_SHIFT);
9354 	} else {
9355 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
9356 		    reg >> GG82563_PAGE_SHIFT);
9357 	}
9358 	/* Wait an extra 200us to work around the MDIC ready bit bug */
9359 	delay(200);
9360 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
9361 	delay(200);
9362 	sc->phy.release(sc);
9363 
9364 	return rv;
9365 }
9366 
9367 /*
9368  * wm_gmii_i80003_writereg:	[mii interface function]
9369  *
9370  *	Write a PHY register on the Kumeran bus.
9371  * This could be handled by the PHY layer if we didn't have to lock the
9372  * resource ...
9373  */
9374 static void
9375 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
9376 {
9377 	struct wm_softc *sc = device_private(self);
9378 
9379 	if (phy != 1) /* only one PHY on kumeran bus */
9380 		return;
9381 
9382 	if (sc->phy.acquire(sc)) {
9383 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9384 		    __func__);
9385 		return;
9386 	}
9387 
9388 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
9389 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
9390 		    reg >> GG82563_PAGE_SHIFT);
9391 	} else {
9392 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
9393 		    reg >> GG82563_PAGE_SHIFT);
9394 	}
9395 	/* Wait an extra 200us to work around the MDIC ready bit bug */
9396 	delay(200);
9397 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
9398 	delay(200);
9399 
9400 	sc->phy.release(sc);
9401 }
9402 
9403 /*
9404  * wm_gmii_bm_readreg:	[mii interface function]
9405  *
9406  *	Read a PHY register on the BM PHY (82567 variants).
9407  * This could be handled by the PHY layer if we didn't have to lock the
9408  * resource ...
9409  */
9410 static int
9411 wm_gmii_bm_readreg(device_t self, int phy, int reg)
9412 {
9413 	struct wm_softc *sc = device_private(self);
9414 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
9415 	uint16_t val;
9416 	int rv;
9417 
9418 	if (sc->phy.acquire(sc)) {
9419 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9420 		    __func__);
9421 		return 0;
9422 	}
9423 
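	/*
	 * XXX Some registers are only reachable through PHY address 1:
	 * everything on page 768 and above, register 25 on page 0, and
	 * register 31 on any page.
	 */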
9424 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
9425 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
9426 		    || (reg == 31)) ? 1 : phy;
9427 	/* Page 800 works differently than the rest so it has its own func */
9428 	if (page == BM_WUC_PAGE) {
9429 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
9430 		rv = val;
9431 		goto release;
9432 	}
9433 
9434 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
9435 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
9436 		    && (sc->sc_type != WM_T_82583))
9437 			wm_gmii_mdic_writereg(self, phy,
9438 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
9439 		else
9440 			wm_gmii_mdic_writereg(self, phy,
9441 			    BME1000_PHY_PAGE_SELECT, page);
9442 	}
9443 
9444 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
9445 
9446 release:
9447 	sc->phy.release(sc);
9448 	return rv;
9449 }
9450 
9451 /*
9452  * wm_gmii_bm_writereg:	[mii interface function]
9453  *
9454  *	Write a PHY register on the BM PHY (82567 variants).
9455  * This could be handled by the PHY layer if we didn't have to lock the
9456  * resource ...
9457  */
9458 static void
9459 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
9460 {
9461 	struct wm_softc *sc = device_private(self);
9462 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
9463 
9464 	if (sc->phy.acquire(sc)) {
9465 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9466 		    __func__);
9467 		return;
9468 	}
9469 
9470 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
9471 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
9472 		    || (reg == 31)) ? 1 : phy;
9473 	/* Page 800 works differently than the rest so it has its own func */
9474 	if (page == BM_WUC_PAGE) {
9475 		uint16_t tmp;
9476 
9477 		tmp = val;
9478 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
9479 		goto release;
9480 	}
9481 
9482 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
9483 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
9484 		    && (sc->sc_type != WM_T_82583))
9485 			wm_gmii_mdic_writereg(self, phy,
9486 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
9487 		else
9488 			wm_gmii_mdic_writereg(self, phy,
9489 			    BME1000_PHY_PAGE_SELECT, page);
9490 	}
9491 
9492 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
9493 
9494 release:
9495 	sc->phy.release(sc);
9496 }
9497 
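/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read (rd != 0) or write a BM PHY wakeup register on page 800.
 *	The sequence, mirroring the three steps commented below, is:
 *	enable wakeup register access through the WUC enable register on
 *	page 769, access the target register through the address/data
 *	opcode registers on page 800, then restore the saved WUC value.
 */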
9498 static void
9499 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
9500 {
9501 	struct wm_softc *sc = device_private(self);
9502 	uint16_t regnum = BM_PHY_REG_NUM(offset);
9503 	uint16_t wuce, reg;
9504 
9505 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
9506 		device_xname(sc->sc_dev), __func__));
9507 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
9508 	if (sc->sc_type == WM_T_PCH) {
9509 		/* XXX The e1000 driver does nothing here... why? */
9510 	}
9511 
9512 	/*
9513 	 * 1) Enable PHY wakeup register first.
9514 	 * See e1000_enable_phy_wakeup_reg_access_bm().
9515 	 */
9516 
9517 	/* Set page 769 */
9518 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
9519 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
9520 
9521 	/* Read WUCE and save it */
9522 	wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
9523 
9524 	reg = wuce | BM_WUC_ENABLE_BIT;
9525 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
9526 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, reg);
9527 
9528 	/* Select page 800 */
9529 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
9530 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
9531 
9532 	/*
9533 	 * 2) Access PHY wakeup register.
9534 	 * See e1000_access_phy_wakeup_reg_bm.
9535 	 */
9536 
9537 	/* Write page 800 */
9538 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
9539 
9540 	if (rd)
9541 		*val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
9542 	else
9543 		wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
9544 
9545 	/*
9546 	 * 3) Disable PHY wakeup register.
9547 	 * See e1000_disable_phy_wakeup_reg_access_bm().
9548 	 */
9549 	/* Set page 769 */
9550 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
9551 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
9552 
9553 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
9554 }
9555 
9556 /*
9557  * wm_gmii_hv_readreg:	[mii interface function]
9558  *
9559  *	Read a PHY register on the HV PHY (82577/82578 and later PCH PHYs).
9560  * This could be handled by the PHY layer if we didn't have to lock the
9561  * resource ...
9562  */
9563 static int
9564 wm_gmii_hv_readreg(device_t self, int phy, int reg)
9565 {
9566 	struct wm_softc *sc = device_private(self);
9567 	int rv;
9568 
9569 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
9570 		device_xname(sc->sc_dev), __func__));
9571 	if (sc->phy.acquire(sc)) {
9572 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9573 		    __func__);
9574 		return 0;
9575 	}
9576 
9577 	rv = wm_gmii_hv_readreg_locked(self, phy, reg);
9578 	sc->phy.release(sc);
9579 	return rv;
9580 }
9581 
9582 static int
9583 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
9584 {
9585 	uint16_t page = BM_PHY_REG_PAGE(reg);
9586 	uint16_t regnum = BM_PHY_REG_NUM(reg);
9587 	uint16_t val;
9588 	int rv;
9589 
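	/*
	 * Registers on pages at or above HV_INTC_FC_PAGE_START are only
	 * reachable through PHY address 1, so redirect the access there.
	 */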
9590 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
9591 
9592 	/* Page 800 works differently than the rest so it has its own func */
9593 	if (page == BM_WUC_PAGE) {
9594 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
9595 		return val;
9596 	}
9597 
9598 	/*
9599 	 * Pages lower than 768 work differently than the rest, so they
9600 	 * have their own func
9601 	 */
9602 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
9603 		printf("gmii_hv_readreg!!!\n");
9604 		return 0;
9605 	}
9606 
9607 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
9608 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
9609 		    page << BME1000_PAGE_SHIFT);
9610 	}
9611 
9612 	rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK);
9613 	return rv;
9614 }
9615 
9616 /*
9617  * wm_gmii_hv_writereg:	[mii interface function]
9618  *
9619  *	Write a PHY register on the HV PHY (82577/82578 and later PCH PHYs).
9620  * This could be handled by the PHY layer if we didn't have to lock the
9621  * resource ...
9622  */
9623 static void
9624 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
9625 {
9626 	struct wm_softc *sc = device_private(self);
9627 
9628 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
9629 		device_xname(sc->sc_dev), __func__));
9630 
9631 	if (sc->phy.acquire(sc)) {
9632 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9633 		    __func__);
9634 		return;
9635 	}
9636 
9637 	wm_gmii_hv_writereg_locked(self, phy, reg, val);
9638 	sc->phy.release(sc);
9639 }
9640 
9641 static void
9642 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
9643 {
9644 	struct wm_softc *sc = device_private(self);
9645 	uint16_t page = BM_PHY_REG_PAGE(reg);
9646 	uint16_t regnum = BM_PHY_REG_NUM(reg);
9647 
9648 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
9649 
9650 	/* Page 800 works differently than the rest so it has its own func */
9651 	if (page == BM_WUC_PAGE) {
9652 		uint16_t tmp;
9653 
9654 		tmp = val;
9655 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
9656 		return;
9657 	}
9658 
9659 	/*
9660 	 * Pages lower than 768 work differently than the rest, so they
9661 	 * have their own func
9662 	 */
9663 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
9664 		printf("gmii_hv_writereg!!!\n");
9665 		return;
9666 	}
9667 
9668 	{
9669 		/*
9670 		 * XXX Workaround MDIO accesses being disabled after entering
9671 		 * IEEE Power Down (whenever bit 11 of the PHY control
9672 		 * register is set)
9673 		 */
9674 		if (sc->sc_phytype == WMPHY_82578) {
9675 			struct mii_softc *child;
9676 
9677 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
9678 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
9679 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
9680 			    && ((val & (1 << 11)) != 0)) {
9681 				printf("XXX need workaround\n");
9682 			}
9683 		}
9684 
9685 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
9686 			wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
9687 			    page << BME1000_PAGE_SHIFT);
9688 		}
9689 	}
9690 
9691 	wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val);
9692 }
9693 
9694 /*
9695  * wm_gmii_82580_readreg:	[mii interface function]
9696  *
9697  *	Read a PHY register on the 82580 and I350.
9698  * This could be handled by the PHY layer if we didn't have to lock the
9699  * resource ...
9700  */
9701 static int
9702 wm_gmii_82580_readreg(device_t self, int phy, int reg)
9703 {
9704 	struct wm_softc *sc = device_private(self);
9705 	int rv;
9706 
9707 	if (sc->phy.acquire(sc) != 0) {
9708 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9709 		    __func__);
9710 		return 0;
9711 	}
9712 
9713 	rv = wm_gmii_mdic_readreg(self, phy, reg);
9714 
9715 	sc->phy.release(sc);
9716 	return rv;
9717 }
9718 
9719 /*
9720  * wm_gmii_82580_writereg:	[mii interface function]
9721  *
9722  *	Write a PHY register on the 82580 and I350.
9723  * This could be handled by the PHY layer if we didn't have to lock the
9724  * resource ...
9725  */
9726 static void
9727 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
9728 {
9729 	struct wm_softc *sc = device_private(self);
9730 
9731 	if (sc->phy.acquire(sc) != 0) {
9732 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9733 		    __func__);
9734 		return;
9735 	}
9736 
9737 	wm_gmii_mdic_writereg(self, phy, reg, val);
9738 
9739 	sc->phy.release(sc);
9740 }
9741 
9742 /*
9743  * wm_gmii_gs40g_readreg:	[mii interface function]
9744  *
9745  *	Read a PHY register on the I210 and I211.
9746  * This could be handled by the PHY layer if we didn't have to lock the
9747  * resource ...
9748  */
9749 static int
9750 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
9751 {
9752 	struct wm_softc *sc = device_private(self);
9753 	int page, offset;
9754 	int rv;
9755 
9756 	/* Acquire semaphore */
9757 	if (sc->phy.acquire(sc)) {
9758 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9759 		    __func__);
9760 		return 0;
9761 	}
9762 
9763 	/* Page select */
9764 	page = reg >> GS40G_PAGE_SHIFT;
9765 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
9766 
9767 	/* Read reg */
9768 	offset = reg & GS40G_OFFSET_MASK;
9769 	rv = wm_gmii_mdic_readreg(self, phy, offset);
9770 
9771 	sc->phy.release(sc);
9772 	return rv;
9773 }
9774 
9775 /*
9776  * wm_gmii_gs40g_writereg:	[mii interface function]
9777  *
9778  *	Write a PHY register on the I210 and I211.
9779  * This could be handled by the PHY layer if we didn't have to lock the
9780  * resource ...
9781  */
9782 static void
9783 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
9784 {
9785 	struct wm_softc *sc = device_private(self);
9786 	int page, offset;
9787 
9788 	/* Acquire semaphore */
9789 	if (sc->phy.acquire(sc)) {
9790 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9791 		    __func__);
9792 		return;
9793 	}
9794 
9795 	/* Page select */
9796 	page = reg >> GS40G_PAGE_SHIFT;
9797 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
9798 
9799 	/* Write reg */
9800 	offset = reg & GS40G_OFFSET_MASK;
9801 	wm_gmii_mdic_writereg(self, phy, offset, val);
9802 
9803 	/* Release semaphore */
9804 	sc->phy.release(sc);
9805 }
9806 
9807 /*
9808  * wm_gmii_statchg:	[mii interface function]
9809  *
9810  *	Callback from MII layer when media changes.
9811  */
9812 static void
9813 wm_gmii_statchg(struct ifnet *ifp)
9814 {
9815 	struct wm_softc *sc = ifp->if_softc;
9816 	struct mii_data *mii = &sc->sc_mii;
9817 
9818 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
9819 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
9820 	sc->sc_fcrtl &= ~FCRTL_XONE;
9821 
9822 	/*
9823 	 * Get flow control negotiation result.
9824 	 */
9825 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
9826 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
9827 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
9828 		mii->mii_media_active &= ~IFM_ETH_FMASK;
9829 	}
9830 
9831 	if (sc->sc_flowflags & IFM_FLOW) {
9832 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
9833 			sc->sc_ctrl |= CTRL_TFCE;
9834 			sc->sc_fcrtl |= FCRTL_XONE;
9835 		}
9836 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
9837 			sc->sc_ctrl |= CTRL_RFCE;
9838 	}
9839 
9840 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
9841 		DPRINTF(WM_DEBUG_LINK,
9842 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
9843 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
9844 	} else {
9845 		DPRINTF(WM_DEBUG_LINK,
9846 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
9847 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
9848 	}
9849 
9850 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9851 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
9852 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
9853 						 : WMREG_FCRTL, sc->sc_fcrtl);
9854 	if (sc->sc_type == WM_T_80003) {
9855 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
9856 		case IFM_1000_T:
9857 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
9858 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
9859 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
9860 			break;
9861 		default:
9862 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
9863 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
9864 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
9865 			break;
9866 		}
9867 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
9868 	}
9869 }
9870 
9871 /* Kumeran related (80003, ICH* and PCH*) */
9872 
9873 /*
9874  * wm_kmrn_readreg:
9875  *
9876  *	Read a Kumeran register
9877  */
9878 static int
9879 wm_kmrn_readreg(struct wm_softc *sc, int reg)
9880 {
9881 	int rv;
9882 
9883 	if (sc->sc_type == WM_T_80003)
9884 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9885 	else
9886 		rv = sc->phy.acquire(sc);
9887 	if (rv != 0) {
9888 		aprint_error_dev(sc->sc_dev,
9889 		    "%s: failed to get semaphore\n", __func__);
9890 		return 0;
9891 	}
9892 
9893 	rv = wm_kmrn_readreg_locked(sc, reg);
9894 
9895 	if (sc->sc_type == WM_T_80003)
9896 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9897 	else
9898 		sc->phy.release(sc);
9899 
9900 	return rv;
9901 }
9902 
9903 static int
9904 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
9905 {
9906 	int rv;
9907 
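	/*
	 * Program the register offset together with the Read ENable bit;
	 * the hardware fetches the Kumeran register into KUMCTRLSTA, and
	 * the short delay below gives it time to complete.
	 */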
9908 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
9909 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
9910 	    KUMCTRLSTA_REN);
9911 	CSR_WRITE_FLUSH(sc);
9912 	delay(2);
9913 
9914 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
9915 
9916 	return rv;
9917 }
9918 
9919 /*
9920  * wm_kmrn_writereg:
9921  *
9922  *	Write a Kumeran register
9923  */
9924 static void
9925 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
9926 {
9927 	int rv;
9928 
9929 	if (sc->sc_type == WM_T_80003)
9930 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9931 	else
9932 		rv = sc->phy.acquire(sc);
9933 	if (rv != 0) {
9934 		aprint_error_dev(sc->sc_dev,
9935 		    "%s: failed to get semaphore\n", __func__);
9936 		return;
9937 	}
9938 
9939 	wm_kmrn_writereg_locked(sc, reg, val);
9940 
9941 	if (sc->sc_type == WM_T_80003)
9942 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9943 	else
9944 		sc->phy.release(sc);
9945 }
9946 
9947 static void
9948 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
9949 {
9950 
9951 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
9952 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
9953 	    (val & KUMCTRLSTA_MASK));
9954 }
9955 
9956 /* SGMII related */
9957 
9958 /*
9959  * wm_sgmii_uses_mdio
9960  *
9961  * Check whether the transaction is to the internal PHY or the external
9962  * MDIO interface. Return true if it's MDIO.
9963  */
9964 static bool
9965 wm_sgmii_uses_mdio(struct wm_softc *sc)
9966 {
9967 	uint32_t reg;
9968 	bool ismdio = false;
9969 
9970 	switch (sc->sc_type) {
9971 	case WM_T_82575:
9972 	case WM_T_82576:
9973 		reg = CSR_READ(sc, WMREG_MDIC);
9974 		ismdio = ((reg & MDIC_DEST) != 0);
9975 		break;
9976 	case WM_T_82580:
9977 	case WM_T_I350:
9978 	case WM_T_I354:
9979 	case WM_T_I210:
9980 	case WM_T_I211:
9981 		reg = CSR_READ(sc, WMREG_MDICNFG);
9982 		ismdio = ((reg & MDICNFG_DEST) != 0);
9983 		break;
9984 	default:
9985 		break;
9986 	}
9987 
9988 	return ismdio;
9989 }
9990 
9991 /*
9992  * wm_sgmii_readreg:	[mii interface function]
9993  *
9994  *	Read a PHY register on the SGMII
9995  * This could be handled by the PHY layer if we didn't have to lock the
9996  * resource ...
9997  */
9998 static int
9999 wm_sgmii_readreg(device_t self, int phy, int reg)
10000 {
10001 	struct wm_softc *sc = device_private(self);
10002 	uint32_t i2ccmd;
10003 	int i, rv;
10004 
10005 	if (sc->phy.acquire(sc)) {
10006 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10007 		    __func__);
10008 		return 0;
10009 	}
10010 
10011 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
10012 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
10013 	    | I2CCMD_OPCODE_READ;
10014 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
10015 
10016 	/* Poll the ready bit */
10017 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
10018 		delay(50);
10019 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
10020 		if (i2ccmd & I2CCMD_READY)
10021 			break;
10022 	}
10023 	if ((i2ccmd & I2CCMD_READY) == 0)
10024 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
10025 	if ((i2ccmd & I2CCMD_ERROR) != 0)
10026 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
10027 
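	/* The read data arrives byte-swapped on the I2C interface. */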
10028 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
10029 
10030 	sc->phy.release(sc);
10031 	return rv;
10032 }
10033 
10034 /*
10035  * wm_sgmii_writereg:	[mii interface function]
10036  *
10037  *	Write a PHY register on the SGMII.
10038  * This could be handled by the PHY layer if we didn't have to lock the
10039  * resource ...
10040  */
10041 static void
10042 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
10043 {
10044 	struct wm_softc *sc = device_private(self);
10045 	uint32_t i2ccmd;
10046 	int i;
10047 	int val_swapped;
10048 
10049 	if (sc->phy.acquire(sc) != 0) {
10050 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10051 		    __func__);
10052 		return;
10053 	}
10054 	/* Swap the data bytes for the I2C interface */
10055 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
10056 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
10057 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
10058 	    | I2CCMD_OPCODE_WRITE | val_swapped;
10059 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
10060 
10061 	/* Poll the ready bit */
10062 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
10063 		delay(50);
10064 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
10065 		if (i2ccmd & I2CCMD_READY)
10066 			break;
10067 	}
10068 	if ((i2ccmd & I2CCMD_READY) == 0)
10069 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
10070 	if ((i2ccmd & I2CCMD_ERROR) != 0)
10071 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
10072 
10073 	sc->phy.release(sc);
10074 }
10075 
10076 /* TBI related */
10077 
10078 /*
10079  * wm_tbi_mediainit:
10080  *
10081  *	Initialize media for use on 1000BASE-X devices.
10082  */
10083 static void
10084 wm_tbi_mediainit(struct wm_softc *sc)
10085 {
10086 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10087 	const char *sep = "";
10088 
10089 	if (sc->sc_type < WM_T_82543)
10090 		sc->sc_tipg = TIPG_WM_DFLT;
10091 	else
10092 		sc->sc_tipg = TIPG_LG_DFLT;
10093 
10094 	sc->sc_tbi_serdes_anegticks = 5;
10095 
10096 	/* Initialize our media structures */
10097 	sc->sc_mii.mii_ifp = ifp;
10098 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
10099 
10100 	if ((sc->sc_type >= WM_T_82575)
10101 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
10102 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
10103 		    wm_serdes_mediachange, wm_serdes_mediastatus);
10104 	else
10105 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
10106 		    wm_tbi_mediachange, wm_tbi_mediastatus);
10107 
10108 	/*
10109 	 * SWD Pins:
10110 	 *
10111 	 *	0 = Link LED (output)
10112 	 *	1 = Loss Of Signal (input)
10113 	 */
10114 	sc->sc_ctrl |= CTRL_SWDPIO(0);
10115 
10116 	/* XXX Perhaps this is only for TBI */
10117 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
10118 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
10119 
10120 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
10121 		sc->sc_ctrl &= ~CTRL_LRST;
10122 
10123 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10124 
10125 #define	ADD(ss, mm, dd)							\
10126 do {									\
10127 	aprint_normal("%s%s", sep, ss);					\
10128 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
10129 	sep = ", ";							\
10130 } while (/*CONSTCOND*/0)
10131 
10132 	aprint_normal_dev(sc->sc_dev, "");
10133 
10134 	if (sc->sc_type == WM_T_I354) {
10135 		uint32_t status;
10136 
10137 		status = CSR_READ(sc, WMREG_STATUS);
10138 		if (((status & STATUS_2P5_SKU) != 0)
10139 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
10140 			ADD("2500baseKX-FDX", IFM_2500_SX | IFM_FDX,ANAR_X_FD);
10141 		} else
10142 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX,ANAR_X_FD);
10143 	} else if (sc->sc_type == WM_T_82545) {
10144 		/* Only 82545 is LX (XXX except SFP) */
10145 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
10146 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
10147 	} else {
10148 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
10149 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
10150 	}
10151 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
10152 	aprint_normal("\n");
10153 
10154 #undef ADD
10155 
10156 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
10157 }
10158 
10159 /*
10160  * wm_tbi_mediachange:	[ifmedia interface function]
10161  *
10162  *	Set hardware to newly-selected media on a 1000BASE-X device.
10163  */
10164 static int
10165 wm_tbi_mediachange(struct ifnet *ifp)
10166 {
10167 	struct wm_softc *sc = ifp->if_softc;
10168 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
10169 	uint32_t status;
10170 	int i;
10171 
10172 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
10173 		/* XXX need some work for >= 82571 and < 82575 */
10174 		if (sc->sc_type < WM_T_82575)
10175 			return 0;
10176 	}
10177 
10178 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
10179 	    || (sc->sc_type >= WM_T_82575))
10180 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
10181 
10182 	sc->sc_ctrl &= ~CTRL_LRST;
10183 	sc->sc_txcw = TXCW_ANE;
10184 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
10185 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
10186 	else if (ife->ifm_media & IFM_FDX)
10187 		sc->sc_txcw |= TXCW_FD;
10188 	else
10189 		sc->sc_txcw |= TXCW_HD;
10190 
10191 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
10192 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
10193 
10194 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
10195 		    device_xname(sc->sc_dev), sc->sc_txcw));
10196 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
10197 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10198 	CSR_WRITE_FLUSH(sc);
10199 	delay(1000);
10200 
10201 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
10202 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
10203 
10204 	/*
10205 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
10206 	 * if the optics detect a signal, and clear if they don't.
10207 	 */
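	/*
	 * XXX The test below accepts "signal asserted" on chips newer than
	 * the 82544 and always falls through when i == 0, which also covers
	 * the chips that cannot report signal presence.
	 */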
10208 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
10209 		/* Have signal; wait for the link to come up. */
10210 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
10211 			delay(10000);
10212 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
10213 				break;
10214 		}
10215 
10216 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
10217 			    device_xname(sc->sc_dev),i));
10218 
10219 		status = CSR_READ(sc, WMREG_STATUS);
10220 		DPRINTF(WM_DEBUG_LINK,
10221 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
10222 			device_xname(sc->sc_dev),status, STATUS_LU));
10223 		if (status & STATUS_LU) {
10224 			/* Link is up. */
10225 			DPRINTF(WM_DEBUG_LINK,
10226 			    ("%s: LINK: set media -> link up %s\n",
10227 			    device_xname(sc->sc_dev),
10228 			    (status & STATUS_FD) ? "FDX" : "HDX"));
10229 
10230 			/*
10231 			 * NOTE: CTRL will update TFCE and RFCE automatically,
10232 			 * so we should update sc->sc_ctrl
10233 			 */
10234 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
10235 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
10236 			sc->sc_fcrtl &= ~FCRTL_XONE;
10237 			if (status & STATUS_FD)
10238 				sc->sc_tctl |=
10239 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
10240 			else
10241 				sc->sc_tctl |=
10242 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
10243 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
10244 				sc->sc_fcrtl |= FCRTL_XONE;
10245 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
10246 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
10247 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
10248 				      sc->sc_fcrtl);
10249 			sc->sc_tbi_linkup = 1;
10250 		} else {
10251 			if (i == WM_LINKUP_TIMEOUT)
10252 				wm_check_for_link(sc);
10253 			/* Link is down. */
10254 			DPRINTF(WM_DEBUG_LINK,
10255 			    ("%s: LINK: set media -> link down\n",
10256 			    device_xname(sc->sc_dev)));
10257 			sc->sc_tbi_linkup = 0;
10258 		}
10259 	} else {
10260 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
10261 		    device_xname(sc->sc_dev)));
10262 		sc->sc_tbi_linkup = 0;
10263 	}
10264 
10265 	wm_tbi_serdes_set_linkled(sc);
10266 
10267 	return 0;
10268 }
10269 
10270 /*
10271  * wm_tbi_mediastatus:	[ifmedia interface function]
10272  *
10273  *	Get the current interface media status on a 1000BASE-X device.
10274  */
10275 static void
10276 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
10277 {
10278 	struct wm_softc *sc = ifp->if_softc;
10279 	uint32_t ctrl, status;
10280 
10281 	ifmr->ifm_status = IFM_AVALID;
10282 	ifmr->ifm_active = IFM_ETHER;
10283 
10284 	status = CSR_READ(sc, WMREG_STATUS);
10285 	if ((status & STATUS_LU) == 0) {
10286 		ifmr->ifm_active |= IFM_NONE;
10287 		return;
10288 	}
10289 
10290 	ifmr->ifm_status |= IFM_ACTIVE;
10291 	/* Only 82545 is LX */
10292 	if (sc->sc_type == WM_T_82545)
10293 		ifmr->ifm_active |= IFM_1000_LX;
10294 	else
10295 		ifmr->ifm_active |= IFM_1000_SX;
10296 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
10297 		ifmr->ifm_active |= IFM_FDX;
10298 	else
10299 		ifmr->ifm_active |= IFM_HDX;
10300 	ctrl = CSR_READ(sc, WMREG_CTRL);
10301 	if (ctrl & CTRL_RFCE)
10302 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
10303 	if (ctrl & CTRL_TFCE)
10304 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
10305 }
10306 
10307 /* XXX TBI only */
10308 static int
10309 wm_check_for_link(struct wm_softc *sc)
10310 {
10311 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
10312 	uint32_t rxcw;
10313 	uint32_t ctrl;
10314 	uint32_t status;
10315 	uint32_t sig;
10316 
10317 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
10318 		/* XXX need some work for >= 82571 */
10319 		if (sc->sc_type >= WM_T_82571) {
10320 			sc->sc_tbi_linkup = 1;
10321 			return 0;
10322 		}
10323 	}
10324 
10325 	rxcw = CSR_READ(sc, WMREG_RXCW);
10326 	ctrl = CSR_READ(sc, WMREG_CTRL);
10327 	status = CSR_READ(sc, WMREG_STATUS);
10328 
10329 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
10330 
10331 	DPRINTF(WM_DEBUG_LINK,
10332 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
10333 		device_xname(sc->sc_dev), __func__,
10334 		((ctrl & CTRL_SWDPIN(1)) == sig),
10335 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
10336 
10337 	/*
10338 	 * SWDPIN   LU RXCW
10339 	 *      0    0    0
10340 	 *      0    0    1	(should not happen)
10341 	 *      0    1    0	(should not happen)
10342 	 *      0    1    1	(should not happen)
10343 	 *      1    0    0	Disable autonego and force linkup
10344 	 *      1    0    1	got /C/ but not linkup yet
10345 	 *      1    1    0	(linkup)
10346 	 *      1    1    1	If IFM_AUTO, back to autonego
10347 	 *
10348 	 */
10349 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
10350 	    && ((status & STATUS_LU) == 0)
10351 	    && ((rxcw & RXCW_C) == 0)) {
10352 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
10353 			__func__));
10354 		sc->sc_tbi_linkup = 0;
10355 		/* Disable auto-negotiation in the TXCW register */
10356 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
10357 
10358 		/*
10359 		 * Force link-up and also force full-duplex.
10360 		 *
10361 		 * NOTE: CTRL updates TFCE and RFCE automatically,
10362 		 * so we should update sc->sc_ctrl
10363 		 */
10364 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
10365 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10366 	} else if (((status & STATUS_LU) != 0)
10367 	    && ((rxcw & RXCW_C) != 0)
10368 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
10369 		sc->sc_tbi_linkup = 1;
10370 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
10371 			__func__));
10372 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
10373 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
10374 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
10375 	    && ((rxcw & RXCW_C) != 0)) {
10376 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
10377 	} else {
10378 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
10379 			status));
10380 	}
10381 
10382 	return 0;
10383 }
10384 
10385 /*
10386  * wm_tbi_tick:
10387  *
10388  *	Check the link on TBI devices.
10389  *	This function acts as mii_tick().
10390  */
10391 static void
10392 wm_tbi_tick(struct wm_softc *sc)
10393 {
10394 	struct mii_data *mii = &sc->sc_mii;
10395 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
10396 	uint32_t status;
10397 
10398 	KASSERT(WM_CORE_LOCKED(sc));
10399 
10400 	status = CSR_READ(sc, WMREG_STATUS);
10401 
10402 	/* XXX is this needed? */
10403 	(void)CSR_READ(sc, WMREG_RXCW);
10404 	(void)CSR_READ(sc, WMREG_CTRL);
10405 
10406 	/* set link status */
10407 	if ((status & STATUS_LU) == 0) {
10408 		DPRINTF(WM_DEBUG_LINK,
10409 		    ("%s: LINK: checklink -> down\n",
10410 			device_xname(sc->sc_dev)));
10411 		sc->sc_tbi_linkup = 0;
10412 	} else if (sc->sc_tbi_linkup == 0) {
10413 		DPRINTF(WM_DEBUG_LINK,
10414 		    ("%s: LINK: checklink -> up %s\n",
10415 			device_xname(sc->sc_dev),
10416 			(status & STATUS_FD) ? "FDX" : "HDX"));
10417 		sc->sc_tbi_linkup = 1;
10418 		sc->sc_tbi_serdes_ticks = 0;
10419 	}
10420 
10421 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
10422 		goto setled;
10423 
10424 	if ((status & STATUS_LU) == 0) {
10425 		sc->sc_tbi_linkup = 0;
10426 		/* If the timer expired, retry autonegotiation */
10427 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
10428 		    && (++sc->sc_tbi_serdes_ticks
10429 			>= sc->sc_tbi_serdes_anegticks)) {
10430 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
10431 			sc->sc_tbi_serdes_ticks = 0;
10432 			/*
10433 			 * Reset the link, and let autonegotiation do
10434 			 * its thing
10435 			 */
10436 			sc->sc_ctrl |= CTRL_LRST;
10437 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10438 			CSR_WRITE_FLUSH(sc);
10439 			delay(1000);
10440 			sc->sc_ctrl &= ~CTRL_LRST;
10441 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10442 			CSR_WRITE_FLUSH(sc);
10443 			delay(1000);
10444 			CSR_WRITE(sc, WMREG_TXCW,
10445 			    sc->sc_txcw & ~TXCW_ANE);
10446 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
10447 		}
10448 	}
10449 
10450 setled:
10451 	wm_tbi_serdes_set_linkled(sc);
10452 }
10453 
10454 /* SERDES related */
10455 static void
10456 wm_serdes_power_up_link_82575(struct wm_softc *sc)
10457 {
10458 	uint32_t reg;
10459 
10460 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
10461 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
10462 		return;
10463 
10464 	reg = CSR_READ(sc, WMREG_PCS_CFG);
10465 	reg |= PCS_CFG_PCS_EN;
10466 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
10467 
10468 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
10469 	reg &= ~CTRL_EXT_SWDPIN(3);
10470 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
10471 	CSR_WRITE_FLUSH(sc);
10472 }
10473 
10474 static int
10475 wm_serdes_mediachange(struct ifnet *ifp)
10476 {
10477 	struct wm_softc *sc = ifp->if_softc;
10478 	bool pcs_autoneg = true; /* XXX */
10479 	uint32_t ctrl_ext, pcs_lctl, reg;
10480 
10481 	/* XXX Currently, this function is not called on 8257[12] */
10482 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
10483 	    || (sc->sc_type >= WM_T_82575))
10484 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
10485 
10486 	wm_serdes_power_up_link_82575(sc);
10487 
10488 	sc->sc_ctrl |= CTRL_SLU;
10489 
10490 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
10491 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
10492 
10493 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
10494 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
10495 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
10496 	case CTRL_EXT_LINK_MODE_SGMII:
10497 		pcs_autoneg = true;
10498 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
10499 		break;
10500 	case CTRL_EXT_LINK_MODE_1000KX:
10501 		pcs_autoneg = false;
10502 		/* FALLTHROUGH */
10503 	default:
10504 		if ((sc->sc_type == WM_T_82575)
10505 		    || (sc->sc_type == WM_T_82576)) {
10506 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
10507 				pcs_autoneg = false;
10508 		}
10509 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
10510 		    | CTRL_FRCFDX;
10511 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
10512 	}
10513 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10514 
10515 	if (pcs_autoneg) {
10516 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
10517 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
10518 
10519 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
10520 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
10521 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
10522 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
10523 	} else
10524 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
10525 
10526 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
10527 
10528 
10529 	return 0;
10530 }
10531 
10532 static void
10533 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
10534 {
10535 	struct wm_softc *sc = ifp->if_softc;
10536 	struct mii_data *mii = &sc->sc_mii;
10537 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
10538 	uint32_t pcs_adv, pcs_lpab, reg;
10539 
10540 	ifmr->ifm_status = IFM_AVALID;
10541 	ifmr->ifm_active = IFM_ETHER;
10542 
10543 	/* Check PCS */
10544 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
10545 	if ((reg & PCS_LSTS_LINKOK) == 0) {
10546 		ifmr->ifm_active |= IFM_NONE;
10547 		sc->sc_tbi_linkup = 0;
10548 		goto setled;
10549 	}
10550 
10551 	sc->sc_tbi_linkup = 1;
10552 	ifmr->ifm_status |= IFM_ACTIVE;
10553 	if (sc->sc_type == WM_T_I354) {
10554 		uint32_t status;
10555 
10556 		status = CSR_READ(sc, WMREG_STATUS);
10557 		if (((status & STATUS_2P5_SKU) != 0)
10558 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
10559 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
10560 		} else
10561 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
10562 	} else {
10563 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
10564 		case PCS_LSTS_SPEED_10:
10565 			ifmr->ifm_active |= IFM_10_T; /* XXX */
10566 			break;
10567 		case PCS_LSTS_SPEED_100:
10568 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
10569 			break;
10570 		case PCS_LSTS_SPEED_1000:
10571 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
10572 			break;
10573 		default:
10574 			device_printf(sc->sc_dev, "Unknown speed\n");
10575 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
10576 			break;
10577 		}
10578 	}
10579 	if ((reg & PCS_LSTS_FDX) != 0)
10580 		ifmr->ifm_active |= IFM_FDX;
10581 	else
10582 		ifmr->ifm_active |= IFM_HDX;
10583 	mii->mii_media_active &= ~IFM_ETH_FMASK;
10584 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
10585 		/* Check flow */
10586 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
10587 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
10588 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
10589 			goto setled;
10590 		}
10591 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
10592 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
10593 		DPRINTF(WM_DEBUG_LINK,
10594 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
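		/*
		 * Resolve flow control per IEEE 802.3 Annex 28B: if both
		 * sides advertise symmetric pause, enable TX and RX pause;
		 * otherwise the asymmetric pause bits select a single
		 * direction.
		 */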
10595 		if ((pcs_adv & TXCW_SYM_PAUSE)
10596 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
10597 			mii->mii_media_active |= IFM_FLOW
10598 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
10599 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
10600 		    && (pcs_adv & TXCW_ASYM_PAUSE)
10601 		    && (pcs_lpab & TXCW_SYM_PAUSE)
10602 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
10603 			mii->mii_media_active |= IFM_FLOW
10604 			    | IFM_ETH_TXPAUSE;
10605 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
10606 		    && (pcs_adv & TXCW_ASYM_PAUSE)
10607 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
10608 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
10609 			mii->mii_media_active |= IFM_FLOW
10610 			    | IFM_ETH_RXPAUSE;
10611 		}
10612 	}
10613 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
10614 	    | (mii->mii_media_active & IFM_ETH_FMASK);
10615 setled:
10616 	wm_tbi_serdes_set_linkled(sc);
10617 }
10618 
10619 /*
10620  * wm_serdes_tick:
10621  *
10622  *	Check the link on serdes devices.
10623  */
10624 static void
10625 wm_serdes_tick(struct wm_softc *sc)
10626 {
10627 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10628 	struct mii_data *mii = &sc->sc_mii;
10629 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
10630 	uint32_t reg;
10631 
10632 	KASSERT(WM_CORE_LOCKED(sc));
10633 
10634 	mii->mii_media_status = IFM_AVALID;
10635 	mii->mii_media_active = IFM_ETHER;
10636 
10637 	/* Check PCS */
10638 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
10639 	if ((reg & PCS_LSTS_LINKOK) != 0) {
10640 		mii->mii_media_status |= IFM_ACTIVE;
10641 		sc->sc_tbi_linkup = 1;
10642 		sc->sc_tbi_serdes_ticks = 0;
10643 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
10644 		if ((reg & PCS_LSTS_FDX) != 0)
10645 			mii->mii_media_active |= IFM_FDX;
10646 		else
10647 			mii->mii_media_active |= IFM_HDX;
10648 	} else {
10649 		mii->mii_media_status |= IFM_NONE;
10650 		sc->sc_tbi_linkup = 0;
10651 		/* If the timer expired, retry autonegotiation */
10652 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
10653 		    && (++sc->sc_tbi_serdes_ticks
10654 			>= sc->sc_tbi_serdes_anegticks)) {
10655 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
10656 			sc->sc_tbi_serdes_ticks = 0;
10657 			/* XXX */
10658 			wm_serdes_mediachange(ifp);
10659 		}
10660 	}
10661 
10662 	wm_tbi_serdes_set_linkled(sc);
10663 }
10664 
10665 /* SFP related */
10666 
10667 static int
10668 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
10669 {
10670 	uint32_t i2ccmd;
10671 	int i;
10672 
10673 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
10674 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
10675 
10676 	/* Poll the ready bit */
10677 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
10678 		delay(50);
10679 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
10680 		if (i2ccmd & I2CCMD_READY)
10681 			break;
10682 	}
10683 	if ((i2ccmd & I2CCMD_READY) == 0)
10684 		return -1;
10685 	if ((i2ccmd & I2CCMD_ERROR) != 0)
10686 		return -1;
10687 
10688 	*data = i2ccmd & 0x00ff;
10689 
10690 	return 0;
10691 }
10692 
10693 static uint32_t
10694 wm_sfp_get_media_type(struct wm_softc *sc)
10695 {
10696 	uint32_t ctrl_ext;
10697 	uint8_t val = 0;
10698 	int timeout = 3;
10699 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
10700 	int rv = -1;
10701 
10702 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
10703 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
10704 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
10705 	CSR_WRITE_FLUSH(sc);
10706 
10707 	/* Read SFP module data */
10708 	while (timeout) {
10709 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
10710 		if (rv == 0)
10711 			break;
10712 		delay(100*1000); /* XXX too big */
10713 		timeout--;
10714 	}
10715 	if (rv != 0)
10716 		goto out;
10717 	switch (val) {
10718 	case SFF_SFP_ID_SFF:
10719 		aprint_normal_dev(sc->sc_dev,
10720 		    "Module/Connector soldered to board\n");
10721 		break;
10722 	case SFF_SFP_ID_SFP:
10723 		aprint_normal_dev(sc->sc_dev, "SFP\n");
10724 		break;
10725 	case SFF_SFP_ID_UNKNOWN:
10726 		goto out;
10727 	default:
10728 		break;
10729 	}
10730 
10731 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
10732 	if (rv != 0) {
10733 		goto out;
10734 	}
10735 
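	/*
	 * Map the SFF Ethernet compliance flags to a media type:
	 * 1000BASE-SX/LX is native SerDes, 1000BASE-T is copper via
	 * SGMII, and 100BASE-FX is handled as SerDes with SGMII enabled.
	 */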
10736 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
10737 		mediatype = WM_MEDIATYPE_SERDES;
10738 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
10739 		sc->sc_flags |= WM_F_SGMII;
10740 		mediatype = WM_MEDIATYPE_COPPER;
10741 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
10742 		sc->sc_flags |= WM_F_SGMII;
10743 		mediatype = WM_MEDIATYPE_SERDES;
10744 	}
10745 
10746 out:
10747 	/* Restore I2C interface setting */
10748 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
10749 
10750 	return mediatype;
10751 }
10752 
10753 /*
10754  * NVM related.
10755  * Microwire, SPI (w/wo EERD) and Flash.
10756  */
10757 
10758 /* Both spi and uwire */
10759 
10760 /*
10761  * wm_eeprom_sendbits:
10762  *
10763  *	Send a series of bits to the EEPROM.
10764  */
10765 static void
10766 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
10767 {
10768 	uint32_t reg;
10769 	int x;
10770 
10771 	reg = CSR_READ(sc, WMREG_EECD);
10772 
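	/*
	 * Bit-bang MSB first: present each bit on DI, then pulse SK
	 * high and low to clock it into the EEPROM.
	 */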
10773 	for (x = nbits; x > 0; x--) {
10774 		if (bits & (1U << (x - 1)))
10775 			reg |= EECD_DI;
10776 		else
10777 			reg &= ~EECD_DI;
10778 		CSR_WRITE(sc, WMREG_EECD, reg);
10779 		CSR_WRITE_FLUSH(sc);
10780 		delay(2);
10781 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
10782 		CSR_WRITE_FLUSH(sc);
10783 		delay(2);
10784 		CSR_WRITE(sc, WMREG_EECD, reg);
10785 		CSR_WRITE_FLUSH(sc);
10786 		delay(2);
10787 	}
10788 }
10789 
10790 /*
10791  * wm_eeprom_recvbits:
10792  *
10793  *	Receive a series of bits from the EEPROM.
10794  */
10795 static void
10796 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
10797 {
10798 	uint32_t reg, val;
10799 	int x;
10800 
10801 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
10802 
10803 	val = 0;
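	/*
	 * Clock each bit in MSB first: raise SK, sample DO while the
	 * clock is high, then lower SK again.
	 */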
10804 	for (x = nbits; x > 0; x--) {
10805 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
10806 		CSR_WRITE_FLUSH(sc);
10807 		delay(2);
10808 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
10809 			val |= (1U << (x - 1));
10810 		CSR_WRITE(sc, WMREG_EECD, reg);
10811 		CSR_WRITE_FLUSH(sc);
10812 		delay(2);
10813 	}
10814 	*valp = val;
10815 }
10816 
10817 /* Microwire */
10818 
10819 /*
10820  * wm_nvm_read_uwire:
10821  *
10822  *	Read a word from the EEPROM using the MicroWire protocol.
10823  */
10824 static int
10825 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
10826 {
10827 	uint32_t reg, val;
10828 	int i;
10829 
10830 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10831 		device_xname(sc->sc_dev), __func__));
10832 
10833 	for (i = 0; i < wordcnt; i++) {
10834 		/* Clear SK and DI. */
10835 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
10836 		CSR_WRITE(sc, WMREG_EECD, reg);
10837 
10838 		/*
10839 		 * XXX: workaround for a bug in qemu-0.12.x and prior
10840 		 * and Xen.
10841 		 *
10842 		 * We use this workaround only for 82540 because qemu's
10843 		 * e1000 act as 82540.
10844 		 */
10845 		if (sc->sc_type == WM_T_82540) {
10846 			reg |= EECD_SK;
10847 			CSR_WRITE(sc, WMREG_EECD, reg);
10848 			reg &= ~EECD_SK;
10849 			CSR_WRITE(sc, WMREG_EECD, reg);
10850 			CSR_WRITE_FLUSH(sc);
10851 			delay(2);
10852 		}
10853 		/* XXX: end of workaround */
10854 
10855 		/* Set CHIP SELECT. */
10856 		reg |= EECD_CS;
10857 		CSR_WRITE(sc, WMREG_EECD, reg);
10858 		CSR_WRITE_FLUSH(sc);
10859 		delay(2);
10860 
10861 		/* Shift in the READ command. */
10862 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
10863 
10864 		/* Shift in address. */
10865 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
10866 
10867 		/* Shift out the data. */
10868 		wm_eeprom_recvbits(sc, &val, 16);
10869 		data[i] = val & 0xffff;
10870 
10871 		/* Clear CHIP SELECT. */
10872 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
10873 		CSR_WRITE(sc, WMREG_EECD, reg);
10874 		CSR_WRITE_FLUSH(sc);
10875 		delay(2);
10876 	}
10877 
10878 	return 0;
10879 }
10880 
10881 /* SPI */
10882 
10883 /*
10884  * Set SPI and FLASH related information from the EECD register.
10885  * For 82541 and 82547, the word size is taken from EEPROM.
10886  */
10887 static int
10888 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
10889 {
10890 	int size;
10891 	uint32_t reg;
10892 	uint16_t data;
10893 
10894 	reg = CSR_READ(sc, WMREG_EECD);
10895 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
10896 
10897 	/* Read the size of NVM from EECD by default */
10898 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
10899 	switch (sc->sc_type) {
10900 	case WM_T_82541:
10901 	case WM_T_82541_2:
10902 	case WM_T_82547:
10903 	case WM_T_82547_2:
10904 		/* Set dummy value to access EEPROM */
10905 		sc->sc_nvm_wordsize = 64;
10906 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
10907 		reg = data;
10908 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
10909 		if (size == 0)
10910 			size = 6; /* 64 word size */
10911 		else
10912 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
10913 		break;
10914 	case WM_T_80003:
10915 	case WM_T_82571:
10916 	case WM_T_82572:
10917 	case WM_T_82573: /* SPI case */
10918 	case WM_T_82574: /* SPI case */
10919 	case WM_T_82583: /* SPI case */
10920 		size += NVM_WORD_SIZE_BASE_SHIFT;
10921 		if (size > 14)
10922 			size = 14;
10923 		break;
10924 	case WM_T_82575:
10925 	case WM_T_82576:
10926 	case WM_T_82580:
10927 	case WM_T_I350:
10928 	case WM_T_I354:
10929 	case WM_T_I210:
10930 	case WM_T_I211:
10931 		size += NVM_WORD_SIZE_BASE_SHIFT;
10932 		if (size > 15)
10933 			size = 15;
10934 		break;
10935 	default:
10936 		aprint_error_dev(sc->sc_dev,
10937 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
10938 		return -1;
10940 	}
10941 
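	/* The size field encodes log2 of the NVM word count. */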
10942 	sc->sc_nvm_wordsize = 1 << size;
10943 
10944 	return 0;
10945 }
10946 
10947 /*
10948  * wm_nvm_ready_spi:
10949  *
10950  *	Wait for a SPI EEPROM to be ready for commands.
10951  */
10952 static int
10953 wm_nvm_ready_spi(struct wm_softc *sc)
10954 {
10955 	uint32_t val;
10956 	int usec;
10957 
10958 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10959 		device_xname(sc->sc_dev), __func__));
10960 
10961 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
10962 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
10963 		wm_eeprom_recvbits(sc, &val, 8);
10964 		if ((val & SPI_SR_RDY) == 0)
10965 			break;
10966 	}
10967 	if (usec >= SPI_MAX_RETRIES) {
10968 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
10969 		return 1;
10970 	}
10971 	return 0;
10972 }
10973 
10974 /*
10975  * wm_nvm_read_spi:
10976  *
10977  *	Read a word from the EEPROM using the SPI protocol.
10978  */
10979 static int
10980 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
10981 {
10982 	uint32_t reg, val;
10983 	int i;
10984 	uint8_t opc;
10985 
10986 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10987 		device_xname(sc->sc_dev), __func__));
10988 
10989 	/* Clear SK and CS. */
10990 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
10991 	CSR_WRITE(sc, WMREG_EECD, reg);
10992 	CSR_WRITE_FLUSH(sc);
10993 	delay(2);
10994 
10995 	if (wm_nvm_ready_spi(sc))
10996 		return 1;
10997 
10998 	/* Toggle CS to flush commands. */
10999 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
11000 	CSR_WRITE_FLUSH(sc);
11001 	delay(2);
11002 	CSR_WRITE(sc, WMREG_EECD, reg);
11003 	CSR_WRITE_FLUSH(sc);
11004 	delay(2);
11005 
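	/*
	 * Parts with 8 address bits encode address bit 8 (A8) in the
	 * opcode to reach the upper half of the array.
	 */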
11006 	opc = SPI_OPC_READ;
11007 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
11008 		opc |= SPI_OPC_A8;
11009 
11010 	wm_eeprom_sendbits(sc, opc, 8);
11011 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
11012 
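	/* The two bytes of each word arrive in reverse order; swap them. */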
11013 	for (i = 0; i < wordcnt; i++) {
11014 		wm_eeprom_recvbits(sc, &val, 16);
11015 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
11016 	}
11017 
11018 	/* Raise CS and clear SK. */
11019 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
11020 	CSR_WRITE(sc, WMREG_EECD, reg);
11021 	CSR_WRITE_FLUSH(sc);
11022 	delay(2);
11023 
11024 	return 0;
11025 }
11026 
11027 /* Read using the EERD register */
11028 
11029 static int
11030 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
11031 {
11032 	uint32_t attempts = 100000;
11033 	uint32_t i, reg = 0;
11034 	int32_t done = -1;
11035 
11036 	for (i = 0; i < attempts; i++) {
11037 		reg = CSR_READ(sc, rw);
11038 
11039 		if (reg & EERD_DONE) {
11040 			done = 0;
11041 			break;
11042 		}
11043 		delay(5);
11044 	}
11045 
11046 	return done;
11047 }
11048 
11049 static int
11050 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
11051     uint16_t *data)
11052 {
11053 	int i, eerd = 0;
11054 	int error = 0;
11055 
11056 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11057 		device_xname(sc->sc_dev), __func__));
11058 
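	/* EERD protocol: write the address with START set, poll DONE, read. */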
11059 	for (i = 0; i < wordcnt; i++) {
11060 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
11061 
11062 		CSR_WRITE(sc, WMREG_EERD, eerd);
11063 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
11064 		if (error != 0)
11065 			break;
11066 
11067 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
11068 	}
11069 
11070 	return error;
11071 }
11072 
11073 /* Flash */
11074 
11075 static int
11076 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
11077 {
11078 	uint32_t eecd;
11079 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
11080 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
11081 	uint8_t sig_byte = 0;
11082 
11083 	switch (sc->sc_type) {
11084 	case WM_T_PCH_SPT:
11085 		/*
11086 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
11087 		 * sector valid bits from the NVM.
11088 		 */
11089 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
11090 		if ((*bank == 0) || (*bank == 1)) {
11091 			aprint_error_dev(sc->sc_dev,
11092 			    "%s: no valid NVM bank present (%u)\n", __func__,
11093 			    *bank);
11094 			return -1;
11095 		} else {
11096 			*bank = *bank - 2;
11097 			return 0;
11098 		}
11099 	case WM_T_ICH8:
11100 	case WM_T_ICH9:
11101 		eecd = CSR_READ(sc, WMREG_EECD);
11102 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
11103 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
11104 			return 0;
11105 		}
11106 		/* FALLTHROUGH */
11107 	default:
11108 		/* Default to 0 */
11109 		*bank = 0;
11110 
11111 		/* Check bank 0 */
11112 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
11113 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
11114 			*bank = 0;
11115 			return 0;
11116 		}
11117 
11118 		/* Check bank 1 */
11119 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
11120 		    &sig_byte);
11121 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
11122 			*bank = 1;
11123 			return 0;
11124 		}
11125 	}
11126 
11127 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
11128 		device_xname(sc->sc_dev)));
11129 	return -1;
11130 }
11131 
11132 /******************************************************************************
11133  * This function does initial flash setup so that a new read/write/erase cycle
11134  * can be started.
11135  *
11136  * sc - The pointer to the hw structure
11137  ****************************************************************************/
11138 static int32_t
11139 wm_ich8_cycle_init(struct wm_softc *sc)
11140 {
11141 	uint16_t hsfsts;
11142 	int32_t error = 1;
11143 	int32_t i     = 0;
11144 
11145 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
11146 
11147 	/* Check the Flash Descriptor Valid bit in HW status */
11148 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
11149 		return error;
11150 	}
11151 
11152 	/* Clear FCERR in Hw status by writing 1 */
11153 	/* Clear DAEL in Hw status by writing a 1 */
11154 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
11155 
11156 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
11157 
11158 	/*
11159 	 * Ideally the hardware would provide either a cycle-in-progress
11160 	 * bit to check against before starting a new cycle, or an FDONE
11161 	 * bit that reads 1 after hardware reset, which could then be
11162 	 * used to tell whether a cycle is in progress or has completed.
11163 	 * We should also have some software semaphore mechanism to guard
11164 	 * FDONE or the in-progress bit so that accesses by two threads
11165 	 * are serialized, or some other way to keep two threads from
11166 	 * starting a cycle at the same time.
11167 	 */
11168 
11169 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
11170 		/*
11171 		 * There is no cycle running at present, so we can start a
11172 		 * cycle
11173 		 */
11174 
11175 		/* Begin by setting Flash Cycle Done. */
11176 		hsfsts |= HSFSTS_DONE;
11177 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
11178 		error = 0;
11179 	} else {
11180 		/*
11181 		 * Otherwise poll for some time so the current cycle has a
11182 		 * chance to end before giving up.
11183 		 */
11184 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
11185 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
11186 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
11187 				error = 0;
11188 				break;
11189 			}
11190 			delay(1);
11191 		}
11192 		if (error == 0) {
11193 			/*
11194 			 * The previous cycle ended before the timeout; now
11195 			 * set the Flash Cycle Done.
11196 			 */
11197 			hsfsts |= HSFSTS_DONE;
11198 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
11199 		}
11200 	}
11201 	return error;
11202 }
11203 
11204 /******************************************************************************
11205  * This function starts a flash cycle and waits for its completion
11206  *
11207  * sc - The pointer to the hw structure
11208  ****************************************************************************/
11209 static int32_t
11210 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
11211 {
11212 	uint16_t hsflctl;
11213 	uint16_t hsfsts;
11214 	int32_t error = 1;
11215 	uint32_t i = 0;
11216 
11217 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
11218 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
11219 	hsflctl |= HSFCTL_GO;
11220 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
11221 
11222 	/* Wait till FDONE bit is set to 1 */
11223 	do {
11224 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
11225 		if (hsfsts & HSFSTS_DONE)
11226 			break;
11227 		delay(1);
11228 		i++;
11229 	} while (i < timeout);
11230 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
11231 		error = 0;
11232 
11233 	return error;
11234 }
11235 
11236 /******************************************************************************
11237  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
11238  *
11239  * sc - The pointer to the hw structure
11240  * index - The index of the byte or word to read.
11241  * size - Size of data to read, 1=byte 2=word, 4=dword
11242  * data - Pointer to the word to store the value read.
11243  *****************************************************************************/
11244 static int32_t
11245 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
11246     uint32_t size, uint32_t *data)
11247 {
11248 	uint16_t hsfsts;
11249 	uint16_t hsflctl;
11250 	uint32_t flash_linear_address;
11251 	uint32_t flash_data = 0;
11252 	int32_t error = 1;
11253 	int32_t count = 0;
11254 
11255 	if (size < 1 || size > 4 || data == NULL ||
11256 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
11257 		return error;
11258 
11259 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
11260 	    sc->sc_ich8_flash_base;
11261 
11262 	do {
11263 		delay(1);
11264 		/* Steps */
11265 		error = wm_ich8_cycle_init(sc);
11266 		if (error)
11267 			break;
11268 
11269 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
11270 		/* The byte count field holds size - 1 (0 = 1 byte, 3 = 4 bytes). */
11271 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
11272 		    & HSFCTL_BCOUNT_MASK;
11273 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
11274 		if (sc->sc_type == WM_T_PCH_SPT) {
11275 			/*
11276 			 * In SPT, this register is in LAN memory space, not
11277 			 * flash.  Therefore, only 32 bit access is supported.
11278 			 */
11279 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
11280 			    (uint32_t)hsflctl);
11281 		} else
11282 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
11283 
11284 		/*
11285 		 * Write the last 24 bits of index into Flash Linear address
11286 		 * field in Flash Address
11287 		 */
11288 		/* TODO: TBD maybe check the index against the size of flash */
11289 
11290 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
11291 
11292 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
11293 
11294 		/*
11295 		 * If FCERR is set, clear it and retry the whole sequence
11296 		 * a few more times; otherwise read in the data from
11297 		 * Flash Data0.  The data is returned least significant
11298 		 * byte first.
11299 		 */
11300 		if (error == 0) {
11301 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
11302 			if (size == 1)
11303 				*data = (uint8_t)(flash_data & 0x000000FF);
11304 			else if (size == 2)
11305 				*data = (uint16_t)(flash_data & 0x0000FFFF);
11306 			else if (size == 4)
11307 				*data = (uint32_t)flash_data;
11308 			break;
11309 		} else {
11310 			/*
11311 			 * If we've gotten here, then things are probably
11312 			 * completely hosed, but if the error condition is
11313 			 * detected, it won't hurt to give it another try...
11314 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
11315 			 */
11316 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
11317 			if (hsfsts & HSFSTS_ERR) {
11318 				/* Repeat for some time before giving up. */
11319 				continue;
11320 			} else if ((hsfsts & HSFSTS_DONE) == 0)
11321 				break;
11322 		}
11323 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
11324 
11325 	return error;
11326 }
11327 
11328 /******************************************************************************
11329  * Reads a single byte from the NVM using the ICH8 flash access registers.
11330  *
11331  * sc - pointer to wm_hw structure
11332  * index - The index of the byte to read.
11333  * data - Pointer to a byte to store the value read.
11334  *****************************************************************************/
11335 static int32_t
11336 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
11337 {
11338 	int32_t status;
11339 	uint32_t word = 0;
11340 
11341 	status = wm_read_ich8_data(sc, index, 1, &word);
11342 	if (status == 0)
11343 		*data = (uint8_t)word;
11344 	else
11345 		*data = 0;
11346 
11347 	return status;
11348 }
11349 
11350 /******************************************************************************
11351  * Reads a word from the NVM using the ICH8 flash access registers.
11352  *
11353  * sc - pointer to wm_hw structure
11354  * index - The starting byte index of the word to read.
11355  * data - Pointer to a word to store the value read.
11356  *****************************************************************************/
11357 static int32_t
11358 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
11359 {
11360 	int32_t status;
11361 	uint32_t word = 0;
11362 
11363 	status = wm_read_ich8_data(sc, index, 2, &word);
11364 	if (status == 0)
11365 		*data = (uint16_t)word;
11366 	else
11367 		*data = 0;
11368 
11369 	return status;
11370 }
11371 
11372 /******************************************************************************
11373  * Reads a dword from the NVM using the ICH8 flash access registers.
11374  *
11375  * sc - pointer to wm_hw structure
11376  * index - The starting byte index of the word to read.
11377  * data - Pointer to a word to store the value read.
11378  *****************************************************************************/
11379 static int32_t
11380 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
11381 {
11382 	int32_t status;
11383 
11384 	status = wm_read_ich8_data(sc, index, 4, data);
11385 	return status;
11386 }
11387 
11388 /******************************************************************************
11389  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
11390  * register.
11391  *
11392  * sc - Struct containing variables accessed by shared code
11393  * offset - offset of word in the EEPROM to read
11394  * data - word read from the EEPROM
11395  * words - number of words to read
11396  *****************************************************************************/
11397 static int
11398 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
11399 {
11400 	int32_t  error = 0;
11401 	uint32_t flash_bank = 0;
11402 	uint32_t act_offset = 0;
11403 	uint32_t bank_offset = 0;
11404 	uint16_t word = 0;
11405 	uint16_t i = 0;
11406 
11407 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11408 		device_xname(sc->sc_dev), __func__));
11409 
11410 	/*
11411 	 * We need to know which is the valid flash bank.  In the event
11412 	 * that we didn't allocate eeprom_shadow_ram, we may not be
11413 	 * managing flash_bank.  So it cannot be trusted and needs
11414 	 * to be updated with each read.
11415 	 */
11416 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
11417 	if (error) {
11418 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
11419 			device_xname(sc->sc_dev)));
11420 		flash_bank = 0;
11421 	}
11422 
11423 	/*
11424 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
11425 	 * size
11426 	 */
11427 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
11428 
11429 	error = wm_get_swfwhw_semaphore(sc);
11430 	if (error) {
11431 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
11432 		    __func__);
11433 		return error;
11434 	}
11435 
11436 	for (i = 0; i < words; i++) {
11437 		/* The NVM part needs a byte offset, hence * 2 */
11438 		act_offset = bank_offset + ((offset + i) * 2);
11439 		error = wm_read_ich8_word(sc, act_offset, &word);
11440 		if (error) {
11441 			aprint_error_dev(sc->sc_dev,
11442 			    "%s: failed to read NVM\n", __func__);
11443 			break;
11444 		}
11445 		data[i] = word;
11446 	}
11447 
11448 	wm_put_swfwhw_semaphore(sc);
11449 	return error;
11450 }
11451 
11452 /******************************************************************************
11453  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
11454  * register.
11455  *
11456  * sc - Struct containing variables accessed by shared code
11457  * offset - offset of word in the EEPROM to read
11458  * data - word read from the EEPROM
11459  * words - number of words to read
11460  *****************************************************************************/
11461 static int
11462 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
11463 {
11464 	int32_t  error = 0;
11465 	uint32_t flash_bank = 0;
11466 	uint32_t act_offset = 0;
11467 	uint32_t bank_offset = 0;
11468 	uint32_t dword = 0;
11469 	uint16_t i = 0;
11470 
11471 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11472 		device_xname(sc->sc_dev), __func__));
11473 
11474 	/*
11475 	 * We need to know which is the valid flash bank.  In the event
11476 	 * that we didn't allocate eeprom_shadow_ram, we may not be
11477 	 * managing flash_bank.  So it cannot be trusted and needs
11478 	 * to be updated with each read.
11479 	 */
11480 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
11481 	if (error) {
11482 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
11483 			device_xname(sc->sc_dev)));
11484 		flash_bank = 0;
11485 	}
11486 
11487 	/*
11488 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
11489 	 * size
11490 	 */
11491 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
11492 
11493 	error = wm_get_swfwhw_semaphore(sc);
11494 	if (error) {
11495 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
11496 		    __func__);
11497 		return error;
11498 	}
11499 
11500 	for (i = 0; i < words; i++) {
11501 		/* The NVM part needs a byte offset, hence * 2 */
11502 		act_offset = bank_offset + ((offset + i) * 2);
11503 		/* but we must read dword aligned, so mask ... */
11504 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
11505 		if (error) {
11506 			aprint_error_dev(sc->sc_dev,
11507 			    "%s: failed to read NVM\n", __func__);
11508 			break;
11509 		}
11510 		/* ... and pick out low or high word */
11511 		if ((act_offset & 0x2) == 0)
11512 			data[i] = (uint16_t)(dword & 0xFFFF);
11513 		else
11514 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
11515 	}
11516 
11517 	wm_put_swfwhw_semaphore(sc);
11518 	return error;
11519 }
11520 
11521 /* iNVM */
11522 
11523 static int
11524 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
11525 {
11526 	int32_t  rv = -1;	/* -1 means the word was not found */
11527 	uint32_t invm_dword;
11528 	uint16_t i;
11529 	uint8_t record_type, word_address;
11530 
11531 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11532 		device_xname(sc->sc_dev), __func__));
11533 
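	/*
	 * Scan the iNVM dword array.  CSR autoload and RSA key records
	 * have fixed payload sizes, so skip over them; word autoload
	 * records carry one word each at an explicit word address.
	 */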
11534 	for (i = 0; i < INVM_SIZE; i++) {
11535 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
11536 		/* Get record type */
11537 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
11538 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
11539 			break;
11540 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
11541 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
11542 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
11543 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
11544 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
11545 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
11546 			if (word_address == address) {
11547 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
11548 				rv = 0;
11549 				break;
11550 			}
11551 		}
11552 	}
11553 
11554 	return rv;
11555 }
11556 
11557 static int
11558 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
11559 {
11560 	int rv = 0;
11561 	int i;
11562 
11563 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11564 		device_xname(sc->sc_dev), __func__));
11565 
11566 	for (i = 0; i < words; i++) {
11567 		switch (offset + i) {
11568 		case NVM_OFF_MACADDR:
11569 		case NVM_OFF_MACADDR1:
11570 		case NVM_OFF_MACADDR2:
11571 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
11572 			if (rv != 0) {
11573 				data[i] = 0xffff;
11574 				rv = -1;
11575 			}
11576 			break;
11577 		case NVM_OFF_CFG2:
11578 			rv = wm_nvm_read_word_invm(sc, offset, data);
11579 			if (rv != 0) {
11580 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
11581 				rv = 0;
11582 			}
11583 			break;
11584 		case NVM_OFF_CFG4:
11585 			rv = wm_nvm_read_word_invm(sc, offset, data);
11586 			if (rv != 0) {
11587 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
11588 				rv = 0;
11589 			}
11590 			break;
11591 		case NVM_OFF_LED_1_CFG:
11592 			rv = wm_nvm_read_word_invm(sc, offset, data);
11593 			if (rv != 0) {
11594 				*data = NVM_LED_1_CFG_DEFAULT_I211;
11595 				rv = 0;
11596 			}
11597 			break;
11598 		case NVM_OFF_LED_0_2_CFG:
11599 			rv = wm_nvm_read_word_invm(sc, offset, data);
11600 			if (rv != 0) {
11601 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
11602 				rv = 0;
11603 			}
11604 			break;
11605 		case NVM_OFF_ID_LED_SETTINGS:
11606 			rv = wm_nvm_read_word_invm(sc, offset, data);
11607 			if (rv != 0) {
11608 				*data = ID_LED_RESERVED_FFFF;
11609 				rv = 0;
11610 			}
11611 			break;
11612 		default:
11613 			DPRINTF(WM_DEBUG_NVM,
11614 			    ("NVM word 0x%02x is not mapped.\n", offset));
11615 			*data = NVM_RESERVED_WORD;
11616 			break;
11617 		}
11618 	}
11619 
11620 	return rv;
11621 }
11622 
11623 /* Lock, detecting NVM type, validate checksum, version and read */
11624 
11625 /*
11626  * wm_nvm_acquire:
11627  *
11628  *	Perform the EEPROM handshake required on some chips.
11629  */
11630 static int
11631 wm_nvm_acquire(struct wm_softc *sc)
11632 {
11633 	uint32_t reg;
11634 	int x;
11635 	int ret = 0;
11636 
11637 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11638 		device_xname(sc->sc_dev), __func__));
11639 
11640 	if (sc->sc_type >= WM_T_ICH8) {
11641 		ret = wm_get_nvm_ich8lan(sc);
11642 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
11643 		ret = wm_get_swfwhw_semaphore(sc);
11644 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
11645 		/* This will also do wm_get_swsm_semaphore() if needed */
11646 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
11647 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
11648 		ret = wm_get_swsm_semaphore(sc);
11649 	}
11650 
11651 	if (ret) {
11652 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
11653 			__func__);
11654 		return 1;
11655 	}
11656 
11657 	if (sc->sc_flags & WM_F_LOCK_EECD) {
11658 		reg = CSR_READ(sc, WMREG_EECD);
11659 
11660 		/* Request EEPROM access. */
11661 		reg |= EECD_EE_REQ;
11662 		CSR_WRITE(sc, WMREG_EECD, reg);
11663 
11664 		/* ..and wait for it to be granted. */
11665 		for (x = 0; x < 1000; x++) {
11666 			reg = CSR_READ(sc, WMREG_EECD);
11667 			if (reg & EECD_EE_GNT)
11668 				break;
11669 			delay(5);
11670 		}
11671 		if ((reg & EECD_EE_GNT) == 0) {
11672 			aprint_error_dev(sc->sc_dev,
11673 			    "could not acquire EEPROM GNT\n");
11674 			reg &= ~EECD_EE_REQ;
11675 			CSR_WRITE(sc, WMREG_EECD, reg);
11676 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
11677 				wm_put_swfwhw_semaphore(sc);
11678 			if (sc->sc_flags & WM_F_LOCK_SWFW)
11679 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
11680 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
11681 				wm_put_swsm_semaphore(sc);
11682 			return 1;
11683 		}
11684 	}
11685 
11686 	return 0;
11687 }
11688 
11689 /*
11690  * wm_nvm_release:
11691  *
11692  *	Release the EEPROM mutex.
11693  */
11694 static void
11695 wm_nvm_release(struct wm_softc *sc)
11696 {
11697 	uint32_t reg;
11698 
11699 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11700 		device_xname(sc->sc_dev), __func__));
11701 
11702 	if (sc->sc_flags & WM_F_LOCK_EECD) {
11703 		reg = CSR_READ(sc, WMREG_EECD);
11704 		reg &= ~EECD_EE_REQ;
11705 		CSR_WRITE(sc, WMREG_EECD, reg);
11706 	}
11707 
11708 	if (sc->sc_type >= WM_T_ICH8) {
11709 		wm_put_nvm_ich8lan(sc);
11710 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
11711 		wm_put_swfwhw_semaphore(sc);
11712 	else if (sc->sc_flags & WM_F_LOCK_SWFW)
11713 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
11714 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
11715 		wm_put_swsm_semaphore(sc);
11716 }
11717 
11718 static int
11719 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
11720 {
11721 	uint32_t eecd = 0;
11722 
11723 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
11724 	    || sc->sc_type == WM_T_82583) {
11725 		eecd = CSR_READ(sc, WMREG_EECD);
11726 
11727 		/* Isolate bits 15 & 16 */
11728 		eecd = ((eecd >> 15) & 0x03);
11729 
11730 		/* If both bits are set, device is Flash type */
11731 		if (eecd == 0x03)
11732 			return 0;
11733 	}
11734 	return 1;
11735 }
11736 
11737 static int
11738 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
11739 {
11740 	uint32_t eec;
11741 
11742 	eec = CSR_READ(sc, WMREG_EEC);
11743 	if ((eec & EEC_FLASH_DETECTED) != 0)
11744 		return 1;
11745 
11746 	return 0;
11747 }
11748 
11749 /*
11750  * wm_nvm_validate_checksum
11751  *
11752  * The checksum is defined as the sum of the first 64 (16 bit) words.
11753  */
11754 static int
11755 wm_nvm_validate_checksum(struct wm_softc *sc)
11756 {
11757 	uint16_t checksum;
11758 	uint16_t eeprom_data;
11759 #ifdef WM_DEBUG
11760 	uint16_t csum_wordaddr, valid_checksum;
11761 #endif
11762 	int i;
11763 
11764 	checksum = 0;
11765 
11766 	/* Don't check for I211 */
11767 	if (sc->sc_type == WM_T_I211)
11768 		return 0;
11769 
11770 #ifdef WM_DEBUG
11771 	if (sc->sc_type == WM_T_PCH_LPT) {
11772 		csum_wordaddr = NVM_OFF_COMPAT;
11773 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
11774 	} else {
11775 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
11776 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
11777 	}
11778 
11779 	/* Dump EEPROM image for debug */
11780 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
11781 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
11782 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
11783 		/* XXX PCH_SPT? */
11784 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
11785 		if ((eeprom_data & valid_checksum) == 0) {
11786 			DPRINTF(WM_DEBUG_NVM,
11787 			    ("%s: NVM need to be updated (%04x != %04x)\n",
11788 				device_xname(sc->sc_dev), eeprom_data,
11789 				    valid_checksum));
11790 		}
11791 	}
11792 
11793 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
11794 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
11795 		for (i = 0; i < NVM_SIZE; i++) {
11796 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
11797 				printf("XXXX ");
11798 			else
11799 				printf("%04hx ", eeprom_data);
11800 			if (i % 8 == 7)
11801 				printf("\n");
11802 		}
11803 	}
11804 
11805 #endif /* WM_DEBUG */
11806 
11807 	for (i = 0; i < NVM_SIZE; i++) {
11808 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
11809 			return 1;
11810 		checksum += eeprom_data;
11811 	}
11812 
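	/*
	 * Note that a mismatch is only reported under WM_DEBUG and is
	 * not treated as fatal here.
	 */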
11813 	if (checksum != (uint16_t) NVM_CHECKSUM) {
11814 #ifdef WM_DEBUG
11815 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
11816 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
11817 #endif
11818 	}
11819 
11820 	return 0;
11821 }
11822 
11823 static void
11824 wm_nvm_version_invm(struct wm_softc *sc)
11825 {
11826 	uint32_t dword;
11827 
11828 	/*
11829 	 * Linux's code to decode the version is very strange, so we don't
11830 	 * follow that algorithm and instead just use word 61 as the
11831 	 * document describes.  It may not be perfect though...
11832 	 *
11833 	 * Example:
11834 	 *
11835 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
11836 	 */
11837 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
11838 	dword = __SHIFTOUT(dword, INVM_VER_1);
11839 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
11840 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
11841 }
11842 
11843 static void
11844 wm_nvm_version(struct wm_softc *sc)
11845 {
11846 	uint16_t major, minor, build, patch;
11847 	uint16_t uid0, uid1;
11848 	uint16_t nvm_data;
11849 	uint16_t off;
11850 	bool check_version = false;
11851 	bool check_optionrom = false;
11852 	bool have_build = false;
11853 
11854 	/*
11855 	 * Version format:
11856 	 *
11857 	 * XYYZ
11858 	 * X0YZ
11859 	 * X0YY
11860 	 *
11861 	 * Example:
11862 	 *
11863 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
11864 	 *	82571	0x50a6	5.10.6?
11865 	 *	82572	0x506a	5.6.10?
11866 	 *	82572EI	0x5069	5.6.9?
11867 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
11868 	 *		0x2013	2.1.3?
11869 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
11870 	 */
11871 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
11872 	switch (sc->sc_type) {
11873 	case WM_T_82571:
11874 	case WM_T_82572:
11875 	case WM_T_82574:
11876 	case WM_T_82583:
11877 		check_version = true;
11878 		check_optionrom = true;
11879 		have_build = true;
11880 		break;
11881 	case WM_T_82575:
11882 	case WM_T_82576:
11883 	case WM_T_82580:
11884 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
11885 			check_version = true;
11886 		break;
11887 	case WM_T_I211:
11888 		wm_nvm_version_invm(sc);
11889 		goto printver;
11890 	case WM_T_I210:
11891 		if (!wm_nvm_get_flash_presence_i210(sc)) {
11892 			wm_nvm_version_invm(sc);
11893 			goto printver;
11894 		}
11895 		/* FALLTHROUGH */
11896 	case WM_T_I350:
11897 	case WM_T_I354:
11898 		check_version = true;
11899 		check_optionrom = true;
11900 		break;
11901 	default:
11902 		return;
11903 	}
11904 	if (check_version) {
11905 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
11906 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
11907 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
11908 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
11909 			build = nvm_data & NVM_BUILD_MASK;
11910 			have_build = true;
11911 		} else
11912 			minor = nvm_data & 0x00ff;
11913 
11914 		/* The minor number is BCD-coded; convert it to decimal */
11915 		minor = (minor / 16) * 10 + (minor % 16);
11916 		sc->sc_nvm_ver_major = major;
11917 		sc->sc_nvm_ver_minor = minor;
11918 
11919 printver:
11920 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
11921 		    sc->sc_nvm_ver_minor);
11922 		if (have_build) {
11923 			sc->sc_nvm_ver_build = build;
11924 			aprint_verbose(".%d", build);
11925 		}
11926 	}
11927 	if (check_optionrom) {
11928 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
11929 		/* Option ROM Version */
11930 		if ((off != 0x0000) && (off != 0xffff)) {
11931 			off += NVM_COMBO_VER_OFF;
11932 			wm_nvm_read(sc, off + 1, 1, &uid1);
11933 			wm_nvm_read(sc, off, 1, &uid0);
11934 			if ((uid0 != 0) && (uid0 != 0xffff)
11935 			    && (uid1 != 0) && (uid1 != 0xffff)) {
11936 				/* 16bits */
11937 				major = uid0 >> 8;
11938 				build = (uid0 << 8) | (uid1 >> 8);
11939 				patch = uid1 & 0x00ff;
11940 				aprint_verbose(", option ROM Version %d.%d.%d",
11941 				    major, build, patch);
11942 			}
11943 		}
11944 	}
11945 
11946 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
11947 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
11948 }
11949 
11950 /*
11951  * wm_nvm_read:
11952  *
11953  *	Read data from the serial EEPROM.
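 *
 *	A minimal caller sketch (illustrative only; real callers use
 *	the NVM_OFF_* offsets defined in if_wmreg.h):
 *
 *		uint16_t word;
 *
 *		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &word) != 0)
 *			aprint_error_dev(sc->sc_dev, "NVM read failed\n");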
11954  */
11955 static int
11956 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
11957 {
11958 	int rv;
11959 
11960 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11961 		device_xname(sc->sc_dev), __func__));
11962 
11963 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
11964 		return 1;
11965 
11966 	if (wm_nvm_acquire(sc))
11967 		return 1;
11968 
11969 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
11970 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
11971 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
11972 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
11973 	else if (sc->sc_type == WM_T_PCH_SPT)
11974 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
11975 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
11976 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
11977 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
11978 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
11979 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
11980 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
11981 	else
11982 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
11983 
11984 	wm_nvm_release(sc);
11985 	return rv;
11986 }
11987 
11988 /*
11989  * Hardware semaphores.
11990  * Very complex...
11991  */
11992 
11993 static int
11994 wm_get_null(struct wm_softc *sc)
11995 {
11996 
11997 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11998 		device_xname(sc->sc_dev), __func__));
11999 	return 0;
12000 }
12001 
12002 static void
12003 wm_put_null(struct wm_softc *sc)
12004 {
12005 
12006 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12007 		device_xname(sc->sc_dev), __func__));
12008 	return;
12009 }
12010 
12011 /*
12012  * Get hardware semaphore.
12013  * Same as e1000_get_hw_semaphore_generic()
12014  */
12015 static int
12016 wm_get_swsm_semaphore(struct wm_softc *sc)
12017 {
12018 	int32_t timeout;
12019 	uint32_t swsm;
12020 
12021 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12022 		device_xname(sc->sc_dev), __func__));
12023 	KASSERT(sc->sc_nvm_wordsize > 0);
12024 
12025 	/* Get the SW semaphore. */
12026 	timeout = sc->sc_nvm_wordsize + 1;
12027 	while (timeout) {
12028 		swsm = CSR_READ(sc, WMREG_SWSM);
12029 
12030 		if ((swsm & SWSM_SMBI) == 0)
12031 			break;
12032 
12033 		delay(50);
12034 		timeout--;
12035 	}
12036 
12037 	if (timeout == 0) {
12038 		aprint_error_dev(sc->sc_dev,
12039 		    "could not acquire SWSM SMBI\n");
12040 		return 1;
12041 	}
12042 
12043 	/* Get the FW semaphore. */
12044 	timeout = sc->sc_nvm_wordsize + 1;
12045 	while (timeout) {
12046 		swsm = CSR_READ(sc, WMREG_SWSM);
12047 		swsm |= SWSM_SWESMBI;
12048 		CSR_WRITE(sc, WMREG_SWSM, swsm);
12049 		/* If we managed to set the bit we got the semaphore. */
12050 		swsm = CSR_READ(sc, WMREG_SWSM);
12051 		if (swsm & SWSM_SWESMBI)
12052 			break;
12053 
12054 		delay(50);
12055 		timeout--;
12056 	}
12057 
12058 	if (timeout == 0) {
12059 		aprint_error_dev(sc->sc_dev,
12060 		    "could not acquire SWSM SWESMBI\n");
12061 		/* Release semaphores */
12062 		wm_put_swsm_semaphore(sc);
12063 		return 1;
12064 	}
12065 	return 0;
12066 }
12067 
12068 /*
12069  * Put hardware semaphore.
12070  * Same as e1000_put_hw_semaphore_generic()
12071  */
12072 static void
12073 wm_put_swsm_semaphore(struct wm_softc *sc)
12074 {
12075 	uint32_t swsm;
12076 
12077 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12078 		device_xname(sc->sc_dev), __func__));
12079 
12080 	swsm = CSR_READ(sc, WMREG_SWSM);
12081 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
12082 	CSR_WRITE(sc, WMREG_SWSM, swsm);
12083 }
12084 
12085 /*
12086  * Get SW/FW semaphore.
12087  * Same as e1000_acquire_swfw_sync_82575().
12088  */
12089 static int
12090 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
12091 {
12092 	uint32_t swfw_sync;
12093 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
12094 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
12095 	int timeout = 200;
12096 
12097 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12098 		device_xname(sc->sc_dev), __func__));
12099 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
12100 
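	/*
	 * Under the SWSM semaphore (where required), claim our bit in
	 * SW_FW_SYNC only when neither software nor firmware currently
	 * holds the resource.
	 */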
12101 	for (timeout = 0; timeout < 200; timeout++) {
12102 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
12103 			if (wm_get_swsm_semaphore(sc)) {
12104 				aprint_error_dev(sc->sc_dev,
12105 				    "%s: failed to get semaphore\n",
12106 				    __func__);
12107 				return 1;
12108 			}
12109 		}
12110 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
12111 		if ((swfw_sync & (swmask | fwmask)) == 0) {
12112 			swfw_sync |= swmask;
12113 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
12114 			if (sc->sc_flags & WM_F_LOCK_SWSM)
12115 				wm_put_swsm_semaphore(sc);
12116 			return 0;
12117 		}
12118 		if (sc->sc_flags & WM_F_LOCK_SWSM)
12119 			wm_put_swsm_semaphore(sc);
12120 		delay(5000);
12121 	}
12122 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
12123 	    device_xname(sc->sc_dev), mask, swfw_sync);
12124 	return 1;
12125 }
12126 
12127 static void
12128 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
12129 {
12130 	uint32_t swfw_sync;
12131 
12132 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12133 		device_xname(sc->sc_dev), __func__));
12134 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
12135 
12136 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
12137 		while (wm_get_swsm_semaphore(sc) != 0)
12138 			continue;
12139 	}
12140 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
12141 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
12142 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
12143 	if (sc->sc_flags & WM_F_LOCK_SWSM)
12144 		wm_put_swsm_semaphore(sc);
12145 }
12146 
12147 static int
12148 wm_get_phy_82575(struct wm_softc *sc)
12149 {
12150 
12151 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12152 		device_xname(sc->sc_dev), __func__));
12153 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
12154 }
12155 
12156 static void
12157 wm_put_phy_82575(struct wm_softc *sc)
12158 {
12159 
12160 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12161 		device_xname(sc->sc_dev), __func__));
12162 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
12163 }
12164 
12165 static int
12166 wm_get_swfwhw_semaphore(struct wm_softc *sc)
12167 {
12168 	uint32_t ext_ctrl;
12169 	int timeout = 200;
12170 
12171 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12172 		device_xname(sc->sc_dev), __func__));
12173 
12174 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
12175 	for (timeout = 0; timeout < 200; timeout++) {
12176 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
12177 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
12178 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
12179 
12180 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
12181 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
12182 			return 0;
12183 		delay(5000);
12184 	}
12185 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
12186 	    device_xname(sc->sc_dev), ext_ctrl);
12187 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
12188 	return 1;
12189 }
12190 
12191 static void
12192 wm_put_swfwhw_semaphore(struct wm_softc *sc)
12193 {
12194 	uint32_t ext_ctrl;
12195 
12196 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12197 		device_xname(sc->sc_dev), __func__));
12198 
12199 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
12200 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
12201 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
12202 
12203 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
12204 }
12205 
12206 static int
12207 wm_get_swflag_ich8lan(struct wm_softc *sc)
12208 {
12209 	uint32_t ext_ctrl;
12210 	int timeout;
12211 
12212 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12213 		device_xname(sc->sc_dev), __func__));
12214 	mutex_enter(sc->sc_ich_phymtx);
12215 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
12216 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
12217 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
12218 			break;
12219 		delay(1000);
12220 	}
12221 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
12222 		printf("%s: SW has already locked the resource\n",
12223 		    device_xname(sc->sc_dev));
12224 		goto out;
12225 	}
12226 
12227 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
12228 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
12229 	for (timeout = 0; timeout < 1000; timeout++) {
12230 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
12231 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
12232 			break;
12233 		delay(1000);
12234 	}
12235 	if (timeout >= 1000) {
12236 		printf("%s: failed to acquire semaphore\n",
12237 		    device_xname(sc->sc_dev));
12238 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
12239 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
12240 		goto out;
12241 	}
12242 	return 0;
12243 
12244 out:
12245 	mutex_exit(sc->sc_ich_phymtx);
12246 	return 1;
12247 }
12248 
12249 static void
12250 wm_put_swflag_ich8lan(struct wm_softc *sc)
12251 {
12252 	uint32_t ext_ctrl;
12253 
12254 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12255 		device_xname(sc->sc_dev), __func__));
12256 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
12257 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
12258 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
12259 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
12260 	} else {
12261 		printf("%s: Semaphore unexpectedly released\n",
12262 		    device_xname(sc->sc_dev));
12263 	}
12264 
12265 	mutex_exit(sc->sc_ich_phymtx);
12266 }
12267 
12268 static int
12269 wm_get_nvm_ich8lan(struct wm_softc *sc)
12270 {
12271 
12272 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12273 		device_xname(sc->sc_dev), __func__));
12274 	mutex_enter(sc->sc_ich_nvmmtx);
12275 
12276 	return 0;
12277 }
12278 
12279 static void
12280 wm_put_nvm_ich8lan(struct wm_softc *sc)
12281 {
12282 
12283 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12284 		device_xname(sc->sc_dev), __func__));
12285 	mutex_exit(sc->sc_ich_nvmmtx);
12286 }
12287 
12288 static int
12289 wm_get_hw_semaphore_82573(struct wm_softc *sc)
12290 {
12291 	int i = 0;
12292 	uint32_t reg;
12293 
12294 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12295 		device_xname(sc->sc_dev), __func__));
12296 
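	/* Set the SW ownership bit and read it back to confirm the grant. */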
12297 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
12298 	do {
12299 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
12300 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
12301 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
12302 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
12303 			break;
12304 		delay(2*1000);
12305 		i++;
12306 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
12307 
12308 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
12309 		wm_put_hw_semaphore_82573(sc);
12310 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
12311 		    device_xname(sc->sc_dev));
12312 		return -1;
12313 	}
12314 
12315 	return 0;
12316 }
12317 
12318 static void
12319 wm_put_hw_semaphore_82573(struct wm_softc *sc)
12320 {
12321 	uint32_t reg;
12322 
12323 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12324 		device_xname(sc->sc_dev), __func__));
12325 
12326 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
12327 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
12328 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
12329 }
12330 
12331 /*
12332  * Management mode and power management related subroutines.
12333  * BMC, AMT, suspend/resume and EEE.
12334  */
12335 
12336 #ifdef WM_WOL
12337 static int
12338 wm_check_mng_mode(struct wm_softc *sc)
12339 {
12340 	int rv;
12341 
12342 	switch (sc->sc_type) {
12343 	case WM_T_ICH8:
12344 	case WM_T_ICH9:
12345 	case WM_T_ICH10:
12346 	case WM_T_PCH:
12347 	case WM_T_PCH2:
12348 	case WM_T_PCH_LPT:
12349 	case WM_T_PCH_SPT:
12350 		rv = wm_check_mng_mode_ich8lan(sc);
12351 		break;
12352 	case WM_T_82574:
12353 	case WM_T_82583:
12354 		rv = wm_check_mng_mode_82574(sc);
12355 		break;
12356 	case WM_T_82571:
12357 	case WM_T_82572:
12358 	case WM_T_82573:
12359 	case WM_T_80003:
12360 		rv = wm_check_mng_mode_generic(sc);
12361 		break;
12362 	default:
12363 		/* Nothing to do */
12364 		rv = 0;
12365 		break;
12366 	}
12367 
12368 	return rv;
12369 }
12370 
12371 static int
12372 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
12373 {
12374 	uint32_t fwsm;
12375 
12376 	fwsm = CSR_READ(sc, WMREG_FWSM);
12377 
12378 	if (((fwsm & FWSM_FW_VALID) != 0)
12379 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
12380 		return 1;
12381 
12382 	return 0;
12383 }
12384 
12385 static int
12386 wm_check_mng_mode_82574(struct wm_softc *sc)
12387 {
12388 	uint16_t data;
12389 
12390 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
12391 
12392 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
12393 		return 1;
12394 
12395 	return 0;
12396 }
12397 
12398 static int
12399 wm_check_mng_mode_generic(struct wm_softc *sc)
12400 {
12401 	uint32_t fwsm;
12402 
12403 	fwsm = CSR_READ(sc, WMREG_FWSM);
12404 
12405 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
12406 		return 1;
12407 
12408 	return 0;
12409 }
12410 #endif /* WM_WOL */
12411 
12412 static int
12413 wm_enable_mng_pass_thru(struct wm_softc *sc)
12414 {
12415 	uint32_t manc, fwsm, factps;
12416 
12417 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
12418 		return 0;
12419 
12420 	manc = CSR_READ(sc, WMREG_MANC);
12421 
12422 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
12423 		device_xname(sc->sc_dev), manc));
12424 	if ((manc & MANC_RECV_TCO_EN) == 0)
12425 		return 0;
12426 
12427 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
12428 		fwsm = CSR_READ(sc, WMREG_FWSM);
12429 		factps = CSR_READ(sc, WMREG_FACTPS);
12430 		if (((factps & FACTPS_MNGCG) == 0)
12431 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
12432 			return 1;
12433 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
12434 		uint16_t data;
12435 
12436 		factps = CSR_READ(sc, WMREG_FACTPS);
12437 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
12438 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
12439 			device_xname(sc->sc_dev), factps, data));
12440 		if (((factps & FACTPS_MNGCG) == 0)
12441 		    && ((data & NVM_CFG2_MNGM_MASK)
12442 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
12443 			return 1;
12444 	} else if (((manc & MANC_SMBUS_EN) != 0)
12445 	    && ((manc & MANC_ASF_EN) == 0))
12446 		return 1;
12447 
12448 	return 0;
12449 }
12450 
12451 static bool
12452 wm_phy_resetisblocked(struct wm_softc *sc)
12453 {
12454 	bool blocked = false;
12455 	uint32_t reg;
12456 	int i = 0;
12457 
12458 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12459 		device_xname(sc->sc_dev), __func__));
12460 
12461 	switch (sc->sc_type) {
12462 	case WM_T_ICH8:
12463 	case WM_T_ICH9:
12464 	case WM_T_ICH10:
12465 	case WM_T_PCH:
12466 	case WM_T_PCH2:
12467 	case WM_T_PCH_LPT:
12468 	case WM_T_PCH_SPT:
12469 		do {
12470 			reg = CSR_READ(sc, WMREG_FWSM);
12471 			if ((reg & FWSM_RSPCIPHY) == 0) {
12472 				blocked = true;
12473 				delay(10*1000);
12474 				continue;
12475 			}
12476 			blocked = false;
12477 		} while (blocked && (i++ < 30));
12478 		return blocked;
12480 	case WM_T_82571:
12481 	case WM_T_82572:
12482 	case WM_T_82573:
12483 	case WM_T_82574:
12484 	case WM_T_82583:
12485 	case WM_T_80003:
12486 		reg = CSR_READ(sc, WMREG_MANC);
12487 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
12488 			return true;
12489 		else
12490 			return false;
12492 	default:
12493 		/* no problem */
12494 		break;
12495 	}
12496 
12497 	return false;
12498 }
12499 
12500 static void
12501 wm_get_hw_control(struct wm_softc *sc)
12502 {
12503 	uint32_t reg;
12504 
12505 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12506 		device_xname(sc->sc_dev), __func__));
12507 
12508 	if (sc->sc_type == WM_T_82573) {
12509 		reg = CSR_READ(sc, WMREG_SWSM);
12510 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
12511 	} else if (sc->sc_type >= WM_T_82571) {
12512 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
12513 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
12514 	}
12515 }
12516 
12517 static void
12518 wm_release_hw_control(struct wm_softc *sc)
12519 {
12520 	uint32_t reg;
12521 
12522 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12523 		device_xname(sc->sc_dev), __func__));
12524 
12525 	if (sc->sc_type == WM_T_82573) {
12526 		reg = CSR_READ(sc, WMREG_SWSM);
12527 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
12528 	} else if (sc->sc_type >= WM_T_82571) {
12529 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
12530 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
12531 	}
12532 }
12533 
12534 static void
12535 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
12536 {
12537 	uint32_t reg;
12538 
12539 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12540 		device_xname(sc->sc_dev), __func__));
12541 
12542 	if (sc->sc_type < WM_T_PCH2)
12543 		return;
12544 
12545 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
12546 
12547 	if (gate)
12548 		reg |= EXTCNFCTR_GATE_PHY_CFG;
12549 	else
12550 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
12551 
12552 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
12553 }
12554 
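/*
 * Switch the PHY access path from SMBus back to the normal interface
 * on PCH and newer, where firmware may leave the PHY forced into
 * SMBus mode: force SMBus in the MAC if the PHY is not accessible,
 * toggle LANPHYPC to power-cycle the PHY, and finally reset the PHY
 * unless the reset is blocked by firmware.
 */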
12555 static void
12556 wm_smbustopci(struct wm_softc *sc)
12557 {
12558 	uint32_t fwsm, reg;
12559 	int rv = 0;
12560 
12561 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12562 		device_xname(sc->sc_dev), __func__));
12563 
12564 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
12565 	wm_gate_hw_phy_config_ich8lan(sc, true);
12566 
12567 	/* Disable ULP */
12568 	wm_ulp_disable(sc);
12569 
12570 	/* Acquire PHY semaphore */
12571 	sc->phy.acquire(sc);
12572 
12573 	fwsm = CSR_READ(sc, WMREG_FWSM);
12574 	switch (sc->sc_type) {
12575 	case WM_T_PCH_LPT:
12576 	case WM_T_PCH_SPT:
12577 		if (wm_phy_is_accessible_pchlan(sc))
12578 			break;
12579 
12580 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
12581 		reg |= CTRL_EXT_FORCE_SMBUS;
12582 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12583 #if 0
12584 		/* XXX Isn't this required??? */
12585 		CSR_WRITE_FLUSH(sc);
12586 #endif
12587 		delay(50 * 1000);
12588 		/* FALLTHROUGH */
12589 	case WM_T_PCH2:
12590 		if (wm_phy_is_accessible_pchlan(sc) == true)
12591 			break;
12592 		/* FALLTHROUGH */
12593 	case WM_T_PCH:
12594 		if (sc->sc_type == WM_T_PCH)
12595 			if ((fwsm & FWSM_FW_VALID) != 0)
12596 				break;
12597 
12598 		if (wm_phy_resetisblocked(sc) == true) {
12599 			printf("XXX reset is blocked(3)\n");
12600 			break;
12601 		}
12602 
12603 		wm_toggle_lanphypc_pch_lpt(sc);
12604 
12605 		if (sc->sc_type >= WM_T_PCH_LPT) {
12606 			if (wm_phy_is_accessible_pchlan(sc) == true)
12607 				break;
12608 
12609 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
12610 			reg &= ~CTRL_EXT_FORCE_SMBUS;
12611 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12612 
12613 			if (wm_phy_is_accessible_pchlan(sc) == true)
12614 				break;
12615 			rv = -1;
12616 		}
12617 		break;
12618 	default:
12619 		break;
12620 	}
12621 
12622 	/* Release semaphore */
12623 	sc->phy.release(sc);
12624 
12625 	if (rv == 0) {
12626 		if (wm_phy_resetisblocked(sc)) {
12627 			printf("XXX reset is blocked(4)\n");
12628 			goto out;
12629 		}
12630 		wm_reset_phy(sc);
12631 		if (wm_phy_resetisblocked(sc))
12632 			printf("XXX reset is blocked(5)\n");
12633 	}
12634 
12635 out:
12636 	/*
12637 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
12638 	 */
12639 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
12640 		delay(10*1000);
12641 		wm_gate_hw_phy_config_ich8lan(sc, false);
12642 	}
12643 }
12644 
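/*
 * Let the host see ARP packets instead of having the firmware
 * intercept them, and (on 82571 and newer) route the management ports
 * selected by MANC2H_PORT_623/MANC2H_PORT_624 up to the host as well.
 */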
12645 static void
12646 wm_init_manageability(struct wm_softc *sc)
12647 {
12648 
12649 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12650 		device_xname(sc->sc_dev), __func__));
12651 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
12652 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
12653 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
12654 
12655 		/* Disable hardware interception of ARP */
12656 		manc &= ~MANC_ARP_EN;
12657 
12658 		/* Enable receiving management packets to the host */
12659 		if (sc->sc_type >= WM_T_82571) {
12660 			manc |= MANC_EN_MNG2HOST;
12661 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
12662 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
12663 		}
12664 
12665 		CSR_WRITE(sc, WMREG_MANC, manc);
12666 	}
12667 }
12668 
12669 static void
12670 wm_release_manageability(struct wm_softc *sc)
12671 {
12672 
12673 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
12674 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
12675 
12676 		manc |= MANC_ARP_EN;
12677 		if (sc->sc_type >= WM_T_82571)
12678 			manc &= ~MANC_EN_MNG2HOST;
12679 
12680 		CSR_WRITE(sc, WMREG_MANC, manc);
12681 	}
12682 }
12683 
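/*
 * Record the chip's wakeup/manageability features in sc_flags
 * (HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES and, via
 * wm_enable_mng_pass_thru(), HAS_MANAGE).
 */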
12684 static void
12685 wm_get_wakeup(struct wm_softc *sc)
12686 {
12687 
12688 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
12689 	switch (sc->sc_type) {
12690 	case WM_T_82573:
12691 	case WM_T_82583:
12692 		sc->sc_flags |= WM_F_HAS_AMT;
12693 		/* FALLTHROUGH */
12694 	case WM_T_80003:
12695 	case WM_T_82575:
12696 	case WM_T_82576:
12697 	case WM_T_82580:
12698 	case WM_T_I350:
12699 	case WM_T_I354:
12700 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
12701 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
12702 		/* FALLTHROUGH */
12703 	case WM_T_82541:
12704 	case WM_T_82541_2:
12705 	case WM_T_82547:
12706 	case WM_T_82547_2:
12707 	case WM_T_82571:
12708 	case WM_T_82572:
12709 	case WM_T_82574:
12710 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
12711 		break;
12712 	case WM_T_ICH8:
12713 	case WM_T_ICH9:
12714 	case WM_T_ICH10:
12715 	case WM_T_PCH:
12716 	case WM_T_PCH2:
12717 	case WM_T_PCH_LPT:
12718 	case WM_T_PCH_SPT:
12719 		sc->sc_flags |= WM_F_HAS_AMT;
12720 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
12721 		break;
12722 	default:
12723 		break;
12724 	}
12725 
12726 	/* 1: HAS_MANAGE */
12727 	if (wm_enable_mng_pass_thru(sc) != 0)
12728 		sc->sc_flags |= WM_F_HAS_MANAGE;
12729 
12730 #ifdef WM_DEBUG
12731 	printf("\n");
12732 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
12733 		printf("HAS_AMT,");
12734 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
12735 		printf("ARC_SUBSYS_VALID,");
12736 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
12737 		printf("ASF_FIRMWARE_PRES,");
12738 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
12739 		printf("HAS_MANAGE,");
12740 	printf("\n");
12741 #endif
12742 	/*
12743 	 * Note that the WOL flag is set after the EEPROM contents have
12744 	 * been reset.
12745 	 */
12746 }
12747 
12748 /*
12749  * Unconfigure Ultra Low Power mode.
12750  * Only for I217 and newer (see below).
12751  */
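/*
 * There are two exit paths: when ME firmware is valid, the MAC merely
 * sets H2ME_ENFORCE_SETTINGS and waits for FWSM_ULP_CFG_DONE to
 * clear; otherwise ULP is unwound by hand through LANPHYPC,
 * CV_SMB_CTRL and I218_ULP_CONFIG1.
 */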
12752 static void
12753 wm_ulp_disable(struct wm_softc *sc)
12754 {
12755 	uint32_t reg;
12756 	int i = 0;
12757 
12758 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12759 		device_xname(sc->sc_dev), __func__));
12760 	/* Exclude devices without ULP support */
12761 	if ((sc->sc_type < WM_T_PCH_LPT)
12762 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
12763 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
12764 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
12765 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
12766 		return;
12767 
12768 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
12769 		/* Request ME un-configure ULP mode in the PHY */
12770 		reg = CSR_READ(sc, WMREG_H2ME);
12771 		reg &= ~H2ME_ULP;
12772 		reg |= H2ME_ENFORCE_SETTINGS;
12773 		CSR_WRITE(sc, WMREG_H2ME, reg);
12774 
12775 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
12776 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
12777 			if (i++ == 30) {
12778 				printf("%s timed out\n", __func__);
12779 				return;
12780 			}
12781 			delay(10 * 1000);
12782 		}
12783 		reg = CSR_READ(sc, WMREG_H2ME);
12784 		reg &= ~H2ME_ENFORCE_SETTINGS;
12785 		CSR_WRITE(sc, WMREG_H2ME, reg);
12786 
12787 		return;
12788 	}
12789 
12790 	/* Acquire semaphore */
12791 	sc->phy.acquire(sc);
12792 
12793 	/* Toggle LANPHYPC */
12794 	wm_toggle_lanphypc_pch_lpt(sc);
12795 
12796 	/* Unforce SMBus mode in PHY */
12797 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
12798 	if (reg == 0x0000 || reg == 0xffff) {
12799 		uint32_t reg2;
12800 
12801 		printf("%s: Force SMBus first.\n", __func__);
12802 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
12803 		reg2 |= CTRL_EXT_FORCE_SMBUS;
12804 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
12805 		delay(50 * 1000);
12806 
12807 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
12808 	}
12809 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
12810 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
12811 
12812 	/* Unforce SMBus mode in MAC */
12813 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
12814 	reg &= ~CTRL_EXT_FORCE_SMBUS;
12815 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12816 
12817 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
12818 	reg |= HV_PM_CTRL_K1_ENA;
12819 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
12820 
12821 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
12822 	reg &= ~(I218_ULP_CONFIG1_IND
12823 	    | I218_ULP_CONFIG1_STICKY_ULP
12824 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
12825 	    | I218_ULP_CONFIG1_WOL_HOST
12826 	    | I218_ULP_CONFIG1_INBAND_EXIT
12827 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
12828 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
12829 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
12830 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
12831 	reg |= I218_ULP_CONFIG1_START;
12832 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
12833 
12834 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
12835 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
12836 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
12837 
12838 	/* Release semaphore */
12839 	sc->phy.release(sc);
12840 	wm_gmii_reset(sc);
12841 	delay(50 * 1000);
12842 }
12843 
12844 /* WOL in the newer chipset interfaces (pchlan) */
12845 static void
12846 wm_enable_phy_wakeup(struct wm_softc *sc)
12847 {
12848 #if 0
12849 	uint16_t preg;
12850 
12851 	/* Copy MAC RARs to PHY RARs */
12852 
12853 	/* Copy MAC MTA to PHY MTA */
12854 
12855 	/* Configure PHY Rx Control register */
12856 
12857 	/* Enable PHY wakeup in MAC register */
12858 
12859 	/* Configure and enable PHY wakeup in PHY registers */
12860 
12861 	/* Activate PHY wakeup */
12862 
12863 	/* XXX */
12864 #endif
12865 }
12866 
12867 /* Power down workaround on D3 */
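/*
 * Force the IGP3 voltage regulator into shutdown before D3 entry:
 * disable gig link, apply the downshift workaround on ICH8, write
 * IGP3_VR_CTRL_MODE_SHUTDOWN and verify it by reading back, retrying
 * once more after a PHY reset if the first write did not stick.
 */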
12868 static void
12869 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
12870 {
12871 	uint32_t reg;
12872 	int i;
12873 
12874 	for (i = 0; i < 2; i++) {
12875 		/* Disable link */
12876 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
12877 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
12878 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
12879 
12880 		/*
12881 		 * Call gig speed drop workaround on Gig disable before
12882 		 * accessing any PHY registers
12883 		 */
12884 		if (sc->sc_type == WM_T_ICH8)
12885 			wm_gig_downshift_workaround_ich8lan(sc);
12886 
12887 		/* Write VR power-down enable */
12888 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
12889 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
12890 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
12891 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
12892 
12893 		/* Read it back and test */
12894 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
12895 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
12896 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
12897 			break;
12898 
12899 		/* Issue PHY reset and repeat at most one more time */
12900 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
12901 	}
12902 }
12903 
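/*
 * Arm the hardware for wake-on-LAN: advertise APME, apply the ICH/PCH
 * workarounds (no gig speed during WOL), keep the laser running on
 * fiber, enable magic packet reception via WUC/WUFC (or the PHY based
 * wakeup on PCH and newer) and finally set PME_EN in the PCI PMCSR.
 */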
12904 static void
12905 wm_enable_wakeup(struct wm_softc *sc)
12906 {
12907 	uint32_t reg, pmreg;
12908 	pcireg_t pmode;
12909 
12910 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12911 		device_xname(sc->sc_dev), __func__));
12912 
12913 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
12914 		&pmreg, NULL) == 0)
12915 		return;
12916 
12917 	/* Advertise the wakeup capability */
12918 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
12919 	    | CTRL_SWDPIN(3));
12920 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
12921 
12922 	/* ICH workaround */
12923 	switch (sc->sc_type) {
12924 	case WM_T_ICH8:
12925 	case WM_T_ICH9:
12926 	case WM_T_ICH10:
12927 	case WM_T_PCH:
12928 	case WM_T_PCH2:
12929 	case WM_T_PCH_LPT:
12930 	case WM_T_PCH_SPT:
12931 		/* Disable gig during WOL */
12932 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
12933 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
12934 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
12935 		if (sc->sc_type == WM_T_PCH)
12936 			wm_gmii_reset(sc);
12937 
12938 		/* Power down workaround */
12939 		if (sc->sc_phytype == WMPHY_82577) {
12940 			struct mii_softc *child;
12941 
12942 			/* Assume that the PHY is copper */
12943 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
12944 			if (child->mii_mpd_rev <= 2)
12945 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
12946 				    (768 << 5) | 25, 0x0444); /* magic num */
12947 		}
12948 		break;
12949 	default:
12950 		break;
12951 	}
12952 
12953 	/* Keep the laser running on fiber adapters */
12954 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
12955 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
12956 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
12957 		reg |= CTRL_EXT_SWDPIN(3);
12958 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12959 	}
12960 
12961 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
12962 #if 0	/* for the multicast packet */
12963 	reg |= WUFC_MC;
12964 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
12965 #endif
12966 
12967 	if (sc->sc_type >= WM_T_PCH)
12968 		wm_enable_phy_wakeup(sc);
12969 	else {
12970 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
12971 		CSR_WRITE(sc, WMREG_WUFC, reg);
12972 	}
12973 
12974 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
12975 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
12976 		|| (sc->sc_type == WM_T_PCH2))
12977 		    && (sc->sc_phytype == WMPHY_IGP_3))
12978 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
12979 
12980 	/* Request PME */
12981 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
12982 #if 0
12983 	/* Disable WOL */
12984 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
12985 #else
12986 	/* For WOL */
12987 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
12988 #endif
12989 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
12990 }
12991 
12992 /* LPLU */
12993 
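/*
 * LPLU (Low Power Link Up) makes the PHY link at the lowest speed
 * first to save power.  The helpers below clear the D0 LPLU setting,
 * either in the MAC's PHY_CTRL register or, on PCH, in the PHY's
 * HV_OEM_BITS, where setting ANEGNOW restarts auto-negotiation.
 */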
12994 static void
12995 wm_lplu_d0_disable(struct wm_softc *sc)
12996 {
12997 	uint32_t reg;
12998 
12999 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13000 		device_xname(sc->sc_dev), __func__));
13001 
13002 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
13003 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
13004 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
13005 }
13006 
13007 static void
13008 wm_lplu_d0_disable_pch(struct wm_softc *sc)
13009 {
13010 	uint32_t reg;
13011 
13012 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13013 		device_xname(sc->sc_dev), __func__));
13014 
13015 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
13016 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
13017 	reg |= HV_OEM_BITS_ANEGNOW;
13018 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
13019 }
13020 
13021 /* EEE */
13022 
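/*
 * Energy Efficient Ethernet (IEEE 802.3az).  When WM_F_EEE is set,
 * advertise EEE for 100M and 1G in IPCNFG and enable TX/RX low power
 * idle plus LPI flow control in EEER; otherwise clear all of them.
 */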
13023 static void
13024 wm_set_eee_i350(struct wm_softc *sc)
13025 {
13026 	uint32_t ipcnfg, eeer;
13027 
13028 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
13029 	eeer = CSR_READ(sc, WMREG_EEER);
13030 
13031 	if ((sc->sc_flags & WM_F_EEE) != 0) {
13032 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
13033 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
13034 		    | EEER_LPI_FC);
13035 	} else {
13036 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
13037 		ipcnfg &= ~IPCNFG_10BASE_TE;
13038 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
13039 		    | EEER_LPI_FC);
13040 	}
13041 
13042 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
13043 	CSR_WRITE(sc, WMREG_EEER, eeer);
13044 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
13045 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
13046 }
13047 
13048 /*
13049  * Workarounds (mainly PHY related).
13050  * Most PHY workarounds are implemented in the PHY drivers themselves.
13051  */
13052 
13053 /* Work-around for 82566 Kumeran PCS lock loss */
13054 static void
13055 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
13056 {
13057 #if 0
13058 	int miistatus, active, i;
13059 	int reg;
13060 
13061 	miistatus = sc->sc_mii.mii_media_status;
13062 
13063 	/* If the link is not up, do nothing */
13064 	if ((miistatus & IFM_ACTIVE) == 0)
13065 		return;
13066 
13067 	active = sc->sc_mii.mii_media_active;
13068 
13069 	/* Nothing to do if the link is other than 1Gbps */
13070 	/* Nothing to do if the link speed is not 1Gbps */
13071 		return;
13072 
13073 	for (i = 0; i < 10; i++) {
13074 		/* read twice */
13075 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
13076 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
13077 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
13078 			goto out;	/* GOOD! */
13079 
13080 		/* Reset the PHY */
13081 		wm_gmii_reset(sc);
13082 		delay(5*1000);
13083 	}
13084 
13085 	/* Disable GigE link negotiation */
13086 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
13087 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
13088 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
13089 
13090 	/*
13091 	 * Call gig speed drop workaround on Gig disable before accessing
13092 	 * any PHY registers.
13093 	 */
13094 	wm_gig_downshift_workaround_ich8lan(sc);
13095 
13096 out:
13097 	return;
13098 #endif
13099 }
13100 
13101 /* WOL from S5 stops working */
13102 /* Workaround: WOL from S5 stops working on igp3 PHYs */
13103 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
13104 {
13105 	uint16_t kmrn_reg;
13106 
13107 	/* Only for igp3 */
13108 	if (sc->sc_phytype == WMPHY_IGP_3) {
13109 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
13110 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
13111 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
13112 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
13113 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
13114 	}
13115 }
13116 
13117 /*
13118  * Workaround for pch's PHYs
13119  * XXX should be moved to new PHY driver?
13120  */
13121 static void
13122 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
13123 {
13124 
13125 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13126 		device_xname(sc->sc_dev), __func__));
13127 	KASSERT(sc->sc_type == WM_T_PCH);
13128 
13129 	if (sc->sc_phytype == WMPHY_82577)
13130 		wm_set_mdio_slow_mode_hv(sc);
13131 
13132 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
13133 
13134 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
13135 
13136 	/* 82578 */
13137 	if (sc->sc_phytype == WMPHY_82578) {
13138 		struct mii_softc *child;
13139 
13140 		/*
13141 		 * Return registers to default by doing a soft reset then
13142 		 * writing 0x3140 to the control register
13143 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
13144 		 */
13145 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
13146 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
13147 			PHY_RESET(child);
13148 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
13149 			    0x3140);
13150 		}
13151 	}
13152 
13153 	/* Select page 0 */
13154 	sc->phy.acquire(sc);
13155 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
13156 	sc->phy.release(sc);
13157 
13158 	/*
13159 	 * Configure the K1 Si workaround during phy reset assuming there is
13160 	 * link so that it disables K1 if link is in 1Gbps.
13161 	 */
13162 	wm_k1_gig_workaround_hv(sc, 1);
13163 }
13164 
13165 static void
13166 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
13167 {
13168 
13169 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13170 		device_xname(sc->sc_dev), __func__));
13171 	KASSERT(sc->sc_type == WM_T_PCH2);
13172 
13173 	wm_set_mdio_slow_mode_hv(sc);
13174 }
13175 
13176 static int
13177 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
13178 {
13179 	int k1_enable = sc->sc_nvm_k1_enabled;
13180 
13181 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13182 		device_xname(sc->sc_dev), __func__));
13183 
13184 	if (sc->phy.acquire(sc) != 0)
13185 		return -1;
13186 
13187 	if (link) {
13188 		k1_enable = 0;
13189 
13190 		/* Link stall fix for link up */
13191 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
13192 	} else {
13193 		/* Link stall fix for link down */
13194 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
13195 	}
13196 
13197 	wm_configure_k1_ich8lan(sc, k1_enable);
13198 	sc->phy.release(sc);
13199 
13200 	return 0;
13201 }
13202 
13203 static void
13204 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
13205 {
13206 	uint32_t reg;
13207 
13208 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
13209 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
13210 	    reg | HV_KMRN_MDIO_SLOW);
13211 }
13212 
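/*
 * Enable or disable the Kumeran K1 power saving state.  The new
 * setting is written through KUMCTRLSTA; the speed is then briefly
 * forced with CTRL_FRCSPD/CTRL_EXT_SPD_BYPS and restored so that the
 * change takes effect on the link.
 */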
13213 static void
13214 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
13215 {
13216 	uint32_t ctrl, ctrl_ext, tmp;
13217 	uint16_t kmrn_reg;
13218 
13219 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
13220 
13221 	if (k1_enable)
13222 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
13223 	else
13224 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
13225 
13226 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
13227 
13228 	delay(20);
13229 
13230 	ctrl = CSR_READ(sc, WMREG_CTRL);
13231 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
13232 
13233 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
13234 	tmp |= CTRL_FRCSPD;
13235 
13236 	CSR_WRITE(sc, WMREG_CTRL, tmp);
13237 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
13238 	CSR_WRITE_FLUSH(sc);
13239 	delay(20);
13240 
13241 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
13242 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
13243 	CSR_WRITE_FLUSH(sc);
13244 	delay(20);
13245 }
13246 
13247 /* special case - for 82575 - need to do manual init ... */
13248 static void
13249 wm_reset_init_script_82575(struct wm_softc *sc)
13250 {
13251 	/*
13252 	 * Remark: this is untested code - we have no board without EEPROM.
13253 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
13254 	 */
13255 
13256 	/* SerDes configuration via SERDESCTRL */
13257 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
13258 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
13259 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
13260 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
13261 
13262 	/* CCM configuration via CCMCTL register */
13263 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
13264 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
13265 
13266 	/* PCIe lanes configuration */
13267 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
13268 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
13269 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
13270 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
13271 
13272 	/* PCIe PLL Configuration */
13273 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
13274 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
13275 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
13276 }
13277 
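/*
 * Restore the MDICNFG destination (external MDIO) and shared MDIO
 * bits from the per-port CFG3 word in the NVM.  Only relevant when
 * the port runs in SGMII mode.
 */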
13278 static void
13279 wm_reset_mdicnfg_82580(struct wm_softc *sc)
13280 {
13281 	uint32_t reg;
13282 	uint16_t nvmword;
13283 	int rv;
13284 
13285 	if ((sc->sc_flags & WM_F_SGMII) == 0)
13286 		return;
13287 
13288 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
13289 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
13290 	if (rv != 0) {
13291 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
13292 		    __func__);
13293 		return;
13294 	}
13295 
13296 	reg = CSR_READ(sc, WMREG_MDICNFG);
13297 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
13298 		reg |= MDICNFG_DEST;
13299 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
13300 		reg |= MDICNFG_COM_MDIO;
13301 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
13302 }
13303 
13304 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
13305 
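/*
 * Probe whether the PCH PHY answers over the current interface: read
 * PHYIDR1/PHYIDR2 up to two times, retry in MDIO slow mode on pre-LPT
 * parts, and on LPT/SPT unforce SMBus mode once the PHY has proven
 * accessible (only when ME is not active).
 */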
13306 static bool
13307 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
13308 {
13309 	int i;
13310 	uint32_t reg;
13311 	uint16_t id1, id2;
13312 
13313 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13314 		device_xname(sc->sc_dev), __func__));
13315 	id1 = id2 = 0xffff;
13316 	for (i = 0; i < 2; i++) {
13317 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
13318 		if (MII_INVALIDID(id1))
13319 			continue;
13320 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
13321 		if (MII_INVALIDID(id2))
13322 			continue;
13323 		break;
13324 	}
13325 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2)) {
13326 		goto out;
13327 	}
13328 
13329 	if (sc->sc_type < WM_T_PCH_LPT) {
13330 		sc->phy.release(sc);
13331 		wm_set_mdio_slow_mode_hv(sc);
13332 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
13333 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
13334 		sc->phy.acquire(sc);
13335 	}
13336 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
13337 		printf("XXX return with false\n");
13338 		return false;
13339 	}
13340 out:
13341 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
13342 		/* Only unforce SMBus if ME is not active */
13343 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
13344 			/* Unforce SMBus mode in PHY */
13345 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
13346 			    CV_SMB_CTRL);
13347 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
13348 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
13349 			    CV_SMB_CTRL, reg);
13350 
13351 			/* Unforce SMBus mode in MAC */
13352 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
13353 			reg &= ~CTRL_EXT_FORCE_SMBUS;
13354 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
13355 		}
13356 	}
13357 	return true;
13358 }
13359 
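/*
 * Pulse the LANPHYPC pin to power-cycle the PHY: program a 50msec PHY
 * config counter, drive LANPHYPC low through the override bit for
 * 1msec, then wait either a fixed 50msec (pre-LPT) or until
 * CTRL_EXT_LPCD signals completion.
 */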
13360 static void
13361 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
13362 {
13363 	uint32_t reg;
13364 	int i;
13365 
13366 	/* Set PHY Config Counter to 50msec */
13367 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
13368 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
13369 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
13370 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
13371 
13372 	/* Toggle LANPHYPC */
13373 	reg = CSR_READ(sc, WMREG_CTRL);
13374 	reg |= CTRL_LANPHYPC_OVERRIDE;
13375 	reg &= ~CTRL_LANPHYPC_VALUE;
13376 	CSR_WRITE(sc, WMREG_CTRL, reg);
13377 	CSR_WRITE_FLUSH(sc);
13378 	delay(1000);
13379 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
13380 	CSR_WRITE(sc, WMREG_CTRL, reg);
13381 	CSR_WRITE_FLUSH(sc);
13382 
13383 	if (sc->sc_type < WM_T_PCH_LPT)
13384 		delay(50 * 1000);
13385 	else {
13386 		i = 20;
13387 
13388 		do {
13389 			delay(5 * 1000);
13390 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
13391 		    && i--);
13392 
13393 		delay(30 * 1000);
13394 	}
13395 }
13396 
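/*
 * Report the platform's latency tolerance (LTR) for PCH LPT/SPT.
 * With link up, the tolerable latency is derived from the time the
 * RX buffer (PBA_RXA) can absorb traffic at the current link speed,
 * clamped to the maximum advertised in the LTR capability; with link
 * down, zero latency values are sent.
 */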
13397 static int
13398 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
13399 {
13400 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
13401 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
13402 	uint32_t rxa;
13403 	uint16_t scale = 0, lat_enc = 0;
13404 	int64_t lat_ns, value;
13405 
13406 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13407 		device_xname(sc->sc_dev), __func__));
13408 
13409 	if (link) {
13410 		pcireg_t preg;
13411 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
13412 
13413 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
13414 
13415 		/*
13416 		 * Determine the maximum latency tolerated by the device.
13417 		 *
13418 		 * Per the PCIe spec, the tolerated latencies are encoded as
13419 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
13420 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
13421 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
13422 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
13423 		 */
13424 		lat_ns = ((int64_t)rxa * 1024 -
13425 		    (2 * (int64_t)sc->sc_ethercom.ec_if.if_mtu)) * 8 * 1000;
13426 		if (lat_ns < 0)
13427 			lat_ns = 0;
13428 		else {
13429 			uint32_t status;
13430 			uint16_t speed;
13431 
13432 			status = CSR_READ(sc, WMREG_STATUS);
13433 			switch (__SHIFTOUT(status, STATUS_SPEED)) {
13434 			case STATUS_SPEED_10:
13435 				speed = 10;
13436 				break;
13437 			case STATUS_SPEED_100:
13438 				speed = 100;
13439 				break;
13440 			case STATUS_SPEED_1000:
13441 				speed = 1000;
13442 				break;
13443 			default:
13444 				printf("%s: Unknown speed (status = %08x)\n",
13445 				    device_xname(sc->sc_dev), status);
13446 				return -1;
13447 			}
13448 			lat_ns /= speed;
13449 		}
13450 		value = lat_ns;
13451 
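		/*
		 * Example (illustrative numbers only): value = 100000ns
		 * shrinks to 3125 after one division by 32 and to 98 after
		 * a second one, so the encoding is scale = 2, value = 98,
		 * i.e. 98 * 2^10 ns, or about 100.4 usec.
		 */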
13452 		while (value > LTRV_VALUE) {
13453 			scale++;
13454 			value = howmany(value, __BIT(5));
13455 		}
13456 		if (scale > LTRV_SCALE_MAX) {
13457 			printf("%s: Invalid LTR latency scale %d\n",
13458 			    device_xname(sc->sc_dev), scale);
13459 			return -1;
13460 		}
13461 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
13462 
13463 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
13464 		    WM_PCI_LTR_CAP_LPT);
13465 		max_snoop = preg & 0xffff;
13466 		max_nosnoop = preg >> 16;
13467 
13468 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
13469 
13470 		if (lat_enc > max_ltr_enc) {
13471 			lat_enc = max_ltr_enc;
13472 		}
13473 	}
13474 	/* Snoop and No-Snoop latencies are set to the same value */
13475 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
13476 	CSR_WRITE(sc, WMREG_LTRV, reg);
13477 
13478 	return 0;
13479 }
13480 
13481 /*
13482  * I210 Errata 25 and I211 Errata 10
13483  * Slow System Clock.
13484  */
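/*
 * If the PHY reports an unconfigured PLL frequency, rewrite the iNVM
 * autoload word with INVM_PLL_WO_VAL, reset the internal PHY and
 * bounce the function through D3/D0 so that the PLL relocks, retrying
 * up to WM_MAX_PLL_TRIES times before restoring the saved WUC and
 * MDICNFG settings.
 */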
13485 static void
13486 wm_pll_workaround_i210(struct wm_softc *sc)
13487 {
13488 	uint32_t mdicnfg, wuc;
13489 	uint32_t reg;
13490 	pcireg_t pcireg;
13491 	uint32_t pmreg;
13492 	uint16_t nvmword, tmp_nvmword;
13493 	int phyval;
13494 	bool wa_done = false;
13495 	int i;
13496 
13497 	/* Save WUC and MDICNFG registers */
13498 	wuc = CSR_READ(sc, WMREG_WUC);
13499 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
13500 
13501 	reg = mdicnfg & ~MDICNFG_DEST;
13502 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
13503 
13504 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
13505 		nvmword = INVM_DEFAULT_AL;
13506 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
13507 
13508 	/* Get Power Management cap offset */
13509 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
13510 		&pmreg, NULL) == 0)
13511 		return;
13512 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
13513 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
13514 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
13515 
13516 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
13517 			break; /* OK */
13518 		}
13519 
13520 		wa_done = true;
13521 		/* Directly reset the internal PHY */
13522 		reg = CSR_READ(sc, WMREG_CTRL);
13523 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
13524 
13525 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
13526 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
13527 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
13528 
13529 		CSR_WRITE(sc, WMREG_WUC, 0);
13530 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
13531 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
13532 
13533 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
13534 		    pmreg + PCI_PMCSR);
13535 		pcireg |= PCI_PMCSR_STATE_D3;
13536 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
13537 		    pmreg + PCI_PMCSR, pcireg);
13538 		delay(1000);
13539 		pcireg &= ~PCI_PMCSR_STATE_D3;
13540 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
13541 		    pmreg + PCI_PMCSR, pcireg);
13542 
13543 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
13544 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
13545 
13546 		/* Restore WUC register */
13547 		CSR_WRITE(sc, WMREG_WUC, wuc);
13548 	}
13549 
13550 	/* Restore MDICNFG setting */
13551 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
13552 	if (wa_done)
13553 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
13554 }
13555