xref: /netbsd-src/sys/dev/pci/if_wm.c (revision 501cd18a74d52bfcca7d9e7e3b0d472bbc870558)
1 /*	$NetBSD: if_wm.c,v 1.465 2017/01/10 08:57:39 msaitoh Exp $	*/
2 
3 /*
4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*******************************************************************************
39 
40   Copyright (c) 2001-2005, Intel Corporation
41   All rights reserved.
42 
43   Redistribution and use in source and binary forms, with or without
44   modification, are permitted provided that the following conditions are met:
45 
46    1. Redistributions of source code must retain the above copyright notice,
47       this list of conditions and the following disclaimer.
48 
49    2. Redistributions in binary form must reproduce the above copyright
50       notice, this list of conditions and the following disclaimer in the
51       documentation and/or other materials provided with the distribution.
52 
53    3. Neither the name of the Intel Corporation nor the names of its
54       contributors may be used to endorse or promote products derived from
55       this software without specific prior written permission.
56 
57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67   POSSIBILITY OF SUCH DAMAGE.
68 
69 *******************************************************************************/
70 /*
71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72  *
73  * TODO (in order of importance):
74  *
75  *	- Check XXX'ed comments
76  *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
77  *	- TX multi-queue improvement (refine queue selection logic)
78  *	- Advanced Receive Descriptor
79  *	- EEE (Energy Efficient Ethernet)
80  *	- Virtual Function
81  *	- Set LED correctly (based on contents in EEPROM)
82  *	- Rework how parameters are loaded from the EEPROM.
83  *	- Image Unique ID
84  */
85 
86 #include <sys/cdefs.h>
87 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.465 2017/01/10 08:57:39 msaitoh Exp $");
88 
89 #ifdef _KERNEL_OPT
90 #include "opt_net_mpsafe.h"
91 #endif
92 
93 #include <sys/param.h>
94 #include <sys/systm.h>
95 #include <sys/callout.h>
96 #include <sys/mbuf.h>
97 #include <sys/malloc.h>
98 #include <sys/kmem.h>
99 #include <sys/kernel.h>
100 #include <sys/socket.h>
101 #include <sys/ioctl.h>
102 #include <sys/errno.h>
103 #include <sys/device.h>
104 #include <sys/queue.h>
105 #include <sys/syslog.h>
106 #include <sys/interrupt.h>
107 #include <sys/cpu.h>
108 #include <sys/pcq.h>
109 
110 #include <sys/rndsource.h>
111 
112 #include <net/if.h>
113 #include <net/if_dl.h>
114 #include <net/if_media.h>
115 #include <net/if_ether.h>
116 
117 #include <net/bpf.h>
118 
119 #include <netinet/in.h>			/* XXX for struct ip */
120 #include <netinet/in_systm.h>		/* XXX for struct ip */
121 #include <netinet/ip.h>			/* XXX for struct ip */
122 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
123 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
124 
125 #include <sys/bus.h>
126 #include <sys/intr.h>
127 #include <machine/endian.h>
128 
129 #include <dev/mii/mii.h>
130 #include <dev/mii/miivar.h>
131 #include <dev/mii/miidevs.h>
132 #include <dev/mii/mii_bitbang.h>
133 #include <dev/mii/ikphyreg.h>
134 #include <dev/mii/igphyreg.h>
135 #include <dev/mii/igphyvar.h>
136 #include <dev/mii/inbmphyreg.h>
137 
138 #include <dev/pci/pcireg.h>
139 #include <dev/pci/pcivar.h>
140 #include <dev/pci/pcidevs.h>
141 
142 #include <dev/pci/if_wmreg.h>
143 #include <dev/pci/if_wmvar.h>
144 
145 #ifdef WM_DEBUG
146 #define	WM_DEBUG_LINK		__BIT(0)
147 #define	WM_DEBUG_TX		__BIT(1)
148 #define	WM_DEBUG_RX		__BIT(2)
149 #define	WM_DEBUG_GMII		__BIT(3)
150 #define	WM_DEBUG_MANAGE		__BIT(4)
151 #define	WM_DEBUG_NVM		__BIT(5)
152 #define	WM_DEBUG_INIT		__BIT(6)
153 #define	WM_DEBUG_LOCK		__BIT(7)
154 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
155     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
156 
157 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
158 #else
159 #define	DPRINTF(x, y)	/* nothing */
160 #endif /* WM_DEBUG */
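
/*
 * Usage sketch (illustrative, not driver code): DPRINTF() takes the
 * printf arguments as one parenthesized list, e.g.
 *
 *	DPRINTF(WM_DEBUG_LINK,
 *	    ("%s: link state changed\n", device_xname(sc->sc_dev)));
 *
 * and expands to nothing unless WM_DEBUG is defined and the given bit
 * is set in wm_debug.
 */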
161 
162 #ifdef NET_MPSAFE
163 #define WM_MPSAFE	1
164 #endif
165 
166 /*
167  * The maximum number of interrupts this device driver supports.
168  */
169 #define WM_MAX_NQUEUEINTR	16
170 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
171 
172 /*
173  * Transmit descriptor list size.  Due to errata, we can only have
174  * 256 hardware descriptors in the ring on < 82544, but we use 4096
175  * on >= 82544.  We tell the upper layers that they can queue a lot
176  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
177  * of them at a time.
178  *
179  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
180  * chains containing many small mbufs have been observed in zero-copy
181  * situations with jumbo frames.
182  */
183 #define	WM_NTXSEGS		256
184 #define	WM_IFQUEUELEN		256
185 #define	WM_TXQUEUELEN_MAX	64
186 #define	WM_TXQUEUELEN_MAX_82547	16
187 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
188 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
189 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
190 #define	WM_NTXDESC_82542	256
191 #define	WM_NTXDESC_82544	4096
192 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
193 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
194 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
195 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
196 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
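
/*
 * Illustrative sketch: because the descriptor and job counts are
 * powers of two, the WM_NEXT*() macros wrap a ring index with a mask
 * instead of a modulo.  For a 256-descriptor ring:
 *
 *	int idx = 255;
 *	idx = WM_NEXTTX(txq, idx);	// (255 + 1) & 0xff == 0
 */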
197 
198 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
199 
200 #define	WM_TXINTERQSIZE		256
201 
202 /*
203  * Receive descriptor list size.  We have one Rx buffer for normal
204  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
205  * packet.  We allocate 256 receive descriptors, each with a 2k
206  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
207  */
208 #define	WM_NRXDESC		256
209 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
210 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
211 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
212 
213 typedef union txdescs {
214 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
215 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
216 } txdescs_t;
217 
218 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
219 #define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))
220 
221 /*
222  * Software state for transmit jobs.
223  */
224 struct wm_txsoft {
225 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
226 	bus_dmamap_t txs_dmamap;	/* our DMA map */
227 	int txs_firstdesc;		/* first descriptor in packet */
228 	int txs_lastdesc;		/* last descriptor in packet */
229 	int txs_ndesc;			/* # of descriptors used */
230 };
231 
232 /*
233  * Software state for receive buffers.  Each descriptor gets a
234  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
235  * more than one buffer, we chain them together.
236  */
237 struct wm_rxsoft {
238 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
239 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
240 };
241 
242 #define WM_LINKUP_TIMEOUT	50
243 
244 static uint16_t swfwphysem[] = {
245 	SWFW_PHY0_SM,
246 	SWFW_PHY1_SM,
247 	SWFW_PHY2_SM,
248 	SWFW_PHY3_SM
249 };
250 
251 static const uint32_t wm_82580_rxpbs_table[] = {
252 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
253 };
254 
255 struct wm_softc;
256 
257 #ifdef WM_EVENT_COUNTERS
258 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
259 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
260 	struct evcnt qname##_ev_##evname;
261 
262 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
263 	do {								\
264 		snprintf((q)->qname##_##evname##_evcnt_name,		\
265 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
266 		    "%s%02d%s", #qname, (qnum), #evname);		\
267 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
268 		    (evtype), NULL, (xname),				\
269 		    (q)->qname##_##evname##_evcnt_name);		\
270 	} while (0)
271 
272 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
273 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
274 
275 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
276 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
277 #endif /* WM_EVENT_COUNTERS */
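
/*
 * Usage sketch (hypothetical values): attaching the "txdw" interrupt
 * counter for queue 0 of device "wm0" would be written
 *
 *	WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, 0, "wm0");
 *
 * which snprintf()s the name "txq00txdw" into the per-queue name
 * buffer and registers the counter with evcnt_attach_dynamic(9).
 */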
278 
279 struct wm_txqueue {
280 	kmutex_t *txq_lock;		/* lock for tx operations */
281 
282 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
283 
284 	/* Software state for the transmit descriptors. */
285 	int txq_num;			/* must be a power of two */
286 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
287 
288 	/* TX control data structures. */
289 	int txq_ndesc;			/* must be a power of two */
290 	size_t txq_descsize;		/* a tx descriptor size */
291 	txdescs_t *txq_descs_u;
292 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
293 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
294 	int txq_desc_rseg;		/* real number of control segment */
295 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
296 #define	txq_descs	txq_descs_u->sctxu_txdescs
297 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
298 
299 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
300 
301 	int txq_free;			/* number of free Tx descriptors */
302 	int txq_next;			/* next ready Tx descriptor */
303 
304 	int txq_sfree;			/* number of free Tx jobs */
305 	int txq_snext;			/* next free Tx job */
306 	int txq_sdirty;			/* dirty Tx jobs */
307 
308 	/* These 4 variables are used only on the 82547. */
309 	int txq_fifo_size;		/* Tx FIFO size */
310 	int txq_fifo_head;		/* current head of FIFO */
311 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
312 	int txq_fifo_stall;		/* Tx FIFO is stalled */
313 
314 	/*
315 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
316 	 * CPUs.  This queue mediates between them without blocking.
317 	 */
318 	pcq_t *txq_interq;
319 
320 	/*
321 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
322 	 * to manage the Tx H/W queue's busy flag.
323 	 */
324 	int txq_flags;			/* flags for H/W queue, see below */
325 #define	WM_TXQ_NO_SPACE	0x1
326 
327 	bool txq_stopping;
328 
329 #ifdef WM_EVENT_COUNTERS
330 	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
331 	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
332 	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
333 	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
334 	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
335 						/* XXX not used? */
336 
337 	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
338 	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
339 	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
340 	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
341 	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
342 	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */
343 
344 	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */
345 
346 	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */
347 
348 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
349 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
350 #endif /* WM_EVENT_COUNTERS */
351 };
352 
353 struct wm_rxqueue {
354 	kmutex_t *rxq_lock;		/* lock for rx operations */
355 
356 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
357 
358 	/* Software state for the receive descriptors. */
359 	wiseman_rxdesc_t *rxq_descs;
360 
361 	/* RX control data structures. */
362 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
363 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
364 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
365 	int rxq_desc_rseg;		/* real number of control segment */
366 	size_t rxq_desc_size;		/* control data size */
367 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
368 
369 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
370 
371 	int rxq_ptr;			/* next ready Rx desc/queue ent */
372 	int rxq_discard;
373 	int rxq_len;
374 	struct mbuf *rxq_head;
375 	struct mbuf *rxq_tail;
376 	struct mbuf **rxq_tailp;
377 
378 	bool rxq_stopping;
379 
380 #ifdef WM_EVENT_COUNTERS
381 	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */
382 
383 	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
384 	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
385 #endif
386 };
387 
388 struct wm_queue {
389 	int wmq_id;			/* index of the TX/RX queue pair */
390 	int wmq_intr_idx;		/* index into the MSI-X table */
391 
392 	struct wm_txqueue wmq_txq;
393 	struct wm_rxqueue wmq_rxq;
394 };
395 
396 struct wm_phyop {
397 	int (*acquire)(struct wm_softc *);
398 	void (*release)(struct wm_softc *);
399 	int reset_delay_us;
400 };
401 
402 /*
403  * Software state per device.
404  */
405 struct wm_softc {
406 	device_t sc_dev;		/* generic device information */
407 	bus_space_tag_t sc_st;		/* bus space tag */
408 	bus_space_handle_t sc_sh;	/* bus space handle */
409 	bus_size_t sc_ss;		/* bus space size */
410 	bus_space_tag_t sc_iot;		/* I/O space tag */
411 	bus_space_handle_t sc_ioh;	/* I/O space handle */
412 	bus_size_t sc_ios;		/* I/O space size */
413 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
414 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
415 	bus_size_t sc_flashs;		/* flash registers space size */
416 	off_t sc_flashreg_offset;	/*
417 					 * offset to flash registers from
418 					 * start of BAR
419 					 */
420 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
421 
422 	struct ethercom sc_ethercom;	/* ethernet common data */
423 	struct mii_data sc_mii;		/* MII/media information */
424 
425 	pci_chipset_tag_t sc_pc;
426 	pcitag_t sc_pcitag;
427 	int sc_bus_speed;		/* PCI/PCIX bus speed */
428 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
429 
430 	uint16_t sc_pcidevid;		/* PCI device ID */
431 	wm_chip_type sc_type;		/* MAC type */
432 	int sc_rev;			/* MAC revision */
433 	wm_phy_type sc_phytype;		/* PHY type */
434 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
435 #define	WM_MEDIATYPE_UNKNOWN		0x00
436 #define	WM_MEDIATYPE_FIBER		0x01
437 #define	WM_MEDIATYPE_COPPER		0x02
438 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
439 	int sc_funcid;			/* unit number of the chip (0 to 3) */
440 	int sc_flags;			/* flags; see below */
441 	int sc_if_flags;		/* last if_flags */
442 	int sc_flowflags;		/* 802.3x flow control flags */
443 	int sc_align_tweak;
444 
445 	void *sc_ihs[WM_MAX_NINTR];	/*
446 					 * interrupt cookie.
447 					 * legacy and msi use sc_ihs[0].
448 					 */
449 	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
450 	int sc_nintrs;			/* number of interrupts */
451 
452 	int sc_link_intr_idx;		/* index into the MSI-X table */
453 
454 	callout_t sc_tick_ch;		/* tick callout */
455 	bool sc_core_stopping;
456 
457 	int sc_nvm_ver_major;
458 	int sc_nvm_ver_minor;
459 	int sc_nvm_ver_build;
460 	int sc_nvm_addrbits;		/* NVM address bits */
461 	unsigned int sc_nvm_wordsize;	/* NVM word size */
462 	int sc_ich8_flash_base;
463 	int sc_ich8_flash_bank_size;
464 	int sc_nvm_k1_enabled;
465 
466 	int sc_nqueues;
467 	struct wm_queue *sc_queue;
468 
469 	int sc_affinity_offset;
470 
471 #ifdef WM_EVENT_COUNTERS
472 	/* Event counters. */
473 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
474 
475 	/* WM_T_82542_2_1 only */
476 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
477 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
478 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
479 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
480 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
481 #endif /* WM_EVENT_COUNTERS */
482 
483 	/* This variable is used only on the 82547. */
484 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
485 
486 	uint32_t sc_ctrl;		/* prototype CTRL register */
487 #if 0
488 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
489 #endif
490 	uint32_t sc_icr;		/* prototype interrupt bits */
491 	uint32_t sc_itr;		/* prototype intr throttling reg */
492 	uint32_t sc_tctl;		/* prototype TCTL register */
493 	uint32_t sc_rctl;		/* prototype RCTL register */
494 	uint32_t sc_txcw;		/* prototype TXCW register */
495 	uint32_t sc_tipg;		/* prototype TIPG register */
496 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
497 	uint32_t sc_pba;		/* prototype PBA register */
498 
499 	int sc_tbi_linkup;		/* TBI link status */
500 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
501 	int sc_tbi_serdes_ticks;	/* tbi ticks */
502 
503 	int sc_mchash_type;		/* multicast filter offset */
504 
505 	krndsource_t rnd_source;	/* random source */
506 
507 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
508 
509 	kmutex_t *sc_core_lock;		/* lock for softc operations */
510 	kmutex_t *sc_ich_phymtx;	/*
511 					 * 82574/82583/ICH/PCH specific PHY
512 					 * mutex. For 82574/82583, the mutex
513 					 * is used for both PHY and NVM.
514 					 */
515 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
516 
517 	struct wm_phyop phy;
518 };
519 
520 #define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
521 #define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
522 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
523 
524 #ifdef WM_MPSAFE
525 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
526 #else
527 #define CALLOUT_FLAGS	0
528 #endif
529 
530 #define	WM_RXCHAIN_RESET(rxq)						\
531 do {									\
532 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
533 	*(rxq)->rxq_tailp = NULL;					\
534 	(rxq)->rxq_len = 0;						\
535 } while (/*CONSTCOND*/0)
536 
537 #define	WM_RXCHAIN_LINK(rxq, m)						\
538 do {									\
539 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
540 	(rxq)->rxq_tailp = &(m)->m_next;				\
541 } while (/*CONSTCOND*/0)
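
/*
 * Sketch of the tail-pointer trick (not driver code): after
 * WM_RXCHAIN_RESET(), rxq_tailp points at rxq_head, so the first
 * WM_RXCHAIN_LINK() stores its mbuf into rxq_head itself; each later
 * link writes the previous mbuf's m_next, appending in O(1) without
 * walking the chain.
 */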
542 
543 #ifdef WM_EVENT_COUNTERS
544 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
545 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
546 
547 #define WM_Q_EVCNT_INCR(qname, evname)			\
548 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
549 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
550 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
551 #else /* !WM_EVENT_COUNTERS */
552 #define	WM_EVCNT_INCR(ev)	/* nothing */
553 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
554 
555 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
556 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
557 #endif /* !WM_EVENT_COUNTERS */
558 
559 #define	CSR_READ(sc, reg)						\
560 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
561 #define	CSR_WRITE(sc, reg, val)						\
562 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
563 #define	CSR_WRITE_FLUSH(sc)						\
564 	(void) CSR_READ((sc), WMREG_STATUS)
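
/*
 * Illustrative flush pattern (register and bit names as used
 * elsewhere in this driver): reading STATUS back forces posted
 * writes out to the chip before a subsequent delay, e.g.
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
 *	CSR_WRITE_FLUSH(sc);
 *	delay(10000);
 */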
565 
566 #define ICH8_FLASH_READ32(sc, reg)					\
567 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
568 	    (reg) + sc->sc_flashreg_offset)
569 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
570 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
571 	    (reg) + sc->sc_flashreg_offset, (data))
572 
573 #define ICH8_FLASH_READ16(sc, reg)					\
574 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
575 	    (reg) + sc->sc_flashreg_offset)
576 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
577 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
578 	    (reg) + sc->sc_flashreg_offset, (data))
579 
580 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
581 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))
582 
583 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
584 #define	WM_CDTXADDR_HI(txq, x)						\
585 	(sizeof(bus_addr_t) == 8 ?					\
586 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
587 
588 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
589 #define	WM_CDRXADDR_HI(rxq, x)						\
590 	(sizeof(bus_addr_t) == 8 ?					\
591 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
592 
593 /*
594  * Register read/write functions.
595  * Other than CSR_{READ|WRITE}().
596  */
597 #if 0
598 static inline uint32_t wm_io_read(struct wm_softc *, int);
599 #endif
600 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
601 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
602 	uint32_t, uint32_t);
603 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
604 
605 /*
606  * Descriptor sync/init functions.
607  */
608 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
609 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
610 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
611 
612 /*
613  * Device driver interface functions and commonly used functions.
614  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
615  */
616 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
617 static int	wm_match(device_t, cfdata_t, void *);
618 static void	wm_attach(device_t, device_t, void *);
619 static int	wm_detach(device_t, int);
620 static bool	wm_suspend(device_t, const pmf_qual_t *);
621 static bool	wm_resume(device_t, const pmf_qual_t *);
622 static void	wm_watchdog(struct ifnet *);
623 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
624 static void	wm_tick(void *);
625 static int	wm_ifflags_cb(struct ethercom *);
626 static int	wm_ioctl(struct ifnet *, u_long, void *);
627 /* MAC address related */
628 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
629 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
630 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
631 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
632 static void	wm_set_filter(struct wm_softc *);
633 /* Reset and init related */
634 static void	wm_set_vlan(struct wm_softc *);
635 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
636 static void	wm_get_auto_rd_done(struct wm_softc *);
637 static void	wm_lan_init_done(struct wm_softc *);
638 static void	wm_get_cfg_done(struct wm_softc *);
639 static void	wm_initialize_hardware_bits(struct wm_softc *);
640 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
641 static void	wm_reset_phy(struct wm_softc *);
642 static void	wm_flush_desc_rings(struct wm_softc *);
643 static void	wm_reset(struct wm_softc *);
644 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
645 static void	wm_rxdrain(struct wm_rxqueue *);
646 static void	wm_rss_getkey(uint8_t *);
647 static void	wm_init_rss(struct wm_softc *);
648 static void	wm_adjust_qnum(struct wm_softc *, int);
649 static int	wm_setup_legacy(struct wm_softc *);
650 static int	wm_setup_msix(struct wm_softc *);
651 static int	wm_init(struct ifnet *);
652 static int	wm_init_locked(struct ifnet *);
653 static void	wm_turnon(struct wm_softc *);
654 static void	wm_turnoff(struct wm_softc *);
655 static void	wm_stop(struct ifnet *, int);
656 static void	wm_stop_locked(struct ifnet *, int);
657 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
658 static void	wm_82547_txfifo_stall(void *);
659 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
660 /* DMA related */
661 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
662 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
663 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
664 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
665     struct wm_txqueue *);
666 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
667 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
668 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
669     struct wm_rxqueue *);
670 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
671 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
672 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
673 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
674 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
675 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
676 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
677     struct wm_txqueue *);
678 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
679     struct wm_rxqueue *);
680 static int	wm_alloc_txrx_queues(struct wm_softc *);
681 static void	wm_free_txrx_queues(struct wm_softc *);
682 static int	wm_init_txrx_queues(struct wm_softc *);
683 /* Start */
684 static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
685     uint32_t *, uint8_t *);
686 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
687 static void	wm_start(struct ifnet *);
688 static void	wm_start_locked(struct ifnet *);
689 static int	wm_transmit(struct ifnet *, struct mbuf *);
690 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
691 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
692 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
693     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
694 static void	wm_nq_start(struct ifnet *);
695 static void	wm_nq_start_locked(struct ifnet *);
696 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
697 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
698 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
699 static void	wm_deferred_start(struct ifnet *);
700 /* Interrupt */
701 static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
702 static void	wm_rxeof(struct wm_rxqueue *);
703 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
704 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
705 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
706 static void	wm_linkintr(struct wm_softc *, uint32_t);
707 static int	wm_intr_legacy(void *);
708 static int	wm_txrxintr_msix(void *);
709 static int	wm_linkintr_msix(void *);
710 
711 /*
712  * Media related.
713  * GMII, SGMII, TBI, SERDES and SFP.
714  */
715 /* Common */
716 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
717 /* GMII related */
718 static void	wm_gmii_reset(struct wm_softc *);
719 static int	wm_get_phy_id_82575(struct wm_softc *);
720 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
721 static int	wm_gmii_mediachange(struct ifnet *);
722 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
723 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
724 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
725 static int	wm_gmii_i82543_readreg(device_t, int, int);
726 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
727 static int	wm_gmii_mdic_readreg(device_t, int, int);
728 static void	wm_gmii_mdic_writereg(device_t, int, int, int);
729 static int	wm_gmii_i82544_readreg(device_t, int, int);
730 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
731 static int	wm_gmii_i80003_readreg(device_t, int, int);
732 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
733 static int	wm_gmii_bm_readreg(device_t, int, int);
734 static void	wm_gmii_bm_writereg(device_t, int, int, int);
735 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
736 static int	wm_gmii_hv_readreg(device_t, int, int);
737 static int	wm_gmii_hv_readreg_locked(device_t, int, int);
738 static void	wm_gmii_hv_writereg(device_t, int, int, int);
739 static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
740 static int	wm_gmii_82580_readreg(device_t, int, int);
741 static void	wm_gmii_82580_writereg(device_t, int, int, int);
742 static int	wm_gmii_gs40g_readreg(device_t, int, int);
743 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
744 static void	wm_gmii_statchg(struct ifnet *);
745 /*
746  * Kumeran related (80003, ICH* and PCH*).
747  * These functions are not for accessing MII registers but for accessing
748  * Kumeran-specific registers.
749  */
750 static int	wm_kmrn_readreg(struct wm_softc *, int);
751 static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
752 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
753 static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
754 /* SGMII */
755 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
756 static int	wm_sgmii_readreg(device_t, int, int);
757 static void	wm_sgmii_writereg(device_t, int, int, int);
758 /* TBI related */
759 static void	wm_tbi_mediainit(struct wm_softc *);
760 static int	wm_tbi_mediachange(struct ifnet *);
761 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
762 static int	wm_check_for_link(struct wm_softc *);
763 static void	wm_tbi_tick(struct wm_softc *);
764 /* SERDES related */
765 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
766 static int	wm_serdes_mediachange(struct ifnet *);
767 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
768 static void	wm_serdes_tick(struct wm_softc *);
769 /* SFP related */
770 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
771 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
772 
773 /*
774  * NVM related.
775  * Microwire, SPI (w/wo EERD) and Flash.
776  */
777 /* Misc functions */
778 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
779 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
780 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
781 /* Microwire */
782 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
783 /* SPI */
784 static int	wm_nvm_ready_spi(struct wm_softc *);
785 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
786 /* Reading via the EERD register */
787 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
788 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
789 /* Flash */
790 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
791     unsigned int *);
792 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
793 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
794 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
795 	uint32_t *);
796 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
797 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
798 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
799 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
800 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
801 /* iNVM */
802 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
803 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
804 /* Locking, NVM type detection, checksum validation and reading */
805 static int	wm_nvm_acquire(struct wm_softc *);
806 static void	wm_nvm_release(struct wm_softc *);
807 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
808 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
809 static int	wm_nvm_validate_checksum(struct wm_softc *);
810 static void	wm_nvm_version_invm(struct wm_softc *);
811 static void	wm_nvm_version(struct wm_softc *);
812 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
813 
814 /*
815  * Hardware semaphores.
816  * Very complex...
817  */
818 static int	wm_get_null(struct wm_softc *);
819 static void	wm_put_null(struct wm_softc *);
820 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
821 static void	wm_put_swsm_semaphore(struct wm_softc *);
822 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
823 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
824 static int	wm_get_phy_82575(struct wm_softc *);
825 static void	wm_put_phy_82575(struct wm_softc *);
826 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
827 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
828 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
829 static void	wm_put_swflag_ich8lan(struct wm_softc *);
830 static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
831 static void	wm_put_nvm_ich8lan(struct wm_softc *);
832 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
833 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
834 
835 /*
836  * Management mode and power management related subroutines.
837  * BMC, AMT, suspend/resume and EEE.
838  */
839 #if 0
840 static int	wm_check_mng_mode(struct wm_softc *);
841 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
842 static int	wm_check_mng_mode_82574(struct wm_softc *);
843 static int	wm_check_mng_mode_generic(struct wm_softc *);
844 #endif
845 static int	wm_enable_mng_pass_thru(struct wm_softc *);
846 static bool	wm_phy_resetisblocked(struct wm_softc *);
847 static void	wm_get_hw_control(struct wm_softc *);
848 static void	wm_release_hw_control(struct wm_softc *);
849 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
850 static void	wm_smbustopci(struct wm_softc *);
851 static void	wm_init_manageability(struct wm_softc *);
852 static void	wm_release_manageability(struct wm_softc *);
853 static void	wm_get_wakeup(struct wm_softc *);
854 static void	wm_ulp_disable(struct wm_softc *);
855 static void	wm_enable_phy_wakeup(struct wm_softc *);
856 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
857 static void	wm_enable_wakeup(struct wm_softc *);
858 /* LPLU (Low Power Link Up) */
859 static void	wm_lplu_d0_disable(struct wm_softc *);
860 static void	wm_lplu_d0_disable_pch(struct wm_softc *);
861 /* EEE */
862 static void	wm_set_eee_i350(struct wm_softc *);
863 
864 /*
865  * Workarounds (mainly PHY related).
866  * Basically, PHY workarounds are implemented in the PHY drivers.
867  */
868 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
869 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
870 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
871 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
872 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
873 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
874 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
875 static void	wm_reset_init_script_82575(struct wm_softc *);
876 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
877 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
878 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
879 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
880 static void	wm_pll_workaround_i210(struct wm_softc *);
881 
882 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
883     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
884 
885 /*
886  * Devices supported by this driver.
887  */
888 static const struct wm_product {
889 	pci_vendor_id_t		wmp_vendor;
890 	pci_product_id_t	wmp_product;
891 	const char		*wmp_name;
892 	wm_chip_type		wmp_type;
893 	uint32_t		wmp_flags;
894 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
895 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
896 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
897 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
898 #define WMP_MEDIATYPE(x)	((x) & 0x03)
899 } wm_products[] = {
900 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
901 	  "Intel i82542 1000BASE-X Ethernet",
902 	  WM_T_82542_2_1,	WMP_F_FIBER },
903 
904 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
905 	  "Intel i82543GC 1000BASE-X Ethernet",
906 	  WM_T_82543,		WMP_F_FIBER },
907 
908 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
909 	  "Intel i82543GC 1000BASE-T Ethernet",
910 	  WM_T_82543,		WMP_F_COPPER },
911 
912 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
913 	  "Intel i82544EI 1000BASE-T Ethernet",
914 	  WM_T_82544,		WMP_F_COPPER },
915 
916 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
917 	  "Intel i82544EI 1000BASE-X Ethernet",
918 	  WM_T_82544,		WMP_F_FIBER },
919 
920 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
921 	  "Intel i82544GC 1000BASE-T Ethernet",
922 	  WM_T_82544,		WMP_F_COPPER },
923 
924 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
925 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
926 	  WM_T_82544,		WMP_F_COPPER },
927 
928 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
929 	  "Intel i82540EM 1000BASE-T Ethernet",
930 	  WM_T_82540,		WMP_F_COPPER },
931 
932 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
933 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
934 	  WM_T_82540,		WMP_F_COPPER },
935 
936 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
937 	  "Intel i82540EP 1000BASE-T Ethernet",
938 	  WM_T_82540,		WMP_F_COPPER },
939 
940 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
941 	  "Intel i82540EP 1000BASE-T Ethernet",
942 	  WM_T_82540,		WMP_F_COPPER },
943 
944 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
945 	  "Intel i82540EP 1000BASE-T Ethernet",
946 	  WM_T_82540,		WMP_F_COPPER },
947 
948 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
949 	  "Intel i82545EM 1000BASE-T Ethernet",
950 	  WM_T_82545,		WMP_F_COPPER },
951 
952 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
953 	  "Intel i82545GM 1000BASE-T Ethernet",
954 	  WM_T_82545_3,		WMP_F_COPPER },
955 
956 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
957 	  "Intel i82545GM 1000BASE-X Ethernet",
958 	  WM_T_82545_3,		WMP_F_FIBER },
959 
960 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
961 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
962 	  WM_T_82545_3,		WMP_F_SERDES },
963 
964 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
965 	  "Intel i82546EB 1000BASE-T Ethernet",
966 	  WM_T_82546,		WMP_F_COPPER },
967 
968 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
969 	  "Intel i82546EB 1000BASE-T Ethernet",
970 	  WM_T_82546,		WMP_F_COPPER },
971 
972 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
973 	  "Intel i82545EM 1000BASE-X Ethernet",
974 	  WM_T_82545,		WMP_F_FIBER },
975 
976 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
977 	  "Intel i82546EB 1000BASE-X Ethernet",
978 	  WM_T_82546,		WMP_F_FIBER },
979 
980 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
981 	  "Intel i82546GB 1000BASE-T Ethernet",
982 	  WM_T_82546_3,		WMP_F_COPPER },
983 
984 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
985 	  "Intel i82546GB 1000BASE-X Ethernet",
986 	  WM_T_82546_3,		WMP_F_FIBER },
987 
988 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
989 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
990 	  WM_T_82546_3,		WMP_F_SERDES },
991 
992 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
993 	  "i82546GB quad-port Gigabit Ethernet",
994 	  WM_T_82546_3,		WMP_F_COPPER },
995 
996 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
997 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
998 	  WM_T_82546_3,		WMP_F_COPPER },
999 
1000 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
1001 	  "Intel PRO/1000MT (82546GB)",
1002 	  WM_T_82546_3,		WMP_F_COPPER },
1003 
1004 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
1005 	  "Intel i82541EI 1000BASE-T Ethernet",
1006 	  WM_T_82541,		WMP_F_COPPER },
1007 
1008 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
1009 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
1010 	  WM_T_82541,		WMP_F_COPPER },
1011 
1012 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
1013 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
1014 	  WM_T_82541,		WMP_F_COPPER },
1015 
1016 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
1017 	  "Intel i82541ER 1000BASE-T Ethernet",
1018 	  WM_T_82541_2,		WMP_F_COPPER },
1019 
1020 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
1021 	  "Intel i82541GI 1000BASE-T Ethernet",
1022 	  WM_T_82541_2,		WMP_F_COPPER },
1023 
1024 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
1025 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
1026 	  WM_T_82541_2,		WMP_F_COPPER },
1027 
1028 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
1029 	  "Intel i82541PI 1000BASE-T Ethernet",
1030 	  WM_T_82541_2,		WMP_F_COPPER },
1031 
1032 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
1033 	  "Intel i82547EI 1000BASE-T Ethernet",
1034 	  WM_T_82547,		WMP_F_COPPER },
1035 
1036 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
1037 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
1038 	  WM_T_82547,		WMP_F_COPPER },
1039 
1040 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
1041 	  "Intel i82547GI 1000BASE-T Ethernet",
1042 	  WM_T_82547_2,		WMP_F_COPPER },
1043 
1044 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
1045 	  "Intel PRO/1000 PT (82571EB)",
1046 	  WM_T_82571,		WMP_F_COPPER },
1047 
1048 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
1049 	  "Intel PRO/1000 PF (82571EB)",
1050 	  WM_T_82571,		WMP_F_FIBER },
1051 
1052 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
1053 	  "Intel PRO/1000 PB (82571EB)",
1054 	  WM_T_82571,		WMP_F_SERDES },
1055 
1056 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
1057 	  "Intel PRO/1000 QT (82571EB)",
1058 	  WM_T_82571,		WMP_F_COPPER },
1059 
1060 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
1061 	  "Intel PRO/1000 PT Quad Port Server Adapter",
1062 	  WM_T_82571,		WMP_F_COPPER, },
1063 
1064 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
1065 	  "Intel Gigabit PT Quad Port Server ExpressModule",
1066 	  WM_T_82571,		WMP_F_COPPER, },
1067 
1068 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
1069 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
1070 	  WM_T_82571,		WMP_F_SERDES, },
1071 
1072 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
1073 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
1074 	  WM_T_82571,		WMP_F_SERDES, },
1075 
1076 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
1077 	  "Intel 82571EB Quad 1000baseX Ethernet",
1078 	  WM_T_82571,		WMP_F_FIBER, },
1079 
1080 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
1081 	  "Intel i82572EI 1000baseT Ethernet",
1082 	  WM_T_82572,		WMP_F_COPPER },
1083 
1084 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
1085 	  "Intel i82572EI 1000baseX Ethernet",
1086 	  WM_T_82572,		WMP_F_FIBER },
1087 
1088 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
1089 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
1090 	  WM_T_82572,		WMP_F_SERDES },
1091 
1092 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
1093 	  "Intel i82572EI 1000baseT Ethernet",
1094 	  WM_T_82572,		WMP_F_COPPER },
1095 
1096 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
1097 	  "Intel i82573E",
1098 	  WM_T_82573,		WMP_F_COPPER },
1099 
1100 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
1101 	  "Intel i82573E IAMT",
1102 	  WM_T_82573,		WMP_F_COPPER },
1103 
1104 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
1105 	  "Intel i82573L Gigabit Ethernet",
1106 	  WM_T_82573,		WMP_F_COPPER },
1107 
1108 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
1109 	  "Intel i82574L",
1110 	  WM_T_82574,		WMP_F_COPPER },
1111 
1112 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
1113 	  "Intel i82574L",
1114 	  WM_T_82574,		WMP_F_COPPER },
1115 
1116 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
1117 	  "Intel i82583V",
1118 	  WM_T_82583,		WMP_F_COPPER },
1119 
1120 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
1121 	  "i80003 dual 1000baseT Ethernet",
1122 	  WM_T_80003,		WMP_F_COPPER },
1123 
1124 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
1125 	  "i80003 dual 1000baseX Ethernet",
1126 	  WM_T_80003,		WMP_F_COPPER },
1127 
1128 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
1129 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
1130 	  WM_T_80003,		WMP_F_SERDES },
1131 
1132 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
1133 	  "Intel i80003 1000baseT Ethernet",
1134 	  WM_T_80003,		WMP_F_COPPER },
1135 
1136 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
1137 	  "Intel i80003 Gigabit Ethernet (SERDES)",
1138 	  WM_T_80003,		WMP_F_SERDES },
1139 
1140 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
1141 	  "Intel i82801H (M_AMT) LAN Controller",
1142 	  WM_T_ICH8,		WMP_F_COPPER },
1143 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
1144 	  "Intel i82801H (AMT) LAN Controller",
1145 	  WM_T_ICH8,		WMP_F_COPPER },
1146 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
1147 	  "Intel i82801H LAN Controller",
1148 	  WM_T_ICH8,		WMP_F_COPPER },
1149 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
1150 	  "Intel i82801H (IFE) 10/100 LAN Controller",
1151 	  WM_T_ICH8,		WMP_F_COPPER },
1152 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
1153 	  "Intel i82801H (M) LAN Controller",
1154 	  WM_T_ICH8,		WMP_F_COPPER },
1155 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
1156 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
1157 	  WM_T_ICH8,		WMP_F_COPPER },
1158 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
1159 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
1160 	  WM_T_ICH8,		WMP_F_COPPER },
1161 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
1162 	  "82567V-3 LAN Controller",
1163 	  WM_T_ICH8,		WMP_F_COPPER },
1164 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
1165 	  "82801I (AMT) LAN Controller",
1166 	  WM_T_ICH9,		WMP_F_COPPER },
1167 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
1168 	  "82801I 10/100 LAN Controller",
1169 	  WM_T_ICH9,		WMP_F_COPPER },
1170 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
1171 	  "82801I (G) 10/100 LAN Controller",
1172 	  WM_T_ICH9,		WMP_F_COPPER },
1173 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
1174 	  "82801I (GT) 10/100 LAN Controller",
1175 	  WM_T_ICH9,		WMP_F_COPPER },
1176 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
1177 	  "82801I (C) LAN Controller",
1178 	  WM_T_ICH9,		WMP_F_COPPER },
1179 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
1180 	  "82801I mobile LAN Controller",
1181 	  WM_T_ICH9,		WMP_F_COPPER },
1182 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
1183 	  "82801I mobile (V) LAN Controller",
1184 	  WM_T_ICH9,		WMP_F_COPPER },
1185 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
1186 	  "82801I mobile (AMT) LAN Controller",
1187 	  WM_T_ICH9,		WMP_F_COPPER },
1188 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
1189 	  "82567LM-4 LAN Controller",
1190 	  WM_T_ICH9,		WMP_F_COPPER },
1191 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
1192 	  "82567LM-2 LAN Controller",
1193 	  WM_T_ICH10,		WMP_F_COPPER },
1194 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
1195 	  "82567LF-2 LAN Controller",
1196 	  WM_T_ICH10,		WMP_F_COPPER },
1197 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
1198 	  "82567LM-3 LAN Controller",
1199 	  WM_T_ICH10,		WMP_F_COPPER },
1200 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
1201 	  "82567LF-3 LAN Controller",
1202 	  WM_T_ICH10,		WMP_F_COPPER },
1203 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
1204 	  "82567V-2 LAN Controller",
1205 	  WM_T_ICH10,		WMP_F_COPPER },
1206 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
1207 	  "82567V-3? LAN Controller",
1208 	  WM_T_ICH10,		WMP_F_COPPER },
1209 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
1210 	  "HANKSVILLE LAN Controller",
1211 	  WM_T_ICH10,		WMP_F_COPPER },
1212 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
1213 	  "PCH LAN (82577LM) Controller",
1214 	  WM_T_PCH,		WMP_F_COPPER },
1215 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
1216 	  "PCH LAN (82577LC) Controller",
1217 	  WM_T_PCH,		WMP_F_COPPER },
1218 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
1219 	  "PCH LAN (82578DM) Controller",
1220 	  WM_T_PCH,		WMP_F_COPPER },
1221 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
1222 	  "PCH LAN (82578DC) Controller",
1223 	  WM_T_PCH,		WMP_F_COPPER },
1224 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
1225 	  "PCH2 LAN (82579LM) Controller",
1226 	  WM_T_PCH2,		WMP_F_COPPER },
1227 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
1228 	  "PCH2 LAN (82579V) Controller",
1229 	  WM_T_PCH2,		WMP_F_COPPER },
1230 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
1231 	  "82575EB dual-1000baseT Ethernet",
1232 	  WM_T_82575,		WMP_F_COPPER },
1233 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
1234 	  "82575EB dual-1000baseX Ethernet (SERDES)",
1235 	  WM_T_82575,		WMP_F_SERDES },
1236 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
1237 	  "82575GB quad-1000baseT Ethernet",
1238 	  WM_T_82575,		WMP_F_COPPER },
1239 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
1240 	  "82575GB quad-1000baseT Ethernet (PM)",
1241 	  WM_T_82575,		WMP_F_COPPER },
1242 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
1243 	  "82576 1000BaseT Ethernet",
1244 	  WM_T_82576,		WMP_F_COPPER },
1245 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
1246 	  "82576 1000BaseX Ethernet",
1247 	  WM_T_82576,		WMP_F_FIBER },
1248 
1249 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
1250 	  "82576 gigabit Ethernet (SERDES)",
1251 	  WM_T_82576,		WMP_F_SERDES },
1252 
1253 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
1254 	  "82576 quad-1000BaseT Ethernet",
1255 	  WM_T_82576,		WMP_F_COPPER },
1256 
1257 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
1258 	  "82576 Gigabit ET2 Quad Port Server Adapter",
1259 	  WM_T_82576,		WMP_F_COPPER },
1260 
1261 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
1262 	  "82576 gigabit Ethernet",
1263 	  WM_T_82576,		WMP_F_COPPER },
1264 
1265 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
1266 	  "82576 gigabit Ethernet (SERDES)",
1267 	  WM_T_82576,		WMP_F_SERDES },
1268 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
1269 	  "82576 quad-gigabit Ethernet (SERDES)",
1270 	  WM_T_82576,		WMP_F_SERDES },
1271 
1272 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
1273 	  "82580 1000BaseT Ethernet",
1274 	  WM_T_82580,		WMP_F_COPPER },
1275 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
1276 	  "82580 1000BaseX Ethernet",
1277 	  WM_T_82580,		WMP_F_FIBER },
1278 
1279 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
1280 	  "82580 1000BaseT Ethernet (SERDES)",
1281 	  WM_T_82580,		WMP_F_SERDES },
1282 
1283 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
1284 	  "82580 gigabit Ethernet (SGMII)",
1285 	  WM_T_82580,		WMP_F_COPPER },
1286 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
1287 	  "82580 dual-1000BaseT Ethernet",
1288 	  WM_T_82580,		WMP_F_COPPER },
1289 
1290 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1291 	  "82580 quad-1000BaseX Ethernet",
1292 	  WM_T_82580,		WMP_F_FIBER },
1293 
1294 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
1295 	  "DH89XXCC Gigabit Ethernet (SGMII)",
1296 	  WM_T_82580,		WMP_F_COPPER },
1297 
1298 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
1299 	  "DH89XXCC Gigabit Ethernet (SERDES)",
1300 	  WM_T_82580,		WMP_F_SERDES },
1301 
1302 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
1303 	  "DH89XXCC 1000BASE-KX Ethernet",
1304 	  WM_T_82580,		WMP_F_SERDES },
1305 
1306 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
1307 	  "DH89XXCC Gigabit Ethernet (SFP)",
1308 	  WM_T_82580,		WMP_F_SERDES },
1309 
1310 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
1311 	  "I350 Gigabit Network Connection",
1312 	  WM_T_I350,		WMP_F_COPPER },
1313 
1314 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
1315 	  "I350 Gigabit Fiber Network Connection",
1316 	  WM_T_I350,		WMP_F_FIBER },
1317 
1318 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
1319 	  "I350 Gigabit Backplane Connection",
1320 	  WM_T_I350,		WMP_F_SERDES },
1321 
1322 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
1323 	  "I350 Quad Port Gigabit Ethernet",
1324 	  WM_T_I350,		WMP_F_SERDES },
1325 
1326 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
1327 	  "I350 Gigabit Connection",
1328 	  WM_T_I350,		WMP_F_COPPER },
1329 
1330 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
1331 	  "I354 Gigabit Ethernet (KX)",
1332 	  WM_T_I354,		WMP_F_SERDES },
1333 
1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
1335 	  "I354 Gigabit Ethernet (SGMII)",
1336 	  WM_T_I354,		WMP_F_COPPER },
1337 
1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
1339 	  "I354 Gigabit Ethernet (2.5G)",
1340 	  WM_T_I354,		WMP_F_COPPER },
1341 
1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
1343 	  "I210-T1 Ethernet Server Adapter",
1344 	  WM_T_I210,		WMP_F_COPPER },
1345 
1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
1347 	  "I210 Ethernet (Copper OEM)",
1348 	  WM_T_I210,		WMP_F_COPPER },
1349 
1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
1351 	  "I210 Ethernet (Copper IT)",
1352 	  WM_T_I210,		WMP_F_COPPER },
1353 
1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
1355 	  "I210 Ethernet (FLASH less)",
1356 	  WM_T_I210,		WMP_F_COPPER },
1357 
1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
1359 	  "I210 Gigabit Ethernet (Fiber)",
1360 	  WM_T_I210,		WMP_F_FIBER },
1361 
1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
1363 	  "I210 Gigabit Ethernet (SERDES)",
1364 	  WM_T_I210,		WMP_F_SERDES },
1365 
1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
1367 	  "I210 Gigabit Ethernet (FLASH less)",
1368 	  WM_T_I210,		WMP_F_SERDES },
1369 
1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
1371 	  "I210 Gigabit Ethernet (SGMII)",
1372 	  WM_T_I210,		WMP_F_COPPER },
1373 
1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
1375 	  "I211 Ethernet (COPPER)",
1376 	  WM_T_I211,		WMP_F_COPPER },
1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
1378 	  "I217 V Ethernet Connection",
1379 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
1381 	  "I217 LM Ethernet Connection",
1382 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1383 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
1384 	  "I218 V Ethernet Connection",
1385 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
1387 	  "I218 V Ethernet Connection",
1388 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
1390 	  "I218 V Ethernet Connection",
1391 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
1393 	  "I218 LM Ethernet Connection",
1394 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1395 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
1396 	  "I218 LM Ethernet Connection",
1397 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1398 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
1399 	  "I218 LM Ethernet Connection",
1400 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1401 #if 0
1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
1403 	  "I219 V Ethernet Connection",
1404 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
1406 	  "I219 V Ethernet Connection",
1407 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
1409 	  "I219 V Ethernet Connection",
1410 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1411 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
1412 	  "I219 V Ethernet Connection",
1413 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
1415 	  "I219 LM Ethernet Connection",
1416 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
1418 	  "I219 LM Ethernet Connection",
1419 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
1421 	  "I219 LM Ethernet Connection",
1422 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1423 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
1424 	  "I219 LM Ethernet Connection",
1425 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
1427 	  "I219 LM Ethernet Connection",
1428 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1429 #endif
1430 	{ 0,			0,
1431 	  NULL,
1432 	  0,			0 },
1433 };
1434 
1435 /*
1436  * Register read/write functions,
1437  * other than CSR_{READ|WRITE}().
1438  */
1439 
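     /*
      * Indirect register access through the I/O BAR (the IOADDR/IODATA
      * pair): the CSR offset is written at I/O offset 0, and the data is
      * then transferred through I/O offset 4.
      */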
1440 #if 0 /* Not currently used */
1441 static inline uint32_t
1442 wm_io_read(struct wm_softc *sc, int reg)
1443 {
1444 
1445 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1446 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1447 }
1448 #endif
1449 
1450 static inline void
1451 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1452 {
1453 
1454 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1455 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1456 }
1457 
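     /*
      * Write to an 8-bit register behind one of the i82575 indirect
      * access windows (such as SCTL): the target offset and the data are
      * packed into a single CSR write, and the READY bit is then polled
      * until the hardware has completed the access.
      */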
1458 static inline void
1459 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1460     uint32_t data)
1461 {
1462 	uint32_t regval;
1463 	int i;
1464 
1465 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1466 
1467 	CSR_WRITE(sc, reg, regval);
1468 
1469 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1470 		delay(5);
1471 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1472 			break;
1473 	}
1474 	if (i == SCTL_CTL_POLL_TIMEOUT) {
1475 		aprint_error("%s: WARNING:"
1476 		    " i82575 reg 0x%08x setup did not indicate ready\n",
1477 		    device_xname(sc->sc_dev), reg);
1478 	}
1479 }
1480 
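     /*
      * Split a DMA address into the low/high 32-bit words of a
      * little-endian descriptor address field.  On platforms with a
      * 32-bit bus_addr_t the high word is always zero.
      */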
1481 static inline void
1482 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1483 {
1484 	wa->wa_low = htole32(v & 0xffffffffU);
1485 	if (sizeof(bus_addr_t) == 8)
1486 		wa->wa_high = htole32((uint64_t) v >> 32);
1487 	else
1488 		wa->wa_high = 0;
1489 }
1490 
1491 /*
1492  * Descriptor sync/init functions.
1493  */
1494 static inline void
1495 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1496 {
1497 	struct wm_softc *sc = txq->txq_sc;
1498 
1499 	/* If it will wrap around, sync to the end of the ring. */
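     	/*
     	 * Example: with a 256-descriptor ring, syncing 8 descriptors
     	 * starting at index 252 syncs 252..255 first and then 0..3.
     	 */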
1500 	if ((start + num) > WM_NTXDESC(txq)) {
1501 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1502 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
1503 		    (WM_NTXDESC(txq) - start), ops);
1504 		num -= (WM_NTXDESC(txq) - start);
1505 		start = 0;
1506 	}
1507 
1508 	/* Now sync whatever is left. */
1509 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1510 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
1511 }
1512 
1513 static inline void
1514 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1515 {
1516 	struct wm_softc *sc = rxq->rxq_sc;
1517 
1518 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1519 	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
1520 }
1521 
1522 static inline void
1523 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1524 {
1525 	struct wm_softc *sc = rxq->rxq_sc;
1526 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1527 	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1528 	struct mbuf *m = rxs->rxs_mbuf;
1529 
1530 	/*
1531 	 * Note: We scoot the packet forward 2 bytes in the buffer
1532 	 * so that the payload after the Ethernet header is aligned
1533 	 * to a 4-byte boundary.
1534 	 *
1535 	 * XXX BRAINDAMAGE ALERT!
1536 	 * The stupid chip uses the same size for every buffer, which
1537 	 * is set in the Receive Control register.  We are using the 2K
1538 	 * size option, but what we REALLY want is (2K - 2)!  For this
1539 	 * reason, we can't "scoot" packets longer than the standard
1540 	 * Ethernet MTU.  On strict-alignment platforms, if the total
1541 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
1542 	 * the upper layer copy the headers.
1543 	 */
1544 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
1545 
1546 	wm_set_dma_addr(&rxd->wrx_addr,
1547 	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1548 	rxd->wrx_len = 0;
1549 	rxd->wrx_cksum = 0;
1550 	rxd->wrx_status = 0;
1551 	rxd->wrx_errors = 0;
1552 	rxd->wrx_special = 0;
1553 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1554 
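     	/* Hand the descriptor to the chip by advancing the RX tail. */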
1555 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
1556 }
1557 
1558 /*
1559  * Device driver interface functions and commonly used functions.
1560  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1561  */
1562 
1563 /* Lookup supported device table */
1564 static const struct wm_product *
1565 wm_lookup(const struct pci_attach_args *pa)
1566 {
1567 	const struct wm_product *wmp;
1568 
1569 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1570 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1571 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1572 			return wmp;
1573 	}
1574 	return NULL;
1575 }
1576 
1577 /* The match function (ca_match) */
1578 static int
1579 wm_match(device_t parent, cfdata_t cf, void *aux)
1580 {
1581 	struct pci_attach_args *pa = aux;
1582 
1583 	if (wm_lookup(pa) != NULL)
1584 		return 1;
1585 
1586 	return 0;
1587 }
1588 
1589 /* The attach function (ca_attach) */
1590 static void
1591 wm_attach(device_t parent, device_t self, void *aux)
1592 {
1593 	struct wm_softc *sc = device_private(self);
1594 	struct pci_attach_args *pa = aux;
1595 	prop_dictionary_t dict;
1596 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1597 	pci_chipset_tag_t pc = pa->pa_pc;
1598 	int counts[PCI_INTR_TYPE_SIZE];
1599 	pci_intr_type_t max_type;
1600 	const char *eetype, *xname;
1601 	bus_space_tag_t memt;
1602 	bus_space_handle_t memh;
1603 	bus_size_t memsize;
1604 	int memh_valid;
1605 	int i, error;
1606 	const struct wm_product *wmp;
1607 	prop_data_t ea;
1608 	prop_number_t pn;
1609 	uint8_t enaddr[ETHER_ADDR_LEN];
1610 	uint16_t cfg1, cfg2, swdpin, nvmword;
1611 	pcireg_t preg, memtype;
1612 	uint16_t eeprom_data, apme_mask;
1613 	bool force_clear_smbi;
1614 	uint32_t link_mode;
1615 	uint32_t reg;
1616 	void (*deferred_start_func)(struct ifnet *) = NULL;
1617 
1618 	sc->sc_dev = self;
1619 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
1620 	sc->sc_core_stopping = false;
1621 
1622 	wmp = wm_lookup(pa);
1623 #ifdef DIAGNOSTIC
1624 	if (wmp == NULL) {
1625 		printf("\n");
1626 		panic("wm_attach: impossible");
1627 	}
1628 #endif
1629 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
1630 
1631 	sc->sc_pc = pa->pa_pc;
1632 	sc->sc_pcitag = pa->pa_tag;
1633 
1634 	if (pci_dma64_available(pa))
1635 		sc->sc_dmat = pa->pa_dmat64;
1636 	else
1637 		sc->sc_dmat = pa->pa_dmat;
1638 
1639 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
1640 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
1641 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1642 
1643 	sc->sc_type = wmp->wmp_type;
1644 
1645 	/* Set default function pointers */
1646 	sc->phy.acquire = wm_get_null;
1647 	sc->phy.release = wm_put_null;
1648 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
1649 
1650 	if (sc->sc_type < WM_T_82543) {
1651 		if (sc->sc_rev < 2) {
1652 			aprint_error_dev(sc->sc_dev,
1653 			    "i82542 must be at least rev. 2\n");
1654 			return;
1655 		}
1656 		if (sc->sc_rev < 3)
1657 			sc->sc_type = WM_T_82542_2_0;
1658 	}
1659 
1660 	/*
1661 	 * Disable MSI for Errata:
1662 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
1663 	 *
1664 	 *  82544: Errata 25
1665 	 *  82540: Errata  6 (easy to reproduce device timeout)
1666 	 *  82545: Errata  4 (easy to reproduce device timeout)
1667 	 *  82546: Errata 26 (easy to reproduce device timeout)
1668 	 *  82541: Errata  7 (easy to reproduce device timeout)
1669 	 *
1670 	 * "Byte Enables 2 and 3 are not set on MSI writes"
1671 	 *
1672 	 *  82571 & 82572: Errata 63
1673 	 */
1674 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
1675 	    || (sc->sc_type == WM_T_82572))
1676 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
1677 
1678 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1679 	    || (sc->sc_type == WM_T_82580)
1680 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
1681 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
1682 		sc->sc_flags |= WM_F_NEWQUEUE;
1683 
1684 	/* Set device properties (mactype) */
1685 	dict = device_properties(sc->sc_dev);
1686 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1687 
1688 	/*
1689 	 * Map the device.  All devices support memory-mapped access,
1690 	 * and it is really required for normal operation.
1691 	 */
1692 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1693 	switch (memtype) {
1694 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1695 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1696 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1697 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1698 		break;
1699 	default:
1700 		memh_valid = 0;
1701 		break;
1702 	}
1703 
1704 	if (memh_valid) {
1705 		sc->sc_st = memt;
1706 		sc->sc_sh = memh;
1707 		sc->sc_ss = memsize;
1708 	} else {
1709 		aprint_error_dev(sc->sc_dev,
1710 		    "unable to map device registers\n");
1711 		return;
1712 	}
1713 
1714 	/*
1715 	 * In addition, i82544 and later support I/O mapped indirect
1716 	 * register access.  It is not desirable (nor supported in
1717 	 * this driver) to use it for normal operation, though it is
1718 	 * required to work around bugs in some chip versions.
1719 	 */
1720 	if (sc->sc_type >= WM_T_82544) {
1721 		/* First we have to find the I/O BAR. */
1722 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1723 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1724 			if (memtype == PCI_MAPREG_TYPE_IO)
1725 				break;
1726 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
1727 			    PCI_MAPREG_MEM_TYPE_64BIT)
1728 				i += 4;	/* skip high bits, too */
1729 		}
1730 		if (i < PCI_MAPREG_END) {
1731 			/*
1732 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
1733 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
1734 			 * That's not a problem, because newer chips don't
1735 			 * have this bug.
1736 			 *
1737 			 * The i8254x apparently doesn't respond when the
1738 			 * I/O BAR is 0, which looks as if it hasn't
1739 			 * been configured.
1740 			 */
1741 			preg = pci_conf_read(pc, pa->pa_tag, i);
1742 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1743 				aprint_error_dev(sc->sc_dev,
1744 				    "WARNING: I/O BAR at zero.\n");
1745 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1746 					0, &sc->sc_iot, &sc->sc_ioh,
1747 					NULL, &sc->sc_ios) == 0) {
1748 				sc->sc_flags |= WM_F_IOH_VALID;
1749 			} else {
1750 				aprint_error_dev(sc->sc_dev,
1751 				    "WARNING: unable to map I/O space\n");
1752 			}
1753 		}
1754 
1755 	}
1756 
1757 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
1758 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1759 	preg |= PCI_COMMAND_MASTER_ENABLE;
1760 	if (sc->sc_type < WM_T_82542_2_1)
1761 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1762 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1763 
1764 	/* power up chip */
1765 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1766 	    NULL)) && error != EOPNOTSUPP) {
1767 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1768 		return;
1769 	}
1770 
1771 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
1772 
1773 	/* Allocation settings */
1774 	max_type = PCI_INTR_TYPE_MSIX;
1775 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
1776 	counts[PCI_INTR_TYPE_MSI] = 1;
1777 	counts[PCI_INTR_TYPE_INTX] = 1;
1778 
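     	/*
     	 * Try MSI-X first.  If vector allocation or handler setup
     	 * fails, each branch below releases what it allocated, lowers
     	 * max_type (MSI-X -> MSI -> INTx) and jumps back here.
     	 */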
1779 alloc_retry:
1780 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
1781 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
1782 		return;
1783 	}
1784 
1785 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
1786 		error = wm_setup_msix(sc);
1787 		if (error) {
1788 			pci_intr_release(pc, sc->sc_intrs,
1789 			    counts[PCI_INTR_TYPE_MSIX]);
1790 
1791 			/* Setup for MSI: Disable MSI-X */
1792 			max_type = PCI_INTR_TYPE_MSI;
1793 			counts[PCI_INTR_TYPE_MSI] = 1;
1794 			counts[PCI_INTR_TYPE_INTX] = 1;
1795 			goto alloc_retry;
1796 		}
1797 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
1798 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
1799 		error = wm_setup_legacy(sc);
1800 		if (error) {
1801 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
1802 			    counts[PCI_INTR_TYPE_MSI]);
1803 
1804 			/* The next try is for INTx: Disable MSI */
1805 			max_type = PCI_INTR_TYPE_INTX;
1806 			counts[PCI_INTR_TYPE_INTX] = 1;
1807 			goto alloc_retry;
1808 		}
1809 	} else {
1810 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
1811 		error = wm_setup_legacy(sc);
1812 		if (error) {
1813 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
1814 			    counts[PCI_INTR_TYPE_INTX]);
1815 			return;
1816 		}
1817 	}
1818 
1819 	/*
1820 	 * Check the function ID (unit number of the chip).
1821 	 */
1822 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1823 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
1824 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1825 	    || (sc->sc_type == WM_T_82580)
1826 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
1827 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1828 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1829 	else
1830 		sc->sc_funcid = 0;
1831 
1832 	/*
1833 	 * Determine a few things about the bus we're connected to.
1834 	 */
1835 	if (sc->sc_type < WM_T_82543) {
1836 		/* We don't really know the bus characteristics here. */
1837 		sc->sc_bus_speed = 33;
1838 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1839 		/*
1840 		 * CSA (Communication Streaming Architecture) is about as fast
1841 		 * as a 32-bit 66MHz PCI bus.
1842 		 */
1843 		sc->sc_flags |= WM_F_CSA;
1844 		sc->sc_bus_speed = 66;
1845 		aprint_verbose_dev(sc->sc_dev,
1846 		    "Communication Streaming Architecture\n");
1847 		if (sc->sc_type == WM_T_82547) {
1848 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1849 			callout_setfunc(&sc->sc_txfifo_ch,
1850 					wm_82547_txfifo_stall, sc);
1851 			aprint_verbose_dev(sc->sc_dev,
1852 			    "using 82547 Tx FIFO stall work-around\n");
1853 		}
1854 	} else if (sc->sc_type >= WM_T_82571) {
1855 		sc->sc_flags |= WM_F_PCIE;
1856 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1857 		    && (sc->sc_type != WM_T_ICH10)
1858 		    && (sc->sc_type != WM_T_PCH)
1859 		    && (sc->sc_type != WM_T_PCH2)
1860 		    && (sc->sc_type != WM_T_PCH_LPT)
1861 		    && (sc->sc_type != WM_T_PCH_SPT)) {
1862 			/* ICH* and PCH* have no PCIe capability registers */
1863 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1864 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1865 				NULL) == 0)
1866 				aprint_error_dev(sc->sc_dev,
1867 				    "unable to find PCIe capability\n");
1868 		}
1869 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1870 	} else {
1871 		reg = CSR_READ(sc, WMREG_STATUS);
1872 		if (reg & STATUS_BUS64)
1873 			sc->sc_flags |= WM_F_BUS64;
1874 		if ((reg & STATUS_PCIX_MODE) != 0) {
1875 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1876 
1877 			sc->sc_flags |= WM_F_PCIX;
1878 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1879 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1880 				aprint_error_dev(sc->sc_dev,
1881 				    "unable to find PCIX capability\n");
1882 			else if (sc->sc_type != WM_T_82545_3 &&
1883 				 sc->sc_type != WM_T_82546_3) {
1884 				/*
1885 				 * Work around a problem caused by the BIOS
1886 				 * setting the max memory read byte count
1887 				 * incorrectly.
1888 				 */
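     				/* Both fields encode the count as 512 << n. */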
1889 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1890 				    sc->sc_pcixe_capoff + PCIX_CMD);
1891 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1892 				    sc->sc_pcixe_capoff + PCIX_STATUS);
1893 
1894 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1895 				    PCIX_CMD_BYTECNT_SHIFT;
1896 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1897 				    PCIX_STATUS_MAXB_SHIFT;
1898 				if (bytecnt > maxb) {
1899 					aprint_verbose_dev(sc->sc_dev,
1900 					    "resetting PCI-X MMRBC: %d -> %d\n",
1901 					    512 << bytecnt, 512 << maxb);
1902 					pcix_cmd = (pcix_cmd &
1903 					    ~PCIX_CMD_BYTECNT_MASK) |
1904 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
1905 					pci_conf_write(pa->pa_pc, pa->pa_tag,
1906 					    sc->sc_pcixe_capoff + PCIX_CMD,
1907 					    pcix_cmd);
1908 				}
1909 			}
1910 		}
1911 		/*
1912 		 * The quad port adapter is special; it has a PCIX-PCIX
1913 		 * bridge on the board, and can run the secondary bus at
1914 		 * a higher speed.
1915 		 */
1916 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1917 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1918 								      : 66;
1919 		} else if (sc->sc_flags & WM_F_PCIX) {
1920 			switch (reg & STATUS_PCIXSPD_MASK) {
1921 			case STATUS_PCIXSPD_50_66:
1922 				sc->sc_bus_speed = 66;
1923 				break;
1924 			case STATUS_PCIXSPD_66_100:
1925 				sc->sc_bus_speed = 100;
1926 				break;
1927 			case STATUS_PCIXSPD_100_133:
1928 				sc->sc_bus_speed = 133;
1929 				break;
1930 			default:
1931 				aprint_error_dev(sc->sc_dev,
1932 				    "unknown PCIXSPD %d; assuming 66MHz\n",
1933 				    reg & STATUS_PCIXSPD_MASK);
1934 				sc->sc_bus_speed = 66;
1935 				break;
1936 			}
1937 		} else
1938 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1939 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1940 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1941 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1942 	}
1943 
1944 	/* clear interesting stat counters */
1945 	CSR_READ(sc, WMREG_COLC);
1946 	CSR_READ(sc, WMREG_RXERRC);
1947 
1948 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
1949 	    || (sc->sc_type >= WM_T_ICH8))
1950 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
1951 	if (sc->sc_type >= WM_T_ICH8)
1952 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
1953 
1954 	/* Set PHY, NVM mutex related stuff */
1955 	switch (sc->sc_type) {
1956 	case WM_T_82542_2_0:
1957 	case WM_T_82542_2_1:
1958 	case WM_T_82543:
1959 	case WM_T_82544:
1960 		/* Microwire */
1961 		sc->sc_nvm_wordsize = 64;
1962 		sc->sc_nvm_addrbits = 6;
1963 		break;
1964 	case WM_T_82540:
1965 	case WM_T_82545:
1966 	case WM_T_82545_3:
1967 	case WM_T_82546:
1968 	case WM_T_82546_3:
1969 		/* Microwire */
1970 		reg = CSR_READ(sc, WMREG_EECD);
1971 		if (reg & EECD_EE_SIZE) {
1972 			sc->sc_nvm_wordsize = 256;
1973 			sc->sc_nvm_addrbits = 8;
1974 		} else {
1975 			sc->sc_nvm_wordsize = 64;
1976 			sc->sc_nvm_addrbits = 6;
1977 		}
1978 		sc->sc_flags |= WM_F_LOCK_EECD;
1979 		break;
1980 	case WM_T_82541:
1981 	case WM_T_82541_2:
1982 	case WM_T_82547:
1983 	case WM_T_82547_2:
1984 		sc->sc_flags |= WM_F_LOCK_EECD;
1985 		reg = CSR_READ(sc, WMREG_EECD);
1986 		if (reg & EECD_EE_TYPE) {
1987 			/* SPI */
1988 			sc->sc_flags |= WM_F_EEPROM_SPI;
1989 			wm_nvm_set_addrbits_size_eecd(sc);
1990 		} else {
1991 			/* Microwire */
1992 			if ((reg & EECD_EE_ABITS) != 0) {
1993 				sc->sc_nvm_wordsize = 256;
1994 				sc->sc_nvm_addrbits = 8;
1995 			} else {
1996 				sc->sc_nvm_wordsize = 64;
1997 				sc->sc_nvm_addrbits = 6;
1998 			}
1999 		}
2000 		break;
2001 	case WM_T_82571:
2002 	case WM_T_82572:
2003 		/* SPI */
2004 		sc->sc_flags |= WM_F_EEPROM_SPI;
2005 		wm_nvm_set_addrbits_size_eecd(sc);
2006 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
2007 		sc->phy.acquire = wm_get_swsm_semaphore;
2008 		sc->phy.release = wm_put_swsm_semaphore;
2009 		break;
2010 	case WM_T_82573:
2011 	case WM_T_82574:
2012 	case WM_T_82583:
2013 		if (sc->sc_type == WM_T_82573) {
2014 			sc->sc_flags |= WM_F_LOCK_SWSM;
2015 			sc->phy.acquire = wm_get_swsm_semaphore;
2016 			sc->phy.release = wm_put_swsm_semaphore;
2017 		} else {
2018 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
2019 			/* Both PHY and NVM use the same semaphore. */
2020 			sc->phy.acquire = wm_get_swfwhw_semaphore;
2021 			sc->phy.release = wm_put_swfwhw_semaphore;
2024 		}
2025 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
2026 			sc->sc_flags |= WM_F_EEPROM_FLASH;
2027 			sc->sc_nvm_wordsize = 2048;
2028 		} else {
2029 			/* SPI */
2030 			sc->sc_flags |= WM_F_EEPROM_SPI;
2031 			wm_nvm_set_addrbits_size_eecd(sc);
2032 		}
2033 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
2034 		break;
2035 	case WM_T_82575:
2036 	case WM_T_82576:
2037 	case WM_T_82580:
2038 	case WM_T_I350:
2039 	case WM_T_I354:
2040 	case WM_T_80003:
2041 		/* SPI */
2042 		sc->sc_flags |= WM_F_EEPROM_SPI;
2043 		wm_nvm_set_addrbits_size_eecd(sc);
2044 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
2045 		    | WM_F_LOCK_SWSM;
2046 		sc->phy.acquire = wm_get_phy_82575;
2047 		sc->phy.release = wm_put_phy_82575;
2048 		break;
2049 	case WM_T_ICH8:
2050 	case WM_T_ICH9:
2051 	case WM_T_ICH10:
2052 	case WM_T_PCH:
2053 	case WM_T_PCH2:
2054 	case WM_T_PCH_LPT:
2055 		/* FLASH */
2056 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
2057 		sc->sc_nvm_wordsize = 2048;
2058 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
2059 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2060 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2061 			aprint_error_dev(sc->sc_dev,
2062 			    "can't map FLASH registers\n");
2063 			goto out;
2064 		}
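     		/*
     		 * GFPREG gives the first and last sectors of the gigabit
     		 * flash region.  Compute the base address and the size of
     		 * one bank: the whole region in bytes, halved for the two
     		 * banks and converted to 16-bit words.
     		 */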
2065 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2066 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2067 		    ICH_FLASH_SECTOR_SIZE;
2068 		sc->sc_ich8_flash_bank_size =
2069 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2070 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
2071 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2072 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
2073 		sc->sc_flashreg_offset = 0;
2074 		sc->phy.acquire = wm_get_swflag_ich8lan;
2075 		sc->phy.release = wm_put_swflag_ich8lan;
2076 		break;
2077 	case WM_T_PCH_SPT:
2078 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
2079 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
2080 		sc->sc_flasht = sc->sc_st;
2081 		sc->sc_flashh = sc->sc_sh;
2082 		sc->sc_ich8_flash_base = 0;
2083 		sc->sc_nvm_wordsize =
2084 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
2085 			* NVM_SIZE_MULTIPLIER;
2086 		/* That is the size in bytes; we want words */
2087 		sc->sc_nvm_wordsize /= 2;
2088 		/* assume 2 banks */
2089 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
2090 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
2091 		sc->phy.acquire = wm_get_swflag_ich8lan;
2092 		sc->phy.release = wm_put_swflag_ich8lan;
2093 		break;
2094 	case WM_T_I210:
2095 	case WM_T_I211:
2096 		if (wm_nvm_get_flash_presence_i210(sc)) {
2097 			wm_nvm_set_addrbits_size_eecd(sc);
2098 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2099 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
2100 		} else {
2101 			sc->sc_nvm_wordsize = INVM_SIZE;
2102 			sc->sc_flags |= WM_F_EEPROM_INVM;
2103 		}
2104 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
2105 		sc->phy.acquire = wm_get_phy_82575;
2106 		sc->phy.release = wm_put_phy_82575;
2107 		break;
2108 	default:
2109 		break;
2110 	}
2111 
2112 	/* Reset the chip to a known state. */
2113 	wm_reset(sc);
2114 
2115 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
2116 	switch (sc->sc_type) {
2117 	case WM_T_82571:
2118 	case WM_T_82572:
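     		/*
     		 * SWSM2_LOCK is set only once per power cycle; if we are
     		 * the first to take it, SMBI may still need clearing.
     		 */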
2119 		reg = CSR_READ(sc, WMREG_SWSM2);
2120 		if ((reg & SWSM2_LOCK) == 0) {
2121 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2122 			force_clear_smbi = true;
2123 		} else
2124 			force_clear_smbi = false;
2125 		break;
2126 	case WM_T_82573:
2127 	case WM_T_82574:
2128 	case WM_T_82583:
2129 		force_clear_smbi = true;
2130 		break;
2131 	default:
2132 		force_clear_smbi = false;
2133 		break;
2134 	}
2135 	if (force_clear_smbi) {
2136 		reg = CSR_READ(sc, WMREG_SWSM);
2137 		if ((reg & SWSM_SMBI) != 0)
2138 			aprint_error_dev(sc->sc_dev,
2139 			    "Please update the Bootagent\n");
2140 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2141 	}
2142 
2143 	/*
2144 	 * Defer printing the EEPROM type until after verifying the checksum.
2145 	 * This allows the EEPROM type to be printed correctly in the case
2146 	 * that no EEPROM is attached.
2147 	 */
2148 	/*
2149 	 * Validate the EEPROM checksum. If the checksum fails, flag
2150 	 * this for later, so we can fail future reads from the EEPROM.
2151 	 */
2152 	if (wm_nvm_validate_checksum(sc)) {
2153 		/*
2154 		 * Check again, because some PCI-e parts fail the
2155 		 * first check due to the link being in a sleep state.
2156 		 */
2157 		if (wm_nvm_validate_checksum(sc))
2158 			sc->sc_flags |= WM_F_EEPROM_INVALID;
2159 	}
2160 
2161 	/* Set device properties (macflags) */
2162 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
2163 
2164 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
2165 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2166 	else {
2167 		aprint_verbose_dev(sc->sc_dev, "%u words ",
2168 		    sc->sc_nvm_wordsize);
2169 		if (sc->sc_flags & WM_F_EEPROM_INVM)
2170 			aprint_verbose("iNVM");
2171 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2172 			aprint_verbose("FLASH(HW)");
2173 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2174 			aprint_verbose("FLASH");
2175 		else {
2176 			if (sc->sc_flags & WM_F_EEPROM_SPI)
2177 				eetype = "SPI";
2178 			else
2179 				eetype = "MicroWire";
2180 			aprint_verbose("(%d address bits) %s EEPROM",
2181 			    sc->sc_nvm_addrbits, eetype);
2182 		}
2183 	}
2184 	wm_nvm_version(sc);
2185 	aprint_verbose("\n");
2186 
2187 	/* Check for I21[01] PLL workaround */
2188 	if (sc->sc_type == WM_T_I210)
2189 		sc->sc_flags |= WM_F_PLL_WA_I210;
2190 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
2191 		/* NVM image release 3.25 has a workaround */
2192 		if ((sc->sc_nvm_ver_major < 3)
2193 		    || ((sc->sc_nvm_ver_major == 3)
2194 			&& (sc->sc_nvm_ver_minor < 25))) {
2195 			aprint_verbose_dev(sc->sc_dev,
2196 			    "ROM image version %d.%d is older than 3.25\n",
2197 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2198 			sc->sc_flags |= WM_F_PLL_WA_I210;
2199 		}
2200 	}
2201 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2202 		wm_pll_workaround_i210(sc);
2203 
2204 	wm_get_wakeup(sc);
2205 
2206 	/* Non-AMT based hardware can now take control from firmware */
2207 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2208 		wm_get_hw_control(sc);
2209 
2210 	/*
2211 	 * Read the Ethernet address from the EEPROM, if it was not
2212 	 * found first in the device properties.
2213 	 */
2214 	ea = prop_dictionary_get(dict, "mac-address");
2215 	if (ea != NULL) {
2216 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2217 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2218 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
2219 	} else {
2220 		if (wm_read_mac_addr(sc, enaddr) != 0) {
2221 			aprint_error_dev(sc->sc_dev,
2222 			    "unable to read Ethernet address\n");
2223 			goto out;
2224 		}
2225 	}
2226 
2227 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2228 	    ether_sprintf(enaddr));
2229 
2230 	/*
2231 	 * Read the config info from the EEPROM, and set up various
2232 	 * bits in the control registers based on their contents.
2233 	 */
2234 	pn = prop_dictionary_get(dict, "i82543-cfg1");
2235 	if (pn != NULL) {
2236 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2237 		cfg1 = (uint16_t) prop_number_integer_value(pn);
2238 	} else {
2239 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2240 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2241 			goto out;
2242 		}
2243 	}
2244 
2245 	pn = prop_dictionary_get(dict, "i82543-cfg2");
2246 	if (pn != NULL) {
2247 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2248 		cfg2 = (uint16_t) prop_number_integer_value(pn);
2249 	} else {
2250 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2251 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2252 			goto out;
2253 		}
2254 	}
2255 
2256 	/* check for WM_F_WOL */
2257 	switch (sc->sc_type) {
2258 	case WM_T_82542_2_0:
2259 	case WM_T_82542_2_1:
2260 	case WM_T_82543:
2261 		/* dummy? */
2262 		eeprom_data = 0;
2263 		apme_mask = NVM_CFG3_APME;
2264 		break;
2265 	case WM_T_82544:
2266 		apme_mask = NVM_CFG2_82544_APM_EN;
2267 		eeprom_data = cfg2;
2268 		break;
2269 	case WM_T_82546:
2270 	case WM_T_82546_3:
2271 	case WM_T_82571:
2272 	case WM_T_82572:
2273 	case WM_T_82573:
2274 	case WM_T_82574:
2275 	case WM_T_82583:
2276 	case WM_T_80003:
2277 	default:
2278 		apme_mask = NVM_CFG3_APME;
2279 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2280 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2281 		break;
2282 	case WM_T_82575:
2283 	case WM_T_82576:
2284 	case WM_T_82580:
2285 	case WM_T_I350:
2286 	case WM_T_I354: /* XXX ok? */
2287 	case WM_T_ICH8:
2288 	case WM_T_ICH9:
2289 	case WM_T_ICH10:
2290 	case WM_T_PCH:
2291 	case WM_T_PCH2:
2292 	case WM_T_PCH_LPT:
2293 	case WM_T_PCH_SPT:
2294 		/* XXX The funcid should be checked on some devices */
2295 		apme_mask = WUC_APME;
2296 		eeprom_data = CSR_READ(sc, WMREG_WUC);
2297 		break;
2298 	}
2299 
2300 	/* Set WM_F_WOL based on the EEPROM/WUC bits read above */
2301 	if ((eeprom_data & apme_mask) != 0)
2302 		sc->sc_flags |= WM_F_WOL;
2303 #ifdef WM_DEBUG
2304 	if ((sc->sc_flags & WM_F_WOL) != 0)
2305 		printf("WOL\n");
2306 #endif
2307 
2308 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
2309 		/* Check NVM for autonegotiation */
2310 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2311 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
2312 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2313 		}
2314 	}
2315 
2316 	/*
2317 	 * XXX need special handling for some multi-port cards
2318 	 * to disable a particular port.
2319 	 */
2320 
2321 	if (sc->sc_type >= WM_T_82544) {
2322 		pn = prop_dictionary_get(dict, "i82543-swdpin");
2323 		if (pn != NULL) {
2324 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2325 			swdpin = (uint16_t) prop_number_integer_value(pn);
2326 		} else {
2327 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2328 				aprint_error_dev(sc->sc_dev,
2329 				    "unable to read SWDPIN\n");
2330 				goto out;
2331 			}
2332 		}
2333 	}
2334 
2335 	if (cfg1 & NVM_CFG1_ILOS)
2336 		sc->sc_ctrl |= CTRL_ILOS;
2337 
2338 	/*
2339 	 * XXX
2340 	 * This code isn't correct because pins 2 and 3 are located
2341 	 * at different positions on newer chips. Check all datasheets.
2342 	 *
2343 	 * Until this is resolved, only apply it to chips <= 82580.
2344 	 */
2345 	if (sc->sc_type <= WM_T_82580) {
2346 		if (sc->sc_type >= WM_T_82544) {
2347 			sc->sc_ctrl |=
2348 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2349 			    CTRL_SWDPIO_SHIFT;
2350 			sc->sc_ctrl |=
2351 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2352 			    CTRL_SWDPINS_SHIFT;
2353 		} else {
2354 			sc->sc_ctrl |=
2355 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2356 			    CTRL_SWDPIO_SHIFT;
2357 		}
2358 	}
2359 
2360 	/* XXX For other than 82580? */
2361 	if (sc->sc_type == WM_T_82580) {
2362 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
2363 		if (nvmword & __BIT(13))
2364 			sc->sc_ctrl |= CTRL_ILOS;
2365 	}
2366 
2367 #if 0
2368 	if (sc->sc_type >= WM_T_82544) {
2369 		if (cfg1 & NVM_CFG1_IPS0)
2370 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2371 		if (cfg1 & NVM_CFG1_IPS1)
2372 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2373 		sc->sc_ctrl_ext |=
2374 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2375 		    CTRL_EXT_SWDPIO_SHIFT;
2376 		sc->sc_ctrl_ext |=
2377 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2378 		    CTRL_EXT_SWDPINS_SHIFT;
2379 	} else {
2380 		sc->sc_ctrl_ext |=
2381 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2382 		    CTRL_EXT_SWDPIO_SHIFT;
2383 	}
2384 #endif
2385 
2386 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2387 #if 0
2388 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2389 #endif
2390 
2391 	if (sc->sc_type == WM_T_PCH) {
2392 		uint16_t val;
2393 
2394 		/* Save the NVM K1 bit setting */
2395 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2396 
2397 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2398 			sc->sc_nvm_k1_enabled = 1;
2399 		else
2400 			sc->sc_nvm_k1_enabled = 0;
2401 	}
2402 
2403 	/*
2404 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
2405 	 * media structures accordingly.
2406 	 */
2407 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2408 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2409 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2410 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
2411 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2412 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
2413 		wm_gmii_mediainit(sc, wmp->wmp_product);
2414 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2415 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
2416 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
2417 	    || (sc->sc_type == WM_T_I211)) {
2418 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
2419 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2420 		switch (link_mode) {
2421 		case CTRL_EXT_LINK_MODE_1000KX:
2422 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2423 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2424 			break;
2425 		case CTRL_EXT_LINK_MODE_SGMII:
2426 			if (wm_sgmii_uses_mdio(sc)) {
2427 				aprint_verbose_dev(sc->sc_dev,
2428 				    "SGMII(MDIO)\n");
2429 				sc->sc_flags |= WM_F_SGMII;
2430 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2431 				break;
2432 			}
2433 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2434 			/*FALLTHROUGH*/
2435 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2436 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
2437 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2438 				if (link_mode
2439 				    == CTRL_EXT_LINK_MODE_SGMII) {
2440 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2441 					sc->sc_flags |= WM_F_SGMII;
2442 				} else {
2443 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2444 					aprint_verbose_dev(sc->sc_dev,
2445 					    "SERDES\n");
2446 				}
2447 				break;
2448 			}
2449 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2450 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
2451 
2452 			/* Change current link mode setting */
2453 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
2454 			switch (sc->sc_mediatype) {
2455 			case WM_MEDIATYPE_COPPER:
2456 				reg |= CTRL_EXT_LINK_MODE_SGMII;
2457 				break;
2458 			case WM_MEDIATYPE_SERDES:
2459 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2460 				break;
2461 			default:
2462 				break;
2463 			}
2464 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2465 			break;
2466 		case CTRL_EXT_LINK_MODE_GMII:
2467 		default:
2468 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
2469 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2470 			break;
2471 		}
2472 
2474 		if ((sc->sc_flags & WM_F_SGMII) != 0)
2475 			reg |= CTRL_EXT_I2C_ENA;
2476 		else
2477 			reg &= ~CTRL_EXT_I2C_ENA;
2478 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2479 
2480 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2481 			wm_gmii_mediainit(sc, wmp->wmp_product);
2482 		else
2483 			wm_tbi_mediainit(sc);
2484 	} else if (sc->sc_type < WM_T_82543 ||
2485 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2486 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2487 			aprint_error_dev(sc->sc_dev,
2488 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
2489 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2490 		}
2491 		wm_tbi_mediainit(sc);
2492 	} else {
2493 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
2494 			aprint_error_dev(sc->sc_dev,
2495 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2496 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2497 		}
2498 		wm_gmii_mediainit(sc, wmp->wmp_product);
2499 	}
2500 
2501 	ifp = &sc->sc_ethercom.ec_if;
2502 	xname = device_xname(sc->sc_dev);
2503 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2504 	ifp->if_softc = sc;
2505 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2506 	ifp->if_extflags = IFEF_START_MPSAFE;
2507 	ifp->if_ioctl = wm_ioctl;
2508 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
2509 		ifp->if_start = wm_nq_start;
2510 		if (sc->sc_nqueues > 1) {
2511 			ifp->if_transmit = wm_nq_transmit;
2512 			deferred_start_func = wm_deferred_start;
2513 		}
2514 	} else {
2515 		ifp->if_start = wm_start;
2516 		if (sc->sc_nqueues > 1) {
2517 			ifp->if_transmit = wm_transmit;
2518 			deferred_start_func = wm_deferred_start;
2519 		}
2520 	}
2521 	ifp->if_watchdog = wm_watchdog;
2522 	ifp->if_init = wm_init;
2523 	ifp->if_stop = wm_stop;
2524 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2525 	IFQ_SET_READY(&ifp->if_snd);
2526 
2527 	/* Check for jumbo frame */
2528 	switch (sc->sc_type) {
2529 	case WM_T_82573:
2530 		/* XXX limited to 9234 if ASPM is disabled */
2531 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2532 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2533 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2534 		break;
2535 	case WM_T_82571:
2536 	case WM_T_82572:
2537 	case WM_T_82574:
2538 	case WM_T_82575:
2539 	case WM_T_82576:
2540 	case WM_T_82580:
2541 	case WM_T_I350:
2542 	case WM_T_I354: /* XXXX ok? */
2543 	case WM_T_I210:
2544 	case WM_T_I211:
2545 	case WM_T_80003:
2546 	case WM_T_ICH9:
2547 	case WM_T_ICH10:
2548 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
2549 	case WM_T_PCH_LPT:
2550 	case WM_T_PCH_SPT:
2551 		/* XXX limited to 9234 */
2552 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2553 		break;
2554 	case WM_T_PCH:
2555 		/* XXX limited to 4096 */
2556 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2557 		break;
2558 	case WM_T_82542_2_0:
2559 	case WM_T_82542_2_1:
2560 	case WM_T_82583:
2561 	case WM_T_ICH8:
2562 		/* No support for jumbo frames */
2563 		break;
2564 	default:
2565 		/* ETHER_MAX_LEN_JUMBO */
2566 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2567 		break;
2568 	}
2569 
2570 	/* If we're an i82543 or greater, we can support VLANs. */
2571 	if (sc->sc_type >= WM_T_82543)
2572 		sc->sc_ethercom.ec_capabilities |=
2573 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2574 
2575 	/*
2576 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
2577 	 * on i82543 and later.
2578 	 */
2579 	if (sc->sc_type >= WM_T_82543) {
2580 		ifp->if_capabilities |=
2581 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2582 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2583 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2584 		    IFCAP_CSUM_TCPv6_Tx |
2585 		    IFCAP_CSUM_UDPv6_Tx;
2586 	}
2587 
2588 	/*
2589 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
2590 	 *
2591 	 *	82541GI (8086:1076) ... no
2592 	 *	82572EI (8086:10b9) ... yes
2593 	 */
2594 	if (sc->sc_type >= WM_T_82571) {
2595 		ifp->if_capabilities |=
2596 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2597 	}
2598 
2599 	/*
2600 	 * If we're an i82544 or greater (except i82547), we can do
2601 	 * TCP segmentation offload.
2602 	 */
2603 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2604 		ifp->if_capabilities |= IFCAP_TSOv4;
2605 	}
2606 
2607 	if (sc->sc_type >= WM_T_82571) {
2608 		ifp->if_capabilities |= IFCAP_TSOv6;
2609 	}
2610 
2611 #ifdef WM_MPSAFE
2612 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2613 #else
2614 	sc->sc_core_lock = NULL;
2615 #endif
2616 
2617 	/* Attach the interface. */
2618 	if_initialize(ifp);
2619 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
2620 	if_deferred_start_init(ifp, deferred_start_func);
2621 	ether_ifattach(ifp, enaddr);
2622 	if_register(ifp);
2623 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2624 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2625 			  RND_FLAG_DEFAULT);
2626 
2627 #ifdef WM_EVENT_COUNTERS
2628 	/* Attach event counters. */
2629 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2630 	    NULL, xname, "linkintr");
2631 
2632 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2633 	    NULL, xname, "tx_xoff");
2634 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2635 	    NULL, xname, "tx_xon");
2636 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2637 	    NULL, xname, "rx_xoff");
2638 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2639 	    NULL, xname, "rx_xon");
2640 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2641 	    NULL, xname, "rx_macctl");
2642 #endif /* WM_EVENT_COUNTERS */
2643 
2644 	if (pmf_device_register(self, wm_suspend, wm_resume))
2645 		pmf_class_network_register(self, ifp);
2646 	else
2647 		aprint_error_dev(self, "couldn't establish power handler\n");
2648 
2649 	sc->sc_flags |= WM_F_ATTACHED;
2650  out:
2651 	return;
2652 }
2653 
2654 /* The detach function (ca_detach) */
2655 static int
2656 wm_detach(device_t self, int flags __unused)
2657 {
2658 	struct wm_softc *sc = device_private(self);
2659 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2660 	int i;
2661 
2662 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2663 		return 0;
2664 
2665 	/* Stop the interface. Callouts are stopped in it. */
2666 	wm_stop(ifp, 1);
2667 
2668 	pmf_device_deregister(self);
2669 
2670 	/* Tell the firmware about the release */
2671 	WM_CORE_LOCK(sc);
2672 	wm_release_manageability(sc);
2673 	wm_release_hw_control(sc);
2674 	wm_enable_wakeup(sc);
2675 	WM_CORE_UNLOCK(sc);
2676 
2677 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2678 
2679 	/* Delete all remaining media. */
2680 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2681 
2682 	ether_ifdetach(ifp);
2683 	if_detach(ifp);
2684 	if_percpuq_destroy(sc->sc_ipq);
2685 
2686 	/* Unload RX dmamaps and free mbufs */
2687 	for (i = 0; i < sc->sc_nqueues; i++) {
2688 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
2689 		mutex_enter(rxq->rxq_lock);
2690 		wm_rxdrain(rxq);
2691 		mutex_exit(rxq->rxq_lock);
2692 	}
2693 	/* Must unlock here */
2694 
2695 	/* Disestablish the interrupt handler */
2696 	for (i = 0; i < sc->sc_nintrs; i++) {
2697 		if (sc->sc_ihs[i] != NULL) {
2698 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
2699 			sc->sc_ihs[i] = NULL;
2700 		}
2701 	}
2702 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
2703 
2704 	wm_free_txrx_queues(sc);
2705 
2706 	/* Unmap the registers */
2707 	if (sc->sc_ss) {
2708 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2709 		sc->sc_ss = 0;
2710 	}
2711 	if (sc->sc_ios) {
2712 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2713 		sc->sc_ios = 0;
2714 	}
2715 	if (sc->sc_flashs) {
2716 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
2717 		sc->sc_flashs = 0;
2718 	}
2719 
2720 	if (sc->sc_core_lock)
2721 		mutex_obj_free(sc->sc_core_lock);
2722 	if (sc->sc_ich_phymtx)
2723 		mutex_obj_free(sc->sc_ich_phymtx);
2724 	if (sc->sc_ich_nvmmtx)
2725 		mutex_obj_free(sc->sc_ich_nvmmtx);
2726 
2727 	return 0;
2728 }
2729 
2730 static bool
2731 wm_suspend(device_t self, const pmf_qual_t *qual)
2732 {
2733 	struct wm_softc *sc = device_private(self);
2734 
2735 	wm_release_manageability(sc);
2736 	wm_release_hw_control(sc);
2737 	wm_enable_wakeup(sc);
2738 
2739 	return true;
2740 }
2741 
2742 static bool
2743 wm_resume(device_t self, const pmf_qual_t *qual)
2744 {
2745 	struct wm_softc *sc = device_private(self);
2746 
2747 	wm_init_manageability(sc);
2748 
2749 	return true;
2750 }
2751 
2752 /*
2753  * wm_watchdog:		[ifnet interface function]
2754  *
2755  *	Watchdog timer handler.
2756  */
2757 static void
2758 wm_watchdog(struct ifnet *ifp)
2759 {
2760 	int qid;
2761 	struct wm_softc *sc = ifp->if_softc;
2762 
2763 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
2764 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
2765 
2766 		wm_watchdog_txq(ifp, txq);
2767 	}
2768 
2769 	/* Reset the interface. */
2770 	(void) wm_init(ifp);
2771 
2772 	/*
2773 	 * There is still some upper-layer processing which calls
2774 	 * ifp->if_start(), e.g. ALTQ.
2775 	 */
2776 	/* Try to get more packets going. */
2777 	ifp->if_start(ifp);
2778 }
2779 
2780 static void
2781 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
2782 {
2783 	struct wm_softc *sc = ifp->if_softc;
2784 
2785 	/*
2786 	 * Since we're using delayed interrupts, sweep up
2787 	 * before we report an error.
2788 	 */
2789 	mutex_enter(txq->txq_lock);
2790 	wm_txeof(sc, txq);
2791 	mutex_exit(txq->txq_lock);
2792 
2793 	if (txq->txq_free != WM_NTXDESC(txq)) {
2794 #ifdef WM_DEBUG
2795 		int i, j;
2796 		struct wm_txsoft *txs;
2797 #endif
2798 		log(LOG_ERR,
2799 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2800 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
2801 		    txq->txq_next);
2802 		ifp->if_oerrors++;
2803 #ifdef WM_DEBUG
2804 		for (i = txq->txq_sdirty; i != txq->txq_snext;
2805 		    i = WM_NEXTTXS(txq, i)) {
2806 			txs = &txq->txq_soft[i];
2807 			printf("txs %d tx %d -> %d\n",
2808 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
2809 			for (j = txs->txs_firstdesc; ;
2810 			    j = WM_NEXTTX(txq, j)) {
2811 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2812 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
2813 				printf("\t %#08x%08x\n",
2814 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
2815 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
2816 				if (j == txs->txs_lastdesc)
2817 					break;
2818 			}
2819 		}
2820 #endif
2821 	}
2822 }
2823 
2824 /*
2825  * wm_tick:
2826  *
2827  *	One second timer, used to check link status, sweep up
2828  *	completed transmit jobs, etc.
2829  */
2830 static void
2831 wm_tick(void *arg)
2832 {
2833 	struct wm_softc *sc = arg;
2834 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2835 #ifndef WM_MPSAFE
2836 	int s = splnet();
2837 #endif
2838 
2839 	WM_CORE_LOCK(sc);
2840 
2841 	if (sc->sc_core_stopping)
2842 		goto out;
2843 
2844 	if (sc->sc_type >= WM_T_82542_2_1) {
2845 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2846 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2847 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2848 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2849 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2850 	}
2851 
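     	/*
     	 * The statistics registers are clear-on-read, so each read
     	 * below both samples and resets the counter.
     	 */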
2852 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2853 	ifp->if_ierrors += 0ULL /* ensure quad_t */
2854 	    + CSR_READ(sc, WMREG_CRCERRS)
2855 	    + CSR_READ(sc, WMREG_ALGNERRC)
2856 	    + CSR_READ(sc, WMREG_SYMERRC)
2857 	    + CSR_READ(sc, WMREG_RXERRC)
2858 	    + CSR_READ(sc, WMREG_SEC)
2859 	    + CSR_READ(sc, WMREG_CEXTERR)
2860 	    + CSR_READ(sc, WMREG_RLEC);
2861 	/*
2862 	 * WMREG_RNBC is incremented when there are no available buffers in
2863 	 * host memory. It does not count dropped packets, because the
2864 	 * controller can still receive packets in that case as long as
2865 	 * there is space in the PHY's FIFO.
2866 	 *
2867 	 * If you want to track the WMREG_RNBC count, use a dedicated
2868 	 * EVCNT instead of if_iqdrops.
2869 	 */
2870 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
2871 
2872 	if (sc->sc_flags & WM_F_HAS_MII)
2873 		mii_tick(&sc->sc_mii);
2874 	else if ((sc->sc_type >= WM_T_82575)
2875 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
2876 		wm_serdes_tick(sc);
2877 	else
2878 		wm_tbi_tick(sc);
2879 
2880 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2881 out:
2882 	WM_CORE_UNLOCK(sc);
2883 #ifndef WM_MPSAFE
2884 	splx(s);
2885 #endif
2886 }
2887 
2888 static int
2889 wm_ifflags_cb(struct ethercom *ec)
2890 {
2891 	struct ifnet *ifp = &ec->ec_if;
2892 	struct wm_softc *sc = ifp->if_softc;
2893 	int rc = 0;
2894 
2895 	WM_CORE_LOCK(sc);
2896 
2897 	int change = ifp->if_flags ^ sc->sc_if_flags;
2898 	sc->sc_if_flags = ifp->if_flags;
2899 
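     	/* Flag changes we can't handle on the fly force a full re-init. */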
2900 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
2901 		rc = ENETRESET;
2902 		goto out;
2903 	}
2904 
2905 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2906 		wm_set_filter(sc);
2907 
2908 	wm_set_vlan(sc);
2909 
2910 out:
2911 	WM_CORE_UNLOCK(sc);
2912 
2913 	return rc;
2914 }
2915 
2916 /*
2917  * wm_ioctl:		[ifnet interface function]
2918  *
2919  *	Handle control requests from the operator.
2920  */
2921 static int
2922 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2923 {
2924 	struct wm_softc *sc = ifp->if_softc;
2925 	struct ifreq *ifr = (struct ifreq *) data;
2926 	struct ifaddr *ifa = (struct ifaddr *)data;
2927 	struct sockaddr_dl *sdl;
2928 	int s, error;
2929 
2930 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
2931 		device_xname(sc->sc_dev), __func__));
2932 
2933 #ifndef WM_MPSAFE
2934 	s = splnet();
2935 #endif
2936 	switch (cmd) {
2937 	case SIOCSIFMEDIA:
2938 	case SIOCGIFMEDIA:
2939 		WM_CORE_LOCK(sc);
2940 		/* Flow control requires full-duplex mode. */
2941 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2942 		    (ifr->ifr_media & IFM_FDX) == 0)
2943 			ifr->ifr_media &= ~IFM_ETH_FMASK;
2944 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2945 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2946 				/* We can do both TXPAUSE and RXPAUSE. */
2947 				ifr->ifr_media |=
2948 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2949 			}
2950 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2951 		}
2952 		WM_CORE_UNLOCK(sc);
2953 #ifdef WM_MPSAFE
2954 		s = splnet();
2955 #endif
2956 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2957 #ifdef WM_MPSAFE
2958 		splx(s);
2959 #endif
2960 		break;
2961 	case SIOCINITIFADDR:
2962 		WM_CORE_LOCK(sc);
2963 		if (ifa->ifa_addr->sa_family == AF_LINK) {
2964 			sdl = satosdl(ifp->if_dl->ifa_addr);
2965 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2966 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2967 			/* unicast address is first multicast entry */
2968 			wm_set_filter(sc);
2969 			error = 0;
2970 			WM_CORE_UNLOCK(sc);
2971 			break;
2972 		}
2973 		WM_CORE_UNLOCK(sc);
2974 		/*FALLTHROUGH*/
2975 	default:
2976 #ifdef WM_MPSAFE
2977 		s = splnet();
2978 #endif
2979 		/* It may call wm_start, so unlock here */
2980 		error = ether_ioctl(ifp, cmd, data);
2981 #ifdef WM_MPSAFE
2982 		splx(s);
2983 #endif
2984 		if (error != ENETRESET)
2985 			break;
2986 
2987 		error = 0;
2988 
2989 		if (cmd == SIOCSIFCAP) {
2990 			error = (*ifp->if_init)(ifp);
2991 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2992 			;
2993 		else if (ifp->if_flags & IFF_RUNNING) {
2994 			/*
2995 			 * Multicast list has changed; set the hardware filter
2996 			 * accordingly.
2997 			 */
2998 			WM_CORE_LOCK(sc);
2999 			wm_set_filter(sc);
3000 			WM_CORE_UNLOCK(sc);
3001 		}
3002 		break;
3003 	}
3004 
3005 #ifndef WM_MPSAFE
3006 	splx(s);
3007 #endif
3008 	return error;
3009 }
3010 
3011 /* MAC address related */
3012 
3013 /*
3014  * Get the offset of the MAC address and return it.
3015  * If an error occurs, use offset 0.
3016  */
3017 static uint16_t
3018 wm_check_alt_mac_addr(struct wm_softc *sc)
3019 {
3020 	uint16_t myea[ETHER_ADDR_LEN / 2];
3021 	uint16_t offset = NVM_OFF_MACADDR;
3022 
3023 	/* Try to read alternative MAC address pointer */
3024 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
3025 		return 0;
3026 
3027 	/* Check whether the pointer is valid. */
3028 	if ((offset == 0x0000) || (offset == 0xffff))
3029 		return 0;
3030 
3031 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
3032 	/*
3033 	 * Check whether the alternative MAC address is valid.
3034 	 * Some cards have a non-0xffff pointer but don't actually
3035 	 * use an alternative MAC address.
3036 	 *
3037 	 * Check whether the broadcast bit is set or not.
3038 	 */
3039 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
3040 		if (((myea[0] & 0xff) & 0x01) == 0)
3041 			return offset; /* Found */
3042 
3043 	/* Not found */
3044 	return 0;
3045 }
3046 
3047 static int
3048 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
3049 {
3050 	uint16_t myea[ETHER_ADDR_LEN / 2];
3051 	uint16_t offset = NVM_OFF_MACADDR;
3052 	int do_invert = 0;
3053 
3054 	switch (sc->sc_type) {
3055 	case WM_T_82580:
3056 	case WM_T_I350:
3057 	case WM_T_I354:
3058 		/* EEPROM Top Level Partitioning */
3059 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
3060 		break;
3061 	case WM_T_82571:
3062 	case WM_T_82575:
3063 	case WM_T_82576:
3064 	case WM_T_80003:
3065 	case WM_T_I210:
3066 	case WM_T_I211:
3067 		offset = wm_check_alt_mac_addr(sc);
3068 		if (offset == 0)
3069 			if ((sc->sc_funcid & 0x01) == 1)
3070 				do_invert = 1;
3071 		break;
3072 	default:
3073 		if ((sc->sc_funcid & 0x01) == 1)
3074 			do_invert = 1;
3075 		break;
3076 	}
3077 
3078 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
3079 		goto bad;
3080 
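     	/* Each 16-bit NVM word holds two address bytes, low byte first. */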
3081 	enaddr[0] = myea[0] & 0xff;
3082 	enaddr[1] = myea[0] >> 8;
3083 	enaddr[2] = myea[1] & 0xff;
3084 	enaddr[3] = myea[1] >> 8;
3085 	enaddr[4] = myea[2] & 0xff;
3086 	enaddr[5] = myea[2] >> 8;
3087 
3088 	/*
3089 	 * Toggle the LSB of the MAC address on the second port
3090 	 * of some dual port cards.
3091 	 */
3092 	if (do_invert != 0)
3093 		enaddr[5] ^= 1;
3094 
3095 	return 0;
3096 
3097  bad:
3098 	return -1;
3099 }
3100 
3101 /*
3102  * wm_set_ral:
3103  *
3104  *	Set an entry in the receive address list.
3105  */
3106 static void
3107 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3108 {
3109 	uint32_t ral_lo, ral_hi;
3110 
3111 	if (enaddr != NULL) {
3112 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3113 		    (enaddr[3] << 24);
3114 		ral_hi = enaddr[4] | (enaddr[5] << 8);
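     		/* Mark the entry valid so the chip will use it. */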
3115 		ral_hi |= RAL_AV;
3116 	} else {
3117 		ral_lo = 0;
3118 		ral_hi = 0;
3119 	}
3120 
3121 	if (sc->sc_type >= WM_T_82544) {
3122 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
3123 		    ral_lo);
3124 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
3125 		    ral_hi);
3126 	} else {
3127 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
3128 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
3129 	}
3130 }
3131 
3132 /*
3133  * wm_mchash:
3134  *
3135  *	Compute the hash of the multicast address for the 4096-bit
3136  *	multicast filter.
3137  */
3138 static uint32_t
3139 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3140 {
3141 	static const int lo_shift[4] = { 4, 3, 2, 0 };
3142 	static const int hi_shift[4] = { 4, 5, 6, 8 };
3143 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3144 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3145 	uint32_t hash;
3146 
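     	/*
     	 * The filter bit index is spliced together from the last two
     	 * bytes of the address; sc_mchash_type selects one of four
     	 * splice points (a 12-bit hash, or 10 bits on ICH/PCH parts).
     	 */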
3147 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3148 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3149 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3150 	    || (sc->sc_type == WM_T_PCH_SPT)) {
3151 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3152 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3153 		return (hash & 0x3ff);
3154 	}
3155 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3156 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3157 
3158 	return (hash & 0xfff);
3159 }
3160 
3161 /*
3162  * wm_set_filter:
3163  *
3164  *	Set up the receive filter.
3165  */
3166 static void
3167 wm_set_filter(struct wm_softc *sc)
3168 {
3169 	struct ethercom *ec = &sc->sc_ethercom;
3170 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3171 	struct ether_multi *enm;
3172 	struct ether_multistep step;
3173 	bus_addr_t mta_reg;
3174 	uint32_t hash, reg, bit;
3175 	int i, size, ralmax;
3176 
3177 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3178 		device_xname(sc->sc_dev), __func__));
3179 
3180 	if (sc->sc_type >= WM_T_82544)
3181 		mta_reg = WMREG_CORDOVA_MTA;
3182 	else
3183 		mta_reg = WMREG_MTA;
3184 
3185 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3186 
3187 	if (ifp->if_flags & IFF_BROADCAST)
3188 		sc->sc_rctl |= RCTL_BAM;
3189 	if (ifp->if_flags & IFF_PROMISC) {
3190 		sc->sc_rctl |= RCTL_UPE;
3191 		goto allmulti;
3192 	}
3193 
3194 	/*
3195 	 * Set the station address in the first RAL slot, and
3196 	 * clear the remaining slots.
3197 	 */
3198 	if (sc->sc_type == WM_T_ICH8)
3199 		size = WM_RAL_TABSIZE_ICH8 - 1;
3200 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
3201 	    || (sc->sc_type == WM_T_PCH))
3202 		size = WM_RAL_TABSIZE_ICH8;
3203 	else if (sc->sc_type == WM_T_PCH2)
3204 		size = WM_RAL_TABSIZE_PCH2;
3205 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
3206 		size = WM_RAL_TABSIZE_PCH_LPT;
3207 	else if (sc->sc_type == WM_T_82575)
3208 		size = WM_RAL_TABSIZE_82575;
3209 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
3210 		size = WM_RAL_TABSIZE_82576;
3211 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3212 		size = WM_RAL_TABSIZE_I350;
3213 	else
3214 		size = WM_RAL_TABSIZE;
3215 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3216 
3217 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
3218 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
3219 		switch (i) {
3220 		case 0:
3221 			/* We can use all entries */
3222 			ralmax = size;
3223 			break;
3224 		case 1:
3225 			/* Only RAR[0] */
3226 			ralmax = 1;
3227 			break;
3228 		default:
3229 			/* available SHRA + RAR[0] */
3230 			ralmax = i + 1;
3231 		}
3232 	} else
3233 		ralmax = size;
3234 	for (i = 1; i < size; i++) {
3235 		if (i < ralmax)
3236 			wm_set_ral(sc, NULL, i);
3237 	}
3238 
3239 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3240 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3241 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3242 	    || (sc->sc_type == WM_T_PCH_SPT))
3243 		size = WM_ICH8_MC_TABSIZE;
3244 	else
3245 		size = WM_MC_TABSIZE;
3246 	/* Clear out the multicast table. */
3247 	for (i = 0; i < size; i++)
3248 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
3249 
3250 	ETHER_LOCK(ec);
3251 	ETHER_FIRST_MULTI(step, ec, enm);
3252 	while (enm != NULL) {
3253 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3254 			ETHER_UNLOCK(ec);
3255 			/*
3256 			 * We must listen to a range of multicast addresses.
3257 			 * For now, just accept all multicasts, rather than
3258 			 * trying to set only those filter bits needed to match
3259 			 * the range.  (At this time, the only use of address
3260 			 * ranges is for IP multicast routing, for which the
3261 			 * range is big enough to require all bits set.)
3262 			 */
3263 			goto allmulti;
3264 		}
3265 
3266 		hash = wm_mchash(sc, enm->enm_addrlo);
3267 
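		/*
		 * The upper bits of the hash select a 32-bit MTA word
		 * (reg) and the low five bits select the bit within it.
		 */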
3268 		reg = (hash >> 5);
3269 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3270 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3271 		    || (sc->sc_type == WM_T_PCH2)
3272 		    || (sc->sc_type == WM_T_PCH_LPT)
3273 		    || (sc->sc_type == WM_T_PCH_SPT))
3274 			reg &= 0x1f;
3275 		else
3276 			reg &= 0x7f;
3277 		bit = hash & 0x1f;
3278 
3279 		hash = CSR_READ(sc, mta_reg + (reg << 2));
3280 		hash |= 1U << bit;
3281 
3282 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
3283 			/*
3284 			 * 82544 Errata 9: Certain registers cannot be written
3285 			 * with particular alignments in PCI-X bus operation
3286 			 * (FCAH, MTA and VFTA).
3287 			 */
3288 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3289 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3290 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3291 		} else
3292 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3293 
3294 		ETHER_NEXT_MULTI(step, enm);
3295 	}
3296 	ETHER_UNLOCK(ec);
3297 
3298 	ifp->if_flags &= ~IFF_ALLMULTI;
3299 	goto setit;
3300 
3301  allmulti:
3302 	ifp->if_flags |= IFF_ALLMULTI;
3303 	sc->sc_rctl |= RCTL_MPE;
3304 
3305  setit:
3306 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3307 }
3308 
3309 /* Reset and init related */
3310 
3311 static void
3312 wm_set_vlan(struct wm_softc *sc)
3313 {
3314 
3315 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3316 		device_xname(sc->sc_dev), __func__));
3317 
3318 	/* Deal with VLAN enables. */
3319 	if (VLAN_ATTACHED(&sc->sc_ethercom))
3320 		sc->sc_ctrl |= CTRL_VME;
3321 	else
3322 		sc->sc_ctrl &= ~CTRL_VME;
3323 
3324 	/* Write the control registers. */
3325 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3326 }
3327 
3328 static void
3329 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3330 {
3331 	uint32_t gcr;
3332 	pcireg_t ctrl2;
3333 
3334 	gcr = CSR_READ(sc, WMREG_GCR);
3335 
3336 	/* Only take action if timeout value is defaulted to 0 */
3337 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3338 		goto out;
3339 
3340 	if ((gcr & GCR_CAP_VER2) == 0) {
3341 		gcr |= GCR_CMPL_TMOUT_10MS;
3342 		goto out;
3343 	}
3344 
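	/*
	 * Capability version 2 devices expose the completion timeout
	 * through the standard PCIe Device Control 2 register instead;
	 * request the 16ms range there.
	 */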
3345 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3346 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
3347 	ctrl2 |= WM_PCIE_DCSR2_16MS;
3348 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3349 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3350 
3351 out:
3352 	/* Disable completion timeout resend */
3353 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
3354 
3355 	CSR_WRITE(sc, WMREG_GCR, gcr);
3356 }
3357 
3358 void
3359 wm_get_auto_rd_done(struct wm_softc *sc)
3360 {
3361 	int i;
3362 
3363 	/* wait for eeprom to reload */
3364 	switch (sc->sc_type) {
3365 	case WM_T_82571:
3366 	case WM_T_82572:
3367 	case WM_T_82573:
3368 	case WM_T_82574:
3369 	case WM_T_82583:
3370 	case WM_T_82575:
3371 	case WM_T_82576:
3372 	case WM_T_82580:
3373 	case WM_T_I350:
3374 	case WM_T_I354:
3375 	case WM_T_I210:
3376 	case WM_T_I211:
3377 	case WM_T_80003:
3378 	case WM_T_ICH8:
3379 	case WM_T_ICH9:
3380 		for (i = 0; i < 10; i++) {
3381 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3382 				break;
3383 			delay(1000);
3384 		}
3385 		if (i == 10) {
3386 			log(LOG_ERR, "%s: auto read from eeprom failed to "
3387 			    "complete\n", device_xname(sc->sc_dev));
3388 		}
3389 		break;
3390 	default:
3391 		break;
3392 	}
3393 }
3394 
3395 void
3396 wm_lan_init_done(struct wm_softc *sc)
3397 {
3398 	uint32_t reg = 0;
3399 	int i;
3400 
3401 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3402 		device_xname(sc->sc_dev), __func__));
3403 
3404 	/* Wait for eeprom to reload */
3405 	switch (sc->sc_type) {
3406 	case WM_T_ICH10:
3407 	case WM_T_PCH:
3408 	case WM_T_PCH2:
3409 	case WM_T_PCH_LPT:
3410 	case WM_T_PCH_SPT:
3411 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3412 			reg = CSR_READ(sc, WMREG_STATUS);
3413 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
3414 				break;
3415 			delay(100);
3416 		}
3417 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3418 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
3419 			    "complete\n", device_xname(sc->sc_dev), __func__);
3420 		}
3421 		break;
3422 	default:
3423 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3424 		    __func__);
3425 		break;
3426 	}
3427 
3428 	reg &= ~STATUS_LAN_INIT_DONE;
3429 	CSR_WRITE(sc, WMREG_STATUS, reg);
3430 }
3431 
3432 void
3433 wm_get_cfg_done(struct wm_softc *sc)
3434 {
3435 	int mask;
3436 	uint32_t reg;
3437 	int i;
3438 
3439 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3440 		device_xname(sc->sc_dev), __func__));
3441 
3442 	/* Wait for eeprom to reload */
3443 	switch (sc->sc_type) {
3444 	case WM_T_82542_2_0:
3445 	case WM_T_82542_2_1:
3446 		/* null */
3447 		break;
3448 	case WM_T_82543:
3449 	case WM_T_82544:
3450 	case WM_T_82540:
3451 	case WM_T_82545:
3452 	case WM_T_82545_3:
3453 	case WM_T_82546:
3454 	case WM_T_82546_3:
3455 	case WM_T_82541:
3456 	case WM_T_82541_2:
3457 	case WM_T_82547:
3458 	case WM_T_82547_2:
3459 	case WM_T_82573:
3460 	case WM_T_82574:
3461 	case WM_T_82583:
3462 		/* generic */
3463 		delay(10*1000);
3464 		break;
3465 	case WM_T_80003:
3466 	case WM_T_82571:
3467 	case WM_T_82572:
3468 	case WM_T_82575:
3469 	case WM_T_82576:
3470 	case WM_T_82580:
3471 	case WM_T_I350:
3472 	case WM_T_I354:
3473 	case WM_T_I210:
3474 	case WM_T_I211:
3475 		if (sc->sc_type == WM_T_82571) {
3476 			/* Only the 82571 shares port 0's CFGDONE bit */
3477 			mask = EEMNGCTL_CFGDONE_0;
3478 		} else
3479 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3480 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3481 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3482 				break;
3483 			delay(1000);
3484 		}
3485 		if (i >= WM_PHY_CFG_TIMEOUT) {
3486 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3487 				device_xname(sc->sc_dev), __func__));
3488 		}
3489 		break;
3490 	case WM_T_ICH8:
3491 	case WM_T_ICH9:
3492 	case WM_T_ICH10:
3493 	case WM_T_PCH:
3494 	case WM_T_PCH2:
3495 	case WM_T_PCH_LPT:
3496 	case WM_T_PCH_SPT:
3497 		delay(10*1000);
3498 		if (sc->sc_type >= WM_T_ICH10)
3499 			wm_lan_init_done(sc);
3500 		else
3501 			wm_get_auto_rd_done(sc);
3502 
3503 		reg = CSR_READ(sc, WMREG_STATUS);
3504 		if ((reg & STATUS_PHYRA) != 0)
3505 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3506 		break;
3507 	default:
3508 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3509 		    __func__);
3510 		break;
3511 	}
3512 }
3513 
3514 /* Init hardware bits */
3515 void
3516 wm_initialize_hardware_bits(struct wm_softc *sc)
3517 {
3518 	uint32_t tarc0, tarc1, reg;
3519 
3520 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3521 		device_xname(sc->sc_dev), __func__));
3522 
3523 	/* For 82571 variant, 80003 and ICHs */
3524 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
3525 	    || (sc->sc_type >= WM_T_80003)) {
3526 
3527 		/* Transmit Descriptor Control 0 */
3528 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
3529 		reg |= TXDCTL_COUNT_DESC;
3530 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
3531 
3532 		/* Transmit Descriptor Control 1 */
3533 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
3534 		reg |= TXDCTL_COUNT_DESC;
3535 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
3536 
3537 		/* TARC0 */
3538 		tarc0 = CSR_READ(sc, WMREG_TARC0);
3539 		switch (sc->sc_type) {
3540 		case WM_T_82571:
3541 		case WM_T_82572:
3542 		case WM_T_82573:
3543 		case WM_T_82574:
3544 		case WM_T_82583:
3545 		case WM_T_80003:
3546 			/* Clear bits 30..27 */
3547 			tarc0 &= ~__BITS(30, 27);
3548 			break;
3549 		default:
3550 			break;
3551 		}
3552 
3553 		switch (sc->sc_type) {
3554 		case WM_T_82571:
3555 		case WM_T_82572:
3556 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
3557 
3558 			tarc1 = CSR_READ(sc, WMREG_TARC1);
3559 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
3560 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
3561 			/* 8257[12] Errata No.7 */
3562 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
3563 
3564 			/* TARC1 bit 28 */
3565 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3566 				tarc1 &= ~__BIT(28);
3567 			else
3568 				tarc1 |= __BIT(28);
3569 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
3570 
3571 			/*
3572 			 * 8257[12] Errata No.13
3573 			 * Disable Dynamic Clock Gating.
3574 			 */
3575 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
3576 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
3577 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3578 			break;
3579 		case WM_T_82573:
3580 		case WM_T_82574:
3581 		case WM_T_82583:
3582 			if ((sc->sc_type == WM_T_82574)
3583 			    || (sc->sc_type == WM_T_82583))
3584 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
3585 
3586 			/* Extended Device Control */
3587 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
3588 			reg &= ~__BIT(23);	/* Clear bit 23 */
3589 			reg |= __BIT(22);	/* Set bit 22 */
3590 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3591 
3592 			/* Device Control */
3593 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
3594 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3595 
3596 			/* PCIe Control Register */
3597 			/*
3598 			 * 82573 Errata (unknown).
3599 			 *
3600 			 * 82574 Errata 25 and 82583 Errata 12
3601 			 * "Dropped Rx Packets":
3602 			 *   NVM image version 2.1.4 and newer does not have this bug.
3603 			 */
3604 			reg = CSR_READ(sc, WMREG_GCR);
3605 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
3606 			CSR_WRITE(sc, WMREG_GCR, reg);
3607 
3608 			if ((sc->sc_type == WM_T_82574)
3609 			    || (sc->sc_type == WM_T_82583)) {
3610 				/*
3611 				 * Document says this bit must be set for
3612 				 * proper operation.
3613 				 */
3614 				reg = CSR_READ(sc, WMREG_GCR);
3615 				reg |= __BIT(22);
3616 				CSR_WRITE(sc, WMREG_GCR, reg);
3617 
3618 				/*
3619 				 * Apply a workaround for the hardware
3620 				 * erratum, documented in the errata sheets,
3621 				 * where error-prone or unreliable PCIe
3622 				 * completions occur, particularly with ASPM
3623 				 * enabled.  Without the fix, the issue can
3624 				 * cause Tx timeouts.
3625 				 */
3626 				reg = CSR_READ(sc, WMREG_GCR2);
3627 				reg |= __BIT(0);
3628 				CSR_WRITE(sc, WMREG_GCR2, reg);
3629 			}
3630 			break;
3631 		case WM_T_80003:
3632 			/* TARC0 */
3633 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
3634 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3635 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
3636 
3637 			/* TARC1 bit 28 */
3638 			tarc1 = CSR_READ(sc, WMREG_TARC1);
3639 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3640 				tarc1 &= ~__BIT(28);
3641 			else
3642 				tarc1 |= __BIT(28);
3643 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
3644 			break;
3645 		case WM_T_ICH8:
3646 		case WM_T_ICH9:
3647 		case WM_T_ICH10:
3648 		case WM_T_PCH:
3649 		case WM_T_PCH2:
3650 		case WM_T_PCH_LPT:
3651 		case WM_T_PCH_SPT:
3652 			/* TARC0 */
3653 			if ((sc->sc_type == WM_T_ICH8)
3654 			    || (sc->sc_type == WM_T_PCH_SPT)) {
3655 				/* Set TARC0 bits 29 and 28 */
3656 				tarc0 |= __BITS(29, 28);
3657 			}
3658 			/* Set TARC0 bits 23,24,26,27 */
3659 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
3660 
3661 			/* CTRL_EXT */
3662 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
3663 			reg |= __BIT(22);	/* Set bit 22 */
3664 			/*
3665 			 * Enable PHY low-power state when MAC is at D3
3666 			 * w/o WoL
3667 			 */
3668 			if (sc->sc_type >= WM_T_PCH)
3669 				reg |= CTRL_EXT_PHYPDEN;
3670 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3671 
3672 			/* TARC1 */
3673 			tarc1 = CSR_READ(sc, WMREG_TARC1);
3674 			/* bit 28 */
3675 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3676 				tarc1 &= ~__BIT(28);
3677 			else
3678 				tarc1 |= __BIT(28);
3679 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
3680 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
3681 
3682 			/* Device Status */
3683 			if (sc->sc_type == WM_T_ICH8) {
3684 				reg = CSR_READ(sc, WMREG_STATUS);
3685 				reg &= ~__BIT(31);
3686 				CSR_WRITE(sc, WMREG_STATUS, reg);
3687 
3688 			}
3689 
3690 			/* IOSFPC */
3691 			if (sc->sc_type == WM_T_PCH_SPT) {
3692 				reg = CSR_READ(sc, WMREG_IOSFPC);
3693 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
3694 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
3695 			}
3696 			/*
3697 			 * Work-around descriptor data corruption issue during
3698 			 * NFS v2 UDP traffic, just disable the NFS filtering
3699 			 * capability.
3700 			 */
3701 			reg = CSR_READ(sc, WMREG_RFCTL);
3702 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
3703 			CSR_WRITE(sc, WMREG_RFCTL, reg);
3704 			break;
3705 		default:
3706 			break;
3707 		}
3708 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
3709 
3710 		switch (sc->sc_type) {
3711 		/*
3712 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
3713 		 * Avoid RSS Hash Value bug.
3714 		 */
3715 		case WM_T_82571:
3716 		case WM_T_82572:
3717 		case WM_T_82573:
3718 		case WM_T_80003:
3719 		case WM_T_ICH8:
3720 			reg = CSR_READ(sc, WMREG_RFCTL);
3721 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
3722 			CSR_WRITE(sc, WMREG_RFCTL, reg);
3723 			break;
3724 		default:
3725 			break;
3726 		}
3727 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
3728 		/*
3729 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
3730 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
3731 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
3732 		 * Correctly by the Device"
3733 		 *
3734 		 * I354(C2000) Errata AVR53:
3735 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
3736 		 * Hang"
3737 		 */
3738 		reg = CSR_READ(sc, WMREG_RFCTL);
3739 		reg |= WMREG_RFCTL_IPV6EXDIS;
3740 		CSR_WRITE(sc, WMREG_RFCTL, reg);
3741 	}
3742 }
3743 
3744 static uint32_t
3745 wm_rxpbs_adjust_82580(uint32_t val)
3746 {
3747 	uint32_t rv = 0;
3748 
3749 	if (val < __arraycount(wm_82580_rxpbs_table))
3750 		rv = wm_82580_rxpbs_table[val];
3751 
3752 	return rv;
3753 }
3754 
3755 /*
3756  * wm_reset_phy:
3757  *
3758  *	generic PHY reset function.
3759  *	Same as e1000_phy_hw_reset_generic()
3760  */
3761 static void
3762 wm_reset_phy(struct wm_softc *sc)
3763 {
3764 	uint32_t reg;
3765 
3766 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3767 		device_xname(sc->sc_dev), __func__));
3768 	if (wm_phy_resetisblocked(sc))
3769 		return;
3770 
3771 	sc->phy.acquire(sc);
3772 
3773 	reg = CSR_READ(sc, WMREG_CTRL);
3774 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
3775 	CSR_WRITE_FLUSH(sc);
3776 
3777 	delay(sc->phy.reset_delay_us);
3778 
3779 	CSR_WRITE(sc, WMREG_CTRL, reg);
3780 	CSR_WRITE_FLUSH(sc);
3781 
3782 	delay(150);
3783 
3784 	sc->phy.release(sc);
3785 
3786 	wm_get_cfg_done(sc);
3787 }
3788 
3789 static void
3790 wm_flush_desc_rings(struct wm_softc *sc)
3791 {
3792 	pcireg_t preg;
3793 	uint32_t reg;
3794 	int nexttx;
3795 
3796 	/* First, disable MULR fix in FEXTNVM11 */
3797 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
3798 	reg |= FEXTNVM11_DIS_MULRFIX;
3799 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
3800 
3801 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
3802 	reg = CSR_READ(sc, WMREG_TDLEN(0));
3803 	if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0) && (reg != 0)) {
3804 		struct wm_txqueue *txq;
3805 		wiseman_txdesc_t *txd;
3806 
3807 		/* TX */
3808 		printf("%s: Need TX flush (reg = %08x, len = %u)\n",
3809 		    device_xname(sc->sc_dev), preg, reg);
3810 		reg = CSR_READ(sc, WMREG_TCTL);
3811 		CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
3812 
3813 		txq = &sc->sc_queue[0].wmq_txq;
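		/*
		 * Post a single dummy 512-byte descriptor and advance
		 * the tail pointer so the hardware can drain the
		 * pending flush request.
		 */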
3814 		nexttx = txq->txq_next;
3815 		txd = &txq->txq_descs[nexttx];
3816 		wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
3817 		txd->wtx_cmdlen = htole32(WTX_CMD_IFCS| 512);
3818 		txd->wtx_fields.wtxu_status = 0;
3819 		txd->wtx_fields.wtxu_options = 0;
3820 		txd->wtx_fields.wtxu_vlan = 0;
3821 
3822 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
3823 			BUS_SPACE_BARRIER_WRITE);
3824 
3825 		txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
3826 		CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
3827 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
3828 			BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
3829 		delay(250);
3830 	}
3831 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
3832 	if (preg & DESCRING_STATUS_FLUSH_REQ) {
3833 		uint32_t rctl;
3834 
3835 		/* RX */
3836 		printf("%s: Need RX flush (reg = %08x)\n",
3837 		    device_xname(sc->sc_dev), preg);
3838 		rctl = CSR_READ(sc, WMREG_RCTL);
3839 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
3840 		CSR_WRITE_FLUSH(sc);
3841 		delay(150);
3842 
3843 		reg = CSR_READ(sc, WMREG_RXDCTL(0));
3844 		/* zero the lower 14 bits (prefetch and host thresholds) */
3845 		reg &= 0xffffc000;
3846 		/*
3847 		 * update thresholds: prefetch threshold to 31, host threshold
3848 		 * to 1 and make sure the granularity is "descriptors" and not
3849 		 * "cache lines"
3850 		 */
3851 		reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
3852 		CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
3853 
3854 		/*
3855 		 * momentarily enable the RX ring for the changes to take
3856 		 * effect
3857 		 */
3858 		CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
3859 		CSR_WRITE_FLUSH(sc);
3860 		delay(150);
3861 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
3862 	}
3863 }
3864 
3865 /*
3866  * wm_reset:
3867  *
3868  *	Reset the i82542 chip.
3869  */
3870 static void
3871 wm_reset(struct wm_softc *sc)
3872 {
3873 	int phy_reset = 0;
3874 	int i, error = 0;
3875 	uint32_t reg;
3876 
3877 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3878 		device_xname(sc->sc_dev), __func__));
3879 	KASSERT(sc->sc_type != 0);
3880 
3881 	/*
3882 	 * Allocate on-chip memory according to the MTU size.
3883 	 * The Packet Buffer Allocation register must be written
3884 	 * before the chip is reset.
3885 	 */
3886 	switch (sc->sc_type) {
3887 	case WM_T_82547:
3888 	case WM_T_82547_2:
3889 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3890 		    PBA_22K : PBA_30K;
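		/*
		 * Whatever remains of the 40K packet buffer above sc_pba
		 * becomes the Tx FIFO; e.g. PBA_30K leaves a 10K Tx FIFO
		 * for standard MTUs.
		 */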
3891 		for (i = 0; i < sc->sc_nqueues; i++) {
3892 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
3893 			txq->txq_fifo_head = 0;
3894 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3895 			txq->txq_fifo_size =
3896 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3897 			txq->txq_fifo_stall = 0;
3898 		}
3899 		break;
3900 	case WM_T_82571:
3901 	case WM_T_82572:
3902 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
3903 	case WM_T_80003:
3904 		sc->sc_pba = PBA_32K;
3905 		break;
3906 	case WM_T_82573:
3907 		sc->sc_pba = PBA_12K;
3908 		break;
3909 	case WM_T_82574:
3910 	case WM_T_82583:
3911 		sc->sc_pba = PBA_20K;
3912 		break;
3913 	case WM_T_82576:
3914 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
3915 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
3916 		break;
3917 	case WM_T_82580:
3918 	case WM_T_I350:
3919 	case WM_T_I354:
3920 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
3921 		break;
3922 	case WM_T_I210:
3923 	case WM_T_I211:
3924 		sc->sc_pba = PBA_34K;
3925 		break;
3926 	case WM_T_ICH8:
3927 		/* Workaround for a bit corruption issue in FIFO memory */
3928 		sc->sc_pba = PBA_8K;
3929 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3930 		break;
3931 	case WM_T_ICH9:
3932 	case WM_T_ICH10:
3933 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
3934 		    PBA_14K : PBA_10K;
3935 		break;
3936 	case WM_T_PCH:
3937 	case WM_T_PCH2:
3938 	case WM_T_PCH_LPT:
3939 	case WM_T_PCH_SPT:
3940 		sc->sc_pba = PBA_26K;
3941 		break;
3942 	default:
3943 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3944 		    PBA_40K : PBA_48K;
3945 		break;
3946 	}
3947 	/*
3948 	 * Only old or non-multiqueue devices have the PBA register
3949 	 * XXX Need special handling for 82575.
3950 	 */
3951 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3952 	    || (sc->sc_type == WM_T_82575))
3953 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3954 
3955 	/* Prevent the PCI-E bus from sticking */
3956 	if (sc->sc_flags & WM_F_PCIE) {
3957 		int timeout = 800;
3958 
3959 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
3960 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3961 
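		/*
		 * Poll for up to 80ms (800 iterations of 100us) for
		 * outstanding GIO master requests to drain.
		 */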
3962 		while (timeout--) {
3963 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3964 			    == 0)
3965 				break;
3966 			delay(100);
3967 		}
3968 	}
3969 
3970 	/* Set the completion timeout for interface */
3971 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3972 	    || (sc->sc_type == WM_T_82580)
3973 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3974 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3975 		wm_set_pcie_completion_timeout(sc);
3976 
3977 	/* Clear interrupt */
3978 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3979 	if (sc->sc_nintrs > 1) {
3980 		if (sc->sc_type != WM_T_82574) {
3981 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
3982 			CSR_WRITE(sc, WMREG_EIAC, 0);
3983 		} else {
3984 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
3985 		}
3986 	}
3987 
3988 	/* Stop the transmit and receive processes. */
3989 	CSR_WRITE(sc, WMREG_RCTL, 0);
3990 	sc->sc_rctl &= ~RCTL_EN;
3991 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3992 	CSR_WRITE_FLUSH(sc);
3993 
3994 	/* XXX set_tbi_sbp_82543() */
3995 
3996 	delay(10*1000);
3997 
3998 	/* Must acquire the MDIO ownership before MAC reset */
3999 	switch (sc->sc_type) {
4000 	case WM_T_82573:
4001 	case WM_T_82574:
4002 	case WM_T_82583:
4003 		error = wm_get_hw_semaphore_82573(sc);
4004 		break;
4005 	default:
4006 		break;
4007 	}
4008 
4009 	/*
4010 	 * 82541 Errata 29? & 82547 Errata 28?
4011 	 * See also the description about PHY_RST bit in CTRL register
4012 	 * in 8254x_GBe_SDM.pdf.
4013 	 */
4014 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
4015 		CSR_WRITE(sc, WMREG_CTRL,
4016 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
4017 		CSR_WRITE_FLUSH(sc);
4018 		delay(5000);
4019 	}
4020 
4021 	switch (sc->sc_type) {
4022 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
4023 	case WM_T_82541:
4024 	case WM_T_82541_2:
4025 	case WM_T_82547:
4026 	case WM_T_82547_2:
4027 		/*
4028 		 * On some chipsets, a reset through a memory-mapped write
4029 		 * cycle can cause the chip to reset before completing the
4030 		 * write cycle.  This causes major headaches that can be
4031 		 * avoided by issuing the reset via indirect register writes
4032 		 * through I/O space.
4033 		 *
4034 		 * So, if we successfully mapped the I/O BAR at attach time,
4035 		 * use that.  Otherwise, try our luck with a memory-mapped
4036 		 * reset.
4037 		 */
4038 		if (sc->sc_flags & WM_F_IOH_VALID)
4039 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
4040 		else
4041 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
4042 		break;
4043 	case WM_T_82545_3:
4044 	case WM_T_82546_3:
4045 		/* Use the shadow control register on these chips. */
4046 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
4047 		break;
4048 	case WM_T_80003:
4049 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4050 		sc->phy.acquire(sc);
4051 		CSR_WRITE(sc, WMREG_CTRL, reg);
4052 		sc->phy.release(sc);
4053 		break;
4054 	case WM_T_ICH8:
4055 	case WM_T_ICH9:
4056 	case WM_T_ICH10:
4057 	case WM_T_PCH:
4058 	case WM_T_PCH2:
4059 	case WM_T_PCH_LPT:
4060 	case WM_T_PCH_SPT:
4061 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4062 		if (wm_phy_resetisblocked(sc) == false) {
4063 			/*
4064 			 * Gate automatic PHY configuration by hardware on
4065 			 * non-managed 82579
4066 			 */
4067 			if ((sc->sc_type == WM_T_PCH2)
4068 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
4069 				== 0))
4070 				wm_gate_hw_phy_config_ich8lan(sc, true);
4071 
4072 			reg |= CTRL_PHY_RESET;
4073 			phy_reset = 1;
4074 		} else
4075 			printf("XXX reset is blocked!!!\n");
4076 		sc->phy.acquire(sc);
4077 		CSR_WRITE(sc, WMREG_CTRL, reg);
4078 		/* Don't insert a completion barrier during reset */
4079 		delay(20*1000);
4080 		mutex_exit(sc->sc_ich_phymtx);
4081 		break;
4082 	case WM_T_82580:
4083 	case WM_T_I350:
4084 	case WM_T_I354:
4085 	case WM_T_I210:
4086 	case WM_T_I211:
4087 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4088 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
4089 			CSR_WRITE_FLUSH(sc);
4090 		delay(5000);
4091 		break;
4092 	case WM_T_82542_2_0:
4093 	case WM_T_82542_2_1:
4094 	case WM_T_82543:
4095 	case WM_T_82540:
4096 	case WM_T_82545:
4097 	case WM_T_82546:
4098 	case WM_T_82571:
4099 	case WM_T_82572:
4100 	case WM_T_82573:
4101 	case WM_T_82574:
4102 	case WM_T_82575:
4103 	case WM_T_82576:
4104 	case WM_T_82583:
4105 	default:
4106 		/* Everything else can safely use the documented method. */
4107 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4108 		break;
4109 	}
4110 
4111 	/* Must release the MDIO ownership after MAC reset */
4112 	switch (sc->sc_type) {
4113 	case WM_T_82573:
4114 	case WM_T_82574:
4115 	case WM_T_82583:
4116 		if (error == 0)
4117 			wm_put_hw_semaphore_82573(sc);
4118 		break;
4119 	default:
4120 		break;
4121 	}
4122 
4123 	if (phy_reset != 0)
4124 		wm_get_cfg_done(sc);
4125 
4126 	/* reload EEPROM */
4127 	switch (sc->sc_type) {
4128 	case WM_T_82542_2_0:
4129 	case WM_T_82542_2_1:
4130 	case WM_T_82543:
4131 	case WM_T_82544:
4132 		delay(10);
4133 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4134 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4135 		CSR_WRITE_FLUSH(sc);
4136 		delay(2000);
4137 		break;
4138 	case WM_T_82540:
4139 	case WM_T_82545:
4140 	case WM_T_82545_3:
4141 	case WM_T_82546:
4142 	case WM_T_82546_3:
4143 		delay(5*1000);
4144 		/* XXX Disable HW ARPs on ASF enabled adapters */
4145 		break;
4146 	case WM_T_82541:
4147 	case WM_T_82541_2:
4148 	case WM_T_82547:
4149 	case WM_T_82547_2:
4150 		delay(20000);
4151 		/* XXX Disable HW ARPs on ASF enabled adapters */
4152 		break;
4153 	case WM_T_82571:
4154 	case WM_T_82572:
4155 	case WM_T_82573:
4156 	case WM_T_82574:
4157 	case WM_T_82583:
4158 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
4159 			delay(10);
4160 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4161 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4162 			CSR_WRITE_FLUSH(sc);
4163 		}
4164 		/* check EECD_EE_AUTORD */
4165 		wm_get_auto_rd_done(sc);
4166 		/*
4167 		 * PHY configuration from the NVM starts just after EECD_AUTO_RD
4168 		 * is set.
4169 		 */
4170 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4171 		    || (sc->sc_type == WM_T_82583))
4172 			delay(25*1000);
4173 		break;
4174 	case WM_T_82575:
4175 	case WM_T_82576:
4176 	case WM_T_82580:
4177 	case WM_T_I350:
4178 	case WM_T_I354:
4179 	case WM_T_I210:
4180 	case WM_T_I211:
4181 	case WM_T_80003:
4182 		/* check EECD_EE_AUTORD */
4183 		wm_get_auto_rd_done(sc);
4184 		break;
4185 	case WM_T_ICH8:
4186 	case WM_T_ICH9:
4187 	case WM_T_ICH10:
4188 	case WM_T_PCH:
4189 	case WM_T_PCH2:
4190 	case WM_T_PCH_LPT:
4191 	case WM_T_PCH_SPT:
4192 		break;
4193 	default:
4194 		panic("%s: unknown type\n", __func__);
4195 	}
4196 
4197 	/* Check whether EEPROM is present or not */
4198 	switch (sc->sc_type) {
4199 	case WM_T_82575:
4200 	case WM_T_82576:
4201 	case WM_T_82580:
4202 	case WM_T_I350:
4203 	case WM_T_I354:
4204 	case WM_T_ICH8:
4205 	case WM_T_ICH9:
4206 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4207 			/* Not found */
4208 			sc->sc_flags |= WM_F_EEPROM_INVALID;
4209 			if (sc->sc_type == WM_T_82575)
4210 				wm_reset_init_script_82575(sc);
4211 		}
4212 		break;
4213 	default:
4214 		break;
4215 	}
4216 
4217 	if ((sc->sc_type == WM_T_82580)
4218 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
4219 		/* clear global device reset status bit */
4220 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4221 	}
4222 
4223 	/* Clear any pending interrupt events. */
4224 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4225 	reg = CSR_READ(sc, WMREG_ICR);
4226 	if (sc->sc_nintrs > 1) {
4227 		if (sc->sc_type != WM_T_82574) {
4228 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4229 			CSR_WRITE(sc, WMREG_EIAC, 0);
4230 		} else
4231 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4232 	}
4233 
4234 	/* reload sc_ctrl */
4235 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4236 
4237 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
4238 		wm_set_eee_i350(sc);
4239 
4240 	/* Clear the host wakeup bit after lcd reset */
4241 	if (sc->sc_type >= WM_T_PCH) {
4242 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
4243 		    BM_PORT_GEN_CFG);
4244 		reg &= ~BM_WUC_HOST_WU_BIT;
4245 		wm_gmii_hv_writereg(sc->sc_dev, 2,
4246 		    BM_PORT_GEN_CFG, reg);
4247 	}
4248 
4249 	/*
4250 	 * For PCH, this write will make sure that any noise will be detected
4251 	 * as a CRC error and be dropped rather than show up as a bad packet
4252 	 * to the DMA engine
4253 	 */
4254 	if (sc->sc_type == WM_T_PCH)
4255 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4256 
4257 	if (sc->sc_type >= WM_T_82544)
4258 		CSR_WRITE(sc, WMREG_WUC, 0);
4259 
4260 	wm_reset_mdicnfg_82580(sc);
4261 
4262 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
4263 		wm_pll_workaround_i210(sc);
4264 }
4265 
4266 /*
4267  * wm_add_rxbuf:
4268  *
4269  *	Add a receive buffer to the indicated descriptor.
4270  */
4271 static int
4272 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
4273 {
4274 	struct wm_softc *sc = rxq->rxq_sc;
4275 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
4276 	struct mbuf *m;
4277 	int error;
4278 
4279 	KASSERT(mutex_owned(rxq->rxq_lock));
4280 
4281 	MGETHDR(m, M_DONTWAIT, MT_DATA);
4282 	if (m == NULL)
4283 		return ENOBUFS;
4284 
4285 	MCLGET(m, M_DONTWAIT);
4286 	if ((m->m_flags & M_EXT) == 0) {
4287 		m_freem(m);
4288 		return ENOBUFS;
4289 	}
4290 
4291 	if (rxs->rxs_mbuf != NULL)
4292 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4293 
4294 	rxs->rxs_mbuf = m;
4295 
4296 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4297 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4298 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
4299 	if (error) {
4300 		/* XXX XXX XXX */
4301 		aprint_error_dev(sc->sc_dev,
4302 		    "unable to load rx DMA map %d, error = %d\n",
4303 		    idx, error);
4304 		panic("wm_add_rxbuf");
4305 	}
4306 
4307 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4308 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4309 
4310 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4311 		if ((sc->sc_rctl & RCTL_EN) != 0)
4312 			wm_init_rxdesc(rxq, idx);
4313 	} else
4314 		wm_init_rxdesc(rxq, idx);
4315 
4316 	return 0;
4317 }
4318 
4319 /*
4320  * wm_rxdrain:
4321  *
4322  *	Drain the receive queue.
4323  */
4324 static void
4325 wm_rxdrain(struct wm_rxqueue *rxq)
4326 {
4327 	struct wm_softc *sc = rxq->rxq_sc;
4328 	struct wm_rxsoft *rxs;
4329 	int i;
4330 
4331 	KASSERT(mutex_owned(rxq->rxq_lock));
4332 
4333 	for (i = 0; i < WM_NRXDESC; i++) {
4334 		rxs = &rxq->rxq_soft[i];
4335 		if (rxs->rxs_mbuf != NULL) {
4336 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4337 			m_freem(rxs->rxs_mbuf);
4338 			rxs->rxs_mbuf = NULL;
4339 		}
4340 	}
4341 }
4342 
4343 
4344 /*
4345  * XXX copy from FreeBSD's sys/net/rss_config.c
4346  */
4347 /*
4348  * RSS secret key, intended to prevent attacks on load-balancing.  Its
4349  * effectiveness may be limited by algorithm choice and available entropy
4350  * during the boot.
4351  *
4352  * XXXRW: And that we don't randomize it yet!
4353  *
4354  * This is the default Microsoft RSS specification key which is also
4355  * the Chelsio T5 firmware default key.
4356  */
4357 #define RSS_KEYSIZE 40
4358 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
4359 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
4360 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
4361 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
4362 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
4363 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
4364 };
4365 
4366 /*
4367  * Caller must pass an array of size sizeof(rss_key).
4368  *
4369  * XXX
4370  * Since if_ixgbe may use this function, it should not be an
4371  * if_wm-specific function.
4372  */
4373 static void
4374 wm_rss_getkey(uint8_t *key)
4375 {
4376 
4377 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
4378 }
4379 
4380 /*
4381  * Setup registers for RSS.
4382  *
4383  * XXX VMDq is not yet supported
4384  */
4385 static void
4386 wm_init_rss(struct wm_softc *sc)
4387 {
4388 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
4389 	int i;
4390 
4391 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
4392 
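	/*
	 * Fill the redirection table round-robin over the active
	 * queues; e.g. with 4 queues the RETA entries map to queues
	 * 0, 1, 2, 3, 0, 1, ... in order.
	 */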
4393 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
4394 		int qid, reta_ent;
4395 
4396 		qid  = i % sc->sc_nqueues;
4397 		switch (sc->sc_type) {
4398 		case WM_T_82574:
4399 			reta_ent = __SHIFTIN(qid,
4400 			    RETA_ENT_QINDEX_MASK_82574);
4401 			break;
4402 		case WM_T_82575:
4403 			reta_ent = __SHIFTIN(qid,
4404 			    RETA_ENT_QINDEX1_MASK_82575);
4405 			break;
4406 		default:
4407 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
4408 			break;
4409 		}
4410 
4411 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
4412 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
4413 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
4414 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
4415 	}
4416 
4417 	wm_rss_getkey((uint8_t *)rss_key);
4418 	for (i = 0; i < RSSRK_NUM_REGS; i++)
4419 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
4420 
4421 	if (sc->sc_type == WM_T_82574)
4422 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
4423 	else
4424 		mrqc = MRQC_ENABLE_RSS_MQ;
4425 
4426 	/*
4427 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
4428 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
4429 	 */
4430 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
4431 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
4432 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
4433 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
4434 
4435 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
4436 }
4437 
4438 /*
4439  * Adjust the TX and RX queue numbers which the system actually uses.
4440  *
4441  * The numbers are affected by the parameters below:
4442  *     - The number of hardware queues
4443  *     - The number of MSI-X vectors (= "nvectors" argument)
4444  *     - ncpu
4445  */
4446 static void
4447 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
4448 {
4449 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
4450 
4451 	if (nvectors < 2) {
4452 		sc->sc_nqueues = 1;
4453 		return;
4454 	}
4455 
4456 	switch (sc->sc_type) {
4457 	case WM_T_82572:
4458 		hw_ntxqueues = 2;
4459 		hw_nrxqueues = 2;
4460 		break;
4461 	case WM_T_82574:
4462 		hw_ntxqueues = 2;
4463 		hw_nrxqueues = 2;
4464 		break;
4465 	case WM_T_82575:
4466 		hw_ntxqueues = 4;
4467 		hw_nrxqueues = 4;
4468 		break;
4469 	case WM_T_82576:
4470 		hw_ntxqueues = 16;
4471 		hw_nrxqueues = 16;
4472 		break;
4473 	case WM_T_82580:
4474 	case WM_T_I350:
4475 	case WM_T_I354:
4476 		hw_ntxqueues = 8;
4477 		hw_nrxqueues = 8;
4478 		break;
4479 	case WM_T_I210:
4480 		hw_ntxqueues = 4;
4481 		hw_nrxqueues = 4;
4482 		break;
4483 	case WM_T_I211:
4484 		hw_ntxqueues = 2;
4485 		hw_nrxqueues = 2;
4486 		break;
4487 		/*
4488 		 * The ethernet controllers below do not support MSI-X,
4489 		 * so this driver does not use multiqueue on them:
4490 		 *     - WM_T_80003
4491 		 *     - WM_T_ICH8
4492 		 *     - WM_T_ICH9
4493 		 *     - WM_T_ICH10
4494 		 *     - WM_T_PCH
4495 		 *     - WM_T_PCH2
4496 		 *     - WM_T_PCH_LPT
4497 		 */
4498 	default:
4499 		hw_ntxqueues = 1;
4500 		hw_nrxqueues = 1;
4501 		break;
4502 	}
4503 
4504 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
4505 
4506 	/*
4507 	 * Since more queues than MSI-X vectors cannot improve scaling, we
4508 	 * limit the number of queues actually used.
4509 	 */
4510 	if (nvectors < hw_nqueues + 1) {
4511 		sc->sc_nqueues = nvectors - 1;
4512 	} else {
4513 		sc->sc_nqueues = hw_nqueues;
4514 	}
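	/*
	 * Example: an 82576 (16 hardware queues) probed with 5 MSI-X
	 * vectors ends up with 5 - 1 = 4 Tx/Rx queues here, since one
	 * vector is reserved for the link interrupt.
	 */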
4515 
4516 	/*
4517 	 * Since more queues than CPUs cannot improve scaling, we limit
4518 	 * the number of queues actually used.
4519 	 */
4520 	if (ncpu < sc->sc_nqueues)
4521 		sc->sc_nqueues = ncpu;
4522 }
4523 
4524 /*
4525  * Both single interrupt MSI and INTx can use this function.
4526  */
4527 static int
4528 wm_setup_legacy(struct wm_softc *sc)
4529 {
4530 	pci_chipset_tag_t pc = sc->sc_pc;
4531 	const char *intrstr = NULL;
4532 	char intrbuf[PCI_INTRSTR_LEN];
4533 	int error;
4534 
4535 	error = wm_alloc_txrx_queues(sc);
4536 	if (error) {
4537 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
4538 		    error);
4539 		return ENOMEM;
4540 	}
4541 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
4542 	    sizeof(intrbuf));
4543 #ifdef WM_MPSAFE
4544 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
4545 #endif
4546 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
4547 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
4548 	if (sc->sc_ihs[0] == NULL) {
4549 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
4550 		    (pci_intr_type(pc, sc->sc_intrs[0])
4551 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
4552 		return ENOMEM;
4553 	}
4554 
4555 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
4556 	sc->sc_nintrs = 1;
4557 	return 0;
4558 }
4559 
4560 static int
4561 wm_setup_msix(struct wm_softc *sc)
4562 {
4563 	void *vih;
4564 	kcpuset_t *affinity;
4565 	int qidx, error, intr_idx, txrx_established;
4566 	pci_chipset_tag_t pc = sc->sc_pc;
4567 	const char *intrstr = NULL;
4568 	char intrbuf[PCI_INTRSTR_LEN];
4569 	char intr_xname[INTRDEVNAMEBUF];
4570 
4571 	if (sc->sc_nqueues < ncpu) {
4572 		/*
4573 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
4574 		 * interrupts starts from CPU#1.
4575 		 */
4576 		sc->sc_affinity_offset = 1;
4577 	} else {
4578 		/*
4579 		 * In this case, this device uses all CPUs, so we unify the
4580 		 * affinitized cpu_index to the MSI-X vector number for readability.
4581 		 */
4582 		sc->sc_affinity_offset = 0;
4583 	}
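	/*
	 * Example: with 4 queues on an 8-CPU machine, the Tx/Rx vectors
	 * are bound round-robin to cpu1..cpu4, leaving cpu0 free; the
	 * LINK vector established below keeps its default affinity.
	 */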
4584 
4585 	error = wm_alloc_txrx_queues(sc);
4586 	if (error) {
4587 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
4588 		    error);
4589 		return ENOMEM;
4590 	}
4591 
4592 	kcpuset_create(&affinity, false);
4593 	intr_idx = 0;
4594 
4595 	/*
4596 	 * TX and RX
4597 	 */
4598 	txrx_established = 0;
4599 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
4600 		struct wm_queue *wmq = &sc->sc_queue[qidx];
4601 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
4602 
4603 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4604 		    sizeof(intrbuf));
4605 #ifdef WM_MPSAFE
4606 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
4607 		    PCI_INTR_MPSAFE, true);
4608 #endif
4609 		memset(intr_xname, 0, sizeof(intr_xname));
4610 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
4611 		    device_xname(sc->sc_dev), qidx);
4612 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4613 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
4614 		if (vih == NULL) {
4615 			aprint_error_dev(sc->sc_dev,
4616 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
4617 			    intrstr ? " at " : "",
4618 			    intrstr ? intrstr : "");
4619 
4620 			goto fail;
4621 		}
4622 		kcpuset_zero(affinity);
4623 		/* Round-robin affinity */
4624 		kcpuset_set(affinity, affinity_to);
4625 		error = interrupt_distribute(vih, affinity, NULL);
4626 		if (error == 0) {
4627 			aprint_normal_dev(sc->sc_dev,
4628 			    "for TX and RX interrupting at %s affinity to %u\n",
4629 			    intrstr, affinity_to);
4630 		} else {
4631 			aprint_normal_dev(sc->sc_dev,
4632 			    "for TX and RX interrupting at %s\n", intrstr);
4633 		}
4634 		sc->sc_ihs[intr_idx] = vih;
4635 		wmq->wmq_id = qidx;
4636 		wmq->wmq_intr_idx = intr_idx;
4637 
4638 		txrx_established++;
4639 		intr_idx++;
4640 	}
4641 
4642 	/*
4643 	 * LINK
4644 	 */
4645 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4646 	    sizeof(intrbuf));
4647 #ifdef WM_MPSAFE
4648 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
4649 #endif
4650 	memset(intr_xname, 0, sizeof(intr_xname));
4651 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
4652 	    device_xname(sc->sc_dev));
4653 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4654 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
4655 	if (vih == NULL) {
4656 		aprint_error_dev(sc->sc_dev,
4657 		    "unable to establish MSI-X(for LINK)%s%s\n",
4658 		    intrstr ? " at " : "",
4659 		    intrstr ? intrstr : "");
4660 
4661 		goto fail;
4662 	}
4663 	/* Keep the default affinity for the LINK interrupt */
4664 	aprint_normal_dev(sc->sc_dev,
4665 	    "for LINK interrupting at %s\n", intrstr);
4666 	sc->sc_ihs[intr_idx] = vih;
4667 	sc->sc_link_intr_idx = intr_idx;
4668 
4669 	sc->sc_nintrs = sc->sc_nqueues + 1;
4670 	kcpuset_destroy(affinity);
4671 	return 0;
4672 
4673  fail:
4674 	for (qidx = 0; qidx < txrx_established; qidx++) {
4675 		struct wm_queue *wmq = &sc->sc_queue[qidx];
4676 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
4677 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
4678 	}
4679 
4680 	kcpuset_destroy(affinity);
4681 	return ENOMEM;
4682 }
4683 
4684 static void
4685 wm_turnon(struct wm_softc *sc)
4686 {
4687 	int i;
4688 
4689 	KASSERT(WM_CORE_LOCKED(sc));
4690 
4691 	for (i = 0; i < sc->sc_nqueues; i++) {
4692 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
4693 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
4694 
4695 		mutex_enter(txq->txq_lock);
4696 		txq->txq_stopping = false;
4697 		mutex_exit(txq->txq_lock);
4698 
4699 		mutex_enter(rxq->rxq_lock);
4700 		rxq->rxq_stopping = false;
4701 		mutex_exit(rxq->rxq_lock);
4702 	}
4703 
4704 	sc->sc_core_stopping = false;
4705 }
4706 
4707 static void
4708 wm_turnoff(struct wm_softc *sc)
4709 {
4710 	int i;
4711 
4712 	KASSERT(WM_CORE_LOCKED(sc));
4713 
4714 	sc->sc_core_stopping = true;
4715 
4716 	for (i = 0; i < sc->sc_nqueues; i++) {
4717 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
4718 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
4719 
4720 		mutex_enter(rxq->rxq_lock);
4721 		rxq->rxq_stopping = true;
4722 		mutex_exit(rxq->rxq_lock);
4723 
4724 		mutex_enter(txq->txq_lock);
4725 		txq->txq_stopping = true;
4726 		mutex_exit(txq->txq_lock);
4727 	}
4728 }
4729 
4730 /*
4731  * wm_init:		[ifnet interface function]
4732  *
4733  *	Initialize the interface.
4734  */
4735 static int
4736 wm_init(struct ifnet *ifp)
4737 {
4738 	struct wm_softc *sc = ifp->if_softc;
4739 	int ret;
4740 
4741 	WM_CORE_LOCK(sc);
4742 	ret = wm_init_locked(ifp);
4743 	WM_CORE_UNLOCK(sc);
4744 
4745 	return ret;
4746 }
4747 
4748 static int
4749 wm_init_locked(struct ifnet *ifp)
4750 {
4751 	struct wm_softc *sc = ifp->if_softc;
4752 	int i, j, trynum, error = 0;
4753 	uint32_t reg;
4754 
4755 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4756 		device_xname(sc->sc_dev), __func__));
4757 	KASSERT(WM_CORE_LOCKED(sc));
4758 
4759 	/*
4760 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4761 	 * There is a small but measurable benefit to avoiding the adjustment
4762 	 * of the descriptor so that the headers are aligned, for normal mtu,
4763 	 * on such platforms.  One possibility is that the DMA itself is
4764 	 * slightly more efficient if the front of the entire packet (instead
4765 	 * of the front of the headers) is aligned.
4766 	 *
4767 	 * Note we must always set align_tweak to 0 if we are using
4768 	 * jumbo frames.
4769 	 */
4770 #ifdef __NO_STRICT_ALIGNMENT
4771 	sc->sc_align_tweak = 0;
4772 #else
4773 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4774 		sc->sc_align_tweak = 0;
4775 	else
4776 		sc->sc_align_tweak = 2;
4777 #endif /* __NO_STRICT_ALIGNMENT */
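	/*
	 * A 2-byte tweak offsets the 14-byte Ethernet header so that
	 * the IP header which follows it lands on a 4-byte boundary
	 * on strict-alignment platforms.
	 */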
4778 
4779 	/* Cancel any pending I/O. */
4780 	wm_stop_locked(ifp, 0);
4781 
4782 	/* update statistics before reset */
4783 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4784 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4785 
4786 	/* PCH_SPT hardware workaround */
4787 	if (sc->sc_type == WM_T_PCH_SPT)
4788 		wm_flush_desc_rings(sc);
4789 
4790 	/* Reset the chip to a known state. */
4791 	wm_reset(sc);
4792 
4793 	/* AMT based hardware can now take control from firmware */
4794 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
4795 		wm_get_hw_control(sc);
4796 
4797 	/* Init hardware bits */
4798 	wm_initialize_hardware_bits(sc);
4799 
4800 	/* Reset the PHY. */
4801 	if (sc->sc_flags & WM_F_HAS_MII)
4802 		wm_gmii_reset(sc);
4803 
4804 	/* Calculate (E)ITR value */
4805 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4806 		sc->sc_itr = 450;	/* For EITR */
4807 	} else if (sc->sc_type >= WM_T_82543) {
4808 		/*
4809 		 * Set up the interrupt throttling register (units of 256ns)
4810 		 * Note that a footnote in Intel's documentation says this
4811 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4812 		 * or 10Mbit mode.  Empirically, it appears to be the case
4813 		 * that that is also true for the 1024ns units of the other
4814 		 * interrupt-related timer registers -- so, really, we ought
4815 		 * to divide this value by 4 when the link speed is low.
4816 		 *
4817 		 * XXX implement this division at link speed change!
4818 		 */
4819 
4820 		/*
4821 		 * For N interrupts/sec, set this value to:
4822 		 * 1000000000 / (N * 256).  Note that we set the
4823 		 * absolute and packet timer values to this value
4824 		 * divided by 4 to get "simple timer" behavior.
4825 		 */
4826 
4827 		sc->sc_itr = 1500;		/* 2604 ints/sec */
4828 	}
4829 
4830 	error = wm_init_txrx_queues(sc);
4831 	if (error)
4832 		goto out;
4833 
4834 	/*
4835 	 * Clear out the VLAN table -- we don't use it (yet).
4836 	 */
4837 	CSR_WRITE(sc, WMREG_VET, 0);
4838 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4839 		trynum = 10; /* Due to hw errata */
4840 	else
4841 		trynum = 1;
4842 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
4843 		for (j = 0; j < trynum; j++)
4844 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4845 
4846 	/*
4847 	 * Set up flow-control parameters.
4848 	 *
4849 	 * XXX Values could probably stand some tuning.
4850 	 */
4851 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4852 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4853 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
4854 	    && (sc->sc_type != WM_T_PCH_SPT)) {
4855 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4856 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4857 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4858 	}
4859 
4860 	sc->sc_fcrtl = FCRTL_DFLT;
4861 	if (sc->sc_type < WM_T_82543) {
4862 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4863 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4864 	} else {
4865 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4866 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4867 	}
4868 
4869 	if (sc->sc_type == WM_T_80003)
4870 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4871 	else
4872 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4873 
4874 	/* Writes the control register. */
4875 	wm_set_vlan(sc);
4876 
4877 	if (sc->sc_flags & WM_F_HAS_MII) {
4878 		int val;
4879 
4880 		switch (sc->sc_type) {
4881 		case WM_T_80003:
4882 		case WM_T_ICH8:
4883 		case WM_T_ICH9:
4884 		case WM_T_ICH10:
4885 		case WM_T_PCH:
4886 		case WM_T_PCH2:
4887 		case WM_T_PCH_LPT:
4888 		case WM_T_PCH_SPT:
4889 			/*
4890 			 * Set the MAC to wait the maximum time between each
4891 			 * iteration and increase the maximum number of
4892 			 * iterations when polling the PHY; this fixes
4893 			 * erroneous timeouts at 10Mbps.
4894 			 */
4895 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4896 			    0xFFFF);
4897 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
4898 			val |= 0x3F;
4899 			wm_kmrn_writereg(sc,
4900 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
4901 			break;
4902 		default:
4903 			break;
4904 		}
4905 
4906 		if (sc->sc_type == WM_T_80003) {
4907 			val = CSR_READ(sc, WMREG_CTRL_EXT);
4908 			val &= ~CTRL_EXT_LINK_MODE_MASK;
4909 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4910 
4911 			/* Bypass RX and TX FIFO's */
4912 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4913 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4914 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4915 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4916 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4917 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4918 		}
4919 	}
4920 #if 0
4921 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4922 #endif
4923 
4924 	/* Set up checksum offload parameters. */
4925 	reg = CSR_READ(sc, WMREG_RXCSUM);
4926 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4927 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4928 		reg |= RXCSUM_IPOFL;
4929 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4930 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4931 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4932 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4933 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
4934 
4935 	/* Set up MSI-X */
4936 	if (sc->sc_nintrs > 1) {
4937 		uint32_t ivar;
4938 		struct wm_queue *wmq;
4939 		int qid, qintr_idx;
4940 
4941 		if (sc->sc_type == WM_T_82575) {
4942 			/* Interrupt control */
4943 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4944 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
4945 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4946 
4947 			/* TX and RX */
4948 			for (i = 0; i < sc->sc_nqueues; i++) {
4949 				wmq = &sc->sc_queue[i];
4950 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
4951 				    EITR_TX_QUEUE(wmq->wmq_id)
4952 				    | EITR_RX_QUEUE(wmq->wmq_id));
4953 			}
4954 			/* Link status */
4955 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
4956 			    EITR_OTHER);
4957 		} else if (sc->sc_type == WM_T_82574) {
4958 			/* Interrupt control */
4959 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4960 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
4961 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4962 
4963 			ivar = 0;
4964 			/* TX and RX */
4965 			for (i = 0; i < sc->sc_nqueues; i++) {
4966 				wmq = &sc->sc_queue[i];
4967 				qid = wmq->wmq_id;
4968 				qintr_idx = wmq->wmq_intr_idx;
4969 
4970 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
4971 				    IVAR_TX_MASK_Q_82574(qid));
4972 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
4973 				    IVAR_RX_MASK_Q_82574(qid));
4974 			}
4975 			/* Link status */
4976 			ivar |= __SHIFTIN((IVAR_VALID_82574
4977 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
4978 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
4979 		} else {
4980 			/* Interrupt control */
4981 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
4982 			    | GPIE_EIAME | GPIE_PBA);
4983 
4984 			switch (sc->sc_type) {
4985 			case WM_T_82580:
4986 			case WM_T_I350:
4987 			case WM_T_I354:
4988 			case WM_T_I210:
4989 			case WM_T_I211:
4990 				/* TX and RX */
4991 				for (i = 0; i < sc->sc_nqueues; i++) {
4992 					wmq = &sc->sc_queue[i];
4993 					qid = wmq->wmq_id;
4994 					qintr_idx = wmq->wmq_intr_idx;
4995 
4996 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
4997 					ivar &= ~IVAR_TX_MASK_Q(qid);
4998 					ivar |= __SHIFTIN((qintr_idx
4999 						| IVAR_VALID),
5000 					    IVAR_TX_MASK_Q(qid));
5001 					ivar &= ~IVAR_RX_MASK_Q(qid);
5002 					ivar |= __SHIFTIN((qintr_idx
5003 						| IVAR_VALID),
5004 					    IVAR_RX_MASK_Q(qid));
5005 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
5006 				}
5007 				break;
5008 			case WM_T_82576:
5009 				/* TX and RX */
5010 				for (i = 0; i < sc->sc_nqueues; i++) {
5011 					wmq = &sc->sc_queue[i];
5012 					qid = wmq->wmq_id;
5013 					qintr_idx = wmq->wmq_intr_idx;
5014 
5015 					ivar = CSR_READ(sc,
5016 					    WMREG_IVAR_Q_82576(qid));
5017 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
5018 					ivar |= __SHIFTIN((qintr_idx
5019 						| IVAR_VALID),
5020 					    IVAR_TX_MASK_Q_82576(qid));
5021 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
5022 					ivar |= __SHIFTIN((qintr_idx
5023 						| IVAR_VALID),
5024 					    IVAR_RX_MASK_Q_82576(qid));
5025 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
5026 					    ivar);
5027 				}
5028 				break;
5029 			default:
5030 				break;
5031 			}
5032 
5033 			/* Link status */
5034 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
5035 			    IVAR_MISC_OTHER);
5036 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
5037 		}
5038 
5039 		if (sc->sc_nqueues > 1) {
5040 			wm_init_rss(sc);
5041 
5042 			/*
5043 			 * NOTE: Receive full-packet checksum offload is
5044 			 * mutually exclusive with multiqueue; however, this
5045 			 * is not the same as TCP/IP checksums, which still
5046 			 * work.
5047 			 */
5048 			reg = CSR_READ(sc, WMREG_RXCSUM);
5049 			reg |= RXCSUM_PCSD;
5050 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
5051 		}
5052 	}
5053 
5054 	/* Set up the interrupt registers. */
5055 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5056 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
5057 	    ICR_RXO | ICR_RXT0;
5058 	if (sc->sc_nintrs > 1) {
5059 		uint32_t mask;
5060 		struct wm_queue *wmq;
5061 
5062 		switch (sc->sc_type) {
5063 		case WM_T_82574:
5064 			CSR_WRITE(sc, WMREG_EIAC_82574,
5065 			    WMREG_EIAC_82574_MSIX_MASK);
5066 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
5067 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
5068 			break;
5069 		default:
5070 			if (sc->sc_type == WM_T_82575) {
5071 				mask = 0;
5072 				for (i = 0; i < sc->sc_nqueues; i++) {
5073 					wmq = &sc->sc_queue[i];
5074 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
5075 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
5076 				}
5077 				mask |= EITR_OTHER;
5078 			} else {
5079 				mask = 0;
5080 				for (i = 0; i < sc->sc_nqueues; i++) {
5081 					wmq = &sc->sc_queue[i];
5082 					mask |= 1 << wmq->wmq_intr_idx;
5083 				}
5084 				mask |= 1 << sc->sc_link_intr_idx;
5085 			}
5086 			CSR_WRITE(sc, WMREG_EIAC, mask);
5087 			CSR_WRITE(sc, WMREG_EIAM, mask);
5088 			CSR_WRITE(sc, WMREG_EIMS, mask);
5089 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
5090 			break;
5091 		}
5092 	} else
5093 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
5094 
5095 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5096 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5097 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
5098 	    || (sc->sc_type == WM_T_PCH_SPT)) {
5099 		reg = CSR_READ(sc, WMREG_KABGTXD);
5100 		reg |= KABGTXD_BGSQLBIAS;
5101 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
5102 	}
5103 
5104 	/* Set up the inter-packet gap. */
5105 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
5106 
5107 	if (sc->sc_type >= WM_T_82543) {
5108 		/*
5109 		 * XXX 82574 has both ITR and EITR. Set EITR when we use
5110 		 * the multiqueue function with MSI-X.
5111 		 */
5112 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5113 			int qidx;
5114 			for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5115 				struct wm_queue *wmq = &sc->sc_queue[qidx];
5116 				CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx),
5117 				    sc->sc_itr);
5118 			}
5119 			/*
5120 			 * Link interrupts occur much less frequently than
5121 			 * TX and RX interrupts, so we don't tune the
5122 			 * EITR(WM_MSIX_LINKINTR_IDX) value the way
5123 			 * FreeBSD's if_igb does.
5124 			 */
5125 		} else
5126 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
5127 	}
5128 
5129 	/* Set the VLAN ethernetype. */
5130 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
5131 
5132 	/*
5133 	 * Set up the transmit control register; we start out with
5134 	 * a collision distance suitable for FDX, but update it when
5135 	 * we resolve the media type.
5136 	 */
5137 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
5138 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
5139 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5140 	if (sc->sc_type >= WM_T_82571)
5141 		sc->sc_tctl |= TCTL_MULR;
5142 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5143 
5144 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5145 		/* Write TDT after TCTL.EN is set. See the document. */
5146 		CSR_WRITE(sc, WMREG_TDT(0), 0);
5147 	}
5148 
5149 	if (sc->sc_type == WM_T_80003) {
5150 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
5151 		reg &= ~TCTL_EXT_GCEX_MASK;
5152 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
5153 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
5154 	}
5155 
5156 	/* Set the media. */
5157 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
5158 		goto out;
5159 
5160 	/* Configure for OS presence */
5161 	wm_init_manageability(sc);
5162 
5163 	/*
5164 	 * Set up the receive control register; we actually program
5165 	 * the register when we set the receive filter.  Use multicast
5166 	 * address offset type 0.
5167 	 *
5168 	 * Only the i82544 has the ability to strip the incoming
5169 	 * CRC, so we don't enable that feature.
5170 	 */
5171 	sc->sc_mchash_type = 0;
5172 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
5173 	    | RCTL_MO(sc->sc_mchash_type);
5174 
5175 	/*
5176 	 * The I350 has a bug where it always strips the CRC whether
5177 	 * asked to or not. So ask for stripped CRC here and cope in rxeof.
5178 	 */
5179 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
5180 	    || (sc->sc_type == WM_T_I210))
5181 		sc->sc_rctl |= RCTL_SECRC;
5182 
5183 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
5184 	    && (ifp->if_mtu > ETHERMTU)) {
5185 		sc->sc_rctl |= RCTL_LPE;
5186 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5187 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
5188 	}
5189 
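	/*
	 * Pick the RX buffer size from the mbuf cluster size.  Sizes
	 * other than 2k require the buffer size extension (RCTL_BSEX),
	 * and the i82542 supports only 2k clusters here.
	 */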
5190 	if (MCLBYTES == 2048) {
5191 		sc->sc_rctl |= RCTL_2k;
5192 	} else {
5193 		if (sc->sc_type >= WM_T_82543) {
5194 			switch (MCLBYTES) {
5195 			case 4096:
5196 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
5197 				break;
5198 			case 8192:
5199 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
5200 				break;
5201 			case 16384:
5202 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
5203 				break;
5204 			default:
5205 				panic("wm_init: MCLBYTES %d unsupported",
5206 				    MCLBYTES);
5207 				break;
5208 			}
5209 		} else
5210 			panic("wm_init: i82542 requires MCLBYTES = 2048");
5210 	}
5211 
5212 	/* Set the receive filter. */
5213 	wm_set_filter(sc);
5214 
5215 	/* Enable ECC */
5216 	switch (sc->sc_type) {
5217 	case WM_T_82571:
5218 		reg = CSR_READ(sc, WMREG_PBA_ECC);
5219 		reg |= PBA_ECC_CORR_EN;
5220 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
5221 		break;
5222 	case WM_T_PCH_LPT:
5223 	case WM_T_PCH_SPT:
5224 		reg = CSR_READ(sc, WMREG_PBECCSTS);
5225 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
5226 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
5227 
5228 		sc->sc_ctrl |= CTRL_MEHE;
5229 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5230 		break;
5231 	default:
5232 		break;
5233 	}
5234 
5235 	/* On 82575 and later, set RDT only if RX is enabled */
5236 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5237 		int qidx;
5238 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5239 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
5240 			for (i = 0; i < WM_NRXDESC; i++) {
5241 				mutex_enter(rxq->rxq_lock);
5242 				wm_init_rxdesc(rxq, i);
5243 				mutex_exit(rxq->rxq_lock);
5245 			}
5246 		}
5247 	}
5248 
5249 	wm_turnon(sc);
5250 
5251 	/* Start the one second link check clock. */
5252 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
5253 
5254 	/* ...all done! */
5255 	ifp->if_flags |= IFF_RUNNING;
5256 	ifp->if_flags &= ~IFF_OACTIVE;
5257 
5258  out:
5259 	sc->sc_if_flags = ifp->if_flags;
5260 	if (error)
5261 		log(LOG_ERR, "%s: interface not running\n",
5262 		    device_xname(sc->sc_dev));
5263 	return error;
5264 }
5265 
5266 /*
5267  * wm_stop:		[ifnet interface function]
5268  *
5269  *	Stop transmission on the interface.
5270  */
5271 static void
5272 wm_stop(struct ifnet *ifp, int disable)
5273 {
5274 	struct wm_softc *sc = ifp->if_softc;
5275 
5276 	WM_CORE_LOCK(sc);
5277 	wm_stop_locked(ifp, disable);
5278 	WM_CORE_UNLOCK(sc);
5279 }
5280 
5281 static void
5282 wm_stop_locked(struct ifnet *ifp, int disable)
5283 {
5284 	struct wm_softc *sc = ifp->if_softc;
5285 	struct wm_txsoft *txs;
5286 	int i, qidx;
5287 
5288 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5289 		device_xname(sc->sc_dev), __func__));
5290 	KASSERT(WM_CORE_LOCKED(sc));
5291 
5292 	wm_turnoff(sc);
5293 
5294 	/* Stop the one second clock. */
5295 	callout_stop(&sc->sc_tick_ch);
5296 
5297 	/* Stop the 82547 Tx FIFO stall check timer. */
5298 	if (sc->sc_type == WM_T_82547)
5299 		callout_stop(&sc->sc_txfifo_ch);
5300 
5301 	if (sc->sc_flags & WM_F_HAS_MII) {
5302 		/* Down the MII. */
5303 		mii_down(&sc->sc_mii);
5304 	} else {
5305 #if 0
5306 		/* Should we clear PHY's status properly? */
5307 		wm_reset(sc);
5308 #endif
5309 	}
5310 
5311 	/* Stop the transmit and receive processes. */
5312 	CSR_WRITE(sc, WMREG_TCTL, 0);
5313 	CSR_WRITE(sc, WMREG_RCTL, 0);
5314 	sc->sc_rctl &= ~RCTL_EN;
5315 
5316 	/*
5317 	 * Clear the interrupt mask to ensure the device cannot assert its
5318 	 * interrupt line.
5319 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
5320 	 * service any currently pending or shared interrupt.
5321 	 */
5322 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5323 	sc->sc_icr = 0;
5324 	if (sc->sc_nintrs > 1) {
5325 		if (sc->sc_type != WM_T_82574) {
5326 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5327 			CSR_WRITE(sc, WMREG_EIAC, 0);
5328 		} else
5329 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5330 	}
5331 
5332 	/* Release any queued transmit buffers. */
5333 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5334 		struct wm_queue *wmq = &sc->sc_queue[qidx];
5335 		struct wm_txqueue *txq = &wmq->wmq_txq;
5336 		mutex_enter(txq->txq_lock);
5337 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5338 			txs = &txq->txq_soft[i];
5339 			if (txs->txs_mbuf != NULL) {
5340 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
5341 				m_freem(txs->txs_mbuf);
5342 				txs->txs_mbuf = NULL;
5343 			}
5344 		}
5345 		mutex_exit(txq->txq_lock);
5346 	}
5347 
5348 	/* Mark the interface as down and cancel the watchdog timer. */
5349 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
5350 	ifp->if_timer = 0;
5351 
5352 	if (disable) {
5353 		for (i = 0; i < sc->sc_nqueues; i++) {
5354 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5355 			mutex_enter(rxq->rxq_lock);
5356 			wm_rxdrain(rxq);
5357 			mutex_exit(rxq->rxq_lock);
5358 		}
5359 	}
5360 
5361 #if 0 /* notyet */
5362 	if (sc->sc_type >= WM_T_82544)
5363 		CSR_WRITE(sc, WMREG_WUC, 0);
5364 #endif
5365 }
5366 
5367 static void
5368 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
5369 {
5370 	struct mbuf *m;
5371 	int i;
5372 
5373 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
5374 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
5375 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
5376 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
5377 		    m->m_data, m->m_len, m->m_flags);
5378 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
5379 	    i, i == 1 ? "" : "s");
5380 }
5381 
5382 /*
5383  * wm_82547_txfifo_stall:
5384  *
5385  *	Callout used to wait for the 82547 Tx FIFO to drain,
5386  *	reset the FIFO pointers, and restart packet transmission.
5387  */
5388 static void
5389 wm_82547_txfifo_stall(void *arg)
5390 {
5391 	struct wm_softc *sc = arg;
5392 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
5393 
5394 	mutex_enter(txq->txq_lock);
5395 
5396 	if (txq->txq_stopping)
5397 		goto out;
5398 
5399 	if (txq->txq_fifo_stall) {
5400 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
5401 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
5402 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
5403 			/*
5404 			 * Packets have drained.  Stop transmitter, reset
5405 			 * FIFO pointers, restart transmitter, and kick
5406 			 * the packet queue.
5407 			 */
5408 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
5409 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
5410 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
5411 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
5412 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
5413 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
5414 			CSR_WRITE(sc, WMREG_TCTL, tctl);
5415 			CSR_WRITE_FLUSH(sc);
5416 
5417 			txq->txq_fifo_head = 0;
5418 			txq->txq_fifo_stall = 0;
5419 			wm_start_locked(&sc->sc_ethercom.ec_if);
5420 		} else {
5421 			/*
5422 			 * Still waiting for packets to drain; try again in
5423 			 * another tick.
5424 			 */
5425 			callout_schedule(&sc->sc_txfifo_ch, 1);
5426 		}
5427 	}
5428 
5429 out:
5430 	mutex_exit(txq->txq_lock);
5431 }
5432 
5433 /*
5434  * wm_82547_txfifo_bugchk:
5435  *
5436  *	Check for bug condition in the 82547 Tx FIFO.  We need to
5437  *	prevent enqueueing a packet that would wrap around the end
5438  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
5439  *
5440  *	We do this by checking the amount of space before the end
5441  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
5442  *	the Tx FIFO, wait for all remaining packets to drain, reset
5443  *	the internal FIFO pointers to the beginning, and restart
5444  *	transmission on the interface.
5445  */
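/*
 * WM_FIFO_HDR is the Tx FIFO allocation granularity: each packet
 * consumes its length plus one FIFO header, rounded up to this unit.
 * WM_82547_PAD_LEN bounds how far a rounded packet may extend beyond
 * the remaining FIFO space before we declare a stall.
 */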
5446 #define	WM_FIFO_HDR		0x10
5447 #define	WM_82547_PAD_LEN	0x3e0
5448 static int
5449 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
5450 {
5451 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
5452 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
5453 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
5454 
5455 	/* Just return if already stalled. */
5456 	if (txq->txq_fifo_stall)
5457 		return 1;
5458 
5459 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
5460 		/* Stall only occurs in half-duplex mode. */
5461 		goto send_packet;
5462 	}
5463 
5464 	if (len >= WM_82547_PAD_LEN + space) {
5465 		txq->txq_fifo_stall = 1;
5466 		callout_schedule(&sc->sc_txfifo_ch, 1);
5467 		return 1;
5468 	}
5469 
5470  send_packet:
5471 	txq->txq_fifo_head += len;
5472 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
5473 		txq->txq_fifo_head -= txq->txq_fifo_size;
5474 
5475 	return 0;
5476 }
5477 
5478 static int
5479 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5480 {
5481 	int error;
5482 
5483 	/*
5484 	 * Allocate the control data structures, and create and load the
5485 	 * DMA map for it.
5486 	 *
5487 	 * NOTE: All Tx descriptors must be in the same 4G segment of
5488 	 * memory.  So must Rx descriptors.  We simplify by allocating
5489 	 * both sets within the same 4G segment.
5490 	 */
5491 	if (sc->sc_type < WM_T_82544)
5492 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
5493 	else
5494 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
5495 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5496 		txq->txq_descsize = sizeof(nq_txdesc_t);
5497 	else
5498 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
5499 
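	/*
	 * In the bus_dmamem_alloc() call below, PAGE_SIZE is the
	 * alignment and 0x100000000 the boundary, so the descriptor
	 * ring can never cross a 4G address boundary.
	 */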
5500 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
5501 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
5502 		    1, &txq->txq_desc_rseg, 0)) != 0) {
5503 		aprint_error_dev(sc->sc_dev,
5504 		    "unable to allocate TX control data, error = %d\n",
5505 		    error);
5506 		goto fail_0;
5507 	}
5508 
5509 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
5510 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
5511 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
5512 		aprint_error_dev(sc->sc_dev,
5513 		    "unable to map TX control data, error = %d\n", error);
5514 		goto fail_1;
5515 	}
5516 
5517 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
5518 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
5519 		aprint_error_dev(sc->sc_dev,
5520 		    "unable to create TX control data DMA map, error = %d\n",
5521 		    error);
5522 		goto fail_2;
5523 	}
5524 
5525 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
5526 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
5527 		aprint_error_dev(sc->sc_dev,
5528 		    "unable to load TX control data DMA map, error = %d\n",
5529 		    error);
5530 		goto fail_3;
5531 	}
5532 
5533 	return 0;
5534 
5535  fail_3:
5536 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5537  fail_2:
5538 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5539 	    WM_TXDESCS_SIZE(txq));
5540  fail_1:
5541 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5542  fail_0:
5543 	return error;
5544 }
5545 
5546 static void
5547 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5548 {
5549 
5550 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
5551 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5552 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5553 	    WM_TXDESCS_SIZE(txq));
5554 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5555 }
5556 
5557 static int
5558 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5559 {
5560 	int error;
5561 
5562 	/*
5563 	 * Allocate the control data structures, and create and load the
5564 	 * DMA map for it.
5565 	 *
5566 	 * NOTE: All Tx descriptors must be in the same 4G segment of
5567 	 * memory.  So must Rx descriptors.  We simplify by allocating
5568 	 * both sets within the same 4G segment.
5569 	 */
5570 	rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
5571 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size,
5572 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
5573 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
5574 		aprint_error_dev(sc->sc_dev,
5575 		    "unable to allocate RX control data, error = %d\n",
5576 		    error);
5577 		goto fail_0;
5578 	}
5579 
5580 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
5581 		    rxq->rxq_desc_rseg, rxq->rxq_desc_size,
5582 		    (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
5583 		aprint_error_dev(sc->sc_dev,
5584 		    "unable to map RX control data, error = %d\n", error);
5585 		goto fail_1;
5586 	}
5587 
5588 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
5589 		    rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
5590 		aprint_error_dev(sc->sc_dev,
5591 		    "unable to create RX control data DMA map, error = %d\n",
5592 		    error);
5593 		goto fail_2;
5594 	}
5595 
5596 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
5597 		    rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
5598 		aprint_error_dev(sc->sc_dev,
5599 		    "unable to load RX control data DMA map, error = %d\n",
5600 		    error);
5601 		goto fail_3;
5602 	}
5603 
5604 	return 0;
5605 
5606  fail_3:
5607 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5608  fail_2:
5609 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5610 	    rxq->rxq_desc_size);
5611  fail_1:
5612 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5613  fail_0:
5614 	return error;
5615 }
5616 
5617 static void
5618 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5619 {
5620 
5621 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
5622 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5623 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5624 	    rxq->rxq_desc_size);
5625 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5626 }
5627 
5629 static int
5630 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5631 {
5632 	int i, error;
5633 
5634 	/* Create the transmit buffer DMA maps. */
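	/*
	 * The 82547 variants get the shorter queue length, presumably
	 * to bound the amount of data in flight for the Tx FIFO
	 * workaround.
	 */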
5635 	WM_TXQUEUELEN(txq) =
5636 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
5637 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
5638 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5639 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
5640 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
5641 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
5642 			aprint_error_dev(sc->sc_dev,
5643 			    "unable to create Tx DMA map %d, error = %d\n",
5644 			    i, error);
5645 			goto fail;
5646 		}
5647 	}
5648 
5649 	return 0;
5650 
5651  fail:
5652 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5653 		if (txq->txq_soft[i].txs_dmamap != NULL)
5654 			bus_dmamap_destroy(sc->sc_dmat,
5655 			    txq->txq_soft[i].txs_dmamap);
5656 	}
5657 	return error;
5658 }
5659 
5660 static void
5661 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5662 {
5663 	int i;
5664 
5665 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5666 		if (txq->txq_soft[i].txs_dmamap != NULL)
5667 			bus_dmamap_destroy(sc->sc_dmat,
5668 			    txq->txq_soft[i].txs_dmamap);
5669 	}
5670 }
5671 
5672 static int
5673 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5674 {
5675 	int i, error;
5676 
5677 	/* Create the receive buffer DMA maps. */
5678 	for (i = 0; i < WM_NRXDESC; i++) {
5679 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
5680 			    MCLBYTES, 0, 0,
5681 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
5682 			aprint_error_dev(sc->sc_dev,
5683 			    "unable to create Rx DMA map %d error = %d\n",
5684 			    i, error);
5685 			goto fail;
5686 		}
5687 		rxq->rxq_soft[i].rxs_mbuf = NULL;
5688 	}
5689 
5690 	return 0;
5691 
5692  fail:
5693 	for (i = 0; i < WM_NRXDESC; i++) {
5694 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5695 			bus_dmamap_destroy(sc->sc_dmat,
5696 			    rxq->rxq_soft[i].rxs_dmamap);
5697 	}
5698 	return error;
5699 }
5700 
5701 static void
5702 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5703 {
5704 	int i;
5705 
5706 	for (i = 0; i < WM_NRXDESC; i++) {
5707 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5708 			bus_dmamap_destroy(sc->sc_dmat,
5709 			    rxq->rxq_soft[i].rxs_dmamap);
5710 	}
5711 }
5712 
5713 /*
5714  * wm_alloc_txrx_queues:
5715  *	Allocate {tx,rx} descs and {tx,rx} buffers
5716  */
5717 static int
5718 wm_alloc_txrx_queues(struct wm_softc *sc)
5719 {
5720 	int i, error, tx_done, rx_done;
5721 
5722 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
5723 	    KM_SLEEP);
5724 	if (sc->sc_queue == NULL) {
5725 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
5726 		error = ENOMEM;
5727 		goto fail_0;
5728 	}
5729 
5730 	/*
5731 	 * For transmission
5732 	 */
5733 	error = 0;
5734 	tx_done = 0;
5735 	for (i = 0; i < sc->sc_nqueues; i++) {
5736 #ifdef WM_EVENT_COUNTERS
5737 		int j;
5738 		const char *xname;
5739 #endif
5740 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5741 		txq->txq_sc = sc;
5742 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5743 
5744 		error = wm_alloc_tx_descs(sc, txq);
5745 		if (error)
5746 			break;
5747 		error = wm_alloc_tx_buffer(sc, txq);
5748 		if (error) {
5749 			wm_free_tx_descs(sc, txq);
5750 			break;
5751 		}
5752 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
5753 		if (txq->txq_interq == NULL) {
5754 			wm_free_tx_descs(sc, txq);
5755 			wm_free_tx_buffer(sc, txq);
5756 			error = ENOMEM;
5757 			break;
5758 		}
5759 
5760 #ifdef WM_EVENT_COUNTERS
5761 		xname = device_xname(sc->sc_dev);
5762 
5763 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
5764 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
5765 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
5766 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
5767 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
5768 
5769 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
5770 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
5771 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
5772 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
5773 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
5774 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
5775 
5776 		for (j = 0; j < WM_NTXSEGS; j++) {
5777 			snprintf(txq->txq_txseg_evcnt_names[j],
5778 			    sizeof(txq->txq_txseg_evcnt_names[j]),
5779 			    "txq%02dtxseg%d", i, j);
5780 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
5781 			    EVCNT_TYPE_MISC, NULL, xname,
5782 			    txq->txq_txseg_evcnt_names[j]);
5781 		}
5782 
5783 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
5784 
5785 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
5786 #endif /* WM_EVENT_COUNTERS */
5787 
5788 		tx_done++;
5789 	}
5790 	if (error)
5791 		goto fail_1;
5792 
5793 	/*
5794 	 * For receive
5795 	 */
5796 	error = 0;
5797 	rx_done = 0;
5798 	for (i = 0; i < sc->sc_nqueues; i++) {
5799 #ifdef WM_EVENT_COUNTERS
5800 		const char *xname;
5801 #endif
5802 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5803 		rxq->rxq_sc = sc;
5804 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5805 
5806 		error = wm_alloc_rx_descs(sc, rxq);
5807 		if (error)
5808 			break;
5809 
5810 		error = wm_alloc_rx_buffer(sc, rxq);
5811 		if (error) {
5812 			wm_free_rx_descs(sc, rxq);
5813 			break;
5814 		}
5815 
5816 #ifdef WM_EVENT_COUNTERS
5817 		xname = device_xname(sc->sc_dev);
5818 
5819 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
5820 
5821 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
5822 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
5823 #endif /* WM_EVENT_COUNTERS */
5824 
5825 		rx_done++;
5826 	}
5827 	if (error)
5828 		goto fail_2;
5829 
5830 	return 0;
5831 
5832  fail_2:
5833 	for (i = 0; i < rx_done; i++) {
5834 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5835 		wm_free_rx_buffer(sc, rxq);
5836 		wm_free_rx_descs(sc, rxq);
5837 		if (rxq->rxq_lock)
5838 			mutex_obj_free(rxq->rxq_lock);
5839 	}
5840  fail_1:
5841 	for (i = 0; i < tx_done; i++) {
5842 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5843 		pcq_destroy(txq->txq_interq);
5844 		wm_free_tx_buffer(sc, txq);
5845 		wm_free_tx_descs(sc, txq);
5846 		if (txq->txq_lock)
5847 			mutex_obj_free(txq->txq_lock);
5848 	}
5849 
5850 	kmem_free(sc->sc_queue,
5851 	    sizeof(struct wm_queue) * sc->sc_nqueues);
5852  fail_0:
5853 	return error;
5854 }
5855 
5856 /*
5857  * wm_free_txrx_queues:
5858  *	Free {tx,rx} descs and {tx,rx} buffers
5859  */
5860 static void
5861 wm_free_txrx_queues(struct wm_softc *sc)
5862 {
5863 	int i;
5864 
5865 	for (i = 0; i < sc->sc_nqueues; i++) {
5866 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5867 		wm_free_rx_buffer(sc, rxq);
5868 		wm_free_rx_descs(sc, rxq);
5869 		if (rxq->rxq_lock)
5870 			mutex_obj_free(rxq->rxq_lock);
5871 	}
5872 
5873 	for (i = 0; i < sc->sc_nqueues; i++) {
5874 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5875 		wm_free_tx_buffer(sc, txq);
5876 		wm_free_tx_descs(sc, txq);
5877 		if (txq->txq_lock)
5878 			mutex_obj_free(txq->txq_lock);
5879 	}
5880 
5881 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
5882 }
5883 
5884 static void
5885 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5886 {
5887 
5888 	KASSERT(mutex_owned(txq->txq_lock));
5889 
5890 	/* Initialize the transmit descriptor ring. */
5891 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
5892 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
5893 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5894 	txq->txq_free = WM_NTXDESC(txq);
5895 	txq->txq_next = 0;
5896 }
5897 
5898 static void
5899 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
5900     struct wm_txqueue *txq)
5901 {
5902 
5903 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5904 		device_xname(sc->sc_dev), __func__));
5905 	KASSERT(mutex_owned(txq->txq_lock));
5906 
5907 	if (sc->sc_type < WM_T_82543) {
5908 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
5909 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
5910 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
5911 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
5912 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
5913 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
5914 	} else {
5915 		int qid = wmq->wmq_id;
5916 
5917 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
5918 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
5919 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
5920 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
5921 
5922 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5923 			/*
5924 			 * Don't write TDT before TCTL.EN is set.
5925 			 * See the document.
5926 			 */
5927 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
5928 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
5929 			    | TXDCTL_WTHRESH(0));
5930 		else {
5931 			/* Tx interrupt delay: ITR / 4 */
5932 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
5933 			if (sc->sc_type >= WM_T_82540) {
5934 				/* Should be the same value as TIDV */
5935 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
5936 			}
5937 
5938 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
5939 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
5940 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
5941 		}
5942 	}
5943 }
5944 
5945 static void
5946 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5947 {
5948 	int i;
5949 
5950 	KASSERT(mutex_owned(txq->txq_lock));
5951 
5952 	/* Initialize the transmit job descriptors. */
5953 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
5954 		txq->txq_soft[i].txs_mbuf = NULL;
5955 	txq->txq_sfree = WM_TXQUEUELEN(txq);
5956 	txq->txq_snext = 0;
5957 	txq->txq_sdirty = 0;
5958 }
5959 
5960 static void
5961 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
5962     struct wm_txqueue *txq)
5963 {
5964 
5965 	KASSERT(mutex_owned(txq->txq_lock));
5966 
5967 	/*
5968 	 * Set up some register offsets that are different between
5969 	 * the i82542 and the i82543 and later chips.
5970 	 */
5971 	if (sc->sc_type < WM_T_82543)
5972 		txq->txq_tdt_reg = WMREG_OLD_TDT;
5973 	else
5974 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
5975 
5976 	wm_init_tx_descs(sc, txq);
5977 	wm_init_tx_regs(sc, wmq, txq);
5978 	wm_init_tx_buffer(sc, txq);
5979 }
5980 
5981 static void
5982 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
5983     struct wm_rxqueue *rxq)
5984 {
5985 
5986 	KASSERT(mutex_owned(rxq->rxq_lock));
5987 
5988 	/*
5989 	 * Initialize the receive descriptor and receive job
5990 	 * descriptor rings.
5991 	 */
5992 	if (sc->sc_type < WM_T_82543) {
5993 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
5994 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
5995 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
5996 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
5997 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
5998 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
5999 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
6000 
6001 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
6002 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
6003 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
6004 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
6005 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
6006 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
6007 	} else {
6008 		int qid = wmq->wmq_id;
6009 
6010 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
6011 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
6012 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
6013 
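		/*
		 * NEWQUEUE devices program the RX buffer size in SRRCTL
		 * in units of (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, so
		 * MCLBYTES must be a multiple of that unit.
		 */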
6014 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6015 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
6016 				panic("%s: MCLBYTES %d unsupported for "
6017 				    "i82575 or higher\n", __func__, MCLBYTES);
6017 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
6018 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
6019 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
6020 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
6021 			    | RXDCTL_WTHRESH(1));
6022 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
6023 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
6024 		} else {
6025 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
6026 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
6027 			/* Rx interrupt delay: ITR / 4 */
6028 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
6029 			/* MUST be the same value as RDTR */
6030 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
6031 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
6032 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
6033 		}
6034 	}
6035 }
6036 
6037 static int
6038 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
6039 {
6040 	struct wm_rxsoft *rxs;
6041 	int error, i;
6042 
6043 	KASSERT(mutex_owned(rxq->rxq_lock));
6044 
6045 	for (i = 0; i < WM_NRXDESC; i++) {
6046 		rxs = &rxq->rxq_soft[i];
6047 		if (rxs->rxs_mbuf == NULL) {
6048 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
6049 				log(LOG_ERR, "%s: unable to allocate or map "
6050 				    "rx buffer %d, error = %d\n",
6051 				    device_xname(sc->sc_dev), i, error);
6052 				/*
6053 				 * XXX Should attempt to run with fewer receive
6054 				 * XXX buffers instead of just failing.
6055 				 */
6056 				wm_rxdrain(rxq);
6057 				return ENOMEM;
6058 			}
6059 		} else {
6060 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
6061 				wm_init_rxdesc(rxq, i);
6062 			/*
6063 			 * For 82575 and newer devices, the RX descriptors
6064 			 * must be initialized after RCTL.EN is set in
6065 			 * wm_set_filter().
6066 			 */
6067 		}
6068 	}
6069 	rxq->rxq_ptr = 0;
6070 	rxq->rxq_discard = 0;
6071 	WM_RXCHAIN_RESET(rxq);
6072 
6073 	return 0;
6074 }
6075 
6076 static int
6077 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
6078     struct wm_rxqueue *rxq)
6079 {
6080 
6081 	KASSERT(mutex_owned(rxq->rxq_lock));
6082 
6083 	/*
6084 	 * Set up some register offsets that are different between
6085 	 * the i82542 and the i82543 and later chips.
6086 	 */
6087 	if (sc->sc_type < WM_T_82543)
6088 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
6089 	else
6090 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
6091 
6092 	wm_init_rx_regs(sc, wmq, rxq);
6093 	return wm_init_rx_buffer(sc, rxq);
6094 }
6095 
6096 /*
6097  * wm_init_txrx_queues:
6098  *	Initialize {tx,rx} descs and {tx,rx} buffers
6099  */
6100 static int
6101 wm_init_txrx_queues(struct wm_softc *sc)
6102 {
6103 	int i, error = 0;
6104 
6105 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
6106 		device_xname(sc->sc_dev), __func__));
6107 
6108 	for (i = 0; i < sc->sc_nqueues; i++) {
6109 		struct wm_queue *wmq = &sc->sc_queue[i];
6110 		struct wm_txqueue *txq = &wmq->wmq_txq;
6111 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
6112 
6113 		mutex_enter(txq->txq_lock);
6114 		wm_init_tx_queue(sc, wmq, txq);
6115 		mutex_exit(txq->txq_lock);
6116 
6117 		mutex_enter(rxq->rxq_lock);
6118 		error = wm_init_rx_queue(sc, wmq, rxq);
6119 		mutex_exit(rxq->rxq_lock);
6120 		if (error)
6121 			break;
6122 	}
6123 
6124 	return error;
6125 }
6126 
6127 /*
6128  * wm_tx_offload:
6129  *
6130  *	Set up TCP/IP checksumming parameters for the
6131  *	specified packet.
6132  */
6133 static int
6134 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
6135     uint8_t *fieldsp)
6136 {
6137 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6138 	struct mbuf *m0 = txs->txs_mbuf;
6139 	struct livengood_tcpip_ctxdesc *t;
6140 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
6141 	uint32_t ipcse;
6142 	struct ether_header *eh;
6143 	int offset, iphl;
6144 	uint8_t fields;
6145 
6146 	/*
6147 	 * XXX It would be nice if the mbuf pkthdr had offset
6148 	 * fields for the protocol headers.
6149 	 */
6150 
6151 	eh = mtod(m0, struct ether_header *);
6152 	switch (htons(eh->ether_type)) {
6153 	case ETHERTYPE_IP:
6154 	case ETHERTYPE_IPV6:
6155 		offset = ETHER_HDR_LEN;
6156 		break;
6157 
6158 	case ETHERTYPE_VLAN:
6159 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
6160 		break;
6161 
6162 	default:
6163 		/*
6164 		 * Don't support this protocol or encapsulation.
6165 		 */
6166 		*fieldsp = 0;
6167 		*cmdp = 0;
6168 		return 0;
6169 	}
6170 
6171 	if ((m0->m_pkthdr.csum_flags &
6172 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
6173 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
6174 	} else {
6175 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
6176 	}
6177 	ipcse = offset + iphl - 1;
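	/* IPCSE is the offset of the last byte of the IP header. */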
6178 
6179 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
6180 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
6181 	seg = 0;
6182 	fields = 0;
6183 
6184 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
6185 		int hlen = offset + iphl;
6186 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
6187 
6188 		if (__predict_false(m0->m_len <
6189 				    (hlen + sizeof(struct tcphdr)))) {
6190 			/*
6191 			 * TCP/IP headers are not in the first mbuf; we need
6192 			 * to do this the slow and painful way.  Let's just
6193 			 * hope this doesn't happen very often.
6194 			 */
6195 			struct tcphdr th;
6196 
6197 			WM_Q_EVCNT_INCR(txq, txtsopain);
6198 
6199 			m_copydata(m0, hlen, sizeof(th), &th);
6200 			if (v4) {
6201 				struct ip ip;
6202 
6203 				m_copydata(m0, offset, sizeof(ip), &ip);
6204 				ip.ip_len = 0;
6205 				m_copyback(m0,
6206 				    offset + offsetof(struct ip, ip_len),
6207 				    sizeof(ip.ip_len), &ip.ip_len);
6208 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
6209 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
6210 			} else {
6211 				struct ip6_hdr ip6;
6212 
6213 				m_copydata(m0, offset, sizeof(ip6), &ip6);
6214 				ip6.ip6_plen = 0;
6215 				m_copyback(m0,
6216 				    offset + offsetof(struct ip6_hdr, ip6_plen),
6217 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
6218 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
6219 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
6220 			}
6221 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
6222 			    sizeof(th.th_sum), &th.th_sum);
6223 
6224 			hlen += th.th_off << 2;
6225 		} else {
6226 			/*
6227 			 * TCP/IP headers are in the first mbuf; we can do
6228 			 * this the easy way.
6229 			 */
6230 			struct tcphdr *th;
6231 
6232 			if (v4) {
6233 				struct ip *ip =
6234 				    (void *)(mtod(m0, char *) + offset);
6235 				th = (void *)(mtod(m0, char *) + hlen);
6236 
6237 				ip->ip_len = 0;
6238 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
6239 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
6240 			} else {
6241 				struct ip6_hdr *ip6 =
6242 				    (void *)(mtod(m0, char *) + offset);
6243 				th = (void *)(mtod(m0, char *) + hlen);
6244 
6245 				ip6->ip6_plen = 0;
6246 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
6247 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
6248 			}
6249 			hlen += th->th_off << 2;
6250 		}
6251 
6252 		if (v4) {
6253 			WM_Q_EVCNT_INCR(txq, txtso);
6254 			cmdlen |= WTX_TCPIP_CMD_IP;
6255 		} else {
6256 			WM_Q_EVCNT_INCR(txq, txtso6);
6257 			ipcse = 0;
6258 		}
6259 		cmd |= WTX_TCPIP_CMD_TSE;
6260 		cmdlen |= WTX_TCPIP_CMD_TSE |
6261 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
6262 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
6263 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
6264 	}
6265 
6266 	/*
6267 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
6268 	 * offload feature, if we load the context descriptor, we
6269 	 * MUST provide valid values for IPCSS and TUCSS fields.
6270 	 */
6271 
6272 	ipcs = WTX_TCPIP_IPCSS(offset) |
6273 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
6274 	    WTX_TCPIP_IPCSE(ipcse);
6275 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
6276 		WM_Q_EVCNT_INCR(txq, txipsum);
6277 		fields |= WTX_IXSM;
6278 	}
6279 
6280 	offset += iphl;
6281 
6282 	if (m0->m_pkthdr.csum_flags &
6283 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
6284 		WM_Q_EVCNT_INCR(txq, txtusum);
6285 		fields |= WTX_TXSM;
6286 		tucs = WTX_TCPIP_TUCSS(offset) |
6287 		    WTX_TCPIP_TUCSO(offset +
6288 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
6289 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
6290 	} else if ((m0->m_pkthdr.csum_flags &
6291 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
6292 		WM_Q_EVCNT_INCR(txq, txtusum6);
6293 		fields |= WTX_TXSM;
6294 		tucs = WTX_TCPIP_TUCSS(offset) |
6295 		    WTX_TCPIP_TUCSO(offset +
6296 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
6297 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
6298 	} else {
6299 		/* Just initialize it to a valid TCP context. */
6300 		tucs = WTX_TCPIP_TUCSS(offset) |
6301 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
6302 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
6303 	}
6304 
6305 	/* Fill in the context descriptor. */
6306 	t = (struct livengood_tcpip_ctxdesc *)
6307 	    &txq->txq_descs[txq->txq_next];
6308 	t->tcpip_ipcs = htole32(ipcs);
6309 	t->tcpip_tucs = htole32(tucs);
6310 	t->tcpip_cmdlen = htole32(cmdlen);
6311 	t->tcpip_seg = htole32(seg);
6312 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6313 
6314 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6315 	txs->txs_ndesc++;
6316 
6317 	*cmdp = cmd;
6318 	*fieldsp = fields;
6319 
6320 	return 0;
6321 }
6322 
6323 static inline int
6324 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
6325 {
6326 	struct wm_softc *sc = ifp->if_softc;
6327 	u_int cpuid = cpu_index(curcpu());
6328 
6329 	/*
6330 	 * Currently a simple distribution strategy.
6331 	 * TODO:
6332 	 * distribute by flowid (RSS hash value).
6333 	 */
6334 	return (cpuid + sc->sc_affinity_offset) % sc->sc_nqueues;
6335 }
6336 
6337 /*
6338  * wm_start:		[ifnet interface function]
6339  *
6340  *	Start packet transmission on the interface.
6341  */
6342 static void
6343 wm_start(struct ifnet *ifp)
6344 {
6345 	struct wm_softc *sc = ifp->if_softc;
6346 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6347 
6348 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
6349 
6350 	/*
6351 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
6352 	 */
6353 
6354 	mutex_enter(txq->txq_lock);
6355 	if (!txq->txq_stopping)
6356 		wm_start_locked(ifp);
6357 	mutex_exit(txq->txq_lock);
6358 }
6359 
6360 static void
6361 wm_start_locked(struct ifnet *ifp)
6362 {
6363 	struct wm_softc *sc = ifp->if_softc;
6364 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6365 
6366 	wm_send_common_locked(ifp, txq, false);
6367 }
6368 
6369 static int
6370 wm_transmit(struct ifnet *ifp, struct mbuf *m)
6371 {
6372 	int qid;
6373 	struct wm_softc *sc = ifp->if_softc;
6374 	struct wm_txqueue *txq;
6375 
6376 	qid = wm_select_txqueue(ifp, m);
6377 	txq = &sc->sc_queue[qid].wmq_txq;
6378 
6379 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
6380 		m_freem(m);
6381 		WM_Q_EVCNT_INCR(txq, txdrop);
6382 		return ENOBUFS;
6383 	}
6384 
6385 	/*
6386 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
6387 	 */
6388 	ifp->if_obytes += m->m_pkthdr.len;
6389 	if (m->m_flags & M_MCAST)
6390 		ifp->if_omcasts++;
6391 
6392 	if (mutex_tryenter(txq->txq_lock)) {
6393 		if (!txq->txq_stopping)
6394 			wm_transmit_locked(ifp, txq);
6395 		mutex_exit(txq->txq_lock);
6396 	}
6397 
6398 	return 0;
6399 }
6400 
6401 static void
6402 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
6403 {
6404 
6405 	wm_send_common_locked(ifp, txq, true);
6406 }
6407 
6408 static void
6409 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
6410     bool is_transmit)
6411 {
6412 	struct wm_softc *sc = ifp->if_softc;
6413 	struct mbuf *m0;
6414 	struct m_tag *mtag;
6415 	struct wm_txsoft *txs;
6416 	bus_dmamap_t dmamap;
6417 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
6418 	bus_addr_t curaddr;
6419 	bus_size_t seglen, curlen;
6420 	uint32_t cksumcmd;
6421 	uint8_t cksumfields;
6422 
6423 	KASSERT(mutex_owned(txq->txq_lock));
6424 
6425 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
6426 		return;
6427 
6428 	/* Remember the previous number of free descriptors. */
6429 	ofree = txq->txq_free;
6430 
6431 	/*
6432 	 * Loop through the send queue, setting up transmit descriptors
6433 	 * until we drain the queue, or use up all available transmit
6434 	 * descriptors.
6435 	 */
6436 	for (;;) {
6437 		m0 = NULL;
6438 
6439 		/* Get a work queue entry. */
6440 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
6441 			wm_txeof(sc, txq);
6442 			if (txq->txq_sfree == 0) {
6443 				DPRINTF(WM_DEBUG_TX,
6444 				    ("%s: TX: no free job descriptors\n",
6445 					device_xname(sc->sc_dev)));
6446 				WM_Q_EVCNT_INCR(txq, txsstall);
6447 				break;
6448 			}
6449 		}
6450 
6451 		/* Grab a packet off the queue. */
6452 		if (is_transmit)
6453 			m0 = pcq_get(txq->txq_interq);
6454 		else
6455 			IFQ_DEQUEUE(&ifp->if_snd, m0);
6456 		if (m0 == NULL)
6457 			break;
6458 
6459 		DPRINTF(WM_DEBUG_TX,
6460 		    ("%s: TX: have packet to transmit: %p\n",
6461 		    device_xname(sc->sc_dev), m0));
6462 
6463 		txs = &txq->txq_soft[txq->txq_snext];
6464 		dmamap = txs->txs_dmamap;
6465 
6466 		use_tso = (m0->m_pkthdr.csum_flags &
6467 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
6468 
6469 		/*
6470 		 * So says the Linux driver:
6471 		 * The controller does a simple calculation to make sure
6472 		 * there is enough room in the FIFO before initiating the
6473 		 * DMA for each buffer.  The calc is:
6474 		 *	4 = ceil(buffer len / MSS)
6475 		 * To make sure we don't overrun the FIFO, adjust the max
6476 		 * buffer len if the MSS drops.
6477 		 */
6478 		dmamap->dm_maxsegsz =
6479 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
6480 		    ? m0->m_pkthdr.segsz << 2
6481 		    : WTX_MAX_LEN;
6482 
6483 		/*
6484 		 * Load the DMA map.  If this fails, the packet either
6485 		 * didn't fit in the allotted number of segments, or we
6486 		 * were short on resources.  For the too-many-segments
6487 		 * case, we simply report an error and drop the packet,
6488 		 * since we can't sanely copy a jumbo packet to a single
6489 		 * buffer.
6490 		 */
6491 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
6492 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
6493 		if (error) {
6494 			if (error == EFBIG) {
6495 				WM_Q_EVCNT_INCR(txq, txdrop);
6496 				log(LOG_ERR, "%s: Tx packet consumes too many "
6497 				    "DMA segments, dropping...\n",
6498 				    device_xname(sc->sc_dev));
6499 				wm_dump_mbuf_chain(sc, m0);
6500 				m_freem(m0);
6501 				continue;
6502 			}
6503 			/*  Short on resources, just stop for now. */
6504 			DPRINTF(WM_DEBUG_TX,
6505 			    ("%s: TX: dmamap load failed: %d\n",
6506 			    device_xname(sc->sc_dev), error));
6507 			break;
6508 		}
6509 
6510 		segs_needed = dmamap->dm_nsegs;
6511 		if (use_tso) {
6512 			/* For sentinel descriptor; see below. */
6513 			segs_needed++;
6514 		}
6515 
6516 		/*
6517 		 * Ensure we have enough descriptors free to describe
6518 		 * the packet.  Note, we always reserve one descriptor
6519 		 * at the end of the ring due to the semantics of the
6520 		 * TDT register, plus one more in the event we need
6521 		 * to load offload context.
6522 		 */
6523 		if (segs_needed > txq->txq_free - 2) {
6524 			/*
6525 			 * Not enough free descriptors to transmit this
6526 			 * packet.  We haven't committed anything yet,
6527 			 * so just unload the DMA map, put the packet
6528 			 * back on the queue, and punt.  Notify the upper
6529 			 * layer that there are no more slots left.
6530 			 */
6531 			DPRINTF(WM_DEBUG_TX,
6532 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
6533 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
6534 			    segs_needed, txq->txq_free - 1));
6535 			ifp->if_flags |= IFF_OACTIVE;
6536 			bus_dmamap_unload(sc->sc_dmat, dmamap);
6537 			WM_Q_EVCNT_INCR(txq, txdstall);
6538 			break;
6539 		}
6540 
6541 		/*
6542 		 * Check for 82547 Tx FIFO bug.  We need to do this
6543 		 * once we know we can transmit the packet, since we
6544 		 * do some internal FIFO space accounting here.
6545 		 */
6546 		if (sc->sc_type == WM_T_82547 &&
6547 		    wm_82547_txfifo_bugchk(sc, m0)) {
6548 			DPRINTF(WM_DEBUG_TX,
6549 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
6550 			    device_xname(sc->sc_dev)));
6551 			ifp->if_flags |= IFF_OACTIVE;
6552 			bus_dmamap_unload(sc->sc_dmat, dmamap);
6553 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
6554 			break;
6555 		}
6556 
6557 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
6558 
6559 		DPRINTF(WM_DEBUG_TX,
6560 		    ("%s: TX: packet has %d (%d) DMA segments\n",
6561 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
6562 
6563 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
6564 
6565 		/*
6566 		 * Store a pointer to the packet so that we can free it
6567 		 * later.
6568 		 *
6569 		 * Initially, we consider the number of descriptors the
6570 		 * packet uses to be the number of DMA segments.  This may
6571 		 * be incremented by one if we do checksum offload (a
6572 		 * descriptor is used to set the checksum context).
6573 		 */
6574 		txs->txs_mbuf = m0;
6575 		txs->txs_firstdesc = txq->txq_next;
6576 		txs->txs_ndesc = segs_needed;
6577 
6578 		/* Set up offload parameters for this packet. */
6579 		if (m0->m_pkthdr.csum_flags &
6580 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
6581 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
6582 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
6583 			if (wm_tx_offload(sc, txs, &cksumcmd,
6584 					  &cksumfields) != 0) {
6585 				/* Error message already displayed. */
6586 				bus_dmamap_unload(sc->sc_dmat, dmamap);
6587 				continue;
6588 			}
6589 		} else {
6590 			cksumcmd = 0;
6591 			cksumfields = 0;
6592 		}
6593 
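		/*
		 * Every data descriptor gets WTX_CMD_IFCS (insert FCS)
		 * and WTX_CMD_IDE (interrupt delay enable) on top of any
		 * checksum command bits set above.
		 */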
6594 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
6595 
6596 		/* Sync the DMA map. */
6597 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
6598 		    BUS_DMASYNC_PREWRITE);
6599 
6600 		/* Initialize the transmit descriptor. */
6601 		for (nexttx = txq->txq_next, seg = 0;
6602 		     seg < dmamap->dm_nsegs; seg++) {
6603 			for (seglen = dmamap->dm_segs[seg].ds_len,
6604 			     curaddr = dmamap->dm_segs[seg].ds_addr;
6605 			     seglen != 0;
6606 			     curaddr += curlen, seglen -= curlen,
6607 			     nexttx = WM_NEXTTX(txq, nexttx)) {
6608 				curlen = seglen;
6609 
6610 				/*
6611 				 * So says the Linux driver:
6612 				 * Work around for premature descriptor
6613 				 * write-backs in TSO mode.  Append a
6614 				 * 4-byte sentinel descriptor.
6615 				 */
6616 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
6617 				    curlen > 8)
6618 					curlen -= 4;
6619 
6620 				wm_set_dma_addr(
6621 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
6622 				txq->txq_descs[nexttx].wtx_cmdlen
6623 				    = htole32(cksumcmd | curlen);
6624 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
6625 				    = 0;
6626 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
6627 				    = cksumfields;
6628 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
6629 				lasttx = nexttx;
6630 
6631 				DPRINTF(WM_DEBUG_TX,
6632 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
6633 				     "len %#04zx\n",
6634 				    device_xname(sc->sc_dev), nexttx,
6635 				    (uint64_t)curaddr, curlen));
6636 			}
6637 		}
6638 
6639 		KASSERT(lasttx != -1);
6640 
6641 		/*
6642 		 * Set up the command byte on the last descriptor of
6643 		 * the packet.  If we're in the interrupt delay window,
6644 		 * delay the interrupt.
6645 		 */
6646 		txq->txq_descs[lasttx].wtx_cmdlen |=
6647 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
6648 
6649 		/*
6650 		 * If VLANs are enabled and the packet has a VLAN tag, set
6651 		 * up the descriptor to encapsulate the packet for us.
6652 		 *
6653 		 * This is only valid on the last descriptor of the packet.
6654 		 */
6655 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6656 			txq->txq_descs[lasttx].wtx_cmdlen |=
6657 			    htole32(WTX_CMD_VLE);
6658 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
6659 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
6660 		}
6661 
6662 		txs->txs_lastdesc = lasttx;
6663 
6664 		DPRINTF(WM_DEBUG_TX,
6665 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
6666 		    device_xname(sc->sc_dev),
6667 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
6668 
6669 		/* Sync the descriptors we're using. */
6670 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
6671 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
6672 
6673 		/* Give the packet to the chip. */
6674 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
6675 
6676 		DPRINTF(WM_DEBUG_TX,
6677 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
6678 
6679 		DPRINTF(WM_DEBUG_TX,
6680 		    ("%s: TX: finished transmitting packet, job %d\n",
6681 		    device_xname(sc->sc_dev), txq->txq_snext));
6682 
6683 		/* Advance the tx pointer. */
6684 		txq->txq_free -= txs->txs_ndesc;
6685 		txq->txq_next = nexttx;
6686 
6687 		txq->txq_sfree--;
6688 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
6689 
6690 		/* Pass the packet to any BPF listeners. */
6691 		bpf_mtap(ifp, m0);
6692 	}
6693 
6694 	if (m0 != NULL) {
6695 		ifp->if_flags |= IFF_OACTIVE;
6696 		WM_Q_EVCNT_INCR(txq, txdrop);
6697 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
6698 			__func__));
6699 		m_freem(m0);
6700 	}
6701 
6702 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
6703 		/* No more slots; notify upper layer. */
6704 		ifp->if_flags |= IFF_OACTIVE;
6705 	}
6706 
6707 	if (txq->txq_free != ofree) {
6708 		/* Set a watchdog timer in case the chip flakes out. */
6709 		ifp->if_timer = 5;
6710 	}
6711 }
6712 
6713 /*
6714  * wm_nq_tx_offload:
6715  *
6716  *	Set up TCP/IP checksumming parameters for the
6717  *	specified packet, for NEWQUEUE devices
6718  */
6719 static int
6720 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
6721     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
6722 {
6723 	struct mbuf *m0 = txs->txs_mbuf;
6724 	struct m_tag *mtag;
6725 	uint32_t vl_len, mssidx, cmdc;
6726 	struct ether_header *eh;
6727 	int offset, iphl;
6728 
6729 	/*
6730 	 * XXX It would be nice if the mbuf pkthdr had offset
6731 	 * fields for the protocol headers.
6732 	 */
6733 	*cmdlenp = 0;
6734 	*fieldsp = 0;
6735 
6736 	eh = mtod(m0, struct ether_header *);
6737 	switch (htons(eh->ether_type)) {
6738 	case ETHERTYPE_IP:
6739 	case ETHERTYPE_IPV6:
6740 		offset = ETHER_HDR_LEN;
6741 		break;
6742 
6743 	case ETHERTYPE_VLAN:
6744 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
6745 		break;
6746 
6747 	default:
6748 		/* Don't support this protocol or encapsulation. */
6749 		*do_csum = false;
6750 		return 0;
6751 	}
6752 	*do_csum = true;
6753 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
6754 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
6755 
6756 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
6757 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
6758 
6759 	if ((m0->m_pkthdr.csum_flags &
6760 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
6761 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
6762 	} else {
6763 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
6764 	}
6765 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
6766 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
6767 
6768 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6769 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
6770 		     << NQTXC_VLLEN_VLAN_SHIFT);
6771 		*cmdlenp |= NQTX_CMD_VLE;
6772 	}
6773 
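	/* mssidx carries the MSS and L4 header length for TSO. */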
6774 	mssidx = 0;
6775 
6776 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
6777 		int hlen = offset + iphl;
6778 		int tcp_hlen;
6779 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
6780 
6781 		if (__predict_false(m0->m_len <
6782 				    (hlen + sizeof(struct tcphdr)))) {
6783 			/*
6784 			 * TCP/IP headers are not in the first mbuf; we need
6785 			 * to do this the slow and painful way.  Let's just
6786 			 * hope this doesn't happen very often.
6787 			 */
6788 			struct tcphdr th;
6789 
6790 			WM_Q_EVCNT_INCR(txq, txtsopain);
6791 
6792 			m_copydata(m0, hlen, sizeof(th), &th);
6793 			if (v4) {
6794 				struct ip ip;
6795 
6796 				m_copydata(m0, offset, sizeof(ip), &ip);
6797 				ip.ip_len = 0;
6798 				m_copyback(m0,
6799 				    offset + offsetof(struct ip, ip_len),
6800 				    sizeof(ip.ip_len), &ip.ip_len);
6801 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
6802 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
6803 			} else {
6804 				struct ip6_hdr ip6;
6805 
6806 				m_copydata(m0, offset, sizeof(ip6), &ip6);
6807 				ip6.ip6_plen = 0;
6808 				m_copyback(m0,
6809 				    offset + offsetof(struct ip6_hdr, ip6_plen),
6810 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
6811 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
6812 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
6813 			}
6814 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
6815 			    sizeof(th.th_sum), &th.th_sum);
6816 
6817 			tcp_hlen = th.th_off << 2;
6818 		} else {
6819 			/*
6820 			 * TCP/IP headers are in the first mbuf; we can do
6821 			 * this the easy way.
6822 			 */
6823 			struct tcphdr *th;
6824 
6825 			if (v4) {
6826 				struct ip *ip =
6827 				    (void *)(mtod(m0, char *) + offset);
6828 				th = (void *)(mtod(m0, char *) + hlen);
6829 
6830 				ip->ip_len = 0;
6831 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
6832 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
6833 			} else {
6834 				struct ip6_hdr *ip6 =
6835 				    (void *)(mtod(m0, char *) + offset);
6836 				th = (void *)(mtod(m0, char *) + hlen);
6837 
6838 				ip6->ip6_plen = 0;
6839 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
6840 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
6841 			}
6842 			tcp_hlen = th->th_off << 2;
6843 		}
6844 		hlen += tcp_hlen;
6845 		*cmdlenp |= NQTX_CMD_TSE;
6846 
6847 		if (v4) {
6848 			WM_Q_EVCNT_INCR(txq, txtso);
6849 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
6850 		} else {
6851 			WM_Q_EVCNT_INCR(txq, txtso6);
6852 			*fieldsp |= NQTXD_FIELDS_TUXSM;
6853 		}
6854 		*fieldsp |= ((m0->m_pkthdr.len - hlen)
6855 		    << NQTXD_FIELDS_PAYLEN_SHIFT);
6856 		KASSERT(((m0->m_pkthdr.len - hlen)
6857 		    & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6856 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
6857 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
6858 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
6859 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
6860 	} else {
6861 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
6862 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6863 	}
6864 
6865 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
6866 		*fieldsp |= NQTXD_FIELDS_IXSM;
6867 		cmdc |= NQTXC_CMD_IP4;
6868 	}
6869 
6870 	if (m0->m_pkthdr.csum_flags &
6871 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6872 		WM_Q_EVCNT_INCR(txq, txtusum);
6873 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6874 			cmdc |= NQTXC_CMD_TCP;
6875 		} else {
6876 			cmdc |= NQTXC_CMD_UDP;
6877 		}
6878 		cmdc |= NQTXC_CMD_IP4;
6879 		*fieldsp |= NQTXD_FIELDS_TUXSM;
6880 	}
6881 	if (m0->m_pkthdr.csum_flags &
6882 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6883 		WM_Q_EVCNT_INCR(txq, txtusum6);
6884 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6885 			cmdc |= NQTXC_CMD_TCP;
6886 		} else {
6887 			cmdc |= NQTXC_CMD_UDP;
6888 		}
6889 		cmdc |= NQTXC_CMD_IP6;
6890 		*fieldsp |= NQTXD_FIELDS_TUXSM;
6891 	}
6892 
6893 	/* Fill in the context descriptor. */
6894 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
6895 	    htole32(vl_len);
6896 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
6897 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
6898 	    htole32(cmdc);
6899 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
6900 	    htole32(mssidx);
6901 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6902 	DPRINTF(WM_DEBUG_TX,
6903 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
6904 	    txq->txq_next, 0, vl_len));
6905 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
6906 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6907 	txs->txs_ndesc++;
6908 	return 0;
6909 }
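
/*
 * A minimal sketch (illustrative only) of how the TSO parameters
 * computed above pack into the advanced context descriptor, using the
 * same shift/mask macros:
 *
 *	*fieldsp |= (m0->m_pkthdr.len - hlen)
 *	    << NQTXD_FIELDS_PAYLEN_SHIFT;
 *	mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT)
 *	    | (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
 *
 * where hlen covers the L2+L3+L4 headers at this point and tcp_hlen is
 * th_off << 2, the TCP header length in bytes.
 */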
6910 
6911 /*
6912  * wm_nq_start:		[ifnet interface function]
6913  *
6914  *	Start packet transmission on the interface for NEWQUEUE devices
6915  */
6916 static void
6917 wm_nq_start(struct ifnet *ifp)
6918 {
6919 	struct wm_softc *sc = ifp->if_softc;
6920 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6921 
6922 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
6923 
6924 	/*
6925 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
6926 	 */
6927 
6928 	mutex_enter(txq->txq_lock);
6929 	if (!txq->txq_stopping)
6930 		wm_nq_start_locked(ifp);
6931 	mutex_exit(txq->txq_lock);
6932 }
6933 
6934 static void
6935 wm_nq_start_locked(struct ifnet *ifp)
6936 {
6937 	struct wm_softc *sc = ifp->if_softc;
6938 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6939 
6940 	wm_nq_send_common_locked(ifp, txq, false);
6941 }
6942 
6943 static int
6944 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
6945 {
6946 	int qid;
6947 	struct wm_softc *sc = ifp->if_softc;
6948 	struct wm_txqueue *txq;
6949 
6950 	qid = wm_select_txqueue(ifp, m);
6951 	txq = &sc->sc_queue[qid].wmq_txq;
6952 
6953 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
6954 		m_freem(m);
6955 		WM_Q_EVCNT_INCR(txq, txdrop);
6956 		return ENOBUFS;
6957 	}
6958 
6959 	/*
6960 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
6961 	 */
6962 	ifp->if_obytes += m->m_pkthdr.len;
6963 	if (m->m_flags & M_MCAST)
6964 		ifp->if_omcasts++;
6965 
6966 	if (mutex_tryenter(txq->txq_lock)) {
6967 		if (!txq->txq_stopping)
6968 			wm_nq_transmit_locked(ifp, txq);
6969 		mutex_exit(txq->txq_lock);
6970 	}
6971 
6972 	return 0;
6973 }
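
/*
 * In outline, the if_transmit path above is:
 *
 *	qid = wm_select_txqueue(ifp, m);	(pick a queue)
 *	pcq_put(txq->txq_interq, m);		(always enqueue first)
 *	if (mutex_tryenter(txq->txq_lock))	(drain only if uncontended)
 *		wm_nq_transmit_locked(ifp, txq);
 *
 * If the trylock fails, the current lock holder or a deferred start
 * will service the pcq later, so returning 0 without transmitting
 * here is still correct.
 */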
6974 
6975 static void
6976 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
6977 {
6978 
6979 	wm_nq_send_common_locked(ifp, txq, true);
6980 }
6981 
6982 static void
6983 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
6984     bool is_transmit)
6985 {
6986 	struct wm_softc *sc = ifp->if_softc;
6987 	struct mbuf *m0;
6988 	struct m_tag *mtag;
6989 	struct wm_txsoft *txs;
6990 	bus_dmamap_t dmamap;
6991 	int error, nexttx, lasttx = -1, seg, segs_needed;
6992 	bool do_csum, sent;
6993 
6994 	KASSERT(mutex_owned(txq->txq_lock));
6995 
6996 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
6997 		return;
6998 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
6999 		return;
7000 
7001 	sent = false;
7002 
7003 	/*
7004 	 * Loop through the send queue, setting up transmit descriptors
7005 	 * until we drain the queue, or use up all available transmit
7006 	 * descriptors.
7007 	 */
7008 	for (;;) {
7009 		m0 = NULL;
7010 
7011 		/* Get a work queue entry. */
7012 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
7013 			wm_txeof(sc, txq);
7014 			if (txq->txq_sfree == 0) {
7015 				DPRINTF(WM_DEBUG_TX,
7016 				    ("%s: TX: no free job descriptors\n",
7017 					device_xname(sc->sc_dev)));
7018 				WM_Q_EVCNT_INCR(txq, txsstall);
7019 				break;
7020 			}
7021 		}
7022 
7023 		/* Grab a packet off the queue. */
7024 		if (is_transmit)
7025 			m0 = pcq_get(txq->txq_interq);
7026 		else
7027 			IFQ_DEQUEUE(&ifp->if_snd, m0);
7028 		if (m0 == NULL)
7029 			break;
7030 
7031 		DPRINTF(WM_DEBUG_TX,
7032 		    ("%s: TX: have packet to transmit: %p\n",
7033 		    device_xname(sc->sc_dev), m0));
7034 
7035 		txs = &txq->txq_soft[txq->txq_snext];
7036 		dmamap = txs->txs_dmamap;
7037 
7038 		/*
7039 		 * Load the DMA map.  If this fails, the packet either
7040 		 * didn't fit in the allotted number of segments, or we
7041 		 * were short on resources.  For the too-many-segments
7042 		 * case, we simply report an error and drop the packet,
7043 		 * since we can't sanely copy a jumbo packet to a single
7044 		 * buffer.
7045 		 */
7046 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
7047 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
7048 		if (error) {
7049 			if (error == EFBIG) {
7050 				WM_Q_EVCNT_INCR(txq, txdrop);
7051 				log(LOG_ERR, "%s: Tx packet consumes too many "
7052 				    "DMA segments, dropping...\n",
7053 				    device_xname(sc->sc_dev));
7054 				wm_dump_mbuf_chain(sc, m0);
7055 				m_freem(m0);
7056 				continue;
7057 			}
7058 			/* Short on resources, just stop for now. */
7059 			DPRINTF(WM_DEBUG_TX,
7060 			    ("%s: TX: dmamap load failed: %d\n",
7061 			    device_xname(sc->sc_dev), error));
7062 			break;
7063 		}
7064 
7065 		segs_needed = dmamap->dm_nsegs;
7066 
7067 		/*
7068 		 * Ensure we have enough descriptors free to describe
7069 		 * the packet.  Note, we always reserve one descriptor
7070 		 * at the end of the ring due to the semantics of the
7071 		 * TDT register, plus one more in the event we need
7072 		 * to load offload context.
7073 		 */
7074 		if (segs_needed > txq->txq_free - 2) {
7075 			/*
7076 			 * Not enough free descriptors to transmit this
7077 			 * packet.  We haven't committed anything yet,
7078 			 * so just unload the DMA map, put the packet
7079 			 * pack on the queue, and punt.  Notify the upper
7080 			 * back on the queue, and punt.  Notify the upper
7081 			 */
7082 			DPRINTF(WM_DEBUG_TX,
7083 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
7084 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
7085 			    segs_needed, txq->txq_free - 1));
7086 			txq->txq_flags |= WM_TXQ_NO_SPACE;
7087 			bus_dmamap_unload(sc->sc_dmat, dmamap);
7088 			WM_Q_EVCNT_INCR(txq, txdstall);
7089 			break;
7090 		}
7091 
7092 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
7093 
7094 		DPRINTF(WM_DEBUG_TX,
7095 		    ("%s: TX: packet has %d (%d) DMA segments\n",
7096 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
7097 
7098 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
7099 
7100 		/*
7101 		 * Store a pointer to the packet so that we can free it
7102 		 * later.
7103 		 *
7104 		 * Initially, we consider the number of descriptors the
7105 		 * packet uses to be the number of DMA segments.  This may be
7106 		 * incremented by 1 if we do checksum offload (a descriptor
7107 		 * is used to set the checksum context).
7108 		 */
7109 		txs->txs_mbuf = m0;
7110 		txs->txs_firstdesc = txq->txq_next;
7111 		txs->txs_ndesc = segs_needed;
7112 
7113 		/* Set up offload parameters for this packet. */
7114 		uint32_t cmdlen, fields, dcmdlen;
7115 		if (m0->m_pkthdr.csum_flags &
7116 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
7117 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7118 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
7119 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
7120 			    &do_csum) != 0) {
7121 				/* Error message already displayed. */
7122 				bus_dmamap_unload(sc->sc_dmat, dmamap);
7123 				continue;
7124 			}
7125 		} else {
7126 			do_csum = false;
7127 			cmdlen = 0;
7128 			fields = 0;
7129 		}
7130 
7131 		/* Sync the DMA map. */
7132 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
7133 		    BUS_DMASYNC_PREWRITE);
7134 
7135 		/* Initialize the first transmit descriptor. */
7136 		nexttx = txq->txq_next;
7137 		if (!do_csum) {
7138 			/* setup a legacy descriptor */
7139 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
7140 			    dmamap->dm_segs[0].ds_addr);
7141 			txq->txq_descs[nexttx].wtx_cmdlen =
7142 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
7143 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
7144 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
7145 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
7146 			    NULL) {
7147 				txq->txq_descs[nexttx].wtx_cmdlen |=
7148 				    htole32(WTX_CMD_VLE);
7149 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
7150 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
7151 			} else {
7152 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
7153 			}
7154 			dcmdlen = 0;
7155 		} else {
7156 			/* setup an advanced data descriptor */
7157 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
7158 			    htole64(dmamap->dm_segs[0].ds_addr);
7159 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
7160 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
7161 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
7162 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
7163 			    htole32(fields);
7164 			DPRINTF(WM_DEBUG_TX,
7165 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
7166 			    device_xname(sc->sc_dev), nexttx,
7167 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
7168 			DPRINTF(WM_DEBUG_TX,
7169 			    ("\t 0x%08x%08x\n", fields,
7170 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
7171 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
7172 		}
7173 
7174 		lasttx = nexttx;
7175 		nexttx = WM_NEXTTX(txq, nexttx);
7176 		/*
7177 		 * Fill in the next descriptors.  The legacy and advanced
7178 		 * formats are the same from here on.
7179 		 */
7180 		for (seg = 1; seg < dmamap->dm_nsegs;
7181 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
7182 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
7183 			    htole64(dmamap->dm_segs[seg].ds_addr);
7184 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
7185 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
7186 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
7187 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
7188 			lasttx = nexttx;
7189 
7190 			DPRINTF(WM_DEBUG_TX,
7191 			    ("%s: TX: desc %d: %#" PRIx64 ", "
7192 			     "len %#04zx\n",
7193 			    device_xname(sc->sc_dev), nexttx,
7194 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
7195 			    dmamap->dm_segs[seg].ds_len));
7196 		}
7197 
7198 		KASSERT(lasttx != -1);
7199 
7200 		/*
7201 		 * Set up the command byte on the last descriptor of
7202 		 * the packet.  If we're in the interrupt delay window,
7203 		 * delay the interrupt.
7204 		 */
7205 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
7206 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
7207 		txq->txq_descs[lasttx].wtx_cmdlen |=
7208 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
7209 
7210 		txs->txs_lastdesc = lasttx;
7211 
7212 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
7213 		    device_xname(sc->sc_dev),
7214 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
7215 
7216 		/* Sync the descriptors we're using. */
7217 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
7218 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
7219 
7220 		/* Give the packet to the chip. */
7221 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
7222 		sent = true;
7223 
7224 		DPRINTF(WM_DEBUG_TX,
7225 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
7226 
7227 		DPRINTF(WM_DEBUG_TX,
7228 		    ("%s: TX: finished transmitting packet, job %d\n",
7229 		    device_xname(sc->sc_dev), txq->txq_snext));
7230 
7231 		/* Advance the tx pointer. */
7232 		txq->txq_free -= txs->txs_ndesc;
7233 		txq->txq_next = nexttx;
7234 
7235 		txq->txq_sfree--;
7236 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
7237 
7238 		/* Pass the packet to any BPF listeners. */
7239 		bpf_mtap(ifp, m0);
7240 	}
7241 
7242 	if (m0 != NULL) {
7243 		txq->txq_flags |= WM_TXQ_NO_SPACE;
7244 		WM_Q_EVCNT_INCR(txq, txdrop);
7245 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
7246 			__func__));
7247 		m_freem(m0);
7248 	}
7249 
7250 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
7251 		/* No more slots; notify upper layer. */
7252 		txq->txq_flags |= WM_TXQ_NO_SPACE;
7253 	}
7254 
7255 	if (sent) {
7256 		/* Set a watchdog timer in case the chip flakes out. */
7257 		ifp->if_timer = 5;
7258 	}
7259 }
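
/*
 * Descriptor budgeting in the loop above, as a sketch: each packet
 * needs dm_nsegs data descriptors, possibly one more for an offload
 * context, and one descriptor is always reserved because of the
 * semantics of the TDT register; hence the check
 *
 *	if (segs_needed > txq->txq_free - 2) {
 *		txq->txq_flags |= WM_TXQ_NO_SPACE;
 *		break;
 *	}
 */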
7260 
7261 static void
7262 wm_deferred_start(struct ifnet *ifp)
7263 {
7264 	struct wm_softc *sc = ifp->if_softc;
7265 	int qid = 0;
7266 
7267 	/*
7268 	 * Try to transmit on all Tx queues.  It might be better to pass
7269 	 * a specific txq somehow and transmit only on that queue.
7270 	 */
7271 restart:
7272 	WM_CORE_LOCK(sc);
7273 	if (sc->sc_core_stopping)
7274 		goto out;
7275 
7276 	for (; qid < sc->sc_nqueues; qid++) {
7277 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
7278 
7279 		if (!mutex_tryenter(txq->txq_lock))
7280 			continue;
7281 
7282 		if (txq->txq_stopping) {
7283 			mutex_exit(txq->txq_lock);
7284 			continue;
7285 		}
7286 		WM_CORE_UNLOCK(sc);
7287 
7288 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7289 			/* XXX need for ALTQ */
7290 			if (qid == 0)
7291 				wm_nq_start_locked(ifp);
7292 			wm_nq_transmit_locked(ifp, txq);
7293 		} else {
7294 			/* XXX need for ALTQ */
7295 			if (qid == 0)
7296 				wm_start_locked(ifp);
7297 			wm_transmit_locked(ifp, txq);
7298 		}
7299 		mutex_exit(txq->txq_lock);
7300 
7301 		qid++;
7302 		goto restart;
7303 	}
7304 out:
7305 	WM_CORE_UNLOCK(sc);
7306 }
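
/*
 * Note the locking dance above: the core lock may not be held across
 * the per-queue transmit calls, so the pattern is roughly
 *
 *	restart:
 *		WM_CORE_LOCK(sc);
 *		trylock txq_lock for the next qid, WM_CORE_UNLOCK(sc);
 *		transmit, release txq_lock, qid++;
 *		goto restart;
 *
 * qid survives the restart, so each queue is visited at most once.
 */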
7307 
7308 /* Interrupt */
7309 
7310 /*
7311  * wm_txeof:
7312  *
7313  *	Helper; handle transmit interrupts.
7314  */
7315 static int
7316 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
7317 {
7318 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7319 	struct wm_txsoft *txs;
7320 	bool processed = false;
7321 	int count = 0;
7322 	int i;
7323 	uint8_t status;
7324 
7325 	KASSERT(mutex_owned(txq->txq_lock));
7326 
7327 	if (txq->txq_stopping)
7328 		return 0;
7329 
7330 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7331 		txq->txq_flags &= ~WM_TXQ_NO_SPACE;
7332 	else
7333 		ifp->if_flags &= ~IFF_OACTIVE;
7334 
7335 	/*
7336 	 * Go through the Tx list and free mbufs for those
7337 	 * frames which have been transmitted.
7338 	 */
7339 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
7340 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
7341 		txs = &txq->txq_soft[i];
7342 
7343 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
7344 			device_xname(sc->sc_dev), i));
7345 
7346 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
7347 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
7348 
7349 		status =
7350 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
7351 		if ((status & WTX_ST_DD) == 0) {
7352 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
7353 			    BUS_DMASYNC_PREREAD);
7354 			break;
7355 		}
7356 
7357 		processed = true;
7358 		count++;
7359 		DPRINTF(WM_DEBUG_TX,
7360 		    ("%s: TX: job %d done: descs %d..%d\n",
7361 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
7362 		    txs->txs_lastdesc));
7363 
7364 		/*
7365 		 * XXX We should probably be using the statistics
7366 		 * XXX registers, but I don't know if they exist
7367 		 * XXX on chips before the i82544.
7368 		 */
7369 
7370 #ifdef WM_EVENT_COUNTERS
7371 		if (status & WTX_ST_TU)
7372 			WM_Q_EVCNT_INCR(txq, tu);
7373 #endif /* WM_EVENT_COUNTERS */
7374 
7375 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
7376 			ifp->if_oerrors++;
7377 			if (status & WTX_ST_LC)
7378 				log(LOG_WARNING, "%s: late collision\n",
7379 				    device_xname(sc->sc_dev));
7380 			else if (status & WTX_ST_EC) {
7381 				ifp->if_collisions += 16;
7382 				log(LOG_WARNING, "%s: excessive collisions\n",
7383 				    device_xname(sc->sc_dev));
7384 			}
7385 		} else
7386 			ifp->if_opackets++;
7387 
7388 		txq->txq_free += txs->txs_ndesc;
7389 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
7390 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
7391 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
7392 		m_freem(txs->txs_mbuf);
7393 		txs->txs_mbuf = NULL;
7394 	}
7395 
7396 	/* Update the dirty transmit buffer pointer. */
7397 	txq->txq_sdirty = i;
7398 	DPRINTF(WM_DEBUG_TX,
7399 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
7400 
7401 	if (count != 0)
7402 		rnd_add_uint32(&sc->rnd_source, count);
7403 
7404 	/*
7405 	 * If there are no more pending transmissions, cancel the watchdog
7406 	 * timer.
7407 	 */
7408 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
7409 		ifp->if_timer = 0;
7410 
7411 	return processed;
7412 }
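
/*
 * Reclaim sketch: a transmit job is finished once the descriptor-done
 * bit appears in the status of its last descriptor:
 *
 *	status = txq->txq_descs[lastdesc].wtx_fields.wtxu_status;
 *	if ((status & WTX_ST_DD) == 0)
 *		break;	(the chip is still working on it)
 *
 * Only then are the job's DMA map and mbuf released above.
 */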
7413 
7414 /*
7415  * wm_rxeof:
7416  *
7417  *	Helper; handle receive interrupts.
7418  */
7419 static void
7420 wm_rxeof(struct wm_rxqueue *rxq)
7421 {
7422 	struct wm_softc *sc = rxq->rxq_sc;
7423 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7424 	struct wm_rxsoft *rxs;
7425 	struct mbuf *m;
7426 	int i, len;
7427 	int count = 0;
7428 	uint8_t status, errors;
7429 	uint16_t vlantag;
7430 
7431 	KASSERT(mutex_owned(rxq->rxq_lock));
7432 
7433 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
7434 		rxs = &rxq->rxq_soft[i];
7435 
7436 		DPRINTF(WM_DEBUG_RX,
7437 		    ("%s: RX: checking descriptor %d\n",
7438 		    device_xname(sc->sc_dev), i));
7439 
7440 		wm_cdrxsync(rxq, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
7441 
7442 		status = rxq->rxq_descs[i].wrx_status;
7443 		errors = rxq->rxq_descs[i].wrx_errors;
7444 		len = le16toh(rxq->rxq_descs[i].wrx_len);
7445 		vlantag = rxq->rxq_descs[i].wrx_special;
7446 
7447 		if ((status & WRX_ST_DD) == 0) {
7448 			/* We have processed all of the receive descriptors. */
7449 			wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
7450 			break;
7451 		}
7452 
7453 		count++;
7454 		if (__predict_false(rxq->rxq_discard)) {
7455 			DPRINTF(WM_DEBUG_RX,
7456 			    ("%s: RX: discarding contents of descriptor %d\n",
7457 			    device_xname(sc->sc_dev), i));
7458 			wm_init_rxdesc(rxq, i);
7459 			if (status & WRX_ST_EOP) {
7460 				/* Reset our state. */
7461 				DPRINTF(WM_DEBUG_RX,
7462 				    ("%s: RX: resetting rxdiscard -> 0\n",
7463 				    device_xname(sc->sc_dev)));
7464 				rxq->rxq_discard = 0;
7465 			}
7466 			continue;
7467 		}
7468 
7469 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
7470 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
7471 
7472 		m = rxs->rxs_mbuf;
7473 
7474 		/*
7475 		 * Add a new receive buffer to the ring, unless of
7476 		 * course the length is zero. Treat the latter as a
7477 		 * failed mapping.
7478 		 */
7479 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
7480 			/*
7481 			 * Failed, throw away what we've done so
7482 			 * far, and discard the rest of the packet.
7483 			 */
7484 			ifp->if_ierrors++;
7485 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
7486 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
7487 			wm_init_rxdesc(rxq, i);
7488 			if ((status & WRX_ST_EOP) == 0)
7489 				rxq->rxq_discard = 1;
7490 			if (rxq->rxq_head != NULL)
7491 				m_freem(rxq->rxq_head);
7492 			WM_RXCHAIN_RESET(rxq);
7493 			DPRINTF(WM_DEBUG_RX,
7494 			    ("%s: RX: Rx buffer allocation failed, "
7495 			    "dropping packet%s\n", device_xname(sc->sc_dev),
7496 			    rxq->rxq_discard ? " (discard)" : ""));
7497 			continue;
7498 		}
7499 
7500 		m->m_len = len;
7501 		rxq->rxq_len += len;
7502 		DPRINTF(WM_DEBUG_RX,
7503 		    ("%s: RX: buffer at %p len %d\n",
7504 		    device_xname(sc->sc_dev), m->m_data, len));
7505 
7506 		/* If this is not the end of the packet, keep looking. */
7507 		if ((status & WRX_ST_EOP) == 0) {
7508 			WM_RXCHAIN_LINK(rxq, m);
7509 			DPRINTF(WM_DEBUG_RX,
7510 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
7511 			    device_xname(sc->sc_dev), rxq->rxq_len));
7512 			continue;
7513 		}
7514 
7515 		/*
7516 		 * Okay, we have the entire packet now.  The chip is
7517 		 * configured to include the FCS except on the I350 and
7518 		 * I21[01] (not all chips can be configured to strip it),
7519 		 * so we need to trim it ourselves.  We may need to adjust
7520 		 * the length of the previous mbuf in the chain if the
7521 		 * current mbuf is too short.
7522 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
7523 		 * register is always set on the I350, so we don't trim it.
7524 		 */
7525 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
7526 		    && (sc->sc_type != WM_T_I210)
7527 		    && (sc->sc_type != WM_T_I211)) {
7528 			if (m->m_len < ETHER_CRC_LEN) {
7529 				rxq->rxq_tail->m_len
7530 				    -= (ETHER_CRC_LEN - m->m_len);
7531 				m->m_len = 0;
7532 			} else
7533 				m->m_len -= ETHER_CRC_LEN;
7534 			len = rxq->rxq_len - ETHER_CRC_LEN;
7535 		} else
7536 			len = rxq->rxq_len;
7537 
7538 		WM_RXCHAIN_LINK(rxq, m);
7539 
7540 		*rxq->rxq_tailp = NULL;
7541 		m = rxq->rxq_head;
7542 
7543 		WM_RXCHAIN_RESET(rxq);
7544 
7545 		DPRINTF(WM_DEBUG_RX,
7546 		    ("%s: RX: have entire packet, len -> %d\n",
7547 		    device_xname(sc->sc_dev), len));
7548 
7549 		/* If an error occurred, update stats and drop the packet. */
7550 		if (errors &
7551 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
7552 			if (errors & WRX_ER_SE)
7553 				log(LOG_WARNING, "%s: symbol error\n",
7554 				    device_xname(sc->sc_dev));
7555 			else if (errors & WRX_ER_SEQ)
7556 				log(LOG_WARNING, "%s: receive sequence error\n",
7557 				    device_xname(sc->sc_dev));
7558 			else if (errors & WRX_ER_CE)
7559 				log(LOG_WARNING, "%s: CRC error\n",
7560 				    device_xname(sc->sc_dev));
7561 			m_freem(m);
7562 			continue;
7563 		}
7564 
7565 		/* No errors.  Receive the packet. */
7566 		m_set_rcvif(m, ifp);
7567 		m->m_pkthdr.len = len;
7568 
7569 		/*
7570 		 * If VLANs are enabled, VLAN packets have been unwrapped
7571 		 * for us.  Associate the tag with the packet.
7572 		 */
7573 		/* XXXX should check for i350 and i354 */
7574 		if ((status & WRX_ST_VP) != 0) {
7575 			VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue);
7576 		}
7577 
7578 		/* Set up checksum info for this packet. */
7579 		if ((status & WRX_ST_IXSM) == 0) {
7580 			if (status & WRX_ST_IPCS) {
7581 				WM_Q_EVCNT_INCR(rxq, rxipsum);
7582 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
7583 				if (errors & WRX_ER_IPE)
7584 					m->m_pkthdr.csum_flags |=
7585 					    M_CSUM_IPv4_BAD;
7586 			}
7587 			if (status & WRX_ST_TCPCS) {
7588 				/*
7589 				 * Note: we don't know if this was TCP or UDP,
7590 				 * so we just set both bits, and expect the
7591 				 * upper layers to deal.
7592 				 */
7593 				WM_Q_EVCNT_INCR(rxq, rxtusum);
7594 				m->m_pkthdr.csum_flags |=
7595 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7596 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
7597 				if (errors & WRX_ER_TCPE)
7598 					m->m_pkthdr.csum_flags |=
7599 					    M_CSUM_TCP_UDP_BAD;
7600 			}
7601 		}
7602 
7603 		mutex_exit(rxq->rxq_lock);
7604 
7605 		/* Pass it on. */
7606 		if_percpuq_enqueue(sc->sc_ipq, m);
7607 
7608 		mutex_enter(rxq->rxq_lock);
7609 
7610 		if (rxq->rxq_stopping)
7611 			break;
7612 	}
7613 
7614 	/* Update the receive pointer. */
7615 	rxq->rxq_ptr = i;
7616 	if (count != 0)
7617 		rnd_add_uint32(&sc->rnd_source, count);
7618 
7619 	DPRINTF(WM_DEBUG_RX,
7620 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
7621 }
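
/*
 * Receive chaining above, in outline: a frame larger than one buffer
 * spans several descriptors, so each fragment is linked onto rxq_head
 * with WM_RXCHAIN_LINK(rxq, m) until a descriptor carries WRX_ST_EOP.
 * Only then is the FCS trimmed (on chips that don't strip it) and the
 * completed chain handed to if_percpuq_enqueue(), with rxq_lock
 * dropped around that call.
 */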
7622 
7623 /*
7624  * wm_linkintr_gmii:
7625  *
7626  *	Helper; handle link interrupts for GMII.
7627  */
7628 static void
7629 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
7630 {
7631 
7632 	KASSERT(WM_CORE_LOCKED(sc));
7633 
7634 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7635 		__func__));
7636 
7637 	if (icr & ICR_LSC) {
7638 		uint32_t reg;
7639 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
7640 
7641 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
7642 			wm_gig_downshift_workaround_ich8lan(sc);
7643 
7644 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
7645 			device_xname(sc->sc_dev)));
7646 		mii_pollstat(&sc->sc_mii);
7647 		if (sc->sc_type == WM_T_82543) {
7648 			int miistatus, active;
7649 
7650 			/*
7651 			 * With 82543, we need to force speed and
7652 			 * duplex on the MAC equal to what the PHY
7653 			 * speed and duplex configuration is.
7654 			 */
7655 			miistatus = sc->sc_mii.mii_media_status;
7656 
7657 			if (miistatus & IFM_ACTIVE) {
7658 				active = sc->sc_mii.mii_media_active;
7659 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
7660 				switch (IFM_SUBTYPE(active)) {
7661 				case IFM_10_T:
7662 					sc->sc_ctrl |= CTRL_SPEED_10;
7663 					break;
7664 				case IFM_100_TX:
7665 					sc->sc_ctrl |= CTRL_SPEED_100;
7666 					break;
7667 				case IFM_1000_T:
7668 					sc->sc_ctrl |= CTRL_SPEED_1000;
7669 					break;
7670 				default:
7671 					/*
7672 					 * fiber?
7673 					 * Fiber?
7674 					 * Should not enter here.
7675 					printf("unknown media (%x)\n", active);
7676 					break;
7677 				}
7678 				if (active & IFM_FDX)
7679 					sc->sc_ctrl |= CTRL_FD;
7680 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7681 			}
7682 		} else if ((sc->sc_type == WM_T_ICH8)
7683 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
7684 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
7685 		} else if (sc->sc_type == WM_T_PCH) {
7686 			wm_k1_gig_workaround_hv(sc,
7687 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
7688 		}
7689 
7690 		if ((sc->sc_phytype == WMPHY_82578)
7691 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
7692 			== IFM_1000_T)) {
7693 
7694 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
7695 				delay(200*1000); /* XXX too big */
7696 
7697 				/* Link stall fix for link up */
7698 				wm_gmii_hv_writereg(sc->sc_dev, 1,
7699 				    HV_MUX_DATA_CTRL,
7700 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
7701 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
7702 				wm_gmii_hv_writereg(sc->sc_dev, 1,
7703 				    HV_MUX_DATA_CTRL,
7704 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
7705 			}
7706 		}
7707 		/*
7708 		 * I217 Packet Loss issue:
7709 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
7710 		 * on power up.
7711 		 * Set the Beacon Duration for I217 to 8 usec
7712 		 */
7713 		if ((sc->sc_type == WM_T_PCH_LPT)
7714 		    || (sc->sc_type == WM_T_PCH_SPT)) {
7715 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
7716 			reg &= ~FEXTNVM4_BEACON_DURATION;
7717 			reg |= FEXTNVM4_BEACON_DURATION_8US;
7718 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
7719 		}
7720 
7721 		/* XXX Work-around I218 hang issue */
7722 		/* e1000_k1_workaround_lpt_lp() */
7723 
7724 		if ((sc->sc_type == WM_T_PCH_LPT)
7725 		    || (sc->sc_type == WM_T_PCH_SPT)) {
7726 			/*
7727 			 * Set platform power management values for Latency
7728 			 * Tolerance Reporting (LTR)
7729 			 */
7730 			wm_platform_pm_pch_lpt(sc,
7731 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
7732 				    != 0));
7733 		}
7734 
7735 		/* FEXTNVM6 K1-off workaround */
7736 		if (sc->sc_type == WM_T_PCH_SPT) {
7737 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
7738 			if (CSR_READ(sc, WMREG_PCIEANACFG)
7739 			    & FEXTNVM6_K1_OFF_ENABLE)
7740 				reg |= FEXTNVM6_K1_OFF_ENABLE;
7741 			else
7742 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
7743 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
7744 		}
7745 	} else if (icr & ICR_RXSEQ) {
7746 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
7747 			device_xname(sc->sc_dev)));
7748 	}
7749 }
7750 
7751 /*
7752  * wm_linkintr_tbi:
7753  *
7754  *	Helper; handle link interrupts for TBI mode.
7755  */
7756 static void
7757 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
7758 {
7759 	uint32_t status;
7760 
7761 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7762 		__func__));
7763 
7764 	status = CSR_READ(sc, WMREG_STATUS);
7765 	if (icr & ICR_LSC) {
7766 		if (status & STATUS_LU) {
7767 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
7768 			    device_xname(sc->sc_dev),
7769 			    (status & STATUS_FD) ? "FDX" : "HDX"));
7770 			/*
7771 			 * NOTE: CTRL will update TFCE and RFCE automatically,
7772 			 * so we should update sc->sc_ctrl
7773 			 */
7774 
7775 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7776 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7777 			sc->sc_fcrtl &= ~FCRTL_XONE;
7778 			if (status & STATUS_FD)
7779 				sc->sc_tctl |=
7780 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7781 			else
7782 				sc->sc_tctl |=
7783 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7784 			if (sc->sc_ctrl & CTRL_TFCE)
7785 				sc->sc_fcrtl |= FCRTL_XONE;
7786 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7787 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7788 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
7789 				      sc->sc_fcrtl);
7790 			sc->sc_tbi_linkup = 1;
7791 		} else {
7792 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
7793 			    device_xname(sc->sc_dev)));
7794 			sc->sc_tbi_linkup = 0;
7795 		}
7796 		/* Update LED */
7797 		wm_tbi_serdes_set_linkled(sc);
7798 	} else if (icr & ICR_RXSEQ) {
7799 		DPRINTF(WM_DEBUG_LINK,
7800 		    ("%s: LINK: Receive sequence error\n",
7801 		    device_xname(sc->sc_dev)));
7802 	}
7803 }
7804 
7805 /*
7806  * wm_linkintr_serdes:
7807  *
7808  *	Helper; handle link interrupts for SERDES mode.
7809  */
7810 static void
7811 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
7812 {
7813 	struct mii_data *mii = &sc->sc_mii;
7814 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7815 	uint32_t pcs_adv, pcs_lpab, reg;
7816 
7817 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7818 		__func__));
7819 
7820 	if (icr & ICR_LSC) {
7821 		/* Check PCS */
7822 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
7823 		if ((reg & PCS_LSTS_LINKOK) != 0) {
7824 			mii->mii_media_status |= IFM_ACTIVE;
7825 			sc->sc_tbi_linkup = 1;
7826 		} else {
7827 			mii->mii_media_status |= IFM_NONE;
7828 			sc->sc_tbi_linkup = 0;
7829 			wm_tbi_serdes_set_linkled(sc);
7830 			return;
7831 		}
7832 		mii->mii_media_active |= IFM_1000_SX;
7833 		if ((reg & PCS_LSTS_FDX) != 0)
7834 			mii->mii_media_active |= IFM_FDX;
7835 		else
7836 			mii->mii_media_active |= IFM_HDX;
7837 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7838 			/* Check flow */
7839 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
7840 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
7841 				DPRINTF(WM_DEBUG_LINK,
7842 				    ("XXX LINKOK but not ACOMP\n"));
7843 				return;
7844 			}
7845 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
7846 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
7847 			DPRINTF(WM_DEBUG_LINK,
7848 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
7849 			if ((pcs_adv & TXCW_SYM_PAUSE)
7850 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
7851 				mii->mii_media_active |= IFM_FLOW
7852 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
7853 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
7854 			    && (pcs_adv & TXCW_ASYM_PAUSE)
7855 			    && (pcs_lpab & TXCW_SYM_PAUSE)
7856 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
7857 				mii->mii_media_active |= IFM_FLOW
7858 				    | IFM_ETH_TXPAUSE;
7859 			else if ((pcs_adv & TXCW_SYM_PAUSE)
7860 			    && (pcs_adv & TXCW_ASYM_PAUSE)
7861 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
7862 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
7863 				mii->mii_media_active |= IFM_FLOW
7864 				    | IFM_ETH_RXPAUSE;
7865 		}
7866 		/* Update LED */
7867 		wm_tbi_serdes_set_linkled(sc);
7868 	} else {
7869 		DPRINTF(WM_DEBUG_LINK,
7870 		    ("%s: LINK: Receive sequence error\n",
7871 		    device_xname(sc->sc_dev)));
7872 	}
7873 }
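
/*
 * The pause resolution above follows the usual 802.3 scheme, driven
 * by the SYM/ASYM pause bits read from WMREG_PCS_ANADV (what we
 * advertised) and WMREG_PCS_LPAB (what the link partner advertised):
 *
 *	local SYM ASYM	partner SYM ASYM	result
 *	       1   x	        1   x		TX and RX pause
 *	       0   1	        1   1		TX pause only
 *	       1   1	        0   1		RX pause only
 */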
7874 
7875 /*
7876  * wm_linkintr:
7877  *
7878  *	Helper; handle link interrupts.
7879  */
7880 static void
7881 wm_linkintr(struct wm_softc *sc, uint32_t icr)
7882 {
7883 
7884 	KASSERT(WM_CORE_LOCKED(sc));
7885 
7886 	if (sc->sc_flags & WM_F_HAS_MII)
7887 		wm_linkintr_gmii(sc, icr);
7888 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
7889 	    && (sc->sc_type >= WM_T_82575))
7890 		wm_linkintr_serdes(sc, icr);
7891 	else
7892 		wm_linkintr_tbi(sc, icr);
7893 }
7894 
7895 /*
7896  * wm_intr_legacy:
7897  *
7898  *	Interrupt service routine for INTx and MSI.
7899  */
7900 static int
7901 wm_intr_legacy(void *arg)
7902 {
7903 	struct wm_softc *sc = arg;
7904 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7905 	struct wm_rxqueue *rxq = &sc->sc_queue[0].wmq_rxq;
7906 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7907 	uint32_t icr, rndval = 0;
7908 	int handled = 0;
7909 
7910 	DPRINTF(WM_DEBUG_TX,
7911 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
7912 	while (1 /* CONSTCOND */) {
7913 		icr = CSR_READ(sc, WMREG_ICR);
7914 		if ((icr & sc->sc_icr) == 0)
7915 			break;
7916 		if (rndval == 0)
7917 			rndval = icr;
7918 
7919 		mutex_enter(rxq->rxq_lock);
7920 
7921 		if (rxq->rxq_stopping) {
7922 			mutex_exit(rxq->rxq_lock);
7923 			break;
7924 		}
7925 
7926 		handled = 1;
7927 
7928 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7929 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
7930 			DPRINTF(WM_DEBUG_RX,
7931 			    ("%s: RX: got Rx intr 0x%08x\n",
7932 			    device_xname(sc->sc_dev),
7933 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
7934 			WM_Q_EVCNT_INCR(rxq, rxintr);
7935 		}
7936 #endif
7937 		wm_rxeof(rxq);
7938 
7939 		mutex_exit(rxq->rxq_lock);
7940 		mutex_enter(txq->txq_lock);
7941 
7942 		if (txq->txq_stopping) {
7943 			mutex_exit(txq->txq_lock);
7944 			break;
7945 		}
7946 
7947 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7948 		if (icr & ICR_TXDW) {
7949 			DPRINTF(WM_DEBUG_TX,
7950 			    ("%s: TX: got TXDW interrupt\n",
7951 			    device_xname(sc->sc_dev)));
7952 			WM_Q_EVCNT_INCR(txq, txdw);
7953 		}
7954 #endif
7955 		wm_txeof(sc, txq);
7956 
7957 		mutex_exit(txq->txq_lock);
7958 		WM_CORE_LOCK(sc);
7959 
7960 		if (sc->sc_core_stopping) {
7961 			WM_CORE_UNLOCK(sc);
7962 			break;
7963 		}
7964 
7965 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
7966 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
7967 			wm_linkintr(sc, icr);
7968 		}
7969 
7970 		WM_CORE_UNLOCK(sc);
7971 
7972 		if (icr & ICR_RXO) {
7973 #if defined(WM_DEBUG)
7974 			log(LOG_WARNING, "%s: Receive overrun\n",
7975 			    device_xname(sc->sc_dev));
7976 #endif /* defined(WM_DEBUG) */
7977 		}
7978 	}
7979 
7980 	rnd_add_uint32(&sc->rnd_source, rndval);
7981 
7982 	if (handled) {
7983 		/* Try to get more packets going. */
7984 		if_schedule_deferred_start(ifp);
7985 	}
7986 
7987 	return handled;
7988 }
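
/*
 * A note on the loop above: on these devices, reading WMREG_ICR
 * acknowledges (clears) the pending interrupt causes, so the handler
 * keeps re-reading until no cause it cares about remains:
 *
 *	while ((icr = CSR_READ(sc, WMREG_ICR)) & sc->sc_icr)
 *		service the Rx, Tx and link causes;
 */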
7989 
7990 static int
7991 wm_txrxintr_msix(void *arg)
7992 {
7993 	struct wm_queue *wmq = arg;
7994 	struct wm_txqueue *txq = &wmq->wmq_txq;
7995 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
7996 	struct wm_softc *sc = txq->txq_sc;
7997 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7998 
7999 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
8000 
8001 	DPRINTF(WM_DEBUG_TX,
8002 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
8003 
8004 	if (sc->sc_type == WM_T_82574)
8005 		CSR_WRITE(sc, WMREG_IMC,
		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
8006 	else if (sc->sc_type == WM_T_82575)
8007 		CSR_WRITE(sc, WMREG_EIMC,
		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
8008 	else
8009 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
8010 
8011 	mutex_enter(txq->txq_lock);
8012 
8013 	if (txq->txq_stopping) {
8014 		mutex_exit(txq->txq_lock);
8015 		return 0;
8016 	}
8017 
8018 	WM_Q_EVCNT_INCR(txq, txdw);
8019 	wm_txeof(sc, txq);
8020 
8021 	/* Try to get more packets going. */
8022 	if (pcq_peek(txq->txq_interq) != NULL)
8023 		if_schedule_deferred_start(ifp);
8024 	/*
8025 	 * Some upper-layer processing (e.g. ALTQ) still calls
8026 	 * ifp->if_start() directly.
8027 	 */
8028 	if (wmq->wmq_id == 0)
8029 		if_schedule_deferred_start(ifp);
8030 
8031 	mutex_exit(txq->txq_lock);
8032 
8033 	DPRINTF(WM_DEBUG_RX,
8034 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
8035 	mutex_enter(rxq->rxq_lock);
8036 
8037 	if (rxq->rxq_stopping) {
8038 		mutex_exit(rxq->rxq_lock);
8039 		return 0;
8040 	}
8041 
8042 	WM_Q_EVCNT_INCR(rxq, rxintr);
8043 	wm_rxeof(rxq);
8044 	mutex_exit(rxq->rxq_lock);
8045 
8046 	if (sc->sc_type == WM_T_82574)
8047 		CSR_WRITE(sc, WMREG_IMS,
		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
8048 	else if (sc->sc_type == WM_T_82575)
8049 		CSR_WRITE(sc, WMREG_EIMS,
		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
8050 	else
8051 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
8052 
8053 	return 1;
8054 }
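
/*
 * The bracketing above is the usual MSI-X pattern: the queue's
 * interrupt cause is masked on entry (WMREG_IMC on 82574, WMREG_EIMC
 * otherwise) and unmasked again on exit (WMREG_IMS/WMREG_EIMS), so
 * the queue cannot re-interrupt while its handler runs, e.g.
 *
 *	CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
 *	... wm_txeof()/wm_rxeof() ...
 *	CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
 */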
8055 
8056 /*
8057  * wm_linkintr_msix:
8058  *
8059  *	Interrupt service routine for link status change for MSI-X.
8060  */
8061 static int
8062 wm_linkintr_msix(void *arg)
8063 {
8064 	struct wm_softc *sc = arg;
8065 	uint32_t reg;
8066 
8067 	DPRINTF(WM_DEBUG_LINK,
8068 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
8069 
8070 	reg = CSR_READ(sc, WMREG_ICR);
8071 	WM_CORE_LOCK(sc);
8072 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
8073 		goto out;
8074 
8075 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
8076 	wm_linkintr(sc, ICR_LSC);
8077 
8078 out:
8079 	WM_CORE_UNLOCK(sc);
8080 
8081 	if (sc->sc_type == WM_T_82574)
8082 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
8083 	else if (sc->sc_type == WM_T_82575)
8084 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
8085 	else
8086 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
8087 
8088 	return 1;
8089 }
8090 
8091 /*
8092  * Media related.
8093  * GMII, SGMII, TBI (and SERDES)
8094  */
8095 
8096 /* Common */
8097 
8098 /*
8099  * wm_tbi_serdes_set_linkled:
8100  *
8101  *	Update the link LED on TBI and SERDES devices.
8102  */
8103 static void
8104 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
8105 {
8106 
8107 	if (sc->sc_tbi_linkup)
8108 		sc->sc_ctrl |= CTRL_SWDPIN(0);
8109 	else
8110 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
8111 
8112 	/* 82540 or newer devices are active low */
8113 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
8114 
8115 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8116 }
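
/*
 * The XOR above implements the LED polarity: on 82540 and newer the
 * SWDPIN(0) link LED is active low, so the bit derived from
 * sc_tbi_linkup is inverted before the write, effectively
 *
 *	bit = sc->sc_tbi_linkup ? CTRL_SWDPIN(0) : 0;
 *	if (sc->sc_type >= WM_T_82540)
 *		bit ^= CTRL_SWDPIN(0);
 */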
8117 
8118 /* GMII related */
8119 
8120 /*
8121  * wm_gmii_reset:
8122  *
8123  *	Reset the PHY.
8124  */
8125 static void
8126 wm_gmii_reset(struct wm_softc *sc)
8127 {
8128 	uint32_t reg;
8129 	int rv;
8130 
8131 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
8132 		device_xname(sc->sc_dev), __func__));
8133 
8134 	rv = sc->phy.acquire(sc);
8135 	if (rv != 0) {
8136 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8137 		    __func__);
8138 		return;
8139 	}
8140 
8141 	switch (sc->sc_type) {
8142 	case WM_T_82542_2_0:
8143 	case WM_T_82542_2_1:
8144 		/* null */
8145 		break;
8146 	case WM_T_82543:
8147 		/*
8148 		 * With 82543, we need to force speed and duplex on the MAC
8149 		 * equal to what the PHY speed and duplex configuration is.
8150 		 * In addition, we need to perform a hardware reset on the PHY
8151 		 * to take it out of reset.
8152 		 */
8153 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
8154 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8155 
8156 		/* The PHY reset pin is active-low. */
8157 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
8158 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
8159 		    CTRL_EXT_SWDPIN(4));
8160 		reg |= CTRL_EXT_SWDPIO(4);
8161 
8162 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
8163 		CSR_WRITE_FLUSH(sc);
8164 		delay(10*1000);
8165 
8166 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
8167 		CSR_WRITE_FLUSH(sc);
8168 		delay(150);
8169 #if 0
8170 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
8171 #endif
8172 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
8173 		break;
8174 	case WM_T_82544:	/* reset 10000us */
8175 	case WM_T_82540:
8176 	case WM_T_82545:
8177 	case WM_T_82545_3:
8178 	case WM_T_82546:
8179 	case WM_T_82546_3:
8180 	case WM_T_82541:
8181 	case WM_T_82541_2:
8182 	case WM_T_82547:
8183 	case WM_T_82547_2:
8184 	case WM_T_82571:	/* reset 100us */
8185 	case WM_T_82572:
8186 	case WM_T_82573:
8187 	case WM_T_82574:
8188 	case WM_T_82575:
8189 	case WM_T_82576:
8190 	case WM_T_82580:
8191 	case WM_T_I350:
8192 	case WM_T_I354:
8193 	case WM_T_I210:
8194 	case WM_T_I211:
8195 	case WM_T_82583:
8196 	case WM_T_80003:
8197 		/* generic reset */
8198 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
8199 		CSR_WRITE_FLUSH(sc);
8200 		delay(20000);
8201 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8202 		CSR_WRITE_FLUSH(sc);
8203 		delay(20000);
8204 
8205 		if ((sc->sc_type == WM_T_82541)
8206 		    || (sc->sc_type == WM_T_82541_2)
8207 		    || (sc->sc_type == WM_T_82547)
8208 		    || (sc->sc_type == WM_T_82547_2)) {
8209 			/* workarounds for igp are done in igp_reset() */
8210 			/* XXX add code to set LED after phy reset */
8211 		}
8212 		break;
8213 	case WM_T_ICH8:
8214 	case WM_T_ICH9:
8215 	case WM_T_ICH10:
8216 	case WM_T_PCH:
8217 	case WM_T_PCH2:
8218 	case WM_T_PCH_LPT:
8219 	case WM_T_PCH_SPT:
8220 		/* generic reset */
8221 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
8222 		CSR_WRITE_FLUSH(sc);
8223 		delay(100);
8224 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8225 		CSR_WRITE_FLUSH(sc);
8226 		delay(150);
8227 		break;
8228 	default:
8229 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
8230 		    __func__);
8231 		break;
8232 	}
8233 
8234 	sc->phy.release(sc);
8235 
8236 	/* get_cfg_done */
8237 	wm_get_cfg_done(sc);
8238 
8239 	/* extra setup */
8240 	switch (sc->sc_type) {
8241 	case WM_T_82542_2_0:
8242 	case WM_T_82542_2_1:
8243 	case WM_T_82543:
8244 	case WM_T_82544:
8245 	case WM_T_82540:
8246 	case WM_T_82545:
8247 	case WM_T_82545_3:
8248 	case WM_T_82546:
8249 	case WM_T_82546_3:
8250 	case WM_T_82541_2:
8251 	case WM_T_82547_2:
8252 	case WM_T_82571:
8253 	case WM_T_82572:
8254 	case WM_T_82573:
8255 	case WM_T_82575:
8256 	case WM_T_82576:
8257 	case WM_T_82580:
8258 	case WM_T_I350:
8259 	case WM_T_I354:
8260 	case WM_T_I210:
8261 	case WM_T_I211:
8262 	case WM_T_80003:
8263 		/* null */
8264 		break;
8265 	case WM_T_82574:
8266 	case WM_T_82583:
8267 		wm_lplu_d0_disable(sc);
8268 		break;
8269 	case WM_T_82541:
8270 	case WM_T_82547:
8271 		/* XXX Actively configure the LED after PHY reset */
8272 		break;
8273 	case WM_T_ICH8:
8274 	case WM_T_ICH9:
8275 	case WM_T_ICH10:
8276 	case WM_T_PCH:
8277 	case WM_T_PCH2:
8278 	case WM_T_PCH_LPT:
8279 	case WM_T_PCH_SPT:
8280 		/* Allow time for h/w to get to a quiescent state after reset */
8281 		delay(10*1000);
8282 
8283 		if (sc->sc_type == WM_T_PCH)
8284 			wm_hv_phy_workaround_ich8lan(sc);
8285 
8286 		if (sc->sc_type == WM_T_PCH2)
8287 			wm_lv_phy_workaround_ich8lan(sc);
8288 
8289 		/* Clear the host wakeup bit after lcd reset */
8290 		if (sc->sc_type >= WM_T_PCH) {
8291 			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
8292 			    BM_PORT_GEN_CFG);
8293 			reg &= ~BM_WUC_HOST_WU_BIT;
8294 			wm_gmii_hv_writereg(sc->sc_dev, 2,
8295 			    BM_PORT_GEN_CFG, reg);
8296 		}
8297 
8298 		/*
8299 		 * XXX Configure the LCD with the extended configuration region
8300 		 * in NVM
8301 		 */
8302 
8303 		/* Disable D0 LPLU. */
8304 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
8305 			wm_lplu_d0_disable_pch(sc);
8306 		else
8307 			wm_lplu_d0_disable(sc);	/* ICH* */
8308 		break;
8309 	default:
8310 		panic("%s: unknown type\n", __func__);
8311 		break;
8312 	}
8313 }
8314 
8315 /*
8316  * wm_get_phy_id_82575:
8317  *
8318  * Return PHY ID. Return -1 if it failed.
8319  */
8320 static int
8321 wm_get_phy_id_82575(struct wm_softc *sc)
8322 {
8323 	uint32_t reg;
8324 	int phyid = -1;
8325 
8326 	/* XXX */
8327 	if ((sc->sc_flags & WM_F_SGMII) == 0)
8328 		return -1;
8329 
8330 	if (wm_sgmii_uses_mdio(sc)) {
8331 		switch (sc->sc_type) {
8332 		case WM_T_82575:
8333 		case WM_T_82576:
8334 			reg = CSR_READ(sc, WMREG_MDIC);
8335 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
8336 			break;
8337 		case WM_T_82580:
8338 		case WM_T_I350:
8339 		case WM_T_I354:
8340 		case WM_T_I210:
8341 		case WM_T_I211:
8342 			reg = CSR_READ(sc, WMREG_MDICNFG);
8343 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
8344 			break;
8345 		default:
8346 			return -1;
8347 		}
8348 	}
8349 
8350 	return phyid;
8351 }
8352 
8353 
8354 /*
8355  * wm_gmii_mediainit:
8356  *
8357  *	Initialize media for use on 1000BASE-T devices.
8358  */
8359 static void
8360 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
8361 {
8362 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8363 	struct mii_data *mii = &sc->sc_mii;
8364 	uint32_t reg;
8365 
8366 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
8367 		device_xname(sc->sc_dev), __func__));
8368 
8369 	/* We have GMII. */
8370 	sc->sc_flags |= WM_F_HAS_MII;
8371 
8372 	if (sc->sc_type == WM_T_80003)
8373 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
8374 	else
8375 		sc->sc_tipg = TIPG_1000T_DFLT;
8376 
8377 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
8378 	if ((sc->sc_type == WM_T_82580)
8379 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
8380 	    || (sc->sc_type == WM_T_I211)) {
8381 		reg = CSR_READ(sc, WMREG_PHPM);
8382 		reg &= ~PHPM_GO_LINK_D;
8383 		CSR_WRITE(sc, WMREG_PHPM, reg);
8384 	}
8385 
8386 	/*
8387 	 * Let the chip set speed/duplex on its own based on
8388 	 * signals from the PHY.
8389 	 * XXXbouyer - I'm not sure this is right for the 80003,
8390 	 * the em driver only sets CTRL_SLU here - but it seems to work.
8391 	 */
8392 	sc->sc_ctrl |= CTRL_SLU;
8393 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8394 
8395 	/* Initialize our media structures and probe the GMII. */
8396 	mii->mii_ifp = ifp;
8397 
8398 	/*
8399 	 * Determine the PHY access method.
8400 	 *
8401 	 *  For SGMII, use SGMII specific method.
8402 	 *
8403 	 *  For some devices, we can determine the PHY access method
8404 	 * from sc_type.
8405 	 *
8406 	 *  For ICH and PCH variants, it's difficult to determine the PHY
8407 	 * access method by sc_type, so use the PCI product ID for some
8408 	 * devices.
8409 	 * For other ICH8 variants, try to use igp's method. If the PHY
8410 	 * can't be detected, then use bm's method.
8411 	 */
8412 	switch (prodid) {
8413 	case PCI_PRODUCT_INTEL_PCH_M_LM:
8414 	case PCI_PRODUCT_INTEL_PCH_M_LC:
8415 		/* 82577 */
8416 		sc->sc_phytype = WMPHY_82577;
8417 		break;
8418 	case PCI_PRODUCT_INTEL_PCH_D_DM:
8419 	case PCI_PRODUCT_INTEL_PCH_D_DC:
8420 		/* 82578 */
8421 		sc->sc_phytype = WMPHY_82578;
8422 		break;
8423 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
8424 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
8425 		/* 82579 */
8426 		sc->sc_phytype = WMPHY_82579;
8427 		break;
8428 	case PCI_PRODUCT_INTEL_82801H_82567V_3:
8429 	case PCI_PRODUCT_INTEL_82801I_BM:
8430 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
8431 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
8432 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
8433 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
8434 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
8435 		/* ICH8, 9, 10 with 82567 */
8436 		sc->sc_phytype = WMPHY_BM;
8437 		mii->mii_readreg = wm_gmii_bm_readreg;
8438 		mii->mii_writereg = wm_gmii_bm_writereg;
8439 		break;
8440 	default:
8441 		if (((sc->sc_flags & WM_F_SGMII) != 0)
8442 		    && !wm_sgmii_uses_mdio(sc)) {
8443 			/* SGMII */
8444 			mii->mii_readreg = wm_sgmii_readreg;
8445 			mii->mii_writereg = wm_sgmii_writereg;
8446 		} else if ((sc->sc_type == WM_T_82574)
8447 		    || (sc->sc_type == WM_T_82583)) {
8448 			/* BM2 (phyaddr == 1) */
8449 			sc->sc_phytype = WMPHY_BM;
8450 			mii->mii_readreg = wm_gmii_bm_readreg;
8451 			mii->mii_writereg = wm_gmii_bm_writereg;
8452 		} else if (sc->sc_type >= WM_T_ICH8) {
8453 			/* non-82567 ICH8, 9 and 10 */
8454 			mii->mii_readreg = wm_gmii_i82544_readreg;
8455 			mii->mii_writereg = wm_gmii_i82544_writereg;
8456 		} else if (sc->sc_type >= WM_T_80003) {
8457 			/* 80003 */
8458 			sc->sc_phytype = WMPHY_GG82563;
8459 			mii->mii_readreg = wm_gmii_i80003_readreg;
8460 			mii->mii_writereg = wm_gmii_i80003_writereg;
8461 		} else if (sc->sc_type >= WM_T_I210) {
8462 			/* I210 and I211 */
8463 			sc->sc_phytype = WMPHY_210;
8464 			mii->mii_readreg = wm_gmii_gs40g_readreg;
8465 			mii->mii_writereg = wm_gmii_gs40g_writereg;
8466 		} else if (sc->sc_type >= WM_T_82580) {
8467 			/* 82580, I350 and I354 */
8468 			sc->sc_phytype = WMPHY_82580;
8469 			mii->mii_readreg = wm_gmii_82580_readreg;
8470 			mii->mii_writereg = wm_gmii_82580_writereg;
8471 		} else if (sc->sc_type >= WM_T_82544) {
8472 			/* 82544, 0, [56], [17], 8257[1234] and 82583 */
8473 			mii->mii_readreg = wm_gmii_i82544_readreg;
8474 			mii->mii_writereg = wm_gmii_i82544_writereg;
8475 		} else {
8476 			mii->mii_readreg = wm_gmii_i82543_readreg;
8477 			mii->mii_writereg = wm_gmii_i82543_writereg;
8478 		}
8479 		break;
8480 	}
8481 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
8482 		/* All PCH* use _hv_ */
8483 		mii->mii_readreg = wm_gmii_hv_readreg;
8484 		mii->mii_writereg = wm_gmii_hv_writereg;
8485 	}
8486 	mii->mii_statchg = wm_gmii_statchg;
8487 
8488 	/* get PHY control from SMBus to PCIe */
8489 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
8490 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
8491 		wm_smbustopci(sc);
8492 
8493 	wm_gmii_reset(sc);
8494 
8495 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
8496 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
8497 	    wm_gmii_mediastatus);
8498 
8499 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
8500 	    || (sc->sc_type == WM_T_82580)
8501 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
8502 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
8503 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
8504 			/* Attach only one port */
8505 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
8506 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
8507 		} else {
8508 			int i, id;
8509 			uint32_t ctrl_ext;
8510 
8511 			id = wm_get_phy_id_82575(sc);
8512 			if (id != -1) {
8513 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
8514 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
8515 			}
8516 			if ((id == -1)
8517 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
8518 				/* Power on sgmii phy if it is disabled */
8519 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8520 				CSR_WRITE(sc, WMREG_CTRL_EXT,
8521 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
8522 				CSR_WRITE_FLUSH(sc);
8523 				delay(300*1000); /* XXX too long */
8524 
8525 				/* Try PHY addresses 1 through 7 */
8526 				for (i = 1; i < 8; i++)
8527 					mii_attach(sc->sc_dev, &sc->sc_mii,
8528 					    0xffffffff, i, MII_OFFSET_ANY,
8529 					    MIIF_DOPAUSE);
8530 
8531 				/* restore previous sfp cage power state */
8532 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8533 			}
8534 		}
8535 	} else {
8536 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8537 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
8538 	}
8539 
8540 	/*
8541 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
8542 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
8543 	 */
8544 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
8545 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
8546 		wm_set_mdio_slow_mode_hv(sc);
8547 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8548 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
8549 	}
8550 
8551 	/*
8552 	 * (For ICH8 variants)
8553 	 * If PHY detection failed, use BM's r/w function and retry.
8554 	 */
8555 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
8556 		/* if failed, retry with *_bm_* */
8557 		mii->mii_readreg = wm_gmii_bm_readreg;
8558 		mii->mii_writereg = wm_gmii_bm_writereg;
8559 
8560 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8561 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
8562 	}
8563 
8564 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
8565 		/* No PHY was found */
8566 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
8567 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
8568 		sc->sc_phytype = WMPHY_NONE;
8569 	} else {
8570 		/*
8571 		 * PHY Found!
8572 		 * Check PHY type.
8573 		 */
8574 		uint32_t model;
8575 		struct mii_softc *child;
8576 
8577 		child = LIST_FIRST(&mii->mii_phys);
8578 		model = child->mii_mpd_model;
8579 		if (model == MII_MODEL_yyINTEL_I82566)
8580 			sc->sc_phytype = WMPHY_IGP_3;
8581 
8582 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
8583 	}
8584 }
8585 
8586 /*
8587  * wm_gmii_mediachange:	[ifmedia interface function]
8588  *
8589  *	Set hardware to newly-selected media on a 1000BASE-T device.
8590  */
8591 static int
8592 wm_gmii_mediachange(struct ifnet *ifp)
8593 {
8594 	struct wm_softc *sc = ifp->if_softc;
8595 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8596 	int rc;
8597 
8598 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
8599 		device_xname(sc->sc_dev), __func__));
8600 	if ((ifp->if_flags & IFF_UP) == 0)
8601 		return 0;
8602 
8603 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
8604 	sc->sc_ctrl |= CTRL_SLU;
8605 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8606 	    || (sc->sc_type > WM_T_82543)) {
8607 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
8608 	} else {
8609 		sc->sc_ctrl &= ~CTRL_ASDE;
8610 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
8611 		if (ife->ifm_media & IFM_FDX)
8612 			sc->sc_ctrl |= CTRL_FD;
8613 		switch (IFM_SUBTYPE(ife->ifm_media)) {
8614 		case IFM_10_T:
8615 			sc->sc_ctrl |= CTRL_SPEED_10;
8616 			break;
8617 		case IFM_100_TX:
8618 			sc->sc_ctrl |= CTRL_SPEED_100;
8619 			break;
8620 		case IFM_1000_T:
8621 			sc->sc_ctrl |= CTRL_SPEED_1000;
8622 			break;
8623 		default:
8624 			panic("wm_gmii_mediachange: bad media 0x%x",
8625 			    ife->ifm_media);
8626 		}
8627 	}
8628 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8629 	if (sc->sc_type <= WM_T_82543)
8630 		wm_gmii_reset(sc);
8631 
8632 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
8633 		return 0;
8634 	return rc;
8635 }
8636 
8637 /*
8638  * wm_gmii_mediastatus:	[ifmedia interface function]
8639  *
8640  *	Get the current interface media status on a 1000BASE-T device.
8641  */
8642 static void
8643 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
8644 {
8645 	struct wm_softc *sc = ifp->if_softc;
8646 
8647 	ether_mediastatus(ifp, ifmr);
8648 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
8649 	    | sc->sc_flowflags;
8650 }
8651 
8652 #define	MDI_IO		CTRL_SWDPIN(2)
8653 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
8654 #define	MDI_CLK		CTRL_SWDPIN(3)
8655 
8656 static void
8657 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
8658 {
8659 	uint32_t i, v;
8660 
8661 	v = CSR_READ(sc, WMREG_CTRL);
8662 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
8663 	v |= MDI_DIR | CTRL_SWDPIO(3);
8664 
8665 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
8666 		if (data & i)
8667 			v |= MDI_IO;
8668 		else
8669 			v &= ~MDI_IO;
8670 		CSR_WRITE(sc, WMREG_CTRL, v);
8671 		CSR_WRITE_FLUSH(sc);
8672 		delay(10);
8673 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8674 		CSR_WRITE_FLUSH(sc);
8675 		delay(10);
8676 		CSR_WRITE(sc, WMREG_CTRL, v);
8677 		CSR_WRITE_FLUSH(sc);
8678 		delay(10);
8679 	}
8680 }
8681 
8682 static uint32_t
8683 wm_i82543_mii_recvbits(struct wm_softc *sc)
8684 {
8685 	uint32_t v, i, data = 0;
8686 
8687 	v = CSR_READ(sc, WMREG_CTRL);
8688 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
8689 	v |= CTRL_SWDPIO(3);
8690 
8691 	CSR_WRITE(sc, WMREG_CTRL, v);
8692 	CSR_WRITE_FLUSH(sc);
8693 	delay(10);
8694 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8695 	CSR_WRITE_FLUSH(sc);
8696 	delay(10);
8697 	CSR_WRITE(sc, WMREG_CTRL, v);
8698 	CSR_WRITE_FLUSH(sc);
8699 	delay(10);
8700 
8701 	for (i = 0; i < 16; i++) {
8702 		data <<= 1;
8703 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8704 		CSR_WRITE_FLUSH(sc);
8705 		delay(10);
8706 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
8707 			data |= 1;
8708 		CSR_WRITE(sc, WMREG_CTRL, v);
8709 		CSR_WRITE_FLUSH(sc);
8710 		delay(10);
8711 	}
8712 
8713 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8714 	CSR_WRITE_FLUSH(sc);
8715 	delay(10);
8716 	CSR_WRITE(sc, WMREG_CTRL, v);
8717 	CSR_WRITE_FLUSH(sc);
8718 	delay(10);
8719 
8720 	return data;
8721 }
8722 
8723 #undef MDI_IO
8724 #undef MDI_DIR
8725 #undef MDI_CLK
8726 
8727 /*
8728  * wm_gmii_i82543_readreg:	[mii interface function]
8729  *
8730  *	Read a PHY register on the GMII (i82543 version).
8731  */
8732 static int
8733 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
8734 {
8735 	struct wm_softc *sc = device_private(self);
8736 	int rv;
8737 
8738 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
8739 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
8740 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
8741 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
8742 
8743 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
8744 	    device_xname(sc->sc_dev), phy, reg, rv));
8745 
8746 	return rv;
8747 }
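
/*
 * For reference, the clause-22 management frame the bit-bang helpers
 * shift out for a read, as assembled from the constants above:
 *
 *	32 x 1			preamble (the 0xffffffffU, 32 call)
 *	01			start     (MII_COMMAND_START << 12)
 *	10			read op   (MII_COMMAND_READ << 10)
 *	phy<4:0>, reg<4:0>	addresses ((phy << 5) | reg)
 *
 * after which 16 data bits are clocked back in by
 * wm_i82543_mii_recvbits().
 */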
8748 
8749 /*
8750  * wm_gmii_i82543_writereg:	[mii interface function]
8751  *
8752  *	Write a PHY register on the GMII (i82543 version).
8753  */
8754 static void
8755 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
8756 {
8757 	struct wm_softc *sc = device_private(self);
8758 
8759 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
8760 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
8761 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
8762 	    (MII_COMMAND_START << 30), 32);
8763 }
8764 
8765 /*
8766  * wm_gmii_mdic_readreg:	[mii interface function]
8767  *
8768  *	Read a PHY register on the GMII.
8769  */
8770 static int
8771 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
8772 {
8773 	struct wm_softc *sc = device_private(self);
8774 	uint32_t mdic = 0;
8775 	int i, rv;
8776 
8777 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
8778 	    MDIC_REGADD(reg));
8779 
8780 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
8781 		mdic = CSR_READ(sc, WMREG_MDIC);
8782 		if (mdic & MDIC_READY)
8783 			break;
8784 		delay(50);
8785 	}
8786 
8787 	if ((mdic & MDIC_READY) == 0) {
8788 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
8789 		    device_xname(sc->sc_dev), phy, reg);
8790 		rv = 0;
8791 	} else if (mdic & MDIC_E) {
8792 #if 0 /* This is normal if no PHY is present. */
8793 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
8794 		    device_xname(sc->sc_dev), phy, reg);
8795 #endif
8796 		rv = 0;
8797 	} else {
8798 		rv = MDIC_DATA(mdic);
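		/* All ones usually means that no PHY answered. */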
8799 		if (rv == 0xffff)
8800 			rv = 0;
8801 	}
8802 
8803 	return rv;
8804 }
8805 
8806 /*
8807  * wm_gmii_mdic_writereg:	[mii interface function]
8808  *
8809  *	Write a PHY register on the GMII.
8810  */
8811 static void
8812 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
8813 {
8814 	struct wm_softc *sc = device_private(self);
8815 	uint32_t mdic = 0;
8816 	int i;
8817 
8818 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
8819 	    MDIC_REGADD(reg) | MDIC_DATA(val));
8820 
8821 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
8822 		mdic = CSR_READ(sc, WMREG_MDIC);
8823 		if (mdic & MDIC_READY)
8824 			break;
8825 		delay(50);
8826 	}
8827 
8828 	if ((mdic & MDIC_READY) == 0)
8829 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
8830 		    device_xname(sc->sc_dev), phy, reg);
8831 	else if (mdic & MDIC_E)
8832 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
8833 		    device_xname(sc->sc_dev), phy, reg);
8834 }
8835 
8836 /*
8837  * wm_gmii_i82544_readreg:	[mii interface function]
8838  *
8839  *	Read a PHY register on the GMII.
8840  */
8841 static int
8842 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
8843 {
8844 	struct wm_softc *sc = device_private(self);
8845 	int rv;
8846 
8847 	if (sc->phy.acquire(sc)) {
8848 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8849 		    __func__);
8850 		return 0;
8851 	}
8852 	rv = wm_gmii_mdic_readreg(self, phy, reg);
8853 	sc->phy.release(sc);
8854 
8855 	return rv;
8856 }
8857 
8858 /*
8859  * wm_gmii_i82544_writereg:	[mii interface function]
8860  *
8861  *	Write a PHY register on the GMII.
8862  */
8863 static void
8864 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
8865 {
8866 	struct wm_softc *sc = device_private(self);
8867 
	if (sc->phy.acquire(sc)) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return;
	}
8872 	wm_gmii_mdic_writereg(self, phy, reg, val);
8873 	sc->phy.release(sc);
8874 }
8875 
8876 /*
8877  * wm_gmii_i80003_readreg:	[mii interface function]
8878  *
 *	Read a PHY register on the kumeran.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8882  */
8883 static int
8884 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
8885 {
8886 	struct wm_softc *sc = device_private(self);
8887 	int rv;
8888 
8889 	if (phy != 1) /* only one PHY on kumeran bus */
8890 		return 0;
8891 
8892 	if (sc->phy.acquire(sc)) {
8893 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8894 		    __func__);
8895 		return 0;
8896 	}
8897 
8898 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
8899 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
8900 		    reg >> GG82563_PAGE_SHIFT);
8901 	} else {
8902 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
8903 		    reg >> GG82563_PAGE_SHIFT);
8904 	}
	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
8906 	delay(200);
8907 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
8908 	delay(200);
8909 	sc->phy.release(sc);
8910 
8911 	return rv;
8912 }
8913 
8914 /*
8915  * wm_gmii_i80003_writereg:	[mii interface function]
8916  *
8917  *	Write a PHY register on the kumeran.
8918  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8920  */
8921 static void
8922 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
8923 {
8924 	struct wm_softc *sc = device_private(self);
8925 
8926 	if (phy != 1) /* only one PHY on kumeran bus */
8927 		return;
8928 
8929 	if (sc->phy.acquire(sc)) {
8930 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8931 		    __func__);
8932 		return;
8933 	}
8934 
8935 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
8936 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
8937 		    reg >> GG82563_PAGE_SHIFT);
8938 	} else {
8939 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
8940 		    reg >> GG82563_PAGE_SHIFT);
8941 	}
	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
8943 	delay(200);
8944 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
8945 	delay(200);
8946 
8947 	sc->phy.release(sc);
8948 }
8949 
8950 /*
8951  * wm_gmii_bm_readreg:	[mii interface function]
8952  *
 *	Read a PHY register on the BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8956  */
8957 static int
8958 wm_gmii_bm_readreg(device_t self, int phy, int reg)
8959 {
8960 	struct wm_softc *sc = device_private(self);
8961 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
8962 	uint16_t val;
8963 	int rv;
8964 
8965 	if (sc->phy.acquire(sc)) {
8966 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8967 		    __func__);
8968 		return 0;
8969 	}
8970 
8971 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
8972 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
8973 		    || (reg == 31)) ? 1 : phy;
8974 	/* Page 800 works differently than the rest so it has its own func */
8975 	if (page == BM_WUC_PAGE) {
8976 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
8977 		rv = val;
8978 		goto release;
8979 	}
8980 
8981 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
8982 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
8983 		    && (sc->sc_type != WM_T_82583))
8984 			wm_gmii_mdic_writereg(self, phy,
8985 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
8986 		else
8987 			wm_gmii_mdic_writereg(self, phy,
8988 			    BME1000_PHY_PAGE_SELECT, page);
8989 	}
8990 
8991 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
8992 
8993 release:
8994 	sc->phy.release(sc);
8995 	return rv;
8996 }
8997 
8998 /*
8999  * wm_gmii_bm_writereg:	[mii interface function]
9000  *
 *	Write a PHY register on the BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
9004  */
9005 static void
9006 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
9007 {
9008 	struct wm_softc *sc = device_private(self);
9009 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
9010 
9011 	if (sc->phy.acquire(sc)) {
9012 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9013 		    __func__);
9014 		return;
9015 	}
9016 
9017 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
9018 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
9019 		    || (reg == 31)) ? 1 : phy;
9020 	/* Page 800 works differently than the rest so it has its own func */
9021 	if (page == BM_WUC_PAGE) {
9022 		uint16_t tmp;
9023 
9024 		tmp = val;
9025 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
9026 		goto release;
9027 	}
9028 
9029 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
9030 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
9031 		    && (sc->sc_type != WM_T_82583))
9032 			wm_gmii_mdic_writereg(self, phy,
9033 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
9034 		else
9035 			wm_gmii_mdic_writereg(self, phy,
9036 			    BME1000_PHY_PAGE_SELECT, page);
9037 	}
9038 
9039 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
9040 
9041 release:
9042 	sc->phy.release(sc);
9043 }
9044 
9045 static void
9046 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
9047 {
9048 	struct wm_softc *sc = device_private(self);
9049 	uint16_t regnum = BM_PHY_REG_NUM(offset);
9050 	uint16_t wuce, reg;
9051 
9052 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
9053 		device_xname(sc->sc_dev), __func__));
9054 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
9055 	if (sc->sc_type == WM_T_PCH) {
		/* XXX The e1000 driver does nothing here... why? */
9057 	}
9058 
9059 	/*
9060 	 * 1) Enable PHY wakeup register first.
9061 	 * See e1000_enable_phy_wakeup_reg_access_bm().
9062 	 */
9063 
9064 	/* Set page 769 */
9065 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
9066 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
9067 
9068 	/* Read WUCE and save it */
9069 	wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
9070 
9071 	reg = wuce | BM_WUC_ENABLE_BIT;
9072 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
9073 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, reg);
9074 
9075 	/* Select page 800 */
9076 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
9077 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
9078 
9079 	/*
9080 	 * 2) Access PHY wakeup register.
9081 	 * See e1000_access_phy_wakeup_reg_bm.
9082 	 */
9083 
9084 	/* Write page 800 */
9085 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
9086 
9087 	if (rd)
9088 		*val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
9089 	else
9090 		wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
9091 
9092 	/*
9093 	 * 3) Disable PHY wakeup register.
9094 	 * See e1000_disable_phy_wakeup_reg_access_bm().
9095 	 */
9096 	/* Set page 769 */
9097 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
9098 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
9099 
9100 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
9101 }
9102 
9103 /*
9104  * wm_gmii_hv_readreg:	[mii interface function]
9105  *
 *	Read a PHY register on the HV PHY (PCH and newer).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
9109  */
9110 static int
9111 wm_gmii_hv_readreg(device_t self, int phy, int reg)
9112 {
9113 	struct wm_softc *sc = device_private(self);
9114 	int rv;
9115 
9116 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
9117 		device_xname(sc->sc_dev), __func__));
9118 	if (sc->phy.acquire(sc)) {
9119 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9120 		    __func__);
9121 		return 0;
9122 	}
9123 
9124 	rv = wm_gmii_hv_readreg_locked(self, phy, reg);
9125 	sc->phy.release(sc);
9126 	return rv;
9127 }
9128 
9129 static int
9130 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
9131 {
9132 	uint16_t page = BM_PHY_REG_PAGE(reg);
9133 	uint16_t regnum = BM_PHY_REG_NUM(reg);
9134 	uint16_t val;
9135 	int rv;
9136 
9137 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
9138 
9139 	/* Page 800 works differently than the rest so it has its own func */
9140 	if (page == BM_WUC_PAGE) {
9141 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
9142 		return val;
9143 	}
9144 
	/*
	 * Pages lower than 768 work differently than the rest and would
	 * need their own access function.
	 */
9149 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
9150 		printf("gmii_hv_readreg!!!\n");
9151 		return 0;
9152 	}
9153 
9154 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
9155 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
9156 		    page << BME1000_PAGE_SHIFT);
9157 	}
9158 
9159 	rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK);
9160 	return rv;
9161 }
9162 
9163 /*
9164  * wm_gmii_hv_writereg:	[mii interface function]
9165  *
 *	Write a PHY register on the HV PHY (PCH and newer).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
9169  */
9170 static void
9171 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
9172 {
9173 	struct wm_softc *sc = device_private(self);
9174 
9175 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
9176 		device_xname(sc->sc_dev), __func__));
9177 
9178 	if (sc->phy.acquire(sc)) {
9179 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9180 		    __func__);
9181 		return;
9182 	}
9183 
9184 	wm_gmii_hv_writereg_locked(self, phy, reg, val);
9185 	sc->phy.release(sc);
9186 }
9187 
9188 static void
9189 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
9190 {
9191 	struct wm_softc *sc = device_private(self);
9192 	uint16_t page = BM_PHY_REG_PAGE(reg);
9193 	uint16_t regnum = BM_PHY_REG_NUM(reg);
9194 
9195 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
9196 
9197 	/* Page 800 works differently than the rest so it has its own func */
9198 	if (page == BM_WUC_PAGE) {
9199 		uint16_t tmp;
9200 
9201 		tmp = val;
9202 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
9203 		return;
9204 	}
9205 
	/*
	 * Pages lower than 768 work differently than the rest and would
	 * need their own access function.
	 */
9210 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
9211 		printf("gmii_hv_writereg!!!\n");
9212 		return;
9213 	}
9214 
9215 	{
9216 		/*
9217 		 * XXX Workaround MDIO accesses being disabled after entering
9218 		 * IEEE Power Down (whenever bit 11 of the PHY control
9219 		 * register is set)
9220 		 */
9221 		if (sc->sc_phytype == WMPHY_82578) {
9222 			struct mii_softc *child;
9223 
9224 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
9225 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
9226 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
9227 			    && ((val & (1 << 11)) != 0)) {
9228 				printf("XXX need workaround\n");
9229 			}
9230 		}
9231 
9232 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
9233 			wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
9234 			    page << BME1000_PAGE_SHIFT);
9235 		}
9236 	}
9237 
9238 	wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val);
9239 }
9240 
9241 /*
9242  * wm_gmii_82580_readreg:	[mii interface function]
9243  *
9244  *	Read a PHY register on the 82580 and I350.
9245  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
9247  */
9248 static int
9249 wm_gmii_82580_readreg(device_t self, int phy, int reg)
9250 {
9251 	struct wm_softc *sc = device_private(self);
9252 	int rv;
9253 
9254 	if (sc->phy.acquire(sc) != 0) {
9255 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9256 		    __func__);
9257 		return 0;
9258 	}
9259 
9260 	rv = wm_gmii_mdic_readreg(self, phy, reg);
9261 
9262 	sc->phy.release(sc);
9263 	return rv;
9264 }
9265 
9266 /*
9267  * wm_gmii_82580_writereg:	[mii interface function]
9268  *
9269  *	Write a PHY register on the 82580 and I350.
9270  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
9272  */
9273 static void
9274 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
9275 {
9276 	struct wm_softc *sc = device_private(self);
9277 
9278 	if (sc->phy.acquire(sc) != 0) {
9279 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9280 		    __func__);
9281 		return;
9282 	}
9283 
9284 	wm_gmii_mdic_writereg(self, phy, reg, val);
9285 
9286 	sc->phy.release(sc);
9287 }
9288 
9289 /*
9290  * wm_gmii_gs40g_readreg:	[mii interface function]
9291  *
 *	Read a PHY register on the I210 and I211.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
9295  */
9296 static int
9297 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
9298 {
9299 	struct wm_softc *sc = device_private(self);
9300 	int page, offset;
9301 	int rv;
9302 
9303 	/* Acquire semaphore */
9304 	if (sc->phy.acquire(sc)) {
9305 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9306 		    __func__);
9307 		return 0;
9308 	}
9309 
9310 	/* Page select */
9311 	page = reg >> GS40G_PAGE_SHIFT;
9312 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
9313 
9314 	/* Read reg */
9315 	offset = reg & GS40G_OFFSET_MASK;
9316 	rv = wm_gmii_mdic_readreg(self, phy, offset);
9317 
9318 	sc->phy.release(sc);
9319 	return rv;
9320 }
9321 
9322 /*
9323  * wm_gmii_gs40g_writereg:	[mii interface function]
9324  *
9325  *	Write a PHY register on the I210 and I211.
9326  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
9328  */
9329 static void
9330 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
9331 {
9332 	struct wm_softc *sc = device_private(self);
9333 	int page, offset;
9334 
9335 	/* Acquire semaphore */
9336 	if (sc->phy.acquire(sc)) {
9337 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9338 		    __func__);
9339 		return;
9340 	}
9341 
9342 	/* Page select */
9343 	page = reg >> GS40G_PAGE_SHIFT;
9344 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
9345 
9346 	/* Write reg */
9347 	offset = reg & GS40G_OFFSET_MASK;
9348 	wm_gmii_mdic_writereg(self, phy, offset, val);
9349 
9350 	/* Release semaphore */
9351 	sc->phy.release(sc);
9352 }
9353 
9354 /*
9355  * wm_gmii_statchg:	[mii interface function]
9356  *
9357  *	Callback from MII layer when media changes.
9358  */
9359 static void
9360 wm_gmii_statchg(struct ifnet *ifp)
9361 {
9362 	struct wm_softc *sc = ifp->if_softc;
9363 	struct mii_data *mii = &sc->sc_mii;
9364 
9365 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
9366 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
9367 	sc->sc_fcrtl &= ~FCRTL_XONE;
9368 
9369 	/*
9370 	 * Get flow control negotiation result.
9371 	 */
9372 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
9373 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
9374 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
9375 		mii->mii_media_active &= ~IFM_ETH_FMASK;
9376 	}
9377 
9378 	if (sc->sc_flowflags & IFM_FLOW) {
9379 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
9380 			sc->sc_ctrl |= CTRL_TFCE;
9381 			sc->sc_fcrtl |= FCRTL_XONE;
9382 		}
9383 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
9384 			sc->sc_ctrl |= CTRL_RFCE;
9385 	}
9386 
9387 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
9388 		DPRINTF(WM_DEBUG_LINK,
9389 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
9390 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
9391 	} else {
9392 		DPRINTF(WM_DEBUG_LINK,
9393 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
9394 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
9395 	}
9396 
9397 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9398 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
9399 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
9400 						 : WMREG_FCRTL, sc->sc_fcrtl);
9401 	if (sc->sc_type == WM_T_80003) {
9402 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
9403 		case IFM_1000_T:
9404 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
9405 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
9406 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
9407 			break;
9408 		default:
9409 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
9410 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
9411 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
9412 			break;
9413 		}
9414 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
9415 	}
9416 }
9417 
9418 /* kumeran related (80003, ICH* and PCH*) */
9419 
9420 /*
9421  * wm_kmrn_readreg:
9422  *
9423  *	Read a kumeran register
9424  */
9425 static int
9426 wm_kmrn_readreg(struct wm_softc *sc, int reg)
9427 {
9428 	int rv;
9429 
9430 	if (sc->sc_type == WM_T_80003)
9431 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9432 	else
9433 		rv = sc->phy.acquire(sc);
9434 	if (rv != 0) {
9435 		aprint_error_dev(sc->sc_dev,
9436 		    "%s: failed to get semaphore\n", __func__);
9437 		return 0;
9438 	}
9439 
9440 	rv = wm_kmrn_readreg_locked(sc, reg);
9441 
9442 	if (sc->sc_type == WM_T_80003)
9443 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9444 	else
9445 		sc->phy.release(sc);
9446 
9447 	return rv;
9448 }
9449 
9450 static int
9451 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
9452 {
9453 	int rv;
9454 
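	/*
	 * Setting REN starts a Kumeran read cycle; the result shows up in
	 * the low 16 bits of KUMCTRLSTA after a short delay.
	 */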
9455 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
9456 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
9457 	    KUMCTRLSTA_REN);
9458 	CSR_WRITE_FLUSH(sc);
9459 	delay(2);
9460 
9461 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
9462 
9463 	return rv;
9464 }
9465 
9466 /*
9467  * wm_kmrn_writereg:
9468  *
9469  *	Write a kumeran register
9470  */
9471 static void
9472 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
9473 {
9474 	int rv;
9475 
9476 	if (sc->sc_type == WM_T_80003)
9477 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9478 	else
9479 		rv = sc->phy.acquire(sc);
9480 	if (rv != 0) {
9481 		aprint_error_dev(sc->sc_dev,
9482 		    "%s: failed to get semaphore\n", __func__);
9483 		return;
9484 	}
9485 
9486 	wm_kmrn_writereg_locked(sc, reg, val);
9487 
9488 	if (sc->sc_type == WM_T_80003)
9489 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9490 	else
9491 		sc->phy.release(sc);
9492 }
9493 
9494 static void
9495 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
9496 {
9497 
9498 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
9499 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
9500 	    (val & KUMCTRLSTA_MASK));
9501 }
9502 
9503 /* SGMII related */
9504 
9505 /*
9506  * wm_sgmii_uses_mdio
9507  *
9508  * Check whether the transaction is to the internal PHY or the external
9509  * MDIO interface. Return true if it's MDIO.
9510  */
9511 static bool
9512 wm_sgmii_uses_mdio(struct wm_softc *sc)
9513 {
9514 	uint32_t reg;
9515 	bool ismdio = false;
9516 
9517 	switch (sc->sc_type) {
9518 	case WM_T_82575:
9519 	case WM_T_82576:
9520 		reg = CSR_READ(sc, WMREG_MDIC);
9521 		ismdio = ((reg & MDIC_DEST) != 0);
9522 		break;
9523 	case WM_T_82580:
9524 	case WM_T_I350:
9525 	case WM_T_I354:
9526 	case WM_T_I210:
9527 	case WM_T_I211:
9528 		reg = CSR_READ(sc, WMREG_MDICNFG);
9529 		ismdio = ((reg & MDICNFG_DEST) != 0);
9530 		break;
9531 	default:
9532 		break;
9533 	}
9534 
9535 	return ismdio;
9536 }
9537 
9538 /*
9539  * wm_sgmii_readreg:	[mii interface function]
9540  *
 *	Read a PHY register on the SGMII.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
9544  */
9545 static int
9546 wm_sgmii_readreg(device_t self, int phy, int reg)
9547 {
9548 	struct wm_softc *sc = device_private(self);
9549 	uint32_t i2ccmd;
9550 	int i, rv;
9551 
9552 	if (sc->phy.acquire(sc)) {
9553 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9554 		    __func__);
9555 		return 0;
9556 	}
9557 
9558 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
9559 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
9560 	    | I2CCMD_OPCODE_READ;
9561 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9562 
9563 	/* Poll the ready bit */
9564 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9565 		delay(50);
9566 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9567 		if (i2ccmd & I2CCMD_READY)
9568 			break;
9569 	}
9570 	if ((i2ccmd & I2CCMD_READY) == 0)
9571 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
9572 	if ((i2ccmd & I2CCMD_ERROR) != 0)
9573 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
9574 
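	/* The data comes back byte-swapped over I2C; restore host order. */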
9575 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
9576 
9577 	sc->phy.release(sc);
9578 	return rv;
9579 }
9580 
9581 /*
9582  * wm_sgmii_writereg:	[mii interface function]
9583  *
9584  *	Write a PHY register on the SGMII.
9585  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
9587  */
9588 static void
9589 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
9590 {
9591 	struct wm_softc *sc = device_private(self);
9592 	uint32_t i2ccmd;
9593 	int i;
9594 	int val_swapped;
9595 
9596 	if (sc->phy.acquire(sc) != 0) {
9597 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9598 		    __func__);
9599 		return;
9600 	}
9601 	/* Swap the data bytes for the I2C interface */
9602 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
9603 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
9604 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
9605 	    | I2CCMD_OPCODE_WRITE | val_swapped;
9606 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9607 
9608 	/* Poll the ready bit */
9609 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9610 		delay(50);
9611 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9612 		if (i2ccmd & I2CCMD_READY)
9613 			break;
9614 	}
9615 	if ((i2ccmd & I2CCMD_READY) == 0)
9616 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
9617 	if ((i2ccmd & I2CCMD_ERROR) != 0)
9618 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
9619 
9620 	sc->phy.release(sc);
9621 }
9622 
9623 /* TBI related */
9624 
9625 /*
9626  * wm_tbi_mediainit:
9627  *
9628  *	Initialize media for use on 1000BASE-X devices.
9629  */
9630 static void
9631 wm_tbi_mediainit(struct wm_softc *sc)
9632 {
9633 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9634 	const char *sep = "";
9635 
9636 	if (sc->sc_type < WM_T_82543)
9637 		sc->sc_tipg = TIPG_WM_DFLT;
9638 	else
9639 		sc->sc_tipg = TIPG_LG_DFLT;
9640 
9641 	sc->sc_tbi_serdes_anegticks = 5;
9642 
9643 	/* Initialize our media structures */
9644 	sc->sc_mii.mii_ifp = ifp;
9645 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
9646 
9647 	if ((sc->sc_type >= WM_T_82575)
9648 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
9649 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
9650 		    wm_serdes_mediachange, wm_serdes_mediastatus);
9651 	else
9652 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
9653 		    wm_tbi_mediachange, wm_tbi_mediastatus);
9654 
9655 	/*
9656 	 * SWD Pins:
9657 	 *
9658 	 *	0 = Link LED (output)
9659 	 *	1 = Loss Of Signal (input)
9660 	 */
9661 	sc->sc_ctrl |= CTRL_SWDPIO(0);
9662 
9663 	/* XXX Perhaps this is only for TBI */
9664 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
9665 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
9666 
9667 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
9668 		sc->sc_ctrl &= ~CTRL_LRST;
9669 
9670 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9671 
9672 #define	ADD(ss, mm, dd)							\
9673 do {									\
9674 	aprint_normal("%s%s", sep, ss);					\
9675 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
9676 	sep = ", ";							\
9677 } while (/*CONSTCOND*/0)
9678 
9679 	aprint_normal_dev(sc->sc_dev, "");
9680 
9681 	if (sc->sc_type == WM_T_I354) {
9682 		uint32_t status;
9683 
9684 		status = CSR_READ(sc, WMREG_STATUS);
9685 		if (((status & STATUS_2P5_SKU) != 0)
9686 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
9687 			ADD("2500baseKX-FDX", IFM_2500_SX | IFM_FDX,ANAR_X_FD);
9688 		} else
9689 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX,ANAR_X_FD);
9690 	} else if (sc->sc_type == WM_T_82545) {
9691 		/* Only 82545 is LX (XXX except SFP) */
9692 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
9693 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
9694 	} else {
9695 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
9696 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
9697 	}
9698 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
9699 	aprint_normal("\n");
9700 
9701 #undef ADD
9702 
9703 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
9704 }
9705 
9706 /*
9707  * wm_tbi_mediachange:	[ifmedia interface function]
9708  *
9709  *	Set hardware to newly-selected media on a 1000BASE-X device.
9710  */
9711 static int
9712 wm_tbi_mediachange(struct ifnet *ifp)
9713 {
9714 	struct wm_softc *sc = ifp->if_softc;
9715 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9716 	uint32_t status;
9717 	int i;
9718 
9719 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
9720 		/* XXX need some work for >= 82571 and < 82575 */
9721 		if (sc->sc_type < WM_T_82575)
9722 			return 0;
9723 	}
9724 
9725 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
9726 	    || (sc->sc_type >= WM_T_82575))
9727 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
9728 
9729 	sc->sc_ctrl &= ~CTRL_LRST;
9730 	sc->sc_txcw = TXCW_ANE;
9731 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9732 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
9733 	else if (ife->ifm_media & IFM_FDX)
9734 		sc->sc_txcw |= TXCW_FD;
9735 	else
9736 		sc->sc_txcw |= TXCW_HD;
9737 
9738 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
9739 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
9740 
9741 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
9742 		    device_xname(sc->sc_dev), sc->sc_txcw));
9743 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9744 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9745 	CSR_WRITE_FLUSH(sc);
9746 	delay(1000);
9747 
9748 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
9749 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
9750 
	/*
	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
	 * if the optics detect a signal; on older chips a reading of 0
	 * indicates that a signal is present.
	 */
9755 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
9756 		/* Have signal; wait for the link to come up. */
9757 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
9758 			delay(10000);
9759 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
9760 				break;
9761 		}
9762 
9763 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
9764 			    device_xname(sc->sc_dev),i));
9765 
9766 		status = CSR_READ(sc, WMREG_STATUS);
9767 		DPRINTF(WM_DEBUG_LINK,
9768 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
9769 			device_xname(sc->sc_dev),status, STATUS_LU));
9770 		if (status & STATUS_LU) {
9771 			/* Link is up. */
9772 			DPRINTF(WM_DEBUG_LINK,
9773 			    ("%s: LINK: set media -> link up %s\n",
9774 			    device_xname(sc->sc_dev),
9775 			    (status & STATUS_FD) ? "FDX" : "HDX"));
9776 
9777 			/*
9778 			 * NOTE: CTRL will update TFCE and RFCE automatically,
9779 			 * so we should update sc->sc_ctrl
9780 			 */
9781 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
9782 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
9783 			sc->sc_fcrtl &= ~FCRTL_XONE;
9784 			if (status & STATUS_FD)
9785 				sc->sc_tctl |=
9786 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
9787 			else
9788 				sc->sc_tctl |=
9789 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
9790 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
9791 				sc->sc_fcrtl |= FCRTL_XONE;
9792 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
9793 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
9794 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
9795 				      sc->sc_fcrtl);
9796 			sc->sc_tbi_linkup = 1;
9797 		} else {
9798 			if (i == WM_LINKUP_TIMEOUT)
9799 				wm_check_for_link(sc);
9800 			/* Link is down. */
9801 			DPRINTF(WM_DEBUG_LINK,
9802 			    ("%s: LINK: set media -> link down\n",
9803 			    device_xname(sc->sc_dev)));
9804 			sc->sc_tbi_linkup = 0;
9805 		}
9806 	} else {
9807 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
9808 		    device_xname(sc->sc_dev)));
9809 		sc->sc_tbi_linkup = 0;
9810 	}
9811 
9812 	wm_tbi_serdes_set_linkled(sc);
9813 
9814 	return 0;
9815 }
9816 
9817 /*
9818  * wm_tbi_mediastatus:	[ifmedia interface function]
9819  *
9820  *	Get the current interface media status on a 1000BASE-X device.
9821  */
9822 static void
9823 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9824 {
9825 	struct wm_softc *sc = ifp->if_softc;
9826 	uint32_t ctrl, status;
9827 
9828 	ifmr->ifm_status = IFM_AVALID;
9829 	ifmr->ifm_active = IFM_ETHER;
9830 
9831 	status = CSR_READ(sc, WMREG_STATUS);
9832 	if ((status & STATUS_LU) == 0) {
9833 		ifmr->ifm_active |= IFM_NONE;
9834 		return;
9835 	}
9836 
9837 	ifmr->ifm_status |= IFM_ACTIVE;
9838 	/* Only 82545 is LX */
9839 	if (sc->sc_type == WM_T_82545)
9840 		ifmr->ifm_active |= IFM_1000_LX;
9841 	else
9842 		ifmr->ifm_active |= IFM_1000_SX;
9843 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
9844 		ifmr->ifm_active |= IFM_FDX;
9845 	else
9846 		ifmr->ifm_active |= IFM_HDX;
9847 	ctrl = CSR_READ(sc, WMREG_CTRL);
9848 	if (ctrl & CTRL_RFCE)
9849 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
9850 	if (ctrl & CTRL_TFCE)
9851 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
9852 }
9853 
9854 /* XXX TBI only */
9855 static int
9856 wm_check_for_link(struct wm_softc *sc)
9857 {
9858 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9859 	uint32_t rxcw;
9860 	uint32_t ctrl;
9861 	uint32_t status;
9862 	uint32_t sig;
9863 
9864 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
9865 		/* XXX need some work for >= 82571 */
9866 		if (sc->sc_type >= WM_T_82571) {
9867 			sc->sc_tbi_linkup = 1;
9868 			return 0;
9869 		}
9870 	}
9871 
9872 	rxcw = CSR_READ(sc, WMREG_RXCW);
9873 	ctrl = CSR_READ(sc, WMREG_CTRL);
9874 	status = CSR_READ(sc, WMREG_STATUS);
9875 
9876 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
9877 
9878 	DPRINTF(WM_DEBUG_LINK,
9879 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
9880 		device_xname(sc->sc_dev), __func__,
9881 		((ctrl & CTRL_SWDPIN(1)) == sig),
9882 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
9883 
9884 	/*
9885 	 * SWDPIN   LU RXCW
9886 	 *      0    0    0
9887 	 *      0    0    1	(should not happen)
9888 	 *      0    1    0	(should not happen)
9889 	 *      0    1    1	(should not happen)
9890 	 *      1    0    0	Disable autonego and force linkup
9891 	 *      1    0    1	got /C/ but not linkup yet
9892 	 *      1    1    0	(linkup)
9893 	 *      1    1    1	If IFM_AUTO, back to autonego
9894 	 *
9895 	 */
9896 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
9897 	    && ((status & STATUS_LU) == 0)
9898 	    && ((rxcw & RXCW_C) == 0)) {
9899 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
9900 			__func__));
9901 		sc->sc_tbi_linkup = 0;
9902 		/* Disable auto-negotiation in the TXCW register */
9903 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
9904 
9905 		/*
9906 		 * Force link-up and also force full-duplex.
9907 		 *
		 * NOTE: CTRL will update TFCE and RFCE automatically,
9909 		 * so we should update sc->sc_ctrl
9910 		 */
9911 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
9912 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9913 	} else if (((status & STATUS_LU) != 0)
9914 	    && ((rxcw & RXCW_C) != 0)
9915 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
9916 		sc->sc_tbi_linkup = 1;
9917 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
9918 			__func__));
9919 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9920 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
9921 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
9922 	    && ((rxcw & RXCW_C) != 0)) {
9923 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
9924 	} else {
9925 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
9926 			status));
9927 	}
9928 
9929 	return 0;
9930 }
9931 
9932 /*
9933  * wm_tbi_tick:
9934  *
9935  *	Check the link on TBI devices.
9936  *	This function acts as mii_tick().
9937  */
9938 static void
9939 wm_tbi_tick(struct wm_softc *sc)
9940 {
9941 	struct mii_data *mii = &sc->sc_mii;
9942 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9943 	uint32_t status;
9944 
9945 	KASSERT(WM_CORE_LOCKED(sc));
9946 
9947 	status = CSR_READ(sc, WMREG_STATUS);
9948 
9949 	/* XXX is this needed? */
9950 	(void)CSR_READ(sc, WMREG_RXCW);
9951 	(void)CSR_READ(sc, WMREG_CTRL);
9952 
9953 	/* set link status */
9954 	if ((status & STATUS_LU) == 0) {
9955 		DPRINTF(WM_DEBUG_LINK,
9956 		    ("%s: LINK: checklink -> down\n",
9957 			device_xname(sc->sc_dev)));
9958 		sc->sc_tbi_linkup = 0;
9959 	} else if (sc->sc_tbi_linkup == 0) {
9960 		DPRINTF(WM_DEBUG_LINK,
9961 		    ("%s: LINK: checklink -> up %s\n",
9962 			device_xname(sc->sc_dev),
9963 			(status & STATUS_FD) ? "FDX" : "HDX"));
9964 		sc->sc_tbi_linkup = 1;
9965 		sc->sc_tbi_serdes_ticks = 0;
9966 	}
9967 
9968 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
9969 		goto setled;
9970 
9971 	if ((status & STATUS_LU) == 0) {
9972 		sc->sc_tbi_linkup = 0;
9973 		/* If the timer expired, retry autonegotiation */
9974 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9975 		    && (++sc->sc_tbi_serdes_ticks
9976 			>= sc->sc_tbi_serdes_anegticks)) {
9977 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
9978 			sc->sc_tbi_serdes_ticks = 0;
9979 			/*
9980 			 * Reset the link, and let autonegotiation do
9981 			 * its thing
9982 			 */
9983 			sc->sc_ctrl |= CTRL_LRST;
9984 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9985 			CSR_WRITE_FLUSH(sc);
9986 			delay(1000);
9987 			sc->sc_ctrl &= ~CTRL_LRST;
9988 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9989 			CSR_WRITE_FLUSH(sc);
9990 			delay(1000);
9991 			CSR_WRITE(sc, WMREG_TXCW,
9992 			    sc->sc_txcw & ~TXCW_ANE);
9993 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9994 		}
9995 	}
9996 
9997 setled:
9998 	wm_tbi_serdes_set_linkled(sc);
9999 }
10000 
10001 /* SERDES related */
10002 static void
10003 wm_serdes_power_up_link_82575(struct wm_softc *sc)
10004 {
10005 	uint32_t reg;
10006 
10007 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
10008 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
10009 		return;
10010 
10011 	reg = CSR_READ(sc, WMREG_PCS_CFG);
10012 	reg |= PCS_CFG_PCS_EN;
10013 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
10014 
10015 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
10016 	reg &= ~CTRL_EXT_SWDPIN(3);
10017 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
10018 	CSR_WRITE_FLUSH(sc);
10019 }
10020 
10021 static int
10022 wm_serdes_mediachange(struct ifnet *ifp)
10023 {
10024 	struct wm_softc *sc = ifp->if_softc;
10025 	bool pcs_autoneg = true; /* XXX */
10026 	uint32_t ctrl_ext, pcs_lctl, reg;
10027 
10028 	/* XXX Currently, this function is not called on 8257[12] */
10029 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
10030 	    || (sc->sc_type >= WM_T_82575))
10031 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
10032 
10033 	wm_serdes_power_up_link_82575(sc);
10034 
10035 	sc->sc_ctrl |= CTRL_SLU;
10036 
10037 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
10038 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
10039 
10040 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
10041 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
10042 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
10043 	case CTRL_EXT_LINK_MODE_SGMII:
10044 		pcs_autoneg = true;
10045 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
10046 		break;
10047 	case CTRL_EXT_LINK_MODE_1000KX:
10048 		pcs_autoneg = false;
10049 		/* FALLTHROUGH */
10050 	default:
10051 		if ((sc->sc_type == WM_T_82575)
10052 		    || (sc->sc_type == WM_T_82576)) {
10053 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
10054 				pcs_autoneg = false;
10055 		}
10056 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
10057 		    | CTRL_FRCFDX;
10058 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
10059 	}
10060 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10061 
10062 	if (pcs_autoneg) {
10063 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
10064 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
10065 
10066 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
10067 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
10068 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
10069 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
10070 	} else
10071 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
10072 
	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);

10076 	return 0;
10077 }
10078 
10079 static void
10080 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
10081 {
10082 	struct wm_softc *sc = ifp->if_softc;
10083 	struct mii_data *mii = &sc->sc_mii;
10084 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
10085 	uint32_t pcs_adv, pcs_lpab, reg;
10086 
10087 	ifmr->ifm_status = IFM_AVALID;
10088 	ifmr->ifm_active = IFM_ETHER;
10089 
10090 	/* Check PCS */
10091 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
10092 	if ((reg & PCS_LSTS_LINKOK) == 0) {
10093 		ifmr->ifm_active |= IFM_NONE;
10094 		sc->sc_tbi_linkup = 0;
10095 		goto setled;
10096 	}
10097 
10098 	sc->sc_tbi_linkup = 1;
10099 	ifmr->ifm_status |= IFM_ACTIVE;
10100 	if (sc->sc_type == WM_T_I354) {
10101 		uint32_t status;
10102 
10103 		status = CSR_READ(sc, WMREG_STATUS);
10104 		if (((status & STATUS_2P5_SKU) != 0)
10105 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
10106 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
10107 		} else
10108 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
10109 	} else {
10110 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
10111 		case PCS_LSTS_SPEED_10:
10112 			ifmr->ifm_active |= IFM_10_T; /* XXX */
10113 			break;
10114 		case PCS_LSTS_SPEED_100:
10115 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
10116 			break;
10117 		case PCS_LSTS_SPEED_1000:
10118 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
10119 			break;
10120 		default:
10121 			device_printf(sc->sc_dev, "Unknown speed\n");
10122 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
10123 			break;
10124 		}
10125 	}
10126 	if ((reg & PCS_LSTS_FDX) != 0)
10127 		ifmr->ifm_active |= IFM_FDX;
10128 	else
10129 		ifmr->ifm_active |= IFM_HDX;
10130 	mii->mii_media_active &= ~IFM_ETH_FMASK;
10131 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
10132 		/* Check flow */
10133 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
10134 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
10135 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
10136 			goto setled;
10137 		}
10138 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
10139 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
10140 		DPRINTF(WM_DEBUG_LINK,
10141 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
10142 		if ((pcs_adv & TXCW_SYM_PAUSE)
10143 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
10144 			mii->mii_media_active |= IFM_FLOW
10145 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
10146 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
10147 		    && (pcs_adv & TXCW_ASYM_PAUSE)
10148 		    && (pcs_lpab & TXCW_SYM_PAUSE)
10149 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
10150 			mii->mii_media_active |= IFM_FLOW
10151 			    | IFM_ETH_TXPAUSE;
10152 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
10153 		    && (pcs_adv & TXCW_ASYM_PAUSE)
10154 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
10155 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
10156 			mii->mii_media_active |= IFM_FLOW
10157 			    | IFM_ETH_RXPAUSE;
10158 		}
10159 	}
10160 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
10161 	    | (mii->mii_media_active & IFM_ETH_FMASK);
10162 setled:
10163 	wm_tbi_serdes_set_linkled(sc);
10164 }
10165 
10166 /*
10167  * wm_serdes_tick:
10168  *
10169  *	Check the link on serdes devices.
10170  */
10171 static void
10172 wm_serdes_tick(struct wm_softc *sc)
10173 {
10174 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10175 	struct mii_data *mii = &sc->sc_mii;
10176 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
10177 	uint32_t reg;
10178 
10179 	KASSERT(WM_CORE_LOCKED(sc));
10180 
10181 	mii->mii_media_status = IFM_AVALID;
10182 	mii->mii_media_active = IFM_ETHER;
10183 
10184 	/* Check PCS */
10185 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
10186 	if ((reg & PCS_LSTS_LINKOK) != 0) {
10187 		mii->mii_media_status |= IFM_ACTIVE;
10188 		sc->sc_tbi_linkup = 1;
10189 		sc->sc_tbi_serdes_ticks = 0;
10190 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
10191 		if ((reg & PCS_LSTS_FDX) != 0)
10192 			mii->mii_media_active |= IFM_FDX;
10193 		else
10194 			mii->mii_media_active |= IFM_HDX;
10195 	} else {
10196 		mii->mii_media_status |= IFM_NONE;
10197 		sc->sc_tbi_linkup = 0;
10198 		/* If the timer expired, retry autonegotiation */
10199 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
10200 		    && (++sc->sc_tbi_serdes_ticks
10201 			>= sc->sc_tbi_serdes_anegticks)) {
10202 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
10203 			sc->sc_tbi_serdes_ticks = 0;
10204 			/* XXX */
10205 			wm_serdes_mediachange(ifp);
10206 		}
10207 	}
10208 
10209 	wm_tbi_serdes_set_linkled(sc);
10210 }
10211 
10212 /* SFP related */
10213 
10214 static int
10215 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
10216 {
10217 	uint32_t i2ccmd;
10218 	int i;
10219 
10220 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
10221 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
10222 
10223 	/* Poll the ready bit */
10224 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
10225 		delay(50);
10226 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
10227 		if (i2ccmd & I2CCMD_READY)
10228 			break;
10229 	}
10230 	if ((i2ccmd & I2CCMD_READY) == 0)
10231 		return -1;
10232 	if ((i2ccmd & I2CCMD_ERROR) != 0)
10233 		return -1;
10234 
10235 	*data = i2ccmd & 0x00ff;
10236 
10237 	return 0;
10238 }
10239 
10240 static uint32_t
10241 wm_sfp_get_media_type(struct wm_softc *sc)
10242 {
10243 	uint32_t ctrl_ext;
10244 	uint8_t val = 0;
10245 	int timeout = 3;
10246 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
10247 	int rv = -1;
10248 
10249 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
10250 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
10251 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
10252 	CSR_WRITE_FLUSH(sc);
10253 
10254 	/* Read SFP module data */
10255 	while (timeout) {
10256 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
10257 		if (rv == 0)
10258 			break;
10259 		delay(100*1000); /* XXX too big */
10260 		timeout--;
10261 	}
10262 	if (rv != 0)
10263 		goto out;
10264 	switch (val) {
10265 	case SFF_SFP_ID_SFF:
10266 		aprint_normal_dev(sc->sc_dev,
10267 		    "Module/Connector soldered to board\n");
10268 		break;
10269 	case SFF_SFP_ID_SFP:
10270 		aprint_normal_dev(sc->sc_dev, "SFP\n");
10271 		break;
10272 	case SFF_SFP_ID_UNKNOWN:
10273 		goto out;
10274 	default:
10275 		break;
10276 	}
10277 
10278 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
10279 	if (rv != 0) {
10280 		goto out;
10281 	}
10282 
10283 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
10284 		mediatype = WM_MEDIATYPE_SERDES;
	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
10286 		sc->sc_flags |= WM_F_SGMII;
10287 		mediatype = WM_MEDIATYPE_COPPER;
	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
10289 		sc->sc_flags |= WM_F_SGMII;
10290 		mediatype = WM_MEDIATYPE_SERDES;
10291 	}
10292 
10293 out:
10294 	/* Restore I2C interface setting */
10295 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
10296 
10297 	return mediatype;
10298 }
10299 
10300 /*
10301  * NVM related.
10302  * Microwire, SPI (w/wo EERD) and Flash.
10303  */
10304 
10305 /* Both spi and uwire */
10306 
10307 /*
10308  * wm_eeprom_sendbits:
10309  *
10310  *	Send a series of bits to the EEPROM.
10311  */
10312 static void
10313 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
10314 {
10315 	uint32_t reg;
10316 	int x;
10317 
10318 	reg = CSR_READ(sc, WMREG_EECD);
10319 
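	/*
	 * Shift the bits out MSB first: set DI while SK is low, then pulse
	 * SK so the EEPROM samples DI on the rising edge.
	 */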
10320 	for (x = nbits; x > 0; x--) {
10321 		if (bits & (1U << (x - 1)))
10322 			reg |= EECD_DI;
10323 		else
10324 			reg &= ~EECD_DI;
10325 		CSR_WRITE(sc, WMREG_EECD, reg);
10326 		CSR_WRITE_FLUSH(sc);
10327 		delay(2);
10328 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
10329 		CSR_WRITE_FLUSH(sc);
10330 		delay(2);
10331 		CSR_WRITE(sc, WMREG_EECD, reg);
10332 		CSR_WRITE_FLUSH(sc);
10333 		delay(2);
10334 	}
10335 }
10336 
10337 /*
10338  * wm_eeprom_recvbits:
10339  *
10340  *	Receive a series of bits from the EEPROM.
10341  */
10342 static void
10343 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
10344 {
10345 	uint32_t reg, val;
10346 	int x;
10347 
10348 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
10349 
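	/*
	 * Shift the bits in MSB first: raise SK, sample DO while the clock
	 * is high, then drop SK again.
	 */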
10350 	val = 0;
10351 	for (x = nbits; x > 0; x--) {
10352 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
10353 		CSR_WRITE_FLUSH(sc);
10354 		delay(2);
10355 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
10356 			val |= (1U << (x - 1));
10357 		CSR_WRITE(sc, WMREG_EECD, reg);
10358 		CSR_WRITE_FLUSH(sc);
10359 		delay(2);
10360 	}
10361 	*valp = val;
10362 }
10363 
10364 /* Microwire */
10365 
10366 /*
10367  * wm_nvm_read_uwire:
10368  *
10369  *	Read a word from the EEPROM using the MicroWire protocol.
10370  */
10371 static int
10372 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
10373 {
10374 	uint32_t reg, val;
10375 	int i;
10376 
10377 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10378 		device_xname(sc->sc_dev), __func__));
10379 
10380 	for (i = 0; i < wordcnt; i++) {
10381 		/* Clear SK and DI. */
10382 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
10383 		CSR_WRITE(sc, WMREG_EECD, reg);
10384 
		/*
		 * XXX: Workaround for a bug in qemu-0.12.x and prior,
		 * and in Xen.
		 *
		 * We use this workaround only for the 82540 because
		 * qemu's e1000 acts as an 82540.
		 */
10392 		if (sc->sc_type == WM_T_82540) {
10393 			reg |= EECD_SK;
10394 			CSR_WRITE(sc, WMREG_EECD, reg);
10395 			reg &= ~EECD_SK;
10396 			CSR_WRITE(sc, WMREG_EECD, reg);
10397 			CSR_WRITE_FLUSH(sc);
10398 			delay(2);
10399 		}
10400 		/* XXX: end of workaround */
10401 
10402 		/* Set CHIP SELECT. */
10403 		reg |= EECD_CS;
10404 		CSR_WRITE(sc, WMREG_EECD, reg);
10405 		CSR_WRITE_FLUSH(sc);
10406 		delay(2);
10407 
10408 		/* Shift in the READ command. */
10409 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
10410 
10411 		/* Shift in address. */
10412 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
10413 
10414 		/* Shift out the data. */
10415 		wm_eeprom_recvbits(sc, &val, 16);
10416 		data[i] = val & 0xffff;
10417 
10418 		/* Clear CHIP SELECT. */
10419 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
10420 		CSR_WRITE(sc, WMREG_EECD, reg);
10421 		CSR_WRITE_FLUSH(sc);
10422 		delay(2);
10423 	}
10424 
10425 	return 0;
10426 }
10427 
10428 /* SPI */
10429 
10430 /*
10431  * Set SPI and FLASH related information from the EECD register.
10432  * For 82541 and 82547, the word size is taken from EEPROM.
10433  */
10434 static int
10435 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
10436 {
10437 	int size;
10438 	uint32_t reg;
10439 	uint16_t data;
10440 
10441 	reg = CSR_READ(sc, WMREG_EECD);
10442 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
10443 
10444 	/* Read the size of NVM from EECD by default */
10445 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
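	/*
	 * The size field encodes log2(words) less a base shift; e.g.,
	 * assuming a base shift of 6, a field value of 2 would give
	 * 1 << (2 + 6) = 256 words.
	 */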
10446 	switch (sc->sc_type) {
10447 	case WM_T_82541:
10448 	case WM_T_82541_2:
10449 	case WM_T_82547:
10450 	case WM_T_82547_2:
10451 		/* Set dummy value to access EEPROM */
10452 		sc->sc_nvm_wordsize = 64;
10453 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
10454 		reg = data;
10455 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
10456 		if (size == 0)
10457 			size = 6; /* 64 word size */
10458 		else
10459 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
10460 		break;
10461 	case WM_T_80003:
10462 	case WM_T_82571:
10463 	case WM_T_82572:
10464 	case WM_T_82573: /* SPI case */
10465 	case WM_T_82574: /* SPI case */
10466 	case WM_T_82583: /* SPI case */
10467 		size += NVM_WORD_SIZE_BASE_SHIFT;
10468 		if (size > 14)
10469 			size = 14;
10470 		break;
10471 	case WM_T_82575:
10472 	case WM_T_82576:
10473 	case WM_T_82580:
10474 	case WM_T_I350:
10475 	case WM_T_I354:
10476 	case WM_T_I210:
10477 	case WM_T_I211:
10478 		size += NVM_WORD_SIZE_BASE_SHIFT;
10479 		if (size > 15)
10480 			size = 15;
10481 		break;
10482 	default:
10483 		aprint_error_dev(sc->sc_dev,
10484 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
		return -1;
10487 	}
10488 
10489 	sc->sc_nvm_wordsize = 1 << size;
10490 
10491 	return 0;
10492 }
10493 
10494 /*
10495  * wm_nvm_ready_spi:
10496  *
10497  *	Wait for a SPI EEPROM to be ready for commands.
10498  */
10499 static int
10500 wm_nvm_ready_spi(struct wm_softc *sc)
10501 {
10502 	uint32_t val;
10503 	int usec;
10504 
10505 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10506 		device_xname(sc->sc_dev), __func__));
10507 
10508 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
10509 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
10510 		wm_eeprom_recvbits(sc, &val, 8);
10511 		if ((val & SPI_SR_RDY) == 0)
10512 			break;
10513 	}
10514 	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev,
		    "EEPROM failed to become ready\n");
10516 		return 1;
10517 	}
10518 	return 0;
10519 }
10520 
10521 /*
10522  * wm_nvm_read_spi:
10523  *
 *	Read a word from the EEPROM using the SPI protocol.
10525  */
10526 static int
10527 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
10528 {
10529 	uint32_t reg, val;
10530 	int i;
10531 	uint8_t opc;
10532 
10533 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10534 		device_xname(sc->sc_dev), __func__));
10535 
10536 	/* Clear SK and CS. */
10537 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
10538 	CSR_WRITE(sc, WMREG_EECD, reg);
10539 	CSR_WRITE_FLUSH(sc);
10540 	delay(2);
10541 
10542 	if (wm_nvm_ready_spi(sc))
10543 		return 1;
10544 
10545 	/* Toggle CS to flush commands. */
10546 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
10547 	CSR_WRITE_FLUSH(sc);
10548 	delay(2);
10549 	CSR_WRITE(sc, WMREG_EECD, reg);
10550 	CSR_WRITE_FLUSH(sc);
10551 	delay(2);
10552 
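	/*
	 * Parts with 8-bit addressing carry the ninth address bit in the
	 * opcode (A8), so byte addresses of 256 and above (word 128 and up)
	 * need the alternate READ opcode.
	 */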
10553 	opc = SPI_OPC_READ;
10554 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
10555 		opc |= SPI_OPC_A8;
10556 
10557 	wm_eeprom_sendbits(sc, opc, 8);
10558 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
10559 
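	/* The EEPROM shifts each word out MSB first; swap to host order. */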
10560 	for (i = 0; i < wordcnt; i++) {
10561 		wm_eeprom_recvbits(sc, &val, 16);
10562 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
10563 	}
10564 
10565 	/* Raise CS and clear SK. */
10566 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
10567 	CSR_WRITE(sc, WMREG_EECD, reg);
10568 	CSR_WRITE_FLUSH(sc);
10569 	delay(2);
10570 
10571 	return 0;
10572 }
10573 
10574 /* Using with EERD */
10575 
10576 static int
10577 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
10578 {
10579 	uint32_t attempts = 100000;
10580 	uint32_t i, reg = 0;
10581 	int32_t done = -1;
10582 
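	/* Poll DONE every 5us, for up to half a second in total. */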
10583 	for (i = 0; i < attempts; i++) {
10584 		reg = CSR_READ(sc, rw);
10585 
10586 		if (reg & EERD_DONE) {
10587 			done = 0;
10588 			break;
10589 		}
10590 		delay(5);
10591 	}
10592 
10593 	return done;
10594 }
10595 
10596 static int
10597 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
10598     uint16_t *data)
10599 {
10600 	int i, eerd = 0;
10601 	int error = 0;
10602 
10603 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10604 		device_xname(sc->sc_dev), __func__));
10605 
10606 	for (i = 0; i < wordcnt; i++) {
10607 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
10608 
10609 		CSR_WRITE(sc, WMREG_EERD, eerd);
10610 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
10611 		if (error != 0)
10612 			break;
10613 
10614 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
10615 	}
10616 
10617 	return error;
10618 }
10619 
10620 /* Flash */
10621 
10622 static int
10623 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
10624 {
10625 	uint32_t eecd;
10626 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
10627 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
10628 	uint8_t sig_byte = 0;
10629 
10630 	switch (sc->sc_type) {
10631 	case WM_T_PCH_SPT:
10632 		/*
10633 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
10634 		 * sector valid bits from the NVM.
10635 		 */
10636 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
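		/* NVMVS values 2 and 3 map to banks 0 and 1, respectively. */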
10637 		if ((*bank == 0) || (*bank == 1)) {
10638 			aprint_error_dev(sc->sc_dev,
10639 			    "%s: no valid NVM bank present (%u)\n", __func__,
10640 				*bank);
10641 			return -1;
10642 		} else {
10643 			*bank = *bank - 2;
10644 			return 0;
10645 		}
10646 	case WM_T_ICH8:
10647 	case WM_T_ICH9:
10648 		eecd = CSR_READ(sc, WMREG_EECD);
10649 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
10650 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
10651 			return 0;
10652 		}
10653 		/* FALLTHROUGH */
10654 	default:
10655 		/* Default to 0 */
10656 		*bank = 0;
10657 
10658 		/* Check bank 0 */
10659 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
10660 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
10661 			*bank = 0;
10662 			return 0;
10663 		}
10664 
10665 		/* Check bank 1 */
10666 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
10667 		    &sig_byte);
10668 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
10669 			*bank = 1;
10670 			return 0;
10671 		}
10672 	}
10673 
10674 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
10675 		device_xname(sc->sc_dev)));
10676 	return -1;
10677 }
10678 
10679 /******************************************************************************
10680  * This function does initial flash setup so that a new read/write/erase cycle
10681  * can be started.
10682  *
10683  * sc - The pointer to the hw structure
10684  ****************************************************************************/
10685 static int32_t
10686 wm_ich8_cycle_init(struct wm_softc *sc)
10687 {
10688 	uint16_t hsfsts;
10689 	int32_t error = 1;
10690 	int32_t i     = 0;
10691 
10692 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10693 
	/* Maybe check the Flash Descriptor Valid bit in Hw status */
10695 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
10696 		return error;
10697 	}
10698 
	/* Clear FCERR and DAEL in Hw status by writing 1s */
10701 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
10702 
10703 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10704 
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to check
	 * against in order to start a new cycle, or the FDONE bit should be
	 * changed in the hardware so that it reads 1 after a hardware reset,
	 * which could then be used to tell whether a cycle is in progress or
	 * has been completed.  We should also have some software semaphore
	 * mechanism to guard FDONE or the cycle-in-progress bit so that
	 * accesses to those bits by two threads are serialized, or some
	 * other way to keep two threads from starting a cycle at the same
	 * time.
	 */
10715 
10716 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
10717 		/*
10718 		 * There is no cycle running at present, so we can start a
10719 		 * cycle
10720 		 */
10721 
10722 		/* Begin by setting Flash Cycle Done. */
10723 		hsfsts |= HSFSTS_DONE;
10724 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10725 		error = 0;
10726 	} else {
10727 		/*
		 * Otherwise poll for a while so the current cycle has a
		 * chance to end before giving up.
10730 		 */
10731 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
10732 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10733 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
10734 				error = 0;
10735 				break;
10736 			}
10737 			delay(1);
10738 		}
10739 		if (error == 0) {
10740 			/*
			 * The previous cycle completed within the timeout,
			 * so now set the Flash Cycle Done bit.
10743 			 */
10744 			hsfsts |= HSFSTS_DONE;
10745 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10746 		}
10747 	}
10748 	return error;
10749 }
10750 
10751 /******************************************************************************
10752  * This function starts a flash cycle and waits for its completion
10753  *
10754  * sc - The pointer to the hw structure
10755  ****************************************************************************/
10756 static int32_t
10757 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
10758 {
10759 	uint16_t hsflctl;
10760 	uint16_t hsfsts;
10761 	int32_t error = 1;
10762 	uint32_t i = 0;
10763 
10764 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
10765 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
10766 	hsflctl |= HSFCTL_GO;
10767 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
10768 
10769 	/* Wait till FDONE bit is set to 1 */
10770 	do {
10771 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10772 		if (hsfsts & HSFSTS_DONE)
10773 			break;
10774 		delay(1);
10775 		i++;
10776 	} while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
10778 		error = 0;
10779 
10780 	return error;
10781 }
10782 
10783 /******************************************************************************
10784  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
10785  *
10786  * sc - The pointer to the hw structure
10787  * index - The index of the byte or word to read.
10788  * size - Size of data to read, 1=byte 2=word, 4=dword
10789  * data - Pointer to the word to store the value read.
10790  *****************************************************************************/
10791 static int32_t
10792 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
10793     uint32_t size, uint32_t *data)
10794 {
10795 	uint16_t hsfsts;
10796 	uint16_t hsflctl;
10797 	uint32_t flash_linear_address;
10798 	uint32_t flash_data = 0;
10799 	int32_t error = 1;
10800 	int32_t count = 0;
10801 
	if (size < 1 || size > 4 || data == NULL ||
10803 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
10804 		return error;
10805 
10806 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
10807 	    sc->sc_ich8_flash_base;
10808 
10809 	do {
10810 		delay(1);
10811 		/* Steps */
10812 		error = wm_ich8_cycle_init(sc);
10813 		if (error)
10814 			break;
10815 
10816 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/*
		 * The byte count field holds size - 1:
		 * 0b = 1 byte, 1b = 2 bytes, 11b = 4 bytes (dword).
		 */
10818 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
10819 		    & HSFCTL_BCOUNT_MASK;
10820 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
10821 		if (sc->sc_type == WM_T_PCH_SPT) {
10822 			/*
			 * In SPT, this register is in the LAN memory space,
			 * not flash.  Therefore, only 32-bit access is
			 * supported.
10825 			 */
10826 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
10827 			    (uint32_t)hsflctl);
10828 		} else
10829 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
10830 
10831 		/*
10832 		 * Write the last 24 bits of index into Flash Linear address
10833 		 * field in Flash Address
10834 		 */
		/* TODO: maybe check the index against the size of the flash */
10836 
10837 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
10838 
10839 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
10840 
		/*
		 * If FCERR is set, clear it and retry the whole sequence
		 * up to a few more times; otherwise read the result out of
		 * Flash Data0, least significant byte first.
		 */
10847 		if (error == 0) {
10848 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
10849 			if (size == 1)
10850 				*data = (uint8_t)(flash_data & 0x000000FF);
10851 			else if (size == 2)
10852 				*data = (uint16_t)(flash_data & 0x0000FFFF);
10853 			else if (size == 4)
10854 				*data = (uint32_t)flash_data;
10855 			break;
10856 		} else {
10857 			/*
10858 			 * If we've gotten here, then things are probably
10859 			 * completely hosed, but if the error condition is
10860 			 * detected, it won't hurt to give it another try...
10861 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
10862 			 */
10863 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10864 			if (hsfsts & HSFSTS_ERR) {
10865 				/* Repeat for some time before giving up. */
10866 				continue;
10867 			} else if ((hsfsts & HSFSTS_DONE) == 0)
10868 				break;
10869 		}
10870 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
10871 
10872 	return error;
10873 }
10874 
10875 /******************************************************************************
10876  * Reads a single byte from the NVM using the ICH8 flash access registers.
10877  *
10878  * sc - pointer to wm_hw structure
10879  * index - The index of the byte to read.
10880  * data - Pointer to a byte to store the value read.
10881  *****************************************************************************/
10882 static int32_t
10883 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
10884 {
10885 	int32_t status;
10886 	uint32_t word = 0;
10887 
10888 	status = wm_read_ich8_data(sc, index, 1, &word);
10889 	if (status == 0)
10890 		*data = (uint8_t)word;
10891 	else
10892 		*data = 0;
10893 
10894 	return status;
10895 }
10896 
10897 /******************************************************************************
10898  * Reads a word from the NVM using the ICH8 flash access registers.
10899  *
10900  * sc - pointer to wm_hw structure
10901  * index - The starting byte index of the word to read.
10902  * data - Pointer to a word to store the value read.
10903  *****************************************************************************/
10904 static int32_t
10905 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
10906 {
10907 	int32_t status;
10908 	uint32_t word = 0;
10909 
10910 	status = wm_read_ich8_data(sc, index, 2, &word);
10911 	if (status == 0)
10912 		*data = (uint16_t)word;
10913 	else
10914 		*data = 0;
10915 
10916 	return status;
10917 }
10918 
10919 /******************************************************************************
10920  * Reads a dword from the NVM using the ICH8 flash access registers.
10921  *
10922  * sc - pointer to wm_hw structure
10923  * index - The starting byte index of the word to read.
10924  * data - Pointer to a word to store the value read.
10925  *****************************************************************************/
10926 static int32_t
10927 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
10928 {
10929 	int32_t status;
10930 
10931 	status = wm_read_ich8_data(sc, index, 4, data);
10932 	return status;
10933 }
10934 
10935 /******************************************************************************
10936  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
10937  * register.
10938  *
10939  * sc - Struct containing variables accessed by shared code
10940  * offset - offset of word in the EEPROM to read
10941  * data - word read from the EEPROM
10942  * words - number of words to read
10943  *****************************************************************************/
10944 static int
10945 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
10946 {
10947 	int32_t  error = 0;
10948 	uint32_t flash_bank = 0;
10949 	uint32_t act_offset = 0;
10950 	uint32_t bank_offset = 0;
10951 	uint16_t word = 0;
10952 	uint16_t i = 0;
10953 
10954 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10955 		device_xname(sc->sc_dev), __func__));
10956 
10957 	/*
10958 	 * We need to know which is the valid flash bank.  In the event
10959 	 * that we didn't allocate eeprom_shadow_ram, we may not be
10960 	 * managing flash_bank.  So it cannot be trusted and needs
10961 	 * to be updated with each read.
10962 	 */
10963 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
10964 	if (error) {
10965 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
10966 			device_xname(sc->sc_dev)));
10967 		flash_bank = 0;
10968 	}
10969 
10970 	/*
10971 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
10972 	 * size
10973 	 */
10974 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
10975 
10976 	error = wm_get_swfwhw_semaphore(sc);
10977 	if (error) {
10978 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10979 		    __func__);
10980 		return error;
10981 	}
10982 
10983 	for (i = 0; i < words; i++) {
10984 		/* The NVM part needs a byte offset, hence * 2 */
10985 		act_offset = bank_offset + ((offset + i) * 2);
10986 		error = wm_read_ich8_word(sc, act_offset, &word);
10987 		if (error) {
10988 			aprint_error_dev(sc->sc_dev,
10989 			    "%s: failed to read NVM\n", __func__);
10990 			break;
10991 		}
10992 		data[i] = word;
10993 	}
10994 
10995 	wm_put_swfwhw_semaphore(sc);
10996 	return error;
10997 }
10998 
10999 /******************************************************************************
11000  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
11001  * register.
11002  *
11003  * sc - Struct containing variables accessed by shared code
11004  * offset - offset of word in the EEPROM to read
11005  * data - word read from the EEPROM
11006  * words - number of words to read
11007  *****************************************************************************/
11008 static int
11009 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
11010 {
11011 	int32_t  error = 0;
11012 	uint32_t flash_bank = 0;
11013 	uint32_t act_offset = 0;
11014 	uint32_t bank_offset = 0;
11015 	uint32_t dword = 0;
11016 	uint16_t i = 0;
11017 
11018 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11019 		device_xname(sc->sc_dev), __func__));
11020 
11021 	/*
11022 	 * We need to know which is the valid flash bank.  In the event
11023 	 * that we didn't allocate eeprom_shadow_ram, we may not be
11024 	 * managing flash_bank.  So it cannot be trusted and needs
11025 	 * to be updated with each read.
11026 	 */
11027 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
11028 	if (error) {
11029 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
11030 			device_xname(sc->sc_dev)));
11031 		flash_bank = 0;
11032 	}
11033 
11034 	/*
11035 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
11036 	 * size
11037 	 */
11038 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
11039 
11040 	error = wm_get_swfwhw_semaphore(sc);
11041 	if (error) {
11042 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
11043 		    __func__);
11044 		return error;
11045 	}
11046 
11047 	for (i = 0; i < words; i++) {
11048 		/* The NVM part needs a byte offset, hence * 2 */
11049 		act_offset = bank_offset + ((offset + i) * 2);
11050 		/* but we must read dword aligned, so mask ... */
11051 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
11052 		if (error) {
11053 			aprint_error_dev(sc->sc_dev,
11054 			    "%s: failed to read NVM\n", __func__);
11055 			break;
11056 		}
11057 		/* ... and pick out low or high word */
11058 		if ((act_offset & 0x2) == 0)
11059 			data[i] = (uint16_t)(dword & 0xFFFF);
11060 		else
11061 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
11062 	}
11063 
11064 	wm_put_swfwhw_semaphore(sc);
11065 	return error;
11066 }
11067 
11068 /* iNVM */
11069 
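/*
 * wm_nvm_read_word_invm:
 *
 *	Look up a word in the iNVM (the I210/I211's integrated NVM) by
 *	scanning the autoload records, skipping CSR-autoload and RSA-key
 *	structures, until a word record with the given address is found.
 */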
11070 static int
11071 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
11072 {
	int32_t  rv = -1;	/* assume "not found" until a match is seen */
11074 	uint32_t invm_dword;
11075 	uint16_t i;
11076 	uint8_t record_type, word_address;
11077 
11078 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11079 		device_xname(sc->sc_dev), __func__));
11080 
11081 	for (i = 0; i < INVM_SIZE; i++) {
11082 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
11083 		/* Get record type */
11084 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
11085 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
11086 			break;
11087 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
11088 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
11089 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
11090 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
11091 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
11092 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
11093 			if (word_address == address) {
11094 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
11095 				rv = 0;
11096 				break;
11097 			}
11098 		}
11099 	}
11100 
11101 	return rv;
11102 }
11103 
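/*
 * wm_nvm_read_invm:
 *
 *	Read words from the iNVM.  Words that are not programmed are
 *	replaced with their documented I211 default values when available.
 */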
11104 static int
11105 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
11106 {
11107 	int rv = 0;
11108 	int i;
11109 
11110 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11111 		device_xname(sc->sc_dev), __func__));
11112 
11113 	for (i = 0; i < words; i++) {
11114 		switch (offset + i) {
11115 		case NVM_OFF_MACADDR:
11116 		case NVM_OFF_MACADDR1:
11117 		case NVM_OFF_MACADDR2:
11118 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
11119 			if (rv != 0) {
11120 				data[i] = 0xffff;
11121 				rv = -1;
11122 			}
11123 			break;
11124 		case NVM_OFF_CFG2:
11125 			rv = wm_nvm_read_word_invm(sc, offset, data);
11126 			if (rv != 0) {
11127 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
11128 				rv = 0;
11129 			}
11130 			break;
11131 		case NVM_OFF_CFG4:
11132 			rv = wm_nvm_read_word_invm(sc, offset, data);
11133 			if (rv != 0) {
11134 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
11135 				rv = 0;
11136 			}
11137 			break;
11138 		case NVM_OFF_LED_1_CFG:
11139 			rv = wm_nvm_read_word_invm(sc, offset, data);
11140 			if (rv != 0) {
11141 				*data = NVM_LED_1_CFG_DEFAULT_I211;
11142 				rv = 0;
11143 			}
11144 			break;
11145 		case NVM_OFF_LED_0_2_CFG:
11146 			rv = wm_nvm_read_word_invm(sc, offset, data);
11147 			if (rv != 0) {
11148 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
11149 				rv = 0;
11150 			}
11151 			break;
11152 		case NVM_OFF_ID_LED_SETTINGS:
11153 			rv = wm_nvm_read_word_invm(sc, offset, data);
11154 			if (rv != 0) {
11155 				*data = ID_LED_RESERVED_FFFF;
11156 				rv = 0;
11157 			}
11158 			break;
11159 		default:
11160 			DPRINTF(WM_DEBUG_NVM,
11161 			    ("NVM word 0x%02x is not mapped.\n", offset));
11162 			*data = NVM_RESERVED_WORD;
11163 			break;
11164 		}
11165 	}
11166 
11167 	return rv;
11168 }
11169 
/* Locking, NVM type detection, checksum validation, version and read */
11171 
11172 /*
11173  * wm_nvm_acquire:
11174  *
11175  *	Perform the EEPROM handshake required on some chips.
11176  */
11177 static int
11178 wm_nvm_acquire(struct wm_softc *sc)
11179 {
11180 	uint32_t reg;
11181 	int x;
11182 	int ret = 0;
11183 
11184 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11185 		device_xname(sc->sc_dev), __func__));
11186 
11187 	if (sc->sc_type >= WM_T_ICH8) {
11188 		ret = wm_get_nvm_ich8lan(sc);
11189 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
11190 		ret = wm_get_swfwhw_semaphore(sc);
11191 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
11192 		/* This will also do wm_get_swsm_semaphore() if needed */
11193 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
11194 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
11195 		ret = wm_get_swsm_semaphore(sc);
11196 	}
11197 
11198 	if (ret) {
11199 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
11200 			__func__);
11201 		return 1;
11202 	}
11203 
11204 	if (sc->sc_flags & WM_F_LOCK_EECD) {
11205 		reg = CSR_READ(sc, WMREG_EECD);
11206 
11207 		/* Request EEPROM access. */
11208 		reg |= EECD_EE_REQ;
11209 		CSR_WRITE(sc, WMREG_EECD, reg);
11210 
11211 		/* ..and wait for it to be granted. */
11212 		for (x = 0; x < 1000; x++) {
11213 			reg = CSR_READ(sc, WMREG_EECD);
11214 			if (reg & EECD_EE_GNT)
11215 				break;
11216 			delay(5);
11217 		}
11218 		if ((reg & EECD_EE_GNT) == 0) {
11219 			aprint_error_dev(sc->sc_dev,
11220 			    "could not acquire EEPROM GNT\n");
11221 			reg &= ~EECD_EE_REQ;
11222 			CSR_WRITE(sc, WMREG_EECD, reg);
11223 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
11224 				wm_put_swfwhw_semaphore(sc);
11225 			if (sc->sc_flags & WM_F_LOCK_SWFW)
11226 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
11227 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
11228 				wm_put_swsm_semaphore(sc);
11229 			return 1;
11230 		}
11231 	}
11232 
11233 	return 0;
11234 }
11235 
11236 /*
11237  * wm_nvm_release:
11238  *
11239  *	Release the EEPROM mutex.
11240  */
11241 static void
11242 wm_nvm_release(struct wm_softc *sc)
11243 {
11244 	uint32_t reg;
11245 
11246 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11247 		device_xname(sc->sc_dev), __func__));
11248 
11249 	if (sc->sc_flags & WM_F_LOCK_EECD) {
11250 		reg = CSR_READ(sc, WMREG_EECD);
11251 		reg &= ~EECD_EE_REQ;
11252 		CSR_WRITE(sc, WMREG_EECD, reg);
11253 	}
11254 
11255 	if (sc->sc_type >= WM_T_ICH8) {
11256 		wm_put_nvm_ich8lan(sc);
11257 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
11258 		wm_put_swfwhw_semaphore(sc);
11259 	if (sc->sc_flags & WM_F_LOCK_SWFW)
11260 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
11261 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
11262 		wm_put_swsm_semaphore(sc);
11263 }
11264 
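/*
 * wm_nvm_is_onboard_eeprom:
 *
 *	Return 1 if the NVM is an onboard EEPROM, 0 if it is flash.
 *	Only the 82573/82574/82583 can use flash here.
 */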
11265 static int
11266 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
11267 {
11268 	uint32_t eecd = 0;
11269 
11270 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
11271 	    || sc->sc_type == WM_T_82583) {
11272 		eecd = CSR_READ(sc, WMREG_EECD);
11273 
11274 		/* Isolate bits 15 & 16 */
11275 		eecd = ((eecd >> 15) & 0x03);
11276 
11277 		/* If both bits are set, device is Flash type */
11278 		if (eecd == 0x03)
11279 			return 0;
11280 	}
11281 	return 1;
11282 }
11283 
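/*
 * wm_nvm_get_flash_presence_i210:
 *
 *	Return 1 if the I210/I211 has a flash attached (EEC FLASH_DETECTED),
 *	otherwise 0, in which case the iNVM is used instead.
 */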
11284 static int
11285 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
11286 {
11287 	uint32_t eec;
11288 
11289 	eec = CSR_READ(sc, WMREG_EEC);
11290 	if ((eec & EEC_FLASH_DETECTED) != 0)
11291 		return 1;
11292 
11293 	return 0;
11294 }
11295 
11296 /*
11297  * wm_nvm_validate_checksum
11298  *
 * The checksum is defined as the sum of the first 64 16-bit words, which
 * should add up to NVM_CHECKSUM (0xBABA).
11300  */
11301 static int
11302 wm_nvm_validate_checksum(struct wm_softc *sc)
11303 {
11304 	uint16_t checksum;
11305 	uint16_t eeprom_data;
11306 #ifdef WM_DEBUG
11307 	uint16_t csum_wordaddr, valid_checksum;
11308 #endif
11309 	int i;
11310 
11311 	checksum = 0;
11312 
11313 	/* Don't check for I211 */
11314 	if (sc->sc_type == WM_T_I211)
11315 		return 0;
11316 
11317 #ifdef WM_DEBUG
11318 	if (sc->sc_type == WM_T_PCH_LPT) {
11319 		csum_wordaddr = NVM_OFF_COMPAT;
11320 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
11321 	} else {
11322 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
11323 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
11324 	}
11325 
11326 	/* Dump EEPROM image for debug */
11327 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
11328 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
11329 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
11330 		/* XXX PCH_SPT? */
11331 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
11332 		if ((eeprom_data & valid_checksum) == 0) {
11333 			DPRINTF(WM_DEBUG_NVM,
11334 			    ("%s: NVM need to be updated (%04x != %04x)\n",
11335 				device_xname(sc->sc_dev), eeprom_data,
11336 				    valid_checksum));
11337 		}
11338 	}
11339 
11340 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
11341 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
11342 		for (i = 0; i < NVM_SIZE; i++) {
11343 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
11344 				printf("XXXX ");
11345 			else
11346 				printf("%04hx ", eeprom_data);
11347 			if (i % 8 == 7)
11348 				printf("\n");
11349 		}
11350 	}
11351 
11352 #endif /* WM_DEBUG */
11353 
11354 	for (i = 0; i < NVM_SIZE; i++) {
11355 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
11356 			return 1;
11357 		checksum += eeprom_data;
11358 	}
11359 
11360 	if (checksum != (uint16_t) NVM_CHECKSUM) {
11361 #ifdef WM_DEBUG
11362 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
11363 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
11364 #endif
11365 	}
11366 
11367 	return 0;
11368 }
11369 
11370 static void
11371 wm_nvm_version_invm(struct wm_softc *sc)
11372 {
11373 	uint32_t dword;
11374 
11375 	/*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm and just use word 61 as the document
	 * describes.  Perhaps it's not perfect, though...
11379 	 *
11380 	 * Example:
11381 	 *
11382 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
11383 	 */
11384 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
11385 	dword = __SHIFTOUT(dword, INVM_VER_1);
11386 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
11387 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
11388 }
11389 
11390 static void
11391 wm_nvm_version(struct wm_softc *sc)
11392 {
11393 	uint16_t major, minor, build, patch;
11394 	uint16_t uid0, uid1;
11395 	uint16_t nvm_data;
11396 	uint16_t off;
11397 	bool check_version = false;
11398 	bool check_optionrom = false;
11399 	bool have_build = false;
11400 
11401 	/*
11402 	 * Version format:
11403 	 *
11404 	 * XYYZ
11405 	 * X0YZ
11406 	 * X0YY
11407 	 *
11408 	 * Example:
11409 	 *
11410 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
11411 	 *	82571	0x50a6	5.10.6?
11412 	 *	82572	0x506a	5.6.10?
11413 	 *	82572EI	0x5069	5.6.9?
11414 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
11415 	 *		0x2013	2.1.3?
	 *	82583	0x10a0	1.10.0? (document says it's the default value)
11417 	 */
11418 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
11419 	switch (sc->sc_type) {
11420 	case WM_T_82571:
11421 	case WM_T_82572:
11422 	case WM_T_82574:
11423 	case WM_T_82583:
11424 		check_version = true;
11425 		check_optionrom = true;
11426 		have_build = true;
11427 		break;
11428 	case WM_T_82575:
11429 	case WM_T_82576:
11430 	case WM_T_82580:
11431 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
11432 			check_version = true;
11433 		break;
11434 	case WM_T_I211:
11435 		wm_nvm_version_invm(sc);
11436 		goto printver;
11437 	case WM_T_I210:
11438 		if (!wm_nvm_get_flash_presence_i210(sc)) {
11439 			wm_nvm_version_invm(sc);
11440 			goto printver;
11441 		}
11442 		/* FALLTHROUGH */
11443 	case WM_T_I350:
11444 	case WM_T_I354:
11445 		check_version = true;
11446 		check_optionrom = true;
11447 		break;
11448 	default:
11449 		return;
11450 	}
11451 	if (check_version) {
11452 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
11453 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
11454 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
11455 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
11456 			build = nvm_data & NVM_BUILD_MASK;
11457 			have_build = true;
11458 		} else
11459 			minor = nvm_data & 0x00ff;
11460 
		/* The minor is BCD-encoded; convert it to decimal */
11462 		minor = (minor / 16) * 10 + (minor % 16);
11463 		sc->sc_nvm_ver_major = major;
11464 		sc->sc_nvm_ver_minor = minor;
11465 
11466 printver:
11467 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
11468 		    sc->sc_nvm_ver_minor);
11469 		if (have_build) {
11470 			sc->sc_nvm_ver_build = build;
11471 			aprint_verbose(".%d", build);
11472 		}
11473 	}
11474 	if (check_optionrom) {
11475 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
11476 		/* Option ROM Version */
11477 		if ((off != 0x0000) && (off != 0xffff)) {
11478 			off += NVM_COMBO_VER_OFF;
11479 			wm_nvm_read(sc, off + 1, 1, &uid1);
11480 			wm_nvm_read(sc, off, 1, &uid0);
11481 			if ((uid0 != 0) && (uid0 != 0xffff)
11482 			    && (uid1 != 0) && (uid1 != 0xffff)) {
11483 				/* 16bits */
11484 				major = uid0 >> 8;
11485 				build = (uid0 << 8) | (uid1 >> 8);
11486 				patch = uid1 & 0x00ff;
11487 				aprint_verbose(", option ROM Version %d.%d.%d",
11488 				    major, build, patch);
11489 			}
11490 		}
11491 	}
11492 
11493 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
11494 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
11495 }
11496 
11497 /*
11498  * wm_nvm_read:
11499  *
11500  *	Read data from the serial EEPROM.
11501  */
11502 static int
11503 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
11504 {
11505 	int rv;
11506 
11507 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11508 		device_xname(sc->sc_dev), __func__));
11509 
11510 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
11511 		return 1;
11512 
11513 	if (wm_nvm_acquire(sc))
11514 		return 1;
11515 
11516 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
11517 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
11518 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
11519 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
11520 	else if (sc->sc_type == WM_T_PCH_SPT)
11521 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
11522 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
11523 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
11524 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
11525 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
11526 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
11527 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
11528 	else
11529 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
11530 
11531 	wm_nvm_release(sc);
11532 	return rv;
11533 }
11534 
11535 /*
11536  * Hardware semaphores.
 * Very complex...
11538  */
11539 
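/* Dummy get/put functions, for chips that need no locking at all. */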
11540 static int
11541 wm_get_null(struct wm_softc *sc)
11542 {
11543 
11544 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11545 		device_xname(sc->sc_dev), __func__));
11546 	return 0;
11547 }
11548 
11549 static void
11550 wm_put_null(struct wm_softc *sc)
11551 {
11552 
11553 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11554 		device_xname(sc->sc_dev), __func__));
11555 	return;
11556 }
11557 
11558 /*
11559  * Get hardware semaphore.
11560  * Same as e1000_get_hw_semaphore_generic()
11561  */
11562 static int
11563 wm_get_swsm_semaphore(struct wm_softc *sc)
11564 {
11565 	int32_t timeout;
11566 	uint32_t swsm;
11567 
11568 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11569 		device_xname(sc->sc_dev), __func__));
11570 	KASSERT(sc->sc_nvm_wordsize > 0);
11571 
11572 	/* Get the SW semaphore. */
11573 	timeout = sc->sc_nvm_wordsize + 1;
11574 	while (timeout) {
11575 		swsm = CSR_READ(sc, WMREG_SWSM);
11576 
11577 		if ((swsm & SWSM_SMBI) == 0)
11578 			break;
11579 
11580 		delay(50);
11581 		timeout--;
11582 	}
11583 
11584 	if (timeout == 0) {
11585 		aprint_error_dev(sc->sc_dev,
11586 		    "could not acquire SWSM SMBI\n");
11587 		return 1;
11588 	}
11589 
11590 	/* Get the FW semaphore. */
11591 	timeout = sc->sc_nvm_wordsize + 1;
11592 	while (timeout) {
11593 		swsm = CSR_READ(sc, WMREG_SWSM);
11594 		swsm |= SWSM_SWESMBI;
11595 		CSR_WRITE(sc, WMREG_SWSM, swsm);
11596 		/* If we managed to set the bit we got the semaphore. */
11597 		swsm = CSR_READ(sc, WMREG_SWSM);
11598 		if (swsm & SWSM_SWESMBI)
11599 			break;
11600 
11601 		delay(50);
11602 		timeout--;
11603 	}
11604 
11605 	if (timeout == 0) {
11606 		aprint_error_dev(sc->sc_dev,
11607 		    "could not acquire SWSM SWESMBI\n");
11608 		/* Release semaphores */
11609 		wm_put_swsm_semaphore(sc);
11610 		return 1;
11611 	}
11612 	return 0;
11613 }
11614 
11615 /*
11616  * Put hardware semaphore.
11617  * Same as e1000_put_hw_semaphore_generic()
11618  */
11619 static void
11620 wm_put_swsm_semaphore(struct wm_softc *sc)
11621 {
11622 	uint32_t swsm;
11623 
11624 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11625 		device_xname(sc->sc_dev), __func__));
11626 
11627 	swsm = CSR_READ(sc, WMREG_SWSM);
11628 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
11629 	CSR_WRITE(sc, WMREG_SWSM, swsm);
11630 }
11631 
11632 /*
11633  * Get SW/FW semaphore.
11634  * Same as e1000_acquire_swfw_sync_82575().
11635  */
11636 static int
11637 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
11638 {
11639 	uint32_t swfw_sync;
11640 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
11641 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
11642 	int timeout = 200;
11643 
11644 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11645 		device_xname(sc->sc_dev), __func__));
11646 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
11647 
11648 	for (timeout = 0; timeout < 200; timeout++) {
11649 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
11650 			if (wm_get_swsm_semaphore(sc)) {
11651 				aprint_error_dev(sc->sc_dev,
11652 				    "%s: failed to get semaphore\n",
11653 				    __func__);
11654 				return 1;
11655 			}
11656 		}
11657 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
11658 		if ((swfw_sync & (swmask | fwmask)) == 0) {
11659 			swfw_sync |= swmask;
11660 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
11661 			if (sc->sc_flags & WM_F_LOCK_SWSM)
11662 				wm_put_swsm_semaphore(sc);
11663 			return 0;
11664 		}
11665 		if (sc->sc_flags & WM_F_LOCK_SWSM)
11666 			wm_put_swsm_semaphore(sc);
11667 		delay(5000);
11668 	}
11669 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
11670 	    device_xname(sc->sc_dev), mask, swfw_sync);
11671 	return 1;
11672 }
11673 
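/*
 * Put SW/FW semaphore.
 * Same as e1000_release_swfw_sync_82575().
 */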
11674 static void
11675 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
11676 {
11677 	uint32_t swfw_sync;
11678 
11679 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11680 		device_xname(sc->sc_dev), __func__));
11681 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
11682 
11683 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
11684 		while (wm_get_swsm_semaphore(sc) != 0)
11685 			continue;
11686 	}
11687 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
11688 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
11689 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
11690 	if (sc->sc_flags & WM_F_LOCK_SWSM)
11691 		wm_put_swsm_semaphore(sc);
11692 }
11693 
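/* Get/put the SW/FW PHY semaphore for this function (82575 and newer). */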
11694 static int
11695 wm_get_phy_82575(struct wm_softc *sc)
11696 {
11697 
11698 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11699 		device_xname(sc->sc_dev), __func__));
11700 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
11701 }
11702 
11703 static void
11704 wm_put_phy_82575(struct wm_softc *sc)
11705 {
11706 
11707 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11708 		device_xname(sc->sc_dev), __func__));
	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
11710 }
11711 
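/*
 * Get the SW/FW/HW semaphore by setting the MDIO software ownership bit
 * in EXTCNFCTR and polling until the hardware acknowledges it.  The ICH
 * PHY mutex is used to cover both PHY and NVM accesses.
 */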
11712 static int
11713 wm_get_swfwhw_semaphore(struct wm_softc *sc)
11714 {
11715 	uint32_t ext_ctrl;
11716 	int timeout = 200;
11717 
11718 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11719 		device_xname(sc->sc_dev), __func__));
11720 
11721 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
11722 	for (timeout = 0; timeout < 200; timeout++) {
11723 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11724 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
11725 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
11726 
11727 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11728 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
11729 			return 0;
11730 		delay(5000);
11731 	}
11732 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
11733 	    device_xname(sc->sc_dev), ext_ctrl);
11734 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
11735 	return 1;
11736 }
11737 
11738 static void
11739 wm_put_swfwhw_semaphore(struct wm_softc *sc)
11740 {
11741 	uint32_t ext_ctrl;
11742 
11743 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11744 		device_xname(sc->sc_dev), __func__));
11745 
11746 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11747 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
11748 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
11749 
11750 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
11751 }
11752 
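/*
 * Get the SW flag used on ICH8 and newer: wait for any other owner to
 * release EXTCNFCTR_MDIO_SW_OWNERSHIP, then claim it and confirm that
 * the claim took effect.
 */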
11753 static int
11754 wm_get_swflag_ich8lan(struct wm_softc *sc)
11755 {
11756 	uint32_t ext_ctrl;
11757 	int timeout;
11758 
11759 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11760 		device_xname(sc->sc_dev), __func__));
11761 	mutex_enter(sc->sc_ich_phymtx);
11762 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
11763 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11764 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
11765 			break;
11766 		delay(1000);
11767 	}
11768 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
11769 		printf("%s: SW has already locked the resource\n",
11770 		    device_xname(sc->sc_dev));
11771 		goto out;
11772 	}
11773 
11774 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
11775 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
11776 	for (timeout = 0; timeout < 1000; timeout++) {
11777 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11778 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
11779 			break;
11780 		delay(1000);
11781 	}
11782 	if (timeout >= 1000) {
11783 		printf("%s: failed to acquire semaphore\n",
11784 		    device_xname(sc->sc_dev));
11785 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
11786 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
11787 		goto out;
11788 	}
11789 	return 0;
11790 
11791 out:
11792 	mutex_exit(sc->sc_ich_phymtx);
11793 	return 1;
11794 }
11795 
11796 static void
11797 wm_put_swflag_ich8lan(struct wm_softc *sc)
11798 {
11799 	uint32_t ext_ctrl;
11800 
11801 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11802 		device_xname(sc->sc_dev), __func__));
11803 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11804 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
11805 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
11806 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
11807 	} else {
11808 		printf("%s: Semaphore unexpectedly released\n",
11809 		    device_xname(sc->sc_dev));
11810 	}
11811 
11812 	mutex_exit(sc->sc_ich_phymtx);
11813 }
11814 
11815 static int
11816 wm_get_nvm_ich8lan(struct wm_softc *sc)
11817 {
11818 
11819 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11820 		device_xname(sc->sc_dev), __func__));
11821 	mutex_enter(sc->sc_ich_nvmmtx);
11822 
11823 	return 0;
11824 }
11825 
11826 static void
11827 wm_put_nvm_ich8lan(struct wm_softc *sc)
11828 {
11829 
11830 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11831 		device_xname(sc->sc_dev), __func__));
11832 	mutex_exit(sc->sc_ich_nvmmtx);
11833 }
11834 
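/*
 * Get/put the 82573-family hardware semaphore (the EXTCNFCTR MDIO
 * software ownership bit).
 */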
11835 static int
11836 wm_get_hw_semaphore_82573(struct wm_softc *sc)
11837 {
11838 	int i = 0;
11839 	uint32_t reg;
11840 
11841 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11842 		device_xname(sc->sc_dev), __func__));
11843 
11844 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11845 	do {
11846 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
11847 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
11848 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11849 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
11850 			break;
11851 		delay(2*1000);
11852 		i++;
11853 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
11854 
11855 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
11856 		wm_put_hw_semaphore_82573(sc);
11857 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
11858 		    device_xname(sc->sc_dev));
11859 		return -1;
11860 	}
11861 
11862 	return 0;
11863 }
11864 
11865 static void
11866 wm_put_hw_semaphore_82573(struct wm_softc *sc)
11867 {
11868 	uint32_t reg;
11869 
11870 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11871 		device_xname(sc->sc_dev), __func__));
11872 
11873 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11874 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
11875 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
11876 }
11877 
11878 /*
11879  * Management mode and power management related subroutines.
11880  * BMC, AMT, suspend/resume and EEE.
11881  */
11882 
11883 #ifdef WM_WOL
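/*
 * Check whether the firmware's manageability mode (e.g. IAMT) is
 * enabled, using the chip-specific method.
 */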
11884 static int
11885 wm_check_mng_mode(struct wm_softc *sc)
11886 {
11887 	int rv;
11888 
11889 	switch (sc->sc_type) {
11890 	case WM_T_ICH8:
11891 	case WM_T_ICH9:
11892 	case WM_T_ICH10:
11893 	case WM_T_PCH:
11894 	case WM_T_PCH2:
11895 	case WM_T_PCH_LPT:
11896 	case WM_T_PCH_SPT:
11897 		rv = wm_check_mng_mode_ich8lan(sc);
11898 		break;
11899 	case WM_T_82574:
11900 	case WM_T_82583:
11901 		rv = wm_check_mng_mode_82574(sc);
11902 		break;
11903 	case WM_T_82571:
11904 	case WM_T_82572:
11905 	case WM_T_82573:
11906 	case WM_T_80003:
11907 		rv = wm_check_mng_mode_generic(sc);
11908 		break;
11909 	default:
		/* nothing to do */
11911 		rv = 0;
11912 		break;
11913 	}
11914 
11915 	return rv;
11916 }
11917 
11918 static int
11919 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
11920 {
11921 	uint32_t fwsm;
11922 
11923 	fwsm = CSR_READ(sc, WMREG_FWSM);
11924 
11925 	if (((fwsm & FWSM_FW_VALID) != 0)
11926 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
11927 		return 1;
11928 
11929 	return 0;
11930 }
11931 
11932 static int
11933 wm_check_mng_mode_82574(struct wm_softc *sc)
11934 {
11935 	uint16_t data;
11936 
11937 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
11938 
11939 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
11940 		return 1;
11941 
11942 	return 0;
11943 }
11944 
11945 static int
11946 wm_check_mng_mode_generic(struct wm_softc *sc)
11947 {
11948 	uint32_t fwsm;
11949 
11950 	fwsm = CSR_READ(sc, WMREG_FWSM);
11951 
11952 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
11953 		return 1;
11954 
11955 	return 0;
11956 }
11957 #endif /* WM_WOL */
11958 
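/*
 * Return 1 if management pass-through to the host should be enabled:
 * firmware must be present, TCO reception enabled, and the firmware
 * running in pass-through (not ASF) mode.
 */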
11959 static int
11960 wm_enable_mng_pass_thru(struct wm_softc *sc)
11961 {
11962 	uint32_t manc, fwsm, factps;
11963 
11964 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
11965 		return 0;
11966 
11967 	manc = CSR_READ(sc, WMREG_MANC);
11968 
11969 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
11970 		device_xname(sc->sc_dev), manc));
11971 	if ((manc & MANC_RECV_TCO_EN) == 0)
11972 		return 0;
11973 
11974 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
11975 		fwsm = CSR_READ(sc, WMREG_FWSM);
11976 		factps = CSR_READ(sc, WMREG_FACTPS);
11977 		if (((factps & FACTPS_MNGCG) == 0)
11978 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
11979 			return 1;
11980 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
11981 		uint16_t data;
11982 
11983 		factps = CSR_READ(sc, WMREG_FACTPS);
11984 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
11985 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
11986 			device_xname(sc->sc_dev), factps, data));
11987 		if (((factps & FACTPS_MNGCG) == 0)
11988 		    && ((data & NVM_CFG2_MNGM_MASK)
11989 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
11990 			return 1;
11991 	} else if (((manc & MANC_SMBUS_EN) != 0)
11992 	    && ((manc & MANC_ASF_EN) == 0))
11993 		return 1;
11994 
11995 	return 0;
11996 }
11997 
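/* Return true if a PHY reset is currently blocked by the firmware. */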
11998 static bool
11999 wm_phy_resetisblocked(struct wm_softc *sc)
12000 {
12001 	bool blocked = false;
12002 	uint32_t reg;
12003 	int i = 0;
12004 
12005 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12006 		device_xname(sc->sc_dev), __func__));
12007 
12008 	switch (sc->sc_type) {
12009 	case WM_T_ICH8:
12010 	case WM_T_ICH9:
12011 	case WM_T_ICH10:
12012 	case WM_T_PCH:
12013 	case WM_T_PCH2:
12014 	case WM_T_PCH_LPT:
12015 	case WM_T_PCH_SPT:
12016 		do {
12017 			reg = CSR_READ(sc, WMREG_FWSM);
12018 			if ((reg & FWSM_RSPCIPHY) == 0) {
12019 				blocked = true;
12020 				delay(10*1000);
12021 				continue;
12022 			}
12023 			blocked = false;
12024 		} while (blocked && (i++ < 30));
		return blocked;
12027 	case WM_T_82571:
12028 	case WM_T_82572:
12029 	case WM_T_82573:
12030 	case WM_T_82574:
12031 	case WM_T_82583:
12032 	case WM_T_80003:
12033 		reg = CSR_READ(sc, WMREG_MANC);
		return (reg & MANC_BLK_PHY_RST_ON_IDE) != 0;
12039 	default:
12040 		/* no problem */
12041 		break;
12042 	}
12043 
12044 	return false;
12045 }
12046 
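/*
 * Let the firmware know that the driver has taken over the device
 * (by setting the DRV_LOAD bit appropriate for the chip).
 */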
12047 static void
12048 wm_get_hw_control(struct wm_softc *sc)
12049 {
12050 	uint32_t reg;
12051 
12052 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12053 		device_xname(sc->sc_dev), __func__));
12054 
12055 	if (sc->sc_type == WM_T_82573) {
12056 		reg = CSR_READ(sc, WMREG_SWSM);
12057 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
12058 	} else if (sc->sc_type >= WM_T_82571) {
12059 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
12060 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
12061 	}
12062 }
12063 
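/* Let the firmware know that the driver has released the device. */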
12064 static void
12065 wm_release_hw_control(struct wm_softc *sc)
12066 {
12067 	uint32_t reg;
12068 
12069 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12070 		device_xname(sc->sc_dev), __func__));
12071 
12072 	if (sc->sc_type == WM_T_82573) {
12073 		reg = CSR_READ(sc, WMREG_SWSM);
12074 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
12075 	} else if (sc->sc_type >= WM_T_82571) {
12076 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
12077 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
12078 	}
12079 }
12080 
12081 static void
12082 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
12083 {
12084 	uint32_t reg;
12085 
12086 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12087 		device_xname(sc->sc_dev), __func__));
12088 
12089 	if (sc->sc_type < WM_T_PCH2)
12090 		return;
12091 
12092 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
12093 
12094 	if (gate)
12095 		reg |= EXTCNFCTR_GATE_PHY_CFG;
12096 	else
12097 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
12098 
12099 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
12100 }
12101 
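/*
 * wm_smbustopci:
 *
 *	Switch the PHY's access method from SMBus to PCIe-style MDIO so
 *	that the MAC can reach it, toggling LANPHYPC where required.
 */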
12102 static void
12103 wm_smbustopci(struct wm_softc *sc)
12104 {
12105 	uint32_t fwsm, reg;
12106 	int rv = 0;
12107 
12108 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12109 		device_xname(sc->sc_dev), __func__));
12110 
12111 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
12112 	wm_gate_hw_phy_config_ich8lan(sc, true);
12113 
12114 	/* Disable ULP */
12115 	wm_ulp_disable(sc);
12116 
12117 	/* Acquire PHY semaphore */
12118 	sc->phy.acquire(sc);
12119 
12120 	fwsm = CSR_READ(sc, WMREG_FWSM);
12121 	switch (sc->sc_type) {
12122 	case WM_T_PCH_LPT:
12123 	case WM_T_PCH_SPT:
12124 		if (wm_phy_is_accessible_pchlan(sc))
12125 			break;
12126 
12127 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
12128 		reg |= CTRL_EXT_FORCE_SMBUS;
12129 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12130 #if 0
12131 		/* XXX Isn't this required??? */
12132 		CSR_WRITE_FLUSH(sc);
12133 #endif
12134 		delay(50 * 1000);
12135 		/* FALLTHROUGH */
12136 	case WM_T_PCH2:
12137 		if (wm_phy_is_accessible_pchlan(sc) == true)
12138 			break;
12139 		/* FALLTHROUGH */
12140 	case WM_T_PCH:
12141 		if (sc->sc_type == WM_T_PCH)
12142 			if ((fwsm & FWSM_FW_VALID) != 0)
12143 				break;
12144 
12145 		if (wm_phy_resetisblocked(sc) == true) {
12146 			printf("XXX reset is blocked(3)\n");
12147 			break;
12148 		}
12149 
12150 		wm_toggle_lanphypc_pch_lpt(sc);
12151 
12152 		if (sc->sc_type >= WM_T_PCH_LPT) {
12153 			if (wm_phy_is_accessible_pchlan(sc) == true)
12154 				break;
12155 
12156 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
12157 			reg &= ~CTRL_EXT_FORCE_SMBUS;
12158 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12159 
12160 			if (wm_phy_is_accessible_pchlan(sc) == true)
12161 				break;
12162 			rv = -1;
12163 		}
12164 		break;
12165 	default:
12166 		break;
12167 	}
12168 
12169 	/* Release semaphore */
12170 	sc->phy.release(sc);
12171 
12172 	if (rv == 0) {
12173 		if (wm_phy_resetisblocked(sc)) {
12174 			printf("XXX reset is blocked(4)\n");
12175 			goto out;
12176 		}
12177 		wm_reset_phy(sc);
12178 		if (wm_phy_resetisblocked(sc))
12179 			printf("XXX reset is blocked(4)\n");
12180 	}
12181 
12182 out:
12183 	/*
12184 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
12185 	 */
12186 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
12187 		delay(10*1000);
12188 		wm_gate_hw_phy_config_ich8lan(sc, false);
12189 	}
12190 }
12191 
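/*
 * Set up manageability: stop the hardware from intercepting ARP and, on
 * 82571 and newer, let management packets through to the host via MANC2H.
 */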
12192 static void
12193 wm_init_manageability(struct wm_softc *sc)
12194 {
12195 
12196 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12197 		device_xname(sc->sc_dev), __func__));
12198 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
12199 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
12200 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
12201 
12202 		/* Disable hardware interception of ARP */
12203 		manc &= ~MANC_ARP_EN;
12204 
12205 		/* Enable receiving management packets to the host */
12206 		if (sc->sc_type >= WM_T_82571) {
12207 			manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
12209 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
12210 		}
12211 
12212 		CSR_WRITE(sc, WMREG_MANC, manc);
12213 	}
12214 }
12215 
12216 static void
12217 wm_release_manageability(struct wm_softc *sc)
12218 {
12219 
12220 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
12221 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
12222 
12223 		manc |= MANC_ARP_EN;
12224 		if (sc->sc_type >= WM_T_82571)
12225 			manc &= ~MANC_EN_MNG2HOST;
12226 
12227 		CSR_WRITE(sc, WMREG_MANC, manc);
12228 	}
12229 }
12230 
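/*
 * Determine the wakeup-related capabilities (AMT, ASF firmware, ARC
 * subsystem, manageability) of this chip and record them in sc_flags.
 */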
12231 static void
12232 wm_get_wakeup(struct wm_softc *sc)
12233 {
12234 
12235 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
12236 	switch (sc->sc_type) {
12237 	case WM_T_82573:
12238 	case WM_T_82583:
12239 		sc->sc_flags |= WM_F_HAS_AMT;
12240 		/* FALLTHROUGH */
12241 	case WM_T_80003:
12242 	case WM_T_82575:
12243 	case WM_T_82576:
12244 	case WM_T_82580:
12245 	case WM_T_I350:
12246 	case WM_T_I354:
12247 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
12248 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
12249 		/* FALLTHROUGH */
12250 	case WM_T_82541:
12251 	case WM_T_82541_2:
12252 	case WM_T_82547:
12253 	case WM_T_82547_2:
12254 	case WM_T_82571:
12255 	case WM_T_82572:
12256 	case WM_T_82574:
12257 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
12258 		break;
12259 	case WM_T_ICH8:
12260 	case WM_T_ICH9:
12261 	case WM_T_ICH10:
12262 	case WM_T_PCH:
12263 	case WM_T_PCH2:
12264 	case WM_T_PCH_LPT:
12265 	case WM_T_PCH_SPT:
12266 		sc->sc_flags |= WM_F_HAS_AMT;
12267 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
12268 		break;
12269 	default:
12270 		break;
12271 	}
12272 
12273 	/* 1: HAS_MANAGE */
12274 	if (wm_enable_mng_pass_thru(sc) != 0)
12275 		sc->sc_flags |= WM_F_HAS_MANAGE;
12276 
12277 #ifdef WM_DEBUG
12278 	printf("\n");
12279 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
12280 		printf("HAS_AMT,");
12281 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
12282 		printf("ARC_SUBSYS_VALID,");
12283 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
12284 		printf("ASF_FIRMWARE_PRES,");
12285 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
12286 		printf("HAS_MANAGE,");
12287 	printf("\n");
12288 #endif
	/*
	 * Note that the WOL flags are set after the resetting of the
	 * EEPROM stuff.
	 */
12293 }
12294 
12295 /*
12296  * Unconfigure Ultra Low Power mode.
 * Only for PCH_LPT and newer, except the devices excluded below.
12298  */
12299 static void
12300 wm_ulp_disable(struct wm_softc *sc)
12301 {
12302 	uint32_t reg;
12303 	int i = 0;
12304 
12305 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12306 		device_xname(sc->sc_dev), __func__));
12307 	/* Exclude old devices */
12308 	if ((sc->sc_type < WM_T_PCH_LPT)
12309 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
12310 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
12311 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
12312 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
12313 		return;
12314 
12315 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
12316 		/* Request ME un-configure ULP mode in the PHY */
12317 		reg = CSR_READ(sc, WMREG_H2ME);
12318 		reg &= ~H2ME_ULP;
12319 		reg |= H2ME_ENFORCE_SETTINGS;
12320 		CSR_WRITE(sc, WMREG_H2ME, reg);
12321 
12322 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
12323 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
12324 			if (i++ == 30) {
12325 				printf("%s timed out\n", __func__);
12326 				return;
12327 			}
12328 			delay(10 * 1000);
12329 		}
12330 		reg = CSR_READ(sc, WMREG_H2ME);
12331 		reg &= ~H2ME_ENFORCE_SETTINGS;
12332 		CSR_WRITE(sc, WMREG_H2ME, reg);
12333 
12334 		return;
12335 	}
12336 
12337 	/* Acquire semaphore */
12338 	sc->phy.acquire(sc);
12339 
12340 	/* Toggle LANPHYPC */
12341 	wm_toggle_lanphypc_pch_lpt(sc);
12342 
12343 	/* Unforce SMBus mode in PHY */
12344 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
12345 	if (reg == 0x0000 || reg == 0xffff) {
12346 		uint32_t reg2;
12347 
12348 		printf("%s: Force SMBus first.\n", __func__);
12349 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
12350 		reg2 |= CTRL_EXT_FORCE_SMBUS;
12351 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
12352 		delay(50 * 1000);
12353 
12354 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
12355 	}
12356 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
12357 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
12358 
12359 	/* Unforce SMBus mode in MAC */
12360 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
12361 	reg &= ~CTRL_EXT_FORCE_SMBUS;
12362 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12363 
12364 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
12365 	reg |= HV_PM_CTRL_K1_ENA;
12366 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
12367 
12368 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
12369 	reg &= ~(I218_ULP_CONFIG1_IND
12370 	    | I218_ULP_CONFIG1_STICKY_ULP
12371 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
12372 	    | I218_ULP_CONFIG1_WOL_HOST
12373 	    | I218_ULP_CONFIG1_INBAND_EXIT
12374 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
12375 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
12376 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
12377 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
12378 	reg |= I218_ULP_CONFIG1_START;
12379 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
12380 
12381 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
12382 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
12383 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
12384 
12385 	/* Release semaphore */
12386 	sc->phy.release(sc);
12387 	wm_gmii_reset(sc);
12388 	delay(50 * 1000);
12389 }
12390 
12391 /* WOL in the newer chipset interfaces (pchlan) */
12392 static void
12393 wm_enable_phy_wakeup(struct wm_softc *sc)
12394 {
12395 #if 0
12396 	uint16_t preg;
12397 
12398 	/* Copy MAC RARs to PHY RARs */
12399 
12400 	/* Copy MAC MTA to PHY MTA */
12401 
12402 	/* Configure PHY Rx Control register */
12403 
12404 	/* Enable PHY wakeup in MAC register */
12405 
12406 	/* Configure and enable PHY wakeup in PHY registers */
12407 
12408 	/* Activate PHY wakeup */
12409 
12410 	/* XXX */
12411 #endif
12412 }
12413 
12414 /* Power down workaround on D3 */
12415 static void
12416 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
12417 {
12418 	uint32_t reg;
12419 	int i;
12420 
12421 	for (i = 0; i < 2; i++) {
12422 		/* Disable link */
12423 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
12424 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
12425 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
12426 
12427 		/*
12428 		 * Call gig speed drop workaround on Gig disable before
12429 		 * accessing any PHY registers
12430 		 */
12431 		if (sc->sc_type == WM_T_ICH8)
12432 			wm_gig_downshift_workaround_ich8lan(sc);
12433 
12434 		/* Write VR power-down enable */
12435 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
12436 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
12437 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
12438 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
12439 
12440 		/* Read it back and test */
12441 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
12442 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
12443 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
12444 			break;
12445 
12446 		/* Issue PHY reset and repeat at most one more time */
12447 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
12448 	}
12449 }
12450 
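/*
 * Enable wake-on-LAN: advertise the wakeup capability, set up the magic
 * packet filter and request PME through the PCI power management
 * registers.
 */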
12451 static void
12452 wm_enable_wakeup(struct wm_softc *sc)
12453 {
12454 	uint32_t reg, pmreg;
12455 	pcireg_t pmode;
12456 
12457 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12458 		device_xname(sc->sc_dev), __func__));
12459 
12460 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
12461 		&pmreg, NULL) == 0)
12462 		return;
12463 
12464 	/* Advertise the wakeup capability */
12465 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
12466 	    | CTRL_SWDPIN(3));
12467 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
12468 
12469 	/* ICH workaround */
12470 	switch (sc->sc_type) {
12471 	case WM_T_ICH8:
12472 	case WM_T_ICH9:
12473 	case WM_T_ICH10:
12474 	case WM_T_PCH:
12475 	case WM_T_PCH2:
12476 	case WM_T_PCH_LPT:
12477 	case WM_T_PCH_SPT:
12478 		/* Disable gig during WOL */
12479 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
12480 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
12481 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
12482 		if (sc->sc_type == WM_T_PCH)
12483 			wm_gmii_reset(sc);
12484 
12485 		/* Power down workaround */
12486 		if (sc->sc_phytype == WMPHY_82577) {
12487 			struct mii_softc *child;
12488 
12489 			/* Assume that the PHY is copper */
12490 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
12491 			if (child->mii_mpd_rev <= 2)
12492 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
12493 				    (768 << 5) | 25, 0x0444); /* magic num */
12494 		}
12495 		break;
12496 	default:
12497 		break;
12498 	}
12499 
12500 	/* Keep the laser running on fiber adapters */
12501 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
12502 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
12503 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
12504 		reg |= CTRL_EXT_SWDPIN(3);
12505 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12506 	}
12507 
12508 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
12509 #if 0	/* for the multicast packet */
12510 	reg |= WUFC_MC;
12511 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
12512 #endif
12513 
12514 	if (sc->sc_type >= WM_T_PCH)
12515 		wm_enable_phy_wakeup(sc);
12516 	else {
12517 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
12518 		CSR_WRITE(sc, WMREG_WUFC, reg);
12519 	}
12520 
12521 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
12522 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
12523 		|| (sc->sc_type == WM_T_PCH2))
12524 		    && (sc->sc_phytype == WMPHY_IGP_3))
12525 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
12526 
12527 	/* Request PME */
12528 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
12529 #if 0
12530 	/* Disable WOL */
12531 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
12532 #else
12533 	/* For WOL */
12534 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
12535 #endif
12536 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
12537 }
12538 
12539 /* LPLU */
12540 
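/*
 * Disable D0 Low Power Link Up (LPLU) so that the PHY is not forced to
 * link at reduced speed while in D0.
 */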
12541 static void
12542 wm_lplu_d0_disable(struct wm_softc *sc)
12543 {
12544 	uint32_t reg;
12545 
12546 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12547 		device_xname(sc->sc_dev), __func__));
12548 
12549 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
12550 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
12551 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
12552 }
12553 
12554 static void
12555 wm_lplu_d0_disable_pch(struct wm_softc *sc)
12556 {
12557 	uint32_t reg;
12558 
12559 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12560 		device_xname(sc->sc_dev), __func__));
12561 
12562 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
12563 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
12564 	reg |= HV_OEM_BITS_ANEGNOW;
12565 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
12566 }
12567 
12568 /* EEE */
12569 
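/*
 * Enable or disable Energy Efficient Ethernet advertisement and LPI on
 * the I350 according to the WM_F_EEE flag.
 */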
12570 static void
12571 wm_set_eee_i350(struct wm_softc *sc)
12572 {
12573 	uint32_t ipcnfg, eeer;
12574 
12575 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
12576 	eeer = CSR_READ(sc, WMREG_EEER);
12577 
12578 	if ((sc->sc_flags & WM_F_EEE) != 0) {
12579 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
12580 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
12581 		    | EEER_LPI_FC);
12582 	} else {
12583 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
12584 		ipcnfg &= ~IPCNFG_10BASE_TE;
12585 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
12586 		    | EEER_LPI_FC);
12587 	}
12588 
12589 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
12590 	CSR_WRITE(sc, WMREG_EEER, eeer);
12591 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
12592 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
12593 }
12594 
12595 /*
12596  * Workarounds (mainly PHY related).
12597  * Basically, workarounds for the PHY itself are in the PHY drivers.
12598  */
12599 
12600 /* Workaround for the 82566 Kumeran PCS lock loss (currently disabled) */
12601 static void
12602 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
12603 {
12604 #if 0
12605 	int miistatus, active, i;
12606 	int reg;
12607 
12608 	miistatus = sc->sc_mii.mii_media_status;
12609 
12610 	/* If the link is not up, do nothing */
12611 	if ((miistatus & IFM_ACTIVE) == 0)
12612 		return;
12613 
12614 	active = sc->sc_mii.mii_media_active;
12615 
12616 	/* Nothing to do if the link is other than 1Gbps */
12617 	if (IFM_SUBTYPE(active) != IFM_1000_T)
12618 		return;
12619 
12620 	for (i = 0; i < 10; i++) {
12621 		/* read twice */
12622 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
12623 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
12624 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
12625 			goto out;	/* GOOD! */
12626 
12627 		/* Reset the PHY */
12628 		wm_gmii_reset(sc);
12629 		delay(5 * 1000);
12630 	}
12631 
12632 	/* Disable GigE link negotiation */
12633 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
12634 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
12635 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
12636 
12637 	/*
12638 	 * Call gig speed drop workaround on Gig disable before accessing
12639 	 * any PHY registers.
12640 	 */
12641 	wm_gig_downshift_workaround_ich8lan(sc);
12642 
12643 out:
12644 	return;
12645 #endif
12646 }
12647 
12648 /* WOL from S5 stops working without this gig downshift workaround */
12649 static void
12650 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
12651 {
12652 	uint16_t kmrn_reg;
12653 
12654 	/* Only for igp3 */
12655 	if (sc->sc_phytype == WMPHY_IGP_3) {
12656 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
12657 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
12658 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
12659 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
12660 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
12661 	}
12662 }
12663 
12664 /*
12665  * Workaround for the PCH PHYs (82577/82578).
12666  * XXX should this be moved to a new PHY driver?
12667  */
12668 static void
12669 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
12670 {
12671 
12672 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12673 		device_xname(sc->sc_dev), __func__));
12674 	KASSERT(sc->sc_type == WM_T_PCH);
12675 
12676 	if (sc->sc_phytype == WMPHY_82577)
12677 		wm_set_mdio_slow_mode_hv(sc);
12678 
12679 	/* XXX not implemented: (PCH rev. 2) && (82577 && (PHY rev 2 or 3)) */
12680 
12681 	/* XXX not implemented: (82577 && (PHY rev 1 or 2)) || (82578 && PHY rev 1) */
12682 
12683 	/* 82578 */
12684 	if (sc->sc_phytype == WMPHY_82578) {
12685 		struct mii_softc *child;
12686 
12687 		/*
12688 		 * Return registers to default by doing a soft reset then
12689 		 * writing 0x3140 to the control register
12690 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
12691 		 */
12692 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
12693 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
12694 			PHY_RESET(child);
12695 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
12696 			    0x3140);
12697 		}
12698 	}
12699 
12700 	/* Select page 0 */
12701 	sc->phy.acquire(sc);
12702 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
12703 	sc->phy.release(sc);
12704 
12705 	/*
12706 	 * Configure the K1 Si workaround during phy reset assuming there is
12707 	 * link so that it disables K1 if link is in 1Gbps.
12708 	 */
12709 	wm_k1_gig_workaround_hv(sc, 1);
12710 }
12711 
12712 static void
12713 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
12714 {
12715 
12716 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12717 		device_xname(sc->sc_dev), __func__));
12718 	KASSERT(sc->sc_type == WM_T_PCH2);
12719 
12720 	wm_set_mdio_slow_mode_hv(sc);
12721 }
12722 
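/*
 * K1 is a power-saving state of the Kumeran (MAC/PHY) interconnect.
 * As in other drivers for this hardware, K1 is kept disabled while the
 * link is up at 1Gbps; 'link' tells this function whether to assume an
 * active link.
 */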
12723 static int
12724 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
12725 {
12726 	int k1_enable = sc->sc_nvm_k1_enabled;
12727 
12728 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12729 		device_xname(sc->sc_dev), __func__));
12730 
12731 	if (sc->phy.acquire(sc) != 0)
12732 		return -1;
12733 
12734 	if (link) {
12735 		k1_enable = 0;
12736 
12737 		/* Link stall fix for link up */
12738 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x0100);
12739 	} else {
12740 		/* Link stall fix for link down */
12741 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x4100);
12742 	}
12743 
12744 	wm_configure_k1_ich8lan(sc, k1_enable);
12745 	sc->phy.release(sc);
12746 
12747 	return 0;
12748 }
12749 
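/*
 * Note: slow MDIO mode relaxes the MDIO timing, presumably so that the
 * PHY remains accessible when it cannot keep up at normal speed.
 */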
12750 static void
12751 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
12752 {
12753 	uint32_t reg;
12754 
12755 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
12756 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
12757 	    reg | HV_KMRN_MDIO_SLOW);
12758 }
12759 
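/*
 * Write the new K1 setting, then briefly force the MAC speed
 * (CTRL_FRCSPD plus CTRL_EXT_SPD_BYPS), apparently so the Kumeran
 * interface picks up the change, and finally restore CTRL and
 * CTRL_EXT.
 */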
12760 static void
12761 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
12762 {
12763 	uint32_t ctrl, ctrl_ext, tmp;
12764 	uint16_t kmrn_reg;
12765 
12766 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
12767 
12768 	if (k1_enable)
12769 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
12770 	else
12771 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
12772 
12773 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
12774 
12775 	delay(20);
12776 
12777 	ctrl = CSR_READ(sc, WMREG_CTRL);
12778 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
12779 
12780 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
12781 	tmp |= CTRL_FRCSPD;
12782 
12783 	CSR_WRITE(sc, WMREG_CTRL, tmp);
12784 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
12785 	CSR_WRITE_FLUSH(sc);
12786 	delay(20);
12787 
12788 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
12789 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
12790 	CSR_WRITE_FLUSH(sc);
12791 	delay(20);
12792 }
12793 
12794 /* Special case - the 82575 needs manual init after reset ... */
12795 static void
12796 wm_reset_init_script_82575(struct wm_softc *sc)
12797 {
12798 	/*
12799 	 * Remark: this is untested code - we have no board without EEPROM.
12800 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
12801 	 */
12802 
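	/*
	 * Note: each wm_82575_write_8bit_ctlr_reg() call below is assumed
	 * to write one 8-bit value (the fourth argument) to a sub-register
	 * offset (the third argument) of the named indirect register.
	 */
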
12803 	/* SerDes configuration via SERDESCTRL */
12804 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
12805 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
12806 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
12807 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
12808 
12809 	/* CCM configuration via CCMCTL register */
12810 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
12811 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
12812 
12813 	/* PCIe lanes configuration */
12814 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
12815 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
12816 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
12817 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
12818 
12819 	/* PCIe PLL Configuration */
12820 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
12821 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
12822 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
12823 }
12824 
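/*
 * Restore the MDICNFG destination and shared-MDIO bits from the NVM
 * after a reset, so that SGMII PHY accesses use the interface
 * configured for this port.
 */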
12825 static void
12826 wm_reset_mdicnfg_82580(struct wm_softc *sc)
12827 {
12828 	uint32_t reg;
12829 	uint16_t nvmword;
12830 	int rv;
12831 
12832 	if ((sc->sc_flags & WM_F_SGMII) == 0)
12833 		return;
12834 
12835 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
12836 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
12837 	if (rv != 0) {
12838 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
12839 		    __func__);
12840 		return;
12841 	}
12842 
12843 	reg = CSR_READ(sc, WMREG_MDICNFG);
12844 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
12845 		reg |= MDICNFG_DEST;
12846 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
12847 		reg |= MDICNFG_COM_MDIO;
12848 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
12849 }
12850 
12851 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
12852 
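/*
 * Probe the PHY by reading its ID registers.  If it does not respond,
 * retry once in slow MDIO mode; on success, unforce SMBus mode on
 * PCH_LPT and newer so that MDIO is used.  Called with the PHY
 * semaphore held (note the release/re-acquire around the retry).
 */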
12853 static bool
12854 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
12855 {
12856 	int i;
12857 	uint32_t reg;
12858 	uint16_t id1, id2;
12859 
12860 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12861 		device_xname(sc->sc_dev), __func__));
12862 	id1 = id2 = 0xffff;
12863 	for (i = 0; i < 2; i++) {
12864 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
12865 		if (MII_INVALIDID(id1))
12866 			continue;
12867 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
12868 		if (MII_INVALIDID(id2))
12869 			continue;
12870 		break;
12871 	}
12872 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2)) {
12873 		goto out;
12874 	}
12875 
12876 	if (sc->sc_type < WM_T_PCH_LPT) {
12877 		sc->phy.release(sc);
12878 		wm_set_mdio_slow_mode_hv(sc);
12879 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
12880 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
12881 		sc->phy.acquire(sc);
12882 	}
12883 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
12884 		printf("%s: PHY is inaccessible\n", device_xname(sc->sc_dev));
12885 		return false;
12886 	}
12887 out:
12888 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
12889 		/* Only unforce SMBus if ME is not active */
12890 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
12891 			/* Unforce SMBus mode in PHY */
12892 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
12893 			    CV_SMB_CTRL);
12894 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
12895 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
12896 			    CV_SMB_CTRL, reg);
12897 
12898 			/* Unforce SMBus mode in MAC */
12899 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
12900 			reg &= ~CTRL_EXT_FORCE_SMBUS;
12901 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12902 		}
12903 	}
12904 	return true;
12905 }
12906 
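/*
 * Toggling the LANPHYPC pin value power-cycles the PHY to recover it
 * from a bad state.  PCH_LPT and newer can poll CTRL_EXT_LPCD for
 * completion; older parts just wait 50ms.
 */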
12907 static void
12908 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
12909 {
12910 	uint32_t reg;
12911 	int i;
12912 
12913 	/* Set PHY Config Counter to 50msec */
12914 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
12915 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
12916 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
12917 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
12918 
12919 	/* Toggle LANPHYPC */
12920 	reg = CSR_READ(sc, WMREG_CTRL);
12921 	reg |= CTRL_LANPHYPC_OVERRIDE;
12922 	reg &= ~CTRL_LANPHYPC_VALUE;
12923 	CSR_WRITE(sc, WMREG_CTRL, reg);
12924 	CSR_WRITE_FLUSH(sc);
12925 	delay(1000);
12926 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
12927 	CSR_WRITE(sc, WMREG_CTRL, reg);
12928 	CSR_WRITE_FLUSH(sc);
12929 
12930 	if (sc->sc_type < WM_T_PCH_LPT)
12931 		delay(50 * 1000);
12932 	else {
12933 		i = 20;
12934 
12935 		do {
12936 			delay(5 * 1000);
12937 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
12938 		    && i--);
12939 
12940 		delay(30 * 1000);
12941 	}
12942 }
12943 
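/*
 * Program the PCIe LTR (Latency Tolerance Reporting) value: roughly
 * how long the Rx packet buffer can absorb traffic at the current
 * link speed, which tells the platform how deeply it may sleep.
 */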
12944 static int
12945 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
12946 {
12947 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
12948 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
12949 	uint32_t rxa;
12950 	uint16_t scale = 0, lat_enc = 0;
12951 	int64_t lat_ns, value;
12952 
12953 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12954 		device_xname(sc->sc_dev), __func__));
12955 
12956 	if (link) {
12957 		pcireg_t preg;
12958 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
12959 
12960 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
12961 
12962 		/*
12963 		 * Determine the maximum latency tolerated by the device.
12964 		 *
12965 		 * Per the PCIe spec, the tolerated latencies are encoded as
12966 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
12967 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
12968 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
12969 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
12970 		 */
12971 		lat_ns = ((int64_t)rxa * 1024 -
12972 		    (2 * (int64_t)sc->sc_ethercom.ec_if.if_mtu)) * 8 * 1000;
12973 		if (lat_ns < 0)
12974 			lat_ns = 0;
12975 		else {
12976 			uint32_t status;
12977 			uint16_t speed;
12978 
12979 			status = CSR_READ(sc, WMREG_STATUS);
12980 			switch (__SHIFTOUT(status, STATUS_SPEED)) {
12981 			case STATUS_SPEED_10:
12982 				speed = 10;
12983 				break;
12984 			case STATUS_SPEED_100:
12985 				speed = 100;
12986 				break;
12987 			case STATUS_SPEED_1000:
12988 				speed = 1000;
12989 				break;
12990 			default:
12991 				printf("%s: Unknown speed (status = %08x)\n",
12992 				    device_xname(sc->sc_dev), status);
12993 				return -1;
12994 			}
12995 			lat_ns /= speed;
12996 		}
12997 		value = lat_ns;
12998 
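		/*
		 * Encode lat_ns as a (scale, value) pair where the latency
		 * is value * 2^(5 * scale) ns.  E.g. with the hypothetical
		 * lat_ns = 172608, two rounds give scale = 2, value = 169,
		 * i.e. about 169 * 2^10 ns.
		 */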
12999 		while (value > LTRV_VALUE) {
13000 			scale++;
13001 			value = howmany(value, __BIT(5));
13002 		}
13003 		if (scale > LTRV_SCALE_MAX) {
13004 			printf("%s: Invalid LTR latency scale %d\n",
13005 			    device_xname(sc->sc_dev), scale);
13006 			return -1;
13007 		}
13008 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
13009 
13010 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
13011 		    WM_PCI_LTR_CAP_LPT);
13012 		max_snoop = preg & 0xffff;
13013 		max_nosnoop = preg >> 16;
13014 
13015 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
13016 
13017 		if (lat_enc > max_ltr_enc) {
13018 			lat_enc = max_ltr_enc;
13019 		}
13020 	}
13021 	/* Program the same latency for both Snoop and No-Snoop */
13022 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
13023 	CSR_WRITE(sc, WMREG_LTRV, reg);
13024 
13025 	return 0;
13026 }
13027 
13028 /*
13029  * I210 Errata 25 and I211 Errata 10
13030  * Slow System Clock.
13031  */
13032 static void
13033 wm_pll_workaround_i210(struct wm_softc *sc)
13034 {
13035 	uint32_t mdicnfg, wuc;
13036 	uint32_t reg;
13037 	pcireg_t pcireg;
13038 	uint32_t pmreg;
13039 	uint16_t nvmword, tmp_nvmword;
13040 	int phyval;
13041 	bool wa_done = false;
13042 	int i;
13043 
13044 	/* Save WUC and MDICNFG registers */
13045 	wuc = CSR_READ(sc, WMREG_WUC);
13046 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
13047 
13048 	reg = mdicnfg & ~MDICNFG_DEST;
13049 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
13050 
13051 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
13052 		nvmword = INVM_DEFAULT_AL;
13053 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
13054 
13055 	/* Get Power Management cap offset */
13056 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
13057 		&pmreg, NULL) == 0)
13058 		return;
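
	/*
	 * If the PHY PLL reads back as unconfigured, reset the PHY,
	 * program a corrected NVM autoload word into EEARBC, and bounce
	 * the device through D3/D0 so that the autoload is re-run; retry
	 * up to WM_MAX_PLL_TRIES times.
	 */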
13059 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
13060 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
13061 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
13062 
13063 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
13064 			break; /* OK */
13065 		}
13066 
13067 		wa_done = true;
13068 		/* Directly reset the internal PHY */
13069 		reg = CSR_READ(sc, WMREG_CTRL);
13070 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
13071 
13072 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
13073 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
13074 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
13075 
13076 		CSR_WRITE(sc, WMREG_WUC, 0);
13077 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
13078 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
13079 
13080 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
13081 		    pmreg + PCI_PMCSR);
13082 		pcireg |= PCI_PMCSR_STATE_D3;
13083 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
13084 		    pmreg + PCI_PMCSR, pcireg);
13085 		delay(1000);
13086 		pcireg &= ~PCI_PMCSR_STATE_D3;
13087 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
13088 		    pmreg + PCI_PMCSR, pcireg);
13089 
13090 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
13091 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
13092 
13093 		/* Restore WUC register */
13094 		CSR_WRITE(sc, WMREG_WUC, wuc);
13095 	}
13096 
13097 	/* Restore MDICNFG setting */
13098 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
13099 	if (wa_done)
13100 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
13101 }
13102