1 /*	$NetBSD: if_wm.c,v 1.417 2016/08/10 04:52:40 knakahara Exp $	*/
2 
3 /*
4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*******************************************************************************
39 
40   Copyright (c) 2001-2005, Intel Corporation
41   All rights reserved.
42 
43   Redistribution and use in source and binary forms, with or without
44   modification, are permitted provided that the following conditions are met:
45 
46    1. Redistributions of source code must retain the above copyright notice,
47       this list of conditions and the following disclaimer.
48 
49    2. Redistributions in binary form must reproduce the above copyright
50       notice, this list of conditions and the following disclaimer in the
51       documentation and/or other materials provided with the distribution.
52 
53    3. Neither the name of the Intel Corporation nor the names of its
54       contributors may be used to endorse or promote products derived from
55       this software without specific prior written permission.
56 
57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67   POSSIBILITY OF SUCH DAMAGE.
68 
69 *******************************************************************************/
70 /*
71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72  *
73  * TODO (in order of importance):
74  *
75  *	- Check XXX'ed comments
76  *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
77  *	- TX Multi queue improvement (refine queue selection logic)
78  *	- Advanced Receive Descriptor
79  *	- EEE (Energy Efficient Ethernet)
80  *	- Virtual Function
81  *	- Set LED correctly (based on contents in EEPROM)
82  *	- Rework how parameters are loaded from the EEPROM.
83  *	- Image Unique ID
84  */
85 
86 #include <sys/cdefs.h>
87 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.417 2016/08/10 04:52:40 knakahara Exp $");
88 
89 #ifdef _KERNEL_OPT
90 #include "opt_net_mpsafe.h"
91 #endif
92 
93 #include <sys/param.h>
94 #include <sys/systm.h>
95 #include <sys/callout.h>
96 #include <sys/mbuf.h>
97 #include <sys/malloc.h>
98 #include <sys/kmem.h>
99 #include <sys/kernel.h>
100 #include <sys/socket.h>
101 #include <sys/ioctl.h>
102 #include <sys/errno.h>
103 #include <sys/device.h>
104 #include <sys/queue.h>
105 #include <sys/syslog.h>
106 #include <sys/interrupt.h>
107 #include <sys/cpu.h>
108 #include <sys/pcq.h>
109 
110 #include <sys/rndsource.h>
111 
112 #include <net/if.h>
113 #include <net/if_dl.h>
114 #include <net/if_media.h>
115 #include <net/if_ether.h>
116 
117 #include <net/bpf.h>
118 
119 #include <netinet/in.h>			/* XXX for struct ip */
120 #include <netinet/in_systm.h>		/* XXX for struct ip */
121 #include <netinet/ip.h>			/* XXX for struct ip */
122 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
123 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
124 
125 #include <sys/bus.h>
126 #include <sys/intr.h>
127 #include <machine/endian.h>
128 
129 #include <dev/mii/mii.h>
130 #include <dev/mii/miivar.h>
131 #include <dev/mii/miidevs.h>
132 #include <dev/mii/mii_bitbang.h>
133 #include <dev/mii/ikphyreg.h>
134 #include <dev/mii/igphyreg.h>
135 #include <dev/mii/igphyvar.h>
136 #include <dev/mii/inbmphyreg.h>
137 
138 #include <dev/pci/pcireg.h>
139 #include <dev/pci/pcivar.h>
140 #include <dev/pci/pcidevs.h>
141 
142 #include <dev/pci/if_wmreg.h>
143 #include <dev/pci/if_wmvar.h>
144 
145 #ifdef WM_DEBUG
146 #define	WM_DEBUG_LINK		0x01
147 #define	WM_DEBUG_TX		0x02
148 #define	WM_DEBUG_RX		0x04
149 #define	WM_DEBUG_GMII		0x08
150 #define	WM_DEBUG_MANAGE		0x10
151 #define	WM_DEBUG_NVM		0x20
152 #define	WM_DEBUG_INIT		0x40
153 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
154     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT;
155 
156 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
157 #else
158 #define	DPRINTF(x, y)	/* nothing */
159 #endif /* WM_DEBUG */
160 
161 #ifdef NET_MPSAFE
162 #define WM_MPSAFE	1
163 #endif
164 
165 /*
166  * The maximum number of interrupts this driver uses.
167  */
168 #define WM_MAX_NQUEUEINTR	16
169 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
170 
171 /*
172  * Transmit descriptor list size.  Due to errata, we can only have
173  * 256 hardware descriptors in the ring on < 82544, but we use 4096
174  * on >= 82544.  We tell the upper layers that they can queue a lot
175  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
176  * of them at a time.
177  *
178  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
179  * chains containing many small mbufs have been observed in zero-copy
180  * situations with jumbo frames.
181  */
182 #define	WM_NTXSEGS		256
183 #define	WM_IFQUEUELEN		256
184 #define	WM_TXQUEUELEN_MAX	64
185 #define	WM_TXQUEUELEN_MAX_82547	16
186 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
187 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
188 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
189 #define	WM_NTXDESC_82542	256
190 #define	WM_NTXDESC_82544	4096
191 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
192 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
193 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
194 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
195 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
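/*
 * Both ring sizes are powers of two, so the "& MASK" form above wraps
 * the index without a modulo: e.g. with WM_NTXDESC(txq) == 256,
 * WM_NEXTTX(txq, 254) == 255 and WM_NEXTTX(txq, 255) == 0.
 */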
196 
197 #define	WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */
198 
199 #define	WM_TXINTERQSIZE		256
200 
201 /*
202  * Receive descriptor list size.  We have one Rx buffer for normal
203  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
204  * packet.  We allocate 256 receive descriptors, each with a 2k
205  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
206  */
207 #define	WM_NRXDESC		256
208 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
209 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
210 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
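/*
 * Worked example for the comment above: a 9014-byte jumbo frame
 * occupies howmany(9014, MCLBYTES) == 5 of the 2k clusters, so the
 * 256-entry ring has room for about 50 of them (256 / 5).
 */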
211 
212 typedef union txdescs {
213 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
214 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
215 } txdescs_t;
216 
217 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
218 #define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))
219 
220 /*
221  * Software state for transmit jobs.
222  */
223 struct wm_txsoft {
224 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
225 	bus_dmamap_t txs_dmamap;	/* our DMA map */
226 	int txs_firstdesc;		/* first descriptor in packet */
227 	int txs_lastdesc;		/* last descriptor in packet */
228 	int txs_ndesc;			/* # of descriptors used */
229 };
230 
231 /*
232  * Software state for receive buffers.  Each descriptor gets a
233  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
234  * more than one buffer, we chain them together.
235  */
236 struct wm_rxsoft {
237 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
238 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
239 };
240 
241 #define WM_LINKUP_TIMEOUT	50
242 
243 static uint16_t swfwphysem[] = {
244 	SWFW_PHY0_SM,
245 	SWFW_PHY1_SM,
246 	SWFW_PHY2_SM,
247 	SWFW_PHY3_SM
248 };
249 
250 static const uint32_t wm_82580_rxpbs_table[] = {
251 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
252 };
253 
254 struct wm_softc;
255 
256 #ifdef WM_EVENT_COUNTERS
257 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
258 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
259 	struct evcnt qname##_ev_##evname;
260 
261 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
262 	do {								\
263 		snprintf((q)->qname##_##evname##_evcnt_name,		\
264 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
265 		    "%s%02d%s", #qname, (qnum), #evname);		\
266 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
267 		    (evtype), NULL, (xname),				\
268 		    (q)->qname##_##evname##_evcnt_name);		\
269 	} while (0)
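
/*
 * For example, WM_Q_EVCNT_ATTACH(txq, txdw, q, 0, xname, EVCNT_TYPE_INTR)
 * formats "txq00txdw" into (q)->txq_txdw_evcnt_name and attaches the
 * counter (q)->txq_ev_txdw under that name.
 */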
270 
271 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
272 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
273 
274 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
275 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
276 #endif /* WM_EVENT_COUNTERS */
277 
278 struct wm_txqueue {
279 	kmutex_t *txq_lock;		/* lock for tx operations */
280 
281 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
282 
283 	/* Software state for the transmit descriptors. */
284 	int txq_num;			/* must be a power of two */
285 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
286 
287 	/* TX control data structures. */
288 	int txq_ndesc;			/* must be a power of two */
289 	size_t txq_descsize;		/* a tx descriptor size */
290 	txdescs_t *txq_descs_u;
291 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
292 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
293 	int txq_desc_rseg;		/* real number of control segments */
294 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
295 #define	txq_descs	txq_descs_u->sctxu_txdescs
296 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
297 
298 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
299 
300 	int txq_free;			/* number of free Tx descriptors */
301 	int txq_next;			/* next ready Tx descriptor */
302 
303 	int txq_sfree;			/* number of free Tx jobs */
304 	int txq_snext;			/* next free Tx job */
305 	int txq_sdirty;			/* dirty Tx jobs */
306 
307 	/* These 4 variables are used only on the 82547. */
308 	int txq_fifo_size;		/* Tx FIFO size */
309 	int txq_fifo_head;		/* current head of FIFO */
310 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
311 	int txq_fifo_stall;		/* Tx FIFO is stalled */
312 
313 	/*
314 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
315 	 * CPUs. This intermediate queue hands packets between them
316 	 * without blocking.
316 	 */
317 	pcq_t *txq_interq;
318 
319 	/*
320 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
321 	 * to manage the Tx H/W queue's busy flag.
322 	 */
323 	int txq_flags;			/* flags for H/W queue, see below */
324 #define	WM_TXQ_NO_SPACE	0x1
325 
326 #ifdef WM_EVENT_COUNTERS
327 	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
328 	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
329 	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
330 	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
331 	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
332 						/* XXX not used? */
333 
334 	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
335 	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
336 	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
337 	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
338 	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
339 	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */
340 
341 	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */
342 
343 	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */
344 
345 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
346 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
347 #endif /* WM_EVENT_COUNTERS */
348 };
349 
350 struct wm_rxqueue {
351 	kmutex_t *rxq_lock;		/* lock for rx operations */
352 
353 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
354 
355 	/* Software state for the receive descriptors. */
356 	wiseman_rxdesc_t *rxq_descs;
357 
358 	/* RX control data structures. */
359 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
360 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
361 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
362 	int rxq_desc_rseg;		/* real number of control segments */
363 	size_t rxq_desc_size;		/* control data size */
364 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
365 
366 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
367 
368 	int rxq_ptr;			/* next ready Rx desc/queue ent */
369 	int rxq_discard;
370 	int rxq_len;
371 	struct mbuf *rxq_head;
372 	struct mbuf *rxq_tail;
373 	struct mbuf **rxq_tailp;
374 
375 #ifdef WM_EVENT_COUNTERS
376 	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */
377 
378 	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
379 	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
380 #endif
381 };
382 
383 struct wm_queue {
384 	int wmq_id;			/* index of transmit and receive queues */
385 	int wmq_intr_idx;		/* index into the MSI-X vector table */
386 
387 	struct wm_txqueue wmq_txq;
388 	struct wm_rxqueue wmq_rxq;
389 };
390 
391 /*
392  * Software state per device.
393  */
394 struct wm_softc {
395 	device_t sc_dev;		/* generic device information */
396 	bus_space_tag_t sc_st;		/* bus space tag */
397 	bus_space_handle_t sc_sh;	/* bus space handle */
398 	bus_size_t sc_ss;		/* bus space size */
399 	bus_space_tag_t sc_iot;		/* I/O space tag */
400 	bus_space_handle_t sc_ioh;	/* I/O space handle */
401 	bus_size_t sc_ios;		/* I/O space size */
402 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
403 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
404 	bus_size_t sc_flashs;		/* flash registers space size */
405 	off_t sc_flashreg_offset;	/*
406 					 * offset to flash registers from
407 					 * start of BAR
408 					 */
409 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
410 
411 	struct ethercom sc_ethercom;	/* ethernet common data */
412 	struct mii_data sc_mii;		/* MII/media information */
413 
414 	pci_chipset_tag_t sc_pc;
415 	pcitag_t sc_pcitag;
416 	int sc_bus_speed;		/* PCI/PCIX bus speed */
417 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
418 
419 	uint16_t sc_pcidevid;		/* PCI device ID */
420 	wm_chip_type sc_type;		/* MAC type */
421 	int sc_rev;			/* MAC revision */
422 	wm_phy_type sc_phytype;		/* PHY type */
423 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
424 #define	WM_MEDIATYPE_UNKNOWN		0x00
425 #define	WM_MEDIATYPE_FIBER		0x01
426 #define	WM_MEDIATYPE_COPPER		0x02
427 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
428 	int sc_funcid;			/* unit number of the chip (0 to 3) */
429 	int sc_flags;			/* flags; see below */
430 	int sc_if_flags;		/* last if_flags */
431 	int sc_flowflags;		/* 802.3x flow control flags */
432 	int sc_align_tweak;
433 
434 	void *sc_ihs[WM_MAX_NINTR];	/*
435 					 * interrupt cookie.
436 					 * legacy and msi use sc_ihs[0].
437 					 */
438 	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
439 	int sc_nintrs;			/* number of interrupts */
440 
441 	int sc_link_intr_idx;		/* index into the MSI-X vector table */
442 
443 	callout_t sc_tick_ch;		/* tick callout */
444 	bool sc_stopping;
445 
446 	int sc_nvm_ver_major;
447 	int sc_nvm_ver_minor;
448 	int sc_nvm_ver_build;
449 	int sc_nvm_addrbits;		/* NVM address bits */
450 	unsigned int sc_nvm_wordsize;	/* NVM word size */
451 	int sc_ich8_flash_base;
452 	int sc_ich8_flash_bank_size;
453 	int sc_nvm_k1_enabled;
454 
455 	int sc_nqueues;
456 	struct wm_queue *sc_queue;
457 
458 	int sc_affinity_offset;
459 
460 #ifdef WM_EVENT_COUNTERS
461 	/* Event counters. */
462 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
463 
464 	/* WM_T_82542_2_1 only */
465 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
466 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
467 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
468 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
469 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
470 #endif /* WM_EVENT_COUNTERS */
471 
472 	/* This variable is used only on the 82547. */
473 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
474 
475 	uint32_t sc_ctrl;		/* prototype CTRL register */
476 #if 0
477 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
478 #endif
479 	uint32_t sc_icr;		/* prototype interrupt bits */
480 	uint32_t sc_itr;		/* prototype intr throttling reg */
481 	uint32_t sc_tctl;		/* prototype TCTL register */
482 	uint32_t sc_rctl;		/* prototype RCTL register */
483 	uint32_t sc_txcw;		/* prototype TXCW register */
484 	uint32_t sc_tipg;		/* prototype TIPG register */
485 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
486 	uint32_t sc_pba;		/* prototype PBA register */
487 
488 	int sc_tbi_linkup;		/* TBI link status */
489 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
490 	int sc_tbi_serdes_ticks;	/* tbi ticks */
491 
492 	int sc_mchash_type;		/* multicast filter offset */
493 
494 	krndsource_t rnd_source;	/* random source */
495 
496 	kmutex_t *sc_core_lock;		/* lock for softc operations */
497 
498 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
499 };
500 
501 #define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
502 #define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
503 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
504 
505 #ifdef WM_MPSAFE
506 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
507 #else
508 #define CALLOUT_FLAGS	0
509 #endif
510 
511 #define	WM_RXCHAIN_RESET(rxq)						\
512 do {									\
513 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
514 	*(rxq)->rxq_tailp = NULL;					\
515 	(rxq)->rxq_len = 0;						\
516 } while (/*CONSTCOND*/0)
517 
518 #define	WM_RXCHAIN_LINK(rxq, m)						\
519 do {									\
520 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
521 	(rxq)->rxq_tailp = &(m)->m_next;				\
522 } while (/*CONSTCOND*/0)
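
/*
 * e.g., after WM_RXCHAIN_RESET(rxq), linking fragments m1 and m2 in
 * turn yields rxq_head == m1, m1->m_next == m2, and rxq_tailp pointing
 * at m2->m_next, ready for the next fragment of the packet.
 */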
523 
524 #ifdef WM_EVENT_COUNTERS
525 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
526 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
527 
528 #define WM_Q_EVCNT_INCR(qname, evname)			\
529 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
530 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
531 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
532 #else /* !WM_EVENT_COUNTERS */
533 #define	WM_EVCNT_INCR(ev)	/* nothing */
534 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
535 
536 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
537 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
538 #endif /* !WM_EVENT_COUNTERS */
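
/*
 * Note the qname argument doubles as the pointer variable and the
 * member prefix: WM_Q_EVCNT_INCR(txq, txdw) expands to
 * (&(txq)->txq_ev_txdw)->ev_count++, so it only works when the local
 * variable is literally named "txq" (or "rxq" for Rx counters).
 */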
539 
540 #define	CSR_READ(sc, reg)						\
541 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
542 #define	CSR_WRITE(sc, reg, val)						\
543 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
544 #define	CSR_WRITE_FLUSH(sc)						\
545 	(void) CSR_READ((sc), WMREG_STATUS)
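
/*
 * CSR_WRITE_FLUSH() reads STATUS purely for its side effect: the read
 * forces any posted PCI(e) writes out to the device before we proceed.
 */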
546 
547 #define ICH8_FLASH_READ32(sc, reg)					\
548 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
549 	    (reg) + sc->sc_flashreg_offset)
550 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
551 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
552 	    (reg) + sc->sc_flashreg_offset, (data))
553 
554 #define ICH8_FLASH_READ16(sc, reg)					\
555 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
556 	    (reg) + sc->sc_flashreg_offset)
557 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
558 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
559 	    (reg) + sc->sc_flashreg_offset, (data))
560 
561 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
562 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))
563 
564 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
565 #define	WM_CDTXADDR_HI(txq, x)						\
566 	(sizeof(bus_addr_t) == 8 ?					\
567 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
568 
569 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
570 #define	WM_CDRXADDR_HI(rxq, x)						\
571 	(sizeof(bus_addr_t) == 8 ?					\
572 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
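
/*
 * e.g., a 64-bit descriptor address 0x123456789000 is split into the
 * two 32-bit halves the hardware expects: _LO yields 0x56789000 and
 * _HI yields 0x1234; on 32-bit bus_addr_t platforms _HI is always 0.
 */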
573 
574 /*
575  * Register read/write functions.
576  * Other than CSR_{READ|WRITE}().
577  */
578 #if 0
579 static inline uint32_t wm_io_read(struct wm_softc *, int);
580 #endif
581 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
582 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
583 	uint32_t, uint32_t);
584 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
585 
586 /*
587  * Descriptor sync/init functions.
588  */
589 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
590 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
591 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
592 
593 /*
594  * Device driver interface functions and commonly used functions.
595  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
596  */
597 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
598 static int	wm_match(device_t, cfdata_t, void *);
599 static void	wm_attach(device_t, device_t, void *);
600 static int	wm_detach(device_t, int);
601 static bool	wm_suspend(device_t, const pmf_qual_t *);
602 static bool	wm_resume(device_t, const pmf_qual_t *);
603 static void	wm_watchdog(struct ifnet *);
604 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
605 static void	wm_tick(void *);
606 static int	wm_ifflags_cb(struct ethercom *);
607 static int	wm_ioctl(struct ifnet *, u_long, void *);
608 /* MAC address related */
609 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
610 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
611 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
612 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
613 static void	wm_set_filter(struct wm_softc *);
614 /* Reset and init related */
615 static void	wm_set_vlan(struct wm_softc *);
616 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
617 static void	wm_get_auto_rd_done(struct wm_softc *);
618 static void	wm_lan_init_done(struct wm_softc *);
619 static void	wm_get_cfg_done(struct wm_softc *);
620 static void	wm_initialize_hardware_bits(struct wm_softc *);
621 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
622 static void	wm_reset(struct wm_softc *);
623 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
624 static void	wm_rxdrain(struct wm_rxqueue *);
625 static void	wm_rss_getkey(uint8_t *);
626 static void	wm_init_rss(struct wm_softc *);
627 static void	wm_adjust_qnum(struct wm_softc *, int);
628 static int	wm_setup_legacy(struct wm_softc *);
629 static int	wm_setup_msix(struct wm_softc *);
630 static int	wm_init(struct ifnet *);
631 static int	wm_init_locked(struct ifnet *);
632 static void	wm_stop(struct ifnet *, int);
633 static void	wm_stop_locked(struct ifnet *, int);
634 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
635 static void	wm_82547_txfifo_stall(void *);
636 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
637 /* DMA related */
638 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
639 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
640 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
641 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
642     struct wm_txqueue *);
643 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
644 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
645 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
646     struct wm_rxqueue *);
647 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
648 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
649 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
650 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
651 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
652 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
653 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
654     struct wm_txqueue *);
655 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
656     struct wm_rxqueue *);
657 static int	wm_alloc_txrx_queues(struct wm_softc *);
658 static void	wm_free_txrx_queues(struct wm_softc *);
659 static int	wm_init_txrx_queues(struct wm_softc *);
660 /* Start */
661 static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
662     uint32_t *, uint8_t *);
663 static void	wm_start(struct ifnet *);
664 static void	wm_start_locked(struct ifnet *);
665 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
666     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
667 static void	wm_nq_start(struct ifnet *);
668 static void	wm_nq_start_locked(struct ifnet *);
669 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
670 static inline int	wm_nq_select_txqueue(struct ifnet *, struct mbuf *);
671 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
672 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
673 /* Interrupt */
674 static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
675 static void	wm_rxeof(struct wm_rxqueue *);
676 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
677 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
678 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
679 static void	wm_linkintr(struct wm_softc *, uint32_t);
680 static int	wm_intr_legacy(void *);
681 static int	wm_txrxintr_msix(void *);
682 static int	wm_linkintr_msix(void *);
683 
684 /*
685  * Media related.
686  * GMII, SGMII, TBI, SERDES and SFP.
687  */
688 /* Common */
689 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
690 /* GMII related */
691 static void	wm_gmii_reset(struct wm_softc *);
692 static int	wm_get_phy_id_82575(struct wm_softc *);
693 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
694 static int	wm_gmii_mediachange(struct ifnet *);
695 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
696 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
697 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
698 static int	wm_gmii_i82543_readreg(device_t, int, int);
699 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
700 static int	wm_gmii_i82544_readreg(device_t, int, int);
701 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
702 static int	wm_gmii_i80003_readreg(device_t, int, int);
703 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
704 static int	wm_gmii_bm_readreg(device_t, int, int);
705 static void	wm_gmii_bm_writereg(device_t, int, int, int);
706 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
707 static int	wm_gmii_hv_readreg(device_t, int, int);
708 static void	wm_gmii_hv_writereg(device_t, int, int, int);
709 static int	wm_gmii_82580_readreg(device_t, int, int);
710 static void	wm_gmii_82580_writereg(device_t, int, int, int);
711 static int	wm_gmii_gs40g_readreg(device_t, int, int);
712 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
713 static void	wm_gmii_statchg(struct ifnet *);
714 static int	wm_kmrn_readreg(struct wm_softc *, int);
715 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
716 /* SGMII */
717 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
718 static int	wm_sgmii_readreg(device_t, int, int);
719 static void	wm_sgmii_writereg(device_t, int, int, int);
720 /* TBI related */
721 static void	wm_tbi_mediainit(struct wm_softc *);
722 static int	wm_tbi_mediachange(struct ifnet *);
723 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
724 static int	wm_check_for_link(struct wm_softc *);
725 static void	wm_tbi_tick(struct wm_softc *);
726 /* SERDES related */
727 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
728 static int	wm_serdes_mediachange(struct ifnet *);
729 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
730 static void	wm_serdes_tick(struct wm_softc *);
731 /* SFP related */
732 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
733 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
734 
735 /*
736  * NVM related.
737  * Microwire, SPI (w/wo EERD) and Flash.
738  */
739 /* Misc functions */
740 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
741 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
742 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
743 /* Microwire */
744 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
745 /* SPI */
746 static int	wm_nvm_ready_spi(struct wm_softc *);
747 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
748 /* Using with EERD */
749 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
750 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
751 /* Flash */
752 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
753     unsigned int *);
754 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
755 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
756 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
757 	uint32_t *);
758 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
759 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
760 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
761 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
762 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
763 /* iNVM */
764 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
765 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
766 /* Lock, detecting NVM type, validate checksum and read */
767 static int	wm_nvm_acquire(struct wm_softc *);
768 static void	wm_nvm_release(struct wm_softc *);
769 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
770 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
771 static int	wm_nvm_validate_checksum(struct wm_softc *);
772 static void	wm_nvm_version_invm(struct wm_softc *);
773 static void	wm_nvm_version(struct wm_softc *);
774 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
775 
776 /*
777  * Hardware semaphores.
778  * Very complex...
779  */
780 static int	wm_get_swsm_semaphore(struct wm_softc *);
781 static void	wm_put_swsm_semaphore(struct wm_softc *);
782 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
783 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
784 static int	wm_get_swfwhw_semaphore(struct wm_softc *);
785 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
786 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
787 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
788 
789 /*
790  * Management mode and power management related subroutines.
791  * BMC, AMT, suspend/resume and EEE.
792  */
793 #ifdef WM_WOL
794 static int	wm_check_mng_mode(struct wm_softc *);
795 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
796 static int	wm_check_mng_mode_82574(struct wm_softc *);
797 static int	wm_check_mng_mode_generic(struct wm_softc *);
798 #endif
799 static int	wm_enable_mng_pass_thru(struct wm_softc *);
800 static bool	wm_phy_resetisblocked(struct wm_softc *);
801 static void	wm_get_hw_control(struct wm_softc *);
802 static void	wm_release_hw_control(struct wm_softc *);
803 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
804 static void	wm_smbustopci(struct wm_softc *);
805 static void	wm_init_manageability(struct wm_softc *);
806 static void	wm_release_manageability(struct wm_softc *);
807 static void	wm_get_wakeup(struct wm_softc *);
808 #ifdef WM_WOL
809 static void	wm_enable_phy_wakeup(struct wm_softc *);
810 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
811 static void	wm_enable_wakeup(struct wm_softc *);
812 #endif
813 /* LPLU (Low Power Link Up) */
814 static void	wm_lplu_d0_disable(struct wm_softc *);
815 static void	wm_lplu_d0_disable_pch(struct wm_softc *);
816 /* EEE */
817 static void	wm_set_eee_i350(struct wm_softc *);
818 
819 /*
820  * Workarounds (mainly PHY related).
821  * Basically, PHY's workarounds are in the PHY drivers.
822  */
823 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
824 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
825 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
826 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
827 static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
828 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
829 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
830 static void	wm_reset_init_script_82575(struct wm_softc *);
831 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
832 static void	wm_pll_workaround_i210(struct wm_softc *);
833 
834 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
835     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
836 
837 /*
838  * Devices supported by this driver.
839  */
840 static const struct wm_product {
841 	pci_vendor_id_t		wmp_vendor;
842 	pci_product_id_t	wmp_product;
843 	const char		*wmp_name;
844 	wm_chip_type		wmp_type;
845 	uint32_t		wmp_flags;
846 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
847 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
848 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
849 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
850 #define WMP_MEDIATYPE(x)	((x) & 0x03)
851 } wm_products[] = {
852 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
853 	  "Intel i82542 1000BASE-X Ethernet",
854 	  WM_T_82542_2_1,	WMP_F_FIBER },
855 
856 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
857 	  "Intel i82543GC 1000BASE-X Ethernet",
858 	  WM_T_82543,		WMP_F_FIBER },
859 
860 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
861 	  "Intel i82543GC 1000BASE-T Ethernet",
862 	  WM_T_82543,		WMP_F_COPPER },
863 
864 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
865 	  "Intel i82544EI 1000BASE-T Ethernet",
866 	  WM_T_82544,		WMP_F_COPPER },
867 
868 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
869 	  "Intel i82544EI 1000BASE-X Ethernet",
870 	  WM_T_82544,		WMP_F_FIBER },
871 
872 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
873 	  "Intel i82544GC 1000BASE-T Ethernet",
874 	  WM_T_82544,		WMP_F_COPPER },
875 
876 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
877 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
878 	  WM_T_82544,		WMP_F_COPPER },
879 
880 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
881 	  "Intel i82540EM 1000BASE-T Ethernet",
882 	  WM_T_82540,		WMP_F_COPPER },
883 
884 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
885 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
886 	  WM_T_82540,		WMP_F_COPPER },
887 
888 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
889 	  "Intel i82540EP 1000BASE-T Ethernet",
890 	  WM_T_82540,		WMP_F_COPPER },
891 
892 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
893 	  "Intel i82540EP 1000BASE-T Ethernet",
894 	  WM_T_82540,		WMP_F_COPPER },
895 
896 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
897 	  "Intel i82540EP 1000BASE-T Ethernet",
898 	  WM_T_82540,		WMP_F_COPPER },
899 
900 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
901 	  "Intel i82545EM 1000BASE-T Ethernet",
902 	  WM_T_82545,		WMP_F_COPPER },
903 
904 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
905 	  "Intel i82545GM 1000BASE-T Ethernet",
906 	  WM_T_82545_3,		WMP_F_COPPER },
907 
908 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
909 	  "Intel i82545GM 1000BASE-X Ethernet",
910 	  WM_T_82545_3,		WMP_F_FIBER },
911 
912 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
913 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
914 	  WM_T_82545_3,		WMP_F_SERDES },
915 
916 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
917 	  "Intel i82546EB 1000BASE-T Ethernet",
918 	  WM_T_82546,		WMP_F_COPPER },
919 
920 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
921 	  "Intel i82546EB 1000BASE-T Ethernet",
922 	  WM_T_82546,		WMP_F_COPPER },
923 
924 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
925 	  "Intel i82545EM 1000BASE-X Ethernet",
926 	  WM_T_82545,		WMP_F_FIBER },
927 
928 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
929 	  "Intel i82546EB 1000BASE-X Ethernet",
930 	  WM_T_82546,		WMP_F_FIBER },
931 
932 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
933 	  "Intel i82546GB 1000BASE-T Ethernet",
934 	  WM_T_82546_3,		WMP_F_COPPER },
935 
936 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
937 	  "Intel i82546GB 1000BASE-X Ethernet",
938 	  WM_T_82546_3,		WMP_F_FIBER },
939 
940 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
941 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
942 	  WM_T_82546_3,		WMP_F_SERDES },
943 
944 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
945 	  "i82546GB quad-port Gigabit Ethernet",
946 	  WM_T_82546_3,		WMP_F_COPPER },
947 
948 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
949 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
950 	  WM_T_82546_3,		WMP_F_COPPER },
951 
952 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
953 	  "Intel PRO/1000MT (82546GB)",
954 	  WM_T_82546_3,		WMP_F_COPPER },
955 
956 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
957 	  "Intel i82541EI 1000BASE-T Ethernet",
958 	  WM_T_82541,		WMP_F_COPPER },
959 
960 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
961 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
962 	  WM_T_82541,		WMP_F_COPPER },
963 
964 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
965 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
966 	  WM_T_82541,		WMP_F_COPPER },
967 
968 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
969 	  "Intel i82541ER 1000BASE-T Ethernet",
970 	  WM_T_82541_2,		WMP_F_COPPER },
971 
972 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
973 	  "Intel i82541GI 1000BASE-T Ethernet",
974 	  WM_T_82541_2,		WMP_F_COPPER },
975 
976 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
977 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
978 	  WM_T_82541_2,		WMP_F_COPPER },
979 
980 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
981 	  "Intel i82541PI 1000BASE-T Ethernet",
982 	  WM_T_82541_2,		WMP_F_COPPER },
983 
984 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
985 	  "Intel i82547EI 1000BASE-T Ethernet",
986 	  WM_T_82547,		WMP_F_COPPER },
987 
988 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
989 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
990 	  WM_T_82547,		WMP_F_COPPER },
991 
992 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
993 	  "Intel i82547GI 1000BASE-T Ethernet",
994 	  WM_T_82547_2,		WMP_F_COPPER },
995 
996 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
997 	  "Intel PRO/1000 PT (82571EB)",
998 	  WM_T_82571,		WMP_F_COPPER },
999 
1000 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
1001 	  "Intel PRO/1000 PF (82571EB)",
1002 	  WM_T_82571,		WMP_F_FIBER },
1003 
1004 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
1005 	  "Intel PRO/1000 PB (82571EB)",
1006 	  WM_T_82571,		WMP_F_SERDES },
1007 
1008 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
1009 	  "Intel PRO/1000 QT (82571EB)",
1010 	  WM_T_82571,		WMP_F_COPPER },
1011 
1012 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
1013 	  "Intel PRO/1000 PT Quad Port Server Adapter",
1014 	  WM_T_82571,		WMP_F_COPPER, },
1015 
1016 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
1017 	  "Intel Gigabit PT Quad Port Server ExpressModule",
1018 	  WM_T_82571,		WMP_F_COPPER, },
1019 
1020 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
1021 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
1022 	  WM_T_82571,		WMP_F_SERDES, },
1023 
1024 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
1025 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
1026 	  WM_T_82571,		WMP_F_SERDES, },
1027 
1028 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
1029 	  "Intel 82571EB Quad 1000baseX Ethernet",
1030 	  WM_T_82571,		WMP_F_FIBER, },
1031 
1032 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
1033 	  "Intel i82572EI 1000baseT Ethernet",
1034 	  WM_T_82572,		WMP_F_COPPER },
1035 
1036 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
1037 	  "Intel i82572EI 1000baseX Ethernet",
1038 	  WM_T_82572,		WMP_F_FIBER },
1039 
1040 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
1041 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
1042 	  WM_T_82572,		WMP_F_SERDES },
1043 
1044 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
1045 	  "Intel i82572EI 1000baseT Ethernet",
1046 	  WM_T_82572,		WMP_F_COPPER },
1047 
1048 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
1049 	  "Intel i82573E",
1050 	  WM_T_82573,		WMP_F_COPPER },
1051 
1052 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
1053 	  "Intel i82573E IAMT",
1054 	  WM_T_82573,		WMP_F_COPPER },
1055 
1056 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
1057 	  "Intel i82573L Gigabit Ethernet",
1058 	  WM_T_82573,		WMP_F_COPPER },
1059 
1060 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
1061 	  "Intel i82574L",
1062 	  WM_T_82574,		WMP_F_COPPER },
1063 
1064 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
1065 	  "Intel i82574L",
1066 	  WM_T_82574,		WMP_F_COPPER },
1067 
1068 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
1069 	  "Intel i82583V",
1070 	  WM_T_82583,		WMP_F_COPPER },
1071 
1072 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
1073 	  "i80003 dual 1000baseT Ethernet",
1074 	  WM_T_80003,		WMP_F_COPPER },
1075 
1076 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
1077 	  "i80003 dual 1000baseX Ethernet",
1078 	  WM_T_80003,		WMP_F_COPPER },
1079 
1080 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
1081 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
1082 	  WM_T_80003,		WMP_F_SERDES },
1083 
1084 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
1085 	  "Intel i80003 1000baseT Ethernet",
1086 	  WM_T_80003,		WMP_F_COPPER },
1087 
1088 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
1089 	  "Intel i80003 Gigabit Ethernet (SERDES)",
1090 	  WM_T_80003,		WMP_F_SERDES },
1091 
1092 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
1093 	  "Intel i82801H (M_AMT) LAN Controller",
1094 	  WM_T_ICH8,		WMP_F_COPPER },
1095 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
1096 	  "Intel i82801H (AMT) LAN Controller",
1097 	  WM_T_ICH8,		WMP_F_COPPER },
1098 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
1099 	  "Intel i82801H LAN Controller",
1100 	  WM_T_ICH8,		WMP_F_COPPER },
1101 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
1102 	  "Intel i82801H (IFE) LAN Controller",
1103 	  WM_T_ICH8,		WMP_F_COPPER },
1104 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
1105 	  "Intel i82801H (M) LAN Controller",
1106 	  WM_T_ICH8,		WMP_F_COPPER },
1107 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
1108 	  "Intel i82801H IFE (GT) LAN Controller",
1109 	  WM_T_ICH8,		WMP_F_COPPER },
1110 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
1111 	  "Intel i82801H IFE (G) LAN Controller",
1112 	  WM_T_ICH8,		WMP_F_COPPER },
1113 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
1114 	  "82801I (AMT) LAN Controller",
1115 	  WM_T_ICH9,		WMP_F_COPPER },
1116 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
1117 	  "82801I LAN Controller",
1118 	  WM_T_ICH9,		WMP_F_COPPER },
1119 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
1120 	  "82801I (G) LAN Controller",
1121 	  WM_T_ICH9,		WMP_F_COPPER },
1122 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
1123 	  "82801I (GT) LAN Controller",
1124 	  WM_T_ICH9,		WMP_F_COPPER },
1125 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
1126 	  "82801I (C) LAN Controller",
1127 	  WM_T_ICH9,		WMP_F_COPPER },
1128 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
1129 	  "82801I mobile LAN Controller",
1130 	  WM_T_ICH9,		WMP_F_COPPER },
1131 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
1132 	  "82801I mobile (V) LAN Controller",
1133 	  WM_T_ICH9,		WMP_F_COPPER },
1134 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
1135 	  "82801I mobile (AMT) LAN Controller",
1136 	  WM_T_ICH9,		WMP_F_COPPER },
1137 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
1138 	  "82567LM-4 LAN Controller",
1139 	  WM_T_ICH9,		WMP_F_COPPER },
1140 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
1141 	  "82567V-3 LAN Controller",
1142 	  WM_T_ICH9,		WMP_F_COPPER },
1143 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
1144 	  "82567LM-2 LAN Controller",
1145 	  WM_T_ICH10,		WMP_F_COPPER },
1146 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
1147 	  "82567LF-2 LAN Controller",
1148 	  WM_T_ICH10,		WMP_F_COPPER },
1149 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
1150 	  "82567LM-3 LAN Controller",
1151 	  WM_T_ICH10,		WMP_F_COPPER },
1152 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
1153 	  "82567LF-3 LAN Controller",
1154 	  WM_T_ICH10,		WMP_F_COPPER },
1155 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
1156 	  "82567V-2 LAN Controller",
1157 	  WM_T_ICH10,		WMP_F_COPPER },
1158 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
1159 	  "82567V-3? LAN Controller",
1160 	  WM_T_ICH10,		WMP_F_COPPER },
1161 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
1162 	  "HANKSVILLE LAN Controller",
1163 	  WM_T_ICH10,		WMP_F_COPPER },
1164 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
1165 	  "PCH LAN (82577LM) Controller",
1166 	  WM_T_PCH,		WMP_F_COPPER },
1167 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
1168 	  "PCH LAN (82577LC) Controller",
1169 	  WM_T_PCH,		WMP_F_COPPER },
1170 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
1171 	  "PCH LAN (82578DM) Controller",
1172 	  WM_T_PCH,		WMP_F_COPPER },
1173 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
1174 	  "PCH LAN (82578DC) Controller",
1175 	  WM_T_PCH,		WMP_F_COPPER },
1176 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
1177 	  "PCH2 LAN (82579LM) Controller",
1178 	  WM_T_PCH2,		WMP_F_COPPER },
1179 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
1180 	  "PCH2 LAN (82579V) Controller",
1181 	  WM_T_PCH2,		WMP_F_COPPER },
1182 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
1183 	  "82575EB dual-1000baseT Ethernet",
1184 	  WM_T_82575,		WMP_F_COPPER },
1185 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
1186 	  "82575EB dual-1000baseX Ethernet (SERDES)",
1187 	  WM_T_82575,		WMP_F_SERDES },
1188 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
1189 	  "82575GB quad-1000baseT Ethernet",
1190 	  WM_T_82575,		WMP_F_COPPER },
1191 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
1192 	  "82575GB quad-1000baseT Ethernet (PM)",
1193 	  WM_T_82575,		WMP_F_COPPER },
1194 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
1195 	  "82576 1000BaseT Ethernet",
1196 	  WM_T_82576,		WMP_F_COPPER },
1197 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
1198 	  "82576 1000BaseX Ethernet",
1199 	  WM_T_82576,		WMP_F_FIBER },
1200 
1201 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
1202 	  "82576 gigabit Ethernet (SERDES)",
1203 	  WM_T_82576,		WMP_F_SERDES },
1204 
1205 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
1206 	  "82576 quad-1000BaseT Ethernet",
1207 	  WM_T_82576,		WMP_F_COPPER },
1208 
1209 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
1210 	  "82576 Gigabit ET2 Quad Port Server Adapter",
1211 	  WM_T_82576,		WMP_F_COPPER },
1212 
1213 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
1214 	  "82576 gigabit Ethernet",
1215 	  WM_T_82576,		WMP_F_COPPER },
1216 
1217 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
1218 	  "82576 gigabit Ethernet (SERDES)",
1219 	  WM_T_82576,		WMP_F_SERDES },
1220 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
1221 	  "82576 quad-gigabit Ethernet (SERDES)",
1222 	  WM_T_82576,		WMP_F_SERDES },
1223 
1224 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
1225 	  "82580 1000BaseT Ethernet",
1226 	  WM_T_82580,		WMP_F_COPPER },
1227 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
1228 	  "82580 1000BaseX Ethernet",
1229 	  WM_T_82580,		WMP_F_FIBER },
1230 
1231 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
1232 	  "82580 1000BaseT Ethernet (SERDES)",
1233 	  WM_T_82580,		WMP_F_SERDES },
1234 
1235 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
1236 	  "82580 gigabit Ethernet (SGMII)",
1237 	  WM_T_82580,		WMP_F_COPPER },
1238 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
1239 	  "82580 dual-1000BaseT Ethernet",
1240 	  WM_T_82580,		WMP_F_COPPER },
1241 
1242 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1243 	  "82580 quad-1000BaseX Ethernet",
1244 	  WM_T_82580,		WMP_F_FIBER },
1245 
1246 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
1247 	  "DH89XXCC Gigabit Ethernet (SGMII)",
1248 	  WM_T_82580,		WMP_F_COPPER },
1249 
1250 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
1251 	  "DH89XXCC Gigabit Ethernet (SERDES)",
1252 	  WM_T_82580,		WMP_F_SERDES },
1253 
1254 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
1255 	  "DH89XXCC 1000BASE-KX Ethernet",
1256 	  WM_T_82580,		WMP_F_SERDES },
1257 
1258 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
1259 	  "DH89XXCC Gigabit Ethernet (SFP)",
1260 	  WM_T_82580,		WMP_F_SERDES },
1261 
1262 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
1263 	  "I350 Gigabit Network Connection",
1264 	  WM_T_I350,		WMP_F_COPPER },
1265 
1266 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
1267 	  "I350 Gigabit Fiber Network Connection",
1268 	  WM_T_I350,		WMP_F_FIBER },
1269 
1270 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
1271 	  "I350 Gigabit Backplane Connection",
1272 	  WM_T_I350,		WMP_F_SERDES },
1273 
1274 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
1275 	  "I350 Quad Port Gigabit Ethernet",
1276 	  WM_T_I350,		WMP_F_SERDES },
1277 
1278 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
1279 	  "I350 Gigabit Connection",
1280 	  WM_T_I350,		WMP_F_COPPER },
1281 
1282 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
1283 	  "I354 Gigabit Ethernet (KX)",
1284 	  WM_T_I354,		WMP_F_SERDES },
1285 
1286 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
1287 	  "I354 Gigabit Ethernet (SGMII)",
1288 	  WM_T_I354,		WMP_F_COPPER },
1289 
1290 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
1291 	  "I354 Gigabit Ethernet (2.5G)",
1292 	  WM_T_I354,		WMP_F_COPPER },
1293 
1294 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
1295 	  "I210-T1 Ethernet Server Adapter",
1296 	  WM_T_I210,		WMP_F_COPPER },
1297 
1298 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
1299 	  "I210 Ethernet (Copper OEM)",
1300 	  WM_T_I210,		WMP_F_COPPER },
1301 
1302 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
1303 	  "I210 Ethernet (Copper IT)",
1304 	  WM_T_I210,		WMP_F_COPPER },
1305 
1306 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
1307 	  "I210 Ethernet (FLASH less)",
1308 	  WM_T_I210,		WMP_F_COPPER },
1309 
1310 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
1311 	  "I210 Gigabit Ethernet (Fiber)",
1312 	  WM_T_I210,		WMP_F_FIBER },
1313 
1314 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
1315 	  "I210 Gigabit Ethernet (SERDES)",
1316 	  WM_T_I210,		WMP_F_SERDES },
1317 
1318 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
1319 	  "I210 Gigabit Ethernet (FLASH less)",
1320 	  WM_T_I210,		WMP_F_SERDES },
1321 
1322 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
1323 	  "I210 Gigabit Ethernet (SGMII)",
1324 	  WM_T_I210,		WMP_F_COPPER },
1325 
1326 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
1327 	  "I211 Ethernet (COPPER)",
1328 	  WM_T_I211,		WMP_F_COPPER },
1329 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
1330 	  "I217 V Ethernet Connection",
1331 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1332 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
1333 	  "I217 LM Ethernet Connection",
1334 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1335 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
1336 	  "I218 V Ethernet Connection",
1337 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
1339 	  "I218 V Ethernet Connection",
1340 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
1342 	  "I218 V Ethernet Connection",
1343 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
1345 	  "I218 LM Ethernet Connection",
1346 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1347 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
1348 	  "I218 LM Ethernet Connection",
1349 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
1351 	  "I218 LM Ethernet Connection",
1352 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1353 #if 0
1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
1355 	  "I219 V Ethernet Connection",
1356 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
1358 	  "I219 V Ethernet Connection",
1359 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
1361 	  "I219 LM Ethernet Connection",
1362 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1363 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
1364 	  "I219 LM Ethernet Connection",
1365 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1366 #endif
1367 	{ 0,			0,
1368 	  NULL,
1369 	  0,			0 },
1370 };
1371 
1372 /*
1373  * Register read/write functions.
1374  * Other than CSR_{READ|WRITE}().
1375  */
1376 
1377 #if 0 /* Not currently used */
1378 static inline uint32_t
1379 wm_io_read(struct wm_softc *sc, int reg)
1380 {
1381 
1382 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1383 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1384 }
1385 #endif
1386 
1387 static inline void
1388 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1389 {
1390 
1391 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1392 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1393 }
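
/*
 * The I/O BAR is a small indirect window: a CSR offset written at
 * BAR offset 0 selects the register, and BAR offset 4 accesses its
 * data, as the read/write pair above illustrates.
 */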
1394 
1395 static inline void
1396 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1397     uint32_t data)
1398 {
1399 	uint32_t regval;
1400 	int i;
1401 
1402 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1403 
1404 	CSR_WRITE(sc, reg, regval);
1405 
1406 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1407 		delay(5);
1408 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1409 			break;
1410 	}
1411 	if (i == SCTL_CTL_POLL_TIMEOUT) {
1412 		aprint_error("%s: WARNING:"
1413 		    " i82575 reg 0x%08x setup did not indicate ready\n",
1414 		    device_xname(sc->sc_dev), reg);
1415 	}
1416 }
1417 
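/*
 * Split a bus address across the two little-endian 32-bit halves of a
 * descriptor address field; on platforms where bus_addr_t is 32 bits
 * wide the high word is simply zero.
 */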
1418 static inline void
1419 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1420 {
1421 	wa->wa_low = htole32(v & 0xffffffffU);
1422 	if (sizeof(bus_addr_t) == 8)
1423 		wa->wa_high = htole32((uint64_t) v >> 32);
1424 	else
1425 		wa->wa_high = 0;
1426 }
1427 
1428 /*
1429  * Descriptor sync/init functions.
1430  */
1431 static inline void
1432 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1433 {
1434 	struct wm_softc *sc = txq->txq_sc;
1435 
1436 	/* If it will wrap around, sync to the end of the ring. */
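	/*
	 * For example (illustrative values): on a 256-descriptor ring,
	 * start=250 with num=10 syncs descriptors 250..255 here and the
	 * remaining 0..3 in the second bus_dmamap_sync() below.
	 */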
1437 	if ((start + num) > WM_NTXDESC(txq)) {
1438 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1439 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
1440 		    (WM_NTXDESC(txq) - start), ops);
1441 		num -= (WM_NTXDESC(txq) - start);
1442 		start = 0;
1443 	}
1444 
1445 	/* Now sync whatever is left. */
1446 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1447 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
1448 }
1449 
1450 static inline void
1451 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1452 {
1453 	struct wm_softc *sc = rxq->rxq_sc;
1454 
1455 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1456 	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
1457 }
1458 
1459 static inline void
1460 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1461 {
1462 	struct wm_softc *sc = rxq->rxq_sc;
1463 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1464 	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1465 	struct mbuf *m = rxs->rxs_mbuf;
1466 
1467 	/*
1468 	 * Note: We scoot the packet forward 2 bytes in the buffer
1469 	 * so that the payload after the Ethernet header is aligned
1470 	 * to a 4-byte boundary.
1471 	 *
1472 	 * XXX BRAINDAMAGE ALERT!
1473 	 * The stupid chip uses the same size for every buffer, which
1474 	 * is set in the Receive Control register.  We are using the 2K
1475 	 * size option, but what we REALLY want is (2K - 2)!  For this
1476 	 * reason, we can't "scoot" packets longer than the standard
1477 	 * Ethernet MTU.  On strict-alignment platforms, if the total
1478 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
1479 	 * the upper layer copy the headers.
1480 	 */
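	/*
	 * Concretely (illustrative numbers): with sc_align_tweak == 2 the
	 * 14-byte Ethernet header starts at buffer offset 2, so the IP
	 * header that follows lands at offset 16, a 4-byte boundary.
	 */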
1481 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
1482 
1483 	wm_set_dma_addr(&rxd->wrx_addr,
1484 	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1485 	rxd->wrx_len = 0;
1486 	rxd->wrx_cksum = 0;
1487 	rxd->wrx_status = 0;
1488 	rxd->wrx_errors = 0;
1489 	rxd->wrx_special = 0;
1490 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1491 
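	/*
	 * Writing the tail register below hands this descriptor back to
	 * the hardware, so the descriptor sync above must come first.
	 */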
1492 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
1493 }
1494 
1495 /*
1496  * Device driver interface functions and commonly used functions.
1497  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1498  */
1499 
1500 /* Look up a device in the supported device table */
1501 static const struct wm_product *
1502 wm_lookup(const struct pci_attach_args *pa)
1503 {
1504 	const struct wm_product *wmp;
1505 
1506 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1507 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1508 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1509 			return wmp;
1510 	}
1511 	return NULL;
1512 }
1513 
1514 /* The match function (ca_match) */
1515 static int
1516 wm_match(device_t parent, cfdata_t cf, void *aux)
1517 {
1518 	struct pci_attach_args *pa = aux;
1519 
1520 	if (wm_lookup(pa) != NULL)
1521 		return 1;
1522 
1523 	return 0;
1524 }
1525 
1526 /* The attach function (ca_attach) */
1527 static void
1528 wm_attach(device_t parent, device_t self, void *aux)
1529 {
1530 	struct wm_softc *sc = device_private(self);
1531 	struct pci_attach_args *pa = aux;
1532 	prop_dictionary_t dict;
1533 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1534 	pci_chipset_tag_t pc = pa->pa_pc;
1535 	int counts[PCI_INTR_TYPE_SIZE];
1536 	pci_intr_type_t max_type;
1537 	const char *eetype, *xname;
1538 	bus_space_tag_t memt;
1539 	bus_space_handle_t memh;
1540 	bus_size_t memsize;
1541 	int memh_valid;
1542 	int i, error;
1543 	const struct wm_product *wmp;
1544 	prop_data_t ea;
1545 	prop_number_t pn;
1546 	uint8_t enaddr[ETHER_ADDR_LEN];
1547 	uint16_t cfg1, cfg2, swdpin, nvmword;
1548 	pcireg_t preg, memtype;
1549 	uint16_t eeprom_data, apme_mask;
1550 	bool force_clear_smbi;
1551 	uint32_t link_mode;
1552 	uint32_t reg;
1553 
1554 	sc->sc_dev = self;
1555 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
1556 	sc->sc_stopping = false;
1557 
1558 	wmp = wm_lookup(pa);
1559 #ifdef DIAGNOSTIC
1560 	if (wmp == NULL) {
1561 		printf("\n");
1562 		panic("wm_attach: impossible");
1563 	}
1564 #endif
1565 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
1566 
1567 	sc->sc_pc = pa->pa_pc;
1568 	sc->sc_pcitag = pa->pa_tag;
1569 
1570 	if (pci_dma64_available(pa))
1571 		sc->sc_dmat = pa->pa_dmat64;
1572 	else
1573 		sc->sc_dmat = pa->pa_dmat;
1574 
1575 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
1576 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
1577 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1578 
1579 	sc->sc_type = wmp->wmp_type;
1580 	if (sc->sc_type < WM_T_82543) {
1581 		if (sc->sc_rev < 2) {
1582 			aprint_error_dev(sc->sc_dev,
1583 			    "i82542 must be at least rev. 2\n");
1584 			return;
1585 		}
1586 		if (sc->sc_rev < 3)
1587 			sc->sc_type = WM_T_82542_2_0;
1588 	}
1589 
1590 	/*
1591 	 * Disable MSI for Errata:
1592 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
1593 	 *
1594 	 *  82544: Errata 25
1595 	 *  82540: Errata  6 (easy to reproduce device timeout)
1596 	 *  82545: Errata  4 (easy to reproduce device timeout)
1597 	 *  82546: Errata 26 (easy to reproduce device timeout)
1598 	 *  82541: Errata  7 (easy to reproduce device timeout)
1599 	 *
1600 	 * "Byte Enables 2 and 3 are not set on MSI writes"
1601 	 *
1602 	 *  82571 & 82572: Errata 63
1603 	 */
1604 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
1605 	    || (sc->sc_type == WM_T_82572))
1606 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
1607 
1608 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1609 	    || (sc->sc_type == WM_T_82580)
1610 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
1611 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
1612 		sc->sc_flags |= WM_F_NEWQUEUE;
1613 
1614 	/* Set device properties (mactype) */
1615 	dict = device_properties(sc->sc_dev);
1616 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1617 
1618 	/*
1619 	 * Map the device.  All devices support memory-mapped access,
1620 	 * and it is really required for normal operation.
1621 	 */
1622 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1623 	switch (memtype) {
1624 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1625 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1626 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1627 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1628 		break;
1629 	default:
1630 		memh_valid = 0;
1631 		break;
1632 	}
1633 
1634 	if (memh_valid) {
1635 		sc->sc_st = memt;
1636 		sc->sc_sh = memh;
1637 		sc->sc_ss = memsize;
1638 	} else {
1639 		aprint_error_dev(sc->sc_dev,
1640 		    "unable to map device registers\n");
1641 		return;
1642 	}
1643 
1644 	/*
1645 	 * In addition, i82544 and later support I/O mapped indirect
1646 	 * register access.  It is not desirable (nor supported in
1647 	 * this driver) to use it for normal operation, though it is
1648 	 * required to work around bugs in some chip versions.
1649 	 */
1650 	if (sc->sc_type >= WM_T_82544) {
1651 		/* First we have to find the I/O BAR. */
1652 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1653 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1654 			if (memtype == PCI_MAPREG_TYPE_IO)
1655 				break;
1656 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
1657 			    PCI_MAPREG_MEM_TYPE_64BIT)
1658 				i += 4;	/* skip high bits, too */
1659 		}
1660 		if (i < PCI_MAPREG_END) {
1661 			/*
1662 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
1663 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
1664 			 * That's no problem, because newer chips don't have
1665 			 * this bug.
1666 			 *
1667 			 * The i8254x apparently doesn't respond when the
1668 			 * I/O BAR is 0, which looks as if it has not been
1669 			 * configured.
1670 			 */
1671 			preg = pci_conf_read(pc, pa->pa_tag, i);
1672 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1673 				aprint_error_dev(sc->sc_dev,
1674 				    "WARNING: I/O BAR at zero.\n");
1675 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1676 					0, &sc->sc_iot, &sc->sc_ioh,
1677 					NULL, &sc->sc_ios) == 0) {
1678 				sc->sc_flags |= WM_F_IOH_VALID;
1679 			} else {
1680 				aprint_error_dev(sc->sc_dev,
1681 				    "WARNING: unable to map I/O space\n");
1682 			}
1683 		}
1685 	}
1686 
1687 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
1688 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1689 	preg |= PCI_COMMAND_MASTER_ENABLE;
1690 	if (sc->sc_type < WM_T_82542_2_1)
1691 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1692 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1693 
1694 	/* power up chip */
1695 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1696 	    NULL)) && error != EOPNOTSUPP) {
1697 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1698 		return;
1699 	}
1700 
1701 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
1702 
1703 	/* Allocation settings */
1704 	max_type = PCI_INTR_TYPE_MSIX;
1705 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
1706 	counts[PCI_INTR_TYPE_MSI] = 1;
1707 	counts[PCI_INTR_TYPE_INTX] = 1;
1708 
1709 alloc_retry:
1710 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
1711 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
1712 		return;
1713 	}
1714 
1715 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
1716 		error = wm_setup_msix(sc);
1717 		if (error) {
1718 			pci_intr_release(pc, sc->sc_intrs,
1719 			    counts[PCI_INTR_TYPE_MSIX]);
1720 
1721 			/* Setup for MSI: Disable MSI-X */
1722 			max_type = PCI_INTR_TYPE_MSI;
1723 			counts[PCI_INTR_TYPE_MSI] = 1;
1724 			counts[PCI_INTR_TYPE_INTX] = 1;
1725 			goto alloc_retry;
1726 		}
1727 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
1728 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
1729 		error = wm_setup_legacy(sc);
1730 		if (error) {
1731 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
1732 			    counts[PCI_INTR_TYPE_MSI]);
1733 
1734 			/* The next try is for INTx: Disable MSI */
1735 			max_type = PCI_INTR_TYPE_INTX;
1736 			counts[PCI_INTR_TYPE_INTX] = 1;
1737 			goto alloc_retry;
1738 		}
1739 	} else {
1740 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
1741 		error = wm_setup_legacy(sc);
1742 		if (error) {
1743 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
1744 			    counts[PCI_INTR_TYPE_INTX]);
1745 			return;
1746 		}
1747 	}
1748 
1749 	/*
1750 	 * Check the function ID (unit number of the chip).
1751 	 */
1752 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1753 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
1754 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1755 	    || (sc->sc_type == WM_T_82580)
1756 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
1757 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1758 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1759 	else
1760 		sc->sc_funcid = 0;
1761 
1762 	/*
1763 	 * Determine a few things about the bus we're connected to.
1764 	 */
1765 	if (sc->sc_type < WM_T_82543) {
1766 		/* We don't really know the bus characteristics here. */
1767 		sc->sc_bus_speed = 33;
1768 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1769 		/*
1770 		 * CSA (Communication Streaming Architecture) is about as fast
1771 		 * as a 32-bit 66MHz PCI bus.
1772 		 */
1773 		sc->sc_flags |= WM_F_CSA;
1774 		sc->sc_bus_speed = 66;
1775 		aprint_verbose_dev(sc->sc_dev,
1776 		    "Communication Streaming Architecture\n");
1777 		if (sc->sc_type == WM_T_82547) {
1778 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1779 			callout_setfunc(&sc->sc_txfifo_ch,
1780 					wm_82547_txfifo_stall, sc);
1781 			aprint_verbose_dev(sc->sc_dev,
1782 			    "using 82547 Tx FIFO stall work-around\n");
1783 		}
1784 	} else if (sc->sc_type >= WM_T_82571) {
1785 		sc->sc_flags |= WM_F_PCIE;
1786 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1787 		    && (sc->sc_type != WM_T_ICH10)
1788 		    && (sc->sc_type != WM_T_PCH)
1789 		    && (sc->sc_type != WM_T_PCH2)
1790 		    && (sc->sc_type != WM_T_PCH_LPT)
1791 		    && (sc->sc_type != WM_T_PCH_SPT)) {
1792 			/* ICH* and PCH* have no PCIe capability registers */
1793 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1794 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1795 				NULL) == 0)
1796 				aprint_error_dev(sc->sc_dev,
1797 				    "unable to find PCIe capability\n");
1798 		}
1799 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1800 	} else {
1801 		reg = CSR_READ(sc, WMREG_STATUS);
1802 		if (reg & STATUS_BUS64)
1803 			sc->sc_flags |= WM_F_BUS64;
1804 		if ((reg & STATUS_PCIX_MODE) != 0) {
1805 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1806 
1807 			sc->sc_flags |= WM_F_PCIX;
1808 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1809 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1810 				aprint_error_dev(sc->sc_dev,
1811 				    "unable to find PCIX capability\n");
1812 			else if (sc->sc_type != WM_T_82545_3 &&
1813 				 sc->sc_type != WM_T_82546_3) {
1814 				/*
1815 				 * Work around a problem caused by the BIOS
1816 				 * setting the max memory read byte count
1817 				 * incorrectly.
1818 				 */
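				/*
				 * Both fields encode 512 << n bytes; e.g.
				 * (illustrative) a command value of 2 asks
				 * for 2048-byte reads, and if the status
				 * field only permits 1 (1024 bytes) the
				 * command is clamped to match below.
				 */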
1819 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1820 				    sc->sc_pcixe_capoff + PCIX_CMD);
1821 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1822 				    sc->sc_pcixe_capoff + PCIX_STATUS);
1823 
1824 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1825 				    PCIX_CMD_BYTECNT_SHIFT;
1826 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1827 				    PCIX_STATUS_MAXB_SHIFT;
1828 				if (bytecnt > maxb) {
1829 					aprint_verbose_dev(sc->sc_dev,
1830 					    "resetting PCI-X MMRBC: %d -> %d\n",
1831 					    512 << bytecnt, 512 << maxb);
1832 					pcix_cmd = (pcix_cmd &
1833 					    ~PCIX_CMD_BYTECNT_MASK) |
1834 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
1835 					pci_conf_write(pa->pa_pc, pa->pa_tag,
1836 					    sc->sc_pcixe_capoff + PCIX_CMD,
1837 					    pcix_cmd);
1838 				}
1839 			}
1840 		}
1841 		/*
1842 		 * The quad port adapter is special; it has a PCIX-PCIX
1843 		 * bridge on the board, and can run the secondary bus at
1844 		 * a higher speed.
1845 		 */
1846 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1847 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1848 								      : 66;
1849 		} else if (sc->sc_flags & WM_F_PCIX) {
1850 			switch (reg & STATUS_PCIXSPD_MASK) {
1851 			case STATUS_PCIXSPD_50_66:
1852 				sc->sc_bus_speed = 66;
1853 				break;
1854 			case STATUS_PCIXSPD_66_100:
1855 				sc->sc_bus_speed = 100;
1856 				break;
1857 			case STATUS_PCIXSPD_100_133:
1858 				sc->sc_bus_speed = 133;
1859 				break;
1860 			default:
1861 				aprint_error_dev(sc->sc_dev,
1862 				    "unknown PCIXSPD %d; assuming 66MHz\n",
1863 				    reg & STATUS_PCIXSPD_MASK);
1864 				sc->sc_bus_speed = 66;
1865 				break;
1866 			}
1867 		} else
1868 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1869 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1870 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1871 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1872 	}
1873 
1874 	/* clear interesting stat counters */
1875 	CSR_READ(sc, WMREG_COLC);
1876 	CSR_READ(sc, WMREG_RXERRC);
1877 
1878 	/* get PHY control from SMBus to PCIe */
1879 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1880 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
1881 		wm_smbustopci(sc);
1882 
1883 	/* Reset the chip to a known state. */
1884 	wm_reset(sc);
1885 
1886 	/* Get some information about the EEPROM. */
1887 	switch (sc->sc_type) {
1888 	case WM_T_82542_2_0:
1889 	case WM_T_82542_2_1:
1890 	case WM_T_82543:
1891 	case WM_T_82544:
1892 		/* Microwire */
1893 		sc->sc_nvm_wordsize = 64;
1894 		sc->sc_nvm_addrbits = 6;
1895 		break;
1896 	case WM_T_82540:
1897 	case WM_T_82545:
1898 	case WM_T_82545_3:
1899 	case WM_T_82546:
1900 	case WM_T_82546_3:
1901 		/* Microwire */
1902 		reg = CSR_READ(sc, WMREG_EECD);
1903 		if (reg & EECD_EE_SIZE) {
1904 			sc->sc_nvm_wordsize = 256;
1905 			sc->sc_nvm_addrbits = 8;
1906 		} else {
1907 			sc->sc_nvm_wordsize = 64;
1908 			sc->sc_nvm_addrbits = 6;
1909 		}
1910 		sc->sc_flags |= WM_F_LOCK_EECD;
1911 		break;
1912 	case WM_T_82541:
1913 	case WM_T_82541_2:
1914 	case WM_T_82547:
1915 	case WM_T_82547_2:
1916 		sc->sc_flags |= WM_F_LOCK_EECD;
1917 		reg = CSR_READ(sc, WMREG_EECD);
1918 		if (reg & EECD_EE_TYPE) {
1919 			/* SPI */
1920 			sc->sc_flags |= WM_F_EEPROM_SPI;
1921 			wm_nvm_set_addrbits_size_eecd(sc);
1922 		} else {
1923 			/* Microwire */
1924 			if ((reg & EECD_EE_ABITS) != 0) {
1925 				sc->sc_nvm_wordsize = 256;
1926 				sc->sc_nvm_addrbits = 8;
1927 			} else {
1928 				sc->sc_nvm_wordsize = 64;
1929 				sc->sc_nvm_addrbits = 6;
1930 			}
1931 		}
1932 		break;
1933 	case WM_T_82571:
1934 	case WM_T_82572:
1935 		/* SPI */
1936 		sc->sc_flags |= WM_F_EEPROM_SPI;
1937 		wm_nvm_set_addrbits_size_eecd(sc);
1938 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1939 		break;
1940 	case WM_T_82573:
1941 		sc->sc_flags |= WM_F_LOCK_SWSM;
1942 		/* FALLTHROUGH */
1943 	case WM_T_82574:
1944 	case WM_T_82583:
1945 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
1946 			sc->sc_flags |= WM_F_EEPROM_FLASH;
1947 			sc->sc_nvm_wordsize = 2048;
1948 		} else {
1949 			/* SPI */
1950 			sc->sc_flags |= WM_F_EEPROM_SPI;
1951 			wm_nvm_set_addrbits_size_eecd(sc);
1952 		}
1953 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1954 		break;
1955 	case WM_T_82575:
1956 	case WM_T_82576:
1957 	case WM_T_82580:
1958 	case WM_T_I350:
1959 	case WM_T_I354:
1960 	case WM_T_80003:
1961 		/* SPI */
1962 		sc->sc_flags |= WM_F_EEPROM_SPI;
1963 		wm_nvm_set_addrbits_size_eecd(sc);
1964 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
1965 		    | WM_F_LOCK_SWSM;
1966 		break;
1967 	case WM_T_ICH8:
1968 	case WM_T_ICH9:
1969 	case WM_T_ICH10:
1970 	case WM_T_PCH:
1971 	case WM_T_PCH2:
1972 	case WM_T_PCH_LPT:
1973 		/* FLASH */
1974 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1975 		sc->sc_nvm_wordsize = 2048;
1976 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
1977 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1978 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
1979 			aprint_error_dev(sc->sc_dev,
1980 			    "can't map FLASH registers\n");
1981 			goto out;
1982 		}
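		/*
		 * As read from the code below (not a datasheet quote): the
		 * low bits of GFPREG hold the first sector of the gigabit
		 * NVM region and bits 16.. hold the last one, so
		 * (last - first + 1) sectors times the sector size is the
		 * region size in bytes for both banks, and dividing by
		 * 2 * sizeof(uint16_t) yields words per bank.
		 */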
1983 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1984 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1985 		    ICH_FLASH_SECTOR_SIZE;
1986 		sc->sc_ich8_flash_bank_size =
1987 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1988 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
1989 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1990 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1991 		sc->sc_flashreg_offset = 0;
1992 		break;
1993 	case WM_T_PCH_SPT:
1994 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
1995 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1996 		sc->sc_flasht = sc->sc_st;
1997 		sc->sc_flashh = sc->sc_sh;
1998 		sc->sc_ich8_flash_base = 0;
1999 		sc->sc_nvm_wordsize =
2000 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
2001 			* NVM_SIZE_MULTIPLIER;
2002 		/* It is the size in bytes; we want words */
2003 		sc->sc_nvm_wordsize /= 2;
2004 		/* assume 2 banks */
2005 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
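		/*
		 * For example (illustrative; the value of
		 * NVM_SIZE_MULTIPLIER is not assumed here): a strap field
		 * of n gives (n + 1) multiplier-sized units in bytes, half
		 * that many 16-bit words, and half of those again per bank.
		 */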
2006 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
2007 		break;
2008 	case WM_T_I210:
2009 	case WM_T_I211:
2010 		if (wm_nvm_get_flash_presence_i210(sc)) {
2011 			wm_nvm_set_addrbits_size_eecd(sc);
2012 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2013 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
2014 		} else {
2015 			sc->sc_nvm_wordsize = INVM_SIZE;
2016 			sc->sc_flags |= WM_F_EEPROM_INVM;
2017 			sc->sc_flags |= WM_F_LOCK_SWFW;
2018 		}
2019 		break;
2020 	default:
2021 		break;
2022 	}
2023 
2024 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
2025 	switch (sc->sc_type) {
2026 	case WM_T_82571:
2027 	case WM_T_82572:
2028 		reg = CSR_READ(sc, WMREG_SWSM2);
2029 		if ((reg & SWSM2_LOCK) == 0) {
2030 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2031 			force_clear_smbi = true;
2032 		} else
2033 			force_clear_smbi = false;
2034 		break;
2035 	case WM_T_82573:
2036 	case WM_T_82574:
2037 	case WM_T_82583:
2038 		force_clear_smbi = true;
2039 		break;
2040 	default:
2041 		force_clear_smbi = false;
2042 		break;
2043 	}
2044 	if (force_clear_smbi) {
2045 		reg = CSR_READ(sc, WMREG_SWSM);
2046 		if ((reg & SWSM_SMBI) != 0)
2047 			aprint_error_dev(sc->sc_dev,
2048 			    "Please update the Bootagent\n");
2049 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2050 	}
2051 
2052 	/*
2053 	 * Defer printing the EEPROM type until after verifying the checksum.
2054 	 * This allows the EEPROM type to be printed correctly in the case
2055 	 * that no EEPROM is attached.
2056 	 */
2057 	/*
2058 	 * Validate the EEPROM checksum. If the checksum fails, flag
2059 	 * this for later, so we can fail future reads from the EEPROM.
2060 	 */
2061 	if (wm_nvm_validate_checksum(sc)) {
2062 		/*
2063 		 * Validate again, because some PCI-e parts fail the
2064 		 * first check due to the link being in a sleep state.
2065 		 */
2066 		if (wm_nvm_validate_checksum(sc))
2067 			sc->sc_flags |= WM_F_EEPROM_INVALID;
2068 	}
2069 
2070 	/* Set device properties (macflags) */
2071 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
2072 
2073 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
2074 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2075 	else {
2076 		aprint_verbose_dev(sc->sc_dev, "%u words ",
2077 		    sc->sc_nvm_wordsize);
2078 		if (sc->sc_flags & WM_F_EEPROM_INVM)
2079 			aprint_verbose("iNVM");
2080 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2081 			aprint_verbose("FLASH(HW)");
2082 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2083 			aprint_verbose("FLASH");
2084 		else {
2085 			if (sc->sc_flags & WM_F_EEPROM_SPI)
2086 				eetype = "SPI";
2087 			else
2088 				eetype = "MicroWire";
2089 			aprint_verbose("(%d address bits) %s EEPROM",
2090 			    sc->sc_nvm_addrbits, eetype);
2091 		}
2092 	}
2093 	wm_nvm_version(sc);
2094 	aprint_verbose("\n");
2095 
2096 	/* Check for I21[01] PLL workaround */
2097 	if (sc->sc_type == WM_T_I210)
2098 		sc->sc_flags |= WM_F_PLL_WA_I210;
2099 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
2100 		/* NVM image release 3.25 has a workaround */
2101 		if ((sc->sc_nvm_ver_major < 3)
2102 		    || ((sc->sc_nvm_ver_major == 3)
2103 			&& (sc->sc_nvm_ver_minor < 25))) {
2104 			aprint_verbose_dev(sc->sc_dev,
2105 			    "ROM image version %d.%d is older than 3.25\n",
2106 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2107 			sc->sc_flags |= WM_F_PLL_WA_I210;
2108 		}
2109 	}
2110 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2111 		wm_pll_workaround_i210(sc);
2112 
2113 	wm_get_wakeup(sc);
2114 	switch (sc->sc_type) {
2115 	case WM_T_82571:
2116 	case WM_T_82572:
2117 	case WM_T_82573:
2118 	case WM_T_82574:
2119 	case WM_T_82583:
2120 	case WM_T_80003:
2121 	case WM_T_ICH8:
2122 	case WM_T_ICH9:
2123 	case WM_T_ICH10:
2124 	case WM_T_PCH:
2125 	case WM_T_PCH2:
2126 	case WM_T_PCH_LPT:
2127 	case WM_T_PCH_SPT:
2128 		/* Non-AMT based hardware can now take control from firmware */
2129 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2130 			wm_get_hw_control(sc);
2131 		break;
2132 	default:
2133 		break;
2134 	}
2135 
2136 	/*
2137 	 * Read the Ethernet address from the EEPROM if it was not
2138 	 * found first in the device properties.
2139 	 */
2140 	ea = prop_dictionary_get(dict, "mac-address");
2141 	if (ea != NULL) {
2142 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2143 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2144 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
2145 	} else {
2146 		if (wm_read_mac_addr(sc, enaddr) != 0) {
2147 			aprint_error_dev(sc->sc_dev,
2148 			    "unable to read Ethernet address\n");
2149 			goto out;
2150 		}
2151 	}
2152 
2153 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2154 	    ether_sprintf(enaddr));
2155 
2156 	/*
2157 	 * Read the config info from the EEPROM, and set up various
2158 	 * bits in the control registers based on their contents.
2159 	 */
2160 	pn = prop_dictionary_get(dict, "i82543-cfg1");
2161 	if (pn != NULL) {
2162 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2163 		cfg1 = (uint16_t) prop_number_integer_value(pn);
2164 	} else {
2165 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2166 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2167 			goto out;
2168 		}
2169 	}
2170 
2171 	pn = prop_dictionary_get(dict, "i82543-cfg2");
2172 	if (pn != NULL) {
2173 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2174 		cfg2 = (uint16_t) prop_number_integer_value(pn);
2175 	} else {
2176 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2177 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2178 			goto out;
2179 		}
2180 	}
2181 
2182 	/* check for WM_F_WOL */
2183 	switch (sc->sc_type) {
2184 	case WM_T_82542_2_0:
2185 	case WM_T_82542_2_1:
2186 	case WM_T_82543:
2187 		/* dummy? */
2188 		eeprom_data = 0;
2189 		apme_mask = NVM_CFG3_APME;
2190 		break;
2191 	case WM_T_82544:
2192 		apme_mask = NVM_CFG2_82544_APM_EN;
2193 		eeprom_data = cfg2;
2194 		break;
2195 	case WM_T_82546:
2196 	case WM_T_82546_3:
2197 	case WM_T_82571:
2198 	case WM_T_82572:
2199 	case WM_T_82573:
2200 	case WM_T_82574:
2201 	case WM_T_82583:
2202 	case WM_T_80003:
2203 	default:
2204 		apme_mask = NVM_CFG3_APME;
2205 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2206 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2207 		break;
2208 	case WM_T_82575:
2209 	case WM_T_82576:
2210 	case WM_T_82580:
2211 	case WM_T_I350:
2212 	case WM_T_I354: /* XXX ok? */
2213 	case WM_T_ICH8:
2214 	case WM_T_ICH9:
2215 	case WM_T_ICH10:
2216 	case WM_T_PCH:
2217 	case WM_T_PCH2:
2218 	case WM_T_PCH_LPT:
2219 	case WM_T_PCH_SPT:
2220 		/* XXX The funcid should be checked on some devices */
2221 		apme_mask = WUC_APME;
2222 		eeprom_data = CSR_READ(sc, WMREG_WUC);
2223 		break;
2224 	}
2225 
2226 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2227 	if ((eeprom_data & apme_mask) != 0)
2228 		sc->sc_flags |= WM_F_WOL;
2229 #ifdef WM_DEBUG
2230 	if ((sc->sc_flags & WM_F_WOL) != 0)
2231 		printf("WOL\n");
2232 #endif
2233 
2234 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
2235 		/* Check NVM for autonegotiation */
2236 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2237 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
2238 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2239 		}
2240 	}
2241 
2242 	/*
2243 	 * XXX need special handling for some multi-port cards
2244 	 * to disable a particular port.
2245 	 */
2246 
2247 	if (sc->sc_type >= WM_T_82544) {
2248 		pn = prop_dictionary_get(dict, "i82543-swdpin");
2249 		if (pn != NULL) {
2250 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2251 			swdpin = (uint16_t) prop_number_integer_value(pn);
2252 		} else {
2253 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2254 				aprint_error_dev(sc->sc_dev,
2255 				    "unable to read SWDPIN\n");
2256 				goto out;
2257 			}
2258 		}
2259 	}
2260 
2261 	if (cfg1 & NVM_CFG1_ILOS)
2262 		sc->sc_ctrl |= CTRL_ILOS;
2263 
2264 	/*
2265 	 * XXX
2266 	 * This code isn't correct because pins 2 and 3 are located
2267 	 * at different positions on newer chips. Check all datasheets.
2268 	 *
2269 	 * Until this problem is resolved, only handle chips <= 82580.
2270 	 */
2271 	if (sc->sc_type <= WM_T_82580) {
2272 		if (sc->sc_type >= WM_T_82544) {
2273 			sc->sc_ctrl |=
2274 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2275 			    CTRL_SWDPIO_SHIFT;
2276 			sc->sc_ctrl |=
2277 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2278 			    CTRL_SWDPINS_SHIFT;
2279 		} else {
2280 			sc->sc_ctrl |=
2281 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2282 			    CTRL_SWDPIO_SHIFT;
2283 		}
2284 	}
2285 
2286 	/* XXX For other than 82580? */
2287 	if (sc->sc_type == WM_T_82580) {
2288 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
2289 		if (nvmword & __BIT(13))
2290 			sc->sc_ctrl |= CTRL_ILOS;
2291 	}
2292 
2293 #if 0
2294 	if (sc->sc_type >= WM_T_82544) {
2295 		if (cfg1 & NVM_CFG1_IPS0)
2296 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2297 		if (cfg1 & NVM_CFG1_IPS1)
2298 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2299 		sc->sc_ctrl_ext |=
2300 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2301 		    CTRL_EXT_SWDPIO_SHIFT;
2302 		sc->sc_ctrl_ext |=
2303 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2304 		    CTRL_EXT_SWDPINS_SHIFT;
2305 	} else {
2306 		sc->sc_ctrl_ext |=
2307 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2308 		    CTRL_EXT_SWDPIO_SHIFT;
2309 	}
2310 #endif
2311 
2312 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2313 #if 0
2314 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2315 #endif
2316 
2317 	if (sc->sc_type == WM_T_PCH) {
2318 		uint16_t val;
2319 
2320 		/* Save the NVM K1 bit setting */
2321 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2322 
2323 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2324 			sc->sc_nvm_k1_enabled = 1;
2325 		else
2326 			sc->sc_nvm_k1_enabled = 0;
2327 	}
2328 
2329 	/*
2330 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
2331 	 * media structures accordingly.
2332 	 */
2333 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2334 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2335 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2336 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
2337 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2338 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
2339 		wm_gmii_mediainit(sc, wmp->wmp_product);
2340 	} else if (sc->sc_type < WM_T_82543 ||
2341 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2342 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2343 			aprint_error_dev(sc->sc_dev,
2344 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
2345 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2346 		}
2347 		wm_tbi_mediainit(sc);
2348 	} else {
2349 		switch (sc->sc_type) {
2350 		case WM_T_82575:
2351 		case WM_T_82576:
2352 		case WM_T_82580:
2353 		case WM_T_I350:
2354 		case WM_T_I354:
2355 		case WM_T_I210:
2356 		case WM_T_I211:
2357 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
2358 			link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2359 			switch (link_mode) {
2360 			case CTRL_EXT_LINK_MODE_1000KX:
2361 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2362 				sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2363 				break;
2364 			case CTRL_EXT_LINK_MODE_SGMII:
2365 				if (wm_sgmii_uses_mdio(sc)) {
2366 					aprint_verbose_dev(sc->sc_dev,
2367 					    "SGMII(MDIO)\n");
2368 					sc->sc_flags |= WM_F_SGMII;
2369 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2370 					break;
2371 				}
2372 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2373 				/*FALLTHROUGH*/
2374 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2375 				sc->sc_mediatype = wm_sfp_get_media_type(sc);
2376 				if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2377 					if (link_mode
2378 					    == CTRL_EXT_LINK_MODE_SGMII) {
2379 						sc->sc_mediatype
2380 						    = WM_MEDIATYPE_COPPER;
2381 						sc->sc_flags |= WM_F_SGMII;
2382 					} else {
2383 						sc->sc_mediatype
2384 						    = WM_MEDIATYPE_SERDES;
2385 						aprint_verbose_dev(sc->sc_dev,
2386 						    "SERDES\n");
2387 					}
2388 					break;
2389 				}
2390 				if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2391 					aprint_verbose_dev(sc->sc_dev,
2392 					    "SERDES\n");
2393 
2394 				/* Change current link mode setting */
2395 				reg &= ~CTRL_EXT_LINK_MODE_MASK;
2396 				switch (sc->sc_mediatype) {
2397 				case WM_MEDIATYPE_COPPER:
2398 					reg |= CTRL_EXT_LINK_MODE_SGMII;
2399 					break;
2400 				case WM_MEDIATYPE_SERDES:
2401 					reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2402 					break;
2403 				default:
2404 					break;
2405 				}
2406 				CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2407 				break;
2408 			case CTRL_EXT_LINK_MODE_GMII:
2409 			default:
2410 				aprint_verbose_dev(sc->sc_dev, "Copper\n");
2411 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2412 				break;
2413 			}
2414 
2416 			if ((sc->sc_flags & WM_F_SGMII) != 0)
2417 				reg |= CTRL_EXT_I2C_ENA;
2418 			else
2419 				reg &= ~CTRL_EXT_I2C_ENA;
2420 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2421 
2422 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2423 				wm_gmii_mediainit(sc, wmp->wmp_product);
2424 			else
2425 				wm_tbi_mediainit(sc);
2426 			break;
2427 		default:
2428 			if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
2429 				aprint_error_dev(sc->sc_dev,
2430 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2431 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2432 			wm_gmii_mediainit(sc, wmp->wmp_product);
2433 		}
2434 	}
2435 
2436 	ifp = &sc->sc_ethercom.ec_if;
2437 	xname = device_xname(sc->sc_dev);
2438 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2439 	ifp->if_softc = sc;
2440 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2441 	ifp->if_extflags = IFEF_START_MPSAFE;
2442 	ifp->if_ioctl = wm_ioctl;
2443 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
2444 		ifp->if_start = wm_nq_start;
2445 		if (sc->sc_nqueues > 1)
2446 			ifp->if_transmit = wm_nq_transmit;
2447 	} else
2448 		ifp->if_start = wm_start;
2449 	ifp->if_watchdog = wm_watchdog;
2450 	ifp->if_init = wm_init;
2451 	ifp->if_stop = wm_stop;
2452 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2453 	IFQ_SET_READY(&ifp->if_snd);
2454 
2455 	/* Check for jumbo frame */
2456 	switch (sc->sc_type) {
2457 	case WM_T_82573:
2458 		/* XXX limited to 9234 if ASPM is disabled */
2459 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2460 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2461 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2462 		break;
2463 	case WM_T_82571:
2464 	case WM_T_82572:
2465 	case WM_T_82574:
2466 	case WM_T_82575:
2467 	case WM_T_82576:
2468 	case WM_T_82580:
2469 	case WM_T_I350:
2470 	case WM_T_I354: /* XXX ok? */
2471 	case WM_T_I210:
2472 	case WM_T_I211:
2473 	case WM_T_80003:
2474 	case WM_T_ICH9:
2475 	case WM_T_ICH10:
2476 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
2477 	case WM_T_PCH_LPT:
2478 	case WM_T_PCH_SPT:
2479 		/* XXX limited to 9234 */
2480 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2481 		break;
2482 	case WM_T_PCH:
2483 		/* XXX limited to 4096 */
2484 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2485 		break;
2486 	case WM_T_82542_2_0:
2487 	case WM_T_82542_2_1:
2488 	case WM_T_82583:
2489 	case WM_T_ICH8:
2490 		/* No support for jumbo frame */
2491 		break;
2492 	default:
2493 		/* ETHER_MAX_LEN_JUMBO */
2494 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2495 		break;
2496 	}
2497 
2498 	/* If we're an i82543 or greater, we can support VLANs. */
2499 	if (sc->sc_type >= WM_T_82543)
2500 		sc->sc_ethercom.ec_capabilities |=
2501 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2502 
2503 	/*
2504 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
2505 	 * on i82543 and later.
2506 	 */
2507 	if (sc->sc_type >= WM_T_82543) {
2508 		ifp->if_capabilities |=
2509 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2510 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2511 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2512 		    IFCAP_CSUM_TCPv6_Tx |
2513 		    IFCAP_CSUM_UDPv6_Tx;
2514 	}
2515 
2516 	/*
2517 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2518 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
2519 	 *	82541GI (8086:1076) ... no
2520 	 *	82572EI (8086:10b9) ... yes
2521 	 */
2522 	if (sc->sc_type >= WM_T_82571) {
2523 		ifp->if_capabilities |=
2524 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2525 	}
2526 
2527 	/*
2528 	 * If we're an i82544 or greater (except i82547), we can do
2529 	 * TCP segmentation offload.
2530 	 */
2531 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2532 		ifp->if_capabilities |= IFCAP_TSOv4;
2533 	}
2534 
2535 	if (sc->sc_type >= WM_T_82571) {
2536 		ifp->if_capabilities |= IFCAP_TSOv6;
2537 	}
2538 
2539 #ifdef WM_MPSAFE
2540 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2541 #else
2542 	sc->sc_core_lock = NULL;
2543 #endif
2544 
2545 	/* Attach the interface. */
2546 	if_initialize(ifp);
2547 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
2548 	ether_ifattach(ifp, enaddr);
2549 	if_register(ifp);
2550 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2551 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2552 			  RND_FLAG_DEFAULT);
2553 
2554 #ifdef WM_EVENT_COUNTERS
2555 	/* Attach event counters. */
2556 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2557 	    NULL, xname, "linkintr");
2558 
2559 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2560 	    NULL, xname, "tx_xoff");
2561 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2562 	    NULL, xname, "tx_xon");
2563 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2564 	    NULL, xname, "rx_xoff");
2565 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2566 	    NULL, xname, "rx_xon");
2567 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2568 	    NULL, xname, "rx_macctl");
2569 #endif /* WM_EVENT_COUNTERS */
2570 
2571 	if (pmf_device_register(self, wm_suspend, wm_resume))
2572 		pmf_class_network_register(self, ifp);
2573 	else
2574 		aprint_error_dev(self, "couldn't establish power handler\n");
2575 
2576 	sc->sc_flags |= WM_F_ATTACHED;
2577  out:
2578 	return;
2579 }
2580 
2581 /* The detach function (ca_detach) */
2582 static int
2583 wm_detach(device_t self, int flags __unused)
2584 {
2585 	struct wm_softc *sc = device_private(self);
2586 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2587 	int i;
2588 
2589 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2590 		return 0;
2591 
2592 	/* Stop the interface; callouts are stopped in wm_stop(). */
2593 	wm_stop(ifp, 1);
2594 
2595 	pmf_device_deregister(self);
2596 
2597 	/* Tell the firmware about the release */
2598 	WM_CORE_LOCK(sc);
2599 	wm_release_manageability(sc);
2600 	wm_release_hw_control(sc);
2601 	WM_CORE_UNLOCK(sc);
2602 
2603 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2604 
2605 	/* Delete all remaining media. */
2606 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2607 
2608 	ether_ifdetach(ifp);
2609 	if_detach(ifp);
2610 	if_percpuq_destroy(sc->sc_ipq);
2611 
2612 	/* Unload RX dmamaps and free mbufs */
2613 	for (i = 0; i < sc->sc_nqueues; i++) {
2614 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
2615 		mutex_enter(rxq->rxq_lock);
2616 		wm_rxdrain(rxq);
2617 		mutex_exit(rxq->rxq_lock);
2618 	}
2619 	/* Must unlock here */
2620 
2621 	/* Disestablish the interrupt handler */
2622 	for (i = 0; i < sc->sc_nintrs; i++) {
2623 		if (sc->sc_ihs[i] != NULL) {
2624 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
2625 			sc->sc_ihs[i] = NULL;
2626 		}
2627 	}
2628 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
2629 
2630 	wm_free_txrx_queues(sc);
2631 
2632 	/* Unmap the registers */
2633 	if (sc->sc_ss) {
2634 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2635 		sc->sc_ss = 0;
2636 	}
2637 	if (sc->sc_ios) {
2638 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2639 		sc->sc_ios = 0;
2640 	}
2641 	if (sc->sc_flashs) {
2642 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
2643 		sc->sc_flashs = 0;
2644 	}
2645 
2646 	if (sc->sc_core_lock)
2647 		mutex_obj_free(sc->sc_core_lock);
2648 
2649 	return 0;
2650 }
2651 
2652 static bool
2653 wm_suspend(device_t self, const pmf_qual_t *qual)
2654 {
2655 	struct wm_softc *sc = device_private(self);
2656 
2657 	wm_release_manageability(sc);
2658 	wm_release_hw_control(sc);
2659 #ifdef WM_WOL
2660 	wm_enable_wakeup(sc);
2661 #endif
2662 
2663 	return true;
2664 }
2665 
2666 static bool
2667 wm_resume(device_t self, const pmf_qual_t *qual)
2668 {
2669 	struct wm_softc *sc = device_private(self);
2670 
2671 	wm_init_manageability(sc);
2672 
2673 	return true;
2674 }
2675 
2676 /*
2677  * wm_watchdog:		[ifnet interface function]
2678  *
2679  *	Watchdog timer handler.
2680  */
2681 static void
2682 wm_watchdog(struct ifnet *ifp)
2683 {
2684 	int qid;
2685 	struct wm_softc *sc = ifp->if_softc;
2686 
2687 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
2688 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
2689 
2690 		wm_watchdog_txq(ifp, txq);
2691 	}
2692 
2693 	/* Reset the interface. */
2694 	(void) wm_init(ifp);
2695 
2696 	/*
2697 	 * There is still some upper-layer processing that calls
2698 	 * ifp->if_start(), e.g. ALTQ.
2699 	 */
2700 	/* Try to get more packets going. */
2701 	ifp->if_start(ifp);
2702 }
2703 
2704 static void
2705 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
2706 {
2707 	struct wm_softc *sc = ifp->if_softc;
2708 
2709 	/*
2710 	 * Since we're using delayed interrupts, sweep up
2711 	 * before we report an error.
2712 	 */
2713 	mutex_enter(txq->txq_lock);
2714 	wm_txeof(sc, txq);
2715 	mutex_exit(txq->txq_lock);
2716 
2717 	if (txq->txq_free != WM_NTXDESC(txq)) {
2718 #ifdef WM_DEBUG
2719 		int i, j;
2720 		struct wm_txsoft *txs;
2721 #endif
2722 		log(LOG_ERR,
2723 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2724 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
2725 		    txq->txq_next);
2726 		ifp->if_oerrors++;
2727 #ifdef WM_DEBUG
2728 		for (i = txq->txq_sdirty; i != txq->txq_snext;
2729 		    i = WM_NEXTTXS(txq, i)) {
2730 			txs = &txq->txq_soft[i];
2731 			printf("txs %d tx %d -> %d\n",
2732 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
2733 			for (j = txs->txs_firstdesc; ;
2734 			    j = WM_NEXTTX(txq, j)) {
2735 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2736 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
2737 				printf("\t %#08x%08x\n",
2738 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
2739 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
2740 				if (j == txs->txs_lastdesc)
2741 					break;
2742 			}
2743 		}
2744 #endif
2745 	}
2746 }
2747 
2748 /*
2749  * wm_tick:
2750  *
2751  *	One second timer, used to check link status, sweep up
2752  *	completed transmit jobs, etc.
2753  */
2754 static void
2755 wm_tick(void *arg)
2756 {
2757 	struct wm_softc *sc = arg;
2758 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2759 #ifndef WM_MPSAFE
2760 	int s = splnet();
2761 #endif
2762 
2763 	WM_CORE_LOCK(sc);
2764 
2765 	if (sc->sc_stopping)
2766 		goto out;
2767 
2768 	if (sc->sc_type >= WM_T_82542_2_1) {
2769 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2770 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2771 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2772 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2773 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2774 	}
2775 
2776 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2777 	ifp->if_ierrors += 0ULL /* ensure quad_t */
2778 	    + CSR_READ(sc, WMREG_CRCERRS)
2779 	    + CSR_READ(sc, WMREG_ALGNERRC)
2780 	    + CSR_READ(sc, WMREG_SYMERRC)
2781 	    + CSR_READ(sc, WMREG_RXERRC)
2782 	    + CSR_READ(sc, WMREG_SEC)
2783 	    + CSR_READ(sc, WMREG_CEXTERR)
2784 	    + CSR_READ(sc, WMREG_RLEC);
2785 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2786 
2787 	if (sc->sc_flags & WM_F_HAS_MII)
2788 		mii_tick(&sc->sc_mii);
2789 	else if ((sc->sc_type >= WM_T_82575)
2790 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
2791 		wm_serdes_tick(sc);
2792 	else
2793 		wm_tbi_tick(sc);
2794 
2795 out:
2796 	WM_CORE_UNLOCK(sc);
2797 #ifndef WM_MPSAFE
2798 	splx(s);
2799 #endif
2800 
2801 	if (!sc->sc_stopping)
2802 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2803 }
2804 
2805 static int
2806 wm_ifflags_cb(struct ethercom *ec)
2807 {
2808 	struct ifnet *ifp = &ec->ec_if;
2809 	struct wm_softc *sc = ifp->if_softc;
2810 	int change = ifp->if_flags ^ sc->sc_if_flags;
2811 	int rc = 0;
2812 
2813 	WM_CORE_LOCK(sc);
2814 
2815 	if (change != 0)
2816 		sc->sc_if_flags = ifp->if_flags;
2817 
2818 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
2819 		rc = ENETRESET;
2820 		goto out;
2821 	}
2822 
2823 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2824 		wm_set_filter(sc);
2825 
2826 	wm_set_vlan(sc);
2827 
2828 out:
2829 	WM_CORE_UNLOCK(sc);
2830 
2831 	return rc;
2832 }
2833 
2834 /*
2835  * wm_ioctl:		[ifnet interface function]
2836  *
2837  *	Handle control requests from the operator.
2838  */
2839 static int
2840 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2841 {
2842 	struct wm_softc *sc = ifp->if_softc;
2843 	struct ifreq *ifr = (struct ifreq *) data;
2844 	struct ifaddr *ifa = (struct ifaddr *)data;
2845 	struct sockaddr_dl *sdl;
2846 	int s, error;
2847 
2848 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
2849 		device_xname(sc->sc_dev), __func__));
2850 #ifndef WM_MPSAFE
2851 	s = splnet();
2852 #endif
2853 	switch (cmd) {
2854 	case SIOCSIFMEDIA:
2855 	case SIOCGIFMEDIA:
2856 		WM_CORE_LOCK(sc);
2857 		/* Flow control requires full-duplex mode. */
2858 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2859 		    (ifr->ifr_media & IFM_FDX) == 0)
2860 			ifr->ifr_media &= ~IFM_ETH_FMASK;
2861 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2862 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2863 				/* We can do both TXPAUSE and RXPAUSE. */
2864 				ifr->ifr_media |=
2865 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2866 			}
2867 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2868 		}
2869 		WM_CORE_UNLOCK(sc);
2870 #ifdef WM_MPSAFE
2871 		s = splnet();
2872 #endif
2873 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2874 #ifdef WM_MPSAFE
2875 		splx(s);
2876 #endif
2877 		break;
2878 	case SIOCINITIFADDR:
2879 		WM_CORE_LOCK(sc);
2880 		if (ifa->ifa_addr->sa_family == AF_LINK) {
2881 			sdl = satosdl(ifp->if_dl->ifa_addr);
2882 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2883 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2884 			/* unicast address is first multicast entry */
2885 			wm_set_filter(sc);
2886 			error = 0;
2887 			WM_CORE_UNLOCK(sc);
2888 			break;
2889 		}
2890 		WM_CORE_UNLOCK(sc);
2891 		/*FALLTHROUGH*/
2892 	default:
2893 #ifdef WM_MPSAFE
2894 		s = splnet();
2895 #endif
2896 		/* It may call wm_start, so unlock here */
2897 		error = ether_ioctl(ifp, cmd, data);
2898 #ifdef WM_MPSAFE
2899 		splx(s);
2900 #endif
2901 		if (error != ENETRESET)
2902 			break;
2903 
2904 		error = 0;
2905 
2906 		if (cmd == SIOCSIFCAP) {
2907 			error = (*ifp->if_init)(ifp);
2908 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2909 			;
2910 		else if (ifp->if_flags & IFF_RUNNING) {
2911 			/*
2912 			 * Multicast list has changed; set the hardware filter
2913 			 * accordingly.
2914 			 */
2915 			WM_CORE_LOCK(sc);
2916 			wm_set_filter(sc);
2917 			WM_CORE_UNLOCK(sc);
2918 		}
2919 		break;
2920 	}
2921 
2922 #ifndef WM_MPSAFE
2923 	splx(s);
2924 #endif
2925 	return error;
2926 }
2927 
2928 /* MAC address related */
2929 
2930 /*
2931  * Get the offset of the MAC address and return it.
2932  * If an error occurred, use offset 0.
2933  */
2934 static uint16_t
2935 wm_check_alt_mac_addr(struct wm_softc *sc)
2936 {
2937 	uint16_t myea[ETHER_ADDR_LEN / 2];
2938 	uint16_t offset = NVM_OFF_MACADDR;
2939 
2940 	/* Try to read alternative MAC address pointer */
2941 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
2942 		return 0;
2943 
2944 	/* Check whether the pointer is valid or not. */
2945 	if ((offset == 0x0000) || (offset == 0xffff))
2946 		return 0;
2947 
2948 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
2949 	/*
2950 	 * Check whether the alternative MAC address is valid.  Some cards
2951 	 * have a non-0xffff pointer but don't actually use an alternative
2952 	 * MAC address.
2953 	 *
2954 	 * To tell, check that the broadcast bit of the address is clear.
2955 	 */
2956 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
2957 		if (((myea[0] & 0xff) & 0x01) == 0)
2958 			return offset; /* Found */
2959 
2960 	/* Not found */
2961 	return 0;
2962 }
2963 
2964 static int
2965 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
2966 {
2967 	uint16_t myea[ETHER_ADDR_LEN / 2];
2968 	uint16_t offset = NVM_OFF_MACADDR;
2969 	int do_invert = 0;
2970 
2971 	switch (sc->sc_type) {
2972 	case WM_T_82580:
2973 	case WM_T_I350:
2974 	case WM_T_I354:
2975 		/* EEPROM Top Level Partitioning */
2976 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
2977 		break;
2978 	case WM_T_82571:
2979 	case WM_T_82575:
2980 	case WM_T_82576:
2981 	case WM_T_80003:
2982 	case WM_T_I210:
2983 	case WM_T_I211:
2984 		offset = wm_check_alt_mac_addr(sc);
2985 		if (offset == 0)
2986 			if ((sc->sc_funcid & 0x01) == 1)
2987 				do_invert = 1;
2988 		break;
2989 	default:
2990 		if ((sc->sc_funcid & 0x01) == 1)
2991 			do_invert = 1;
2992 		break;
2993 	}
2994 
2995 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
2996 		myea) != 0)
2997 		goto bad;
2998 
2999 	enaddr[0] = myea[0] & 0xff;
3000 	enaddr[1] = myea[0] >> 8;
3001 	enaddr[2] = myea[1] & 0xff;
3002 	enaddr[3] = myea[1] >> 8;
3003 	enaddr[4] = myea[2] & 0xff;
3004 	enaddr[5] = myea[2] >> 8;
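	/*
	 * For example (illustrative values), NVM words { 0x2301, 0x6745,
	 * 0xab89 } unpack to the address 01:23:45:67:89:ab: each 16-bit
	 * word holds its lower-numbered octet in the low byte.
	 */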
3005 
3006 	/*
3007 	 * Toggle the LSB of the MAC address on the second port
3008 	 * of some dual port cards.
3009 	 */
3010 	if (do_invert != 0)
3011 		enaddr[5] ^= 1;
3012 
3013 	return 0;
3014 
3015  bad:
3016 	return -1;
3017 }
3018 
3019 /*
3020  * wm_set_ral:
3021  *
3022  *	Set an entry in the receive address list.
3023  */
3024 static void
3025 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3026 {
3027 	uint32_t ral_lo, ral_hi;
3028 
3029 	if (enaddr != NULL) {
3030 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3031 		    (enaddr[3] << 24);
3032 		ral_hi = enaddr[4] | (enaddr[5] << 8);
3033 		ral_hi |= RAL_AV;
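		/*
		 * For example (illustrative), 01:23:45:67:89:ab packs to
		 * ral_lo = 0x67452301 and ral_hi = 0x0000ab89 | RAL_AV.
		 */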
3034 	} else {
3035 		ral_lo = 0;
3036 		ral_hi = 0;
3037 	}
3038 
3039 	if (sc->sc_type >= WM_T_82544) {
3040 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
3041 		    ral_lo);
3042 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
3043 		    ral_hi);
3044 	} else {
3045 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
3046 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
3047 	}
3048 }
3049 
3050 /*
3051  * wm_mchash:
3052  *
3053  *	Compute the hash of the multicast address for the 4096-bit
3054  *	multicast filter.
3055  */
3056 static uint32_t
3057 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3058 {
3059 	static const int lo_shift[4] = { 4, 3, 2, 0 };
3060 	static const int hi_shift[4] = { 4, 5, 6, 8 };
3061 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3062 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3063 	uint32_t hash;
3064 
3065 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3066 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3067 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3068 	    || (sc->sc_type == WM_T_PCH_SPT)) {
3069 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3070 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3071 		return (hash & 0x3ff);
3072 	}
3073 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3074 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
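	/*
	 * For example (illustrative), with mchash_type 0 (shifts 4/4) an
	 * address ending in ...:89:ab hashes to
	 * (0x89 >> 4) | (0xab << 4) = 0xab8.
	 */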
3075 
3076 	return (hash & 0xfff);
3077 }
3078 
3079 /*
3080  * wm_set_filter:
3081  *
3082  *	Set up the receive filter.
3083  */
3084 static void
3085 wm_set_filter(struct wm_softc *sc)
3086 {
3087 	struct ethercom *ec = &sc->sc_ethercom;
3088 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3089 	struct ether_multi *enm;
3090 	struct ether_multistep step;
3091 	bus_addr_t mta_reg;
3092 	uint32_t hash, reg, bit;
3093 	int i, size, ralmax;
3094 
3095 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3096 		device_xname(sc->sc_dev), __func__));
3097 	if (sc->sc_type >= WM_T_82544)
3098 		mta_reg = WMREG_CORDOVA_MTA;
3099 	else
3100 		mta_reg = WMREG_MTA;
3101 
3102 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3103 
3104 	if (ifp->if_flags & IFF_BROADCAST)
3105 		sc->sc_rctl |= RCTL_BAM;
3106 	if (ifp->if_flags & IFF_PROMISC) {
3107 		sc->sc_rctl |= RCTL_UPE;
3108 		goto allmulti;
3109 	}
3110 
3111 	/*
3112 	 * Set the station address in the first RAL slot, and
3113 	 * clear the remaining slots.
3114 	 */
3115 	if (sc->sc_type == WM_T_ICH8)
3116 		size = WM_RAL_TABSIZE_ICH8 - 1;
3117 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
3118 	    || (sc->sc_type == WM_T_PCH))
3119 		size = WM_RAL_TABSIZE_ICH8;
3120 	else if (sc->sc_type == WM_T_PCH2)
3121 		size = WM_RAL_TABSIZE_PCH2;
3122 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
3123 		size = WM_RAL_TABSIZE_PCH_LPT;
3124 	else if (sc->sc_type == WM_T_82575)
3125 		size = WM_RAL_TABSIZE_82575;
3126 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
3127 		size = WM_RAL_TABSIZE_82576;
3128 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3129 		size = WM_RAL_TABSIZE_I350;
3130 	else
3131 		size = WM_RAL_TABSIZE;
3132 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3133 
3134 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
3135 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
3136 		switch (i) {
3137 		case 0:
3138 			/* We can use all entries */
3139 			ralmax = size;
3140 			break;
3141 		case 1:
3142 			/* Only RAR[0] */
3143 			ralmax = 1;
3144 			break;
3145 		default:
3146 			/* available SHRA + RAR[0] */
3147 			ralmax = i + 1;
3148 		}
3149 	} else
3150 		ralmax = size;
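	/*
	 * Clear the remaining slots up to ralmax; entries at ralmax and
	 * above are apparently write-locked via FWSM_WLOCK_MAC and are
	 * left alone.
	 */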
3151 	for (i = 1; i < size; i++) {
3152 		if (i < ralmax)
3153 			wm_set_ral(sc, NULL, i);
3154 	}
3155 
3156 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3157 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3158 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3159 	    || (sc->sc_type == WM_T_PCH_SPT))
3160 		size = WM_ICH8_MC_TABSIZE;
3161 	else
3162 		size = WM_MC_TABSIZE;
3163 	/* Clear out the multicast table. */
3164 	for (i = 0; i < size; i++)
3165 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
3166 
3167 	ETHER_FIRST_MULTI(step, ec, enm);
3168 	while (enm != NULL) {
3169 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3170 			/*
3171 			 * We must listen to a range of multicast addresses.
3172 			 * For now, just accept all multicasts, rather than
3173 			 * trying to set only those filter bits needed to match
3174 			 * the range.  (At this time, the only use of address
3175 			 * ranges is for IP multicast routing, for which the
3176 			 * range is big enough to require all bits set.)
3177 			 */
3178 			goto allmulti;
3179 		}
3180 
3181 		hash = wm_mchash(sc, enm->enm_addrlo);
3182 
3183 		reg = (hash >> 5);
3184 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3185 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3186 		    || (sc->sc_type == WM_T_PCH2)
3187 		    || (sc->sc_type == WM_T_PCH_LPT)
3188 		    || (sc->sc_type == WM_T_PCH_SPT))
3189 			reg &= 0x1f;
3190 		else
3191 			reg &= 0x7f;
3192 		bit = hash & 0x1f;
3193 
3194 		hash = CSR_READ(sc, mta_reg + (reg << 2));
3195 		hash |= 1U << bit;
3196 
3197 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
3198 			/*
3199 			 * 82544 Errata 9: Certain registers cannot be written
3200 			 * with particular alignments in PCI-X bus operation
3201 			 * (FCAH, MTA and VFTA).
3202 			 */
3203 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3204 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3205 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3206 		} else
3207 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3208 
3209 		ETHER_NEXT_MULTI(step, enm);
3210 	}
3211 
3212 	ifp->if_flags &= ~IFF_ALLMULTI;
3213 	goto setit;
3214 
3215  allmulti:
3216 	ifp->if_flags |= IFF_ALLMULTI;
3217 	sc->sc_rctl |= RCTL_MPE;
3218 
3219  setit:
3220 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3221 }
3222 
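/*
 * Illustrative example (not part of the original source): the loop
 * above maps each multicast address hash to one bit of the MTA.  For
 * a hypothetical hash of 0x12f4 on a non-ICH/PCH chip:
 *
 *	reg = (0x12f4 >> 5) & 0x7f;	selects MTA register 0x17 (23)
 *	bit = 0x12f4 & 0x1f;		selects bit 20 in that register
 *
 * Since register 23 is odd, the 82544 errata path above would also
 * rewrite the even-numbered neighbor register after the update.
 */
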
3223 /* Reset and init related */
3224 
3225 static void
3226 wm_set_vlan(struct wm_softc *sc)
3227 {
3228 
3229 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3230 		device_xname(sc->sc_dev), __func__));
3231 	/* Deal with VLAN enables. */
3232 	if (VLAN_ATTACHED(&sc->sc_ethercom))
3233 		sc->sc_ctrl |= CTRL_VME;
3234 	else
3235 		sc->sc_ctrl &= ~CTRL_VME;
3236 
3237 	/* Write the control registers. */
3238 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3239 }
3240 
3241 static void
3242 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3243 {
3244 	uint32_t gcr;
3245 	pcireg_t ctrl2;
3246 
3247 	gcr = CSR_READ(sc, WMREG_GCR);
3248 
3249 	/* Only take action if timeout value is defaulted to 0 */
3250 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3251 		goto out;
3252 
3253 	if ((gcr & GCR_CAP_VER2) == 0) {
3254 		gcr |= GCR_CMPL_TMOUT_10MS;
3255 		goto out;
3256 	}
3257 
3258 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3259 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
3260 	ctrl2 |= WM_PCIE_DCSR2_16MS;
3261 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3262 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3263 
3264 out:
3265 	/* Disable completion timeout resend */
3266 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
3267 
3268 	CSR_WRITE(sc, WMREG_GCR, gcr);
3269 }
3270 
3271 void
3272 wm_get_auto_rd_done(struct wm_softc *sc)
3273 {
3274 	int i;
3275 
3276 	/* wait for eeprom to reload */
3277 	switch (sc->sc_type) {
3278 	case WM_T_82571:
3279 	case WM_T_82572:
3280 	case WM_T_82573:
3281 	case WM_T_82574:
3282 	case WM_T_82583:
3283 	case WM_T_82575:
3284 	case WM_T_82576:
3285 	case WM_T_82580:
3286 	case WM_T_I350:
3287 	case WM_T_I354:
3288 	case WM_T_I210:
3289 	case WM_T_I211:
3290 	case WM_T_80003:
3291 	case WM_T_ICH8:
3292 	case WM_T_ICH9:
3293 		for (i = 0; i < 10; i++) {
3294 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3295 				break;
3296 			delay(1000);
3297 		}
3298 		if (i == 10) {
3299 			log(LOG_ERR, "%s: auto read from eeprom failed to "
3300 			    "complete\n", device_xname(sc->sc_dev));
3301 		}
3302 		break;
3303 	default:
3304 		break;
3305 	}
3306 }
3307 
3308 void
3309 wm_lan_init_done(struct wm_softc *sc)
3310 {
3311 	uint32_t reg = 0;
3312 	int i;
3313 
3314 	/* Wait for LAN init to complete */
3315 	switch (sc->sc_type) {
3316 	case WM_T_ICH10:
3317 	case WM_T_PCH:
3318 	case WM_T_PCH2:
3319 	case WM_T_PCH_LPT:
3320 	case WM_T_PCH_SPT:
3321 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3322 			reg = CSR_READ(sc, WMREG_STATUS);
3323 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
3324 				break;
3325 			delay(100);
3326 		}
3327 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3328 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
3329 			    "complete\n", device_xname(sc->sc_dev), __func__);
3330 		}
3331 		break;
3332 	default:
3333 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3334 		    __func__);
3335 		break;
3336 	}
3337 
3338 	reg &= ~STATUS_LAN_INIT_DONE;
3339 	CSR_WRITE(sc, WMREG_STATUS, reg);
3340 }
3341 
3342 void
3343 wm_get_cfg_done(struct wm_softc *sc)
3344 {
3345 	int mask;
3346 	uint32_t reg;
3347 	int i;
3348 
3349 	/* Wait for the configuration load to complete */
3350 	switch (sc->sc_type) {
3351 	case WM_T_82542_2_0:
3352 	case WM_T_82542_2_1:
3353 		/* null */
3354 		break;
3355 	case WM_T_82543:
3356 	case WM_T_82544:
3357 	case WM_T_82540:
3358 	case WM_T_82545:
3359 	case WM_T_82545_3:
3360 	case WM_T_82546:
3361 	case WM_T_82546_3:
3362 	case WM_T_82541:
3363 	case WM_T_82541_2:
3364 	case WM_T_82547:
3365 	case WM_T_82547_2:
3366 	case WM_T_82573:
3367 	case WM_T_82574:
3368 	case WM_T_82583:
3369 		/* generic */
3370 		delay(10*1000);
3371 		break;
3372 	case WM_T_80003:
3373 	case WM_T_82571:
3374 	case WM_T_82572:
3375 	case WM_T_82575:
3376 	case WM_T_82576:
3377 	case WM_T_82580:
3378 	case WM_T_I350:
3379 	case WM_T_I354:
3380 	case WM_T_I210:
3381 	case WM_T_I211:
3382 		if (sc->sc_type == WM_T_82571) {
3383 			/* Only 82571 shares port 0 */
3384 			mask = EEMNGCTL_CFGDONE_0;
3385 		} else
3386 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3387 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3388 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3389 				break;
3390 			delay(1000);
3391 		}
3392 		if (i >= WM_PHY_CFG_TIMEOUT) {
3393 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3394 				device_xname(sc->sc_dev), __func__));
3395 		}
3396 		break;
3397 	case WM_T_ICH8:
3398 	case WM_T_ICH9:
3399 	case WM_T_ICH10:
3400 	case WM_T_PCH:
3401 	case WM_T_PCH2:
3402 	case WM_T_PCH_LPT:
3403 	case WM_T_PCH_SPT:
3404 		delay(10*1000);
3405 		if (sc->sc_type >= WM_T_ICH10)
3406 			wm_lan_init_done(sc);
3407 		else
3408 			wm_get_auto_rd_done(sc);
3409 
3410 		reg = CSR_READ(sc, WMREG_STATUS);
3411 		if ((reg & STATUS_PHYRA) != 0)
3412 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3413 		break;
3414 	default:
3415 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3416 		    __func__);
3417 		break;
3418 	}
3419 }
3420 
3421 /* Init hardware bits */
3422 void
3423 wm_initialize_hardware_bits(struct wm_softc *sc)
3424 {
3425 	uint32_t tarc0, tarc1, reg;
3426 
3427 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3428 		device_xname(sc->sc_dev), __func__));
3429 	/* For 82571 variant, 80003 and ICHs */
3430 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
3431 	    || (sc->sc_type >= WM_T_80003)) {
3432 
3433 		/* Transmit Descriptor Control 0 */
3434 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
3435 		reg |= TXDCTL_COUNT_DESC;
3436 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
3437 
3438 		/* Transmit Descriptor Control 1 */
3439 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
3440 		reg |= TXDCTL_COUNT_DESC;
3441 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
3442 
3443 		/* TARC0 */
3444 		tarc0 = CSR_READ(sc, WMREG_TARC0);
3445 		switch (sc->sc_type) {
3446 		case WM_T_82571:
3447 		case WM_T_82572:
3448 		case WM_T_82573:
3449 		case WM_T_82574:
3450 		case WM_T_82583:
3451 		case WM_T_80003:
3452 			/* Clear bits 30..27 */
3453 			tarc0 &= ~__BITS(30, 27);
3454 			break;
3455 		default:
3456 			break;
3457 		}
3458 
3459 		switch (sc->sc_type) {
3460 		case WM_T_82571:
3461 		case WM_T_82572:
3462 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
3463 
3464 			tarc1 = CSR_READ(sc, WMREG_TARC1);
3465 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
3466 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
3467 			/* 8257[12] Errata No.7 */
3468 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
3469 
3470 			/* TARC1 bit 28 */
3471 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3472 				tarc1 &= ~__BIT(28);
3473 			else
3474 				tarc1 |= __BIT(28);
3475 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
3476 
3477 			/*
3478 			 * 8257[12] Errata No.13
3479 			 * Disable Dynamic Clock Gating.
3480 			 */
3481 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
3482 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
3483 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3484 			break;
3485 		case WM_T_82573:
3486 		case WM_T_82574:
3487 		case WM_T_82583:
3488 			if ((sc->sc_type == WM_T_82574)
3489 			    || (sc->sc_type == WM_T_82583))
3490 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
3491 
3492 			/* Extended Device Control */
3493 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
3494 			reg &= ~__BIT(23);	/* Clear bit 23 */
3495 			reg |= __BIT(22);	/* Set bit 22 */
3496 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3497 
3498 			/* Device Control */
3499 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
3500 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3501 
3502 			/* PCIe Control Register */
3503 			/*
3504 			 * 82573 Errata (unknown).
3505 			 *
3506 			 * 82574 Errata 25 and 82583 Errata 12
3507 			 * "Dropped Rx Packets":
3508 			 *   Fixed in NVM Image Version 2.1.4 and newer.
3509 			 */
3510 			reg = CSR_READ(sc, WMREG_GCR);
3511 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
3512 			CSR_WRITE(sc, WMREG_GCR, reg);
3513 
3514 			if ((sc->sc_type == WM_T_82574)
3515 			    || (sc->sc_type == WM_T_82583)) {
3516 				/*
3517 				 * Document says this bit must be set for
3518 				 * proper operation.
3519 				 */
3520 				reg = CSR_READ(sc, WMREG_GCR);
3521 				reg |= __BIT(22);
3522 				CSR_WRITE(sc, WMREG_GCR, reg);
3523 
3524 				/*
3525 				 * Apply the workaround for hardware errata
3526 				 * documented in the errata docs.  It fixes an
3527 				 * issue where some error-prone or unreliable
3528 				 * PCIe completions occur, particularly with
3529 				 * ASPM enabled.  Without the fix, the issue
3530 				 * can cause Tx timeouts.
3531 				 */
3532 				reg = CSR_READ(sc, WMREG_GCR2);
3533 				reg |= __BIT(0);
3534 				CSR_WRITE(sc, WMREG_GCR2, reg);
3535 			}
3536 			break;
3537 		case WM_T_80003:
3538 			/* TARC0 */
3539 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
3540 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3541 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
3542 
3543 			/* TARC1 bit 28 */
3544 			tarc1 = CSR_READ(sc, WMREG_TARC1);
3545 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3546 				tarc1 &= ~__BIT(28);
3547 			else
3548 				tarc1 |= __BIT(28);
3549 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
3550 			break;
3551 		case WM_T_ICH8:
3552 		case WM_T_ICH9:
3553 		case WM_T_ICH10:
3554 		case WM_T_PCH:
3555 		case WM_T_PCH2:
3556 		case WM_T_PCH_LPT:
3557 		case WM_T_PCH_SPT:
3558 			/* TARC0 */
3559 			if ((sc->sc_type == WM_T_ICH8)
3560 			    || (sc->sc_type == WM_T_PCH_SPT)) {
3561 				/* Set TARC0 bits 29 and 28 */
3562 				tarc0 |= __BITS(29, 28);
3563 			}
3564 			/* Set TARC0 bits 23,24,26,27 */
3565 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
3566 
3567 			/* CTRL_EXT */
3568 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
3569 			reg |= __BIT(22);	/* Set bit 22 */
3570 			/*
3571 			 * Enable PHY low-power state when MAC is at D3
3572 			 * w/o WoL
3573 			 */
3574 			if (sc->sc_type >= WM_T_PCH)
3575 				reg |= CTRL_EXT_PHYPDEN;
3576 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3577 
3578 			/* TARC1 */
3579 			tarc1 = CSR_READ(sc, WMREG_TARC1);
3580 			/* bit 28 */
3581 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3582 				tarc1 &= ~__BIT(28);
3583 			else
3584 				tarc1 |= __BIT(28);
3585 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
3586 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
3587 
3588 			/* Device Status */
3589 			if (sc->sc_type == WM_T_ICH8) {
3590 				reg = CSR_READ(sc, WMREG_STATUS);
3591 				reg &= ~__BIT(31);
3592 				CSR_WRITE(sc, WMREG_STATUS, reg);
3593 
3594 			}
3595 
3596 			/* IOSFPC */
3597 			if (sc->sc_type == WM_T_PCH_SPT) {
3598 				reg = CSR_READ(sc, WMREG_IOSFPC);
3599 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
3600 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
3601 			}
3602 			/*
3603 			 * To work around a descriptor data corruption issue
3604 			 * seen with NFS v2 UDP traffic, just disable the NFS
3605 			 * filtering capability.
3606 			 */
3607 			reg = CSR_READ(sc, WMREG_RFCTL);
3608 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
3609 			CSR_WRITE(sc, WMREG_RFCTL, reg);
3610 			break;
3611 		default:
3612 			break;
3613 		}
3614 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
3615 
3616 		/*
3617 		 * 8257[12] Errata No.52 and some others.
3618 		 * Avoid RSS Hash Value bug.
3619 		 */
3620 		switch (sc->sc_type) {
3621 		case WM_T_82571:
3622 		case WM_T_82572:
3623 		case WM_T_82573:
3624 		case WM_T_80003:
3625 		case WM_T_ICH8:
3626 			reg = CSR_READ(sc, WMREG_RFCTL);
3627 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
3628 			CSR_WRITE(sc, WMREG_RFCTL, reg);
3629 			break;
3630 		default:
3631 			break;
3632 		}
3633 	}
3634 }
3635 
3636 static uint32_t
3637 wm_rxpbs_adjust_82580(uint32_t val)
3638 {
3639 	uint32_t rv = 0;
3640 
3641 	if (val < __arraycount(wm_82580_rxpbs_table))
3642 		rv = wm_82580_rxpbs_table[val];
3643 
3644 	return rv;
3645 }
3646 
3647 /*
3648  * wm_reset:
3649  *
3650  *	Reset the i82542 chip.
3651  */
3652 static void
3653 wm_reset(struct wm_softc *sc)
3654 {
3655 	int phy_reset = 0;
3656 	int i, error = 0;
3657 	uint32_t reg, mask;
3658 
3659 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3660 		device_xname(sc->sc_dev), __func__));
3661 	/*
3662 	 * Allocate on-chip memory according to the MTU size.
3663 	 * The Packet Buffer Allocation register must be written
3664 	 * before the chip is reset.
3665 	 */
3666 	switch (sc->sc_type) {
3667 	case WM_T_82547:
3668 	case WM_T_82547_2:
3669 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3670 		    PBA_22K : PBA_30K;
3671 		for (i = 0; i < sc->sc_nqueues; i++) {
3672 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
3673 			txq->txq_fifo_head = 0;
3674 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3675 			txq->txq_fifo_size =
3676 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3677 			txq->txq_fifo_stall = 0;
3678 		}
3679 		break;
3680 	case WM_T_82571:
3681 	case WM_T_82572:
3682 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
3683 	case WM_T_80003:
3684 		sc->sc_pba = PBA_32K;
3685 		break;
3686 	case WM_T_82573:
3687 		sc->sc_pba = PBA_12K;
3688 		break;
3689 	case WM_T_82574:
3690 	case WM_T_82583:
3691 		sc->sc_pba = PBA_20K;
3692 		break;
3693 	case WM_T_82576:
3694 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
3695 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
3696 		break;
3697 	case WM_T_82580:
3698 	case WM_T_I350:
3699 	case WM_T_I354:
3700 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
3701 		break;
3702 	case WM_T_I210:
3703 	case WM_T_I211:
3704 		sc->sc_pba = PBA_34K;
3705 		break;
3706 	case WM_T_ICH8:
3707 		/* Workaround for a bit corruption issue in FIFO memory */
3708 		sc->sc_pba = PBA_8K;
3709 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3710 		break;
3711 	case WM_T_ICH9:
3712 	case WM_T_ICH10:
3713 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
3714 		    PBA_14K : PBA_10K;
3715 		break;
3716 	case WM_T_PCH:
3717 	case WM_T_PCH2:
3718 	case WM_T_PCH_LPT:
3719 	case WM_T_PCH_SPT:
3720 		sc->sc_pba = PBA_26K;
3721 		break;
3722 	default:
3723 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3724 		    PBA_40K : PBA_48K;
3725 		break;
3726 	}
3727 	/*
3728 	 * Only old or non-multiqueue devices have the PBA register.
3729 	 * XXX Need special handling for 82575.
3730 	 */
3731 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3732 	    || (sc->sc_type == WM_T_82575))
3733 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3734 
3735 	/* Prevent the PCI-E bus from sticking */
3736 	if (sc->sc_flags & WM_F_PCIE) {
3737 		int timeout = 800;
3738 
3739 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
3740 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3741 
3742 		while (timeout--) {
3743 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3744 			    == 0)
3745 				break;
3746 			delay(100);
3747 		}
3748 	}
3749 
3750 	/* Set the completion timeout for interface */
3751 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3752 	    || (sc->sc_type == WM_T_82580)
3753 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3754 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3755 		wm_set_pcie_completion_timeout(sc);
3756 
3757 	/* Clear interrupt */
3758 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3759 	if (sc->sc_nintrs > 1) {
3760 		if (sc->sc_type != WM_T_82574) {
3761 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
3762 			CSR_WRITE(sc, WMREG_EIAC, 0);
3763 		} else {
3764 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
3765 		}
3766 	}
3767 
3768 	/* Stop the transmit and receive processes. */
3769 	CSR_WRITE(sc, WMREG_RCTL, 0);
3770 	sc->sc_rctl &= ~RCTL_EN;
3771 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3772 	CSR_WRITE_FLUSH(sc);
3773 
3774 	/* XXX set_tbi_sbp_82543() */
3775 
3776 	delay(10*1000);
3777 
3778 	/* Must acquire the MDIO ownership before MAC reset */
3779 	switch (sc->sc_type) {
3780 	case WM_T_82573:
3781 	case WM_T_82574:
3782 	case WM_T_82583:
3783 		error = wm_get_hw_semaphore_82573(sc);
3784 		break;
3785 	default:
3786 		break;
3787 	}
3788 
3789 	/*
3790 	 * 82541 Errata 29? & 82547 Errata 28?
3791 	 * See also the description about PHY_RST bit in CTRL register
3792 	 * in 8254x_GBe_SDM.pdf.
3793 	 */
3794 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3795 		CSR_WRITE(sc, WMREG_CTRL,
3796 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3797 		CSR_WRITE_FLUSH(sc);
3798 		delay(5000);
3799 	}
3800 
3801 	switch (sc->sc_type) {
3802 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3803 	case WM_T_82541:
3804 	case WM_T_82541_2:
3805 	case WM_T_82547:
3806 	case WM_T_82547_2:
3807 		/*
3808 		 * On some chipsets, a reset through a memory-mapped write
3809 		 * cycle can cause the chip to reset before completing the
3810 		 * write cycle.  This causes a major headache that can be
3811 		 * avoided by issuing the reset via indirect register writes
3812 		 * through I/O space.
3813 		 *
3814 		 * So, if we successfully mapped the I/O BAR at attach time,
3815 		 * use that.  Otherwise, try our luck with a memory-mapped
3816 		 * reset.
3817 		 */
3818 		if (sc->sc_flags & WM_F_IOH_VALID)
3819 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3820 		else
3821 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3822 		break;
3823 	case WM_T_82545_3:
3824 	case WM_T_82546_3:
3825 		/* Use the shadow control register on these chips. */
3826 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3827 		break;
3828 	case WM_T_80003:
3829 		mask = swfwphysem[sc->sc_funcid];
3830 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3831 		wm_get_swfw_semaphore(sc, mask);
3832 		CSR_WRITE(sc, WMREG_CTRL, reg);
3833 		wm_put_swfw_semaphore(sc, mask);
3834 		break;
3835 	case WM_T_ICH8:
3836 	case WM_T_ICH9:
3837 	case WM_T_ICH10:
3838 	case WM_T_PCH:
3839 	case WM_T_PCH2:
3840 	case WM_T_PCH_LPT:
3841 	case WM_T_PCH_SPT:
3842 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3843 		if (wm_phy_resetisblocked(sc) == false) {
3844 			/*
3845 			 * Gate automatic PHY configuration by hardware on
3846 			 * non-managed 82579
3847 			 */
3848 			if ((sc->sc_type == WM_T_PCH2)
3849 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3850 				== 0))
3851 				wm_gate_hw_phy_config_ich8lan(sc, true);
3852 
3853 			reg |= CTRL_PHY_RESET;
3854 			phy_reset = 1;
3855 		} else
3856 			printf("XXX reset is blocked!!!\n");
3857 		wm_get_swfwhw_semaphore(sc);
3858 		CSR_WRITE(sc, WMREG_CTRL, reg);
3859 		/* Don't insert a completion barrier during reset */
3860 		delay(20*1000);
3861 		wm_put_swfwhw_semaphore(sc);
3862 		break;
3863 	case WM_T_82580:
3864 	case WM_T_I350:
3865 	case WM_T_I354:
3866 	case WM_T_I210:
3867 	case WM_T_I211:
3868 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3869 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
3870 			CSR_WRITE_FLUSH(sc);
3871 		delay(5000);
3872 		break;
3873 	case WM_T_82542_2_0:
3874 	case WM_T_82542_2_1:
3875 	case WM_T_82543:
3876 	case WM_T_82540:
3877 	case WM_T_82545:
3878 	case WM_T_82546:
3879 	case WM_T_82571:
3880 	case WM_T_82572:
3881 	case WM_T_82573:
3882 	case WM_T_82574:
3883 	case WM_T_82575:
3884 	case WM_T_82576:
3885 	case WM_T_82583:
3886 	default:
3887 		/* Everything else can safely use the documented method. */
3888 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3889 		break;
3890 	}
3891 
3892 	/* Must release the MDIO ownership after MAC reset */
3893 	switch (sc->sc_type) {
3894 	case WM_T_82573:
3895 	case WM_T_82574:
3896 	case WM_T_82583:
3897 		if (error == 0)
3898 			wm_put_hw_semaphore_82573(sc);
3899 		break;
3900 	default:
3901 		break;
3902 	}
3903 
3904 	if (phy_reset != 0)
3905 		wm_get_cfg_done(sc);
3906 
3907 	/* reload EEPROM */
3908 	switch (sc->sc_type) {
3909 	case WM_T_82542_2_0:
3910 	case WM_T_82542_2_1:
3911 	case WM_T_82543:
3912 	case WM_T_82544:
3913 		delay(10);
3914 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3915 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3916 		CSR_WRITE_FLUSH(sc);
3917 		delay(2000);
3918 		break;
3919 	case WM_T_82540:
3920 	case WM_T_82545:
3921 	case WM_T_82545_3:
3922 	case WM_T_82546:
3923 	case WM_T_82546_3:
3924 		delay(5*1000);
3925 		/* XXX Disable HW ARPs on ASF enabled adapters */
3926 		break;
3927 	case WM_T_82541:
3928 	case WM_T_82541_2:
3929 	case WM_T_82547:
3930 	case WM_T_82547_2:
3931 		delay(20000);
3932 		/* XXX Disable HW ARPs on ASF enabled adapters */
3933 		break;
3934 	case WM_T_82571:
3935 	case WM_T_82572:
3936 	case WM_T_82573:
3937 	case WM_T_82574:
3938 	case WM_T_82583:
3939 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3940 			delay(10);
3941 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3942 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3943 			CSR_WRITE_FLUSH(sc);
3944 		}
3945 		/* check EECD_EE_AUTORD */
3946 		wm_get_auto_rd_done(sc);
3947 		/*
3948 		 * PHY configuration from the NVM starts just after
3949 		 * EECD_AUTO_RD is set.
3950 		 */
3951 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3952 		    || (sc->sc_type == WM_T_82583))
3953 			delay(25*1000);
3954 		break;
3955 	case WM_T_82575:
3956 	case WM_T_82576:
3957 	case WM_T_82580:
3958 	case WM_T_I350:
3959 	case WM_T_I354:
3960 	case WM_T_I210:
3961 	case WM_T_I211:
3962 	case WM_T_80003:
3963 		/* check EECD_EE_AUTORD */
3964 		wm_get_auto_rd_done(sc);
3965 		break;
3966 	case WM_T_ICH8:
3967 	case WM_T_ICH9:
3968 	case WM_T_ICH10:
3969 	case WM_T_PCH:
3970 	case WM_T_PCH2:
3971 	case WM_T_PCH_LPT:
3972 	case WM_T_PCH_SPT:
3973 		break;
3974 	default:
3975 		panic("%s: unknown type\n", __func__);
3976 	}
3977 
3978 	/* Check whether EEPROM is present or not */
3979 	switch (sc->sc_type) {
3980 	case WM_T_82575:
3981 	case WM_T_82576:
3982 	case WM_T_82580:
3983 	case WM_T_I350:
3984 	case WM_T_I354:
3985 	case WM_T_ICH8:
3986 	case WM_T_ICH9:
3987 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3988 			/* Not found */
3989 			sc->sc_flags |= WM_F_EEPROM_INVALID;
3990 			if (sc->sc_type == WM_T_82575)
3991 				wm_reset_init_script_82575(sc);
3992 		}
3993 		break;
3994 	default:
3995 		break;
3996 	}
3997 
3998 	if ((sc->sc_type == WM_T_82580)
3999 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
4000 		/* clear global device reset status bit */
4001 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4002 	}
4003 
4004 	/* Clear any pending interrupt events. */
4005 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4006 	reg = CSR_READ(sc, WMREG_ICR);
4007 	if (sc->sc_nintrs > 1) {
4008 		if (sc->sc_type != WM_T_82574) {
4009 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4010 			CSR_WRITE(sc, WMREG_EIAC, 0);
4011 		} else
4012 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4013 	}
4014 
4015 	/* reload sc_ctrl */
4016 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4017 
4018 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
4019 		wm_set_eee_i350(sc);
4020 
4021 	/* dummy read from WUC */
4022 	if (sc->sc_type == WM_T_PCH)
4023 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
4024 	/*
4025 	 * For PCH, this write will make sure that any noise will be detected
4026 	 * as a CRC error and be dropped rather than show up as a bad packet
4027 	 * to the DMA engine
4028 	 */
4029 	if (sc->sc_type == WM_T_PCH)
4030 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4031 
4032 	if (sc->sc_type >= WM_T_82544)
4033 		CSR_WRITE(sc, WMREG_WUC, 0);
4034 
4035 	wm_reset_mdicnfg_82580(sc);
4036 
4037 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
4038 		wm_pll_workaround_i210(sc);
4039 }
4040 
4041 /*
4042  * wm_add_rxbuf:
4043  *
4044  *	Add a receive buffer to the indicated descriptor.
4045  */
4046 static int
4047 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
4048 {
4049 	struct wm_softc *sc = rxq->rxq_sc;
4050 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
4051 	struct mbuf *m;
4052 	int error;
4053 
4054 	KASSERT(mutex_owned(rxq->rxq_lock));
4055 
4056 	MGETHDR(m, M_DONTWAIT, MT_DATA);
4057 	if (m == NULL)
4058 		return ENOBUFS;
4059 
4060 	MCLGET(m, M_DONTWAIT);
4061 	if ((m->m_flags & M_EXT) == 0) {
4062 		m_freem(m);
4063 		return ENOBUFS;
4064 	}
4065 
4066 	if (rxs->rxs_mbuf != NULL)
4067 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4068 
4069 	rxs->rxs_mbuf = m;
4070 
4071 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4072 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4073 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
4074 	if (error) {
4075 		/* XXX XXX XXX */
4076 		aprint_error_dev(sc->sc_dev,
4077 		    "unable to load rx DMA map %d, error = %d\n",
4078 		    idx, error);
4079 		panic("wm_add_rxbuf");
4080 	}
4081 
4082 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4083 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4084 
4085 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4086 		if ((sc->sc_rctl & RCTL_EN) != 0)
4087 			wm_init_rxdesc(rxq, idx);
4088 	} else
4089 		wm_init_rxdesc(rxq, idx);
4090 
4091 	return 0;
4092 }
4093 
4094 /*
4095  * wm_rxdrain:
4096  *
4097  *	Drain the receive queue.
4098  */
4099 static void
4100 wm_rxdrain(struct wm_rxqueue *rxq)
4101 {
4102 	struct wm_softc *sc = rxq->rxq_sc;
4103 	struct wm_rxsoft *rxs;
4104 	int i;
4105 
4106 	KASSERT(mutex_owned(rxq->rxq_lock));
4107 
4108 	for (i = 0; i < WM_NRXDESC; i++) {
4109 		rxs = &rxq->rxq_soft[i];
4110 		if (rxs->rxs_mbuf != NULL) {
4111 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4112 			m_freem(rxs->rxs_mbuf);
4113 			rxs->rxs_mbuf = NULL;
4114 		}
4115 	}
4116 }
4117 
4118 
4119 /*
4120  * XXX copy from FreeBSD's sys/net/rss_config.c
4121  * XXX copied from FreeBSD's sys/net/rss_config.c
4122 /*
4123  * RSS secret key, intended to prevent attacks on load-balancing.  Its
4124  * effectiveness may be limited by algorithm choice and available entropy
4125  * during the boot.
4126  *
4127  * XXXRW: And that we don't randomize it yet!
4128  *
4129  * This is the default Microsoft RSS specification key which is also
4130  * the Chelsio T5 firmware default key.
4131  */
4132 #define RSS_KEYSIZE 40
4133 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
4134 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
4135 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
4136 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
4137 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
4138 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
4139 };
4140 
4141 /*
4142  * Caller must pass an array of size sizeof(rss_key).
4143  *
4144  * XXX
4145  * As if_ixgbe may use this function, it should not be an
4146  * if_wm-specific function.
4147  */
4148 static void
4149 wm_rss_getkey(uint8_t *key)
4150 {
4151 
4152 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
4153 }
4154 
4155 /*
4156  * Setup registers for RSS.
4157  *
4158  * XXX no VMDq support yet
4159  */
4160 static void
4161 wm_init_rss(struct wm_softc *sc)
4162 {
4163 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
4164 	int i;
4165 
4166 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
4167 
4168 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
4169 		int qid, reta_ent;
4170 
4171 		qid  = i % sc->sc_nqueues;
4172 		switch (sc->sc_type) {
4173 		case WM_T_82574:
4174 			reta_ent = __SHIFTIN(qid,
4175 			    RETA_ENT_QINDEX_MASK_82574);
4176 			break;
4177 		case WM_T_82575:
4178 			reta_ent = __SHIFTIN(qid,
4179 			    RETA_ENT_QINDEX1_MASK_82575);
4180 			break;
4181 		default:
4182 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
4183 			break;
4184 		}
4185 
4186 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
4187 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
4188 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
4189 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
4190 	}
4191 
4192 	wm_rss_getkey((uint8_t *)rss_key);
4193 	for (i = 0; i < RSSRK_NUM_REGS; i++)
4194 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
4195 
4196 	if (sc->sc_type == WM_T_82574)
4197 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
4198 	else
4199 		mrqc = MRQC_ENABLE_RSS_MQ;
4200 
4201 	/* XXXX
4202 	 * The same as FreeBSD igb.
4203 	 * Why doesn't it use MRQC_RSS_FIELD_IPV6_EX?
4204 	 */
4205 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
4206 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
4207 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
4208 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
4209 
4210 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
4211 }
4212 
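/*
 * For reference, a minimal software sketch (assumed, not part of this
 * driver) of the Microsoft RSS (Toeplitz) hash that the hardware
 * computes with wm_rss_key over the fields enabled in MRQC above.
 * The hypothetical toeplitz_hash() requires datalen + 4 <= RSS_KEYSIZE,
 * which holds for the usual 12-byte IPv4 src/dst/port tuple.
 */
#if 0
static uint32_t
toeplitz_hash(const uint8_t *key, const uint8_t *data, size_t datalen)
{
	uint32_t hash = 0, window;
	size_t i;
	int b;

	/* Start with the leftmost 32 bits of the key. */
	window = ((uint32_t)key[0] << 24) | ((uint32_t)key[1] << 16)
	    | ((uint32_t)key[2] << 8) | key[3];

	/* Consume the input MSB first, one bit at a time. */
	for (i = 0; i < datalen; i++) {
		for (b = 7; b >= 0; b--) {
			/* XOR in the current key window for each set bit. */
			if (data[i] & (1 << b))
				hash ^= window;
			/* Slide the window left by one key bit. */
			window <<= 1;
			if (key[i + 4] & (1 << b))
				window |= 1;
		}
	}

	return hash;
}
#endif
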
4213 /*
4214  * Adjust the TX and RX queue numbers which the system actually uses.
4215  *
4216  * The numbers are affected by the parameters below.
4217  *     - The number of hardware queues
4218  *     - The number of MSI-X vectors (= "nvectors" argument)
4219  *     - ncpu
4220  */
4221 static void
4222 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
4223 {
4224 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
4225 
4226 	if (nvectors < 2) {
4227 		sc->sc_nqueues = 1;
4228 		return;
4229 	}
4230 
4231 	switch (sc->sc_type) {
4232 	case WM_T_82572:
4233 		hw_ntxqueues = 2;
4234 		hw_nrxqueues = 2;
4235 		break;
4236 	case WM_T_82574:
4237 		hw_ntxqueues = 2;
4238 		hw_nrxqueues = 2;
4239 		break;
4240 	case WM_T_82575:
4241 		hw_ntxqueues = 4;
4242 		hw_nrxqueues = 4;
4243 		break;
4244 	case WM_T_82576:
4245 		hw_ntxqueues = 16;
4246 		hw_nrxqueues = 16;
4247 		break;
4248 	case WM_T_82580:
4249 	case WM_T_I350:
4250 	case WM_T_I354:
4251 		hw_ntxqueues = 8;
4252 		hw_nrxqueues = 8;
4253 		break;
4254 	case WM_T_I210:
4255 		hw_ntxqueues = 4;
4256 		hw_nrxqueues = 4;
4257 		break;
4258 	case WM_T_I211:
4259 		hw_ntxqueues = 2;
4260 		hw_nrxqueues = 2;
4261 		break;
4262 		/*
4263 		 * As the ethernet controllers below do not support MSI-X,
4264 		 * this driver does not use multiqueue on them.
4265 		 *     - WM_T_80003
4266 		 *     - WM_T_ICH8
4267 		 *     - WM_T_ICH9
4268 		 *     - WM_T_ICH10
4269 		 *     - WM_T_PCH
4270 		 *     - WM_T_PCH2
4271 		 *     - WM_T_PCH_LPT
4272 		 */
4273 	default:
4274 		hw_ntxqueues = 1;
4275 		hw_nrxqueues = 1;
4276 		break;
4277 	}
4278 
4279 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
4280 
4281 	/*
4282 	 * As more queues than MSI-X vectors cannot improve scaling, we
4283 	 * limit the number of queues actually used.
4284 	 */
4285 	if (nvectors < hw_nqueues + 1) {
4286 		sc->sc_nqueues = nvectors - 1;
4287 	} else {
4288 		sc->sc_nqueues = hw_nqueues;
4289 	}
4290 
4291 	/*
4292 	 * As more queues than CPUs cannot improve scaling, we limit
4293 	 * the number of queues actually used.
4294 	 */
4295 	if (ncpu < sc->sc_nqueues)
4296 		sc->sc_nqueues = ncpu;
4297 }
4298 
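/*
 * Worked example (numbers assumed for illustration): on an 82576
 * (16 hardware queue pairs) with nvectors = 5 and ncpu = 8,
 * hw_nqueues = 16 and nvectors < hw_nqueues + 1, so
 * sc_nqueues = nvectors - 1 = 4; the vector left over is used for
 * the link interrupt.  With ncpu = 2 instead, sc_nqueues would be
 * clamped further, down to 2.
 */
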
4299 /*
4300  * Both single interrupt MSI and INTx can use this function.
4301  */
4302 static int
4303 wm_setup_legacy(struct wm_softc *sc)
4304 {
4305 	pci_chipset_tag_t pc = sc->sc_pc;
4306 	const char *intrstr = NULL;
4307 	char intrbuf[PCI_INTRSTR_LEN];
4308 	int error;
4309 
4310 	error = wm_alloc_txrx_queues(sc);
4311 	if (error) {
4312 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
4313 		    error);
4314 		return ENOMEM;
4315 	}
4316 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
4317 	    sizeof(intrbuf));
4318 #ifdef WM_MPSAFE
4319 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
4320 #endif
4321 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
4322 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
4323 	if (sc->sc_ihs[0] == NULL) {
4324 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
4325 		    (pci_intr_type(pc, sc->sc_intrs[0])
4326 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
4327 		return ENOMEM;
4328 	}
4329 
4330 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
4331 	sc->sc_nintrs = 1;
4332 	return 0;
4333 }
4334 
4335 static int
4336 wm_setup_msix(struct wm_softc *sc)
4337 {
4338 	void *vih;
4339 	kcpuset_t *affinity;
4340 	int qidx, error, intr_idx, txrx_established;
4341 	pci_chipset_tag_t pc = sc->sc_pc;
4342 	const char *intrstr = NULL;
4343 	char intrbuf[PCI_INTRSTR_LEN];
4344 	char intr_xname[INTRDEVNAMEBUF];
4345 
4346 	if (sc->sc_nqueues < ncpu) {
4347 		/*
4348 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
4349 		 * interrupts starts at CPU#1.
4350 		 */
4351 		sc->sc_affinity_offset = 1;
4352 	} else {
4353 		/*
4354 		 * In this case, this device uses all CPUs.  So, we unify the
4355 		 * affinity cpu_index with the MSI-X vector number for readability.
4356 		 */
4357 		sc->sc_affinity_offset = 0;
4358 	}
4359 
4360 	error = wm_alloc_txrx_queues(sc);
4361 	if (error) {
4362 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
4363 		    error);
4364 		return ENOMEM;
4365 	}
4366 
4367 	kcpuset_create(&affinity, false);
4368 	intr_idx = 0;
4369 
4370 	/*
4371 	 * TX and RX
4372 	 */
4373 	txrx_established = 0;
4374 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
4375 		struct wm_queue *wmq = &sc->sc_queue[qidx];
4376 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
4377 
4378 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4379 		    sizeof(intrbuf));
4380 #ifdef WM_MPSAFE
4381 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
4382 		    PCI_INTR_MPSAFE, true);
4383 #endif
4384 		memset(intr_xname, 0, sizeof(intr_xname));
4385 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
4386 		    device_xname(sc->sc_dev), qidx);
4387 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4388 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
4389 		if (vih == NULL) {
4390 			aprint_error_dev(sc->sc_dev,
4391 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
4392 			    intrstr ? " at " : "",
4393 			    intrstr ? intrstr : "");
4394 
4395 			goto fail;
4396 		}
4397 		kcpuset_zero(affinity);
4398 		/* Round-robin affinity */
4399 		kcpuset_set(affinity, affinity_to);
4400 		error = interrupt_distribute(vih, affinity, NULL);
4401 		if (error == 0) {
4402 			aprint_normal_dev(sc->sc_dev,
4403 			    "for TX and RX interrupting at %s affinity to %u\n",
4404 			    intrstr, affinity_to);
4405 		} else {
4406 			aprint_normal_dev(sc->sc_dev,
4407 			    "for TX and RX interrupting at %s\n", intrstr);
4408 		}
4409 		sc->sc_ihs[intr_idx] = vih;
4410 		wmq->wmq_id = qidx;
4411 		wmq->wmq_intr_idx = intr_idx;
4412 
4413 		txrx_established++;
4414 		intr_idx++;
4415 	}
4416 
4417 	/*
4418 	 * LINK
4419 	 */
4420 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4421 	    sizeof(intrbuf));
4422 #ifdef WM_MPSAFE
4423 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
4424 #endif
4425 	memset(intr_xname, 0, sizeof(intr_xname));
4426 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
4427 	    device_xname(sc->sc_dev));
4428 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4429 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
4430 	if (vih == NULL) {
4431 		aprint_error_dev(sc->sc_dev,
4432 		    "unable to establish MSI-X(for LINK)%s%s\n",
4433 		    intrstr ? " at " : "",
4434 		    intrstr ? intrstr : "");
4435 
4436 		goto fail;
4437 	}
4438 	/* Keep the default affinity for the LINK interrupt */
4439 	aprint_normal_dev(sc->sc_dev,
4440 	    "for LINK interrupting at %s\n", intrstr);
4441 	sc->sc_ihs[intr_idx] = vih;
4442 	sc->sc_link_intr_idx = intr_idx;
4443 
4444 	sc->sc_nintrs = sc->sc_nqueues + 1;
4445 	kcpuset_destroy(affinity);
4446 	return 0;
4447 
4448  fail:
4449 	for (qidx = 0; qidx < txrx_established; qidx++) {
4450 		struct wm_queue *wmq = &sc->sc_queue[qidx];
4451 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
4452 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
4453 	}
4454 
4455 	kcpuset_destroy(affinity);
4456 	return ENOMEM;
4457 }
4458 
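/*
 * Illustrative MSI-X layout (assumed example): with sc_nqueues = 4 on
 * an 8-CPU machine, wm_setup_msix() establishes vectors 0..3 for the
 * TXRX0..TXRX3 queue pairs, bound round-robin to CPU#1..CPU#4
 * (sc_affinity_offset = 1 keeps CPU#0 free for other devices), plus
 * vector 4 for the LINK interrupt with default affinity, for
 * sc_nintrs = 5.
 */
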
4459 /*
4460  * wm_init:		[ifnet interface function]
4461  *
4462  *	Initialize the interface.
4463  */
4464 static int
4465 wm_init(struct ifnet *ifp)
4466 {
4467 	struct wm_softc *sc = ifp->if_softc;
4468 	int ret;
4469 
4470 	WM_CORE_LOCK(sc);
4471 	ret = wm_init_locked(ifp);
4472 	WM_CORE_UNLOCK(sc);
4473 
4474 	return ret;
4475 }
4476 
4477 static int
4478 wm_init_locked(struct ifnet *ifp)
4479 {
4480 	struct wm_softc *sc = ifp->if_softc;
4481 	int i, j, trynum, error = 0;
4482 	uint32_t reg;
4483 
4484 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4485 		device_xname(sc->sc_dev), __func__));
4486 	KASSERT(WM_CORE_LOCKED(sc));
4487 	/*
4488 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4489 	 * There is a small but measurable benefit to avoiding the adjustment
4490 	 * of the descriptor so that the headers are aligned, for normal mtu,
4491 	 * on such platforms.  One possibility is that the DMA itself is
4492 	 * slightly more efficient if the front of the entire packet (instead
4493 	 * of the front of the headers) is aligned.
4494 	 *
4495 	 * Note we must always set align_tweak to 0 if we are using
4496 	 * jumbo frames.
4497 	 */
4498 #ifdef __NO_STRICT_ALIGNMENT
4499 	sc->sc_align_tweak = 0;
4500 #else
4501 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4502 		sc->sc_align_tweak = 0;
4503 	else
4504 		sc->sc_align_tweak = 2;
4505 #endif /* __NO_STRICT_ALIGNMENT */
4506 
4507 	/* Cancel any pending I/O. */
4508 	wm_stop_locked(ifp, 0);
4509 
4510 	/* update statistics before reset */
4511 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4512 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4513 
4514 	/* Reset the chip to a known state. */
4515 	wm_reset(sc);
4516 
4517 	switch (sc->sc_type) {
4518 	case WM_T_82571:
4519 	case WM_T_82572:
4520 	case WM_T_82573:
4521 	case WM_T_82574:
4522 	case WM_T_82583:
4523 	case WM_T_80003:
4524 	case WM_T_ICH8:
4525 	case WM_T_ICH9:
4526 	case WM_T_ICH10:
4527 	case WM_T_PCH:
4528 	case WM_T_PCH2:
4529 	case WM_T_PCH_LPT:
4530 	case WM_T_PCH_SPT:
4531 		/* AMT based hardware can now take control from firmware */
4532 		if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
4533 			wm_get_hw_control(sc);
4534 		break;
4535 	default:
4536 		break;
4537 	}
4538 
4539 	/* Init hardware bits */
4540 	wm_initialize_hardware_bits(sc);
4541 
4542 	/* Reset the PHY. */
4543 	if (sc->sc_flags & WM_F_HAS_MII)
4544 		wm_gmii_reset(sc);
4545 
4546 	/* Calculate (E)ITR value */
4547 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4548 		sc->sc_itr = 450;	/* For EITR */
4549 	} else if (sc->sc_type >= WM_T_82543) {
4550 		/*
4551 		 * Set up the interrupt throttling register (units of 256ns)
4552 		 * Note that a footnote in Intel's documentation says this
4553 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4554 		 * or 10Mbit mode.  Empirically, it appears to be the case
4555 		 * that that is also true for the 1024ns units of the other
4556 		 * interrupt-related timer registers -- so, really, we ought
4557 		 * to divide this value by 4 when the link speed is low.
4558 		 *
4559 		 * XXX implement this division at link speed change!
4560 		 */
4561 
4562 		/*
4563 		 * For N interrupts/sec, set this value to:
4564 		 * 1000000000 / (N * 256).  Note that we set the
4565 		 * absolute and packet timer values to this value
4566 		 * divided by 4 to get "simple timer" behavior.
4567 		 */
4568 
4569 		sc->sc_itr = 1500;		/* 2604 ints/sec */
4570 	}
4571 
4572 	error = wm_init_txrx_queues(sc);
4573 	if (error)
4574 		goto out;
4575 
4576 	/*
4577 	 * Clear out the VLAN table -- we don't use it (yet).
4578 	 */
4579 	CSR_WRITE(sc, WMREG_VET, 0);
4580 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4581 		trynum = 10; /* Due to hw errata */
4582 	else
4583 		trynum = 1;
4584 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
4585 		for (j = 0; j < trynum; j++)
4586 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4587 
4588 	/*
4589 	 * Set up flow-control parameters.
4590 	 *
4591 	 * XXX Values could probably stand some tuning.
4592 	 */
4593 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4594 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4595 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
4596 	    && (sc->sc_type != WM_T_PCH_SPT)) {
4597 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4598 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4599 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4600 	}
4601 
4602 	sc->sc_fcrtl = FCRTL_DFLT;
4603 	if (sc->sc_type < WM_T_82543) {
4604 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4605 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4606 	} else {
4607 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4608 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4609 	}
4610 
4611 	if (sc->sc_type == WM_T_80003)
4612 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4613 	else
4614 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4615 
4616 	/* Writes the control register. */
4617 	wm_set_vlan(sc);
4618 
4619 	if (sc->sc_flags & WM_F_HAS_MII) {
4620 		int val;
4621 
4622 		switch (sc->sc_type) {
4623 		case WM_T_80003:
4624 		case WM_T_ICH8:
4625 		case WM_T_ICH9:
4626 		case WM_T_ICH10:
4627 		case WM_T_PCH:
4628 		case WM_T_PCH2:
4629 		case WM_T_PCH_LPT:
4630 		case WM_T_PCH_SPT:
4631 			/*
4632 			 * Set the mac to wait the maximum time between each
4633 			 * iteration and increase the max iterations when
4634 			 * polling the phy; this fixes erroneous timeouts at
4635 			 * 10Mbps.
4636 			 */
4637 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4638 			    0xFFFF);
4639 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
4640 			val |= 0x3F;
4641 			wm_kmrn_writereg(sc,
4642 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
4643 			break;
4644 		default:
4645 			break;
4646 		}
4647 
4648 		if (sc->sc_type == WM_T_80003) {
4649 			val = CSR_READ(sc, WMREG_CTRL_EXT);
4650 			val &= ~CTRL_EXT_LINK_MODE_MASK;
4651 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4652 
4653 			/* Bypass RX and TX FIFO's */
4654 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4655 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4656 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4657 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4658 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4659 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4660 		}
4661 	}
4662 #if 0
4663 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4664 #endif
4665 
4666 	/* Set up checksum offload parameters. */
4667 	reg = CSR_READ(sc, WMREG_RXCSUM);
4668 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4669 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4670 		reg |= RXCSUM_IPOFL;
4671 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4672 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4673 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4674 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4675 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
4676 
4677 	/* Set up MSI-X */
4678 	if (sc->sc_nintrs > 1) {
4679 		uint32_t ivar;
4680 		struct wm_queue *wmq;
4681 		int qid, qintr_idx;
4682 
4683 		if (sc->sc_type == WM_T_82575) {
4684 			/* Interrupt control */
4685 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4686 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
4687 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4688 
4689 			/* TX and RX */
4690 			for (i = 0; i < sc->sc_nqueues; i++) {
4691 				wmq = &sc->sc_queue[i];
4692 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
4693 				    EITR_TX_QUEUE(wmq->wmq_id)
4694 				    | EITR_RX_QUEUE(wmq->wmq_id));
4695 			}
4696 			/* Link status */
4697 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
4698 			    EITR_OTHER);
4699 		} else if (sc->sc_type == WM_T_82574) {
4700 			/* Interrupt control */
4701 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4702 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
4703 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4704 
4705 			ivar = 0;
4706 			/* TX and RX */
4707 			for (i = 0; i < sc->sc_nqueues; i++) {
4708 				wmq = &sc->sc_queue[i];
4709 				qid = wmq->wmq_id;
4710 				qintr_idx = wmq->wmq_intr_idx;
4711 
4712 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
4713 				    IVAR_TX_MASK_Q_82574(qid));
4714 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
4715 				    IVAR_RX_MASK_Q_82574(qid));
4716 			}
4717 			/* Link status */
4718 			ivar |= __SHIFTIN((IVAR_VALID_82574
4719 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
4720 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
4721 		} else {
4722 			/* Interrupt control */
4723 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
4724 			    | GPIE_EIAME | GPIE_PBA);
4725 
4726 			switch (sc->sc_type) {
4727 			case WM_T_82580:
4728 			case WM_T_I350:
4729 			case WM_T_I354:
4730 			case WM_T_I210:
4731 			case WM_T_I211:
4732 				/* TX and RX */
4733 				for (i = 0; i < sc->sc_nqueues; i++) {
4734 					wmq = &sc->sc_queue[i];
4735 					qid = wmq->wmq_id;
4736 					qintr_idx = wmq->wmq_intr_idx;
4737 
4738 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
4739 					ivar &= ~IVAR_TX_MASK_Q(qid);
4740 					ivar |= __SHIFTIN((qintr_idx
4741 						| IVAR_VALID),
4742 					    IVAR_TX_MASK_Q(qid));
4743 					ivar &= ~IVAR_RX_MASK_Q(qid);
4744 					ivar |= __SHIFTIN((qintr_idx
4745 						| IVAR_VALID),
4746 					    IVAR_RX_MASK_Q(qid));
4747 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
4748 				}
4749 				break;
4750 			case WM_T_82576:
4751 				/* TX and RX */
4752 				for (i = 0; i < sc->sc_nqueues; i++) {
4753 					wmq = &sc->sc_queue[i];
4754 					qid = wmq->wmq_id;
4755 					qintr_idx = wmq->wmq_intr_idx;
4756 
4757 					ivar = CSR_READ(sc,
4758 					    WMREG_IVAR_Q_82576(qid));
4759 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
4760 					ivar |= __SHIFTIN((qintr_idx
4761 						| IVAR_VALID),
4762 					    IVAR_TX_MASK_Q_82576(qid));
4763 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
4764 					ivar |= __SHIFTIN((qintr_idx
4765 						| IVAR_VALID),
4766 					    IVAR_RX_MASK_Q_82576(qid));
4767 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
4768 					    ivar);
4769 				}
4770 				break;
4771 			default:
4772 				break;
4773 			}
4774 
4775 			/* Link status */
4776 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
4777 			    IVAR_MISC_OTHER);
4778 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
4779 		}
4780 
4781 		if (sc->sc_nqueues > 1) {
4782 			wm_init_rss(sc);
4783 
4784 			/*
4785 			** NOTE: Receive Full-Packet Checksum Offload
4786 			** is mutually exclusive with Multiqueue.  However,
4787 			** this is not the same as the TCP/IP checksums,
4788 			** which still work.
4789 			*/
4790 			reg = CSR_READ(sc, WMREG_RXCSUM);
4791 			reg |= RXCSUM_PCSD;
4792 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
4793 		}
4794 	}
4795 
4796 	/* Set up the interrupt registers. */
4797 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4798 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4799 	    ICR_RXO | ICR_RXT0;
4800 	if (sc->sc_nintrs > 1) {
4801 		uint32_t mask;
4802 		struct wm_queue *wmq;
4803 
4804 		switch (sc->sc_type) {
4805 		case WM_T_82574:
4806 			CSR_WRITE(sc, WMREG_EIAC_82574,
4807 			    WMREG_EIAC_82574_MSIX_MASK);
4808 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
4809 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4810 			break;
4811 		default:
4812 			if (sc->sc_type == WM_T_82575) {
4813 				mask = 0;
4814 				for (i = 0; i < sc->sc_nqueues; i++) {
4815 					wmq = &sc->sc_queue[i];
4816 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
4817 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
4818 				}
4819 				mask |= EITR_OTHER;
4820 			} else {
4821 				mask = 0;
4822 				for (i = 0; i < sc->sc_nqueues; i++) {
4823 					wmq = &sc->sc_queue[i];
4824 					mask |= 1 << wmq->wmq_intr_idx;
4825 				}
4826 				mask |= 1 << sc->sc_link_intr_idx;
4827 			}
4828 			CSR_WRITE(sc, WMREG_EIAC, mask);
4829 			CSR_WRITE(sc, WMREG_EIAM, mask);
4830 			CSR_WRITE(sc, WMREG_EIMS, mask);
4831 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
4832 			break;
4833 		}
4834 	} else
4835 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4836 
4837 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4838 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4839 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
4840 	    || (sc->sc_type == WM_T_PCH_SPT)) {
4841 		reg = CSR_READ(sc, WMREG_KABGTXD);
4842 		reg |= KABGTXD_BGSQLBIAS;
4843 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
4844 	}
4845 
4846 	/* Set up the inter-packet gap. */
4847 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4848 
4849 	if (sc->sc_type >= WM_T_82543) {
4850 		/*
4851 		 * XXX 82574 has both ITR and EITR.  Set EITR when we use
4852 		 * the multi queue function with MSI-X.
4853 		 */
4854 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4855 			int qidx;
4856 			for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
4857 				struct wm_queue *wmq = &sc->sc_queue[qidx];
4858 				CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx),
4859 				    sc->sc_itr);
4860 			}
4861 			/*
4862 			 * Link interrupts occur much less frequently than
4863 			 * TX and RX interrupts.  So, we don't tune the
4864 			 * EITR(WM_MSIX_LINKINTR_IDX) value the way
4865 			 * FreeBSD's if_igb does.
4866 			 */
4867 		} else
4868 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4869 	}
4870 
4871 	/* Set the VLAN ethernetype. */
4872 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4873 
4874 	/*
4875 	 * Set up the transmit control register; we start out with
4876 	 * a collision distance suitable for FDX, but update it when
4877 	 * we resolve the media type.
4878 	 */
4879 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4880 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
4881 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4882 	if (sc->sc_type >= WM_T_82571)
4883 		sc->sc_tctl |= TCTL_MULR;
4884 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4885 
4886 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4887 		/* Write TDT after TCTL.EN is set.  See the documentation. */
4888 		CSR_WRITE(sc, WMREG_TDT(0), 0);
4889 	}
4890 
4891 	if (sc->sc_type == WM_T_80003) {
4892 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
4893 		reg &= ~TCTL_EXT_GCEX_MASK;
4894 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4895 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4896 	}
4897 
4898 	/* Set the media. */
4899 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4900 		goto out;
4901 
4902 	/* Configure for OS presence */
4903 	wm_init_manageability(sc);
4904 
4905 	/*
4906 	 * Set up the receive control register; we actually program
4907 	 * the register when we set the receive filter.  Use multicast
4908 	 * address offset type 0.
4909 	 *
4910 	 * Only the i82544 has the ability to strip the incoming
4911 	 * CRC, so we don't enable that feature.
4912 	 */
4913 	sc->sc_mchash_type = 0;
4914 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4915 	    | RCTL_MO(sc->sc_mchash_type);
4916 
4917 	/*
4918 	 * The I350 has a bug where it always strips the CRC whether
4919 	 * asked to or not.  So ask for a stripped CRC here and cope in rxeof.
4920 	 */
4921 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4922 	    || (sc->sc_type == WM_T_I210))
4923 		sc->sc_rctl |= RCTL_SECRC;
4924 
4925 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4926 	    && (ifp->if_mtu > ETHERMTU)) {
4927 		sc->sc_rctl |= RCTL_LPE;
4928 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4929 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4930 	}
4931 
4932 	if (MCLBYTES == 2048) {
4933 		sc->sc_rctl |= RCTL_2k;
4934 	} else {
4935 		if (sc->sc_type >= WM_T_82543) {
4936 			switch (MCLBYTES) {
4937 			case 4096:
4938 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4939 				break;
4940 			case 8192:
4941 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4942 				break;
4943 			case 16384:
4944 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4945 				break;
4946 			default:
4947 				panic("wm_init: MCLBYTES %d unsupported",
4948 				    MCLBYTES);
4949 				break;
4950 			}
4951 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
4952 	}
4953 
4954 	/* Set the receive filter. */
4955 	wm_set_filter(sc);
4956 
4957 	/* Enable ECC */
4958 	switch (sc->sc_type) {
4959 	case WM_T_82571:
4960 		reg = CSR_READ(sc, WMREG_PBA_ECC);
4961 		reg |= PBA_ECC_CORR_EN;
4962 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4963 		break;
4964 	case WM_T_PCH_LPT:
4965 	case WM_T_PCH_SPT:
4966 		reg = CSR_READ(sc, WMREG_PBECCSTS);
4967 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4968 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4969 
4970 		reg = CSR_READ(sc, WMREG_CTRL);
4971 		reg |= CTRL_MEHE;
4972 		CSR_WRITE(sc, WMREG_CTRL, reg);
4973 		break;
4974 	default:
4975 		break;
4976 	}
4977 
4978 	/* On 575 and later set RDT only if RX enabled */
4979 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4980 		int qidx;
4981 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
4982 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
4983 			for (i = 0; i < WM_NRXDESC; i++) {
4984 				mutex_enter(rxq->rxq_lock);
4985 				wm_init_rxdesc(rxq, i);
4986 				mutex_exit(rxq->rxq_lock);
4987 
4988 			}
4989 		}
4990 	}
4991 
4992 	sc->sc_stopping = false;
4993 
4994 	/* Start the one second link check clock. */
4995 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4996 
4997 	/* ...all done! */
4998 	ifp->if_flags |= IFF_RUNNING;
4999 	ifp->if_flags &= ~IFF_OACTIVE;
5000 
5001  out:
5002 	sc->sc_if_flags = ifp->if_flags;
5003 	if (error)
5004 		log(LOG_ERR, "%s: interface not running\n",
5005 		    device_xname(sc->sc_dev));
5006 	return error;
5007 }
5008 
5009 /*
5010  * wm_stop:		[ifnet interface function]
5011  *
5012  *	Stop transmission on the interface.
5013  */
5014 static void
5015 wm_stop(struct ifnet *ifp, int disable)
5016 {
5017 	struct wm_softc *sc = ifp->if_softc;
5018 
5019 	WM_CORE_LOCK(sc);
5020 	wm_stop_locked(ifp, disable);
5021 	WM_CORE_UNLOCK(sc);
5022 }
5023 
5024 static void
5025 wm_stop_locked(struct ifnet *ifp, int disable)
5026 {
5027 	struct wm_softc *sc = ifp->if_softc;
5028 	struct wm_txsoft *txs;
5029 	int i, qidx;
5030 
5031 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5032 		device_xname(sc->sc_dev), __func__));
5033 	KASSERT(WM_CORE_LOCKED(sc));
5034 
5035 	sc->sc_stopping = true;
5036 
5037 	/* Stop the one second clock. */
5038 	callout_stop(&sc->sc_tick_ch);
5039 
5040 	/* Stop the 82547 Tx FIFO stall check timer. */
5041 	if (sc->sc_type == WM_T_82547)
5042 		callout_stop(&sc->sc_txfifo_ch);
5043 
5044 	if (sc->sc_flags & WM_F_HAS_MII) {
5045 		/* Down the MII. */
5046 		mii_down(&sc->sc_mii);
5047 	} else {
5048 #if 0
5049 		/* Should we clear PHY's status properly? */
5050 		wm_reset(sc);
5051 #endif
5052 	}
5053 
5054 	/* Stop the transmit and receive processes. */
5055 	CSR_WRITE(sc, WMREG_TCTL, 0);
5056 	CSR_WRITE(sc, WMREG_RCTL, 0);
5057 	sc->sc_rctl &= ~RCTL_EN;
5058 
5059 	/*
5060 	 * Clear the interrupt mask to ensure the device cannot assert its
5061 	 * interrupt line.
5062 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
5063 	 * service any currently pending or shared interrupt.
5064 	 */
5065 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5066 	sc->sc_icr = 0;
5067 	if (sc->sc_nintrs > 1) {
5068 		if (sc->sc_type != WM_T_82574) {
5069 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5070 			CSR_WRITE(sc, WMREG_EIAC, 0);
5071 		} else
5072 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5073 	}
5074 
5075 	/* Release any queued transmit buffers. */
5076 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5077 		struct wm_queue *wmq = &sc->sc_queue[qidx];
5078 		struct wm_txqueue *txq = &wmq->wmq_txq;
5079 		mutex_enter(txq->txq_lock);
5080 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5081 			txs = &txq->txq_soft[i];
5082 			if (txs->txs_mbuf != NULL) {
5083 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
5084 				m_freem(txs->txs_mbuf);
5085 				txs->txs_mbuf = NULL;
5086 			}
5087 		}
5088 		if (sc->sc_type == WM_T_PCH_SPT) {
5089 			pcireg_t preg;
5090 			uint32_t reg;
5091 			int nexttx;
5092 
5093 			/* First, disable MULR fix in FEXTNVM11 */
5094 			reg = CSR_READ(sc, WMREG_FEXTNVM11);
5095 			reg |= FEXTNVM11_DIS_MULRFIX;
5096 			CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
5097 
5098 			preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
5099 			    WM_PCI_DESCRING_STATUS);
5100 			reg = CSR_READ(sc, WMREG_TDLEN(0));
5101 			printf("XXX RST: FLUSH = %08x, len = %u\n",
5102 			    (uint32_t)(preg & DESCRING_STATUS_FLUSH_REQ), reg);
5103 			if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0)
5104 			    && (reg != 0)) {
5105 				/* TX */
5106 				printf("XXX need TX flush (reg = %08x)\n",
5107 				    preg);
5108 				wm_init_tx_descs(sc, txq);
5109 				wm_init_tx_regs(sc, wmq, txq);
5110 				nexttx = txq->txq_next;
5111 				wm_set_dma_addr(
5112 					&txq->txq_descs[nexttx].wtx_addr,
5113 					WM_CDTXADDR(txq, nexttx));
5114 				txq->txq_descs[nexttx].wtx_cmdlen
5115 				    = htole32(WTX_CMD_IFCS | 512);
5116 				wm_cdtxsync(txq, nexttx, 1,
5117 				    BUS_DMASYNC_PREREAD |BUS_DMASYNC_PREWRITE);
5118 				CSR_WRITE(sc, WMREG_TCTL, TCTL_EN);
5119 				CSR_WRITE(sc, WMREG_TDT(0), nexttx);
5120 				CSR_WRITE_FLUSH(sc);
5121 				delay(250);
5122 				CSR_WRITE(sc, WMREG_TCTL, 0);
5123 			}
5124 			preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
5125 			    WM_PCI_DESCRING_STATUS);
5126 			if (preg & DESCRING_STATUS_FLUSH_REQ) {
5127 				/* RX */
5128 				printf("XXX need RX flush\n");
5129 			}
5130 		}
5131 		mutex_exit(txq->txq_lock);
5132 	}
5133 
5134 	/* Mark the interface as down and cancel the watchdog timer. */
5135 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
5136 	ifp->if_timer = 0;
5137 
5138 	if (disable) {
5139 		for (i = 0; i < sc->sc_nqueues; i++) {
5140 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5141 			mutex_enter(rxq->rxq_lock);
5142 			wm_rxdrain(rxq);
5143 			mutex_exit(rxq->rxq_lock);
5144 		}
5145 	}
5146 
5147 #if 0 /* notyet */
5148 	if (sc->sc_type >= WM_T_82544)
5149 		CSR_WRITE(sc, WMREG_WUC, 0);
5150 #endif
5151 }
5152 
5153 static void
5154 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
5155 {
5156 	struct mbuf *m;
5157 	int i;
5158 
5159 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
5160 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
5161 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
5162 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
5163 		    m->m_data, m->m_len, m->m_flags);
5164 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
5165 	    i, i == 1 ? "" : "s");
5166 }
5167 
5168 /*
5169  * wm_82547_txfifo_stall:
5170  *
5171  *	Callout used to wait for the 82547 Tx FIFO to drain,
5172  *	reset the FIFO pointers, and restart packet transmission.
5173  */
5174 static void
5175 wm_82547_txfifo_stall(void *arg)
5176 {
5177 	struct wm_softc *sc = arg;
5178 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
5179 
5180 	mutex_enter(txq->txq_lock);
5181 
5182 	if (sc->sc_stopping)
5183 		goto out;
5184 
5185 	if (txq->txq_fifo_stall) {
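		/*
		 * The FIFO has drained once the descriptor ring is empty
		 * (TDT == TDH) and the chip's internal Tx FIFO head and
		 * tail pointer registers have converged.
		 */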
5186 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
5187 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
5188 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
5189 			/*
5190 			 * Packets have drained.  Stop transmitter, reset
5191 			 * FIFO pointers, restart transmitter, and kick
5192 			 * the packet queue.
5193 			 */
5194 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
5195 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
5196 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
5197 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
5198 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
5199 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
5200 			CSR_WRITE(sc, WMREG_TCTL, tctl);
5201 			CSR_WRITE_FLUSH(sc);
5202 
5203 			txq->txq_fifo_head = 0;
5204 			txq->txq_fifo_stall = 0;
5205 			wm_start_locked(&sc->sc_ethercom.ec_if);
5206 		} else {
5207 			/*
5208 			 * Still waiting for packets to drain; try again in
5209 			 * another tick.
5210 			 */
5211 			callout_schedule(&sc->sc_txfifo_ch, 1);
5212 		}
5213 	}
5214 
5215 out:
5216 	mutex_exit(txq->txq_lock);
5217 }
5218 
5219 /*
5220  * wm_82547_txfifo_bugchk:
5221  *
5222  *	Check for bug condition in the 82547 Tx FIFO.  We need to
5223  *	prevent enqueueing a packet that would wrap around the end
5224  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
5225  *
5226  *	We do this by checking the amount of space before the end
5227  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
5228  *	the Tx FIFO, wait for all remaining packets to drain, reset
5229  *	the internal FIFO pointers to the beginning, and restart
5230  *	transmission on the interface.
5231  */
5232 #define	WM_FIFO_HDR		0x10
5233 #define	WM_82547_PAD_LEN	0x3e0
5234 static int
5235 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
5236 {
5237 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
5238 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
5239 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
5240 
5241 	/* Just return if already stalled. */
5242 	if (txq->txq_fifo_stall)
5243 		return 1;
5244 
5245 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
5246 		/* Stall only occurs in half-duplex mode. */
5247 		goto send_packet;
5248 	}
5249 
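	/*
	 * Worked example (hypothetical numbers): with an 8KB FIFO and
	 * txq_fifo_head = 0x1f00, space = 0x100; a 1514-byte frame rounds
	 * up to len = 0x600, which is >= WM_82547_PAD_LEN + space (0x4e0),
	 * so we stall until the FIFO drains.
	 */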
5250 	if (len >= WM_82547_PAD_LEN + space) {
5251 		txq->txq_fifo_stall = 1;
5252 		callout_schedule(&sc->sc_txfifo_ch, 1);
5253 		return 1;
5254 	}
5255 
5256  send_packet:
5257 	txq->txq_fifo_head += len;
5258 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
5259 		txq->txq_fifo_head -= txq->txq_fifo_size;
5260 
5261 	return 0;
5262 }
5263 
5264 static int
5265 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5266 {
5267 	int error;
5268 
5269 	/*
5270 	 * Allocate the control data structures, and create and load the
5271 	 * DMA map for it.
5272 	 *
5273 	 * NOTE: All Tx descriptors must be in the same 4G segment of
5274 	 * memory.  So must Rx descriptors.  We simplify by allocating
5275 	 * both sets within the same 4G segment.
5276 	 */
5277 	if (sc->sc_type < WM_T_82544)
5278 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
5279 	else
5280 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
5281 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5282 		txq->txq_descsize = sizeof(nq_txdesc_t);
5283 	else
5284 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
5285 
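	/*
	 * The (bus_size_t)0x100000000ULL boundary argument below prevents
	 * the allocation from crossing a 4G boundary, enforcing the
	 * constraint noted above.
	 */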
5286 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
5287 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
5288 		    1, &txq->txq_desc_rseg, 0)) != 0) {
5289 		aprint_error_dev(sc->sc_dev,
5290 		    "unable to allocate TX control data, error = %d\n",
5291 		    error);
5292 		goto fail_0;
5293 	}
5294 
5295 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
5296 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
5297 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
5298 		aprint_error_dev(sc->sc_dev,
5299 		    "unable to map TX control data, error = %d\n", error);
5300 		goto fail_1;
5301 	}
5302 
5303 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
5304 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
5305 		aprint_error_dev(sc->sc_dev,
5306 		    "unable to create TX control data DMA map, error = %d\n",
5307 		    error);
5308 		goto fail_2;
5309 	}
5310 
5311 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
5312 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
5313 		aprint_error_dev(sc->sc_dev,
5314 		    "unable to load TX control data DMA map, error = %d\n",
5315 		    error);
5316 		goto fail_3;
5317 	}
5318 
5319 	return 0;
5320 
5321  fail_3:
5322 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5323  fail_2:
5324 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5325 	    WM_TXDESCS_SIZE(txq));
5326  fail_1:
5327 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5328  fail_0:
5329 	return error;
5330 }
5331 
5332 static void
5333 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5334 {
5335 
5336 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
5337 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5338 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5339 	    WM_TXDESCS_SIZE(txq));
5340 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5341 }
5342 
5343 static int
5344 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5345 {
5346 	int error;
5347 
5348 	/*
5349 	 * Allocate the control data structures, and create and load the
5350 	 * DMA map for it.
5351 	 *
5352 	 * NOTE: All Tx descriptors must be in the same 4G segment of
5353 	 * memory.  So must Rx descriptors.  We simplify by allocating
5354 	 * both sets within the same 4G segment.
5355 	 */
5356 	rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
5357 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size,
5358 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
5359 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
5360 		aprint_error_dev(sc->sc_dev,
5361 		    "unable to allocate RX control data, error = %d\n",
5362 		    error);
5363 		goto fail_0;
5364 	}
5365 
5366 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
5367 		    rxq->rxq_desc_rseg, rxq->rxq_desc_size,
5368 		    (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
5369 		aprint_error_dev(sc->sc_dev,
5370 		    "unable to map RX control data, error = %d\n", error);
5371 		goto fail_1;
5372 	}
5373 
5374 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
5375 		    rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
5376 		aprint_error_dev(sc->sc_dev,
5377 		    "unable to create RX control data DMA map, error = %d\n",
5378 		    error);
5379 		goto fail_2;
5380 	}
5381 
5382 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
5383 		    rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
5384 		aprint_error_dev(sc->sc_dev,
5385 		    "unable to load RX control data DMA map, error = %d\n",
5386 		    error);
5387 		goto fail_3;
5388 	}
5389 
5390 	return 0;
5391 
5392  fail_3:
5393 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5394  fail_2:
5395 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5396 	    rxq->rxq_desc_size);
5397  fail_1:
5398 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5399  fail_0:
5400 	return error;
5401 }
5402 
5403 static void
5404 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5405 {
5406 
5407 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
5408 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5409 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5410 	    rxq->rxq_desc_size);
5411 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5412 }
5413 
5414 
5415 static int
5416 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5417 {
5418 	int i, error;
5419 
5420 	/* Create the transmit buffer DMA maps. */
5421 	WM_TXQUEUELEN(txq) =
5422 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
5423 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
5424 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5425 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
5426 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
5427 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
5428 			aprint_error_dev(sc->sc_dev,
5429 			    "unable to create Tx DMA map %d, error = %d\n",
5430 			    i, error);
5431 			goto fail;
5432 		}
5433 	}
5434 
5435 	return 0;
5436 
5437  fail:
5438 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5439 		if (txq->txq_soft[i].txs_dmamap != NULL)
5440 			bus_dmamap_destroy(sc->sc_dmat,
5441 			    txq->txq_soft[i].txs_dmamap);
5442 	}
5443 	return error;
5444 }
5445 
5446 static void
5447 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5448 {
5449 	int i;
5450 
5451 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5452 		if (txq->txq_soft[i].txs_dmamap != NULL)
5453 			bus_dmamap_destroy(sc->sc_dmat,
5454 			    txq->txq_soft[i].txs_dmamap);
5455 	}
5456 }
5457 
5458 static int
5459 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5460 {
5461 	int i, error;
5462 
5463 	/* Create the receive buffer DMA maps. */
5464 	for (i = 0; i < WM_NRXDESC; i++) {
5465 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
5466 			    MCLBYTES, 0, 0,
5467 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
5468 			aprint_error_dev(sc->sc_dev,
5469 			    "unable to create Rx DMA map %d, error = %d\n",
5470 			    i, error);
5471 			goto fail;
5472 		}
5473 		rxq->rxq_soft[i].rxs_mbuf = NULL;
5474 	}
5475 
5476 	return 0;
5477 
5478  fail:
5479 	for (i = 0; i < WM_NRXDESC; i++) {
5480 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5481 			bus_dmamap_destroy(sc->sc_dmat,
5482 			    rxq->rxq_soft[i].rxs_dmamap);
5483 	}
5484 	return error;
5485 }
5486 
5487 static void
5488 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5489 {
5490 	int i;
5491 
5492 	for (i = 0; i < WM_NRXDESC; i++) {
5493 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5494 			bus_dmamap_destroy(sc->sc_dmat,
5495 			    rxq->rxq_soft[i].rxs_dmamap);
5496 	}
5497 }
5498 
5499 /*
5500  * wm_alloc_txrx_queues:
5501  *	Allocate {tx,rx} descriptors and {tx,rx} buffers
5502  */
5503 static int
5504 wm_alloc_txrx_queues(struct wm_softc *sc)
5505 {
5506 	int i, error, tx_done, rx_done;
5507 
5508 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
5509 	    KM_SLEEP);
5510 	if (sc->sc_queue == NULL) {
5511 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
5512 		error = ENOMEM;
5513 		goto fail_0;
5514 	}
5515 
5516 	/*
5517 	 * For transmission
5518 	 */
5519 	error = 0;
5520 	tx_done = 0;
5521 	for (i = 0; i < sc->sc_nqueues; i++) {
5522 #ifdef WM_EVENT_COUNTERS
5523 		int j;
5524 		const char *xname;
5525 #endif
5526 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5527 		txq->txq_sc = sc;
5528 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5529 
5530 		error = wm_alloc_tx_descs(sc, txq);
5531 		if (error)
5532 			break;
5533 		error = wm_alloc_tx_buffer(sc, txq);
5534 		if (error) {
5535 			wm_free_tx_descs(sc, txq);
5536 			break;
5537 		}
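		/*
		 * txq_interq is the MP-safe producer/consumer queue (pcq(9))
		 * that wm_nq_transmit() feeds from arbitrary CPUs.
		 */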
5538 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
5539 		if (txq->txq_interq == NULL) {
5540 			wm_free_tx_descs(sc, txq);
5541 			wm_free_tx_buffer(sc, txq);
5542 			error = ENOMEM;
5543 			break;
5544 		}
5545 
5546 #ifdef WM_EVENT_COUNTERS
5547 		xname = device_xname(sc->sc_dev);
5548 
5549 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
5550 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
5551 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
5552 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
5553 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
5554 
5555 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
5556 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
5557 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
5558 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
5559 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
5560 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
5561 
5562 		for (j = 0; j < WM_NTXSEGS; j++) {
5563 			snprintf(txq->txq_txseg_evcnt_names[j],
5564 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
5565 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
5566 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
5567 		}
5568 
5569 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
5570 
5571 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
5572 #endif /* WM_EVENT_COUNTERS */
5573 
5574 		tx_done++;
5575 	}
5576 	if (error)
5577 		goto fail_1;
5578 
5579 	/*
5580 	 * For receive
5581 	 */
5582 	error = 0;
5583 	rx_done = 0;
5584 	for (i = 0; i < sc->sc_nqueues; i++) {
5585 #ifdef WM_EVENT_COUNTERS
5586 		const char *xname;
5587 #endif
5588 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5589 		rxq->rxq_sc = sc;
5590 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5591 
5592 		error = wm_alloc_rx_descs(sc, rxq);
5593 		if (error)
5594 			break;
5595 
5596 		error = wm_alloc_rx_buffer(sc, rxq);
5597 		if (error) {
5598 			wm_free_rx_descs(sc, rxq);
5599 			break;
5600 		}
5601 
5602 #ifdef WM_EVENT_COUNTERS
5603 		xname = device_xname(sc->sc_dev);
5604 
5605 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
5606 
5607 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
5608 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
5609 #endif /* WM_EVENT_COUNTERS */
5610 
5611 		rx_done++;
5612 	}
5613 	if (error)
5614 		goto fail_2;
5615 
5616 	return 0;
5617 
5618  fail_2:
5619 	for (i = 0; i < rx_done; i++) {
5620 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5621 		wm_free_rx_buffer(sc, rxq);
5622 		wm_free_rx_descs(sc, rxq);
5623 		if (rxq->rxq_lock)
5624 			mutex_obj_free(rxq->rxq_lock);
5625 	}
5626  fail_1:
5627 	for (i = 0; i < tx_done; i++) {
5628 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5629 		pcq_destroy(txq->txq_interq);
5630 		wm_free_tx_buffer(sc, txq);
5631 		wm_free_tx_descs(sc, txq);
5632 		if (txq->txq_lock)
5633 			mutex_obj_free(txq->txq_lock);
5634 	}
5635 
5636 	kmem_free(sc->sc_queue,
5637 	    sizeof(struct wm_queue) * sc->sc_nqueues);
5638  fail_0:
5639 	return error;
5640 }
5641 
5642 /*
5643  * wm_free_txrx_queues:
5644  *	Free {tx,rx} descriptors and {tx,rx} buffers
5645  */
5646 static void
5647 wm_free_txrx_queues(struct wm_softc *sc)
5648 {
5649 	int i;
5650 
5651 	for (i = 0; i < sc->sc_nqueues; i++) {
5652 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5653 		wm_free_rx_buffer(sc, rxq);
5654 		wm_free_rx_descs(sc, rxq);
5655 		if (rxq->rxq_lock)
5656 			mutex_obj_free(rxq->rxq_lock);
5657 	}
5658 
5659 	for (i = 0; i < sc->sc_nqueues; i++) {
5660 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5661 		wm_free_tx_buffer(sc, txq);
5662 		wm_free_tx_descs(sc, txq);
5663 		if (txq->txq_lock)
5664 			mutex_obj_free(txq->txq_lock);
5665 	}
5666 
5667 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
5668 }
5669 
5670 static void
5671 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5672 {
5673 
5674 	KASSERT(mutex_owned(txq->txq_lock));
5675 
5676 	/* Initialize the transmit descriptor ring. */
5677 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
5678 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
5679 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5680 	txq->txq_free = WM_NTXDESC(txq);
5681 	txq->txq_next = 0;
5682 }
5683 
5684 static void
5685 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
5686     struct wm_txqueue *txq)
5687 {
5688 
5689 	KASSERT(mutex_owned(txq->txq_lock));
5690 
5691 	if (sc->sc_type < WM_T_82543) {
5692 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
5693 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
5694 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
5695 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
5696 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
5697 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
5698 	} else {
5699 		int qid = wmq->wmq_id;
5700 
5701 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
5702 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
5703 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
5704 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
5705 
5706 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5707 			/*
5708 			 * Don't write TDT before TCTL.EN is set.
5709 			 * See the hardware documentation.
5710 			 */
5711 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
5712 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
5713 			    | TXDCTL_WTHRESH(0));
5714 		else {
5715 			/* ITR / 4 */
5716 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
5717 			if (sc->sc_type >= WM_T_82540) {
5718 				/* TADV should hold the same value as TIDV */
5719 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
5720 			}
5721 
5722 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
5723 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
5724 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
5725 		}
5726 	}
5727 }
5728 
5729 static void
5730 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5731 {
5732 	int i;
5733 
5734 	KASSERT(mutex_owned(txq->txq_lock));
5735 
5736 	/* Initialize the transmit job descriptors. */
5737 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
5738 		txq->txq_soft[i].txs_mbuf = NULL;
5739 	txq->txq_sfree = WM_TXQUEUELEN(txq);
5740 	txq->txq_snext = 0;
5741 	txq->txq_sdirty = 0;
5742 }
5743 
5744 static void
5745 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
5746     struct wm_txqueue *txq)
5747 {
5748 
5749 	KASSERT(mutex_owned(txq->txq_lock));
5750 
5751 	/*
5752 	 * Set up some register offsets that are different between
5753 	 * the i82542 and the i82543 and later chips.
5754 	 */
5755 	if (sc->sc_type < WM_T_82543)
5756 		txq->txq_tdt_reg = WMREG_OLD_TDT;
5757 	else
5758 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
5759 
5760 	wm_init_tx_descs(sc, txq);
5761 	wm_init_tx_regs(sc, wmq, txq);
5762 	wm_init_tx_buffer(sc, txq);
5763 }
5764 
5765 static void
5766 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
5767     struct wm_rxqueue *rxq)
5768 {
5769 
5770 	KASSERT(mutex_owned(rxq->rxq_lock));
5771 
5772 	/*
5773 	 * Initialize the receive descriptor and receive job
5774 	 * descriptor rings.
5775 	 */
5776 	if (sc->sc_type < WM_T_82543) {
5777 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
5778 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
5779 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
5780 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
5781 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
5782 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
5783 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
5784 
5785 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
5786 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
5787 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
5788 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
5789 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
5790 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
5791 	} else {
5792 		int qid = wmq->wmq_id;
5793 
5794 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
5795 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
5796 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
5797 
5798 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
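			/*
			 * SRRCTL expresses the receive buffer size in
			 * 1 << SRRCTL_BSIZEPKT_SHIFT byte units, so MCLBYTES
			 * must be a multiple of that unit.
			 */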
5799 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
5800 				panic("%s: MCLBYTES %d unsupported for i82575 or higher\n", __func__, MCLBYTES);
5801 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
5802 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
5803 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
5804 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
5805 			    | RXDCTL_WTHRESH(1));
5806 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
5807 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
5808 		} else {
5809 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
5810 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
5811 			/* ITR / 4 */
5812 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
5813 			/* MUST be the same value as RDTR */
5814 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
5815 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
5816 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
5817 		}
5818 	}
5819 }
5820 
5821 static int
5822 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5823 {
5824 	struct wm_rxsoft *rxs;
5825 	int error, i;
5826 
5827 	KASSERT(mutex_owned(rxq->rxq_lock));
5828 
5829 	for (i = 0; i < WM_NRXDESC; i++) {
5830 		rxs = &rxq->rxq_soft[i];
5831 		if (rxs->rxs_mbuf == NULL) {
5832 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
5833 				log(LOG_ERR, "%s: unable to allocate or map "
5834 				    "rx buffer %d, error = %d\n",
5835 				    device_xname(sc->sc_dev), i, error);
5836 				/*
5837 				 * XXX Should attempt to run with fewer receive
5838 				 * XXX buffers instead of just failing.
5839 				 */
5840 				wm_rxdrain(rxq);
5841 				return ENOMEM;
5842 			}
5843 		} else {
5844 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
5845 				wm_init_rxdesc(rxq, i);
5846 			/*
5847 			 * For 82575 and newer devices, the RX descriptors
5848 			 * must be initialized after RCTL.EN is set in
5849 			 * wm_set_filter().
5850 			 */
5851 		}
5852 	}
5853 	rxq->rxq_ptr = 0;
5854 	rxq->rxq_discard = 0;
5855 	WM_RXCHAIN_RESET(rxq);
5856 
5857 	return 0;
5858 }
5859 
5860 static int
5861 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
5862     struct wm_rxqueue *rxq)
5863 {
5864 
5865 	KASSERT(mutex_owned(rxq->rxq_lock));
5866 
5867 	/*
5868 	 * Set up some register offsets that are different between
5869 	 * the i82542 and the i82543 and later chips.
5870 	 */
5871 	if (sc->sc_type < WM_T_82543)
5872 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
5873 	else
5874 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
5875 
5876 	wm_init_rx_regs(sc, wmq, rxq);
5877 	return wm_init_rx_buffer(sc, rxq);
5878 }
5879 
5880 /*
5881  * wm_init_txrx_queues:
5882  *	Initialize {tx,rx} descriptors and {tx,rx} buffers
5883  */
5884 static int
5885 wm_init_txrx_queues(struct wm_softc *sc)
5886 {
5887 	int i, error = 0;
5888 
5889 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5890 		device_xname(sc->sc_dev), __func__));
5891 	for (i = 0; i < sc->sc_nqueues; i++) {
5892 		struct wm_queue *wmq = &sc->sc_queue[i];
5893 		struct wm_txqueue *txq = &wmq->wmq_txq;
5894 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
5895 
5896 		mutex_enter(txq->txq_lock);
5897 		wm_init_tx_queue(sc, wmq, txq);
5898 		mutex_exit(txq->txq_lock);
5899 
5900 		mutex_enter(rxq->rxq_lock);
5901 		error = wm_init_rx_queue(sc, wmq, rxq);
5902 		mutex_exit(rxq->rxq_lock);
5903 		if (error)
5904 			break;
5905 	}
5906 
5907 	return error;
5908 }
5909 
5910 /*
5911  * wm_tx_offload:
5912  *
5913  *	Set up TCP/IP checksumming parameters for the
5914  *	specified packet.
5915  */
5916 static int
5917 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
5918     uint8_t *fieldsp)
5919 {
5920 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
5921 	struct mbuf *m0 = txs->txs_mbuf;
5922 	struct livengood_tcpip_ctxdesc *t;
5923 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
5924 	uint32_t ipcse;
5925 	struct ether_header *eh;
5926 	int offset, iphl;
5927 	uint8_t fields;
5928 
5929 	/*
5930 	 * XXX It would be nice if the mbuf pkthdr had offset
5931 	 * fields for the protocol headers.
5932 	 */
5933 
5934 	eh = mtod(m0, struct ether_header *);
5935 	switch (htons(eh->ether_type)) {
5936 	case ETHERTYPE_IP:
5937 	case ETHERTYPE_IPV6:
5938 		offset = ETHER_HDR_LEN;
5939 		break;
5940 
5941 	case ETHERTYPE_VLAN:
5942 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
5943 		break;
5944 
5945 	default:
5946 		/*
5947 		 * Don't support this protocol or encapsulation.
5948 		 */
5949 		*fieldsp = 0;
5950 		*cmdp = 0;
5951 		return 0;
5952 	}
5953 
5954 	if ((m0->m_pkthdr.csum_flags &
5955 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
5956 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
5957 	} else {
5958 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
5959 	}
5960 	ipcse = offset + iphl - 1;
5961 
5962 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
5963 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
5964 	seg = 0;
5965 	fields = 0;
5966 
5967 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
5968 		int hlen = offset + iphl;
5969 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
5970 
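		/*
		 * For TSO the hardware rewrites the length fields of each
		 * generated segment, so zero ip_len/ip6_plen here and seed
		 * th_sum with a pseudo-header checksum computed without the
		 * length (addresses and protocol only).
		 */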
5971 		if (__predict_false(m0->m_len <
5972 				    (hlen + sizeof(struct tcphdr)))) {
5973 			/*
5974 			 * TCP/IP headers are not in the first mbuf; we need
5975 			 * to do this the slow and painful way.  Let's just
5976 			 * hope this doesn't happen very often.
5977 			 */
5978 			struct tcphdr th;
5979 
5980 			WM_Q_EVCNT_INCR(txq, txtsopain);
5981 
5982 			m_copydata(m0, hlen, sizeof(th), &th);
5983 			if (v4) {
5984 				struct ip ip;
5985 
5986 				m_copydata(m0, offset, sizeof(ip), &ip);
5987 				ip.ip_len = 0;
5988 				m_copyback(m0,
5989 				    offset + offsetof(struct ip, ip_len),
5990 				    sizeof(ip.ip_len), &ip.ip_len);
5991 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
5992 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
5993 			} else {
5994 				struct ip6_hdr ip6;
5995 
5996 				m_copydata(m0, offset, sizeof(ip6), &ip6);
5997 				ip6.ip6_plen = 0;
5998 				m_copyback(m0,
5999 				    offset + offsetof(struct ip6_hdr, ip6_plen),
6000 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
6001 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
6002 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
6003 			}
6004 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
6005 			    sizeof(th.th_sum), &th.th_sum);
6006 
6007 			hlen += th.th_off << 2;
6008 		} else {
6009 			/*
6010 			 * TCP/IP headers are in the first mbuf; we can do
6011 			 * this the easy way.
6012 			 */
6013 			struct tcphdr *th;
6014 
6015 			if (v4) {
6016 				struct ip *ip =
6017 				    (void *)(mtod(m0, char *) + offset);
6018 				th = (void *)(mtod(m0, char *) + hlen);
6019 
6020 				ip->ip_len = 0;
6021 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
6022 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
6023 			} else {
6024 				struct ip6_hdr *ip6 =
6025 				    (void *)(mtod(m0, char *) + offset);
6026 				th = (void *)(mtod(m0, char *) + hlen);
6027 
6028 				ip6->ip6_plen = 0;
6029 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
6030 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
6031 			}
6032 			hlen += th->th_off << 2;
6033 		}
6034 
6035 		if (v4) {
6036 			WM_Q_EVCNT_INCR(txq, txtso);
6037 			cmdlen |= WTX_TCPIP_CMD_IP;
6038 		} else {
6039 			WM_Q_EVCNT_INCR(txq, txtso6);
6040 			ipcse = 0;
6041 		}
6042 		cmd |= WTX_TCPIP_CMD_TSE;
6043 		cmdlen |= WTX_TCPIP_CMD_TSE |
6044 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
6045 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
6046 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
6047 	}
6048 
6049 	/*
6050 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
6051 	 * offload feature, if we load the context descriptor, we
6052 	 * MUST provide valid values for IPCSS and TUCSS fields.
6053 	 */
6054 
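	/*
	 * IPCSS is the offset of the start of the IP header, IPCSO the
	 * offset of the ip_sum field, and IPCSE the offset of the last
	 * byte of the IP header (cleared below in the TSOv6 case).
	 */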
6055 	ipcs = WTX_TCPIP_IPCSS(offset) |
6056 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
6057 	    WTX_TCPIP_IPCSE(ipcse);
6058 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
6059 		WM_Q_EVCNT_INCR(txq, txipsum);
6060 		fields |= WTX_IXSM;
6061 	}
6062 
6063 	offset += iphl;
6064 
6065 	if (m0->m_pkthdr.csum_flags &
6066 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
6067 		WM_Q_EVCNT_INCR(txq, txtusum);
6068 		fields |= WTX_TXSM;
6069 		tucs = WTX_TCPIP_TUCSS(offset) |
6070 		    WTX_TCPIP_TUCSO(offset +
6071 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
6072 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
6073 	} else if ((m0->m_pkthdr.csum_flags &
6074 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
6075 		WM_Q_EVCNT_INCR(txq, txtusum6);
6076 		fields |= WTX_TXSM;
6077 		tucs = WTX_TCPIP_TUCSS(offset) |
6078 		    WTX_TCPIP_TUCSO(offset +
6079 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
6080 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
6081 	} else {
6082 		/* Just initialize it to a valid TCP context. */
6083 		tucs = WTX_TCPIP_TUCSS(offset) |
6084 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
6085 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
6086 	}
6087 
6088 	/* Fill in the context descriptor. */
6089 	t = (struct livengood_tcpip_ctxdesc *)
6090 	    &txq->txq_descs[txq->txq_next];
6091 	t->tcpip_ipcs = htole32(ipcs);
6092 	t->tcpip_tucs = htole32(tucs);
6093 	t->tcpip_cmdlen = htole32(cmdlen);
6094 	t->tcpip_seg = htole32(seg);
6095 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6096 
6097 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6098 	txs->txs_ndesc++;
6099 
6100 	*cmdp = cmd;
6101 	*fieldsp = fields;
6102 
6103 	return 0;
6104 }
6105 
6106 /*
6107  * wm_start:		[ifnet interface function]
6108  *
6109  *	Start packet transmission on the interface.
6110  */
6111 static void
6112 wm_start(struct ifnet *ifp)
6113 {
6114 	struct wm_softc *sc = ifp->if_softc;
6115 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6116 
6117 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
6118 
6119 	mutex_enter(txq->txq_lock);
6120 	if (!sc->sc_stopping)
6121 		wm_start_locked(ifp);
6122 	mutex_exit(txq->txq_lock);
6123 }
6124 
6125 static void
6126 wm_start_locked(struct ifnet *ifp)
6127 {
6128 	struct wm_softc *sc = ifp->if_softc;
6129 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6130 	struct mbuf *m0;
6131 	struct m_tag *mtag;
6132 	struct wm_txsoft *txs;
6133 	bus_dmamap_t dmamap;
6134 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
6135 	bus_addr_t curaddr;
6136 	bus_size_t seglen, curlen;
6137 	uint32_t cksumcmd;
6138 	uint8_t cksumfields;
6139 
6140 	KASSERT(mutex_owned(txq->txq_lock));
6141 
6142 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
6143 		return;
6144 
6145 	/* Remember the previous number of free descriptors. */
6146 	ofree = txq->txq_free;
6147 
6148 	/*
6149 	 * Loop through the send queue, setting up transmit descriptors
6150 	 * until we drain the queue, or use up all available transmit
6151 	 * descriptors.
6152 	 */
6153 	for (;;) {
6154 		m0 = NULL;
6155 
6156 		/* Get a work queue entry. */
6157 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
6158 			wm_txeof(sc, txq);
6159 			if (txq->txq_sfree == 0) {
6160 				DPRINTF(WM_DEBUG_TX,
6161 				    ("%s: TX: no free job descriptors\n",
6162 					device_xname(sc->sc_dev)));
6163 				WM_Q_EVCNT_INCR(txq, txsstall);
6164 				break;
6165 			}
6166 		}
6167 
6168 		/* Grab a packet off the queue. */
6169 		IFQ_DEQUEUE(&ifp->if_snd, m0);
6170 		if (m0 == NULL)
6171 			break;
6172 
6173 		DPRINTF(WM_DEBUG_TX,
6174 		    ("%s: TX: have packet to transmit: %p\n",
6175 		    device_xname(sc->sc_dev), m0));
6176 
6177 		txs = &txq->txq_soft[txq->txq_snext];
6178 		dmamap = txs->txs_dmamap;
6179 
6180 		use_tso = (m0->m_pkthdr.csum_flags &
6181 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
6182 
6183 		/*
6184 		 * So says the Linux driver:
6185 		 * The controller does a simple calculation to make sure
6186 		 * there is enough room in the FIFO before initiating the
6187 		 * DMA for each buffer.  The calc is:
6188 		 *	4 = ceil(buffer len / MSS)
6189 		 * To make sure we don't overrun the FIFO, adjust the max
6190 		 * buffer len if the MSS drops.
6191 		 */
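		/*
		 * E.g. an MSS of 1460 caps each DMA segment at
		 * 4 * 1460 = 5840 bytes, assuming that is below WTX_MAX_LEN.
		 */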
6192 		dmamap->dm_maxsegsz =
6193 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
6194 		    ? m0->m_pkthdr.segsz << 2
6195 		    : WTX_MAX_LEN;
6196 
6197 		/*
6198 		 * Load the DMA map.  If this fails, the packet either
6199 		 * didn't fit in the allotted number of segments, or we
6200 		 * were short on resources.  For the too-many-segments
6201 		 * case, we simply report an error and drop the packet,
6202 		 * since we can't sanely copy a jumbo packet to a single
6203 		 * buffer.
6204 		 */
6205 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
6206 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
6207 		if (error) {
6208 			if (error == EFBIG) {
6209 				WM_Q_EVCNT_INCR(txq, txdrop);
6210 				log(LOG_ERR, "%s: Tx packet consumes too many "
6211 				    "DMA segments, dropping...\n",
6212 				    device_xname(sc->sc_dev));
6213 				wm_dump_mbuf_chain(sc, m0);
6214 				m_freem(m0);
6215 				continue;
6216 			}
6217 			/* Short on resources, just stop for now. */
6218 			DPRINTF(WM_DEBUG_TX,
6219 			    ("%s: TX: dmamap load failed: %d\n",
6220 			    device_xname(sc->sc_dev), error));
6221 			break;
6222 		}
6223 
6224 		segs_needed = dmamap->dm_nsegs;
6225 		if (use_tso) {
6226 			/* For sentinel descriptor; see below. */
6227 			segs_needed++;
6228 		}
6229 
6230 		/*
6231 		 * Ensure we have enough descriptors free to describe
6232 		 * the packet.  Note, we always reserve one descriptor
6233 		 * at the end of the ring due to the semantics of the
6234 		 * TDT register, plus one more in the event we need
6235 		 * to load offload context.
6236 		 */
6237 		if (segs_needed > txq->txq_free - 2) {
6238 			/*
6239 			 * Not enough free descriptors to transmit this
6240 			 * packet.  We haven't committed anything yet,
6241 			 * so just unload the DMA map, put the packet
6242 			 * back on the queue, and punt.  Notify the upper
6243 			 * layer that there are no more slots left.
6244 			 */
6245 			DPRINTF(WM_DEBUG_TX,
6246 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
6247 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
6248 			    segs_needed, txq->txq_free - 1));
6249 			ifp->if_flags |= IFF_OACTIVE;
6250 			bus_dmamap_unload(sc->sc_dmat, dmamap);
6251 			WM_Q_EVCNT_INCR(txq, txdstall);
6252 			break;
6253 		}
6254 
6255 		/*
6256 		 * Check for 82547 Tx FIFO bug.  We need to do this
6257 		 * once we know we can transmit the packet, since we
6258 		 * do some internal FIFO space accounting here.
6259 		 */
6260 		if (sc->sc_type == WM_T_82547 &&
6261 		    wm_82547_txfifo_bugchk(sc, m0)) {
6262 			DPRINTF(WM_DEBUG_TX,
6263 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
6264 			    device_xname(sc->sc_dev)));
6265 			ifp->if_flags |= IFF_OACTIVE;
6266 			bus_dmamap_unload(sc->sc_dmat, dmamap);
6267 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
6268 			break;
6269 		}
6270 
6271 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
6272 
6273 		DPRINTF(WM_DEBUG_TX,
6274 		    ("%s: TX: packet has %d (%d) DMA segments\n",
6275 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
6276 
6277 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
6278 
6279 		/*
6280 		 * Store a pointer to the packet so that we can free it
6281 		 * later.
6282 		 *
6283 		 * Initially, we consider the number of descriptors the
6284 		 * packet uses to be the number of DMA segments.  This may be
6285 		 * incremented by 1 if we do checksum offload (a descriptor
6286 		 * is used to set the checksum context).
6287 		 */
6288 		txs->txs_mbuf = m0;
6289 		txs->txs_firstdesc = txq->txq_next;
6290 		txs->txs_ndesc = segs_needed;
6291 
6292 		/* Set up offload parameters for this packet. */
6293 		if (m0->m_pkthdr.csum_flags &
6294 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
6295 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
6296 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
6297 			if (wm_tx_offload(sc, txs, &cksumcmd,
6298 					  &cksumfields) != 0) {
6299 				/* Error message already displayed. */
6300 				bus_dmamap_unload(sc->sc_dmat, dmamap);
6301 				continue;
6302 			}
6303 		} else {
6304 			cksumcmd = 0;
6305 			cksumfields = 0;
6306 		}
6307 
6308 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
6309 
6310 		/* Sync the DMA map. */
6311 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
6312 		    BUS_DMASYNC_PREWRITE);
6313 
6314 		/* Initialize the transmit descriptor. */
6315 		for (nexttx = txq->txq_next, seg = 0;
6316 		     seg < dmamap->dm_nsegs; seg++) {
6317 			for (seglen = dmamap->dm_segs[seg].ds_len,
6318 			     curaddr = dmamap->dm_segs[seg].ds_addr;
6319 			     seglen != 0;
6320 			     curaddr += curlen, seglen -= curlen,
6321 			     nexttx = WM_NEXTTX(txq, nexttx)) {
6322 				curlen = seglen;
6323 
6324 				/*
6325 				 * So says the Linux driver:
6326 				 * Work around for premature descriptor
6327 				 * write-backs in TSO mode.  Append a
6328 				 * 4-byte sentinel descriptor.
6329 				 */
6330 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
6331 				    curlen > 8)
6332 					curlen -= 4;
6333 
6334 				wm_set_dma_addr(
6335 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
6336 				txq->txq_descs[nexttx].wtx_cmdlen
6337 				    = htole32(cksumcmd | curlen);
6338 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
6339 				    = 0;
6340 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
6341 				    = cksumfields;
6342 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
6343 				lasttx = nexttx;
6344 
6345 				DPRINTF(WM_DEBUG_TX,
6346 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
6347 				     "len %#04zx\n",
6348 				    device_xname(sc->sc_dev), nexttx,
6349 				    (uint64_t)curaddr, curlen));
6350 			}
6351 		}
6352 
6353 		KASSERT(lasttx != -1);
6354 
6355 		/*
6356 		 * Set up the command byte on the last descriptor of
6357 		 * the packet.  If we're in the interrupt delay window,
6358 		 * delay the interrupt.
6359 		 */
6360 		txq->txq_descs[lasttx].wtx_cmdlen |=
6361 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
6362 
6363 		/*
6364 		 * If VLANs are enabled and the packet has a VLAN tag, set
6365 		 * up the descriptor to encapsulate the packet for us.
6366 		 *
6367 		 * This is only valid on the last descriptor of the packet.
6368 		 */
6369 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6370 			txq->txq_descs[lasttx].wtx_cmdlen |=
6371 			    htole32(WTX_CMD_VLE);
6372 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
6373 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
6374 		}
6375 
6376 		txs->txs_lastdesc = lasttx;
6377 
6378 		DPRINTF(WM_DEBUG_TX,
6379 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
6380 		    device_xname(sc->sc_dev),
6381 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
6382 
6383 		/* Sync the descriptors we're using. */
6384 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
6385 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
6386 
6387 		/* Give the packet to the chip. */
6388 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
6389 
6390 		DPRINTF(WM_DEBUG_TX,
6391 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
6392 
6393 		DPRINTF(WM_DEBUG_TX,
6394 		    ("%s: TX: finished transmitting packet, job %d\n",
6395 		    device_xname(sc->sc_dev), txq->txq_snext));
6396 
6397 		/* Advance the tx pointer. */
6398 		txq->txq_free -= txs->txs_ndesc;
6399 		txq->txq_next = nexttx;
6400 
6401 		txq->txq_sfree--;
6402 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
6403 
6404 		/* Pass the packet to any BPF listeners. */
6405 		bpf_mtap(ifp, m0);
6406 	}
6407 
6408 	if (m0 != NULL) {
6409 		ifp->if_flags |= IFF_OACTIVE;
6410 		WM_Q_EVCNT_INCR(txq, txdrop);
6411 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
6412 			__func__));
6413 		m_freem(m0);
6414 	}
6415 
6416 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
6417 		/* No more slots; notify upper layer. */
6418 		ifp->if_flags |= IFF_OACTIVE;
6419 	}
6420 
6421 	if (txq->txq_free != ofree) {
6422 		/* Set a watchdog timer in case the chip flakes out. */
6423 		ifp->if_timer = 5;
6424 	}
6425 }
6426 
6427 /*
6428  * wm_nq_tx_offload:
6429  *
6430  *	Set up TCP/IP checksumming parameters for the
6431  *	specified packet, for NEWQUEUE devices
6432  */
6433 static int
6434 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
6435     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
6436 {
6437 	struct mbuf *m0 = txs->txs_mbuf;
6438 	struct m_tag *mtag;
6439 	uint32_t vl_len, mssidx, cmdc;
6440 	struct ether_header *eh;
6441 	int offset, iphl;
6442 
6443 	/*
6444 	 * XXX It would be nice if the mbuf pkthdr had offset
6445 	 * fields for the protocol headers.
6446 	 */
6447 	*cmdlenp = 0;
6448 	*fieldsp = 0;
6449 
6450 	eh = mtod(m0, struct ether_header *);
6451 	switch (htons(eh->ether_type)) {
6452 	case ETHERTYPE_IP:
6453 	case ETHERTYPE_IPV6:
6454 		offset = ETHER_HDR_LEN;
6455 		break;
6456 
6457 	case ETHERTYPE_VLAN:
6458 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
6459 		break;
6460 
6461 	default:
6462 		/* Don't support this protocol or encapsulation. */
6463 		*do_csum = false;
6464 		return 0;
6465 	}
6466 	*do_csum = true;
6467 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
6468 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
6469 
6470 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
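	/*
	 * vl_len packs the L2 header length (MACLEN), the IP header length
	 * (IPLEN) and the VLAN tag into one word of the advanced context
	 * descriptor.
	 */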
6471 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
6472 
6473 	if ((m0->m_pkthdr.csum_flags &
6474 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
6475 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
6476 	} else {
6477 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
6478 	}
6479 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
6480 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
6481 
6482 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6483 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
6484 		     << NQTXC_VLLEN_VLAN_SHIFT);
6485 		*cmdlenp |= NQTX_CMD_VLE;
6486 	}
6487 
6488 	mssidx = 0;
6489 
6490 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
6491 		int hlen = offset + iphl;
6492 		int tcp_hlen;
6493 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
6494 
6495 		if (__predict_false(m0->m_len <
6496 				    (hlen + sizeof(struct tcphdr)))) {
6497 			/*
6498 			 * TCP/IP headers are not in the first mbuf; we need
6499 			 * to do this the slow and painful way.  Let's just
6500 			 * hope this doesn't happen very often.
6501 			 */
6502 			struct tcphdr th;
6503 
6504 			WM_Q_EVCNT_INCR(txq, txtsopain);
6505 
6506 			m_copydata(m0, hlen, sizeof(th), &th);
6507 			if (v4) {
6508 				struct ip ip;
6509 
6510 				m_copydata(m0, offset, sizeof(ip), &ip);
6511 				ip.ip_len = 0;
6512 				m_copyback(m0,
6513 				    offset + offsetof(struct ip, ip_len),
6514 				    sizeof(ip.ip_len), &ip.ip_len);
6515 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
6516 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
6517 			} else {
6518 				struct ip6_hdr ip6;
6519 
6520 				m_copydata(m0, offset, sizeof(ip6), &ip6);
6521 				ip6.ip6_plen = 0;
6522 				m_copyback(m0,
6523 				    offset + offsetof(struct ip6_hdr, ip6_plen),
6524 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
6525 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
6526 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
6527 			}
6528 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
6529 			    sizeof(th.th_sum), &th.th_sum);
6530 
6531 			tcp_hlen = th.th_off << 2;
6532 		} else {
6533 			/*
6534 			 * TCP/IP headers are in the first mbuf; we can do
6535 			 * this the easy way.
6536 			 */
6537 			struct tcphdr *th;
6538 
6539 			if (v4) {
6540 				struct ip *ip =
6541 				    (void *)(mtod(m0, char *) + offset);
6542 				th = (void *)(mtod(m0, char *) + hlen);
6543 
6544 				ip->ip_len = 0;
6545 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
6546 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
6547 			} else {
6548 				struct ip6_hdr *ip6 =
6549 				    (void *)(mtod(m0, char *) + offset);
6550 				th = (void *)(mtod(m0, char *) + hlen);
6551 
6552 				ip6->ip6_plen = 0;
6553 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
6554 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
6555 			}
6556 			tcp_hlen = th->th_off << 2;
6557 		}
6558 		hlen += tcp_hlen;
6559 		*cmdlenp |= NQTX_CMD_TSE;
6560 
6561 		if (v4) {
6562 			WM_Q_EVCNT_INCR(txq, txtso);
6563 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
6564 		} else {
6565 			WM_Q_EVCNT_INCR(txq, txtso6);
6566 			*fieldsp |= NQTXD_FIELDS_TUXSM;
6567 		}
6568 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
6569 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6570 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
6571 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
6572 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
6573 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
6574 	} else {
6575 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
6576 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6577 	}
6578 
6579 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
6580 		*fieldsp |= NQTXD_FIELDS_IXSM;
6581 		cmdc |= NQTXC_CMD_IP4;
6582 	}
6583 
6584 	if (m0->m_pkthdr.csum_flags &
6585 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6586 		WM_Q_EVCNT_INCR(txq, txtusum);
6587 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6588 			cmdc |= NQTXC_CMD_TCP;
6589 		} else {
6590 			cmdc |= NQTXC_CMD_UDP;
6591 		}
6592 		cmdc |= NQTXC_CMD_IP4;
6593 		*fieldsp |= NQTXD_FIELDS_TUXSM;
6594 	}
6595 	if (m0->m_pkthdr.csum_flags &
6596 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6597 		WM_Q_EVCNT_INCR(txq, txtusum6);
6598 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6599 			cmdc |= NQTXC_CMD_TCP;
6600 		} else {
6601 			cmdc |= NQTXC_CMD_UDP;
6602 		}
6603 		cmdc |= NQTXC_CMD_IP6;
6604 		*fieldsp |= NQTXD_FIELDS_TUXSM;
6605 	}
6606 
6607 	/* Fill in the context descriptor. */
6608 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
6609 	    htole32(vl_len);
6610 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
6611 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
6612 	    htole32(cmdc);
6613 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
6614 	    htole32(mssidx);
6615 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6616 	DPRINTF(WM_DEBUG_TX,
6617 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
6618 	    txq->txq_next, 0, vl_len));
6619 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
6620 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6621 	txs->txs_ndesc++;
6622 	return 0;
6623 }
6624 
6625 /*
6626  * wm_nq_start:		[ifnet interface function]
6627  *
6628  *	Start packet transmission on the interface for NEWQUEUE devices
6629  */
6630 static void
6631 wm_nq_start(struct ifnet *ifp)
6632 {
6633 	struct wm_softc *sc = ifp->if_softc;
6634 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6635 
6636 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
6637 
6638 	mutex_enter(txq->txq_lock);
6639 	if (!sc->sc_stopping)
6640 		wm_nq_start_locked(ifp);
6641 	mutex_exit(txq->txq_lock);
6642 }
6643 
6644 static void
6645 wm_nq_start_locked(struct ifnet *ifp)
6646 {
6647 	struct wm_softc *sc = ifp->if_softc;
6648 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6649 
6650 	wm_nq_send_common_locked(ifp, txq, false);
6651 }
6652 
6653 static inline int
6654 wm_nq_select_txqueue(struct ifnet *ifp, struct mbuf *m)
6655 {
6656 	struct wm_softc *sc = ifp->if_softc;
6657 	u_int cpuid = cpu_index(curcpu());
6658 
6659 	/*
6660 	 * Currently a simple distribution strategy.
6661 	 * TODO:
6662 	 * Distribute by flow ID (the RSS hash value).
6663 	 */
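	/*
	 * E.g. with sc_nqueues = 4, CPU index 5 and an affinity offset of 1
	 * select queue (5 + 1) % 4 = 2.
	 */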
6664 	return (cpuid + sc->sc_affinity_offset) % sc->sc_nqueues;
6665 }
6666 
6667 static int
6668 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
6669 {
6670 	int qid;
6671 	struct wm_softc *sc = ifp->if_softc;
6672 	struct wm_txqueue *txq;
6673 
6674 	qid = wm_nq_select_txqueue(ifp, m);
6675 	txq = &sc->sc_queue[qid].wmq_txq;
6676 
6677 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
6678 		m_freem(m);
6679 		WM_Q_EVCNT_INCR(txq, txdrop);
6680 		return ENOBUFS;
6681 	}
6682 
6683 	if (mutex_tryenter(txq->txq_lock)) {
6684 		/* XXXX should be per TX queue */
6685 		ifp->if_obytes += m->m_pkthdr.len;
6686 		if (m->m_flags & M_MCAST)
6687 			ifp->if_omcasts++;
6688 
6689 		if (!sc->sc_stopping)
6690 			wm_nq_transmit_locked(ifp, txq);
6691 		mutex_exit(txq->txq_lock);
6692 	}
6693 
6694 	return 0;
6695 }
6696 
6697 static void
6698 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
6699 {
6700 
6701 	wm_nq_send_common_locked(ifp, txq, true);
6702 }
6703 
6704 static void
6705 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
6706     bool is_transmit)
6707 {
6708 	struct wm_softc *sc = ifp->if_softc;
6709 	struct mbuf *m0;
6710 	struct m_tag *mtag;
6711 	struct wm_txsoft *txs;
6712 	bus_dmamap_t dmamap;
6713 	int error, nexttx, lasttx = -1, seg, segs_needed;
6714 	bool do_csum, sent;
6715 
6716 	KASSERT(mutex_owned(txq->txq_lock));
6717 
6718 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
6719 		return;
6720 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
6721 		return;
6722 
6723 	sent = false;
6724 
6725 	/*
6726 	 * Loop through the send queue, setting up transmit descriptors
6727 	 * until we drain the queue, or use up all available transmit
6728 	 * descriptors.
6729 	 */
6730 	for (;;) {
6731 		m0 = NULL;
6732 
6733 		/* Get a work queue entry. */
6734 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
6735 			wm_txeof(sc, txq);
6736 			if (txq->txq_sfree == 0) {
6737 				DPRINTF(WM_DEBUG_TX,
6738 				    ("%s: TX: no free job descriptors\n",
6739 					device_xname(sc->sc_dev)));
6740 				WM_Q_EVCNT_INCR(txq, txsstall);
6741 				break;
6742 			}
6743 		}
6744 
6745 		/* Grab a packet off the queue. */
6746 		if (is_transmit)
6747 			m0 = pcq_get(txq->txq_interq);
6748 		else
6749 			IFQ_DEQUEUE(&ifp->if_snd, m0);
6750 		if (m0 == NULL)
6751 			break;
6752 
6753 		DPRINTF(WM_DEBUG_TX,
6754 		    ("%s: TX: have packet to transmit: %p\n",
6755 		    device_xname(sc->sc_dev), m0));
6756 
6757 		txs = &txq->txq_soft[txq->txq_snext];
6758 		dmamap = txs->txs_dmamap;
6759 
6760 		/*
6761 		 * Load the DMA map.  If this fails, the packet either
6762 		 * didn't fit in the allotted number of segments, or we
6763 		 * were short on resources.  For the too-many-segments
6764 		 * case, we simply report an error and drop the packet,
6765 		 * since we can't sanely copy a jumbo packet to a single
6766 		 * buffer.
6767 		 */
6768 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
6769 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
6770 		if (error) {
6771 			if (error == EFBIG) {
6772 				WM_Q_EVCNT_INCR(txq, txdrop);
6773 				log(LOG_ERR, "%s: Tx packet consumes too many "
6774 				    "DMA segments, dropping...\n",
6775 				    device_xname(sc->sc_dev));
6776 				wm_dump_mbuf_chain(sc, m0);
6777 				m_freem(m0);
6778 				continue;
6779 			}
6780 			/* Short on resources, just stop for now. */
6781 			DPRINTF(WM_DEBUG_TX,
6782 			    ("%s: TX: dmamap load failed: %d\n",
6783 			    device_xname(sc->sc_dev), error));
6784 			break;
6785 		}
6786 
6787 		segs_needed = dmamap->dm_nsegs;
6788 
6789 		/*
6790 		 * Ensure we have enough descriptors free to describe
6791 		 * the packet.  Note, we always reserve one descriptor
6792 		 * at the end of the ring due to the semantics of the
6793 		 * TDT register, plus one more in the event we need
6794 		 * to load offload context.
6795 		 */
6796 		if (segs_needed > txq->txq_free - 2) {
6797 			/*
6798 			 * Not enough free descriptors to transmit this
6799 			 * packet.  We haven't committed anything yet,
6800 			 * so just unload the DMA map, put the packet
6801 			 * back on the queue, and punt.  Notify the upper
6802 			 * layer that there are no more slots left.
6803 			 */
6804 			DPRINTF(WM_DEBUG_TX,
6805 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
6806 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
6807 			    segs_needed, txq->txq_free - 1));
6808 			txq->txq_flags |= WM_TXQ_NO_SPACE;
6809 			bus_dmamap_unload(sc->sc_dmat, dmamap);
6810 			WM_Q_EVCNT_INCR(txq, txdstall);
6811 			break;
6812 		}
6813 
6814 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
6815 
6816 		DPRINTF(WM_DEBUG_TX,
6817 		    ("%s: TX: packet has %d (%d) DMA segments\n",
6818 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
6819 
6820 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
6821 
6822 		/*
6823 		 * Store a pointer to the packet so that we can free it
6824 		 * later.
6825 		 *
6826 		 * Initially, we consider the number of descriptors the
6827 		 * packet uses to be the number of DMA segments.  This may be
6828 		 * incremented by 1 if we do checksum offload (a descriptor
6829 		 * is used to set the checksum context).
6830 		 */
6831 		txs->txs_mbuf = m0;
6832 		txs->txs_firstdesc = txq->txq_next;
6833 		txs->txs_ndesc = segs_needed;
6834 
6835 		/* Set up offload parameters for this packet. */
6836 		uint32_t cmdlen, fields, dcmdlen;
6837 		if (m0->m_pkthdr.csum_flags &
6838 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
6839 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
6840 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
6841 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
6842 			    &do_csum) != 0) {
6843 				/* Error message already displayed. */
6844 				bus_dmamap_unload(sc->sc_dmat, dmamap);
6845 				continue;
6846 			}
6847 		} else {
6848 			do_csum = false;
6849 			cmdlen = 0;
6850 			fields = 0;
6851 		}
6852 
6853 		/* Sync the DMA map. */
6854 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
6855 		    BUS_DMASYNC_PREWRITE);
6856 
6857 		/* Initialize the first transmit descriptor. */
6858 		nexttx = txq->txq_next;
6859 		if (!do_csum) {
6860 			/* setup a legacy descriptor */
6861 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
6862 			    dmamap->dm_segs[0].ds_addr);
6863 			txq->txq_descs[nexttx].wtx_cmdlen =
6864 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
6865 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
6866 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
6867 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
6868 			    NULL) {
6869 				txq->txq_descs[nexttx].wtx_cmdlen |=
6870 				    htole32(WTX_CMD_VLE);
6871 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
6872 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
6873 			} else {
6874 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
6875 			}
6876 			dcmdlen = 0;
6877 		} else {
6878 			/* setup an advanced data descriptor */
6879 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
6880 			    htole64(dmamap->dm_segs[0].ds_addr);
6881 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
6882 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
6883 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
6884 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
6885 			    htole32(fields);
6886 			DPRINTF(WM_DEBUG_TX,
6887 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
6888 			    device_xname(sc->sc_dev), nexttx,
6889 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
6890 			DPRINTF(WM_DEBUG_TX,
6891 			    ("\t 0x%08x%08x\n", fields,
6892 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
6893 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
6894 		}
6895 
6896 		lasttx = nexttx;
6897 		nexttx = WM_NEXTTX(txq, nexttx);
6898 		/*
6899 		 * Fill in the next descriptors. The legacy and advanced
6900 		 * formats are the same here.
6901 		 */
6902 		for (seg = 1; seg < dmamap->dm_nsegs;
6903 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
6904 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
6905 			    htole64(dmamap->dm_segs[seg].ds_addr);
6906 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
6907 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
6908 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
6909 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
6910 			lasttx = nexttx;
6911 
6912 			DPRINTF(WM_DEBUG_TX,
6913 			    ("%s: TX: desc %d: %#" PRIx64 ", "
6914 			     "len %#04zx\n",
6915 			    device_xname(sc->sc_dev), nexttx,
6916 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
6917 			    dmamap->dm_segs[seg].ds_len));
6918 		}
6919 
6920 		KASSERT(lasttx != -1);
6921 
6922 		/*
6923 		 * Set up the command byte on the last descriptor of
6924 		 * the packet.  If we're in the interrupt delay window,
6925 		 * delay the interrupt.
6926 		 */
6927 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
6928 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
6929 		txq->txq_descs[lasttx].wtx_cmdlen |=
6930 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
6931 
6932 		txs->txs_lastdesc = lasttx;
6933 
6934 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
6935 		    device_xname(sc->sc_dev),
6936 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
6937 
6938 		/* Sync the descriptors we're using. */
6939 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
6940 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
6941 
6942 		/* Give the packet to the chip. */
6943 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
6944 		sent = true;
6945 
6946 		DPRINTF(WM_DEBUG_TX,
6947 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
6948 
6949 		DPRINTF(WM_DEBUG_TX,
6950 		    ("%s: TX: finished transmitting packet, job %d\n",
6951 		    device_xname(sc->sc_dev), txq->txq_snext));
6952 
6953 		/* Advance the tx pointer. */
6954 		txq->txq_free -= txs->txs_ndesc;
6955 		txq->txq_next = nexttx;
6956 
6957 		txq->txq_sfree--;
6958 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
6959 
6960 		/* Pass the packet to any BPF listeners. */
6961 		bpf_mtap(ifp, m0);
6962 	}
6963 
6964 	if (m0 != NULL) {
6965 		txq->txq_flags |= WM_TXQ_NO_SPACE;
6966 		WM_Q_EVCNT_INCR(txq, txdrop);
6967 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
6968 			__func__));
6969 		m_freem(m0);
6970 	}
6971 
6972 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
6973 		/* No more slots; notify upper layer. */
6974 		txq->txq_flags |= WM_TXQ_NO_SPACE;
6975 	}
6976 
6977 	if (sent) {
6978 		/* Set a watchdog timer in case the chip flakes out. */
6979 		ifp->if_timer = 5;
6980 	}
6981 }
6982 
6983 /* Interrupt */
6984 
6985 /*
6986  * wm_txeof:
6987  *
6988  *	Helper; handle transmit interrupts.
6989  */
6990 static int
6991 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
6992 {
6993 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6994 	struct wm_txsoft *txs;
6995 	bool processed = false;
6996 	int count = 0;
6997 	int i;
6998 	uint8_t status;
6999 
7000 	KASSERT(mutex_owned(txq->txq_lock));
7001 
7002 	if (sc->sc_stopping)
7003 		return 0;
7004 
7005 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7006 		txq->txq_flags &= ~WM_TXQ_NO_SPACE;
7007 	else
7008 		ifp->if_flags &= ~IFF_OACTIVE;
7009 
7010 	/*
7011 	 * Go through the Tx list and free mbufs for those
7012 	 * frames which have been transmitted.
7013 	 */
7014 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
7015 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
7016 		txs = &txq->txq_soft[i];
7017 
7018 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
7019 			device_xname(sc->sc_dev), i));
7020 
7021 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
7022 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
7023 
7024 		status =
7025 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
7026 		if ((status & WTX_ST_DD) == 0) {
7027 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
7028 			    BUS_DMASYNC_PREREAD);
7029 			break;
7030 		}
7031 
7032 		processed = true;
7033 		count++;
7034 		DPRINTF(WM_DEBUG_TX,
7035 		    ("%s: TX: job %d done: descs %d..%d\n",
7036 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
7037 		    txs->txs_lastdesc));
7038 
7039 		/*
7040 		 * XXX We should probably be using the statistics
7041 		 * XXX registers, but I don't know if they exist
7042 		 * XXX on chips before the i82544.
7043 		 */
7044 
7045 #ifdef WM_EVENT_COUNTERS
7046 		if (status & WTX_ST_TU)
7047 			WM_Q_EVCNT_INCR(txq, tu);
7048 #endif /* WM_EVENT_COUNTERS */
7049 
7050 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
7051 			ifp->if_oerrors++;
7052 			if (status & WTX_ST_LC)
7053 				log(LOG_WARNING, "%s: late collision\n",
7054 				    device_xname(sc->sc_dev));
7055 			else if (status & WTX_ST_EC) {
7056 				ifp->if_collisions += 16;
7057 				log(LOG_WARNING, "%s: excessive collisions\n",
7058 				    device_xname(sc->sc_dev));
7059 			}
7060 		} else
7061 			ifp->if_opackets++;
7062 
7063 		txq->txq_free += txs->txs_ndesc;
7064 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
7065 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
7066 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
7067 		m_freem(txs->txs_mbuf);
7068 		txs->txs_mbuf = NULL;
7069 	}
7070 
7071 	/* Update the dirty transmit buffer pointer. */
7072 	txq->txq_sdirty = i;
7073 	DPRINTF(WM_DEBUG_TX,
7074 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
7075 
7076 	if (count != 0)
7077 		rnd_add_uint32(&sc->rnd_source, count);
7078 
7079 	/*
7080 	 * If there are no more pending transmissions, cancel the watchdog
7081 	 * timer.
7082 	 */
7083 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
7084 		ifp->if_timer = 0;
7085 
7086 	return processed;
7087 }
7088 
7089 /*
7090  * wm_rxeof:
7091  *
7092  *	Helper; handle receive interrupts.
7093  */
7094 static void
7095 wm_rxeof(struct wm_rxqueue *rxq)
7096 {
7097 	struct wm_softc *sc = rxq->rxq_sc;
7098 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7099 	struct wm_rxsoft *rxs;
7100 	struct mbuf *m;
7101 	int i, len;
7102 	int count = 0;
7103 	uint8_t status, errors;
7104 	uint16_t vlantag;
7105 
7106 	KASSERT(mutex_owned(rxq->rxq_lock));
7107 
7108 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
7109 		rxs = &rxq->rxq_soft[i];
7110 
7111 		DPRINTF(WM_DEBUG_RX,
7112 		    ("%s: RX: checking descriptor %d\n",
7113 		    device_xname(sc->sc_dev), i));
7114 
		wm_cdrxsync(rxq, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
7116 
7117 		status = rxq->rxq_descs[i].wrx_status;
7118 		errors = rxq->rxq_descs[i].wrx_errors;
7119 		len = le16toh(rxq->rxq_descs[i].wrx_len);
7120 		vlantag = rxq->rxq_descs[i].wrx_special;
7121 
7122 		if ((status & WRX_ST_DD) == 0) {
7123 			/* We have processed all of the receive descriptors. */
7124 			wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
7125 			break;
7126 		}
7127 
7128 		count++;
7129 		if (__predict_false(rxq->rxq_discard)) {
7130 			DPRINTF(WM_DEBUG_RX,
7131 			    ("%s: RX: discarding contents of descriptor %d\n",
7132 			    device_xname(sc->sc_dev), i));
7133 			wm_init_rxdesc(rxq, i);
7134 			if (status & WRX_ST_EOP) {
7135 				/* Reset our state. */
7136 				DPRINTF(WM_DEBUG_RX,
7137 				    ("%s: RX: resetting rxdiscard -> 0\n",
7138 				    device_xname(sc->sc_dev)));
7139 				rxq->rxq_discard = 0;
7140 			}
7141 			continue;
7142 		}
7143 
7144 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
7145 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
7146 
7147 		m = rxs->rxs_mbuf;
7148 
7149 		/*
7150 		 * Add a new receive buffer to the ring, unless of
7151 		 * course the length is zero. Treat the latter as a
7152 		 * failed mapping.
7153 		 */
7154 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
7155 			/*
7156 			 * Failed, throw away what we've done so
7157 			 * far, and discard the rest of the packet.
7158 			 */
7159 			ifp->if_ierrors++;
7160 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
7161 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
7162 			wm_init_rxdesc(rxq, i);
7163 			if ((status & WRX_ST_EOP) == 0)
7164 				rxq->rxq_discard = 1;
7165 			if (rxq->rxq_head != NULL)
7166 				m_freem(rxq->rxq_head);
7167 			WM_RXCHAIN_RESET(rxq);
7168 			DPRINTF(WM_DEBUG_RX,
7169 			    ("%s: RX: Rx buffer allocation failed, "
7170 			    "dropping packet%s\n", device_xname(sc->sc_dev),
7171 			    rxq->rxq_discard ? " (discard)" : ""));
7172 			continue;
7173 		}
7174 
7175 		m->m_len = len;
7176 		rxq->rxq_len += len;
7177 		DPRINTF(WM_DEBUG_RX,
7178 		    ("%s: RX: buffer at %p len %d\n",
7179 		    device_xname(sc->sc_dev), m->m_data, len));
7180 
7181 		/* If this is not the end of the packet, keep looking. */
7182 		if ((status & WRX_ST_EOP) == 0) {
7183 			WM_RXCHAIN_LINK(rxq, m);
7184 			DPRINTF(WM_DEBUG_RX,
7185 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
7186 			    device_xname(sc->sc_dev), rxq->rxq_len));
7187 			continue;
7188 		}
7189 
7190 		/*
7191 		 * Okay, we have the entire packet now.  The chip is
7192 		 * configured to include the FCS except I350 and I21[01]
7193 		 * (not all chips can be configured to strip it),
7194 		 * so we need to trim it.
7195 		 * May need to adjust length of previous mbuf in the
7196 		 * chain if the current mbuf is too short.
7197 		 * For an eratta, the RCTL_SECRC bit in RCTL register
7198 		 * is always set in I350, so we don't trim it.
7199 		 */
7200 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
7201 		    && (sc->sc_type != WM_T_I210)
7202 		    && (sc->sc_type != WM_T_I211)) {
7203 			if (m->m_len < ETHER_CRC_LEN) {
7204 				rxq->rxq_tail->m_len
7205 				    -= (ETHER_CRC_LEN - m->m_len);
7206 				m->m_len = 0;
7207 			} else
7208 				m->m_len -= ETHER_CRC_LEN;
7209 			len = rxq->rxq_len - ETHER_CRC_LEN;
7210 		} else
7211 			len = rxq->rxq_len;
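
		/*
		 * Worked example (illustrative): a 64-byte frame that
		 * arrives split as 62 + 2 bytes cannot absorb the 4-byte
		 * FCS in its final 2-byte mbuf, so the 2 missing bytes
		 * are trimmed from the previous mbuf and the last mbuf's
		 * length drops to 0.
		 */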
7212 
7213 		WM_RXCHAIN_LINK(rxq, m);
7214 
7215 		*rxq->rxq_tailp = NULL;
7216 		m = rxq->rxq_head;
7217 
7218 		WM_RXCHAIN_RESET(rxq);
7219 
7220 		DPRINTF(WM_DEBUG_RX,
7221 		    ("%s: RX: have entire packet, len -> %d\n",
7222 		    device_xname(sc->sc_dev), len));
7223 
7224 		/* If an error occurred, update stats and drop the packet. */
7225 		if (errors &
7226 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
7227 			if (errors & WRX_ER_SE)
7228 				log(LOG_WARNING, "%s: symbol error\n",
7229 				    device_xname(sc->sc_dev));
7230 			else if (errors & WRX_ER_SEQ)
7231 				log(LOG_WARNING, "%s: receive sequence error\n",
7232 				    device_xname(sc->sc_dev));
7233 			else if (errors & WRX_ER_CE)
7234 				log(LOG_WARNING, "%s: CRC error\n",
7235 				    device_xname(sc->sc_dev));
7236 			m_freem(m);
7237 			continue;
7238 		}
7239 
7240 		/* No errors.  Receive the packet. */
7241 		m_set_rcvif(m, ifp);
7242 		m->m_pkthdr.len = len;
7243 
7244 		/*
7245 		 * If VLANs are enabled, VLAN packets have been unwrapped
7246 		 * for us.  Associate the tag with the packet.
7247 		 */
		/* XXX should check for i350 and i354 */
7249 		if ((status & WRX_ST_VP) != 0) {
7250 			VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue);
7251 		}
7252 
7253 		/* Set up checksum info for this packet. */
7254 		if ((status & WRX_ST_IXSM) == 0) {
7255 			if (status & WRX_ST_IPCS) {
7256 				WM_Q_EVCNT_INCR(rxq, rxipsum);
7257 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
7258 				if (errors & WRX_ER_IPE)
7259 					m->m_pkthdr.csum_flags |=
7260 					    M_CSUM_IPv4_BAD;
7261 			}
7262 			if (status & WRX_ST_TCPCS) {
7263 				/*
7264 				 * Note: we don't know if this was TCP or UDP,
7265 				 * so we just set both bits, and expect the
7266 				 * upper layers to deal.
7267 				 */
7268 				WM_Q_EVCNT_INCR(rxq, rxtusum);
7269 				m->m_pkthdr.csum_flags |=
7270 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7271 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
7272 				if (errors & WRX_ER_TCPE)
7273 					m->m_pkthdr.csum_flags |=
7274 					    M_CSUM_TCP_UDP_BAD;
7275 			}
7276 		}
7277 
7278 		ifp->if_ipackets++;
7279 
7280 		mutex_exit(rxq->rxq_lock);
7281 
7282 		/* Pass this up to any BPF listeners. */
7283 		bpf_mtap(ifp, m);
7284 
7285 		/* Pass it on. */
7286 		if_percpuq_enqueue(sc->sc_ipq, m);
7287 
7288 		mutex_enter(rxq->rxq_lock);
7289 
7290 		if (sc->sc_stopping)
7291 			break;
7292 	}
7293 
7294 	/* Update the receive pointer. */
7295 	rxq->rxq_ptr = i;
7296 	if (count != 0)
7297 		rnd_add_uint32(&sc->rnd_source, count);
7298 
7299 	DPRINTF(WM_DEBUG_RX,
7300 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
7301 }
7302 
7303 /*
7304  * wm_linkintr_gmii:
7305  *
7306  *	Helper; handle link interrupts for GMII.
7307  */
7308 static void
7309 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
7310 {
7311 
7312 	KASSERT(WM_CORE_LOCKED(sc));
7313 
7314 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7315 		__func__));
7316 
7317 	if (icr & ICR_LSC) {
7318 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
7319 
7320 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
7321 			wm_gig_downshift_workaround_ich8lan(sc);
7322 
7323 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
7324 			device_xname(sc->sc_dev)));
7325 		mii_pollstat(&sc->sc_mii);
7326 		if (sc->sc_type == WM_T_82543) {
7327 			int miistatus, active;
7328 
7329 			/*
7330 			 * With 82543, we need to force speed and
7331 			 * duplex on the MAC equal to what the PHY
7332 			 * speed and duplex configuration is.
7333 			 */
7334 			miistatus = sc->sc_mii.mii_media_status;
7335 
7336 			if (miistatus & IFM_ACTIVE) {
7337 				active = sc->sc_mii.mii_media_active;
7338 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
7339 				switch (IFM_SUBTYPE(active)) {
7340 				case IFM_10_T:
7341 					sc->sc_ctrl |= CTRL_SPEED_10;
7342 					break;
7343 				case IFM_100_TX:
7344 					sc->sc_ctrl |= CTRL_SPEED_100;
7345 					break;
7346 				case IFM_1000_T:
7347 					sc->sc_ctrl |= CTRL_SPEED_1000;
7348 					break;
7349 				default:
7350 					/*
7351 					 * fiber?
7352 					 * Shoud not enter here.
7353 					 */
7354 					printf("unknown media (%x)\n", active);
7355 					break;
7356 				}
7357 				if (active & IFM_FDX)
7358 					sc->sc_ctrl |= CTRL_FD;
7359 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7360 			}
7361 		} else if ((sc->sc_type == WM_T_ICH8)
7362 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
7363 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
7364 		} else if (sc->sc_type == WM_T_PCH) {
7365 			wm_k1_gig_workaround_hv(sc,
7366 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
7367 		}
7368 
7369 		if ((sc->sc_phytype == WMPHY_82578)
7370 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
7371 			== IFM_1000_T)) {
7372 
7373 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
7374 				delay(200*1000); /* XXX too big */
7375 
7376 				/* Link stall fix for link up */
7377 				wm_gmii_hv_writereg(sc->sc_dev, 1,
7378 				    HV_MUX_DATA_CTRL,
7379 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
7380 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
7381 				wm_gmii_hv_writereg(sc->sc_dev, 1,
7382 				    HV_MUX_DATA_CTRL,
7383 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
7384 			}
7385 		}
7386 	} else if (icr & ICR_RXSEQ) {
7387 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
7388 			device_xname(sc->sc_dev)));
7389 	}
7390 }
7391 
7392 /*
7393  * wm_linkintr_tbi:
7394  *
7395  *	Helper; handle link interrupts for TBI mode.
7396  */
7397 static void
7398 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
7399 {
7400 	uint32_t status;
7401 
7402 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7403 		__func__));
7404 
7405 	status = CSR_READ(sc, WMREG_STATUS);
7406 	if (icr & ICR_LSC) {
7407 		if (status & STATUS_LU) {
7408 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
7409 			    device_xname(sc->sc_dev),
7410 			    (status & STATUS_FD) ? "FDX" : "HDX"));
7411 			/*
7412 			 * NOTE: CTRL will update TFCE and RFCE automatically,
7413 			 * so we should update sc->sc_ctrl
7414 			 */
7415 
7416 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7417 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7418 			sc->sc_fcrtl &= ~FCRTL_XONE;
7419 			if (status & STATUS_FD)
7420 				sc->sc_tctl |=
7421 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7422 			else
7423 				sc->sc_tctl |=
7424 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7425 			if (sc->sc_ctrl & CTRL_TFCE)
7426 				sc->sc_fcrtl |= FCRTL_XONE;
7427 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7428 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7429 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
7430 				      sc->sc_fcrtl);
7431 			sc->sc_tbi_linkup = 1;
7432 		} else {
7433 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
7434 			    device_xname(sc->sc_dev)));
7435 			sc->sc_tbi_linkup = 0;
7436 		}
7437 		/* Update LED */
7438 		wm_tbi_serdes_set_linkled(sc);
7439 	} else if (icr & ICR_RXSEQ) {
7440 		DPRINTF(WM_DEBUG_LINK,
7441 		    ("%s: LINK: Receive sequence error\n",
7442 		    device_xname(sc->sc_dev)));
7443 	}
7444 }
7445 
7446 /*
7447  * wm_linkintr_serdes:
7448  *
 *	Helper; handle link interrupts for SERDES mode.
7450  */
7451 static void
7452 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
7453 {
7454 	struct mii_data *mii = &sc->sc_mii;
7455 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7456 	uint32_t pcs_adv, pcs_lpab, reg;
7457 
7458 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7459 		__func__));
7460 
7461 	if (icr & ICR_LSC) {
7462 		/* Check PCS */
7463 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
7464 		if ((reg & PCS_LSTS_LINKOK) != 0) {
7465 			mii->mii_media_status |= IFM_ACTIVE;
7466 			sc->sc_tbi_linkup = 1;
7467 		} else {
7468 			mii->mii_media_status |= IFM_NONE;
7469 			sc->sc_tbi_linkup = 0;
7470 			wm_tbi_serdes_set_linkled(sc);
7471 			return;
7472 		}
7473 		mii->mii_media_active |= IFM_1000_SX;
7474 		if ((reg & PCS_LSTS_FDX) != 0)
7475 			mii->mii_media_active |= IFM_FDX;
7476 		else
7477 			mii->mii_media_active |= IFM_HDX;
7478 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7479 			/* Check flow */
7480 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
7481 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
7482 				DPRINTF(WM_DEBUG_LINK,
7483 				    ("XXX LINKOK but not ACOMP\n"));
7484 				return;
7485 			}
7486 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
7487 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
7488 			DPRINTF(WM_DEBUG_LINK,
7489 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
7490 			if ((pcs_adv & TXCW_SYM_PAUSE)
7491 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
7492 				mii->mii_media_active |= IFM_FLOW
7493 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
7494 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
7495 			    && (pcs_adv & TXCW_ASYM_PAUSE)
7496 			    && (pcs_lpab & TXCW_SYM_PAUSE)
7497 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
7498 				mii->mii_media_active |= IFM_FLOW
7499 				    | IFM_ETH_TXPAUSE;
7500 			else if ((pcs_adv & TXCW_SYM_PAUSE)
7501 			    && (pcs_adv & TXCW_ASYM_PAUSE)
7502 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
7503 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
7504 				mii->mii_media_active |= IFM_FLOW
7505 				    | IFM_ETH_RXPAUSE;
7506 		}
7507 		/* Update LED */
7508 		wm_tbi_serdes_set_linkled(sc);
7509 	} else {
7510 		DPRINTF(WM_DEBUG_LINK,
7511 		    ("%s: LINK: Receive sequence error\n",
7512 		    device_xname(sc->sc_dev)));
7513 	}
7514 }
7515 
7516 /*
7517  * wm_linkintr:
7518  *
7519  *	Helper; handle link interrupts.
7520  */
7521 static void
7522 wm_linkintr(struct wm_softc *sc, uint32_t icr)
7523 {
7524 
7525 	KASSERT(WM_CORE_LOCKED(sc));
7526 
7527 	if (sc->sc_flags & WM_F_HAS_MII)
7528 		wm_linkintr_gmii(sc, icr);
7529 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
7530 	    && (sc->sc_type >= WM_T_82575))
7531 		wm_linkintr_serdes(sc, icr);
7532 	else
7533 		wm_linkintr_tbi(sc, icr);
7534 }
7535 
7536 /*
7537  * wm_intr_legacy:
7538  *
7539  *	Interrupt service routine for INTx and MSI.
7540  */
7541 static int
7542 wm_intr_legacy(void *arg)
7543 {
7544 	struct wm_softc *sc = arg;
7545 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7546 	struct wm_rxqueue *rxq = &sc->sc_queue[0].wmq_rxq;
7547 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7548 	uint32_t icr, rndval = 0;
7549 	int handled = 0;
7550 
7551 	DPRINTF(WM_DEBUG_TX,
7552 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
7553 	while (1 /* CONSTCOND */) {
7554 		icr = CSR_READ(sc, WMREG_ICR);
7555 		if ((icr & sc->sc_icr) == 0)
7556 			break;
7557 		if (rndval == 0)
7558 			rndval = icr;
7559 
7560 		mutex_enter(rxq->rxq_lock);
7561 
7562 		if (sc->sc_stopping) {
7563 			mutex_exit(rxq->rxq_lock);
7564 			break;
7565 		}
7566 
7567 		handled = 1;
7568 
7569 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7570 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
7571 			DPRINTF(WM_DEBUG_RX,
7572 			    ("%s: RX: got Rx intr 0x%08x\n",
7573 			    device_xname(sc->sc_dev),
7574 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
7575 			WM_Q_EVCNT_INCR(rxq, rxintr);
7576 		}
7577 #endif
7578 		wm_rxeof(rxq);
7579 
7580 		mutex_exit(rxq->rxq_lock);
7581 		mutex_enter(txq->txq_lock);
7582 
7583 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7584 		if (icr & ICR_TXDW) {
7585 			DPRINTF(WM_DEBUG_TX,
7586 			    ("%s: TX: got TXDW interrupt\n",
7587 			    device_xname(sc->sc_dev)));
7588 			WM_Q_EVCNT_INCR(txq, txdw);
7589 		}
7590 #endif
7591 		wm_txeof(sc, txq);
7592 
7593 		mutex_exit(txq->txq_lock);
7594 		WM_CORE_LOCK(sc);
7595 
7596 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
7597 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
7598 			wm_linkintr(sc, icr);
7599 		}
7600 
7601 		WM_CORE_UNLOCK(sc);
7602 
7603 		if (icr & ICR_RXO) {
7604 #if defined(WM_DEBUG)
7605 			log(LOG_WARNING, "%s: Receive overrun\n",
7606 			    device_xname(sc->sc_dev));
7607 #endif /* defined(WM_DEBUG) */
7608 		}
7609 	}
7610 
7611 	rnd_add_uint32(&sc->rnd_source, rndval);
7612 
7613 	if (handled) {
7614 		/* Try to get more packets going. */
7615 		ifp->if_start(ifp);
7616 	}
7617 
7618 	return handled;
7619 }
7620 
7621 static int
7622 wm_txrxintr_msix(void *arg)
7623 {
7624 	struct wm_queue *wmq = arg;
7625 	struct wm_txqueue *txq = &wmq->wmq_txq;
7626 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
7627 	struct wm_softc *sc = txq->txq_sc;
7628 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7629 
7630 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
7631 
7632 	DPRINTF(WM_DEBUG_TX,
7633 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
7634 
	if (sc->sc_type == WM_T_82574)
		CSR_WRITE(sc, WMREG_IMC,
		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
	else if (sc->sc_type == WM_T_82575)
		CSR_WRITE(sc, WMREG_EIMC,
		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
	else
		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
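
	/*
	 * The writes above mask this queue's interrupt while it is being
	 * serviced; the matching IMS/EIMS writes at the end of the
	 * handler unmask it again.
	 */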
7641 
7642 	if (!sc->sc_stopping) {
7643 		mutex_enter(txq->txq_lock);
7644 
7645 		WM_Q_EVCNT_INCR(txq, txdw);
7646 		wm_txeof(sc, txq);
7647 
7648 		/* Try to get more packets going. */
7649 		if (pcq_peek(txq->txq_interq) != NULL)
7650 			wm_nq_transmit_locked(ifp, txq);
7651 		/*
7652 		 * There are still some upper layer processing which call
7653 		 * ifp->if_start(). e.g. ALTQ
7654 		 */
7655 		if (wmq->wmq_id == 0) {
7656 			if (!IFQ_IS_EMPTY(&ifp->if_snd))
7657 				wm_nq_start_locked(ifp);
7658 		}
7659 		mutex_exit(txq->txq_lock);
7660 	}
7661 
7662 	DPRINTF(WM_DEBUG_RX,
7663 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
7664 
7665 	if (!sc->sc_stopping) {
7666 		mutex_enter(rxq->rxq_lock);
7667 		WM_Q_EVCNT_INCR(rxq, rxintr);
7668 		wm_rxeof(rxq);
7669 		mutex_exit(rxq->rxq_lock);
7670 	}
7671 
	if (sc->sc_type == WM_T_82574)
		CSR_WRITE(sc, WMREG_IMS,
		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
	else if (sc->sc_type == WM_T_82575)
		CSR_WRITE(sc, WMREG_EIMS,
		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
	else
		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
7678 
7679 	return 1;
7680 }
7681 
7682 /*
7683  * wm_linkintr_msix:
7684  *
7685  *	Interrupt service routine for link status change for MSI-X.
7686  */
7687 static int
7688 wm_linkintr_msix(void *arg)
7689 {
7690 	struct wm_softc *sc = arg;
7691 	uint32_t reg;
7692 
7693 	DPRINTF(WM_DEBUG_LINK,
7694 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
7695 
7696 	reg = CSR_READ(sc, WMREG_ICR);
7697 	WM_CORE_LOCK(sc);
7698 	if ((sc->sc_stopping) || ((reg & ICR_LSC) == 0))
7699 		goto out;
7700 
7701 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
7702 	wm_linkintr(sc, ICR_LSC);
7703 
7704 out:
7705 	WM_CORE_UNLOCK(sc);
7706 
7707 	if (sc->sc_type == WM_T_82574)
7708 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
7709 	else if (sc->sc_type == WM_T_82575)
7710 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
7711 	else
7712 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
7713 
7714 	return 1;
7715 }
7716 
7717 /*
7718  * Media related.
7719  * GMII, SGMII, TBI (and SERDES)
7720  */
7721 
7722 /* Common */
7723 
7724 /*
7725  * wm_tbi_serdes_set_linkled:
7726  *
7727  *	Update the link LED on TBI and SERDES devices.
7728  */
7729 static void
7730 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
7731 {
7732 
7733 	if (sc->sc_tbi_linkup)
7734 		sc->sc_ctrl |= CTRL_SWDPIN(0);
7735 	else
7736 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
7737 
7738 	/* 82540 or newer devices are active low */
7739 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
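	/*
	 * The XOR inverts the pin state chosen above, so on those
	 * devices "link up" drives the pin low.
	 */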
7740 
7741 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7742 }
7743 
7744 /* GMII related */
7745 
7746 /*
7747  * wm_gmii_reset:
7748  *
7749  *	Reset the PHY.
7750  */
7751 static void
7752 wm_gmii_reset(struct wm_softc *sc)
7753 {
7754 	uint32_t reg;
7755 	int rv;
7756 
7757 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
7758 		device_xname(sc->sc_dev), __func__));
7759 	/* get phy semaphore */
7760 	switch (sc->sc_type) {
7761 	case WM_T_82571:
7762 	case WM_T_82572:
7763 	case WM_T_82573:
7764 	case WM_T_82574:
7765 	case WM_T_82583:
		/* XXX should get sw semaphore, too */
7767 		rv = wm_get_swsm_semaphore(sc);
7768 		break;
7769 	case WM_T_82575:
7770 	case WM_T_82576:
7771 	case WM_T_82580:
7772 	case WM_T_I350:
7773 	case WM_T_I354:
7774 	case WM_T_I210:
7775 	case WM_T_I211:
7776 	case WM_T_80003:
7777 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7778 		break;
7779 	case WM_T_ICH8:
7780 	case WM_T_ICH9:
7781 	case WM_T_ICH10:
7782 	case WM_T_PCH:
7783 	case WM_T_PCH2:
7784 	case WM_T_PCH_LPT:
7785 	case WM_T_PCH_SPT:
7786 		rv = wm_get_swfwhw_semaphore(sc);
7787 		break;
7788 	default:
		/* nothing to do */
7790 		rv = 0;
7791 		break;
7792 	}
7793 	if (rv != 0) {
7794 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7795 		    __func__);
7796 		return;
7797 	}
7798 
7799 	switch (sc->sc_type) {
7800 	case WM_T_82542_2_0:
7801 	case WM_T_82542_2_1:
7802 		/* null */
7803 		break;
7804 	case WM_T_82543:
7805 		/*
7806 		 * With 82543, we need to force speed and duplex on the MAC
7807 		 * equal to what the PHY speed and duplex configuration is.
7808 		 * In addition, we need to perform a hardware reset on the PHY
7809 		 * to take it out of reset.
7810 		 */
7811 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
7812 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7813 
7814 		/* The PHY reset pin is active-low. */
7815 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
7816 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
7817 		    CTRL_EXT_SWDPIN(4));
7818 		reg |= CTRL_EXT_SWDPIO(4);
7819 
7820 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7821 		CSR_WRITE_FLUSH(sc);
7822 		delay(10*1000);
7823 
7824 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
7825 		CSR_WRITE_FLUSH(sc);
7826 		delay(150);
7827 #if 0
7828 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
7829 #endif
7830 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
7831 		break;
7832 	case WM_T_82544:	/* reset 10000us */
7833 	case WM_T_82540:
7834 	case WM_T_82545:
7835 	case WM_T_82545_3:
7836 	case WM_T_82546:
7837 	case WM_T_82546_3:
7838 	case WM_T_82541:
7839 	case WM_T_82541_2:
7840 	case WM_T_82547:
7841 	case WM_T_82547_2:
7842 	case WM_T_82571:	/* reset 100us */
7843 	case WM_T_82572:
7844 	case WM_T_82573:
7845 	case WM_T_82574:
7846 	case WM_T_82575:
7847 	case WM_T_82576:
7848 	case WM_T_82580:
7849 	case WM_T_I350:
7850 	case WM_T_I354:
7851 	case WM_T_I210:
7852 	case WM_T_I211:
7853 	case WM_T_82583:
7854 	case WM_T_80003:
7855 		/* generic reset */
7856 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7857 		CSR_WRITE_FLUSH(sc);
7858 		delay(20000);
7859 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7860 		CSR_WRITE_FLUSH(sc);
7861 		delay(20000);
7862 
7863 		if ((sc->sc_type == WM_T_82541)
7864 		    || (sc->sc_type == WM_T_82541_2)
7865 		    || (sc->sc_type == WM_T_82547)
7866 		    || (sc->sc_type == WM_T_82547_2)) {
			/* workarounds for igp are done in igp_reset() */
7868 			/* XXX add code to set LED after phy reset */
7869 		}
7870 		break;
7871 	case WM_T_ICH8:
7872 	case WM_T_ICH9:
7873 	case WM_T_ICH10:
7874 	case WM_T_PCH:
7875 	case WM_T_PCH2:
7876 	case WM_T_PCH_LPT:
7877 	case WM_T_PCH_SPT:
7878 		/* generic reset */
7879 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7880 		CSR_WRITE_FLUSH(sc);
7881 		delay(100);
7882 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7883 		CSR_WRITE_FLUSH(sc);
7884 		delay(150);
7885 		break;
7886 	default:
7887 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
7888 		    __func__);
7889 		break;
7890 	}
7891 
7892 	/* release PHY semaphore */
7893 	switch (sc->sc_type) {
7894 	case WM_T_82571:
7895 	case WM_T_82572:
7896 	case WM_T_82573:
7897 	case WM_T_82574:
7898 	case WM_T_82583:
		/* XXX should put sw semaphore, too */
7900 		wm_put_swsm_semaphore(sc);
7901 		break;
7902 	case WM_T_82575:
7903 	case WM_T_82576:
7904 	case WM_T_82580:
7905 	case WM_T_I350:
7906 	case WM_T_I354:
7907 	case WM_T_I210:
7908 	case WM_T_I211:
7909 	case WM_T_80003:
7910 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7911 		break;
7912 	case WM_T_ICH8:
7913 	case WM_T_ICH9:
7914 	case WM_T_ICH10:
7915 	case WM_T_PCH:
7916 	case WM_T_PCH2:
7917 	case WM_T_PCH_LPT:
7918 	case WM_T_PCH_SPT:
7919 		wm_put_swfwhw_semaphore(sc);
7920 		break;
7921 	default:
7922 		/* nothing to do */
7923 		rv = 0;
7924 		break;
7925 	}
7926 
7927 	/* get_cfg_done */
7928 	wm_get_cfg_done(sc);
7929 
7930 	/* extra setup */
7931 	switch (sc->sc_type) {
7932 	case WM_T_82542_2_0:
7933 	case WM_T_82542_2_1:
7934 	case WM_T_82543:
7935 	case WM_T_82544:
7936 	case WM_T_82540:
7937 	case WM_T_82545:
7938 	case WM_T_82545_3:
7939 	case WM_T_82546:
7940 	case WM_T_82546_3:
7941 	case WM_T_82541_2:
7942 	case WM_T_82547_2:
7943 	case WM_T_82571:
7944 	case WM_T_82572:
7945 	case WM_T_82573:
7946 	case WM_T_82575:
7947 	case WM_T_82576:
7948 	case WM_T_82580:
7949 	case WM_T_I350:
7950 	case WM_T_I354:
7951 	case WM_T_I210:
7952 	case WM_T_I211:
7953 	case WM_T_80003:
7954 		/* null */
7955 		break;
7956 	case WM_T_82574:
7957 	case WM_T_82583:
7958 		wm_lplu_d0_disable(sc);
7959 		break;
7960 	case WM_T_82541:
7961 	case WM_T_82547:
7962 		/* XXX Configure actively LED after PHY reset */
7963 		break;
7964 	case WM_T_ICH8:
7965 	case WM_T_ICH9:
7966 	case WM_T_ICH10:
7967 	case WM_T_PCH:
7968 	case WM_T_PCH2:
7969 	case WM_T_PCH_LPT:
7970 	case WM_T_PCH_SPT:
		/* Allow time for h/w to get to a quiescent state after reset */
7972 		delay(10*1000);
7973 
7974 		if (sc->sc_type == WM_T_PCH)
7975 			wm_hv_phy_workaround_ich8lan(sc);
7976 
7977 		if (sc->sc_type == WM_T_PCH2)
7978 			wm_lv_phy_workaround_ich8lan(sc);
7979 
7980 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
7981 			/*
7982 			 * dummy read to clear the phy wakeup bit after lcd
7983 			 * reset
7984 			 */
7985 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
7986 		}
7987 
7988 		/*
		 * XXX Configure the LCD with the extended configuration region
7990 		 * in NVM
7991 		 */
7992 
7993 		/* Disable D0 LPLU. */
7994 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
7995 			wm_lplu_d0_disable_pch(sc);
7996 		else
7997 			wm_lplu_d0_disable(sc);	/* ICH* */
7998 		break;
7999 	default:
8000 		panic("%s: unknown type\n", __func__);
8001 		break;
8002 	}
8003 }
8004 
8005 /*
8006  * wm_get_phy_id_82575:
8007  *
 *	Return the PHY ID, or -1 on failure.
8009  */
8010 static int
8011 wm_get_phy_id_82575(struct wm_softc *sc)
8012 {
8013 	uint32_t reg;
8014 	int phyid = -1;
8015 
8016 	/* XXX */
8017 	if ((sc->sc_flags & WM_F_SGMII) == 0)
8018 		return -1;
8019 
8020 	if (wm_sgmii_uses_mdio(sc)) {
8021 		switch (sc->sc_type) {
8022 		case WM_T_82575:
8023 		case WM_T_82576:
8024 			reg = CSR_READ(sc, WMREG_MDIC);
8025 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
8026 			break;
8027 		case WM_T_82580:
8028 		case WM_T_I350:
8029 		case WM_T_I354:
8030 		case WM_T_I210:
8031 		case WM_T_I211:
8032 			reg = CSR_READ(sc, WMREG_MDICNFG);
8033 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
8034 			break;
8035 		default:
8036 			return -1;
8037 		}
8038 	}
8039 
8040 	return phyid;
8041 }
8042 
8043 
8044 /*
8045  * wm_gmii_mediainit:
8046  *
8047  *	Initialize media for use on 1000BASE-T devices.
8048  */
8049 static void
8050 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
8051 {
8052 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8053 	struct mii_data *mii = &sc->sc_mii;
8054 	uint32_t reg;
8055 
8056 	/* We have GMII. */
8057 	sc->sc_flags |= WM_F_HAS_MII;
8058 
8059 	if (sc->sc_type == WM_T_80003)
		sc->sc_tipg = TIPG_1000T_80003_DFLT;
8061 	else
8062 		sc->sc_tipg = TIPG_1000T_DFLT;
8063 
8064 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
8065 	if ((sc->sc_type == WM_T_82580)
8066 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
8067 	    || (sc->sc_type == WM_T_I211)) {
8068 		reg = CSR_READ(sc, WMREG_PHPM);
8069 		reg &= ~PHPM_GO_LINK_D;
8070 		CSR_WRITE(sc, WMREG_PHPM, reg);
8071 	}
8072 
8073 	/*
8074 	 * Let the chip set speed/duplex on its own based on
8075 	 * signals from the PHY.
8076 	 * XXXbouyer - I'm not sure this is right for the 80003,
8077 	 * the em driver only sets CTRL_SLU here - but it seems to work.
8078 	 */
8079 	sc->sc_ctrl |= CTRL_SLU;
8080 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8081 
8082 	/* Initialize our media structures and probe the GMII. */
8083 	mii->mii_ifp = ifp;
8084 
8085 	/*
8086 	 * Determine the PHY access method.
8087 	 *
8088 	 *  For SGMII, use SGMII specific method.
8089 	 *
8090 	 *  For some devices, we can determine the PHY access method
8091 	 * from sc_type.
8092 	 *
8093 	 *  For ICH and PCH variants, it's difficult to determine the PHY
8094 	 * access  method by sc_type, so use the PCI product ID for some
8095 	 * devices.
8096 	 * For other ICH8 variants, try to use igp's method. If the PHY
8097 	 * can't detect, then use bm's method.
8098 	 */
8099 	switch (prodid) {
8100 	case PCI_PRODUCT_INTEL_PCH_M_LM:
8101 	case PCI_PRODUCT_INTEL_PCH_M_LC:
8102 		/* 82577 */
8103 		sc->sc_phytype = WMPHY_82577;
8104 		break;
8105 	case PCI_PRODUCT_INTEL_PCH_D_DM:
8106 	case PCI_PRODUCT_INTEL_PCH_D_DC:
8107 		/* 82578 */
8108 		sc->sc_phytype = WMPHY_82578;
8109 		break;
8110 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
8111 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
8112 		/* 82579 */
8113 		sc->sc_phytype = WMPHY_82579;
8114 		break;
8115 	case PCI_PRODUCT_INTEL_82801I_BM:
8116 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
8117 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
8118 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
8119 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
8120 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
8121 		/* 82567 */
8122 		sc->sc_phytype = WMPHY_BM;
8123 		mii->mii_readreg = wm_gmii_bm_readreg;
8124 		mii->mii_writereg = wm_gmii_bm_writereg;
8125 		break;
8126 	default:
8127 		if (((sc->sc_flags & WM_F_SGMII) != 0)
		    && !wm_sgmii_uses_mdio(sc)) {
8129 			/* SGMII */
8130 			mii->mii_readreg = wm_sgmii_readreg;
8131 			mii->mii_writereg = wm_sgmii_writereg;
8132 		} else if (sc->sc_type >= WM_T_80003) {
8133 			/* 80003 */
8134 			mii->mii_readreg = wm_gmii_i80003_readreg;
8135 			mii->mii_writereg = wm_gmii_i80003_writereg;
8136 		} else if (sc->sc_type >= WM_T_I210) {
8137 			/* I210 and I211 */
8138 			mii->mii_readreg = wm_gmii_gs40g_readreg;
8139 			mii->mii_writereg = wm_gmii_gs40g_writereg;
8140 		} else if (sc->sc_type >= WM_T_82580) {
8141 			/* 82580, I350 and I354 */
8142 			sc->sc_phytype = WMPHY_82580;
8143 			mii->mii_readreg = wm_gmii_82580_readreg;
8144 			mii->mii_writereg = wm_gmii_82580_writereg;
8145 		} else if (sc->sc_type >= WM_T_82544) {
8146 			/* 82544, 0, [56], [17], 8257[1234] and 82583 */
8147 			mii->mii_readreg = wm_gmii_i82544_readreg;
8148 			mii->mii_writereg = wm_gmii_i82544_writereg;
8149 		} else {
8150 			mii->mii_readreg = wm_gmii_i82543_readreg;
8151 			mii->mii_writereg = wm_gmii_i82543_writereg;
8152 		}
8153 		break;
8154 	}
8155 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
8156 		/* All PCH* use _hv_ */
8157 		mii->mii_readreg = wm_gmii_hv_readreg;
8158 		mii->mii_writereg = wm_gmii_hv_writereg;
8159 	}
8160 	mii->mii_statchg = wm_gmii_statchg;
8161 
8162 	wm_gmii_reset(sc);
8163 
8164 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
8165 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
8166 	    wm_gmii_mediastatus);
8167 
8168 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
8169 	    || (sc->sc_type == WM_T_82580)
8170 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
8171 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
8172 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
8173 			/* Attach only one port */
8174 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
8175 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
8176 		} else {
8177 			int i, id;
8178 			uint32_t ctrl_ext;
8179 
8180 			id = wm_get_phy_id_82575(sc);
8181 			if (id != -1) {
8182 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
8183 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
8184 			}
8185 			if ((id == -1)
8186 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
8187 				/* Power on sgmii phy if it is disabled */
8188 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8189 				CSR_WRITE(sc, WMREG_CTRL_EXT,
				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
8191 				CSR_WRITE_FLUSH(sc);
8192 				delay(300*1000); /* XXX too long */
8193 
				/* Try PHY addresses 1 through 7 */
8195 				for (i = 1; i < 8; i++)
8196 					mii_attach(sc->sc_dev, &sc->sc_mii,
8197 					    0xffffffff, i, MII_OFFSET_ANY,
8198 					    MIIF_DOPAUSE);
8199 
8200 				/* restore previous sfp cage power state */
8201 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8202 			}
8203 		}
8204 	} else {
8205 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8206 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
8207 	}
8208 
8209 	/*
8210 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
8211 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
8212 	 */
8213 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
8214 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
8215 		wm_set_mdio_slow_mode_hv(sc);
8216 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8217 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
8218 	}
8219 
8220 	/*
8221 	 * (For ICH8 variants)
8222 	 * If PHY detection failed, use BM's r/w function and retry.
8223 	 */
8224 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
8225 		/* if failed, retry with *_bm_* */
8226 		mii->mii_readreg = wm_gmii_bm_readreg;
8227 		mii->mii_writereg = wm_gmii_bm_writereg;
8228 
8229 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8230 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
8231 	}
8232 
8233 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
8235 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
8236 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
8237 		sc->sc_phytype = WMPHY_NONE;
8238 	} else {
8239 		/*
8240 		 * PHY Found!
8241 		 * Check PHY type.
8242 		 */
8243 		uint32_t model;
8244 		struct mii_softc *child;
8245 
8246 		child = LIST_FIRST(&mii->mii_phys);
8247 		model = child->mii_mpd_model;
8248 		if (model == MII_MODEL_yyINTEL_I82566)
8249 			sc->sc_phytype = WMPHY_IGP_3;
8250 
8251 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
8252 	}
8253 }
8254 
8255 /*
8256  * wm_gmii_mediachange:	[ifmedia interface function]
8257  *
8258  *	Set hardware to newly-selected media on a 1000BASE-T device.
8259  */
8260 static int
8261 wm_gmii_mediachange(struct ifnet *ifp)
8262 {
8263 	struct wm_softc *sc = ifp->if_softc;
8264 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8265 	int rc;
8266 
8267 	if ((ifp->if_flags & IFF_UP) == 0)
8268 		return 0;
8269 
8270 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
8271 	sc->sc_ctrl |= CTRL_SLU;
8272 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8273 	    || (sc->sc_type > WM_T_82543)) {
8274 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
8275 	} else {
8276 		sc->sc_ctrl &= ~CTRL_ASDE;
8277 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
8278 		if (ife->ifm_media & IFM_FDX)
8279 			sc->sc_ctrl |= CTRL_FD;
8280 		switch (IFM_SUBTYPE(ife->ifm_media)) {
8281 		case IFM_10_T:
8282 			sc->sc_ctrl |= CTRL_SPEED_10;
8283 			break;
8284 		case IFM_100_TX:
8285 			sc->sc_ctrl |= CTRL_SPEED_100;
8286 			break;
8287 		case IFM_1000_T:
8288 			sc->sc_ctrl |= CTRL_SPEED_1000;
8289 			break;
8290 		default:
8291 			panic("wm_gmii_mediachange: bad media 0x%x",
8292 			    ife->ifm_media);
8293 		}
8294 	}
8295 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8296 	if (sc->sc_type <= WM_T_82543)
8297 		wm_gmii_reset(sc);
8298 
8299 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
8300 		return 0;
8301 	return rc;
8302 }
8303 
8304 /*
8305  * wm_gmii_mediastatus:	[ifmedia interface function]
8306  *
8307  *	Get the current interface media status on a 1000BASE-T device.
8308  */
8309 static void
8310 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
8311 {
8312 	struct wm_softc *sc = ifp->if_softc;
8313 
8314 	ether_mediastatus(ifp, ifmr);
8315 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
8316 	    | sc->sc_flowflags;
8317 }
8318 
8319 #define	MDI_IO		CTRL_SWDPIN(2)
8320 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
8321 #define	MDI_CLK		CTRL_SWDPIN(3)
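
/*
 * On the i82543 the MII management interface is bit-banged through
 * software-definable pins: MDI_DIR sets the direction of the data pin,
 * MDI_IO carries each data bit, and bits are clocked by a low-high-low
 * transition on MDI_CLK with roughly 10us hold times (see the delays
 * in the helpers below).
 */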
8322 
8323 static void
8324 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
8325 {
8326 	uint32_t i, v;
8327 
8328 	v = CSR_READ(sc, WMREG_CTRL);
8329 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
8330 	v |= MDI_DIR | CTRL_SWDPIO(3);
8331 
8332 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
8333 		if (data & i)
8334 			v |= MDI_IO;
8335 		else
8336 			v &= ~MDI_IO;
8337 		CSR_WRITE(sc, WMREG_CTRL, v);
8338 		CSR_WRITE_FLUSH(sc);
8339 		delay(10);
8340 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8341 		CSR_WRITE_FLUSH(sc);
8342 		delay(10);
8343 		CSR_WRITE(sc, WMREG_CTRL, v);
8344 		CSR_WRITE_FLUSH(sc);
8345 		delay(10);
8346 	}
8347 }
8348 
8349 static uint32_t
8350 wm_i82543_mii_recvbits(struct wm_softc *sc)
8351 {
8352 	uint32_t v, i, data = 0;
8353 
8354 	v = CSR_READ(sc, WMREG_CTRL);
8355 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
8356 	v |= CTRL_SWDPIO(3);
8357 
8358 	CSR_WRITE(sc, WMREG_CTRL, v);
8359 	CSR_WRITE_FLUSH(sc);
8360 	delay(10);
8361 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8362 	CSR_WRITE_FLUSH(sc);
8363 	delay(10);
8364 	CSR_WRITE(sc, WMREG_CTRL, v);
8365 	CSR_WRITE_FLUSH(sc);
8366 	delay(10);
8367 
8368 	for (i = 0; i < 16; i++) {
8369 		data <<= 1;
8370 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8371 		CSR_WRITE_FLUSH(sc);
8372 		delay(10);
8373 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
8374 			data |= 1;
8375 		CSR_WRITE(sc, WMREG_CTRL, v);
8376 		CSR_WRITE_FLUSH(sc);
8377 		delay(10);
8378 	}
8379 
8380 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8381 	CSR_WRITE_FLUSH(sc);
8382 	delay(10);
8383 	CSR_WRITE(sc, WMREG_CTRL, v);
8384 	CSR_WRITE_FLUSH(sc);
8385 	delay(10);
8386 
8387 	return data;
8388 }
8389 
8390 #undef MDI_IO
8391 #undef MDI_DIR
8392 #undef MDI_CLK
8393 
8394 /*
8395  * wm_gmii_i82543_readreg:	[mii interface function]
8396  *
8397  *	Read a PHY register on the GMII (i82543 version).
8398  */
8399 static int
8400 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
8401 {
8402 	struct wm_softc *sc = device_private(self);
8403 	int rv;
8404 
8405 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
8406 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
8407 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
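	/*
	 * Together these form an IEEE 802.3 clause 22 read frame: a
	 * 32-bit preamble of ones, then the start, opcode, PHY address
	 * and register address bits, before the 16 data bits are
	 * clocked back in below.
	 */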
8408 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
8409 
8410 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
8411 	    device_xname(sc->sc_dev), phy, reg, rv));
8412 
8413 	return rv;
8414 }
8415 
8416 /*
8417  * wm_gmii_i82543_writereg:	[mii interface function]
8418  *
8419  *	Write a PHY register on the GMII (i82543 version).
8420  */
8421 static void
8422 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
8423 {
8424 	struct wm_softc *sc = device_private(self);
8425 
8426 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
8427 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
8428 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
8429 	    (MII_COMMAND_START << 30), 32);
8430 }
8431 
8432 /*
8433  * wm_gmii_i82544_readreg:	[mii interface function]
8434  *
8435  *	Read a PHY register on the GMII.
8436  */
8437 static int
8438 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
8439 {
8440 	struct wm_softc *sc = device_private(self);
8441 	uint32_t mdic = 0;
8442 	int i, rv;
8443 
8444 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
8445 	    MDIC_REGADD(reg));
8446 
8447 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
8448 		mdic = CSR_READ(sc, WMREG_MDIC);
8449 		if (mdic & MDIC_READY)
8450 			break;
8451 		delay(50);
8452 	}
8453 
8454 	if ((mdic & MDIC_READY) == 0) {
8455 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
8456 		    device_xname(sc->sc_dev), phy, reg);
8457 		rv = 0;
8458 	} else if (mdic & MDIC_E) {
8459 #if 0 /* This is normal if no PHY is present. */
8460 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
8461 		    device_xname(sc->sc_dev), phy, reg);
8462 #endif
8463 		rv = 0;
8464 	} else {
8465 		rv = MDIC_DATA(mdic);
8466 		if (rv == 0xffff)
8467 			rv = 0;
8468 	}
8469 
8470 	return rv;
8471 }
8472 
8473 /*
8474  * wm_gmii_i82544_writereg:	[mii interface function]
8475  *
8476  *	Write a PHY register on the GMII.
8477  */
8478 static void
8479 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
8480 {
8481 	struct wm_softc *sc = device_private(self);
8482 	uint32_t mdic = 0;
8483 	int i;
8484 
8485 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
8486 	    MDIC_REGADD(reg) | MDIC_DATA(val));
8487 
8488 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
8489 		mdic = CSR_READ(sc, WMREG_MDIC);
8490 		if (mdic & MDIC_READY)
8491 			break;
8492 		delay(50);
8493 	}
8494 
8495 	if ((mdic & MDIC_READY) == 0)
8496 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
8497 		    device_xname(sc->sc_dev), phy, reg);
8498 	else if (mdic & MDIC_E)
8499 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
8500 		    device_xname(sc->sc_dev), phy, reg);
8501 }
8502 
8503 /*
8504  * wm_gmii_i80003_readreg:	[mii interface function]
8505  *
 *	Read a PHY register on the Kumeran bus.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8509  */
8510 static int
8511 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
8512 {
8513 	struct wm_softc *sc = device_private(self);
8514 	int sem;
8515 	int rv;
8516 
8517 	if (phy != 1) /* only one PHY on kumeran bus */
8518 		return 0;
8519 
8520 	sem = swfwphysem[sc->sc_funcid];
8521 	if (wm_get_swfw_semaphore(sc, sem)) {
8522 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8523 		    __func__);
8524 		return 0;
8525 	}
8526 
8527 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
8528 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
8529 		    reg >> GG82563_PAGE_SHIFT);
8530 	} else {
8531 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
8532 		    reg >> GG82563_PAGE_SHIFT);
8533 	}
	/* Wait 200us more to work around an MDIC ready-bit bug */
8535 	delay(200);
8536 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
8537 	delay(200);
8538 
8539 	wm_put_swfw_semaphore(sc, sem);
8540 	return rv;
8541 }
8542 
8543 /*
8544  * wm_gmii_i80003_writereg:	[mii interface function]
8545  *
 *	Write a PHY register on the Kumeran bus.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8549  */
8550 static void
8551 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
8552 {
8553 	struct wm_softc *sc = device_private(self);
8554 	int sem;
8555 
8556 	if (phy != 1) /* only one PHY on kumeran bus */
8557 		return;
8558 
8559 	sem = swfwphysem[sc->sc_funcid];
8560 	if (wm_get_swfw_semaphore(sc, sem)) {
8561 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8562 		    __func__);
8563 		return;
8564 	}
8565 
8566 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
8567 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
8568 		    reg >> GG82563_PAGE_SHIFT);
8569 	} else {
8570 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
8571 		    reg >> GG82563_PAGE_SHIFT);
8572 	}
	/* Wait 200us more to work around an MDIC ready-bit bug */
8574 	delay(200);
8575 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
8576 	delay(200);
8577 
8578 	wm_put_swfw_semaphore(sc, sem);
8579 }
8580 
8581 /*
8582  * wm_gmii_bm_readreg:	[mii interface function]
8583  *
 *	Read a PHY register on the BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8587  */
8588 static int
8589 wm_gmii_bm_readreg(device_t self, int phy, int reg)
8590 {
8591 	struct wm_softc *sc = device_private(self);
8592 	int sem;
8593 	int rv;
8594 
8595 	sem = swfwphysem[sc->sc_funcid];
8596 	if (wm_get_swfw_semaphore(sc, sem)) {
8597 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8598 		    __func__);
8599 		return 0;
8600 	}
8601 
8602 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
8603 		if (phy == 1)
8604 			wm_gmii_i82544_writereg(self, phy,
8605 			    MII_IGPHY_PAGE_SELECT, reg);
8606 		else
8607 			wm_gmii_i82544_writereg(self, phy,
8608 			    GG82563_PHY_PAGE_SELECT,
8609 			    reg >> GG82563_PAGE_SHIFT);
8610 	}
8611 
8612 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
8613 	wm_put_swfw_semaphore(sc, sem);
8614 	return rv;
8615 }
8616 
8617 /*
8618  * wm_gmii_bm_writereg:	[mii interface function]
8619  *
 *	Write a PHY register on the BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8623  */
8624 static void
8625 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
8626 {
8627 	struct wm_softc *sc = device_private(self);
8628 	int sem;
8629 
8630 	sem = swfwphysem[sc->sc_funcid];
8631 	if (wm_get_swfw_semaphore(sc, sem)) {
8632 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8633 		    __func__);
8634 		return;
8635 	}
8636 
8637 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
8638 		if (phy == 1)
8639 			wm_gmii_i82544_writereg(self, phy,
8640 			    MII_IGPHY_PAGE_SELECT, reg);
8641 		else
8642 			wm_gmii_i82544_writereg(self, phy,
8643 			    GG82563_PHY_PAGE_SELECT,
8644 			    reg >> GG82563_PAGE_SHIFT);
8645 	}
8646 
8647 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
8648 	wm_put_swfw_semaphore(sc, sem);
8649 }
8650 
8651 static void
8652 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
8653 {
8654 	struct wm_softc *sc = device_private(self);
8655 	uint16_t regnum = BM_PHY_REG_NUM(offset);
8656 	uint16_t wuce;
8657 
8658 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
8659 	if (sc->sc_type == WM_T_PCH) {
		/* XXX the e1000 driver does nothing... why? */
8661 	}
8662 
8663 	/* Set page 769 */
8664 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8665 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
8666 
8667 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
8668 
8669 	wuce &= ~BM_WUC_HOST_WU_BIT;
8670 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
8671 	    wuce | BM_WUC_ENABLE_BIT);
8672 
8673 	/* Select page 800 */
8674 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8675 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
8676 
8677 	/* Write page 800 */
8678 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
8679 
8680 	if (rd)
8681 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
8682 	else
8683 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
8684 
8685 	/* Set page 769 */
8686 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8687 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
8688 
8689 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
8690 }
8691 
8692 /*
8693  * wm_gmii_hv_readreg:	[mii interface function]
8694  *
 *	Read a PHY register on the HV (PCH) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8698  */
8699 static int
8700 wm_gmii_hv_readreg(device_t self, int phy, int reg)
8701 {
8702 	struct wm_softc *sc = device_private(self);
8703 	uint16_t page = BM_PHY_REG_PAGE(reg);
8704 	uint16_t regnum = BM_PHY_REG_NUM(reg);
8705 	uint16_t val;
8706 	int rv;
8707 
8708 	if (wm_get_swfwhw_semaphore(sc)) {
8709 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8710 		    __func__);
8711 		return 0;
8712 	}
8713 
8714 	/* XXX Workaround failure in MDIO access while cable is disconnected */
8715 	if (sc->sc_phytype == WMPHY_82577) {
8716 		/* XXX must write */
8717 	}
8718 
	/* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_put_swfwhw_semaphore(sc);
		return val;
	}

	/*
	 * Pages lower than 768 work differently than the rest, so they
	 * would need their own function; this is not handled here.
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_readreg!!!\n");
		wm_put_swfwhw_semaphore(sc);
		return 0;
	}
8733 
8734 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
8735 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8736 		    page << BME1000_PAGE_SHIFT);
8737 	}
8738 
8739 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
8740 	wm_put_swfwhw_semaphore(sc);
8741 	return rv;
8742 }
8743 
8744 /*
8745  * wm_gmii_hv_writereg:	[mii interface function]
8746  *
 *	Write a PHY register on the HV (PCH) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8750  */
8751 static void
8752 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
8753 {
8754 	struct wm_softc *sc = device_private(self);
8755 	uint16_t page = BM_PHY_REG_PAGE(reg);
8756 	uint16_t regnum = BM_PHY_REG_NUM(reg);
8757 
8758 	if (wm_get_swfwhw_semaphore(sc)) {
8759 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8760 		    __func__);
8761 		return;
8762 	}
8763 
8764 	/* XXX Workaround failure in MDIO access while cable is disconnected */
8765 
	/* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		uint16_t tmp;

		tmp = val;
		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		wm_put_swfwhw_semaphore(sc);
		return;
	}

	/*
	 * Pages lower than 768 work differently than the rest, so they
	 * would need their own function; this is not handled here.
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_writereg!!!\n");
		wm_put_swfwhw_semaphore(sc);
		return;
	}
8783 
8784 	/*
8785 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
8786 	 * Power Down (whenever bit 11 of the PHY control register is set)
8787 	 */
8788 
8789 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
8790 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8791 		    page << BME1000_PAGE_SHIFT);
8792 	}
8793 
8794 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
8795 	wm_put_swfwhw_semaphore(sc);
8796 }
8797 
8798 /*
8799  * wm_gmii_82580_readreg:	[mii interface function]
8800  *
8801  *	Read a PHY register on the 82580 and I350.
8802  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8804  */
8805 static int
8806 wm_gmii_82580_readreg(device_t self, int phy, int reg)
8807 {
8808 	struct wm_softc *sc = device_private(self);
8809 	int sem;
8810 	int rv;
8811 
8812 	sem = swfwphysem[sc->sc_funcid];
8813 	if (wm_get_swfw_semaphore(sc, sem)) {
8814 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8815 		    __func__);
8816 		return 0;
8817 	}
8818 
8819 	rv = wm_gmii_i82544_readreg(self, phy, reg);
8820 
8821 	wm_put_swfw_semaphore(sc, sem);
8822 	return rv;
8823 }
8824 
8825 /*
8826  * wm_gmii_82580_writereg:	[mii interface function]
8827  *
8828  *	Write a PHY register on the 82580 and I350.
8829  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8831  */
8832 static void
8833 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
8834 {
8835 	struct wm_softc *sc = device_private(self);
8836 	int sem;
8837 
8838 	sem = swfwphysem[sc->sc_funcid];
8839 	if (wm_get_swfw_semaphore(sc, sem)) {
8840 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8841 		    __func__);
8842 		return;
8843 	}
8844 
8845 	wm_gmii_i82544_writereg(self, phy, reg, val);
8846 
8847 	wm_put_swfw_semaphore(sc, sem);
8848 }
8849 
8850 /*
8851  * wm_gmii_gs40g_readreg:	[mii interface function]
8852  *
 *	Read a PHY register on the I210 and I211.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8856  */
8857 static int
8858 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
8859 {
8860 	struct wm_softc *sc = device_private(self);
8861 	int sem;
8862 	int page, offset;
8863 	int rv;
8864 
8865 	/* Acquire semaphore */
8866 	sem = swfwphysem[sc->sc_funcid];
8867 	if (wm_get_swfw_semaphore(sc, sem)) {
8868 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8869 		    __func__);
8870 		return 0;
8871 	}
8872 
8873 	/* Page select */
8874 	page = reg >> GS40G_PAGE_SHIFT;
8875 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
8876 
8877 	/* Read reg */
8878 	offset = reg & GS40G_OFFSET_MASK;
8879 	rv = wm_gmii_i82544_readreg(self, phy, offset);
8880 
8881 	wm_put_swfw_semaphore(sc, sem);
8882 	return rv;
8883 }
8884 
8885 /*
8886  * wm_gmii_gs40g_writereg:	[mii interface function]
8887  *
8888  *	Write a PHY register on the I210 and I211.
8889  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8891  */
8892 static void
8893 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
8894 {
8895 	struct wm_softc *sc = device_private(self);
8896 	int sem;
8897 	int page, offset;
8898 
8899 	/* Acquire semaphore */
8900 	sem = swfwphysem[sc->sc_funcid];
8901 	if (wm_get_swfw_semaphore(sc, sem)) {
8902 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8903 		    __func__);
8904 		return;
8905 	}
8906 
8907 	/* Page select */
8908 	page = reg >> GS40G_PAGE_SHIFT;
8909 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
8910 
8911 	/* Write reg */
8912 	offset = reg & GS40G_OFFSET_MASK;
8913 	wm_gmii_i82544_writereg(self, phy, offset, val);
8914 
8915 	/* Release semaphore */
8916 	wm_put_swfw_semaphore(sc, sem);
8917 }
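
/*
 * Illustrative sketch (not compiled) of how the GS40G helpers above
 * split their 'reg' argument.  Assuming GS40G_PAGE_SHIFT is 16 and
 * GS40G_OFFSET_MASK is 0xffff, as in the Intel shared code (see
 * if_wmreg.h for the authoritative values), an access with
 * reg = (2 << 16) | 0x10 selects PHY page 2, register 0x10:
 *
 *	page   = reg >> GS40G_PAGE_SHIFT;	// 2
 *	offset = reg & GS40G_OFFSET_MASK;	// 0x10
 *
 * The page is latched through GS40G_PAGE_SELECT first, then the offset
 * is accessed with the ordinary 82544 MDIC routines.
 */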
8918 
8919 /*
8920  * wm_gmii_statchg:	[mii interface function]
8921  *
8922  *	Callback from MII layer when media changes.
8923  */
8924 static void
8925 wm_gmii_statchg(struct ifnet *ifp)
8926 {
8927 	struct wm_softc *sc = ifp->if_softc;
8928 	struct mii_data *mii = &sc->sc_mii;
8929 
8930 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
8931 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
8932 	sc->sc_fcrtl &= ~FCRTL_XONE;
8933 
	/* Get the flow control negotiation result. */
8937 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
8938 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
8939 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
8940 		mii->mii_media_active &= ~IFM_ETH_FMASK;
8941 	}
8942 
8943 	if (sc->sc_flowflags & IFM_FLOW) {
8944 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
8945 			sc->sc_ctrl |= CTRL_TFCE;
8946 			sc->sc_fcrtl |= FCRTL_XONE;
8947 		}
8948 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
8949 			sc->sc_ctrl |= CTRL_RFCE;
8950 	}
8951 
8952 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
8953 		DPRINTF(WM_DEBUG_LINK,
8954 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
8955 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
8956 	} else {
8957 		DPRINTF(WM_DEBUG_LINK,
8958 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
8959 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
8960 	}
8961 
8962 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8963 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
8964 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
8965 						 : WMREG_FCRTL, sc->sc_fcrtl);
8966 	if (sc->sc_type == WM_T_80003) {
8967 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
8968 		case IFM_1000_T:
8969 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
8970 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
8971 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
8972 			break;
8973 		default:
8974 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
8975 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
8976 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
8977 			break;
8978 		}
8979 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
8980 	}
8981 }
8982 
8983 /*
8984  * wm_kmrn_readreg:
8985  *
8986  *	Read a kumeran register
8987  */
8988 static int
8989 wm_kmrn_readreg(struct wm_softc *sc, int reg)
8990 {
8991 	int rv;
8992 
8993 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
8994 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
8995 			aprint_error_dev(sc->sc_dev,
8996 			    "%s: failed to get semaphore\n", __func__);
8997 			return 0;
8998 		}
8999 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
9000 		if (wm_get_swfwhw_semaphore(sc)) {
9001 			aprint_error_dev(sc->sc_dev,
9002 			    "%s: failed to get semaphore\n", __func__);
9003 			return 0;
9004 		}
9005 	}
9006 
9007 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
9008 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
9009 	    KUMCTRLSTA_REN);
9010 	CSR_WRITE_FLUSH(sc);
9011 	delay(2);
9012 
9013 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
9014 
9015 	if (sc->sc_flags & WM_F_LOCK_SWFW)
9016 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9017 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
9018 		wm_put_swfwhw_semaphore(sc);
9019 
9020 	return rv;
9021 }
9022 
9023 /*
9024  * wm_kmrn_writereg:
9025  *
9026  *	Write a kumeran register
9027  */
9028 static void
9029 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
9030 {
9031 
9032 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
9033 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
9034 			aprint_error_dev(sc->sc_dev,
9035 			    "%s: failed to get semaphore\n", __func__);
9036 			return;
9037 		}
9038 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
9039 		if (wm_get_swfwhw_semaphore(sc)) {
9040 			aprint_error_dev(sc->sc_dev,
9041 			    "%s: failed to get semaphore\n", __func__);
9042 			return;
9043 		}
9044 	}
9045 
9046 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
9047 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
9048 	    (val & KUMCTRLSTA_MASK));
9049 
9050 	if (sc->sc_flags & WM_F_LOCK_SWFW)
9051 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9052 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
9053 		wm_put_swfwhw_semaphore(sc);
9054 }
9055 
9056 /* SGMII related */
9057 
9058 /*
9059  * wm_sgmii_uses_mdio
9060  *
9061  * Check whether the transaction is to the internal PHY or the external
9062  * MDIO interface. Return true if it's MDIO.
9063  */
9064 static bool
9065 wm_sgmii_uses_mdio(struct wm_softc *sc)
9066 {
9067 	uint32_t reg;
9068 	bool ismdio = false;
9069 
9070 	switch (sc->sc_type) {
9071 	case WM_T_82575:
9072 	case WM_T_82576:
9073 		reg = CSR_READ(sc, WMREG_MDIC);
9074 		ismdio = ((reg & MDIC_DEST) != 0);
9075 		break;
9076 	case WM_T_82580:
9077 	case WM_T_I350:
9078 	case WM_T_I354:
9079 	case WM_T_I210:
9080 	case WM_T_I211:
9081 		reg = CSR_READ(sc, WMREG_MDICNFG);
9082 		ismdio = ((reg & MDICNFG_DEST) != 0);
9083 		break;
9084 	default:
9085 		break;
9086 	}
9087 
9088 	return ismdio;
9089 }
9090 
9091 /*
9092  * wm_sgmii_readreg:	[mii interface function]
9093  *
9094  *	Read a PHY register on the SGMII
9095  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
9097  */
9098 static int
9099 wm_sgmii_readreg(device_t self, int phy, int reg)
9100 {
9101 	struct wm_softc *sc = device_private(self);
9102 	uint32_t i2ccmd;
9103 	int i, rv;
9104 
9105 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
9106 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9107 		    __func__);
9108 		return 0;
9109 	}
9110 
9111 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
9112 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
9113 	    | I2CCMD_OPCODE_READ;
9114 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9115 
9116 	/* Poll the ready bit */
9117 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9118 		delay(50);
9119 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9120 		if (i2ccmd & I2CCMD_READY)
9121 			break;
9122 	}
9123 	if ((i2ccmd & I2CCMD_READY) == 0)
9124 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
9125 	if ((i2ccmd & I2CCMD_ERROR) != 0)
9126 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
9127 
9128 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
9129 
9130 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
9131 	return rv;
9132 }
9133 
9134 /*
9135  * wm_sgmii_writereg:	[mii interface function]
9136  *
9137  *	Write a PHY register on the SGMII.
9138  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
9140  */
9141 static void
9142 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
9143 {
9144 	struct wm_softc *sc = device_private(self);
9145 	uint32_t i2ccmd;
9146 	int i;
9147 	int val_swapped;
9148 
9149 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
9150 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9151 		    __func__);
9152 		return;
9153 	}
9154 	/* Swap the data bytes for the I2C interface */
9155 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
9156 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
9157 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
9158 	    | I2CCMD_OPCODE_WRITE | val_swapped;
9159 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9160 
9161 	/* Poll the ready bit */
9162 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9163 		delay(50);
9164 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9165 		if (i2ccmd & I2CCMD_READY)
9166 			break;
9167 	}
9168 	if ((i2ccmd & I2CCMD_READY) == 0)
9169 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
9170 	if ((i2ccmd & I2CCMD_ERROR) != 0)
9171 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
9172 
	/* Release the same per-function semaphore we acquired above */
	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
9174 }
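
/*
 * A note on the byte swaps in the two SGMII routines above: the I2CCMD
 * data field is transferred most-significant byte first on the I2C bus,
 * while MII register values are handled in host order, so both the read
 * and the write path swap the two data bytes.  For example, writing
 * val = 0x1234 places 0x3412 in the low 16 bits of I2CCMD, and a read
 * applies the inverse swap to the returned data.
 */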
9175 
9176 /* TBI related */
9177 
9178 /*
9179  * wm_tbi_mediainit:
9180  *
9181  *	Initialize media for use on 1000BASE-X devices.
9182  */
9183 static void
9184 wm_tbi_mediainit(struct wm_softc *sc)
9185 {
9186 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9187 	const char *sep = "";
9188 
9189 	if (sc->sc_type < WM_T_82543)
9190 		sc->sc_tipg = TIPG_WM_DFLT;
9191 	else
9192 		sc->sc_tipg = TIPG_LG_DFLT;
9193 
9194 	sc->sc_tbi_serdes_anegticks = 5;
9195 
9196 	/* Initialize our media structures */
9197 	sc->sc_mii.mii_ifp = ifp;
9198 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
9199 
9200 	if ((sc->sc_type >= WM_T_82575)
9201 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
9202 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
9203 		    wm_serdes_mediachange, wm_serdes_mediastatus);
9204 	else
9205 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
9206 		    wm_tbi_mediachange, wm_tbi_mediastatus);
9207 
9208 	/*
9209 	 * SWD Pins:
9210 	 *
9211 	 *	0 = Link LED (output)
9212 	 *	1 = Loss Of Signal (input)
9213 	 */
9214 	sc->sc_ctrl |= CTRL_SWDPIO(0);
9215 
9216 	/* XXX Perhaps this is only for TBI */
9217 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
9218 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
9219 
9220 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
9221 		sc->sc_ctrl &= ~CTRL_LRST;
9222 
9223 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9224 
9225 #define	ADD(ss, mm, dd)							\
9226 do {									\
9227 	aprint_normal("%s%s", sep, ss);					\
9228 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
9229 	sep = ", ";							\
9230 } while (/*CONSTCOND*/0)
9231 
9232 	aprint_normal_dev(sc->sc_dev, "");
9233 
9234 	/* Only 82545 is LX */
9235 	if (sc->sc_type == WM_T_82545) {
9236 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
9237 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
9238 	} else {
9239 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
9240 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
9241 	}
9242 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
9243 	aprint_normal("\n");
9244 
9245 #undef ADD
9246 
9247 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
9248 }
9249 
9250 /*
9251  * wm_tbi_mediachange:	[ifmedia interface function]
9252  *
9253  *	Set hardware to newly-selected media on a 1000BASE-X device.
9254  */
9255 static int
9256 wm_tbi_mediachange(struct ifnet *ifp)
9257 {
9258 	struct wm_softc *sc = ifp->if_softc;
9259 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9260 	uint32_t status;
9261 	int i;
9262 
9263 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
9264 		/* XXX need some work for >= 82571 and < 82575 */
9265 		if (sc->sc_type < WM_T_82575)
9266 			return 0;
9267 	}
9268 
9269 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
9270 	    || (sc->sc_type >= WM_T_82575))
9271 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
9272 
9273 	sc->sc_ctrl &= ~CTRL_LRST;
9274 	sc->sc_txcw = TXCW_ANE;
9275 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9276 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
9277 	else if (ife->ifm_media & IFM_FDX)
9278 		sc->sc_txcw |= TXCW_FD;
9279 	else
9280 		sc->sc_txcw |= TXCW_HD;
9281 
9282 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
9283 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
9284 
9285 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
9286 		    device_xname(sc->sc_dev), sc->sc_txcw));
9287 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9288 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9289 	CSR_WRITE_FLUSH(sc);
9290 	delay(1000);
9291 
9292 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
9293 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
9294 
	/*
	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
	 * if the optics detect a signal; on older chips a reading of 0
	 * indicates signal.
	 */
9299 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
9300 		/* Have signal; wait for the link to come up. */
9301 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
9302 			delay(10000);
9303 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
9304 				break;
9305 		}
9306 
9307 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
9308 			    device_xname(sc->sc_dev),i));
9309 
9310 		status = CSR_READ(sc, WMREG_STATUS);
9311 		DPRINTF(WM_DEBUG_LINK,
9312 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
9313 			device_xname(sc->sc_dev),status, STATUS_LU));
9314 		if (status & STATUS_LU) {
9315 			/* Link is up. */
9316 			DPRINTF(WM_DEBUG_LINK,
9317 			    ("%s: LINK: set media -> link up %s\n",
9318 			    device_xname(sc->sc_dev),
9319 			    (status & STATUS_FD) ? "FDX" : "HDX"));
9320 
			/*
			 * NOTE: the hardware updates TFCE and RFCE in CTRL
			 * automatically, so we should refresh sc->sc_ctrl
			 * from the register.
			 */
9325 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
9326 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
9327 			sc->sc_fcrtl &= ~FCRTL_XONE;
9328 			if (status & STATUS_FD)
9329 				sc->sc_tctl |=
9330 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
9331 			else
9332 				sc->sc_tctl |=
9333 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
9334 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
9335 				sc->sc_fcrtl |= FCRTL_XONE;
9336 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
9337 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
9338 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
9339 				      sc->sc_fcrtl);
9340 			sc->sc_tbi_linkup = 1;
9341 		} else {
9342 			if (i == WM_LINKUP_TIMEOUT)
9343 				wm_check_for_link(sc);
9344 			/* Link is down. */
9345 			DPRINTF(WM_DEBUG_LINK,
9346 			    ("%s: LINK: set media -> link down\n",
9347 			    device_xname(sc->sc_dev)));
9348 			sc->sc_tbi_linkup = 0;
9349 		}
9350 	} else {
9351 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
9352 		    device_xname(sc->sc_dev)));
9353 		sc->sc_tbi_linkup = 0;
9354 	}
9355 
9356 	wm_tbi_serdes_set_linkled(sc);
9357 
9358 	return 0;
9359 }
9360 
9361 /*
9362  * wm_tbi_mediastatus:	[ifmedia interface function]
9363  *
9364  *	Get the current interface media status on a 1000BASE-X device.
9365  */
9366 static void
9367 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9368 {
9369 	struct wm_softc *sc = ifp->if_softc;
9370 	uint32_t ctrl, status;
9371 
9372 	ifmr->ifm_status = IFM_AVALID;
9373 	ifmr->ifm_active = IFM_ETHER;
9374 
9375 	status = CSR_READ(sc, WMREG_STATUS);
9376 	if ((status & STATUS_LU) == 0) {
9377 		ifmr->ifm_active |= IFM_NONE;
9378 		return;
9379 	}
9380 
9381 	ifmr->ifm_status |= IFM_ACTIVE;
9382 	/* Only 82545 is LX */
9383 	if (sc->sc_type == WM_T_82545)
9384 		ifmr->ifm_active |= IFM_1000_LX;
9385 	else
9386 		ifmr->ifm_active |= IFM_1000_SX;
9387 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
9388 		ifmr->ifm_active |= IFM_FDX;
9389 	else
9390 		ifmr->ifm_active |= IFM_HDX;
9391 	ctrl = CSR_READ(sc, WMREG_CTRL);
9392 	if (ctrl & CTRL_RFCE)
9393 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
9394 	if (ctrl & CTRL_TFCE)
9395 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
9396 }
9397 
9398 /* XXX TBI only */
9399 static int
9400 wm_check_for_link(struct wm_softc *sc)
9401 {
9402 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9403 	uint32_t rxcw;
9404 	uint32_t ctrl;
9405 	uint32_t status;
9406 	uint32_t sig;
9407 
9408 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
9409 		/* XXX need some work for >= 82571 */
9410 		if (sc->sc_type >= WM_T_82571) {
9411 			sc->sc_tbi_linkup = 1;
9412 			return 0;
9413 		}
9414 	}
9415 
9416 	rxcw = CSR_READ(sc, WMREG_RXCW);
9417 	ctrl = CSR_READ(sc, WMREG_CTRL);
9418 	status = CSR_READ(sc, WMREG_STATUS);
9419 
9420 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
9421 
9422 	DPRINTF(WM_DEBUG_LINK,
9423 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
9424 		device_xname(sc->sc_dev), __func__,
9425 		((ctrl & CTRL_SWDPIN(1)) == sig),
9426 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
9427 
	/*
	 * SWDPIN   LU RXCW
	 *      0    0    0
	 *      0    0    1	(should not happen)
	 *      0    1    0	(should not happen)
	 *      0    1    1	(should not happen)
	 *      1    0    0	Disable autonegotiation and force link up
	 *      1    0    1	got /C/ but no link yet
	 *      1    1    0	(link up)
	 *      1    1    1	If IFM_AUTO, go back to autonegotiation
	 */
9440 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
9441 	    && ((status & STATUS_LU) == 0)
9442 	    && ((rxcw & RXCW_C) == 0)) {
9443 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
9444 			__func__));
9445 		sc->sc_tbi_linkup = 0;
9446 		/* Disable auto-negotiation in the TXCW register */
9447 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
9448 
		/*
		 * Force link-up and also force full-duplex.
		 *
		 * NOTE: the hardware updates TFCE and RFCE in CTRL
		 * automatically, so we should update sc->sc_ctrl from
		 * the value read above.
		 */
9455 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
9456 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9457 	} else if (((status & STATUS_LU) != 0)
9458 	    && ((rxcw & RXCW_C) != 0)
9459 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
9460 		sc->sc_tbi_linkup = 1;
9461 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
9462 			__func__));
9463 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9464 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
9465 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
9466 	    && ((rxcw & RXCW_C) != 0)) {
9467 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
9468 	} else {
9469 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
9470 			status));
9471 	}
9472 
9473 	return 0;
9474 }
9475 
9476 /*
9477  * wm_tbi_tick:
9478  *
9479  *	Check the link on TBI devices.
9480  *	This function acts as mii_tick().
9481  */
9482 static void
9483 wm_tbi_tick(struct wm_softc *sc)
9484 {
9485 	struct mii_data *mii = &sc->sc_mii;
9486 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9487 	uint32_t status;
9488 
9489 	KASSERT(WM_CORE_LOCKED(sc));
9490 
9491 	status = CSR_READ(sc, WMREG_STATUS);
9492 
9493 	/* XXX is this needed? */
9494 	(void)CSR_READ(sc, WMREG_RXCW);
9495 	(void)CSR_READ(sc, WMREG_CTRL);
9496 
9497 	/* set link status */
9498 	if ((status & STATUS_LU) == 0) {
9499 		DPRINTF(WM_DEBUG_LINK,
9500 		    ("%s: LINK: checklink -> down\n",
9501 			device_xname(sc->sc_dev)));
9502 		sc->sc_tbi_linkup = 0;
9503 	} else if (sc->sc_tbi_linkup == 0) {
9504 		DPRINTF(WM_DEBUG_LINK,
9505 		    ("%s: LINK: checklink -> up %s\n",
9506 			device_xname(sc->sc_dev),
9507 			(status & STATUS_FD) ? "FDX" : "HDX"));
9508 		sc->sc_tbi_linkup = 1;
9509 		sc->sc_tbi_serdes_ticks = 0;
9510 	}
9511 
9512 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
9513 		goto setled;
9514 
9515 	if ((status & STATUS_LU) == 0) {
9516 		sc->sc_tbi_linkup = 0;
9517 		/* If the timer expired, retry autonegotiation */
9518 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9519 		    && (++sc->sc_tbi_serdes_ticks
9520 			>= sc->sc_tbi_serdes_anegticks)) {
9521 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
9522 			sc->sc_tbi_serdes_ticks = 0;
9523 			/*
9524 			 * Reset the link, and let autonegotiation do
9525 			 * its thing
9526 			 */
9527 			sc->sc_ctrl |= CTRL_LRST;
9528 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9529 			CSR_WRITE_FLUSH(sc);
9530 			delay(1000);
9531 			sc->sc_ctrl &= ~CTRL_LRST;
9532 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9533 			CSR_WRITE_FLUSH(sc);
9534 			delay(1000);
9535 			CSR_WRITE(sc, WMREG_TXCW,
9536 			    sc->sc_txcw & ~TXCW_ANE);
9537 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9538 		}
9539 	}
9540 
9541 setled:
9542 	wm_tbi_serdes_set_linkled(sc);
9543 }
9544 
9545 /* SERDES related */
9546 static void
9547 wm_serdes_power_up_link_82575(struct wm_softc *sc)
9548 {
9549 	uint32_t reg;
9550 
9551 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
9552 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
9553 		return;
9554 
9555 	reg = CSR_READ(sc, WMREG_PCS_CFG);
9556 	reg |= PCS_CFG_PCS_EN;
9557 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
9558 
9559 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
9560 	reg &= ~CTRL_EXT_SWDPIN(3);
9561 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9562 	CSR_WRITE_FLUSH(sc);
9563 }
9564 
9565 static int
9566 wm_serdes_mediachange(struct ifnet *ifp)
9567 {
9568 	struct wm_softc *sc = ifp->if_softc;
9569 	bool pcs_autoneg = true; /* XXX */
9570 	uint32_t ctrl_ext, pcs_lctl, reg;
9571 
9572 	/* XXX Currently, this function is not called on 8257[12] */
9573 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
9574 	    || (sc->sc_type >= WM_T_82575))
9575 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
9576 
9577 	wm_serdes_power_up_link_82575(sc);
9578 
9579 	sc->sc_ctrl |= CTRL_SLU;
9580 
9581 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
9582 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
9583 
9584 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9585 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
9586 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
9587 	case CTRL_EXT_LINK_MODE_SGMII:
9588 		pcs_autoneg = true;
9589 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
9590 		break;
9591 	case CTRL_EXT_LINK_MODE_1000KX:
9592 		pcs_autoneg = false;
9593 		/* FALLTHROUGH */
9594 	default:
9595 		if ((sc->sc_type == WM_T_82575)
9596 		    || (sc->sc_type == WM_T_82576)) {
9597 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
9598 				pcs_autoneg = false;
9599 		}
9600 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
9601 		    | CTRL_FRCFDX;
9602 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
9603 	}
9604 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9605 
9606 	if (pcs_autoneg) {
9607 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
9608 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
9609 
9610 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
9611 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
9612 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
9613 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
9614 	} else
9615 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
9616 
9617 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
9618 
9619 
9620 	return 0;
9621 }
9622 
9623 static void
9624 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9625 {
9626 	struct wm_softc *sc = ifp->if_softc;
9627 	struct mii_data *mii = &sc->sc_mii;
9628 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9629 	uint32_t pcs_adv, pcs_lpab, reg;
9630 
9631 	ifmr->ifm_status = IFM_AVALID;
9632 	ifmr->ifm_active = IFM_ETHER;
9633 
9634 	/* Check PCS */
9635 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
9636 	if ((reg & PCS_LSTS_LINKOK) == 0) {
9637 		ifmr->ifm_active |= IFM_NONE;
9638 		sc->sc_tbi_linkup = 0;
9639 		goto setled;
9640 	}
9641 
9642 	sc->sc_tbi_linkup = 1;
9643 	ifmr->ifm_status |= IFM_ACTIVE;
9644 	ifmr->ifm_active |= IFM_1000_SX; /* XXX */
9645 	if ((reg & PCS_LSTS_FDX) != 0)
9646 		ifmr->ifm_active |= IFM_FDX;
9647 	else
9648 		ifmr->ifm_active |= IFM_HDX;
9649 	mii->mii_media_active &= ~IFM_ETH_FMASK;
9650 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
9651 		/* Check flow */
9652 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
9653 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
9654 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
9655 			goto setled;
9656 		}
9657 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
9658 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
9659 		DPRINTF(WM_DEBUG_LINK,
9660 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
9661 		if ((pcs_adv & TXCW_SYM_PAUSE)
9662 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
9663 			mii->mii_media_active |= IFM_FLOW
9664 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
9665 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
9666 		    && (pcs_adv & TXCW_ASYM_PAUSE)
9667 		    && (pcs_lpab & TXCW_SYM_PAUSE)
9668 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
9669 			mii->mii_media_active |= IFM_FLOW
9670 			    | IFM_ETH_TXPAUSE;
9671 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
9672 		    && (pcs_adv & TXCW_ASYM_PAUSE)
9673 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
9674 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
9675 			mii->mii_media_active |= IFM_FLOW
9676 			    | IFM_ETH_RXPAUSE;
		}
9679 	}
9680 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
9681 	    | (mii->mii_media_active & IFM_ETH_FMASK);
9682 setled:
9683 	wm_tbi_serdes_set_linkled(sc);
9684 }
9685 
9686 /*
9687  * wm_serdes_tick:
9688  *
9689  *	Check the link on serdes devices.
9690  */
9691 static void
9692 wm_serdes_tick(struct wm_softc *sc)
9693 {
9694 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9695 	struct mii_data *mii = &sc->sc_mii;
9696 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9697 	uint32_t reg;
9698 
9699 	KASSERT(WM_CORE_LOCKED(sc));
9700 
9701 	mii->mii_media_status = IFM_AVALID;
9702 	mii->mii_media_active = IFM_ETHER;
9703 
9704 	/* Check PCS */
9705 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
9706 	if ((reg & PCS_LSTS_LINKOK) != 0) {
9707 		mii->mii_media_status |= IFM_ACTIVE;
9708 		sc->sc_tbi_linkup = 1;
9709 		sc->sc_tbi_serdes_ticks = 0;
9710 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
9711 		if ((reg & PCS_LSTS_FDX) != 0)
9712 			mii->mii_media_active |= IFM_FDX;
9713 		else
9714 			mii->mii_media_active |= IFM_HDX;
9715 	} else {
9716 		mii->mii_media_status |= IFM_NONE;
9717 		sc->sc_tbi_linkup = 0;
		/* If the timer expired, retry autonegotiation */
9719 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9720 		    && (++sc->sc_tbi_serdes_ticks
9721 			>= sc->sc_tbi_serdes_anegticks)) {
9722 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
9723 			sc->sc_tbi_serdes_ticks = 0;
9724 			/* XXX */
9725 			wm_serdes_mediachange(ifp);
9726 		}
9727 	}
9728 
9729 	wm_tbi_serdes_set_linkled(sc);
9730 }
9731 
9732 /* SFP related */
9733 
9734 static int
9735 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
9736 {
9737 	uint32_t i2ccmd;
9738 	int i;
9739 
9740 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
9741 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9742 
9743 	/* Poll the ready bit */
9744 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9745 		delay(50);
9746 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9747 		if (i2ccmd & I2CCMD_READY)
9748 			break;
9749 	}
9750 	if ((i2ccmd & I2CCMD_READY) == 0)
9751 		return -1;
9752 	if ((i2ccmd & I2CCMD_ERROR) != 0)
9753 		return -1;
9754 
9755 	*data = i2ccmd & 0x00ff;
9756 
9757 	return 0;
9758 }
9759 
9760 static uint32_t
9761 wm_sfp_get_media_type(struct wm_softc *sc)
9762 {
9763 	uint32_t ctrl_ext;
9764 	uint8_t val = 0;
9765 	int timeout = 3;
9766 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
9767 	int rv = -1;
9768 
9769 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9770 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
9771 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
9772 	CSR_WRITE_FLUSH(sc);
9773 
9774 	/* Read SFP module data */
9775 	while (timeout) {
9776 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
9777 		if (rv == 0)
9778 			break;
9779 		delay(100*1000); /* XXX too big */
9780 		timeout--;
9781 	}
9782 	if (rv != 0)
9783 		goto out;
9784 	switch (val) {
9785 	case SFF_SFP_ID_SFF:
9786 		aprint_normal_dev(sc->sc_dev,
9787 		    "Module/Connector soldered to board\n");
9788 		break;
9789 	case SFF_SFP_ID_SFP:
9790 		aprint_normal_dev(sc->sc_dev, "SFP\n");
9791 		break;
9792 	case SFF_SFP_ID_UNKNOWN:
9793 		goto out;
9794 	default:
9795 		break;
9796 	}
9797 
9798 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
9799 	if (rv != 0) {
9800 		goto out;
9801 	}
9802 
9803 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
9804 		mediatype = WM_MEDIATYPE_SERDES;
	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
		sc->sc_flags |= WM_F_SGMII;
		mediatype = WM_MEDIATYPE_COPPER;
	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
9809 		sc->sc_flags |= WM_F_SGMII;
9810 		mediatype = WM_MEDIATYPE_SERDES;
9811 	}
9812 
9813 out:
9814 	/* Restore I2C interface setting */
9815 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
9816 
9817 	return mediatype;
}

9819 /*
9820  * NVM related.
9821  * Microwire, SPI (w/wo EERD) and Flash.
9822  */
9823 
9824 /* Both spi and uwire */
9825 
9826 /*
9827  * wm_eeprom_sendbits:
9828  *
9829  *	Send a series of bits to the EEPROM.
9830  */
9831 static void
9832 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
9833 {
9834 	uint32_t reg;
9835 	int x;
9836 
9837 	reg = CSR_READ(sc, WMREG_EECD);
9838 
9839 	for (x = nbits; x > 0; x--) {
9840 		if (bits & (1U << (x - 1)))
9841 			reg |= EECD_DI;
9842 		else
9843 			reg &= ~EECD_DI;
9844 		CSR_WRITE(sc, WMREG_EECD, reg);
9845 		CSR_WRITE_FLUSH(sc);
9846 		delay(2);
9847 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
9848 		CSR_WRITE_FLUSH(sc);
9849 		delay(2);
9850 		CSR_WRITE(sc, WMREG_EECD, reg);
9851 		CSR_WRITE_FLUSH(sc);
9852 		delay(2);
9853 	}
9854 }
9855 
9856 /*
9857  * wm_eeprom_recvbits:
9858  *
9859  *	Receive a series of bits from the EEPROM.
9860  */
9861 static void
9862 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
9863 {
9864 	uint32_t reg, val;
9865 	int x;
9866 
9867 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
9868 
9869 	val = 0;
9870 	for (x = nbits; x > 0; x--) {
9871 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
9872 		CSR_WRITE_FLUSH(sc);
9873 		delay(2);
9874 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
9875 			val |= (1U << (x - 1));
9876 		CSR_WRITE(sc, WMREG_EECD, reg);
9877 		CSR_WRITE_FLUSH(sc);
9878 		delay(2);
9879 	}
9880 	*valp = val;
9881 }
9882 
9883 /* Microwire */
9884 
9885 /*
9886  * wm_nvm_read_uwire:
9887  *
 *	Read word(s) from the EEPROM using the MicroWire protocol.
9889  */
9890 static int
9891 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
9892 {
9893 	uint32_t reg, val;
9894 	int i;
9895 
9896 	for (i = 0; i < wordcnt; i++) {
9897 		/* Clear SK and DI. */
9898 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
9899 		CSR_WRITE(sc, WMREG_EECD, reg);
9900 
9901 		/*
9902 		 * XXX: workaround for a bug in qemu-0.12.x and prior
9903 		 * and Xen.
9904 		 *
		 * We use this workaround only for the 82540 because qemu's
		 * e1000 acts as an 82540.
9907 		 */
9908 		if (sc->sc_type == WM_T_82540) {
9909 			reg |= EECD_SK;
9910 			CSR_WRITE(sc, WMREG_EECD, reg);
9911 			reg &= ~EECD_SK;
9912 			CSR_WRITE(sc, WMREG_EECD, reg);
9913 			CSR_WRITE_FLUSH(sc);
9914 			delay(2);
9915 		}
9916 		/* XXX: end of workaround */
9917 
9918 		/* Set CHIP SELECT. */
9919 		reg |= EECD_CS;
9920 		CSR_WRITE(sc, WMREG_EECD, reg);
9921 		CSR_WRITE_FLUSH(sc);
9922 		delay(2);
9923 
9924 		/* Shift in the READ command. */
9925 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
9926 
9927 		/* Shift in address. */
9928 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
9929 
9930 		/* Shift out the data. */
9931 		wm_eeprom_recvbits(sc, &val, 16);
9932 		data[i] = val & 0xffff;
9933 
9934 		/* Clear CHIP SELECT. */
9935 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
9936 		CSR_WRITE(sc, WMREG_EECD, reg);
9937 		CSR_WRITE_FLUSH(sc);
9938 		delay(2);
9939 	}
9940 
9941 	return 0;
9942 }
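
/*
 * Illustrative MicroWire transaction (comment-only sketch, not
 * compiled): reading word 0 from a part with 6 address bits clocks out
 * the 3-bit READ opcode and the address via wm_eeprom_sendbits(), then
 * shifts 16 data bits back in with wm_eeprom_recvbits().  This assumes
 * UWIRE_OPC_READ is the standard 110b MicroWire read opcode:
 *
 *	wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);	// 110b
 *	wm_eeprom_sendbits(sc, 0, 6);			// word address 0
 *	wm_eeprom_recvbits(sc, &val, 16);		// data, MSB first
 *
 * Chip select must be asserted before and deasserted after the
 * transaction, as the loop above does for each word.
 */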
9943 
9944 /* SPI */
9945 
9946 /*
9947  * Set SPI and FLASH related information from the EECD register.
9948  * For 82541 and 82547, the word size is taken from EEPROM.
9949  */
9950 static int
9951 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
9952 {
9953 	int size;
9954 	uint32_t reg;
9955 	uint16_t data;
9956 
9957 	reg = CSR_READ(sc, WMREG_EECD);
9958 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
9959 
9960 	/* Read the size of NVM from EECD by default */
9961 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
9962 	switch (sc->sc_type) {
9963 	case WM_T_82541:
9964 	case WM_T_82541_2:
9965 	case WM_T_82547:
9966 	case WM_T_82547_2:
9967 		/* Set dummy value to access EEPROM */
9968 		sc->sc_nvm_wordsize = 64;
9969 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
9970 		reg = data;
9971 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
9972 		if (size == 0)
9973 			size = 6; /* 64 word size */
9974 		else
9975 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
9976 		break;
9977 	case WM_T_80003:
9978 	case WM_T_82571:
9979 	case WM_T_82572:
9980 	case WM_T_82573: /* SPI case */
9981 	case WM_T_82574: /* SPI case */
9982 	case WM_T_82583: /* SPI case */
9983 		size += NVM_WORD_SIZE_BASE_SHIFT;
9984 		if (size > 14)
9985 			size = 14;
9986 		break;
9987 	case WM_T_82575:
9988 	case WM_T_82576:
9989 	case WM_T_82580:
9990 	case WM_T_I350:
9991 	case WM_T_I354:
9992 	case WM_T_I210:
9993 	case WM_T_I211:
9994 		size += NVM_WORD_SIZE_BASE_SHIFT;
9995 		if (size > 15)
9996 			size = 15;
9997 		break;
9998 	default:
9999 		aprint_error_dev(sc->sc_dev,
10000 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
		return -1;
10003 	}
10004 
10005 	sc->sc_nvm_wordsize = 1 << size;
10006 
10007 	return 0;
10008 }
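
/*
 * Worked example of the size computation above, assuming
 * NVM_WORD_SIZE_BASE_SHIFT is 6 as in the Intel shared code: on an
 * 82571 whose EECD_EE_SIZE_EX field reads 2, size becomes 2 + 6 = 8,
 * so sc_nvm_wordsize is 1 << 8 = 256 16-bit words.  The clamps to 14
 * and 15 cap the NVM at 16K and 32K words respectively.
 */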
10009 
10010 /*
10011  * wm_nvm_ready_spi:
10012  *
10013  *	Wait for a SPI EEPROM to be ready for commands.
10014  */
10015 static int
10016 wm_nvm_ready_spi(struct wm_softc *sc)
10017 {
10018 	uint32_t val;
10019 	int usec;
10020 
10021 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
10022 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
10023 		wm_eeprom_recvbits(sc, &val, 8);
10024 		if ((val & SPI_SR_RDY) == 0)
10025 			break;
10026 	}
10027 	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev,
		    "EEPROM failed to become ready\n");
10029 		return 1;
10030 	}
10031 	return 0;
10032 }
10033 
10034 /*
10035  * wm_nvm_read_spi:
10036  *
 *	Read word(s) from the EEPROM using the SPI protocol.
10038  */
10039 static int
10040 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
10041 {
10042 	uint32_t reg, val;
10043 	int i;
10044 	uint8_t opc;
10045 
10046 	/* Clear SK and CS. */
10047 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
10048 	CSR_WRITE(sc, WMREG_EECD, reg);
10049 	CSR_WRITE_FLUSH(sc);
10050 	delay(2);
10051 
10052 	if (wm_nvm_ready_spi(sc))
10053 		return 1;
10054 
10055 	/* Toggle CS to flush commands. */
10056 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
10057 	CSR_WRITE_FLUSH(sc);
10058 	delay(2);
10059 	CSR_WRITE(sc, WMREG_EECD, reg);
10060 	CSR_WRITE_FLUSH(sc);
10061 	delay(2);
10062 
10063 	opc = SPI_OPC_READ;
10064 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
10065 		opc |= SPI_OPC_A8;
10066 
10067 	wm_eeprom_sendbits(sc, opc, 8);
10068 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
10069 
10070 	for (i = 0; i < wordcnt; i++) {
10071 		wm_eeprom_recvbits(sc, &val, 16);
10072 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
10073 	}
10074 
10075 	/* Raise CS and clear SK. */
10076 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
10077 	CSR_WRITE(sc, WMREG_EECD, reg);
10078 	CSR_WRITE_FLUSH(sc);
10079 	delay(2);
10080 
10081 	return 0;
10082 }
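
/*
 * Illustrative SPI transaction (comment-only sketch, not compiled):
 * reading two words starting at word 0x10 from a 16-bit-address part
 * sends the READ opcode and the byte address (word address << 1, hence
 * the shift above), then clocks in 16 bits per word, which arrive
 * byte-swapped.  This assumes SPI_OPC_READ is the standard 0x03 SPI
 * EEPROM read opcode:
 *
 *	wm_eeprom_sendbits(sc, SPI_OPC_READ, 8);	// 0x03
 *	wm_eeprom_sendbits(sc, 0x10 << 1, 16);		// byte address 0x20
 *	wm_eeprom_recvbits(sc, &val, 16);		// first word
 *	wm_eeprom_recvbits(sc, &val, 16);		// second word
 *
 * SPI_OPC_A8 carries the ninth address bit in the opcode for small
 * parts that use 8 address bits, as the check above implements.
 */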
10083 
/* Reading via the EERD register */
10085 
10086 static int
10087 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
10088 {
10089 	uint32_t attempts = 100000;
10090 	uint32_t i, reg = 0;
10091 	int32_t done = -1;
10092 
10093 	for (i = 0; i < attempts; i++) {
10094 		reg = CSR_READ(sc, rw);
10095 
10096 		if (reg & EERD_DONE) {
10097 			done = 0;
10098 			break;
10099 		}
10100 		delay(5);
10101 	}
10102 
10103 	return done;
10104 }
10105 
10106 static int
10107 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
10108     uint16_t *data)
10109 {
10110 	int i, eerd = 0;
10111 	int error = 0;
10112 
10113 	for (i = 0; i < wordcnt; i++) {
10114 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
10115 
10116 		CSR_WRITE(sc, WMREG_EERD, eerd);
10117 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
10118 		if (error != 0)
10119 			break;
10120 
10121 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
10122 	}
10123 
10124 	return error;
10125 }
10126 
10127 /* Flash */
10128 
10129 static int
10130 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
10131 {
10132 	uint32_t eecd;
10133 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
10134 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
10135 	uint8_t sig_byte = 0;
10136 
10137 	switch (sc->sc_type) {
10138 	case WM_T_PCH_SPT:
10139 		/*
10140 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
10141 		 * sector valid bits from the NVM.
10142 		 */
10143 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
10144 		if ((*bank == 0) || (*bank == 1)) {
			aprint_error_dev(sc->sc_dev,
			    "%s: no valid NVM bank present\n", __func__);
10148 			return -1;
10149 		} else {
10150 			*bank = *bank - 2;
10151 			return 0;
10152 		}
10153 	case WM_T_ICH8:
10154 	case WM_T_ICH9:
10155 		eecd = CSR_READ(sc, WMREG_EECD);
10156 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
10157 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
10158 			return 0;
10159 		}
10160 		/* FALLTHROUGH */
10161 	default:
10162 		/* Default to 0 */
10163 		*bank = 0;
10164 
10165 		/* Check bank 0 */
10166 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
10167 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
10168 			*bank = 0;
10169 			return 0;
10170 		}
10171 
10172 		/* Check bank 1 */
10173 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
10174 		    &sig_byte);
10175 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
10176 			*bank = 1;
10177 			return 0;
10178 		}
10179 	}
10180 
10181 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
10182 		device_xname(sc->sc_dev)));
10183 	return -1;
10184 }
10185 
10186 /******************************************************************************
10187  * This function does initial flash setup so that a new read/write/erase cycle
10188  * can be started.
10189  *
10190  * sc - The pointer to the hw structure
10191  ****************************************************************************/
10192 static int32_t
10193 wm_ich8_cycle_init(struct wm_softc *sc)
10194 {
10195 	uint16_t hsfsts;
10196 	int32_t error = 1;
10197 	int32_t i     = 0;
10198 
10199 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10200 
	/* Check the Flash Descriptor Valid bit in HW status */
	if ((hsfsts & HSFSTS_FLDVAL) == 0)
		return error;
10205 
	/* Clear FCERR and DAEL in HW status by writing ones */
10208 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
10209 
10210 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10211 
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against before starting a new cycle, or the FDONE bit
	 * should be set to 1 by a hardware reset so that it can be used to
	 * tell whether a cycle is in progress or has completed.  We should
	 * also have a software semaphore guarding FDONE or the
	 * cycle-in-progress bit, so that accesses by two threads are
	 * serialized and they don't start a cycle at the same time.
	 */
10222 
10223 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
10224 		/*
10225 		 * There is no cycle running at present, so we can start a
10226 		 * cycle
10227 		 */
10228 
10229 		/* Begin by setting Flash Cycle Done. */
10230 		hsfsts |= HSFSTS_DONE;
10231 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10232 		error = 0;
10233 	} else {
10234 		/*
		 * Otherwise, poll for some time so the current cycle has a
		 * chance to end before giving up.
10237 		 */
10238 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
10239 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10240 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
10241 				error = 0;
10242 				break;
10243 			}
10244 			delay(1);
10245 		}
10246 		if (error == 0) {
10247 			/*
			 * The previous cycle has ended; now set the
			 * Flash Cycle Done bit.
10250 			 */
10251 			hsfsts |= HSFSTS_DONE;
10252 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10253 		}
10254 	}
10255 	return error;
10256 }
10257 
10258 /******************************************************************************
10259  * This function starts a flash cycle and waits for its completion
10260  *
10261  * sc - The pointer to the hw structure
10262  ****************************************************************************/
10263 static int32_t
10264 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
10265 {
10266 	uint16_t hsflctl;
10267 	uint16_t hsfsts;
10268 	int32_t error = 1;
10269 	uint32_t i = 0;
10270 
10271 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
10272 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
10273 	hsflctl |= HSFCTL_GO;
10274 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
10275 
10276 	/* Wait till FDONE bit is set to 1 */
10277 	do {
10278 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10279 		if (hsfsts & HSFSTS_DONE)
10280 			break;
10281 		delay(1);
10282 		i++;
10283 	} while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
10285 		error = 0;
10286 
10287 	return error;
10288 }
10289 
10290 /******************************************************************************
10291  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
10292  *
10293  * sc - The pointer to the hw structure
10294  * index - The index of the byte or word to read.
 * size - Size of data to read: 1 = byte, 2 = word, 4 = dword
10296  * data - Pointer to the word to store the value read.
10297  *****************************************************************************/
10298 static int32_t
10299 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
10300     uint32_t size, uint32_t *data)
10301 {
10302 	uint16_t hsfsts;
10303 	uint16_t hsflctl;
10304 	uint32_t flash_linear_address;
10305 	uint32_t flash_data = 0;
10306 	int32_t error = 1;
10307 	int32_t count = 0;
10308 
	if (size < 1 || size > 4 || data == NULL ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
10311 		return error;
10312 
10313 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
10314 	    sc->sc_ich8_flash_base;
10315 
10316 	do {
10317 		delay(1);
10318 		/* Steps */
10319 		error = wm_ich8_cycle_init(sc);
10320 		if (error)
10321 			break;
10322 
10323 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* BCOUNT holds the byte count minus one (0 = 1 byte). */
10325 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
10326 		    & HSFCTL_BCOUNT_MASK;
10327 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
10328 		if (sc->sc_type == WM_T_PCH_SPT) {
10329 			/*
			 * In SPT, this register is in the LAN memory space,
			 * not in flash, so only 32-bit accesses are supported.
10332 			 */
10333 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
10334 			    (uint32_t)hsflctl);
10335 		} else
10336 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
10337 
10338 		/*
10339 		 * Write the last 24 bits of index into Flash Linear address
10340 		 * field in Flash Address
10341 		 */
10342 		/* TODO: TBD maybe check the index against the size of flash */
10343 
10344 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
10345 
10346 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
10347 
		/*
		 * If FCERR is set, clear it and retry the whole sequence
		 * a few more times; otherwise read (shift in) the data from
		 * Flash Data0, least significant byte first.
		 */
10354 		if (error == 0) {
10355 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
10356 			if (size == 1)
10357 				*data = (uint8_t)(flash_data & 0x000000FF);
10358 			else if (size == 2)
10359 				*data = (uint16_t)(flash_data & 0x0000FFFF);
10360 			else if (size == 4)
10361 				*data = (uint32_t)flash_data;
10362 			break;
10363 		} else {
10364 			/*
10365 			 * If we've gotten here, then things are probably
10366 			 * completely hosed, but if the error condition is
10367 			 * detected, it won't hurt to give it another try...
10368 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
10369 			 */
10370 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10371 			if (hsfsts & HSFSTS_ERR) {
10372 				/* Repeat for some time before giving up. */
10373 				continue;
10374 			} else if ((hsfsts & HSFSTS_DONE) == 0)
10375 				break;
10376 		}
10377 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
10378 
10379 	return error;
10380 }
10381 
10382 /******************************************************************************
10383  * Reads a single byte from the NVM using the ICH8 flash access registers.
10384  *
10385  * sc - pointer to wm_hw structure
10386  * index - The index of the byte to read.
10387  * data - Pointer to a byte to store the value read.
10388  *****************************************************************************/
10389 static int32_t
wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t *data)
10391 {
10392 	int32_t status;
10393 	uint32_t word = 0;
10394 
10395 	status = wm_read_ich8_data(sc, index, 1, &word);
10396 	if (status == 0)
10397 		*data = (uint8_t)word;
10398 	else
10399 		*data = 0;
10400 
10401 	return status;
10402 }
10403 
10404 /******************************************************************************
10405  * Reads a word from the NVM using the ICH8 flash access registers.
10406  *
10407  * sc - pointer to wm_hw structure
10408  * index - The starting byte index of the word to read.
10409  * data - Pointer to a word to store the value read.
10410  *****************************************************************************/
10411 static int32_t
10412 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
10413 {
10414 	int32_t status;
10415 	uint32_t word = 0;
10416 
10417 	status = wm_read_ich8_data(sc, index, 2, &word);
10418 	if (status == 0)
10419 		*data = (uint16_t)word;
10420 	else
10421 		*data = 0;
10422 
10423 	return status;
10424 }
10425 
10426 /******************************************************************************
10427  * Reads a dword from the NVM using the ICH8 flash access registers.
10428  *
10429  * sc - pointer to wm_hw structure
 * index - The starting byte index of the dword to read.
 * data - Pointer to a dword to store the value read.
10432  *****************************************************************************/
10433 static int32_t
10434 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
10435 {
10436 	int32_t status;
10437 
10438 	status = wm_read_ich8_data(sc, index, 4, data);
10439 	return status;
10440 }
10441 
10442 /******************************************************************************
10443  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
10444  * register.
10445  *
10446  * sc - Struct containing variables accessed by shared code
10447  * offset - offset of word in the EEPROM to read
10448  * data - word read from the EEPROM
10449  * words - number of words to read
10450  *****************************************************************************/
10451 static int
10452 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
10453 {
10454 	int32_t  error = 0;
10455 	uint32_t flash_bank = 0;
10456 	uint32_t act_offset = 0;
10457 	uint32_t bank_offset = 0;
10458 	uint16_t word = 0;
10459 	uint16_t i = 0;
10460 
10461 	/*
10462 	 * We need to know which is the valid flash bank.  In the event
10463 	 * that we didn't allocate eeprom_shadow_ram, we may not be
10464 	 * managing flash_bank.  So it cannot be trusted and needs
10465 	 * to be updated with each read.
10466 	 */
10467 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
10468 	if (error) {
10469 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
10470 			device_xname(sc->sc_dev)));
10471 		flash_bank = 0;
10472 	}
10473 
10474 	/*
10475 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
10476 	 * size
10477 	 */
10478 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
10479 
10480 	error = wm_get_swfwhw_semaphore(sc);
10481 	if (error) {
10482 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10483 		    __func__);
10484 		return error;
10485 	}
10486 
10487 	for (i = 0; i < words; i++) {
10488 		/* The NVM part needs a byte offset, hence * 2 */
10489 		act_offset = bank_offset + ((offset + i) * 2);
10490 		error = wm_read_ich8_word(sc, act_offset, &word);
10491 		if (error) {
10492 			aprint_error_dev(sc->sc_dev,
10493 			    "%s: failed to read NVM\n", __func__);
10494 			break;
10495 		}
10496 		data[i] = word;
10497 	}
10498 
10499 	wm_put_swfwhw_semaphore(sc);
10500 	return error;
10501 }
10502 
10503 /******************************************************************************
10504  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
10505  * register.
10506  *
10507  * sc - Struct containing variables accessed by shared code
10508  * offset - offset of word in the EEPROM to read
10509  * data - word read from the EEPROM
10510  * words - number of words to read
10511  *****************************************************************************/
10512 static int
10513 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
10514 {
10515 	int32_t  error = 0;
10516 	uint32_t flash_bank = 0;
10517 	uint32_t act_offset = 0;
10518 	uint32_t bank_offset = 0;
10519 	uint32_t dword = 0;
10520 	uint16_t i = 0;
10521 
10522 	/*
10523 	 * We need to know which is the valid flash bank.  In the event
10524 	 * that we didn't allocate eeprom_shadow_ram, we may not be
10525 	 * managing flash_bank.  So it cannot be trusted and needs
10526 	 * to be updated with each read.
10527 	 */
10528 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
10529 	if (error) {
10530 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
10531 			device_xname(sc->sc_dev)));
10532 		flash_bank = 0;
10533 	}
10534 
10535 	/*
10536 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
10537 	 * size
10538 	 */
10539 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
10540 
10541 	error = wm_get_swfwhw_semaphore(sc);
10542 	if (error) {
10543 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10544 		    __func__);
10545 		return error;
10546 	}
10547 
10548 	for (i = 0; i < words; i++) {
10549 		/* The NVM part needs a byte offset, hence * 2 */
10550 		act_offset = bank_offset + ((offset + i) * 2);
10551 		/* but we must read dword aligned, so mask ... */
10552 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
10553 		if (error) {
10554 			aprint_error_dev(sc->sc_dev,
10555 			    "%s: failed to read NVM\n", __func__);
10556 			break;
10557 		}
10558 		/* ... and pick out low or high word */
10559 		if ((act_offset & 0x2) == 0)
10560 			data[i] = (uint16_t)(dword & 0xFFFF);
10561 		else
10562 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
10563 	}
10564 
10565 	wm_put_swfwhw_semaphore(sc);
10566 	return error;
10567 }
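
/*
 * Worked example of the alignment dance above: reading NVM word offset
 * 0x81 in bank 0 gives act_offset 0x102, so the code fetches the dword
 * at byte address 0x100 and, because bit 1 of act_offset is set, returns
 * its high 16 bits; word 0x80 (act_offset 0x100) would come from the low
 * half of the same dword.
 */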
10568 
10569 /* iNVM */
10570 
10571 static int
10572 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
10573 {
	int32_t  rv = -1;	/* -1 = not found unless set below */
10575 	uint32_t invm_dword;
10576 	uint16_t i;
10577 	uint8_t record_type, word_address;
10578 
10579 	for (i = 0; i < INVM_SIZE; i++) {
10580 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
10581 		/* Get record type */
10582 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
10583 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
10584 			break;
10585 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
10586 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
10587 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
10588 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
10589 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
10590 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
10591 			if (word_address == address) {
10592 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
10593 				rv = 0;
10594 				break;
10595 			}
10596 		}
10597 	}
10598 
10599 	return rv;
10600 }
10601 
10602 static int
10603 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
10604 {
10605 	int rv = 0;
10606 	int i;
10607 
10608 	for (i = 0; i < words; i++) {
10609 		switch (offset + i) {
10610 		case NVM_OFF_MACADDR:
10611 		case NVM_OFF_MACADDR1:
10612 		case NVM_OFF_MACADDR2:
10613 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
10614 			if (rv != 0) {
10615 				data[i] = 0xffff;
10616 				rv = -1;
10617 			}
10618 			break;
10619 		case NVM_OFF_CFG2:
10620 			rv = wm_nvm_read_word_invm(sc, offset, data);
10621 			if (rv != 0) {
10622 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
10623 				rv = 0;
10624 			}
10625 			break;
10626 		case NVM_OFF_CFG4:
10627 			rv = wm_nvm_read_word_invm(sc, offset, data);
10628 			if (rv != 0) {
10629 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
10630 				rv = 0;
10631 			}
10632 			break;
10633 		case NVM_OFF_LED_1_CFG:
10634 			rv = wm_nvm_read_word_invm(sc, offset, data);
10635 			if (rv != 0) {
10636 				*data = NVM_LED_1_CFG_DEFAULT_I211;
10637 				rv = 0;
10638 			}
10639 			break;
10640 		case NVM_OFF_LED_0_2_CFG:
10641 			rv = wm_nvm_read_word_invm(sc, offset, data);
10642 			if (rv != 0) {
10643 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
10644 				rv = 0;
10645 			}
10646 			break;
10647 		case NVM_OFF_ID_LED_SETTINGS:
10648 			rv = wm_nvm_read_word_invm(sc, offset, data);
10649 			if (rv != 0) {
10650 				*data = ID_LED_RESERVED_FFFF;
10651 				rv = 0;
10652 			}
10653 			break;
10654 		default:
10655 			DPRINTF(WM_DEBUG_NVM,
10656 			    ("NVM word 0x%02x is not mapped.\n", offset));
10657 			*data = NVM_RESERVED_WORD;
10658 			break;
10659 		}
10660 	}
10661 
10662 	return rv;
10663 }
10664 
10665 /* Lock, detecting NVM type, validate checksum, version and read */
10666 
10667 /*
10668  * wm_nvm_acquire:
10669  *
10670  *	Perform the EEPROM handshake required on some chips.
10671  */
10672 static int
10673 wm_nvm_acquire(struct wm_softc *sc)
10674 {
10675 	uint32_t reg;
10676 	int x;
10677 	int ret = 0;
10678 
	/* Always succeeds: flash-type NVM needs no handshake here */
10680 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
10681 		return 0;
10682 
10683 	if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
10684 		ret = wm_get_swfwhw_semaphore(sc);
10685 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
10686 		/* This will also do wm_get_swsm_semaphore() if needed */
10687 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
10688 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
10689 		ret = wm_get_swsm_semaphore(sc);
10690 	}
10691 
10692 	if (ret) {
10693 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10694 			__func__);
10695 		return 1;
10696 	}
10697 
10698 	if (sc->sc_flags & WM_F_LOCK_EECD) {
10699 		reg = CSR_READ(sc, WMREG_EECD);
10700 
10701 		/* Request EEPROM access. */
10702 		reg |= EECD_EE_REQ;
10703 		CSR_WRITE(sc, WMREG_EECD, reg);
10704 
10705 		/* ...and wait for it to be granted. */
10706 		for (x = 0; x < 1000; x++) {
10707 			reg = CSR_READ(sc, WMREG_EECD);
10708 			if (reg & EECD_EE_GNT)
10709 				break;
10710 			delay(5);
10711 		}
10712 		if ((reg & EECD_EE_GNT) == 0) {
10713 			aprint_error_dev(sc->sc_dev,
10714 			    "could not acquire EEPROM GNT\n");
10715 			reg &= ~EECD_EE_REQ;
10716 			CSR_WRITE(sc, WMREG_EECD, reg);
10717 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
10718 				wm_put_swfwhw_semaphore(sc);
10719 			if (sc->sc_flags & WM_F_LOCK_SWFW)
10720 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
10721 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
10722 				wm_put_swsm_semaphore(sc);
10723 			return 1;
10724 		}
10725 	}
10726 
10727 	return 0;
10728 }
10729 
10730 /*
10731  * wm_nvm_release:
10732  *
10733  *	Release the EEPROM mutex.
10734  */
10735 static void
10736 wm_nvm_release(struct wm_softc *sc)
10737 {
10738 	uint32_t reg;
10739 
10740 	/* Flash type requires no handshake; nothing to release */
10741 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
10742 		return;
10743 
10744 	if (sc->sc_flags & WM_F_LOCK_EECD) {
10745 		reg = CSR_READ(sc, WMREG_EECD);
10746 		reg &= ~EECD_EE_REQ;
10747 		CSR_WRITE(sc, WMREG_EECD, reg);
10748 	}
10749 
10750 	if (sc->sc_flags & WM_F_LOCK_EXTCNF)
10751 		wm_put_swfwhw_semaphore(sc);
10752 	if (sc->sc_flags & WM_F_LOCK_SWFW)
10753 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
10754 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
10755 		wm_put_swsm_semaphore(sc);
10756 }
10757 
10758 static int
10759 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
10760 {
10761 	uint32_t eecd = 0;
10762 
10763 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
10764 	    || sc->sc_type == WM_T_82583) {
10765 		eecd = CSR_READ(sc, WMREG_EECD);
10766 
10767 		/* Isolate bits 15 & 16 */
10768 		eecd = ((eecd >> 15) & 0x03);
10769 
10770 		/* If both bits are set, device is Flash type */
10771 		if (eecd == 0x03)
10772 			return 0;
10773 	}
10774 	return 1;
10775 }
10776 
10777 static int
10778 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
10779 {
10780 	uint32_t eec;
10781 
10782 	eec = CSR_READ(sc, WMREG_EEC);
10783 	if ((eec & EEC_FLASH_DETECTED) != 0)
10784 		return 1;
10785 
10786 	return 0;
10787 }
10788 
10789 /*
10790  * wm_nvm_validate_checksum
10791  *
10792  * The checksum is defined as the sum of the first 64 (16 bit) words.
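 *
 * The sum of all 64 words must equal NVM_CHECKSUM (0xBABA).  Note that
 * a mismatch is only reported under WM_DEBUG; it is not treated as
 * fatal here.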
10793  */
10794 static int
10795 wm_nvm_validate_checksum(struct wm_softc *sc)
10796 {
10797 	uint16_t checksum;
10798 	uint16_t eeprom_data;
10799 #ifdef WM_DEBUG
10800 	uint16_t csum_wordaddr, valid_checksum;
10801 #endif
10802 	int i;
10803 
10804 	checksum = 0;
10805 
10806 	/* Don't check for I211 */
10807 	if (sc->sc_type == WM_T_I211)
10808 		return 0;
10809 
10810 #ifdef WM_DEBUG
10811 	if (sc->sc_type == WM_T_PCH_LPT) {
10812 		csum_wordaddr = NVM_OFF_COMPAT;
10813 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
10814 	} else {
10815 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
10816 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
10817 	}
10818 
10819 	/* Dump EEPROM image for debug */
10820 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
10821 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
10822 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
10823 		/* XXX PCH_SPT? */
10824 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
10825 		if ((eeprom_data & valid_checksum) == 0) {
10826 			DPRINTF(WM_DEBUG_NVM,
10827 			    ("%s: NVM need to be updated (%04x != %04x)\n",
10828 				device_xname(sc->sc_dev), eeprom_data,
10829 				    valid_checksum));
10830 		}
10831 	}
10832 
10833 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
10834 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
10835 		for (i = 0; i < NVM_SIZE; i++) {
10836 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
10837 				printf("XXXX ");
10838 			else
10839 				printf("%04hx ", eeprom_data);
10840 			if (i % 8 == 7)
10841 				printf("\n");
10842 		}
10843 	}
10844 
10845 #endif /* WM_DEBUG */
10846 
10847 	for (i = 0; i < NVM_SIZE; i++) {
10848 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
10849 			return 1;
10850 		checksum += eeprom_data;
10851 	}
10852 
10853 	if (checksum != (uint16_t) NVM_CHECKSUM) {
10854 #ifdef WM_DEBUG
10855 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
10856 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
10857 #endif
10858 	}
10859 
10860 	return 0;
10861 }
10862 
10863 static void
10864 wm_nvm_version_invm(struct wm_softc *sc)
10865 {
10866 	uint32_t dword;
10867 
10868 	/*
10869 	 * Linux's code to decode the version is very strange, so we
10870 	 * don't follow that algorithm and instead decode word 61 as the
10871 	 * documentation describes.  It may not be perfect, though...
10872 	 *
10873 	 * Example:
10874 	 *
10875 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
10876 	 */
10877 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
10878 	dword = __SHIFTOUT(dword, INVM_VER_1);
10879 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
10880 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
10881 }
10882 
10883 static void
10884 wm_nvm_version(struct wm_softc *sc)
10885 {
10886 	uint16_t major, minor, build, patch;
10887 	uint16_t uid0, uid1;
10888 	uint16_t nvm_data;
10889 	uint16_t off;
10890 	bool check_version = false;
10891 	bool check_optionrom = false;
10892 	bool have_build = false;
10893 
10894 	/*
10895 	 * Version format:
10896 	 *
10897 	 * XYYZ
10898 	 * X0YZ
10899 	 * X0YY
10900 	 *
10901 	 * Example:
10902 	 *
10903 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
10904 	 *	82571	0x50a6	5.10.6?
10905 	 *	82572	0x506a	5.6.10?
10906 	 *	82572EI	0x5069	5.6.9?
10907 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
10908 	 *		0x2013	2.1.3?
10909 	 *	82583	0x10a0	1.10.0? (the document says it's the default value)
10910 	 */
10911 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
10912 	switch (sc->sc_type) {
10913 	case WM_T_82571:
10914 	case WM_T_82572:
10915 	case WM_T_82574:
10916 	case WM_T_82583:
10917 		check_version = true;
10918 		check_optionrom = true;
10919 		have_build = true;
10920 		break;
10921 	case WM_T_82575:
10922 	case WM_T_82576:
10923 	case WM_T_82580:
10924 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
10925 			check_version = true;
10926 		break;
10927 	case WM_T_I211:
10928 		wm_nvm_version_invm(sc);
10929 		goto printver;
10930 	case WM_T_I210:
10931 		if (!wm_nvm_get_flash_presence_i210(sc)) {
10932 			wm_nvm_version_invm(sc);
10933 			goto printver;
10934 		}
10935 		/* FALLTHROUGH */
10936 	case WM_T_I350:
10937 	case WM_T_I354:
10938 		check_version = true;
10939 		check_optionrom = true;
10940 		break;
10941 	default:
10942 		return;
10943 	}
10944 	if (check_version) {
10945 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
10946 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
10947 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
10948 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
10949 			build = nvm_data & NVM_BUILD_MASK;
10950 			have_build = true;
10951 		} else
10952 			minor = nvm_data & 0x00ff;
10953 
10954 		/* The minor field is BCD-like; convert it to decimal (0x10 -> 10) */
10955 		minor = (minor / 16) * 10 + (minor % 16);
10956 		sc->sc_nvm_ver_major = major;
10957 		sc->sc_nvm_ver_minor = minor;
10958 
10959 printver:
10960 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
10961 		    sc->sc_nvm_ver_minor);
10962 		if (have_build) {
10963 			sc->sc_nvm_ver_build = build;
10964 			aprint_verbose(".%d", build);
10965 		}
10966 	}
10967 	if (check_optionrom) {
10968 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
10969 		/* Option ROM Version */
10970 		if ((off != 0x0000) && (off != 0xffff)) {
10971 			off += NVM_COMBO_VER_OFF;
10972 			wm_nvm_read(sc, off + 1, 1, &uid1);
10973 			wm_nvm_read(sc, off, 1, &uid0);
10974 			if ((uid0 != 0) && (uid0 != 0xffff)
10975 			    && (uid1 != 0) && (uid1 != 0xffff)) {
10976 				/* 16bits */
10977 				major = uid0 >> 8;
10978 				build = (uid0 << 8) | (uid1 >> 8);
10979 				patch = uid1 & 0x00ff;
10980 				aprint_verbose(", option ROM Version %d.%d.%d",
10981 				    major, build, patch);
10982 			}
10983 		}
10984 	}
10985 
10986 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
10987 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
10988 }
10989 
10990 /*
10991  * wm_nvm_read:
10992  *
10993  *	Read data from the serial EEPROM.
10994  */
10995 static int
10996 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
10997 {
10998 	int rv;
10999 
11000 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
11001 		return 1;
11002 
11003 	if (wm_nvm_acquire(sc))
11004 		return 1;
11005 
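	/*
	 * Dispatch on NVM type: ICH/PCH chips use the ICH8 flash path
	 * (PCH_SPT has its own variant), I210/I211 may use iNVM, newer
	 * MACs use the EERD register interface, and the rest access the
	 * EEPROM directly over SPI or Microwire.
	 */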
11006 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
11007 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
11008 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
11009 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
11010 	else if (sc->sc_type == WM_T_PCH_SPT)
11011 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
11012 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
11013 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
11014 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
11015 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
11016 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
11017 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
11018 	else
11019 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
11020 
11021 	wm_nvm_release(sc);
11022 	return rv;
11023 }
11024 
11025 /*
11026  * Hardware semaphores.
11027  * Very complex...
11028  */
11029 
11030 static int
11031 wm_get_swsm_semaphore(struct wm_softc *sc)
11032 {
11033 	int32_t timeout;
11034 	uint32_t swsm;
11035 
11036 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
11037 		/* Get the SW semaphore. */
11038 		timeout = sc->sc_nvm_wordsize + 1;
11039 		while (timeout) {
11040 			swsm = CSR_READ(sc, WMREG_SWSM);
11041 
11042 			if ((swsm & SWSM_SMBI) == 0)
11043 				break;
11044 
11045 			delay(50);
11046 			timeout--;
11047 		}
11048 
11049 		if (timeout == 0) {
11050 			aprint_error_dev(sc->sc_dev,
11051 			    "could not acquire SWSM SMBI\n");
11052 			return 1;
11053 		}
11054 	}
11055 
11056 	/* Get the FW semaphore. */
11057 	timeout = sc->sc_nvm_wordsize + 1;
11058 	while (timeout) {
11059 		swsm = CSR_READ(sc, WMREG_SWSM);
11060 		swsm |= SWSM_SWESMBI;
11061 		CSR_WRITE(sc, WMREG_SWSM, swsm);
11062 		/* If we managed to set the bit we got the semaphore. */
11063 		swsm = CSR_READ(sc, WMREG_SWSM);
11064 		if (swsm & SWSM_SWESMBI)
11065 			break;
11066 
11067 		delay(50);
11068 		timeout--;
11069 	}
11070 
11071 	if (timeout == 0) {
11072 		aprint_error_dev(sc->sc_dev,
11073 		    "could not acquire SWSM SWESMBI\n");
11074 		/* Release semaphores */
11075 		wm_put_swsm_semaphore(sc);
11076 		return 1;
11077 	}
11078 	return 0;
11079 }
11080 
11081 static void
11082 wm_put_swsm_semaphore(struct wm_softc *sc)
11083 {
11084 	uint32_t swsm;
11085 
11086 	swsm = CSR_READ(sc, WMREG_SWSM);
11087 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
11088 	CSR_WRITE(sc, WMREG_SWSM, swsm);
11089 }
11090 
11091 static int
11092 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
11093 {
11094 	uint32_t swfw_sync;
11095 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
11096 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
11097 	int timeout;
11098 
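	/* Retry for up to about one second (200 * 5 ms). */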
11099 	for (timeout = 0; timeout < 200; timeout++) {
11100 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
11101 			if (wm_get_swsm_semaphore(sc)) {
11102 				aprint_error_dev(sc->sc_dev,
11103 				    "%s: failed to get semaphore\n",
11104 				    __func__);
11105 				return 1;
11106 			}
11107 		}
11108 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
11109 		if ((swfw_sync & (swmask | fwmask)) == 0) {
11110 			swfw_sync |= swmask;
11111 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
11112 			if (sc->sc_flags & WM_F_LOCK_SWSM)
11113 				wm_put_swsm_semaphore(sc);
11114 			return 0;
11115 		}
11116 		if (sc->sc_flags & WM_F_LOCK_SWSM)
11117 			wm_put_swsm_semaphore(sc);
11118 		delay(5000);
11119 	}
11120 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
11121 	    device_xname(sc->sc_dev), mask, swfw_sync);
11122 	return 1;
11123 }
11124 
11125 static void
11126 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
11127 {
11128 	uint32_t swfw_sync;
11129 
11130 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
11131 		while (wm_get_swsm_semaphore(sc) != 0)
11132 			continue;
11133 	}
11134 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
11135 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
11136 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
11137 	if (sc->sc_flags & WM_F_LOCK_SWSM)
11138 		wm_put_swsm_semaphore(sc);
11139 }
11140 
11141 static int
11142 wm_get_swfwhw_semaphore(struct wm_softc *sc)
11143 {
11144 	uint32_t ext_ctrl;
11145 	int timeout;
11146 
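	/* Set the SW ownership bit and read back to check that it stuck. */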
11147 	for (timeout = 0; timeout < 200; timeout++) {
11148 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11149 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
11150 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
11151 
11152 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11153 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
11154 			return 0;
11155 		delay(5000);
11156 	}
11157 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
11158 	    device_xname(sc->sc_dev), ext_ctrl);
11159 	return 1;
11160 }
11161 
11162 static void
11163 wm_put_swfwhw_semaphore(struct wm_softc *sc)
11164 {
11165 	uint32_t ext_ctrl;
11166 
11167 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11168 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
11169 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
11170 }
11171 
11172 static int
11173 wm_get_hw_semaphore_82573(struct wm_softc *sc)
11174 {
11175 	int i = 0;
11176 	uint32_t reg;
11177 
11178 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11179 	do {
11180 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
11181 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
11182 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11183 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
11184 			break;
11185 		delay(2*1000);
11186 		i++;
11187 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
11188 
11189 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
11190 		wm_put_hw_semaphore_82573(sc);
11191 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
11192 		    device_xname(sc->sc_dev));
11193 		return -1;
11194 	}
11195 
11196 	return 0;
11197 }
11198 
11199 static void
11200 wm_put_hw_semaphore_82573(struct wm_softc *sc)
11201 {
11202 	uint32_t reg;
11203 
11204 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11205 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
11206 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
11207 }
11208 
11209 /*
11210  * Management mode and power management related subroutines.
11211  * BMC, AMT, suspend/resume and EEE.
11212  */
11213 
11214 #ifdef WM_WOL
11215 static int
11216 wm_check_mng_mode(struct wm_softc *sc)
11217 {
11218 	int rv;
11219 
11220 	switch (sc->sc_type) {
11221 	case WM_T_ICH8:
11222 	case WM_T_ICH9:
11223 	case WM_T_ICH10:
11224 	case WM_T_PCH:
11225 	case WM_T_PCH2:
11226 	case WM_T_PCH_LPT:
11227 	case WM_T_PCH_SPT:
11228 		rv = wm_check_mng_mode_ich8lan(sc);
11229 		break;
11230 	case WM_T_82574:
11231 	case WM_T_82583:
11232 		rv = wm_check_mng_mode_82574(sc);
11233 		break;
11234 	case WM_T_82571:
11235 	case WM_T_82572:
11236 	case WM_T_82573:
11237 	case WM_T_80003:
11238 		rv = wm_check_mng_mode_generic(sc);
11239 		break;
11240 	default:
11241 		/* nothing to do */
11242 		rv = 0;
11243 		break;
11244 	}
11245 
11246 	return rv;
11247 }
11248 
11249 static int
11250 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
11251 {
11252 	uint32_t fwsm;
11253 
11254 	fwsm = CSR_READ(sc, WMREG_FWSM);
11255 
11256 	if (((fwsm & FWSM_FW_VALID) != 0)
11257 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
11258 		return 1;
11259 
11260 	return 0;
11261 }
11262 
11263 static int
11264 wm_check_mng_mode_82574(struct wm_softc *sc)
11265 {
11266 	uint16_t data;
11267 
11268 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
11269 
11270 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
11271 		return 1;
11272 
11273 	return 0;
11274 }
11275 
11276 static int
11277 wm_check_mng_mode_generic(struct wm_softc *sc)
11278 {
11279 	uint32_t fwsm;
11280 
11281 	fwsm = CSR_READ(sc, WMREG_FWSM);
11282 
11283 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
11284 		return 1;
11285 
11286 	return 0;
11287 }
11288 #endif /* WM_WOL */
11289 
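/*
 * wm_enable_mng_pass_thru:
 *
 *	Return 1 if management pass-through should be enabled, i.e. if
 *	the management engine (BMC) is to receive packets through the
 *	host interface.
 */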
11290 static int
11291 wm_enable_mng_pass_thru(struct wm_softc *sc)
11292 {
11293 	uint32_t manc, fwsm, factps;
11294 
11295 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
11296 		return 0;
11297 
11298 	manc = CSR_READ(sc, WMREG_MANC);
11299 
11300 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
11301 		device_xname(sc->sc_dev), manc));
11302 	if ((manc & MANC_RECV_TCO_EN) == 0)
11303 		return 0;
11304 
11305 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
11306 		fwsm = CSR_READ(sc, WMREG_FWSM);
11307 		factps = CSR_READ(sc, WMREG_FACTPS);
11308 		if (((factps & FACTPS_MNGCG) == 0)
11309 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
11310 			return 1;
11311 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
11312 		uint16_t data;
11313 
11314 		factps = CSR_READ(sc, WMREG_FACTPS);
11315 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
11316 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
11317 			device_xname(sc->sc_dev), factps, data));
11318 		if (((factps & FACTPS_MNGCG) == 0)
11319 		    && ((data & NVM_CFG2_MNGM_MASK)
11320 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
11321 			return 1;
11322 	} else if (((manc & MANC_SMBUS_EN) != 0)
11323 	    && ((manc & MANC_ASF_EN) == 0))
11324 		return 1;
11325 
11326 	return 0;
11327 }
11328 
11329 static bool
11330 wm_phy_resetisblocked(struct wm_softc *sc)
11331 {
11332 	bool blocked = false;
11333 	uint32_t reg;
11334 	int i = 0;
11335 
11336 	switch (sc->sc_type) {
11337 	case WM_T_ICH8:
11338 	case WM_T_ICH9:
11339 	case WM_T_ICH10:
11340 	case WM_T_PCH:
11341 	case WM_T_PCH2:
11342 	case WM_T_PCH_LPT:
11343 	case WM_T_PCH_SPT:
11344 		do {
11345 			reg = CSR_READ(sc, WMREG_FWSM);
11346 			if ((reg & FWSM_RSPCIPHY) == 0) {
11347 				blocked = true;
11348 				delay(10*1000);
11349 				continue;
11350 			}
11351 			blocked = false;
11352 		} while (blocked && (i++ < 10));
11353 		return blocked;
11355 	case WM_T_82571:
11356 	case WM_T_82572:
11357 	case WM_T_82573:
11358 	case WM_T_82574:
11359 	case WM_T_82583:
11360 	case WM_T_80003:
11361 		reg = CSR_READ(sc, WMREG_MANC);
11362 		return (reg & MANC_BLK_PHY_RST_ON_IDE) != 0;
11367 	default:
11368 		/* no problem */
11369 		break;
11370 	}
11371 
11372 	return false;
11373 }
11374 
11375 static void
11376 wm_get_hw_control(struct wm_softc *sc)
11377 {
11378 	uint32_t reg;
11379 
11380 	switch (sc->sc_type) {
11381 	case WM_T_82573:
11382 		reg = CSR_READ(sc, WMREG_SWSM);
11383 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
11384 		break;
11385 	case WM_T_82571:
11386 	case WM_T_82572:
11387 	case WM_T_82574:
11388 	case WM_T_82583:
11389 	case WM_T_80003:
11390 	case WM_T_ICH8:
11391 	case WM_T_ICH9:
11392 	case WM_T_ICH10:
11393 	case WM_T_PCH:
11394 	case WM_T_PCH2:
11395 	case WM_T_PCH_LPT:
11396 	case WM_T_PCH_SPT:
11397 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
11398 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
11399 		break;
11400 	default:
11401 		break;
11402 	}
11403 }
11404 
11405 static void
11406 wm_release_hw_control(struct wm_softc *sc)
11407 {
11408 	uint32_t reg;
11409 
11410 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
11411 		return;
11412 
11413 	if (sc->sc_type == WM_T_82573) {
11414 		reg = CSR_READ(sc, WMREG_SWSM);
11415 		reg &= ~SWSM_DRV_LOAD;
11416 		CSR_WRITE(sc, WMREG_SWSM, reg);
11417 	} else {
11418 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
11419 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
11420 	}
11421 }
11422 
11423 static void
11424 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
11425 {
11426 	uint32_t reg;
11427 
11428 	if (sc->sc_type < WM_T_PCH2)
11429 		return;
11430 
11431 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11432 
11433 	if (gate)
11434 		reg |= EXTCNFCTR_GATE_PHY_CFG;
11435 	else
11436 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
11437 
11438 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
11439 }
11440 
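/*
 * wm_smbustopci:
 *
 *	Switch the PHY from SMBus to PCIe mode after power-on by
 *	toggling the LANPHYPC value; skipped when valid firmware is
 *	present or the PHY reset is blocked.
 */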
11441 static void
11442 wm_smbustopci(struct wm_softc *sc)
11443 {
11444 	uint32_t fwsm, reg;
11445 
11446 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
11447 	wm_gate_hw_phy_config_ich8lan(sc, true);
11448 
11449 	/* Acquire semaphore */
11450 	wm_get_swfwhw_semaphore(sc);
11451 
11452 	fwsm = CSR_READ(sc, WMREG_FWSM);
11453 	if (((fwsm & FWSM_FW_VALID) == 0)
11454 	    && (wm_phy_resetisblocked(sc) == false)) {
11455 		if (sc->sc_type >= WM_T_PCH_LPT) {
11456 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
11457 			reg |= CTRL_EXT_FORCE_SMBUS;
11458 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11459 			CSR_WRITE_FLUSH(sc);
11460 			delay(50*1000);
11461 		}
11462 
11463 		/* Toggle LANPHYPC */
11464 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
11465 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
11466 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11467 		CSR_WRITE_FLUSH(sc);
11468 		delay(10);
11469 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
11470 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11471 		CSR_WRITE_FLUSH(sc);
11472 		delay(50*1000);
11473 
11474 		if (sc->sc_type >= WM_T_PCH_LPT) {
11475 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
11476 			reg &= ~CTRL_EXT_FORCE_SMBUS;
11477 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11478 		}
11479 	}
11480 
11481 	/* Release semaphore */
11482 	wm_put_swfwhw_semaphore(sc);
11483 
11484 	/*
11485 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
11486 	 */
11487 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0))
11488 		wm_gate_hw_phy_config_ich8lan(sc, false);
11489 }
11490 
11491 static void
11492 wm_init_manageability(struct wm_softc *sc)
11493 {
11494 
11495 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
11496 		device_xname(sc->sc_dev), __func__));
11497 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
11498 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
11499 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
11500 
11501 		/* Disable hardware interception of ARP */
11502 		manc &= ~MANC_ARP_EN;
11503 
11504 		/* Enable receiving management packets to the host */
11505 		if (sc->sc_type >= WM_T_82571) {
11506 			manc |= MANC_EN_MNG2HOST;
11507 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
11508 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
11509 		}
11510 
11511 		CSR_WRITE(sc, WMREG_MANC, manc);
11512 	}
11513 }
11514 
11515 static void
11516 wm_release_manageability(struct wm_softc *sc)
11517 {
11518 
11519 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
11520 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
11521 
11522 		manc |= MANC_ARP_EN;
11523 		if (sc->sc_type >= WM_T_82571)
11524 			manc &= ~MANC_EN_MNG2HOST;
11525 
11526 		CSR_WRITE(sc, WMREG_MANC, manc);
11527 	}
11528 }
11529 
11530 static void
11531 wm_get_wakeup(struct wm_softc *sc)
11532 {
11533 
11534 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
11535 	switch (sc->sc_type) {
11536 	case WM_T_82573:
11537 	case WM_T_82583:
11538 		sc->sc_flags |= WM_F_HAS_AMT;
11539 		/* FALLTHROUGH */
11540 	case WM_T_80003:
11541 	case WM_T_82541:
11542 	case WM_T_82547:
11543 	case WM_T_82571:
11544 	case WM_T_82572:
11545 	case WM_T_82574:
11546 	case WM_T_82575:
11547 	case WM_T_82576:
11548 	case WM_T_82580:
11549 	case WM_T_I350:
11550 	case WM_T_I354:
11551 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
11552 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
11553 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
11554 		break;
11555 	case WM_T_ICH8:
11556 	case WM_T_ICH9:
11557 	case WM_T_ICH10:
11558 	case WM_T_PCH:
11559 	case WM_T_PCH2:
11560 	case WM_T_PCH_LPT:
11561 	case WM_T_PCH_SPT: /* XXX only Q170 chipset? */
11562 		sc->sc_flags |= WM_F_HAS_AMT;
11563 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
11564 		break;
11565 	default:
11566 		break;
11567 	}
11568 
11569 	/* 1: HAS_MANAGE */
11570 	if (wm_enable_mng_pass_thru(sc) != 0)
11571 		sc->sc_flags |= WM_F_HAS_MANAGE;
11572 
11573 #ifdef WM_DEBUG
11574 	printf("\n");
11575 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
11576 		printf("HAS_AMT,");
11577 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
11578 		printf("ARC_SUBSYS_VALID,");
11579 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
11580 		printf("ASF_FIRMWARE_PRES,");
11581 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
11582 		printf("HAS_MANAGE,");
11583 	printf("\n");
11584 #endif
11585 	/*
11586 	 * Note that the WOL flags are set after the EEPROM stuff
11587 	 * has been reset.
11588 	 */
11589 }
11590 
11591 #ifdef WM_WOL
11592 /* WOL in the newer chipset interfaces (pchlan) */
11593 static void
11594 wm_enable_phy_wakeup(struct wm_softc *sc)
11595 {
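	/* XXX Not implemented yet; the intended steps are sketched below. */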
11596 #if 0
11597 	uint16_t preg;
11598 
11599 	/* Copy MAC RARs to PHY RARs */
11600 
11601 	/* Copy MAC MTA to PHY MTA */
11602 
11603 	/* Configure PHY Rx Control register */
11604 
11605 	/* Enable PHY wakeup in MAC register */
11606 
11607 	/* Configure and enable PHY wakeup in PHY registers */
11608 
11609 	/* Activate PHY wakeup */
11610 
11611 	/* XXX */
11612 #endif
11613 }
11614 
11615 /* Power down workaround on D3 */
11616 static void
11617 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
11618 {
11619 	uint32_t reg;
11620 	int i;
11621 
11622 	for (i = 0; i < 2; i++) {
11623 		/* Disable link */
11624 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
11625 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
11626 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11627 
11628 		/*
11629 		 * Call gig speed drop workaround on Gig disable before
11630 		 * accessing any PHY registers
11631 		 */
11632 		if (sc->sc_type == WM_T_ICH8)
11633 			wm_gig_downshift_workaround_ich8lan(sc);
11634 
11635 		/* Write VR power-down enable */
11636 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
11637 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
11638 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
11639 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
11640 
11641 		/* Read it back and test */
11642 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
11643 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
11644 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
11645 			break;
11646 
11647 		/* Issue PHY reset and repeat at most one more time */
11648 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
11649 	}
11650 }
11651 
11652 static void
11653 wm_enable_wakeup(struct wm_softc *sc)
11654 {
11655 	uint32_t reg, pmreg;
11656 	pcireg_t pmode;
11657 
11658 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
11659 		&pmreg, NULL) == 0)
11660 		return;
11661 
11662 	/* Advertise the wakeup capability */
11663 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
11664 	    | CTRL_SWDPIN(3));
11665 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
11666 
11667 	/* ICH workaround */
11668 	switch (sc->sc_type) {
11669 	case WM_T_ICH8:
11670 	case WM_T_ICH9:
11671 	case WM_T_ICH10:
11672 	case WM_T_PCH:
11673 	case WM_T_PCH2:
11674 	case WM_T_PCH_LPT:
11675 	case WM_T_PCH_SPT:
11676 		/* Disable gig during WOL */
11677 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
11678 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
11679 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11680 		if (sc->sc_type == WM_T_PCH)
11681 			wm_gmii_reset(sc);
11682 
11683 		/* Power down workaround */
11684 		if (sc->sc_phytype == WMPHY_82577) {
11685 			struct mii_softc *child;
11686 
11687 			/* Assume that the PHY is copper */
11688 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
11689 			if (child->mii_mpd_rev <= 2)
11690 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
11691 				    (768 << 5) | 25, 0x0444); /* magic num */
11692 		}
11693 		break;
11694 	default:
11695 		break;
11696 	}
11697 
11698 	/* Keep the laser running on fiber adapters */
11699 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
11700 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
11701 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
11702 		reg |= CTRL_EXT_SWDPIN(3);
11703 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11704 	}
11705 
11706 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
11707 #if 0	/* for the multicast packet */
11708 	reg |= WUFC_MC;
11709 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
11710 #endif
11711 
11712 	if (sc->sc_type == WM_T_PCH) {
11713 		wm_enable_phy_wakeup(sc);
11714 	} else {
11715 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
11716 		CSR_WRITE(sc, WMREG_WUFC, reg);
11717 	}
11718 
11719 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
11720 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
11721 		|| (sc->sc_type == WM_T_PCH2))
11722 		    && (sc->sc_phytype == WMPHY_IGP_3))
11723 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
11724 
11725 	/* Request PME */
11726 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
11727 #if 0
11728 	/* Disable WOL */
11729 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
11730 #else
11731 	/* For WOL */
11732 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
11733 #endif
11734 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
11735 }
11736 #endif /* WM_WOL */
11737 
11738 /* LPLU (Low Power Link Up) */
11739 
11740 static void
11741 wm_lplu_d0_disable(struct wm_softc *sc)
11742 {
11743 	uint32_t reg;
11744 
11745 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
11746 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
11747 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11748 }
11749 
11750 static void
11751 wm_lplu_d0_disable_pch(struct wm_softc *sc)
11752 {
11753 	uint32_t reg;
11754 
11755 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
11756 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
11757 	reg |= HV_OEM_BITS_ANEGNOW;
11758 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
11759 }
11760 
11761 /* EEE (Energy Efficient Ethernet) */
11762 
11763 static void
11764 wm_set_eee_i350(struct wm_softc *sc)
11765 {
11766 	uint32_t ipcnfg, eeer;
11767 
11768 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
11769 	eeer = CSR_READ(sc, WMREG_EEER);
11770 
11771 	if ((sc->sc_flags & WM_F_EEE) != 0) {
11772 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
11773 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
11774 		    | EEER_LPI_FC);
11775 	} else {
11776 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
11777 		ipcnfg &= ~IPCNFG_10BASE_TE;
11778 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
11779 		    | EEER_LPI_FC);
11780 	}
11781 
11782 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
11783 	CSR_WRITE(sc, WMREG_EEER, eeer);
11784 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
11785 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
11786 }
11787 
11788 /*
11789  * Workarounds (mainly PHY related).
11790  * Basically, PHY's workarounds are in the PHY drivers.
11791  */
11792 
11793 /* Work-around for 82566 Kumeran PCS lock loss */
11794 static void
11795 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
11796 {
11797 #if 0
11798 	int miistatus, active, i;
11799 	int reg;
11800 
11801 	miistatus = sc->sc_mii.mii_media_status;
11802 
11803 	/* If the link is not up, do nothing */
11804 	if ((miistatus & IFM_ACTIVE) == 0)
11805 		return;
11806 
11807 	active = sc->sc_mii.mii_media_active;
11808 
11809 	/* Nothing to do if the link is other than 1Gbps */
11810 	if (IFM_SUBTYPE(active) != IFM_1000_T)
11811 		return;
11812 
11813 	for (i = 0; i < 10; i++) {
11814 		/* read twice */
11815 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
11816 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
11817 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
11818 			goto out;	/* GOOD! */
11819 
11820 		/* Reset the PHY */
11821 		wm_gmii_reset(sc);
11822 		delay(5*1000);
11823 	}
11824 
11825 	/* Disable GigE link negotiation */
11826 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
11827 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
11828 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11829 
11830 	/*
11831 	 * Call gig speed drop workaround on Gig disable before accessing
11832 	 * any PHY registers.
11833 	 */
11834 	wm_gig_downshift_workaround_ich8lan(sc);
11835 
11836 out:
11837 	return;
11838 #endif
11839 }
11840 
11841 /* Workaround for "WOL from S5 stops working" */
11842 static void
11843 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
11844 {
11845 	uint16_t kmrn_reg;
11846 
11847 	/* Only for igp3 */
11848 	if (sc->sc_phytype == WMPHY_IGP_3) {
11849 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
11850 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
11851 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
11852 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
11853 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
11854 	}
11855 }
11856 
11857 /*
11858  * Workaround for pch's PHYs
11859  * XXX should be moved to new PHY driver?
11860  */
11861 static void
11862 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
11863 {
11864 	if (sc->sc_phytype == WMPHY_82577)
11865 		wm_set_mdio_slow_mode_hv(sc);
11866 
11867 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
11868 
11869 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
11870 
11871 	/* 82578 */
11872 	if (sc->sc_phytype == WMPHY_82578) {
11873 		/* PCH rev. < 3 */
11874 		if (sc->sc_rev < 3) {
11875 			/* XXX 6 bit shift? Why? Is it page2? */
11876 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
11877 			    0x66c0);
11878 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
11879 			    0xffff);
11880 		}
11881 
11882 		/* XXX phy rev. < 2 */
11883 	}
11884 
11885 	/* Select page 0 */
11886 
11887 	/* XXX acquire semaphore */
11888 	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
11889 	/* XXX release semaphore */
11890 
11891 	/*
11892 	 * Configure the K1 Si workaround during phy reset assuming there is
11893 	 * link so that it disables K1 if link is in 1Gbps.
11894 	 */
11895 	wm_k1_gig_workaround_hv(sc, 1);
11896 }
11897 
11898 static void
11899 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
11900 {
11901 
11902 	wm_set_mdio_slow_mode_hv(sc);
11903 }
11904 
11905 static void
11906 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
11907 {
11908 	int k1_enable = sc->sc_nvm_k1_enabled;
11909 
11910 	/* XXX acquire semaphore */
11911 
11912 	if (link) {
11913 		k1_enable = 0;
11914 
11915 		/* Link stall fix for link up */
11916 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
11917 	} else {
11918 		/* Link stall fix for link down */
11919 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
11920 	}
11921 
11922 	wm_configure_k1_ich8lan(sc, k1_enable);
11923 
11924 	/* XXX release semaphore */
11925 }
11926 
11927 static void
11928 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
11929 {
11930 	uint32_t reg;
11931 
11932 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
11933 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
11934 	    reg | HV_KMRN_MDIO_SLOW);
11935 }
11936 
11937 static void
11938 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
11939 {
11940 	uint32_t ctrl, ctrl_ext, tmp;
11941 	uint16_t kmrn_reg;
11942 
11943 	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
11944 
11945 	if (k1_enable)
11946 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
11947 	else
11948 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
11949 
11950 	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
11951 
11952 	delay(20);
11953 
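	/*
	 * Temporarily force the MAC speed (FRCSPD + SPD_BYPS) while the
	 * K1 setting takes effect, then restore CTRL and CTRL_EXT.
	 */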
11954 	ctrl = CSR_READ(sc, WMREG_CTRL);
11955 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
11956 
11957 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
11958 	tmp |= CTRL_FRCSPD;
11959 
11960 	CSR_WRITE(sc, WMREG_CTRL, tmp);
11961 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
11962 	CSR_WRITE_FLUSH(sc);
11963 	delay(20);
11964 
11965 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
11966 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
11967 	CSR_WRITE_FLUSH(sc);
11968 	delay(20);
11969 }
11970 
11971 /* Special case - the 82575 needs manual initialization ... */
11972 static void
11973 wm_reset_init_script_82575(struct wm_softc *sc)
11974 {
11975 	/*
11976 	 * Remark: this is untested code - we have no board without EEPROM.
11977 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
11978 	 */
11979 
11980 	/* SerDes configuration via SERDESCTRL */
11981 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
11982 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
11983 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
11984 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
11985 
11986 	/* CCM configuration via CCMCTL register */
11987 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
11988 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
11989 
11990 	/* PCIe lanes configuration */
11991 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
11992 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
11993 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
11994 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
11995 
11996 	/* PCIe PLL Configuration */
11997 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
11998 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
11999 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
12000 }
12001 
12002 static void
12003 wm_reset_mdicnfg_82580(struct wm_softc *sc)
12004 {
12005 	uint32_t reg;
12006 	uint16_t nvmword;
12007 	int rv;
12008 
12009 	if ((sc->sc_flags & WM_F_SGMII) == 0)
12010 		return;
12011 
12012 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
12013 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
12014 	if (rv != 0) {
12015 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
12016 		    __func__);
12017 		return;
12018 	}
12019 
12020 	reg = CSR_READ(sc, WMREG_MDICNFG);
12021 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
12022 		reg |= MDICNFG_DEST;
12023 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
12024 		reg |= MDICNFG_COM_MDIO;
12025 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
12026 }
12027 
12028 /*
12029  * I210 Errata 25 and I211 Errata 10
12030  * Slow System Clock.
12031  */
12032 static void
12033 wm_pll_workaround_i210(struct wm_softc *sc)
12034 {
12035 	uint32_t mdicnfg, wuc;
12036 	uint32_t reg;
12037 	pcireg_t pcireg;
12038 	uint32_t pmreg;
12039 	uint16_t nvmword, tmp_nvmword;
12040 	int phyval;
12041 	bool wa_done = false;
12042 	int i;
12043 
12044 	/* Save WUC and MDICNFG registers */
12045 	wuc = CSR_READ(sc, WMREG_WUC);
12046 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
12047 
12048 	reg = mdicnfg & ~MDICNFG_DEST;
12049 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
12050 
12051 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
12052 		nvmword = INVM_DEFAULT_AL;
12053 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
12054 
12055 	/* Get Power Management cap offset */
12056 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
12057 		&pmreg, NULL) == 0)
12058 		return;
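	/*
	 * If the PHY PLL comes up unconfigured (the slow system clock
	 * erratum), write the workaround autoload value, cycle the
	 * device through D3/D0 so the iNVM word is reloaded, and then
	 * restore the original value.
	 */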
12059 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
12060 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
12061 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
12062 
12063 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
12064 			break; /* OK */
12065 		}
12066 
12067 		wa_done = true;
12068 		/* Directly reset the internal PHY */
12069 		reg = CSR_READ(sc, WMREG_CTRL);
12070 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
12071 
12072 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
12073 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
12074 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12075 
12076 		CSR_WRITE(sc, WMREG_WUC, 0);
12077 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
12078 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
12079 
12080 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
12081 		    pmreg + PCI_PMCSR);
12082 		pcireg |= PCI_PMCSR_STATE_D3;
12083 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
12084 		    pmreg + PCI_PMCSR, pcireg);
12085 		delay(1000);
12086 		pcireg &= ~PCI_PMCSR_STATE_D3;
12087 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
12088 		    pmreg + PCI_PMCSR, pcireg);
12089 
12090 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
12091 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
12092 
12093 		/* Restore WUC register */
12094 		CSR_WRITE(sc, WMREG_WUC, wuc);
12095 	}
12096 
12097 	/* Restore MDICNFG setting */
12098 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
12099 	if (wa_done)
12100 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
12101 }
12102