/*	$NetBSD: if_wm.c,v 1.388 2016/01/07 10:08:18 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
 *	- TX Multi queue
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.388 2016/01/07 10:08:18 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;
#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (/*CONSTCOND*/0)
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#endif

/*
 * The maximum number of interrupts this driver can use: one vector per
 * TX queue, one per RX queue, plus one for link-status changes.
 */
#define WM_MAX_NTXINTR		16
#define WM_MAX_NRXINTR		16
#define WM_MAX_NINTR		(WM_MAX_NTXINTR + WM_MAX_NRXINTR + 1)

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCSIZE(txq)	(WM_NTXDESC(txq) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
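
/*
 * Ring indices wrap with a bitwise AND rather than a modulo, which is why
 * txq_num and txq_ndesc must be powers of two.  A minimal sketch, assuming
 * an 82544-class ring of 4096 descriptors:
 *
 *	WM_NEXTTX(txq, 4094) == 4095
 *	WM_NEXTTX(txq, 4095) == 0	(4096 & 0xfff wraps to 0)
 */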

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

#define	WM_CDTXOFF(x)	(sizeof(wiseman_txdesc_t) * (x))
#define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))
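
/*
 * WM_CDTXOFF()/WM_CDRXOFF() convert a descriptor index into a byte offset
 * within the control-data DMA area.  For example, assuming the 16-byte
 * legacy descriptor layout from if_wmreg.h, descriptor 10 lives at byte
 * offset 160.
 */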

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

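/*
 * Packet-buffer size table for the 82580, indexed by the RXPBS register
 * field; the values are presumably in KB (see wm_rxpbs_adjust_82580()).
 */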
static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;

	int txq_id;			/* index of this transmit queue */
	int txq_intr_idx;		/* MSI-X table index */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
	size_t txq_desc_size;		/* control data size */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/* XXX which event counter is required? */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;

	int rxq_id;			/* index of this receive queue */
	int rxq_intr_idx;		/* MSI-X table index */

	/* Software state for the receive descriptors. */
	wiseman_rxdesc_t *rxq_descs;

	/* RX control data structures. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
	size_t rxq_desc_size;		/* control data size */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	/* XXX which event counter is required? */
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * legacy and msi use sc_ihs[0].
					 */
	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* MSI-X table index */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_ntxqueues;
	struct wm_txqueue *sc_txq;

	int sc_nrxqueues;
	struct wm_rxqueue *sc_rxq;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
};

#define WM_TX_LOCK(_txq)	if ((_txq)->txq_lock) mutex_enter((_txq)->txq_lock)
#define WM_TX_UNLOCK(_txq)	if ((_txq)->txq_lock) mutex_exit((_txq)->txq_lock)
#define WM_TX_LOCKED(_txq)	(!(_txq)->txq_lock || mutex_owned((_txq)->txq_lock))
#define WM_RX_LOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_enter((_rxq)->rxq_lock)
#define WM_RX_UNLOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_exit((_rxq)->rxq_lock)
#define WM_RX_LOCKED(_rxq)	(!(_rxq)->rxq_lock || mutex_owned((_rxq)->rxq_lock))
#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#ifdef WM_MPSAFE
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
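
/*
 * The rxq_tailp double pointer lets WM_RXCHAIN_LINK() append an mbuf in
 * O(1) without walking the chain.  A minimal sketch of gathering a jumbo
 * frame that spans two 2k buffers:
 *
 *	WM_RXCHAIN_RESET(rxq);		rxq_head == NULL
 *	WM_RXCHAIN_LINK(rxq, m1);	rxq_head == m1, tailp == &m1->m_next
 *	WM_RXCHAIN_LINK(rxq, m2);	m1->m_next == m2, tailp == &m2->m_next
 */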

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
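
/*
 * A CSR write may be posted on the bus, so code that needs the write to
 * reach the chip before continuing pairs CSR_WRITE() with
 * CSR_WRITE_FLUSH(), which forces the posted write out by reading STATUS.
 * A sketch of the common reset idiom (the exact bits vary per chip):
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
 *	CSR_WRITE_FLUSH(sc);
 *	delay(10000);
 */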

#define ICH8_FLASH_READ32(sc, reg) \
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE32(sc, reg, data) \
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define ICH8_FLASH_READ16(sc, reg) \
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE16(sc, reg, data) \
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
/* Interrupt */
static int	wm_txeof(struct wm_softc *);
static void	wm_rxeof(struct wm_rxqueue *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static int	wm_txintr_msix(void *);
static int	wm_rxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Reading via the EERD register */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#ifdef WM_WOL
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
static void	wm_lplu_d0_disable_pch(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static void	wm_pll_workaround_i210(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
	  "I354 Gigabit Ethernet (KX)",
	  WM_T_I354,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Ethernet (SGMII)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
	  "I354 Gigabit Ethernet (2.5G)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
	  "I210 Gigabit Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */


/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

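/*
 * wm_io_write() (and the disabled wm_io_read() above) go through the
 * chip's indexed I/O window: the register offset is written at I/O BAR
 * offset 0 and the data at offset 4.  The names IOADDR/IODATA commonly
 * used for these two slots are illustrative; this file uses the raw
 * offsets directly.
 */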
static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING:"
		    " i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}
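
/*
 * A hypothetical call writing one byte at offset 'off' of an 8-bit
 * controller register bank (WMREG_SCTL is assumed here from the
 * SCTL_CTL_* constants; callers pass whichever register the datasheet
 * specifies):
 *
 *	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, off, data);
 *
 * The helper polls up to SCTL_CTL_POLL_TIMEOUT times, 5us apart, for the
 * READY bit before giving up with a warning.
 */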

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

/*
 * Descriptor sync/init functions.
 */
static inline void
wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
{
	struct wm_softc *sc = txq->txq_sc;

	/* If it will wrap around, sync to the end of the ring. */
	if ((start + num) > WM_NTXDESC(txq)) {
		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
		    WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) *
		    (WM_NTXDESC(txq) - start), ops);
		num -= (WM_NTXDESC(txq) - start);
		start = 0;
	}

	/* Now sync whatever is left. */
	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
	    WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) * num, ops);
}
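
/*
 * An example of the wrap-around case above: with a 4096-entry ring,
 * syncing start = 4090, num = 10 issues two bus_dmamap_sync() calls,
 * one covering descriptors 4090-4095 and one covering descriptors 0-3.
 */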

static inline void
wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
{
	struct wm_softc *sc = rxq->rxq_sc;

	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
}

static inline void
wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
{
	struct wm_softc *sc = rxq->rxq_sc;
	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
	struct mbuf *m = rxs->rxs_mbuf;

	/*
	 * Note: We scoot the packet forward 2 bytes in the buffer
	 * so that the payload after the Ethernet header is aligned
	 * to a 4-byte boundary.
	 *
	 * XXX BRAINDAMAGE ALERT!
	 * The stupid chip uses the same size for every buffer, which
	 * is set in the Receive Control register.  We are using the 2K
	 * size option, but what we REALLY want is (2K - 2)!  For this
	 * reason, we can't "scoot" packets longer than the standard
	 * Ethernet MTU.  On strict-alignment platforms, if the total
	 * size exceeds (2K - 2) we set align_tweak to 0 and let
	 * the upper layer copy the headers.
	 */
	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;

	wm_set_dma_addr(&rxd->wrx_addr,
	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
	rxd->wrx_len = 0;
	rxd->wrx_cksum = 0;
	rxd->wrx_status = 0;
	rxd->wrx_errors = 0;
	rxd->wrx_special = 0;
	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
}
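
/*
 * The alignment arithmetic above in concrete terms: an Ethernet header
 * is 14 bytes, so with sc_align_tweak = 2 the IP header starts at buffer
 * offset 2 + 14 = 16, a 4-byte boundary.  With a tweak of 0 it would
 * start at offset 14 and fault on strict-alignment machines.
 */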

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */

/* Lookup supported device table */
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

/* The match function (ca_match) */
static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

/* The attach function (ca_attach) */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	int counts[PCI_INTR_TYPE_SIZE];
	pci_intr_type_t max_type;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, nvmword;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	bool force_clear_smbi;
	uint32_t link_mode;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
	sc->sc_stopping = false;

	wmp = wm_lookup(pa);
#ifdef DIAGNOSTIC
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}
#endif
	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Disable MSI for Errata:
	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
	 *
	 *  82544: Errata 25
	 *  82540: Errata  6 (easy to reproduce device timeout)
	 *  82545: Errata  4 (easy to reproduce device timeout)
	 *  82546: Errata 26 (easy to reproduce device timeout)
	 *  82541: Errata  7 (easy to reproduce device timeout)
	 *
	 * "Byte Enables 2 and 3 are not set on MSI writes"
	 *
	 *  82571 & 82572: Errata 63
	 */
	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
	    || (sc->sc_type == WM_T_82572))
		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1538 
1539 	/*
1540 	 * Map the device.  All devices support memory-mapped access,
1541 	 * and it is really required for normal operation.
1542 	 */
1543 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1544 	switch (memtype) {
1545 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1546 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1547 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1548 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1549 		break;
1550 	default:
1551 		memh_valid = 0;
1552 		break;
1553 	}
1554 
1555 	if (memh_valid) {
1556 		sc->sc_st = memt;
1557 		sc->sc_sh = memh;
1558 		sc->sc_ss = memsize;
1559 	} else {
1560 		aprint_error_dev(sc->sc_dev,
1561 		    "unable to map device registers\n");
1562 		return;
1563 	}
1564 
1565 	/*
1566 	 * In addition, i82544 and later support I/O mapped indirect
1567 	 * register access.  It is not desirable (nor supported in
1568 	 * this driver) to use it for normal operation, though it is
1569 	 * required to work around bugs in some chip versions.
1570 	 */
1571 	if (sc->sc_type >= WM_T_82544) {
1572 		/* First we have to find the I/O BAR. */
1573 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1574 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1575 			if (memtype == PCI_MAPREG_TYPE_IO)
1576 				break;
1577 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
1578 			    PCI_MAPREG_MEM_TYPE_64BIT)
1579 				i += 4;	/* skip high bits, too */
1580 		}
1581 		if (i < PCI_MAPREG_END) {
1582 			/*
1583 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
1584 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
1585 			 * that's not a problem, because the newer chips
1586 			 * don't have this bug.
1587 			 *
1588 			 * The i8254x apparently doesn't respond when the
1589 			 * I/O BAR is 0, which looks as if it has never
1590 			 * been configured.
1591 			 */
1592 			preg = pci_conf_read(pc, pa->pa_tag, i);
1593 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1594 				aprint_error_dev(sc->sc_dev,
1595 				    "WARNING: I/O BAR at zero.\n");
1596 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1597 					0, &sc->sc_iot, &sc->sc_ioh,
1598 					NULL, &sc->sc_ios) == 0) {
1599 				sc->sc_flags |= WM_F_IOH_VALID;
1600 			} else {
1601 				aprint_error_dev(sc->sc_dev,
1602 				    "WARNING: unable to map I/O space\n");
1603 			}
1604 		}
1605 
1606 	}
1607 
1608 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
1609 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1610 	preg |= PCI_COMMAND_MASTER_ENABLE;
1611 	if (sc->sc_type < WM_T_82542_2_1)
1612 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1613 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1614 
1615 	/* power up chip */
1616 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1617 	    NULL)) && error != EOPNOTSUPP) {
1618 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1619 		return;
1620 	}
1621 
1622 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
1623 
1624 	/* Allocation settings */
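	/* One MSI-X vector per Tx queue and per Rx queue, plus one for link */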
1625 	max_type = PCI_INTR_TYPE_MSIX;
1626 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_ntxqueues + sc->sc_nrxqueues + 1;
1627 	counts[PCI_INTR_TYPE_MSI] = 1;
1628 	counts[PCI_INTR_TYPE_INTX] = 1;
1629 
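	/*
	 * Try MSI-X first; if the setup fails, fall back to MSI and
	 * then to INTx, releasing the vectors allocated by the
	 * previous attempt before each retry.
	 */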
1630 alloc_retry:
1631 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
1632 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
1633 		return;
1634 	}
1635 
1636 	if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
1637 		error = wm_setup_msix(sc);
1638 		if (error) {
1639 			pci_intr_release(pc, sc->sc_intrs,
1640 			    counts[PCI_INTR_TYPE_MSIX]);
1641 
1642 			/* Setup for MSI: Disable MSI-X */
1643 			max_type = PCI_INTR_TYPE_MSI;
1644 			counts[PCI_INTR_TYPE_MSI] = 1;
1645 			counts[PCI_INTR_TYPE_INTX] = 1;
1646 			goto alloc_retry;
1647 		}
1648 	} else if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
1649 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
1650 		error = wm_setup_legacy(sc);
1651 		if (error) {
1652 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
1653 			    counts[PCI_INTR_TYPE_MSI]);
1654 
1655 			/* The next try is for INTx: Disable MSI */
1656 			max_type = PCI_INTR_TYPE_INTX;
1657 			counts[PCI_INTR_TYPE_INTX] = 1;
1658 			goto alloc_retry;
1659 		}
1660 	} else {
1661 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
1662 		error = wm_setup_legacy(sc);
1663 		if (error) {
1664 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
1665 			    counts[PCI_INTR_TYPE_INTX]);
1666 			return;
1667 		}
1668 	}
1669 
1670 	/*
1671 	 * Check the function ID (unit number of the chip).
1672 	 */
1673 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1674 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
1675 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1676 	    || (sc->sc_type == WM_T_82580)
1677 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
1678 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1679 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1680 	else
1681 		sc->sc_funcid = 0;
1682 
1683 	/*
1684 	 * Determine a few things about the bus we're connected to.
1685 	 */
1686 	if (sc->sc_type < WM_T_82543) {
1687 		/* We don't really know the bus characteristics here. */
1688 		sc->sc_bus_speed = 33;
1689 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1690 		/*
1691 		 * CSA (Communication Streaming Architecture) is about as
1692 		 * fast as a 32-bit 66MHz PCI bus.
1693 		 */
1694 		sc->sc_flags |= WM_F_CSA;
1695 		sc->sc_bus_speed = 66;
1696 		aprint_verbose_dev(sc->sc_dev,
1697 		    "Communication Streaming Architecture\n");
1698 		if (sc->sc_type == WM_T_82547) {
1699 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1700 			callout_setfunc(&sc->sc_txfifo_ch,
1701 					wm_82547_txfifo_stall, sc);
1702 			aprint_verbose_dev(sc->sc_dev,
1703 			    "using 82547 Tx FIFO stall work-around\n");
1704 		}
1705 	} else if (sc->sc_type >= WM_T_82571) {
1706 		sc->sc_flags |= WM_F_PCIE;
1707 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1708 		    && (sc->sc_type != WM_T_ICH10)
1709 		    && (sc->sc_type != WM_T_PCH)
1710 		    && (sc->sc_type != WM_T_PCH2)
1711 		    && (sc->sc_type != WM_T_PCH_LPT)) {
1712 			/* ICH* and PCH* have no PCIe capability registers */
1713 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1714 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1715 				NULL) == 0)
1716 				aprint_error_dev(sc->sc_dev,
1717 				    "unable to find PCIe capability\n");
1718 		}
1719 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1720 	} else {
1721 		reg = CSR_READ(sc, WMREG_STATUS);
1722 		if (reg & STATUS_BUS64)
1723 			sc->sc_flags |= WM_F_BUS64;
1724 		if ((reg & STATUS_PCIX_MODE) != 0) {
1725 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1726 
1727 			sc->sc_flags |= WM_F_PCIX;
1728 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1729 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1730 				aprint_error_dev(sc->sc_dev,
1731 				    "unable to find PCIX capability\n");
1732 			else if (sc->sc_type != WM_T_82545_3 &&
1733 				 sc->sc_type != WM_T_82546_3) {
1734 				/*
1735 				 * Work around a problem caused by the BIOS
1736 				 * setting the max memory read byte count
1737 				 * incorrectly.
1738 				 */
1739 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1740 				    sc->sc_pcixe_capoff + PCIX_CMD);
1741 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1742 				    sc->sc_pcixe_capoff + PCIX_STATUS);
1743 
1744 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1745 				    PCIX_CMD_BYTECNT_SHIFT;
1746 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1747 				    PCIX_STATUS_MAXB_SHIFT;
1748 				if (bytecnt > maxb) {
1749 					aprint_verbose_dev(sc->sc_dev,
1750 					    "resetting PCI-X MMRBC: %d -> %d\n",
1751 					    512 << bytecnt, 512 << maxb);
1752 					pcix_cmd = (pcix_cmd &
1753 					    ~PCIX_CMD_BYTECNT_MASK) |
1754 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
1755 					pci_conf_write(pa->pa_pc, pa->pa_tag,
1756 					    sc->sc_pcixe_capoff + PCIX_CMD,
1757 					    pcix_cmd);
1758 				}
1759 			}
1760 		}
1761 		/*
1762 		 * The quad port adapter is special; it has a PCIX-PCIX
1763 		 * bridge on the board, and can run the secondary bus at
1764 		 * a higher speed.
1765 		 */
1766 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1767 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1768 								      : 66;
1769 		} else if (sc->sc_flags & WM_F_PCIX) {
1770 			switch (reg & STATUS_PCIXSPD_MASK) {
1771 			case STATUS_PCIXSPD_50_66:
1772 				sc->sc_bus_speed = 66;
1773 				break;
1774 			case STATUS_PCIXSPD_66_100:
1775 				sc->sc_bus_speed = 100;
1776 				break;
1777 			case STATUS_PCIXSPD_100_133:
1778 				sc->sc_bus_speed = 133;
1779 				break;
1780 			default:
1781 				aprint_error_dev(sc->sc_dev,
1782 				    "unknown PCIXSPD %d; assuming 66MHz\n",
1783 				    reg & STATUS_PCIXSPD_MASK);
1784 				sc->sc_bus_speed = 66;
1785 				break;
1786 			}
1787 		} else
1788 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1789 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1790 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1791 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1792 	}
1793 
1794 	/* clear interesting stat counters */
1795 	CSR_READ(sc, WMREG_COLC);
1796 	CSR_READ(sc, WMREG_RXERRC);
1797 
1798 	/* get PHY control from SMBus to PCIe */
1799 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1800 	    || (sc->sc_type == WM_T_PCH_LPT))
1801 		wm_smbustopci(sc);
1802 
1803 	/* Reset the chip to a known state. */
1804 	wm_reset(sc);
1805 
1806 	/* Get some information about the EEPROM. */
1807 	switch (sc->sc_type) {
1808 	case WM_T_82542_2_0:
1809 	case WM_T_82542_2_1:
1810 	case WM_T_82543:
1811 	case WM_T_82544:
1812 		/* Microwire */
1813 		sc->sc_nvm_wordsize = 64;
1814 		sc->sc_nvm_addrbits = 6;
1815 		break;
1816 	case WM_T_82540:
1817 	case WM_T_82545:
1818 	case WM_T_82545_3:
1819 	case WM_T_82546:
1820 	case WM_T_82546_3:
1821 		/* Microwire */
1822 		reg = CSR_READ(sc, WMREG_EECD);
1823 		if (reg & EECD_EE_SIZE) {
1824 			sc->sc_nvm_wordsize = 256;
1825 			sc->sc_nvm_addrbits = 8;
1826 		} else {
1827 			sc->sc_nvm_wordsize = 64;
1828 			sc->sc_nvm_addrbits = 6;
1829 		}
1830 		sc->sc_flags |= WM_F_LOCK_EECD;
1831 		break;
1832 	case WM_T_82541:
1833 	case WM_T_82541_2:
1834 	case WM_T_82547:
1835 	case WM_T_82547_2:
1836 		sc->sc_flags |= WM_F_LOCK_EECD;
1837 		reg = CSR_READ(sc, WMREG_EECD);
1838 		if (reg & EECD_EE_TYPE) {
1839 			/* SPI */
1840 			sc->sc_flags |= WM_F_EEPROM_SPI;
1841 			wm_nvm_set_addrbits_size_eecd(sc);
1842 		} else {
1843 			/* Microwire */
1844 			if ((reg & EECD_EE_ABITS) != 0) {
1845 				sc->sc_nvm_wordsize = 256;
1846 				sc->sc_nvm_addrbits = 8;
1847 			} else {
1848 				sc->sc_nvm_wordsize = 64;
1849 				sc->sc_nvm_addrbits = 6;
1850 			}
1851 		}
1852 		break;
1853 	case WM_T_82571:
1854 	case WM_T_82572:
1855 		/* SPI */
1856 		sc->sc_flags |= WM_F_EEPROM_SPI;
1857 		wm_nvm_set_addrbits_size_eecd(sc);
1858 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1859 		break;
1860 	case WM_T_82573:
1861 		sc->sc_flags |= WM_F_LOCK_SWSM;
1862 		/* FALLTHROUGH */
1863 	case WM_T_82574:
1864 	case WM_T_82583:
1865 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
1866 			sc->sc_flags |= WM_F_EEPROM_FLASH;
1867 			sc->sc_nvm_wordsize = 2048;
1868 		} else {
1869 			/* SPI */
1870 			sc->sc_flags |= WM_F_EEPROM_SPI;
1871 			wm_nvm_set_addrbits_size_eecd(sc);
1872 		}
1873 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1874 		break;
1875 	case WM_T_82575:
1876 	case WM_T_82576:
1877 	case WM_T_82580:
1878 	case WM_T_I350:
1879 	case WM_T_I354:
1880 	case WM_T_80003:
1881 		/* SPI */
1882 		sc->sc_flags |= WM_F_EEPROM_SPI;
1883 		wm_nvm_set_addrbits_size_eecd(sc);
1884 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
1885 		    | WM_F_LOCK_SWSM;
1886 		break;
1887 	case WM_T_ICH8:
1888 	case WM_T_ICH9:
1889 	case WM_T_ICH10:
1890 	case WM_T_PCH:
1891 	case WM_T_PCH2:
1892 	case WM_T_PCH_LPT:
1893 		/* FLASH */
1894 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1895 		sc->sc_nvm_wordsize = 2048;
1896 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
1897 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1898 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
1899 			aprint_error_dev(sc->sc_dev,
1900 			    "can't map FLASH registers\n");
1901 			goto out;
1902 		}
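		/*
		 * GFPREG encodes the first and last sectors of the flash
		 * region; the region holds two NVM banks, so the per-bank
		 * size computed below is (region size / 2) in 16-bit words.
		 */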
1903 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1904 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1905 		    ICH_FLASH_SECTOR_SIZE;
1906 		sc->sc_ich8_flash_bank_size =
1907 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1908 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
1909 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1910 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1911 		break;
1912 	case WM_T_I210:
1913 	case WM_T_I211:
1914 		if (wm_nvm_get_flash_presence_i210(sc)) {
1915 			wm_nvm_set_addrbits_size_eecd(sc);
1916 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1917 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
1918 		} else {
1919 			sc->sc_nvm_wordsize = INVM_SIZE;
1920 			sc->sc_flags |= WM_F_EEPROM_INVM;
1921 			sc->sc_flags |= WM_F_LOCK_SWFW;
1922 		}
1923 		break;
1924 	default:
1925 		break;
1926 	}
1927 
1928 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
1929 	switch (sc->sc_type) {
1930 	case WM_T_82571:
1931 	case WM_T_82572:
1932 		reg = CSR_READ(sc, WMREG_SWSM2);
1933 		if ((reg & SWSM2_LOCK) == 0) {
1934 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
1935 			force_clear_smbi = true;
1936 		} else
1937 			force_clear_smbi = false;
1938 		break;
1939 	case WM_T_82573:
1940 	case WM_T_82574:
1941 	case WM_T_82583:
1942 		force_clear_smbi = true;
1943 		break;
1944 	default:
1945 		force_clear_smbi = false;
1946 		break;
1947 	}
1948 	if (force_clear_smbi) {
1949 		reg = CSR_READ(sc, WMREG_SWSM);
1950 		if ((reg & SWSM_SMBI) != 0)
1951 			aprint_error_dev(sc->sc_dev,
1952 			    "Please update the Bootagent\n");
1953 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
1954 	}
1955 
1956 	/*
1957 	 * Defer printing the EEPROM type until after verifying its
1958 	 * checksum.  This allows the EEPROM type to be printed correctly
1959 	 * in the case that no EEPROM is attached.
1960 	 */
1961 	/*
1962 	 * Validate the EEPROM checksum. If the checksum fails, flag
1963 	 * this for later, so we can fail future reads from the EEPROM.
1964 	 */
1965 	if (wm_nvm_validate_checksum(sc)) {
1966 		/*
1967 		 * Validate the checksum again; some PCI-e parts fail the
1968 		 * first check due to the link being in a sleep state.
1969 		 */
1970 		if (wm_nvm_validate_checksum(sc))
1971 			sc->sc_flags |= WM_F_EEPROM_INVALID;
1972 	}
1973 
1974 	/* Set device properties (macflags) */
1975 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1976 
1977 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
1978 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
1979 	else {
1980 		aprint_verbose_dev(sc->sc_dev, "%u words ",
1981 		    sc->sc_nvm_wordsize);
1982 		if (sc->sc_flags & WM_F_EEPROM_INVM)
1983 			aprint_verbose("iNVM");
1984 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
1985 			aprint_verbose("FLASH(HW)");
1986 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
1987 			aprint_verbose("FLASH");
1988 		else {
1989 			if (sc->sc_flags & WM_F_EEPROM_SPI)
1990 				eetype = "SPI";
1991 			else
1992 				eetype = "MicroWire";
1993 			aprint_verbose("(%d address bits) %s EEPROM",
1994 			    sc->sc_nvm_addrbits, eetype);
1995 		}
1996 	}
1997 	wm_nvm_version(sc);
1998 	aprint_verbose("\n");
1999 
2000 	/* Check for I21[01] PLL workaround */
2001 	if (sc->sc_type == WM_T_I210)
2002 		sc->sc_flags |= WM_F_PLL_WA_I210;
2003 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
2004 		/* NVM image release 3.25 has a workaround */
2005 		if ((sc->sc_nvm_ver_major < 3)
2006 		    || ((sc->sc_nvm_ver_major == 3)
2007 			&& (sc->sc_nvm_ver_minor < 25))) {
2008 			aprint_verbose_dev(sc->sc_dev,
2009 			    "ROM image version %d.%d is older than 3.25\n",
2010 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2011 			sc->sc_flags |= WM_F_PLL_WA_I210;
2012 		}
2013 	}
2014 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2015 		wm_pll_workaround_i210(sc);
2016 
2017 	wm_get_wakeup(sc);
2018 	switch (sc->sc_type) {
2019 	case WM_T_82571:
2020 	case WM_T_82572:
2021 	case WM_T_82573:
2022 	case WM_T_82574:
2023 	case WM_T_82583:
2024 	case WM_T_80003:
2025 	case WM_T_ICH8:
2026 	case WM_T_ICH9:
2027 	case WM_T_ICH10:
2028 	case WM_T_PCH:
2029 	case WM_T_PCH2:
2030 	case WM_T_PCH_LPT:
2031 		/* Non-AMT based hardware can now take control from firmware */
2032 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2033 			wm_get_hw_control(sc);
2034 		break;
2035 	default:
2036 		break;
2037 	}
2038 
2039 	/*
2040 	 * Read the Ethernet address from the EEPROM, unless it was
2041 	 * already found in the device properties.
2042 	 */
2043 	ea = prop_dictionary_get(dict, "mac-address");
2044 	if (ea != NULL) {
2045 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2046 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2047 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
2048 	} else {
2049 		if (wm_read_mac_addr(sc, enaddr) != 0) {
2050 			aprint_error_dev(sc->sc_dev,
2051 			    "unable to read Ethernet address\n");
2052 			goto out;
2053 		}
2054 	}
2055 
2056 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2057 	    ether_sprintf(enaddr));
2058 
2059 	/*
2060 	 * Read the config info from the EEPROM, and set up various
2061 	 * bits in the control registers based on their contents.
2062 	 */
2063 	pn = prop_dictionary_get(dict, "i82543-cfg1");
2064 	if (pn != NULL) {
2065 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2066 		cfg1 = (uint16_t) prop_number_integer_value(pn);
2067 	} else {
2068 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2069 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2070 			goto out;
2071 		}
2072 	}
2073 
2074 	pn = prop_dictionary_get(dict, "i82543-cfg2");
2075 	if (pn != NULL) {
2076 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2077 		cfg2 = (uint16_t) prop_number_integer_value(pn);
2078 	} else {
2079 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2080 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2081 			goto out;
2082 		}
2083 	}
2084 
2085 	/* check for WM_F_WOL */
2086 	switch (sc->sc_type) {
2087 	case WM_T_82542_2_0:
2088 	case WM_T_82542_2_1:
2089 	case WM_T_82543:
2090 		/* dummy? */
2091 		eeprom_data = 0;
2092 		apme_mask = NVM_CFG3_APME;
2093 		break;
2094 	case WM_T_82544:
2095 		apme_mask = NVM_CFG2_82544_APM_EN;
2096 		eeprom_data = cfg2;
2097 		break;
2098 	case WM_T_82546:
2099 	case WM_T_82546_3:
2100 	case WM_T_82571:
2101 	case WM_T_82572:
2102 	case WM_T_82573:
2103 	case WM_T_82574:
2104 	case WM_T_82583:
2105 	case WM_T_80003:
2106 	default:
2107 		apme_mask = NVM_CFG3_APME;
2108 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2109 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2110 		break;
2111 	case WM_T_82575:
2112 	case WM_T_82576:
2113 	case WM_T_82580:
2114 	case WM_T_I350:
2115 	case WM_T_I354: /* XXX ok? */
2116 	case WM_T_ICH8:
2117 	case WM_T_ICH9:
2118 	case WM_T_ICH10:
2119 	case WM_T_PCH:
2120 	case WM_T_PCH2:
2121 	case WM_T_PCH_LPT:
2122 		/* XXX The funcid should be checked on some devices */
2123 		apme_mask = WUC_APME;
2124 		eeprom_data = CSR_READ(sc, WMREG_WUC);
2125 		break;
2126 	}
2127 
2128 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2129 	if ((eeprom_data & apme_mask) != 0)
2130 		sc->sc_flags |= WM_F_WOL;
2131 #ifdef WM_DEBUG
2132 	if ((sc->sc_flags & WM_F_WOL) != 0)
2133 		printf("WOL\n");
2134 #endif
2135 
2136 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
2137 		/* Check NVM for autonegotiation */
2138 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2139 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
2140 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2141 		}
2142 	}
2143 
2144 	/*
2145 	 * XXX need special handling for some multiple-port cards
2146 	 * to disable a particular port.
2147 	 */
2148 
2149 	if (sc->sc_type >= WM_T_82544) {
2150 		pn = prop_dictionary_get(dict, "i82543-swdpin");
2151 		if (pn != NULL) {
2152 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2153 			swdpin = (uint16_t) prop_number_integer_value(pn);
2154 		} else {
2155 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2156 				aprint_error_dev(sc->sc_dev,
2157 				    "unable to read SWDPIN\n");
2158 				goto out;
2159 			}
2160 		}
2161 	}
2162 
2163 	if (cfg1 & NVM_CFG1_ILOS)
2164 		sc->sc_ctrl |= CTRL_ILOS;
2165 
2166 	/*
2167 	 * XXX
2168 	 * This code isn't correct because pins 2 and 3 are located
2169 	 * in different positions on newer chips. Check all datasheets.
2170 	 *
2171 	 * Until this problem is resolved, only do this for chips <= 82580.
2172 	 */
2173 	if (sc->sc_type <= WM_T_82580) {
2174 		if (sc->sc_type >= WM_T_82544) {
2175 			sc->sc_ctrl |=
2176 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2177 			    CTRL_SWDPIO_SHIFT;
2178 			sc->sc_ctrl |=
2179 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2180 			    CTRL_SWDPINS_SHIFT;
2181 		} else {
2182 			sc->sc_ctrl |=
2183 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2184 			    CTRL_SWDPIO_SHIFT;
2185 		}
2186 	}
2187 
2188 	/* XXX For other than 82580? */
2189 	if (sc->sc_type == WM_T_82580) {
2190 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
2191 		printf("CFG3 = %08x\n", (uint32_t)nvmword);
2192 		if (nvmword & __BIT(13)) {
2193 			printf("SET ILOS\n");
2194 			sc->sc_ctrl |= CTRL_ILOS;
2195 		}
2196 	}
2197 
2198 #if 0
2199 	if (sc->sc_type >= WM_T_82544) {
2200 		if (cfg1 & NVM_CFG1_IPS0)
2201 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2202 		if (cfg1 & NVM_CFG1_IPS1)
2203 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2204 		sc->sc_ctrl_ext |=
2205 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2206 		    CTRL_EXT_SWDPIO_SHIFT;
2207 		sc->sc_ctrl_ext |=
2208 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2209 		    CTRL_EXT_SWDPINS_SHIFT;
2210 	} else {
2211 		sc->sc_ctrl_ext |=
2212 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2213 		    CTRL_EXT_SWDPIO_SHIFT;
2214 	}
2215 #endif
2216 
2217 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2218 #if 0
2219 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2220 #endif
2221 
2222 	if (sc->sc_type == WM_T_PCH) {
2223 		uint16_t val;
2224 
2225 		/* Save the NVM K1 bit setting */
2226 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2227 
2228 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2229 			sc->sc_nvm_k1_enabled = 1;
2230 		else
2231 			sc->sc_nvm_k1_enabled = 0;
2232 	}
2233 
2234 	/*
2235 	 * Determine if we're TBI, GMII or SGMII mode, and initialize the
2236 	 * media structures accordingly.
2237 	 */
2238 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2239 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2240 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2241 	    || sc->sc_type == WM_T_82573
2242 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2243 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
2244 		wm_gmii_mediainit(sc, wmp->wmp_product);
2245 	} else if (sc->sc_type < WM_T_82543 ||
2246 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2247 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2248 			aprint_error_dev(sc->sc_dev,
2249 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
2250 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2251 		}
2252 		wm_tbi_mediainit(sc);
2253 	} else {
2254 		switch (sc->sc_type) {
2255 		case WM_T_82575:
2256 		case WM_T_82576:
2257 		case WM_T_82580:
2258 		case WM_T_I350:
2259 		case WM_T_I354:
2260 		case WM_T_I210:
2261 		case WM_T_I211:
2262 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
2263 			link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
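			/*
			 * The link mode selects 1000BASE-KX, SGMII, SERDES
			 * or internal GMII; for the SGMII and SERDES modes
			 * the SFP module type probed below may override it.
			 */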
2264 			switch (link_mode) {
2265 			case CTRL_EXT_LINK_MODE_1000KX:
2266 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2267 				sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2268 				break;
2269 			case CTRL_EXT_LINK_MODE_SGMII:
2270 				if (wm_sgmii_uses_mdio(sc)) {
2271 					aprint_verbose_dev(sc->sc_dev,
2272 					    "SGMII(MDIO)\n");
2273 					sc->sc_flags |= WM_F_SGMII;
2274 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2275 					break;
2276 				}
2277 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2278 				/*FALLTHROUGH*/
2279 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2280 				sc->sc_mediatype = wm_sfp_get_media_type(sc);
2281 				if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2282 					if (link_mode
2283 					    == CTRL_EXT_LINK_MODE_SGMII) {
2284 						sc->sc_mediatype
2285 						    = WM_MEDIATYPE_COPPER;
2286 						sc->sc_flags |= WM_F_SGMII;
2287 					} else {
2288 						sc->sc_mediatype
2289 						    = WM_MEDIATYPE_SERDES;
2290 						aprint_verbose_dev(sc->sc_dev,
2291 						    "SERDES\n");
2292 					}
2293 					break;
2294 				}
2295 				if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2296 					aprint_verbose_dev(sc->sc_dev,
2297 					    "SERDES\n");
2298 
2299 				/* Change current link mode setting */
2300 				reg &= ~CTRL_EXT_LINK_MODE_MASK;
2301 				switch (sc->sc_mediatype) {
2302 				case WM_MEDIATYPE_COPPER:
2303 					reg |= CTRL_EXT_LINK_MODE_SGMII;
2304 					break;
2305 				case WM_MEDIATYPE_SERDES:
2306 					reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2307 					break;
2308 				default:
2309 					break;
2310 				}
2311 				CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2312 				break;
2313 			case CTRL_EXT_LINK_MODE_GMII:
2314 			default:
2315 				aprint_verbose_dev(sc->sc_dev, "Copper\n");
2316 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2317 				break;
2318 			}
2319 
2321 			if ((sc->sc_flags & WM_F_SGMII) != 0)
2322 				reg |= CTRL_EXT_I2C_ENA;
2323 			else
2324 				reg &= ~CTRL_EXT_I2C_ENA;
2325 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2326 
2327 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2328 				wm_gmii_mediainit(sc, wmp->wmp_product);
2329 			else
2330 				wm_tbi_mediainit(sc);
2331 			break;
2332 		default:
2333 			if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
2334 				aprint_error_dev(sc->sc_dev,
2335 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2336 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2337 			wm_gmii_mediainit(sc, wmp->wmp_product);
2338 		}
2339 	}
2340 
2341 	ifp = &sc->sc_ethercom.ec_if;
2342 	xname = device_xname(sc->sc_dev);
2343 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2344 	ifp->if_softc = sc;
2345 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2346 	ifp->if_ioctl = wm_ioctl;
2347 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
2348 		ifp->if_start = wm_nq_start;
2349 	else
2350 		ifp->if_start = wm_start;
2351 	ifp->if_watchdog = wm_watchdog;
2352 	ifp->if_init = wm_init;
2353 	ifp->if_stop = wm_stop;
2354 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2355 	IFQ_SET_READY(&ifp->if_snd);
2356 
2357 	/* Check for jumbo frame */
2358 	switch (sc->sc_type) {
2359 	case WM_T_82573:
2360 		/* XXX limited to 9234 if ASPM is disabled */
2361 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2362 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2363 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2364 		break;
2365 	case WM_T_82571:
2366 	case WM_T_82572:
2367 	case WM_T_82574:
2368 	case WM_T_82575:
2369 	case WM_T_82576:
2370 	case WM_T_82580:
2371 	case WM_T_I350:
2372 	case WM_T_I354: /* XXXX ok? */
2373 	case WM_T_I210:
2374 	case WM_T_I211:
2375 	case WM_T_80003:
2376 	case WM_T_ICH9:
2377 	case WM_T_ICH10:
2378 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
2379 	case WM_T_PCH_LPT:
2380 		/* XXX limited to 9234 */
2381 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2382 		break;
2383 	case WM_T_PCH:
2384 		/* XXX limited to 4096 */
2385 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2386 		break;
2387 	case WM_T_82542_2_0:
2388 	case WM_T_82542_2_1:
2389 	case WM_T_82583:
2390 	case WM_T_ICH8:
2391 		/* No support for jumbo frame */
2392 		break;
2393 	default:
2394 		/* ETHER_MAX_LEN_JUMBO */
2395 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2396 		break;
2397 	}
2398 
2399 	/* If we're an i82543 or greater, we can support VLANs. */
2400 	if (sc->sc_type >= WM_T_82543)
2401 		sc->sc_ethercom.ec_capabilities |=
2402 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2403 
2404 	/*
2405 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
2406 	 * on i82543 and later.
2407 	 */
2408 	if (sc->sc_type >= WM_T_82543) {
2409 		ifp->if_capabilities |=
2410 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2411 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2412 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2413 		    IFCAP_CSUM_TCPv6_Tx |
2414 		    IFCAP_CSUM_UDPv6_Tx;
2415 	}
2416 
2417 	/*
2418 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
2419 	 *
2420 	 *	82541GI (8086:1076) ... no
2421 	 *	82572EI (8086:10b9) ... yes
2422 	 */
2423 	if (sc->sc_type >= WM_T_82571) {
2424 		ifp->if_capabilities |=
2425 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2426 	}
2427 
2428 	/*
2429 	 * If we're an i82544 or greater (except the i82547), we can do
2430 	 * TCP segmentation offload.
2431 	 */
2432 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2433 		ifp->if_capabilities |= IFCAP_TSOv4;
2434 	}
2435 
2436 	if (sc->sc_type >= WM_T_82571) {
2437 		ifp->if_capabilities |= IFCAP_TSOv6;
2438 	}
2439 
2440 #ifdef WM_MPSAFE
2441 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2442 #else
2443 	sc->sc_core_lock = NULL;
2444 #endif
2445 
2446 	/* Attach the interface. */
2447 	if_attach(ifp);
2448 	ether_ifattach(ifp, enaddr);
2449 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2450 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2451 			  RND_FLAG_DEFAULT);
2452 
2453 #ifdef WM_EVENT_COUNTERS
2454 	/* Attach event counters. */
2455 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2456 	    NULL, xname, "txsstall");
2457 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2458 	    NULL, xname, "txdstall");
2459 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2460 	    NULL, xname, "txfifo_stall");
2461 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2462 	    NULL, xname, "txdw");
2463 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2464 	    NULL, xname, "txqe");
2465 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2466 	    NULL, xname, "rxintr");
2467 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2468 	    NULL, xname, "linkintr");
2469 
2470 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2471 	    NULL, xname, "rxipsum");
2472 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2473 	    NULL, xname, "rxtusum");
2474 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2475 	    NULL, xname, "txipsum");
2476 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2477 	    NULL, xname, "txtusum");
2478 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2479 	    NULL, xname, "txtusum6");
2480 
2481 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2482 	    NULL, xname, "txtso");
2483 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2484 	    NULL, xname, "txtso6");
2485 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2486 	    NULL, xname, "txtsopain");
2487 
2488 	for (i = 0; i < WM_NTXSEGS; i++) {
2489 		snprintf(wm_txseg_evcnt_names[i],
2490 		    sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2491 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2492 		    NULL, xname, wm_txseg_evcnt_names[i]);
2493 	}
2494 
2495 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2496 	    NULL, xname, "txdrop");
2497 
2498 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2499 	    NULL, xname, "tu");
2500 
2501 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2502 	    NULL, xname, "tx_xoff");
2503 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2504 	    NULL, xname, "tx_xon");
2505 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2506 	    NULL, xname, "rx_xoff");
2507 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2508 	    NULL, xname, "rx_xon");
2509 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2510 	    NULL, xname, "rx_macctl");
2511 #endif /* WM_EVENT_COUNTERS */
2512 
2513 	if (pmf_device_register(self, wm_suspend, wm_resume))
2514 		pmf_class_network_register(self, ifp);
2515 	else
2516 		aprint_error_dev(self, "couldn't establish power handler\n");
2517 
2518 	sc->sc_flags |= WM_F_ATTACHED;
2519  out:
2520 	return;
2521 }
2522 
2523 /* The detach function (ca_detach) */
2524 static int
2525 wm_detach(device_t self, int flags __unused)
2526 {
2527 	struct wm_softc *sc = device_private(self);
2528 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2529 	int i;
2530 #ifndef WM_MPSAFE
2531 	int s;
2532 #endif
2533 
2534 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2535 		return 0;
2536 
2537 #ifndef WM_MPSAFE
2538 	s = splnet();
2539 #endif
2540 	/* Stop the interface; the callouts are stopped inside wm_stop(). */
2541 	wm_stop(ifp, 1);
2542 
2543 #ifndef WM_MPSAFE
2544 	splx(s);
2545 #endif
2546 
2547 	pmf_device_deregister(self);
2548 
2549 	/* Tell the firmware about the release */
2550 	WM_CORE_LOCK(sc);
2551 	wm_release_manageability(sc);
2552 	wm_release_hw_control(sc);
2553 	WM_CORE_UNLOCK(sc);
2554 
2555 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2556 
2557 	/* Delete all remaining media. */
2558 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2559 
2560 	ether_ifdetach(ifp);
2561 	if_detach(ifp);
2562 
2563 
2564 	/* Unload RX dmamaps and free mbufs */
2565 	for (i = 0; i < sc->sc_nrxqueues; i++) {
2566 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
2567 		WM_RX_LOCK(rxq);
2568 		wm_rxdrain(rxq);
2569 		WM_RX_UNLOCK(rxq);
2570 	}
2571 	/* Must unlock here */
2572 
2573 	wm_free_txrx_queues(sc);
2574 
2575 	/* Disestablish the interrupt handler */
2576 	for (i = 0; i < sc->sc_nintrs; i++) {
2577 		if (sc->sc_ihs[i] != NULL) {
2578 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
2579 			sc->sc_ihs[i] = NULL;
2580 		}
2581 	}
2582 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
2583 
2584 	/* Unmap the registers */
2585 	if (sc->sc_ss) {
2586 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2587 		sc->sc_ss = 0;
2588 	}
2589 	if (sc->sc_ios) {
2590 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2591 		sc->sc_ios = 0;
2592 	}
2593 	if (sc->sc_flashs) {
2594 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
2595 		sc->sc_flashs = 0;
2596 	}
2597 
2598 	if (sc->sc_core_lock)
2599 		mutex_obj_free(sc->sc_core_lock);
2600 
2601 	return 0;
2602 }
2603 
2604 static bool
2605 wm_suspend(device_t self, const pmf_qual_t *qual)
2606 {
2607 	struct wm_softc *sc = device_private(self);
2608 
2609 	wm_release_manageability(sc);
2610 	wm_release_hw_control(sc);
2611 #ifdef WM_WOL
2612 	wm_enable_wakeup(sc);
2613 #endif
2614 
2615 	return true;
2616 }
2617 
2618 static bool
2619 wm_resume(device_t self, const pmf_qual_t *qual)
2620 {
2621 	struct wm_softc *sc = device_private(self);
2622 
2623 	wm_init_manageability(sc);
2624 
2625 	return true;
2626 }
2627 
2628 /*
2629  * wm_watchdog:		[ifnet interface function]
2630  *
2631  *	Watchdog timer handler.
2632  */
2633 static void
2634 wm_watchdog(struct ifnet *ifp)
2635 {
2636 	struct wm_softc *sc = ifp->if_softc;
2637 	struct wm_txqueue *txq = &sc->sc_txq[0];
2638 
2639 	/*
2640 	 * Since we're using delayed interrupts, sweep up
2641 	 * before we report an error.
2642 	 */
2643 	WM_TX_LOCK(txq);
2644 	wm_txeof(sc);
2645 	WM_TX_UNLOCK(txq);
2646 
2647 	if (txq->txq_free != WM_NTXDESC(txq)) {
2648 #ifdef WM_DEBUG
2649 		int i, j;
2650 		struct wm_txsoft *txs;
2651 #endif
2652 		log(LOG_ERR,
2653 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2654 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
2655 		    txq->txq_next);
2656 		ifp->if_oerrors++;
2657 #ifdef WM_DEBUG
2658 		for (i = txq->txq_sdirty; i != txq->txq_snext;
2659 		    i = WM_NEXTTXS(txq, i)) {
2660 			txs = &txq->txq_soft[i];
2661 			printf("txs %d tx %d -> %d\n",
2662 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
2663 			for (j = txs->txs_firstdesc; ;
2664 			    j = WM_NEXTTX(txq, j)) {
2665 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2666 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
2667 				printf("\t %#08x%08x\n",
2668 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
2669 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
2670 				if (j == txs->txs_lastdesc)
2671 					break;
2672 			}
2673 		}
2674 #endif
2675 		/* Reset the interface. */
2676 		(void) wm_init(ifp);
2677 	}
2678 
2679 	/* Try to get more packets going. */
2680 	ifp->if_start(ifp);
2681 }
2682 
2683 /*
2684  * wm_tick:
2685  *
2686  *	One second timer, used to check link status, sweep up
2687  *	completed transmit jobs, etc.
2688  */
2689 static void
2690 wm_tick(void *arg)
2691 {
2692 	struct wm_softc *sc = arg;
2693 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2694 #ifndef WM_MPSAFE
2695 	int s;
2696 
2697 	s = splnet();
2698 #endif
2699 
2700 	WM_CORE_LOCK(sc);
2701 
2702 	if (sc->sc_stopping)
2703 		goto out;
2704 
2705 	if (sc->sc_type >= WM_T_82542_2_1) {
2706 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2707 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2708 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2709 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2710 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2711 	}
2712 
2713 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2714 	ifp->if_ierrors += 0ULL /* ensure quad_t */
2715 	    + CSR_READ(sc, WMREG_CRCERRS)
2716 	    + CSR_READ(sc, WMREG_ALGNERRC)
2717 	    + CSR_READ(sc, WMREG_SYMERRC)
2718 	    + CSR_READ(sc, WMREG_RXERRC)
2719 	    + CSR_READ(sc, WMREG_SEC)
2720 	    + CSR_READ(sc, WMREG_CEXTERR)
2721 	    + CSR_READ(sc, WMREG_RLEC);
2722 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2723 
2724 	if (sc->sc_flags & WM_F_HAS_MII)
2725 		mii_tick(&sc->sc_mii);
2726 	else if ((sc->sc_type >= WM_T_82575)
2727 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
2728 		wm_serdes_tick(sc);
2729 	else
2730 		wm_tbi_tick(sc);
2731 
2732 out:
2733 	WM_CORE_UNLOCK(sc);
2734 #ifndef WM_MPSAFE
2735 	splx(s);
2736 #endif
2737 
2738 	if (!sc->sc_stopping)
2739 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2740 }
2741 
2742 static int
2743 wm_ifflags_cb(struct ethercom *ec)
2744 {
2745 	struct ifnet *ifp = &ec->ec_if;
2746 	struct wm_softc *sc = ifp->if_softc;
2747 	int change = ifp->if_flags ^ sc->sc_if_flags;
2748 	int rc = 0;
2749 
2750 	WM_CORE_LOCK(sc);
2751 
2752 	if (change != 0)
2753 		sc->sc_if_flags = ifp->if_flags;
2754 
2755 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
2756 		rc = ENETRESET;
2757 		goto out;
2758 	}
2759 
2760 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2761 		wm_set_filter(sc);
2762 
2763 	wm_set_vlan(sc);
2764 
2765 out:
2766 	WM_CORE_UNLOCK(sc);
2767 
2768 	return rc;
2769 }
2770 
2771 /*
2772  * wm_ioctl:		[ifnet interface function]
2773  *
2774  *	Handle control requests from the operator.
2775  */
2776 static int
2777 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2778 {
2779 	struct wm_softc *sc = ifp->if_softc;
2780 	struct ifreq *ifr = (struct ifreq *) data;
2781 	struct ifaddr *ifa = (struct ifaddr *)data;
2782 	struct sockaddr_dl *sdl;
2783 	int s, error;
2784 
2785 #ifndef WM_MPSAFE
2786 	s = splnet();
2787 #endif
2788 	switch (cmd) {
2789 	case SIOCSIFMEDIA:
2790 	case SIOCGIFMEDIA:
2791 		WM_CORE_LOCK(sc);
2792 		/* Flow control requires full-duplex mode. */
2793 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2794 		    (ifr->ifr_media & IFM_FDX) == 0)
2795 			ifr->ifr_media &= ~IFM_ETH_FMASK;
2796 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2797 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2798 				/* We can do both TXPAUSE and RXPAUSE. */
2799 				ifr->ifr_media |=
2800 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2801 			}
2802 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2803 		}
2804 		WM_CORE_UNLOCK(sc);
2805 #ifdef WM_MPSAFE
2806 		s = splnet();
2807 #endif
2808 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2809 #ifdef WM_MPSAFE
2810 		splx(s);
2811 #endif
2812 		break;
2813 	case SIOCINITIFADDR:
2814 		WM_CORE_LOCK(sc);
2815 		if (ifa->ifa_addr->sa_family == AF_LINK) {
2816 			sdl = satosdl(ifp->if_dl->ifa_addr);
2817 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2818 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2819 			/* unicast address is first multicast entry */
2820 			wm_set_filter(sc);
2821 			error = 0;
2822 			WM_CORE_UNLOCK(sc);
2823 			break;
2824 		}
2825 		WM_CORE_UNLOCK(sc);
2826 		/*FALLTHROUGH*/
2827 	default:
2828 #ifdef WM_MPSAFE
2829 		s = splnet();
2830 #endif
2831 		/* It may call wm_start, so unlock here */
2832 		error = ether_ioctl(ifp, cmd, data);
2833 #ifdef WM_MPSAFE
2834 		splx(s);
2835 #endif
2836 		if (error != ENETRESET)
2837 			break;
2838 
2839 		error = 0;
2840 
2841 		if (cmd == SIOCSIFCAP) {
2842 			error = (*ifp->if_init)(ifp);
2843 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2844 			;
2845 		else if (ifp->if_flags & IFF_RUNNING) {
2846 			/*
2847 			 * Multicast list has changed; set the hardware filter
2848 			 * accordingly.
2849 			 */
2850 			WM_CORE_LOCK(sc);
2851 			wm_set_filter(sc);
2852 			WM_CORE_UNLOCK(sc);
2853 		}
2854 		break;
2855 	}
2856 
2857 #ifndef WM_MPSAFE
2858 	splx(s);
2859 #endif
2860 	return error;
2861 }
2862 
2863 /* MAC address related */
2864 
2865 /*
2866  * Get the offset of the MAC address and return it.
2867  * If an error occurs, offset 0 is used.
2868  */
2869 static uint16_t
2870 wm_check_alt_mac_addr(struct wm_softc *sc)
2871 {
2872 	uint16_t myea[ETHER_ADDR_LEN / 2];
2873 	uint16_t offset = NVM_OFF_MACADDR;
2874 
2875 	/* Try to read alternative MAC address pointer */
2876 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
2877 		return 0;
2878 
2879 	/* Check whether the pointer is valid or not. */
2880 	if ((offset == 0x0000) || (offset == 0xffff))
2881 		return 0;
2882 
2883 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
2884 	/*
2885 	 * Check whether the alternative MAC address is valid or not.
2886 	 * Some cards have a non-0xffff pointer but don't actually
2887 	 * use an alternative MAC address.
2888 	 *
2889 	 * Check whether the broadcast bit is set or not.
2890 	 */
2891 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
2892 		if (((myea[0] & 0xff) & 0x01) == 0)
2893 			return offset; /* Found */
2894 
2895 	/* Not found */
2896 	return 0;
2897 }
2898 
2899 static int
2900 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
2901 {
2902 	uint16_t myea[ETHER_ADDR_LEN / 2];
2903 	uint16_t offset = NVM_OFF_MACADDR;
2904 	int do_invert = 0;
2905 
2906 	switch (sc->sc_type) {
2907 	case WM_T_82580:
2908 	case WM_T_I350:
2909 	case WM_T_I354:
2910 		/* EEPROM Top Level Partitioning */
2911 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
2912 		break;
2913 	case WM_T_82571:
2914 	case WM_T_82575:
2915 	case WM_T_82576:
2916 	case WM_T_80003:
2917 	case WM_T_I210:
2918 	case WM_T_I211:
2919 		offset = wm_check_alt_mac_addr(sc);
2920 		if (offset == 0)
2921 			if ((sc->sc_funcid & 0x01) == 1)
2922 				do_invert = 1;
2923 		break;
2924 	default:
2925 		if ((sc->sc_funcid & 0x01) == 1)
2926 			do_invert = 1;
2927 		break;
2928 	}
2929 
2930 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
2931 		myea) != 0)
2932 		goto bad;
2933 
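	/* NVM words are little-endian; unpack them into six bytes. */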
2934 	enaddr[0] = myea[0] & 0xff;
2935 	enaddr[1] = myea[0] >> 8;
2936 	enaddr[2] = myea[1] & 0xff;
2937 	enaddr[3] = myea[1] >> 8;
2938 	enaddr[4] = myea[2] & 0xff;
2939 	enaddr[5] = myea[2] >> 8;
2940 
2941 	/*
2942 	 * Toggle the LSB of the MAC address on the second port
2943 	 * of some dual port cards.
2944 	 */
2945 	if (do_invert != 0)
2946 		enaddr[5] ^= 1;
2947 
2948 	return 0;
2949 
2950  bad:
2951 	return -1;
2952 }
2953 
2954 /*
2955  * wm_set_ral:
2956  *
2957  *	Set an entry in the receive address list.
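 *
 *	A NULL enaddr clears the slot; otherwise the address-valid
 *	(RAL_AV) bit is set in the high word along with the last two
 *	octets of the address.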
2958  */
2959 static void
2960 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2961 {
2962 	uint32_t ral_lo, ral_hi;
2963 
2964 	if (enaddr != NULL) {
2965 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2966 		    (enaddr[3] << 24);
2967 		ral_hi = enaddr[4] | (enaddr[5] << 8);
2968 		ral_hi |= RAL_AV;
2969 	} else {
2970 		ral_lo = 0;
2971 		ral_hi = 0;
2972 	}
2973 
2974 	if (sc->sc_type >= WM_T_82544) {
2975 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2976 		    ral_lo);
2977 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2978 		    ral_hi);
2979 	} else {
2980 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2981 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2982 	}
2983 }
2984 
2985 /*
2986  * wm_mchash:
2987  *
2988  *	Compute the hash of the multicast address for the 4096-bit
2989  *	multicast filter.
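 *
 *	The hash is built from the high bits of enaddr[4] and the low
 *	bits of enaddr[5]; sc_mchash_type selects one of four bit
 *	splits.  ICH/PCH parts use a 10-bit hash (1024-bit filter)
 *	instead of 12 bits.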
2990  */
2991 static uint32_t
2992 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2993 {
2994 	static const int lo_shift[4] = { 4, 3, 2, 0 };
2995 	static const int hi_shift[4] = { 4, 5, 6, 8 };
2996 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
2997 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
2998 	uint32_t hash;
2999 
3000 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3001 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3002 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
3003 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3004 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3005 		return (hash & 0x3ff);
3006 	}
3007 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3008 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3009 
3010 	return (hash & 0xfff);
3011 }
3012 
3013 /*
3014  * wm_set_filter:
3015  *
3016  *	Set up the receive filter.
3017  */
3018 static void
3019 wm_set_filter(struct wm_softc *sc)
3020 {
3021 	struct ethercom *ec = &sc->sc_ethercom;
3022 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3023 	struct ether_multi *enm;
3024 	struct ether_multistep step;
3025 	bus_addr_t mta_reg;
3026 	uint32_t hash, reg, bit;
3027 	int i, size, max;
3028 
3029 	if (sc->sc_type >= WM_T_82544)
3030 		mta_reg = WMREG_CORDOVA_MTA;
3031 	else
3032 		mta_reg = WMREG_MTA;
3033 
3034 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3035 
3036 	if (ifp->if_flags & IFF_BROADCAST)
3037 		sc->sc_rctl |= RCTL_BAM;
3038 	if (ifp->if_flags & IFF_PROMISC) {
3039 		sc->sc_rctl |= RCTL_UPE;
3040 		goto allmulti;
3041 	}
3042 
3043 	/*
3044 	 * Set the station address in the first RAL slot, and
3045 	 * clear the remaining slots.
3046 	 */
3047 	if (sc->sc_type == WM_T_ICH8)
3048 		size = WM_RAL_TABSIZE_ICH8 - 1;
3049 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
3050 	    || (sc->sc_type == WM_T_PCH))
3051 		size = WM_RAL_TABSIZE_ICH8;
3052 	else if (sc->sc_type == WM_T_PCH2)
3053 		size = WM_RAL_TABSIZE_PCH2;
3054 	else if (sc->sc_type == WM_T_PCH_LPT)
3055 		size = WM_RAL_TABSIZE_PCH_LPT;
3056 	else if (sc->sc_type == WM_T_82575)
3057 		size = WM_RAL_TABSIZE_82575;
3058 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
3059 		size = WM_RAL_TABSIZE_82576;
3060 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3061 		size = WM_RAL_TABSIZE_I350;
3062 	else
3063 		size = WM_RAL_TABSIZE;
3064 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3065 
3066 	if (sc->sc_type == WM_T_PCH_LPT) {
3067 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
3068 		switch (i) {
3069 		case 0:
3070 			/* We can use all entries */
3071 			max = size;
3072 			break;
3073 		case 1:
3074 			/* Only RAR[0] */
3075 			max = 1;
3076 			break;
3077 		default:
3078 			/* available SHRA + RAR[0] */
3079 			max = i + 1;
3080 		}
3081 	} else
3082 		max = size;
3083 	for (i = 1; i < size; i++) {
3084 		if (i < max)
3085 			wm_set_ral(sc, NULL, i);
3086 	}
3087 
3088 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3089 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3090 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
3091 		size = WM_ICH8_MC_TABSIZE;
3092 	else
3093 		size = WM_MC_TABSIZE;
3094 	/* Clear out the multicast table. */
3095 	for (i = 0; i < size; i++)
3096 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
3097 
3098 	ETHER_FIRST_MULTI(step, ec, enm);
3099 	while (enm != NULL) {
3100 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3101 			/*
3102 			 * We must listen to a range of multicast addresses.
3103 			 * For now, just accept all multicasts, rather than
3104 			 * trying to set only those filter bits needed to match
3105 			 * the range.  (At this time, the only use of address
3106 			 * ranges is for IP multicast routing, for which the
3107 			 * range is big enough to require all bits set.)
3108 			 */
3109 			goto allmulti;
3110 		}
3111 
3112 		hash = wm_mchash(sc, enm->enm_addrlo);
3113 
3114 		reg = (hash >> 5);
3115 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3116 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3117 		    || (sc->sc_type == WM_T_PCH2)
3118 		    || (sc->sc_type == WM_T_PCH_LPT))
3119 			reg &= 0x1f;
3120 		else
3121 			reg &= 0x7f;
3122 		bit = hash & 0x1f;
3123 
3124 		hash = CSR_READ(sc, mta_reg + (reg << 2));
3125 		hash |= 1U << bit;
3126 
3127 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
3128 			/*
3129 			 * 82544 Errata 9: Certain registers cannot be written
3130 			 * with particular alignments in PCI-X bus operation
3131 			 * (FCAH, MTA and VFTA).
3132 			 */
3133 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3134 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3135 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3136 		} else
3137 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3138 
3139 		ETHER_NEXT_MULTI(step, enm);
3140 	}
3141 
3142 	ifp->if_flags &= ~IFF_ALLMULTI;
3143 	goto setit;
3144 
3145  allmulti:
3146 	ifp->if_flags |= IFF_ALLMULTI;
3147 	sc->sc_rctl |= RCTL_MPE;
3148 
3149  setit:
3150 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3151 }
3152 
3153 /* Reset and init related */
3154 
3155 static void
3156 wm_set_vlan(struct wm_softc *sc)
3157 {
3158 	/* Deal with VLAN enables. */
3159 	if (VLAN_ATTACHED(&sc->sc_ethercom))
3160 		sc->sc_ctrl |= CTRL_VME;
3161 	else
3162 		sc->sc_ctrl &= ~CTRL_VME;
3163 
3164 	/* Write the control registers. */
3165 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3166 }
3167 
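/*
 * wm_set_pcie_completion_timeout:
 *
 *	If the PCIe completion timeout is at its default of 0, set it
 *	to 16ms (or to 10ms via GCR on parts without the version 2
 *	capability); always disable completion timeout resend.
 */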
3168 static void
3169 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3170 {
3171 	uint32_t gcr;
3172 	pcireg_t ctrl2;
3173 
3174 	gcr = CSR_READ(sc, WMREG_GCR);
3175 
3176 	/* Only take action if timeout value is defaulted to 0 */
3177 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3178 		goto out;
3179 
3180 	if ((gcr & GCR_CAP_VER2) == 0) {
3181 		gcr |= GCR_CMPL_TMOUT_10MS;
3182 		goto out;
3183 	}
3184 
3185 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3186 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
3187 	ctrl2 |= WM_PCIE_DCSR2_16MS;
3188 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3189 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3190 
3191 out:
3192 	/* Disable completion timeout resend */
3193 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
3194 
3195 	CSR_WRITE(sc, WMREG_GCR, gcr);
3196 }
3197 
3198 void
3199 wm_get_auto_rd_done(struct wm_softc *sc)
3200 {
3201 	int i;
3202 
3203 	/* wait for eeprom to reload */
3204 	switch (sc->sc_type) {
3205 	case WM_T_82571:
3206 	case WM_T_82572:
3207 	case WM_T_82573:
3208 	case WM_T_82574:
3209 	case WM_T_82583:
3210 	case WM_T_82575:
3211 	case WM_T_82576:
3212 	case WM_T_82580:
3213 	case WM_T_I350:
3214 	case WM_T_I354:
3215 	case WM_T_I210:
3216 	case WM_T_I211:
3217 	case WM_T_80003:
3218 	case WM_T_ICH8:
3219 	case WM_T_ICH9:
3220 		for (i = 0; i < 10; i++) {
3221 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3222 				break;
3223 			delay(1000);
3224 		}
3225 		if (i == 10) {
3226 			log(LOG_ERR, "%s: auto read from eeprom failed to "
3227 			    "complete\n", device_xname(sc->sc_dev));
3228 		}
3229 		break;
3230 	default:
3231 		break;
3232 	}
3233 }
3234 
3235 void
3236 wm_lan_init_done(struct wm_softc *sc)
3237 {
3238 	uint32_t reg = 0;
3239 	int i;
3240 
3241 	/* wait for eeprom to reload */
3242 	switch (sc->sc_type) {
3243 	case WM_T_ICH10:
3244 	case WM_T_PCH:
3245 	case WM_T_PCH2:
3246 	case WM_T_PCH_LPT:
3247 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3248 			reg = CSR_READ(sc, WMREG_STATUS);
3249 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
3250 				break;
3251 			delay(100);
3252 		}
3253 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3254 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
3255 			    "complete\n", device_xname(sc->sc_dev), __func__);
3256 		}
3257 		break;
3258 	default:
3259 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3260 		    __func__);
3261 		break;
3262 	}
3263 
3264 	reg &= ~STATUS_LAN_INIT_DONE;
3265 	CSR_WRITE(sc, WMREG_STATUS, reg);
3266 }
3267 
3268 void
3269 wm_get_cfg_done(struct wm_softc *sc)
3270 {
3271 	int mask;
3272 	uint32_t reg;
3273 	int i;
3274 
3275 	/* wait for eeprom to reload */
3276 	switch (sc->sc_type) {
3277 	case WM_T_82542_2_0:
3278 	case WM_T_82542_2_1:
3279 		/* null */
3280 		break;
3281 	case WM_T_82543:
3282 	case WM_T_82544:
3283 	case WM_T_82540:
3284 	case WM_T_82545:
3285 	case WM_T_82545_3:
3286 	case WM_T_82546:
3287 	case WM_T_82546_3:
3288 	case WM_T_82541:
3289 	case WM_T_82541_2:
3290 	case WM_T_82547:
3291 	case WM_T_82547_2:
3292 	case WM_T_82573:
3293 	case WM_T_82574:
3294 	case WM_T_82583:
3295 		/* generic */
3296 		delay(10*1000);
3297 		break;
3298 	case WM_T_80003:
3299 	case WM_T_82571:
3300 	case WM_T_82572:
3301 	case WM_T_82575:
3302 	case WM_T_82576:
3303 	case WM_T_82580:
3304 	case WM_T_I350:
3305 	case WM_T_I354:
3306 	case WM_T_I210:
3307 	case WM_T_I211:
3308 		if (sc->sc_type == WM_T_82571) {
3309 			/* Only 82571 shares port 0 */
3310 			mask = EEMNGCTL_CFGDONE_0;
3311 		} else
3312 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3313 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3314 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3315 				break;
3316 			delay(1000);
3317 		}
3318 		if (i >= WM_PHY_CFG_TIMEOUT) {
3319 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3320 				device_xname(sc->sc_dev), __func__));
3321 		}
3322 		break;
3323 	case WM_T_ICH8:
3324 	case WM_T_ICH9:
3325 	case WM_T_ICH10:
3326 	case WM_T_PCH:
3327 	case WM_T_PCH2:
3328 	case WM_T_PCH_LPT:
3329 		delay(10*1000);
3330 		if (sc->sc_type >= WM_T_ICH10)
3331 			wm_lan_init_done(sc);
3332 		else
3333 			wm_get_auto_rd_done(sc);
3334 
3335 		reg = CSR_READ(sc, WMREG_STATUS);
3336 		if ((reg & STATUS_PHYRA) != 0)
3337 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3338 		break;
3339 	default:
3340 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3341 		    __func__);
3342 		break;
3343 	}
3344 }
3345 
3346 /* Init hardware bits */
3347 void
3348 wm_initialize_hardware_bits(struct wm_softc *sc)
3349 {
3350 	uint32_t tarc0, tarc1, reg;
3351 
3352 	/* For 82571 variants, 80003 and ICHs */
3353 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
3354 	    || (sc->sc_type >= WM_T_80003)) {
3355 
3356 		/* Transmit Descriptor Control 0 */
3357 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
3358 		reg |= TXDCTL_COUNT_DESC;
3359 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
3360 
3361 		/* Transmit Descriptor Control 1 */
3362 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
3363 		reg |= TXDCTL_COUNT_DESC;
3364 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
3365 
3366 		/* TARC0 */
3367 		tarc0 = CSR_READ(sc, WMREG_TARC0);
3368 		switch (sc->sc_type) {
3369 		case WM_T_82571:
3370 		case WM_T_82572:
3371 		case WM_T_82573:
3372 		case WM_T_82574:
3373 		case WM_T_82583:
3374 		case WM_T_80003:
3375 			/* Clear bits 30..27 */
3376 			tarc0 &= ~__BITS(30, 27);
3377 			break;
3378 		default:
3379 			break;
3380 		}
3381 
3382 		switch (sc->sc_type) {
3383 		case WM_T_82571:
3384 		case WM_T_82572:
3385 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
3386 
3387 			tarc1 = CSR_READ(sc, WMREG_TARC1);
3388 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
3389 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
3390 			/* 8257[12] Errata No.7 */
3391 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
3392 
3393 			/* TARC1 bit 28 */
3394 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3395 				tarc1 &= ~__BIT(28);
3396 			else
3397 				tarc1 |= __BIT(28);
3398 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
3399 
3400 			/*
3401 			 * 8257[12] Errata No.13
3402 			 * Disable Dynamic Clock Gating.
3403 			 */
3404 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
3405 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
3406 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3407 			break;
3408 		case WM_T_82573:
3409 		case WM_T_82574:
3410 		case WM_T_82583:
3411 			if ((sc->sc_type == WM_T_82574)
3412 			    || (sc->sc_type == WM_T_82583))
3413 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
3414 
3415 			/* Extended Device Control */
3416 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
3417 			reg &= ~__BIT(23);	/* Clear bit 23 */
3418 			reg |= __BIT(22);	/* Set bit 22 */
3419 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3420 
3421 			/* Device Control */
3422 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
3423 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3424 
3425 			/* PCIe Control Register */
3426 			/*
3427 			 * 82573 Errata (unknown).
3428 			 *
3429 			 * 82574 Errata 25 and 82583 Errata 12
3430 			 * "Dropped Rx Packets":
3431 			 *   NVM Image Version 2.1.4 and newer does not have this bug.
3432 			 */
3433 			reg = CSR_READ(sc, WMREG_GCR);
3434 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
3435 			CSR_WRITE(sc, WMREG_GCR, reg);
3436 
3437 			if ((sc->sc_type == WM_T_82574)
3438 			    || (sc->sc_type == WM_T_82583)) {
3439 				/*
3440 				 * Document says this bit must be set for
3441 				 * proper operation.
3442 				 */
3443 				reg = CSR_READ(sc, WMREG_GCR);
3444 				reg |= __BIT(22);
3445 				CSR_WRITE(sc, WMREG_GCR, reg);
3446 
3447 				/*
3448 				 * Apply a workaround for the hardware errata
3449 				 * documented in the errata docs.  It fixes an
3450 				 * issue where some error-prone or unreliable
3451 				 * PCIe completions occur, particularly with
3452 				 * ASPM enabled.  Without the fix, the issue
3453 				 * can cause Tx timeouts.
3454 				 */
3455 				reg = CSR_READ(sc, WMREG_GCR2);
3456 				reg |= __BIT(0);
3457 				CSR_WRITE(sc, WMREG_GCR2, reg);
3458 			}
3459 			break;
3460 		case WM_T_80003:
3461 			/* TARC0 */
3462 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
3463 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3464 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
3465 
3466 			/* TARC1 bit 28 */
3467 			tarc1 = CSR_READ(sc, WMREG_TARC1);
3468 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3469 				tarc1 &= ~__BIT(28);
3470 			else
3471 				tarc1 |= __BIT(28);
3472 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
3473 			break;
3474 		case WM_T_ICH8:
3475 		case WM_T_ICH9:
3476 		case WM_T_ICH10:
3477 		case WM_T_PCH:
3478 		case WM_T_PCH2:
3479 		case WM_T_PCH_LPT:
3480 			/* TARC 0 */
3481 			if (sc->sc_type == WM_T_ICH8) {
3482 				/* Set TARC0 bits 29 and 28 */
3483 				tarc0 |= __BITS(29, 28);
3484 			}
3485 			/* Set TARC0 bits 23,24,26,27 */
3486 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
3487 
3488 			/* CTRL_EXT */
3489 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
3490 			reg |= __BIT(22);	/* Set bit 22 */
3491 			/*
3492 			 * Enable PHY low-power state when MAC is at D3
3493 			 * w/o WoL
3494 			 */
3495 			if (sc->sc_type >= WM_T_PCH)
3496 				reg |= CTRL_EXT_PHYPDEN;
3497 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3498 
3499 			/* TARC1 */
3500 			tarc1 = CSR_READ(sc, WMREG_TARC1);
3501 			/* bit 28 */
3502 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3503 				tarc1 &= ~__BIT(28);
3504 			else
3505 				tarc1 |= __BIT(28);
3506 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
3507 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
3508 
3509 			/* Device Status */
3510 			if (sc->sc_type == WM_T_ICH8) {
3511 				reg = CSR_READ(sc, WMREG_STATUS);
3512 				reg &= ~__BIT(31);
3513 				CSR_WRITE(sc, WMREG_STATUS, reg);
3514 
3515 			}
3516 
3517 			/*
3518 			 * To work around a descriptor data corruption issue
3519 			 * seen with NFS v2 UDP traffic, just disable the NFS
3520 			 * filtering capability.
3521 			 */
3522 			reg = CSR_READ(sc, WMREG_RFCTL);
3523 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
3524 			CSR_WRITE(sc, WMREG_RFCTL, reg);
3525 			break;
3526 		default:
3527 			break;
3528 		}
3529 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
3530 
3531 		/*
3532 		 * 8257[12] Errata No.52 and some others.
3533 		 * Avoid RSS Hash Value bug.
3534 		 */
3535 		switch (sc->sc_type) {
3536 		case WM_T_82571:
3537 		case WM_T_82572:
3538 		case WM_T_82573:
3539 		case WM_T_80003:
3540 		case WM_T_ICH8:
3541 			reg = CSR_READ(sc, WMREG_RFCTL);
3542 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
3543 			CSR_WRITE(sc, WMREG_RFCTL, reg);
3544 			break;
3545 		default:
3546 			break;
3547 		}
3548 	}
3549 }
3550 
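/*
 * wm_rxpbs_adjust_82580:
 *
 *	Map the raw RXPBS register value to a packet buffer size using
 *	the wm_82580_rxpbs_table lookup table; out-of-range values map
 *	to zero.
 */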
3551 static uint32_t
3552 wm_rxpbs_adjust_82580(uint32_t val)
3553 {
3554 	uint32_t rv = 0;
3555 
3556 	if (val < __arraycount(wm_82580_rxpbs_table))
3557 		rv = wm_82580_rxpbs_table[val];
3558 
3559 	return rv;
3560 }
3561 
3562 /*
3563  * wm_reset:
3564  *
3565  *	Reset the chip.
3566  */
3567 static void
3568 wm_reset(struct wm_softc *sc)
3569 {
3570 	int phy_reset = 0;
3571 	int i, error = 0;
3572 	uint32_t reg, mask;
3573 
3574 	/*
3575 	 * Allocate on-chip memory according to the MTU size.
3576 	 * The Packet Buffer Allocation register must be written
3577 	 * before the chip is reset.
3578 	 */
3579 	switch (sc->sc_type) {
3580 	case WM_T_82547:
3581 	case WM_T_82547_2:
3582 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3583 		    PBA_22K : PBA_30K;
3584 		for (i = 0; i < sc->sc_ntxqueues; i++) {
3585 			struct wm_txqueue *txq = &sc->sc_txq[i];
3586 			txq->txq_fifo_head = 0;
3587 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3588 			txq->txq_fifo_size =
3589 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3590 			txq->txq_fifo_stall = 0;
3591 		}
3592 		break;
3593 	case WM_T_82571:
3594 	case WM_T_82572:
3595 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
3596 	case WM_T_80003:
3597 		sc->sc_pba = PBA_32K;
3598 		break;
3599 	case WM_T_82573:
3600 		sc->sc_pba = PBA_12K;
3601 		break;
3602 	case WM_T_82574:
3603 	case WM_T_82583:
3604 		sc->sc_pba = PBA_20K;
3605 		break;
3606 	case WM_T_82576:
3607 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
3608 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
3609 		break;
3610 	case WM_T_82580:
3611 	case WM_T_I350:
3612 	case WM_T_I354:
3613 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
3614 		break;
3615 	case WM_T_I210:
3616 	case WM_T_I211:
3617 		sc->sc_pba = PBA_34K;
3618 		break;
3619 	case WM_T_ICH8:
3620 		/* Workaround for a bit corruption issue in FIFO memory */
3621 		sc->sc_pba = PBA_8K;
3622 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3623 		break;
3624 	case WM_T_ICH9:
3625 	case WM_T_ICH10:
3626 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
3627 		    PBA_14K : PBA_10K;
3628 		break;
3629 	case WM_T_PCH:
3630 	case WM_T_PCH2:
3631 	case WM_T_PCH_LPT:
3632 		sc->sc_pba = PBA_26K;
3633 		break;
3634 	default:
3635 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3636 		    PBA_40K : PBA_48K;
3637 		break;
3638 	}
3639 	/*
3640 	 * Only old or non-multiqueue devices have the PBA register
3641 	 * XXX Need special handling for 82575.
3642 	 */
3643 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3644 	    || (sc->sc_type == WM_T_82575))
3645 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3646 
3647 	/* Prevent the PCI-E bus from sticking */
3648 	if (sc->sc_flags & WM_F_PCIE) {
3649 		int timeout = 800;
3650 
3651 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
3652 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3653 
3654 		while (timeout--) {
3655 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3656 			    == 0)
3657 				break;
3658 			delay(100);
3659 		}
3660 	}
3661 
3662 	/* Set the completion timeout for interface */
3663 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3664 	    || (sc->sc_type == WM_T_82580)
3665 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3666 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3667 		wm_set_pcie_completion_timeout(sc);
3668 
3669 	/* Clear interrupt */
3670 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3671 	if (sc->sc_nintrs > 1) {
3672 		if (sc->sc_type != WM_T_82574) {
3673 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
3674 			CSR_WRITE(sc, WMREG_EIAC, 0);
3675 		} else {
3676 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
3677 		}
3678 	}
3679 
3680 	/* Stop the transmit and receive processes. */
3681 	CSR_WRITE(sc, WMREG_RCTL, 0);
3682 	sc->sc_rctl &= ~RCTL_EN;
3683 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3684 	CSR_WRITE_FLUSH(sc);
3685 
3686 	/* XXX set_tbi_sbp_82543() */
3687 
3688 	delay(10*1000);
3689 
3690 	/* Must acquire the MDIO ownership before MAC reset */
3691 	switch (sc->sc_type) {
3692 	case WM_T_82573:
3693 	case WM_T_82574:
3694 	case WM_T_82583:
3695 		error = wm_get_hw_semaphore_82573(sc);
3696 		break;
3697 	default:
3698 		break;
3699 	}
3700 
3701 	/*
3702 	 * 82541 Errata 29? & 82547 Errata 28?
3703 	 * See also the description of the PHY_RST bit in the CTRL register
3704 	 * in 8254x_GBe_SDM.pdf.
3705 	 */
3706 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3707 		CSR_WRITE(sc, WMREG_CTRL,
3708 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3709 		CSR_WRITE_FLUSH(sc);
3710 		delay(5000);
3711 	}
3712 
3713 	switch (sc->sc_type) {
3714 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3715 	case WM_T_82541:
3716 	case WM_T_82541_2:
3717 	case WM_T_82547:
3718 	case WM_T_82547_2:
3719 		/*
3720 		 * On some chipsets, a reset through a memory-mapped write
3721 		 * cycle can cause the chip to reset before completing the
3722 		 * write cycle.  This causes a major headache that can be
3723 		 * avoided by issuing the reset via indirect register writes
3724 		 * through I/O space.
3725 		 *
3726 		 * So, if we successfully mapped the I/O BAR at attach time,
3727 		 * use that.  Otherwise, try our luck with a memory-mapped
3728 		 * reset.
3729 		 */
3730 		if (sc->sc_flags & WM_F_IOH_VALID)
3731 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3732 		else
3733 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3734 		break;
3735 	case WM_T_82545_3:
3736 	case WM_T_82546_3:
3737 		/* Use the shadow control register on these chips. */
3738 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3739 		break;
3740 	case WM_T_80003:
3741 		mask = swfwphysem[sc->sc_funcid];
3742 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3743 		wm_get_swfw_semaphore(sc, mask);
3744 		CSR_WRITE(sc, WMREG_CTRL, reg);
3745 		wm_put_swfw_semaphore(sc, mask);
3746 		break;
3747 	case WM_T_ICH8:
3748 	case WM_T_ICH9:
3749 	case WM_T_ICH10:
3750 	case WM_T_PCH:
3751 	case WM_T_PCH2:
3752 	case WM_T_PCH_LPT:
3753 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3754 		if (wm_phy_resetisblocked(sc) == false) {
3755 			/*
3756 			 * Gate automatic PHY configuration by hardware on
3757 			 * non-managed 82579
3758 			 */
3759 			if ((sc->sc_type == WM_T_PCH2)
3760 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3761 				== 0))
3762 				wm_gate_hw_phy_config_ich8lan(sc, 1);
3763 
3764 			reg |= CTRL_PHY_RESET;
3765 			phy_reset = 1;
3766 		}
3767 		wm_get_swfwhw_semaphore(sc);
3768 		CSR_WRITE(sc, WMREG_CTRL, reg);
3769 		/* Don't insert a completion barrier during reset */
3770 		delay(20*1000);
3771 		wm_put_swfwhw_semaphore(sc);
3772 		break;
3773 	case WM_T_82580:
3774 	case WM_T_I350:
3775 	case WM_T_I354:
3776 	case WM_T_I210:
3777 	case WM_T_I211:
3778 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3779 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
3780 			CSR_WRITE_FLUSH(sc);
3781 		delay(5000);
3782 		break;
3783 	case WM_T_82542_2_0:
3784 	case WM_T_82542_2_1:
3785 	case WM_T_82543:
3786 	case WM_T_82540:
3787 	case WM_T_82545:
3788 	case WM_T_82546:
3789 	case WM_T_82571:
3790 	case WM_T_82572:
3791 	case WM_T_82573:
3792 	case WM_T_82574:
3793 	case WM_T_82575:
3794 	case WM_T_82576:
3795 	case WM_T_82583:
3796 	default:
3797 		/* Everything else can safely use the documented method. */
3798 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3799 		break;
3800 	}
3801 
3802 	/* Must release the MDIO ownership after MAC reset */
3803 	switch (sc->sc_type) {
3804 	case WM_T_82573:
3805 	case WM_T_82574:
3806 	case WM_T_82583:
3807 		if (error == 0)
3808 			wm_put_hw_semaphore_82573(sc);
3809 		break;
3810 	default:
3811 		break;
3812 	}
3813 
3814 	if (phy_reset != 0)
3815 		wm_get_cfg_done(sc);
3816 
3817 	/* reload EEPROM */
3818 	switch (sc->sc_type) {
3819 	case WM_T_82542_2_0:
3820 	case WM_T_82542_2_1:
3821 	case WM_T_82543:
3822 	case WM_T_82544:
3823 		delay(10);
3824 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3825 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3826 		CSR_WRITE_FLUSH(sc);
3827 		delay(2000);
3828 		break;
3829 	case WM_T_82540:
3830 	case WM_T_82545:
3831 	case WM_T_82545_3:
3832 	case WM_T_82546:
3833 	case WM_T_82546_3:
3834 		delay(5*1000);
3835 		/* XXX Disable HW ARPs on ASF enabled adapters */
3836 		break;
3837 	case WM_T_82541:
3838 	case WM_T_82541_2:
3839 	case WM_T_82547:
3840 	case WM_T_82547_2:
3841 		delay(20000);
3842 		/* XXX Disable HW ARPs on ASF enabled adapters */
3843 		break;
3844 	case WM_T_82571:
3845 	case WM_T_82572:
3846 	case WM_T_82573:
3847 	case WM_T_82574:
3848 	case WM_T_82583:
3849 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3850 			delay(10);
3851 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3852 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3853 			CSR_WRITE_FLUSH(sc);
3854 		}
3855 		/* check EECD_EE_AUTORD */
3856 		wm_get_auto_rd_done(sc);
3857 		/*
3858 		 * PHY configuration from NVM starts only after EECD_EE_AUTORD
3859 		 * is set.
3860 		 */
3861 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3862 		    || (sc->sc_type == WM_T_82583))
3863 			delay(25*1000);
3864 		break;
3865 	case WM_T_82575:
3866 	case WM_T_82576:
3867 	case WM_T_82580:
3868 	case WM_T_I350:
3869 	case WM_T_I354:
3870 	case WM_T_I210:
3871 	case WM_T_I211:
3872 	case WM_T_80003:
3873 		/* check EECD_EE_AUTORD */
3874 		wm_get_auto_rd_done(sc);
3875 		break;
3876 	case WM_T_ICH8:
3877 	case WM_T_ICH9:
3878 	case WM_T_ICH10:
3879 	case WM_T_PCH:
3880 	case WM_T_PCH2:
3881 	case WM_T_PCH_LPT:
3882 		break;
3883 	default:
3884 		panic("%s: unknown type\n", __func__);
3885 	}
3886 
3887 	/* Check whether EEPROM is present or not */
3888 	switch (sc->sc_type) {
3889 	case WM_T_82575:
3890 	case WM_T_82576:
3891 	case WM_T_82580:
3892 	case WM_T_I350:
3893 	case WM_T_I354:
3894 	case WM_T_ICH8:
3895 	case WM_T_ICH9:
3896 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3897 			/* Not found */
3898 			sc->sc_flags |= WM_F_EEPROM_INVALID;
3899 			if (sc->sc_type == WM_T_82575)
3900 				wm_reset_init_script_82575(sc);
3901 		}
3902 		break;
3903 	default:
3904 		break;
3905 	}
3906 
3907 	if ((sc->sc_type == WM_T_82580)
3908 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
3909 		/* clear global device reset status bit */
3910 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
3911 	}
3912 
3913 	/* Clear any pending interrupt events. */
3914 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3915 	reg = CSR_READ(sc, WMREG_ICR);
3916 	if (sc->sc_nintrs > 1) {
3917 		if (sc->sc_type != WM_T_82574) {
3918 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
3919 			CSR_WRITE(sc, WMREG_EIAC, 0);
3920 		} else
3921 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
3922 	}
3923 
3924 	/* reload sc_ctrl */
3925 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3926 
3927 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
3928 		wm_set_eee_i350(sc);
3929 
3930 	/* dummy read from WUC */
3931 	if (sc->sc_type == WM_T_PCH)
3932 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3933 	/*
3934 	 * For PCH, this write will make sure that any noise will be detected
3935 	 * as a CRC error and be dropped rather than show up as a bad packet
3936 	 * to the DMA engine.
3937 	 */
3938 	if (sc->sc_type == WM_T_PCH)
3939 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3940 
3941 	if (sc->sc_type >= WM_T_82544)
3942 		CSR_WRITE(sc, WMREG_WUC, 0);
3943 
3944 	wm_reset_mdicnfg_82580(sc);
3945 
3946 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
3947 		wm_pll_workaround_i210(sc);
3948 }
3949 
3950 /*
3951  * wm_add_rxbuf:
3952  *
3953  *	Add a receive buffer to the indicated descriptor.
3954  */
3955 static int
3956 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
3957 {
3958 	struct wm_softc *sc = rxq->rxq_sc;
3959 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
3960 	struct mbuf *m;
3961 	int error;
3962 
3963 	KASSERT(WM_RX_LOCKED(rxq));
3964 
3965 	MGETHDR(m, M_DONTWAIT, MT_DATA);
3966 	if (m == NULL)
3967 		return ENOBUFS;
3968 
3969 	MCLGET(m, M_DONTWAIT);
3970 	if ((m->m_flags & M_EXT) == 0) {
3971 		m_freem(m);
3972 		return ENOBUFS;
3973 	}
3974 
3975 	if (rxs->rxs_mbuf != NULL)
3976 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3977 
3978 	rxs->rxs_mbuf = m;
3979 
3980 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3981 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3982 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
3983 	if (error) {
3984 		/* XXX XXX XXX */
3985 		aprint_error_dev(sc->sc_dev,
3986 		    "unable to load rx DMA map %d, error = %d\n",
3987 		    idx, error);
3988 		panic("wm_add_rxbuf");
3989 	}
3990 
3991 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3992 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3993 
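	/*
	 * On newer-queue (82575 and later) chips, (re)initialize the
	 * descriptor only while the receiver is enabled, since doing so
	 * updates RDT; cf. the matching comment in wm_init_locked().
	 */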
3994 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3995 		if ((sc->sc_rctl & RCTL_EN) != 0)
3996 			wm_init_rxdesc(rxq, idx);
3997 	} else
3998 		wm_init_rxdesc(rxq, idx);
3999 
4000 	return 0;
4001 }
4002 
4003 /*
4004  * wm_rxdrain:
4005  *
4006  *	Drain the receive queue.
4007  */
4008 static void
4009 wm_rxdrain(struct wm_rxqueue *rxq)
4010 {
4011 	struct wm_softc *sc = rxq->rxq_sc;
4012 	struct wm_rxsoft *rxs;
4013 	int i;
4014 
4015 	KASSERT(WM_RX_LOCKED(rxq));
4016 
4017 	for (i = 0; i < WM_NRXDESC; i++) {
4018 		rxs = &rxq->rxq_soft[i];
4019 		if (rxs->rxs_mbuf != NULL) {
4020 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4021 			m_freem(rxs->rxs_mbuf);
4022 			rxs->rxs_mbuf = NULL;
4023 		}
4024 	}
4025 }
4026 
4027 
4028 /*
4029  * XXX copy from FreeBSD's sys/net/rss_config.c
4030  */
4031 /*
4032  * RSS secret key, intended to prevent attacks on load-balancing.  Its
4033  * effectiveness may be limited by algorithm choice and available entropy
4034  * during the boot.
4035  *
4036  * XXXRW: And that we don't randomize it yet!
4037  *
4038  * This is the default Microsoft RSS specification key which is also
4039  * the Chelsio T5 firmware default key.
4040  */
4041 #define RSS_KEYSIZE 40
4042 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
4043 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
4044 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
4045 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
4046 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
4047 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
4048 };
4049 
4050 /*
4051  * Caller must pass an array of size sizeof(wm_rss_key).
4052  *
4053  * XXX
4054  * As if_ixgbe may use this function, it should not be an
4055  * if_wm-specific function.
4056  */
4057 static void
4058 wm_rss_getkey(uint8_t *key)
4059 {
4060 
4061 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
4062 }
4063 
4064 /*
4065  * Set up the registers for RSS.
4066  *
4067  * XXX VMDq is not supported yet.
4068  */
4069 static void
4070 wm_init_rss(struct wm_softc *sc)
4071 {
4072 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
4073 	int i;
4074 
4075 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
4076 
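	/*
	 * Populate the RSS redirection table: the RSS hash of each
	 * received packet indexes the table, and the selected entry's
	 * queue index picks the RX queue.  Spread the queues
	 * round-robin across all RETA_NUM_ENTRIES entries.
	 */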
4077 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
4078 		int qid, reta_ent;
4079 
4080 		qid  = i % sc->sc_nrxqueues;
4081 		switch (sc->sc_type) {
4082 		case WM_T_82574:
4083 			reta_ent = __SHIFTIN(qid,
4084 			    RETA_ENT_QINDEX_MASK_82574);
4085 			break;
4086 		case WM_T_82575:
4087 			reta_ent = __SHIFTIN(qid,
4088 			    RETA_ENT_QINDEX1_MASK_82575);
4089 			break;
4090 		default:
4091 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
4092 			break;
4093 		}
4094 
4095 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
4096 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
4097 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
4098 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
4099 	}
4100 
4101 	wm_rss_getkey((uint8_t *)rss_key);
4102 	for (i = 0; i < RSSRK_NUM_REGS; i++)
4103 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
4104 
4105 	if (sc->sc_type == WM_T_82574)
4106 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
4107 	else
4108 		mrqc = MRQC_ENABLE_RSS_MQ;
4109 
4110 	/* XXX
4111 	 * The same as FreeBSD igb.
4112 	 * Why doesn't it use MRQC_RSS_FIELD_IPV6_EX?
4113 	 */
4114 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
4115 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
4116 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
4117 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
4118 
4119 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
4120 }
4121 
4122 /*
4123  * Adjust the numbers of TX and RX queues which the system actually uses.
4124  *
4125  * The numbers are affected by the parameters below:
4126  *     - The number of hardware queues
4127  *     - The number of MSI-X vectors (= "nvectors" argument)
4128  *     - ncpu
4129  */
4130 static void
4131 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
4132 {
4133 	int hw_ntxqueues, hw_nrxqueues;
4134 
4135 	if (nvectors < 3) {
4136 		sc->sc_ntxqueues = 1;
4137 		sc->sc_nrxqueues = 1;
4138 		return;
4139 	}
4140 
4141 	switch (sc->sc_type) {
4142 	case WM_T_82572:
4143 		hw_ntxqueues = 2;
4144 		hw_nrxqueues = 2;
4145 		break;
4146 	case WM_T_82574:
4147 		hw_ntxqueues = 2;
4148 		hw_nrxqueues = 2;
4149 		break;
4150 	case WM_T_82575:
4151 		hw_ntxqueues = 4;
4152 		hw_nrxqueues = 4;
4153 		break;
4154 	case WM_T_82576:
4155 		hw_ntxqueues = 16;
4156 		hw_nrxqueues = 16;
4157 		break;
4158 	case WM_T_82580:
4159 	case WM_T_I350:
4160 	case WM_T_I354:
4161 		hw_ntxqueues = 8;
4162 		hw_nrxqueues = 8;
4163 		break;
4164 	case WM_T_I210:
4165 		hw_ntxqueues = 4;
4166 		hw_nrxqueues = 4;
4167 		break;
4168 	case WM_T_I211:
4169 		hw_ntxqueues = 2;
4170 		hw_nrxqueues = 2;
4171 		break;
4172 		/*
4173 		 * As the ethernet controllers below do not support MSI-X,
4174 		 * this driver does not use multiqueue on them:
4175 		 *     - WM_T_80003
4176 		 *     - WM_T_ICH8
4177 		 *     - WM_T_ICH9
4178 		 *     - WM_T_ICH10
4179 		 *     - WM_T_PCH
4180 		 *     - WM_T_PCH2
4181 		 *     - WM_T_PCH_LPT
4182 		 */
4183 	default:
4184 		hw_ntxqueues = 1;
4185 		hw_nrxqueues = 1;
4186 		break;
4187 	}
4188 
4189 	/*
4190 	 * As more queues than MSI-X vectors cannot improve scaling, we limit
4191 	 * the number of queues actually used.
4192 	 *
4193 	 * XXX
4194 	 * Currently, we separate TX queue interrupts and RX queue interrupts.
4195 	 * However, the number of MSI-X vectors on recent controllers (such as
4196 	 * the I354) assumes that drivers bundle a TX queue interrupt and an RX
4197 	 * queue interrupt into one interrupt; e.g. FreeBSD's igb handles
4198 	 * interrupts that way.
4199 	 */
4200 	if (nvectors < hw_ntxqueues + hw_nrxqueues + 1) {
4201 		sc->sc_ntxqueues = (nvectors - 1) / 2;
4202 		sc->sc_nrxqueues = (nvectors - 1) / 2;
4203 	} else {
4204 		sc->sc_ntxqueues = hw_ntxqueues;
4205 		sc->sc_nrxqueues = hw_nrxqueues;
4206 	}
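
	/*
	 * For example, with nvectors == 5 on an 82580 (8 hardware
	 * queues), the clause above yields (5 - 1) / 2 == 2 TX queues
	 * and 2 RX queues.
	 */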
4207 
4208 	/*
4209 	 * As more queues than CPUs cannot improve scaling, we limit
4210 	 * the number of queues actually used.
4211 	 */
4212 	if (ncpu < sc->sc_ntxqueues)
4213 		sc->sc_ntxqueues = ncpu;
4214 	if (ncpu < sc->sc_nrxqueues)
4215 		sc->sc_nrxqueues = ncpu;
4216 
4217 	/* XXX Currently, this driver supports RX multiqueue only. */
4218 	sc->sc_ntxqueues = 1;
4219 }
4220 
4221 /*
4222  * Both single interrupt MSI and INTx can use this function.
4223  */
4224 static int
4225 wm_setup_legacy(struct wm_softc *sc)
4226 {
4227 	pci_chipset_tag_t pc = sc->sc_pc;
4228 	const char *intrstr = NULL;
4229 	char intrbuf[PCI_INTRSTR_LEN];
4230 	int error;
4231 
4232 	error = wm_alloc_txrx_queues(sc);
4233 	if (error) {
4234 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
4235 		    error);
4236 		return ENOMEM;
4237 	}
4238 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
4239 	    sizeof(intrbuf));
4240 #ifdef WM_MPSAFE
4241 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
4242 #endif
4243 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
4244 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
4245 	if (sc->sc_ihs[0] == NULL) {
4246 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
4247 		    (pci_intr_type(sc->sc_intrs[0])
4248 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
4249 		return ENOMEM;
4250 	}
4251 
4252 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
4253 	sc->sc_nintrs = 1;
4254 	return 0;
4255 }
4256 
4257 static int
4258 wm_setup_msix(struct wm_softc *sc)
4259 {
4260 	void *vih;
4261 	kcpuset_t *affinity;
4262 	int qidx, error, intr_idx, tx_established, rx_established;
4263 	pci_chipset_tag_t pc = sc->sc_pc;
4264 	const char *intrstr = NULL;
4265 	char intrbuf[PCI_INTRSTR_LEN];
4266 	char intr_xname[INTRDEVNAMEBUF];
4267 	/*
4268 	 * To avoid other devices' interrupts, the affinity of Tx/Rx
4269 	 * interrupts starts from CPU#1.
4270 	 */
4271 	int affinity_offset = 1;
4272 
4273 	error = wm_alloc_txrx_queues(sc);
4274 	if (error) {
4275 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
4276 		    error);
4277 		return ENOMEM;
4278 	}
4279 
4280 	kcpuset_create(&affinity, false);
4281 	intr_idx = 0;
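
	/*
	 * MSI-X vector layout: one vector per TX queue first, then one
	 * per RX queue, and finally one vector for link status.
	 */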
4282 
4283 	/*
4284 	 * TX
4285 	 */
4286 	tx_established = 0;
4287 	for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
4288 		struct wm_txqueue *txq = &sc->sc_txq[qidx];
4289 		int affinity_to = (affinity_offset + intr_idx) % ncpu;
4290 
4291 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4292 		    sizeof(intrbuf));
4293 #ifdef WM_MPSAFE
4294 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
4295 		    PCI_INTR_MPSAFE, true);
4296 #endif
4297 		memset(intr_xname, 0, sizeof(intr_xname));
4298 		snprintf(intr_xname, sizeof(intr_xname), "%sTX%d",
4299 		    device_xname(sc->sc_dev), qidx);
4300 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4301 		    IPL_NET, wm_txintr_msix, txq, intr_xname);
4302 		if (vih == NULL) {
4303 			aprint_error_dev(sc->sc_dev,
4304 			    "unable to establish MSI-X(for TX)%s%s\n",
4305 			    intrstr ? " at " : "",
4306 			    intrstr ? intrstr : "");
4307 
4308 			goto fail_0;
4309 		}
4310 		kcpuset_zero(affinity);
4311 		/* Round-robin affinity */
4312 		kcpuset_set(affinity, affinity_to);
4313 		error = interrupt_distribute(vih, affinity, NULL);
4314 		if (error == 0) {
4315 			aprint_normal_dev(sc->sc_dev,
4316 			    "for TX interrupting at %s affinity to %u\n",
4317 			    intrstr, affinity_to);
4318 		} else {
4319 			aprint_normal_dev(sc->sc_dev,
4320 			    "for TX interrupting at %s\n", intrstr);
4321 		}
4322 		sc->sc_ihs[intr_idx] = vih;
4323 		txq->txq_id = qidx;
4324 		txq->txq_intr_idx = intr_idx;
4325 
4326 		tx_established++;
4327 		intr_idx++;
4328 	}
4329 
4330 	/*
4331 	 * RX
4332 	 */
4333 	rx_established = 0;
4334 	for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
4335 		struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
4336 		int affinity_to = (affinity_offset + intr_idx) % ncpu;
4337 
4338 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4339 		    sizeof(intrbuf));
4340 #ifdef WM_MPSAFE
4341 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
4342 		    PCI_INTR_MPSAFE, true);
4343 #endif
4344 		memset(intr_xname, 0, sizeof(intr_xname));
4345 		snprintf(intr_xname, sizeof(intr_xname), "%sRX%d",
4346 		    device_xname(sc->sc_dev), qidx);
4347 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4348 		    IPL_NET, wm_rxintr_msix, rxq, intr_xname);
4349 		if (vih == NULL) {
4350 			aprint_error_dev(sc->sc_dev,
4351 			    "unable to establish MSI-X(for RX)%s%s\n",
4352 			    intrstr ? " at " : "",
4353 			    intrstr ? intrstr : "");
4354 
4355 			goto fail_1;
4356 		}
4357 		kcpuset_zero(affinity);
4358 		/* Round-robin affinity */
4359 		kcpuset_set(affinity, affinity_to);
4360 		error = interrupt_distribute(vih, affinity, NULL);
4361 		if (error == 0) {
4362 			aprint_normal_dev(sc->sc_dev,
4363 			    "for RX interrupting at %s affinity to %u\n",
4364 			    intrstr, affinity_to);
4365 		} else {
4366 			aprint_normal_dev(sc->sc_dev,
4367 			    "for RX interrupting at %s\n", intrstr);
4368 		}
4369 		sc->sc_ihs[intr_idx] = vih;
4370 		rxq->rxq_id = qidx;
4371 		rxq->rxq_intr_idx = intr_idx;
4372 
4373 		rx_established++;
4374 		intr_idx++;
4375 	}
4376 
4377 	/*
4378 	 * LINK
4379 	 */
4380 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4381 	    sizeof(intrbuf));
4382 #ifdef WM_MPSAFE
4383 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
4384 #endif
4385 	memset(intr_xname, 0, sizeof(intr_xname));
4386 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
4387 	    device_xname(sc->sc_dev));
4388 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4389 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
4390 	if (vih == NULL) {
4391 		aprint_error_dev(sc->sc_dev,
4392 		    "unable to establish MSI-X(for LINK)%s%s\n",
4393 		    intrstr ? " at " : "",
4394 		    intrstr ? intrstr : "");
4395 
4396 		goto fail_1;
4397 	}
4398 	/* Keep the default affinity for the LINK interrupt */
4399 	aprint_normal_dev(sc->sc_dev,
4400 	    "for LINK interrupting at %s\n", intrstr);
4401 	sc->sc_ihs[intr_idx] = vih;
4402 	sc->sc_link_intr_idx = intr_idx;
4403 
4404 	sc->sc_nintrs = sc->sc_ntxqueues + sc->sc_nrxqueues + 1;
4405 	kcpuset_destroy(affinity);
4406 	return 0;
4407 
4408  fail_1:
4409 	for (qidx = 0; qidx < rx_established; qidx++) {
4410 		struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
4411 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[rxq->rxq_intr_idx]);
4412 		sc->sc_ihs[rxq->rxq_intr_idx] = NULL;
4413 	}
4414  fail_0:
4415 	for (qidx = 0; qidx < tx_established; qidx++) {
4416 		struct wm_txqueue *txq = &sc->sc_txq[qidx];
4417 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[txq->txq_intr_idx]);
4418 		sc->sc_ihs[txq->txq_intr_idx] = NULL;
4419 	}
4420 
4421 	kcpuset_destroy(affinity);
4422 	return ENOMEM;
4423 }
4424 
4425 /*
4426  * wm_init:		[ifnet interface function]
4427  *
4428  *	Initialize the interface.
4429  */
4430 static int
4431 wm_init(struct ifnet *ifp)
4432 {
4433 	struct wm_softc *sc = ifp->if_softc;
4434 	int ret;
4435 
4436 	WM_CORE_LOCK(sc);
4437 	ret = wm_init_locked(ifp);
4438 	WM_CORE_UNLOCK(sc);
4439 
4440 	return ret;
4441 }
4442 
4443 static int
4444 wm_init_locked(struct ifnet *ifp)
4445 {
4446 	struct wm_softc *sc = ifp->if_softc;
4447 	int i, j, trynum, error = 0;
4448 	uint32_t reg;
4449 
4450 	KASSERT(WM_CORE_LOCKED(sc));
4451 	/*
4452 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4453 	 * There is a small but measurable benefit to avoiding the adjustment
4454 	 * of the descriptor so that the headers are aligned, for normal mtu,
4455 	 * on such platforms.  One possibility is that the DMA itself is
4456 	 * slightly more efficient if the front of the entire packet (instead
4457 	 * of the front of the headers) is aligned.
4458 	 *
4459 	 * Note we must always set align_tweak to 0 if we are using
4460 	 * jumbo frames.
4461 	 */
4462 #ifdef __NO_STRICT_ALIGNMENT
4463 	sc->sc_align_tweak = 0;
4464 #else
4465 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4466 		sc->sc_align_tweak = 0;
4467 	else
4468 		sc->sc_align_tweak = 2;
4469 #endif /* __NO_STRICT_ALIGNMENT */
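
	/*
	 * With an align_tweak of 2, the 14-byte Ethernet header leaves
	 * the IP header 4-byte aligned on strict-alignment platforms.
	 */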
4470 
4471 	/* Cancel any pending I/O. */
4472 	wm_stop_locked(ifp, 0);
4473 
4474 	/* update statistics before reset */
4475 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4476 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4477 
4478 	/* Reset the chip to a known state. */
4479 	wm_reset(sc);
4480 
4481 	switch (sc->sc_type) {
4482 	case WM_T_82571:
4483 	case WM_T_82572:
4484 	case WM_T_82573:
4485 	case WM_T_82574:
4486 	case WM_T_82583:
4487 	case WM_T_80003:
4488 	case WM_T_ICH8:
4489 	case WM_T_ICH9:
4490 	case WM_T_ICH10:
4491 	case WM_T_PCH:
4492 	case WM_T_PCH2:
4493 	case WM_T_PCH_LPT:
4494 		/* AMT based hardware can now take control from firmware */
4495 		if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
4496 			wm_get_hw_control(sc);
4497 		break;
4498 	default:
4499 		break;
4500 	}
4501 
4502 	/* Init hardware bits */
4503 	wm_initialize_hardware_bits(sc);
4504 
4505 	/* Reset the PHY. */
4506 	if (sc->sc_flags & WM_F_HAS_MII)
4507 		wm_gmii_reset(sc);
4508 
4509 	/* Calculate (E)ITR value */
4510 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4511 		sc->sc_itr = 450;	/* For EITR */
4512 	} else if (sc->sc_type >= WM_T_82543) {
4513 		/*
4514 		 * Set up the interrupt throttling register (units of 256ns)
4515 		 * Note that a footnote in Intel's documentation says this
4516 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4517 		 * or 10Mbit mode.  Empirically, it appears to be the case
4518 		 * that this is also true for the 1024ns units of the other
4519 		 * interrupt-related timer registers -- so, really, we ought
4520 		 * to divide this value by 4 when the link speed is low.
4521 		 *
4522 		 * XXX implement this division at link speed change!
4523 		 */
4524 
4525 		/*
4526 		 * For N interrupts/sec, set this value to:
4527 		 * 1000000000 / (N * 256).  Note that we set the
4528 		 * absolute and packet timer values to this value
4529 		 * divided by 4 to get "simple timer" behavior.
4530 		 */
4531 
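		/* e.g. 1000000000 / (2604 * 256) ~= 1500, as set below. */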
4532 		sc->sc_itr = 1500;		/* 2604 ints/sec */
4533 	}
4534 
4535 	error = wm_init_txrx_queues(sc);
4536 	if (error)
4537 		goto out;
4538 
4539 	/*
4540 	 * Clear out the VLAN table -- we don't use it (yet).
4541 	 */
4542 	CSR_WRITE(sc, WMREG_VET, 0);
4543 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4544 		trynum = 10; /* Due to hw errata */
4545 	else
4546 		trynum = 1;
4547 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
4548 		for (j = 0; j < trynum; j++)
4549 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4550 
4551 	/*
4552 	 * Set up flow-control parameters.
4553 	 *
4554 	 * XXX Values could probably stand some tuning.
4555 	 */
4556 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4557 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4558 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
4559 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4560 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4561 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4562 	}
4563 
4564 	sc->sc_fcrtl = FCRTL_DFLT;
4565 	if (sc->sc_type < WM_T_82543) {
4566 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4567 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4568 	} else {
4569 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4570 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4571 	}
4572 
4573 	if (sc->sc_type == WM_T_80003)
4574 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4575 	else
4576 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4577 
4578 	/* Writes the control register. */
4579 	wm_set_vlan(sc);
4580 
4581 	if (sc->sc_flags & WM_F_HAS_MII) {
4582 		int val;
4583 
4584 		switch (sc->sc_type) {
4585 		case WM_T_80003:
4586 		case WM_T_ICH8:
4587 		case WM_T_ICH9:
4588 		case WM_T_ICH10:
4589 		case WM_T_PCH:
4590 		case WM_T_PCH2:
4591 		case WM_T_PCH_LPT:
4592 			/*
4593 			 * Set the MAC to wait the maximum time between each
4594 			 * iteration and increase the max iterations when
4595 			 * polling the PHY; this fixes erroneous timeouts at
4596 			 * 10Mbps.
4597 			 */
4598 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4599 			    0xFFFF);
4600 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
4601 			val |= 0x3F;
4602 			wm_kmrn_writereg(sc,
4603 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
4604 			break;
4605 		default:
4606 			break;
4607 		}
4608 
4609 		if (sc->sc_type == WM_T_80003) {
4610 			val = CSR_READ(sc, WMREG_CTRL_EXT);
4611 			val &= ~CTRL_EXT_LINK_MODE_MASK;
4612 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4613 
4614 			/* Bypass the RX and TX FIFOs */
4615 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4616 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4617 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4618 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4619 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4620 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4621 		}
4622 	}
4623 #if 0
4624 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4625 #endif
4626 
4627 	/* Set up checksum offload parameters. */
4628 	reg = CSR_READ(sc, WMREG_RXCSUM);
4629 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4630 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4631 		reg |= RXCSUM_IPOFL;
4632 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4633 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4634 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4635 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4636 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
4637 
4638 	/* Set up MSI-X */
4639 	if (sc->sc_nintrs > 1) {
4640 		uint32_t ivar;
4641 		struct wm_txqueue *txq;
4642 		struct wm_rxqueue *rxq;
4643 		int qid;
4644 
4645 		if (sc->sc_type == WM_T_82575) {
4646 			/* Interrupt control */
4647 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4648 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
4649 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4650 
4651 			/* TX */
4652 			for (i = 0; i < sc->sc_ntxqueues; i++) {
4653 				txq = &sc->sc_txq[i];
4654 				CSR_WRITE(sc, WMREG_MSIXBM(txq->txq_intr_idx),
4655 				    EITR_TX_QUEUE(txq->txq_id));
4656 			}
4657 			/* RX */
4658 			for (i = 0; i < sc->sc_nrxqueues; i++) {
4659 				rxq = &sc->sc_rxq[i];
4660 				CSR_WRITE(sc, WMREG_MSIXBM(rxq->rxq_intr_idx),
4661 				    EITR_RX_QUEUE(rxq->rxq_id));
4662 			}
4663 			/* Link status */
4664 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
4665 			    EITR_OTHER);
4666 		} else if (sc->sc_type == WM_T_82574) {
4667 			/* Interrupt control */
4668 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4669 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
4670 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4671 
4672 			ivar = 0;
4673 			/* TX */
4674 			for (i = 0; i < sc->sc_ntxqueues; i++) {
4675 				txq = &sc->sc_txq[i];
4676 				ivar |= __SHIFTIN((IVAR_VALID_82574
4677 					| txq->txq_intr_idx),
4678 				    IVAR_TX_MASK_Q_82574(txq->txq_id));
4679 			}
4680 			/* RX */
4681 			for (i = 0; i < sc->sc_nrxqueues; i++) {
4682 				rxq = &sc->sc_rxq[i];
4683 				ivar |= __SHIFTIN((IVAR_VALID_82574
4684 					| rxq->rxq_intr_idx),
4685 				    IVAR_RX_MASK_Q_82574(rxq->rxq_id));
4686 			}
4687 			/* Link status */
4688 			ivar |= __SHIFTIN((IVAR_VALID_82574
4689 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
4690 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
4691 		} else {
4692 			/* Interrupt control */
4693 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
4694 			    | GPIE_EIAME | GPIE_PBA);
4695 
4696 			switch (sc->sc_type) {
4697 			case WM_T_82580:
4698 			case WM_T_I350:
4699 			case WM_T_I354:
4700 			case WM_T_I210:
4701 			case WM_T_I211:
4702 				/* TX */
4703 				for (i = 0; i < sc->sc_ntxqueues; i++) {
4704 					txq = &sc->sc_txq[i];
4705 					qid = txq->txq_id;
4706 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
4707 					ivar &= ~IVAR_TX_MASK_Q(qid);
4708 					ivar |= __SHIFTIN((txq->txq_intr_idx
4709 						| IVAR_VALID),
4710 					    IVAR_TX_MASK_Q(qid));
4711 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
4712 				}
4713 
4714 				/* RX */
4715 				for (i = 0; i < sc->sc_nrxqueues; i++) {
4716 					rxq = &sc->sc_rxq[i];
4717 					qid = rxq->rxq_id;
4718 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
4719 					ivar &= ~IVAR_RX_MASK_Q(qid);
4720 					ivar |= __SHIFTIN((rxq->rxq_intr_idx
4721 						| IVAR_VALID),
4722 					    IVAR_RX_MASK_Q(qid));
4723 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
4724 				}
4725 				break;
4726 			case WM_T_82576:
4727 				/* TX */
4728 				for (i = 0; i < sc->sc_ntxqueues; i++) {
4729 					txq = &sc->sc_txq[i];
4730 					qid = txq->txq_id;
4731 					ivar = CSR_READ(sc,
4732 					    WMREG_IVAR_Q_82576(qid));
4733 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
4734 					ivar |= __SHIFTIN((txq->txq_intr_idx
4735 						| IVAR_VALID),
4736 					    IVAR_TX_MASK_Q_82576(qid));
4737 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
4738 					    ivar);
4739 				}
4740 
4741 				/* RX */
4742 				for (i = 0; i < sc->sc_nrxqueues; i++) {
4743 					rxq = &sc->sc_rxq[i];
4744 					qid = rxq->rxq_id;
4745 					ivar = CSR_READ(sc,
4746 					    WMREG_IVAR_Q_82576(qid));
4747 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
4748 					ivar |= __SHIFTIN((rxq->rxq_intr_idx
4749 						| IVAR_VALID),
4750 					    IVAR_RX_MASK_Q_82576(qid));
4751 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
4752 					    ivar);
4753 				}
4754 				break;
4755 			default:
4756 				break;
4757 			}
4758 
4759 			/* Link status */
4760 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
4761 			    IVAR_MISC_OTHER);
4762 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
4763 		}
4764 
4765 		if (sc->sc_nrxqueues > 1) {
4766 			wm_init_rss(sc);
4767 
4768 			/*
4769 			** NOTE: Receive Full-Packet Checksum Offload
4770 			** is mutually exclusive with multiqueue.  However,
4771 			** this is not the same as TCP/IP checksum offload,
4772 			** which still works.
4773 			*/
4774 			reg = CSR_READ(sc, WMREG_RXCSUM);
4775 			reg |= RXCSUM_PCSD;
4776 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
4777 		}
4778 	}
4779 
4780 	/* Set up the interrupt registers. */
4781 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4782 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4783 	    ICR_RXO | ICR_RXT0;
4784 	if (sc->sc_nintrs > 1) {
4785 		uint32_t mask;
4786 		struct wm_txqueue *txq;
4787 		struct wm_rxqueue *rxq;
4788 
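		/*
		 * Build the auto-clear/auto-mask vector set: the 82574
		 * uses its dedicated MSI-X mask, the 82575 uses
		 * per-queue EITR bits, and later chips use one bit per
		 * MSI-X vector index.
		 */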
4789 		switch (sc->sc_type) {
4790 		case WM_T_82574:
4791 			CSR_WRITE(sc, WMREG_EIAC_82574,
4792 			    WMREG_EIAC_82574_MSIX_MASK);
4793 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
4794 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4795 			break;
4796 		default:
4797 			if (sc->sc_type == WM_T_82575) {
4798 				mask = 0;
4799 				for (i = 0; i < sc->sc_ntxqueues; i++) {
4800 					txq = &sc->sc_txq[i];
4801 					mask |= EITR_TX_QUEUE(txq->txq_id);
4802 				}
4803 				for (i = 0; i < sc->sc_nrxqueues; i++) {
4804 					rxq = &sc->sc_rxq[i];
4805 					mask |= EITR_RX_QUEUE(rxq->rxq_id);
4806 				}
4807 				mask |= EITR_OTHER;
4808 			} else {
4809 				mask = 0;
4810 				for (i = 0; i < sc->sc_ntxqueues; i++) {
4811 					txq = &sc->sc_txq[i];
4812 					mask |= 1 << txq->txq_intr_idx;
4813 				}
4814 				for (i = 0; i < sc->sc_nrxqueues; i++) {
4815 					rxq = &sc->sc_rxq[i];
4816 					mask |= 1 << rxq->rxq_intr_idx;
4817 				}
4818 				mask |= 1 << sc->sc_link_intr_idx;
4819 			}
4820 			CSR_WRITE(sc, WMREG_EIAC, mask);
4821 			CSR_WRITE(sc, WMREG_EIAM, mask);
4822 			CSR_WRITE(sc, WMREG_EIMS, mask);
4823 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
4824 			break;
4825 		}
4826 	} else
4827 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4828 
4829 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4830 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4831 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
4832 		reg = CSR_READ(sc, WMREG_KABGTXD);
4833 		reg |= KABGTXD_BGSQLBIAS;
4834 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
4835 	}
4836 
4837 	/* Set up the inter-packet gap. */
4838 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4839 
4840 	if (sc->sc_type >= WM_T_82543) {
4841 		/*
4842 		 * XXX 82574 has both ITR and EITR.  Set EITR when we use
4843 		 * the multiqueue function with MSI-X.
4844 		 */
4845 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4846 			int qidx;
4847 			for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
4848 				struct wm_txqueue *txq = &sc->sc_txq[qidx];
4849 				CSR_WRITE(sc, WMREG_EITR(txq->txq_intr_idx),
4850 				    sc->sc_itr);
4851 			}
4852 			for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
4853 				struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
4854 				CSR_WRITE(sc, WMREG_EITR(rxq->rxq_intr_idx),
4855 				    sc->sc_itr);
4856 			}
4857 			/*
4858 			 * Link interrupts occur much less often than TX
4859 			 * and RX interrupts, so we don't tune the
4860 			 * EITR(WM_MSIX_LINKINTR_IDX) value the way
4861 			 * FreeBSD's if_igb does.
4862 			 */
4863 		} else
4864 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4865 	}
4866 
4867 	/* Set the VLAN ethernetype. */
4868 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4869 
4870 	/*
4871 	 * Set up the transmit control register; we start out with
4872 	 * a collision distance suitable for FDX, but update it when
4873 	 * we resolve the media type.
4874 	 */
4875 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4876 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
4877 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4878 	if (sc->sc_type >= WM_T_82571)
4879 		sc->sc_tctl |= TCTL_MULR;
4880 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4881 
4882 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4883 		/* Write TDT after TCTL.EN is set. See the document. */
4884 		CSR_WRITE(sc, WMREG_TDT(0), 0);
4885 	}
4886 
4887 	if (sc->sc_type == WM_T_80003) {
4888 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
4889 		reg &= ~TCTL_EXT_GCEX_MASK;
4890 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4891 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4892 	}
4893 
4894 	/* Set the media. */
4895 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4896 		goto out;
4897 
4898 	/* Configure for OS presence */
4899 	wm_init_manageability(sc);
4900 
4901 	/*
4902 	 * Set up the receive control register; we actually program
4903 	 * the register when we set the receive filter.  Use multicast
4904 	 * address offset type 0.
4905 	 *
4906 	 * Only the i82544 has the ability to strip the incoming
4907 	 * CRC, so we don't enable that feature.
4908 	 */
4909 	sc->sc_mchash_type = 0;
4910 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4911 	    | RCTL_MO(sc->sc_mchash_type);
4912 
4913 	/*
4914 	 * The I350 has a bug where it always strips the CRC whether
4915 	 * asked to or not.  So ask for stripped CRC here and cope in rxeof.
4916 	 */
4917 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4918 	    || (sc->sc_type == WM_T_I210))
4919 		sc->sc_rctl |= RCTL_SECRC;
4920 
4921 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4922 	    && (ifp->if_mtu > ETHERMTU)) {
4923 		sc->sc_rctl |= RCTL_LPE;
4924 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4925 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4926 	}
4927 
4928 	if (MCLBYTES == 2048) {
4929 		sc->sc_rctl |= RCTL_2k;
4930 	} else {
4931 		if (sc->sc_type >= WM_T_82543) {
4932 			switch (MCLBYTES) {
4933 			case 4096:
4934 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4935 				break;
4936 			case 8192:
4937 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4938 				break;
4939 			case 16384:
4940 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4941 				break;
4942 			default:
4943 				panic("wm_init: MCLBYTES %d unsupported",
4944 				    MCLBYTES);
4945 				break;
4946 			}
4947 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
4948 	}
4949 
4950 	/* Set the receive filter. */
4951 	wm_set_filter(sc);
4952 
4953 	/* Enable ECC */
4954 	switch (sc->sc_type) {
4955 	case WM_T_82571:
4956 		reg = CSR_READ(sc, WMREG_PBA_ECC);
4957 		reg |= PBA_ECC_CORR_EN;
4958 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4959 		break;
4960 	case WM_T_PCH_LPT:
4961 		reg = CSR_READ(sc, WMREG_PBECCSTS);
4962 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4963 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4964 
4965 		reg = CSR_READ(sc, WMREG_CTRL);
4966 		reg |= CTRL_MEHE;
4967 		CSR_WRITE(sc, WMREG_CTRL, reg);
4968 		break;
4969 	default:
4970 		break;
4971 	}
4972 
4973 	/* On 82575 and later, set RDT only if RX is enabled */
4974 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4975 		int qidx;
4976 		for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
4977 			struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
4978 			for (i = 0; i < WM_NRXDESC; i++) {
4979 				WM_RX_LOCK(rxq);
4980 				wm_init_rxdesc(rxq, i);
4981 				WM_RX_UNLOCK(rxq);
4982 
4983 			}
4984 		}
4985 	}
4986 
4987 	sc->sc_stopping = false;
4988 
4989 	/* Start the one second link check clock. */
4990 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4991 
4992 	/* ...all done! */
4993 	ifp->if_flags |= IFF_RUNNING;
4994 	ifp->if_flags &= ~IFF_OACTIVE;
4995 
4996  out:
4997 	sc->sc_if_flags = ifp->if_flags;
4998 	if (error)
4999 		log(LOG_ERR, "%s: interface not running\n",
5000 		    device_xname(sc->sc_dev));
5001 	return error;
5002 }
5003 
5004 /*
5005  * wm_stop:		[ifnet interface function]
5006  *
5007  *	Stop transmission on the interface.
5008  */
5009 static void
5010 wm_stop(struct ifnet *ifp, int disable)
5011 {
5012 	struct wm_softc *sc = ifp->if_softc;
5013 
5014 	WM_CORE_LOCK(sc);
5015 	wm_stop_locked(ifp, disable);
5016 	WM_CORE_UNLOCK(sc);
5017 }
5018 
5019 static void
5020 wm_stop_locked(struct ifnet *ifp, int disable)
5021 {
5022 	struct wm_softc *sc = ifp->if_softc;
5023 	struct wm_txsoft *txs;
5024 	int i, qidx;
5025 
5026 	KASSERT(WM_CORE_LOCKED(sc));
5027 
5028 	sc->sc_stopping = true;
5029 
5030 	/* Stop the one second clock. */
5031 	callout_stop(&sc->sc_tick_ch);
5032 
5033 	/* Stop the 82547 Tx FIFO stall check timer. */
5034 	if (sc->sc_type == WM_T_82547)
5035 		callout_stop(&sc->sc_txfifo_ch);
5036 
5037 	if (sc->sc_flags & WM_F_HAS_MII) {
5038 		/* Down the MII. */
5039 		mii_down(&sc->sc_mii);
5040 	} else {
5041 #if 0
5042 		/* Should we clear PHY's status properly? */
5043 		wm_reset(sc);
5044 #endif
5045 	}
5046 
5047 	/* Stop the transmit and receive processes. */
5048 	CSR_WRITE(sc, WMREG_TCTL, 0);
5049 	CSR_WRITE(sc, WMREG_RCTL, 0);
5050 	sc->sc_rctl &= ~RCTL_EN;
5051 
5052 	/*
5053 	 * Clear the interrupt mask to ensure the device cannot assert its
5054 	 * interrupt line.
5055 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
5056 	 * service any currently pending or shared interrupt.
5057 	 */
5058 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5059 	sc->sc_icr = 0;
5060 	if (sc->sc_nintrs > 1) {
5061 		if (sc->sc_type != WM_T_82574) {
5062 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5063 			CSR_WRITE(sc, WMREG_EIAC, 0);
5064 		} else
5065 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5066 	}
5067 
5068 	/* Release any queued transmit buffers. */
5069 	for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
5070 		struct wm_txqueue *txq = &sc->sc_txq[qidx];
5071 		WM_TX_LOCK(txq);
5072 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5073 			txs = &txq->txq_soft[i];
5074 			if (txs->txs_mbuf != NULL) {
5075 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
5076 				m_freem(txs->txs_mbuf);
5077 				txs->txs_mbuf = NULL;
5078 			}
5079 		}
5080 		WM_TX_UNLOCK(txq);
5081 	}
5082 
5083 	/* Mark the interface as down and cancel the watchdog timer. */
5084 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
5085 	ifp->if_timer = 0;
5086 
5087 	if (disable) {
5088 		for (i = 0; i < sc->sc_nrxqueues; i++) {
5089 			struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5090 			WM_RX_LOCK(rxq);
5091 			wm_rxdrain(rxq);
5092 			WM_RX_UNLOCK(rxq);
5093 		}
5094 	}
5095 
5096 #if 0 /* notyet */
5097 	if (sc->sc_type >= WM_T_82544)
5098 		CSR_WRITE(sc, WMREG_WUC, 0);
5099 #endif
5100 }
5101 
5102 static void
5103 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
5104 {
5105 	struct mbuf *m;
5106 	int i;
5107 
5108 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
5109 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
5110 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
5111 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
5112 		    m->m_data, m->m_len, m->m_flags);
5113 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
5114 	    i, i == 1 ? "" : "s");
5115 }
5116 
5117 /*
5118  * wm_82547_txfifo_stall:
5119  *
5120  *	Callout used to wait for the 82547 Tx FIFO to drain,
5121  *	reset the FIFO pointers, and restart packet transmission.
5122  */
5123 static void
5124 wm_82547_txfifo_stall(void *arg)
5125 {
5126 	struct wm_softc *sc = arg;
5127 	struct wm_txqueue *txq = sc->sc_txq;
5128 #ifndef WM_MPSAFE
5129 	int s;
5130 
5131 	s = splnet();
5132 #endif
5133 	WM_TX_LOCK(txq);
5134 
5135 	if (sc->sc_stopping)
5136 		goto out;
5137 
5138 	if (txq->txq_fifo_stall) {
5139 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
5140 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
5141 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
5142 			/*
5143 			 * Packets have drained.  Stop transmitter, reset
5144 			 * FIFO pointers, restart transmitter, and kick
5145 			 * the packet queue.
5146 			 */
5147 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
5148 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
5149 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
5150 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
5151 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
5152 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
5153 			CSR_WRITE(sc, WMREG_TCTL, tctl);
5154 			CSR_WRITE_FLUSH(sc);
5155 
5156 			txq->txq_fifo_head = 0;
5157 			txq->txq_fifo_stall = 0;
5158 			wm_start_locked(&sc->sc_ethercom.ec_if);
5159 		} else {
5160 			/*
5161 			 * Still waiting for packets to drain; try again in
5162 			 * another tick.
5163 			 */
5164 			callout_schedule(&sc->sc_txfifo_ch, 1);
5165 		}
5166 	}
5167 
5168 out:
5169 	WM_TX_UNLOCK(txq);
5170 #ifndef WM_MPSAFE
5171 	splx(s);
5172 #endif
5173 }
5174 
5175 /*
5176  * wm_82547_txfifo_bugchk:
5177  *
5178  *	Check for bug condition in the 82547 Tx FIFO.  We need to
5179  *	prevent enqueueing a packet that would wrap around the end
5180 	 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
5181  *
5182  *	We do this by checking the amount of space before the end
5183  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
5184  *	the Tx FIFO, wait for all remaining packets to drain, reset
5185  *	the internal FIFO pointers to the beginning, and restart
5186  *	transmission on the interface.
5187  */
5188 #define	WM_FIFO_HDR		0x10
5189 #define	WM_82547_PAD_LEN	0x3e0
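/*
 * Worked example (illustrative numbers only, not taken from the chip
 * documentation): with a 16KB FIFO (txq_fifo_size = 0x4000) and
 * txq_fifo_head = 0x3f00, the space left before the wrap point is
 * 0x100 bytes.  A 1500-byte packet rounds up to
 * len = roundup(1500 + WM_FIFO_HDR, WM_FIFO_HDR) = 0x5f0, and since
 * 0x5f0 >= WM_82547_PAD_LEN + 0x100 = 0x4e0, the packet would wrap the
 * FIFO, so wm_82547_txfifo_bugchk() stalls transmission instead.
 */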
5190 static int
5191 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
5192 {
5193 	struct wm_txqueue *txq = &sc->sc_txq[0];
5194 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
5195 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
5196 
5197 	/* Just return if already stalled. */
5198 	if (txq->txq_fifo_stall)
5199 		return 1;
5200 
5201 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
5202 		/* Stall only occurs in half-duplex mode. */
5203 		goto send_packet;
5204 	}
5205 
5206 	if (len >= WM_82547_PAD_LEN + space) {
5207 		txq->txq_fifo_stall = 1;
5208 		callout_schedule(&sc->sc_txfifo_ch, 1);
5209 		return 1;
5210 	}
5211 
5212  send_packet:
5213 	txq->txq_fifo_head += len;
5214 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
5215 		txq->txq_fifo_head -= txq->txq_fifo_size;
5216 
5217 	return 0;
5218 }
5219 
5220 static int
5221 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5222 {
5223 	int error;
5224 
5225 	/*
5226 	 * Allocate the control data structures, and create and load the
5227 	 * DMA map for it.
5228 	 *
5229 	 * NOTE: All Tx descriptors must be in the same 4G segment of
5230 	 * memory.  So must Rx descriptors.  We simplify by allocating
5231 	 * both sets within the same 4G segment.
5232 	 */
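	/*
	 * The 4G constraint is enforced by the boundary argument
	 * (0x100000000ULL) passed to bus_dmamem_alloc() below: the
	 * allocated segment may not cross a 4GB boundary.
	 */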
5233 	if (sc->sc_type < WM_T_82544) {
5234 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
5235 		txq->txq_desc_size = sizeof(wiseman_txdesc_t) *WM_NTXDESC(txq);
5236 	} else {
5237 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
5238 		txq->txq_desc_size = sizeof(txdescs_t);
5239 	}
5240 
5241 	if ((error = bus_dmamem_alloc(sc->sc_dmat, txq->txq_desc_size,
5242 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
5243 		    1, &txq->txq_desc_rseg, 0)) != 0) {
5244 		aprint_error_dev(sc->sc_dev,
5245 		    "unable to allocate TX control data, error = %d\n",
5246 		    error);
5247 		goto fail_0;
5248 	}
5249 
5250 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
5251 		    txq->txq_desc_rseg, txq->txq_desc_size,
5252 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
5253 		aprint_error_dev(sc->sc_dev,
5254 		    "unable to map TX control data, error = %d\n", error);
5255 		goto fail_1;
5256 	}
5257 
5258 	if ((error = bus_dmamap_create(sc->sc_dmat, txq->txq_desc_size, 1,
5259 		    txq->txq_desc_size, 0, 0, &txq->txq_desc_dmamap)) != 0) {
5260 		aprint_error_dev(sc->sc_dev,
5261 		    "unable to create TX control data DMA map, error = %d\n",
5262 		    error);
5263 		goto fail_2;
5264 	}
5265 
5266 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
5267 		    txq->txq_descs_u, txq->txq_desc_size, NULL, 0)) != 0) {
5268 		aprint_error_dev(sc->sc_dev,
5269 		    "unable to load TX control data DMA map, error = %d\n",
5270 		    error);
5271 		goto fail_3;
5272 	}
5273 
5274 	return 0;
5275 
5276  fail_3:
5277 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5278  fail_2:
5279 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5280 	    txq->txq_desc_size);
5281  fail_1:
5282 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5283  fail_0:
5284 	return error;
5285 }
5286 
5287 static void
5288 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5289 {
5290 
5291 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
5292 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5293 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5294 	    txq->txq_desc_size);
5295 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5296 }
5297 
5298 static int
5299 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5300 {
5301 	int error;
5302 
5303 	/*
5304 	 * Allocate the control data structures, and create and load the
5305 	 * DMA map for it.
5306 	 *
5307 	 * NOTE: All Tx descriptors must be in the same 4G segment of
5308 	 * memory.  So must Rx descriptors.  We simplify by allocating
5309 	 * both sets within the same 4G segment.
5310 	 */
5311 	rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
5312 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size,
5313 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
5314 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
5315 		aprint_error_dev(sc->sc_dev,
5316 		    "unable to allocate RX control data, error = %d\n",
5317 		    error);
5318 		goto fail_0;
5319 	}
5320 
5321 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
5322 		    rxq->rxq_desc_rseg, rxq->rxq_desc_size,
5323 		    (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
5324 		aprint_error_dev(sc->sc_dev,
5325 		    "unable to map RX control data, error = %d\n", error);
5326 		goto fail_1;
5327 	}
5328 
5329 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
5330 		    rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
5331 		aprint_error_dev(sc->sc_dev,
5332 		    "unable to create RX control data DMA map, error = %d\n",
5333 		    error);
5334 		goto fail_2;
5335 	}
5336 
5337 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
5338 		    rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
5339 		aprint_error_dev(sc->sc_dev,
5340 		    "unable to load RX control data DMA map, error = %d\n",
5341 		    error);
5342 		goto fail_3;
5343 	}
5344 
5345 	return 0;
5346 
5347  fail_3:
5348 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5349  fail_2:
5350 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5351 	    rxq->rxq_desc_size);
5352  fail_1:
5353 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5354  fail_0:
5355 	return error;
5356 }
5357 
5358 static void
5359 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5360 {
5361 
5362 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
5363 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5364 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5365 	    rxq->rxq_desc_size);
5366 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5367 }
5368 
5369 
5370 static int
5371 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5372 {
5373 	int i, error;
5374 
5375 	/* Create the transmit buffer DMA maps. */
5376 	WM_TXQUEUELEN(txq) =
5377 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
5378 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
5379 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5380 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
5381 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
5382 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
5383 			aprint_error_dev(sc->sc_dev,
5384 			    "unable to create Tx DMA map %d, error = %d\n",
5385 			    i, error);
5386 			goto fail;
5387 		}
5388 	}
5389 
5390 	return 0;
5391 
5392  fail:
5393 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5394 		if (txq->txq_soft[i].txs_dmamap != NULL)
5395 			bus_dmamap_destroy(sc->sc_dmat,
5396 			    txq->txq_soft[i].txs_dmamap);
5397 	}
5398 	return error;
5399 }
5400 
5401 static void
5402 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5403 {
5404 	int i;
5405 
5406 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5407 		if (txq->txq_soft[i].txs_dmamap != NULL)
5408 			bus_dmamap_destroy(sc->sc_dmat,
5409 			    txq->txq_soft[i].txs_dmamap);
5410 	}
5411 }
5412 
5413 static int
5414 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5415 {
5416 	int i, error;
5417 
5418 	/* Create the receive buffer DMA maps. */
5419 	for (i = 0; i < WM_NRXDESC; i++) {
5420 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
5421 			    MCLBYTES, 0, 0,
5422 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
5423 			aprint_error_dev(sc->sc_dev,
5424 			    "unable to create Rx DMA map %d error = %d\n",
5425 			    i, error);
5426 			goto fail;
5427 		}
5428 		rxq->rxq_soft[i].rxs_mbuf = NULL;
5429 	}
5430 
5431 	return 0;
5432 
5433  fail:
5434 	for (i = 0; i < WM_NRXDESC; i++) {
5435 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5436 			bus_dmamap_destroy(sc->sc_dmat,
5437 			    rxq->rxq_soft[i].rxs_dmamap);
5438 	}
5439 	return error;
5440 }
5441 
5442 static void
5443 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5444 {
5445 	int i;
5446 
5447 	for (i = 0; i < WM_NRXDESC; i++) {
5448 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5449 			bus_dmamap_destroy(sc->sc_dmat,
5450 			    rxq->rxq_soft[i].rxs_dmamap);
5451 	}
5452 }
5453 
5454 /*
5455  * wm_alloc_txrx_queues:
5456  *	Allocate {tx,rx} descriptors and {tx,rx} buffers
5457  */
5458 static int
5459 wm_alloc_txrx_queues(struct wm_softc *sc)
5460 {
5461 	int i, error, tx_done, rx_done;
5462 
5463 	/*
5464 	 * For transmission
5465 	 */
5466 	sc->sc_txq = kmem_zalloc(sizeof(struct wm_txqueue) * sc->sc_ntxqueues,
5467 	    KM_SLEEP);
5468 	if (sc->sc_txq == NULL) {
5469 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_txqueue\n");
5470 		error = ENOMEM;
5471 		goto fail_0;
5472 	}
5473 
5474 	error = 0;
5475 	tx_done = 0;
5476 	for (i = 0; i < sc->sc_ntxqueues; i++) {
5477 		struct wm_txqueue *txq = &sc->sc_txq[i];
5478 		txq->txq_sc = sc;
5479 #ifdef WM_MPSAFE
5480 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5481 #else
5482 		txq->txq_lock = NULL;
5483 #endif
5484 		error = wm_alloc_tx_descs(sc, txq);
5485 		if (error)
5486 			break;
5487 		error = wm_alloc_tx_buffer(sc, txq);
5488 		if (error) {
5489 			wm_free_tx_descs(sc, txq);
5490 			break;
5491 		}
5492 		tx_done++;
5493 	}
5494 	if (error)
5495 		goto fail_1;
5496 
5497 	/*
5498 	 * For receive
5499 	 */
5500 	sc->sc_rxq = kmem_zalloc(sizeof(struct wm_rxqueue) * sc->sc_nrxqueues,
5501 	    KM_SLEEP);
5502 	if (sc->sc_rxq == NULL) {
5503 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_rxqueue\n");
5504 		error = ENOMEM;
5505 		goto fail_1;
5506 	}
5507 
5508 	error = 0;
5509 	rx_done = 0;
5510 	for (i = 0; i < sc->sc_nrxqueues; i++) {
5511 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5512 		rxq->rxq_sc = sc;
5513 #ifdef WM_MPSAFE
5514 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5515 #else
5516 		rxq->rxq_lock = NULL;
5517 #endif
5518 		error = wm_alloc_rx_descs(sc, rxq);
5519 		if (error)
5520 			break;
5521 
5522 		error = wm_alloc_rx_buffer(sc, rxq);
5523 		if (error) {
5524 			wm_free_rx_descs(sc, rxq);
5525 			break;
5526 		}
5527 
5528 		rx_done++;
5529 	}
5530 	if (error)
5531 		goto fail_2;
5532 
5533 	return 0;
5534 
5535  fail_2:
5536 	for (i = 0; i < rx_done; i++) {
5537 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5538 		wm_free_rx_buffer(sc, rxq);
5539 		wm_free_rx_descs(sc, rxq);
5540 		if (rxq->rxq_lock)
5541 			mutex_obj_free(rxq->rxq_lock);
5542 	}
5543 	kmem_free(sc->sc_rxq,
5544 	    sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
5545  fail_1:
5546 	for (i = 0; i < tx_done; i++) {
5547 		struct wm_txqueue *txq = &sc->sc_txq[i];
5548 		wm_free_tx_buffer(sc, txq);
5549 		wm_free_tx_descs(sc, txq);
5550 		if (txq->txq_lock)
5551 			mutex_obj_free(txq->txq_lock);
5552 	}
5553 	kmem_free(sc->sc_txq,
5554 	    sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
5555  fail_0:
5556 	return error;
5557 }
5558 
5559 /*
5560  * wm_free_txrx_queues:
5561  *	Free {tx,rx} descriptors and {tx,rx} buffers
5562  */
5563 static void
5564 wm_free_txrx_queues(struct wm_softc *sc)
5565 {
5566 	int i;
5567 
5568 	for (i = 0; i < sc->sc_nrxqueues; i++) {
5569 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5570 		wm_free_rx_buffer(sc, rxq);
5571 		wm_free_rx_descs(sc, rxq);
5572 		if (rxq->rxq_lock)
5573 			mutex_obj_free(rxq->rxq_lock);
5574 	}
5575 	kmem_free(sc->sc_rxq, sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
5576 
5577 	for (i = 0; i < sc->sc_ntxqueues; i++) {
5578 		struct wm_txqueue *txq = &sc->sc_txq[i];
5579 		wm_free_tx_buffer(sc, txq);
5580 		wm_free_tx_descs(sc, txq);
5581 		if (txq->txq_lock)
5582 			mutex_obj_free(txq->txq_lock);
5583 	}
5584 	kmem_free(sc->sc_txq, sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
5585 }
5586 
5587 static void
5588 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5589 {
5590 
5591 	KASSERT(WM_TX_LOCKED(txq));
5592 
5593 	/* Initialize the transmit descriptor ring. */
5594 	memset(txq->txq_descs, 0, WM_TXDESCSIZE(txq));
5595 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
5596 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5597 	txq->txq_free = WM_NTXDESC(txq);
5598 	txq->txq_next = 0;
5599 }
5600 
5601 static void
5602 wm_init_tx_regs(struct wm_softc *sc, struct wm_txqueue *txq)
5603 {
5604 
5605 	KASSERT(WM_TX_LOCKED(txq));
5606 
5607 	if (sc->sc_type < WM_T_82543) {
5608 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
5609 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
5610 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(txq));
5611 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
5612 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
5613 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
5614 	} else {
5615 		int qid = txq->txq_id;
5616 
5617 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
5618 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
5619 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCSIZE(txq));
5620 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
5621 
5622 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5623 			/*
5624 			 * Don't write TDT before TCTL.EN is set.
5625 			 * See the datasheet.
5626 			 */
5627 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
5628 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
5629 			    | TXDCTL_WTHRESH(0));
5630 		else {
5631 			/* ITR / 4 */
5632 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
5633 			if (sc->sc_type >= WM_T_82540) {
5634 				/* should be same */
5635 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
5636 			}
5637 
5638 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
5639 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
5640 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
5641 		}
5642 	}
5643 }
5644 
5645 static void
5646 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5647 {
5648 	int i;
5649 
5650 	KASSERT(WM_TX_LOCKED(txq));
5651 
5652 	/* Initialize the transmit job descriptors. */
5653 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
5654 		txq->txq_soft[i].txs_mbuf = NULL;
5655 	txq->txq_sfree = WM_TXQUEUELEN(txq);
5656 	txq->txq_snext = 0;
5657 	txq->txq_sdirty = 0;
5658 }
5659 
5660 static void
5661 wm_init_tx_queue(struct wm_softc *sc, struct wm_txqueue *txq)
5662 {
5663 
5664 	KASSERT(WM_TX_LOCKED(txq));
5665 
5666 	/*
5667 	 * Set up some register offsets that are different between
5668 	 * the i82542 and the i82543 and later chips.
5669 	 */
5670 	if (sc->sc_type < WM_T_82543)
5671 		txq->txq_tdt_reg = WMREG_OLD_TDT;
5672 	else
5673 		txq->txq_tdt_reg = WMREG_TDT(txq->txq_id);
5674 
5675 	wm_init_tx_descs(sc, txq);
5676 	wm_init_tx_regs(sc, txq);
5677 	wm_init_tx_buffer(sc, txq);
5678 }
5679 
5680 static void
5681 wm_init_rx_regs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5682 {
5683 
5684 	KASSERT(WM_RX_LOCKED(rxq));
5685 
5686 	/*
5687 	 * Initialize the receive descriptor and receive job
5688 	 * descriptor rings.
5689 	 */
5690 	if (sc->sc_type < WM_T_82543) {
5691 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
5692 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
5693 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
5694 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
5695 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
5696 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
5697 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
5698 
5699 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
5700 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
5701 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
5702 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
5703 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
5704 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
5705 	} else {
5706 		int qid = rxq->rxq_id;
5707 
5708 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
5709 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
5710 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
5711 
5712 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5713 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
5714 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
5715 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
5716 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
5717 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
5718 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
5719 			    | RXDCTL_WTHRESH(1));
5720 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
5721 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
5722 		} else {
5723 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
5724 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
5725 			/* ITR / 4 */
5726 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
5727 			/* MUST be same */
5728 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
5729 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
5730 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
5731 		}
5732 	}
5733 }
5734 
5735 static int
5736 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5737 {
5738 	struct wm_rxsoft *rxs;
5739 	int error, i;
5740 
5741 	KASSERT(WM_RX_LOCKED(rxq));
5742 
5743 	for (i = 0; i < WM_NRXDESC; i++) {
5744 		rxs = &rxq->rxq_soft[i];
5745 		if (rxs->rxs_mbuf == NULL) {
5746 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
5747 				log(LOG_ERR, "%s: unable to allocate or map "
5748 				    "rx buffer %d, error = %d\n",
5749 				    device_xname(sc->sc_dev), i, error);
5750 				/*
5751 				 * XXX Should attempt to run with fewer receive
5752 				 * XXX buffers instead of just failing.
5753 				 */
5754 				wm_rxdrain(rxq);
5755 				return ENOMEM;
5756 			}
5757 		} else {
5758 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
5759 				wm_init_rxdesc(rxq, i);
5760 			/*
5761 			 * For 82575 and newer devices, the RX descriptors
5762 			 * must be initialized after RCTL.EN is set in
5763 			 * wm_set_filter().
5764 			 */
5765 		}
5766 	}
5767 	rxq->rxq_ptr = 0;
5768 	rxq->rxq_discard = 0;
5769 	WM_RXCHAIN_RESET(rxq);
5770 
5771 	return 0;
5772 }
5773 
5774 static int
5775 wm_init_rx_queue(struct wm_softc *sc, struct wm_rxqueue *rxq)
5776 {
5777 
5778 	KASSERT(WM_RX_LOCKED(rxq));
5779 
5780 	/*
5781 	 * Set up some register offsets that are different between
5782 	 * the i82542 and the i82543 and later chips.
5783 	 */
5784 	if (sc->sc_type < WM_T_82543)
5785 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
5786 	else
5787 		rxq->rxq_rdt_reg = WMREG_RDT(rxq->rxq_id);
5788 
5789 	wm_init_rx_regs(sc, rxq);
5790 	return wm_init_rx_buffer(sc, rxq);
5791 }
5792 
5793 /*
5794  * wm_init_txrx_queues:
5795  *	Initialize {tx,rx} descriptors and {tx,rx} buffers
5796  */
5797 static int
5798 wm_init_txrx_queues(struct wm_softc *sc)
5799 {
5800 	int i, error;
5801 
5802 	for (i = 0; i < sc->sc_ntxqueues; i++) {
5803 		struct wm_txqueue *txq = &sc->sc_txq[i];
5804 		WM_TX_LOCK(txq);
5805 		wm_init_tx_queue(sc, txq);
5806 		WM_TX_UNLOCK(txq);
5807 	}
5808 
5809 	error = 0;
5810 	for (i = 0; i < sc->sc_nrxqueues; i++) {
5811 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5812 		WM_RX_LOCK(rxq);
5813 		error = wm_init_rx_queue(sc, rxq);
5814 		WM_RX_UNLOCK(rxq);
5815 		if (error)
5816 			break;
5817 	}
5818 
5819 	return error;
5820 }
5821 
5822 /*
5823  * wm_tx_offload:
5824  *
5825  *	Set up TCP/IP checksumming parameters for the
5826  *	specified packet.
5827  */
5828 static int
5829 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
5830     uint8_t *fieldsp)
5831 {
5832 	struct wm_txqueue *txq = &sc->sc_txq[0];
5833 	struct mbuf *m0 = txs->txs_mbuf;
5834 	struct livengood_tcpip_ctxdesc *t;
5835 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
5836 	uint32_t ipcse;
5837 	struct ether_header *eh;
5838 	int offset, iphl;
5839 	uint8_t fields;
5840 
5841 	/*
5842 	 * XXX It would be nice if the mbuf pkthdr had offset
5843 	 * fields for the protocol headers.
5844 	 */
5845 
5846 	eh = mtod(m0, struct ether_header *);
5847 	switch (htons(eh->ether_type)) {
5848 	case ETHERTYPE_IP:
5849 	case ETHERTYPE_IPV6:
5850 		offset = ETHER_HDR_LEN;
5851 		break;
5852 
5853 	case ETHERTYPE_VLAN:
5854 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
5855 		break;
5856 
5857 	default:
5858 		/*
5859 		 * Don't support this protocol or encapsulation.
5860 		 */
5861 		*fieldsp = 0;
5862 		*cmdp = 0;
5863 		return 0;
5864 	}
5865 
5866 	if ((m0->m_pkthdr.csum_flags &
5867 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
5868 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
5869 	} else {
5870 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
5871 	}
5872 	ipcse = offset + iphl - 1;
5873 
5874 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
5875 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
5876 	seg = 0;
5877 	fields = 0;
5878 
5879 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
5880 		int hlen = offset + iphl;
5881 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
5882 
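		/*
		 * Note (added annotation): for TSO the device rewrites the
		 * length fields of each segment it emits, so we zero
		 * ip_len/ip6_plen here and seed th_sum with only the
		 * pseudo-header checksum (no length included); the hardware
		 * folds in the per-segment payload length when computing
		 * the final TCP checksum.
		 */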
5883 		if (__predict_false(m0->m_len <
5884 				    (hlen + sizeof(struct tcphdr)))) {
5885 			/*
5886 			 * TCP/IP headers are not in the first mbuf; we need
5887 			 * to do this the slow and painful way.  Let's just
5888 			 * hope this doesn't happen very often.
5889 			 */
5890 			struct tcphdr th;
5891 
5892 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
5893 
5894 			m_copydata(m0, hlen, sizeof(th), &th);
5895 			if (v4) {
5896 				struct ip ip;
5897 
5898 				m_copydata(m0, offset, sizeof(ip), &ip);
5899 				ip.ip_len = 0;
5900 				m_copyback(m0,
5901 				    offset + offsetof(struct ip, ip_len),
5902 				    sizeof(ip.ip_len), &ip.ip_len);
5903 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
5904 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
5905 			} else {
5906 				struct ip6_hdr ip6;
5907 
5908 				m_copydata(m0, offset, sizeof(ip6), &ip6);
5909 				ip6.ip6_plen = 0;
5910 				m_copyback(m0,
5911 				    offset + offsetof(struct ip6_hdr, ip6_plen),
5912 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
5913 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
5914 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
5915 			}
5916 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
5917 			    sizeof(th.th_sum), &th.th_sum);
5918 
5919 			hlen += th.th_off << 2;
5920 		} else {
5921 			/*
5922 			 * TCP/IP headers are in the first mbuf; we can do
5923 			 * this the easy way.
5924 			 */
5925 			struct tcphdr *th;
5926 
5927 			if (v4) {
5928 				struct ip *ip =
5929 				    (void *)(mtod(m0, char *) + offset);
5930 				th = (void *)(mtod(m0, char *) + hlen);
5931 
5932 				ip->ip_len = 0;
5933 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
5934 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
5935 			} else {
5936 				struct ip6_hdr *ip6 =
5937 				    (void *)(mtod(m0, char *) + offset);
5938 				th = (void *)(mtod(m0, char *) + hlen);
5939 
5940 				ip6->ip6_plen = 0;
5941 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
5942 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
5943 			}
5944 			hlen += th->th_off << 2;
5945 		}
5946 
5947 		if (v4) {
5948 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
5949 			cmdlen |= WTX_TCPIP_CMD_IP;
5950 		} else {
5951 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
5952 			ipcse = 0;
5953 		}
5954 		cmd |= WTX_TCPIP_CMD_TSE;
5955 		cmdlen |= WTX_TCPIP_CMD_TSE |
5956 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
5957 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
5958 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
5959 	}
5960 
5961 	/*
5962 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
5963 	 * offload feature, if we load the context descriptor, we
5964 	 * MUST provide valid values for IPCSS and TUCSS fields.
5965 	 */
5966 
5967 	ipcs = WTX_TCPIP_IPCSS(offset) |
5968 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
5969 	    WTX_TCPIP_IPCSE(ipcse);
5970 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
5971 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
5972 		fields |= WTX_IXSM;
5973 	}
5974 
5975 	offset += iphl;
5976 
5977 	if (m0->m_pkthdr.csum_flags &
5978 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
5979 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
5980 		fields |= WTX_TXSM;
5981 		tucs = WTX_TCPIP_TUCSS(offset) |
5982 		    WTX_TCPIP_TUCSO(offset +
5983 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
5984 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
5985 	} else if ((m0->m_pkthdr.csum_flags &
5986 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
5987 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
5988 		fields |= WTX_TXSM;
5989 		tucs = WTX_TCPIP_TUCSS(offset) |
5990 		    WTX_TCPIP_TUCSO(offset +
5991 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
5992 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
5993 	} else {
5994 		/* Just initialize it to a valid TCP context. */
5995 		tucs = WTX_TCPIP_TUCSS(offset) |
5996 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
5997 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
5998 	}
5999 
6000 	/* Fill in the context descriptor. */
6001 	t = (struct livengood_tcpip_ctxdesc *)
6002 	    &txq->txq_descs[txq->txq_next];
6003 	t->tcpip_ipcs = htole32(ipcs);
6004 	t->tcpip_tucs = htole32(tucs);
6005 	t->tcpip_cmdlen = htole32(cmdlen);
6006 	t->tcpip_seg = htole32(seg);
6007 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6008 
6009 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6010 	txs->txs_ndesc++;
6011 
6012 	*cmdp = cmd;
6013 	*fieldsp = fields;
6014 
6015 	return 0;
6016 }
6017 
6018 /*
6019  * wm_start:		[ifnet interface function]
6020  *
6021  *	Start packet transmission on the interface.
6022  */
6023 static void
6024 wm_start(struct ifnet *ifp)
6025 {
6026 	struct wm_softc *sc = ifp->if_softc;
6027 	struct wm_txqueue *txq = &sc->sc_txq[0];
6028 
6029 	WM_TX_LOCK(txq);
6030 	if (!sc->sc_stopping)
6031 		wm_start_locked(ifp);
6032 	WM_TX_UNLOCK(txq);
6033 }
6034 
6035 static void
6036 wm_start_locked(struct ifnet *ifp)
6037 {
6038 	struct wm_softc *sc = ifp->if_softc;
6039 	struct wm_txqueue *txq = &sc->sc_txq[0];
6040 	struct mbuf *m0;
6041 	struct m_tag *mtag;
6042 	struct wm_txsoft *txs;
6043 	bus_dmamap_t dmamap;
6044 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
6045 	bus_addr_t curaddr;
6046 	bus_size_t seglen, curlen;
6047 	uint32_t cksumcmd;
6048 	uint8_t cksumfields;
6049 
6050 	KASSERT(WM_TX_LOCKED(txq));
6051 
6052 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
6053 		return;
6054 
6055 	/* Remember the previous number of free descriptors. */
6056 	ofree = txq->txq_free;
6057 
6058 	/*
6059 	 * Loop through the send queue, setting up transmit descriptors
6060 	 * until we drain the queue, or use up all available transmit
6061 	 * descriptors.
6062 	 */
6063 	for (;;) {
6064 		m0 = NULL;
6065 
6066 		/* Get a work queue entry. */
6067 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
6068 			wm_txeof(sc);
6069 			if (txq->txq_sfree == 0) {
6070 				DPRINTF(WM_DEBUG_TX,
6071 				    ("%s: TX: no free job descriptors\n",
6072 					device_xname(sc->sc_dev)));
6073 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
6074 				break;
6075 			}
6076 		}
6077 
6078 		/* Grab a packet off the queue. */
6079 		IFQ_DEQUEUE(&ifp->if_snd, m0);
6080 		if (m0 == NULL)
6081 			break;
6082 
6083 		DPRINTF(WM_DEBUG_TX,
6084 		    ("%s: TX: have packet to transmit: %p\n",
6085 		    device_xname(sc->sc_dev), m0));
6086 
6087 		txs = &txq->txq_soft[txq->txq_snext];
6088 		dmamap = txs->txs_dmamap;
6089 
6090 		use_tso = (m0->m_pkthdr.csum_flags &
6091 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
6092 
6093 		/*
6094 		 * So says the Linux driver:
6095 		 * The controller does a simple calculation to make sure
6096 		 * there is enough room in the FIFO before initiating the
6097 		 * DMA for each buffer.  The calc is:
6098 		 *	4 = ceil(buffer len / MSS)
6099 		 * To make sure we don't overrun the FIFO, adjust the max
6100 		 * buffer len if the MSS drops.
6101 		 */
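		/* That is, cap each DMA segment at 4 * MSS (segsz << 2). */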
6102 		dmamap->dm_maxsegsz =
6103 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
6104 		    ? m0->m_pkthdr.segsz << 2
6105 		    : WTX_MAX_LEN;
6106 
6107 		/*
6108 		 * Load the DMA map.  If this fails, the packet either
6109 		 * didn't fit in the allotted number of segments, or we
6110 		 * were short on resources.  For the too-many-segments
6111 		 * case, we simply report an error and drop the packet,
6112 		 * since we can't sanely copy a jumbo packet to a single
6113 		 * buffer.
6114 		 */
6115 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
6116 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
6117 		if (error) {
6118 			if (error == EFBIG) {
6119 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6120 				log(LOG_ERR, "%s: Tx packet consumes too many "
6121 				    "DMA segments, dropping...\n",
6122 				    device_xname(sc->sc_dev));
6123 				wm_dump_mbuf_chain(sc, m0);
6124 				m_freem(m0);
6125 				continue;
6126 			}
6127 			/*  Short on resources, just stop for now. */
6128 			DPRINTF(WM_DEBUG_TX,
6129 			    ("%s: TX: dmamap load failed: %d\n",
6130 			    device_xname(sc->sc_dev), error));
6131 			break;
6132 		}
6133 
6134 		segs_needed = dmamap->dm_nsegs;
6135 		if (use_tso) {
6136 			/* For sentinel descriptor; see below. */
6137 			segs_needed++;
6138 		}
6139 
6140 		/*
6141 		 * Ensure we have enough descriptors free to describe
6142 		 * the packet.  Note, we always reserve one descriptor
6143 		 * at the end of the ring due to the semantics of the
6144 		 * TDT register, plus one more in the event we need
6145 		 * to load offload context.
6146 		 */
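		/*
		 * One slot is reserved for the TDT semantics noted above
		 * and one for a possible context descriptor, hence the
		 * txq_free - 2 test below.
		 */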
6147 		if (segs_needed > txq->txq_free - 2) {
6148 			/*
6149 			 * Not enough free descriptors to transmit this
6150 			 * packet.  We haven't committed anything yet,
6151 			 * so just unload the DMA map, put the packet
6152 			 * back on the queue, and punt.  Notify the upper
6153 			 * layer that there are no more slots left.
6154 			 */
6155 			DPRINTF(WM_DEBUG_TX,
6156 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
6157 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
6158 			    segs_needed, txq->txq_free - 1));
6159 			ifp->if_flags |= IFF_OACTIVE;
6160 			bus_dmamap_unload(sc->sc_dmat, dmamap);
6161 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
6162 			break;
6163 		}
6164 
6165 		/*
6166 		 * Check for 82547 Tx FIFO bug.  We need to do this
6167 		 * once we know we can transmit the packet, since we
6168 		 * do some internal FIFO space accounting here.
6169 		 */
6170 		if (sc->sc_type == WM_T_82547 &&
6171 		    wm_82547_txfifo_bugchk(sc, m0)) {
6172 			DPRINTF(WM_DEBUG_TX,
6173 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
6174 			    device_xname(sc->sc_dev)));
6175 			ifp->if_flags |= IFF_OACTIVE;
6176 			bus_dmamap_unload(sc->sc_dmat, dmamap);
6177 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
6178 			break;
6179 		}
6180 
6181 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
6182 
6183 		DPRINTF(WM_DEBUG_TX,
6184 		    ("%s: TX: packet has %d (%d) DMA segments\n",
6185 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
6186 
6187 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
6188 
6189 		/*
6190 		 * Store a pointer to the packet so that we can free it
6191 		 * later.
6192 		 *
6193 		 * Initially, we consider the number of descriptors the
6194 		 * packet uses the number of DMA segments.  This may be
6195 		 * incremented by 1 if we do checksum offload (a descriptor
6196 		 * is used to set the checksum context).
6197 		 */
6198 		txs->txs_mbuf = m0;
6199 		txs->txs_firstdesc = txq->txq_next;
6200 		txs->txs_ndesc = segs_needed;
6201 
6202 		/* Set up offload parameters for this packet. */
6203 		if (m0->m_pkthdr.csum_flags &
6204 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
6205 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
6206 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
6207 			if (wm_tx_offload(sc, txs, &cksumcmd,
6208 					  &cksumfields) != 0) {
6209 				/* Error message already displayed. */
6210 				bus_dmamap_unload(sc->sc_dmat, dmamap);
6211 				continue;
6212 			}
6213 		} else {
6214 			cksumcmd = 0;
6215 			cksumfields = 0;
6216 		}
6217 
6218 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
6219 
6220 		/* Sync the DMA map. */
6221 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
6222 		    BUS_DMASYNC_PREWRITE);
6223 
6224 		/* Initialize the transmit descriptor. */
6225 		for (nexttx = txq->txq_next, seg = 0;
6226 		     seg < dmamap->dm_nsegs; seg++) {
6227 			for (seglen = dmamap->dm_segs[seg].ds_len,
6228 			     curaddr = dmamap->dm_segs[seg].ds_addr;
6229 			     seglen != 0;
6230 			     curaddr += curlen, seglen -= curlen,
6231 			     nexttx = WM_NEXTTX(txq, nexttx)) {
6232 				curlen = seglen;
6233 
6234 				/*
6235 				 * So says the Linux driver:
6236 				 * Work around for premature descriptor
6237 				 * write-backs in TSO mode.  Append a
6238 				 * 4-byte sentinel descriptor.
6239 				 */
6240 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
6241 				    curlen > 8)
6242 					curlen -= 4;
6243 
6244 				wm_set_dma_addr(
6245 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
6246 				txq->txq_descs[nexttx].wtx_cmdlen
6247 				    = htole32(cksumcmd | curlen);
6248 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
6249 				    = 0;
6250 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
6251 				    = cksumfields;
6252 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
6253 				lasttx = nexttx;
6254 
6255 				DPRINTF(WM_DEBUG_TX,
6256 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
6257 				     "len %#04zx\n",
6258 				    device_xname(sc->sc_dev), nexttx,
6259 				    (uint64_t)curaddr, curlen));
6260 			}
6261 		}
6262 
6263 		KASSERT(lasttx != -1);
6264 
6265 		/*
6266 		 * Set up the command byte on the last descriptor of
6267 		 * the packet.  If we're in the interrupt delay window,
6268 		 * delay the interrupt.
6269 		 */
6270 		txq->txq_descs[lasttx].wtx_cmdlen |=
6271 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
6272 
6273 		/*
6274 		 * If VLANs are enabled and the packet has a VLAN tag, set
6275 		 * up the descriptor to encapsulate the packet for us.
6276 		 *
6277 		 * This is only valid on the last descriptor of the packet.
6278 		 */
6279 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6280 			txq->txq_descs[lasttx].wtx_cmdlen |=
6281 			    htole32(WTX_CMD_VLE);
6282 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
6283 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
6284 		}
6285 
6286 		txs->txs_lastdesc = lasttx;
6287 
6288 		DPRINTF(WM_DEBUG_TX,
6289 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
6290 		    device_xname(sc->sc_dev),
6291 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
6292 
6293 		/* Sync the descriptors we're using. */
6294 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
6295 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
6296 
6297 		/* Give the packet to the chip. */
6298 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
6299 
6300 		DPRINTF(WM_DEBUG_TX,
6301 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
6302 
6303 		DPRINTF(WM_DEBUG_TX,
6304 		    ("%s: TX: finished transmitting packet, job %d\n",
6305 		    device_xname(sc->sc_dev), txq->txq_snext));
6306 
6307 		/* Advance the tx pointer. */
6308 		txq->txq_free -= txs->txs_ndesc;
6309 		txq->txq_next = nexttx;
6310 
6311 		txq->txq_sfree--;
6312 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
6313 
6314 		/* Pass the packet to any BPF listeners. */
6315 		bpf_mtap(ifp, m0);
6316 	}
6317 
6318 	if (m0 != NULL) {
6319 		ifp->if_flags |= IFF_OACTIVE;
6320 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6321 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
6322 			__func__));
6323 		m_freem(m0);
6324 	}
6325 
6326 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
6327 		/* No more slots; notify upper layer. */
6328 		ifp->if_flags |= IFF_OACTIVE;
6329 	}
6330 
6331 	if (txq->txq_free != ofree) {
6332 		/* Set a watchdog timer in case the chip flakes out. */
6333 		ifp->if_timer = 5;
6334 	}
6335 }
6336 
6337 /*
6338  * wm_nq_tx_offload:
6339  *
6340  *	Set up TCP/IP checksumming parameters for the
6341  *	specified packet, for NEWQUEUE devices
6342  */
6343 static int
6344 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
6345     uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
6346 {
6347 	struct wm_txqueue *txq = &sc->sc_txq[0];
6348 	struct mbuf *m0 = txs->txs_mbuf;
6349 	struct m_tag *mtag;
6350 	uint32_t vl_len, mssidx, cmdc;
6351 	struct ether_header *eh;
6352 	int offset, iphl;
6353 
6354 	/*
6355 	 * XXX It would be nice if the mbuf pkthdr had offset
6356 	 * fields for the protocol headers.
6357 	 */
6358 	*cmdlenp = 0;
6359 	*fieldsp = 0;
6360 
6361 	eh = mtod(m0, struct ether_header *);
6362 	switch (htons(eh->ether_type)) {
6363 	case ETHERTYPE_IP:
6364 	case ETHERTYPE_IPV6:
6365 		offset = ETHER_HDR_LEN;
6366 		break;
6367 
6368 	case ETHERTYPE_VLAN:
6369 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
6370 		break;
6371 
6372 	default:
6373 		/* Don't support this protocol or encapsulation. */
6374 		*do_csum = false;
6375 		return 0;
6376 	}
6377 	*do_csum = true;
6378 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
6379 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
6380 
6381 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
6382 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
6383 
6384 	if ((m0->m_pkthdr.csum_flags &
6385 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
6386 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
6387 	} else {
6388 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
6389 	}
6390 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
6391 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
6392 
6393 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6394 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
6395 		     << NQTXC_VLLEN_VLAN_SHIFT);
6396 		*cmdlenp |= NQTX_CMD_VLE;
6397 	}
6398 
6399 	mssidx = 0;
6400 
6401 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
6402 		int hlen = offset + iphl;
6403 		int tcp_hlen;
6404 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
6405 
6406 		if (__predict_false(m0->m_len <
6407 				    (hlen + sizeof(struct tcphdr)))) {
6408 			/*
6409 			 * TCP/IP headers are not in the first mbuf; we need
6410 			 * to do this the slow and painful way.  Let's just
6411 			 * hope this doesn't happen very often.
6412 			 */
6413 			struct tcphdr th;
6414 
6415 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
6416 
6417 			m_copydata(m0, hlen, sizeof(th), &th);
6418 			if (v4) {
6419 				struct ip ip;
6420 
6421 				m_copydata(m0, offset, sizeof(ip), &ip);
6422 				ip.ip_len = 0;
6423 				m_copyback(m0,
6424 				    offset + offsetof(struct ip, ip_len),
6425 				    sizeof(ip.ip_len), &ip.ip_len);
6426 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
6427 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
6428 			} else {
6429 				struct ip6_hdr ip6;
6430 
6431 				m_copydata(m0, offset, sizeof(ip6), &ip6);
6432 				ip6.ip6_plen = 0;
6433 				m_copyback(m0,
6434 				    offset + offsetof(struct ip6_hdr, ip6_plen),
6435 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
6436 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
6437 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
6438 			}
6439 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
6440 			    sizeof(th.th_sum), &th.th_sum);
6441 
6442 			tcp_hlen = th.th_off << 2;
6443 		} else {
6444 			/*
6445 			 * TCP/IP headers are in the first mbuf; we can do
6446 			 * this the easy way.
6447 			 */
6448 			struct tcphdr *th;
6449 
6450 			if (v4) {
6451 				struct ip *ip =
6452 				    (void *)(mtod(m0, char *) + offset);
6453 				th = (void *)(mtod(m0, char *) + hlen);
6454 
6455 				ip->ip_len = 0;
6456 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
6457 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
6458 			} else {
6459 				struct ip6_hdr *ip6 =
6460 				    (void *)(mtod(m0, char *) + offset);
6461 				th = (void *)(mtod(m0, char *) + hlen);
6462 
6463 				ip6->ip6_plen = 0;
6464 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
6465 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
6466 			}
6467 			tcp_hlen = th->th_off << 2;
6468 		}
6469 		hlen += tcp_hlen;
6470 		*cmdlenp |= NQTX_CMD_TSE;
6471 
6472 		if (v4) {
6473 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
6474 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
6475 		} else {
6476 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
6477 			*fieldsp |= NQTXD_FIELDS_TUXSM;
6478 		}
6479 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
6480 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6481 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
6482 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
6483 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
6484 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
6485 	} else {
6486 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
6487 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6488 	}
6489 
6490 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
6491 		*fieldsp |= NQTXD_FIELDS_IXSM;
6492 		cmdc |= NQTXC_CMD_IP4;
6493 	}
6494 
6495 	if (m0->m_pkthdr.csum_flags &
6496 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6497 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
6498 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6499 			cmdc |= NQTXC_CMD_TCP;
6500 		} else {
6501 			cmdc |= NQTXC_CMD_UDP;
6502 		}
6503 		cmdc |= NQTXC_CMD_IP4;
6504 		*fieldsp |= NQTXD_FIELDS_TUXSM;
6505 	}
6506 	if (m0->m_pkthdr.csum_flags &
6507 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6508 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
6509 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6510 			cmdc |= NQTXC_CMD_TCP;
6511 		} else {
6512 			cmdc |= NQTXC_CMD_UDP;
6513 		}
6514 		cmdc |= NQTXC_CMD_IP6;
6515 		*fieldsp |= NQTXD_FIELDS_TUXSM;
6516 	}
6517 
6518 	/* Fill in the context descriptor. */
6519 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
6520 	    htole32(vl_len);
6521 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
6522 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
6523 	    htole32(cmdc);
6524 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
6525 	    htole32(mssidx);
6526 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6527 	DPRINTF(WM_DEBUG_TX,
6528 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
6529 	    txq->txq_next, 0, vl_len));
6530 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
6531 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6532 	txs->txs_ndesc++;
6533 	return 0;
6534 }
6535 
6536 /*
6537  * wm_nq_start:		[ifnet interface function]
6538  *
6539  *	Start packet transmission on the interface for NEWQUEUE devices
6540  */
6541 static void
6542 wm_nq_start(struct ifnet *ifp)
6543 {
6544 	struct wm_softc *sc = ifp->if_softc;
6545 	struct wm_txqueue *txq = &sc->sc_txq[0];
6546 
6547 	WM_TX_LOCK(txq);
6548 	if (!sc->sc_stopping)
6549 		wm_nq_start_locked(ifp);
6550 	WM_TX_UNLOCK(txq);
6551 }
6552 
6553 static void
6554 wm_nq_start_locked(struct ifnet *ifp)
6555 {
6556 	struct wm_softc *sc = ifp->if_softc;
6557 	struct wm_txqueue *txq = &sc->sc_txq[0];
6558 	struct mbuf *m0;
6559 	struct m_tag *mtag;
6560 	struct wm_txsoft *txs;
6561 	bus_dmamap_t dmamap;
6562 	int error, nexttx, lasttx = -1, seg, segs_needed;
6563 	bool do_csum, sent;
6564 
6565 	KASSERT(WM_TX_LOCKED(txq));
6566 
6567 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
6568 		return;
6569 
6570 	sent = false;
6571 
6572 	/*
6573 	 * Loop through the send queue, setting up transmit descriptors
6574 	 * until we drain the queue, or use up all available transmit
6575 	 * descriptors.
6576 	 */
6577 	for (;;) {
6578 		m0 = NULL;
6579 
6580 		/* Get a work queue entry. */
6581 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
6582 			wm_txeof(sc);
6583 			if (txq->txq_sfree == 0) {
6584 				DPRINTF(WM_DEBUG_TX,
6585 				    ("%s: TX: no free job descriptors\n",
6586 					device_xname(sc->sc_dev)));
6587 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
6588 				break;
6589 			}
6590 		}
6591 
6592 		/* Grab a packet off the queue. */
6593 		IFQ_DEQUEUE(&ifp->if_snd, m0);
6594 		if (m0 == NULL)
6595 			break;
6596 
6597 		DPRINTF(WM_DEBUG_TX,
6598 		    ("%s: TX: have packet to transmit: %p\n",
6599 		    device_xname(sc->sc_dev), m0));
6600 
6601 		txs = &txq->txq_soft[txq->txq_snext];
6602 		dmamap = txs->txs_dmamap;
6603 
6604 		/*
6605 		 * Load the DMA map.  If this fails, the packet either
6606 		 * didn't fit in the allotted number of segments, or we
6607 		 * were short on resources.  For the too-many-segments
6608 		 * case, we simply report an error and drop the packet,
6609 		 * since we can't sanely copy a jumbo packet to a single
6610 		 * buffer.
6611 		 */
6612 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
6613 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
6614 		if (error) {
6615 			if (error == EFBIG) {
6616 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6617 				log(LOG_ERR, "%s: Tx packet consumes too many "
6618 				    "DMA segments, dropping...\n",
6619 				    device_xname(sc->sc_dev));
6620 				wm_dump_mbuf_chain(sc, m0);
6621 				m_freem(m0);
6622 				continue;
6623 			}
6624 			/* Short on resources, just stop for now. */
6625 			DPRINTF(WM_DEBUG_TX,
6626 			    ("%s: TX: dmamap load failed: %d\n",
6627 			    device_xname(sc->sc_dev), error));
6628 			break;
6629 		}
6630 
6631 		segs_needed = dmamap->dm_nsegs;
6632 
6633 		/*
6634 		 * Ensure we have enough descriptors free to describe
6635 		 * the packet.  Note, we always reserve one descriptor
6636 		 * at the end of the ring due to the semantics of the
6637 		 * TDT register, plus one more in the event we need
6638 		 * to load offload context.
6639 		 */
6640 		if (segs_needed > txq->txq_free - 2) {
6641 			/*
6642 			 * Not enough free descriptors to transmit this
6643 			 * packet.  We haven't committed anything yet,
6644 			 * so just unload the DMA map, put the packet
6645 			 * back on the queue, and punt.  Notify the upper
6646 			 * layer that there are no more slots left.
6647 			 */
6648 			DPRINTF(WM_DEBUG_TX,
6649 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
6650 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
6651 			    segs_needed, txq->txq_free - 1));
6652 			ifp->if_flags |= IFF_OACTIVE;
6653 			bus_dmamap_unload(sc->sc_dmat, dmamap);
6654 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
6655 			break;
6656 		}
6657 
6658 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
6659 
6660 		DPRINTF(WM_DEBUG_TX,
6661 		    ("%s: TX: packet has %d (%d) DMA segments\n",
6662 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
6663 
6664 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
6665 
6666 		/*
6667 		 * Store a pointer to the packet so that we can free it
6668 		 * later.
6669 		 *
6670 		 * Initially, we consider the number of descriptors the
6671 		 * packet uses the number of DMA segments.  This may be
6672 		 * incremented by 1 if we do checksum offload (a descriptor
6673 		 * is used to set the checksum context).
6674 		 */
6675 		txs->txs_mbuf = m0;
6676 		txs->txs_firstdesc = txq->txq_next;
6677 		txs->txs_ndesc = segs_needed;
6678 
6679 		/* Set up offload parameters for this packet. */
6680 		uint32_t cmdlen, fields, dcmdlen;
6681 		if (m0->m_pkthdr.csum_flags &
6682 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
6683 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
6684 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
6685 			if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
6686 			    &do_csum) != 0) {
6687 				/* Error message already displayed. */
6688 				bus_dmamap_unload(sc->sc_dmat, dmamap);
6689 				continue;
6690 			}
6691 		} else {
6692 			do_csum = false;
6693 			cmdlen = 0;
6694 			fields = 0;
6695 		}
6696 
6697 		/* Sync the DMA map. */
6698 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
6699 		    BUS_DMASYNC_PREWRITE);
6700 
6701 		/* Initialize the first transmit descriptor. */
6702 		nexttx = txq->txq_next;
6703 		if (!do_csum) {
6704 			/* setup a legacy descriptor */
6705 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
6706 			    dmamap->dm_segs[0].ds_addr);
6707 			txq->txq_descs[nexttx].wtx_cmdlen =
6708 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
6709 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
6710 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
6711 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
6712 			    NULL) {
6713 				txq->txq_descs[nexttx].wtx_cmdlen |=
6714 				    htole32(WTX_CMD_VLE);
6715 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
6716 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
6717 			} else {
6718 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
6719 			}
6720 			dcmdlen = 0;
6721 		} else {
6722 			/* setup an advanced data descriptor */
6723 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
6724 			    htole64(dmamap->dm_segs[0].ds_addr);
6725 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
6726 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
6727 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
6728 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
6729 			    htole32(fields);
6730 			DPRINTF(WM_DEBUG_TX,
6731 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
6732 			    device_xname(sc->sc_dev), nexttx,
6733 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
6734 			DPRINTF(WM_DEBUG_TX,
6735 			    ("\t 0x%08x%08x\n", fields,
6736 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
6737 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
6738 		}
6739 
6740 		lasttx = nexttx;
6741 		nexttx = WM_NEXTTX(txq, nexttx);
6742 		/*
6743 		 * Fill in the next descriptors.  The legacy and advanced
6744 		 * formats are the same here.
6745 		 */
6746 		for (seg = 1; seg < dmamap->dm_nsegs;
6747 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
6748 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
6749 			    htole64(dmamap->dm_segs[seg].ds_addr);
6750 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
6751 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
6752 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
6753 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
6754 			lasttx = nexttx;
6755 
6756 			DPRINTF(WM_DEBUG_TX,
6757 			    ("%s: TX: desc %d: %#" PRIx64 ", "
6758 			     "len %#04zx\n",
6759 			    device_xname(sc->sc_dev), nexttx,
6760 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
6761 			    dmamap->dm_segs[seg].ds_len));
6762 		}
6763 
6764 		KASSERT(lasttx != -1);
6765 
6766 		/*
6767 		 * Set up the command byte on the last descriptor of
6768 		 * the packet.  If we're in the interrupt delay window,
6769 		 * delay the interrupt.
6770 		 */
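		/*
		 * Note (added annotation): the legacy and advanced
		 * descriptor layouts keep the command/length dword in the
		 * same position, so the legacy wtx_cmdlen overlay can set
		 * EOP/RS on either format; the KASSERT below checks that
		 * the bit encodings agree.
		 */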
6771 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
6772 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
6773 		txq->txq_descs[lasttx].wtx_cmdlen |=
6774 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
6775 
6776 		txs->txs_lastdesc = lasttx;
6777 
6778 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
6779 		    device_xname(sc->sc_dev),
6780 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
6781 
6782 		/* Sync the descriptors we're using. */
6783 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
6784 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
6785 
6786 		/* Give the packet to the chip. */
6787 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
6788 		sent = true;
6789 
6790 		DPRINTF(WM_DEBUG_TX,
6791 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
6792 
6793 		DPRINTF(WM_DEBUG_TX,
6794 		    ("%s: TX: finished transmitting packet, job %d\n",
6795 		    device_xname(sc->sc_dev), txq->txq_snext));
6796 
6797 		/* Advance the tx pointer. */
6798 		txq->txq_free -= txs->txs_ndesc;
6799 		txq->txq_next = nexttx;
6800 
6801 		txq->txq_sfree--;
6802 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
6803 
6804 		/* Pass the packet to any BPF listeners. */
6805 		bpf_mtap(ifp, m0);
6806 	}
6807 
6808 	if (m0 != NULL) {
6809 		ifp->if_flags |= IFF_OACTIVE;
6810 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6811 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
6812 			__func__));
6813 		m_freem(m0);
6814 	}
6815 
6816 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
6817 		/* No more slots; notify upper layer. */
6818 		ifp->if_flags |= IFF_OACTIVE;
6819 	}
6820 
6821 	if (sent) {
6822 		/* Set a watchdog timer in case the chip flakes out. */
6823 		ifp->if_timer = 5;
6824 	}
6825 }
6826 
6827 /* Interrupt */
6828 
6829 /*
6830  * wm_txeof:
6831  *
6832  *	Helper; handle transmit interrupts.
6833  */
6834 static int
6835 wm_txeof(struct wm_softc *sc)
6836 {
6837 	struct wm_txqueue *txq = &sc->sc_txq[0];
6838 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6839 	struct wm_txsoft *txs;
6840 	bool processed = false;
6841 	int count = 0;
6842 	int i;
6843 	uint8_t status;
6844 
6845 	if (sc->sc_stopping)
6846 		return 0;
6847 
6848 	ifp->if_flags &= ~IFF_OACTIVE;
6849 
6850 	/*
6851 	 * Go through the Tx list and free mbufs for those
6852 	 * frames which have been transmitted.
6853 	 */
6854 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
6855 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
6856 		txs = &txq->txq_soft[i];
6857 
6858 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
6859 			device_xname(sc->sc_dev), i));
6860 
6861 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
6862 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
6863 
6864 		status =
6865 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
6866 		if ((status & WTX_ST_DD) == 0) {
6867 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
6868 			    BUS_DMASYNC_PREREAD);
6869 			break;
6870 		}
6871 
6872 		processed = true;
6873 		count++;
6874 		DPRINTF(WM_DEBUG_TX,
6875 		    ("%s: TX: job %d done: descs %d..%d\n",
6876 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
6877 		    txs->txs_lastdesc));
6878 
6879 		/*
6880 		 * XXX We should probably be using the statistics
6881 		 * XXX registers, but I don't know if they exist
6882 		 * XXX on chips before the i82544.
6883 		 */
6884 
6885 #ifdef WM_EVENT_COUNTERS
6886 		if (status & WTX_ST_TU)
6887 			WM_EVCNT_INCR(&sc->sc_ev_tu);
6888 #endif /* WM_EVENT_COUNTERS */
6889 
6890 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
6891 			ifp->if_oerrors++;
6892 			if (status & WTX_ST_LC)
6893 				log(LOG_WARNING, "%s: late collision\n",
6894 				    device_xname(sc->sc_dev));
6895 			else if (status & WTX_ST_EC) {
6896 				ifp->if_collisions += 16;
6897 				log(LOG_WARNING, "%s: excessive collisions\n",
6898 				    device_xname(sc->sc_dev));
6899 			}
6900 		} else
6901 			ifp->if_opackets++;
6902 
6903 		txq->txq_free += txs->txs_ndesc;
6904 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
6905 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
6906 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
6907 		m_freem(txs->txs_mbuf);
6908 		txs->txs_mbuf = NULL;
6909 	}
6910 
6911 	/* Update the dirty transmit buffer pointer. */
6912 	txq->txq_sdirty = i;
6913 	DPRINTF(WM_DEBUG_TX,
6914 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
6915 
6916 	if (count != 0)
6917 		rnd_add_uint32(&sc->rnd_source, count);
6918 
6919 	/*
6920 	 * If there are no more pending transmissions, cancel the watchdog
6921 	 * timer.
6922 	 */
6923 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
6924 		ifp->if_timer = 0;
6925 
6926 	return processed;
6927 }
6928 
6929 /*
6930  * wm_rxeof:
6931  *
6932  *	Helper; handle receive interrupts.
6933  */
6934 static void
6935 wm_rxeof(struct wm_rxqueue *rxq)
6936 {
6937 	struct wm_softc *sc = rxq->rxq_sc;
6938 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6939 	struct wm_rxsoft *rxs;
6940 	struct mbuf *m;
6941 	int i, len;
6942 	int count = 0;
6943 	uint8_t status, errors;
6944 	uint16_t vlantag;
6945 
6946 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
6947 		rxs = &rxq->rxq_soft[i];
6948 
6949 		DPRINTF(WM_DEBUG_RX,
6950 		    ("%s: RX: checking descriptor %d\n",
6951 		    device_xname(sc->sc_dev), i));
6952 
6953 		wm_cdrxsync(rxq, i,BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
6954 
6955 		status = rxq->rxq_descs[i].wrx_status;
6956 		errors = rxq->rxq_descs[i].wrx_errors;
6957 		len = le16toh(rxq->rxq_descs[i].wrx_len);
6958 		vlantag = rxq->rxq_descs[i].wrx_special;
6959 
6960 		if ((status & WRX_ST_DD) == 0) {
6961 			/* We have processed all of the receive descriptors. */
6962 			wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
6963 			break;
6964 		}
6965 
6966 		count++;
6967 		if (__predict_false(rxq->rxq_discard)) {
6968 			DPRINTF(WM_DEBUG_RX,
6969 			    ("%s: RX: discarding contents of descriptor %d\n",
6970 			    device_xname(sc->sc_dev), i));
6971 			wm_init_rxdesc(rxq, i);
6972 			if (status & WRX_ST_EOP) {
6973 				/* Reset our state. */
6974 				DPRINTF(WM_DEBUG_RX,
6975 				    ("%s: RX: resetting rxdiscard -> 0\n",
6976 				    device_xname(sc->sc_dev)));
6977 				rxq->rxq_discard = 0;
6978 			}
6979 			continue;
6980 		}
6981 
6982 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
6983 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
6984 
6985 		m = rxs->rxs_mbuf;
6986 
6987 		/*
6988 		 * Add a new receive buffer to the ring, unless of
6989 		 * course the length is zero. Treat the latter as a
6990 		 * failed mapping.
6991 		 */
6992 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
6993 			/*
6994 			 * Failed, throw away what we've done so
6995 			 * far, and discard the rest of the packet.
6996 			 */
6997 			ifp->if_ierrors++;
6998 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
6999 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
7000 			wm_init_rxdesc(rxq, i);
7001 			if ((status & WRX_ST_EOP) == 0)
7002 				rxq->rxq_discard = 1;
7003 			if (rxq->rxq_head != NULL)
7004 				m_freem(rxq->rxq_head);
7005 			WM_RXCHAIN_RESET(rxq);
7006 			DPRINTF(WM_DEBUG_RX,
7007 			    ("%s: RX: Rx buffer allocation failed, "
7008 			    "dropping packet%s\n", device_xname(sc->sc_dev),
7009 			    rxq->rxq_discard ? " (discard)" : ""));
7010 			continue;
7011 		}
7012 
7013 		m->m_len = len;
7014 		rxq->rxq_len += len;
7015 		DPRINTF(WM_DEBUG_RX,
7016 		    ("%s: RX: buffer at %p len %d\n",
7017 		    device_xname(sc->sc_dev), m->m_data, len));
7018 
7019 		/* If this is not the end of the packet, keep looking. */
7020 		if ((status & WRX_ST_EOP) == 0) {
7021 			WM_RXCHAIN_LINK(rxq, m);
7022 			DPRINTF(WM_DEBUG_RX,
7023 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
7024 			    device_xname(sc->sc_dev), rxq->rxq_len));
7025 			continue;
7026 		}
7027 
7028 		/*
7029 		 * Okay, we have the entire packet now.  The chip is
7030 		 * configured to include the FCS except on the I350,
7031 		 * I354 and I21[01] (not all chips can be configured
7032 		 * to strip it), so we need to trim it.  We may need
7033 		 * to adjust the length of the previous mbuf in the
7034 		 * chain if the current mbuf is too short.
7035 		 * Due to an erratum, RCTL_SECRC is always set on the
7036 		 * I350, so the FCS is already stripped there.
7037 		 */
7038 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
7039 		    && (sc->sc_type != WM_T_I210)
7040 		    && (sc->sc_type != WM_T_I211)) {
7041 			if (m->m_len < ETHER_CRC_LEN) {
7042 				rxq->rxq_tail->m_len
7043 				    -= (ETHER_CRC_LEN - m->m_len);
7044 				m->m_len = 0;
7045 			} else
7046 				m->m_len -= ETHER_CRC_LEN;
7047 			len = rxq->rxq_len - ETHER_CRC_LEN;
7048 		} else
7049 			len = rxq->rxq_len;
7050 
7051 		WM_RXCHAIN_LINK(rxq, m);
7052 
7053 		*rxq->rxq_tailp = NULL;
7054 		m = rxq->rxq_head;
7055 
7056 		WM_RXCHAIN_RESET(rxq);
7057 
7058 		DPRINTF(WM_DEBUG_RX,
7059 		    ("%s: RX: have entire packet, len -> %d\n",
7060 		    device_xname(sc->sc_dev), len));
7061 
7062 		/* If an error occurred, update stats and drop the packet. */
7063 		if (errors &
7064 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
7065 			if (errors & WRX_ER_SE)
7066 				log(LOG_WARNING, "%s: symbol error\n",
7067 				    device_xname(sc->sc_dev));
7068 			else if (errors & WRX_ER_SEQ)
7069 				log(LOG_WARNING, "%s: receive sequence error\n",
7070 				    device_xname(sc->sc_dev));
7071 			else if (errors & WRX_ER_CE)
7072 				log(LOG_WARNING, "%s: CRC error\n",
7073 				    device_xname(sc->sc_dev));
7074 			m_freem(m);
7075 			continue;
7076 		}
7077 
7078 		/* No errors.  Receive the packet. */
7079 		m->m_pkthdr.rcvif = ifp;
7080 		m->m_pkthdr.len = len;
7081 
7082 		/*
7083 		 * If VLANs are enabled, VLAN packets have been unwrapped
7084 		 * for us.  Associate the tag with the packet.
7085 		 */
7086 		/* XXX Should check for i350 and i354 */
7087 		if ((status & WRX_ST_VP) != 0) {
7088 			VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue);
7089 		}
7090 
7091 		/* Set up checksum info for this packet. */
7092 		if ((status & WRX_ST_IXSM) == 0) {
7093 			if (status & WRX_ST_IPCS) {
7094 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
7095 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
7096 				if (errors & WRX_ER_IPE)
7097 					m->m_pkthdr.csum_flags |=
7098 					    M_CSUM_IPv4_BAD;
7099 			}
7100 			if (status & WRX_ST_TCPCS) {
7101 				/*
7102 				 * Note: we don't know if this was TCP or UDP,
7103 				 * so we just set both bits, and expect the
7104 				 * upper layers to deal.
7105 				 */
7106 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
7107 				m->m_pkthdr.csum_flags |=
7108 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7109 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
7110 				if (errors & WRX_ER_TCPE)
7111 					m->m_pkthdr.csum_flags |=
7112 					    M_CSUM_TCP_UDP_BAD;
7113 			}
7114 		}
7115 
7116 		ifp->if_ipackets++;
7117 
7118 		WM_RX_UNLOCK(rxq);
7119 
7120 		/* Pass this up to any BPF listeners. */
7121 		bpf_mtap(ifp, m);
7122 
7123 		/* Pass it on. */
7124 		(*ifp->if_input)(ifp, m);
7125 
7126 		WM_RX_LOCK(rxq);
7127 
7128 		if (sc->sc_stopping)
7129 			break;
7130 	}
7131 
7132 	/* Update the receive pointer. */
7133 	rxq->rxq_ptr = i;
7134 	if (count != 0)
7135 		rnd_add_uint32(&sc->rnd_source, count);
7136 
7137 	DPRINTF(WM_DEBUG_RX,
7138 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
7139 }
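
/*
 * Illustrative sketch (not compiled in): the FCS trimming above must
 * handle a final mbuf that holds fewer than ETHER_CRC_LEN (4) bytes,
 * in which case part of the CRC spills into the previous buffer in
 * the chain.  Condensed form of just that adjustment, assuming 'tail'
 * is the mbuf preceding 'm':
 */
#if 0
static void
trim_fcs(struct mbuf *tail, struct mbuf *m)
{

	if (m->m_len < ETHER_CRC_LEN) {
		/* The CRC straddles two buffers; shorten both. */
		tail->m_len -= ETHER_CRC_LEN - m->m_len;
		m->m_len = 0;
	} else
		m->m_len -= ETHER_CRC_LEN;
}
#endif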
7140 
7141 /*
7142  * wm_linkintr_gmii:
7143  *
7144  *	Helper; handle link interrupts for GMII.
7145  */
7146 static void
7147 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
7148 {
7149 
7150 	KASSERT(WM_CORE_LOCKED(sc));
7151 
7152 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7153 		__func__));
7154 
7155 	if (icr & ICR_LSC) {
7156 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
7157 
7158 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
7159 			wm_gig_downshift_workaround_ich8lan(sc);
7160 
7161 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
7162 			device_xname(sc->sc_dev)));
7163 		mii_pollstat(&sc->sc_mii);
7164 		if (sc->sc_type == WM_T_82543) {
7165 			int miistatus, active;
7166 
7167 			/*
7168 			 * With 82543, we need to force speed and
7169 			 * duplex on the MAC equal to what the PHY
7170 			 * speed and duplex configuration is.
7171 			 */
7172 			miistatus = sc->sc_mii.mii_media_status;
7173 
7174 			if (miistatus & IFM_ACTIVE) {
7175 				active = sc->sc_mii.mii_media_active;
7176 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
7177 				switch (IFM_SUBTYPE(active)) {
7178 				case IFM_10_T:
7179 					sc->sc_ctrl |= CTRL_SPEED_10;
7180 					break;
7181 				case IFM_100_TX:
7182 					sc->sc_ctrl |= CTRL_SPEED_100;
7183 					break;
7184 				case IFM_1000_T:
7185 					sc->sc_ctrl |= CTRL_SPEED_1000;
7186 					break;
7187 				default:
7188 					/*
7189 					 * fiber?
7190 					 * Fiber?
7191 					 * Should not enter here.
7192 					printf("unknown media (%x)\n", active);
7193 					break;
7194 				}
7195 				if (active & IFM_FDX)
7196 					sc->sc_ctrl |= CTRL_FD;
7197 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7198 			}
7199 		} else if ((sc->sc_type == WM_T_ICH8)
7200 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
7201 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
7202 		} else if (sc->sc_type == WM_T_PCH) {
7203 			wm_k1_gig_workaround_hv(sc,
7204 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
7205 		}
7206 
7207 		if ((sc->sc_phytype == WMPHY_82578)
7208 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
7209 			== IFM_1000_T)) {
7210 
7211 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
7212 				delay(200*1000); /* XXX too big */
7213 
7214 				/* Link stall fix for link up */
7215 				wm_gmii_hv_writereg(sc->sc_dev, 1,
7216 				    HV_MUX_DATA_CTRL,
7217 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
7218 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
7219 				wm_gmii_hv_writereg(sc->sc_dev, 1,
7220 				    HV_MUX_DATA_CTRL,
7221 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
7222 			}
7223 		}
7224 	} else if (icr & ICR_RXSEQ) {
7225 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
7226 			device_xname(sc->sc_dev)));
7227 	}
7228 }
7229 
7230 /*
7231  * wm_linkintr_tbi:
7232  *
7233  *	Helper; handle link interrupts for TBI mode.
7234  */
7235 static void
7236 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
7237 {
7238 	uint32_t status;
7239 
7240 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7241 		__func__));
7242 
7243 	status = CSR_READ(sc, WMREG_STATUS);
7244 	if (icr & ICR_LSC) {
7245 		if (status & STATUS_LU) {
7246 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
7247 			    device_xname(sc->sc_dev),
7248 			    (status & STATUS_FD) ? "FDX" : "HDX"));
7249 			/*
7250 			 * NOTE: CTRL will update TFCE and RFCE automatically,
7251 			 * so we should update sc->sc_ctrl
7252 			 */
7253 
7254 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7255 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7256 			sc->sc_fcrtl &= ~FCRTL_XONE;
7257 			if (status & STATUS_FD)
7258 				sc->sc_tctl |=
7259 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7260 			else
7261 				sc->sc_tctl |=
7262 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7263 			if (sc->sc_ctrl & CTRL_TFCE)
7264 				sc->sc_fcrtl |= FCRTL_XONE;
7265 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7266 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7267 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
7268 				      sc->sc_fcrtl);
7269 			sc->sc_tbi_linkup = 1;
7270 		} else {
7271 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
7272 			    device_xname(sc->sc_dev)));
7273 			sc->sc_tbi_linkup = 0;
7274 		}
7275 		/* Update LED */
7276 		wm_tbi_serdes_set_linkled(sc);
7277 	} else if (icr & ICR_RXSEQ) {
7278 		DPRINTF(WM_DEBUG_LINK,
7279 		    ("%s: LINK: Receive sequence error\n",
7280 		    device_xname(sc->sc_dev)));
7281 	}
7282 }
7283 
7284 /*
7285  * wm_linkintr_serdes:
7286  *
7287  *	Helper; handle link interrupts for SERDES mode.
7288  */
7289 static void
7290 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
7291 {
7292 	struct mii_data *mii = &sc->sc_mii;
7293 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7294 	uint32_t pcs_adv, pcs_lpab, reg;
7295 
7296 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7297 		__func__));
7298 
7299 	if (icr & ICR_LSC) {
7300 		/* Check PCS */
7301 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
7302 		if ((reg & PCS_LSTS_LINKOK) != 0) {
7303 			mii->mii_media_status |= IFM_ACTIVE;
7304 			sc->sc_tbi_linkup = 1;
7305 		} else {
7306 			mii->mii_media_active |= IFM_NONE;
7307 			sc->sc_tbi_linkup = 0;
7308 			wm_tbi_serdes_set_linkled(sc);
7309 			return;
7310 		}
7311 		mii->mii_media_active |= IFM_1000_SX;
7312 		if ((reg & PCS_LSTS_FDX) != 0)
7313 			mii->mii_media_active |= IFM_FDX;
7314 		else
7315 			mii->mii_media_active |= IFM_HDX;
7316 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7317 			/* Check flow */
7318 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
7319 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
7320 				DPRINTF(WM_DEBUG_LINK,
7321 				    ("XXX LINKOK but not ACOMP\n"));
7322 				return;
7323 			}
7324 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
7325 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
7326 			DPRINTF(WM_DEBUG_LINK,
7327 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
7328 			if ((pcs_adv & TXCW_SYM_PAUSE)
7329 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
7330 				mii->mii_media_active |= IFM_FLOW
7331 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
7332 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
7333 			    && (pcs_adv & TXCW_ASYM_PAUSE)
7334 			    && (pcs_lpab & TXCW_SYM_PAUSE)
7335 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
7336 				mii->mii_media_active |= IFM_FLOW
7337 				    | IFM_ETH_TXPAUSE;
7338 			else if ((pcs_adv & TXCW_SYM_PAUSE)
7339 			    && (pcs_adv & TXCW_ASYM_PAUSE)
7340 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
7341 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
7342 				mii->mii_media_active |= IFM_FLOW
7343 				    | IFM_ETH_RXPAUSE;
7344 		}
7345 		/* Update LED */
7346 		wm_tbi_serdes_set_linkled(sc);
7347 	} else {
7348 		DPRINTF(WM_DEBUG_LINK,
7349 		    ("%s: LINK: Receive sequence error\n",
7350 		    device_xname(sc->sc_dev)));
7351 	}
7352 }
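
/*
 * Illustrative sketch (not compiled in): the pause-bit resolution in
 * wm_linkintr_serdes() above follows the usual IEEE 802.3 annex 28B
 * rules -- symmetric pause on both ends enables flow control in both
 * directions, otherwise matching asymmetric capabilities may enable a
 * single direction.  The same decision as a compact, hypothetical
 * helper (local = PCS_ANADV bits, remote = PCS_LPAB bits):
 */
#if 0
static int
wm_pause_resolve(bool loc_sym, bool loc_asym, bool rem_sym, bool rem_asym)
{

	if (loc_sym && rem_sym)
		return IFM_FLOW | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
	if (!loc_sym && loc_asym && rem_sym && rem_asym)
		return IFM_FLOW | IFM_ETH_TXPAUSE;	/* We may send pause */
	if (loc_sym && loc_asym && !rem_sym && rem_asym)
		return IFM_FLOW | IFM_ETH_RXPAUSE;	/* We honor pause */
	return 0;					/* No flow control */
}
#endif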
7353 
7354 /*
7355  * wm_linkintr:
7356  *
7357  *	Helper; handle link interrupts.
7358  */
7359 static void
7360 wm_linkintr(struct wm_softc *sc, uint32_t icr)
7361 {
7362 
7363 	KASSERT(WM_CORE_LOCKED(sc));
7364 
7365 	if (sc->sc_flags & WM_F_HAS_MII)
7366 		wm_linkintr_gmii(sc, icr);
7367 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
7368 	    && (sc->sc_type >= WM_T_82575))
7369 		wm_linkintr_serdes(sc, icr);
7370 	else
7371 		wm_linkintr_tbi(sc, icr);
7372 }
7373 
7374 /*
7375  * wm_intr_legacy:
7376  *
7377  *	Interrupt service routine for INTx and MSI.
7378  */
7379 static int
7380 wm_intr_legacy(void *arg)
7381 {
7382 	struct wm_softc *sc = arg;
7383 	struct wm_txqueue *txq = &sc->sc_txq[0];
7384 	struct wm_rxqueue *rxq = &sc->sc_rxq[0];
7385 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7386 	uint32_t icr, rndval = 0;
7387 	int handled = 0;
7388 
7389 	DPRINTF(WM_DEBUG_TX,
7390 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
7391 	while (1 /* CONSTCOND */) {
7392 		icr = CSR_READ(sc, WMREG_ICR);
7393 		if ((icr & sc->sc_icr) == 0)
7394 			break;
7395 		if (rndval == 0)
7396 			rndval = icr;
7397 
7398 		WM_RX_LOCK(rxq);
7399 
7400 		if (sc->sc_stopping) {
7401 			WM_RX_UNLOCK(rxq);
7402 			break;
7403 		}
7404 
7405 		handled = 1;
7406 
7407 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7408 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
7409 			DPRINTF(WM_DEBUG_RX,
7410 			    ("%s: RX: got Rx intr 0x%08x\n",
7411 			    device_xname(sc->sc_dev),
7412 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
7413 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
7414 		}
7415 #endif
7416 		wm_rxeof(rxq);
7417 
7418 		WM_RX_UNLOCK(rxq);
7419 		WM_TX_LOCK(txq);
7420 
7421 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7422 		if (icr & ICR_TXDW) {
7423 			DPRINTF(WM_DEBUG_TX,
7424 			    ("%s: TX: got TXDW interrupt\n",
7425 			    device_xname(sc->sc_dev)));
7426 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
7427 		}
7428 #endif
7429 		wm_txeof(sc);
7430 
7431 		WM_TX_UNLOCK(txq);
7432 		WM_CORE_LOCK(sc);
7433 
7434 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
7435 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
7436 			wm_linkintr(sc, icr);
7437 		}
7438 
7439 		WM_CORE_UNLOCK(sc);
7440 
7441 		if (icr & ICR_RXO) {
7442 #if defined(WM_DEBUG)
7443 			log(LOG_WARNING, "%s: Receive overrun\n",
7444 			    device_xname(sc->sc_dev));
7445 #endif /* defined(WM_DEBUG) */
7446 		}
7447 	}
7448 
7449 	rnd_add_uint32(&sc->rnd_source, rndval);
7450 
7451 	if (handled) {
7452 		/* Try to get more packets going. */
7453 		ifp->if_start(ifp);
7454 	}
7455 
7456 	return handled;
7457 }
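
/*
 * Illustrative note (not compiled in): on these MACs, reading ICR
 * clears the bits that were set, so the loop in wm_intr_legacy()
 * above drains causes until a read returns none of the bits we
 * unmasked (sc->sc_icr).  The skeleton of that drain loop, as a
 * hypothetical helper:
 */
#if 0
static void
wm_drain_icr(struct wm_softc *sc)
{
	uint32_t icr;

	for (;;) {
		icr = CSR_READ(sc, WMREG_ICR);	/* The read clears ICR */
		if ((icr & sc->sc_icr) == 0)
			break;			/* Nothing left for us */
		/* ... service Rx, Tx and link causes ... */
	}
}
#endif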
7458 
7459 /*
7460  * wm_txintr_msix:
7461  *
7462  *	Interrupt service routine for TX complete interrupt for MSI-X.
7463  */
7464 static int
7465 wm_txintr_msix(void *arg)
7466 {
7467 	struct wm_txqueue *txq = arg;
7468 	struct wm_softc *sc = txq->txq_sc;
7469 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7470 	int handled = 0;
7471 
7472 	DPRINTF(WM_DEBUG_TX,
7473 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
7474 
7475 	if (sc->sc_type == WM_T_82574)
7476 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(txq->txq_id));
7477 	else if (sc->sc_type == WM_T_82575)
7478 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(txq->txq_id));
7479 	else
7480 		CSR_WRITE(sc, WMREG_EIMC, 1 << txq->txq_intr_idx);
7481 
7482 	WM_TX_LOCK(txq);
7483 
7484 	if (sc->sc_stopping)
7485 		goto out;
7486 
7487 	WM_EVCNT_INCR(&sc->sc_ev_txdw);
7488 	handled = wm_txeof(sc);
7489 
7490 out:
7491 	WM_TX_UNLOCK(txq);
7492 
7493 	if (sc->sc_type == WM_T_82574)
7494 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(txq->txq_id));
7495 	else if (sc->sc_type == WM_T_82575)
7496 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(txq->txq_id));
7497 	else
7498 		CSR_WRITE(sc, WMREG_EIMS, 1 << txq->txq_intr_idx);
7499 
7500 	if (handled) {
7501 		/* Try to get more packets going. */
7502 		ifp->if_start(ifp);
7503 	}
7504 
7505 	return handled;
7506 }
7507 
7508 /*
7509  * wm_rxintr_msix:
7510  *
7511  *	Interrupt service routine for RX interrupt for MSI-X.
7512  */
7513 static int
7514 wm_rxintr_msix(void *arg)
7515 {
7516 	struct wm_rxqueue *rxq = arg;
7517 	struct wm_softc *sc = rxq->rxq_sc;
7518 
7519 	DPRINTF(WM_DEBUG_RX,
7520 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
7521 
7522 	if (sc->sc_type == WM_T_82574)
7523 		CSR_WRITE(sc, WMREG_IMC, ICR_RXQ(rxq->rxq_id));
7524 	else if (sc->sc_type == WM_T_82575)
7525 		CSR_WRITE(sc, WMREG_EIMC, EITR_RX_QUEUE(rxq->rxq_id));
7526 	else
7527 		CSR_WRITE(sc, WMREG_EIMC, 1 << rxq->rxq_intr_idx);
7528 
7529 	WM_RX_LOCK(rxq);
7530 
7531 	if (sc->sc_stopping)
7532 		goto out;
7533 
7534 	WM_EVCNT_INCR(&sc->sc_ev_rxintr);
7535 	wm_rxeof(rxq);
7536 
7537 out:
7538 	WM_RX_UNLOCK(rxq);
7539 
7540 	if (sc->sc_type == WM_T_82574)
7541 		CSR_WRITE(sc, WMREG_IMS, ICR_RXQ(rxq->rxq_id));
7542 	else if (sc->sc_type == WM_T_82575)
7543 		CSR_WRITE(sc, WMREG_EIMS, EITR_RX_QUEUE(rxq->rxq_id));
7544 	else
7545 		CSR_WRITE(sc, WMREG_EIMS, 1 << rxq->rxq_intr_idx);
7546 
7547 	return 1;
7548 }
7549 
7550 /*
7551  * wm_linkintr_msix:
7552  *
7553  *	Interrupt service routine for link status change for MSI-X.
7554  */
7555 static int
7556 wm_linkintr_msix(void *arg)
7557 {
7558 	struct wm_softc *sc = arg;
7559 	uint32_t reg;
7560 
7561 	DPRINTF(WM_DEBUG_LINK,
7562 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
7563 
7564 	reg = CSR_READ(sc, WMREG_ICR);
7565 	WM_CORE_LOCK(sc);
7566 	if ((sc->sc_stopping) || ((reg & ICR_LSC) == 0))
7567 		goto out;
7568 
7569 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
7570 	wm_linkintr(sc, ICR_LSC);
7571 
7572 out:
7573 	WM_CORE_UNLOCK(sc);
7574 
7575 	if (sc->sc_type == WM_T_82574)
7576 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
7577 	else if (sc->sc_type == WM_T_82575)
7578 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
7579 	else
7580 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
7581 
7582 	return 1;
7583 }
7584 
7585 /*
7586  * Media related.
7587  * GMII, SGMII, TBI (and SERDES)
7588  */
7589 
7590 /* Common */
7591 
7592 /*
7593  * wm_tbi_serdes_set_linkled:
7594  *
7595  *	Update the link LED on TBI and SERDES devices.
7596  */
7597 static void
7598 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
7599 {
7600 
7601 	if (sc->sc_tbi_linkup)
7602 		sc->sc_ctrl |= CTRL_SWDPIN(0);
7603 	else
7604 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
7605 
7606 	/* 82540 or newer devices are active low */
7607 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
7608 
7609 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7610 }
7611 
7612 /* GMII related */
7613 
7614 /*
7615  * wm_gmii_reset:
7616  *
7617  *	Reset the PHY.
7618  */
7619 static void
7620 wm_gmii_reset(struct wm_softc *sc)
7621 {
7622 	uint32_t reg;
7623 	int rv;
7624 
7625 	/* get phy semaphore */
7626 	switch (sc->sc_type) {
7627 	case WM_T_82571:
7628 	case WM_T_82572:
7629 	case WM_T_82573:
7630 	case WM_T_82574:
7631 	case WM_T_82583:
7632 		 /* XXX should get sw semaphore, too */
7633 		rv = wm_get_swsm_semaphore(sc);
7634 		break;
7635 	case WM_T_82575:
7636 	case WM_T_82576:
7637 	case WM_T_82580:
7638 	case WM_T_I350:
7639 	case WM_T_I354:
7640 	case WM_T_I210:
7641 	case WM_T_I211:
7642 	case WM_T_80003:
7643 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7644 		break;
7645 	case WM_T_ICH8:
7646 	case WM_T_ICH9:
7647 	case WM_T_ICH10:
7648 	case WM_T_PCH:
7649 	case WM_T_PCH2:
7650 	case WM_T_PCH_LPT:
7651 		rv = wm_get_swfwhw_semaphore(sc);
7652 		break;
7653 	default:
7654 		/* Nothing to do */
7655 		rv = 0;
7656 		break;
7657 	}
7658 	if (rv != 0) {
7659 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7660 		    __func__);
7661 		return;
7662 	}
7663 
7664 	switch (sc->sc_type) {
7665 	case WM_T_82542_2_0:
7666 	case WM_T_82542_2_1:
7667 		/* null */
7668 		break;
7669 	case WM_T_82543:
7670 		/*
7671 		 * With 82543, we need to force speed and duplex on the MAC
7672 		 * equal to what the PHY speed and duplex configuration is.
7673 		 * In addition, we need to perform a hardware reset on the PHY
7674 		 * to take it out of reset.
7675 		 */
7676 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
7677 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7678 
7679 		/* The PHY reset pin is active-low. */
7680 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
7681 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
7682 		    CTRL_EXT_SWDPIN(4));
7683 		reg |= CTRL_EXT_SWDPIO(4);
7684 
7685 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7686 		CSR_WRITE_FLUSH(sc);
7687 		delay(10*1000);
7688 
7689 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
7690 		CSR_WRITE_FLUSH(sc);
7691 		delay(150);
7692 #if 0
7693 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
7694 #endif
7695 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
7696 		break;
7697 	case WM_T_82544:	/* reset 10000us */
7698 	case WM_T_82540:
7699 	case WM_T_82545:
7700 	case WM_T_82545_3:
7701 	case WM_T_82546:
7702 	case WM_T_82546_3:
7703 	case WM_T_82541:
7704 	case WM_T_82541_2:
7705 	case WM_T_82547:
7706 	case WM_T_82547_2:
7707 	case WM_T_82571:	/* reset 100us */
7708 	case WM_T_82572:
7709 	case WM_T_82573:
7710 	case WM_T_82574:
7711 	case WM_T_82575:
7712 	case WM_T_82576:
7713 	case WM_T_82580:
7714 	case WM_T_I350:
7715 	case WM_T_I354:
7716 	case WM_T_I210:
7717 	case WM_T_I211:
7718 	case WM_T_82583:
7719 	case WM_T_80003:
7720 		/* generic reset */
7721 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7722 		CSR_WRITE_FLUSH(sc);
7723 		delay(20000);
7724 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7725 		CSR_WRITE_FLUSH(sc);
7726 		delay(20000);
7727 
7728 		if ((sc->sc_type == WM_T_82541)
7729 		    || (sc->sc_type == WM_T_82541_2)
7730 		    || (sc->sc_type == WM_T_82547)
7731 		    || (sc->sc_type == WM_T_82547_2)) {
7732 			/* Workarounds for IGP are done in igp_reset() */
7733 			/* XXX add code to set LED after phy reset */
7734 		}
7735 		break;
7736 	case WM_T_ICH8:
7737 	case WM_T_ICH9:
7738 	case WM_T_ICH10:
7739 	case WM_T_PCH:
7740 	case WM_T_PCH2:
7741 	case WM_T_PCH_LPT:
7742 		/* generic reset */
7743 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7744 		CSR_WRITE_FLUSH(sc);
7745 		delay(100);
7746 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7747 		CSR_WRITE_FLUSH(sc);
7748 		delay(150);
7749 		break;
7750 	default:
7751 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
7752 		    __func__);
7753 		break;
7754 	}
7755 
7756 	/* release PHY semaphore */
7757 	switch (sc->sc_type) {
7758 	case WM_T_82571:
7759 	case WM_T_82572:
7760 	case WM_T_82573:
7761 	case WM_T_82574:
7762 	case WM_T_82583:
7763 		 /* XXX should put sw semaphore, too */
7764 		wm_put_swsm_semaphore(sc);
7765 		break;
7766 	case WM_T_82575:
7767 	case WM_T_82576:
7768 	case WM_T_82580:
7769 	case WM_T_I350:
7770 	case WM_T_I354:
7771 	case WM_T_I210:
7772 	case WM_T_I211:
7773 	case WM_T_80003:
7774 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7775 		break;
7776 	case WM_T_ICH8:
7777 	case WM_T_ICH9:
7778 	case WM_T_ICH10:
7779 	case WM_T_PCH:
7780 	case WM_T_PCH2:
7781 	case WM_T_PCH_LPT:
7782 		wm_put_swfwhw_semaphore(sc);
7783 		break;
7784 	default:
7785 		/* Nothing to do */
7786 		rv = 0;
7787 		break;
7788 	}
7789 
7790 	/* get_cfg_done */
7791 	wm_get_cfg_done(sc);
7792 
7793 	/* extra setup */
7794 	switch (sc->sc_type) {
7795 	case WM_T_82542_2_0:
7796 	case WM_T_82542_2_1:
7797 	case WM_T_82543:
7798 	case WM_T_82544:
7799 	case WM_T_82540:
7800 	case WM_T_82545:
7801 	case WM_T_82545_3:
7802 	case WM_T_82546:
7803 	case WM_T_82546_3:
7804 	case WM_T_82541_2:
7805 	case WM_T_82547_2:
7806 	case WM_T_82571:
7807 	case WM_T_82572:
7808 	case WM_T_82573:
7809 	case WM_T_82575:
7810 	case WM_T_82576:
7811 	case WM_T_82580:
7812 	case WM_T_I350:
7813 	case WM_T_I354:
7814 	case WM_T_I210:
7815 	case WM_T_I211:
7816 	case WM_T_80003:
7817 		/* null */
7818 		break;
7819 	case WM_T_82574:
7820 	case WM_T_82583:
7821 		wm_lplu_d0_disable(sc);
7822 		break;
7823 	case WM_T_82541:
7824 	case WM_T_82547:
7825 		/* XXX Configure the LED after PHY reset */
7826 		break;
7827 	case WM_T_ICH8:
7828 	case WM_T_ICH9:
7829 	case WM_T_ICH10:
7830 	case WM_T_PCH:
7831 	case WM_T_PCH2:
7832 	case WM_T_PCH_LPT:
7833 		/* Allow time for h/w to get to a quiescent state after reset */
7834 		delay(10*1000);
7835 
7836 		if (sc->sc_type == WM_T_PCH)
7837 			wm_hv_phy_workaround_ich8lan(sc);
7838 
7839 		if (sc->sc_type == WM_T_PCH2)
7840 			wm_lv_phy_workaround_ich8lan(sc);
7841 
7842 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
7843 			/*
7844 			 * dummy read to clear the phy wakeup bit after lcd
7845 			 * reset
7846 			 */
7847 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
7848 		}
7849 
7850 		/*
7851 		 * XXX Configure the LCD with the extended configuration region
7852 		 * in NVM
7853 		 */
7854 
7855 		/* Disable D0 LPLU. */
7856 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
7857 			wm_lplu_d0_disable_pch(sc);
7858 		else
7859 			wm_lplu_d0_disable(sc);	/* ICH* */
7860 		break;
7861 	default:
7862 		panic("%s: unknown type\n", __func__);
7863 		break;
7864 	}
7865 }
7866 
7867 /*
7868  * wm_get_phy_id_82575:
7869  *
7870  * Return PHY ID. Return -1 if it failed.
7871  * Return the PHY ID.  Return -1 on failure.
7872 static int
7873 wm_get_phy_id_82575(struct wm_softc *sc)
7874 {
7875 	uint32_t reg;
7876 	int phyid = -1;
7877 
7878 	/* XXX */
7879 	if ((sc->sc_flags & WM_F_SGMII) == 0)
7880 		return -1;
7881 
7882 	if (wm_sgmii_uses_mdio(sc)) {
7883 		switch (sc->sc_type) {
7884 		case WM_T_82575:
7885 		case WM_T_82576:
7886 			reg = CSR_READ(sc, WMREG_MDIC);
7887 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
7888 			break;
7889 		case WM_T_82580:
7890 		case WM_T_I350:
7891 		case WM_T_I354:
7892 		case WM_T_I210:
7893 		case WM_T_I211:
7894 			reg = CSR_READ(sc, WMREG_MDICNFG);
7895 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
7896 			break;
7897 		default:
7898 			return -1;
7899 		}
7900 	}
7901 
7902 	return phyid;
7903 }
7904 
7905 
7906 /*
7907  * wm_gmii_mediainit:
7908  *
7909  *	Initialize media for use on 1000BASE-T devices.
7910  */
7911 static void
7912 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
7913 {
7914 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7915 	struct mii_data *mii = &sc->sc_mii;
7916 	uint32_t reg;
7917 
7918 	/* We have GMII. */
7919 	sc->sc_flags |= WM_F_HAS_MII;
7920 
7921 	if (sc->sc_type == WM_T_80003)
7922 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
7923 	else
7924 		sc->sc_tipg = TIPG_1000T_DFLT;
7925 
7926 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
7927 	if ((sc->sc_type == WM_T_82580)
7928 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
7929 	    || (sc->sc_type == WM_T_I211)) {
7930 		reg = CSR_READ(sc, WMREG_PHPM);
7931 		reg &= ~PHPM_GO_LINK_D;
7932 		CSR_WRITE(sc, WMREG_PHPM, reg);
7933 	}
7934 
7935 	/*
7936 	 * Let the chip set speed/duplex on its own based on
7937 	 * signals from the PHY.
7938 	 * XXXbouyer - I'm not sure this is right for the 80003,
7939 	 * the em driver only sets CTRL_SLU here - but it seems to work.
7940 	 */
7941 	sc->sc_ctrl |= CTRL_SLU;
7942 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7943 
7944 	/* Initialize our media structures and probe the GMII. */
7945 	mii->mii_ifp = ifp;
7946 
7947 	/*
7948 	 * Determine the PHY access method.
7949 	 *
7950 	 *  For SGMII, use the SGMII specific method.
7951 	 *
7952 	 *  For some devices, we can determine the PHY access method
7953 	 * from sc_type.
7954 	 *
7955 	 *  For ICH and PCH variants, it's difficult to determine the PHY
7956 	 * access method from sc_type, so use the PCI product ID for some
7957 	 * devices.
7958 	 * For other ICH8 variants, try igp's method first; if the PHY
7959 	 * can't be detected, fall back to bm's method.
7960 	 */
7961 	switch (prodid) {
7962 	case PCI_PRODUCT_INTEL_PCH_M_LM:
7963 	case PCI_PRODUCT_INTEL_PCH_M_LC:
7964 		/* 82577 */
7965 		sc->sc_phytype = WMPHY_82577;
7966 		break;
7967 	case PCI_PRODUCT_INTEL_PCH_D_DM:
7968 	case PCI_PRODUCT_INTEL_PCH_D_DC:
7969 		/* 82578 */
7970 		sc->sc_phytype = WMPHY_82578;
7971 		break;
7972 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
7973 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
7974 		/* 82579 */
7975 		sc->sc_phytype = WMPHY_82579;
7976 		break;
7977 	case PCI_PRODUCT_INTEL_82801I_BM:
7978 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
7979 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
7980 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
7981 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
7982 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
7983 		/* 82567 */
7984 		sc->sc_phytype = WMPHY_BM;
7985 		mii->mii_readreg = wm_gmii_bm_readreg;
7986 		mii->mii_writereg = wm_gmii_bm_writereg;
7987 		break;
7988 	default:
7989 		if (((sc->sc_flags & WM_F_SGMII) != 0)
7990 		    && !wm_sgmii_uses_mdio(sc)){
7991 			/* SGMII */
7992 			mii->mii_readreg = wm_sgmii_readreg;
7993 			mii->mii_writereg = wm_sgmii_writereg;
7994 		} else if (sc->sc_type >= WM_T_80003) {
7995 			/* 80003 */
7996 			mii->mii_readreg = wm_gmii_i80003_readreg;
7997 			mii->mii_writereg = wm_gmii_i80003_writereg;
7998 		} else if (sc->sc_type >= WM_T_I210) {
7999 			/* I210 and I211 */
8000 			mii->mii_readreg = wm_gmii_gs40g_readreg;
8001 			mii->mii_writereg = wm_gmii_gs40g_writereg;
8002 		} else if (sc->sc_type >= WM_T_82580) {
8003 			/* 82580, I350 and I354 */
8004 			sc->sc_phytype = WMPHY_82580;
8005 			mii->mii_readreg = wm_gmii_82580_readreg;
8006 			mii->mii_writereg = wm_gmii_82580_writereg;
8007 		} else if (sc->sc_type >= WM_T_82544) {
8008 			/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
8009 			mii->mii_readreg = wm_gmii_i82544_readreg;
8010 			mii->mii_writereg = wm_gmii_i82544_writereg;
8011 		} else {
8012 			mii->mii_readreg = wm_gmii_i82543_readreg;
8013 			mii->mii_writereg = wm_gmii_i82543_writereg;
8014 		}
8015 		break;
8016 	}
8017 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_LPT)) {
8018 		/* All PCH* use _hv_ */
8019 		mii->mii_readreg = wm_gmii_hv_readreg;
8020 		mii->mii_writereg = wm_gmii_hv_writereg;
8021 	}
8022 	mii->mii_statchg = wm_gmii_statchg;
8023 
8024 	wm_gmii_reset(sc);
8025 
8026 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
8027 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
8028 	    wm_gmii_mediastatus);
8029 
8030 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
8031 	    || (sc->sc_type == WM_T_82580)
8032 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
8033 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
8034 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
8035 			/* Attach only one port */
8036 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
8037 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
8038 		} else {
8039 			int i, id;
8040 			uint32_t ctrl_ext;
8041 
8042 			id = wm_get_phy_id_82575(sc);
8043 			if (id != -1) {
8044 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
8045 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
8046 			}
8047 			if ((id == -1)
8048 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
8049 				/* Power on the SGMII PHY if it is disabled */
8050 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8051 				CSR_WRITE(sc, WMREG_CTRL_EXT,
8052 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
8053 				CSR_WRITE_FLUSH(sc);
8054 				delay(300*1000); /* XXX too long */
8055 
8056 				/* Try PHY addresses 1 to 7 */
8057 				for (i = 1; i < 8; i++)
8058 					mii_attach(sc->sc_dev, &sc->sc_mii,
8059 					    0xffffffff, i, MII_OFFSET_ANY,
8060 					    MIIF_DOPAUSE);
8061 
8062 				/* restore previous sfp cage power state */
8063 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8064 			}
8065 		}
8066 	} else {
8067 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8068 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
8069 	}
8070 
8071 	/*
8072 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
8073 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
8074 	 */
8075 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
8076 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
8077 		wm_set_mdio_slow_mode_hv(sc);
8078 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8079 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
8080 	}
8081 
8082 	/*
8083 	 * (For ICH8 variants)
8084 	 * If PHY detection failed, use BM's r/w function and retry.
8085 	 */
8086 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
8087 		/* if failed, retry with *_bm_* */
8088 		mii->mii_readreg = wm_gmii_bm_readreg;
8089 		mii->mii_writereg = wm_gmii_bm_writereg;
8090 
8091 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8092 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
8093 	}
8094 
8095 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
8096 		/* No PHY was found */
8097 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
8098 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
8099 		sc->sc_phytype = WMPHY_NONE;
8100 	} else {
8101 		/*
8102 		 * PHY Found!
8103 		 * Check PHY type.
8104 		 */
8105 		uint32_t model;
8106 		struct mii_softc *child;
8107 
8108 		child = LIST_FIRST(&mii->mii_phys);
8109 		model = child->mii_mpd_model;
8110 		if (model == MII_MODEL_yyINTEL_I82566)
8111 			sc->sc_phytype = WMPHY_IGP_3;
8112 
8113 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
8114 	}
8115 }
8116 
8117 /*
8118  * wm_gmii_mediachange:	[ifmedia interface function]
8119  *
8120  *	Set hardware to newly-selected media on a 1000BASE-T device.
8121  */
8122 static int
8123 wm_gmii_mediachange(struct ifnet *ifp)
8124 {
8125 	struct wm_softc *sc = ifp->if_softc;
8126 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8127 	int rc;
8128 
8129 	if ((ifp->if_flags & IFF_UP) == 0)
8130 		return 0;
8131 
8132 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
8133 	sc->sc_ctrl |= CTRL_SLU;
8134 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8135 	    || (sc->sc_type > WM_T_82543)) {
8136 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
8137 	} else {
8138 		sc->sc_ctrl &= ~CTRL_ASDE;
8139 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
8140 		if (ife->ifm_media & IFM_FDX)
8141 			sc->sc_ctrl |= CTRL_FD;
8142 		switch (IFM_SUBTYPE(ife->ifm_media)) {
8143 		case IFM_10_T:
8144 			sc->sc_ctrl |= CTRL_SPEED_10;
8145 			break;
8146 		case IFM_100_TX:
8147 			sc->sc_ctrl |= CTRL_SPEED_100;
8148 			break;
8149 		case IFM_1000_T:
8150 			sc->sc_ctrl |= CTRL_SPEED_1000;
8151 			break;
8152 		default:
8153 			panic("wm_gmii_mediachange: bad media 0x%x",
8154 			    ife->ifm_media);
8155 		}
8156 	}
8157 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8158 	if (sc->sc_type <= WM_T_82543)
8159 		wm_gmii_reset(sc);
8160 
8161 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
8162 		return 0;
8163 	return rc;
8164 }
8165 
8166 /*
8167  * wm_gmii_mediastatus:	[ifmedia interface function]
8168  *
8169  *	Get the current interface media status on a 1000BASE-T device.
8170  */
8171 static void
8172 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
8173 {
8174 	struct wm_softc *sc = ifp->if_softc;
8175 
8176 	ether_mediastatus(ifp, ifmr);
8177 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
8178 	    | sc->sc_flowflags;
8179 }
8180 
8181 #define	MDI_IO		CTRL_SWDPIN(2)
8182 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
8183 #define	MDI_CLK		CTRL_SWDPIN(3)
8184 
8185 static void
8186 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
8187 {
8188 	uint32_t i, v;
8189 
8190 	v = CSR_READ(sc, WMREG_CTRL);
8191 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
8192 	v |= MDI_DIR | CTRL_SWDPIO(3);
8193 
8194 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
8195 		if (data & i)
8196 			v |= MDI_IO;
8197 		else
8198 			v &= ~MDI_IO;
8199 		CSR_WRITE(sc, WMREG_CTRL, v);
8200 		CSR_WRITE_FLUSH(sc);
8201 		delay(10);
8202 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8203 		CSR_WRITE_FLUSH(sc);
8204 		delay(10);
8205 		CSR_WRITE(sc, WMREG_CTRL, v);
8206 		CSR_WRITE_FLUSH(sc);
8207 		delay(10);
8208 	}
8209 }
8210 
8211 static uint32_t
8212 wm_i82543_mii_recvbits(struct wm_softc *sc)
8213 {
8214 	uint32_t v, i, data = 0;
8215 
8216 	v = CSR_READ(sc, WMREG_CTRL);
8217 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
8218 	v |= CTRL_SWDPIO(3);
8219 
8220 	CSR_WRITE(sc, WMREG_CTRL, v);
8221 	CSR_WRITE_FLUSH(sc);
8222 	delay(10);
8223 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8224 	CSR_WRITE_FLUSH(sc);
8225 	delay(10);
8226 	CSR_WRITE(sc, WMREG_CTRL, v);
8227 	CSR_WRITE_FLUSH(sc);
8228 	delay(10);
8229 
8230 	for (i = 0; i < 16; i++) {
8231 		data <<= 1;
8232 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8233 		CSR_WRITE_FLUSH(sc);
8234 		delay(10);
8235 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
8236 			data |= 1;
8237 		CSR_WRITE(sc, WMREG_CTRL, v);
8238 		CSR_WRITE_FLUSH(sc);
8239 		delay(10);
8240 	}
8241 
8242 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8243 	CSR_WRITE_FLUSH(sc);
8244 	delay(10);
8245 	CSR_WRITE(sc, WMREG_CTRL, v);
8246 	CSR_WRITE_FLUSH(sc);
8247 	delay(10);
8248 
8249 	return data;
8250 }
8251 
8252 #undef MDI_IO
8253 #undef MDI_DIR
8254 #undef MDI_CLK
8255 
8256 /*
8257  * wm_gmii_i82543_readreg:	[mii interface function]
8258  *
8259  *	Read a PHY register on the GMII (i82543 version).
8260  */
8261 static int
8262 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
8263 {
8264 	struct wm_softc *sc = device_private(self);
8265 	int rv;
8266 
8267 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
8268 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
8269 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
8270 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
8271 
8272 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
8273 	    device_xname(sc->sc_dev), phy, reg, rv));
8274 
8275 	return rv;
8276 }
8277 
8278 /*
8279  * wm_gmii_i82543_writereg:	[mii interface function]
8280  *
8281  *	Write a PHY register on the GMII (i82543 version).
8282  */
8283 static void
8284 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
8285 {
8286 	struct wm_softc *sc = device_private(self);
8287 
8288 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
8289 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
8290 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
8291 	    (MII_COMMAND_START << 30), 32);
8292 }
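
/*
 * Illustrative sketch (not compiled in): the two functions above
 * bit-bang a complete IEEE 802.3 clause 22 MDIO frame -- a preamble
 * of 32 one-bits, then start, opcode, PHY address and register
 * address, a turnaround, and 16 data bits.  How the 14-bit header of
 * a read frame is packed (mirroring the shifts used above; the helper
 * name is hypothetical):
 */
#if 0
static uint32_t
mdio_read_header(int phy, int reg)
{

	/* start(2) | opcode(2) | phy(5) | reg(5), sent MSB first */
	return (MII_COMMAND_START << 12) | (MII_COMMAND_READ << 10) |
	    (phy << 5) | reg;
}
#endif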
8293 
8294 /*
8295  * wm_gmii_i82544_readreg:	[mii interface function]
8296  *
8297  *	Read a PHY register on the GMII.
8298  */
8299 static int
8300 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
8301 {
8302 	struct wm_softc *sc = device_private(self);
8303 	uint32_t mdic = 0;
8304 	int i, rv;
8305 
8306 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
8307 	    MDIC_REGADD(reg));
8308 
8309 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
8310 		mdic = CSR_READ(sc, WMREG_MDIC);
8311 		if (mdic & MDIC_READY)
8312 			break;
8313 		delay(50);
8314 	}
8315 
8316 	if ((mdic & MDIC_READY) == 0) {
8317 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
8318 		    device_xname(sc->sc_dev), phy, reg);
8319 		rv = 0;
8320 	} else if (mdic & MDIC_E) {
8321 #if 0 /* This is normal if no PHY is present. */
8322 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
8323 		    device_xname(sc->sc_dev), phy, reg);
8324 #endif
8325 		rv = 0;
8326 	} else {
8327 		rv = MDIC_DATA(mdic);
8328 		if (rv == 0xffff)
8329 			rv = 0;
8330 	}
8331 
8332 	return rv;
8333 }
8334 
8335 /*
8336  * wm_gmii_i82544_writereg:	[mii interface function]
8337  *
8338  *	Write a PHY register on the GMII.
8339  */
8340 static void
8341 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
8342 {
8343 	struct wm_softc *sc = device_private(self);
8344 	uint32_t mdic = 0;
8345 	int i;
8346 
8347 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
8348 	    MDIC_REGADD(reg) | MDIC_DATA(val));
8349 
8350 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
8351 		mdic = CSR_READ(sc, WMREG_MDIC);
8352 		if (mdic & MDIC_READY)
8353 			break;
8354 		delay(50);
8355 	}
8356 
8357 	if ((mdic & MDIC_READY) == 0)
8358 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
8359 		    device_xname(sc->sc_dev), phy, reg);
8360 	else if (mdic & MDIC_E)
8361 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
8362 		    device_xname(sc->sc_dev), phy, reg);
8363 }
8364 
8365 /*
8366  * wm_gmii_i80003_readreg:	[mii interface function]
8367  *
8368  *	Read a PHY register on the Kumeran bus.
8369  * This could be handled by the PHY layer if we didn't have to lock the
8370  * resource ...
8371  */
8372 static int
8373 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
8374 {
8375 	struct wm_softc *sc = device_private(self);
8376 	int sem;
8377 	int rv;
8378 
8379 	if (phy != 1) /* only one PHY on kumeran bus */
8380 		return 0;
8381 
8382 	sem = swfwphysem[sc->sc_funcid];
8383 	if (wm_get_swfw_semaphore(sc, sem)) {
8384 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8385 		    __func__);
8386 		return 0;
8387 	}
8388 
8389 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
8390 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
8391 		    reg >> GG82563_PAGE_SHIFT);
8392 	} else {
8393 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
8394 		    reg >> GG82563_PAGE_SHIFT);
8395 	}
8396 	/* Wait 200us more to work around a bug in the MDIC ready bit */
8397 	delay(200);
8398 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
8399 	delay(200);
8400 
8401 	wm_put_swfw_semaphore(sc, sem);
8402 	return rv;
8403 }
8404 
8405 /*
8406  * wm_gmii_i80003_writereg:	[mii interface function]
8407  *
8408  *	Write a PHY register on the Kumeran bus.
8409  * This could be handled by the PHY layer if we didn't have to lock the
8410  * resource ...
8411  */
8412 static void
8413 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
8414 {
8415 	struct wm_softc *sc = device_private(self);
8416 	int sem;
8417 
8418 	if (phy != 1) /* only one PHY on kumeran bus */
8419 		return;
8420 
8421 	sem = swfwphysem[sc->sc_funcid];
8422 	if (wm_get_swfw_semaphore(sc, sem)) {
8423 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8424 		    __func__);
8425 		return;
8426 	}
8427 
8428 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
8429 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
8430 		    reg >> GG82563_PAGE_SHIFT);
8431 	} else {
8432 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
8433 		    reg >> GG82563_PAGE_SHIFT);
8434 	}
8435 	/* Wait 200us more to work around a bug in the MDIC ready bit */
8436 	delay(200);
8437 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
8438 	delay(200);
8439 
8440 	wm_put_swfw_semaphore(sc, sem);
8441 }
8442 
8443 /*
8444  * wm_gmii_bm_readreg:	[mii interface function]
8445  *
8446  *	Read a PHY register on the BM PHY.
8447  * This could be handled by the PHY layer if we didn't have to lock the
8448  * resource ...
8449  */
8450 static int
8451 wm_gmii_bm_readreg(device_t self, int phy, int reg)
8452 {
8453 	struct wm_softc *sc = device_private(self);
8454 	int sem;
8455 	int rv;
8456 
8457 	sem = swfwphysem[sc->sc_funcid];
8458 	if (wm_get_swfw_semaphore(sc, sem)) {
8459 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8460 		    __func__);
8461 		return 0;
8462 	}
8463 
8464 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
8465 		if (phy == 1)
8466 			wm_gmii_i82544_writereg(self, phy,
8467 			    MII_IGPHY_PAGE_SELECT, reg);
8468 		else
8469 			wm_gmii_i82544_writereg(self, phy,
8470 			    GG82563_PHY_PAGE_SELECT,
8471 			    reg >> GG82563_PAGE_SHIFT);
8472 	}
8473 
8474 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
8475 	wm_put_swfw_semaphore(sc, sem);
8476 	return rv;
8477 }
8478 
8479 /*
8480  * wm_gmii_bm_writereg:	[mii interface function]
8481  *
8482  *	Write a PHY register on the BM PHY.
8483  * This could be handled by the PHY layer if we didn't have to lock the
8484  * resource ...
8485  */
8486 static void
8487 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
8488 {
8489 	struct wm_softc *sc = device_private(self);
8490 	int sem;
8491 
8492 	sem = swfwphysem[sc->sc_funcid];
8493 	if (wm_get_swfw_semaphore(sc, sem)) {
8494 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8495 		    __func__);
8496 		return;
8497 	}
8498 
8499 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
8500 		if (phy == 1)
8501 			wm_gmii_i82544_writereg(self, phy,
8502 			    MII_IGPHY_PAGE_SELECT, reg);
8503 		else
8504 			wm_gmii_i82544_writereg(self, phy,
8505 			    GG82563_PHY_PAGE_SELECT,
8506 			    reg >> GG82563_PAGE_SHIFT);
8507 	}
8508 
8509 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
8510 	wm_put_swfw_semaphore(sc, sem);
8511 }
8512 
8513 static void
8514 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
8515 {
8516 	struct wm_softc *sc = device_private(self);
8517 	uint16_t regnum = BM_PHY_REG_NUM(offset);
8518 	uint16_t wuce;
8519 
8520 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
8521 	if (sc->sc_type == WM_T_PCH) {
8522 		/* XXX The e1000 driver does nothing here... why? */
8523 	}
8524 
8525 	/* Set page 769 */
8526 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8527 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
8528 
8529 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
8530 
8531 	wuce &= ~BM_WUC_HOST_WU_BIT;
8532 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
8533 	    wuce | BM_WUC_ENABLE_BIT);
8534 
8535 	/* Select page 800 */
8536 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8537 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
8538 
8539 	/* Write page 800 */
8540 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
8541 
8542 	if (rd)
8543 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
8544 	else
8545 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
8546 
8547 	/* Set page 769 */
8548 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8549 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
8550 
8551 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
8552 }
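
/*
 * Illustrative sketch (not compiled in): the helper above brackets
 * every wakeup-register access with the page-769 enable dance, so a
 * caller only supplies the offset and direction.  A hypothetical
 * read-side wrapper would look like:
 */
#if 0
static int16_t
wm_read_wakeup_reg(device_t self, int offset)
{
	int16_t val;

	wm_access_phy_wakeup_reg_bm(self, offset, &val, 1);	/* rd = 1 */
	return val;
}
#endif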
8553 
8554 /*
8555  * wm_gmii_hv_readreg:	[mii interface function]
8556  *
8557  *	Read a PHY register on the HV (PCH) PHY.
8558  * This could be handled by the PHY layer if we didn't have to lock the
8559  * resource ...
8560  */
8561 static int
8562 wm_gmii_hv_readreg(device_t self, int phy, int reg)
8563 {
8564 	struct wm_softc *sc = device_private(self);
8565 	uint16_t page = BM_PHY_REG_PAGE(reg);
8566 	uint16_t regnum = BM_PHY_REG_NUM(reg);
8567 	uint16_t val;
8568 	int rv;
8569 
8570 	if (wm_get_swfwhw_semaphore(sc)) {
8571 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8572 		    __func__);
8573 		return 0;
8574 	}
8575 
8576 	/* XXX Workaround failure in MDIO access while cable is disconnected */
8577 	if (sc->sc_phytype == WMPHY_82577) {
8578 		/* XXX must write */
8579 	}
8580 
8581 	/* Page 800 works differently than the rest so it has its own func */
8582 	if (page == BM_WUC_PAGE) {
8583 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_put_swfwhw_semaphore(sc);	/* Don't leak the semaphore */
8584 		return val;
8585 	}
8586 
8587 	/*
8588 	 * Pages lower than 768 work differently from the rest, so they have
8589 	 * their own handling
8590 	 */
8591 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
8592 		printf("gmii_hv_readreg!!!\n");
		wm_put_swfwhw_semaphore(sc);	/* Don't leak the semaphore */
8593 		return 0;
8594 	}
8595 
8596 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
8597 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8598 		    page << BME1000_PAGE_SHIFT);
8599 	}
8600 
8601 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
8602 	wm_put_swfwhw_semaphore(sc);
8603 	return rv;
8604 }
8605 
8606 /*
8607  * wm_gmii_hv_writereg:	[mii interface function]
8608  *
8609  *	Write a PHY register on the HV (PCH) PHY.
8610  * This could be handled by the PHY layer if we didn't have to lock the
8611  * resource ...
8612  */
8613 static void
8614 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
8615 {
8616 	struct wm_softc *sc = device_private(self);
8617 	uint16_t page = BM_PHY_REG_PAGE(reg);
8618 	uint16_t regnum = BM_PHY_REG_NUM(reg);
8619 
8620 	if (wm_get_swfwhw_semaphore(sc)) {
8621 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8622 		    __func__);
8623 		return;
8624 	}
8625 
8626 	/* XXX Workaround failure in MDIO access while cable is disconnected */
8627 
8628 	/* Page 800 works differently than the rest so it has its own func */
8629 	if (page == BM_WUC_PAGE) {
8630 		uint16_t tmp;
8631 
8632 		tmp = val;
8633 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		wm_put_swfwhw_semaphore(sc);	/* Don't leak the semaphore */
8634 		return;
8635 	}
8636 
8637 	/*
8638 	 * Pages lower than 768 work differently from the rest, so they have
8639 	 * their own handling
8640 	 */
8641 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
8642 		printf("gmii_hv_writereg!!!\n");
		wm_put_swfwhw_semaphore(sc);	/* Don't leak the semaphore */
8643 		return;
8644 	}
8645 
8646 	/*
8647 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
8648 	 * Power Down (whenever bit 11 of the PHY control register is set)
8649 	 */
8650 
8651 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
8652 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8653 		    page << BME1000_PAGE_SHIFT);
8654 	}
8655 
8656 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
8657 	wm_put_swfwhw_semaphore(sc);
8658 }
8659 
8660 /*
8661  * wm_gmii_82580_readreg:	[mii interface function]
8662  *
8663  *	Read a PHY register on the 82580 and I350.
8664  * This could be handled by the PHY layer if we didn't have to lock the
8665  * resource ...
8666  */
8667 static int
8668 wm_gmii_82580_readreg(device_t self, int phy, int reg)
8669 {
8670 	struct wm_softc *sc = device_private(self);
8671 	int sem;
8672 	int rv;
8673 
8674 	sem = swfwphysem[sc->sc_funcid];
8675 	if (wm_get_swfw_semaphore(sc, sem)) {
8676 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8677 		    __func__);
8678 		return 0;
8679 	}
8680 
8681 	rv = wm_gmii_i82544_readreg(self, phy, reg);
8682 
8683 	wm_put_swfw_semaphore(sc, sem);
8684 	return rv;
8685 }
8686 
8687 /*
8688  * wm_gmii_82580_writereg:	[mii interface function]
8689  *
8690  *	Write a PHY register on the 82580 and I350.
8691  * This could be handled by the PHY layer if we didn't have to lock the
8692  * resource ...
8693  */
8694 static void
8695 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
8696 {
8697 	struct wm_softc *sc = device_private(self);
8698 	int sem;
8699 
8700 	sem = swfwphysem[sc->sc_funcid];
8701 	if (wm_get_swfw_semaphore(sc, sem)) {
8702 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8703 		    __func__);
8704 		return;
8705 	}
8706 
8707 	wm_gmii_i82544_writereg(self, phy, reg, val);
8708 
8709 	wm_put_swfw_semaphore(sc, sem);
8710 }
8711 
8712 /*
8713  * wm_gmii_gs40g_readreg:	[mii interface function]
8714  *
8715  *	Read a PHY register on the I210 and I211.
8716  * This could be handled by the PHY layer if we didn't have to lock the
8717  * resource ...
8718  */
8719 static int
8720 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
8721 {
8722 	struct wm_softc *sc = device_private(self);
8723 	int sem;
8724 	int page, offset;
8725 	int rv;
8726 
8727 	/* Acquire semaphore */
8728 	sem = swfwphysem[sc->sc_funcid];
8729 	if (wm_get_swfw_semaphore(sc, sem)) {
8730 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8731 		    __func__);
8732 		return 0;
8733 	}
8734 
8735 	/* Page select */
8736 	page = reg >> GS40G_PAGE_SHIFT;
8737 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
8738 
8739 	/* Read reg */
8740 	offset = reg & GS40G_OFFSET_MASK;
8741 	rv = wm_gmii_i82544_readreg(self, phy, offset);
8742 
8743 	wm_put_swfw_semaphore(sc, sem);
8744 	return rv;
8745 }
8746 
8747 /*
8748  * wm_gmii_gs40g_writereg:	[mii interface function]
8749  *
8750  *	Write a PHY register on the I210 and I211.
8751  * This could be handled by the PHY layer if we didn't have to lock the
8752  * resource ...
8753  */
8754 static void
8755 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
8756 {
8757 	struct wm_softc *sc = device_private(self);
8758 	int sem;
8759 	int page, offset;
8760 
8761 	/* Acquire semaphore */
8762 	sem = swfwphysem[sc->sc_funcid];
8763 	if (wm_get_swfw_semaphore(sc, sem)) {
8764 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8765 		    __func__);
8766 		return;
8767 	}
8768 
8769 	/* Page select */
8770 	page = reg >> GS40G_PAGE_SHIFT;
8771 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
8772 
8773 	/* Write reg */
8774 	offset = reg & GS40G_OFFSET_MASK;
8775 	wm_gmii_i82544_writereg(self, phy, offset, val);
8776 
8777 	/* Release semaphore */
8778 	wm_put_swfw_semaphore(sc, sem);
8779 }
8780 
8781 /*
8782  * wm_gmii_statchg:	[mii interface function]
8783  *
8784  *	Callback from MII layer when media changes.
8785  */
8786 static void
8787 wm_gmii_statchg(struct ifnet *ifp)
8788 {
8789 	struct wm_softc *sc = ifp->if_softc;
8790 	struct mii_data *mii = &sc->sc_mii;
8791 
8792 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
8793 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
8794 	sc->sc_fcrtl &= ~FCRTL_XONE;
8795 
8796 	/*
8797 	 * Get flow control negotiation result.
8798 	 */
8799 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
8800 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
8801 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
8802 		mii->mii_media_active &= ~IFM_ETH_FMASK;
8803 	}
8804 
8805 	if (sc->sc_flowflags & IFM_FLOW) {
8806 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
8807 			sc->sc_ctrl |= CTRL_TFCE;
8808 			sc->sc_fcrtl |= FCRTL_XONE;
8809 		}
8810 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
8811 			sc->sc_ctrl |= CTRL_RFCE;
8812 	}
8813 
8814 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
8815 		DPRINTF(WM_DEBUG_LINK,
8816 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
8817 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
8818 	} else {
8819 		DPRINTF(WM_DEBUG_LINK,
8820 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
8821 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
8822 	}
8823 
8824 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8825 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
8826 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
8827 						 : WMREG_FCRTL, sc->sc_fcrtl);
8828 	if (sc->sc_type == WM_T_80003) {
8829 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
8830 		case IFM_1000_T:
8831 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
8832 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
8833 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
8834 			break;
8835 		default:
8836 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
8837 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
8838 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
8839 			break;
8840 		}
8841 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
8842 	}
8843 }
8844 
8845 /*
8846  * wm_kmrn_readreg:
8847  *
8848  *	Read a kumeran register
8849  */
8850 static int
8851 wm_kmrn_readreg(struct wm_softc *sc, int reg)
8852 {
8853 	int rv;
8854 
8855 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
8856 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
8857 			aprint_error_dev(sc->sc_dev,
8858 			    "%s: failed to get semaphore\n", __func__);
8859 			return 0;
8860 		}
8861 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8862 		if (wm_get_swfwhw_semaphore(sc)) {
8863 			aprint_error_dev(sc->sc_dev,
8864 			    "%s: failed to get semaphore\n", __func__);
8865 			return 0;
8866 		}
8867 	}
8868 
8869 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
8870 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
8871 	    KUMCTRLSTA_REN);
8872 	CSR_WRITE_FLUSH(sc);
8873 	delay(2);
8874 
8875 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
8876 
8877 	if (sc->sc_flags & WM_F_LOCK_SWFW)
8878 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
8879 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8880 		wm_put_swfwhw_semaphore(sc);
8881 
8882 	return rv;
8883 }
8884 
8885 /*
8886  * wm_kmrn_writereg:
8887  *
8888  *	Write a kumeran register
8889  */
8890 static void
8891 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
8892 {
8893 
8894 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
8895 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
8896 			aprint_error_dev(sc->sc_dev,
8897 			    "%s: failed to get semaphore\n", __func__);
8898 			return;
8899 		}
8900 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8901 		if (wm_get_swfwhw_semaphore(sc)) {
8902 			aprint_error_dev(sc->sc_dev,
8903 			    "%s: failed to get semaphore\n", __func__);
8904 			return;
8905 		}
8906 	}
8907 
8908 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
8909 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
8910 	    (val & KUMCTRLSTA_MASK));
8911 
8912 	if (sc->sc_flags & WM_F_LOCK_SWFW)
8913 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
8914 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8915 		wm_put_swfwhw_semaphore(sc);
8916 }
8917 
8918 /* SGMII related */
8919 
8920 /*
8921  * wm_sgmii_uses_mdio
8922  *
8923  * Check whether the transaction is to the internal PHY or the external
8924  * MDIO interface. Return true if it's MDIO.
8925  */
8926 static bool
8927 wm_sgmii_uses_mdio(struct wm_softc *sc)
8928 {
8929 	uint32_t reg;
8930 	bool ismdio = false;
8931 
8932 	switch (sc->sc_type) {
8933 	case WM_T_82575:
8934 	case WM_T_82576:
8935 		reg = CSR_READ(sc, WMREG_MDIC);
8936 		ismdio = ((reg & MDIC_DEST) != 0);
8937 		break;
8938 	case WM_T_82580:
8939 	case WM_T_I350:
8940 	case WM_T_I354:
8941 	case WM_T_I210:
8942 	case WM_T_I211:
8943 		reg = CSR_READ(sc, WMREG_MDICNFG);
8944 		ismdio = ((reg & MDICNFG_DEST) != 0);
8945 		break;
8946 	default:
8947 		break;
8948 	}
8949 
8950 	return ismdio;
8951 }
8952 
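/*
 * The DEST bit consulted above (in MDIC on the 82575/82576 and in
 * MDICNFG on the 82580 and later) selects whether transactions go to
 * the external MDIO interface rather than the internal PHY; other chip
 * types simply default to "not MDIO" here.
 */
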
8953 /*
8954  * wm_sgmii_readreg:	[mii interface function]
8955  *
8956  *	Read a PHY register on the SGMII
8957  * This could be handled by the PHY layer if we didn't have to lock the
8958  * resource ...
8959  */
8960 static int
8961 wm_sgmii_readreg(device_t self, int phy, int reg)
8962 {
8963 	struct wm_softc *sc = device_private(self);
8964 	uint32_t i2ccmd;
8965 	int i, rv;
8966 
8967 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
8968 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8969 		    __func__);
8970 		return 0;
8971 	}
8972 
8973 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
8974 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
8975 	    | I2CCMD_OPCODE_READ;
8976 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
8977 
8978 	/* Poll the ready bit */
8979 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
8980 		delay(50);
8981 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
8982 		if (i2ccmd & I2CCMD_READY)
8983 			break;
8984 	}
8985 	if ((i2ccmd & I2CCMD_READY) == 0)
8986 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
8987 	if ((i2ccmd & I2CCMD_ERROR) != 0)
8988 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
8989 
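	/*
	 * The data comes back in I2C byte order; swap the two bytes to
	 * undo it, mirroring the swap done on the write side below.
	 */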
8990 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
8991 
8992 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
8993 	return rv;
8994 }
8995 
8996 /*
8997  * wm_sgmii_writereg:	[mii interface function]
8998  *
8999  *	Write a PHY register on the SGMII.
9000  * This could be handled by the PHY layer if we didn't have to lock the
9001  * resource ...
9002  */
9003 static void
9004 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
9005 {
9006 	struct wm_softc *sc = device_private(self);
9007 	uint32_t i2ccmd;
9008 	int i;
9009 	int val_swapped;
9010 
9011 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
9012 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9013 		    __func__);
9014 		return;
9015 	}
9016 	/* Swap the data bytes for the I2C interface */
9017 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
9018 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
9019 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
9020 	    | I2CCMD_OPCODE_WRITE | val_swapped;
9021 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9022 
9023 	/* Poll the ready bit */
9024 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9025 		delay(50);
9026 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9027 		if (i2ccmd & I2CCMD_READY)
9028 			break;
9029 	}
9030 	if ((i2ccmd & I2CCMD_READY) == 0)
9031 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
9032 	if ((i2ccmd & I2CCMD_ERROR) != 0)
9033 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
9034 
9035 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
9036 }
9037 
9038 /* TBI related */
9039 
9040 /*
9041  * wm_tbi_mediainit:
9042  *
9043  *	Initialize media for use on 1000BASE-X devices.
9044  */
9045 static void
9046 wm_tbi_mediainit(struct wm_softc *sc)
9047 {
9048 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9049 	const char *sep = "";
9050 
9051 	if (sc->sc_type < WM_T_82543)
9052 		sc->sc_tipg = TIPG_WM_DFLT;
9053 	else
9054 		sc->sc_tipg = TIPG_LG_DFLT;
9055 
9056 	sc->sc_tbi_serdes_anegticks = 5;
9057 
9058 	/* Initialize our media structures */
9059 	sc->sc_mii.mii_ifp = ifp;
9060 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
9061 
9062 	if ((sc->sc_type >= WM_T_82575)
9063 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
9064 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
9065 		    wm_serdes_mediachange, wm_serdes_mediastatus);
9066 	else
9067 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
9068 		    wm_tbi_mediachange, wm_tbi_mediastatus);
9069 
9070 	/*
9071 	 * SWD Pins:
9072 	 *
9073 	 *	0 = Link LED (output)
9074 	 *	1 = Loss Of Signal (input)
9075 	 */
9076 	sc->sc_ctrl |= CTRL_SWDPIO(0);
9077 
9078 	/* XXX Perhaps this is only for TBI */
9079 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
9080 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
9081 
9082 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
9083 		sc->sc_ctrl &= ~CTRL_LRST;
9084 
9085 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9086 
9087 #define	ADD(ss, mm, dd)							\
9088 do {									\
9089 	aprint_normal("%s%s", sep, ss);					\
9090 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
9091 	sep = ", ";							\
9092 } while (/*CONSTCOND*/0)
9093 
9094 	aprint_normal_dev(sc->sc_dev, "");
9095 
9096 	/* Only 82545 is LX */
9097 	if (sc->sc_type == WM_T_82545) {
9098 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
9099 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
9100 	} else {
9101 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
9102 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
9103 	}
9104 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
9105 	aprint_normal("\n");
9106 
9107 #undef ADD
9108 
9109 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
9110 }
9111 
9112 /*
9113  * wm_tbi_mediachange:	[ifmedia interface function]
9114  *
9115  *	Set hardware to newly-selected media on a 1000BASE-X device.
9116  */
9117 static int
9118 wm_tbi_mediachange(struct ifnet *ifp)
9119 {
9120 	struct wm_softc *sc = ifp->if_softc;
9121 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9122 	uint32_t status;
9123 	int i;
9124 
9125 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
9126 		/* XXX need some work for >= 82571 and < 82575 */
9127 		if (sc->sc_type < WM_T_82575)
9128 			return 0;
9129 	}
9130 
9131 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
9132 	    || (sc->sc_type >= WM_T_82575))
9133 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
9134 
9135 	sc->sc_ctrl &= ~CTRL_LRST;
9136 	sc->sc_txcw = TXCW_ANE;
9137 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9138 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
9139 	else if (ife->ifm_media & IFM_FDX)
9140 		sc->sc_txcw |= TXCW_FD;
9141 	else
9142 		sc->sc_txcw |= TXCW_HD;
9143 
9144 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
9145 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
9146 
9147 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
9148 		    device_xname(sc->sc_dev), sc->sc_txcw));
9149 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9150 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9151 	CSR_WRITE_FLUSH(sc);
9152 	delay(1000);
9153 
9154 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
9155 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
9156 
9157 	/*
9158 	 * On chips newer than the 82544, CTRL_SWDPIN(1) reads as 1 when the
9159 	 * optics detect a signal and 0 when they don't; older chips invert it.
9160 	 */
9161 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
9162 		/* Have signal; wait for the link to come up. */
9163 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
9164 			delay(10000);
9165 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
9166 				break;
9167 		}
9168 
9169 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
9170 			    device_xname(sc->sc_dev),i));
9171 
9172 		status = CSR_READ(sc, WMREG_STATUS);
9173 		DPRINTF(WM_DEBUG_LINK,
9174 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
9175 			device_xname(sc->sc_dev),status, STATUS_LU));
9176 		if (status & STATUS_LU) {
9177 			/* Link is up. */
9178 			DPRINTF(WM_DEBUG_LINK,
9179 			    ("%s: LINK: set media -> link up %s\n",
9180 			    device_xname(sc->sc_dev),
9181 			    (status & STATUS_FD) ? "FDX" : "HDX"));
9182 
9183 			/*
9184 			 * NOTE: CTRL will update TFCE and RFCE automatically,
9185 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
9186 			 * automatically, so re-read CTRL into sc_ctrl here.
9187 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
9188 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
9189 			sc->sc_fcrtl &= ~FCRTL_XONE;
9190 			if (status & STATUS_FD)
9191 				sc->sc_tctl |=
9192 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
9193 			else
9194 				sc->sc_tctl |=
9195 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
9196 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
9197 				sc->sc_fcrtl |= FCRTL_XONE;
9198 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
9199 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
9200 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
9201 				      sc->sc_fcrtl);
9202 			sc->sc_tbi_linkup = 1;
9203 		} else {
9204 			if (i == WM_LINKUP_TIMEOUT)
9205 				wm_check_for_link(sc);
9206 			/* Link is down. */
9207 			DPRINTF(WM_DEBUG_LINK,
9208 			    ("%s: LINK: set media -> link down\n",
9209 			    device_xname(sc->sc_dev)));
9210 			sc->sc_tbi_linkup = 0;
9211 		}
9212 	} else {
9213 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
9214 		    device_xname(sc->sc_dev)));
9215 		sc->sc_tbi_linkup = 0;
9216 	}
9217 
9218 	wm_tbi_serdes_set_linkled(sc);
9219 
9220 	return 0;
9221 }
9222 
9223 /*
9224  * wm_tbi_mediastatus:	[ifmedia interface function]
9225  *
9226  *	Get the current interface media status on a 1000BASE-X device.
9227  */
9228 static void
9229 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9230 {
9231 	struct wm_softc *sc = ifp->if_softc;
9232 	uint32_t ctrl, status;
9233 
9234 	ifmr->ifm_status = IFM_AVALID;
9235 	ifmr->ifm_active = IFM_ETHER;
9236 
9237 	status = CSR_READ(sc, WMREG_STATUS);
9238 	if ((status & STATUS_LU) == 0) {
9239 		ifmr->ifm_active |= IFM_NONE;
9240 		return;
9241 	}
9242 
9243 	ifmr->ifm_status |= IFM_ACTIVE;
9244 	/* Only 82545 is LX */
9245 	if (sc->sc_type == WM_T_82545)
9246 		ifmr->ifm_active |= IFM_1000_LX;
9247 	else
9248 		ifmr->ifm_active |= IFM_1000_SX;
9249 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
9250 		ifmr->ifm_active |= IFM_FDX;
9251 	else
9252 		ifmr->ifm_active |= IFM_HDX;
9253 	ctrl = CSR_READ(sc, WMREG_CTRL);
9254 	if (ctrl & CTRL_RFCE)
9255 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
9256 	if (ctrl & CTRL_TFCE)
9257 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
9258 }
9259 
9260 /* XXX TBI only */
9261 static int
9262 wm_check_for_link(struct wm_softc *sc)
9263 {
9264 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9265 	uint32_t rxcw;
9266 	uint32_t ctrl;
9267 	uint32_t status;
9268 	uint32_t sig;
9269 
9270 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
9271 		/* XXX need some work for >= 82571 */
9272 		if (sc->sc_type >= WM_T_82571) {
9273 			sc->sc_tbi_linkup = 1;
9274 			return 0;
9275 		}
9276 	}
9277 
9278 	rxcw = CSR_READ(sc, WMREG_RXCW);
9279 	ctrl = CSR_READ(sc, WMREG_CTRL);
9280 	status = CSR_READ(sc, WMREG_STATUS);
9281 
9282 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
9283 
9284 	DPRINTF(WM_DEBUG_LINK,
9285 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
9286 		device_xname(sc->sc_dev), __func__,
9287 		((ctrl & CTRL_SWDPIN(1)) == sig),
9288 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
9289 
9290 	/*
9291 	 * SWDPIN   LU RXCW
9292 	 *      0    0    0
9293 	 *      0    0    1	(should not happen)
9294 	 *      0    1    0	(should not happen)
9295 	 *      0    1    1	(should not happen)
9296 	 *      1    0    0	Disable autonegotiation and force link up
9297 	 *      1    0    1	Got /C/ but no link up yet
9298 	 *      1    1    0	(linkup)
9299 	 *      1    1    1	If IFM_AUTO, go back to autonegotiation
9300 	 *
9301 	 */
9302 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
9303 	    && ((status & STATUS_LU) == 0)
9304 	    && ((rxcw & RXCW_C) == 0)) {
9305 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
9306 			__func__));
9307 		sc->sc_tbi_linkup = 0;
9308 		/* Disable auto-negotiation in the TXCW register */
9309 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
9310 
9311 		/*
9312 		 * Force link-up and also force full-duplex.
9313 		 *
9314 		 * NOTE: the hardware may have updated TFCE and RFCE in CTRL
9315 		 * automatically, so base sc_ctrl on the value just read.
9316 		 */
9317 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
9318 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9319 	} else if (((status & STATUS_LU) != 0)
9320 	    && ((rxcw & RXCW_C) != 0)
9321 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
9322 		sc->sc_tbi_linkup = 1;
9323 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonegotiation\n",
9324 			__func__));
9325 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9326 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
9327 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
9328 	    && ((rxcw & RXCW_C) != 0)) {
9329 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
9330 	} else {
9331 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
9332 			status));
9333 	}
9334 
9335 	return 0;
9336 }
9337 
9338 /*
9339  * wm_tbi_tick:
9340  *
9341  *	Check the link on TBI devices.
9342  *	This function acts as mii_tick().
9343  */
9344 static void
9345 wm_tbi_tick(struct wm_softc *sc)
9346 {
9347 	struct mii_data *mii = &sc->sc_mii;
9348 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9349 	uint32_t status;
9350 
9351 	KASSERT(WM_CORE_LOCKED(sc));
9352 
9353 	status = CSR_READ(sc, WMREG_STATUS);
9354 
9355 	/* XXX is this needed? */
9356 	(void)CSR_READ(sc, WMREG_RXCW);
9357 	(void)CSR_READ(sc, WMREG_CTRL);
9358 
9359 	/* set link status */
9360 	if ((status & STATUS_LU) == 0) {
9361 		DPRINTF(WM_DEBUG_LINK,
9362 		    ("%s: LINK: checklink -> down\n",
9363 			device_xname(sc->sc_dev)));
9364 		sc->sc_tbi_linkup = 0;
9365 	} else if (sc->sc_tbi_linkup == 0) {
9366 		DPRINTF(WM_DEBUG_LINK,
9367 		    ("%s: LINK: checklink -> up %s\n",
9368 			device_xname(sc->sc_dev),
9369 			(status & STATUS_FD) ? "FDX" : "HDX"));
9370 		sc->sc_tbi_linkup = 1;
9371 		sc->sc_tbi_serdes_ticks = 0;
9372 	}
9373 
9374 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
9375 		goto setled;
9376 
9377 	if ((status & STATUS_LU) == 0) {
9378 		sc->sc_tbi_linkup = 0;
9379 		/* If the timer expired, retry autonegotiation */
9380 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9381 		    && (++sc->sc_tbi_serdes_ticks
9382 			>= sc->sc_tbi_serdes_anegticks)) {
9383 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
9384 			sc->sc_tbi_serdes_ticks = 0;
9385 			/*
9386 			 * Reset the link, and let autonegotiation do
9387 			 * its thing
9388 			 */
9389 			sc->sc_ctrl |= CTRL_LRST;
9390 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9391 			CSR_WRITE_FLUSH(sc);
9392 			delay(1000);
9393 			sc->sc_ctrl &= ~CTRL_LRST;
9394 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9395 			CSR_WRITE_FLUSH(sc);
9396 			delay(1000);
9397 			CSR_WRITE(sc, WMREG_TXCW,
9398 			    sc->sc_txcw & ~TXCW_ANE);
9399 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9400 		}
9401 	}
9402 
9403 setled:
9404 	wm_tbi_serdes_set_linkled(sc);
9405 }
9406 
9407 /* SERDES related */
9408 static void
9409 wm_serdes_power_up_link_82575(struct wm_softc *sc)
9410 {
9411 	uint32_t reg;
9412 
9413 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
9414 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
9415 		return;
9416 
9417 	reg = CSR_READ(sc, WMREG_PCS_CFG);
9418 	reg |= PCS_CFG_PCS_EN;
9419 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
9420 
9421 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
9422 	reg &= ~CTRL_EXT_SWDPIN(3);
9423 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9424 	CSR_WRITE_FLUSH(sc);
9425 }
9426 
9427 static int
9428 wm_serdes_mediachange(struct ifnet *ifp)
9429 {
9430 	struct wm_softc *sc = ifp->if_softc;
9431 	bool pcs_autoneg = true; /* XXX */
9432 	uint32_t ctrl_ext, pcs_lctl, reg;
9433 
9434 	/* XXX Currently, this function is not called on 8257[12] */
9435 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
9436 	    || (sc->sc_type >= WM_T_82575))
9437 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
9438 
9439 	wm_serdes_power_up_link_82575(sc);
9440 
9441 	sc->sc_ctrl |= CTRL_SLU;
9442 
9443 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
9444 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
9445 
9446 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9447 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
9448 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
9449 	case CTRL_EXT_LINK_MODE_SGMII:
9450 		pcs_autoneg = true;
9451 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
9452 		break;
9453 	case CTRL_EXT_LINK_MODE_1000KX:
9454 		pcs_autoneg = false;
9455 		/* FALLTHROUGH */
9456 	default:
9457 		if ((sc->sc_type == WM_T_82575)
9458 		    || (sc->sc_type == WM_T_82576)) {
9459 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
9460 				pcs_autoneg = false;
9461 		}
9462 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
9463 		    | CTRL_FRCFDX;
9464 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
9465 	}
9466 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9467 
9468 	if (pcs_autoneg) {
9469 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
9470 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
9471 
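		/*
		 * Advertise both symmetric and asymmetric PAUSE toward the
		 * link partner; the clear immediately before the set below
		 * is redundant but harmless.
		 */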
9472 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
9473 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
9474 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
9475 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
9476 	} else
9477 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
9478 
9479 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
9480 
9481 
9482 	return 0;
9483 }
9484 
9485 static void
9486 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9487 {
9488 	struct wm_softc *sc = ifp->if_softc;
9489 	struct mii_data *mii = &sc->sc_mii;
9490 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9491 	uint32_t pcs_adv, pcs_lpab, reg;
9492 
9493 	ifmr->ifm_status = IFM_AVALID;
9494 	ifmr->ifm_active = IFM_ETHER;
9495 
9496 	/* Check PCS */
9497 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
9498 	if ((reg & PCS_LSTS_LINKOK) == 0) {
9499 		ifmr->ifm_active |= IFM_NONE;
9500 		sc->sc_tbi_linkup = 0;
9501 		goto setled;
9502 	}
9503 
9504 	sc->sc_tbi_linkup = 1;
9505 	ifmr->ifm_status |= IFM_ACTIVE;
9506 	ifmr->ifm_active |= IFM_1000_SX; /* XXX */
9507 	if ((reg & PCS_LSTS_FDX) != 0)
9508 		ifmr->ifm_active |= IFM_FDX;
9509 	else
9510 		ifmr->ifm_active |= IFM_HDX;
9511 	mii->mii_media_active &= ~IFM_ETH_FMASK;
9512 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
9513 		/* Check flow */
9514 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
9515 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
9516 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
9517 			goto setled;
9518 		}
9519 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
9520 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
9521 		DPRINTF(WM_DEBUG_LINK,
9522 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
9523 		if ((pcs_adv & TXCW_SYM_PAUSE)
9524 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
9525 			mii->mii_media_active |= IFM_FLOW
9526 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
9527 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
9528 		    && (pcs_adv & TXCW_ASYM_PAUSE)
9529 		    && (pcs_lpab & TXCW_SYM_PAUSE)
9530 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
9531 			mii->mii_media_active |= IFM_FLOW
9532 			    | IFM_ETH_TXPAUSE;
9533 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
9534 		    && (pcs_adv & TXCW_ASYM_PAUSE)
9535 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
9536 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
9537 			mii->mii_media_active |= IFM_FLOW
9538 			    | IFM_ETH_RXPAUSE;
9539 		}
9541 	}
9542 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
9543 	    | (mii->mii_media_active & IFM_ETH_FMASK);
9544 setled:
9545 	wm_tbi_serdes_set_linkled(sc);
9546 }
9547 
9548 /*
9549  * wm_serdes_tick:
9550  *
9551  *	Check the link on serdes devices.
9552  */
9553 static void
9554 wm_serdes_tick(struct wm_softc *sc)
9555 {
9556 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9557 	struct mii_data *mii = &sc->sc_mii;
9558 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9559 	uint32_t reg;
9560 
9561 	KASSERT(WM_CORE_LOCKED(sc));
9562 
9563 	mii->mii_media_status = IFM_AVALID;
9564 	mii->mii_media_active = IFM_ETHER;
9565 
9566 	/* Check PCS */
9567 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
9568 	if ((reg & PCS_LSTS_LINKOK) != 0) {
9569 		mii->mii_media_status |= IFM_ACTIVE;
9570 		sc->sc_tbi_linkup = 1;
9571 		sc->sc_tbi_serdes_ticks = 0;
9572 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
9573 		if ((reg & PCS_LSTS_FDX) != 0)
9574 			mii->mii_media_active |= IFM_FDX;
9575 		else
9576 			mii->mii_media_active |= IFM_HDX;
9577 	} else {
9578 		mii->mii_media_status |= IFM_NONE;
9579 		sc->sc_tbi_linkup = 0;
9580 		/* If the timer expired, retry autonegotiation */
9581 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9582 		    && (++sc->sc_tbi_serdes_ticks
9583 			>= sc->sc_tbi_serdes_anegticks)) {
9584 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
9585 			sc->sc_tbi_serdes_ticks = 0;
9586 			/* XXX */
9587 			wm_serdes_mediachange(ifp);
9588 		}
9589 	}
9590 
9591 	wm_tbi_serdes_set_linkled(sc);
9592 }
9593 
9594 /* SFP related */
9595 
9596 static int
9597 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
9598 {
9599 	uint32_t i2ccmd;
9600 	int i;
9601 
9602 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
9603 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9604 
9605 	/* Poll the ready bit */
9606 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9607 		delay(50);
9608 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9609 		if (i2ccmd & I2CCMD_READY)
9610 			break;
9611 	}
9612 	if ((i2ccmd & I2CCMD_READY) == 0)
9613 		return -1;
9614 	if ((i2ccmd & I2CCMD_ERROR) != 0)
9615 		return -1;
9616 
9617 	*data = i2ccmd & 0x00ff;
9618 
9619 	return 0;
9620 }
9621 
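/*
 * This uses the same I2CCMD poll-for-READY pattern as the SGMII PHY
 * accessors above, but it transfers a single byte from the SFP module,
 * so no byte swapping is needed.
 */
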
9622 static uint32_t
9623 wm_sfp_get_media_type(struct wm_softc *sc)
9624 {
9625 	uint32_t ctrl_ext;
9626 	uint8_t val = 0;
9627 	int timeout = 3;
9628 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
9629 	int rv = -1;
9630 
9631 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9632 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
9633 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
9634 	CSR_WRITE_FLUSH(sc);
9635 
9636 	/* Read SFP module data */
9637 	while (timeout) {
9638 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
9639 		if (rv == 0)
9640 			break;
9641 		delay(100*1000); /* XXX too big */
9642 		timeout--;
9643 	}
9644 	if (rv != 0)
9645 		goto out;
9646 	switch (val) {
9647 	case SFF_SFP_ID_SFF:
9648 		aprint_normal_dev(sc->sc_dev,
9649 		    "Module/Connector soldered to board\n");
9650 		break;
9651 	case SFF_SFP_ID_SFP:
9652 		aprint_normal_dev(sc->sc_dev, "SFP\n");
9653 		break;
9654 	case SFF_SFP_ID_UNKNOWN:
9655 		goto out;
9656 	default:
9657 		break;
9658 	}
9659 
9660 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
9661 	if (rv != 0) {
9662 		goto out;
9663 	}
9664 
9665 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
9666 		mediatype = WM_MEDIATYPE_SERDES;
9667 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
9668 		sc->sc_flags |= WM_F_SGMII;
9669 		mediatype = WM_MEDIATYPE_COPPER;
9670 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
9671 		sc->sc_flags |= WM_F_SGMII;
9672 		mediatype = WM_MEDIATYPE_SERDES;
9673 	}
9674 
9675 out:
9676 	/* Restore I2C interface setting */
9677 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
9678 
9679 	return mediatype;
9680 }
9681 /*
9682  * NVM related.
9683  * Microwire, SPI (w/wo EERD) and Flash.
9684  */
9685 
9686 /* Both spi and uwire */
9687 
9688 /*
9689  * wm_eeprom_sendbits:
9690  *
9691  *	Send a series of bits to the EEPROM.
9692  */
9693 static void
9694 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
9695 {
9696 	uint32_t reg;
9697 	int x;
9698 
9699 	reg = CSR_READ(sc, WMREG_EECD);
9700 
9701 	for (x = nbits; x > 0; x--) {
9702 		if (bits & (1U << (x - 1)))
9703 			reg |= EECD_DI;
9704 		else
9705 			reg &= ~EECD_DI;
9706 		CSR_WRITE(sc, WMREG_EECD, reg);
9707 		CSR_WRITE_FLUSH(sc);
9708 		delay(2);
9709 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
9710 		CSR_WRITE_FLUSH(sc);
9711 		delay(2);
9712 		CSR_WRITE(sc, WMREG_EECD, reg);
9713 		CSR_WRITE_FLUSH(sc);
9714 		delay(2);
9715 	}
9716 }
9717 
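/*
 * Both helpers bit-bang the EEPROM serial bus through EECD:
 * wm_eeprom_sendbits() above presents each bit on EECD_DI, MSB first,
 * and clocks it with a ~2us pulse on EECD_SK; wm_eeprom_recvbits()
 * below samples EECD_DO on the same clock.  The Microwire and SPI
 * paths share this timing.
 */
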
9718 /*
9719  * wm_eeprom_recvbits:
9720  *
9721  *	Receive a series of bits from the EEPROM.
9722  */
9723 static void
9724 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
9725 {
9726 	uint32_t reg, val;
9727 	int x;
9728 
9729 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
9730 
9731 	val = 0;
9732 	for (x = nbits; x > 0; x--) {
9733 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
9734 		CSR_WRITE_FLUSH(sc);
9735 		delay(2);
9736 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
9737 			val |= (1U << (x - 1));
9738 		CSR_WRITE(sc, WMREG_EECD, reg);
9739 		CSR_WRITE_FLUSH(sc);
9740 		delay(2);
9741 	}
9742 	*valp = val;
9743 }
9744 
9745 /* Microwire */
9746 
9747 /*
9748  * wm_nvm_read_uwire:
9749  *
9750  *	Read a word from the EEPROM using the MicroWire protocol.
9751  */
9752 static int
9753 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
9754 {
9755 	uint32_t reg, val;
9756 	int i;
9757 
9758 	for (i = 0; i < wordcnt; i++) {
9759 		/* Clear SK and DI. */
9760 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
9761 		CSR_WRITE(sc, WMREG_EECD, reg);
9762 
9763 		/*
9764 		 * XXX: workaround for a bug in qemu-0.12.x and prior
9765 		 * and Xen.
9766 		 *
9767 		 * We use this workaround only for 82540 because qemu's
9768 		 * e1000 acts as an 82540.
9769 		 */
9770 		if (sc->sc_type == WM_T_82540) {
9771 			reg |= EECD_SK;
9772 			CSR_WRITE(sc, WMREG_EECD, reg);
9773 			reg &= ~EECD_SK;
9774 			CSR_WRITE(sc, WMREG_EECD, reg);
9775 			CSR_WRITE_FLUSH(sc);
9776 			delay(2);
9777 		}
9778 		/* XXX: end of workaround */
9779 
9780 		/* Set CHIP SELECT. */
9781 		reg |= EECD_CS;
9782 		CSR_WRITE(sc, WMREG_EECD, reg);
9783 		CSR_WRITE_FLUSH(sc);
9784 		delay(2);
9785 
9786 		/* Shift in the READ command. */
9787 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
9788 
9789 		/* Shift in address. */
9790 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
9791 
9792 		/* Shift out the data. */
9793 		wm_eeprom_recvbits(sc, &val, 16);
9794 		data[i] = val & 0xffff;
9795 
9796 		/* Clear CHIP SELECT. */
9797 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
9798 		CSR_WRITE(sc, WMREG_EECD, reg);
9799 		CSR_WRITE_FLUSH(sc);
9800 		delay(2);
9801 	}
9802 
9803 	return 0;
9804 }
9805 
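/*
 * A worked example of the Microwire framing above, assuming the
 * conventional 3-bit READ opcode (110) in UWIRE_OPC_READ: reading word
 * 5 from a part with 6 address bits shifts out 110 followed by 000101,
 * then clocks 16 data bits back in, MSB first.
 */
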
9806 /* SPI */
9807 
9808 /*
9809  * Set SPI and FLASH related information from the EECD register.
9810  * For 82541 and 82547, the word size is taken from EEPROM.
9811  */
9812 static int
9813 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
9814 {
9815 	int size;
9816 	uint32_t reg;
9817 	uint16_t data;
9818 
9819 	reg = CSR_READ(sc, WMREG_EECD);
9820 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
9821 
9822 	/* Read the size of NVM from EECD by default */
9823 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
9824 	switch (sc->sc_type) {
9825 	case WM_T_82541:
9826 	case WM_T_82541_2:
9827 	case WM_T_82547:
9828 	case WM_T_82547_2:
9829 		/* Set dummy value to access EEPROM */
9830 		sc->sc_nvm_wordsize = 64;
9831 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
9832 		reg = data;
9833 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
9834 		if (size == 0)
9835 			size = 6; /* 64 word size */
9836 		else
9837 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
9838 		break;
9839 	case WM_T_80003:
9840 	case WM_T_82571:
9841 	case WM_T_82572:
9842 	case WM_T_82573: /* SPI case */
9843 	case WM_T_82574: /* SPI case */
9844 	case WM_T_82583: /* SPI case */
9845 		size += NVM_WORD_SIZE_BASE_SHIFT;
9846 		if (size > 14)
9847 			size = 14;
9848 		break;
9849 	case WM_T_82575:
9850 	case WM_T_82576:
9851 	case WM_T_82580:
9852 	case WM_T_I350:
9853 	case WM_T_I354:
9854 	case WM_T_I210:
9855 	case WM_T_I211:
9856 		size += NVM_WORD_SIZE_BASE_SHIFT;
9857 		if (size > 15)
9858 			size = 15;
9859 		break;
9860 	default:
9861 		aprint_error_dev(sc->sc_dev,
9862 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
9863 		return -1;
9864 		break;
9865 	}
9866 
9867 	sc->sc_nvm_wordsize = 1 << size;
9868 
9869 	return 0;
9870 }
9871 
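/*
 * For illustration: the EECD size field is an exponent, so assuming
 * the Intel shared code's NVM_WORD_SIZE_BASE_SHIFT of 6, a raw field
 * value of 2 on an 82571 gives size = 2 + 6 = 8 and sc_nvm_wordsize =
 * 1 << 8 = 256 words; the clamps to 14 or 15 above cap the largest
 * supported part.
 */
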
9872 /*
9873  * wm_nvm_ready_spi:
9874  *
9875  *	Wait for a SPI EEPROM to be ready for commands.
9876  */
9877 static int
9878 wm_nvm_ready_spi(struct wm_softc *sc)
9879 {
9880 	uint32_t val;
9881 	int usec;
9882 
9883 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
9884 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
9885 		wm_eeprom_recvbits(sc, &val, 8);
9886 		if ((val & SPI_SR_RDY) == 0)
9887 			break;
9888 	}
9889 	if (usec >= SPI_MAX_RETRIES) {
9890 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
9891 		return 1;
9892 	}
9893 	return 0;
9894 }
9895 
9896 /*
9897  * wm_nvm_read_spi:
9898  *
9899  *	Read a word from the EEPROM using the SPI protocol.
9900  */
9901 static int
9902 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
9903 {
9904 	uint32_t reg, val;
9905 	int i;
9906 	uint8_t opc;
9907 
9908 	/* Clear SK and CS. */
9909 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
9910 	CSR_WRITE(sc, WMREG_EECD, reg);
9911 	CSR_WRITE_FLUSH(sc);
9912 	delay(2);
9913 
9914 	if (wm_nvm_ready_spi(sc))
9915 		return 1;
9916 
9917 	/* Toggle CS to flush commands. */
9918 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
9919 	CSR_WRITE_FLUSH(sc);
9920 	delay(2);
9921 	CSR_WRITE(sc, WMREG_EECD, reg);
9922 	CSR_WRITE_FLUSH(sc);
9923 	delay(2);
9924 
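	/*
	 * Parts with only 8 address bits hold 256 bytes.  Since the word
	 * address is shifted left by one below, word >= 128 means a byte
	 * address past 255, which such parts select with the A8 bit of
	 * the opcode instead of a ninth address bit.
	 */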
9925 	opc = SPI_OPC_READ;
9926 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
9927 		opc |= SPI_OPC_A8;
9928 
9929 	wm_eeprom_sendbits(sc, opc, 8);
9930 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
9931 
9932 	for (i = 0; i < wordcnt; i++) {
9933 		wm_eeprom_recvbits(sc, &val, 16);
9934 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
9935 	}
9936 
9937 	/* Raise CS and clear SK. */
9938 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
9939 	CSR_WRITE(sc, WMREG_EECD, reg);
9940 	CSR_WRITE_FLUSH(sc);
9941 	delay(2);
9942 
9943 	return 0;
9944 }
9945 
9946 /* Using with EERD */
9947 
9948 static int
9949 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
9950 {
9951 	uint32_t attempts = 100000;
9952 	uint32_t i, reg = 0;
9953 	int32_t done = -1;
9954 
9955 	for (i = 0; i < attempts; i++) {
9956 		reg = CSR_READ(sc, rw);
9957 
9958 		if (reg & EERD_DONE) {
9959 			done = 0;
9960 			break;
9961 		}
9962 		delay(5);
9963 	}
9964 
9965 	return done;
9966 }
9967 
9968 static int
9969 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
9970     uint16_t *data)
9971 {
9972 	int i, eerd = 0;
9973 	int error = 0;
9974 
9975 	for (i = 0; i < wordcnt; i++) {
9976 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
9977 
9978 		CSR_WRITE(sc, WMREG_EERD, eerd);
9979 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
9980 		if (error != 0)
9981 			break;
9982 
9983 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
9984 	}
9985 
9986 	return error;
9987 }
9988 
9989 /* Flash */
9990 
9991 static int
9992 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
9993 {
9994 	uint32_t eecd;
9995 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
9996 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
9997 	uint8_t sig_byte = 0;
9998 
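	/*
	 * Bank selection strategy: on ICH8/ICH9 the EECD SEC1VAL bits can
	 * name the valid bank directly; otherwise each bank's signature
	 * byte (the high byte of word ICH_NVM_SIG_WORD, hence the
	 * "* 2 + 1" byte offset above) is probed for ICH_NVM_SIG_VALUE.
	 */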
9999 	switch (sc->sc_type) {
10000 	case WM_T_ICH8:
10001 	case WM_T_ICH9:
10002 		eecd = CSR_READ(sc, WMREG_EECD);
10003 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
10004 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
10005 			return 0;
10006 		}
10007 		/* FALLTHROUGH */
10008 	default:
10009 		/* Default to 0 */
10010 		*bank = 0;
10011 
10012 		/* Check bank 0 */
10013 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
10014 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
10015 			*bank = 0;
10016 			return 0;
10017 		}
10018 
10019 		/* Check bank 1 */
10020 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
10021 		    &sig_byte);
10022 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
10023 			*bank = 1;
10024 			return 0;
10025 		}
10026 	}
10027 
10028 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
10029 		device_xname(sc->sc_dev)));
10030 	return -1;
10031 }
10032 
10033 /******************************************************************************
10034  * This function does initial flash setup so that a new read/write/erase cycle
10035  * can be started.
10036  *
10037  * sc - The pointer to the hw structure
10038  ****************************************************************************/
10039 static int32_t
10040 wm_ich8_cycle_init(struct wm_softc *sc)
10041 {
10042 	uint16_t hsfsts;
10043 	int32_t error = 1;
10044 	int32_t i     = 0;
10045 
10046 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10047 
10048 	/* Check that the Flash Descriptor Valid bit is set in HW status */
10049 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
10050 		return error;
10051 	}
10052 
10053 	/* Clear FCERR and DAEL in HW status by writing 1s to them */
10055 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
10056 
10057 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10058 
10059 	/*
10060 	 * Either we should have a hardware SPI cycle-in-progress bit to
10061 	 * check against before starting a new cycle, or the FDONE bit
10062 	 * should be changed in the hardware so that it is 1 after a
10063 	 * hardware reset, which could then be used to tell whether a cycle
10064 	 * is in progress or has completed.  We should also have a software
10065 	 * semaphore mechanism guarding FDONE or the cycle-in-progress bit,
10066 	 * so that accesses to those bits by two threads are serialized and
10067 	 * two threads can't start a cycle at the same time.
10068 	 */
10069 
10070 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
10071 		/*
10072 		 * There is no cycle running at present, so we can start a
10073 		 * cycle
10074 		 */
10075 
10076 		/* Begin by setting Flash Cycle Done. */
10077 		hsfsts |= HSFSTS_DONE;
10078 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10079 		error = 0;
10080 	} else {
10081 		/*
10082 		 * Otherwise poll for a while so the current cycle has a
10083 		 * chance to end before giving up.
10084 		 */
10085 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
10086 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10087 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
10088 				error = 0;
10089 				break;
10090 			}
10091 			delay(1);
10092 		}
10093 		if (error == 0) {
10094 			/*
10095 			 * The previous cycle ended within the timeout, so
10096 			 * now set the Flash Cycle Done bit.
10097 			 */
10098 			hsfsts |= HSFSTS_DONE;
10099 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10100 		}
10101 	}
10102 	return error;
10103 }
10104 
10105 /******************************************************************************
10106  * This function starts a flash cycle and waits for its completion
10107  *
10108  * sc - The pointer to the hw structure
10109  ****************************************************************************/
10110 static int32_t
10111 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
10112 {
10113 	uint16_t hsflctl;
10114 	uint16_t hsfsts;
10115 	int32_t error = 1;
10116 	uint32_t i = 0;
10117 
10118 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
10119 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
10120 	hsflctl |= HSFCTL_GO;
10121 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
10122 
10123 	/* Wait till FDONE bit is set to 1 */
10124 	do {
10125 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10126 		if (hsfsts & HSFSTS_DONE)
10127 			break;
10128 		delay(1);
10129 		i++;
10130 	} while (i < timeout);
10131 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
10132 		error = 0;
10133 
10134 	return error;
10135 }
10136 
10137 /******************************************************************************
10138  * Reads a byte or word from the NVM using the ICH8 flash access registers.
10139  *
10140  * sc - The pointer to the hw structure
10141  * index - The index of the byte or word to read.
10142  * size - Size of data to read, 1=byte 2=word
10143  * data - Pointer to the word to store the value read.
10144  *****************************************************************************/
10145 static int32_t
10146 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
10147     uint32_t size, uint16_t *data)
10148 {
10149 	uint16_t hsfsts;
10150 	uint16_t hsflctl;
10151 	uint32_t flash_linear_address;
10152 	uint32_t flash_data = 0;
10153 	int32_t error = 1;
10154 	int32_t count = 0;
10155 
10156 	if (size < 1 || size > 2 || data == NULL ||
10157 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
10158 		return error;
10159 
10160 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
10161 	    sc->sc_ich8_flash_base;
10162 
10163 	do {
10164 		delay(1);
10165 		/* Steps */
10166 		error = wm_ich8_cycle_init(sc);
10167 		if (error)
10168 			break;
10169 
10170 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
10171 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
10172 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
10173 		    & HSFCTL_BCOUNT_MASK;
10174 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
10175 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
10176 
10177 		/*
10178 		 * Write the last 24 bits of index into Flash Linear address
10179 		 * field in Flash Address
10180 		 */
10181 		/* TODO: TBD maybe check the index against the size of flash */
10182 
10183 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
10184 
10185 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
10186 
10187 		/*
10188 		 * If FCERR is set, clear it and retry the whole sequence a
10189 		 * few more times; otherwise read the result out of the Flash
10190 		 * Data0 register, least significant byte first.
10192 		 */
10193 		if (error == 0) {
10194 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
10195 			if (size == 1)
10196 				*data = (uint8_t)(flash_data & 0x000000FF);
10197 			else if (size == 2)
10198 				*data = (uint16_t)(flash_data & 0x0000FFFF);
10199 			break;
10200 		} else {
10201 			/*
10202 			 * If we've gotten here, then things are probably
10203 			 * completely hosed, but if the error condition is
10204 			 * detected, it won't hurt to give it another try...
10205 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
10206 			 */
10207 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10208 			if (hsfsts & HSFSTS_ERR) {
10209 				/* Repeat for some time before giving up. */
10210 				continue;
10211 			} else if ((hsfsts & HSFSTS_DONE) == 0)
10212 				break;
10213 		}
10214 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
10215 
10216 	return error;
10217 }
10218 
10219 /******************************************************************************
10220  * Reads a single byte from the NVM using the ICH8 flash access registers.
10221  *
10222  * sc - pointer to wm_hw structure
10223  * index - The index of the byte to read.
10224  * data - Pointer to a byte to store the value read.
10225  *****************************************************************************/
10226 static int32_t
10227 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
10228 {
10229 	int32_t status;
10230 	uint16_t word = 0;
10231 
10232 	status = wm_read_ich8_data(sc, index, 1, &word);
10233 	if (status == 0)
10234 		*data = (uint8_t)word;
10235 	else
10236 		*data = 0;
10237 
10238 	return status;
10239 }
10240 
10241 /******************************************************************************
10242  * Reads a word from the NVM using the ICH8 flash access registers.
10243  *
10244  * sc - pointer to wm_hw structure
10245  * index - The starting byte index of the word to read.
10246  * data - Pointer to a word to store the value read.
10247  *****************************************************************************/
10248 static int32_t
10249 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
10250 {
10251 	int32_t status;
10252 
10253 	status = wm_read_ich8_data(sc, index, 2, data);
10254 	return status;
10255 }
10256 
10257 /******************************************************************************
10258  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
10259  * register.
10260  *
10261  * sc - Struct containing variables accessed by shared code
10262  * offset - offset of word in the EEPROM to read
10263  * data - word read from the EEPROM
10264  * words - number of words to read
10265  *****************************************************************************/
10266 static int
10267 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
10268 {
10269 	int32_t  error = 0;
10270 	uint32_t flash_bank = 0;
10271 	uint32_t act_offset = 0;
10272 	uint32_t bank_offset = 0;
10273 	uint16_t word = 0;
10274 	uint16_t i = 0;
10275 
10276 	/*
10277 	 * We need to know which is the valid flash bank.  In the event
10278 	 * that we didn't allocate eeprom_shadow_ram, we may not be
10279 	 * managing flash_bank.  So it cannot be trusted and needs
10280 	 * to be updated with each read.
10281 	 */
10282 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
10283 	if (error) {
10284 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
10285 			device_xname(sc->sc_dev)));
10286 		flash_bank = 0;
10287 	}
10288 
10289 	/*
10290 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
10291 	 * size
10292 	 */
10293 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
10294 
10295 	error = wm_get_swfwhw_semaphore(sc);
10296 	if (error) {
10297 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10298 		    __func__);
10299 		return error;
10300 	}
10301 
10302 	for (i = 0; i < words; i++) {
10303 		/* The NVM part needs a byte offset, hence * 2 */
10304 		act_offset = bank_offset + ((offset + i) * 2);
10305 		error = wm_read_ich8_word(sc, act_offset, &word);
10306 		if (error) {
10307 			aprint_error_dev(sc->sc_dev,
10308 			    "%s: failed to read NVM\n", __func__);
10309 			break;
10310 		}
10311 		data[i] = word;
10312 	}
10313 
10314 	wm_put_swfwhw_semaphore(sc);
10315 	return error;
10316 }
10317 
10318 /* iNVM */
10319 
10320 static int
10321 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
10322 {
10323 	int32_t  rv = -1;
10324 	uint32_t invm_dword;
10325 	uint16_t i;
10326 	uint8_t record_type, word_address;
10327 
10328 	for (i = 0; i < INVM_SIZE; i++) {
10329 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
10330 		/* Get record type */
10331 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
10332 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
10333 			break;
10334 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
10335 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
10336 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
10337 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
10338 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
10339 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
10340 			if (word_address == address) {
10341 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
10342 				rv = 0;
10343 				break;
10344 			}
10345 		}
10346 	}
10347 
10348 	return rv;
10349 }
10350 
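/*
 * The loop above walks the iNVM as a sequence of typed records: word
 * autoload records carry one word of data at a word address, while the
 * CSR and RSA-key autoload records are skipped by advancing the index
 * past their fixed payload sizes.  If the requested word is never
 * found, the initial -1 is returned so the caller can substitute a
 * default value.
 */
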
10351 static int
10352 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
10353 {
10354 	int rv = 0;
10355 	int i;
10356 
10357 	for (i = 0; i < words; i++) {
10358 		switch (offset + i) {
10359 		case NVM_OFF_MACADDR:
10360 		case NVM_OFF_MACADDR1:
10361 		case NVM_OFF_MACADDR2:
10362 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
10363 			if (rv != 0) {
10364 				data[i] = 0xffff;
10365 				rv = -1;
10366 			}
10367 			break;
10368 		case NVM_OFF_CFG2:
10369 			rv = wm_nvm_read_word_invm(sc, offset, data);
10370 			if (rv != 0) {
10371 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
10372 				rv = 0;
10373 			}
10374 			break;
10375 		case NVM_OFF_CFG4:
10376 			rv = wm_nvm_read_word_invm(sc, offset, data);
10377 			if (rv != 0) {
10378 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
10379 				rv = 0;
10380 			}
10381 			break;
10382 		case NVM_OFF_LED_1_CFG:
10383 			rv = wm_nvm_read_word_invm(sc, offset, data);
10384 			if (rv != 0) {
10385 				*data = NVM_LED_1_CFG_DEFAULT_I211;
10386 				rv = 0;
10387 			}
10388 			break;
10389 		case NVM_OFF_LED_0_2_CFG:
10390 			rv = wm_nvm_read_word_invm(sc, offset, data);
10391 			if (rv != 0) {
10392 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
10393 				rv = 0;
10394 			}
10395 			break;
10396 		case NVM_OFF_ID_LED_SETTINGS:
10397 			rv = wm_nvm_read_word_invm(sc, offset, data);
10398 			if (rv != 0) {
10399 				*data = ID_LED_RESERVED_FFFF;
10400 				rv = 0;
10401 			}
10402 			break;
10403 		default:
10404 			DPRINTF(WM_DEBUG_NVM,
10405 			    ("NVM word 0x%02x is not mapped.\n", offset));
10406 			*data = NVM_RESERVED_WORD;
10407 			break;
10408 		}
10409 	}
10410 
10411 	return rv;
10412 }
10413 
10414 /* Lock, detecting NVM type, validate checksum, version and read */
10415 
10416 /*
10417  * wm_nvm_acquire:
10418  *
10419  *	Perform the EEPROM handshake required on some chips.
10420  */
10421 static int
10422 wm_nvm_acquire(struct wm_softc *sc)
10423 {
10424 	uint32_t reg;
10425 	int x;
10426 	int ret = 0;
10427 
10428 	/* Flash-type NVM needs no handshake; always succeeds */
10429 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
10430 		return 0;
10431 
10432 	if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
10433 		ret = wm_get_swfwhw_semaphore(sc);
10434 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
10435 		/* This will also do wm_get_swsm_semaphore() if needed */
10436 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
10437 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
10438 		ret = wm_get_swsm_semaphore(sc);
10439 	}
10440 
10441 	if (ret) {
10442 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10443 			__func__);
10444 		return 1;
10445 	}
10446 
10447 	if (sc->sc_flags & WM_F_LOCK_EECD) {
10448 		reg = CSR_READ(sc, WMREG_EECD);
10449 
10450 		/* Request EEPROM access. */
10451 		reg |= EECD_EE_REQ;
10452 		CSR_WRITE(sc, WMREG_EECD, reg);
10453 
10454 		/* ..and wait for it to be granted. */
10455 		for (x = 0; x < 1000; x++) {
10456 			reg = CSR_READ(sc, WMREG_EECD);
10457 			if (reg & EECD_EE_GNT)
10458 				break;
10459 			delay(5);
10460 		}
10461 		if ((reg & EECD_EE_GNT) == 0) {
10462 			aprint_error_dev(sc->sc_dev,
10463 			    "could not acquire EEPROM GNT\n");
10464 			reg &= ~EECD_EE_REQ;
10465 			CSR_WRITE(sc, WMREG_EECD, reg);
10466 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
10467 				wm_put_swfwhw_semaphore(sc);
10468 			if (sc->sc_flags & WM_F_LOCK_SWFW)
10469 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
10470 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
10471 				wm_put_swsm_semaphore(sc);
10472 			return 1;
10473 		}
10474 	}
10475 
10476 	return 0;
10477 }
10478 
10479 /*
10480  * wm_nvm_release:
10481  *
10482  *	Release the EEPROM mutex.
10483  */
10484 static void
10485 wm_nvm_release(struct wm_softc *sc)
10486 {
10487 	uint32_t reg;
10488 
10489 	/* Flash-type NVM took no handshake, so there is nothing to release */
10490 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
10491 		return;
10492 
10493 	if (sc->sc_flags & WM_F_LOCK_EECD) {
10494 		reg = CSR_READ(sc, WMREG_EECD);
10495 		reg &= ~EECD_EE_REQ;
10496 		CSR_WRITE(sc, WMREG_EECD, reg);
10497 	}
10498 
10499 	if (sc->sc_flags & WM_F_LOCK_EXTCNF)
10500 		wm_put_swfwhw_semaphore(sc);
10501 	if (sc->sc_flags & WM_F_LOCK_SWFW)
10502 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
10503 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
10504 		wm_put_swsm_semaphore(sc);
10505 }
10506 
10507 static int
10508 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
10509 {
10510 	uint32_t eecd = 0;
10511 
10512 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
10513 	    || sc->sc_type == WM_T_82583) {
10514 		eecd = CSR_READ(sc, WMREG_EECD);
10515 
10516 		/* Isolate bits 15 & 16 */
10517 		eecd = ((eecd >> 15) & 0x03);
10518 
10519 		/* If both bits are set, device is Flash type */
10520 		if (eecd == 0x03)
10521 			return 0;
10522 	}
10523 	return 1;
10524 }
10525 
10526 static int
10527 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
10528 {
10529 	uint32_t eec;
10530 
10531 	eec = CSR_READ(sc, WMREG_EEC);
10532 	if ((eec & EEC_FLASH_DETECTED) != 0)
10533 		return 1;
10534 
10535 	return 0;
10536 }
10537 
10538 /*
10539  * wm_nvm_validate_checksum
10540  *
10541  * The checksum is valid when the first 64 16-bit words sum to NVM_CHECKSUM.
10542  */
10543 static int
10544 wm_nvm_validate_checksum(struct wm_softc *sc)
10545 {
10546 	uint16_t checksum;
10547 	uint16_t eeprom_data;
10548 #ifdef WM_DEBUG
10549 	uint16_t csum_wordaddr, valid_checksum;
10550 #endif
10551 	int i;
10552 
10553 	checksum = 0;
10554 
10555 	/* Don't check for I211 */
10556 	if (sc->sc_type == WM_T_I211)
10557 		return 0;
10558 
10559 #ifdef WM_DEBUG
10560 	if (sc->sc_type == WM_T_PCH_LPT) {
10561 		csum_wordaddr = NVM_OFF_COMPAT;
10562 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
10563 	} else {
10564 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
10565 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
10566 	}
10567 
10568 	/* Dump EEPROM image for debug */
10569 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
10570 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
10571 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
10572 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
10573 		if ((eeprom_data & valid_checksum) == 0) {
10574 			DPRINTF(WM_DEBUG_NVM,
10575 			    ("%s: NVM need to be updated (%04x != %04x)\n",
10576 				device_xname(sc->sc_dev), eeprom_data,
10577 				    valid_checksum));
10578 		}
10579 	}
10580 
10581 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
10582 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
10583 		for (i = 0; i < NVM_SIZE; i++) {
10584 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
10585 				printf("XXXX ");
10586 			else
10587 				printf("%04hx ", eeprom_data);
10588 			if (i % 8 == 7)
10589 				printf("\n");
10590 		}
10591 	}
10592 
10593 #endif /* WM_DEBUG */
10594 
10595 	for (i = 0; i < NVM_SIZE; i++) {
10596 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
10597 			return 1;
10598 		checksum += eeprom_data;
10599 	}
10600 
10601 	if (checksum != (uint16_t) NVM_CHECKSUM) {
10602 #ifdef WM_DEBUG
10603 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
10604 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
10605 #endif
10606 	}
10607 
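	/* Note that a checksum mismatch is only reported, never fatal. */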
10608 	return 0;
10609 }
10610 
10611 static void
10612 wm_nvm_version_invm(struct wm_softc *sc)
10613 {
10614 	uint32_t dword;
10615 
10616 	/*
10617 	 * Linux's code to decode the version is very strange, so we don't
10618 	 * follow it; we just decode word 61 as the document describes.
10619 	 * Perhaps it's not perfect though...
10620 	 *
10621 	 * Example:
10622 	 *
10623 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
10624 	 */
10625 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
10626 	dword = __SHIFTOUT(dword, INVM_VER_1);
10627 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
10628 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
10629 }
10630 
10631 static void
10632 wm_nvm_version(struct wm_softc *sc)
10633 {
10634 	uint16_t major, minor, build, patch;
10635 	uint16_t uid0, uid1;
10636 	uint16_t nvm_data;
10637 	uint16_t off;
10638 	bool check_version = false;
10639 	bool check_optionrom = false;
10640 	bool have_build = false;
10641 
10642 	/*
10643 	 * Version format:
10644 	 *
10645 	 * XYYZ
10646 	 * X0YZ
10647 	 * X0YY
10648 	 *
10649 	 * Example:
10650 	 *
10651 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
10652 	 *	82571	0x50a6	5.10.6?
10653 	 *	82572	0x506a	5.6.10?
10654 	 *	82572EI	0x5069	5.6.9?
10655 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
10656 	 *		0x2013	2.1.3?
10657 	 *	82583	0x10a0	1.10.0? (the document says it's the default value)
10658 	 */
10659 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
10660 	switch (sc->sc_type) {
10661 	case WM_T_82571:
10662 	case WM_T_82572:
10663 	case WM_T_82574:
10664 	case WM_T_82583:
10665 		check_version = true;
10666 		check_optionrom = true;
10667 		have_build = true;
10668 		break;
10669 	case WM_T_82575:
10670 	case WM_T_82576:
10671 	case WM_T_82580:
10672 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
10673 			check_version = true;
10674 		break;
10675 	case WM_T_I211:
10676 		wm_nvm_version_invm(sc);
10677 		goto printver;
10678 	case WM_T_I210:
10679 		if (!wm_nvm_get_flash_presence_i210(sc)) {
10680 			wm_nvm_version_invm(sc);
10681 			goto printver;
10682 		}
10683 		/* FALLTHROUGH */
10684 	case WM_T_I350:
10685 	case WM_T_I354:
10686 		check_version = true;
10687 		check_optionrom = true;
10688 		break;
10689 	default:
10690 		return;
10691 	}
10692 	if (check_version) {
10693 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
10694 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
10695 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
10696 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
10697 			build = nvm_data & NVM_BUILD_MASK;
10698 			have_build = true;
10699 		} else
10700 			minor = nvm_data & 0x00ff;
10701 
10702 		/* Decimal */
10703 		minor = (minor / 16) * 10 + (minor % 16);
10704 		sc->sc_nvm_ver_major = major;
10705 		sc->sc_nvm_ver_minor = minor;
10706 
10707 printver:
10708 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
10709 		    sc->sc_nvm_ver_minor);
10710 		if (have_build) {
10711 			sc->sc_nvm_ver_build = build;
10712 			aprint_verbose(".%d", build);
10713 		}
10714 	}
10715 	if (check_optionrom) {
10716 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
10717 		/* Option ROM Version */
10718 		if ((off != 0x0000) && (off != 0xffff)) {
10719 			off += NVM_COMBO_VER_OFF;
10720 			wm_nvm_read(sc, off + 1, 1, &uid1);
10721 			wm_nvm_read(sc, off, 1, &uid0);
10722 			if ((uid0 != 0) && (uid0 != 0xffff)
10723 			    && (uid1 != 0) && (uid1 != 0xffff)) {
10724 				/* 16bits */
10725 				major = uid0 >> 8;
10726 				build = (uid0 << 8) | (uid1 >> 8);
10727 				patch = uid1 & 0x00ff;
10728 				aprint_verbose(", option ROM Version %d.%d.%d",
10729 				    major, build, patch);
10730 			}
10731 		}
10732 	}
10733 
10734 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
10735 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
10736 }
10737 
10738 /*
10739  * wm_nvm_read:
10740  *
10741  *	Read data from the serial EEPROM.
10742  */
10743 static int
10744 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
10745 {
10746 	int rv;
10747 
10748 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
10749 		return 1;
10750 
10751 	if (wm_nvm_acquire(sc))
10752 		return 1;
10753 
10754 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
10755 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
10756 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
10757 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
10758 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
10759 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
10760 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
10761 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
10762 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
10763 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
10764 	else
10765 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
10766 
10767 	wm_nvm_release(sc);
10768 	return rv;
10769 }
10770 
10771 /*
10772  * Hardware semaphores.
10773  * Very complex...
10774  */
10775 
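/*
 * A note on the SWSM handshake below, following the usual reading of the
 * Intel documentation: SMBI is hardware-assisted and reads as 0 exactly
 * once when the semaphore is free (the read itself claims it), while
 * SWESMBI is claimed by writing 1 and reading it back; if firmware owns
 * the resource, the bit doesn't stick.  Both bits are dropped again in
 * wm_put_swsm_semaphore().
 */
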
10776 static int
10777 wm_get_swsm_semaphore(struct wm_softc *sc)
10778 {
10779 	int32_t timeout;
10780 	uint32_t swsm;
10781 
10782 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
10783 		/* Get the SW semaphore. */
10784 		timeout = sc->sc_nvm_wordsize + 1;
10785 		while (timeout) {
10786 			swsm = CSR_READ(sc, WMREG_SWSM);
10787 
10788 			if ((swsm & SWSM_SMBI) == 0)
10789 				break;
10790 
10791 			delay(50);
10792 			timeout--;
10793 		}
10794 
10795 		if (timeout == 0) {
10796 			aprint_error_dev(sc->sc_dev,
10797 			    "could not acquire SWSM SMBI\n");
10798 			return 1;
10799 		}
10800 	}
10801 
10802 	/* Get the FW semaphore. */
10803 	timeout = sc->sc_nvm_wordsize + 1;
10804 	while (timeout) {
10805 		swsm = CSR_READ(sc, WMREG_SWSM);
10806 		swsm |= SWSM_SWESMBI;
10807 		CSR_WRITE(sc, WMREG_SWSM, swsm);
10808 		/* If we managed to set the bit we got the semaphore. */
10809 		swsm = CSR_READ(sc, WMREG_SWSM);
10810 		if (swsm & SWSM_SWESMBI)
10811 			break;
10812 
10813 		delay(50);
10814 		timeout--;
10815 	}
10816 
10817 	if (timeout == 0) {
10818 		aprint_error_dev(sc->sc_dev,
10819 		    "could not acquire SWSM SWESMBI\n");
10820 		/* Release semaphores */
10821 		wm_put_swsm_semaphore(sc);
10822 		return 1;
10823 	}
10824 	return 0;
10825 }
10826 
10827 static void
10828 wm_put_swsm_semaphore(struct wm_softc *sc)
10829 {
10830 	uint32_t swsm;
10831 
10832 	swsm = CSR_READ(sc, WMREG_SWSM);
10833 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
10834 	CSR_WRITE(sc, WMREG_SWSM, swsm);
10835 }
10836 
10837 static int
10838 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
10839 {
10840 	uint32_t swfw_sync;
10841 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
10842 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
10843 	int timeout;
10844 
10845 	for (timeout = 0; timeout < 200; timeout++) {
10846 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
10847 			if (wm_get_swsm_semaphore(sc)) {
10848 				aprint_error_dev(sc->sc_dev,
10849 				    "%s: failed to get semaphore\n",
10850 				    __func__);
10851 				return 1;
10852 			}
10853 		}
10854 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
10855 		if ((swfw_sync & (swmask | fwmask)) == 0) {
10856 			swfw_sync |= swmask;
10857 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
10858 			if (sc->sc_flags & WM_F_LOCK_SWSM)
10859 				wm_put_swsm_semaphore(sc);
10860 			return 0;
10861 		}
10862 		if (sc->sc_flags & WM_F_LOCK_SWSM)
10863 			wm_put_swsm_semaphore(sc);
10864 		delay(5000);
10865 	}
10866 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
10867 	    device_xname(sc->sc_dev), mask, swfw_sync);
10868 	return 1;
10869 }
10870 
10871 static void
10872 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
10873 {
10874 	uint32_t swfw_sync;
10875 
10876 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
10877 		while (wm_get_swsm_semaphore(sc) != 0)
10878 			continue;
10879 	}
10880 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
10881 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
10882 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
10883 	if (sc->sc_flags & WM_F_LOCK_SWSM)
10884 		wm_put_swsm_semaphore(sc);
10885 }
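
/*
 * Illustrative sketch (not part of the driver): every successful
 * wm_get_swfw_semaphore() must be paired with wm_put_swfw_semaphore()
 * on the same mask, bracketing the guarded access.  SWFW_PHY0_SM is
 * assumed here to be the PHY 0 ownership bit.
 */
#if 0
static int
wm_swfw_usage_example(struct wm_softc *sc)
{

	if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM) != 0)
		return 1;	/* could not obtain ownership */
	/* ... access the hardware guarded by SWFW_PHY0_SM ... */
	wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
	return 0;
}
#endif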
10886 
10887 static int
10888 wm_get_swfwhw_semaphore(struct wm_softc *sc)
10889 {
10890 	uint32_t ext_ctrl;
10891 	int timeout;
10892 
10893 	for (timeout = 0; timeout < 200; timeout++) {
10894 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10895 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
10896 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
10897 
10898 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10899 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
10900 			return 0;
10901 		delay(5000);
10902 	}
10903 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
10904 	    device_xname(sc->sc_dev), ext_ctrl);
10905 	return 1;
10906 }
10907 
10908 static void
10909 wm_put_swfwhw_semaphore(struct wm_softc *sc)
10910 {
10911 	uint32_t ext_ctrl;
10912 
10913 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10914 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
10915 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
10916 }
10917 
10918 static int
10919 wm_get_hw_semaphore_82573(struct wm_softc *sc)
10920 {
10921 	int i = 0;
10922 	uint32_t reg;
10923 
10924 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10925 	do {
10926 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
10927 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
10928 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10929 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
10930 			break;
10931 		delay(2*1000);
10932 		i++;
10933 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
10934 
10935 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
10936 		wm_put_hw_semaphore_82573(sc);
10937 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
10938 		    device_xname(sc->sc_dev));
10939 		return -1;
10940 	}
10941 
10942 	return 0;
10943 }
10944 
10945 static void
10946 wm_put_hw_semaphore_82573(struct wm_softc *sc)
10947 {
10948 	uint32_t reg;
10949 
10950 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10951 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
10952 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
10953 }
10954 
10955 /*
10956  * Management mode and power management related subroutines.
10957  * BMC, AMT, suspend/resume and EEE.
10958  */
10959 
10960 #ifdef WM_WOL
10961 static int
10962 wm_check_mng_mode(struct wm_softc *sc)
10963 {
10964 	int rv;
10965 
10966 	switch (sc->sc_type) {
10967 	case WM_T_ICH8:
10968 	case WM_T_ICH9:
10969 	case WM_T_ICH10:
10970 	case WM_T_PCH:
10971 	case WM_T_PCH2:
10972 	case WM_T_PCH_LPT:
10973 		rv = wm_check_mng_mode_ich8lan(sc);
10974 		break;
10975 	case WM_T_82574:
10976 	case WM_T_82583:
10977 		rv = wm_check_mng_mode_82574(sc);
10978 		break;
10979 	case WM_T_82571:
10980 	case WM_T_82572:
10981 	case WM_T_82573:
10982 	case WM_T_80003:
10983 		rv = wm_check_mng_mode_generic(sc);
10984 		break;
10985 	default:
10986 		/* nothing to do */
10987 		rv = 0;
10988 		break;
10989 	}
10990 
10991 	return rv;
10992 }
10993 
10994 static int
10995 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
10996 {
10997 	uint32_t fwsm;
10998 
10999 	fwsm = CSR_READ(sc, WMREG_FWSM);
11000 
11001 	if (((fwsm & FWSM_FW_VALID) != 0)
11002 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
11003 		return 1;
11004 
11005 	return 0;
11006 }
11007 
11008 static int
11009 wm_check_mng_mode_82574(struct wm_softc *sc)
11010 {
11011 	uint16_t data;
11012 
11013 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
11014 
11015 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
11016 		return 1;
11017 
11018 	return 0;
11019 }
11020 
11021 static int
11022 wm_check_mng_mode_generic(struct wm_softc *sc)
11023 {
11024 	uint32_t fwsm;
11025 
11026 	fwsm = CSR_READ(sc, WMREG_FWSM);
11027 
11028 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
11029 		return 1;
11030 
11031 	return 0;
11032 }
11033 #endif /* WM_WOL */
11034 
11035 static int
11036 wm_enable_mng_pass_thru(struct wm_softc *sc)
11037 {
11038 	uint32_t manc, fwsm, factps;
11039 
11040 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
11041 		return 0;
11042 
11043 	manc = CSR_READ(sc, WMREG_MANC);
11044 
11045 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
11046 		device_xname(sc->sc_dev), manc));
11047 	if ((manc & MANC_RECV_TCO_EN) == 0)
11048 		return 0;
11049 
11050 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
11051 		fwsm = CSR_READ(sc, WMREG_FWSM);
11052 		factps = CSR_READ(sc, WMREG_FACTPS);
11053 		if (((factps & FACTPS_MNGCG) == 0)
11054 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
11055 			return 1;
11056 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
11057 		uint16_t data;
11058 
11059 		factps = CSR_READ(sc, WMREG_FACTPS);
11060 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
11061 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
11062 			device_xname(sc->sc_dev), factps, data));
11063 		if (((factps & FACTPS_MNGCG) == 0)
11064 		    && ((data & NVM_CFG2_MNGM_MASK)
11065 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
11066 			return 1;
11067 	} else if (((manc & MANC_SMBUS_EN) != 0)
11068 	    && ((manc & MANC_ASF_EN) == 0))
11069 		return 1;
11070 
11071 	return 0;
11072 }
11073 
11074 static bool
11075 wm_phy_resetisblocked(struct wm_softc *sc)
11076 {
11077 	bool blocked = false;
11078 	uint32_t reg;
11079 	int i = 0;
11080 
11081 	switch (sc->sc_type) {
11082 	case WM_T_ICH8:
11083 	case WM_T_ICH9:
11084 	case WM_T_ICH10:
11085 	case WM_T_PCH:
11086 	case WM_T_PCH2:
11087 	case WM_T_PCH_LPT:
11088 		do {
11089 			reg = CSR_READ(sc, WMREG_FWSM);
11090 			if ((reg & FWSM_RSPCIPHY) == 0) {
11091 				blocked = true;
11092 				delay(10*1000);
11093 				continue;
11094 			}
11095 			blocked = false;
11096 		} while (blocked && (i++ < 10));
11097 		return blocked;
11099 	case WM_T_82571:
11100 	case WM_T_82572:
11101 	case WM_T_82573:
11102 	case WM_T_82574:
11103 	case WM_T_82583:
11104 	case WM_T_80003:
11105 		reg = CSR_READ(sc, WMREG_MANC);
11106 		return (reg & MANC_BLK_PHY_RST_ON_IDE) != 0;
11111 	default:
11112 		/* PHY reset is never blocked on these chips */
11113 		break;
11114 	}
11115 
11116 	return false;
11117 }
11118 
11119 static void
11120 wm_get_hw_control(struct wm_softc *sc)
11121 {
11122 	uint32_t reg;
11123 
11124 	switch (sc->sc_type) {
11125 	case WM_T_82573:
11126 		reg = CSR_READ(sc, WMREG_SWSM);
11127 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
11128 		break;
11129 	case WM_T_82571:
11130 	case WM_T_82572:
11131 	case WM_T_82574:
11132 	case WM_T_82583:
11133 	case WM_T_80003:
11134 	case WM_T_ICH8:
11135 	case WM_T_ICH9:
11136 	case WM_T_ICH10:
11137 	case WM_T_PCH:
11138 	case WM_T_PCH2:
11139 	case WM_T_PCH_LPT:
11140 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
11141 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
11142 		break;
11143 	default:
11144 		break;
11145 	}
11146 }
11147 
11148 static void
11149 wm_release_hw_control(struct wm_softc *sc)
11150 {
11151 	uint32_t reg;
11152 
11153 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
11154 		return;
11155 
11156 	if (sc->sc_type == WM_T_82573) {
11157 		reg = CSR_READ(sc, WMREG_SWSM);
11158 		reg &= ~SWSM_DRV_LOAD;
11159 		CSR_WRITE(sc, WMREG_SWSM, reg);
11160 	} else {
11161 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
11162 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
11163 	}
11164 }
11165 
11166 static void
11167 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
11168 {
11169 	uint32_t reg;
11170 
11171 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11172 
11173 	if (on != 0)
11174 		reg |= EXTCNFCTR_GATE_PHY_CFG;
11175 	else
11176 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
11177 
11178 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
11179 }
11180 
11181 static void
11182 wm_smbustopci(struct wm_softc *sc)
11183 {
11184 	uint32_t fwsm;
11185 
11186 	fwsm = CSR_READ(sc, WMREG_FWSM);
11187 	if (((fwsm & FWSM_FW_VALID) == 0)
11188 	    && (wm_phy_resetisblocked(sc) == false)) {
11189 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
11190 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
11191 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11192 		CSR_WRITE_FLUSH(sc);
11193 		delay(10);
11194 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
11195 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11196 		CSR_WRITE_FLUSH(sc);
11197 		delay(50*1000);
11198 
11199 		/*
11200 		 * Gate automatic PHY configuration by hardware on non-managed
11201 		 * 82579
11202 		 */
11203 		if (sc->sc_type == WM_T_PCH2)
11204 			wm_gate_hw_phy_config_ich8lan(sc, 1);
11205 	}
11206 }
11207 
11208 static void
11209 wm_init_manageability(struct wm_softc *sc)
11210 {
11211 
11212 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
11213 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
11214 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
11215 
11216 		/* Disable hardware interception of ARP */
11217 		manc &= ~MANC_ARP_EN;
11218 
11219 		/* Enable receiving management packets to the host */
11220 		if (sc->sc_type >= WM_T_82571) {
11221 			manc |= MANC_EN_MNG2HOST;
11222 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
11223 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
11224 		}
11225 
11226 		CSR_WRITE(sc, WMREG_MANC, manc);
11227 	}
11228 }
11229 
11230 static void
11231 wm_release_manageability(struct wm_softc *sc)
11232 {
11233 
11234 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
11235 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
11236 
11237 		manc |= MANC_ARP_EN;
11238 		if (sc->sc_type >= WM_T_82571)
11239 			manc &= ~MANC_EN_MNG2HOST;
11240 
11241 		CSR_WRITE(sc, WMREG_MANC, manc);
11242 	}
11243 }
11244 
11245 static void
11246 wm_get_wakeup(struct wm_softc *sc)
11247 {
11248 
11249 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
11250 	switch (sc->sc_type) {
11251 	case WM_T_82573:
11252 	case WM_T_82583:
11253 		sc->sc_flags |= WM_F_HAS_AMT;
11254 		/* FALLTHROUGH */
11255 	case WM_T_80003:
11256 	case WM_T_82541:
11257 	case WM_T_82547:
11258 	case WM_T_82571:
11259 	case WM_T_82572:
11260 	case WM_T_82574:
11261 	case WM_T_82575:
11262 	case WM_T_82576:
11263 	case WM_T_82580:
11264 	case WM_T_I350:
11265 	case WM_T_I354:
11266 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
11267 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
11268 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
11269 		break;
11270 	case WM_T_ICH8:
11271 	case WM_T_ICH9:
11272 	case WM_T_ICH10:
11273 	case WM_T_PCH:
11274 	case WM_T_PCH2:
11275 	case WM_T_PCH_LPT:
11276 		sc->sc_flags |= WM_F_HAS_AMT;
11277 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
11278 		break;
11279 	default:
11280 		break;
11281 	}
11282 
11283 	/* 1: HAS_MANAGE */
11284 	if (wm_enable_mng_pass_thru(sc) != 0)
11285 		sc->sc_flags |= WM_F_HAS_MANAGE;
11286 
11287 #ifdef WM_DEBUG
11288 	printf("\n");
11289 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
11290 		printf("HAS_AMT,");
11291 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
11292 		printf("ARC_SUBSYS_VALID,");
11293 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
11294 		printf("ASF_FIRMWARE_PRES,");
11295 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
11296 		printf("HAS_MANAGE,");
11297 	printf("\n");
11298 #endif
11299 	/*
11300 	 * Note that the WOL flags are set after the EEPROM has been
11301 	 * reset.
11302 	 */
11303 }
11304 
11305 #ifdef WM_WOL
11306 /* WOL in the newer chipset interfaces (pchlan) */
11307 static void
11308 wm_enable_phy_wakeup(struct wm_softc *sc)
11309 {
11310 #if 0
11311 	uint16_t preg;
11312 
11313 	/* Copy MAC RARs to PHY RARs */
11314 
11315 	/* Copy MAC MTA to PHY MTA */
11316 
11317 	/* Configure PHY Rx Control register */
11318 
11319 	/* Enable PHY wakeup in MAC register */
11320 
11321 	/* Configure and enable PHY wakeup in PHY registers */
11322 
11323 	/* Activate PHY wakeup */
11324 
11325 	/* XXX */
11326 #endif
11327 }
11328 
11329 /* Power down workaround on D3 */
11330 static void
11331 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
11332 {
11333 	uint32_t reg;
11334 	int i;
11335 
11336 	for (i = 0; i < 2; i++) {
11337 		/* Disable link */
11338 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
11339 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
11340 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11341 
11342 		/*
11343 		 * Call gig speed drop workaround on Gig disable before
11344 		 * accessing any PHY registers
11345 		 */
11346 		if (sc->sc_type == WM_T_ICH8)
11347 			wm_gig_downshift_workaround_ich8lan(sc);
11348 
11349 		/* Write VR power-down enable */
11350 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
11351 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
11352 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
11353 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
11354 
11355 		/* Read it back and test */
11356 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
11357 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
11358 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
11359 			break;
11360 
11361 		/* Issue PHY reset and repeat at most one more time */
11362 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
11363 	}
11364 }
11365 
11366 static void
11367 wm_enable_wakeup(struct wm_softc *sc)
11368 {
11369 	uint32_t reg, pmreg;
11370 	pcireg_t pmode;
11371 
11372 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
11373 		&pmreg, NULL) == 0)
11374 		return;
11375 
11376 	/* Advertise the wakeup capability */
11377 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
11378 	    | CTRL_SWDPIN(3));
11379 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
11380 
11381 	/* ICH workaround */
11382 	switch (sc->sc_type) {
11383 	case WM_T_ICH8:
11384 	case WM_T_ICH9:
11385 	case WM_T_ICH10:
11386 	case WM_T_PCH:
11387 	case WM_T_PCH2:
11388 	case WM_T_PCH_LPT:
11389 		/* Disable gig during WOL */
11390 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
11391 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
11392 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11393 		if (sc->sc_type == WM_T_PCH)
11394 			wm_gmii_reset(sc);
11395 
11396 		/* Power down workaround */
11397 		if (sc->sc_phytype == WMPHY_82577) {
11398 			struct mii_softc *child;
11399 
11400 			/* Assume that the PHY is copper */
11401 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
11402 			if (child->mii_mpd_rev <= 2)
11403 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
11404 				    (768 << 5) | 25, 0x0444); /* magic num */
11405 		}
11406 		break;
11407 	default:
11408 		break;
11409 	}
11410 
11411 	/* Keep the laser running on fiber adapters */
11412 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
11413 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
11414 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
11415 		reg |= CTRL_EXT_SWDPIN(3);
11416 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11417 	}
11418 
11419 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
11420 #if 0	/* for the multicast packet */
11421 	reg |= WUFC_MC;
11422 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
11423 #endif
11424 
11425 	if (sc->sc_type == WM_T_PCH) {
11426 		wm_enable_phy_wakeup(sc);
11427 	} else {
11428 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
11429 		CSR_WRITE(sc, WMREG_WUFC, reg);
11430 	}
11431 
11432 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
11433 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
11434 		|| (sc->sc_type == WM_T_PCH2))
11435 		    && (sc->sc_phytype == WMPHY_IGP_3))
11436 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
11437 
11438 	/* Request PME */
11439 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
11440 #if 0
11441 	/* Disable WOL */
11442 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
11443 #else
11444 	/* For WOL */
11445 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
11446 #endif
11447 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
11448 }
11449 #endif /* WM_WOL */
11450 
11451 /* LPLU */
11452 
11453 static void
11454 wm_lplu_d0_disable(struct wm_softc *sc)
11455 {
11456 	uint32_t reg;
11457 
11458 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
11459 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
11460 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11461 }
11462 
11463 static void
11464 wm_lplu_d0_disable_pch(struct wm_softc *sc)
11465 {
11466 	uint32_t reg;
11467 
11468 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
11469 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
11470 	reg |= HV_OEM_BITS_ANEGNOW;
11471 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
11472 }
11473 
11474 /* EEE */
11475 
11476 static void
11477 wm_set_eee_i350(struct wm_softc *sc)
11478 {
11479 	uint32_t ipcnfg, eeer;
11480 
11481 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
11482 	eeer = CSR_READ(sc, WMREG_EEER);
11483 
11484 	if ((sc->sc_flags & WM_F_EEE) != 0) {
11485 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
11486 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
11487 		    | EEER_LPI_FC);
11488 	} else {
11489 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
11490 		ipcnfg &= ~IPCNFG_10BASE_TE;
11491 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
11492 		    | EEER_LPI_FC);
11493 	}
11494 
11495 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
11496 	CSR_WRITE(sc, WMREG_EEER, eeer);
11497 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
11498 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
11499 }
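
/*
 * Illustrative sketch (not part of the driver): wm_set_eee_i350() is
 * driven by the WM_F_EEE flag, so a hypothetical caller toggles EEE by
 * flipping the flag and re-running the routine.
 */
#if 0
static void
wm_eee_toggle_example(struct wm_softc *sc, bool enable)
{

	if (enable)
		sc->sc_flags |= WM_F_EEE;
	else
		sc->sc_flags &= ~WM_F_EEE;
	wm_set_eee_i350(sc);
}
#endif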
11500 
11501 /*
11502  * Workarounds (mainly PHY related).
11503  * Most of the PHY workarounds live in the PHY drivers themselves.
11504  */
11505 
11506 /* Work-around for 82566 Kumeran PCS lock loss */
11507 static void
11508 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
11509 {
11510 #if 0
11511 	int miistatus, active, i;
11512 	int reg;
11513 
11514 	miistatus = sc->sc_mii.mii_media_status;
11515 
11516 	/* If the link is not up, do nothing */
11517 	if ((miistatus & IFM_ACTIVE) == 0)
11518 		return;
11519 
11520 	active = sc->sc_mii.mii_media_active;
11521 
11522 	/* Nothing to do if the link is other than 1Gbps */
11523 	if (IFM_SUBTYPE(active) != IFM_1000_T)
11524 		return;
11525 
11526 	for (i = 0; i < 10; i++) {
11527 		/* read twice */
11528 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
11529 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
11530 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
11531 			goto out;	/* GOOD! */
11532 
11533 		/* Reset the PHY */
11534 		wm_gmii_reset(sc);
11535 		delay(5*1000);
11536 	}
11537 
11538 	/* Disable GigE link negotiation */
11539 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
11540 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
11541 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11542 
11543 	/*
11544 	 * Call gig speed drop workaround on Gig disable before accessing
11545 	 * any PHY registers.
11546 	 */
11547 	wm_gig_downshift_workaround_ich8lan(sc);
11548 
11549 out:
11550 	return;
11551 #endif
11552 }
11553 
11554 /* WOL from S5 stops working */
11555 static void
11556 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
11557 {
11558 	uint16_t kmrn_reg;
11559 
11560 	/* Only for igp3 */
11561 	if (sc->sc_phytype == WMPHY_IGP_3) {
11562 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
11563 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
11564 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
11565 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
11566 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
11567 	}
11568 }
11569 
11570 /*
11571  * Workarounds for the PCH PHYs
11572  * XXX should be moved to new PHY driver?
11573  */
11574 static void
11575 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
11576 {
11577 	if (sc->sc_phytype == WMPHY_82577)
11578 		wm_set_mdio_slow_mode_hv(sc);
11579 
11580 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
11581 
11582 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
11583 
11584 	/* 82578 */
11585 	if (sc->sc_phytype == WMPHY_82578) {
11586 		/* PCH rev. < 3 */
11587 		if (sc->sc_rev < 3) {
11588 			/* XXX 6 bit shift? Why? Is it page2? */
11589 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
11590 			    0x66c0);
11591 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
11592 			    0xffff);
11593 		}
11594 
11595 		/* XXX phy rev. < 2 */
11596 	}
11597 
11598 	/* Select page 0 */
11599 
11600 	/* XXX acquire semaphore */
11601 	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
11602 	/* XXX release semaphore */
11603 
11604 	/*
11605 	 * Configure the K1 Si workaround during phy reset assuming there is
11606 	 * link so that it disables K1 if link is in 1Gbps.
11607 	 */
11608 	wm_k1_gig_workaround_hv(sc, 1);
11609 }
11610 
11611 static void
11612 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
11613 {
11614 
11615 	wm_set_mdio_slow_mode_hv(sc);
11616 }
11617 
11618 static void
11619 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
11620 {
11621 	int k1_enable = sc->sc_nvm_k1_enabled;
11622 
11623 	/* XXX acquire semaphore */
11624 
11625 	if (link) {
11626 		k1_enable = 0;
11627 
11628 		/* Link stall fix for link up */
11629 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
11630 	} else {
11631 		/* Link stall fix for link down */
11632 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
11633 	}
11634 
11635 	wm_configure_k1_ich8lan(sc, k1_enable);
11636 
11637 	/* XXX release semaphore */
11638 }
11639 
11640 static void
11641 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
11642 {
11643 	uint32_t reg;
11644 
11645 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
11646 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
11647 	    reg | HV_KMRN_MDIO_SLOW);
11648 }
11649 
11650 static void
11651 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
11652 {
11653 	uint32_t ctrl, ctrl_ext, tmp;
11654 	uint16_t kmrn_reg;
11655 
11656 	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
11657 
11658 	if (k1_enable)
11659 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
11660 	else
11661 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
11662 
11663 	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
11664 
11665 	delay(20);
11666 
11667 	ctrl = CSR_READ(sc, WMREG_CTRL);
11668 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
11669 
11670 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
11671 	tmp |= CTRL_FRCSPD;
11672 
11673 	CSR_WRITE(sc, WMREG_CTRL, tmp);
11674 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
11675 	CSR_WRITE_FLUSH(sc);
11676 	delay(20);
11677 
11678 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
11679 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
11680 	CSR_WRITE_FLUSH(sc);
11681 	delay(20);
11682 }
11683 
11684 /* Special case: the 82575 needs manual initialization ... */
11685 static void
11686 wm_reset_init_script_82575(struct wm_softc *sc)
11687 {
11688 	/*
11689 	 * Remark: this is untested code - we have no board without EEPROM.
11690 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
11691 	 */
11692 
11693 	/* SerDes configuration via SERDESCTRL */
11694 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
11695 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
11696 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
11697 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
11698 
11699 	/* CCM configuration via CCMCTL register */
11700 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
11701 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
11702 
11703 	/* PCIe lanes configuration */
11704 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
11705 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
11706 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
11707 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
11708 
11709 	/* PCIe PLL Configuration */
11710 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
11711 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
11712 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
11713 }
11714 
11715 static void
11716 wm_reset_mdicnfg_82580(struct wm_softc *sc)
11717 {
11718 	uint32_t reg;
11719 	uint16_t nvmword;
11720 	int rv;
11721 
11722 	if ((sc->sc_flags & WM_F_SGMII) == 0)
11723 		return;
11724 
11725 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
11726 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
11727 	if (rv != 0) {
11728 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
11729 		    __func__);
11730 		return;
11731 	}
11732 
11733 	reg = CSR_READ(sc, WMREG_MDICNFG);
11734 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
11735 		reg |= MDICNFG_DEST;
11736 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
11737 		reg |= MDICNFG_COM_MDIO;
11738 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
11739 }
11740 
11741 /*
11742  * I210 Errata 25 and I211 Errata 10
11743  * Slow System Clock.
11744  */
11745 static void
11746 wm_pll_workaround_i210(struct wm_softc *sc)
11747 {
11748 	uint32_t mdicnfg, wuc;
11749 	uint32_t reg;
11750 	pcireg_t pcireg;
11751 	uint32_t pmreg;
11752 	uint16_t nvmword, tmp_nvmword;
11753 	int phyval;
11754 	bool wa_done = false;
11755 	int i;
11756 
11757 	/* Save WUC and MDICNFG registers */
11758 	wuc = CSR_READ(sc, WMREG_WUC);
11759 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
11760 
11761 	reg = mdicnfg & ~MDICNFG_DEST;
11762 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
11763 
11764 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
11765 		nvmword = INVM_DEFAULT_AL;
11766 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
11767 
11768 	/* Get Power Management cap offset */
11769 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
11770 		&pmreg, NULL) == 0)
11771 		return;
11772 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
11773 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
11774 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
11775 
11776 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
11777 			break; /* OK */
11778 		}
11779 
11780 		wa_done = true;
11781 		/* Directly reset the internal PHY */
11782 		reg = CSR_READ(sc, WMREG_CTRL);
11783 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
11784 
11785 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
11786 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
11787 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11788 
11789 		CSR_WRITE(sc, WMREG_WUC, 0);
11790 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
11791 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
11792 
11793 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
11794 		    pmreg + PCI_PMCSR);
11795 		pcireg |= PCI_PMCSR_STATE_D3;
11796 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
11797 		    pmreg + PCI_PMCSR, pcireg);
11798 		delay(1000);
11799 		pcireg &= ~PCI_PMCSR_STATE_D3;
11800 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
11801 		    pmreg + PCI_PMCSR, pcireg);
11802 
11803 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
11804 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
11805 
11806 		/* Restore WUC register */
11807 		CSR_WRITE(sc, WMREG_WUC, wuc);
11808 	}
11809 
11810 	/* Restore MDICNFG setting */
11811 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
11812 	if (wa_done)
11813 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
11814 }
11815