xref: /netbsd-src/sys/dev/pci/if_wm.c (revision f89f6560d453f5e37386cc7938c072d2f528b9fa)
1 /*	$NetBSD: if_wm.c,v 1.316 2015/04/17 02:54:15 msaitoh Exp $	*/
2 
3 /*
4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*******************************************************************************
39 
40   Copyright (c) 2001-2005, Intel Corporation
41   All rights reserved.
42 
43   Redistribution and use in source and binary forms, with or without
44   modification, are permitted provided that the following conditions are met:
45 
46    1. Redistributions of source code must retain the above copyright notice,
47       this list of conditions and the following disclaimer.
48 
49    2. Redistributions in binary form must reproduce the above copyright
50       notice, this list of conditions and the following disclaimer in the
51       documentation and/or other materials provided with the distribution.
52 
53    3. Neither the name of the Intel Corporation nor the names of its
54       contributors may be used to endorse or promote products derived from
55       this software without specific prior written permission.
56 
57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67   POSSIBILITY OF SUCH DAMAGE.
68 
69 *******************************************************************************/
70 /*
71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72  *
73  * TODO (in order of importance):
74  *
75  *	- Check XXX'ed comments
76  *	- EEE (Energy Efficiency Ethernet)
77  *	- MSI/MSI-X
78  *	- Virtual Function
79  *	- Set LED correctly (based on contents in EEPROM)
80  *	- Rework how parameters are loaded from the EEPROM.
81  */
82 
83 #include <sys/cdefs.h>
84 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.316 2015/04/17 02:54:15 msaitoh Exp $");
85 
86 #ifdef _KERNEL_OPT
87 #include "opt_net_mpsafe.h"
88 #endif
89 
90 #include <sys/param.h>
91 #include <sys/systm.h>
92 #include <sys/callout.h>
93 #include <sys/mbuf.h>
94 #include <sys/malloc.h>
95 #include <sys/kernel.h>
96 #include <sys/socket.h>
97 #include <sys/ioctl.h>
98 #include <sys/errno.h>
99 #include <sys/device.h>
100 #include <sys/queue.h>
101 #include <sys/syslog.h>
102 
103 #include <sys/rndsource.h>
104 
105 #include <net/if.h>
106 #include <net/if_dl.h>
107 #include <net/if_media.h>
108 #include <net/if_ether.h>
109 
110 #include <net/bpf.h>
111 
112 #include <netinet/in.h>			/* XXX for struct ip */
113 #include <netinet/in_systm.h>		/* XXX for struct ip */
114 #include <netinet/ip.h>			/* XXX for struct ip */
115 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
116 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
117 
118 #include <sys/bus.h>
119 #include <sys/intr.h>
120 #include <machine/endian.h>
121 
122 #include <dev/mii/mii.h>
123 #include <dev/mii/miivar.h>
124 #include <dev/mii/miidevs.h>
125 #include <dev/mii/mii_bitbang.h>
126 #include <dev/mii/ikphyreg.h>
127 #include <dev/mii/igphyreg.h>
128 #include <dev/mii/igphyvar.h>
129 #include <dev/mii/inbmphyreg.h>
130 
131 #include <dev/pci/pcireg.h>
132 #include <dev/pci/pcivar.h>
133 #include <dev/pci/pcidevs.h>
134 
135 #include <dev/pci/if_wmreg.h>
136 #include <dev/pci/if_wmvar.h>
137 
138 #ifdef WM_DEBUG
139 #define	WM_DEBUG_LINK		0x01
140 #define	WM_DEBUG_TX		0x02
141 #define	WM_DEBUG_RX		0x04
142 #define	WM_DEBUG_GMII		0x08
143 #define	WM_DEBUG_MANAGE		0x10
144 #define	WM_DEBUG_NVM		0x20
145 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
146     | WM_DEBUG_MANAGE | WM_DEBUG_NVM;
147 
148 #define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
149 #else
150 #define	DPRINTF(x, y)	/* nothing */
151 #endif /* WM_DEBUG */
152 
153 #ifdef NET_MPSAFE
154 #define WM_MPSAFE	1
155 #endif
156 
157 /*
158  * Transmit descriptor list size.  Due to errata, we can only have
159  * 256 hardware descriptors in the ring on < 82544, but we use 4096
160  * on >= 82544.  We tell the upper layers that they can queue a lot
161  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
162  * of them at a time.
163  *
164  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
165  * chains containing many small mbufs have been observed in zero-copy
166  * situations with jumbo frames.
167  */
168 #define	WM_NTXSEGS		256
169 #define	WM_IFQUEUELEN		256
170 #define	WM_TXQUEUELEN_MAX	64
171 #define	WM_TXQUEUELEN_MAX_82547	16
172 #define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
173 #define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
174 #define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
175 #define	WM_NTXDESC_82542	256
176 #define	WM_NTXDESC_82544	4096
177 #define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
178 #define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
179 #define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
180 #define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
181 #define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
182 
183 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
184 
185 /*
186  * Receive descriptor list size.  We have one Rx buffer for normal
187  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
188  * packet.  We allocate 256 receive descriptors, each with a 2k
189  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
190  */
191 #define	WM_NRXDESC		256
192 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
193 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
194 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
195 
196 /*
197  * Control structures are DMA'd to the i82542 chip.  We allocate them in
198  * a single clump that maps to a single DMA segment to make several things
199  * easier.
200  */
201 struct wm_control_data_82544 {
202 	/*
203 	 * The receive descriptors.
204 	 */
205 	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
206 
207 	/*
208 	 * The transmit descriptors.  Put these at the end, because
209 	 * we might use a smaller number of them.
210 	 */
211 	union {
212 		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
213 		nq_txdesc_t      wcdu_nq_txdescs[WM_NTXDESC_82544];
214 	} wdc_u;
215 };
216 
217 struct wm_control_data_82542 {
218 	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
219 	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
220 };
221 
222 #define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
223 #define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
224 #define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
225 
226 /*
227  * Software state for transmit jobs.
228  */
229 struct wm_txsoft {
230 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
231 	bus_dmamap_t txs_dmamap;	/* our DMA map */
232 	int txs_firstdesc;		/* first descriptor in packet */
233 	int txs_lastdesc;		/* last descriptor in packet */
234 	int txs_ndesc;			/* # of descriptors used */
235 };
236 
237 /*
238  * Software state for receive buffers.  Each descriptor gets a
239  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
240  * more than one buffer, we chain them together.
241  */
242 struct wm_rxsoft {
243 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
244 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
245 };
246 
247 #define WM_LINKUP_TIMEOUT	50
248 
249 static uint16_t swfwphysem[] = {
250 	SWFW_PHY0_SM,
251 	SWFW_PHY1_SM,
252 	SWFW_PHY2_SM,
253 	SWFW_PHY3_SM
254 };
255 
256 /*
257  * Software state per device.
258  */
259 struct wm_softc {
260 	device_t sc_dev;		/* generic device information */
261 	bus_space_tag_t sc_st;		/* bus space tag */
262 	bus_space_handle_t sc_sh;	/* bus space handle */
263 	bus_size_t sc_ss;		/* bus space size */
264 	bus_space_tag_t sc_iot;		/* I/O space tag */
265 	bus_space_handle_t sc_ioh;	/* I/O space handle */
266 	bus_size_t sc_ios;		/* I/O space size */
267 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
268 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
269 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
270 
271 	struct ethercom sc_ethercom;	/* ethernet common data */
272 	struct mii_data sc_mii;		/* MII/media information */
273 
274 	pci_chipset_tag_t sc_pc;
275 	pcitag_t sc_pcitag;
276 	int sc_bus_speed;		/* PCI/PCIX bus speed */
277 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
278 
279 	uint16_t sc_pcidevid;		/* PCI device ID */
280 	wm_chip_type sc_type;		/* MAC type */
281 	int sc_rev;			/* MAC revision */
282 	wm_phy_type sc_phytype;		/* PHY type */
283 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
284 #define	WM_MEDIATYPE_UNKNOWN		0x00
285 #define	WM_MEDIATYPE_FIBER		0x01
286 #define	WM_MEDIATYPE_COPPER		0x02
287 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
288 	int sc_funcid;			/* unit number of the chip (0 to 3) */
289 	int sc_flags;			/* flags; see below */
290 	int sc_if_flags;		/* last if_flags */
291 	int sc_flowflags;		/* 802.3x flow control flags */
292 	int sc_align_tweak;
293 
294 	void *sc_ih;			/* interrupt cookie */
295 	callout_t sc_tick_ch;		/* tick callout */
296 	bool sc_stopping;
297 
298 	int sc_nvm_addrbits;		/* NVM address bits */
299 	unsigned int sc_nvm_wordsize;		/* NVM word size */
300 	int sc_ich8_flash_base;
301 	int sc_ich8_flash_bank_size;
302 	int sc_nvm_k1_enabled;
303 
304 	/* Software state for the transmit and receive descriptors. */
305 	int sc_txnum;			/* must be a power of two */
306 	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
307 	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];
308 
309 	/* Control data structures. */
310 	int sc_ntxdesc;			/* must be a power of two */
311 	struct wm_control_data_82544 *sc_control_data;
312 	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
313 	bus_dma_segment_t sc_cd_seg;	/* control data segment */
314 	int sc_cd_rseg;			/* real number of control segment */
315 	size_t sc_cd_size;		/* control data size */
316 #define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
317 #define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
318 #define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
319 #define	sc_rxdescs	sc_control_data->wcd_rxdescs
320 
321 #ifdef WM_EVENT_COUNTERS
322 	/* Event counters. */
323 	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
324 	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
325 	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
326 	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
327 	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
328 	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
329 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
330 
331 	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
332 	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
333 	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
334 	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
335 	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
336 	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
337 	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
338 	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */
339 
340 	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
341 	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */
342 
343 	struct evcnt sc_ev_tu;		/* Tx underrun */
344 
345 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
346 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
347 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
348 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
349 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
350 #endif /* WM_EVENT_COUNTERS */
351 
352 	bus_addr_t sc_tdt_reg;		/* offset of TDT register */
353 
354 	int	sc_txfree;		/* number of free Tx descriptors */
355 	int	sc_txnext;		/* next ready Tx descriptor */
356 
357 	int	sc_txsfree;		/* number of free Tx jobs */
358 	int	sc_txsnext;		/* next free Tx job */
359 	int	sc_txsdirty;		/* dirty Tx jobs */
360 
361 	/* These 5 variables are used only on the 82547. */
362 	int	sc_txfifo_size;		/* Tx FIFO size */
363 	int	sc_txfifo_head;		/* current head of FIFO */
364 	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
365 	int	sc_txfifo_stall;	/* Tx FIFO is stalled */
366 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
367 
368 	bus_addr_t sc_rdt_reg;		/* offset of RDT register */
369 
370 	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
371 	int	sc_rxdiscard;
372 	int	sc_rxlen;
373 	struct mbuf *sc_rxhead;
374 	struct mbuf *sc_rxtail;
375 	struct mbuf **sc_rxtailp;
376 
377 	uint32_t sc_ctrl;		/* prototype CTRL register */
378 #if 0
379 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
380 #endif
381 	uint32_t sc_icr;		/* prototype interrupt bits */
382 	uint32_t sc_itr;		/* prototype intr throttling reg */
383 	uint32_t sc_tctl;		/* prototype TCTL register */
384 	uint32_t sc_rctl;		/* prototype RCTL register */
385 	uint32_t sc_txcw;		/* prototype TXCW register */
386 	uint32_t sc_tipg;		/* prototype TIPG register */
387 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
388 	uint32_t sc_pba;		/* prototype PBA register */
389 
390 	int sc_tbi_linkup;		/* TBI link status */
391 	int sc_tbi_anegticks;		/* autonegotiation ticks */
392 	int sc_tbi_ticks;		/* tbi ticks */
393 
394 	int sc_mchash_type;		/* multicast filter offset */
395 
396 	krndsource_t rnd_source;	/* random source */
397 
398 	kmutex_t *sc_tx_lock;		/* lock for tx operations */
399 	kmutex_t *sc_rx_lock;		/* lock for rx operations */
400 };
401 
402 #define WM_TX_LOCK(_sc)		if ((_sc)->sc_tx_lock) mutex_enter((_sc)->sc_tx_lock)
403 #define WM_TX_UNLOCK(_sc)	if ((_sc)->sc_tx_lock) mutex_exit((_sc)->sc_tx_lock)
404 #define WM_TX_LOCKED(_sc)	(!(_sc)->sc_tx_lock || mutex_owned((_sc)->sc_tx_lock))
405 #define WM_RX_LOCK(_sc)		if ((_sc)->sc_rx_lock) mutex_enter((_sc)->sc_rx_lock)
406 #define WM_RX_UNLOCK(_sc)	if ((_sc)->sc_rx_lock) mutex_exit((_sc)->sc_rx_lock)
407 #define WM_RX_LOCKED(_sc)	(!(_sc)->sc_rx_lock || mutex_owned((_sc)->sc_rx_lock))
408 #define WM_BOTH_LOCK(_sc)	do {WM_TX_LOCK(_sc); WM_RX_LOCK(_sc);} while (0)
409 #define WM_BOTH_UNLOCK(_sc)	do {WM_RX_UNLOCK(_sc); WM_TX_UNLOCK(_sc);} while (0)
410 #define WM_BOTH_LOCKED(_sc)	(WM_TX_LOCKED(_sc) && WM_RX_LOCKED(_sc))
411 
412 #ifdef WM_MPSAFE
413 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
414 #else
415 #define CALLOUT_FLAGS	0
416 #endif
417 
418 #define	WM_RXCHAIN_RESET(sc)						\
419 do {									\
420 	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
421 	*(sc)->sc_rxtailp = NULL;					\
422 	(sc)->sc_rxlen = 0;						\
423 } while (/*CONSTCOND*/0)
424 
425 #define	WM_RXCHAIN_LINK(sc, m)						\
426 do {									\
427 	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
428 	(sc)->sc_rxtailp = &(m)->m_next;				\
429 } while (/*CONSTCOND*/0)
430 
431 #ifdef WM_EVENT_COUNTERS
432 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
433 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
434 #else
435 #define	WM_EVCNT_INCR(ev)	/* nothing */
436 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
437 #endif
438 
439 #define	CSR_READ(sc, reg)						\
440 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
441 #define	CSR_WRITE(sc, reg, val)						\
442 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
443 #define	CSR_WRITE_FLUSH(sc)						\
444 	(void) CSR_READ((sc), WMREG_STATUS)
445 
446 #define ICH8_FLASH_READ32(sc, reg) \
447 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
448 #define ICH8_FLASH_WRITE32(sc, reg, data) \
449 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
450 
451 #define ICH8_FLASH_READ16(sc, reg) \
452 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
453 #define ICH8_FLASH_WRITE16(sc, reg, data) \
454 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
455 
456 #define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
457 #define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))
458 
459 #define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
460 #define	WM_CDTXADDR_HI(sc, x)						\
461 	(sizeof(bus_addr_t) == 8 ?					\
462 	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)
463 
464 #define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
465 #define	WM_CDRXADDR_HI(sc, x)						\
466 	(sizeof(bus_addr_t) == 8 ?					\
467 	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
468 
469 #define	WM_CDTXSYNC(sc, x, n, ops)					\
470 do {									\
471 	int __x, __n;							\
472 									\
473 	__x = (x);							\
474 	__n = (n);							\
475 									\
476 	/* If it will wrap around, sync to the end of the ring. */	\
477 	if ((__x + __n) > WM_NTXDESC(sc)) {				\
478 		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
479 		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
480 		    (WM_NTXDESC(sc) - __x), (ops));			\
481 		__n -= (WM_NTXDESC(sc) - __x);				\
482 		__x = 0;						\
483 	}								\
484 									\
485 	/* Now sync whatever is left. */				\
486 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
487 	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
488 } while (/*CONSTCOND*/0)
489 
490 #define	WM_CDRXSYNC(sc, x, ops)						\
491 do {									\
492 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
493 	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
494 } while (/*CONSTCOND*/0)
495 
496 #define	WM_INIT_RXDESC(sc, x)						\
497 do {									\
498 	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
499 	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
500 	struct mbuf *__m = __rxs->rxs_mbuf;				\
501 									\
502 	/*								\
503 	 * Note: We scoot the packet forward 2 bytes in the buffer	\
504 	 * so that the payload after the Ethernet header is aligned	\
505 	 * to a 4-byte boundary.					\
506 	 *								\
507 	 * XXX BRAINDAMAGE ALERT!					\
508 	 * The stupid chip uses the same size for every buffer, which	\
509 	 * is set in the Receive Control register.  We are using the 2K	\
510 	 * size option, but what we REALLY want is (2K - 2)!  For this	\
511 	 * reason, we can't "scoot" packets longer than the standard	\
512 	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
513 	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
514 	 * the upper layer copy the headers.				\
515 	 */								\
516 	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
517 									\
518 	wm_set_dma_addr(&__rxd->wrx_addr,				\
519 	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
520 	__rxd->wrx_len = 0;						\
521 	__rxd->wrx_cksum = 0;						\
522 	__rxd->wrx_status = 0;						\
523 	__rxd->wrx_errors = 0;						\
524 	__rxd->wrx_special = 0;						\
525 	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
526 									\
527 	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
528 } while (/*CONSTCOND*/0)
529 
530 /*
531  * Register read/write functions.
532  * Other than CSR_{READ|WRITE}().
533  */
534 #if 0
535 static inline uint32_t wm_io_read(struct wm_softc *, int);
536 #endif
537 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
538 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
539 	uint32_t, uint32_t);
540 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
541 
542 /*
543  * Device driver interface functions and commonly used functions.
544  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
545  */
546 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
547 static int	wm_match(device_t, cfdata_t, void *);
548 static void	wm_attach(device_t, device_t, void *);
549 static int	wm_detach(device_t, int);
550 static bool	wm_suspend(device_t, const pmf_qual_t *);
551 static bool	wm_resume(device_t, const pmf_qual_t *);
552 static void	wm_watchdog(struct ifnet *);
553 static void	wm_tick(void *);
554 static int	wm_ifflags_cb(struct ethercom *);
555 static int	wm_ioctl(struct ifnet *, u_long, void *);
556 /* MAC address related */
557 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
558 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
559 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
560 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
561 static void	wm_set_filter(struct wm_softc *);
562 /* Reset and init related */
563 static void	wm_set_vlan(struct wm_softc *);
564 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
565 static void	wm_get_auto_rd_done(struct wm_softc *);
566 static void	wm_lan_init_done(struct wm_softc *);
567 static void	wm_get_cfg_done(struct wm_softc *);
568 static void	wm_initialize_hardware_bits(struct wm_softc *);
569 static void	wm_reset(struct wm_softc *);
570 static int	wm_add_rxbuf(struct wm_softc *, int);
571 static void	wm_rxdrain(struct wm_softc *);
572 static int	wm_init(struct ifnet *);
573 static int	wm_init_locked(struct ifnet *);
574 static void	wm_stop(struct ifnet *, int);
575 static void	wm_stop_locked(struct ifnet *, int);
576 static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
577     uint32_t *, uint8_t *);
578 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
579 static void	wm_82547_txfifo_stall(void *);
580 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
581 /* Start */
582 static void	wm_start(struct ifnet *);
583 static void	wm_start_locked(struct ifnet *);
584 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
585     uint32_t *, uint32_t *, bool *);
586 static void	wm_nq_start(struct ifnet *);
587 static void	wm_nq_start_locked(struct ifnet *);
588 /* Interrupt */
589 static void	wm_txintr(struct wm_softc *);
590 static void	wm_rxintr(struct wm_softc *);
591 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
592 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
593 static void	wm_linkintr(struct wm_softc *, uint32_t);
594 static int	wm_intr(void *);
595 
596 /*
597  * Media related.
598  * GMII, SGMII, TBI, SERDES and SFP.
599  */
600 /* GMII related */
601 static void	wm_gmii_reset(struct wm_softc *);
602 static int	wm_get_phy_id_82575(struct wm_softc *);
603 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
604 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
605 static int	wm_gmii_mediachange(struct ifnet *);
606 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
607 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
608 static int	wm_gmii_i82543_readreg(device_t, int, int);
609 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
610 static int	wm_gmii_i82544_readreg(device_t, int, int);
611 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
612 static int	wm_gmii_i80003_readreg(device_t, int, int);
613 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
614 static int	wm_gmii_bm_readreg(device_t, int, int);
615 static void	wm_gmii_bm_writereg(device_t, int, int, int);
616 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
617 static int	wm_gmii_hv_readreg(device_t, int, int);
618 static void	wm_gmii_hv_writereg(device_t, int, int, int);
619 static int	wm_gmii_82580_readreg(device_t, int, int);
620 static void	wm_gmii_82580_writereg(device_t, int, int, int);
621 static void	wm_gmii_statchg(struct ifnet *);
622 static int	wm_kmrn_readreg(struct wm_softc *, int);
623 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
624 /* SGMII */
625 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
626 static int	wm_sgmii_readreg(device_t, int, int);
627 static void	wm_sgmii_writereg(device_t, int, int, int);
628 /* TBI related */
629 static int	wm_check_for_link(struct wm_softc *);
630 static void	wm_tbi_mediainit(struct wm_softc *);
631 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
632 static int	wm_tbi_mediachange(struct ifnet *);
633 static void	wm_tbi_set_linkled(struct wm_softc *);
634 static void	wm_tbi_check_link(struct wm_softc *);
635 /* SFP related */
636 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
637 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
638 
639 /*
640  * NVM related.
641  * Microwire, SPI (w/wo EERD) and Flash.
642  */
643 /* Misc functions */
644 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
645 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
646 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
647 /* Microwire */
648 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
649 /* SPI */
650 static int	wm_nvm_ready_spi(struct wm_softc *);
651 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
652 /* Using with EERD */
653 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
654 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
655 /* Flash */
656 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
657     unsigned int *);
658 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
659 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
660 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
661 	uint16_t *);
662 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
663 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
664 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
665 /* Lock, detecting NVM type, validate checksum and read */
666 static int	wm_nvm_acquire(struct wm_softc *);
667 static void	wm_nvm_release(struct wm_softc *);
668 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
669 static int	wm_nvm_validate_checksum(struct wm_softc *);
670 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
671 
672 /*
673  * Hardware semaphores.
674  * Very complexed...
675  */
676 static int	wm_get_swsm_semaphore(struct wm_softc *);
677 static void	wm_put_swsm_semaphore(struct wm_softc *);
678 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
679 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
680 static int	wm_get_swfwhw_semaphore(struct wm_softc *);
681 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
682 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
683 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
684 
685 /*
686  * Management mode and power management related subroutines.
687  * BMC, AMT, suspend/resume and EEE.
688  */
689 static int	wm_check_mng_mode(struct wm_softc *);
690 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
691 static int	wm_check_mng_mode_82574(struct wm_softc *);
692 static int	wm_check_mng_mode_generic(struct wm_softc *);
693 static int	wm_enable_mng_pass_thru(struct wm_softc *);
694 static int	wm_check_reset_block(struct wm_softc *);
695 static void	wm_get_hw_control(struct wm_softc *);
696 static void	wm_release_hw_control(struct wm_softc *);
697 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
698 static void	wm_smbustopci(struct wm_softc *);
699 static void	wm_init_manageability(struct wm_softc *);
700 static void	wm_release_manageability(struct wm_softc *);
701 static void	wm_get_wakeup(struct wm_softc *);
702 #ifdef WM_WOL
703 static void	wm_enable_phy_wakeup(struct wm_softc *);
704 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
705 static void	wm_enable_wakeup(struct wm_softc *);
706 #endif
707 /* EEE */
708 static void	wm_set_eee_i350(struct wm_softc *);
709 
710 /*
711  * Workarounds (mainly PHY related).
712  * Basically, PHY's workarounds are in the PHY drivers.
713  */
714 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
715 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
716 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
717 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
718 static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
719 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
720 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
721 static void	wm_reset_init_script_82575(struct wm_softc *);
722 
723 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
724     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
725 
726 /*
727  * Devices supported by this driver.
728  */
729 static const struct wm_product {
730 	pci_vendor_id_t		wmp_vendor;
731 	pci_product_id_t	wmp_product;
732 	const char		*wmp_name;
733 	wm_chip_type		wmp_type;
734 	uint32_t		wmp_flags;
735 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
736 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
737 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
738 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
739 #define WMP_MEDIATYPE(x)	((x) & 0x03)
740 } wm_products[] = {
741 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
742 	  "Intel i82542 1000BASE-X Ethernet",
743 	  WM_T_82542_2_1,	WMP_F_FIBER },
744 
745 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
746 	  "Intel i82543GC 1000BASE-X Ethernet",
747 	  WM_T_82543,		WMP_F_FIBER },
748 
749 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
750 	  "Intel i82543GC 1000BASE-T Ethernet",
751 	  WM_T_82543,		WMP_F_COPPER },
752 
753 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
754 	  "Intel i82544EI 1000BASE-T Ethernet",
755 	  WM_T_82544,		WMP_F_COPPER },
756 
757 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
758 	  "Intel i82544EI 1000BASE-X Ethernet",
759 	  WM_T_82544,		WMP_F_FIBER },
760 
761 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
762 	  "Intel i82544GC 1000BASE-T Ethernet",
763 	  WM_T_82544,		WMP_F_COPPER },
764 
765 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
766 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
767 	  WM_T_82544,		WMP_F_COPPER },
768 
769 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
770 	  "Intel i82540EM 1000BASE-T Ethernet",
771 	  WM_T_82540,		WMP_F_COPPER },
772 
773 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
774 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
775 	  WM_T_82540,		WMP_F_COPPER },
776 
777 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
778 	  "Intel i82540EP 1000BASE-T Ethernet",
779 	  WM_T_82540,		WMP_F_COPPER },
780 
781 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
782 	  "Intel i82540EP 1000BASE-T Ethernet",
783 	  WM_T_82540,		WMP_F_COPPER },
784 
785 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
786 	  "Intel i82540EP 1000BASE-T Ethernet",
787 	  WM_T_82540,		WMP_F_COPPER },
788 
789 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
790 	  "Intel i82545EM 1000BASE-T Ethernet",
791 	  WM_T_82545,		WMP_F_COPPER },
792 
793 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
794 	  "Intel i82545GM 1000BASE-T Ethernet",
795 	  WM_T_82545_3,		WMP_F_COPPER },
796 
797 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
798 	  "Intel i82545GM 1000BASE-X Ethernet",
799 	  WM_T_82545_3,		WMP_F_FIBER },
800 
801 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
802 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
803 	  WM_T_82545_3,		WMP_F_SERDES },
804 
805 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
806 	  "Intel i82546EB 1000BASE-T Ethernet",
807 	  WM_T_82546,		WMP_F_COPPER },
808 
809 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
810 	  "Intel i82546EB 1000BASE-T Ethernet",
811 	  WM_T_82546,		WMP_F_COPPER },
812 
813 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
814 	  "Intel i82545EM 1000BASE-X Ethernet",
815 	  WM_T_82545,		WMP_F_FIBER },
816 
817 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
818 	  "Intel i82546EB 1000BASE-X Ethernet",
819 	  WM_T_82546,		WMP_F_FIBER },
820 
821 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
822 	  "Intel i82546GB 1000BASE-T Ethernet",
823 	  WM_T_82546_3,		WMP_F_COPPER },
824 
825 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
826 	  "Intel i82546GB 1000BASE-X Ethernet",
827 	  WM_T_82546_3,		WMP_F_FIBER },
828 
829 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
830 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
831 	  WM_T_82546_3,		WMP_F_SERDES },
832 
833 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
834 	  "i82546GB quad-port Gigabit Ethernet",
835 	  WM_T_82546_3,		WMP_F_COPPER },
836 
837 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
838 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
839 	  WM_T_82546_3,		WMP_F_COPPER },
840 
841 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
842 	  "Intel PRO/1000MT (82546GB)",
843 	  WM_T_82546_3,		WMP_F_COPPER },
844 
845 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
846 	  "Intel i82541EI 1000BASE-T Ethernet",
847 	  WM_T_82541,		WMP_F_COPPER },
848 
849 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
850 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
851 	  WM_T_82541,		WMP_F_COPPER },
852 
853 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
854 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
855 	  WM_T_82541,		WMP_F_COPPER },
856 
857 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
858 	  "Intel i82541ER 1000BASE-T Ethernet",
859 	  WM_T_82541_2,		WMP_F_COPPER },
860 
861 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
862 	  "Intel i82541GI 1000BASE-T Ethernet",
863 	  WM_T_82541_2,		WMP_F_COPPER },
864 
865 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
866 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
867 	  WM_T_82541_2,		WMP_F_COPPER },
868 
869 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
870 	  "Intel i82541PI 1000BASE-T Ethernet",
871 	  WM_T_82541_2,		WMP_F_COPPER },
872 
873 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
874 	  "Intel i82547EI 1000BASE-T Ethernet",
875 	  WM_T_82547,		WMP_F_COPPER },
876 
877 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
878 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
879 	  WM_T_82547,		WMP_F_COPPER },
880 
881 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
882 	  "Intel i82547GI 1000BASE-T Ethernet",
883 	  WM_T_82547_2,		WMP_F_COPPER },
884 
885 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
886 	  "Intel PRO/1000 PT (82571EB)",
887 	  WM_T_82571,		WMP_F_COPPER },
888 
889 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
890 	  "Intel PRO/1000 PF (82571EB)",
891 	  WM_T_82571,		WMP_F_FIBER },
892 
893 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
894 	  "Intel PRO/1000 PB (82571EB)",
895 	  WM_T_82571,		WMP_F_SERDES },
896 
897 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
898 	  "Intel PRO/1000 QT (82571EB)",
899 	  WM_T_82571,		WMP_F_COPPER },
900 
901 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
902 	  "Intel PRO/1000 PT Quad Port Server Adapter",
903 	  WM_T_82571,		WMP_F_COPPER, },
904 
905 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
906 	  "Intel Gigabit PT Quad Port Server ExpressModule",
907 	  WM_T_82571,		WMP_F_COPPER, },
908 
909 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
910 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
911 	  WM_T_82571,		WMP_F_SERDES, },
912 
913 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
914 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
915 	  WM_T_82571,		WMP_F_SERDES, },
916 
917 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
918 	  "Intel 82571EB Quad 1000baseX Ethernet",
919 	  WM_T_82571,		WMP_F_FIBER, },
920 
921 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
922 	  "Intel i82572EI 1000baseT Ethernet",
923 	  WM_T_82572,		WMP_F_COPPER },
924 
925 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
926 	  "Intel i82572EI 1000baseX Ethernet",
927 	  WM_T_82572,		WMP_F_FIBER },
928 
929 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
930 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
931 	  WM_T_82572,		WMP_F_SERDES },
932 
933 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
934 	  "Intel i82572EI 1000baseT Ethernet",
935 	  WM_T_82572,		WMP_F_COPPER },
936 
937 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
938 	  "Intel i82573E",
939 	  WM_T_82573,		WMP_F_COPPER },
940 
941 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
942 	  "Intel i82573E IAMT",
943 	  WM_T_82573,		WMP_F_COPPER },
944 
945 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
946 	  "Intel i82573L Gigabit Ethernet",
947 	  WM_T_82573,		WMP_F_COPPER },
948 
949 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
950 	  "Intel i82574L",
951 	  WM_T_82574,		WMP_F_COPPER },
952 
953 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
954 	  "Intel i82574L",
955 	  WM_T_82574,		WMP_F_COPPER },
956 
957 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
958 	  "Intel i82583V",
959 	  WM_T_82583,		WMP_F_COPPER },
960 
961 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
962 	  "i80003 dual 1000baseT Ethernet",
963 	  WM_T_80003,		WMP_F_COPPER },
964 
965 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
966 	  "i80003 dual 1000baseX Ethernet",
967 	  WM_T_80003,		WMP_F_COPPER },
968 
969 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
970 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
971 	  WM_T_80003,		WMP_F_SERDES },
972 
973 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
974 	  "Intel i80003 1000baseT Ethernet",
975 	  WM_T_80003,		WMP_F_COPPER },
976 
977 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
978 	  "Intel i80003 Gigabit Ethernet (SERDES)",
979 	  WM_T_80003,		WMP_F_SERDES },
980 
981 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
982 	  "Intel i82801H (M_AMT) LAN Controller",
983 	  WM_T_ICH8,		WMP_F_COPPER },
984 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
985 	  "Intel i82801H (AMT) LAN Controller",
986 	  WM_T_ICH8,		WMP_F_COPPER },
987 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
988 	  "Intel i82801H LAN Controller",
989 	  WM_T_ICH8,		WMP_F_COPPER },
990 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
991 	  "Intel i82801H (IFE) LAN Controller",
992 	  WM_T_ICH8,		WMP_F_COPPER },
993 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
994 	  "Intel i82801H (M) LAN Controller",
995 	  WM_T_ICH8,		WMP_F_COPPER },
996 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
997 	  "Intel i82801H IFE (GT) LAN Controller",
998 	  WM_T_ICH8,		WMP_F_COPPER },
999 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
1000 	  "Intel i82801H IFE (G) LAN Controller",
1001 	  WM_T_ICH8,		WMP_F_COPPER },
1002 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
1003 	  "82801I (AMT) LAN Controller",
1004 	  WM_T_ICH9,		WMP_F_COPPER },
1005 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
1006 	  "82801I LAN Controller",
1007 	  WM_T_ICH9,		WMP_F_COPPER },
1008 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
1009 	  "82801I (G) LAN Controller",
1010 	  WM_T_ICH9,		WMP_F_COPPER },
1011 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
1012 	  "82801I (GT) LAN Controller",
1013 	  WM_T_ICH9,		WMP_F_COPPER },
1014 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
1015 	  "82801I (C) LAN Controller",
1016 	  WM_T_ICH9,		WMP_F_COPPER },
1017 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
1018 	  "82801I mobile LAN Controller",
1019 	  WM_T_ICH9,		WMP_F_COPPER },
1020 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
1021 	  "82801I mobile (V) LAN Controller",
1022 	  WM_T_ICH9,		WMP_F_COPPER },
1023 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
1024 	  "82801I mobile (AMT) LAN Controller",
1025 	  WM_T_ICH9,		WMP_F_COPPER },
1026 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
1027 	  "82567LM-4 LAN Controller",
1028 	  WM_T_ICH9,		WMP_F_COPPER },
1029 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
1030 	  "82567V-3 LAN Controller",
1031 	  WM_T_ICH9,		WMP_F_COPPER },
1032 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
1033 	  "82567LM-2 LAN Controller",
1034 	  WM_T_ICH10,		WMP_F_COPPER },
1035 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
1036 	  "82567LF-2 LAN Controller",
1037 	  WM_T_ICH10,		WMP_F_COPPER },
1038 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
1039 	  "82567LM-3 LAN Controller",
1040 	  WM_T_ICH10,		WMP_F_COPPER },
1041 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
1042 	  "82567LF-3 LAN Controller",
1043 	  WM_T_ICH10,		WMP_F_COPPER },
1044 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
1045 	  "82567V-2 LAN Controller",
1046 	  WM_T_ICH10,		WMP_F_COPPER },
1047 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
1048 	  "82567V-3? LAN Controller",
1049 	  WM_T_ICH10,		WMP_F_COPPER },
1050 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
1051 	  "HANKSVILLE LAN Controller",
1052 	  WM_T_ICH10,		WMP_F_COPPER },
1053 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
1054 	  "PCH LAN (82577LM) Controller",
1055 	  WM_T_PCH,		WMP_F_COPPER },
1056 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
1057 	  "PCH LAN (82577LC) Controller",
1058 	  WM_T_PCH,		WMP_F_COPPER },
1059 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
1060 	  "PCH LAN (82578DM) Controller",
1061 	  WM_T_PCH,		WMP_F_COPPER },
1062 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
1063 	  "PCH LAN (82578DC) Controller",
1064 	  WM_T_PCH,		WMP_F_COPPER },
1065 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
1066 	  "PCH2 LAN (82579LM) Controller",
1067 	  WM_T_PCH2,		WMP_F_COPPER },
1068 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
1069 	  "PCH2 LAN (82579V) Controller",
1070 	  WM_T_PCH2,		WMP_F_COPPER },
1071 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
1072 	  "82575EB dual-1000baseT Ethernet",
1073 	  WM_T_82575,		WMP_F_COPPER },
1074 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
1075 	  "82575EB dual-1000baseX Ethernet (SERDES)",
1076 	  WM_T_82575,		WMP_F_SERDES },
1077 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
1078 	  "82575GB quad-1000baseT Ethernet",
1079 	  WM_T_82575,		WMP_F_COPPER },
1080 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
1081 	  "82575GB quad-1000baseT Ethernet (PM)",
1082 	  WM_T_82575,		WMP_F_COPPER },
1083 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
1084 	  "82576 1000BaseT Ethernet",
1085 	  WM_T_82576,		WMP_F_COPPER },
1086 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
1087 	  "82576 1000BaseX Ethernet",
1088 	  WM_T_82576,		WMP_F_FIBER },
1089 
1090 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
1091 	  "82576 gigabit Ethernet (SERDES)",
1092 	  WM_T_82576,		WMP_F_SERDES },
1093 
1094 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
1095 	  "82576 quad-1000BaseT Ethernet",
1096 	  WM_T_82576,		WMP_F_COPPER },
1097 
1098 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
1099 	  "82576 Gigabit ET2 Quad Port Server Adapter",
1100 	  WM_T_82576,		WMP_F_COPPER },
1101 
1102 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
1103 	  "82576 gigabit Ethernet",
1104 	  WM_T_82576,		WMP_F_COPPER },
1105 
1106 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
1107 	  "82576 gigabit Ethernet (SERDES)",
1108 	  WM_T_82576,		WMP_F_SERDES },
1109 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
1110 	  "82576 quad-gigabit Ethernet (SERDES)",
1111 	  WM_T_82576,		WMP_F_SERDES },
1112 
1113 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
1114 	  "82580 1000BaseT Ethernet",
1115 	  WM_T_82580,		WMP_F_COPPER },
1116 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
1117 	  "82580 1000BaseX Ethernet",
1118 	  WM_T_82580,		WMP_F_FIBER },
1119 
1120 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
1121 	  "82580 1000BaseT Ethernet (SERDES)",
1122 	  WM_T_82580,		WMP_F_SERDES },
1123 
1124 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
1125 	  "82580 gigabit Ethernet (SGMII)",
1126 	  WM_T_82580,		WMP_F_COPPER },
1127 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
1128 	  "82580 dual-1000BaseT Ethernet",
1129 	  WM_T_82580,		WMP_F_COPPER },
1130 
1131 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1132 	  "82580 quad-1000BaseX Ethernet",
1133 	  WM_T_82580,		WMP_F_FIBER },
1134 
1135 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
1136 	  "DH89XXCC Gigabit Ethernet (SGMII)",
1137 	  WM_T_82580,		WMP_F_COPPER },
1138 
1139 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
1140 	  "DH89XXCC Gigabit Ethernet (SERDES)",
1141 	  WM_T_82580,		WMP_F_SERDES },
1142 
1143 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
1144 	  "DH89XXCC 1000BASE-KX Ethernet",
1145 	  WM_T_82580,		WMP_F_SERDES },
1146 
1147 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
1148 	  "DH89XXCC Gigabit Ethernet (SFP)",
1149 	  WM_T_82580,		WMP_F_SERDES },
1150 
1151 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
1152 	  "I350 Gigabit Network Connection",
1153 	  WM_T_I350,		WMP_F_COPPER },
1154 
1155 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
1156 	  "I350 Gigabit Fiber Network Connection",
1157 	  WM_T_I350,		WMP_F_FIBER },
1158 
1159 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
1160 	  "I350 Gigabit Backplane Connection",
1161 	  WM_T_I350,		WMP_F_SERDES },
1162 
1163 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
1164 	  "I350 Quad Port Gigabit Ethernet",
1165 	  WM_T_I350,		WMP_F_SERDES },
1166 
1167 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
1168 	  "I350 Gigabit Connection",
1169 	  WM_T_I350,		WMP_F_COPPER },
1170 
1171 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
1172 	  "I354 Gigabit Ethernet (KX)",
1173 	  WM_T_I354,		WMP_F_SERDES },
1174 
1175 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
1176 	  "I354 Gigabit Ethernet (SGMII)",
1177 	  WM_T_I354,		WMP_F_COPPER },
1178 
1179 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
1180 	  "I354 Gigabit Ethernet (2.5G)",
1181 	  WM_T_I354,		WMP_F_COPPER },
1182 
1183 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
1184 	  "I210-T1 Ethernet Server Adapter",
1185 	  WM_T_I210,		WMP_F_COPPER },
1186 
1187 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
1188 	  "I210 Ethernet (Copper OEM)",
1189 	  WM_T_I210,		WMP_F_COPPER },
1190 
1191 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
1192 	  "I210 Ethernet (Copper IT)",
1193 	  WM_T_I210,		WMP_F_COPPER },
1194 
1195 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
1196 	  "I210 Ethernet (FLASH less)",
1197 	  WM_T_I210,		WMP_F_COPPER },
1198 
1199 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
1200 	  "I210 Gigabit Ethernet (Fiber)",
1201 	  WM_T_I210,		WMP_F_FIBER },
1202 
1203 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
1204 	  "I210 Gigabit Ethernet (SERDES)",
1205 	  WM_T_I210,		WMP_F_SERDES },
1206 
1207 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
1208 	  "I210 Gigabit Ethernet (FLASH less)",
1209 	  WM_T_I210,		WMP_F_SERDES },
1210 
1211 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
1212 	  "I210 Gigabit Ethernet (SGMII)",
1213 	  WM_T_I210,		WMP_F_COPPER },
1214 
1215 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
1216 	  "I211 Ethernet (COPPER)",
1217 	  WM_T_I211,		WMP_F_COPPER },
1218 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
1219 	  "I217 V Ethernet Connection",
1220 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1221 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
1222 	  "I217 LM Ethernet Connection",
1223 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1224 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
1225 	  "I218 V Ethernet Connection",
1226 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1227 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
1228 	  "I218 V Ethernet Connection",
1229 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1230 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
1231 	  "I218 V Ethernet Connection",
1232 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1233 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
1234 	  "I218 LM Ethernet Connection",
1235 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1236 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
1237 	  "I218 LM Ethernet Connection",
1238 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1239 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
1240 	  "I218 LM Ethernet Connection",
1241 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1242 	{ 0,			0,
1243 	  NULL,
1244 	  0,			0 },
1245 };
1246 
1247 #ifdef WM_EVENT_COUNTERS
1248 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
1249 #endif /* WM_EVENT_COUNTERS */
1250 
1251 
1252 /*
1253  * Register read/write functions.
1254  * Other than CSR_{READ|WRITE}().
1255  */
1256 
1257 #if 0 /* Not currently used */
1258 static inline uint32_t
1259 wm_io_read(struct wm_softc *sc, int reg)
1260 {
1261 
1262 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1263 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1264 }
1265 #endif
1266 
1267 static inline void
1268 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1269 {
1270 
1271 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1272 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1273 }
1274 
1275 static inline void
1276 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1277     uint32_t data)
1278 {
1279 	uint32_t regval;
1280 	int i;
1281 
1282 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1283 
1284 	CSR_WRITE(sc, reg, regval);
1285 
1286 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1287 		delay(5);
1288 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1289 			break;
1290 	}
1291 	if (i == SCTL_CTL_POLL_TIMEOUT) {
1292 		aprint_error("%s: WARNING:"
1293 		    " i82575 reg 0x%08x setup did not indicate ready\n",
1294 		    device_xname(sc->sc_dev), reg);
1295 	}
1296 }
1297 
1298 static inline void
1299 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1300 {
1301 	wa->wa_low = htole32(v & 0xffffffffU);
1302 	if (sizeof(bus_addr_t) == 8)
1303 		wa->wa_high = htole32((uint64_t) v >> 32);
1304 	else
1305 		wa->wa_high = 0;
1306 }
1307 
1308 /*
1309  * Device driver interface functions and commonly used functions.
1310  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1311  */
1312 
1313 /* Lookup supported device table */
1314 static const struct wm_product *
1315 wm_lookup(const struct pci_attach_args *pa)
1316 {
1317 	const struct wm_product *wmp;
1318 
1319 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1320 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1321 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1322 			return wmp;
1323 	}
1324 	return NULL;
1325 }
1326 
1327 /* The match function (ca_match) */
1328 static int
1329 wm_match(device_t parent, cfdata_t cf, void *aux)
1330 {
1331 	struct pci_attach_args *pa = aux;
1332 
1333 	if (wm_lookup(pa) != NULL)
1334 		return 1;
1335 
1336 	return 0;
1337 }
1338 
1339 /* The attach function (ca_attach) */
1340 static void
1341 wm_attach(device_t parent, device_t self, void *aux)
1342 {
1343 	struct wm_softc *sc = device_private(self);
1344 	struct pci_attach_args *pa = aux;
1345 	prop_dictionary_t dict;
1346 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1347 	pci_chipset_tag_t pc = pa->pa_pc;
1348 	pci_intr_handle_t ih;
1349 	const char *intrstr = NULL;
1350 	const char *eetype, *xname;
1351 	bus_space_tag_t memt;
1352 	bus_space_handle_t memh;
1353 	bus_size_t memsize;
1354 	int memh_valid;
1355 	int i, error;
1356 	const struct wm_product *wmp;
1357 	prop_data_t ea;
1358 	prop_number_t pn;
1359 	uint8_t enaddr[ETHER_ADDR_LEN];
1360 	uint16_t cfg1, cfg2, swdpin, io3;
1361 	pcireg_t preg, memtype;
1362 	uint16_t eeprom_data, apme_mask;
1363 	bool force_clear_smbi;
1364 	uint32_t link_mode;
1365 	uint32_t reg;
1366 	char intrbuf[PCI_INTRSTR_LEN];
1367 
1368 	sc->sc_dev = self;
1369 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
1370 	sc->sc_stopping = false;
1371 
1372 	wmp = wm_lookup(pa);
1373 #ifdef DIAGNOSTIC
1374 	if (wmp == NULL) {
1375 		printf("\n");
1376 		panic("wm_attach: impossible");
1377 	}
1378 #endif
1379 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
1380 
1381 	sc->sc_pc = pa->pa_pc;
1382 	sc->sc_pcitag = pa->pa_tag;
1383 
1384 	if (pci_dma64_available(pa))
1385 		sc->sc_dmat = pa->pa_dmat64;
1386 	else
1387 		sc->sc_dmat = pa->pa_dmat;
1388 
1389 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
1390 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
1391 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1392 
1393 	sc->sc_type = wmp->wmp_type;
1394 	if (sc->sc_type < WM_T_82543) {
1395 		if (sc->sc_rev < 2) {
1396 			aprint_error_dev(sc->sc_dev,
1397 			    "i82542 must be at least rev. 2\n");
1398 			return;
1399 		}
1400 		if (sc->sc_rev < 3)
1401 			sc->sc_type = WM_T_82542_2_0;
1402 	}
1403 
1404 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1405 	    || (sc->sc_type == WM_T_82580)
1406 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
1407 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
1408 		sc->sc_flags |= WM_F_NEWQUEUE;
1409 
1410 	/* Set device properties (mactype) */
1411 	dict = device_properties(sc->sc_dev);
1412 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1413 
1414 	/*
1415 	 * Map the device.  All devices support memory-mapped acccess,
1416 	 * and it is really required for normal operation.
1417 	 */
1418 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1419 	switch (memtype) {
1420 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1421 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1422 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1423 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1424 		break;
1425 	default:
1426 		memh_valid = 0;
1427 		break;
1428 	}
1429 
1430 	if (memh_valid) {
1431 		sc->sc_st = memt;
1432 		sc->sc_sh = memh;
1433 		sc->sc_ss = memsize;
1434 	} else {
1435 		aprint_error_dev(sc->sc_dev,
1436 		    "unable to map device registers\n");
1437 		return;
1438 	}
1439 
1440 	/*
1441 	 * In addition, i82544 and later support I/O mapped indirect
1442 	 * register access.  It is not desirable (nor supported in
1443 	 * this driver) to use it for normal operation, though it is
1444 	 * required to work around bugs in some chip versions.
1445 	 */
1446 	if (sc->sc_type >= WM_T_82544) {
1447 		/* First we have to find the I/O BAR. */
1448 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1449 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1450 			if (memtype == PCI_MAPREG_TYPE_IO)
1451 				break;
1452 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
1453 			    PCI_MAPREG_MEM_TYPE_64BIT)
1454 				i += 4;	/* skip high bits, too */
1455 		}
1456 		if (i < PCI_MAPREG_END) {
1457 			/*
1458 			 * We found PCI_MAPREG_TYPE_IO. Note that 82580
1459 			 * (and newer?) chip has no PCI_MAPREG_TYPE_IO.
1460 			 * It's no problem because newer chips has no this
1461 			 * bug.
1462 			 *
1463 			 * The i8254x doesn't apparently respond when the
1464 			 * I/O BAR is 0, which looks somewhat like it's not
1465 			 * been configured.
1466 			 */
1467 			preg = pci_conf_read(pc, pa->pa_tag, i);
1468 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1469 				aprint_error_dev(sc->sc_dev,
1470 				    "WARNING: I/O BAR at zero.\n");
1471 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1472 					0, &sc->sc_iot, &sc->sc_ioh,
1473 					NULL, &sc->sc_ios) == 0) {
1474 				sc->sc_flags |= WM_F_IOH_VALID;
1475 			} else {
1476 				aprint_error_dev(sc->sc_dev,
1477 				    "WARNING: unable to map I/O space\n");
1478 			}
1479 		}
1480 
1481 	}
1482 
1483 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
1484 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1485 	preg |= PCI_COMMAND_MASTER_ENABLE;
1486 	if (sc->sc_type < WM_T_82542_2_1)
1487 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1488 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1489 
1490 	/* power up chip */
1491 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1492 	    NULL)) && error != EOPNOTSUPP) {
1493 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1494 		return;
1495 	}
1496 
1497 	/*
1498 	 * Map and establish our interrupt.
1499 	 */
1500 	if (pci_intr_map(pa, &ih)) {
1501 		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
1502 		return;
1503 	}
1504 	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
1505 #ifdef WM_MPSAFE
1506 	pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
1507 #endif
1508 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
1509 	if (sc->sc_ih == NULL) {
1510 		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
1511 		if (intrstr != NULL)
1512 			aprint_error(" at %s", intrstr);
1513 		aprint_error("\n");
1514 		return;
1515 	}
1516 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
1517 
1518 	/*
1519 	 * Check the function ID (unit number of the chip).
1520 	 */
1521 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1522 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
1523 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1524 	    || (sc->sc_type == WM_T_82580)
1525 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
1526 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1527 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1528 	else
1529 		sc->sc_funcid = 0;
1530 
1531 	/*
1532 	 * Determine a few things about the bus we're connected to.
1533 	 */
1534 	if (sc->sc_type < WM_T_82543) {
1535 		/* We don't really know the bus characteristics here. */
1536 		sc->sc_bus_speed = 33;
1537 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1538 		/*
1539 		 * CSA (Communication Streaming Architecture) is about as fast
1540 		 * a 32-bit 66MHz PCI Bus.
1541 		 */
1542 		sc->sc_flags |= WM_F_CSA;
1543 		sc->sc_bus_speed = 66;
1544 		aprint_verbose_dev(sc->sc_dev,
1545 		    "Communication Streaming Architecture\n");
1546 		if (sc->sc_type == WM_T_82547) {
1547 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1548 			callout_setfunc(&sc->sc_txfifo_ch,
1549 					wm_82547_txfifo_stall, sc);
1550 			aprint_verbose_dev(sc->sc_dev,
1551 			    "using 82547 Tx FIFO stall work-around\n");
1552 		}
1553 	} else if (sc->sc_type >= WM_T_82571) {
1554 		sc->sc_flags |= WM_F_PCIE;
1555 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1556 		    && (sc->sc_type != WM_T_ICH10)
1557 		    && (sc->sc_type != WM_T_PCH)
1558 		    && (sc->sc_type != WM_T_PCH2)
1559 		    && (sc->sc_type != WM_T_PCH_LPT)) {
1560 			/* ICH* and PCH* have no PCIe capability registers */
1561 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1562 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1563 				NULL) == 0)
1564 				aprint_error_dev(sc->sc_dev,
1565 				    "unable to find PCIe capability\n");
1566 		}
1567 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1568 	} else {
1569 		reg = CSR_READ(sc, WMREG_STATUS);
1570 		if (reg & STATUS_BUS64)
1571 			sc->sc_flags |= WM_F_BUS64;
1572 		if ((reg & STATUS_PCIX_MODE) != 0) {
1573 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1574 
1575 			sc->sc_flags |= WM_F_PCIX;
1576 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1577 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1578 				aprint_error_dev(sc->sc_dev,
1579 				    "unable to find PCIX capability\n");
1580 			else if (sc->sc_type != WM_T_82545_3 &&
1581 				 sc->sc_type != WM_T_82546_3) {
1582 				/*
1583 				 * Work around a problem caused by the BIOS
1584 				 * setting the max memory read byte count
1585 				 * incorrectly.
1586 				 */
1587 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1588 				    sc->sc_pcixe_capoff + PCIX_CMD);
1589 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1590 				    sc->sc_pcixe_capoff + PCIX_STATUS);
1591 
1592 				bytecnt =
1593 				    (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1594 				    PCIX_CMD_BYTECNT_SHIFT;
1595 				maxb =
1596 				    (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1597 				    PCIX_STATUS_MAXB_SHIFT;
1598 				if (bytecnt > maxb) {
1599 					aprint_verbose_dev(sc->sc_dev,
1600 					    "resetting PCI-X MMRBC: %d -> %d\n",
1601 					    512 << bytecnt, 512 << maxb);
1602 					pcix_cmd = (pcix_cmd &
1603 					    ~PCIX_CMD_BYTECNT_MASK) |
1604 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
1605 					pci_conf_write(pa->pa_pc, pa->pa_tag,
1606 					    sc->sc_pcixe_capoff + PCIX_CMD,
1607 					    pcix_cmd);
1608 				}
1609 			}
1610 		}
1611 		/*
1612 		 * The quad port adapter is special; it has a PCIX-PCIX
1613 		 * bridge on the board, and can run the secondary bus at
1614 		 * a higher speed.
1615 		 */
1616 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1617 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1618 								      : 66;
1619 		} else if (sc->sc_flags & WM_F_PCIX) {
1620 			switch (reg & STATUS_PCIXSPD_MASK) {
1621 			case STATUS_PCIXSPD_50_66:
1622 				sc->sc_bus_speed = 66;
1623 				break;
1624 			case STATUS_PCIXSPD_66_100:
1625 				sc->sc_bus_speed = 100;
1626 				break;
1627 			case STATUS_PCIXSPD_100_133:
1628 				sc->sc_bus_speed = 133;
1629 				break;
1630 			default:
1631 				aprint_error_dev(sc->sc_dev,
1632 				    "unknown PCIXSPD %d; assuming 66MHz\n",
1633 				    reg & STATUS_PCIXSPD_MASK);
1634 				sc->sc_bus_speed = 66;
1635 				break;
1636 			}
1637 		} else
1638 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1639 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1640 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1641 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1642 	}
1643 
1644 	/*
1645 	 * Allocate the control data structures, and create and load the
1646 	 * DMA map for it.
1647 	 *
1648 	 * NOTE: All Tx descriptors must be in the same 4G segment of
1649 	 * memory.  So must Rx descriptors.  We simplify by allocating
1650 	 * both sets within the same 4G segment.
1651 	 */
1652 	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1653 	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
1654 	sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1655 	    sizeof(struct wm_control_data_82542) :
1656 	    sizeof(struct wm_control_data_82544);
1657 	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1658 		    (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1659 		    &sc->sc_cd_rseg, 0)) != 0) {
1660 		aprint_error_dev(sc->sc_dev,
1661 		    "unable to allocate control data, error = %d\n",
1662 		    error);
1663 		goto fail_0;
1664 	}
1665 
1666 	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1667 		    sc->sc_cd_rseg, sc->sc_cd_size,
1668 		    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1669 		aprint_error_dev(sc->sc_dev,
1670 		    "unable to map control data, error = %d\n", error);
1671 		goto fail_1;
1672 	}
1673 
1674 	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1675 		    sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1676 		aprint_error_dev(sc->sc_dev,
1677 		    "unable to create control data DMA map, error = %d\n",
1678 		    error);
1679 		goto fail_2;
1680 	}
1681 
1682 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1683 		    sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1684 		aprint_error_dev(sc->sc_dev,
1685 		    "unable to load control data DMA map, error = %d\n",
1686 		    error);
1687 		goto fail_3;
1688 	}
1689 
1690 	/* Create the transmit buffer DMA maps. */
1691 	WM_TXQUEUELEN(sc) =
1692 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1693 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1694 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1695 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1696 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1697 			    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1698 			aprint_error_dev(sc->sc_dev,
1699 			    "unable to create Tx DMA map %d, error = %d\n",
1700 			    i, error);
1701 			goto fail_4;
1702 		}
1703 	}
1704 
1705 	/* Create the receive buffer DMA maps. */
1706 	for (i = 0; i < WM_NRXDESC; i++) {
1707 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1708 			    MCLBYTES, 0, 0,
1709 			    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1710 			aprint_error_dev(sc->sc_dev,
1711 			    "unable to create Rx DMA map %d error = %d\n",
1712 			    i, error);
1713 			goto fail_5;
1714 		}
1715 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
1716 	}
1717 
1718 	/* clear interesting stat counters */
1719 	CSR_READ(sc, WMREG_COLC);
1720 	CSR_READ(sc, WMREG_RXERRC);
1721 
1722 	/* get PHY control from SMBus to PCIe */
1723 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1724 	    || (sc->sc_type == WM_T_PCH_LPT))
1725 		wm_smbustopci(sc);
1726 
1727 	/* Reset the chip to a known state. */
1728 	wm_reset(sc);
1729 
1730 	/* Get some information about the EEPROM. */
1731 	switch (sc->sc_type) {
1732 	case WM_T_82542_2_0:
1733 	case WM_T_82542_2_1:
1734 	case WM_T_82543:
1735 	case WM_T_82544:
1736 		/* Microwire */
1737 		sc->sc_nvm_wordsize = 64;
1738 		sc->sc_nvm_addrbits = 6;
1739 		break;
1740 	case WM_T_82540:
1741 	case WM_T_82545:
1742 	case WM_T_82545_3:
1743 	case WM_T_82546:
1744 	case WM_T_82546_3:
1745 		/* Microwire */
1746 		reg = CSR_READ(sc, WMREG_EECD);
1747 		if (reg & EECD_EE_SIZE) {
1748 			sc->sc_nvm_wordsize = 256;
1749 			sc->sc_nvm_addrbits = 8;
1750 		} else {
1751 			sc->sc_nvm_wordsize = 64;
1752 			sc->sc_nvm_addrbits = 6;
1753 		}
1754 		sc->sc_flags |= WM_F_LOCK_EECD;
1755 		break;
1756 	case WM_T_82541:
1757 	case WM_T_82541_2:
1758 	case WM_T_82547:
1759 	case WM_T_82547_2:
1760 		sc->sc_flags |= WM_F_LOCK_EECD;
1761 		reg = CSR_READ(sc, WMREG_EECD);
1762 		if (reg & EECD_EE_TYPE) {
1763 			/* SPI */
1764 			sc->sc_flags |= WM_F_EEPROM_SPI;
1765 			wm_nvm_set_addrbits_size_eecd(sc);
1766 		} else {
1767 			/* Microwire */
1768 			if ((reg & EECD_EE_ABITS) != 0) {
1769 				sc->sc_nvm_wordsize = 256;
1770 				sc->sc_nvm_addrbits = 8;
1771 			} else {
1772 				sc->sc_nvm_wordsize = 64;
1773 				sc->sc_nvm_addrbits = 6;
1774 			}
1775 		}
1776 		break;
1777 	case WM_T_82571:
1778 	case WM_T_82572:
1779 		/* SPI */
1780 		sc->sc_flags |= WM_F_EEPROM_SPI;
1781 		wm_nvm_set_addrbits_size_eecd(sc);
1782 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1783 		break;
1784 	case WM_T_82573:
1785 		sc->sc_flags |= WM_F_LOCK_SWSM;
1786 		/* FALLTHROUGH */
1787 	case WM_T_82574:
1788 	case WM_T_82583:
1789 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
1790 			sc->sc_flags |= WM_F_EEPROM_FLASH;
1791 			sc->sc_nvm_wordsize = 2048;
1792 		} else {
1793 			/* SPI */
1794 			sc->sc_flags |= WM_F_EEPROM_SPI;
1795 			wm_nvm_set_addrbits_size_eecd(sc);
1796 		}
1797 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1798 		break;
1799 	case WM_T_82575:
1800 	case WM_T_82576:
1801 	case WM_T_82580:
1802 	case WM_T_I350:
1803 	case WM_T_I354:
1804 	case WM_T_80003:
1805 		/* SPI */
1806 		sc->sc_flags |= WM_F_EEPROM_SPI;
1807 		wm_nvm_set_addrbits_size_eecd(sc);
1808 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
1809 		    | WM_F_LOCK_SWSM;
1810 		break;
1811 	case WM_T_ICH8:
1812 	case WM_T_ICH9:
1813 	case WM_T_ICH10:
1814 	case WM_T_PCH:
1815 	case WM_T_PCH2:
1816 	case WM_T_PCH_LPT:
1817 		/* FLASH */
1818 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1819 		sc->sc_nvm_wordsize = 2048;
1820 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1821 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1822 		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1823 			aprint_error_dev(sc->sc_dev,
1824 			    "can't map FLASH registers\n");
1825 			goto fail_5;
1826 		}
1827 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1828 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1829 						ICH_FLASH_SECTOR_SIZE;
1830 		sc->sc_ich8_flash_bank_size =
1831 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1832 		sc->sc_ich8_flash_bank_size -=
1833 		    (reg & ICH_GFPREG_BASE_MASK);
1834 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1835 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1836 		break;
1837 	case WM_T_I210:
1838 	case WM_T_I211:
1839 		wm_nvm_set_addrbits_size_eecd(sc);
1840 		sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1841 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
1842 		break;
1843 	default:
1844 		break;
1845 	}
1846 
1847 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
1848 	switch (sc->sc_type) {
1849 	case WM_T_82571:
1850 	case WM_T_82572:
1851 		reg = CSR_READ(sc, WMREG_SWSM2);
1852 		if ((reg & SWSM2_LOCK) == 0) {
1853 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
1854 			force_clear_smbi = true;
1855 		} else
1856 			force_clear_smbi = false;
1857 		break;
1858 	case WM_T_82573:
1859 	case WM_T_82574:
1860 	case WM_T_82583:
1861 		force_clear_smbi = true;
1862 		break;
1863 	default:
1864 		force_clear_smbi = false;
1865 		break;
1866 	}
1867 	if (force_clear_smbi) {
1868 		reg = CSR_READ(sc, WMREG_SWSM);
1869 		if ((reg & SWSM_SMBI) != 0)
1870 			aprint_error_dev(sc->sc_dev,
1871 			    "Please update the Bootagent\n");
1872 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
1873 	}
1874 
1875 	/*
1876 	 * Defer printing the EEPROM type until after verifying the checksum
1877 	 * This allows the EEPROM type to be printed correctly in the case
1878 	 * that no EEPROM is attached.
1879 	 */
1880 	/*
1881 	 * Validate the EEPROM checksum. If the checksum fails, flag
1882 	 * this for later, so we can fail future reads from the EEPROM.
1883 	 */
1884 	if (wm_nvm_validate_checksum(sc)) {
1885 		/*
1886 		 * Read twice again because some PCI-e parts fail the
1887 		 * first check due to the link being in sleep state.
1888 		 */
1889 		if (wm_nvm_validate_checksum(sc))
1890 			sc->sc_flags |= WM_F_EEPROM_INVALID;
1891 	}
1892 
1893 	/* Set device properties (macflags) */
1894 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1895 
1896 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
1897 		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1898 	else {
1899 		aprint_verbose_dev(sc->sc_dev, "%u words ",
1900 		    sc->sc_nvm_wordsize);
1901 		if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) {
1902 			aprint_verbose("FLASH(HW)\n");
1903 		} else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1904 			aprint_verbose("FLASH\n");
1905 		} else {
1906 			if (sc->sc_flags & WM_F_EEPROM_SPI)
1907 				eetype = "SPI";
1908 			else
1909 				eetype = "MicroWire";
1910 			aprint_verbose("(%d address bits) %s EEPROM\n",
1911 			    sc->sc_nvm_addrbits, eetype);
1912 		}
1913 	}
1914 
1915 	switch (sc->sc_type) {
1916 	case WM_T_82571:
1917 	case WM_T_82572:
1918 	case WM_T_82573:
1919 	case WM_T_82574:
1920 	case WM_T_82583:
1921 	case WM_T_80003:
1922 	case WM_T_ICH8:
1923 	case WM_T_ICH9:
1924 	case WM_T_ICH10:
1925 	case WM_T_PCH:
1926 	case WM_T_PCH2:
1927 	case WM_T_PCH_LPT:
1928 		if (wm_check_mng_mode(sc) != 0)
1929 			wm_get_hw_control(sc);
1930 		break;
1931 	default:
1932 		break;
1933 	}
1934 	wm_get_wakeup(sc);
1935 	/*
1936 	 * Read the Ethernet address from the EEPROM, if not first found
1937 	 * in device properties.
1938 	 */
1939 	ea = prop_dictionary_get(dict, "mac-address");
1940 	if (ea != NULL) {
1941 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1942 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1943 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1944 	} else {
1945 		if (wm_read_mac_addr(sc, enaddr) != 0) {
1946 			aprint_error_dev(sc->sc_dev,
1947 			    "unable to read Ethernet address\n");
1948 			goto fail_5;
1949 		}
1950 	}
1951 
1952 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1953 	    ether_sprintf(enaddr));
1954 
1955 	/*
1956 	 * Read the config info from the EEPROM, and set up various
1957 	 * bits in the control registers based on their contents.
1958 	 */
1959 	pn = prop_dictionary_get(dict, "i82543-cfg1");
1960 	if (pn != NULL) {
1961 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1962 		cfg1 = (uint16_t) prop_number_integer_value(pn);
1963 	} else {
1964 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
1965 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1966 			goto fail_5;
1967 		}
1968 	}
1969 
1970 	pn = prop_dictionary_get(dict, "i82543-cfg2");
1971 	if (pn != NULL) {
1972 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1973 		cfg2 = (uint16_t) prop_number_integer_value(pn);
1974 	} else {
1975 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
1976 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1977 			goto fail_5;
1978 		}
1979 	}
1980 
1981 	/* check for WM_F_WOL */
1982 	switch (sc->sc_type) {
1983 	case WM_T_82542_2_0:
1984 	case WM_T_82542_2_1:
1985 	case WM_T_82543:
1986 		/* dummy? */
1987 		eeprom_data = 0;
1988 		apme_mask = NVM_CFG3_APME;
1989 		break;
1990 	case WM_T_82544:
1991 		apme_mask = NVM_CFG2_82544_APM_EN;
1992 		eeprom_data = cfg2;
1993 		break;
1994 	case WM_T_82546:
1995 	case WM_T_82546_3:
1996 	case WM_T_82571:
1997 	case WM_T_82572:
1998 	case WM_T_82573:
1999 	case WM_T_82574:
2000 	case WM_T_82583:
2001 	case WM_T_80003:
2002 	default:
2003 		apme_mask = NVM_CFG3_APME;
2004 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2005 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2006 		break;
2007 	case WM_T_82575:
2008 	case WM_T_82576:
2009 	case WM_T_82580:
2010 	case WM_T_I350:
2011 	case WM_T_I354: /* XXX ok? */
2012 	case WM_T_ICH8:
2013 	case WM_T_ICH9:
2014 	case WM_T_ICH10:
2015 	case WM_T_PCH:
2016 	case WM_T_PCH2:
2017 	case WM_T_PCH_LPT:
2018 		/* XXX The funcid should be checked on some devices */
2019 		apme_mask = WUC_APME;
2020 		eeprom_data = CSR_READ(sc, WMREG_WUC);
2021 		break;
2022 	}
2023 
2024 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2025 	if ((eeprom_data & apme_mask) != 0)
2026 		sc->sc_flags |= WM_F_WOL;
2027 #ifdef WM_DEBUG
2028 	if ((sc->sc_flags & WM_F_WOL) != 0)
2029 		printf("WOL\n");
2030 #endif
2031 
2032 	/*
2033 	 * XXX need special handling for some multiple port cards
2034 	 * to disable a paticular port.
2035 	 */
2036 
2037 	if (sc->sc_type >= WM_T_82544) {
2038 		pn = prop_dictionary_get(dict, "i82543-swdpin");
2039 		if (pn != NULL) {
2040 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2041 			swdpin = (uint16_t) prop_number_integer_value(pn);
2042 		} else {
2043 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2044 				aprint_error_dev(sc->sc_dev,
2045 				    "unable to read SWDPIN\n");
2046 				goto fail_5;
2047 			}
2048 		}
2049 	}
2050 
2051 	if (cfg1 & NVM_CFG1_ILOS)
2052 		sc->sc_ctrl |= CTRL_ILOS;
2053 	if (sc->sc_type >= WM_T_82544) {
2054 		sc->sc_ctrl |=
2055 		    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2056 		    CTRL_SWDPIO_SHIFT;
2057 		sc->sc_ctrl |=
2058 		    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2059 		    CTRL_SWDPINS_SHIFT;
2060 	} else {
2061 		sc->sc_ctrl |=
2062 		    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2063 		    CTRL_SWDPIO_SHIFT;
2064 	}
2065 
2066 #if 0
2067 	if (sc->sc_type >= WM_T_82544) {
2068 		if (cfg1 & NVM_CFG1_IPS0)
2069 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2070 		if (cfg1 & NVM_CFG1_IPS1)
2071 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2072 		sc->sc_ctrl_ext |=
2073 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2074 		    CTRL_EXT_SWDPIO_SHIFT;
2075 		sc->sc_ctrl_ext |=
2076 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2077 		    CTRL_EXT_SWDPINS_SHIFT;
2078 	} else {
2079 		sc->sc_ctrl_ext |=
2080 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2081 		    CTRL_EXT_SWDPIO_SHIFT;
2082 	}
2083 #endif
2084 
2085 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2086 #if 0
2087 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2088 #endif
2089 
2090 	/*
2091 	 * Set up some register offsets that are different between
2092 	 * the i82542 and the i82543 and later chips.
2093 	 */
2094 	if (sc->sc_type < WM_T_82543) {
2095 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
2096 		sc->sc_tdt_reg = WMREG_OLD_TDT;
2097 	} else {
2098 		sc->sc_rdt_reg = WMREG_RDT;
2099 		sc->sc_tdt_reg = WMREG_TDT;
2100 	}
2101 
2102 	if (sc->sc_type == WM_T_PCH) {
2103 		uint16_t val;
2104 
2105 		/* Save the NVM K1 bit setting */
2106 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2107 
2108 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2109 			sc->sc_nvm_k1_enabled = 1;
2110 		else
2111 			sc->sc_nvm_k1_enabled = 0;
2112 	}
2113 
2114 	/*
2115 	 * Determine if we're TBI,GMII or SGMII mode, and initialize the
2116 	 * media structures accordingly.
2117 	 */
2118 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2119 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2120 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2121 	    || sc->sc_type == WM_T_82573
2122 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2123 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
2124 		wm_gmii_mediainit(sc, wmp->wmp_product);
2125 	} else if (sc->sc_type < WM_T_82543 ||
2126 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2127 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2128 			aprint_error_dev(sc->sc_dev,
2129 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
2130 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2131 		}
2132 		wm_tbi_mediainit(sc);
2133 	} else {
2134 		switch (sc->sc_type) {
2135 		case WM_T_82575:
2136 		case WM_T_82576:
2137 		case WM_T_82580:
2138 		case WM_T_I350:
2139 		case WM_T_I354:
2140 		case WM_T_I210:
2141 		case WM_T_I211:
2142 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
2143 			link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2144 			switch (link_mode) {
2145 			case CTRL_EXT_LINK_MODE_1000KX:
2146 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2147 				sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2148 				break;
2149 			case CTRL_EXT_LINK_MODE_SGMII:
2150 				if (wm_sgmii_uses_mdio(sc)) {
2151 					aprint_verbose_dev(sc->sc_dev,
2152 					    "SGMII(MDIO)\n");
2153 					sc->sc_flags |= WM_F_SGMII;
2154 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2155 					break;
2156 				}
2157 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2158 				/*FALLTHROUGH*/
2159 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2160 				sc->sc_mediatype = wm_sfp_get_media_type(sc);
2161 				if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2162 					if (link_mode
2163 					    == CTRL_EXT_LINK_MODE_SGMII) {
2164 						sc->sc_mediatype
2165 						    = WM_MEDIATYPE_COPPER;
2166 						sc->sc_flags |= WM_F_SGMII;
2167 					} else {
2168 						sc->sc_mediatype
2169 						    = WM_MEDIATYPE_SERDES;
2170 						aprint_verbose_dev(sc->sc_dev,
2171 						    "SERDES\n");
2172 					}
2173 					break;
2174 				}
2175 				if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2176 					aprint_verbose_dev(sc->sc_dev,
2177 					    "SERDES\n");
2178 
2179 				/* Change current link mode setting */
2180 				reg &= ~CTRL_EXT_LINK_MODE_MASK;
2181 				switch (sc->sc_mediatype) {
2182 				case WM_MEDIATYPE_COPPER:
2183 					reg |= CTRL_EXT_LINK_MODE_SGMII;
2184 					break;
2185 				case WM_MEDIATYPE_SERDES:
2186 					reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2187 					break;
2188 				default:
2189 					break;
2190 				}
2191 				CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2192 				break;
2193 			case CTRL_EXT_LINK_MODE_GMII:
2194 			default:
2195 				aprint_verbose_dev(sc->sc_dev, "Copper\n");
2196 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2197 				break;
2198 			}
2199 
2200 			reg &= ~CTRL_EXT_I2C_ENA;
2201 			if ((sc->sc_flags & WM_F_SGMII) != 0)
2202 				reg |= CTRL_EXT_I2C_ENA;
2203 			else
2204 				reg &= ~CTRL_EXT_I2C_ENA;
2205 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2206 
2207 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2208 				wm_gmii_mediainit(sc, wmp->wmp_product);
2209 			else
2210 				wm_tbi_mediainit(sc);
2211 			break;
2212 		default:
2213 			if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
2214 				aprint_error_dev(sc->sc_dev,
2215 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2216 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2217 			wm_gmii_mediainit(sc, wmp->wmp_product);
2218 		}
2219 	}
2220 
2221 	ifp = &sc->sc_ethercom.ec_if;
2222 	xname = device_xname(sc->sc_dev);
2223 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2224 	ifp->if_softc = sc;
2225 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2226 	ifp->if_ioctl = wm_ioctl;
2227 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
2228 		ifp->if_start = wm_nq_start;
2229 	else
2230 		ifp->if_start = wm_start;
2231 	ifp->if_watchdog = wm_watchdog;
2232 	ifp->if_init = wm_init;
2233 	ifp->if_stop = wm_stop;
2234 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2235 	IFQ_SET_READY(&ifp->if_snd);
2236 
2237 	/* Check for jumbo frame */
2238 	switch (sc->sc_type) {
2239 	case WM_T_82573:
2240 		/* XXX limited to 9234 if ASPM is disabled */
2241 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &io3);
2242 		if ((io3 & NVM_3GIO_3_ASPM_MASK) != 0)
2243 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2244 		break;
2245 	case WM_T_82571:
2246 	case WM_T_82572:
2247 	case WM_T_82574:
2248 	case WM_T_82575:
2249 	case WM_T_82576:
2250 	case WM_T_82580:
2251 	case WM_T_I350:
2252 	case WM_T_I354: /* XXXX ok? */
2253 	case WM_T_I210:
2254 	case WM_T_I211:
2255 	case WM_T_80003:
2256 	case WM_T_ICH9:
2257 	case WM_T_ICH10:
2258 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
2259 	case WM_T_PCH_LPT:
2260 		/* XXX limited to 9234 */
2261 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2262 		break;
2263 	case WM_T_PCH:
2264 		/* XXX limited to 4096 */
2265 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2266 		break;
2267 	case WM_T_82542_2_0:
2268 	case WM_T_82542_2_1:
2269 	case WM_T_82583:
2270 	case WM_T_ICH8:
2271 		/* No support for jumbo frame */
2272 		break;
2273 	default:
2274 		/* ETHER_MAX_LEN_JUMBO */
2275 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2276 		break;
2277 	}
2278 
2279 	/* If we're a i82543 or greater, we can support VLANs. */
2280 	if (sc->sc_type >= WM_T_82543)
2281 		sc->sc_ethercom.ec_capabilities |=
2282 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2283 
2284 	/*
2285 	 * We can perform TCPv4 and UDPv4 checkums in-bound.  Only
2286 	 * on i82543 and later.
2287 	 */
2288 	if (sc->sc_type >= WM_T_82543) {
2289 		ifp->if_capabilities |=
2290 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2291 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2292 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2293 		    IFCAP_CSUM_TCPv6_Tx |
2294 		    IFCAP_CSUM_UDPv6_Tx;
2295 	}
2296 
2297 	/*
2298 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2299 	 *
2300 	 *	82541GI (8086:1076) ... no
2301 	 *	82572EI (8086:10b9) ... yes
2302 	 */
2303 	if (sc->sc_type >= WM_T_82571) {
2304 		ifp->if_capabilities |=
2305 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2306 	}
2307 
2308 	/*
2309 	 * If we're a i82544 or greater (except i82547), we can do
2310 	 * TCP segmentation offload.
2311 	 */
2312 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2313 		ifp->if_capabilities |= IFCAP_TSOv4;
2314 	}
2315 
2316 	if (sc->sc_type >= WM_T_82571) {
2317 		ifp->if_capabilities |= IFCAP_TSOv6;
2318 	}
2319 
2320 #ifdef WM_MPSAFE
2321 	sc->sc_tx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2322 	sc->sc_rx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2323 #else
2324 	sc->sc_tx_lock = NULL;
2325 	sc->sc_rx_lock = NULL;
2326 #endif
2327 
2328 	/* Attach the interface. */
2329 	if_attach(ifp);
2330 	ether_ifattach(ifp, enaddr);
2331 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2332 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2333 			  RND_FLAG_DEFAULT);
2334 
2335 #ifdef WM_EVENT_COUNTERS
2336 	/* Attach event counters. */
2337 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2338 	    NULL, xname, "txsstall");
2339 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2340 	    NULL, xname, "txdstall");
2341 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2342 	    NULL, xname, "txfifo_stall");
2343 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2344 	    NULL, xname, "txdw");
2345 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2346 	    NULL, xname, "txqe");
2347 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2348 	    NULL, xname, "rxintr");
2349 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2350 	    NULL, xname, "linkintr");
2351 
2352 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2353 	    NULL, xname, "rxipsum");
2354 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2355 	    NULL, xname, "rxtusum");
2356 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2357 	    NULL, xname, "txipsum");
2358 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2359 	    NULL, xname, "txtusum");
2360 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2361 	    NULL, xname, "txtusum6");
2362 
2363 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2364 	    NULL, xname, "txtso");
2365 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2366 	    NULL, xname, "txtso6");
2367 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2368 	    NULL, xname, "txtsopain");
2369 
2370 	for (i = 0; i < WM_NTXSEGS; i++) {
2371 		snprintf(wm_txseg_evcnt_names[i],
2372 		    sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2373 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2374 		    NULL, xname, wm_txseg_evcnt_names[i]);
2375 	}
2376 
2377 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2378 	    NULL, xname, "txdrop");
2379 
2380 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2381 	    NULL, xname, "tu");
2382 
2383 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2384 	    NULL, xname, "tx_xoff");
2385 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2386 	    NULL, xname, "tx_xon");
2387 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2388 	    NULL, xname, "rx_xoff");
2389 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2390 	    NULL, xname, "rx_xon");
2391 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2392 	    NULL, xname, "rx_macctl");
2393 #endif /* WM_EVENT_COUNTERS */
2394 
2395 	if (pmf_device_register(self, wm_suspend, wm_resume))
2396 		pmf_class_network_register(self, ifp);
2397 	else
2398 		aprint_error_dev(self, "couldn't establish power handler\n");
2399 
2400 	sc->sc_flags |= WM_F_ATTACHED;
2401 	return;
2402 
2403 	/*
2404 	 * Free any resources we've allocated during the failed attach
2405 	 * attempt.  Do this in reverse order and fall through.
2406 	 */
2407  fail_5:
2408 	for (i = 0; i < WM_NRXDESC; i++) {
2409 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2410 			bus_dmamap_destroy(sc->sc_dmat,
2411 			    sc->sc_rxsoft[i].rxs_dmamap);
2412 	}
2413  fail_4:
2414 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2415 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
2416 			bus_dmamap_destroy(sc->sc_dmat,
2417 			    sc->sc_txsoft[i].txs_dmamap);
2418 	}
2419 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2420  fail_3:
2421 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2422  fail_2:
2423 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2424 	    sc->sc_cd_size);
2425  fail_1:
2426 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2427  fail_0:
2428 	return;
2429 }
2430 
2431 /* The detach function (ca_detach) */
2432 static int
2433 wm_detach(device_t self, int flags __unused)
2434 {
2435 	struct wm_softc *sc = device_private(self);
2436 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2437 	int i;
2438 #ifndef WM_MPSAFE
2439 	int s;
2440 #endif
2441 
2442 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2443 		return 0;
2444 
2445 #ifndef WM_MPSAFE
2446 	s = splnet();
2447 #endif
2448 	/* Stop the interface. Callouts are stopped in it. */
2449 	wm_stop(ifp, 1);
2450 
2451 #ifndef WM_MPSAFE
2452 	splx(s);
2453 #endif
2454 
2455 	pmf_device_deregister(self);
2456 
2457 	/* Tell the firmware about the release */
2458 	WM_BOTH_LOCK(sc);
2459 	wm_release_manageability(sc);
2460 	wm_release_hw_control(sc);
2461 	WM_BOTH_UNLOCK(sc);
2462 
2463 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2464 
2465 	/* Delete all remaining media. */
2466 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2467 
2468 	ether_ifdetach(ifp);
2469 	if_detach(ifp);
2470 
2471 
2472 	/* Unload RX dmamaps and free mbufs */
2473 	WM_RX_LOCK(sc);
2474 	wm_rxdrain(sc);
2475 	WM_RX_UNLOCK(sc);
2476 	/* Must unlock here */
2477 
2478 	/* Free dmamap. It's the same as the end of the wm_attach() function */
2479 	for (i = 0; i < WM_NRXDESC; i++) {
2480 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2481 			bus_dmamap_destroy(sc->sc_dmat,
2482 			    sc->sc_rxsoft[i].rxs_dmamap);
2483 	}
2484 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2485 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
2486 			bus_dmamap_destroy(sc->sc_dmat,
2487 			    sc->sc_txsoft[i].txs_dmamap);
2488 	}
2489 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2490 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2491 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2492 	    sc->sc_cd_size);
2493 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2494 
2495 	/* Disestablish the interrupt handler */
2496 	if (sc->sc_ih != NULL) {
2497 		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2498 		sc->sc_ih = NULL;
2499 	}
2500 
2501 	/* Unmap the registers */
2502 	if (sc->sc_ss) {
2503 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2504 		sc->sc_ss = 0;
2505 	}
2506 
2507 	if (sc->sc_ios) {
2508 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2509 		sc->sc_ios = 0;
2510 	}
2511 
2512 	if (sc->sc_tx_lock)
2513 		mutex_obj_free(sc->sc_tx_lock);
2514 	if (sc->sc_rx_lock)
2515 		mutex_obj_free(sc->sc_rx_lock);
2516 
2517 	return 0;
2518 }
2519 
2520 static bool
2521 wm_suspend(device_t self, const pmf_qual_t *qual)
2522 {
2523 	struct wm_softc *sc = device_private(self);
2524 
2525 	wm_release_manageability(sc);
2526 	wm_release_hw_control(sc);
2527 #ifdef WM_WOL
2528 	wm_enable_wakeup(sc);
2529 #endif
2530 
2531 	return true;
2532 }
2533 
2534 static bool
2535 wm_resume(device_t self, const pmf_qual_t *qual)
2536 {
2537 	struct wm_softc *sc = device_private(self);
2538 
2539 	wm_init_manageability(sc);
2540 
2541 	return true;
2542 }
2543 
2544 /*
2545  * wm_watchdog:		[ifnet interface function]
2546  *
2547  *	Watchdog timer handler.
2548  */
2549 static void
2550 wm_watchdog(struct ifnet *ifp)
2551 {
2552 	struct wm_softc *sc = ifp->if_softc;
2553 
2554 	/*
2555 	 * Since we're using delayed interrupts, sweep up
2556 	 * before we report an error.
2557 	 */
2558 	WM_TX_LOCK(sc);
2559 	wm_txintr(sc);
2560 	WM_TX_UNLOCK(sc);
2561 
2562 	if (sc->sc_txfree != WM_NTXDESC(sc)) {
2563 #ifdef WM_DEBUG
2564 		int i, j;
2565 		struct wm_txsoft *txs;
2566 #endif
2567 		log(LOG_ERR,
2568 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2569 		    device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2570 		    sc->sc_txnext);
2571 		ifp->if_oerrors++;
2572 #ifdef WM_DEBUG
2573 		for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
2574 		    i = WM_NEXTTXS(sc, i)) {
2575 		    txs = &sc->sc_txsoft[i];
2576 		    printf("txs %d tx %d -> %d\n",
2577 			i, txs->txs_firstdesc, txs->txs_lastdesc);
2578 		    for (j = txs->txs_firstdesc; ;
2579 			j = WM_NEXTTX(sc, j)) {
2580 			printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2581 			    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
2582 			printf("\t %#08x%08x\n",
2583 			    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
2584 			    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
2585 			if (j == txs->txs_lastdesc)
2586 				break;
2587 			}
2588 		}
2589 #endif
2590 		/* Reset the interface. */
2591 		(void) wm_init(ifp);
2592 	}
2593 
2594 	/* Try to get more packets going. */
2595 	ifp->if_start(ifp);
2596 }
2597 
2598 /*
2599  * wm_tick:
2600  *
2601  *	One second timer, used to check link status, sweep up
2602  *	completed transmit jobs, etc.
2603  */
2604 static void
2605 wm_tick(void *arg)
2606 {
2607 	struct wm_softc *sc = arg;
2608 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2609 #ifndef WM_MPSAFE
2610 	int s;
2611 
2612 	s = splnet();
2613 #endif
2614 
2615 	WM_TX_LOCK(sc);
2616 
2617 	if (sc->sc_stopping)
2618 		goto out;
2619 
2620 	if (sc->sc_type >= WM_T_82542_2_1) {
2621 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2622 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2623 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2624 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2625 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2626 	}
2627 
2628 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2629 	ifp->if_ierrors += 0ULL + /* ensure quad_t */
2630 	    + CSR_READ(sc, WMREG_CRCERRS)
2631 	    + CSR_READ(sc, WMREG_ALGNERRC)
2632 	    + CSR_READ(sc, WMREG_SYMERRC)
2633 	    + CSR_READ(sc, WMREG_RXERRC)
2634 	    + CSR_READ(sc, WMREG_SEC)
2635 	    + CSR_READ(sc, WMREG_CEXTERR)
2636 	    + CSR_READ(sc, WMREG_RLEC);
2637 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2638 
2639 	if (sc->sc_flags & WM_F_HAS_MII)
2640 		mii_tick(&sc->sc_mii);
2641 	else
2642 		wm_tbi_check_link(sc);
2643 
2644 out:
2645 	WM_TX_UNLOCK(sc);
2646 #ifndef WM_MPSAFE
2647 	splx(s);
2648 #endif
2649 
2650 	if (!sc->sc_stopping)
2651 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2652 }
2653 
2654 static int
2655 wm_ifflags_cb(struct ethercom *ec)
2656 {
2657 	struct ifnet *ifp = &ec->ec_if;
2658 	struct wm_softc *sc = ifp->if_softc;
2659 	int change = ifp->if_flags ^ sc->sc_if_flags;
2660 	int rc = 0;
2661 
2662 	WM_BOTH_LOCK(sc);
2663 
2664 	if (change != 0)
2665 		sc->sc_if_flags = ifp->if_flags;
2666 
2667 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
2668 		rc = ENETRESET;
2669 		goto out;
2670 	}
2671 
2672 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2673 		wm_set_filter(sc);
2674 
2675 	wm_set_vlan(sc);
2676 
2677 out:
2678 	WM_BOTH_UNLOCK(sc);
2679 
2680 	return rc;
2681 }
2682 
2683 /*
2684  * wm_ioctl:		[ifnet interface function]
2685  *
2686  *	Handle control requests from the operator.
2687  */
2688 static int
2689 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2690 {
2691 	struct wm_softc *sc = ifp->if_softc;
2692 	struct ifreq *ifr = (struct ifreq *) data;
2693 	struct ifaddr *ifa = (struct ifaddr *)data;
2694 	struct sockaddr_dl *sdl;
2695 	int s, error;
2696 
2697 #ifndef WM_MPSAFE
2698 	s = splnet();
2699 #endif
2700 	switch (cmd) {
2701 	case SIOCSIFMEDIA:
2702 	case SIOCGIFMEDIA:
2703 		WM_BOTH_LOCK(sc);
2704 		/* Flow control requires full-duplex mode. */
2705 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2706 		    (ifr->ifr_media & IFM_FDX) == 0)
2707 			ifr->ifr_media &= ~IFM_ETH_FMASK;
2708 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2709 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2710 				/* We can do both TXPAUSE and RXPAUSE. */
2711 				ifr->ifr_media |=
2712 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2713 			}
2714 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2715 		}
2716 		WM_BOTH_UNLOCK(sc);
2717 #ifdef WM_MPSAFE
2718 		s = splnet();
2719 #endif
2720 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2721 #ifdef WM_MPSAFE
2722 		splx(s);
2723 #endif
2724 		break;
2725 	case SIOCINITIFADDR:
2726 		WM_BOTH_LOCK(sc);
2727 		if (ifa->ifa_addr->sa_family == AF_LINK) {
2728 			sdl = satosdl(ifp->if_dl->ifa_addr);
2729 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2730 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2731 			/* unicast address is first multicast entry */
2732 			wm_set_filter(sc);
2733 			error = 0;
2734 			WM_BOTH_UNLOCK(sc);
2735 			break;
2736 		}
2737 		WM_BOTH_UNLOCK(sc);
2738 		/*FALLTHROUGH*/
2739 	default:
2740 #ifdef WM_MPSAFE
2741 		s = splnet();
2742 #endif
2743 		/* It may call wm_start, so unlock here */
2744 		error = ether_ioctl(ifp, cmd, data);
2745 #ifdef WM_MPSAFE
2746 		splx(s);
2747 #endif
2748 		if (error != ENETRESET)
2749 			break;
2750 
2751 		error = 0;
2752 
2753 		if (cmd == SIOCSIFCAP) {
2754 			error = (*ifp->if_init)(ifp);
2755 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2756 			;
2757 		else if (ifp->if_flags & IFF_RUNNING) {
2758 			/*
2759 			 * Multicast list has changed; set the hardware filter
2760 			 * accordingly.
2761 			 */
2762 			WM_BOTH_LOCK(sc);
2763 			wm_set_filter(sc);
2764 			WM_BOTH_UNLOCK(sc);
2765 		}
2766 		break;
2767 	}
2768 
2769 	/* Try to get more packets going. */
2770 	ifp->if_start(ifp);
2771 
2772 #ifndef WM_MPSAFE
2773 	splx(s);
2774 #endif
2775 	return error;
2776 }
2777 
2778 /* MAC address related */
2779 
2780 /*
2781  * Get the offset of MAC address and return it.
2782  * If error occured, use offset 0.
2783  */
2784 static uint16_t
2785 wm_check_alt_mac_addr(struct wm_softc *sc)
2786 {
2787 	uint16_t myea[ETHER_ADDR_LEN / 2];
2788 	uint16_t offset = NVM_OFF_MACADDR;
2789 
2790 	/* Try to read alternative MAC address pointer */
2791 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
2792 		return 0;
2793 
2794 	/* Check pointer if it's valid or not. */
2795 	if ((offset == 0x0000) || (offset == 0xffff))
2796 		return 0;
2797 
2798 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
2799 	/*
2800 	 * Check whether alternative MAC address is valid or not.
2801 	 * Some cards have non 0xffff pointer but those don't use
2802 	 * alternative MAC address in reality.
2803 	 *
2804 	 * Check whether the broadcast bit is set or not.
2805 	 */
2806 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
2807 		if (((myea[0] & 0xff) & 0x01) == 0)
2808 			return offset; /* Found */
2809 
2810 	/* Not found */
2811 	return 0;
2812 }
2813 
2814 static int
2815 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
2816 {
2817 	uint16_t myea[ETHER_ADDR_LEN / 2];
2818 	uint16_t offset = NVM_OFF_MACADDR;
2819 	int do_invert = 0;
2820 
2821 	switch (sc->sc_type) {
2822 	case WM_T_82580:
2823 	case WM_T_I350:
2824 	case WM_T_I354:
2825 		/* EEPROM Top Level Partitioning */
2826 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
2827 		break;
2828 	case WM_T_82571:
2829 	case WM_T_82575:
2830 	case WM_T_82576:
2831 	case WM_T_80003:
2832 	case WM_T_I210:
2833 	case WM_T_I211:
2834 		offset = wm_check_alt_mac_addr(sc);
2835 		if (offset == 0)
2836 			if ((sc->sc_funcid & 0x01) == 1)
2837 				do_invert = 1;
2838 		break;
2839 	default:
2840 		if ((sc->sc_funcid & 0x01) == 1)
2841 			do_invert = 1;
2842 		break;
2843 	}
2844 
2845 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
2846 		myea) != 0)
2847 		goto bad;
2848 
2849 	enaddr[0] = myea[0] & 0xff;
2850 	enaddr[1] = myea[0] >> 8;
2851 	enaddr[2] = myea[1] & 0xff;
2852 	enaddr[3] = myea[1] >> 8;
2853 	enaddr[4] = myea[2] & 0xff;
2854 	enaddr[5] = myea[2] >> 8;
2855 
2856 	/*
2857 	 * Toggle the LSB of the MAC address on the second port
2858 	 * of some dual port cards.
2859 	 */
2860 	if (do_invert != 0)
2861 		enaddr[5] ^= 1;
2862 
2863 	return 0;
2864 
2865  bad:
2866 	return -1;
2867 }
2868 
2869 /*
2870  * wm_set_ral:
2871  *
2872  *	Set an entery in the receive address list.
2873  */
2874 static void
2875 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2876 {
2877 	uint32_t ral_lo, ral_hi;
2878 
2879 	if (enaddr != NULL) {
2880 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2881 		    (enaddr[3] << 24);
2882 		ral_hi = enaddr[4] | (enaddr[5] << 8);
2883 		ral_hi |= RAL_AV;
2884 	} else {
2885 		ral_lo = 0;
2886 		ral_hi = 0;
2887 	}
2888 
2889 	if (sc->sc_type >= WM_T_82544) {
2890 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2891 		    ral_lo);
2892 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2893 		    ral_hi);
2894 	} else {
2895 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2896 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2897 	}
2898 }
2899 
2900 /*
2901  * wm_mchash:
2902  *
2903  *	Compute the hash of the multicast address for the 4096-bit
2904  *	multicast filter.
2905  */
2906 static uint32_t
2907 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2908 {
2909 	static const int lo_shift[4] = { 4, 3, 2, 0 };
2910 	static const int hi_shift[4] = { 4, 5, 6, 8 };
2911 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
2912 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
2913 	uint32_t hash;
2914 
2915 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2916 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2917 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
2918 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
2919 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
2920 		return (hash & 0x3ff);
2921 	}
2922 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2923 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2924 
2925 	return (hash & 0xfff);
2926 }
2927 
2928 /*
2929  * wm_set_filter:
2930  *
2931  *	Set up the receive filter.
2932  */
2933 static void
2934 wm_set_filter(struct wm_softc *sc)
2935 {
2936 	struct ethercom *ec = &sc->sc_ethercom;
2937 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2938 	struct ether_multi *enm;
2939 	struct ether_multistep step;
2940 	bus_addr_t mta_reg;
2941 	uint32_t hash, reg, bit;
2942 	int i, size;
2943 
2944 	if (sc->sc_type >= WM_T_82544)
2945 		mta_reg = WMREG_CORDOVA_MTA;
2946 	else
2947 		mta_reg = WMREG_MTA;
2948 
2949 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2950 
2951 	if (ifp->if_flags & IFF_BROADCAST)
2952 		sc->sc_rctl |= RCTL_BAM;
2953 	if (ifp->if_flags & IFF_PROMISC) {
2954 		sc->sc_rctl |= RCTL_UPE;
2955 		goto allmulti;
2956 	}
2957 
2958 	/*
2959 	 * Set the station address in the first RAL slot, and
2960 	 * clear the remaining slots.
2961 	 */
2962 	if (sc->sc_type == WM_T_ICH8)
2963 		size = WM_RAL_TABSIZE_ICH8 -1;
2964 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
2965 	    || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
2966 	    || (sc->sc_type == WM_T_PCH_LPT))
2967 		size = WM_RAL_TABSIZE_ICH8;
2968 	else if (sc->sc_type == WM_T_82575)
2969 		size = WM_RAL_TABSIZE_82575;
2970 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
2971 		size = WM_RAL_TABSIZE_82576;
2972 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2973 		size = WM_RAL_TABSIZE_I350;
2974 	else
2975 		size = WM_RAL_TABSIZE;
2976 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
2977 	for (i = 1; i < size; i++)
2978 		wm_set_ral(sc, NULL, i);
2979 
2980 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2981 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2982 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
2983 		size = WM_ICH8_MC_TABSIZE;
2984 	else
2985 		size = WM_MC_TABSIZE;
2986 	/* Clear out the multicast table. */
2987 	for (i = 0; i < size; i++)
2988 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
2989 
2990 	ETHER_FIRST_MULTI(step, ec, enm);
2991 	while (enm != NULL) {
2992 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2993 			/*
2994 			 * We must listen to a range of multicast addresses.
2995 			 * For now, just accept all multicasts, rather than
2996 			 * trying to set only those filter bits needed to match
2997 			 * the range.  (At this time, the only use of address
2998 			 * ranges is for IP multicast routing, for which the
2999 			 * range is big enough to require all bits set.)
3000 			 */
3001 			goto allmulti;
3002 		}
3003 
3004 		hash = wm_mchash(sc, enm->enm_addrlo);
3005 
3006 		reg = (hash >> 5);
3007 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3008 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3009 		    || (sc->sc_type == WM_T_PCH2)
3010 		    || (sc->sc_type == WM_T_PCH_LPT))
3011 			reg &= 0x1f;
3012 		else
3013 			reg &= 0x7f;
3014 		bit = hash & 0x1f;
3015 
3016 		hash = CSR_READ(sc, mta_reg + (reg << 2));
3017 		hash |= 1U << bit;
3018 
3019 		/* XXX Hardware bug?? */
3020 		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
3021 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3022 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3023 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3024 		} else
3025 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3026 
3027 		ETHER_NEXT_MULTI(step, enm);
3028 	}
3029 
3030 	ifp->if_flags &= ~IFF_ALLMULTI;
3031 	goto setit;
3032 
3033  allmulti:
3034 	ifp->if_flags |= IFF_ALLMULTI;
3035 	sc->sc_rctl |= RCTL_MPE;
3036 
3037  setit:
3038 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3039 }
3040 
3041 /* Reset and init related */
3042 
3043 static void
3044 wm_set_vlan(struct wm_softc *sc)
3045 {
3046 	/* Deal with VLAN enables. */
3047 	if (VLAN_ATTACHED(&sc->sc_ethercom))
3048 		sc->sc_ctrl |= CTRL_VME;
3049 	else
3050 		sc->sc_ctrl &= ~CTRL_VME;
3051 
3052 	/* Write the control registers. */
3053 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3054 }
3055 
3056 static void
3057 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3058 {
3059 	uint32_t gcr;
3060 	pcireg_t ctrl2;
3061 
3062 	gcr = CSR_READ(sc, WMREG_GCR);
3063 
3064 	/* Only take action if timeout value is defaulted to 0 */
3065 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3066 		goto out;
3067 
3068 	if ((gcr & GCR_CAP_VER2) == 0) {
3069 		gcr |= GCR_CMPL_TMOUT_10MS;
3070 		goto out;
3071 	}
3072 
3073 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3074 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
3075 	ctrl2 |= WM_PCIE_DCSR2_16MS;
3076 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3077 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3078 
3079 out:
3080 	/* Disable completion timeout resend */
3081 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
3082 
3083 	CSR_WRITE(sc, WMREG_GCR, gcr);
3084 }
3085 
3086 void
3087 wm_get_auto_rd_done(struct wm_softc *sc)
3088 {
3089 	int i;
3090 
3091 	/* wait for eeprom to reload */
3092 	switch (sc->sc_type) {
3093 	case WM_T_82571:
3094 	case WM_T_82572:
3095 	case WM_T_82573:
3096 	case WM_T_82574:
3097 	case WM_T_82583:
3098 	case WM_T_82575:
3099 	case WM_T_82576:
3100 	case WM_T_82580:
3101 	case WM_T_I350:
3102 	case WM_T_I354:
3103 	case WM_T_I210:
3104 	case WM_T_I211:
3105 	case WM_T_80003:
3106 	case WM_T_ICH8:
3107 	case WM_T_ICH9:
3108 		for (i = 0; i < 10; i++) {
3109 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3110 				break;
3111 			delay(1000);
3112 		}
3113 		if (i == 10) {
3114 			log(LOG_ERR, "%s: auto read from eeprom failed to "
3115 			    "complete\n", device_xname(sc->sc_dev));
3116 		}
3117 		break;
3118 	default:
3119 		break;
3120 	}
3121 }
3122 
3123 void
3124 wm_lan_init_done(struct wm_softc *sc)
3125 {
3126 	uint32_t reg = 0;
3127 	int i;
3128 
3129 	/* wait for eeprom to reload */
3130 	switch (sc->sc_type) {
3131 	case WM_T_ICH10:
3132 	case WM_T_PCH:
3133 	case WM_T_PCH2:
3134 	case WM_T_PCH_LPT:
3135 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3136 			reg = CSR_READ(sc, WMREG_STATUS);
3137 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
3138 				break;
3139 			delay(100);
3140 		}
3141 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3142 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
3143 			    "complete\n", device_xname(sc->sc_dev), __func__);
3144 		}
3145 		break;
3146 	default:
3147 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3148 		    __func__);
3149 		break;
3150 	}
3151 
3152 	reg &= ~STATUS_LAN_INIT_DONE;
3153 	CSR_WRITE(sc, WMREG_STATUS, reg);
3154 }
3155 
3156 void
3157 wm_get_cfg_done(struct wm_softc *sc)
3158 {
3159 	int mask;
3160 	uint32_t reg;
3161 	int i;
3162 
3163 	/* wait for eeprom to reload */
3164 	switch (sc->sc_type) {
3165 	case WM_T_82542_2_0:
3166 	case WM_T_82542_2_1:
3167 		/* null */
3168 		break;
3169 	case WM_T_82543:
3170 	case WM_T_82544:
3171 	case WM_T_82540:
3172 	case WM_T_82545:
3173 	case WM_T_82545_3:
3174 	case WM_T_82546:
3175 	case WM_T_82546_3:
3176 	case WM_T_82541:
3177 	case WM_T_82541_2:
3178 	case WM_T_82547:
3179 	case WM_T_82547_2:
3180 	case WM_T_82573:
3181 	case WM_T_82574:
3182 	case WM_T_82583:
3183 		/* generic */
3184 		delay(10*1000);
3185 		break;
3186 	case WM_T_80003:
3187 	case WM_T_82571:
3188 	case WM_T_82572:
3189 	case WM_T_82575:
3190 	case WM_T_82576:
3191 	case WM_T_82580:
3192 	case WM_T_I350:
3193 	case WM_T_I354:
3194 	case WM_T_I210:
3195 	case WM_T_I211:
3196 		if (sc->sc_type == WM_T_82571) {
3197 			/* Only 82571 shares port 0 */
3198 			mask = EEMNGCTL_CFGDONE_0;
3199 		} else
3200 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3201 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3202 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3203 				break;
3204 			delay(1000);
3205 		}
3206 		if (i >= WM_PHY_CFG_TIMEOUT) {
3207 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3208 				device_xname(sc->sc_dev), __func__));
3209 		}
3210 		break;
3211 	case WM_T_ICH8:
3212 	case WM_T_ICH9:
3213 	case WM_T_ICH10:
3214 	case WM_T_PCH:
3215 	case WM_T_PCH2:
3216 	case WM_T_PCH_LPT:
3217 		delay(10*1000);
3218 		if (sc->sc_type >= WM_T_ICH10)
3219 			wm_lan_init_done(sc);
3220 		else
3221 			wm_get_auto_rd_done(sc);
3222 
3223 		reg = CSR_READ(sc, WMREG_STATUS);
3224 		if ((reg & STATUS_PHYRA) != 0)
3225 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3226 		break;
3227 	default:
3228 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3229 		    __func__);
3230 		break;
3231 	}
3232 }
3233 
3234 /* Init hardware bits */
3235 void
3236 wm_initialize_hardware_bits(struct wm_softc *sc)
3237 {
3238 	uint32_t tarc0, tarc1, reg;
3239 
3240 	/* For 82571 variant, 80003 and ICHs */
3241 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
3242 	    || (sc->sc_type >= WM_T_80003)) {
3243 
3244 		/* Transmit Descriptor Control 0 */
3245 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
3246 		reg |= TXDCTL_COUNT_DESC;
3247 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
3248 
3249 		/* Transmit Descriptor Control 1 */
3250 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
3251 		reg |= TXDCTL_COUNT_DESC;
3252 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
3253 
3254 		/* TARC0 */
3255 		tarc0 = CSR_READ(sc, WMREG_TARC0);
3256 		switch (sc->sc_type) {
3257 		case WM_T_82571:
3258 		case WM_T_82572:
3259 		case WM_T_82573:
3260 		case WM_T_82574:
3261 		case WM_T_82583:
3262 		case WM_T_80003:
3263 			/* Clear bits 30..27 */
3264 			tarc0 &= ~__BITS(30, 27);
3265 			break;
3266 		default:
3267 			break;
3268 		}
3269 
3270 		switch (sc->sc_type) {
3271 		case WM_T_82571:
3272 		case WM_T_82572:
3273 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
3274 
3275 			tarc1 = CSR_READ(sc, WMREG_TARC1);
3276 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
3277 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
3278 			/* 8257[12] Errata No.7 */
3279 			tarc1 |= __BIT(22); /* TARC1 bits 22 */
3280 
3281 			/* TARC1 bit 28 */
3282 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3283 				tarc1 &= ~__BIT(28);
3284 			else
3285 				tarc1 |= __BIT(28);
3286 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
3287 
3288 			/*
3289 			 * 8257[12] Errata No.13
3290 			 * Disable Dyamic Clock Gating.
3291 			 */
3292 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
3293 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
3294 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3295 			break;
3296 		case WM_T_82573:
3297 		case WM_T_82574:
3298 		case WM_T_82583:
3299 			if ((sc->sc_type == WM_T_82574)
3300 			    || (sc->sc_type == WM_T_82583))
3301 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
3302 
3303 			/* Extended Device Control */
3304 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
3305 			reg &= ~__BIT(23);	/* Clear bit 23 */
3306 			reg |= __BIT(22);	/* Set bit 22 */
3307 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3308 
3309 			/* Device Control */
3310 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
3311 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3312 
3313 			/* PCIe Control Register */
3314 			if ((sc->sc_type == WM_T_82574)
3315 			    || (sc->sc_type == WM_T_82583)) {
3316 				/*
3317 				 * Document says this bit must be set for
3318 				 * proper operation.
3319 				 */
3320 				reg = CSR_READ(sc, WMREG_GCR);
3321 				reg |= __BIT(22);
3322 				CSR_WRITE(sc, WMREG_GCR, reg);
3323 
3324 				/*
3325 				 * Apply workaround for hardware errata
3326 				 * documented in errata docs Fixes issue where
3327 				 * some error prone or unreliable PCIe
3328 				 * completions are occurring, particularly
3329 				 * with ASPM enabled. Without fix, issue can
3330 				 * cause Tx timeouts.
3331 				 */
3332 				reg = CSR_READ(sc, WMREG_GCR2);
3333 				reg |= __BIT(0);
3334 				CSR_WRITE(sc, WMREG_GCR2, reg);
3335 			}
3336 			break;
3337 		case WM_T_80003:
3338 			/* TARC0 */
3339 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
3340 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3341 				tarc0 &= ~__BIT(20); /* Clear bits 20 */
3342 
3343 			/* TARC1 bit 28 */
3344 			tarc1 = CSR_READ(sc, WMREG_TARC1);
3345 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3346 				tarc1 &= ~__BIT(28);
3347 			else
3348 				tarc1 |= __BIT(28);
3349 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
3350 			break;
3351 		case WM_T_ICH8:
3352 		case WM_T_ICH9:
3353 		case WM_T_ICH10:
3354 		case WM_T_PCH:
3355 		case WM_T_PCH2:
3356 		case WM_T_PCH_LPT:
3357 			/* TARC 0 */
3358 			if (sc->sc_type == WM_T_ICH8) {
3359 				/* Set TARC0 bits 29 and 28 */
3360 				tarc0 |= __BITS(29, 28);
3361 			}
3362 			/* Set TARC0 bits 23,24,26,27 */
3363 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
3364 
3365 			/* CTRL_EXT */
3366 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
3367 			reg |= __BIT(22);	/* Set bit 22 */
3368 			/*
3369 			 * Enable PHY low-power state when MAC is at D3
3370 			 * w/o WoL
3371 			 */
3372 			if (sc->sc_type >= WM_T_PCH)
3373 				reg |= CTRL_EXT_PHYPDEN;
3374 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3375 
3376 			/* TARC1 */
3377 			tarc1 = CSR_READ(sc, WMREG_TARC1);
3378 			/* bit 28 */
3379 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3380 				tarc1 &= ~__BIT(28);
3381 			else
3382 				tarc1 |= __BIT(28);
3383 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
3384 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
3385 
3386 			/* Device Status */
3387 			if (sc->sc_type == WM_T_ICH8) {
3388 				reg = CSR_READ(sc, WMREG_STATUS);
3389 				reg &= ~__BIT(31);
3390 				CSR_WRITE(sc, WMREG_STATUS, reg);
3391 
3392 			}
3393 
3394 			/*
3395 			 * Work-around descriptor data corruption issue during
3396 			 * NFS v2 UDP traffic, just disable the NFS filtering
3397 			 * capability.
3398 			 */
3399 			reg = CSR_READ(sc, WMREG_RFCTL);
3400 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
3401 			CSR_WRITE(sc, WMREG_RFCTL, reg);
3402 			break;
3403 		default:
3404 			break;
3405 		}
3406 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
3407 
3408 		/*
3409 		 * 8257[12] Errata No.52 and some others.
3410 		 * Avoid RSS Hash Value bug.
3411 		 */
3412 		switch (sc->sc_type) {
3413 		case WM_T_82571:
3414 		case WM_T_82572:
3415 		case WM_T_82573:
3416 		case WM_T_80003:
3417 		case WM_T_ICH8:
3418 			reg = CSR_READ(sc, WMREG_RFCTL);
3419 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
3420 			CSR_WRITE(sc, WMREG_RFCTL, reg);
3421 			break;
3422 		default:
3423 			break;
3424 		}
3425 	}
3426 }
3427 
3428 /*
3429  * wm_reset:
3430  *
3431  *	Reset the i82542 chip.
3432  */
3433 static void
3434 wm_reset(struct wm_softc *sc)
3435 {
3436 	int phy_reset = 0;
3437 	int error = 0;
3438 	uint32_t reg, mask;
3439 
3440 	/*
3441 	 * Allocate on-chip memory according to the MTU size.
3442 	 * The Packet Buffer Allocation register must be written
3443 	 * before the chip is reset.
3444 	 */
3445 	switch (sc->sc_type) {
3446 	case WM_T_82547:
3447 	case WM_T_82547_2:
3448 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3449 		    PBA_22K : PBA_30K;
3450 		sc->sc_txfifo_head = 0;
3451 		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3452 		sc->sc_txfifo_size =
3453 		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3454 		sc->sc_txfifo_stall = 0;
3455 		break;
3456 	case WM_T_82571:
3457 	case WM_T_82572:
3458 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
3459 	case WM_T_I350:
3460 	case WM_T_I354:
3461 	case WM_T_80003:
3462 		sc->sc_pba = PBA_32K;
3463 		break;
3464 	case WM_T_82580:
3465 		sc->sc_pba = PBA_35K;
3466 		break;
3467 	case WM_T_I210:
3468 	case WM_T_I211:
3469 		sc->sc_pba = PBA_34K;
3470 		break;
3471 	case WM_T_82576:
3472 		sc->sc_pba = PBA_64K;
3473 		break;
3474 	case WM_T_82573:
3475 		sc->sc_pba = PBA_12K;
3476 		break;
3477 	case WM_T_82574:
3478 	case WM_T_82583:
3479 		sc->sc_pba = PBA_20K;
3480 		break;
3481 	case WM_T_ICH8:
3482 		/* Workaround for a bit corruption issue in FIFO memory */
3483 		sc->sc_pba = PBA_8K;
3484 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3485 		break;
3486 	case WM_T_ICH9:
3487 	case WM_T_ICH10:
3488 		sc->sc_pba = PBA_10K;
3489 		break;
3490 	case WM_T_PCH:
3491 	case WM_T_PCH2:
3492 	case WM_T_PCH_LPT:
3493 		sc->sc_pba = PBA_26K;
3494 		break;
3495 	default:
3496 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3497 		    PBA_40K : PBA_48K;
3498 		break;
3499 	}
3500 	CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3501 
3502 	/* Prevent the PCI-E bus from sticking */
3503 	if (sc->sc_flags & WM_F_PCIE) {
3504 		int timeout = 800;
3505 
3506 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
3507 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3508 
3509 		while (timeout--) {
3510 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3511 			    == 0)
3512 				break;
3513 			delay(100);
3514 		}
3515 	}
3516 
3517 	/* Set the completion timeout for interface */
3518 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3519 	    || (sc->sc_type == WM_T_82580)
3520 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3521 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3522 		wm_set_pcie_completion_timeout(sc);
3523 
3524 	/* Clear interrupt */
3525 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3526 
3527 	/* Stop the transmit and receive processes. */
3528 	CSR_WRITE(sc, WMREG_RCTL, 0);
3529 	sc->sc_rctl &= ~RCTL_EN;
3530 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3531 	CSR_WRITE_FLUSH(sc);
3532 
3533 	/* XXX set_tbi_sbp_82543() */
3534 
3535 	delay(10*1000);
3536 
3537 	/* Must acquire the MDIO ownership before MAC reset */
3538 	switch (sc->sc_type) {
3539 	case WM_T_82573:
3540 	case WM_T_82574:
3541 	case WM_T_82583:
3542 		error = wm_get_hw_semaphore_82573(sc);
3543 		break;
3544 	default:
3545 		break;
3546 	}
3547 
3548 	/*
3549 	 * 82541 Errata 29? & 82547 Errata 28?
3550 	 * See also the description about PHY_RST bit in CTRL register
3551 	 * in 8254x_GBe_SDM.pdf.
3552 	 */
3553 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3554 		CSR_WRITE(sc, WMREG_CTRL,
3555 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3556 		CSR_WRITE_FLUSH(sc);
3557 		delay(5000);
3558 	}
3559 
3560 	switch (sc->sc_type) {
3561 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3562 	case WM_T_82541:
3563 	case WM_T_82541_2:
3564 	case WM_T_82547:
3565 	case WM_T_82547_2:
3566 		/*
3567 		 * On some chipsets, a reset through a memory-mapped write
3568 		 * cycle can cause the chip to reset before completing the
3569 		 * write cycle.  This causes major headache that can be
3570 		 * avoided by issuing the reset via indirect register writes
3571 		 * through I/O space.
3572 		 *
3573 		 * So, if we successfully mapped the I/O BAR at attach time,
3574 		 * use that.  Otherwise, try our luck with a memory-mapped
3575 		 * reset.
3576 		 */
3577 		if (sc->sc_flags & WM_F_IOH_VALID)
3578 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3579 		else
3580 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3581 		break;
3582 	case WM_T_82545_3:
3583 	case WM_T_82546_3:
3584 		/* Use the shadow control register on these chips. */
3585 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3586 		break;
3587 	case WM_T_80003:
3588 		mask = swfwphysem[sc->sc_funcid];
3589 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3590 		wm_get_swfw_semaphore(sc, mask);
3591 		CSR_WRITE(sc, WMREG_CTRL, reg);
3592 		wm_put_swfw_semaphore(sc, mask);
3593 		break;
3594 	case WM_T_ICH8:
3595 	case WM_T_ICH9:
3596 	case WM_T_ICH10:
3597 	case WM_T_PCH:
3598 	case WM_T_PCH2:
3599 	case WM_T_PCH_LPT:
3600 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3601 		if (wm_check_reset_block(sc) == 0) {
3602 			/*
3603 			 * Gate automatic PHY configuration by hardware on
3604 			 * non-managed 82579
3605 			 */
3606 			if ((sc->sc_type == WM_T_PCH2)
3607 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3608 				!= 0))
3609 				wm_gate_hw_phy_config_ich8lan(sc, 1);
3610 
3611 
3612 			reg |= CTRL_PHY_RESET;
3613 			phy_reset = 1;
3614 		}
3615 		wm_get_swfwhw_semaphore(sc);
3616 		CSR_WRITE(sc, WMREG_CTRL, reg);
3617 		/* Don't insert a completion barrier when reset */
3618 		delay(20*1000);
3619 		wm_put_swfwhw_semaphore(sc);
3620 		break;
3621 	case WM_T_82580:
3622 	case WM_T_I350:
3623 	case WM_T_I354:
3624 	case WM_T_I210:
3625 	case WM_T_I211:
3626 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3627 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
3628 			CSR_WRITE_FLUSH(sc);
3629 		delay(5000);
3630 		break;
3631 	case WM_T_82542_2_0:
3632 	case WM_T_82542_2_1:
3633 	case WM_T_82543:
3634 	case WM_T_82540:
3635 	case WM_T_82545:
3636 	case WM_T_82546:
3637 	case WM_T_82571:
3638 	case WM_T_82572:
3639 	case WM_T_82573:
3640 	case WM_T_82574:
3641 	case WM_T_82575:
3642 	case WM_T_82576:
3643 	case WM_T_82583:
3644 	default:
3645 		/* Everything else can safely use the documented method. */
3646 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3647 		break;
3648 	}
3649 
3650 	/* Must release the MDIO ownership after MAC reset */
3651 	switch (sc->sc_type) {
3652 	case WM_T_82573:
3653 	case WM_T_82574:
3654 	case WM_T_82583:
3655 		if (error == 0)
3656 			wm_put_hw_semaphore_82573(sc);
3657 		break;
3658 	default:
3659 		break;
3660 	}
3661 
3662 	if (phy_reset != 0)
3663 		wm_get_cfg_done(sc);
3664 
3665 	/* reload EEPROM */
3666 	switch (sc->sc_type) {
3667 	case WM_T_82542_2_0:
3668 	case WM_T_82542_2_1:
3669 	case WM_T_82543:
3670 	case WM_T_82544:
3671 		delay(10);
3672 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3673 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3674 		CSR_WRITE_FLUSH(sc);
3675 		delay(2000);
3676 		break;
3677 	case WM_T_82540:
3678 	case WM_T_82545:
3679 	case WM_T_82545_3:
3680 	case WM_T_82546:
3681 	case WM_T_82546_3:
3682 		delay(5*1000);
3683 		/* XXX Disable HW ARPs on ASF enabled adapters */
3684 		break;
3685 	case WM_T_82541:
3686 	case WM_T_82541_2:
3687 	case WM_T_82547:
3688 	case WM_T_82547_2:
3689 		delay(20000);
3690 		/* XXX Disable HW ARPs on ASF enabled adapters */
3691 		break;
3692 	case WM_T_82571:
3693 	case WM_T_82572:
3694 	case WM_T_82573:
3695 	case WM_T_82574:
3696 	case WM_T_82583:
3697 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3698 			delay(10);
3699 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3700 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3701 			CSR_WRITE_FLUSH(sc);
3702 		}
3703 		/* check EECD_EE_AUTORD */
3704 		wm_get_auto_rd_done(sc);
3705 		/*
3706 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
3707 		 * is set.
3708 		 */
3709 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3710 		    || (sc->sc_type == WM_T_82583))
3711 			delay(25*1000);
3712 		break;
3713 	case WM_T_82575:
3714 	case WM_T_82576:
3715 	case WM_T_82580:
3716 	case WM_T_I350:
3717 	case WM_T_I354:
3718 	case WM_T_I210:
3719 	case WM_T_I211:
3720 	case WM_T_80003:
3721 		/* check EECD_EE_AUTORD */
3722 		wm_get_auto_rd_done(sc);
3723 		break;
3724 	case WM_T_ICH8:
3725 	case WM_T_ICH9:
3726 	case WM_T_ICH10:
3727 	case WM_T_PCH:
3728 	case WM_T_PCH2:
3729 	case WM_T_PCH_LPT:
3730 		break;
3731 	default:
3732 		panic("%s: unknown type\n", __func__);
3733 	}
3734 
3735 	/* Check whether EEPROM is present or not */
3736 	switch (sc->sc_type) {
3737 	case WM_T_82575:
3738 	case WM_T_82576:
3739 #if 0 /* XXX */
3740 	case WM_T_82580:
3741 #endif
3742 	case WM_T_I350:
3743 	case WM_T_I354:
3744 	case WM_T_ICH8:
3745 	case WM_T_ICH9:
3746 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3747 			/* Not found */
3748 			sc->sc_flags |= WM_F_EEPROM_INVALID;
3749 			if ((sc->sc_type == WM_T_82575)
3750 			    || (sc->sc_type == WM_T_82576)
3751 			    || (sc->sc_type == WM_T_82580)
3752 			    || (sc->sc_type == WM_T_I350)
3753 			    || (sc->sc_type == WM_T_I354))
3754 				wm_reset_init_script_82575(sc);
3755 		}
3756 		break;
3757 	default:
3758 		break;
3759 	}
3760 
3761 	if ((sc->sc_type == WM_T_82580)
3762 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
3763 		/* clear global device reset status bit */
3764 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
3765 	}
3766 
3767 	/* Clear any pending interrupt events. */
3768 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3769 	reg = CSR_READ(sc, WMREG_ICR);
3770 
3771 	/* reload sc_ctrl */
3772 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3773 
3774 	if (sc->sc_type == WM_T_I350)
3775 		wm_set_eee_i350(sc);
3776 
3777 	/* dummy read from WUC */
3778 	if (sc->sc_type == WM_T_PCH)
3779 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3780 	/*
3781 	 * For PCH, this write will make sure that any noise will be detected
3782 	 * as a CRC error and be dropped rather than show up as a bad packet
3783 	 * to the DMA engine
3784 	 */
3785 	if (sc->sc_type == WM_T_PCH)
3786 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3787 
3788 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3789 		CSR_WRITE(sc, WMREG_WUC, 0);
3790 
3791 	/* XXX need special handling for 82580 */
3792 }
3793 
3794 /*
3795  * wm_add_rxbuf:
3796  *
3797  *	Add a receive buffer to the indiciated descriptor.
3798  */
3799 static int
3800 wm_add_rxbuf(struct wm_softc *sc, int idx)
3801 {
3802 	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
3803 	struct mbuf *m;
3804 	int error;
3805 
3806 	KASSERT(WM_RX_LOCKED(sc));
3807 
3808 	MGETHDR(m, M_DONTWAIT, MT_DATA);
3809 	if (m == NULL)
3810 		return ENOBUFS;
3811 
3812 	MCLGET(m, M_DONTWAIT);
3813 	if ((m->m_flags & M_EXT) == 0) {
3814 		m_freem(m);
3815 		return ENOBUFS;
3816 	}
3817 
3818 	if (rxs->rxs_mbuf != NULL)
3819 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3820 
3821 	rxs->rxs_mbuf = m;
3822 
3823 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3824 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3825 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
3826 	if (error) {
3827 		/* XXX XXX XXX */
3828 		aprint_error_dev(sc->sc_dev,
3829 		    "unable to load rx DMA map %d, error = %d\n",
3830 		    idx, error);
3831 		panic("wm_add_rxbuf");
3832 	}
3833 
3834 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3835 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3836 
3837 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3838 		if ((sc->sc_rctl & RCTL_EN) != 0)
3839 			WM_INIT_RXDESC(sc, idx);
3840 	} else
3841 		WM_INIT_RXDESC(sc, idx);
3842 
3843 	return 0;
3844 }
3845 
3846 /*
3847  * wm_rxdrain:
3848  *
3849  *	Drain the receive queue.
3850  */
3851 static void
3852 wm_rxdrain(struct wm_softc *sc)
3853 {
3854 	struct wm_rxsoft *rxs;
3855 	int i;
3856 
3857 	KASSERT(WM_RX_LOCKED(sc));
3858 
3859 	for (i = 0; i < WM_NRXDESC; i++) {
3860 		rxs = &sc->sc_rxsoft[i];
3861 		if (rxs->rxs_mbuf != NULL) {
3862 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3863 			m_freem(rxs->rxs_mbuf);
3864 			rxs->rxs_mbuf = NULL;
3865 		}
3866 	}
3867 }
3868 
3869 /*
3870  * wm_init:		[ifnet interface function]
3871  *
3872  *	Initialize the interface.
3873  */
3874 static int
3875 wm_init(struct ifnet *ifp)
3876 {
3877 	struct wm_softc *sc = ifp->if_softc;
3878 	int ret;
3879 
3880 	WM_BOTH_LOCK(sc);
3881 	ret = wm_init_locked(ifp);
3882 	WM_BOTH_UNLOCK(sc);
3883 
3884 	return ret;
3885 }
3886 
3887 static int
3888 wm_init_locked(struct ifnet *ifp)
3889 {
3890 	struct wm_softc *sc = ifp->if_softc;
3891 	struct wm_rxsoft *rxs;
3892 	int i, j, trynum, error = 0;
3893 	uint32_t reg;
3894 
3895 	KASSERT(WM_BOTH_LOCKED(sc));
3896 	/*
3897 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGMENT is set.
3898 	 * There is a small but measurable benefit to avoiding the adjusment
3899 	 * of the descriptor so that the headers are aligned, for normal mtu,
3900 	 * on such platforms.  One possibility is that the DMA itself is
3901 	 * slightly more efficient if the front of the entire packet (instead
3902 	 * of the front of the headers) is aligned.
3903 	 *
3904 	 * Note we must always set align_tweak to 0 if we are using
3905 	 * jumbo frames.
3906 	 */
3907 #ifdef __NO_STRICT_ALIGNMENT
3908 	sc->sc_align_tweak = 0;
3909 #else
3910 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3911 		sc->sc_align_tweak = 0;
3912 	else
3913 		sc->sc_align_tweak = 2;
3914 #endif /* __NO_STRICT_ALIGNMENT */
3915 
3916 	/* Cancel any pending I/O. */
3917 	wm_stop_locked(ifp, 0);
3918 
3919 	/* update statistics before reset */
3920 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3921 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3922 
3923 	/* Reset the chip to a known state. */
3924 	wm_reset(sc);
3925 
3926 	switch (sc->sc_type) {
3927 	case WM_T_82571:
3928 	case WM_T_82572:
3929 	case WM_T_82573:
3930 	case WM_T_82574:
3931 	case WM_T_82583:
3932 	case WM_T_80003:
3933 	case WM_T_ICH8:
3934 	case WM_T_ICH9:
3935 	case WM_T_ICH10:
3936 	case WM_T_PCH:
3937 	case WM_T_PCH2:
3938 	case WM_T_PCH_LPT:
3939 		if (wm_check_mng_mode(sc) != 0)
3940 			wm_get_hw_control(sc);
3941 		break;
3942 	default:
3943 		break;
3944 	}
3945 
3946 	/* Init hardware bits */
3947 	wm_initialize_hardware_bits(sc);
3948 
3949 	/* Reset the PHY. */
3950 	if (sc->sc_flags & WM_F_HAS_MII)
3951 		wm_gmii_reset(sc);
3952 
3953 	/* Initialize the transmit descriptor ring. */
3954 	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3955 	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3956 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3957 	sc->sc_txfree = WM_NTXDESC(sc);
3958 	sc->sc_txnext = 0;
3959 
3960 	if (sc->sc_type < WM_T_82543) {
3961 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
3962 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
3963 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3964 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3965 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3966 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3967 	} else {
3968 		CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
3969 		CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
3970 		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3971 		CSR_WRITE(sc, WMREG_TDH, 0);
3972 		CSR_WRITE(sc, WMREG_TIDV, 375);		/* ITR / 4 */
3973 		CSR_WRITE(sc, WMREG_TADV, 375);		/* should be same */
3974 
3975 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3976 			/*
3977 			 * Don't write TDT before TCTL.EN is set.
3978 			 * See the document.
3979 			 */
3980 			CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_QUEUE_ENABLE
3981 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
3982 			    | TXDCTL_WTHRESH(0));
3983 		else {
3984 			CSR_WRITE(sc, WMREG_TDT, 0);
3985 			CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_PTHRESH(0) |
3986 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3987 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3988 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3989 		}
3990 	}
3991 	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3992 	CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3993 
3994 	/* Initialize the transmit job descriptors. */
3995 	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3996 		sc->sc_txsoft[i].txs_mbuf = NULL;
3997 	sc->sc_txsfree = WM_TXQUEUELEN(sc);
3998 	sc->sc_txsnext = 0;
3999 	sc->sc_txsdirty = 0;
4000 
4001 	/*
4002 	 * Initialize the receive descriptor and receive job
4003 	 * descriptor rings.
4004 	 */
4005 	if (sc->sc_type < WM_T_82543) {
4006 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
4007 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
4008 		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
4009 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
4010 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
4011 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
4012 
4013 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
4014 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
4015 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
4016 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
4017 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
4018 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
4019 	} else {
4020 		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
4021 		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
4022 		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
4023 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4024 			CSR_WRITE(sc, WMREG_EITR(0), 450);
4025 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
4026 				panic("%s: MCLBYTES %d unsupported for i2575 or higher\n", __func__, MCLBYTES);
4027 			CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
4028 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
4029 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
4030 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
4031 			    | RXDCTL_WTHRESH(1));
4032 		} else {
4033 			CSR_WRITE(sc, WMREG_RDH, 0);
4034 			CSR_WRITE(sc, WMREG_RDT, 0);
4035 			CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
4036 			CSR_WRITE(sc, WMREG_RADV, 375);	/* MUST be same */
4037 		}
4038 	}
4039 	for (i = 0; i < WM_NRXDESC; i++) {
4040 		rxs = &sc->sc_rxsoft[i];
4041 		if (rxs->rxs_mbuf == NULL) {
4042 			if ((error = wm_add_rxbuf(sc, i)) != 0) {
4043 				log(LOG_ERR, "%s: unable to allocate or map "
4044 				    "rx buffer %d, error = %d\n",
4045 				    device_xname(sc->sc_dev), i, error);
4046 				/*
4047 				 * XXX Should attempt to run with fewer receive
4048 				 * XXX buffers instead of just failing.
4049 				 */
4050 				wm_rxdrain(sc);
4051 				goto out;
4052 			}
4053 		} else {
4054 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4055 				WM_INIT_RXDESC(sc, i);
4056 			/*
4057 			 * For 82575 and newer device, the RX descriptors
4058 			 * must be initialized after the setting of RCTL.EN in
4059 			 * wm_set_filter()
4060 			 */
4061 		}
4062 	}
4063 	sc->sc_rxptr = 0;
4064 	sc->sc_rxdiscard = 0;
4065 	WM_RXCHAIN_RESET(sc);
4066 
4067 	/*
4068 	 * Clear out the VLAN table -- we don't use it (yet).
4069 	 */
4070 	CSR_WRITE(sc, WMREG_VET, 0);
4071 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4072 		trynum = 10; /* Due to hw errata */
4073 	else
4074 		trynum = 1;
4075 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
4076 		for (j = 0; j < trynum; j++)
4077 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4078 
4079 	/*
4080 	 * Set up flow-control parameters.
4081 	 *
4082 	 * XXX Values could probably stand some tuning.
4083 	 */
4084 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4085 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4086 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
4087 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4088 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4089 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4090 	}
4091 
4092 	sc->sc_fcrtl = FCRTL_DFLT;
4093 	if (sc->sc_type < WM_T_82543) {
4094 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4095 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4096 	} else {
4097 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4098 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4099 	}
4100 
4101 	if (sc->sc_type == WM_T_80003)
4102 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4103 	else
4104 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4105 
4106 	/* Writes the control register. */
4107 	wm_set_vlan(sc);
4108 
4109 	if (sc->sc_flags & WM_F_HAS_MII) {
4110 		int val;
4111 
4112 		switch (sc->sc_type) {
4113 		case WM_T_80003:
4114 		case WM_T_ICH8:
4115 		case WM_T_ICH9:
4116 		case WM_T_ICH10:
4117 		case WM_T_PCH:
4118 		case WM_T_PCH2:
4119 		case WM_T_PCH_LPT:
4120 			/*
4121 			 * Set the mac to wait the maximum time between each
4122 			 * iteration and increase the max iterations when
4123 			 * polling the phy; this fixes erroneous timeouts at
4124 			 * 10Mbps.
4125 			 */
4126 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4127 			    0xFFFF);
4128 			val = wm_kmrn_readreg(sc,
4129 			    KUMCTRLSTA_OFFSET_INB_PARAM);
4130 			val |= 0x3F;
4131 			wm_kmrn_writereg(sc,
4132 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
4133 			break;
4134 		default:
4135 			break;
4136 		}
4137 
4138 		if (sc->sc_type == WM_T_80003) {
4139 			val = CSR_READ(sc, WMREG_CTRL_EXT);
4140 			val &= ~CTRL_EXT_LINK_MODE_MASK;
4141 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4142 
4143 			/* Bypass RX and TX FIFO's */
4144 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4145 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4146 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4147 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4148 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4149 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4150 		}
4151 	}
4152 #if 0
4153 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4154 #endif
4155 
4156 	/* Set up checksum offload parameters. */
4157 	reg = CSR_READ(sc, WMREG_RXCSUM);
4158 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4159 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4160 		reg |= RXCSUM_IPOFL;
4161 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4162 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4163 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4164 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4165 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
4166 
4167 	/* Set up the interrupt registers. */
4168 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4169 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4170 	    ICR_RXO | ICR_RXT0;
4171 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4172 
4173 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4174 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4175 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
4176 		reg = CSR_READ(sc, WMREG_KABGTXD);
4177 		reg |= KABGTXD_BGSQLBIAS;
4178 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
4179 	}
4180 
4181 	/* Set up the inter-packet gap. */
4182 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4183 
4184 	if (sc->sc_type >= WM_T_82543) {
4185 		/*
4186 		 * Set up the interrupt throttling register (units of 256ns)
4187 		 * Note that a footnote in Intel's documentation says this
4188 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4189 		 * or 10Mbit mode.  Empirically, it appears to be the case
4190 		 * that that is also true for the 1024ns units of the other
4191 		 * interrupt-related timer registers -- so, really, we ought
4192 		 * to divide this value by 4 when the link speed is low.
4193 		 *
4194 		 * XXX implement this division at link speed change!
4195 		 */
4196 
4197 		/*
4198 		 * For N interrupts/sec, set this value to:
4199 		 * 1000000000 / (N * 256).  Note that we set the
4200 		 * absolute and packet timer values to this value
4201 		 * divided by 4 to get "simple timer" behavior.
4202 		 */
4203 
4204 		sc->sc_itr = 1500;		/* 2604 ints/sec */
4205 		CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4206 	}
4207 
4208 	/* Set the VLAN ethernetype. */
4209 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4210 
4211 	/*
4212 	 * Set up the transmit control register; we start out with
4213 	 * a collision distance suitable for FDX, but update it whe
4214 	 * we resolve the media type.
4215 	 */
4216 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4217 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
4218 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4219 	if (sc->sc_type >= WM_T_82571)
4220 		sc->sc_tctl |= TCTL_MULR;
4221 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4222 
4223 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4224 		/* Write TDT after TCTL.EN is set. See the document. */
4225 		CSR_WRITE(sc, WMREG_TDT, 0);
4226 	}
4227 
4228 	if (sc->sc_type == WM_T_80003) {
4229 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
4230 		reg &= ~TCTL_EXT_GCEX_MASK;
4231 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4232 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4233 	}
4234 
4235 	/* Set the media. */
4236 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4237 		goto out;
4238 
4239 	/* Configure for OS presence */
4240 	wm_init_manageability(sc);
4241 
4242 	/*
4243 	 * Set up the receive control register; we actually program
4244 	 * the register when we set the receive filter.  Use multicast
4245 	 * address offset type 0.
4246 	 *
4247 	 * Only the i82544 has the ability to strip the incoming
4248 	 * CRC, so we don't enable that feature.
4249 	 */
4250 	sc->sc_mchash_type = 0;
4251 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4252 	    | RCTL_MO(sc->sc_mchash_type);
4253 
4254 	/*
4255 	 * The I350 has a bug where it always strips the CRC whether
4256 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
4257 	 */
4258 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4259 	    || (sc->sc_type == WM_T_I210))
4260 		sc->sc_rctl |= RCTL_SECRC;
4261 
4262 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4263 	    && (ifp->if_mtu > ETHERMTU)) {
4264 		sc->sc_rctl |= RCTL_LPE;
4265 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4266 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4267 	}
4268 
4269 	if (MCLBYTES == 2048) {
4270 		sc->sc_rctl |= RCTL_2k;
4271 	} else {
4272 		if (sc->sc_type >= WM_T_82543) {
4273 			switch (MCLBYTES) {
4274 			case 4096:
4275 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4276 				break;
4277 			case 8192:
4278 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4279 				break;
4280 			case 16384:
4281 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4282 				break;
4283 			default:
4284 				panic("wm_init: MCLBYTES %d unsupported",
4285 				    MCLBYTES);
4286 				break;
4287 			}
4288 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
4289 	}
4290 
4291 	/* Set the receive filter. */
4292 	wm_set_filter(sc);
4293 
4294 	/* Enable ECC */
4295 	switch (sc->sc_type) {
4296 	case WM_T_82571:
4297 		reg = CSR_READ(sc, WMREG_PBA_ECC);
4298 		reg |= PBA_ECC_CORR_EN;
4299 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4300 		break;
4301 	case WM_T_PCH_LPT:
4302 		reg = CSR_READ(sc, WMREG_PBECCSTS);
4303 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4304 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4305 
4306 		reg = CSR_READ(sc, WMREG_CTRL);
4307 		reg |= CTRL_MEHE;
4308 		CSR_WRITE(sc, WMREG_CTRL, reg);
4309 		break;
4310 	default:
4311 		break;
4312 	}
4313 
4314 	/* On 575 and later set RDT only if RX enabled */
4315 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4316 		for (i = 0; i < WM_NRXDESC; i++)
4317 			WM_INIT_RXDESC(sc, i);
4318 
4319 	sc->sc_stopping = false;
4320 
4321 	/* Start the one second link check clock. */
4322 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4323 
4324 	/* ...all done! */
4325 	ifp->if_flags |= IFF_RUNNING;
4326 	ifp->if_flags &= ~IFF_OACTIVE;
4327 
4328  out:
4329 	sc->sc_if_flags = ifp->if_flags;
4330 	if (error)
4331 		log(LOG_ERR, "%s: interface not running\n",
4332 		    device_xname(sc->sc_dev));
4333 	return error;
4334 }
4335 
4336 /*
4337  * wm_stop:		[ifnet interface function]
4338  *
4339  *	Stop transmission on the interface.
4340  */
4341 static void
4342 wm_stop(struct ifnet *ifp, int disable)
4343 {
4344 	struct wm_softc *sc = ifp->if_softc;
4345 
4346 	WM_BOTH_LOCK(sc);
4347 	wm_stop_locked(ifp, disable);
4348 	WM_BOTH_UNLOCK(sc);
4349 }
4350 
4351 static void
4352 wm_stop_locked(struct ifnet *ifp, int disable)
4353 {
4354 	struct wm_softc *sc = ifp->if_softc;
4355 	struct wm_txsoft *txs;
4356 	int i;
4357 
4358 	KASSERT(WM_BOTH_LOCKED(sc));
4359 
4360 	sc->sc_stopping = true;
4361 
4362 	/* Stop the one second clock. */
4363 	callout_stop(&sc->sc_tick_ch);
4364 
4365 	/* Stop the 82547 Tx FIFO stall check timer. */
4366 	if (sc->sc_type == WM_T_82547)
4367 		callout_stop(&sc->sc_txfifo_ch);
4368 
4369 	if (sc->sc_flags & WM_F_HAS_MII) {
4370 		/* Down the MII. */
4371 		mii_down(&sc->sc_mii);
4372 	} else {
4373 #if 0
4374 		/* Should we clear PHY's status properly? */
4375 		wm_reset(sc);
4376 #endif
4377 	}
4378 
4379 	/* Stop the transmit and receive processes. */
4380 	CSR_WRITE(sc, WMREG_TCTL, 0);
4381 	CSR_WRITE(sc, WMREG_RCTL, 0);
4382 	sc->sc_rctl &= ~RCTL_EN;
4383 
4384 	/*
4385 	 * Clear the interrupt mask to ensure the device cannot assert its
4386 	 * interrupt line.
4387 	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4388 	 * any currently pending or shared interrupt.
4389 	 */
4390 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4391 	sc->sc_icr = 0;
4392 
4393 	/* Release any queued transmit buffers. */
4394 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4395 		txs = &sc->sc_txsoft[i];
4396 		if (txs->txs_mbuf != NULL) {
4397 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4398 			m_freem(txs->txs_mbuf);
4399 			txs->txs_mbuf = NULL;
4400 		}
4401 	}
4402 
4403 	/* Mark the interface as down and cancel the watchdog timer. */
4404 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4405 	ifp->if_timer = 0;
4406 
4407 	if (disable)
4408 		wm_rxdrain(sc);
4409 
4410 #if 0 /* notyet */
4411 	if (sc->sc_type >= WM_T_82544)
4412 		CSR_WRITE(sc, WMREG_WUC, 0);
4413 #endif
4414 }
4415 
4416 /*
4417  * wm_tx_offload:
4418  *
4419  *	Set up TCP/IP checksumming parameters for the
4420  *	specified packet.
4421  */
4422 static int
4423 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
4424     uint8_t *fieldsp)
4425 {
4426 	struct mbuf *m0 = txs->txs_mbuf;
4427 	struct livengood_tcpip_ctxdesc *t;
4428 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
4429 	uint32_t ipcse;
4430 	struct ether_header *eh;
4431 	int offset, iphl;
4432 	uint8_t fields;
4433 
4434 	/*
4435 	 * XXX It would be nice if the mbuf pkthdr had offset
4436 	 * fields for the protocol headers.
4437 	 */
4438 
4439 	eh = mtod(m0, struct ether_header *);
4440 	switch (htons(eh->ether_type)) {
4441 	case ETHERTYPE_IP:
4442 	case ETHERTYPE_IPV6:
4443 		offset = ETHER_HDR_LEN;
4444 		break;
4445 
4446 	case ETHERTYPE_VLAN:
4447 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4448 		break;
4449 
4450 	default:
4451 		/*
4452 		 * Don't support this protocol or encapsulation.
4453 		 */
4454 		*fieldsp = 0;
4455 		*cmdp = 0;
4456 		return 0;
4457 	}
4458 
4459 	if ((m0->m_pkthdr.csum_flags &
4460 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
4461 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4462 	} else {
4463 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4464 	}
4465 	ipcse = offset + iphl - 1;
4466 
4467 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
4468 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
4469 	seg = 0;
4470 	fields = 0;
4471 
4472 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4473 		int hlen = offset + iphl;
4474 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4475 
4476 		if (__predict_false(m0->m_len <
4477 				    (hlen + sizeof(struct tcphdr)))) {
4478 			/*
4479 			 * TCP/IP headers are not in the first mbuf; we need
4480 			 * to do this the slow and painful way.  Let's just
4481 			 * hope this doesn't happen very often.
4482 			 */
4483 			struct tcphdr th;
4484 
4485 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4486 
4487 			m_copydata(m0, hlen, sizeof(th), &th);
4488 			if (v4) {
4489 				struct ip ip;
4490 
4491 				m_copydata(m0, offset, sizeof(ip), &ip);
4492 				ip.ip_len = 0;
4493 				m_copyback(m0,
4494 				    offset + offsetof(struct ip, ip_len),
4495 				    sizeof(ip.ip_len), &ip.ip_len);
4496 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4497 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4498 			} else {
4499 				struct ip6_hdr ip6;
4500 
4501 				m_copydata(m0, offset, sizeof(ip6), &ip6);
4502 				ip6.ip6_plen = 0;
4503 				m_copyback(m0,
4504 				    offset + offsetof(struct ip6_hdr, ip6_plen),
4505 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4506 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4507 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4508 			}
4509 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4510 			    sizeof(th.th_sum), &th.th_sum);
4511 
4512 			hlen += th.th_off << 2;
4513 		} else {
4514 			/*
4515 			 * TCP/IP headers are in the first mbuf; we can do
4516 			 * this the easy way.
4517 			 */
4518 			struct tcphdr *th;
4519 
4520 			if (v4) {
4521 				struct ip *ip =
4522 				    (void *)(mtod(m0, char *) + offset);
4523 				th = (void *)(mtod(m0, char *) + hlen);
4524 
4525 				ip->ip_len = 0;
4526 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4527 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4528 			} else {
4529 				struct ip6_hdr *ip6 =
4530 				    (void *)(mtod(m0, char *) + offset);
4531 				th = (void *)(mtod(m0, char *) + hlen);
4532 
4533 				ip6->ip6_plen = 0;
4534 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4535 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
4536 			}
4537 			hlen += th->th_off << 2;
4538 		}
4539 
4540 		if (v4) {
4541 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
4542 			cmdlen |= WTX_TCPIP_CMD_IP;
4543 		} else {
4544 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
4545 			ipcse = 0;
4546 		}
4547 		cmd |= WTX_TCPIP_CMD_TSE;
4548 		cmdlen |= WTX_TCPIP_CMD_TSE |
4549 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
4550 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
4551 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
4552 	}
4553 
4554 	/*
4555 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
4556 	 * offload feature, if we load the context descriptor, we
4557 	 * MUST provide valid values for IPCSS and TUCSS fields.
4558 	 */
4559 
4560 	ipcs = WTX_TCPIP_IPCSS(offset) |
4561 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
4562 	    WTX_TCPIP_IPCSE(ipcse);
4563 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
4564 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
4565 		fields |= WTX_IXSM;
4566 	}
4567 
4568 	offset += iphl;
4569 
4570 	if (m0->m_pkthdr.csum_flags &
4571 	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
4572 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
4573 		fields |= WTX_TXSM;
4574 		tucs = WTX_TCPIP_TUCSS(offset) |
4575 		    WTX_TCPIP_TUCSO(offset +
4576 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
4577 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
4578 	} else if ((m0->m_pkthdr.csum_flags &
4579 	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
4580 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
4581 		fields |= WTX_TXSM;
4582 		tucs = WTX_TCPIP_TUCSS(offset) |
4583 		    WTX_TCPIP_TUCSO(offset +
4584 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
4585 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
4586 	} else {
4587 		/* Just initialize it to a valid TCP context. */
4588 		tucs = WTX_TCPIP_TUCSS(offset) |
4589 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
4590 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
4591 	}
4592 
4593 	/* Fill in the context descriptor. */
4594 	t = (struct livengood_tcpip_ctxdesc *)
4595 	    &sc->sc_txdescs[sc->sc_txnext];
4596 	t->tcpip_ipcs = htole32(ipcs);
4597 	t->tcpip_tucs = htole32(tucs);
4598 	t->tcpip_cmdlen = htole32(cmdlen);
4599 	t->tcpip_seg = htole32(seg);
4600 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
4601 
4602 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
4603 	txs->txs_ndesc++;
4604 
4605 	*cmdp = cmd;
4606 	*fieldsp = fields;
4607 
4608 	return 0;
4609 }
4610 
4611 static void
4612 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
4613 {
4614 	struct mbuf *m;
4615 	int i;
4616 
4617 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
4618 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
4619 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
4620 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
4621 		    m->m_data, m->m_len, m->m_flags);
4622 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
4623 	    i, i == 1 ? "" : "s");
4624 }
4625 
4626 /*
4627  * wm_82547_txfifo_stall:
4628  *
4629  *	Callout used to wait for the 82547 Tx FIFO to drain,
4630  *	reset the FIFO pointers, and restart packet transmission.
4631  */
4632 static void
4633 wm_82547_txfifo_stall(void *arg)
4634 {
4635 	struct wm_softc *sc = arg;
4636 #ifndef WM_MPSAFE
4637 	int s;
4638 
4639 	s = splnet();
4640 #endif
4641 	WM_TX_LOCK(sc);
4642 
4643 	if (sc->sc_stopping)
4644 		goto out;
4645 
4646 	if (sc->sc_txfifo_stall) {
4647 		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
4648 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
4649 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
4650 			/*
4651 			 * Packets have drained.  Stop transmitter, reset
4652 			 * FIFO pointers, restart transmitter, and kick
4653 			 * the packet queue.
4654 			 */
4655 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
4656 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
4657 			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
4658 			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
4659 			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
4660 			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
4661 			CSR_WRITE(sc, WMREG_TCTL, tctl);
4662 			CSR_WRITE_FLUSH(sc);
4663 
4664 			sc->sc_txfifo_head = 0;
4665 			sc->sc_txfifo_stall = 0;
4666 			wm_start_locked(&sc->sc_ethercom.ec_if);
4667 		} else {
4668 			/*
4669 			 * Still waiting for packets to drain; try again in
4670 			 * another tick.
4671 			 */
4672 			callout_schedule(&sc->sc_txfifo_ch, 1);
4673 		}
4674 	}
4675 
4676 out:
4677 	WM_TX_UNLOCK(sc);
4678 #ifndef WM_MPSAFE
4679 	splx(s);
4680 #endif
4681 }
4682 
4683 /*
4684  * wm_82547_txfifo_bugchk:
4685  *
4686  *	Check for bug condition in the 82547 Tx FIFO.  We need to
4687  *	prevent enqueueing a packet that would wrap around the end
4688  *	if the Tx FIFO ring buffer, otherwise the chip will croak.
4689  *
4690  *	We do this by checking the amount of space before the end
4691  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
4692  *	the Tx FIFO, wait for all remaining packets to drain, reset
4693  *	the internal FIFO pointers to the beginning, and restart
4694  *	transmission on the interface.
4695  */
4696 #define	WM_FIFO_HDR		0x10
4697 #define	WM_82547_PAD_LEN	0x3e0
4698 static int
4699 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
4700 {
4701 	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
4702 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
4703 
4704 	/* Just return if already stalled. */
4705 	if (sc->sc_txfifo_stall)
4706 		return 1;
4707 
4708 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
4709 		/* Stall only occurs in half-duplex mode. */
4710 		goto send_packet;
4711 	}
4712 
4713 	if (len >= WM_82547_PAD_LEN + space) {
4714 		sc->sc_txfifo_stall = 1;
4715 		callout_schedule(&sc->sc_txfifo_ch, 1);
4716 		return 1;
4717 	}
4718 
4719  send_packet:
4720 	sc->sc_txfifo_head += len;
4721 	if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
4722 		sc->sc_txfifo_head -= sc->sc_txfifo_size;
4723 
4724 	return 0;
4725 }
4726 
4727 /*
4728  * wm_start:		[ifnet interface function]
4729  *
4730  *	Start packet transmission on the interface.
4731  */
4732 static void
4733 wm_start(struct ifnet *ifp)
4734 {
4735 	struct wm_softc *sc = ifp->if_softc;
4736 
4737 	WM_TX_LOCK(sc);
4738 	if (!sc->sc_stopping)
4739 		wm_start_locked(ifp);
4740 	WM_TX_UNLOCK(sc);
4741 }
4742 
4743 static void
4744 wm_start_locked(struct ifnet *ifp)
4745 {
4746 	struct wm_softc *sc = ifp->if_softc;
4747 	struct mbuf *m0;
4748 	struct m_tag *mtag;
4749 	struct wm_txsoft *txs;
4750 	bus_dmamap_t dmamap;
4751 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
4752 	bus_addr_t curaddr;
4753 	bus_size_t seglen, curlen;
4754 	uint32_t cksumcmd;
4755 	uint8_t cksumfields;
4756 
4757 	KASSERT(WM_TX_LOCKED(sc));
4758 
4759 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
4760 		return;
4761 
4762 	/* Remember the previous number of free descriptors. */
4763 	ofree = sc->sc_txfree;
4764 
4765 	/*
4766 	 * Loop through the send queue, setting up transmit descriptors
4767 	 * until we drain the queue, or use up all available transmit
4768 	 * descriptors.
4769 	 */
4770 	for (;;) {
4771 		m0 = NULL;
4772 
4773 		/* Get a work queue entry. */
4774 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
4775 			wm_txintr(sc);
4776 			if (sc->sc_txsfree == 0) {
4777 				DPRINTF(WM_DEBUG_TX,
4778 				    ("%s: TX: no free job descriptors\n",
4779 					device_xname(sc->sc_dev)));
4780 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
4781 				break;
4782 			}
4783 		}
4784 
4785 		/* Grab a packet off the queue. */
4786 		IFQ_DEQUEUE(&ifp->if_snd, m0);
4787 		if (m0 == NULL)
4788 			break;
4789 
4790 		DPRINTF(WM_DEBUG_TX,
4791 		    ("%s: TX: have packet to transmit: %p\n",
4792 		    device_xname(sc->sc_dev), m0));
4793 
4794 		txs = &sc->sc_txsoft[sc->sc_txsnext];
4795 		dmamap = txs->txs_dmamap;
4796 
4797 		use_tso = (m0->m_pkthdr.csum_flags &
4798 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
4799 
4800 		/*
4801 		 * So says the Linux driver:
4802 		 * The controller does a simple calculation to make sure
4803 		 * there is enough room in the FIFO before initiating the
4804 		 * DMA for each buffer.  The calc is:
4805 		 *	4 = ceil(buffer len / MSS)
4806 		 * To make sure we don't overrun the FIFO, adjust the max
4807 		 * buffer len if the MSS drops.
4808 		 */
4809 		dmamap->dm_maxsegsz =
4810 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
4811 		    ? m0->m_pkthdr.segsz << 2
4812 		    : WTX_MAX_LEN;
4813 
4814 		/*
4815 		 * Load the DMA map.  If this fails, the packet either
4816 		 * didn't fit in the allotted number of segments, or we
4817 		 * were short on resources.  For the too-many-segments
4818 		 * case, we simply report an error and drop the packet,
4819 		 * since we can't sanely copy a jumbo packet to a single
4820 		 * buffer.
4821 		 */
4822 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
4823 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
4824 		if (error) {
4825 			if (error == EFBIG) {
4826 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
4827 				log(LOG_ERR, "%s: Tx packet consumes too many "
4828 				    "DMA segments, dropping...\n",
4829 				    device_xname(sc->sc_dev));
4830 				wm_dump_mbuf_chain(sc, m0);
4831 				m_freem(m0);
4832 				continue;
4833 			}
4834 			/*  Short on resources, just stop for now. */
4835 			DPRINTF(WM_DEBUG_TX,
4836 			    ("%s: TX: dmamap load failed: %d\n",
4837 			    device_xname(sc->sc_dev), error));
4838 			break;
4839 		}
4840 
4841 		segs_needed = dmamap->dm_nsegs;
4842 		if (use_tso) {
4843 			/* For sentinel descriptor; see below. */
4844 			segs_needed++;
4845 		}
4846 
4847 		/*
4848 		 * Ensure we have enough descriptors free to describe
4849 		 * the packet.  Note, we always reserve one descriptor
4850 		 * at the end of the ring due to the semantics of the
4851 		 * TDT register, plus one more in the event we need
4852 		 * to load offload context.
4853 		 */
4854 		if (segs_needed > sc->sc_txfree - 2) {
4855 			/*
4856 			 * Not enough free descriptors to transmit this
4857 			 * packet.  We haven't committed anything yet,
4858 			 * so just unload the DMA map, put the packet
4859 			 * pack on the queue, and punt.  Notify the upper
4860 			 * layer that there are no more slots left.
4861 			 */
4862 			DPRINTF(WM_DEBUG_TX,
4863 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
4864 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
4865 			    segs_needed, sc->sc_txfree - 1));
4866 			ifp->if_flags |= IFF_OACTIVE;
4867 			bus_dmamap_unload(sc->sc_dmat, dmamap);
4868 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
4869 			break;
4870 		}
4871 
4872 		/*
4873 		 * Check for 82547 Tx FIFO bug.  We need to do this
4874 		 * once we know we can transmit the packet, since we
4875 		 * do some internal FIFO space accounting here.
4876 		 */
4877 		if (sc->sc_type == WM_T_82547 &&
4878 		    wm_82547_txfifo_bugchk(sc, m0)) {
4879 			DPRINTF(WM_DEBUG_TX,
4880 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
4881 			    device_xname(sc->sc_dev)));
4882 			ifp->if_flags |= IFF_OACTIVE;
4883 			bus_dmamap_unload(sc->sc_dmat, dmamap);
4884 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
4885 			break;
4886 		}
4887 
4888 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
4889 
4890 		DPRINTF(WM_DEBUG_TX,
4891 		    ("%s: TX: packet has %d (%d) DMA segments\n",
4892 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
4893 
4894 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
4895 
4896 		/*
4897 		 * Store a pointer to the packet so that we can free it
4898 		 * later.
4899 		 *
4900 		 * Initially, we consider the number of descriptors the
4901 		 * packet uses the number of DMA segments.  This may be
4902 		 * incremented by 1 if we do checksum offload (a descriptor
4903 		 * is used to set the checksum context).
4904 		 */
4905 		txs->txs_mbuf = m0;
4906 		txs->txs_firstdesc = sc->sc_txnext;
4907 		txs->txs_ndesc = segs_needed;
4908 
4909 		/* Set up offload parameters for this packet. */
4910 		if (m0->m_pkthdr.csum_flags &
4911 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
4912 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
4913 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
4914 			if (wm_tx_offload(sc, txs, &cksumcmd,
4915 					  &cksumfields) != 0) {
4916 				/* Error message already displayed. */
4917 				bus_dmamap_unload(sc->sc_dmat, dmamap);
4918 				continue;
4919 			}
4920 		} else {
4921 			cksumcmd = 0;
4922 			cksumfields = 0;
4923 		}
4924 
4925 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
4926 
4927 		/* Sync the DMA map. */
4928 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
4929 		    BUS_DMASYNC_PREWRITE);
4930 
4931 		/* Initialize the transmit descriptor. */
4932 		for (nexttx = sc->sc_txnext, seg = 0;
4933 		     seg < dmamap->dm_nsegs; seg++) {
4934 			for (seglen = dmamap->dm_segs[seg].ds_len,
4935 			     curaddr = dmamap->dm_segs[seg].ds_addr;
4936 			     seglen != 0;
4937 			     curaddr += curlen, seglen -= curlen,
4938 			     nexttx = WM_NEXTTX(sc, nexttx)) {
4939 				curlen = seglen;
4940 
4941 				/*
4942 				 * So says the Linux driver:
4943 				 * Work around for premature descriptor
4944 				 * write-backs in TSO mode.  Append a
4945 				 * 4-byte sentinel descriptor.
4946 				 */
4947 				if (use_tso &&
4948 				    seg == dmamap->dm_nsegs - 1 &&
4949 				    curlen > 8)
4950 					curlen -= 4;
4951 
4952 				wm_set_dma_addr(
4953 				    &sc->sc_txdescs[nexttx].wtx_addr,
4954 				    curaddr);
4955 				sc->sc_txdescs[nexttx].wtx_cmdlen =
4956 				    htole32(cksumcmd | curlen);
4957 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
4958 				    0;
4959 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
4960 				    cksumfields;
4961 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
4962 				lasttx = nexttx;
4963 
4964 				DPRINTF(WM_DEBUG_TX,
4965 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
4966 				     "len %#04zx\n",
4967 				    device_xname(sc->sc_dev), nexttx,
4968 				    (uint64_t)curaddr, curlen));
4969 			}
4970 		}
4971 
4972 		KASSERT(lasttx != -1);
4973 
4974 		/*
4975 		 * Set up the command byte on the last descriptor of
4976 		 * the packet.  If we're in the interrupt delay window,
4977 		 * delay the interrupt.
4978 		 */
4979 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
4980 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
4981 
4982 		/*
4983 		 * If VLANs are enabled and the packet has a VLAN tag, set
4984 		 * up the descriptor to encapsulate the packet for us.
4985 		 *
4986 		 * This is only valid on the last descriptor of the packet.
4987 		 */
4988 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
4989 			sc->sc_txdescs[lasttx].wtx_cmdlen |=
4990 			    htole32(WTX_CMD_VLE);
4991 			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
4992 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
4993 		}
4994 
4995 		txs->txs_lastdesc = lasttx;
4996 
4997 		DPRINTF(WM_DEBUG_TX,
4998 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
4999 		    device_xname(sc->sc_dev),
5000 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5001 
5002 		/* Sync the descriptors we're using. */
5003 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5004 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5005 
5006 		/* Give the packet to the chip. */
5007 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5008 
5009 		DPRINTF(WM_DEBUG_TX,
5010 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5011 
5012 		DPRINTF(WM_DEBUG_TX,
5013 		    ("%s: TX: finished transmitting packet, job %d\n",
5014 		    device_xname(sc->sc_dev), sc->sc_txsnext));
5015 
5016 		/* Advance the tx pointer. */
5017 		sc->sc_txfree -= txs->txs_ndesc;
5018 		sc->sc_txnext = nexttx;
5019 
5020 		sc->sc_txsfree--;
5021 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5022 
5023 		/* Pass the packet to any BPF listeners. */
5024 		bpf_mtap(ifp, m0);
5025 	}
5026 
5027 	if (m0 != NULL) {
5028 		ifp->if_flags |= IFF_OACTIVE;
5029 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5030 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5031 		m_freem(m0);
5032 	}
5033 
5034 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5035 		/* No more slots; notify upper layer. */
5036 		ifp->if_flags |= IFF_OACTIVE;
5037 	}
5038 
5039 	if (sc->sc_txfree != ofree) {
5040 		/* Set a watchdog timer in case the chip flakes out. */
5041 		ifp->if_timer = 5;
5042 	}
5043 }
5044 
5045 /*
5046  * wm_nq_tx_offload:
5047  *
5048  *	Set up TCP/IP checksumming parameters for the
5049  *	specified packet, for NEWQUEUE devices
5050  */
5051 static int
5052 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
5053     uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
5054 {
5055 	struct mbuf *m0 = txs->txs_mbuf;
5056 	struct m_tag *mtag;
5057 	uint32_t vl_len, mssidx, cmdc;
5058 	struct ether_header *eh;
5059 	int offset, iphl;
5060 
5061 	/*
5062 	 * XXX It would be nice if the mbuf pkthdr had offset
5063 	 * fields for the protocol headers.
5064 	 */
5065 	*cmdlenp = 0;
5066 	*fieldsp = 0;
5067 
5068 	eh = mtod(m0, struct ether_header *);
5069 	switch (htons(eh->ether_type)) {
5070 	case ETHERTYPE_IP:
5071 	case ETHERTYPE_IPV6:
5072 		offset = ETHER_HDR_LEN;
5073 		break;
5074 
5075 	case ETHERTYPE_VLAN:
5076 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
5077 		break;
5078 
5079 	default:
5080 		/* Don't support this protocol or encapsulation. */
5081 		*do_csum = false;
5082 		return 0;
5083 	}
5084 	*do_csum = true;
5085 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
5086 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
5087 
5088 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
5089 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
5090 
5091 	if ((m0->m_pkthdr.csum_flags &
5092 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
5093 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
5094 	} else {
5095 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
5096 	}
5097 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
5098 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
5099 
5100 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
5101 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
5102 		     << NQTXC_VLLEN_VLAN_SHIFT);
5103 		*cmdlenp |= NQTX_CMD_VLE;
5104 	}
5105 
5106 	mssidx = 0;
5107 
5108 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
5109 		int hlen = offset + iphl;
5110 		int tcp_hlen;
5111 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
5112 
5113 		if (__predict_false(m0->m_len <
5114 				    (hlen + sizeof(struct tcphdr)))) {
5115 			/*
5116 			 * TCP/IP headers are not in the first mbuf; we need
5117 			 * to do this the slow and painful way.  Let's just
5118 			 * hope this doesn't happen very often.
5119 			 */
5120 			struct tcphdr th;
5121 
5122 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
5123 
5124 			m_copydata(m0, hlen, sizeof(th), &th);
5125 			if (v4) {
5126 				struct ip ip;
5127 
5128 				m_copydata(m0, offset, sizeof(ip), &ip);
5129 				ip.ip_len = 0;
5130 				m_copyback(m0,
5131 				    offset + offsetof(struct ip, ip_len),
5132 				    sizeof(ip.ip_len), &ip.ip_len);
5133 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
5134 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
5135 			} else {
5136 				struct ip6_hdr ip6;
5137 
5138 				m_copydata(m0, offset, sizeof(ip6), &ip6);
5139 				ip6.ip6_plen = 0;
5140 				m_copyback(m0,
5141 				    offset + offsetof(struct ip6_hdr, ip6_plen),
5142 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
5143 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
5144 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
5145 			}
5146 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
5147 			    sizeof(th.th_sum), &th.th_sum);
5148 
5149 			tcp_hlen = th.th_off << 2;
5150 		} else {
5151 			/*
5152 			 * TCP/IP headers are in the first mbuf; we can do
5153 			 * this the easy way.
5154 			 */
5155 			struct tcphdr *th;
5156 
5157 			if (v4) {
5158 				struct ip *ip =
5159 				    (void *)(mtod(m0, char *) + offset);
5160 				th = (void *)(mtod(m0, char *) + hlen);
5161 
5162 				ip->ip_len = 0;
5163 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
5164 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
5165 			} else {
5166 				struct ip6_hdr *ip6 =
5167 				    (void *)(mtod(m0, char *) + offset);
5168 				th = (void *)(mtod(m0, char *) + hlen);
5169 
5170 				ip6->ip6_plen = 0;
5171 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
5172 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
5173 			}
5174 			tcp_hlen = th->th_off << 2;
5175 		}
5176 		hlen += tcp_hlen;
5177 		*cmdlenp |= NQTX_CMD_TSE;
5178 
5179 		if (v4) {
5180 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
5181 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
5182 		} else {
5183 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
5184 			*fieldsp |= NQTXD_FIELDS_TUXSM;
5185 		}
5186 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
5187 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
5188 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
5189 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
5190 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
5191 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
5192 	} else {
5193 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
5194 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
5195 	}
5196 
5197 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
5198 		*fieldsp |= NQTXD_FIELDS_IXSM;
5199 		cmdc |= NQTXC_CMD_IP4;
5200 	}
5201 
5202 	if (m0->m_pkthdr.csum_flags &
5203 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5204 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
5205 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
5206 			cmdc |= NQTXC_CMD_TCP;
5207 		} else {
5208 			cmdc |= NQTXC_CMD_UDP;
5209 		}
5210 		cmdc |= NQTXC_CMD_IP4;
5211 		*fieldsp |= NQTXD_FIELDS_TUXSM;
5212 	}
5213 	if (m0->m_pkthdr.csum_flags &
5214 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5215 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
5216 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
5217 			cmdc |= NQTXC_CMD_TCP;
5218 		} else {
5219 			cmdc |= NQTXC_CMD_UDP;
5220 		}
5221 		cmdc |= NQTXC_CMD_IP6;
5222 		*fieldsp |= NQTXD_FIELDS_TUXSM;
5223 	}
5224 
5225 	/* Fill in the context descriptor. */
5226 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
5227 	    htole32(vl_len);
5228 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
5229 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
5230 	    htole32(cmdc);
5231 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
5232 	    htole32(mssidx);
5233 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
5234 	DPRINTF(WM_DEBUG_TX,
5235 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
5236 	    sc->sc_txnext, 0, vl_len));
5237 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
5238 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
5239 	txs->txs_ndesc++;
5240 	return 0;
5241 }
5242 
5243 /*
5244  * wm_nq_start:		[ifnet interface function]
5245  *
5246  *	Start packet transmission on the interface for NEWQUEUE devices
5247  */
5248 static void
5249 wm_nq_start(struct ifnet *ifp)
5250 {
5251 	struct wm_softc *sc = ifp->if_softc;
5252 
5253 	WM_TX_LOCK(sc);
5254 	if (!sc->sc_stopping)
5255 		wm_nq_start_locked(ifp);
5256 	WM_TX_UNLOCK(sc);
5257 }
5258 
5259 static void
5260 wm_nq_start_locked(struct ifnet *ifp)
5261 {
5262 	struct wm_softc *sc = ifp->if_softc;
5263 	struct mbuf *m0;
5264 	struct m_tag *mtag;
5265 	struct wm_txsoft *txs;
5266 	bus_dmamap_t dmamap;
5267 	int error, nexttx, lasttx = -1, seg, segs_needed;
5268 	bool do_csum, sent;
5269 
5270 	KASSERT(WM_TX_LOCKED(sc));
5271 
5272 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
5273 		return;
5274 
5275 	sent = false;
5276 
5277 	/*
5278 	 * Loop through the send queue, setting up transmit descriptors
5279 	 * until we drain the queue, or use up all available transmit
5280 	 * descriptors.
5281 	 */
5282 	for (;;) {
5283 		m0 = NULL;
5284 
5285 		/* Get a work queue entry. */
5286 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
5287 			wm_txintr(sc);
5288 			if (sc->sc_txsfree == 0) {
5289 				DPRINTF(WM_DEBUG_TX,
5290 				    ("%s: TX: no free job descriptors\n",
5291 					device_xname(sc->sc_dev)));
5292 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
5293 				break;
5294 			}
5295 		}
5296 
5297 		/* Grab a packet off the queue. */
5298 		IFQ_DEQUEUE(&ifp->if_snd, m0);
5299 		if (m0 == NULL)
5300 			break;
5301 
5302 		DPRINTF(WM_DEBUG_TX,
5303 		    ("%s: TX: have packet to transmit: %p\n",
5304 		    device_xname(sc->sc_dev), m0));
5305 
5306 		txs = &sc->sc_txsoft[sc->sc_txsnext];
5307 		dmamap = txs->txs_dmamap;
5308 
5309 		/*
5310 		 * Load the DMA map.  If this fails, the packet either
5311 		 * didn't fit in the allotted number of segments, or we
5312 		 * were short on resources.  For the too-many-segments
5313 		 * case, we simply report an error and drop the packet,
5314 		 * since we can't sanely copy a jumbo packet to a single
5315 		 * buffer.
5316 		 */
5317 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
5318 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
5319 		if (error) {
5320 			if (error == EFBIG) {
5321 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5322 				log(LOG_ERR, "%s: Tx packet consumes too many "
5323 				    "DMA segments, dropping...\n",
5324 				    device_xname(sc->sc_dev));
5325 				wm_dump_mbuf_chain(sc, m0);
5326 				m_freem(m0);
5327 				continue;
5328 			}
5329 			/* Short on resources, just stop for now. */
5330 			DPRINTF(WM_DEBUG_TX,
5331 			    ("%s: TX: dmamap load failed: %d\n",
5332 			    device_xname(sc->sc_dev), error));
5333 			break;
5334 		}
5335 
5336 		segs_needed = dmamap->dm_nsegs;
5337 
5338 		/*
5339 		 * Ensure we have enough descriptors free to describe
5340 		 * the packet.  Note, we always reserve one descriptor
5341 		 * at the end of the ring due to the semantics of the
5342 		 * TDT register, plus one more in the event we need
5343 		 * to load offload context.
5344 		 */
5345 		if (segs_needed > sc->sc_txfree - 2) {
5346 			/*
5347 			 * Not enough free descriptors to transmit this
5348 			 * packet.  We haven't committed anything yet,
5349 			 * so just unload the DMA map, put the packet
5350 			 * pack on the queue, and punt.  Notify the upper
5351 			 * layer that there are no more slots left.
5352 			 */
5353 			DPRINTF(WM_DEBUG_TX,
5354 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
5355 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
5356 			    segs_needed, sc->sc_txfree - 1));
5357 			ifp->if_flags |= IFF_OACTIVE;
5358 			bus_dmamap_unload(sc->sc_dmat, dmamap);
5359 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
5360 			break;
5361 		}
5362 
5363 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
5364 
5365 		DPRINTF(WM_DEBUG_TX,
5366 		    ("%s: TX: packet has %d (%d) DMA segments\n",
5367 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
5368 
5369 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
5370 
5371 		/*
5372 		 * Store a pointer to the packet so that we can free it
5373 		 * later.
5374 		 *
5375 		 * Initially, we consider the number of descriptors the
5376 		 * packet uses the number of DMA segments.  This may be
5377 		 * incremented by 1 if we do checksum offload (a descriptor
5378 		 * is used to set the checksum context).
5379 		 */
5380 		txs->txs_mbuf = m0;
5381 		txs->txs_firstdesc = sc->sc_txnext;
5382 		txs->txs_ndesc = segs_needed;
5383 
5384 		/* Set up offload parameters for this packet. */
5385 		uint32_t cmdlen, fields, dcmdlen;
5386 		if (m0->m_pkthdr.csum_flags &
5387 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
5388 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
5389 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
5390 			if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
5391 			    &do_csum) != 0) {
5392 				/* Error message already displayed. */
5393 				bus_dmamap_unload(sc->sc_dmat, dmamap);
5394 				continue;
5395 			}
5396 		} else {
5397 			do_csum = false;
5398 			cmdlen = 0;
5399 			fields = 0;
5400 		}
5401 
5402 		/* Sync the DMA map. */
5403 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
5404 		    BUS_DMASYNC_PREWRITE);
5405 
5406 		/* Initialize the first transmit descriptor. */
5407 		nexttx = sc->sc_txnext;
5408 		if (!do_csum) {
5409 			/* setup a legacy descriptor */
5410 			wm_set_dma_addr(
5411 			    &sc->sc_txdescs[nexttx].wtx_addr,
5412 			    dmamap->dm_segs[0].ds_addr);
5413 			sc->sc_txdescs[nexttx].wtx_cmdlen =
5414 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
5415 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
5416 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
5417 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
5418 			    NULL) {
5419 				sc->sc_txdescs[nexttx].wtx_cmdlen |=
5420 				    htole32(WTX_CMD_VLE);
5421 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
5422 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5423 			} else {
5424 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
5425 			}
5426 			dcmdlen = 0;
5427 		} else {
5428 			/* setup an advanced data descriptor */
5429 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5430 			    htole64(dmamap->dm_segs[0].ds_addr);
5431 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
5432 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5433 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen );
5434 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
5435 			    htole32(fields);
5436 			DPRINTF(WM_DEBUG_TX,
5437 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
5438 			    device_xname(sc->sc_dev), nexttx,
5439 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
5440 			DPRINTF(WM_DEBUG_TX,
5441 			    ("\t 0x%08x%08x\n", fields,
5442 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
5443 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
5444 		}
5445 
5446 		lasttx = nexttx;
5447 		nexttx = WM_NEXTTX(sc, nexttx);
5448 		/*
5449 		 * fill in the next descriptors. legacy or adcanced format
5450 		 * is the same here
5451 		 */
5452 		for (seg = 1; seg < dmamap->dm_nsegs;
5453 		    seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
5454 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5455 			    htole64(dmamap->dm_segs[seg].ds_addr);
5456 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5457 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
5458 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
5459 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
5460 			lasttx = nexttx;
5461 
5462 			DPRINTF(WM_DEBUG_TX,
5463 			    ("%s: TX: desc %d: %#" PRIx64 ", "
5464 			     "len %#04zx\n",
5465 			    device_xname(sc->sc_dev), nexttx,
5466 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
5467 			    dmamap->dm_segs[seg].ds_len));
5468 		}
5469 
5470 		KASSERT(lasttx != -1);
5471 
5472 		/*
5473 		 * Set up the command byte on the last descriptor of
5474 		 * the packet.  If we're in the interrupt delay window,
5475 		 * delay the interrupt.
5476 		 */
5477 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
5478 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
5479 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
5480 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
5481 
5482 		txs->txs_lastdesc = lasttx;
5483 
5484 		DPRINTF(WM_DEBUG_TX,
5485 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
5486 		    device_xname(sc->sc_dev),
5487 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5488 
5489 		/* Sync the descriptors we're using. */
5490 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5491 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5492 
5493 		/* Give the packet to the chip. */
5494 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5495 		sent = true;
5496 
5497 		DPRINTF(WM_DEBUG_TX,
5498 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5499 
5500 		DPRINTF(WM_DEBUG_TX,
5501 		    ("%s: TX: finished transmitting packet, job %d\n",
5502 		    device_xname(sc->sc_dev), sc->sc_txsnext));
5503 
5504 		/* Advance the tx pointer. */
5505 		sc->sc_txfree -= txs->txs_ndesc;
5506 		sc->sc_txnext = nexttx;
5507 
5508 		sc->sc_txsfree--;
5509 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5510 
5511 		/* Pass the packet to any BPF listeners. */
5512 		bpf_mtap(ifp, m0);
5513 	}
5514 
5515 	if (m0 != NULL) {
5516 		ifp->if_flags |= IFF_OACTIVE;
5517 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5518 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5519 		m_freem(m0);
5520 	}
5521 
5522 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5523 		/* No more slots; notify upper layer. */
5524 		ifp->if_flags |= IFF_OACTIVE;
5525 	}
5526 
5527 	if (sent) {
5528 		/* Set a watchdog timer in case the chip flakes out. */
5529 		ifp->if_timer = 5;
5530 	}
5531 }
5532 
5533 /* Interrupt */
5534 
5535 /*
5536  * wm_txintr:
5537  *
5538  *	Helper; handle transmit interrupts.
5539  */
5540 static void
5541 wm_txintr(struct wm_softc *sc)
5542 {
5543 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5544 	struct wm_txsoft *txs;
5545 	uint8_t status;
5546 	int i;
5547 
5548 	if (sc->sc_stopping)
5549 		return;
5550 
5551 	ifp->if_flags &= ~IFF_OACTIVE;
5552 
5553 	/*
5554 	 * Go through the Tx list and free mbufs for those
5555 	 * frames which have been transmitted.
5556 	 */
5557 	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
5558 	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
5559 		txs = &sc->sc_txsoft[i];
5560 
5561 		DPRINTF(WM_DEBUG_TX,
5562 		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
5563 
5564 		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
5565 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5566 
5567 		status =
5568 		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
5569 		if ((status & WTX_ST_DD) == 0) {
5570 			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
5571 			    BUS_DMASYNC_PREREAD);
5572 			break;
5573 		}
5574 
5575 		DPRINTF(WM_DEBUG_TX,
5576 		    ("%s: TX: job %d done: descs %d..%d\n",
5577 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
5578 		    txs->txs_lastdesc));
5579 
5580 		/*
5581 		 * XXX We should probably be using the statistics
5582 		 * XXX registers, but I don't know if they exist
5583 		 * XXX on chips before the i82544.
5584 		 */
5585 
5586 #ifdef WM_EVENT_COUNTERS
5587 		if (status & WTX_ST_TU)
5588 			WM_EVCNT_INCR(&sc->sc_ev_tu);
5589 #endif /* WM_EVENT_COUNTERS */
5590 
5591 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
5592 			ifp->if_oerrors++;
5593 			if (status & WTX_ST_LC)
5594 				log(LOG_WARNING, "%s: late collision\n",
5595 				    device_xname(sc->sc_dev));
5596 			else if (status & WTX_ST_EC) {
5597 				ifp->if_collisions += 16;
5598 				log(LOG_WARNING, "%s: excessive collisions\n",
5599 				    device_xname(sc->sc_dev));
5600 			}
5601 		} else
5602 			ifp->if_opackets++;
5603 
5604 		sc->sc_txfree += txs->txs_ndesc;
5605 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
5606 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
5607 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
5608 		m_freem(txs->txs_mbuf);
5609 		txs->txs_mbuf = NULL;
5610 	}
5611 
5612 	/* Update the dirty transmit buffer pointer. */
5613 	sc->sc_txsdirty = i;
5614 	DPRINTF(WM_DEBUG_TX,
5615 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
5616 
5617 	/*
5618 	 * If there are no more pending transmissions, cancel the watchdog
5619 	 * timer.
5620 	 */
5621 	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
5622 		ifp->if_timer = 0;
5623 }
5624 
5625 /*
5626  * wm_rxintr:
5627  *
5628  *	Helper; handle receive interrupts.
5629  */
5630 static void
5631 wm_rxintr(struct wm_softc *sc)
5632 {
5633 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5634 	struct wm_rxsoft *rxs;
5635 	struct mbuf *m;
5636 	int i, len;
5637 	uint8_t status, errors;
5638 	uint16_t vlantag;
5639 
5640 	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
5641 		rxs = &sc->sc_rxsoft[i];
5642 
5643 		DPRINTF(WM_DEBUG_RX,
5644 		    ("%s: RX: checking descriptor %d\n",
5645 		    device_xname(sc->sc_dev), i));
5646 
5647 		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5648 
5649 		status = sc->sc_rxdescs[i].wrx_status;
5650 		errors = sc->sc_rxdescs[i].wrx_errors;
5651 		len = le16toh(sc->sc_rxdescs[i].wrx_len);
5652 		vlantag = sc->sc_rxdescs[i].wrx_special;
5653 
5654 		if ((status & WRX_ST_DD) == 0) {
5655 			/* We have processed all of the receive descriptors. */
5656 			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
5657 			break;
5658 		}
5659 
5660 		if (__predict_false(sc->sc_rxdiscard)) {
5661 			DPRINTF(WM_DEBUG_RX,
5662 			    ("%s: RX: discarding contents of descriptor %d\n",
5663 			    device_xname(sc->sc_dev), i));
5664 			WM_INIT_RXDESC(sc, i);
5665 			if (status & WRX_ST_EOP) {
5666 				/* Reset our state. */
5667 				DPRINTF(WM_DEBUG_RX,
5668 				    ("%s: RX: resetting rxdiscard -> 0\n",
5669 				    device_xname(sc->sc_dev)));
5670 				sc->sc_rxdiscard = 0;
5671 			}
5672 			continue;
5673 		}
5674 
5675 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5676 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
5677 
5678 		m = rxs->rxs_mbuf;
5679 
5680 		/*
5681 		 * Add a new receive buffer to the ring, unless of
5682 		 * course the length is zero. Treat the latter as a
5683 		 * failed mapping.
5684 		 */
5685 		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
5686 			/*
5687 			 * Failed, throw away what we've done so
5688 			 * far, and discard the rest of the packet.
5689 			 */
5690 			ifp->if_ierrors++;
5691 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5692 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5693 			WM_INIT_RXDESC(sc, i);
5694 			if ((status & WRX_ST_EOP) == 0)
5695 				sc->sc_rxdiscard = 1;
5696 			if (sc->sc_rxhead != NULL)
5697 				m_freem(sc->sc_rxhead);
5698 			WM_RXCHAIN_RESET(sc);
5699 			DPRINTF(WM_DEBUG_RX,
5700 			    ("%s: RX: Rx buffer allocation failed, "
5701 			    "dropping packet%s\n", device_xname(sc->sc_dev),
5702 			    sc->sc_rxdiscard ? " (discard)" : ""));
5703 			continue;
5704 		}
5705 
5706 		m->m_len = len;
5707 		sc->sc_rxlen += len;
5708 		DPRINTF(WM_DEBUG_RX,
5709 		    ("%s: RX: buffer at %p len %d\n",
5710 		    device_xname(sc->sc_dev), m->m_data, len));
5711 
5712 		/* If this is not the end of the packet, keep looking. */
5713 		if ((status & WRX_ST_EOP) == 0) {
5714 			WM_RXCHAIN_LINK(sc, m);
5715 			DPRINTF(WM_DEBUG_RX,
5716 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
5717 			    device_xname(sc->sc_dev), sc->sc_rxlen));
5718 			continue;
5719 		}
5720 
5721 		/*
5722 		 * Okay, we have the entire packet now.  The chip is
5723 		 * configured to include the FCS except I350 and I21[01]
5724 		 * (not all chips can be configured to strip it),
5725 		 * so we need to trim it.
5726 		 * May need to adjust length of previous mbuf in the
5727 		 * chain if the current mbuf is too short.
5728 		 * For an eratta, the RCTL_SECRC bit in RCTL register
5729 		 * is always set in I350, so we don't trim it.
5730 		 */
5731 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
5732 		    && (sc->sc_type != WM_T_I210)
5733 		    && (sc->sc_type != WM_T_I211)) {
5734 			if (m->m_len < ETHER_CRC_LEN) {
5735 				sc->sc_rxtail->m_len
5736 				    -= (ETHER_CRC_LEN - m->m_len);
5737 				m->m_len = 0;
5738 			} else
5739 				m->m_len -= ETHER_CRC_LEN;
5740 			len = sc->sc_rxlen - ETHER_CRC_LEN;
5741 		} else
5742 			len = sc->sc_rxlen;
5743 
5744 		WM_RXCHAIN_LINK(sc, m);
5745 
5746 		*sc->sc_rxtailp = NULL;
5747 		m = sc->sc_rxhead;
5748 
5749 		WM_RXCHAIN_RESET(sc);
5750 
5751 		DPRINTF(WM_DEBUG_RX,
5752 		    ("%s: RX: have entire packet, len -> %d\n",
5753 		    device_xname(sc->sc_dev), len));
5754 
5755 		/* If an error occurred, update stats and drop the packet. */
5756 		if (errors &
5757 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
5758 			if (errors & WRX_ER_SE)
5759 				log(LOG_WARNING, "%s: symbol error\n",
5760 				    device_xname(sc->sc_dev));
5761 			else if (errors & WRX_ER_SEQ)
5762 				log(LOG_WARNING, "%s: receive sequence error\n",
5763 				    device_xname(sc->sc_dev));
5764 			else if (errors & WRX_ER_CE)
5765 				log(LOG_WARNING, "%s: CRC error\n",
5766 				    device_xname(sc->sc_dev));
5767 			m_freem(m);
5768 			continue;
5769 		}
5770 
5771 		/* No errors.  Receive the packet. */
5772 		m->m_pkthdr.rcvif = ifp;
5773 		m->m_pkthdr.len = len;
5774 
5775 		/*
5776 		 * If VLANs are enabled, VLAN packets have been unwrapped
5777 		 * for us.  Associate the tag with the packet.
5778 		 */
5779 		/* XXXX should check for i350 and i354 */
5780 		if ((status & WRX_ST_VP) != 0) {
5781 			VLAN_INPUT_TAG(ifp, m,
5782 			    le16toh(vlantag),
5783 			    continue);
5784 		}
5785 
5786 		/* Set up checksum info for this packet. */
5787 		if ((status & WRX_ST_IXSM) == 0) {
5788 			if (status & WRX_ST_IPCS) {
5789 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
5790 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
5791 				if (errors & WRX_ER_IPE)
5792 					m->m_pkthdr.csum_flags |=
5793 					    M_CSUM_IPv4_BAD;
5794 			}
5795 			if (status & WRX_ST_TCPCS) {
5796 				/*
5797 				 * Note: we don't know if this was TCP or UDP,
5798 				 * so we just set both bits, and expect the
5799 				 * upper layers to deal.
5800 				 */
5801 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
5802 				m->m_pkthdr.csum_flags |=
5803 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
5804 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
5805 				if (errors & WRX_ER_TCPE)
5806 					m->m_pkthdr.csum_flags |=
5807 					    M_CSUM_TCP_UDP_BAD;
5808 			}
5809 		}
5810 
5811 		ifp->if_ipackets++;
5812 
5813 		WM_RX_UNLOCK(sc);
5814 
5815 		/* Pass this up to any BPF listeners. */
5816 		bpf_mtap(ifp, m);
5817 
5818 		/* Pass it on. */
5819 		(*ifp->if_input)(ifp, m);
5820 
5821 		WM_RX_LOCK(sc);
5822 
5823 		if (sc->sc_stopping)
5824 			break;
5825 	}
5826 
5827 	/* Update the receive pointer. */
5828 	sc->sc_rxptr = i;
5829 
5830 	DPRINTF(WM_DEBUG_RX,
5831 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
5832 }
5833 
5834 /*
5835  * wm_linkintr_gmii:
5836  *
5837  *	Helper; handle link interrupts for GMII.
5838  */
5839 static void
5840 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
5841 {
5842 
5843 	KASSERT(WM_TX_LOCKED(sc));
5844 
5845 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5846 		__func__));
5847 
5848 	if (icr & ICR_LSC) {
5849 		DPRINTF(WM_DEBUG_LINK,
5850 		    ("%s: LINK: LSC -> mii_pollstat\n",
5851 			device_xname(sc->sc_dev)));
5852 		mii_pollstat(&sc->sc_mii);
5853 		if (sc->sc_type == WM_T_82543) {
5854 			int miistatus, active;
5855 
5856 			/*
5857 			 * With 82543, we need to force speed and
5858 			 * duplex on the MAC equal to what the PHY
5859 			 * speed and duplex configuration is.
5860 			 */
5861 			miistatus = sc->sc_mii.mii_media_status;
5862 
5863 			if (miistatus & IFM_ACTIVE) {
5864 				active = sc->sc_mii.mii_media_active;
5865 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
5866 				switch (IFM_SUBTYPE(active)) {
5867 				case IFM_10_T:
5868 					sc->sc_ctrl |= CTRL_SPEED_10;
5869 					break;
5870 				case IFM_100_TX:
5871 					sc->sc_ctrl |= CTRL_SPEED_100;
5872 					break;
5873 				case IFM_1000_T:
5874 					sc->sc_ctrl |= CTRL_SPEED_1000;
5875 					break;
5876 				default:
5877 					/*
5878 					 * fiber?
5879 					 * Shoud not enter here.
5880 					 */
5881 					printf("unknown media (%x)\n",
5882 					    active);
5883 					break;
5884 				}
5885 				if (active & IFM_FDX)
5886 					sc->sc_ctrl |= CTRL_FD;
5887 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5888 			}
5889 		} else if ((sc->sc_type == WM_T_ICH8)
5890 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
5891 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
5892 		} else if (sc->sc_type == WM_T_PCH) {
5893 			wm_k1_gig_workaround_hv(sc,
5894 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
5895 		}
5896 
5897 		if ((sc->sc_phytype == WMPHY_82578)
5898 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
5899 			== IFM_1000_T)) {
5900 
5901 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
5902 				delay(200*1000); /* XXX too big */
5903 
5904 				/* Link stall fix for link up */
5905 				wm_gmii_hv_writereg(sc->sc_dev, 1,
5906 				    HV_MUX_DATA_CTRL,
5907 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
5908 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
5909 				wm_gmii_hv_writereg(sc->sc_dev, 1,
5910 				    HV_MUX_DATA_CTRL,
5911 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
5912 			}
5913 		}
5914 	} else if (icr & ICR_RXSEQ) {
5915 		DPRINTF(WM_DEBUG_LINK,
5916 		    ("%s: LINK Receive sequence error\n",
5917 			device_xname(sc->sc_dev)));
5918 	}
5919 }
5920 
5921 /*
5922  * wm_linkintr_tbi:
5923  *
5924  *	Helper; handle link interrupts for TBI mode.
5925  */
5926 static void
5927 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
5928 {
5929 	uint32_t status;
5930 
5931 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5932 		__func__));
5933 
5934 	status = CSR_READ(sc, WMREG_STATUS);
5935 	if (icr & ICR_LSC) {
5936 		if (status & STATUS_LU) {
5937 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
5938 			    device_xname(sc->sc_dev),
5939 			    (status & STATUS_FD) ? "FDX" : "HDX"));
5940 			/*
5941 			 * NOTE: CTRL will update TFCE and RFCE automatically,
5942 			 * so we should update sc->sc_ctrl
5943 			 */
5944 
5945 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5946 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5947 			sc->sc_fcrtl &= ~FCRTL_XONE;
5948 			if (status & STATUS_FD)
5949 				sc->sc_tctl |=
5950 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5951 			else
5952 				sc->sc_tctl |=
5953 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5954 			if (sc->sc_ctrl & CTRL_TFCE)
5955 				sc->sc_fcrtl |= FCRTL_XONE;
5956 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5957 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
5958 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
5959 				      sc->sc_fcrtl);
5960 			sc->sc_tbi_linkup = 1;
5961 		} else {
5962 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
5963 			    device_xname(sc->sc_dev)));
5964 			sc->sc_tbi_linkup = 0;
5965 		}
5966 		wm_tbi_set_linkled(sc);
5967 	} else if (icr & ICR_RXSEQ) {
5968 		DPRINTF(WM_DEBUG_LINK,
5969 		    ("%s: LINK: Receive sequence error\n",
5970 		    device_xname(sc->sc_dev)));
5971 	}
5972 }
5973 
5974 /*
5975  * wm_linkintr:
5976  *
5977  *	Helper; handle link interrupts.
5978  */
5979 static void
5980 wm_linkintr(struct wm_softc *sc, uint32_t icr)
5981 {
5982 
5983 	if (sc->sc_flags & WM_F_HAS_MII)
5984 		wm_linkintr_gmii(sc, icr);
5985 	else
5986 		wm_linkintr_tbi(sc, icr);
5987 }
5988 
5989 /*
5990  * wm_intr:
5991  *
5992  *	Interrupt service routine.
5993  */
5994 static int
5995 wm_intr(void *arg)
5996 {
5997 	struct wm_softc *sc = arg;
5998 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5999 	uint32_t icr;
6000 	int handled = 0;
6001 
6002 	while (1 /* CONSTCOND */) {
6003 		icr = CSR_READ(sc, WMREG_ICR);
6004 		if ((icr & sc->sc_icr) == 0)
6005 			break;
6006 		rnd_add_uint32(&sc->rnd_source, icr);
6007 
6008 		WM_RX_LOCK(sc);
6009 
6010 		if (sc->sc_stopping) {
6011 			WM_RX_UNLOCK(sc);
6012 			break;
6013 		}
6014 
6015 		handled = 1;
6016 
6017 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
6018 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
6019 			DPRINTF(WM_DEBUG_RX,
6020 			    ("%s: RX: got Rx intr 0x%08x\n",
6021 			    device_xname(sc->sc_dev),
6022 			    icr & (ICR_RXDMT0|ICR_RXT0)));
6023 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
6024 		}
6025 #endif
6026 		wm_rxintr(sc);
6027 
6028 		WM_RX_UNLOCK(sc);
6029 		WM_TX_LOCK(sc);
6030 
6031 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
6032 		if (icr & ICR_TXDW) {
6033 			DPRINTF(WM_DEBUG_TX,
6034 			    ("%s: TX: got TXDW interrupt\n",
6035 			    device_xname(sc->sc_dev)));
6036 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
6037 		}
6038 #endif
6039 		wm_txintr(sc);
6040 
6041 		if (icr & (ICR_LSC|ICR_RXSEQ)) {
6042 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
6043 			wm_linkintr(sc, icr);
6044 		}
6045 
6046 		WM_TX_UNLOCK(sc);
6047 
6048 		if (icr & ICR_RXO) {
6049 #if defined(WM_DEBUG)
6050 			log(LOG_WARNING, "%s: Receive overrun\n",
6051 			    device_xname(sc->sc_dev));
6052 #endif /* defined(WM_DEBUG) */
6053 		}
6054 	}
6055 
6056 	if (handled) {
6057 		/* Try to get more packets going. */
6058 		ifp->if_start(ifp);
6059 	}
6060 
6061 	return handled;
6062 }
6063 
6064 /*
6065  * Media related.
6066  * GMII, SGMII, TBI (and SERDES)
6067  */
6068 
6069 /* GMII related */
6070 
6071 /*
6072  * wm_gmii_reset:
6073  *
6074  *	Reset the PHY.
6075  */
6076 static void
6077 wm_gmii_reset(struct wm_softc *sc)
6078 {
6079 	uint32_t reg;
6080 	int rv;
6081 
6082 	/* get phy semaphore */
6083 	switch (sc->sc_type) {
6084 	case WM_T_82571:
6085 	case WM_T_82572:
6086 	case WM_T_82573:
6087 	case WM_T_82574:
6088 	case WM_T_82583:
6089 		 /* XXX should get sw semaphore, too */
6090 		rv = wm_get_swsm_semaphore(sc);
6091 		break;
6092 	case WM_T_82575:
6093 	case WM_T_82576:
6094 	case WM_T_82580:
6095 	case WM_T_I350:
6096 	case WM_T_I354:
6097 	case WM_T_I210:
6098 	case WM_T_I211:
6099 	case WM_T_80003:
6100 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6101 		break;
6102 	case WM_T_ICH8:
6103 	case WM_T_ICH9:
6104 	case WM_T_ICH10:
6105 	case WM_T_PCH:
6106 	case WM_T_PCH2:
6107 	case WM_T_PCH_LPT:
6108 		rv = wm_get_swfwhw_semaphore(sc);
6109 		break;
6110 	default:
6111 		/* nothing to do*/
6112 		rv = 0;
6113 		break;
6114 	}
6115 	if (rv != 0) {
6116 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6117 		    __func__);
6118 		return;
6119 	}
6120 
6121 	switch (sc->sc_type) {
6122 	case WM_T_82542_2_0:
6123 	case WM_T_82542_2_1:
6124 		/* null */
6125 		break;
6126 	case WM_T_82543:
6127 		/*
6128 		 * With 82543, we need to force speed and duplex on the MAC
6129 		 * equal to what the PHY speed and duplex configuration is.
6130 		 * In addition, we need to perform a hardware reset on the PHY
6131 		 * to take it out of reset.
6132 		 */
6133 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6134 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6135 
6136 		/* The PHY reset pin is active-low. */
6137 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
6138 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
6139 		    CTRL_EXT_SWDPIN(4));
6140 		reg |= CTRL_EXT_SWDPIO(4);
6141 
6142 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6143 		CSR_WRITE_FLUSH(sc);
6144 		delay(10*1000);
6145 
6146 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
6147 		CSR_WRITE_FLUSH(sc);
6148 		delay(150);
6149 #if 0
6150 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
6151 #endif
6152 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
6153 		break;
6154 	case WM_T_82544:	/* reset 10000us */
6155 	case WM_T_82540:
6156 	case WM_T_82545:
6157 	case WM_T_82545_3:
6158 	case WM_T_82546:
6159 	case WM_T_82546_3:
6160 	case WM_T_82541:
6161 	case WM_T_82541_2:
6162 	case WM_T_82547:
6163 	case WM_T_82547_2:
6164 	case WM_T_82571:	/* reset 100us */
6165 	case WM_T_82572:
6166 	case WM_T_82573:
6167 	case WM_T_82574:
6168 	case WM_T_82575:
6169 	case WM_T_82576:
6170 	case WM_T_82580:
6171 	case WM_T_I350:
6172 	case WM_T_I354:
6173 	case WM_T_I210:
6174 	case WM_T_I211:
6175 	case WM_T_82583:
6176 	case WM_T_80003:
6177 		/* generic reset */
6178 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6179 		CSR_WRITE_FLUSH(sc);
6180 		delay(20000);
6181 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6182 		CSR_WRITE_FLUSH(sc);
6183 		delay(20000);
6184 
6185 		if ((sc->sc_type == WM_T_82541)
6186 		    || (sc->sc_type == WM_T_82541_2)
6187 		    || (sc->sc_type == WM_T_82547)
6188 		    || (sc->sc_type == WM_T_82547_2)) {
6189 			/* workaround for igp are done in igp_reset() */
6190 			/* XXX add code to set LED after phy reset */
6191 		}
6192 		break;
6193 	case WM_T_ICH8:
6194 	case WM_T_ICH9:
6195 	case WM_T_ICH10:
6196 	case WM_T_PCH:
6197 	case WM_T_PCH2:
6198 	case WM_T_PCH_LPT:
6199 		/* generic reset */
6200 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6201 		CSR_WRITE_FLUSH(sc);
6202 		delay(100);
6203 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6204 		CSR_WRITE_FLUSH(sc);
6205 		delay(150);
6206 		break;
6207 	default:
6208 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6209 		    __func__);
6210 		break;
6211 	}
6212 
6213 	/* release PHY semaphore */
6214 	switch (sc->sc_type) {
6215 	case WM_T_82571:
6216 	case WM_T_82572:
6217 	case WM_T_82573:
6218 	case WM_T_82574:
6219 	case WM_T_82583:
6220 		 /* XXX should put sw semaphore, too */
6221 		wm_put_swsm_semaphore(sc);
6222 		break;
6223 	case WM_T_82575:
6224 	case WM_T_82576:
6225 	case WM_T_82580:
6226 	case WM_T_I350:
6227 	case WM_T_I354:
6228 	case WM_T_I210:
6229 	case WM_T_I211:
6230 	case WM_T_80003:
6231 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6232 		break;
6233 	case WM_T_ICH8:
6234 	case WM_T_ICH9:
6235 	case WM_T_ICH10:
6236 	case WM_T_PCH:
6237 	case WM_T_PCH2:
6238 	case WM_T_PCH_LPT:
6239 		wm_put_swfwhw_semaphore(sc);
6240 		break;
6241 	default:
6242 		/* nothing to do*/
6243 		rv = 0;
6244 		break;
6245 	}
6246 
6247 	/* get_cfg_done */
6248 	wm_get_cfg_done(sc);
6249 
6250 	/* extra setup */
6251 	switch (sc->sc_type) {
6252 	case WM_T_82542_2_0:
6253 	case WM_T_82542_2_1:
6254 	case WM_T_82543:
6255 	case WM_T_82544:
6256 	case WM_T_82540:
6257 	case WM_T_82545:
6258 	case WM_T_82545_3:
6259 	case WM_T_82546:
6260 	case WM_T_82546_3:
6261 	case WM_T_82541_2:
6262 	case WM_T_82547_2:
6263 	case WM_T_82571:
6264 	case WM_T_82572:
6265 	case WM_T_82573:
6266 	case WM_T_82574:
6267 	case WM_T_82575:
6268 	case WM_T_82576:
6269 	case WM_T_82580:
6270 	case WM_T_I350:
6271 	case WM_T_I354:
6272 	case WM_T_I210:
6273 	case WM_T_I211:
6274 	case WM_T_82583:
6275 	case WM_T_80003:
6276 		/* null */
6277 		break;
6278 	case WM_T_82541:
6279 	case WM_T_82547:
6280 		/* XXX Configure actively LED after PHY reset */
6281 		break;
6282 	case WM_T_ICH8:
6283 	case WM_T_ICH9:
6284 	case WM_T_ICH10:
6285 	case WM_T_PCH:
6286 	case WM_T_PCH2:
6287 	case WM_T_PCH_LPT:
6288 		/* Allow time for h/w to get to a quiescent state afer reset */
6289 		delay(10*1000);
6290 
6291 		if (sc->sc_type == WM_T_PCH)
6292 			wm_hv_phy_workaround_ich8lan(sc);
6293 
6294 		if (sc->sc_type == WM_T_PCH2)
6295 			wm_lv_phy_workaround_ich8lan(sc);
6296 
6297 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6298 			/*
6299 			 * dummy read to clear the phy wakeup bit after lcd
6300 			 * reset
6301 			 */
6302 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6303 		}
6304 
6305 		/*
6306 		 * XXX Configure the LCD with th extended configuration region
6307 		 * in NVM
6308 		 */
6309 
6310 		/* Configure the LCD with the OEM bits in NVM */
6311 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
6312 		    || (sc->sc_type == WM_T_PCH_LPT)) {
6313 			/*
6314 			 * Disable LPLU.
6315 			 * XXX It seems that 82567 has LPLU, too.
6316 			 */
6317 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6318 			reg &= ~(HV_OEM_BITS_A1KDIS| HV_OEM_BITS_LPLU);
6319 			reg |= HV_OEM_BITS_ANEGNOW;
6320 			wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6321 		}
6322 		break;
6323 	default:
6324 		panic("%s: unknown type\n", __func__);
6325 		break;
6326 	}
6327 }
6328 
6329 /*
6330  * wm_get_phy_id_82575:
6331  *
6332  * Return PHY ID. Return -1 if it failed.
6333  */
6334 static int
6335 wm_get_phy_id_82575(struct wm_softc *sc)
6336 {
6337 	uint32_t reg;
6338 	int phyid = -1;
6339 
6340 	/* XXX */
6341 	if ((sc->sc_flags & WM_F_SGMII) == 0)
6342 		return -1;
6343 
6344 	if (wm_sgmii_uses_mdio(sc)) {
6345 		switch (sc->sc_type) {
6346 		case WM_T_82575:
6347 		case WM_T_82576:
6348 			reg = CSR_READ(sc, WMREG_MDIC);
6349 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
6350 			break;
6351 		case WM_T_82580:
6352 		case WM_T_I350:
6353 		case WM_T_I354:
6354 		case WM_T_I210:
6355 		case WM_T_I211:
6356 			reg = CSR_READ(sc, WMREG_MDICNFG);
6357 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
6358 			break;
6359 		default:
6360 			return -1;
6361 		}
6362 	}
6363 
6364 	return phyid;
6365 }
6366 
6367 
6368 /*
6369  * wm_gmii_mediainit:
6370  *
6371  *	Initialize media for use on 1000BASE-T devices.
6372  */
6373 static void
6374 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6375 {
6376 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6377 	struct mii_data *mii = &sc->sc_mii;
6378 	uint32_t reg;
6379 
6380 	/* We have GMII. */
6381 	sc->sc_flags |= WM_F_HAS_MII;
6382 
6383 	if (sc->sc_type == WM_T_80003)
6384 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
6385 	else
6386 		sc->sc_tipg = TIPG_1000T_DFLT;
6387 
6388 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
6389 	if ((sc->sc_type == WM_T_82580)
6390 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
6391 	    || (sc->sc_type == WM_T_I211)) {
6392 		reg = CSR_READ(sc, WMREG_PHPM);
6393 		reg &= ~PHPM_GO_LINK_D;
6394 		CSR_WRITE(sc, WMREG_PHPM, reg);
6395 	}
6396 
6397 	/*
6398 	 * Let the chip set speed/duplex on its own based on
6399 	 * signals from the PHY.
6400 	 * XXXbouyer - I'm not sure this is right for the 80003,
6401 	 * the em driver only sets CTRL_SLU here - but it seems to work.
6402 	 */
6403 	sc->sc_ctrl |= CTRL_SLU;
6404 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6405 
6406 	/* Initialize our media structures and probe the GMII. */
6407 	mii->mii_ifp = ifp;
6408 
6409 	/*
6410 	 * Determine the PHY access method.
6411 	 *
6412 	 *  For SGMII, use SGMII specific method.
6413 	 *
6414 	 *  For some devices, we can determine the PHY access method
6415 	 * from sc_type.
6416 	 *
6417 	 *  For ICH and PCH variants, it's difficult to determine the PHY
6418 	 * access  method by sc_type, so use the PCI product ID for some
6419 	 * devices.
6420 	 * For other ICH8 variants, try to use igp's method. If the PHY
6421 	 * can't detect, then use bm's method.
6422 	 */
6423 	switch (prodid) {
6424 	case PCI_PRODUCT_INTEL_PCH_M_LM:
6425 	case PCI_PRODUCT_INTEL_PCH_M_LC:
6426 		/* 82577 */
6427 		sc->sc_phytype = WMPHY_82577;
6428 		break;
6429 	case PCI_PRODUCT_INTEL_PCH_D_DM:
6430 	case PCI_PRODUCT_INTEL_PCH_D_DC:
6431 		/* 82578 */
6432 		sc->sc_phytype = WMPHY_82578;
6433 		break;
6434 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6435 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
6436 		/* 82579 */
6437 		sc->sc_phytype = WMPHY_82579;
6438 		break;
6439 	case PCI_PRODUCT_INTEL_82801I_BM:
6440 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6441 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6442 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6443 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6444 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6445 		/* 82567 */
6446 		sc->sc_phytype = WMPHY_BM;
6447 		mii->mii_readreg = wm_gmii_bm_readreg;
6448 		mii->mii_writereg = wm_gmii_bm_writereg;
6449 		break;
6450 	default:
6451 		if (((sc->sc_flags & WM_F_SGMII) != 0)
6452 		    && !wm_sgmii_uses_mdio(sc)){
6453 			mii->mii_readreg = wm_sgmii_readreg;
6454 			mii->mii_writereg = wm_sgmii_writereg;
6455 		} else if (sc->sc_type >= WM_T_80003) {
6456 			mii->mii_readreg = wm_gmii_i80003_readreg;
6457 			mii->mii_writereg = wm_gmii_i80003_writereg;
6458 		} else if (sc->sc_type >= WM_T_I210) {
6459 			mii->mii_readreg = wm_gmii_i82544_readreg;
6460 			mii->mii_writereg = wm_gmii_i82544_writereg;
6461 		} else if (sc->sc_type >= WM_T_82580) {
6462 			sc->sc_phytype = WMPHY_82580;
6463 			mii->mii_readreg = wm_gmii_82580_readreg;
6464 			mii->mii_writereg = wm_gmii_82580_writereg;
6465 		} else if (sc->sc_type >= WM_T_82544) {
6466 			mii->mii_readreg = wm_gmii_i82544_readreg;
6467 			mii->mii_writereg = wm_gmii_i82544_writereg;
6468 		} else {
6469 			mii->mii_readreg = wm_gmii_i82543_readreg;
6470 			mii->mii_writereg = wm_gmii_i82543_writereg;
6471 		}
6472 		break;
6473 	}
6474 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_LPT)) {
6475 		/* All PCH* use _hv_ */
6476 		mii->mii_readreg = wm_gmii_hv_readreg;
6477 		mii->mii_writereg = wm_gmii_hv_writereg;
6478 	}
6479 	mii->mii_statchg = wm_gmii_statchg;
6480 
6481 	wm_gmii_reset(sc);
6482 
6483 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
6484 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
6485 	    wm_gmii_mediastatus);
6486 
6487 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6488 	    || (sc->sc_type == WM_T_82580)
6489 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
6490 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
6491 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
6492 			/* Attach only one port */
6493 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6494 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
6495 		} else {
6496 			int i, id;
6497 			uint32_t ctrl_ext;
6498 
6499 			id = wm_get_phy_id_82575(sc);
6500 			if (id != -1) {
6501 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6502 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
6503 			}
6504 			if ((id == -1)
6505 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
6506 				/* Power on sgmii phy if it is disabled */
6507 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6508 				CSR_WRITE(sc, WMREG_CTRL_EXT,
6509 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
6510 				CSR_WRITE_FLUSH(sc);
6511 				delay(300*1000); /* XXX too long */
6512 
6513 				/* from 1 to 8 */
6514 				for (i = 1; i < 8; i++)
6515 					mii_attach(sc->sc_dev, &sc->sc_mii,
6516 					    0xffffffff, i, MII_OFFSET_ANY,
6517 					    MIIF_DOPAUSE);
6518 
6519 				/* restore previous sfp cage power state */
6520 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6521 			}
6522 		}
6523 	} else {
6524 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6525 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
6526 	}
6527 
6528 	/*
6529 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
6530 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
6531 	 */
6532 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
6533 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
6534 		wm_set_mdio_slow_mode_hv(sc);
6535 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6536 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
6537 	}
6538 
6539 	/*
6540 	 * (For ICH8 variants)
6541 	 * If PHY detection failed, use BM's r/w function and retry.
6542 	 */
6543 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
6544 		/* if failed, retry with *_bm_* */
6545 		mii->mii_readreg = wm_gmii_bm_readreg;
6546 		mii->mii_writereg = wm_gmii_bm_writereg;
6547 
6548 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6549 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
6550 	}
6551 
6552 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
6553 		/* Any PHY wasn't find */
6554 		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6555 		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
6556 		sc->sc_phytype = WMPHY_NONE;
6557 	} else {
6558 		/*
6559 		 * PHY Found!
6560 		 * Check PHY type.
6561 		 */
6562 		uint32_t model;
6563 		struct mii_softc *child;
6564 
6565 		child = LIST_FIRST(&mii->mii_phys);
6566 		if (device_is_a(child->mii_dev, "igphy")) {
6567 			struct igphy_softc *isc = (struct igphy_softc *)child;
6568 
6569 			model = isc->sc_mii.mii_mpd_model;
6570 			if (model == MII_MODEL_yyINTEL_I82566)
6571 				sc->sc_phytype = WMPHY_IGP_3;
6572 		}
6573 
6574 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
6575 	}
6576 }
6577 
6578 /*
6579  * wm_gmii_mediastatus:	[ifmedia interface function]
6580  *
6581  *	Get the current interface media status on a 1000BASE-T device.
6582  */
6583 static void
6584 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6585 {
6586 	struct wm_softc *sc = ifp->if_softc;
6587 
6588 	ether_mediastatus(ifp, ifmr);
6589 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6590 	    | sc->sc_flowflags;
6591 }
6592 
6593 /*
6594  * wm_gmii_mediachange:	[ifmedia interface function]
6595  *
6596  *	Set hardware to newly-selected media on a 1000BASE-T device.
6597  */
6598 static int
6599 wm_gmii_mediachange(struct ifnet *ifp)
6600 {
6601 	struct wm_softc *sc = ifp->if_softc;
6602 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6603 	int rc;
6604 
6605 	if ((ifp->if_flags & IFF_UP) == 0)
6606 		return 0;
6607 
6608 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6609 	sc->sc_ctrl |= CTRL_SLU;
6610 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6611 	    || (sc->sc_type > WM_T_82543)) {
6612 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6613 	} else {
6614 		sc->sc_ctrl &= ~CTRL_ASDE;
6615 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6616 		if (ife->ifm_media & IFM_FDX)
6617 			sc->sc_ctrl |= CTRL_FD;
6618 		switch (IFM_SUBTYPE(ife->ifm_media)) {
6619 		case IFM_10_T:
6620 			sc->sc_ctrl |= CTRL_SPEED_10;
6621 			break;
6622 		case IFM_100_TX:
6623 			sc->sc_ctrl |= CTRL_SPEED_100;
6624 			break;
6625 		case IFM_1000_T:
6626 			sc->sc_ctrl |= CTRL_SPEED_1000;
6627 			break;
6628 		default:
6629 			panic("wm_gmii_mediachange: bad media 0x%x",
6630 			    ife->ifm_media);
6631 		}
6632 	}
6633 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6634 	if (sc->sc_type <= WM_T_82543)
6635 		wm_gmii_reset(sc);
6636 
6637 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6638 		return 0;
6639 	return rc;
6640 }
6641 
6642 #define	MDI_IO		CTRL_SWDPIN(2)
6643 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
6644 #define	MDI_CLK		CTRL_SWDPIN(3)
6645 
6646 static void
6647 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6648 {
6649 	uint32_t i, v;
6650 
6651 	v = CSR_READ(sc, WMREG_CTRL);
6652 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6653 	v |= MDI_DIR | CTRL_SWDPIO(3);
6654 
6655 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6656 		if (data & i)
6657 			v |= MDI_IO;
6658 		else
6659 			v &= ~MDI_IO;
6660 		CSR_WRITE(sc, WMREG_CTRL, v);
6661 		CSR_WRITE_FLUSH(sc);
6662 		delay(10);
6663 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6664 		CSR_WRITE_FLUSH(sc);
6665 		delay(10);
6666 		CSR_WRITE(sc, WMREG_CTRL, v);
6667 		CSR_WRITE_FLUSH(sc);
6668 		delay(10);
6669 	}
6670 }
6671 
6672 static uint32_t
6673 wm_i82543_mii_recvbits(struct wm_softc *sc)
6674 {
6675 	uint32_t v, i, data = 0;
6676 
6677 	v = CSR_READ(sc, WMREG_CTRL);
6678 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6679 	v |= CTRL_SWDPIO(3);
6680 
6681 	CSR_WRITE(sc, WMREG_CTRL, v);
6682 	CSR_WRITE_FLUSH(sc);
6683 	delay(10);
6684 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6685 	CSR_WRITE_FLUSH(sc);
6686 	delay(10);
6687 	CSR_WRITE(sc, WMREG_CTRL, v);
6688 	CSR_WRITE_FLUSH(sc);
6689 	delay(10);
6690 
6691 	for (i = 0; i < 16; i++) {
6692 		data <<= 1;
6693 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6694 		CSR_WRITE_FLUSH(sc);
6695 		delay(10);
6696 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
6697 			data |= 1;
6698 		CSR_WRITE(sc, WMREG_CTRL, v);
6699 		CSR_WRITE_FLUSH(sc);
6700 		delay(10);
6701 	}
6702 
6703 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6704 	CSR_WRITE_FLUSH(sc);
6705 	delay(10);
6706 	CSR_WRITE(sc, WMREG_CTRL, v);
6707 	CSR_WRITE_FLUSH(sc);
6708 	delay(10);
6709 
6710 	return data;
6711 }
6712 
6713 #undef MDI_IO
6714 #undef MDI_DIR
6715 #undef MDI_CLK
6716 
6717 /*
6718  * wm_gmii_i82543_readreg:	[mii interface function]
6719  *
6720  *	Read a PHY register on the GMII (i82543 version).
6721  */
6722 static int
6723 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
6724 {
6725 	struct wm_softc *sc = device_private(self);
6726 	int rv;
6727 
6728 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6729 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
6730 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6731 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
6732 
6733 	DPRINTF(WM_DEBUG_GMII,
6734 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6735 	    device_xname(sc->sc_dev), phy, reg, rv));
6736 
6737 	return rv;
6738 }
6739 
6740 /*
6741  * wm_gmii_i82543_writereg:	[mii interface function]
6742  *
6743  *	Write a PHY register on the GMII (i82543 version).
6744  */
6745 static void
6746 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6747 {
6748 	struct wm_softc *sc = device_private(self);
6749 
6750 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6751 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6752 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6753 	    (MII_COMMAND_START << 30), 32);
6754 }
6755 
6756 /*
6757  * wm_gmii_i82544_readreg:	[mii interface function]
6758  *
6759  *	Read a PHY register on the GMII.
6760  */
6761 static int
6762 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6763 {
6764 	struct wm_softc *sc = device_private(self);
6765 	uint32_t mdic = 0;
6766 	int i, rv;
6767 
6768 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6769 	    MDIC_REGADD(reg));
6770 
6771 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6772 		mdic = CSR_READ(sc, WMREG_MDIC);
6773 		if (mdic & MDIC_READY)
6774 			break;
6775 		delay(50);
6776 	}
6777 
6778 	if ((mdic & MDIC_READY) == 0) {
6779 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6780 		    device_xname(sc->sc_dev), phy, reg);
6781 		rv = 0;
6782 	} else if (mdic & MDIC_E) {
6783 #if 0 /* This is normal if no PHY is present. */
6784 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6785 		    device_xname(sc->sc_dev), phy, reg);
6786 #endif
6787 		rv = 0;
6788 	} else {
6789 		rv = MDIC_DATA(mdic);
6790 		if (rv == 0xffff)
6791 			rv = 0;
6792 	}
6793 
6794 	return rv;
6795 }
6796 
6797 /*
6798  * wm_gmii_i82544_writereg:	[mii interface function]
6799  *
6800  *	Write a PHY register on the GMII.
6801  */
6802 static void
6803 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6804 {
6805 	struct wm_softc *sc = device_private(self);
6806 	uint32_t mdic = 0;
6807 	int i;
6808 
6809 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6810 	    MDIC_REGADD(reg) | MDIC_DATA(val));
6811 
6812 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6813 		mdic = CSR_READ(sc, WMREG_MDIC);
6814 		if (mdic & MDIC_READY)
6815 			break;
6816 		delay(50);
6817 	}
6818 
6819 	if ((mdic & MDIC_READY) == 0)
6820 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6821 		    device_xname(sc->sc_dev), phy, reg);
6822 	else if (mdic & MDIC_E)
6823 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6824 		    device_xname(sc->sc_dev), phy, reg);
6825 }
6826 
6827 /*
6828  * wm_gmii_i80003_readreg:	[mii interface function]
6829  *
6830  *	Read a PHY register on the kumeran
6831  * This could be handled by the PHY layer if we didn't have to lock the
6832  * ressource ...
6833  */
6834 static int
6835 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6836 {
6837 	struct wm_softc *sc = device_private(self);
6838 	int sem;
6839 	int rv;
6840 
6841 	if (phy != 1) /* only one PHY on kumeran bus */
6842 		return 0;
6843 
6844 	sem = swfwphysem[sc->sc_funcid];
6845 	if (wm_get_swfw_semaphore(sc, sem)) {
6846 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6847 		    __func__);
6848 		return 0;
6849 	}
6850 
6851 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6852 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6853 		    reg >> GG82563_PAGE_SHIFT);
6854 	} else {
6855 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6856 		    reg >> GG82563_PAGE_SHIFT);
6857 	}
6858 	/* Wait more 200us for a bug of the ready bit in the MDIC register */
6859 	delay(200);
6860 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6861 	delay(200);
6862 
6863 	wm_put_swfw_semaphore(sc, sem);
6864 	return rv;
6865 }
6866 
6867 /*
6868  * wm_gmii_i80003_writereg:	[mii interface function]
6869  *
6870  *	Write a PHY register on the kumeran.
6871  * This could be handled by the PHY layer if we didn't have to lock the
6872  * ressource ...
6873  */
6874 static void
6875 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6876 {
6877 	struct wm_softc *sc = device_private(self);
6878 	int sem;
6879 
6880 	if (phy != 1) /* only one PHY on kumeran bus */
6881 		return;
6882 
6883 	sem = swfwphysem[sc->sc_funcid];
6884 	if (wm_get_swfw_semaphore(sc, sem)) {
6885 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6886 		    __func__);
6887 		return;
6888 	}
6889 
6890 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6891 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6892 		    reg >> GG82563_PAGE_SHIFT);
6893 	} else {
6894 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6895 		    reg >> GG82563_PAGE_SHIFT);
6896 	}
6897 	/* Wait more 200us for a bug of the ready bit in the MDIC register */
6898 	delay(200);
6899 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6900 	delay(200);
6901 
6902 	wm_put_swfw_semaphore(sc, sem);
6903 }
6904 
6905 /*
6906  * wm_gmii_bm_readreg:	[mii interface function]
6907  *
6908  *	Read a PHY register on the kumeran
6909  * This could be handled by the PHY layer if we didn't have to lock the
6910  * ressource ...
6911  */
6912 static int
6913 wm_gmii_bm_readreg(device_t self, int phy, int reg)
6914 {
6915 	struct wm_softc *sc = device_private(self);
6916 	int sem;
6917 	int rv;
6918 
6919 	sem = swfwphysem[sc->sc_funcid];
6920 	if (wm_get_swfw_semaphore(sc, sem)) {
6921 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6922 		    __func__);
6923 		return 0;
6924 	}
6925 
6926 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6927 		if (phy == 1)
6928 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6929 			    reg);
6930 		else
6931 			wm_gmii_i82544_writereg(self, phy,
6932 			    GG82563_PHY_PAGE_SELECT,
6933 			    reg >> GG82563_PAGE_SHIFT);
6934 	}
6935 
6936 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6937 	wm_put_swfw_semaphore(sc, sem);
6938 	return rv;
6939 }
6940 
6941 /*
6942  * wm_gmii_bm_writereg:	[mii interface function]
6943  *
6944  *	Write a PHY register on the kumeran.
6945  * This could be handled by the PHY layer if we didn't have to lock the
6946  * ressource ...
6947  */
6948 static void
6949 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6950 {
6951 	struct wm_softc *sc = device_private(self);
6952 	int sem;
6953 
6954 	sem = swfwphysem[sc->sc_funcid];
6955 	if (wm_get_swfw_semaphore(sc, sem)) {
6956 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6957 		    __func__);
6958 		return;
6959 	}
6960 
6961 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6962 		if (phy == 1)
6963 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6964 			    reg);
6965 		else
6966 			wm_gmii_i82544_writereg(self, phy,
6967 			    GG82563_PHY_PAGE_SELECT,
6968 			    reg >> GG82563_PAGE_SHIFT);
6969 	}
6970 
6971 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6972 	wm_put_swfw_semaphore(sc, sem);
6973 }
6974 
6975 static void
6976 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
6977 {
6978 	struct wm_softc *sc = device_private(self);
6979 	uint16_t regnum = BM_PHY_REG_NUM(offset);
6980 	uint16_t wuce;
6981 
6982 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
6983 	if (sc->sc_type == WM_T_PCH) {
6984 		/* XXX e1000 driver do nothing... why? */
6985 	}
6986 
6987 	/* Set page 769 */
6988 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6989 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6990 
6991 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
6992 
6993 	wuce &= ~BM_WUC_HOST_WU_BIT;
6994 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
6995 	    wuce | BM_WUC_ENABLE_BIT);
6996 
6997 	/* Select page 800 */
6998 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6999 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
7000 
7001 	/* Write page 800 */
7002 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
7003 
7004 	if (rd)
7005 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
7006 	else
7007 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
7008 
7009 	/* Set page 769 */
7010 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7011 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7012 
7013 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
7014 }
7015 
7016 /*
7017  * wm_gmii_hv_readreg:	[mii interface function]
7018  *
7019  *	Read a PHY register on the kumeran
7020  * This could be handled by the PHY layer if we didn't have to lock the
7021  * ressource ...
7022  */
7023 static int
7024 wm_gmii_hv_readreg(device_t self, int phy, int reg)
7025 {
7026 	struct wm_softc *sc = device_private(self);
7027 	uint16_t page = BM_PHY_REG_PAGE(reg);
7028 	uint16_t regnum = BM_PHY_REG_NUM(reg);
7029 	uint16_t val;
7030 	int rv;
7031 
7032 	if (wm_get_swfwhw_semaphore(sc)) {
7033 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7034 		    __func__);
7035 		return 0;
7036 	}
7037 
7038 	/* XXX Workaround failure in MDIO access while cable is disconnected */
7039 	if (sc->sc_phytype == WMPHY_82577) {
7040 		/* XXX must write */
7041 	}
7042 
7043 	/* Page 800 works differently than the rest so it has its own func */
7044 	if (page == BM_WUC_PAGE) {
7045 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
7046 		return val;
7047 	}
7048 
7049 	/*
7050 	 * Lower than page 768 works differently than the rest so it has its
7051 	 * own func
7052 	 */
7053 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7054 		printf("gmii_hv_readreg!!!\n");
7055 		return 0;
7056 	}
7057 
7058 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7059 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7060 		    page << BME1000_PAGE_SHIFT);
7061 	}
7062 
7063 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
7064 	wm_put_swfwhw_semaphore(sc);
7065 	return rv;
7066 }
7067 
7068 /*
7069  * wm_gmii_hv_writereg:	[mii interface function]
7070  *
7071  *	Write a PHY register on the kumeran.
7072  * This could be handled by the PHY layer if we didn't have to lock the
7073  * ressource ...
7074  */
7075 static void
7076 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
7077 {
7078 	struct wm_softc *sc = device_private(self);
7079 	uint16_t page = BM_PHY_REG_PAGE(reg);
7080 	uint16_t regnum = BM_PHY_REG_NUM(reg);
7081 
7082 	if (wm_get_swfwhw_semaphore(sc)) {
7083 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7084 		    __func__);
7085 		return;
7086 	}
7087 
7088 	/* XXX Workaround failure in MDIO access while cable is disconnected */
7089 
7090 	/* Page 800 works differently than the rest so it has its own func */
7091 	if (page == BM_WUC_PAGE) {
7092 		uint16_t tmp;
7093 
7094 		tmp = val;
7095 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
7096 		return;
7097 	}
7098 
7099 	/*
7100 	 * Lower than page 768 works differently than the rest so it has its
7101 	 * own func
7102 	 */
7103 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7104 		printf("gmii_hv_writereg!!!\n");
7105 		return;
7106 	}
7107 
7108 	/*
7109 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
7110 	 * Power Down (whenever bit 11 of the PHY control register is set)
7111 	 */
7112 
7113 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7114 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7115 		    page << BME1000_PAGE_SHIFT);
7116 	}
7117 
7118 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
7119 	wm_put_swfwhw_semaphore(sc);
7120 }
7121 
7122 /*
7123  * wm_gmii_82580_readreg:	[mii interface function]
7124  *
7125  *	Read a PHY register on the 82580 and I350.
7126  * This could be handled by the PHY layer if we didn't have to lock the
7127  * ressource ...
7128  */
7129 static int
7130 wm_gmii_82580_readreg(device_t self, int phy, int reg)
7131 {
7132 	struct wm_softc *sc = device_private(self);
7133 	int sem;
7134 	int rv;
7135 
7136 	sem = swfwphysem[sc->sc_funcid];
7137 	if (wm_get_swfw_semaphore(sc, sem)) {
7138 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7139 		    __func__);
7140 		return 0;
7141 	}
7142 
7143 	rv = wm_gmii_i82544_readreg(self, phy, reg);
7144 
7145 	wm_put_swfw_semaphore(sc, sem);
7146 	return rv;
7147 }
7148 
7149 /*
7150  * wm_gmii_82580_writereg:	[mii interface function]
7151  *
7152  *	Write a PHY register on the 82580 and I350.
7153  * This could be handled by the PHY layer if we didn't have to lock the
7154  * ressource ...
7155  */
7156 static void
7157 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
7158 {
7159 	struct wm_softc *sc = device_private(self);
7160 	int sem;
7161 
7162 	sem = swfwphysem[sc->sc_funcid];
7163 	if (wm_get_swfw_semaphore(sc, sem)) {
7164 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7165 		    __func__);
7166 		return;
7167 	}
7168 
7169 	wm_gmii_i82544_writereg(self, phy, reg, val);
7170 
7171 	wm_put_swfw_semaphore(sc, sem);
7172 }
7173 
7174 /*
7175  * wm_gmii_statchg:	[mii interface function]
7176  *
7177  *	Callback from MII layer when media changes.
7178  */
7179 static void
7180 wm_gmii_statchg(struct ifnet *ifp)
7181 {
7182 	struct wm_softc *sc = ifp->if_softc;
7183 	struct mii_data *mii = &sc->sc_mii;
7184 
7185 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
7186 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7187 	sc->sc_fcrtl &= ~FCRTL_XONE;
7188 
7189 	/*
7190 	 * Get flow control negotiation result.
7191 	 */
7192 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7193 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7194 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7195 		mii->mii_media_active &= ~IFM_ETH_FMASK;
7196 	}
7197 
7198 	if (sc->sc_flowflags & IFM_FLOW) {
7199 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7200 			sc->sc_ctrl |= CTRL_TFCE;
7201 			sc->sc_fcrtl |= FCRTL_XONE;
7202 		}
7203 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7204 			sc->sc_ctrl |= CTRL_RFCE;
7205 	}
7206 
7207 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
7208 		DPRINTF(WM_DEBUG_LINK,
7209 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7210 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7211 	} else {
7212 		DPRINTF(WM_DEBUG_LINK,
7213 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7214 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7215 	}
7216 
7217 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7218 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7219 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7220 						 : WMREG_FCRTL, sc->sc_fcrtl);
7221 	if (sc->sc_type == WM_T_80003) {
7222 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7223 		case IFM_1000_T:
7224 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7225 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7226 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
7227 			break;
7228 		default:
7229 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7230 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7231 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
7232 			break;
7233 		}
7234 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7235 	}
7236 }
7237 
7238 /*
7239  * wm_kmrn_readreg:
7240  *
7241  *	Read a kumeran register
7242  */
7243 static int
7244 wm_kmrn_readreg(struct wm_softc *sc, int reg)
7245 {
7246 	int rv;
7247 
7248 	if (sc->sc_flags == WM_F_LOCK_SWFW) {
7249 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7250 			aprint_error_dev(sc->sc_dev,
7251 			    "%s: failed to get semaphore\n", __func__);
7252 			return 0;
7253 		}
7254 	} else if (sc->sc_flags == WM_F_LOCK_EXTCNF) {
7255 		if (wm_get_swfwhw_semaphore(sc)) {
7256 			aprint_error_dev(sc->sc_dev,
7257 			    "%s: failed to get semaphore\n", __func__);
7258 			return 0;
7259 		}
7260 	}
7261 
7262 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7263 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7264 	    KUMCTRLSTA_REN);
7265 	CSR_WRITE_FLUSH(sc);
7266 	delay(2);
7267 
7268 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7269 
7270 	if (sc->sc_flags == WM_F_LOCK_SWFW)
7271 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7272 	else if (sc->sc_flags == WM_F_LOCK_EXTCNF)
7273 		wm_put_swfwhw_semaphore(sc);
7274 
7275 	return rv;
7276 }
7277 
7278 /*
7279  * wm_kmrn_writereg:
7280  *
7281  *	Write a kumeran register
7282  */
7283 static void
7284 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7285 {
7286 
7287 	if (sc->sc_flags == WM_F_LOCK_SWFW) {
7288 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7289 			aprint_error_dev(sc->sc_dev,
7290 			    "%s: failed to get semaphore\n", __func__);
7291 			return;
7292 		}
7293 	} else if (sc->sc_flags == WM_F_LOCK_EXTCNF) {
7294 		if (wm_get_swfwhw_semaphore(sc)) {
7295 			aprint_error_dev(sc->sc_dev,
7296 			    "%s: failed to get semaphore\n", __func__);
7297 			return;
7298 		}
7299 	}
7300 
7301 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7302 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7303 	    (val & KUMCTRLSTA_MASK));
7304 
7305 	if (sc->sc_flags == WM_F_LOCK_SWFW)
7306 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7307 	else if (sc->sc_flags == WM_F_LOCK_EXTCNF)
7308 		wm_put_swfwhw_semaphore(sc);
7309 }
7310 
7311 /* SGMII related */
7312 
7313 /*
7314  * wm_sgmii_uses_mdio
7315  *
7316  * Check whether the transaction is to the internal PHY or the external
7317  * MDIO interface. Return true if it's MDIO.
7318  */
7319 static bool
7320 wm_sgmii_uses_mdio(struct wm_softc *sc)
7321 {
7322 	uint32_t reg;
7323 	bool ismdio = false;
7324 
7325 	switch (sc->sc_type) {
7326 	case WM_T_82575:
7327 	case WM_T_82576:
7328 		reg = CSR_READ(sc, WMREG_MDIC);
7329 		ismdio = ((reg & MDIC_DEST) != 0);
7330 		break;
7331 	case WM_T_82580:
7332 	case WM_T_I350:
7333 	case WM_T_I354:
7334 	case WM_T_I210:
7335 	case WM_T_I211:
7336 		reg = CSR_READ(sc, WMREG_MDICNFG);
7337 		ismdio = ((reg & MDICNFG_DEST) != 0);
7338 		break;
7339 	default:
7340 		break;
7341 	}
7342 
7343 	return ismdio;
7344 }
7345 
7346 /*
7347  * wm_sgmii_readreg:	[mii interface function]
7348  *
7349  *	Read a PHY register on the SGMII
7350  * This could be handled by the PHY layer if we didn't have to lock the
7351  * ressource ...
7352  */
7353 static int
7354 wm_sgmii_readreg(device_t self, int phy, int reg)
7355 {
7356 	struct wm_softc *sc = device_private(self);
7357 	uint32_t i2ccmd;
7358 	int i, rv;
7359 
7360 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7361 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7362 		    __func__);
7363 		return 0;
7364 	}
7365 
7366 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7367 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
7368 	    | I2CCMD_OPCODE_READ;
7369 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7370 
7371 	/* Poll the ready bit */
7372 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7373 		delay(50);
7374 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7375 		if (i2ccmd & I2CCMD_READY)
7376 			break;
7377 	}
7378 	if ((i2ccmd & I2CCMD_READY) == 0)
7379 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
7380 	if ((i2ccmd & I2CCMD_ERROR) != 0)
7381 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7382 
7383 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
7384 
7385 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7386 	return rv;
7387 }
7388 
7389 /*
7390  * wm_sgmii_writereg:	[mii interface function]
7391  *
7392  *	Write a PHY register on the SGMII.
7393  * This could be handled by the PHY layer if we didn't have to lock the
7394  * ressource ...
7395  */
7396 static void
7397 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
7398 {
7399 	struct wm_softc *sc = device_private(self);
7400 	uint32_t i2ccmd;
7401 	int i;
7402 	int val_swapped;
7403 
7404 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7405 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7406 		    __func__);
7407 		return;
7408 	}
7409 	/* Swap the data bytes for the I2C interface */
7410 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
7411 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7412 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
7413 	    | I2CCMD_OPCODE_WRITE | val_swapped;
7414 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7415 
7416 	/* Poll the ready bit */
7417 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7418 		delay(50);
7419 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7420 		if (i2ccmd & I2CCMD_READY)
7421 			break;
7422 	}
7423 	if ((i2ccmd & I2CCMD_READY) == 0)
7424 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
7425 	if ((i2ccmd & I2CCMD_ERROR) != 0)
7426 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7427 
7428 	wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
7429 }
7430 
7431 /* TBI related */
7432 
7433 /* XXX Currently TBI only */
7434 static int
7435 wm_check_for_link(struct wm_softc *sc)
7436 {
7437 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7438 	uint32_t rxcw;
7439 	uint32_t ctrl;
7440 	uint32_t status;
7441 	uint32_t sig;
7442 
7443 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
7444 		sc->sc_tbi_linkup = 1;
7445 		return 0;
7446 	}
7447 
7448 	rxcw = CSR_READ(sc, WMREG_RXCW);
7449 	ctrl = CSR_READ(sc, WMREG_CTRL);
7450 	status = CSR_READ(sc, WMREG_STATUS);
7451 
7452 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
7453 
7454 	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
7455 		device_xname(sc->sc_dev), __func__,
7456 		((ctrl & CTRL_SWDPIN(1)) == sig),
7457 		((status & STATUS_LU) != 0),
7458 		((rxcw & RXCW_C) != 0)
7459 		    ));
7460 
7461 	/*
7462 	 * SWDPIN   LU RXCW
7463 	 *      0    0    0
7464 	 *      0    0    1	(should not happen)
7465 	 *      0    1    0	(should not happen)
7466 	 *      0    1    1	(should not happen)
7467 	 *      1    0    0	Disable autonego and force linkup
7468 	 *      1    0    1	got /C/ but not linkup yet
7469 	 *      1    1    0	(linkup)
7470 	 *      1    1    1	If IFM_AUTO, back to autonego
7471 	 *
7472 	 */
7473 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
7474 	    && ((status & STATUS_LU) == 0)
7475 	    && ((rxcw & RXCW_C) == 0)) {
7476 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
7477 			__func__));
7478 		sc->sc_tbi_linkup = 0;
7479 		/* Disable auto-negotiation in the TXCW register */
7480 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
7481 
7482 		/*
7483 		 * Force link-up and also force full-duplex.
7484 		 *
7485 		 * NOTE: CTRL was updated TFCE and RFCE automatically,
7486 		 * so we should update sc->sc_ctrl
7487 		 */
7488 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
7489 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7490 	} else if (((status & STATUS_LU) != 0)
7491 	    && ((rxcw & RXCW_C) != 0)
7492 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
7493 		sc->sc_tbi_linkup = 1;
7494 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
7495 			__func__));
7496 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7497 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
7498 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
7499 	    && ((rxcw & RXCW_C) != 0)) {
7500 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
7501 	} else {
7502 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
7503 			status));
7504 	}
7505 
7506 	return 0;
7507 }
7508 
7509 /*
7510  * wm_tbi_mediainit:
7511  *
7512  *	Initialize media for use on 1000BASE-X devices.
7513  */
7514 static void
7515 wm_tbi_mediainit(struct wm_softc *sc)
7516 {
7517 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7518 	const char *sep = "";
7519 
7520 	if (sc->sc_type < WM_T_82543)
7521 		sc->sc_tipg = TIPG_WM_DFLT;
7522 	else
7523 		sc->sc_tipg = TIPG_LG_DFLT;
7524 
7525 	sc->sc_tbi_anegticks = 5;
7526 
7527 	/* Initialize our media structures */
7528 	sc->sc_mii.mii_ifp = ifp;
7529 
7530 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
7531 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
7532 	    wm_tbi_mediastatus);
7533 
7534 	/*
7535 	 * SWD Pins:
7536 	 *
7537 	 *	0 = Link LED (output)
7538 	 *	1 = Loss Of Signal (input)
7539 	 */
7540 	sc->sc_ctrl |= CTRL_SWDPIO(0);
7541 	sc->sc_ctrl &= ~CTRL_SWDPIO(1);
7542 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
7543 		sc->sc_ctrl &= ~CTRL_LRST;
7544 
7545 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7546 
7547 #define	ADD(ss, mm, dd)							\
7548 do {									\
7549 	aprint_normal("%s%s", sep, ss);					\
7550 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
7551 	sep = ", ";							\
7552 } while (/*CONSTCOND*/0)
7553 
7554 	aprint_normal_dev(sc->sc_dev, "");
7555 
7556 	/* Only 82545 is LX */
7557 	if (sc->sc_type == WM_T_82545) {
7558 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
7559 		ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
7560 	} else {
7561 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
7562 		ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
7563 	}
7564 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
7565 	aprint_normal("\n");
7566 
7567 #undef ADD
7568 
7569 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
7570 }
7571 
7572 /*
7573  * wm_tbi_mediastatus:	[ifmedia interface function]
7574  *
7575  *	Get the current interface media status on a 1000BASE-X device.
7576  */
7577 static void
7578 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
7579 {
7580 	struct wm_softc *sc = ifp->if_softc;
7581 	uint32_t ctrl, status;
7582 
7583 	ifmr->ifm_status = IFM_AVALID;
7584 	ifmr->ifm_active = IFM_ETHER;
7585 
7586 	status = CSR_READ(sc, WMREG_STATUS);
7587 	if ((status & STATUS_LU) == 0) {
7588 		ifmr->ifm_active |= IFM_NONE;
7589 		return;
7590 	}
7591 
7592 	ifmr->ifm_status |= IFM_ACTIVE;
7593 	/* Only 82545 is LX */
7594 	if (sc->sc_type == WM_T_82545)
7595 		ifmr->ifm_active |= IFM_1000_LX;
7596 	else
7597 		ifmr->ifm_active |= IFM_1000_SX;
7598 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
7599 		ifmr->ifm_active |= IFM_FDX;
7600 	else
7601 		ifmr->ifm_active |= IFM_HDX;
7602 	ctrl = CSR_READ(sc, WMREG_CTRL);
7603 	if (ctrl & CTRL_RFCE)
7604 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
7605 	if (ctrl & CTRL_TFCE)
7606 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
7607 }
7608 
7609 /*
7610  * wm_tbi_mediachange:	[ifmedia interface function]
7611  *
7612  *	Set hardware to newly-selected media on a 1000BASE-X device.
7613  */
7614 static int
7615 wm_tbi_mediachange(struct ifnet *ifp)
7616 {
7617 	struct wm_softc *sc = ifp->if_softc;
7618 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7619 	uint32_t status;
7620 	int i;
7621 
7622 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
7623 		return 0;
7624 
7625 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
7626 	    || (sc->sc_type >= WM_T_82575))
7627 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
7628 
7629 	/* XXX power_up_serdes_link_82575() */
7630 
7631 	sc->sc_ctrl &= ~CTRL_LRST;
7632 	sc->sc_txcw = TXCW_ANE;
7633 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
7634 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
7635 	else if (ife->ifm_media & IFM_FDX)
7636 		sc->sc_txcw |= TXCW_FD;
7637 	else
7638 		sc->sc_txcw |= TXCW_HD;
7639 
7640 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
7641 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
7642 
7643 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
7644 		    device_xname(sc->sc_dev), sc->sc_txcw));
7645 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7646 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7647 	CSR_WRITE_FLUSH(sc);
7648 	delay(1000);
7649 
7650 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
7651 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
7652 
7653 	/*
7654 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
7655 	 * optics detect a signal, 0 if they don't.
7656 	 */
7657 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
7658 		/* Have signal; wait for the link to come up. */
7659 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
7660 			delay(10000);
7661 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
7662 				break;
7663 		}
7664 
7665 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
7666 			    device_xname(sc->sc_dev),i));
7667 
7668 		status = CSR_READ(sc, WMREG_STATUS);
7669 		DPRINTF(WM_DEBUG_LINK,
7670 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
7671 			device_xname(sc->sc_dev),status, STATUS_LU));
7672 		if (status & STATUS_LU) {
7673 			/* Link is up. */
7674 			DPRINTF(WM_DEBUG_LINK,
7675 			    ("%s: LINK: set media -> link up %s\n",
7676 			    device_xname(sc->sc_dev),
7677 			    (status & STATUS_FD) ? "FDX" : "HDX"));
7678 
7679 			/*
7680 			 * NOTE: CTRL will update TFCE and RFCE automatically,
7681 			 * so we should update sc->sc_ctrl
7682 			 */
7683 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7684 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7685 			sc->sc_fcrtl &= ~FCRTL_XONE;
7686 			if (status & STATUS_FD)
7687 				sc->sc_tctl |=
7688 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7689 			else
7690 				sc->sc_tctl |=
7691 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7692 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
7693 				sc->sc_fcrtl |= FCRTL_XONE;
7694 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7695 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7696 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
7697 				      sc->sc_fcrtl);
7698 			sc->sc_tbi_linkup = 1;
7699 		} else {
7700 			if (i == WM_LINKUP_TIMEOUT)
7701 				wm_check_for_link(sc);
7702 			/* Link is down. */
7703 			DPRINTF(WM_DEBUG_LINK,
7704 			    ("%s: LINK: set media -> link down\n",
7705 			    device_xname(sc->sc_dev)));
7706 			sc->sc_tbi_linkup = 0;
7707 		}
7708 	} else {
7709 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
7710 		    device_xname(sc->sc_dev)));
7711 		sc->sc_tbi_linkup = 0;
7712 	}
7713 
7714 	wm_tbi_set_linkled(sc);
7715 
7716 	return 0;
7717 }
7718 
7719 /*
7720  * wm_tbi_set_linkled:
7721  *
7722  *	Update the link LED on 1000BASE-X devices.
7723  */
7724 static void
7725 wm_tbi_set_linkled(struct wm_softc *sc)
7726 {
7727 
7728 	if (sc->sc_tbi_linkup)
7729 		sc->sc_ctrl |= CTRL_SWDPIN(0);
7730 	else
7731 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
7732 
7733 	/* 82540 or newer devices are active low */
7734 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
7735 
7736 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7737 }
7738 
7739 /*
7740  * wm_tbi_check_link:
7741  *
7742  *	Check the link on 1000BASE-X devices.
7743  */
7744 static void
7745 wm_tbi_check_link(struct wm_softc *sc)
7746 {
7747 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7748 	uint32_t status;
7749 
7750 	KASSERT(WM_TX_LOCKED(sc));
7751 
7752 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
7753 		sc->sc_tbi_linkup = 1;
7754 		return;
7755 	}
7756 
7757 	status = CSR_READ(sc, WMREG_STATUS);
7758 
7759 	/* XXX is this needed? */
7760 	(void)CSR_READ(sc, WMREG_RXCW);
7761 	(void)CSR_READ(sc, WMREG_CTRL);
7762 
7763 	/* set link status */
7764 	if ((status & STATUS_LU) == 0) {
7765 		DPRINTF(WM_DEBUG_LINK,
7766 		    ("%s: LINK: checklink -> down\n",
7767 			device_xname(sc->sc_dev)));
7768 		sc->sc_tbi_linkup = 0;
7769 	} else if (sc->sc_tbi_linkup == 0) {
7770 		DPRINTF(WM_DEBUG_LINK,
7771 		    ("%s: LINK: checklink -> up %s\n",
7772 			device_xname(sc->sc_dev),
7773 			(status & STATUS_FD) ? "FDX" : "HDX"));
7774 		sc->sc_tbi_linkup = 1;
7775 	}
7776 
7777 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
7778 	    && ((status & STATUS_LU) == 0)) {
7779 		sc->sc_tbi_linkup = 0;
7780 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7781 			/* If the timer expired, retry autonegotiation */
7782 			if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
7783 				DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
7784 				sc->sc_tbi_ticks = 0;
7785 				/*
7786 				 * Reset the link, and let autonegotiation do
7787 				 * its thing
7788 				 */
7789 				sc->sc_ctrl |= CTRL_LRST;
7790 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7791 				CSR_WRITE_FLUSH(sc);
7792 				delay(1000);
7793 				sc->sc_ctrl &= ~CTRL_LRST;
7794 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7795 				CSR_WRITE_FLUSH(sc);
7796 				delay(1000);
7797 				CSR_WRITE(sc, WMREG_TXCW,
7798 				    sc->sc_txcw & ~TXCW_ANE);
7799 				CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7800 			}
7801 		}
7802 	}
7803 
7804 	wm_tbi_set_linkled(sc);
7805 }
7806 
7807 /* SFP related */
7808 
7809 static int
7810 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
7811 {
7812 	uint32_t i2ccmd;
7813 	int i;
7814 
7815 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
7816 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7817 
7818 	/* Poll the ready bit */
7819 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7820 		delay(50);
7821 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7822 		if (i2ccmd & I2CCMD_READY)
7823 			break;
7824 	}
7825 	if ((i2ccmd & I2CCMD_READY) == 0)
7826 		return -1;
7827 	if ((i2ccmd & I2CCMD_ERROR) != 0)
7828 		return -1;
7829 
7830 	*data = i2ccmd & 0x00ff;
7831 
7832 	return 0;
7833 }
7834 
7835 static uint32_t
7836 wm_sfp_get_media_type(struct wm_softc *sc)
7837 {
7838 	uint32_t ctrl_ext;
7839 	uint8_t val = 0;
7840 	int timeout = 3;
7841 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
7842 	int rv = -1;
7843 
7844 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
7845 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
7846 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
7847 	CSR_WRITE_FLUSH(sc);
7848 
7849 	/* Read SFP module data */
7850 	while (timeout) {
7851 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
7852 		if (rv == 0)
7853 			break;
7854 		delay(100*1000); /* XXX too big */
7855 		timeout--;
7856 	}
7857 	if (rv != 0)
7858 		goto out;
7859 	switch (val) {
7860 	case SFF_SFP_ID_SFF:
7861 		aprint_normal_dev(sc->sc_dev,
7862 		    "Module/Connector soldered to board\n");
7863 		break;
7864 	case SFF_SFP_ID_SFP:
7865 		aprint_normal_dev(sc->sc_dev, "SFP\n");
7866 		break;
7867 	case SFF_SFP_ID_UNKNOWN:
7868 		goto out;
7869 	default:
7870 		break;
7871 	}
7872 
7873 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
7874 	if (rv != 0) {
7875 		goto out;
7876 	}
7877 
7878 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
7879 		mediatype = WM_MEDIATYPE_SERDES;
7880 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
7881 		sc->sc_flags |= WM_F_SGMII;
7882 		mediatype = WM_MEDIATYPE_COPPER;
7883 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
7884 		sc->sc_flags |= WM_F_SGMII;
7885 		mediatype = WM_MEDIATYPE_SERDES;
7886 	}
7887 
7888 out:
7889 	/* Restore I2C interface setting */
7890 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
7891 
7892 	return mediatype;
7893 }
7894 /*
7895  * NVM related.
7896  * Microwire, SPI (w/wo EERD) and Flash.
7897  */
7898 
7899 /* Both spi and uwire */
7900 
7901 /*
7902  * wm_eeprom_sendbits:
7903  *
7904  *	Send a series of bits to the EEPROM.
7905  */
7906 static void
7907 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
7908 {
7909 	uint32_t reg;
7910 	int x;
7911 
7912 	reg = CSR_READ(sc, WMREG_EECD);
7913 
7914 	for (x = nbits; x > 0; x--) {
7915 		if (bits & (1U << (x - 1)))
7916 			reg |= EECD_DI;
7917 		else
7918 			reg &= ~EECD_DI;
7919 		CSR_WRITE(sc, WMREG_EECD, reg);
7920 		CSR_WRITE_FLUSH(sc);
7921 		delay(2);
7922 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
7923 		CSR_WRITE_FLUSH(sc);
7924 		delay(2);
7925 		CSR_WRITE(sc, WMREG_EECD, reg);
7926 		CSR_WRITE_FLUSH(sc);
7927 		delay(2);
7928 	}
7929 }
7930 
7931 /*
7932  * wm_eeprom_recvbits:
7933  *
7934  *	Receive a series of bits from the EEPROM.
7935  */
7936 static void
7937 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
7938 {
7939 	uint32_t reg, val;
7940 	int x;
7941 
7942 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
7943 
7944 	val = 0;
7945 	for (x = nbits; x > 0; x--) {
7946 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
7947 		CSR_WRITE_FLUSH(sc);
7948 		delay(2);
7949 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
7950 			val |= (1U << (x - 1));
7951 		CSR_WRITE(sc, WMREG_EECD, reg);
7952 		CSR_WRITE_FLUSH(sc);
7953 		delay(2);
7954 	}
7955 	*valp = val;
7956 }
7957 
7958 /* Microwire */
7959 
7960 /*
7961  * wm_nvm_read_uwire:
7962  *
7963  *	Read a word from the EEPROM using the MicroWire protocol.
7964  */
7965 static int
7966 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
7967 {
7968 	uint32_t reg, val;
7969 	int i;
7970 
7971 	for (i = 0; i < wordcnt; i++) {
7972 		/* Clear SK and DI. */
7973 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
7974 		CSR_WRITE(sc, WMREG_EECD, reg);
7975 
7976 		/*
7977 		 * XXX: workaround for a bug in qemu-0.12.x and prior
7978 		 * and Xen.
7979 		 *
7980 		 * We use this workaround only for 82540 because qemu's
7981 		 * e1000 act as 82540.
7982 		 */
7983 		if (sc->sc_type == WM_T_82540) {
7984 			reg |= EECD_SK;
7985 			CSR_WRITE(sc, WMREG_EECD, reg);
7986 			reg &= ~EECD_SK;
7987 			CSR_WRITE(sc, WMREG_EECD, reg);
7988 			CSR_WRITE_FLUSH(sc);
7989 			delay(2);
7990 		}
7991 		/* XXX: end of workaround */
7992 
7993 		/* Set CHIP SELECT. */
7994 		reg |= EECD_CS;
7995 		CSR_WRITE(sc, WMREG_EECD, reg);
7996 		CSR_WRITE_FLUSH(sc);
7997 		delay(2);
7998 
7999 		/* Shift in the READ command. */
8000 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
8001 
8002 		/* Shift in address. */
8003 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
8004 
8005 		/* Shift out the data. */
8006 		wm_eeprom_recvbits(sc, &val, 16);
8007 		data[i] = val & 0xffff;
8008 
8009 		/* Clear CHIP SELECT. */
8010 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
8011 		CSR_WRITE(sc, WMREG_EECD, reg);
8012 		CSR_WRITE_FLUSH(sc);
8013 		delay(2);
8014 	}
8015 
8016 	return 0;
8017 }
8018 
8019 /* SPI */
8020 
8021 /*
8022  * Set SPI and FLASH related information from the EECD register.
8023  * For 82541 and 82547, the word size is taken from EEPROM.
8024  */
8025 static int
8026 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
8027 {
8028 	int size;
8029 	uint32_t reg;
8030 	uint16_t data;
8031 
8032 	reg = CSR_READ(sc, WMREG_EECD);
8033 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
8034 
8035 	/* Read the size of NVM from EECD by default */
8036 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
8037 	switch (sc->sc_type) {
8038 	case WM_T_82541:
8039 	case WM_T_82541_2:
8040 	case WM_T_82547:
8041 	case WM_T_82547_2:
8042 		/* Set dummy value to access EEPROM */
8043 		sc->sc_nvm_wordsize = 64;
8044 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
8045 		reg = data;
8046 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
8047 		if (size == 0)
8048 			size = 6; /* 64 word size */
8049 		else
8050 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
8051 		break;
8052 	case WM_T_80003:
8053 	case WM_T_82571:
8054 	case WM_T_82572:
8055 	case WM_T_82573: /* SPI case */
8056 	case WM_T_82574: /* SPI case */
8057 	case WM_T_82583: /* SPI case */
8058 		size += NVM_WORD_SIZE_BASE_SHIFT;
8059 		if (size > 14)
8060 			size = 14;
8061 		break;
8062 	case WM_T_82575:
8063 	case WM_T_82576:
8064 	case WM_T_82580:
8065 	case WM_T_I350:
8066 	case WM_T_I354:
8067 	case WM_T_I210:
8068 	case WM_T_I211:
8069 		size += NVM_WORD_SIZE_BASE_SHIFT;
8070 		if (size > 15)
8071 			size = 15;
8072 		break;
8073 	default:
8074 		aprint_error_dev(sc->sc_dev,
8075 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
8076 		return -1;
8077 		break;
8078 	}
8079 
8080 	sc->sc_nvm_wordsize = 1 << size;
8081 
8082 	return 0;
8083 }
8084 
8085 /*
8086  * wm_nvm_ready_spi:
8087  *
8088  *	Wait for a SPI EEPROM to be ready for commands.
8089  */
8090 static int
8091 wm_nvm_ready_spi(struct wm_softc *sc)
8092 {
8093 	uint32_t val;
8094 	int usec;
8095 
8096 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
8097 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
8098 		wm_eeprom_recvbits(sc, &val, 8);
8099 		if ((val & SPI_SR_RDY) == 0)
8100 			break;
8101 	}
8102 	if (usec >= SPI_MAX_RETRIES) {
8103 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
8104 		return 1;
8105 	}
8106 	return 0;
8107 }
8108 
8109 /*
8110  * wm_nvm_read_spi:
8111  *
8112  *	Read a work from the EEPROM using the SPI protocol.
8113  */
8114 static int
8115 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
8116 {
8117 	uint32_t reg, val;
8118 	int i;
8119 	uint8_t opc;
8120 
8121 	/* Clear SK and CS. */
8122 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
8123 	CSR_WRITE(sc, WMREG_EECD, reg);
8124 	CSR_WRITE_FLUSH(sc);
8125 	delay(2);
8126 
8127 	if (wm_nvm_ready_spi(sc))
8128 		return 1;
8129 
8130 	/* Toggle CS to flush commands. */
8131 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
8132 	CSR_WRITE_FLUSH(sc);
8133 	delay(2);
8134 	CSR_WRITE(sc, WMREG_EECD, reg);
8135 	CSR_WRITE_FLUSH(sc);
8136 	delay(2);
8137 
8138 	opc = SPI_OPC_READ;
8139 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
8140 		opc |= SPI_OPC_A8;
8141 
8142 	wm_eeprom_sendbits(sc, opc, 8);
8143 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
8144 
8145 	for (i = 0; i < wordcnt; i++) {
8146 		wm_eeprom_recvbits(sc, &val, 16);
8147 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
8148 	}
8149 
8150 	/* Raise CS and clear SK. */
8151 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
8152 	CSR_WRITE(sc, WMREG_EECD, reg);
8153 	CSR_WRITE_FLUSH(sc);
8154 	delay(2);
8155 
8156 	return 0;
8157 }
8158 
8159 /* Using with EERD */
8160 
8161 static int
8162 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
8163 {
8164 	uint32_t attempts = 100000;
8165 	uint32_t i, reg = 0;
8166 	int32_t done = -1;
8167 
8168 	for (i = 0; i < attempts; i++) {
8169 		reg = CSR_READ(sc, rw);
8170 
8171 		if (reg & EERD_DONE) {
8172 			done = 0;
8173 			break;
8174 		}
8175 		delay(5);
8176 	}
8177 
8178 	return done;
8179 }
8180 
8181 static int
8182 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
8183     uint16_t *data)
8184 {
8185 	int i, eerd = 0;
8186 	int error = 0;
8187 
8188 	for (i = 0; i < wordcnt; i++) {
8189 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
8190 
8191 		CSR_WRITE(sc, WMREG_EERD, eerd);
8192 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
8193 		if (error != 0)
8194 			break;
8195 
8196 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
8197 	}
8198 
8199 	return error;
8200 }
8201 
8202 /* Flash */
8203 
8204 static int
8205 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
8206 {
8207 	uint32_t eecd;
8208 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
8209 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
8210 	uint8_t sig_byte = 0;
8211 
8212 	switch (sc->sc_type) {
8213 	case WM_T_ICH8:
8214 	case WM_T_ICH9:
8215 		eecd = CSR_READ(sc, WMREG_EECD);
8216 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
8217 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
8218 			return 0;
8219 		}
8220 		/* FALLTHROUGH */
8221 	default:
8222 		/* Default to 0 */
8223 		*bank = 0;
8224 
8225 		/* Check bank 0 */
8226 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
8227 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
8228 			*bank = 0;
8229 			return 0;
8230 		}
8231 
8232 		/* Check bank 1 */
8233 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
8234 		    &sig_byte);
8235 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
8236 			*bank = 1;
8237 			return 0;
8238 		}
8239 	}
8240 
8241 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
8242 		device_xname(sc->sc_dev)));
8243 	return -1;
8244 }
8245 
8246 /******************************************************************************
8247  * This function does initial flash setup so that a new read/write/erase cycle
8248  * can be started.
8249  *
8250  * sc - The pointer to the hw structure
8251  ****************************************************************************/
8252 static int32_t
8253 wm_ich8_cycle_init(struct wm_softc *sc)
8254 {
8255 	uint16_t hsfsts;
8256 	int32_t error = 1;
8257 	int32_t i     = 0;
8258 
8259 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8260 
8261 	/* May be check the Flash Des Valid bit in Hw status */
8262 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
8263 		return error;
8264 	}
8265 
8266 	/* Clear FCERR in Hw status by writing 1 */
8267 	/* Clear DAEL in Hw status by writing a 1 */
8268 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
8269 
8270 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8271 
8272 	/*
8273 	 * Either we should have a hardware SPI cycle in progress bit to check
8274 	 * against, in order to start a new cycle or FDONE bit should be
8275 	 * changed in the hardware so that it is 1 after harware reset, which
8276 	 * can then be used as an indication whether a cycle is in progress or
8277 	 * has been completed .. we should also have some software semaphore
8278 	 * mechanism to guard FDONE or the cycle in progress bit so that two
8279 	 * threads access to those bits can be sequentiallized or a way so that
8280 	 * 2 threads dont start the cycle at the same time
8281 	 */
8282 
8283 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
8284 		/*
8285 		 * There is no cycle running at present, so we can start a
8286 		 * cycle
8287 		 */
8288 
8289 		/* Begin by setting Flash Cycle Done. */
8290 		hsfsts |= HSFSTS_DONE;
8291 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8292 		error = 0;
8293 	} else {
8294 		/*
8295 		 * otherwise poll for sometime so the current cycle has a
8296 		 * chance to end before giving up.
8297 		 */
8298 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
8299 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8300 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
8301 				error = 0;
8302 				break;
8303 			}
8304 			delay(1);
8305 		}
8306 		if (error == 0) {
8307 			/*
8308 			 * Successful in waiting for previous cycle to timeout,
8309 			 * now set the Flash Cycle Done.
8310 			 */
8311 			hsfsts |= HSFSTS_DONE;
8312 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8313 		}
8314 	}
8315 	return error;
8316 }
8317 
8318 /******************************************************************************
8319  * This function starts a flash cycle and waits for its completion
8320  *
8321  * sc - The pointer to the hw structure
8322  ****************************************************************************/
8323 static int32_t
8324 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
8325 {
8326 	uint16_t hsflctl;
8327 	uint16_t hsfsts;
8328 	int32_t error = 1;
8329 	uint32_t i = 0;
8330 
8331 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
8332 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
8333 	hsflctl |= HSFCTL_GO;
8334 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
8335 
8336 	/* Wait till FDONE bit is set to 1 */
8337 	do {
8338 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8339 		if (hsfsts & HSFSTS_DONE)
8340 			break;
8341 		delay(1);
8342 		i++;
8343 	} while (i < timeout);
8344 	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
8345 		error = 0;
8346 
8347 	return error;
8348 }
8349 
8350 /******************************************************************************
8351  * Reads a byte or word from the NVM using the ICH8 flash access registers.
8352  *
8353  * sc - The pointer to the hw structure
8354  * index - The index of the byte or word to read.
8355  * size - Size of data to read, 1=byte 2=word
8356  * data - Pointer to the word to store the value read.
8357  *****************************************************************************/
8358 static int32_t
8359 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
8360     uint32_t size, uint16_t *data)
8361 {
8362 	uint16_t hsfsts;
8363 	uint16_t hsflctl;
8364 	uint32_t flash_linear_address;
8365 	uint32_t flash_data = 0;
8366 	int32_t error = 1;
8367 	int32_t count = 0;
8368 
8369 	if (size < 1  || size > 2 || data == 0x0 ||
8370 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
8371 		return error;
8372 
8373 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
8374 	    sc->sc_ich8_flash_base;
8375 
8376 	do {
8377 		delay(1);
8378 		/* Steps */
8379 		error = wm_ich8_cycle_init(sc);
8380 		if (error)
8381 			break;
8382 
8383 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
8384 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
8385 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
8386 		    & HSFCTL_BCOUNT_MASK;
8387 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
8388 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
8389 
8390 		/*
8391 		 * Write the last 24 bits of index into Flash Linear address
8392 		 * field in Flash Address
8393 		 */
8394 		/* TODO: TBD maybe check the index against the size of flash */
8395 
8396 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
8397 
8398 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
8399 
8400 		/*
8401 		 * Check if FCERR is set to 1, if set to 1, clear it and try
8402 		 * the whole sequence a few more times, else read in (shift in)
8403 		 * the Flash Data0, the order is least significant byte first
8404 		 * msb to lsb
8405 		 */
8406 		if (error == 0) {
8407 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
8408 			if (size == 1)
8409 				*data = (uint8_t)(flash_data & 0x000000FF);
8410 			else if (size == 2)
8411 				*data = (uint16_t)(flash_data & 0x0000FFFF);
8412 			break;
8413 		} else {
8414 			/*
8415 			 * If we've gotten here, then things are probably
8416 			 * completely hosed, but if the error condition is
8417 			 * detected, it won't hurt to give it another try...
8418 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
8419 			 */
8420 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8421 			if (hsfsts & HSFSTS_ERR) {
8422 				/* Repeat for some time before giving up. */
8423 				continue;
8424 			} else if ((hsfsts & HSFSTS_DONE) == 0)
8425 				break;
8426 		}
8427 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
8428 
8429 	return error;
8430 }
8431 
8432 /******************************************************************************
8433  * Reads a single byte from the NVM using the ICH8 flash access registers.
8434  *
8435  * sc - pointer to wm_hw structure
8436  * index - The index of the byte to read.
8437  * data - Pointer to a byte to store the value read.
8438  *****************************************************************************/
8439 static int32_t
8440 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
8441 {
8442 	int32_t status;
8443 	uint16_t word = 0;
8444 
8445 	status = wm_read_ich8_data(sc, index, 1, &word);
8446 	if (status == 0)
8447 		*data = (uint8_t)word;
8448 	else
8449 		*data = 0;
8450 
8451 	return status;
8452 }
8453 
8454 /******************************************************************************
8455  * Reads a word from the NVM using the ICH8 flash access registers.
8456  *
8457  * sc - pointer to wm_hw structure
8458  * index - The starting byte index of the word to read.
8459  * data - Pointer to a word to store the value read.
8460  *****************************************************************************/
8461 static int32_t
8462 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
8463 {
8464 	int32_t status;
8465 
8466 	status = wm_read_ich8_data(sc, index, 2, data);
8467 	return status;
8468 }
8469 
8470 /******************************************************************************
8471  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
8472  * register.
8473  *
8474  * sc - Struct containing variables accessed by shared code
8475  * offset - offset of word in the EEPROM to read
8476  * data - word read from the EEPROM
8477  * words - number of words to read
8478  *****************************************************************************/
8479 static int
8480 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
8481 {
8482 	int32_t  error = 0;
8483 	uint32_t flash_bank = 0;
8484 	uint32_t act_offset = 0;
8485 	uint32_t bank_offset = 0;
8486 	uint16_t word = 0;
8487 	uint16_t i = 0;
8488 
8489 	/*
8490 	 * We need to know which is the valid flash bank.  In the event
8491 	 * that we didn't allocate eeprom_shadow_ram, we may not be
8492 	 * managing flash_bank.  So it cannot be trusted and needs
8493 	 * to be updated with each read.
8494 	 */
8495 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
8496 	if (error) {
8497 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
8498 			device_xname(sc->sc_dev)));
8499 		flash_bank = 0;
8500 	}
8501 
8502 	/*
8503 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
8504 	 * size
8505 	 */
8506 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
8507 
8508 	error = wm_get_swfwhw_semaphore(sc);
8509 	if (error) {
8510 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8511 		    __func__);
8512 		return error;
8513 	}
8514 
8515 	for (i = 0; i < words; i++) {
8516 		/* The NVM part needs a byte offset, hence * 2 */
8517 		act_offset = bank_offset + ((offset + i) * 2);
8518 		error = wm_read_ich8_word(sc, act_offset, &word);
8519 		if (error) {
8520 			aprint_error_dev(sc->sc_dev,
8521 			    "%s: failed to read NVM\n", __func__);
8522 			break;
8523 		}
8524 		data[i] = word;
8525 	}
8526 
8527 	wm_put_swfwhw_semaphore(sc);
8528 	return error;
8529 }
8530 
8531 /* Lock, detecting NVM type, validate checksum and read */
8532 
8533 /*
8534  * wm_nvm_acquire:
8535  *
8536  *	Perform the EEPROM handshake required on some chips.
8537  */
8538 static int
8539 wm_nvm_acquire(struct wm_softc *sc)
8540 {
8541 	uint32_t reg;
8542 	int x;
8543 	int ret = 0;
8544 
8545 	/* always success */
8546 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
8547 		return 0;
8548 
8549 	if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8550 		ret = wm_get_swfwhw_semaphore(sc);
8551 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
8552 		/* This will also do wm_get_swsm_semaphore() if needed */
8553 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
8554 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
8555 		ret = wm_get_swsm_semaphore(sc);
8556 	}
8557 
8558 	if (ret) {
8559 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8560 			__func__);
8561 		return 1;
8562 	}
8563 
8564 	if (sc->sc_flags & WM_F_LOCK_EECD) {
8565 		reg = CSR_READ(sc, WMREG_EECD);
8566 
8567 		/* Request EEPROM access. */
8568 		reg |= EECD_EE_REQ;
8569 		CSR_WRITE(sc, WMREG_EECD, reg);
8570 
8571 		/* ..and wait for it to be granted. */
8572 		for (x = 0; x < 1000; x++) {
8573 			reg = CSR_READ(sc, WMREG_EECD);
8574 			if (reg & EECD_EE_GNT)
8575 				break;
8576 			delay(5);
8577 		}
8578 		if ((reg & EECD_EE_GNT) == 0) {
8579 			aprint_error_dev(sc->sc_dev,
8580 			    "could not acquire EEPROM GNT\n");
8581 			reg &= ~EECD_EE_REQ;
8582 			CSR_WRITE(sc, WMREG_EECD, reg);
8583 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8584 				wm_put_swfwhw_semaphore(sc);
8585 			if (sc->sc_flags & WM_F_LOCK_SWFW)
8586 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
8587 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
8588 				wm_put_swsm_semaphore(sc);
8589 			return 1;
8590 		}
8591 	}
8592 
8593 	return 0;
8594 }
8595 
8596 /*
8597  * wm_nvm_release:
8598  *
8599  *	Release the EEPROM mutex.
8600  */
8601 static void
8602 wm_nvm_release(struct wm_softc *sc)
8603 {
8604 	uint32_t reg;
8605 
8606 	/* always success */
8607 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
8608 		return;
8609 
8610 	if (sc->sc_flags & WM_F_LOCK_EECD) {
8611 		reg = CSR_READ(sc, WMREG_EECD);
8612 		reg &= ~EECD_EE_REQ;
8613 		CSR_WRITE(sc, WMREG_EECD, reg);
8614 	}
8615 
8616 	if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8617 		wm_put_swfwhw_semaphore(sc);
8618 	if (sc->sc_flags & WM_F_LOCK_SWFW)
8619 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
8620 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
8621 		wm_put_swsm_semaphore(sc);
8622 }
8623 
8624 static int
8625 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
8626 {
8627 	uint32_t eecd = 0;
8628 
8629 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
8630 	    || sc->sc_type == WM_T_82583) {
8631 		eecd = CSR_READ(sc, WMREG_EECD);
8632 
8633 		/* Isolate bits 15 & 16 */
8634 		eecd = ((eecd >> 15) & 0x03);
8635 
8636 		/* If both bits are set, device is Flash type */
8637 		if (eecd == 0x03)
8638 			return 0;
8639 	}
8640 	return 1;
8641 }
8642 
8643 /*
8644  * wm_nvm_validate_checksum
8645  *
8646  * The checksum is defined as the sum of the first 64 (16 bit) words.
8647  */
8648 static int
8649 wm_nvm_validate_checksum(struct wm_softc *sc)
8650 {
8651 	uint16_t checksum;
8652 	uint16_t eeprom_data;
8653 #ifdef WM_DEBUG
8654 	uint16_t csum_wordaddr, valid_checksum;
8655 #endif
8656 	int i;
8657 
8658 	checksum = 0;
8659 
8660 	/* Don't check for I211 */
8661 	if (sc->sc_type == WM_T_I211)
8662 		return 0;
8663 
8664 #ifdef WM_DEBUG
8665 	if (sc->sc_type == WM_T_PCH_LPT) {
8666 		csum_wordaddr = NVM_OFF_COMPAT;
8667 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
8668 	} else {
8669 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
8670 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
8671 	}
8672 
8673 	/* Dump EEPROM image for debug */
8674 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8675 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8676 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
8677 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
8678 		if ((eeprom_data & valid_checksum) == 0) {
8679 			DPRINTF(WM_DEBUG_NVM,
8680 			    ("%s: NVM need to be updated (%04x != %04x)\n",
8681 				device_xname(sc->sc_dev), eeprom_data,
8682 				    valid_checksum));
8683 		}
8684 	}
8685 
8686 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
8687 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
8688 		for (i = 0; i < NVM_SIZE; i++) {
8689 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
8690 				printf("XXXX ");
8691 			else
8692 				printf("%04hx ", eeprom_data);
8693 			if (i % 8 == 7)
8694 				printf("\n");
8695 		}
8696 	}
8697 
8698 #endif /* WM_DEBUG */
8699 
8700 	for (i = 0; i < NVM_SIZE; i++) {
8701 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
8702 			return 1;
8703 		checksum += eeprom_data;
8704 	}
8705 
8706 	if (checksum != (uint16_t) NVM_CHECKSUM) {
8707 #ifdef WM_DEBUG
8708 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
8709 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
8710 #endif
8711 	}
8712 
8713 	return 0;
8714 }
8715 
8716 /*
8717  * wm_nvm_read:
8718  *
8719  *	Read data from the serial EEPROM.
8720  */
8721 static int
8722 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
8723 {
8724 	int rv;
8725 
8726 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
8727 		return 1;
8728 
8729 	if (wm_nvm_acquire(sc))
8730 		return 1;
8731 
8732 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8733 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8734 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
8735 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
8736 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
8737 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
8738 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
8739 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
8740 	else
8741 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
8742 
8743 	wm_nvm_release(sc);
8744 	return rv;
8745 }
8746 
8747 /*
8748  * Hardware semaphores.
8749  * Very complexed...
8750  */
8751 
8752 static int
8753 wm_get_swsm_semaphore(struct wm_softc *sc)
8754 {
8755 	int32_t timeout;
8756 	uint32_t swsm;
8757 
8758 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
8759 		/* Get the SW semaphore. */
8760 		timeout = sc->sc_nvm_wordsize + 1;
8761 		while (timeout) {
8762 			swsm = CSR_READ(sc, WMREG_SWSM);
8763 
8764 			if ((swsm & SWSM_SMBI) == 0)
8765 				break;
8766 
8767 			delay(50);
8768 			timeout--;
8769 		}
8770 
8771 		if (timeout == 0) {
8772 			aprint_error_dev(sc->sc_dev,
8773 			    "could not acquire SWSM SMBI\n");
8774 			return 1;
8775 		}
8776 	}
8777 
8778 	/* Get the FW semaphore. */
8779 	timeout = sc->sc_nvm_wordsize + 1;
8780 	while (timeout) {
8781 		swsm = CSR_READ(sc, WMREG_SWSM);
8782 		swsm |= SWSM_SWESMBI;
8783 		CSR_WRITE(sc, WMREG_SWSM, swsm);
8784 		/* If we managed to set the bit we got the semaphore. */
8785 		swsm = CSR_READ(sc, WMREG_SWSM);
8786 		if (swsm & SWSM_SWESMBI)
8787 			break;
8788 
8789 		delay(50);
8790 		timeout--;
8791 	}
8792 
8793 	if (timeout == 0) {
8794 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
8795 		/* Release semaphores */
8796 		wm_put_swsm_semaphore(sc);
8797 		return 1;
8798 	}
8799 	return 0;
8800 }
8801 
8802 static void
8803 wm_put_swsm_semaphore(struct wm_softc *sc)
8804 {
8805 	uint32_t swsm;
8806 
8807 	swsm = CSR_READ(sc, WMREG_SWSM);
8808 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
8809 	CSR_WRITE(sc, WMREG_SWSM, swsm);
8810 }
8811 
8812 static int
8813 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
8814 {
8815 	uint32_t swfw_sync;
8816 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
8817 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
8818 	int timeout = 200;
8819 
8820 	for (timeout = 0; timeout < 200; timeout++) {
8821 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
8822 			if (wm_get_swsm_semaphore(sc)) {
8823 				aprint_error_dev(sc->sc_dev,
8824 				    "%s: failed to get semaphore\n",
8825 				    __func__);
8826 				return 1;
8827 			}
8828 		}
8829 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
8830 		if ((swfw_sync & (swmask | fwmask)) == 0) {
8831 			swfw_sync |= swmask;
8832 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
8833 			if (sc->sc_flags & WM_F_LOCK_SWSM)
8834 				wm_put_swsm_semaphore(sc);
8835 			return 0;
8836 		}
8837 		if (sc->sc_flags & WM_F_LOCK_SWSM)
8838 			wm_put_swsm_semaphore(sc);
8839 		delay(5000);
8840 	}
8841 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
8842 	    device_xname(sc->sc_dev), mask, swfw_sync);
8843 	return 1;
8844 }
8845 
8846 static void
8847 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
8848 {
8849 	uint32_t swfw_sync;
8850 
8851 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
8852 		while (wm_get_swsm_semaphore(sc) != 0)
8853 			continue;
8854 	}
8855 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
8856 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
8857 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
8858 	if (sc->sc_flags & WM_F_LOCK_SWSM)
8859 		wm_put_swsm_semaphore(sc);
8860 }
8861 
8862 static int
8863 wm_get_swfwhw_semaphore(struct wm_softc *sc)
8864 {
8865 	uint32_t ext_ctrl;
8866 	int timeout = 200;
8867 
8868 	for (timeout = 0; timeout < 200; timeout++) {
8869 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8870 		ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
8871 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
8872 
8873 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8874 		if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
8875 			return 0;
8876 		delay(5000);
8877 	}
8878 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
8879 	    device_xname(sc->sc_dev), ext_ctrl);
8880 	return 1;
8881 }
8882 
8883 static void
8884 wm_put_swfwhw_semaphore(struct wm_softc *sc)
8885 {
8886 	uint32_t ext_ctrl;
8887 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8888 	ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
8889 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
8890 }
8891 
8892 static int
8893 wm_get_hw_semaphore_82573(struct wm_softc *sc)
8894 {
8895 	int i = 0;
8896 	uint32_t reg;
8897 
8898 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8899 	do {
8900 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
8901 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
8902 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8903 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
8904 			break;
8905 		delay(2*1000);
8906 		i++;
8907 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
8908 
8909 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
8910 		wm_put_hw_semaphore_82573(sc);
8911 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
8912 		    device_xname(sc->sc_dev));
8913 		return -1;
8914 	}
8915 
8916 	return 0;
8917 }
8918 
8919 static void
8920 wm_put_hw_semaphore_82573(struct wm_softc *sc)
8921 {
8922 	uint32_t reg;
8923 
8924 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8925 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
8926 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
8927 }
8928 
8929 /*
8930  * Management mode and power management related subroutines.
8931  * BMC, AMT, suspend/resume and EEE.
8932  */
8933 
8934 static int
8935 wm_check_mng_mode(struct wm_softc *sc)
8936 {
8937 	int rv;
8938 
8939 	switch (sc->sc_type) {
8940 	case WM_T_ICH8:
8941 	case WM_T_ICH9:
8942 	case WM_T_ICH10:
8943 	case WM_T_PCH:
8944 	case WM_T_PCH2:
8945 	case WM_T_PCH_LPT:
8946 		rv = wm_check_mng_mode_ich8lan(sc);
8947 		break;
8948 	case WM_T_82574:
8949 	case WM_T_82583:
8950 		rv = wm_check_mng_mode_82574(sc);
8951 		break;
8952 	case WM_T_82571:
8953 	case WM_T_82572:
8954 	case WM_T_82573:
8955 	case WM_T_80003:
8956 		rv = wm_check_mng_mode_generic(sc);
8957 		break;
8958 	default:
8959 		/* noting to do */
8960 		rv = 0;
8961 		break;
8962 	}
8963 
8964 	return rv;
8965 }
8966 
8967 static int
8968 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
8969 {
8970 	uint32_t fwsm;
8971 
8972 	fwsm = CSR_READ(sc, WMREG_FWSM);
8973 
8974 	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
8975 		return 1;
8976 
8977 	return 0;
8978 }
8979 
8980 static int
8981 wm_check_mng_mode_82574(struct wm_softc *sc)
8982 {
8983 	uint16_t data;
8984 
8985 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
8986 
8987 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
8988 		return 1;
8989 
8990 	return 0;
8991 }
8992 
8993 static int
8994 wm_check_mng_mode_generic(struct wm_softc *sc)
8995 {
8996 	uint32_t fwsm;
8997 
8998 	fwsm = CSR_READ(sc, WMREG_FWSM);
8999 
9000 	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
9001 		return 1;
9002 
9003 	return 0;
9004 }
9005 
9006 static int
9007 wm_enable_mng_pass_thru(struct wm_softc *sc)
9008 {
9009 	uint32_t manc, fwsm, factps;
9010 
9011 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
9012 		return 0;
9013 
9014 	manc = CSR_READ(sc, WMREG_MANC);
9015 
9016 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
9017 		device_xname(sc->sc_dev), manc));
9018 	if ((manc & MANC_RECV_TCO_EN) == 0)
9019 		return 0;
9020 
9021 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
9022 		fwsm = CSR_READ(sc, WMREG_FWSM);
9023 		factps = CSR_READ(sc, WMREG_FACTPS);
9024 		if (((factps & FACTPS_MNGCG) == 0)
9025 		    && ((fwsm & FWSM_MODE_MASK)
9026 			== (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
9027 			return 1;
9028 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
9029 		uint16_t data;
9030 
9031 		factps = CSR_READ(sc, WMREG_FACTPS);
9032 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
9033 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
9034 			device_xname(sc->sc_dev), factps, data));
9035 		if (((factps & FACTPS_MNGCG) == 0)
9036 		    && ((data & NVM_CFG2_MNGM_MASK)
9037 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
9038 			return 1;
9039 	} else if (((manc & MANC_SMBUS_EN) != 0)
9040 	    && ((manc & MANC_ASF_EN) == 0))
9041 		return 1;
9042 
9043 	return 0;
9044 }
9045 
9046 static int
9047 wm_check_reset_block(struct wm_softc *sc)
9048 {
9049 	uint32_t reg;
9050 
9051 	switch (sc->sc_type) {
9052 	case WM_T_ICH8:
9053 	case WM_T_ICH9:
9054 	case WM_T_ICH10:
9055 	case WM_T_PCH:
9056 	case WM_T_PCH2:
9057 	case WM_T_PCH_LPT:
9058 		reg = CSR_READ(sc, WMREG_FWSM);
9059 		if ((reg & FWSM_RSPCIPHY) != 0)
9060 			return 0;
9061 		else
9062 			return -1;
9063 		break;
9064 	case WM_T_82571:
9065 	case WM_T_82572:
9066 	case WM_T_82573:
9067 	case WM_T_82574:
9068 	case WM_T_82583:
9069 	case WM_T_80003:
9070 		reg = CSR_READ(sc, WMREG_MANC);
9071 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
9072 			return -1;
9073 		else
9074 			return 0;
9075 		break;
9076 	default:
9077 		/* no problem */
9078 		break;
9079 	}
9080 
9081 	return 0;
9082 }
9083 
9084 static void
9085 wm_get_hw_control(struct wm_softc *sc)
9086 {
9087 	uint32_t reg;
9088 
9089 	switch (sc->sc_type) {
9090 	case WM_T_82573:
9091 		reg = CSR_READ(sc, WMREG_SWSM);
9092 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
9093 		break;
9094 	case WM_T_82571:
9095 	case WM_T_82572:
9096 	case WM_T_82574:
9097 	case WM_T_82583:
9098 	case WM_T_80003:
9099 	case WM_T_ICH8:
9100 	case WM_T_ICH9:
9101 	case WM_T_ICH10:
9102 	case WM_T_PCH:
9103 	case WM_T_PCH2:
9104 	case WM_T_PCH_LPT:
9105 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
9106 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
9107 		break;
9108 	default:
9109 		break;
9110 	}
9111 }
9112 
9113 static void
9114 wm_release_hw_control(struct wm_softc *sc)
9115 {
9116 	uint32_t reg;
9117 
9118 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
9119 		return;
9120 
9121 	if (sc->sc_type == WM_T_82573) {
9122 		reg = CSR_READ(sc, WMREG_SWSM);
9123 		reg &= ~SWSM_DRV_LOAD;
9124 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
9125 	} else {
9126 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
9127 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
9128 	}
9129 }
9130 
9131 static void
9132 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
9133 {
9134 	uint32_t reg;
9135 
9136 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
9137 
9138 	if (on != 0)
9139 		reg |= EXTCNFCTR_GATE_PHY_CFG;
9140 	else
9141 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
9142 
9143 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
9144 }
9145 
9146 static void
9147 wm_smbustopci(struct wm_softc *sc)
9148 {
9149 	uint32_t fwsm;
9150 
9151 	fwsm = CSR_READ(sc, WMREG_FWSM);
9152 	if (((fwsm & FWSM_FW_VALID) == 0)
9153 	    && ((wm_check_reset_block(sc) == 0))) {
9154 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
9155 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
9156 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9157 		CSR_WRITE_FLUSH(sc);
9158 		delay(10);
9159 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
9160 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9161 		CSR_WRITE_FLUSH(sc);
9162 		delay(50*1000);
9163 
9164 		/*
9165 		 * Gate automatic PHY configuration by hardware on non-managed
9166 		 * 82579
9167 		 */
9168 		if (sc->sc_type == WM_T_PCH2)
9169 			wm_gate_hw_phy_config_ich8lan(sc, 1);
9170 	}
9171 }
9172 
9173 static void
9174 wm_init_manageability(struct wm_softc *sc)
9175 {
9176 
9177 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
9178 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
9179 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
9180 
9181 		/* Disable hardware interception of ARP */
9182 		manc &= ~MANC_ARP_EN;
9183 
9184 		/* Enable receiving management packets to the host */
9185 		if (sc->sc_type >= WM_T_82571) {
9186 			manc |= MANC_EN_MNG2HOST;
9187 			manc2h |= MANC2H_PORT_623| MANC2H_PORT_624;
9188 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
9189 
9190 		}
9191 
9192 		CSR_WRITE(sc, WMREG_MANC, manc);
9193 	}
9194 }
9195 
9196 static void
9197 wm_release_manageability(struct wm_softc *sc)
9198 {
9199 
9200 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
9201 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
9202 
9203 		manc |= MANC_ARP_EN;
9204 		if (sc->sc_type >= WM_T_82571)
9205 			manc &= ~MANC_EN_MNG2HOST;
9206 
9207 		CSR_WRITE(sc, WMREG_MANC, manc);
9208 	}
9209 }
9210 
9211 static void
9212 wm_get_wakeup(struct wm_softc *sc)
9213 {
9214 
9215 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
9216 	switch (sc->sc_type) {
9217 	case WM_T_82573:
9218 	case WM_T_82583:
9219 		sc->sc_flags |= WM_F_HAS_AMT;
9220 		/* FALLTHROUGH */
9221 	case WM_T_80003:
9222 	case WM_T_82541:
9223 	case WM_T_82547:
9224 	case WM_T_82571:
9225 	case WM_T_82572:
9226 	case WM_T_82574:
9227 	case WM_T_82575:
9228 	case WM_T_82576:
9229 	case WM_T_82580:
9230 	case WM_T_I350:
9231 	case WM_T_I354:
9232 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
9233 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
9234 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
9235 		break;
9236 	case WM_T_ICH8:
9237 	case WM_T_ICH9:
9238 	case WM_T_ICH10:
9239 	case WM_T_PCH:
9240 	case WM_T_PCH2:
9241 	case WM_T_PCH_LPT:
9242 		sc->sc_flags |= WM_F_HAS_AMT;
9243 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
9244 		break;
9245 	default:
9246 		break;
9247 	}
9248 
9249 	/* 1: HAS_MANAGE */
9250 	if (wm_enable_mng_pass_thru(sc) != 0)
9251 		sc->sc_flags |= WM_F_HAS_MANAGE;
9252 
9253 #ifdef WM_DEBUG
9254 	printf("\n");
9255 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
9256 		printf("HAS_AMT,");
9257 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
9258 		printf("ARC_SUBSYS_VALID,");
9259 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
9260 		printf("ASF_FIRMWARE_PRES,");
9261 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
9262 		printf("HAS_MANAGE,");
9263 	printf("\n");
9264 #endif
9265 	/*
9266 	 * Note that the WOL flags is set after the resetting of the eeprom
9267 	 * stuff
9268 	 */
9269 }
9270 
9271 #ifdef WM_WOL
9272 /* WOL in the newer chipset interfaces (pchlan) */
9273 static void
9274 wm_enable_phy_wakeup(struct wm_softc *sc)
9275 {
9276 #if 0
9277 	uint16_t preg;
9278 
9279 	/* Copy MAC RARs to PHY RARs */
9280 
9281 	/* Copy MAC MTA to PHY MTA */
9282 
9283 	/* Configure PHY Rx Control register */
9284 
9285 	/* Enable PHY wakeup in MAC register */
9286 
9287 	/* Configure and enable PHY wakeup in PHY registers */
9288 
9289 	/* Activate PHY wakeup */
9290 
9291 	/* XXX */
9292 #endif
9293 }
9294 
9295 /* Power down workaround on D3 */
9296 static void
9297 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
9298 {
9299 	uint32_t reg;
9300 	int i;
9301 
9302 	for (i = 0; i < 2; i++) {
9303 		/* Disable link */
9304 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
9305 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
9306 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9307 
9308 		/*
9309 		 * Call gig speed drop workaround on Gig disable before
9310 		 * accessing any PHY registers
9311 		 */
9312 		if (sc->sc_type == WM_T_ICH8)
9313 			wm_gig_downshift_workaround_ich8lan(sc);
9314 
9315 		/* Write VR power-down enable */
9316 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
9317 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
9318 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
9319 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
9320 
9321 		/* Read it back and test */
9322 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
9323 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
9324 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
9325 			break;
9326 
9327 		/* Issue PHY reset and repeat at most one more time */
9328 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
9329 	}
9330 }
9331 
9332 static void
9333 wm_enable_wakeup(struct wm_softc *sc)
9334 {
9335 	uint32_t reg, pmreg;
9336 	pcireg_t pmode;
9337 
9338 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
9339 		&pmreg, NULL) == 0)
9340 		return;
9341 
9342 	/* Advertise the wakeup capability */
9343 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
9344 	    | CTRL_SWDPIN(3));
9345 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
9346 
9347 	/* ICH workaround */
9348 	switch (sc->sc_type) {
9349 	case WM_T_ICH8:
9350 	case WM_T_ICH9:
9351 	case WM_T_ICH10:
9352 	case WM_T_PCH:
9353 	case WM_T_PCH2:
9354 	case WM_T_PCH_LPT:
9355 		/* Disable gig during WOL */
9356 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
9357 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
9358 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9359 		if (sc->sc_type == WM_T_PCH)
9360 			wm_gmii_reset(sc);
9361 
9362 		/* Power down workaround */
9363 		if (sc->sc_phytype == WMPHY_82577) {
9364 			struct mii_softc *child;
9365 
9366 			/* Assume that the PHY is copper */
9367 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
9368 			if (child->mii_mpd_rev <= 2)
9369 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
9370 				    (768 << 5) | 25, 0x0444); /* magic num */
9371 		}
9372 		break;
9373 	default:
9374 		break;
9375 	}
9376 
9377 	/* Keep the laser running on fiber adapters */
9378 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
9379 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
9380 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
9381 		reg |= CTRL_EXT_SWDPIN(3);
9382 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9383 	}
9384 
9385 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
9386 #if 0	/* for the multicast packet */
9387 	reg |= WUFC_MC;
9388 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
9389 #endif
9390 
9391 	if (sc->sc_type == WM_T_PCH) {
9392 		wm_enable_phy_wakeup(sc);
9393 	} else {
9394 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
9395 		CSR_WRITE(sc, WMREG_WUFC, reg);
9396 	}
9397 
9398 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
9399 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
9400 		|| (sc->sc_type == WM_T_PCH2))
9401 		    && (sc->sc_phytype == WMPHY_IGP_3))
9402 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
9403 
9404 	/* Request PME */
9405 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
9406 #if 0
9407 	/* Disable WOL */
9408 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
9409 #else
9410 	/* For WOL */
9411 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
9412 #endif
9413 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
9414 }
9415 #endif /* WM_WOL */
9416 
9417 /* EEE */
9418 
9419 static void
9420 wm_set_eee_i350(struct wm_softc *sc)
9421 {
9422 	uint32_t ipcnfg, eeer;
9423 
9424 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
9425 	eeer = CSR_READ(sc, WMREG_EEER);
9426 
9427 	if ((sc->sc_flags & WM_F_EEE) != 0) {
9428 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9429 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
9430 		    | EEER_LPI_FC);
9431 	} else {
9432 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9433 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
9434 		    | EEER_LPI_FC);
9435 	}
9436 
9437 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
9438 	CSR_WRITE(sc, WMREG_EEER, eeer);
9439 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
9440 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
9441 }
9442 
9443 /*
9444  * Workarounds (mainly PHY related).
9445  * Basically, PHY's workarounds are in the PHY drivers.
9446  */
9447 
9448 /* Work-around for 82566 Kumeran PCS lock loss */
9449 static void
9450 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
9451 {
9452 	int miistatus, active, i;
9453 	int reg;
9454 
9455 	miistatus = sc->sc_mii.mii_media_status;
9456 
9457 	/* If the link is not up, do nothing */
9458 	if ((miistatus & IFM_ACTIVE) != 0)
9459 		return;
9460 
9461 	active = sc->sc_mii.mii_media_active;
9462 
9463 	/* Nothing to do if the link is other than 1Gbps */
9464 	if (IFM_SUBTYPE(active) != IFM_1000_T)
9465 		return;
9466 
9467 	for (i = 0; i < 10; i++) {
9468 		/* read twice */
9469 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
9470 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
9471 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) != 0)
9472 			goto out;	/* GOOD! */
9473 
9474 		/* Reset the PHY */
9475 		wm_gmii_reset(sc);
9476 		delay(5*1000);
9477 	}
9478 
9479 	/* Disable GigE link negotiation */
9480 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
9481 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
9482 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9483 
9484 	/*
9485 	 * Call gig speed drop workaround on Gig disable before accessing
9486 	 * any PHY registers.
9487 	 */
9488 	wm_gig_downshift_workaround_ich8lan(sc);
9489 
9490 out:
9491 	return;
9492 }
9493 
9494 /* WOL from S5 stops working */
9495 static void
9496 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
9497 {
9498 	uint16_t kmrn_reg;
9499 
9500 	/* Only for igp3 */
9501 	if (sc->sc_phytype == WMPHY_IGP_3) {
9502 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
9503 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
9504 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9505 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
9506 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9507 	}
9508 }
9509 
9510 /*
9511  * Workaround for pch's PHYs
9512  * XXX should be moved to new PHY driver?
9513  */
9514 static void
9515 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
9516 {
9517 	if (sc->sc_phytype == WMPHY_82577)
9518 		wm_set_mdio_slow_mode_hv(sc);
9519 
9520 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
9521 
9522 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
9523 
9524 	/* 82578 */
9525 	if (sc->sc_phytype == WMPHY_82578) {
9526 		/* PCH rev. < 3 */
9527 		if (sc->sc_rev < 3) {
9528 			/* XXX 6 bit shift? Why? Is it page2? */
9529 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
9530 			    0x66c0);
9531 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
9532 			    0xffff);
9533 		}
9534 
9535 		/* XXX phy rev. < 2 */
9536 	}
9537 
9538 	/* Select page 0 */
9539 
9540 	/* XXX acquire semaphore */
9541 	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
9542 	/* XXX release semaphore */
9543 
9544 	/*
9545 	 * Configure the K1 Si workaround during phy reset assuming there is
9546 	 * link so that it disables K1 if link is in 1Gbps.
9547 	 */
9548 	wm_k1_gig_workaround_hv(sc, 1);
9549 }
9550 
9551 static void
9552 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
9553 {
9554 
9555 	wm_set_mdio_slow_mode_hv(sc);
9556 }
9557 
9558 static void
9559 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
9560 {
9561 	int k1_enable = sc->sc_nvm_k1_enabled;
9562 
9563 	/* XXX acquire semaphore */
9564 
9565 	if (link) {
9566 		k1_enable = 0;
9567 
9568 		/* Link stall fix for link up */
9569 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
9570 	} else {
9571 		/* Link stall fix for link down */
9572 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
9573 	}
9574 
9575 	wm_configure_k1_ich8lan(sc, k1_enable);
9576 
9577 	/* XXX release semaphore */
9578 }
9579 
9580 static void
9581 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
9582 {
9583 	uint32_t reg;
9584 
9585 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
9586 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
9587 	    reg | HV_KMRN_MDIO_SLOW);
9588 }
9589 
9590 static void
9591 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
9592 {
9593 	uint32_t ctrl, ctrl_ext, tmp;
9594 	uint16_t kmrn_reg;
9595 
9596 	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
9597 
9598 	if (k1_enable)
9599 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
9600 	else
9601 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
9602 
9603 	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
9604 
9605 	delay(20);
9606 
9607 	ctrl = CSR_READ(sc, WMREG_CTRL);
9608 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9609 
9610 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
9611 	tmp |= CTRL_FRCSPD;
9612 
9613 	CSR_WRITE(sc, WMREG_CTRL, tmp);
9614 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
9615 	CSR_WRITE_FLUSH(sc);
9616 	delay(20);
9617 
9618 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
9619 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
9620 	CSR_WRITE_FLUSH(sc);
9621 	delay(20);
9622 }
9623 
9624 /* special case - for 82575 - need to do manual init ... */
9625 static void
9626 wm_reset_init_script_82575(struct wm_softc *sc)
9627 {
9628 	/*
9629 	 * remark: this is untested code - we have no board without EEPROM
9630 	 *  same setup as mentioned int the FreeBSD driver for the i82575
9631 	 */
9632 
9633 	/* SerDes configuration via SERDESCTRL */
9634 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
9635 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
9636 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
9637 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
9638 
9639 	/* CCM configuration via CCMCTL register */
9640 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
9641 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
9642 
9643 	/* PCIe lanes configuration */
9644 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
9645 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
9646 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
9647 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
9648 
9649 	/* PCIe PLL Configuration */
9650 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
9651 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
9652 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
9653 }
9654