/*	$NetBSD: if_wm.c,v 1.289 2014/08/10 16:44:36 tls Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- Internal SERDES mode on the 82575 and newer.
 *	- EEE (Energy Efficient Ethernet)
 *	- MSI/MSI-X
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.289 2014/08/10 16:44:36 tls Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rnd.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
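
/*
 * Example use of DPRINTF() (illustrative only): the second argument is
 * pasted directly after "printf", so it must be a fully parenthesized
 * printf() argument list:
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link is up\n",
 *	    device_xname(sc->sc_dev)));
 */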

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#endif

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
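
/*
 * Both ring sizes are powers of two, so the "(x + 1) & (size - 1)"
 * in WM_NEXTTX()/WM_NEXTTXS() above is a cheap modulo.  For example
 * (a sketch, assuming WM_NTXDESC(sc) == 4096): WM_NEXTTX(sc, 4094)
 * is 4095, and WM_NEXTTX(sc, 4095) wraps around to 0.
 */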

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t      wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
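
/*
 * Illustrative consequence of the layout above (not asserted anywhere):
 * the Rx descriptors come first in struct wm_control_data_82544, so
 * WM_CDRXOFF(0) == 0 and WM_CDTXOFF(0) == sizeof(wcd_rxdescs).  These
 * byte offsets are used both for partial bus_dmamap_sync() calls and
 * for computing the DMA addresses handed to the chip.
 */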

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};
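
/*
 * The table above maps a function ID to the matching per-PHY
 * software/firmware semaphore bit.  A typical use (sketch) is to
 * acquire the semaphore for this port's PHY:
 *
 *	wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
 */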

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_ee_addrbits;		/* EEPROM address bits */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/* Software state for the transmit and receive descriptors. */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/* Control data structures. */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segments */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx unsupported (MAC control) frames */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int	sc_txfree;		/* number of free Tx descriptors */
	int	sc_txnext;		/* next ready Tx descriptor */

	int	sc_txsfree;		/* number of free Tx jobs */
	int	sc_txsnext;		/* next free Tx job */
	int	sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int	sc_txfifo_size;		/* Tx FIFO size */
	int	sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int	sc_txfifo_stall;	/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
	int	sc_rxdiscard;
	int	sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_tx_lock;		/* lock for tx operations */
	kmutex_t *sc_rx_lock;		/* lock for rx operations */
};

#define WM_TX_LOCK(_sc)		if ((_sc)->sc_tx_lock) mutex_enter((_sc)->sc_tx_lock)
#define WM_TX_UNLOCK(_sc)	if ((_sc)->sc_tx_lock) mutex_exit((_sc)->sc_tx_lock)
#define WM_TX_LOCKED(_sc)	(!(_sc)->sc_tx_lock || mutex_owned((_sc)->sc_tx_lock))
#define WM_RX_LOCK(_sc)		if ((_sc)->sc_rx_lock) mutex_enter((_sc)->sc_rx_lock)
#define WM_RX_UNLOCK(_sc)	if ((_sc)->sc_rx_lock) mutex_exit((_sc)->sc_rx_lock)
#define WM_RX_LOCKED(_sc)	(!(_sc)->sc_rx_lock || mutex_owned((_sc)->sc_rx_lock))
#define WM_BOTH_LOCK(_sc)	do {WM_TX_LOCK(_sc); WM_RX_LOCK(_sc);} while (0)
#define WM_BOTH_UNLOCK(_sc)	do {WM_RX_UNLOCK(_sc); WM_TX_UNLOCK(_sc);} while (0)
#define WM_BOTH_LOCKED(_sc)	(WM_TX_LOCKED(_sc) && WM_RX_LOCKED(_sc))
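
/*
 * Note the fallback behaviour of the macros above: if no mutex was
 * created (sc_tx_lock/sc_rx_lock == NULL), the LOCK/UNLOCK macros
 * degrade to no-ops and the *_LOCKED() predicates evaluate to true,
 * so assertions built on them still pass in unlocked configurations.
 */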

#ifdef WM_MPSAFE
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
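
/*
 * These two macros are a classic tail-pointer list append: sc_rxtailp
 * always points at the location (sc_rxhead itself, or the previous
 * mbuf's m_next field) where the next fragment should be stored, so
 * linking is O(1) with no special case for an empty chain.  Sketch:
 *
 *	WM_RXCHAIN_RESET(sc);
 *	WM_RXCHAIN_LINK(sc, m1);	(now sc_rxhead == m1)
 *	WM_RXCHAIN_LINK(sc, m2);	(now m1->m_next == m2)
 */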

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg) \
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE32(sc, reg, data) \
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define ICH8_FLASH_READ16(sc, reg) \
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE16(sc, reg, data) \
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
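
/*
 * In the _HI macros above, sizeof(bus_addr_t) is a compile-time
 * constant, so on platforms with a 32-bit bus_addr_t the expression
 * folds to 0, while on 64-bit platforms it yields the upper 32 bits
 * of the descriptor-ring DMA address for the high base registers.
 */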

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
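
/*
 * Worked example (a sketch, assuming WM_NTXDESC(sc) == 4096):
 * WM_CDTXSYNC(sc, 4094, 4, ops) first syncs descriptors 4094-4095 at
 * the tail of the ring, then wraps and syncs descriptors 0-1, so a
 * range spanning the end of the ring costs two bus_dmamap_sync()
 * calls.
 */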

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
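
/*
 * The 2-byte "align tweak" above works because an Ethernet header is
 * 14 bytes long: starting the frame at offset 2 places the IP header
 * at offset 16, a 4-byte boundary, which strict-alignment CPUs need
 * for the 32-bit fields in struct ip.
 */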

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static int	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* Start */
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
/* Interrupt */
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr(void *);

/*
 * Media related.
 * GMII, SGMII, TBI (and SERDES)
 */
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_mediainit(struct wm_softc *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Both spi and uwire */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static void	wm_set_spiaddrbits(struct wm_softc *);
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
662  */
663 static int	wm_get_swsm_semaphore(struct wm_softc *);
664 static void	wm_put_swsm_semaphore(struct wm_softc *);
665 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
666 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
667 static int	wm_get_swfwhw_semaphore(struct wm_softc *);
668 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
669 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
670 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
671 
672 /*
673  * Management mode and power management related subroutines.
674  * BMC, AMT, suspend/resume and EEE.
675  */
676 static int	wm_check_mng_mode(struct wm_softc *);
677 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
678 static int	wm_check_mng_mode_82574(struct wm_softc *);
679 static int	wm_check_mng_mode_generic(struct wm_softc *);
680 static int	wm_enable_mng_pass_thru(struct wm_softc *);
681 static int	wm_check_reset_block(struct wm_softc *);
682 static void	wm_get_hw_control(struct wm_softc *);
683 static void	wm_release_hw_control(struct wm_softc *);
684 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
685 static void	wm_smbustopci(struct wm_softc *);
686 static void	wm_init_manageability(struct wm_softc *);
687 static void	wm_release_manageability(struct wm_softc *);
688 static void	wm_get_wakeup(struct wm_softc *);
689 #ifdef WM_WOL
690 static void	wm_enable_phy_wakeup(struct wm_softc *);
691 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
692 static void	wm_enable_wakeup(struct wm_softc *);
693 #endif
694 /* EEE */
695 static void	wm_set_eee_i350(struct wm_softc *);
696 
697 /*
698  * Workarounds (mainly PHY related).
699  * Basically, PHY's workarounds are in the PHY drivers.
700  */
701 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
702 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
703 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
704 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
705 static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
706 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
707 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
708 static void	wm_reset_init_script_82575(struct wm_softc *);
709 
710 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
711     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
712 
713 /*
714  * Devices supported by this driver.
715  */
716 static const struct wm_product {
717 	pci_vendor_id_t		wmp_vendor;
718 	pci_product_id_t	wmp_product;
719 	const char		*wmp_name;
720 	wm_chip_type		wmp_type;
721 	int			wmp_flags;
722 #define	WMP_F_1000X		0x01
723 #define	WMP_F_1000T		0x02
724 #define	WMP_F_SERDES		0x04
725 } wm_products[] = {
726 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
727 	  "Intel i82542 1000BASE-X Ethernet",
728 	  WM_T_82542_2_1,	WMP_F_1000X },
729 
730 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
731 	  "Intel i82543GC 1000BASE-X Ethernet",
732 	  WM_T_82543,		WMP_F_1000X },
733 
734 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
735 	  "Intel i82543GC 1000BASE-T Ethernet",
736 	  WM_T_82543,		WMP_F_1000T },
737 
738 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
739 	  "Intel i82544EI 1000BASE-T Ethernet",
740 	  WM_T_82544,		WMP_F_1000T },
741 
742 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
743 	  "Intel i82544EI 1000BASE-X Ethernet",
744 	  WM_T_82544,		WMP_F_1000X },
745 
746 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
747 	  "Intel i82544GC 1000BASE-T Ethernet",
748 	  WM_T_82544,		WMP_F_1000T },
749 
750 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
751 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
752 	  WM_T_82544,		WMP_F_1000T },
753 
754 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
755 	  "Intel i82540EM 1000BASE-T Ethernet",
756 	  WM_T_82540,		WMP_F_1000T },
757 
758 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
759 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
760 	  WM_T_82540,		WMP_F_1000T },
761 
762 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
763 	  "Intel i82540EP 1000BASE-T Ethernet",
764 	  WM_T_82540,		WMP_F_1000T },
765 
766 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
767 	  "Intel i82540EP 1000BASE-T Ethernet",
768 	  WM_T_82540,		WMP_F_1000T },
769 
770 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
771 	  "Intel i82540EP 1000BASE-T Ethernet",
772 	  WM_T_82540,		WMP_F_1000T },
773 
774 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
775 	  "Intel i82545EM 1000BASE-T Ethernet",
776 	  WM_T_82545,		WMP_F_1000T },
777 
778 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
779 	  "Intel i82545GM 1000BASE-T Ethernet",
780 	  WM_T_82545_3,		WMP_F_1000T },
781 
782 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
783 	  "Intel i82545GM 1000BASE-X Ethernet",
784 	  WM_T_82545_3,		WMP_F_1000X },
785 
786 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
787 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
788 	  WM_T_82545_3,		WMP_F_SERDES },
789 
790 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
791 	  "Intel i82546EB 1000BASE-T Ethernet",
792 	  WM_T_82546,		WMP_F_1000T },
793 
794 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
795 	  "Intel i82546EB 1000BASE-T Ethernet",
796 	  WM_T_82546,		WMP_F_1000T },
797 
798 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
799 	  "Intel i82545EM 1000BASE-X Ethernet",
800 	  WM_T_82545,		WMP_F_1000X },
801 
802 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
803 	  "Intel i82546EB 1000BASE-X Ethernet",
804 	  WM_T_82546,		WMP_F_1000X },
805 
806 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
807 	  "Intel i82546GB 1000BASE-T Ethernet",
808 	  WM_T_82546_3,		WMP_F_1000T },
809 
810 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
811 	  "Intel i82546GB 1000BASE-X Ethernet",
812 	  WM_T_82546_3,		WMP_F_1000X },
813 
814 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
815 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
816 	  WM_T_82546_3,		WMP_F_SERDES },
817 
818 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
819 	  "i82546GB quad-port Gigabit Ethernet",
820 	  WM_T_82546_3,		WMP_F_1000T },
821 
822 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
823 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
824 	  WM_T_82546_3,		WMP_F_1000T },
825 
826 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
827 	  "Intel PRO/1000MT (82546GB)",
828 	  WM_T_82546_3,		WMP_F_1000T },
829 
830 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
831 	  "Intel i82541EI 1000BASE-T Ethernet",
832 	  WM_T_82541,		WMP_F_1000T },
833 
834 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
835 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
836 	  WM_T_82541,		WMP_F_1000T },
837 
838 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
839 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
840 	  WM_T_82541,		WMP_F_1000T },
841 
842 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
843 	  "Intel i82541ER 1000BASE-T Ethernet",
844 	  WM_T_82541_2,		WMP_F_1000T },
845 
846 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
847 	  "Intel i82541GI 1000BASE-T Ethernet",
848 	  WM_T_82541_2,		WMP_F_1000T },
849 
850 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
851 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
852 	  WM_T_82541_2,		WMP_F_1000T },
853 
854 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
855 	  "Intel i82541PI 1000BASE-T Ethernet",
856 	  WM_T_82541_2,		WMP_F_1000T },
857 
858 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
859 	  "Intel i82547EI 1000BASE-T Ethernet",
860 	  WM_T_82547,		WMP_F_1000T },
861 
862 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
863 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
864 	  WM_T_82547,		WMP_F_1000T },
865 
866 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
867 	  "Intel i82547GI 1000BASE-T Ethernet",
868 	  WM_T_82547_2,		WMP_F_1000T },
869 
870 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
871 	  "Intel PRO/1000 PT (82571EB)",
872 	  WM_T_82571,		WMP_F_1000T },
873 
874 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
875 	  "Intel PRO/1000 PF (82571EB)",
876 	  WM_T_82571,		WMP_F_1000X },
877 
878 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
879 	  "Intel PRO/1000 PB (82571EB)",
880 	  WM_T_82571,		WMP_F_SERDES },
881 
882 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
883 	  "Intel PRO/1000 QT (82571EB)",
884 	  WM_T_82571,		WMP_F_1000T },
885 
886 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
887 	  "Intel i82572EI 1000baseT Ethernet",
888 	  WM_T_82572,		WMP_F_1000T },
889 
890 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
891 	  "Intel PRO/1000 PT Quad Port Server Adapter",
892 	  WM_T_82571,		WMP_F_1000T, },
893 
894 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
895 	  "Intel i82572EI 1000baseX Ethernet",
896 	  WM_T_82572,		WMP_F_1000X },
897 
898 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
899 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
900 	  WM_T_82572,		WMP_F_SERDES },
901 
902 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
903 	  "Intel i82572EI 1000baseT Ethernet",
904 	  WM_T_82572,		WMP_F_1000T },
905 
906 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
907 	  "Intel i82573E",
908 	  WM_T_82573,		WMP_F_1000T },
909 
910 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
911 	  "Intel i82573E IAMT",
912 	  WM_T_82573,		WMP_F_1000T },
913 
914 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
915 	  "Intel i82573L Gigabit Ethernet",
916 	  WM_T_82573,		WMP_F_1000T },
917 
918 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
919 	  "Intel i82574L",
920 	  WM_T_82574,		WMP_F_1000T },
921 
922 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
923 	  "Intel i82583V",
924 	  WM_T_82583,		WMP_F_1000T },
925 
926 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
927 	  "i80003 dual 1000baseT Ethernet",
928 	  WM_T_80003,		WMP_F_1000T },
929 
930 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
931 	  "i80003 dual 1000baseX Ethernet",
932 	  WM_T_80003,		WMP_F_1000T },
933 
934 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
935 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
936 	  WM_T_80003,		WMP_F_SERDES },
937 
938 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
939 	  "Intel i80003 1000baseT Ethernet",
940 	  WM_T_80003,		WMP_F_1000T },
941 
942 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
943 	  "Intel i80003 Gigabit Ethernet (SERDES)",
944 	  WM_T_80003,		WMP_F_SERDES },
945 
946 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
947 	  "Intel i82801H (M_AMT) LAN Controller",
948 	  WM_T_ICH8,		WMP_F_1000T },
949 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
950 	  "Intel i82801H (AMT) LAN Controller",
951 	  WM_T_ICH8,		WMP_F_1000T },
952 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
953 	  "Intel i82801H LAN Controller",
954 	  WM_T_ICH8,		WMP_F_1000T },
955 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
956 	  "Intel i82801H (IFE) LAN Controller",
957 	  WM_T_ICH8,		WMP_F_1000T },
958 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
959 	  "Intel i82801H (M) LAN Controller",
960 	  WM_T_ICH8,		WMP_F_1000T },
961 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
962 	  "Intel i82801H IFE (GT) LAN Controller",
963 	  WM_T_ICH8,		WMP_F_1000T },
964 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
965 	  "Intel i82801H IFE (G) LAN Controller",
966 	  WM_T_ICH8,		WMP_F_1000T },
967 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
968 	  "82801I (AMT) LAN Controller",
969 	  WM_T_ICH9,		WMP_F_1000T },
970 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
971 	  "82801I LAN Controller",
972 	  WM_T_ICH9,		WMP_F_1000T },
973 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
974 	  "82801I (G) LAN Controller",
975 	  WM_T_ICH9,		WMP_F_1000T },
976 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
977 	  "82801I (GT) LAN Controller",
978 	  WM_T_ICH9,		WMP_F_1000T },
979 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
980 	  "82801I (C) LAN Controller",
981 	  WM_T_ICH9,		WMP_F_1000T },
982 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
983 	  "82801I mobile LAN Controller",
984 	  WM_T_ICH9,		WMP_F_1000T },
985 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
986 	  "82801I mobile (V) LAN Controller",
987 	  WM_T_ICH9,		WMP_F_1000T },
988 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
989 	  "82801I mobile (AMT) LAN Controller",
990 	  WM_T_ICH9,		WMP_F_1000T },
991 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
992 	  "82567LM-4 LAN Controller",
993 	  WM_T_ICH9,		WMP_F_1000T },
994 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
995 	  "82567V-3 LAN Controller",
996 	  WM_T_ICH9,		WMP_F_1000T },
997 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
998 	  "82567LM-2 LAN Controller",
999 	  WM_T_ICH10,		WMP_F_1000T },
1000 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
1001 	  "82567LF-2 LAN Controller",
1002 	  WM_T_ICH10,		WMP_F_1000T },
1003 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
1004 	  "82567LM-3 LAN Controller",
1005 	  WM_T_ICH10,		WMP_F_1000T },
1006 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
1007 	  "82567LF-3 LAN Controller",
1008 	  WM_T_ICH10,		WMP_F_1000T },
1009 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
1010 	  "82567V-2 LAN Controller",
1011 	  WM_T_ICH10,		WMP_F_1000T },
1012 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
1013 	  "82567V-3? LAN Controller",
1014 	  WM_T_ICH10,		WMP_F_1000T },
1015 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
1016 	  "HANKSVILLE LAN Controller",
1017 	  WM_T_ICH10,		WMP_F_1000T },
1018 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
1019 	  "PCH LAN (82577LM) Controller",
1020 	  WM_T_PCH,		WMP_F_1000T },
1021 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
1022 	  "PCH LAN (82577LC) Controller",
1023 	  WM_T_PCH,		WMP_F_1000T },
1024 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
1025 	  "PCH LAN (82578DM) Controller",
1026 	  WM_T_PCH,		WMP_F_1000T },
1027 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
1028 	  "PCH LAN (82578DC) Controller",
1029 	  WM_T_PCH,		WMP_F_1000T },
1030 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
1031 	  "PCH2 LAN (82579LM) Controller",
1032 	  WM_T_PCH2,		WMP_F_1000T },
1033 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
1034 	  "PCH2 LAN (82579V) Controller",
1035 	  WM_T_PCH2,		WMP_F_1000T },
1036 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
1037 	  "82575EB dual-1000baseT Ethernet",
1038 	  WM_T_82575,		WMP_F_1000T },
1039 #if 0
1040 	/*
	 * Not sure whether this is WMP_F_1000X or WMP_F_SERDES; we don't
	 * have the hardware, so it's disabled for now ...
	 */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_1000T },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Connection",
	  WM_T_I354,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */


/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING:"
		    " i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}
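
/*
 * A typical call (a sketch; the register, offset and data values here
 * are illustrative only) pokes one byte of an 8-bit controller register
 * and relies on the SCTL_CTL_READY poll above to wait for completion:
 *
 *	wm_82575_write_8bit_ctlr_reg(sc, reg, off, data);
 */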

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}
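
/*
 * Example (a sketch): on a platform with a 64-bit bus_addr_t,
 * wm_set_dma_addr() splits an address such as 0x123456780 into
 * wa_low = 0x23456780 and wa_high = 0x1, each stored little-endian
 * as the hardware expects; with a 32-bit bus_addr_t, wa_high is 0.
 */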

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */

/* Lookup supported device table */
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

/* The match function (ca_match) */
static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

/* The attach function (ca_attach) */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	bool force_clear_smbi;
	uint32_t reg;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
	sc->sc_stopping = false;

	sc->sc_wmp = wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
			if (memtype == PCI_MAPREG_TYPE_IO)
				break;
			if (PCI_MAPREG_MEM_TYPE(memtype) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				i += 4;	/* skip high bits, too */
		}
		if (i < PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
			 * That's not a problem, because those newer chips
			 * don't have this bug in the first place.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it hasn't
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
					0, &sc->sc_iot, &sc->sc_ioh,
					NULL, &sc->sc_ios) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
#ifdef WM_MPSAFE
	pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
#endif
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit, 66MHz PCI bus.
1453 		 */
1454 		sc->sc_flags |= WM_F_CSA;
1455 		sc->sc_bus_speed = 66;
1456 		aprint_verbose_dev(sc->sc_dev,
1457 		    "Communication Streaming Architecture\n");
1458 		if (sc->sc_type == WM_T_82547) {
1459 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1460 			callout_setfunc(&sc->sc_txfifo_ch,
1461 					wm_82547_txfifo_stall, sc);
1462 			aprint_verbose_dev(sc->sc_dev,
1463 			    "using 82547 Tx FIFO stall work-around\n");
1464 		}
1465 	} else if (sc->sc_type >= WM_T_82571) {
1466 		sc->sc_flags |= WM_F_PCIE;
1467 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1468 		    && (sc->sc_type != WM_T_ICH10)
1469 		    && (sc->sc_type != WM_T_PCH)
1470 		    && (sc->sc_type != WM_T_PCH2)
1471 		    && (sc->sc_type != WM_T_PCH_LPT)) {
1472 			/* ICH* and PCH* have no PCIe capability registers */
1473 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1474 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1475 				NULL) == 0)
1476 				aprint_error_dev(sc->sc_dev,
1477 				    "unable to find PCIe capability\n");
1478 		}
1479 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1480 	} else {
1481 		reg = CSR_READ(sc, WMREG_STATUS);
1482 		if (reg & STATUS_BUS64)
1483 			sc->sc_flags |= WM_F_BUS64;
1484 		if ((reg & STATUS_PCIX_MODE) != 0) {
1485 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1486 
1487 			sc->sc_flags |= WM_F_PCIX;
1488 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1489 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1490 				aprint_error_dev(sc->sc_dev,
1491 				    "unable to find PCIX capability\n");
1492 			else if (sc->sc_type != WM_T_82545_3 &&
1493 				 sc->sc_type != WM_T_82546_3) {
1494 				/*
1495 				 * Work around a problem caused by the BIOS
1496 				 * setting the max memory read byte count
1497 				 * incorrectly.
1498 				 */
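				/*
				 * MMRBC is encoded as (512 << field), so
				 * clamp the command value to the maximum
				 * advertised in the status register.
				 */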
1499 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1500 				    sc->sc_pcixe_capoff + PCIX_CMD);
1501 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1502 				    sc->sc_pcixe_capoff + PCIX_STATUS);
1503 
1504 				bytecnt =
1505 				    (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1506 				    PCIX_CMD_BYTECNT_SHIFT;
1507 				maxb =
1508 				    (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1509 				    PCIX_STATUS_MAXB_SHIFT;
1510 				if (bytecnt > maxb) {
1511 					aprint_verbose_dev(sc->sc_dev,
1512 					    "resetting PCI-X MMRBC: %d -> %d\n",
1513 					    512 << bytecnt, 512 << maxb);
1514 					pcix_cmd = (pcix_cmd &
1515 					    ~PCIX_CMD_BYTECNT_MASK) |
1516 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
1517 					pci_conf_write(pa->pa_pc, pa->pa_tag,
1518 					    sc->sc_pcixe_capoff + PCIX_CMD,
1519 					    pcix_cmd);
1520 				}
1521 			}
1522 		}
1523 		/*
1524 		 * The quad port adapter is special; it has a PCIX-PCIX
1525 		 * bridge on the board, and can run the secondary bus at
1526 		 * a higher speed.
1527 		 */
1528 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1529 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1530 								      : 66;
1531 		} else if (sc->sc_flags & WM_F_PCIX) {
1532 			switch (reg & STATUS_PCIXSPD_MASK) {
1533 			case STATUS_PCIXSPD_50_66:
1534 				sc->sc_bus_speed = 66;
1535 				break;
1536 			case STATUS_PCIXSPD_66_100:
1537 				sc->sc_bus_speed = 100;
1538 				break;
1539 			case STATUS_PCIXSPD_100_133:
1540 				sc->sc_bus_speed = 133;
1541 				break;
1542 			default:
1543 				aprint_error_dev(sc->sc_dev,
1544 				    "unknown PCIXSPD %d; assuming 66MHz\n",
1545 				    reg & STATUS_PCIXSPD_MASK);
1546 				sc->sc_bus_speed = 66;
1547 				break;
1548 			}
1549 		} else
1550 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1551 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1552 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1553 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1554 	}
1555 
1556 	/*
1557 	 * Allocate the control data structures, and create and load the
1558 	 * DMA map for it.
1559 	 *
1560 	 * NOTE: All Tx descriptors must be in the same 4G segment of
1561 	 * memory.  So must Rx descriptors.  We simplify by allocating
1562 	 * both sets within the same 4G segment.
1563 	 */
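	/*
	 * The 4G "boundary" argument to bus_dmamem_alloc() below is what
	 * enforces this: bus_dma(9) guarantees that an allocation never
	 * crosses a boundary of that size.
	 */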
1564 	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1565 	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
1566 	sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1567 	    sizeof(struct wm_control_data_82542) :
1568 	    sizeof(struct wm_control_data_82544);
1569 	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1570 		    (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1571 		    &sc->sc_cd_rseg, 0)) != 0) {
1572 		aprint_error_dev(sc->sc_dev,
1573 		    "unable to allocate control data, error = %d\n",
1574 		    error);
1575 		goto fail_0;
1576 	}
1577 
1578 	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1579 		    sc->sc_cd_rseg, sc->sc_cd_size,
1580 		    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1581 		aprint_error_dev(sc->sc_dev,
1582 		    "unable to map control data, error = %d\n", error);
1583 		goto fail_1;
1584 	}
1585 
1586 	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1587 		    sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1588 		aprint_error_dev(sc->sc_dev,
1589 		    "unable to create control data DMA map, error = %d\n",
1590 		    error);
1591 		goto fail_2;
1592 	}
1593 
1594 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1595 		    sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1596 		aprint_error_dev(sc->sc_dev,
1597 		    "unable to load control data DMA map, error = %d\n",
1598 		    error);
1599 		goto fail_3;
1600 	}
1601 
1602 	/* Create the transmit buffer DMA maps. */
1603 	WM_TXQUEUELEN(sc) =
1604 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1605 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1606 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1607 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1608 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1609 			    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1610 			aprint_error_dev(sc->sc_dev,
1611 			    "unable to create Tx DMA map %d, error = %d\n",
1612 			    i, error);
1613 			goto fail_4;
1614 		}
1615 	}
1616 
1617 	/* Create the receive buffer DMA maps. */
1618 	for (i = 0; i < WM_NRXDESC; i++) {
1619 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1620 			    MCLBYTES, 0, 0,
1621 			    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1622 			aprint_error_dev(sc->sc_dev,
1623 			    "unable to create Rx DMA map %d error = %d\n",
1624 			    i, error);
1625 			goto fail_5;
1626 		}
1627 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
1628 	}
1629 
1630 	/* clear interesting stat counters */
1631 	CSR_READ(sc, WMREG_COLC);
1632 	CSR_READ(sc, WMREG_RXERRC);
1633 
1634 	/* get PHY control from SMBus to PCIe */
1635 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1636 	    || (sc->sc_type == WM_T_PCH_LPT))
1637 		wm_smbustopci(sc);
1638 
1639 	/* Reset the chip to a known state. */
1640 	wm_reset(sc);
1641 
1642 	/* Get some information about the EEPROM. */
1643 	switch (sc->sc_type) {
1644 	case WM_T_82542_2_0:
1645 	case WM_T_82542_2_1:
1646 	case WM_T_82543:
1647 	case WM_T_82544:
1648 		/* Microwire */
1649 		sc->sc_ee_addrbits = 6;
1650 		break;
1651 	case WM_T_82540:
1652 	case WM_T_82545:
1653 	case WM_T_82545_3:
1654 	case WM_T_82546:
1655 	case WM_T_82546_3:
1656 		/* Microwire */
1657 		reg = CSR_READ(sc, WMREG_EECD);
1658 		if (reg & EECD_EE_SIZE)
1659 			sc->sc_ee_addrbits = 8;
1660 		else
1661 			sc->sc_ee_addrbits = 6;
1662 		sc->sc_flags |= WM_F_LOCK_EECD;
1663 		break;
1664 	case WM_T_82541:
1665 	case WM_T_82541_2:
1666 	case WM_T_82547:
1667 	case WM_T_82547_2:
1668 		reg = CSR_READ(sc, WMREG_EECD);
1669 		if (reg & EECD_EE_TYPE) {
1670 			/* SPI */
1671 			wm_set_spiaddrbits(sc);
1672 		} else
1673 			/* Microwire */
1674 			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
1675 		sc->sc_flags |= WM_F_LOCK_EECD;
1676 		break;
1677 	case WM_T_82571:
1678 	case WM_T_82572:
1679 		/* SPI */
1680 		wm_set_spiaddrbits(sc);
1681 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1682 		break;
1683 	case WM_T_82573:
1684 		sc->sc_flags |= WM_F_LOCK_SWSM;
1685 		/* FALLTHROUGH */
1686 	case WM_T_82574:
1687 	case WM_T_82583:
1688 		if (wm_nvm_is_onboard_eeprom(sc) == 0)
1689 			sc->sc_flags |= WM_F_EEPROM_FLASH;
1690 		else {
1691 			/* SPI */
1692 			wm_set_spiaddrbits(sc);
1693 		}
1694 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1695 		break;
1696 	case WM_T_82575:
1697 	case WM_T_82576:
1698 	case WM_T_82580:
1699 	case WM_T_82580ER:
1700 	case WM_T_I350:
1701 	case WM_T_I354:
1702 	case WM_T_80003:
1703 		/* SPI */
1704 		wm_set_spiaddrbits(sc);
1705 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
1706 		    | WM_F_LOCK_SWSM;
1707 		break;
1708 	case WM_T_ICH8:
1709 	case WM_T_ICH9:
1710 	case WM_T_ICH10:
1711 	case WM_T_PCH:
1712 	case WM_T_PCH2:
1713 	case WM_T_PCH_LPT:
1714 		/* FLASH */
1715 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1716 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1717 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1718 		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1719 			aprint_error_dev(sc->sc_dev,
1720 			    "can't map FLASH registers\n");
1721 			return;
1722 		}
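		/*
		 * GFPREG describes the flash region in sector units: the
		 * low field is the first sector, the field at bit 16 the
		 * last.  Convert that to bytes, split it across the two
		 * NVM banks, and express the bank size in 16-bit words.
		 */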
1723 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1724 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1725 						ICH_FLASH_SECTOR_SIZE;
1726 		sc->sc_ich8_flash_bank_size =
1727 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1728 		sc->sc_ich8_flash_bank_size -=
1729 		    (reg & ICH_GFPREG_BASE_MASK);
1730 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1731 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1732 		break;
1733 	case WM_T_I210:
1734 	case WM_T_I211:
1735 		sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1736 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
1737 		break;
1738 	default:
1739 		break;
1740 	}
1741 
1742 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
1743 	switch (sc->sc_type) {
1744 	case WM_T_82571:
1745 	case WM_T_82572:
1746 		reg = CSR_READ(sc, WMREG_SWSM2);
1747 		if ((reg & SWSM2_LOCK) == 0) {
1748 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
1749 			force_clear_smbi = true;
1750 		} else
1751 			force_clear_smbi = false;
1752 		break;
1753 	case WM_T_82573:
1754 	case WM_T_82574:
1755 	case WM_T_82583:
1756 		force_clear_smbi = true;
1757 		break;
1758 	default:
1759 		force_clear_smbi = false;
1760 		break;
1761 	}
1762 	if (force_clear_smbi) {
1763 		reg = CSR_READ(sc, WMREG_SWSM);
1764 		if ((reg & SWSM_SMBI) != 0)
1765 			aprint_error_dev(sc->sc_dev,
1766 			    "Please update the Bootagent\n");
1767 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
1768 	}
1769 
1770 	/*
1771 	 * Defer printing the EEPROM type until after verifying the checksum.
1772 	 * This allows the EEPROM type to be printed correctly in the case
1773 	 * that no EEPROM is attached.
1774 	 */
1775 	/*
1776 	 * Validate the EEPROM checksum. If the checksum fails, flag
1777 	 * this for later, so we can fail future reads from the EEPROM.
1778 	 */
1779 	if (wm_nvm_validate_checksum(sc)) {
1780 		/*
1781 		 * Check again, because some PCI-e parts fail the
1782 		 * first check due to the link being in a sleep state.
1783 		 */
1784 		if (wm_nvm_validate_checksum(sc))
1785 			sc->sc_flags |= WM_F_EEPROM_INVALID;
1786 	}
1787 
1788 	/* Set device properties (macflags) */
1789 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1790 
1791 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
1792 		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1793 	else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) {
1794 		aprint_verbose_dev(sc->sc_dev, "FLASH(HW)\n");
1795 	} else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1796 		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1797 	} else {
1798 		if (sc->sc_flags & WM_F_EEPROM_SPI)
1799 			eetype = "SPI";
1800 		else
1801 			eetype = "MicroWire";
1802 		aprint_verbose_dev(sc->sc_dev,
1803 		    "%u word (%d address bits) %s EEPROM\n",
1804 		    1U << sc->sc_ee_addrbits,
1805 		    sc->sc_ee_addrbits, eetype);
1806 	}
1807 
1808 	switch (sc->sc_type) {
1809 	case WM_T_82571:
1810 	case WM_T_82572:
1811 	case WM_T_82573:
1812 	case WM_T_82574:
1813 	case WM_T_82583:
1814 	case WM_T_80003:
1815 	case WM_T_ICH8:
1816 	case WM_T_ICH9:
1817 	case WM_T_ICH10:
1818 	case WM_T_PCH:
1819 	case WM_T_PCH2:
1820 	case WM_T_PCH_LPT:
1821 		if (wm_check_mng_mode(sc) != 0)
1822 			wm_get_hw_control(sc);
1823 		break;
1824 	default:
1825 		break;
1826 	}
1827 	wm_get_wakeup(sc);
1828 	/*
1829 	 * Read the Ethernet address from device properties if present;
1830 	 * otherwise read it from the EEPROM.
1831 	 */
1832 	ea = prop_dictionary_get(dict, "mac-address");
1833 	if (ea != NULL) {
1834 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1835 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1836 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1837 	} else {
1838 		if (wm_read_mac_addr(sc, enaddr) != 0) {
1839 			aprint_error_dev(sc->sc_dev,
1840 			    "unable to read Ethernet address\n");
1841 			return;
1842 		}
1843 	}
1844 
1845 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1846 	    ether_sprintf(enaddr));
1847 
1848 	/*
1849 	 * Read the config info from the EEPROM, and set up various
1850 	 * bits in the control registers based on their contents.
1851 	 */
1852 	pn = prop_dictionary_get(dict, "i82543-cfg1");
1853 	if (pn != NULL) {
1854 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1855 		cfg1 = (uint16_t) prop_number_integer_value(pn);
1856 	} else {
1857 		if (wm_nvm_read(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1858 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1859 			return;
1860 		}
1861 	}
1862 
1863 	pn = prop_dictionary_get(dict, "i82543-cfg2");
1864 	if (pn != NULL) {
1865 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1866 		cfg2 = (uint16_t) prop_number_integer_value(pn);
1867 	} else {
1868 		if (wm_nvm_read(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1869 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1870 			return;
1871 		}
1872 	}
1873 
1874 	/* check for WM_F_WOL */
1875 	switch (sc->sc_type) {
1876 	case WM_T_82542_2_0:
1877 	case WM_T_82542_2_1:
1878 	case WM_T_82543:
1879 		/* dummy? */
1880 		eeprom_data = 0;
1881 		apme_mask = EEPROM_CFG3_APME;
1882 		break;
1883 	case WM_T_82544:
1884 		apme_mask = EEPROM_CFG2_82544_APM_EN;
1885 		eeprom_data = cfg2;
1886 		break;
1887 	case WM_T_82546:
1888 	case WM_T_82546_3:
1889 	case WM_T_82571:
1890 	case WM_T_82572:
1891 	case WM_T_82573:
1892 	case WM_T_82574:
1893 	case WM_T_82583:
1894 	case WM_T_80003:
1895 	default:
1896 		apme_mask = EEPROM_CFG3_APME;
1897 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
1898 		    : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
1899 		break;
1900 	case WM_T_82575:
1901 	case WM_T_82576:
1902 	case WM_T_82580:
1903 	case WM_T_82580ER:
1904 	case WM_T_I350:
1905 	case WM_T_I354: /* XXX ok? */
1906 	case WM_T_ICH8:
1907 	case WM_T_ICH9:
1908 	case WM_T_ICH10:
1909 	case WM_T_PCH:
1910 	case WM_T_PCH2:
1911 	case WM_T_PCH_LPT:
1912 		/* XXX The funcid should be checked on some devices */
1913 		apme_mask = WUC_APME;
1914 		eeprom_data = CSR_READ(sc, WMREG_WUC);
1915 		break;
1916 	}
1917 
1918 	/* Set the WM_F_WOL flag based on the APME bit read above */
1919 	if ((eeprom_data & apme_mask) != 0)
1920 		sc->sc_flags |= WM_F_WOL;
1921 #ifdef WM_DEBUG
1922 	if ((sc->sc_flags & WM_F_WOL) != 0)
1923 		printf("WOL\n");
1924 #endif
1925 
1926 	/*
1927 	 * XXX need special handling for some multiple-port cards
1928 	 * to disable a particular port.
1929 	 */
1930 
1931 	if (sc->sc_type >= WM_T_82544) {
1932 		pn = prop_dictionary_get(dict, "i82543-swdpin");
1933 		if (pn != NULL) {
1934 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1935 			swdpin = (uint16_t) prop_number_integer_value(pn);
1936 		} else {
1937 			if (wm_nvm_read(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1938 				aprint_error_dev(sc->sc_dev,
1939 				    "unable to read SWDPIN\n");
1940 				return;
1941 			}
1942 		}
1943 	}
1944 
1945 	if (cfg1 & EEPROM_CFG1_ILOS)
1946 		sc->sc_ctrl |= CTRL_ILOS;
1947 	if (sc->sc_type >= WM_T_82544) {
1948 		sc->sc_ctrl |=
1949 		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1950 		    CTRL_SWDPIO_SHIFT;
1951 		sc->sc_ctrl |=
1952 		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1953 		    CTRL_SWDPINS_SHIFT;
1954 	} else {
1955 		sc->sc_ctrl |=
1956 		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1957 		    CTRL_SWDPIO_SHIFT;
1958 	}
1959 
1960 #if 0
1961 	if (sc->sc_type >= WM_T_82544) {
1962 		if (cfg1 & EEPROM_CFG1_IPS0)
1963 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1964 		if (cfg1 & EEPROM_CFG1_IPS1)
1965 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1966 		sc->sc_ctrl_ext |=
1967 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1968 		    CTRL_EXT_SWDPIO_SHIFT;
1969 		sc->sc_ctrl_ext |=
1970 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1971 		    CTRL_EXT_SWDPINS_SHIFT;
1972 	} else {
1973 		sc->sc_ctrl_ext |=
1974 		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1975 		    CTRL_EXT_SWDPIO_SHIFT;
1976 	}
1977 #endif
1978 
1979 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1980 #if 0
1981 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1982 #endif
1983 
1984 	/*
1985 	 * Set up some register offsets that are different between
1986 	 * the i82542 and the i82543 and later chips.
1987 	 */
1988 	if (sc->sc_type < WM_T_82543) {
1989 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
1990 		sc->sc_tdt_reg = WMREG_OLD_TDT;
1991 	} else {
1992 		sc->sc_rdt_reg = WMREG_RDT;
1993 		sc->sc_tdt_reg = WMREG_TDT;
1994 	}
1995 
1996 	if (sc->sc_type == WM_T_PCH) {
1997 		uint16_t val;
1998 
1999 		/* Save the NVM K1 bit setting */
2000 		wm_nvm_read(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
2001 
2002 		if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
2003 			sc->sc_nvm_k1_enabled = 1;
2004 		else
2005 			sc->sc_nvm_k1_enabled = 0;
2006 	}
2007 
2008 	/*
2009 	 * Determine whether we're in TBI, GMII or SGMII mode, and
2010 	 * initialize the media structures accordingly.
2011 	 */
2012 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2013 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2014 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2015 	    || sc->sc_type == WM_T_82573
2016 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2017 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
2018 		wm_gmii_mediainit(sc, wmp->wmp_product);
2019 	} else if (sc->sc_type < WM_T_82543 ||
2020 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2021 		if (wmp->wmp_flags & WMP_F_1000T)
2022 			aprint_error_dev(sc->sc_dev,
2023 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
2024 		wm_tbi_mediainit(sc);
2025 	} else {
2026 		switch (sc->sc_type) {
2027 		case WM_T_82575:
2028 		case WM_T_82576:
2029 		case WM_T_82580:
2030 		case WM_T_82580ER:
2031 		case WM_T_I350:
2032 		case WM_T_I354:
2033 		case WM_T_I210:
2034 		case WM_T_I211:
2035 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
2036 			switch (reg & CTRL_EXT_LINK_MODE_MASK) {
2037 			case CTRL_EXT_LINK_MODE_1000KX:
2038 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2039 				CSR_WRITE(sc, WMREG_CTRL_EXT,
2040 				    reg | CTRL_EXT_I2C_ENA);
2041 				panic("not supported yet\n");
2042 				break;
2043 			case CTRL_EXT_LINK_MODE_SGMII:
2044 				if (wm_sgmii_uses_mdio(sc)) {
2045 					aprint_verbose_dev(sc->sc_dev,
2046 					    "SGMII(MDIO)\n");
2047 					sc->sc_flags |= WM_F_SGMII;
2048 					wm_gmii_mediainit(sc,
2049 					    wmp->wmp_product);
2050 					break;
2051 				}
2052 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2053 				/*FALLTHROUGH*/
2054 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2055 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
2056 				CSR_WRITE(sc, WMREG_CTRL_EXT,
2057 				    reg | CTRL_EXT_I2C_ENA);
2058 				panic("not supported yet\n");
2059 				break;
2060 			case CTRL_EXT_LINK_MODE_GMII:
2061 			default:
2062 				CSR_WRITE(sc, WMREG_CTRL_EXT,
2063 				    reg & ~CTRL_EXT_I2C_ENA);
2064 				wm_gmii_mediainit(sc, wmp->wmp_product);
2065 				break;
2066 			}
2067 			break;
2068 		default:
2069 			if (wmp->wmp_flags & WMP_F_1000X)
2070 				aprint_error_dev(sc->sc_dev,
2071 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2072 			wm_gmii_mediainit(sc, wmp->wmp_product);
2073 		}
2074 	}
2075 
2076 	ifp = &sc->sc_ethercom.ec_if;
2077 	xname = device_xname(sc->sc_dev);
2078 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2079 	ifp->if_softc = sc;
2080 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2081 	ifp->if_ioctl = wm_ioctl;
2082 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
2083 		ifp->if_start = wm_nq_start;
2084 	else
2085 		ifp->if_start = wm_start;
2086 	ifp->if_watchdog = wm_watchdog;
2087 	ifp->if_init = wm_init;
2088 	ifp->if_stop = wm_stop;
2089 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2090 	IFQ_SET_READY(&ifp->if_snd);
2091 
2092 	/* Check for jumbo frame */
2093 	switch (sc->sc_type) {
2094 	case WM_T_82573:
2095 		/* XXX limited to 9234 if ASPM is disabled */
2096 		wm_nvm_read(sc, EEPROM_INIT_3GIO_3, 1, &io3);
2097 		if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
2098 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2099 		break;
2100 	case WM_T_82571:
2101 	case WM_T_82572:
2102 	case WM_T_82574:
2103 	case WM_T_82575:
2104 	case WM_T_82576:
2105 	case WM_T_82580:
2106 	case WM_T_82580ER:
2107 	case WM_T_I350:
2108 	case WM_T_I354: /* XXXX ok? */
2109 	case WM_T_I210:
2110 	case WM_T_I211:
2111 	case WM_T_80003:
2112 	case WM_T_ICH9:
2113 	case WM_T_ICH10:
2114 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
2115 	case WM_T_PCH_LPT:
2116 		/* XXX limited to 9234 */
2117 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2118 		break;
2119 	case WM_T_PCH:
2120 		/* XXX limited to 4096 */
2121 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2122 		break;
2123 	case WM_T_82542_2_0:
2124 	case WM_T_82542_2_1:
2125 	case WM_T_82583:
2126 	case WM_T_ICH8:
2127 		/* No support for jumbo frame */
2128 		break;
2129 	default:
2130 		/* ETHER_MAX_LEN_JUMBO */
2131 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2132 		break;
2133 	}
2134 
2135 	/* If we're an i82543 or greater, we can support VLANs. */
2136 	if (sc->sc_type >= WM_T_82543)
2137 		sc->sc_ethercom.ec_capabilities |=
2138 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2139 
2140 	/*
2141 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
2142 	 * on i82543 and later.
2143 	 */
2144 	if (sc->sc_type >= WM_T_82543) {
2145 		ifp->if_capabilities |=
2146 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2147 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2148 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2149 		    IFCAP_CSUM_TCPv6_Tx |
2150 		    IFCAP_CSUM_UDPv6_Tx;
2151 	}
2152 
2153 	/*
2154 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2155 	 *
2156 	 *	82541GI (8086:1076) ... no
2157 	 *	82572EI (8086:10b9) ... yes
2158 	 */
2159 	if (sc->sc_type >= WM_T_82571) {
2160 		ifp->if_capabilities |=
2161 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2162 	}
2163 
2164 	/*
2165 	 * If we're an i82544 or greater (except the i82547), we can do
2166 	 * TCP segmentation offload.
2167 	 */
2168 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2169 		ifp->if_capabilities |= IFCAP_TSOv4;
2170 	}
2171 
2172 	if (sc->sc_type >= WM_T_82571) {
2173 		ifp->if_capabilities |= IFCAP_TSOv6;
2174 	}
2175 
2176 #ifdef WM_MPSAFE
2177 	sc->sc_tx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2178 	sc->sc_rx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2179 #else
2180 	sc->sc_tx_lock = NULL;
2181 	sc->sc_rx_lock = NULL;
2182 #endif
2183 
2184 	/* Attach the interface. */
2185 	if_attach(ifp);
2186 	ether_ifattach(ifp, enaddr);
2187 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2188 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2189 			  RND_FLAG_DEFAULT);
2190 
2191 #ifdef WM_EVENT_COUNTERS
2192 	/* Attach event counters. */
2193 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2194 	    NULL, xname, "txsstall");
2195 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2196 	    NULL, xname, "txdstall");
2197 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2198 	    NULL, xname, "txfifo_stall");
2199 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2200 	    NULL, xname, "txdw");
2201 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2202 	    NULL, xname, "txqe");
2203 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2204 	    NULL, xname, "rxintr");
2205 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2206 	    NULL, xname, "linkintr");
2207 
2208 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2209 	    NULL, xname, "rxipsum");
2210 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2211 	    NULL, xname, "rxtusum");
2212 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2213 	    NULL, xname, "txipsum");
2214 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2215 	    NULL, xname, "txtusum");
2216 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2217 	    NULL, xname, "txtusum6");
2218 
2219 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2220 	    NULL, xname, "txtso");
2221 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2222 	    NULL, xname, "txtso6");
2223 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2224 	    NULL, xname, "txtsopain");
2225 
2226 	for (i = 0; i < WM_NTXSEGS; i++) {
2227 		snprintf(wm_txseg_evcnt_names[i],
2228 		    sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2229 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2230 		    NULL, xname, wm_txseg_evcnt_names[i]);
2231 	}
2232 
2233 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2234 	    NULL, xname, "txdrop");
2235 
2236 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2237 	    NULL, xname, "tu");
2238 
2239 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2240 	    NULL, xname, "tx_xoff");
2241 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2242 	    NULL, xname, "tx_xon");
2243 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2244 	    NULL, xname, "rx_xoff");
2245 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2246 	    NULL, xname, "rx_xon");
2247 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2248 	    NULL, xname, "rx_macctl");
2249 #endif /* WM_EVENT_COUNTERS */
2250 
2251 	if (pmf_device_register(self, wm_suspend, wm_resume))
2252 		pmf_class_network_register(self, ifp);
2253 	else
2254 		aprint_error_dev(self, "couldn't establish power handler\n");
2255 
2256 	return;
2257 
2258 	/*
2259 	 * Free any resources we've allocated during the failed attach
2260 	 * attempt.  Do this in reverse order and fall through.
2261 	 */
2262  fail_5:
2263 	for (i = 0; i < WM_NRXDESC; i++) {
2264 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2265 			bus_dmamap_destroy(sc->sc_dmat,
2266 			    sc->sc_rxsoft[i].rxs_dmamap);
2267 	}
2268  fail_4:
2269 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2270 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
2271 			bus_dmamap_destroy(sc->sc_dmat,
2272 			    sc->sc_txsoft[i].txs_dmamap);
2273 	}
2274 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2275  fail_3:
2276 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2277  fail_2:
2278 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2279 	    sc->sc_cd_size);
2280  fail_1:
2281 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2282  fail_0:
2283 	return;
2284 }
2285 
2286 /* The detach function (ca_detach) */
2287 static int
2288 wm_detach(device_t self, int flags __unused)
2289 {
2290 	struct wm_softc *sc = device_private(self);
2291 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2292 	int i;
2293 #ifndef WM_MPSAFE
2294 	int s;
2295 
2296 	s = splnet();
2297 #endif
2298 	/* Stop the interface.  Callouts are stopped in wm_stop(). */
2299 	wm_stop(ifp, 1);
2300 
2301 #ifndef WM_MPSAFE
2302 	splx(s);
2303 #endif
2304 
2305 	pmf_device_deregister(self);
2306 
2307 	/* Tell the firmware about the release */
2308 	WM_BOTH_LOCK(sc);
2309 	wm_release_manageability(sc);
2310 	wm_release_hw_control(sc);
2311 	WM_BOTH_UNLOCK(sc);
2312 
2313 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2314 
2315 	/* Delete all remaining media. */
2316 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2317 
2318 	ether_ifdetach(ifp);
2319 	if_detach(ifp);
2320 
2322 	/* Unload RX dmamaps and free mbufs */
2323 	WM_RX_LOCK(sc);
2324 	wm_rxdrain(sc);
2325 	WM_RX_UNLOCK(sc);
2326 	/* Must unlock here */
2327 
2328 	/* Free DMA maps; this mirrors the failure path in wm_attach(). */
2329 	for (i = 0; i < WM_NRXDESC; i++) {
2330 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2331 			bus_dmamap_destroy(sc->sc_dmat,
2332 			    sc->sc_rxsoft[i].rxs_dmamap);
2333 	}
2334 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2335 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
2336 			bus_dmamap_destroy(sc->sc_dmat,
2337 			    sc->sc_txsoft[i].txs_dmamap);
2338 	}
2339 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2340 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2341 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2342 	    sc->sc_cd_size);
2343 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2344 
2345 	/* Disestablish the interrupt handler */
2346 	if (sc->sc_ih != NULL) {
2347 		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2348 		sc->sc_ih = NULL;
2349 	}
2350 
2351 	/* Unmap the registers */
2352 	if (sc->sc_ss) {
2353 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2354 		sc->sc_ss = 0;
2355 	}
2356 
2357 	if (sc->sc_ios) {
2358 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2359 		sc->sc_ios = 0;
2360 	}
2361 
2362 	if (sc->sc_tx_lock)
2363 		mutex_obj_free(sc->sc_tx_lock);
2364 	if (sc->sc_rx_lock)
2365 		mutex_obj_free(sc->sc_rx_lock);
2366 
2367 	return 0;
2368 }
2369 
2370 static bool
2371 wm_suspend(device_t self, const pmf_qual_t *qual)
2372 {
2373 	struct wm_softc *sc = device_private(self);
2374 
2375 	wm_release_manageability(sc);
2376 	wm_release_hw_control(sc);
2377 #ifdef WM_WOL
2378 	wm_enable_wakeup(sc);
2379 #endif
2380 
2381 	return true;
2382 }
2383 
2384 static bool
2385 wm_resume(device_t self, const pmf_qual_t *qual)
2386 {
2387 	struct wm_softc *sc = device_private(self);
2388 
2389 	wm_init_manageability(sc);
2390 
2391 	return true;
2392 }
2393 
2394 /*
2395  * wm_watchdog:		[ifnet interface function]
2396  *
2397  *	Watchdog timer handler.
2398  */
2399 static void
2400 wm_watchdog(struct ifnet *ifp)
2401 {
2402 	struct wm_softc *sc = ifp->if_softc;
2403 
2404 	/*
2405 	 * Since we're using delayed interrupts, sweep up
2406 	 * before we report an error.
2407 	 */
2408 	WM_TX_LOCK(sc);
2409 	wm_txintr(sc);
2410 	WM_TX_UNLOCK(sc);
2411 
2412 	if (sc->sc_txfree != WM_NTXDESC(sc)) {
2413 #ifdef WM_DEBUG
2414 		int i, j;
2415 		struct wm_txsoft *txs;
2416 #endif
2417 		log(LOG_ERR,
2418 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2419 		    device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2420 		    sc->sc_txnext);
2421 		ifp->if_oerrors++;
2422 #ifdef WM_DEBUG
2423 		for (i = sc->sc_txsdirty; i != sc->sc_txsnext;
2424 		    i = WM_NEXTTXS(sc, i)) {
2425 			txs = &sc->sc_txsoft[i];
2426 			printf("txs %d tx %d -> %d\n",
2427 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
2428 			for (j = txs->txs_firstdesc; ;
2429 			    j = WM_NEXTTX(sc, j)) {
2430 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2431 				    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
2432 				printf("\t %#08x%08x\n",
2433 				    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
2434 				    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
2435 				if (j == txs->txs_lastdesc)
2436 					break;
2437 			}
2438 		}
2439 #endif
2440 		/* Reset the interface. */
2441 		(void) wm_init(ifp);
2442 	}
2443 
2444 	/* Try to get more packets going. */
2445 	ifp->if_start(ifp);
2446 }
2447 
2448 /*
2449  * wm_tick:
2450  *
2451  *	One second timer, used to check link status, sweep up
2452  *	completed transmit jobs, etc.
2453  */
2454 static void
2455 wm_tick(void *arg)
2456 {
2457 	struct wm_softc *sc = arg;
2458 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2459 #ifndef WM_MPSAFE
2460 	int s;
2461 
2462 	s = splnet();
2463 #endif
2464 
2465 	WM_TX_LOCK(sc);
2466 
2467 	if (sc->sc_stopping)
2468 		goto out;
2469 
2470 	if (sc->sc_type >= WM_T_82542_2_1) {
2471 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2472 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2473 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2474 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2475 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2476 	}
2477 
2478 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2479 	ifp->if_ierrors += 0ULL + /* ensure quad_t */
2480 	    + CSR_READ(sc, WMREG_CRCERRS)
2481 	    + CSR_READ(sc, WMREG_ALGNERRC)
2482 	    + CSR_READ(sc, WMREG_SYMERRC)
2483 	    + CSR_READ(sc, WMREG_RXERRC)
2484 	    + CSR_READ(sc, WMREG_SEC)
2485 	    + CSR_READ(sc, WMREG_CEXTERR)
2486 	    + CSR_READ(sc, WMREG_RLEC);
2487 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2488 
2489 	if (sc->sc_flags & WM_F_HAS_MII)
2490 		mii_tick(&sc->sc_mii);
2491 	else
2492 		wm_tbi_check_link(sc);
2493 
2494 out:
2495 	WM_TX_UNLOCK(sc);
2496 #ifndef WM_MPSAFE
2497 	splx(s);
2498 #endif
2499 
2500 	if (!sc->sc_stopping)
2501 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2502 }
2503 
2504 static int
2505 wm_ifflags_cb(struct ethercom *ec)
2506 {
2507 	struct ifnet *ifp = &ec->ec_if;
2508 	struct wm_softc *sc = ifp->if_softc;
2509 	int change = ifp->if_flags ^ sc->sc_if_flags;
2510 	int rc = 0;
2511 
2512 	WM_BOTH_LOCK(sc);
2513 
2514 	if (change != 0)
2515 		sc->sc_if_flags = ifp->if_flags;
2516 
2517 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
2518 		rc = ENETRESET;
2519 		goto out;
2520 	}
2521 
2522 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2523 		wm_set_filter(sc);
2524 
2525 	wm_set_vlan(sc);
2526 
2527 out:
2528 	WM_BOTH_UNLOCK(sc);
2529 
2530 	return rc;
2531 }
2532 
2533 /*
2534  * wm_ioctl:		[ifnet interface function]
2535  *
2536  *	Handle control requests from the operator.
2537  */
2538 static int
2539 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2540 {
2541 	struct wm_softc *sc = ifp->if_softc;
2542 	struct ifreq *ifr = (struct ifreq *) data;
2543 	struct ifaddr *ifa = (struct ifaddr *)data;
2544 	struct sockaddr_dl *sdl;
2545 	int s, error;
2546 
2547 #ifndef WM_MPSAFE
2548 	s = splnet();
2549 #endif
2550 	WM_BOTH_LOCK(sc);
2551 
2552 	switch (cmd) {
2553 	case SIOCSIFMEDIA:
2554 	case SIOCGIFMEDIA:
2555 		/* Flow control requires full-duplex mode. */
2556 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2557 		    (ifr->ifr_media & IFM_FDX) == 0)
2558 			ifr->ifr_media &= ~IFM_ETH_FMASK;
2559 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2560 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2561 				/* We can do both TXPAUSE and RXPAUSE. */
2562 				ifr->ifr_media |=
2563 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2564 			}
2565 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2566 		}
2567 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2568 		break;
2569 	case SIOCINITIFADDR:
2570 		if (ifa->ifa_addr->sa_family == AF_LINK) {
2571 			sdl = satosdl(ifp->if_dl->ifa_addr);
2572 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2573 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2574 			/* unicast address is first multicast entry */
2575 			wm_set_filter(sc);
2576 			error = 0;
2577 			break;
2578 		}
2579 		/*FALLTHROUGH*/
2580 	default:
2581 		WM_BOTH_UNLOCK(sc);
2582 #ifdef WM_MPSAFE
2583 		s = splnet();
2584 #endif
2585 		/* It may call wm_start, so unlock here */
2586 		error = ether_ioctl(ifp, cmd, data);
2587 #ifdef WM_MPSAFE
2588 		splx(s);
2589 #endif
2590 		WM_BOTH_LOCK(sc);
2591 
2592 		if (error != ENETRESET)
2593 			break;
2594 
2595 		error = 0;
2596 
2597 		if (cmd == SIOCSIFCAP) {
2598 			WM_BOTH_UNLOCK(sc);
2599 			error = (*ifp->if_init)(ifp);
2600 			WM_BOTH_LOCK(sc);
2601 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2602 			;
2603 		else if (ifp->if_flags & IFF_RUNNING) {
2604 			/*
2605 			 * Multicast list has changed; set the hardware filter
2606 			 * accordingly.
2607 			 */
2608 			wm_set_filter(sc);
2609 		}
2610 		break;
2611 	}
2612 
2613 	WM_BOTH_UNLOCK(sc);
2614 
2615 	/* Try to get more packets going. */
2616 	ifp->if_start(ifp);
2617 
2618 #ifndef WM_MPSAFE
2619 	splx(s);
2620 #endif
2621 	return error;
2622 }
2623 
2624 /* MAC address related */
2625 
2626 static int
2627 wm_check_alt_mac_addr(struct wm_softc *sc)
2628 {
2629 	uint16_t myea[ETHER_ADDR_LEN / 2];
2630 	uint16_t offset = EEPROM_OFF_MACADDR;
2631 
2632 	/* Try to read alternative MAC address pointer */
2633 	if (wm_nvm_read(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
2634 		return -1;
2635 
2636 	/* Check pointer */
2637 	if (offset == 0xffff)
2638 		return -1;
2639 
2640 	/*
2641 	 * Check whether the alternative MAC address is valid.  Some cards
2642 	 * have a non-0xffff pointer but don't actually use an alternative
2643 	 * MAC address.
2644 	 *
2645 	 * To tell, check that the multicast bit of the first octet is clear.
2646 	 */
2647 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
2648 		if (((myea[0] & 0xff) & 0x01) == 0)
2649 			return 0; /* found! */
2650 
2651 	/* not found */
2652 	return -1;
2653 }
2654 
2655 static int
2656 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
2657 {
2658 	uint16_t myea[ETHER_ADDR_LEN / 2];
2659 	uint16_t offset = EEPROM_OFF_MACADDR;
2660 	int do_invert = 0;
2661 
2662 	switch (sc->sc_type) {
2663 	case WM_T_82580:
2664 	case WM_T_82580ER:
2665 	case WM_T_I350:
2666 	case WM_T_I354:
2667 		switch (sc->sc_funcid) {
2668 		case 0:
2669 			/* default value (== EEPROM_OFF_MACADDR) */
2670 			break;
2671 		case 1:
2672 			offset = EEPROM_OFF_LAN1;
2673 			break;
2674 		case 2:
2675 			offset = EEPROM_OFF_LAN2;
2676 			break;
2677 		case 3:
2678 			offset = EEPROM_OFF_LAN3;
2679 			break;
2680 		default:
2681 			goto bad;
2682 			/* NOTREACHED */
2683 			break;
2684 		}
2685 		break;
2686 	case WM_T_82571:
2687 	case WM_T_82575:
2688 	case WM_T_82576:
2689 	case WM_T_80003:
2690 	case WM_T_I210:
2691 	case WM_T_I211:
2692 		if (wm_check_alt_mac_addr(sc) != 0) {
2693 			/* reset the offset to LAN0 */
2694 			offset = EEPROM_OFF_MACADDR;
2695 			if ((sc->sc_funcid & 0x01) == 1)
2696 				do_invert = 1;
2697 			goto do_read;
2698 		}
2699 		switch (sc->sc_funcid) {
2700 		case 0:
2701 			/*
2702 			 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR
2703 			 * itself.
2704 			 */
2705 			break;
2706 		case 1:
2707 			offset += EEPROM_OFF_MACADDR_LAN1;
2708 			break;
2709 		case 2:
2710 			offset += EEPROM_OFF_MACADDR_LAN2;
2711 			break;
2712 		case 3:
2713 			offset += EEPROM_OFF_MACADDR_LAN3;
2714 			break;
2715 		default:
2716 			goto bad;
2717 			/* NOTREACHED */
2718 			break;
2719 		}
2720 		break;
2721 	default:
2722 		if ((sc->sc_funcid & 0x01) == 1)
2723 			do_invert = 1;
2724 		break;
2725 	}
2726 
2727  do_read:
2728 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
2729 		myea) != 0) {
2730 		goto bad;
2731 	}
2732 
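	/* The EEPROM stores the station address as three LE 16-bit words. */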
2733 	enaddr[0] = myea[0] & 0xff;
2734 	enaddr[1] = myea[0] >> 8;
2735 	enaddr[2] = myea[1] & 0xff;
2736 	enaddr[3] = myea[1] >> 8;
2737 	enaddr[4] = myea[2] & 0xff;
2738 	enaddr[5] = myea[2] >> 8;
2739 
2740 	/*
2741 	 * Toggle the LSB of the MAC address on the second port
2742 	 * of some dual port cards.
2743 	 */
2744 	if (do_invert != 0)
2745 		enaddr[5] ^= 1;
2746 
2747 	return 0;
2748 
2749  bad:
2750 	return -1;
2751 }
2752 
2753 /*
2754  * wm_set_ral:
2755  *
2756  *	Set an entry in the receive address list.
2757  */
2758 static void
2759 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2760 {
2761 	uint32_t ral_lo, ral_hi;
2762 
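	/*
	 * A slot is a register pair: RAL_LO holds the first four octets
	 * of the address, RAL_HI the last two plus the Address Valid
	 * bit.  A NULL enaddr clears the slot.
	 */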
2763 	if (enaddr != NULL) {
2764 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2765 		    (enaddr[3] << 24);
2766 		ral_hi = enaddr[4] | (enaddr[5] << 8);
2767 		ral_hi |= RAL_AV;
2768 	} else {
2769 		ral_lo = 0;
2770 		ral_hi = 0;
2771 	}
2772 
2773 	if (sc->sc_type >= WM_T_82544) {
2774 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2775 		    ral_lo);
2776 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2777 		    ral_hi);
2778 	} else {
2779 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2780 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2781 	}
2782 }
2783 
2784 /*
2785  * wm_mchash:
2786  *
2787  *	Compute the hash of the multicast address for the 4096-bit
2788  *	Compute the hash of the multicast address for the multicast
2789  *	filter (4096 bits, or 1024 bits on ICH/PCH chips).
2790 static uint32_t
2791 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2792 {
2793 	static const int lo_shift[4] = { 4, 3, 2, 0 };
2794 	static const int hi_shift[4] = { 4, 5, 6, 8 };
2795 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
2796 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
2797 	uint32_t hash;
2798 
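	/*
	 * The hash is built from bits of the two high-order octets of
	 * the address; sc_mchash_type selects which window of bits is
	 * used.
	 */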
2799 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2800 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2801 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
2802 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
2803 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
2804 		return (hash & 0x3ff);
2805 	}
2806 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2807 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2808 
2809 	return (hash & 0xfff);
2810 }
2811 
2812 /*
2813  * wm_set_filter:
2814  *
2815  *	Set up the receive filter.
2816  */
2817 static void
2818 wm_set_filter(struct wm_softc *sc)
2819 {
2820 	struct ethercom *ec = &sc->sc_ethercom;
2821 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2822 	struct ether_multi *enm;
2823 	struct ether_multistep step;
2824 	bus_addr_t mta_reg;
2825 	uint32_t hash, reg, bit;
2826 	int i, size;
2827 
2828 	if (sc->sc_type >= WM_T_82544)
2829 		mta_reg = WMREG_CORDOVA_MTA;
2830 	else
2831 		mta_reg = WMREG_MTA;
2832 
2833 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2834 
2835 	if (ifp->if_flags & IFF_BROADCAST)
2836 		sc->sc_rctl |= RCTL_BAM;
2837 	if (ifp->if_flags & IFF_PROMISC) {
2838 		sc->sc_rctl |= RCTL_UPE;
2839 		goto allmulti;
2840 	}
2841 
2842 	/*
2843 	 * Set the station address in the first RAL slot, and
2844 	 * clear the remaining slots.
2845 	 */
2846 	if (sc->sc_type == WM_T_ICH8)
2847 		size = WM_RAL_TABSIZE_ICH8 - 1;
2848 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
2849 	    || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
2850 	    || (sc->sc_type == WM_T_PCH_LPT))
2851 		size = WM_RAL_TABSIZE_ICH8;
2852 	else if (sc->sc_type == WM_T_82575)
2853 		size = WM_RAL_TABSIZE_82575;
2854 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
2855 		size = WM_RAL_TABSIZE_82576;
2856 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2857 		size = WM_RAL_TABSIZE_I350;
2858 	else
2859 		size = WM_RAL_TABSIZE;
2860 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
2861 	for (i = 1; i < size; i++)
2862 		wm_set_ral(sc, NULL, i);
2863 
2864 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2865 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2866 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
2867 		size = WM_ICH8_MC_TABSIZE;
2868 	else
2869 		size = WM_MC_TABSIZE;
2870 	/* Clear out the multicast table. */
2871 	for (i = 0; i < size; i++)
2872 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
2873 
2874 	ETHER_FIRST_MULTI(step, ec, enm);
2875 	while (enm != NULL) {
2876 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2877 			/*
2878 			 * We must listen to a range of multicast addresses.
2879 			 * For now, just accept all multicasts, rather than
2880 			 * trying to set only those filter bits needed to match
2881 			 * the range.  (At this time, the only use of address
2882 			 * ranges is for IP multicast routing, for which the
2883 			 * range is big enough to require all bits set.)
2884 			 */
2885 			goto allmulti;
2886 		}
2887 
2888 		hash = wm_mchash(sc, enm->enm_addrlo);
2889 
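		/*
		 * The hash indexes the multicast table: the upper bits
		 * select a 32-bit MTA register and the low five bits a
		 * bit within it.
		 */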
2890 		reg = (hash >> 5);
2891 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2892 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2893 		    || (sc->sc_type == WM_T_PCH2)
2894 		    || (sc->sc_type == WM_T_PCH_LPT))
2895 			reg &= 0x1f;
2896 		else
2897 			reg &= 0x7f;
2898 		bit = hash & 0x1f;
2899 
2900 		hash = CSR_READ(sc, mta_reg + (reg << 2));
2901 		hash |= 1U << bit;
2902 
2903 		/* XXX Hardware bug?? */
2904 		if (sc->sc_type == WM_T_82544 && (reg & 1) == 1) {
2905 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2906 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2907 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2908 		} else
2909 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2910 
2911 		ETHER_NEXT_MULTI(step, enm);
2912 	}
2913 
2914 	ifp->if_flags &= ~IFF_ALLMULTI;
2915 	goto setit;
2916 
2917  allmulti:
2918 	ifp->if_flags |= IFF_ALLMULTI;
2919 	sc->sc_rctl |= RCTL_MPE;
2920 
2921  setit:
2922 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
2923 }
2924 
2925 /* Reset and init related */
2926 
2927 static void
2928 wm_set_vlan(struct wm_softc *sc)
2929 {
2930 	/* Deal with VLAN enables. */
2931 	if (VLAN_ATTACHED(&sc->sc_ethercom))
2932 		sc->sc_ctrl |= CTRL_VME;
2933 	else
2934 		sc->sc_ctrl &= ~CTRL_VME;
2935 
2936 	/* Write the control registers. */
2937 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2938 }
2939 
2940 static void
2941 wm_set_pcie_completion_timeout(struct wm_softc *sc)
2942 {
2943 	uint32_t gcr;
2944 	pcireg_t ctrl2;
2945 
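	/*
	 * Where the timeout is programmed depends on the PCIe capability
	 * version: pre-version-2 devices take it from GCR, version-2
	 * devices from the PCIe Device Control 2 register.
	 */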
2946 	gcr = CSR_READ(sc, WMREG_GCR);
2947 
2948 	/* Only take action if timeout value is defaulted to 0 */
2949 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
2950 		goto out;
2951 
2952 	if ((gcr & GCR_CAP_VER2) == 0) {
2953 		gcr |= GCR_CMPL_TMOUT_10MS;
2954 		goto out;
2955 	}
2956 
2957 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
2958 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
2959 	ctrl2 |= WM_PCIE_DCSR2_16MS;
2960 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
2961 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
2962 
2963 out:
2964 	/* Disable completion timeout resend */
2965 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
2966 
2967 	CSR_WRITE(sc, WMREG_GCR, gcr);
2968 }
2969 
2970 void
2971 wm_get_auto_rd_done(struct wm_softc *sc)
2972 {
2973 	int i;
2974 
2975 	/* wait for eeprom to reload */
2976 	switch (sc->sc_type) {
2977 	case WM_T_82571:
2978 	case WM_T_82572:
2979 	case WM_T_82573:
2980 	case WM_T_82574:
2981 	case WM_T_82583:
2982 	case WM_T_82575:
2983 	case WM_T_82576:
2984 	case WM_T_82580:
2985 	case WM_T_82580ER:
2986 	case WM_T_I350:
2987 	case WM_T_I354:
2988 	case WM_T_I210:
2989 	case WM_T_I211:
2990 	case WM_T_80003:
2991 	case WM_T_ICH8:
2992 	case WM_T_ICH9:
2993 		for (i = 0; i < 10; i++) {
2994 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
2995 				break;
2996 			delay(1000);
2997 		}
2998 		if (i == 10) {
2999 			log(LOG_ERR, "%s: auto read from eeprom failed to "
3000 			    "complete\n", device_xname(sc->sc_dev));
3001 		}
3002 		break;
3003 	default:
3004 		break;
3005 	}
3006 }
3007 
3008 void
3009 wm_lan_init_done(struct wm_softc *sc)
3010 {
3011 	uint32_t reg = 0;
3012 	int i;
3013 
3014 	/* wait for eeprom to reload */
3015 	switch (sc->sc_type) {
3016 	case WM_T_ICH10:
3017 	case WM_T_PCH:
3018 	case WM_T_PCH2:
3019 	case WM_T_PCH_LPT:
3020 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3021 			reg = CSR_READ(sc, WMREG_STATUS);
3022 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
3023 				break;
3024 			delay(100);
3025 		}
3026 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3027 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
3028 			    "complete\n", device_xname(sc->sc_dev), __func__);
3029 		}
3030 		break;
3031 	default:
3032 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3033 		    __func__);
3034 		break;
3035 	}
3036 
3037 	reg &= ~STATUS_LAN_INIT_DONE;
3038 	CSR_WRITE(sc, WMREG_STATUS, reg);
3039 }
3040 
3041 void
3042 wm_get_cfg_done(struct wm_softc *sc)
3043 {
3044 	int mask;
3045 	uint32_t reg;
3046 	int i;
3047 
3048 	/* wait for eeprom to reload */
3049 	switch (sc->sc_type) {
3050 	case WM_T_82542_2_0:
3051 	case WM_T_82542_2_1:
3052 		/* null */
3053 		break;
3054 	case WM_T_82543:
3055 	case WM_T_82544:
3056 	case WM_T_82540:
3057 	case WM_T_82545:
3058 	case WM_T_82545_3:
3059 	case WM_T_82546:
3060 	case WM_T_82546_3:
3061 	case WM_T_82541:
3062 	case WM_T_82541_2:
3063 	case WM_T_82547:
3064 	case WM_T_82547_2:
3065 	case WM_T_82573:
3066 	case WM_T_82574:
3067 	case WM_T_82583:
3068 		/* generic */
3069 		delay(10*1000);
3070 		break;
3071 	case WM_T_80003:
3072 	case WM_T_82571:
3073 	case WM_T_82572:
3074 	case WM_T_82575:
3075 	case WM_T_82576:
3076 	case WM_T_82580:
3077 	case WM_T_82580ER:
3078 	case WM_T_I350:
3079 	case WM_T_I354:
3080 	case WM_T_I210:
3081 	case WM_T_I211:
3082 		if (sc->sc_type == WM_T_82571) {
3083 			/* Both ports of the 82571 share CFGDONE_0 */
3084 			mask = EEMNGCTL_CFGDONE_0;
3085 		} else
3086 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3087 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3088 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3089 				break;
3090 			delay(1000);
3091 		}
3092 		if (i >= WM_PHY_CFG_TIMEOUT) {
3093 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3094 				device_xname(sc->sc_dev), __func__));
3095 		}
3096 		break;
3097 	case WM_T_ICH8:
3098 	case WM_T_ICH9:
3099 	case WM_T_ICH10:
3100 	case WM_T_PCH:
3101 	case WM_T_PCH2:
3102 	case WM_T_PCH_LPT:
3103 		delay(10*1000);
3104 		if (sc->sc_type >= WM_T_ICH10)
3105 			wm_lan_init_done(sc);
3106 		else
3107 			wm_get_auto_rd_done(sc);
3108 
3109 		reg = CSR_READ(sc, WMREG_STATUS);
3110 		if ((reg & STATUS_PHYRA) != 0)
3111 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3112 		break;
3113 	default:
3114 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3115 		    __func__);
3116 		break;
3117 	}
3118 }
3119 
3120 /*
3121  * wm_reset:
3122  *
3123  *	Reset the i82542 chip.
3124  */
3125 static void
3126 wm_reset(struct wm_softc *sc)
3127 {
3128 	int phy_reset = 0;
3129 	int error = 0;
3130 	uint32_t reg, mask;
3131 
3132 	/*
3133 	 * Allocate on-chip memory according to the MTU size.
3134 	 * The Packet Buffer Allocation register must be written
3135 	 * before the chip is reset.
3136 	 */
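	/*
	 * PBA splits the on-chip packet memory between the receive
	 * packet buffer and the transmit FIFO; on the 82547 the space
	 * above sc_pba serves as the Tx FIFO for the stall work-around.
	 */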
3137 	switch (sc->sc_type) {
3138 	case WM_T_82547:
3139 	case WM_T_82547_2:
3140 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3141 		    PBA_22K : PBA_30K;
3142 		sc->sc_txfifo_head = 0;
3143 		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3144 		sc->sc_txfifo_size =
3145 		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3146 		sc->sc_txfifo_stall = 0;
3147 		break;
3148 	case WM_T_82571:
3149 	case WM_T_82572:
3150 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
3151 	case WM_T_I350:
3152 	case WM_T_I354:
3153 	case WM_T_80003:
3154 		sc->sc_pba = PBA_32K;
3155 		break;
3156 	case WM_T_82580:
3157 	case WM_T_82580ER:
3158 		sc->sc_pba = PBA_35K;
3159 		break;
3160 	case WM_T_I210:
3161 	case WM_T_I211:
3162 		sc->sc_pba = PBA_34K;
3163 		break;
3164 	case WM_T_82576:
3165 		sc->sc_pba = PBA_64K;
3166 		break;
3167 	case WM_T_82573:
3168 		sc->sc_pba = PBA_12K;
3169 		break;
3170 	case WM_T_82574:
3171 	case WM_T_82583:
3172 		sc->sc_pba = PBA_20K;
3173 		break;
3174 	case WM_T_ICH8:
3175 		sc->sc_pba = PBA_8K;
3176 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3177 		break;
3178 	case WM_T_ICH9:
3179 	case WM_T_ICH10:
3180 		sc->sc_pba = PBA_10K;
3181 		break;
3182 	case WM_T_PCH:
3183 	case WM_T_PCH2:
3184 	case WM_T_PCH_LPT:
3185 		sc->sc_pba = PBA_26K;
3186 		break;
3187 	default:
3188 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3189 		    PBA_40K : PBA_48K;
3190 		break;
3191 	}
3192 	CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3193 
3194 	/* Prevent the PCI-E bus from sticking */
3195 	if (sc->sc_flags & WM_F_PCIE) {
3196 		int timeout = 800;
3197 
3198 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
3199 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3200 
3201 		while (timeout--) {
3202 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3203 			    == 0)
3204 				break;
3205 			delay(100);
3206 		}
3207 	}
3208 
3209 	/* Set the completion timeout for interface */
3210 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3211 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
3212 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3213 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3214 		wm_set_pcie_completion_timeout(sc);
3215 
3216 	/* Clear interrupt */
3217 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3218 
3219 	/* Stop the transmit and receive processes. */
3220 	CSR_WRITE(sc, WMREG_RCTL, 0);
3221 	sc->sc_rctl &= ~RCTL_EN;
3222 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3223 	CSR_WRITE_FLUSH(sc);
3224 
3225 	/* XXX set_tbi_sbp_82543() */
3226 
3227 	delay(10*1000);
3228 
3229 	/* Must acquire the MDIO ownership before MAC reset */
3230 	switch (sc->sc_type) {
3231 	case WM_T_82573:
3232 	case WM_T_82574:
3233 	case WM_T_82583:
3234 		error = wm_get_hw_semaphore_82573(sc);
3235 		break;
3236 	default:
3237 		break;
3238 	}
3239 
3240 	/*
3241 	 * 82541 Errata 29? & 82547 Errata 28?
3242 	 * See also the description about PHY_RST bit in CTRL register
3243 	 * in 8254x_GBe_SDM.pdf.
3244 	 */
3245 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3246 		CSR_WRITE(sc, WMREG_CTRL,
3247 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3248 		CSR_WRITE_FLUSH(sc);
3249 		delay(5000);
3250 	}
3251 
3252 	switch (sc->sc_type) {
3253 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3254 	case WM_T_82541:
3255 	case WM_T_82541_2:
3256 	case WM_T_82547:
3257 	case WM_T_82547_2:
3258 		/*
3259 		 * On some chipsets, a reset through a memory-mapped write
3260 		 * cycle can cause the chip to reset before completing the
3261 		 * write cycle.  This causes major headache that can be
3262 		 * avoided by issuing the reset via indirect register writes
3263 		 * through I/O space.
3264 		 *
3265 		 * So, if we successfully mapped the I/O BAR at attach time,
3266 		 * use that.  Otherwise, try our luck with a memory-mapped
3267 		 * reset.
3268 		 */
3269 		if (sc->sc_flags & WM_F_IOH_VALID)
3270 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3271 		else
3272 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3273 		break;
3274 	case WM_T_82545_3:
3275 	case WM_T_82546_3:
3276 		/* Use the shadow control register on these chips. */
3277 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3278 		break;
3279 	case WM_T_80003:
3280 		mask = swfwphysem[sc->sc_funcid];
3281 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3282 		wm_get_swfw_semaphore(sc, mask);
3283 		CSR_WRITE(sc, WMREG_CTRL, reg);
3284 		wm_put_swfw_semaphore(sc, mask);
3285 		break;
3286 	case WM_T_ICH8:
3287 	case WM_T_ICH9:
3288 	case WM_T_ICH10:
3289 	case WM_T_PCH:
3290 	case WM_T_PCH2:
3291 	case WM_T_PCH_LPT:
3292 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3293 		if (wm_check_reset_block(sc) == 0) {
3294 			/*
3295 			 * Gate automatic PHY configuration by hardware on
3296 			 * non-managed 82579
3297 			 */
3298 			if ((sc->sc_type == WM_T_PCH2)
3299 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3300 				!= 0))
3301 				wm_gate_hw_phy_config_ich8lan(sc, 1);
3302 
3304 			reg |= CTRL_PHY_RESET;
3305 			phy_reset = 1;
3306 		}
3307 		wm_get_swfwhw_semaphore(sc);
3308 		CSR_WRITE(sc, WMREG_CTRL, reg);
3309 		/* Don't insert a completion barrier when resetting */
3310 		delay(20*1000);
3311 		wm_put_swfwhw_semaphore(sc);
3312 		break;
3313 	case WM_T_82542_2_0:
3314 	case WM_T_82542_2_1:
3315 	case WM_T_82543:
3316 	case WM_T_82540:
3317 	case WM_T_82545:
3318 	case WM_T_82546:
3319 	case WM_T_82571:
3320 	case WM_T_82572:
3321 	case WM_T_82573:
3322 	case WM_T_82574:
3323 	case WM_T_82575:
3324 	case WM_T_82576:
3325 	case WM_T_82580:
3326 	case WM_T_82580ER:
3327 	case WM_T_82583:
3328 	case WM_T_I350:
3329 	case WM_T_I354:
3330 	case WM_T_I210:
3331 	case WM_T_I211:
3332 	default:
3333 		/* Everything else can safely use the documented method. */
3334 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3335 		break;
3336 	}
3337 
3338 	/* Must release the MDIO ownership after MAC reset */
3339 	switch (sc->sc_type) {
3340 	case WM_T_82573:
3341 	case WM_T_82574:
3342 	case WM_T_82583:
3343 		if (error == 0)
3344 			wm_put_hw_semaphore_82573(sc);
3345 		break;
3346 	default:
3347 		break;
3348 	}
3349 
3350 	if (phy_reset != 0)
3351 		wm_get_cfg_done(sc);
3352 
3353 	/* reload EEPROM */
3354 	switch (sc->sc_type) {
3355 	case WM_T_82542_2_0:
3356 	case WM_T_82542_2_1:
3357 	case WM_T_82543:
3358 	case WM_T_82544:
3359 		delay(10);
3360 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3361 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3362 		CSR_WRITE_FLUSH(sc);
3363 		delay(2000);
3364 		break;
3365 	case WM_T_82540:
3366 	case WM_T_82545:
3367 	case WM_T_82545_3:
3368 	case WM_T_82546:
3369 	case WM_T_82546_3:
3370 		delay(5*1000);
3371 		/* XXX Disable HW ARPs on ASF enabled adapters */
3372 		break;
3373 	case WM_T_82541:
3374 	case WM_T_82541_2:
3375 	case WM_T_82547:
3376 	case WM_T_82547_2:
3377 		delay(20000);
3378 		/* XXX Disable HW ARPs on ASF enabled adapters */
3379 		break;
3380 	case WM_T_82571:
3381 	case WM_T_82572:
3382 	case WM_T_82573:
3383 	case WM_T_82574:
3384 	case WM_T_82583:
3385 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3386 			delay(10);
3387 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3388 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3389 			CSR_WRITE_FLUSH(sc);
3390 		}
3391 		/* check EECD_EE_AUTORD */
3392 		wm_get_auto_rd_done(sc);
3393 		/*
3394 		 * PHY configuration from the NVM starts only after
3395 		 * EECD_AUTO_RD is set.
3396 		 */
3397 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3398 		    || (sc->sc_type == WM_T_82583))
3399 			delay(25*1000);
3400 		break;
3401 	case WM_T_82575:
3402 	case WM_T_82576:
3403 	case WM_T_82580:
3404 	case WM_T_82580ER:
3405 	case WM_T_I350:
3406 	case WM_T_I354:
3407 	case WM_T_I210:
3408 	case WM_T_I211:
3409 	case WM_T_80003:
3410 		/* check EECD_EE_AUTORD */
3411 		wm_get_auto_rd_done(sc);
3412 		break;
3413 	case WM_T_ICH8:
3414 	case WM_T_ICH9:
3415 	case WM_T_ICH10:
3416 	case WM_T_PCH:
3417 	case WM_T_PCH2:
3418 	case WM_T_PCH_LPT:
3419 		break;
3420 	default:
3421 		panic("%s: unknown type\n", __func__);
3422 	}
3423 
3424 	/* Check whether EEPROM is present or not */
3425 	switch (sc->sc_type) {
3426 	case WM_T_82575:
3427 	case WM_T_82576:
3428 #if 0 /* XXX */
3429 	case WM_T_82580:
3430 	case WM_T_82580ER:
3431 #endif
3432 	case WM_T_I350:
3433 	case WM_T_I354:
3434 	case WM_T_ICH8:
3435 	case WM_T_ICH9:
3436 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3437 			/* Not found */
3438 			sc->sc_flags |= WM_F_EEPROM_INVALID;
3439 			if ((sc->sc_type == WM_T_82575)
3440 			    || (sc->sc_type == WM_T_82576)
3441 			    || (sc->sc_type == WM_T_82580)
3442 			    || (sc->sc_type == WM_T_82580ER)
3443 			    || (sc->sc_type == WM_T_I350)
3444 			    || (sc->sc_type == WM_T_I354))
3445 				wm_reset_init_script_82575(sc);
3446 		}
3447 		break;
3448 	default:
3449 		break;
3450 	}
3451 
3452 	if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
3453 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
3454 		/* clear global device reset status bit */
3455 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
3456 	}
3457 
3458 	/* Clear any pending interrupt events. */
3459 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3460 	reg = CSR_READ(sc, WMREG_ICR);
3461 
3462 	/* reload sc_ctrl */
3463 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3464 
3465 	if (sc->sc_type == WM_T_I350)
3466 		wm_set_eee_i350(sc);
3467 
3468 	/* dummy read from WUC */
3469 	if (sc->sc_type == WM_T_PCH)
3470 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3471 	/*
3472 	 * For PCH, this write will make sure that any noise will be detected
3473 	 * as a CRC error and be dropped rather than show up as a bad packet
3474 	 * to the DMA engine.
3475 	 */
3476 	if (sc->sc_type == WM_T_PCH)
3477 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3478 
3479 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3480 		CSR_WRITE(sc, WMREG_WUC, 0);
3481 
3482 	/* XXX need special handling for 82580 */
3483 }
3484 
3485 /*
3486  * wm_add_rxbuf:
3487  *
3488  *	Add a receive buffer to the indicated descriptor.
3489  */
3490 static int
3491 wm_add_rxbuf(struct wm_softc *sc, int idx)
3492 {
3493 	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
3494 	struct mbuf *m;
3495 	int error;
3496 
3497 	KASSERT(WM_RX_LOCKED(sc));
3498 
3499 	MGETHDR(m, M_DONTWAIT, MT_DATA);
3500 	if (m == NULL)
3501 		return ENOBUFS;
3502 
3503 	MCLGET(m, M_DONTWAIT);
3504 	if ((m->m_flags & M_EXT) == 0) {
3505 		m_freem(m);
3506 		return ENOBUFS;
3507 	}
3508 
3509 	if (rxs->rxs_mbuf != NULL)
3510 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3511 
3512 	rxs->rxs_mbuf = m;
3513 
3514 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3515 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3516 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
3517 	if (error) {
3518 		/* XXX XXX XXX */
3519 		aprint_error_dev(sc->sc_dev,
3520 		    "unable to load rx DMA map %d, error = %d\n",
3521 		    idx, error);
3522 		panic("wm_add_rxbuf");
3523 	}
3524 
3525 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3526 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3527 
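	/*
	 * On NEWQUEUE (82575 and newer) chips, only (re)initialize the
	 * descriptor once the receiver has been enabled; see the
	 * matching comment in wm_init_locked().
	 */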
3528 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3529 		if ((sc->sc_rctl & RCTL_EN) != 0)
3530 			WM_INIT_RXDESC(sc, idx);
3531 	} else
3532 		WM_INIT_RXDESC(sc, idx);
3533 
3534 	return 0;
3535 }
3536 
3537 /*
3538  * wm_rxdrain:
3539  *
3540  *	Drain the receive queue.
3541  */
3542 static void
3543 wm_rxdrain(struct wm_softc *sc)
3544 {
3545 	struct wm_rxsoft *rxs;
3546 	int i;
3547 
3548 	KASSERT(WM_RX_LOCKED(sc));
3549 
3550 	for (i = 0; i < WM_NRXDESC; i++) {
3551 		rxs = &sc->sc_rxsoft[i];
3552 		if (rxs->rxs_mbuf != NULL) {
3553 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3554 			m_freem(rxs->rxs_mbuf);
3555 			rxs->rxs_mbuf = NULL;
3556 		}
3557 	}
3558 }
3559 
3560 /*
3561  * wm_init:		[ifnet interface function]
3562  *
3563  *	Initialize the interface.
3564  */
3565 static int
3566 wm_init(struct ifnet *ifp)
3567 {
3568 	struct wm_softc *sc = ifp->if_softc;
3569 	int ret;
3570 
3571 	WM_BOTH_LOCK(sc);
3572 	ret = wm_init_locked(ifp);
3573 	WM_BOTH_UNLOCK(sc);
3574 
3575 	return ret;
3576 }
3577 
3578 static int
3579 wm_init_locked(struct ifnet *ifp)
3580 {
3581 	struct wm_softc *sc = ifp->if_softc;
3582 	struct wm_rxsoft *rxs;
3583 	int i, j, trynum, error = 0;
3584 	uint32_t reg;
3585 
3586 	KASSERT(WM_BOTH_LOCKED(sc));
3587 	/*
3588 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3589 	 * There is a small but measurable benefit to avoiding the adjustment
3590 	 * of the descriptor so that the headers are aligned, for normal mtu,
3591 	 * on such platforms.  One possibility is that the DMA itself is
3592 	 * slightly more efficient if the front of the entire packet (instead
3593 	 * of the front of the headers) is aligned.
3594 	 *
3595 	 * Note we must always set align_tweak to 0 if we are using
3596 	 * jumbo frames.
3597 	 */
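	/*
	 * For example, assuming MCLBYTES == 2048 and the standard
	 * 1500-byte MTU: 1500 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4)
	 * = 1518 <= 2046, so align_tweak is set to 2 below, which
	 * leaves the IP header 4-byte aligned after the 14-byte
	 * Ethernet header.
	 */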
3598 #ifdef __NO_STRICT_ALIGNMENT
3599 	sc->sc_align_tweak = 0;
3600 #else
3601 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3602 		sc->sc_align_tweak = 0;
3603 	else
3604 		sc->sc_align_tweak = 2;
3605 #endif /* __NO_STRICT_ALIGNMENT */
3606 
3607 	/* Cancel any pending I/O. */
3608 	wm_stop_locked(ifp, 0);
3609 
3610 	/* update statistics before reset */
3611 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3612 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3613 
3614 	/* Reset the chip to a known state. */
3615 	wm_reset(sc);
3616 
3617 	switch (sc->sc_type) {
3618 	case WM_T_82571:
3619 	case WM_T_82572:
3620 	case WM_T_82573:
3621 	case WM_T_82574:
3622 	case WM_T_82583:
3623 	case WM_T_80003:
3624 	case WM_T_ICH8:
3625 	case WM_T_ICH9:
3626 	case WM_T_ICH10:
3627 	case WM_T_PCH:
3628 	case WM_T_PCH2:
3629 	case WM_T_PCH_LPT:
3630 		if (wm_check_mng_mode(sc) != 0)
3631 			wm_get_hw_control(sc);
3632 		break;
3633 	default:
3634 		break;
3635 	}
3636 
3637 	/* Reset the PHY. */
3638 	if (sc->sc_flags & WM_F_HAS_MII)
3639 		wm_gmii_reset(sc);
3640 
3641 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
3642 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
3643 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
3644 	    || (sc->sc_type == WM_T_PCH_LPT))
3645 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
3646 
3647 	/* Initialize the transmit descriptor ring. */
3648 	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3649 	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3650 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3651 	sc->sc_txfree = WM_NTXDESC(sc);
3652 	sc->sc_txnext = 0;
3653 
3654 	if (sc->sc_type < WM_T_82543) {
3655 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
3656 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
3657 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3658 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3659 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3660 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3661 	} else {
3662 		CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
3663 		CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
3664 		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3665 		CSR_WRITE(sc, WMREG_TDH, 0);
3666 		CSR_WRITE(sc, WMREG_TIDV, 375);		/* ITR / 4 */
3667 		CSR_WRITE(sc, WMREG_TADV, 375);		/* should be same */
3668 
3669 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3670 			/*
3671 			 * Don't write TDT before TCTL.EN is set.
3672 			 * See the documentation.
3673 			 */
3674 			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
3675 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
3676 			    | TXDCTL_WTHRESH(0));
3677 		else {
3678 			CSR_WRITE(sc, WMREG_TDT, 0);
3679 			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3680 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3681 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3682 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3683 		}
3684 	}
3685 	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3686 	CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3687 
3688 	/* Initialize the transmit job descriptors. */
3689 	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3690 		sc->sc_txsoft[i].txs_mbuf = NULL;
3691 	sc->sc_txsfree = WM_TXQUEUELEN(sc);
3692 	sc->sc_txsnext = 0;
3693 	sc->sc_txsdirty = 0;
3694 
3695 	/*
3696 	 * Initialize the receive descriptor and receive job
3697 	 * descriptor rings.
3698 	 */
3699 	if (sc->sc_type < WM_T_82543) {
3700 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3701 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3702 		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3703 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3704 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3705 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3706 
3707 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3708 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3709 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3710 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3711 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3712 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3713 	} else {
3714 		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3715 		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3716 		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3717 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3718 			CSR_WRITE(sc, WMREG_EITR(0), 450);
3719 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
3720 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
3721 			CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
3722 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
3723 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
3724 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
3725 			    | RXDCTL_WTHRESH(1));
3726 		} else {
3727 			CSR_WRITE(sc, WMREG_RDH, 0);
3728 			CSR_WRITE(sc, WMREG_RDT, 0);
3729 			CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
3730 			CSR_WRITE(sc, WMREG_RADV, 375);	/* MUST be same */
3731 		}
3732 	}
3733 	for (i = 0; i < WM_NRXDESC; i++) {
3734 		rxs = &sc->sc_rxsoft[i];
3735 		if (rxs->rxs_mbuf == NULL) {
3736 			if ((error = wm_add_rxbuf(sc, i)) != 0) {
3737 				log(LOG_ERR, "%s: unable to allocate or map "
3738 				    "rx buffer %d, error = %d\n",
3739 				    device_xname(sc->sc_dev), i, error);
3740 				/*
3741 				 * XXX Should attempt to run with fewer receive
3742 				 * XXX buffers instead of just failing.
3743 				 */
3744 				wm_rxdrain(sc);
3745 				goto out;
3746 			}
3747 		} else {
3748 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3749 				WM_INIT_RXDESC(sc, i);
3750 			/*
3751 			 * For 82575 and newer devices, the RX descriptors
3752 			 * must be initialized after the setting of RCTL.EN in
3753 			 * wm_set_filter()
3754 			 */
3755 		}
3756 	}
3757 	sc->sc_rxptr = 0;
3758 	sc->sc_rxdiscard = 0;
3759 	WM_RXCHAIN_RESET(sc);
3760 
3761 	/*
3762 	 * Clear out the VLAN table -- we don't use it (yet).
3763 	 */
3764 	CSR_WRITE(sc, WMREG_VET, 0);
3765 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3766 		trynum = 10; /* Due to hw errata */
3767 	else
3768 		trynum = 1;
3769 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
3770 		for (j = 0; j < trynum; j++)
3771 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3772 
3773 	/*
3774 	 * Set up flow-control parameters.
3775 	 *
3776 	 * XXX Values could probably stand some tuning.
3777 	 */
3778 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
3779 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
3780 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
3781 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3782 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3783 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3784 	}
3785 
3786 	sc->sc_fcrtl = FCRTL_DFLT;
3787 	if (sc->sc_type < WM_T_82543) {
3788 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3789 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3790 	} else {
3791 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3792 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3793 	}
3794 
3795 	if (sc->sc_type == WM_T_80003)
3796 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
3797 	else
3798 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3799 
3800 	/* Writes the control register. */
3801 	wm_set_vlan(sc);
3802 
3803 	if (sc->sc_flags & WM_F_HAS_MII) {
3804 		int val;
3805 
3806 		switch (sc->sc_type) {
3807 		case WM_T_80003:
3808 		case WM_T_ICH8:
3809 		case WM_T_ICH9:
3810 		case WM_T_ICH10:
3811 		case WM_T_PCH:
3812 		case WM_T_PCH2:
3813 		case WM_T_PCH_LPT:
3814 			/*
3815 			 * Set the MAC to wait the maximum time between
3816 			 * iterations and increase the maximum number of
3817 			 * iterations used when polling the PHY; this fixes
3818 			 * erroneous timeouts at 10Mbps.
3819 			 */
3820 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
3821 			    0xFFFF);
3822 			val = wm_kmrn_readreg(sc,
3823 			    KUMCTRLSTA_OFFSET_INB_PARAM);
3824 			val |= 0x3F;
3825 			wm_kmrn_writereg(sc,
3826 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
3827 			break;
3828 		default:
3829 			break;
3830 		}
3831 
3832 		if (sc->sc_type == WM_T_80003) {
3833 			val = CSR_READ(sc, WMREG_CTRL_EXT);
3834 			val &= ~CTRL_EXT_LINK_MODE_MASK;
3835 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3836 
3837 			/* Bypass RX and TX FIFO's */
3838 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3839 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
3840 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3841 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3842 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3843 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3844 		}
3845 	}
3846 #if 0
3847 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3848 #endif
3849 
3850 	/* Set up checksum offload parameters. */
3851 	reg = CSR_READ(sc, WMREG_RXCSUM);
3852 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3853 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3854 		reg |= RXCSUM_IPOFL;
3855 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3856 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3857 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3858 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3859 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
3860 
3861 	/* Set up the interrupt registers. */
3862 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3863 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3864 	    ICR_RXO | ICR_RXT0;
3865 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
3866 
3867 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3868 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3869 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
3870 		reg = CSR_READ(sc, WMREG_KABGTXD);
3871 		reg |= KABGTXD_BGSQLBIAS;
3872 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
3873 	}
3874 
3875 	/* Set up the inter-packet gap. */
3876 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
3877 
3878 	if (sc->sc_type >= WM_T_82543) {
3879 		/*
3880 		 * Set up the interrupt throttling register (units of 256ns)
3881 		 * Note that a footnote in Intel's documentation says this
3882 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
3883 		 * or 10Mbit mode.  Empirically, it appears to be the case
3884 		 * or 10Mbit mode.  Empirically, this also appears to be
3885 		 * true for the 1024ns units of the other
3886 		 * to divide this value by 4 when the link speed is low.
3887 		 *
3888 		 * XXX implement this division at link speed change!
3889 		 */
3890 
3891 		/*
3892 		 * For N interrupts/sec, set this value to:
3893 		 * 1000000000 / (N * 256).  Note that we set the
3894 		 * absolute and packet timer values to this value
3895 		 * divided by 4 to get "simple timer" behavior.
3896 		 */
3897 
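		/*
		 * Worked example: for N = 2604 ints/sec,
		 * 1000000000 / (2604 * 256) =~ 1500, the value written
		 * to ITR below; 1500 / 4 = 375 matches the TIDV/TADV
		 * (and RDTR/RADV) values programmed earlier.
		 */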
3898 		sc->sc_itr = 1500;		/* 2604 ints/sec */
3899 		CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
3900 	}
3901 
3902 	/* Set the VLAN ethernetype. */
3903 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
3904 
3905 	/*
3906 	 * Set up the transmit control register; we start out with
3907 	 * a collision distance suitable for FDX, but update it when
3908 	 * we resolve the media type.
3909 	 */
3910 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
3911 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
3912 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3913 	if (sc->sc_type >= WM_T_82571)
3914 		sc->sc_tctl |= TCTL_MULR;
3915 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3916 
3917 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3918 		/* Write TDT after TCTL.EN is set.  See the documentation. */
3919 		CSR_WRITE(sc, WMREG_TDT, 0);
3920 	}
3921 
3922 	if (sc->sc_type == WM_T_80003) {
3923 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
3924 		reg &= ~TCTL_EXT_GCEX_MASK;
3925 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
3926 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
3927 	}
3928 
3929 	/* Set the media. */
3930 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
3931 		goto out;
3932 
3933 	/* Configure for OS presence */
3934 	wm_init_manageability(sc);
3935 
3936 	/*
3937 	 * Set up the receive control register; we actually program
3938 	 * the register when we set the receive filter.  Use multicast
3939 	 * address offset type 0.
3940 	 *
3941 	 * Only the i82544 has the ability to strip the incoming
3942 	 * CRC, so we don't enable that feature.
3943 	 */
3944 	sc->sc_mchash_type = 0;
3945 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
3946 	    | RCTL_MO(sc->sc_mchash_type);
3947 
3948 	/*
3949 	 * The I350 has a bug where it always strips the CRC whether
3950 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
3951 	 * asked to or not, so request a stripped CRC here and cope in rxeof.
3952 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3953 	    || (sc->sc_type == WM_T_I210))
3954 		sc->sc_rctl |= RCTL_SECRC;
3955 
3956 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
3957 	    && (ifp->if_mtu > ETHERMTU)) {
3958 		sc->sc_rctl |= RCTL_LPE;
3959 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3960 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
3961 	}
3962 
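	/*
	 * Program the Rx buffer size to match the mbuf cluster size.
	 * Sizes other than 2k use the extended (BSEX) encoding, which
	 * this driver only uses on the 82543 and later.
	 */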
3963 	if (MCLBYTES == 2048) {
3964 		sc->sc_rctl |= RCTL_2k;
3965 	} else {
3966 		if (sc->sc_type >= WM_T_82543) {
3967 			switch (MCLBYTES) {
3968 			case 4096:
3969 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
3970 				break;
3971 			case 8192:
3972 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
3973 				break;
3974 			case 16384:
3975 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
3976 				break;
3977 			default:
3978 				panic("wm_init: MCLBYTES %d unsupported",
3979 				    MCLBYTES);
3980 				break;
3981 			}
3982 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
3983 	}
3984 
3985 	/* Set the receive filter. */
3986 	wm_set_filter(sc);
3987 
3988 	/* Enable ECC */
3989 	switch (sc->sc_type) {
3990 	case WM_T_82571:
3991 		reg = CSR_READ(sc, WMREG_PBA_ECC);
3992 		reg |= PBA_ECC_CORR_EN;
3993 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
3994 		break;
3995 	case WM_T_PCH_LPT:
3996 		reg = CSR_READ(sc, WMREG_PBECCSTS);
3997 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
3998 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
3999 
4000 		reg = CSR_READ(sc, WMREG_CTRL);
4001 		reg |= CTRL_MEHE;
4002 		CSR_WRITE(sc, WMREG_CTRL, reg);
4003 		break;
4004 	default:
4005 		break;
4006 	}
4007 
4008 	/* On 575 and later set RDT only if RX enabled */
4009 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4010 		for (i = 0; i < WM_NRXDESC; i++)
4011 			WM_INIT_RXDESC(sc, i);
4012 
4013 	sc->sc_stopping = false;
4014 
4015 	/* Start the one second link check clock. */
4016 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4017 
4018 	/* ...all done! */
4019 	ifp->if_flags |= IFF_RUNNING;
4020 	ifp->if_flags &= ~IFF_OACTIVE;
4021 
4022  out:
4023 	sc->sc_if_flags = ifp->if_flags;
4024 	if (error)
4025 		log(LOG_ERR, "%s: interface not running\n",
4026 		    device_xname(sc->sc_dev));
4027 	return error;
4028 }
4029 
4030 /*
4031  * wm_stop:		[ifnet interface function]
4032  *
4033  *	Stop transmission on the interface.
4034  */
4035 static void
4036 wm_stop(struct ifnet *ifp, int disable)
4037 {
4038 	struct wm_softc *sc = ifp->if_softc;
4039 
4040 	WM_BOTH_LOCK(sc);
4041 	wm_stop_locked(ifp, disable);
4042 	WM_BOTH_UNLOCK(sc);
4043 }
4044 
4045 static void
4046 wm_stop_locked(struct ifnet *ifp, int disable)
4047 {
4048 	struct wm_softc *sc = ifp->if_softc;
4049 	struct wm_txsoft *txs;
4050 	int i;
4051 
4052 	KASSERT(WM_BOTH_LOCKED(sc));
4053 
4054 	sc->sc_stopping = true;
4055 
4056 	/* Stop the one second clock. */
4057 	callout_stop(&sc->sc_tick_ch);
4058 
4059 	/* Stop the 82547 Tx FIFO stall check timer. */
4060 	if (sc->sc_type == WM_T_82547)
4061 		callout_stop(&sc->sc_txfifo_ch);
4062 
4063 	if (sc->sc_flags & WM_F_HAS_MII) {
4064 		/* Down the MII. */
4065 		mii_down(&sc->sc_mii);
4066 	} else {
4067 #if 0
4068 		/* Should we clear PHY's status properly? */
4069 		wm_reset(sc);
4070 #endif
4071 	}
4072 
4073 	/* Stop the transmit and receive processes. */
4074 	CSR_WRITE(sc, WMREG_TCTL, 0);
4075 	CSR_WRITE(sc, WMREG_RCTL, 0);
4076 	sc->sc_rctl &= ~RCTL_EN;
4077 
4078 	/*
4079 	 * Clear the interrupt mask to ensure the device cannot assert its
4080 	 * interrupt line.
4081 	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4082 	 * any currently pending or shared interrupt.
4083 	 */
4084 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4085 	sc->sc_icr = 0;
4086 
4087 	/* Release any queued transmit buffers. */
4088 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4089 		txs = &sc->sc_txsoft[i];
4090 		if (txs->txs_mbuf != NULL) {
4091 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4092 			m_freem(txs->txs_mbuf);
4093 			txs->txs_mbuf = NULL;
4094 		}
4095 	}
4096 
4097 	/* Mark the interface as down and cancel the watchdog timer. */
4098 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4099 	ifp->if_timer = 0;
4100 
4101 	if (disable)
4102 		wm_rxdrain(sc);
4103 
4104 #if 0 /* notyet */
4105 	if (sc->sc_type >= WM_T_82544)
4106 		CSR_WRITE(sc, WMREG_WUC, 0);
4107 #endif
4108 }
4109 
4110 /*
4111  * wm_tx_offload:
4112  *
4113  *	Set up TCP/IP checksumming parameters for the
4114  *	specified packet.
4115  */
4116 static int
4117 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
4118     uint8_t *fieldsp)
4119 {
4120 	struct mbuf *m0 = txs->txs_mbuf;
4121 	struct livengood_tcpip_ctxdesc *t;
4122 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
4123 	uint32_t ipcse;
4124 	struct ether_header *eh;
4125 	int offset, iphl;
4126 	uint8_t fields;
4127 
4128 	/*
4129 	 * XXX It would be nice if the mbuf pkthdr had offset
4130 	 * fields for the protocol headers.
4131 	 */
4132 
4133 	eh = mtod(m0, struct ether_header *);
4134 	switch (htons(eh->ether_type)) {
4135 	case ETHERTYPE_IP:
4136 	case ETHERTYPE_IPV6:
4137 		offset = ETHER_HDR_LEN;
4138 		break;
4139 
4140 	case ETHERTYPE_VLAN:
4141 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4142 		break;
4143 
4144 	default:
4145 		/*
4146 		 * Don't support this protocol or encapsulation.
4147 		 */
4148 		*fieldsp = 0;
4149 		*cmdp = 0;
4150 		return 0;
4151 	}
4152 
4153 	if ((m0->m_pkthdr.csum_flags &
4154 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
4155 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4156 	} else {
4157 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4158 	}
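	/*
	 * ipcse is the offset of the last byte of the IP header, i.e.
	 * where IP checksum coverage ends.
	 */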
4159 	ipcse = offset + iphl - 1;
4160 
4161 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
4162 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
4163 	seg = 0;
4164 	fields = 0;
4165 
4166 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4167 		int hlen = offset + iphl;
4168 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4169 
4170 		if (__predict_false(m0->m_len <
4171 				    (hlen + sizeof(struct tcphdr)))) {
4172 			/*
4173 			 * TCP/IP headers are not in the first mbuf; we need
4174 			 * to do this the slow and painful way.  Let's just
4175 			 * hope this doesn't happen very often.
4176 			 */
4177 			struct tcphdr th;
4178 
4179 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4180 
4181 			m_copydata(m0, hlen, sizeof(th), &th);
4182 			if (v4) {
4183 				struct ip ip;
4184 
4185 				m_copydata(m0, offset, sizeof(ip), &ip);
4186 				ip.ip_len = 0;
4187 				m_copyback(m0,
4188 				    offset + offsetof(struct ip, ip_len),
4189 				    sizeof(ip.ip_len), &ip.ip_len);
4190 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4191 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4192 			} else {
4193 				struct ip6_hdr ip6;
4194 
4195 				m_copydata(m0, offset, sizeof(ip6), &ip6);
4196 				ip6.ip6_plen = 0;
4197 				m_copyback(m0,
4198 				    offset + offsetof(struct ip6_hdr, ip6_plen),
4199 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4200 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4201 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4202 			}
4203 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4204 			    sizeof(th.th_sum), &th.th_sum);
4205 
4206 			hlen += th.th_off << 2;
4207 		} else {
4208 			/*
4209 			 * TCP/IP headers are in the first mbuf; we can do
4210 			 * this the easy way.
4211 			 */
4212 			struct tcphdr *th;
4213 
4214 			if (v4) {
4215 				struct ip *ip =
4216 				    (void *)(mtod(m0, char *) + offset);
4217 				th = (void *)(mtod(m0, char *) + hlen);
4218 
4219 				ip->ip_len = 0;
4220 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4221 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4222 			} else {
4223 				struct ip6_hdr *ip6 =
4224 				    (void *)(mtod(m0, char *) + offset);
4225 				th = (void *)(mtod(m0, char *) + hlen);
4226 
4227 				ip6->ip6_plen = 0;
4228 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4229 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
4230 			}
4231 			hlen += th->th_off << 2;
4232 		}
4233 
4234 		if (v4) {
4235 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
4236 			cmdlen |= WTX_TCPIP_CMD_IP;
4237 		} else {
4238 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
4239 			ipcse = 0;	/* IPv6 has no IP header checksum */
4240 		}
4241 		cmd |= WTX_TCPIP_CMD_TSE;
4242 		cmdlen |= WTX_TCPIP_CMD_TSE |
4243 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
4244 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
4245 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
4246 	}
4247 
4248 	/*
4249 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
4250 	 * offload feature, if we load the context descriptor, we
4251 	 * MUST provide valid values for IPCSS and TUCSS fields.
4252 	 */
4253 
4254 	ipcs = WTX_TCPIP_IPCSS(offset) |
4255 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
4256 	    WTX_TCPIP_IPCSE(ipcse);
4257 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
4258 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
4259 		fields |= WTX_IXSM;
4260 	}
4261 
4262 	offset += iphl;
4263 
4264 	if (m0->m_pkthdr.csum_flags &
4265 	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
4266 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
4267 		fields |= WTX_TXSM;
4268 		tucs = WTX_TCPIP_TUCSS(offset) |
4269 		    WTX_TCPIP_TUCSO(offset +
4270 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
4271 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
4272 	} else if ((m0->m_pkthdr.csum_flags &
4273 	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
4274 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
4275 		fields |= WTX_TXSM;
4276 		tucs = WTX_TCPIP_TUCSS(offset) |
4277 		    WTX_TCPIP_TUCSO(offset +
4278 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
4279 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
4280 	} else {
4281 		/* Just initialize it to a valid TCP context. */
4282 		tucs = WTX_TCPIP_TUCSS(offset) |
4283 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
4284 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
4285 	}
4286 
4287 	/* Fill in the context descriptor. */
4288 	t = (struct livengood_tcpip_ctxdesc *)
4289 	    &sc->sc_txdescs[sc->sc_txnext];
4290 	t->tcpip_ipcs = htole32(ipcs);
4291 	t->tcpip_tucs = htole32(tucs);
4292 	t->tcpip_cmdlen = htole32(cmdlen);
4293 	t->tcpip_seg = htole32(seg);
4294 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
4295 
4296 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
4297 	txs->txs_ndesc++;
4298 
4299 	*cmdp = cmd;
4300 	*fieldsp = fields;
4301 
4302 	return 0;
4303 }
4304 
4305 static void
4306 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
4307 {
4308 	struct mbuf *m;
4309 	int i;
4310 
4311 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
4312 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
4313 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
4314 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
4315 		    m->m_data, m->m_len, m->m_flags);
4316 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
4317 	    i, i == 1 ? "" : "s");
4318 }
4319 
4320 /*
4321  * wm_82547_txfifo_stall:
4322  *
4323  *	Callout used to wait for the 82547 Tx FIFO to drain,
4324  *	reset the FIFO pointers, and restart packet transmission.
4325  */
4326 static void
4327 wm_82547_txfifo_stall(void *arg)
4328 {
4329 	struct wm_softc *sc = arg;
4330 #ifndef WM_MPSAFE
4331 	int s;
4332 
4333 	s = splnet();
4334 #endif
4335 	WM_TX_LOCK(sc);
4336 
4337 	if (sc->sc_stopping)
4338 		goto out;
4339 
4340 	if (sc->sc_txfifo_stall) {
4341 		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
4342 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
4343 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
4344 			/*
4345 			 * Packets have drained.  Stop transmitter, reset
4346 			 * FIFO pointers, restart transmitter, and kick
4347 			 * the packet queue.
4348 			 */
4349 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
4350 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
4351 			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
4352 			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
4353 			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
4354 			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
4355 			CSR_WRITE(sc, WMREG_TCTL, tctl);
4356 			CSR_WRITE_FLUSH(sc);
4357 
4358 			sc->sc_txfifo_head = 0;
4359 			sc->sc_txfifo_stall = 0;
4360 			wm_start_locked(&sc->sc_ethercom.ec_if);
4361 		} else {
4362 			/*
4363 			 * Still waiting for packets to drain; try again in
4364 			 * another tick.
4365 			 */
4366 			callout_schedule(&sc->sc_txfifo_ch, 1);
4367 		}
4368 	}
4369 
4370 out:
4371 	WM_TX_UNLOCK(sc);
4372 #ifndef WM_MPSAFE
4373 	splx(s);
4374 #endif
4375 }
4376 
4377 /*
4378  * wm_82547_txfifo_bugchk:
4379  *
4380  *	Check for bug condition in the 82547 Tx FIFO.  We need to
4381  *	prevent enqueueing a packet that would wrap around the end
4382  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
4383  *
4384  *	We do this by checking the amount of space before the end
4385  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
4386  *	the Tx FIFO, wait for all remaining packets to drain, reset
4387  *	the internal FIFO pointers to the beginning, and restart
4388  *	transmission on the interface.
4389  */
4390 #define	WM_FIFO_HDR		0x10
4391 #define	WM_82547_PAD_LEN	0x3e0
4392 static int
4393 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
4394 {
4395 	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
4396 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
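	/*
	 * "space" is the room left before the end of the Tx FIFO;
	 * "len" is the packet plus its 16-byte (WM_FIFO_HDR) FIFO
	 * header, rounded up to a WM_FIFO_HDR boundary.
	 */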
4397 
4398 	/* Just return if already stalled. */
4399 	if (sc->sc_txfifo_stall)
4400 		return 1;
4401 
4402 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
4403 		/* Stall only occurs in half-duplex mode. */
4404 		goto send_packet;
4405 	}
4406 
4407 	if (len >= WM_82547_PAD_LEN + space) {
4408 		sc->sc_txfifo_stall = 1;
4409 		callout_schedule(&sc->sc_txfifo_ch, 1);
4410 		return 1;
4411 	}
4412 
4413  send_packet:
4414 	sc->sc_txfifo_head += len;
4415 	if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
4416 		sc->sc_txfifo_head -= sc->sc_txfifo_size;
4417 
4418 	return 0;
4419 }
4420 
4421 /*
4422  * wm_start:		[ifnet interface function]
4423  *
4424  *	Start packet transmission on the interface.
4425  */
4426 static void
4427 wm_start(struct ifnet *ifp)
4428 {
4429 	struct wm_softc *sc = ifp->if_softc;
4430 
4431 	WM_TX_LOCK(sc);
4432 	if (!sc->sc_stopping)
4433 		wm_start_locked(ifp);
4434 	WM_TX_UNLOCK(sc);
4435 }
4436 
4437 static void
4438 wm_start_locked(struct ifnet *ifp)
4439 {
4440 	struct wm_softc *sc = ifp->if_softc;
4441 	struct mbuf *m0;
4442 	struct m_tag *mtag;
4443 	struct wm_txsoft *txs;
4444 	bus_dmamap_t dmamap;
4445 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
4446 	bus_addr_t curaddr;
4447 	bus_size_t seglen, curlen;
4448 	uint32_t cksumcmd;
4449 	uint8_t cksumfields;
4450 
4451 	KASSERT(WM_TX_LOCKED(sc));
4452 
4453 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
4454 		return;
4455 
4456 	/* Remember the previous number of free descriptors. */
4457 	ofree = sc->sc_txfree;
4458 
4459 	/*
4460 	 * Loop through the send queue, setting up transmit descriptors
4461 	 * until we drain the queue, or use up all available transmit
4462 	 * descriptors.
4463 	 */
4464 	for (;;) {
4465 		m0 = NULL;
4466 
4467 		/* Get a work queue entry. */
4468 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
4469 			wm_txintr(sc);
4470 			if (sc->sc_txsfree == 0) {
4471 				DPRINTF(WM_DEBUG_TX,
4472 				    ("%s: TX: no free job descriptors\n",
4473 					device_xname(sc->sc_dev)));
4474 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
4475 				break;
4476 			}
4477 		}
4478 
4479 		/* Grab a packet off the queue. */
4480 		IFQ_DEQUEUE(&ifp->if_snd, m0);
4481 		if (m0 == NULL)
4482 			break;
4483 
4484 		DPRINTF(WM_DEBUG_TX,
4485 		    ("%s: TX: have packet to transmit: %p\n",
4486 		    device_xname(sc->sc_dev), m0));
4487 
4488 		txs = &sc->sc_txsoft[sc->sc_txsnext];
4489 		dmamap = txs->txs_dmamap;
4490 
4491 		use_tso = (m0->m_pkthdr.csum_flags &
4492 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
4493 
4494 		/*
4495 		 * So says the Linux driver:
4496 		 * The controller does a simple calculation to make sure
4497 		 * there is enough room in the FIFO before initiating the
4498 		 * DMA for each buffer.  The calc is:
4499 		 *	4 = ceil(buffer len / MSS)
4500 		 * To make sure we don't overrun the FIFO, adjust the max
4501 		 * buffer len if the MSS drops.
4502 		 */
4503 		dmamap->dm_maxsegsz =
4504 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
4505 		    ? m0->m_pkthdr.segsz << 2
4506 		    : WTX_MAX_LEN;
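		/*
		 * That is: for TSO, clamp each DMA segment to 4 * MSS
		 * so the controller's FIFO check described above holds;
		 * otherwise use the normal WTX_MAX_LEN descriptor limit.
		 */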
4507 
4508 		/*
4509 		 * Load the DMA map.  If this fails, the packet either
4510 		 * didn't fit in the allotted number of segments, or we
4511 		 * were short on resources.  For the too-many-segments
4512 		 * case, we simply report an error and drop the packet,
4513 		 * since we can't sanely copy a jumbo packet to a single
4514 		 * buffer.
4515 		 */
4516 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
4517 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
4518 		if (error) {
4519 			if (error == EFBIG) {
4520 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
4521 				log(LOG_ERR, "%s: Tx packet consumes too many "
4522 				    "DMA segments, dropping...\n",
4523 				    device_xname(sc->sc_dev));
4524 				wm_dump_mbuf_chain(sc, m0);
4525 				m_freem(m0);
4526 				continue;
4527 			}
4528 			/* Short on resources, just stop for now. */
4529 			DPRINTF(WM_DEBUG_TX,
4530 			    ("%s: TX: dmamap load failed: %d\n",
4531 			    device_xname(sc->sc_dev), error));
4532 			break;
4533 		}
4534 
4535 		segs_needed = dmamap->dm_nsegs;
4536 		if (use_tso) {
4537 			/* For sentinel descriptor; see below. */
4538 			segs_needed++;
4539 		}
4540 
4541 		/*
4542 		 * Ensure we have enough descriptors free to describe
4543 		 * the packet.  Note, we always reserve one descriptor
4544 		 * at the end of the ring due to the semantics of the
4545 		 * TDT register, plus one more in the event we need
4546 		 * to load offload context.
4547 		 */
4548 		if (segs_needed > sc->sc_txfree - 2) {
4549 			/*
4550 			 * Not enough free descriptors to transmit this
4551 			 * packet.  We haven't committed anything yet,
4552 			 * so just unload the DMA map, put the packet
4553 			 * back on the queue, and punt.  Notify the upper
4554 			 * layer that there are no more slots left.
4555 			 */
4556 			DPRINTF(WM_DEBUG_TX,
4557 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
4558 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
4559 			    segs_needed, sc->sc_txfree - 1));
4560 			ifp->if_flags |= IFF_OACTIVE;
4561 			bus_dmamap_unload(sc->sc_dmat, dmamap);
4562 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
4563 			break;
4564 		}
4565 
4566 		/*
4567 		 * Check for 82547 Tx FIFO bug.  We need to do this
4568 		 * once we know we can transmit the packet, since we
4569 		 * do some internal FIFO space accounting here.
4570 		 */
4571 		if (sc->sc_type == WM_T_82547 &&
4572 		    wm_82547_txfifo_bugchk(sc, m0)) {
4573 			DPRINTF(WM_DEBUG_TX,
4574 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
4575 			    device_xname(sc->sc_dev)));
4576 			ifp->if_flags |= IFF_OACTIVE;
4577 			bus_dmamap_unload(sc->sc_dmat, dmamap);
4578 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
4579 			break;
4580 		}
4581 
4582 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
4583 
4584 		DPRINTF(WM_DEBUG_TX,
4585 		    ("%s: TX: packet has %d (%d) DMA segments\n",
4586 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
4587 
4588 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
4589 
4590 		/*
4591 		 * Store a pointer to the packet so that we can free it
4592 		 * later.
4593 		 *
4594 		 * Initially, we consider the number of descriptors the
4595 		 * packet uses the number of DMA segments.  This may be
4596 		 * incremented by 1 if we do checksum offload (a descriptor
4597 		 * is used to set the checksum context).
4598 		 */
4599 		txs->txs_mbuf = m0;
4600 		txs->txs_firstdesc = sc->sc_txnext;
4601 		txs->txs_ndesc = segs_needed;
4602 
4603 		/* Set up offload parameters for this packet. */
4604 		if (m0->m_pkthdr.csum_flags &
4605 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
4606 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
4607 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
4608 			if (wm_tx_offload(sc, txs, &cksumcmd,
4609 					  &cksumfields) != 0) {
4610 				/* Error message already displayed. */
4611 				bus_dmamap_unload(sc->sc_dmat, dmamap);
4612 				continue;
4613 			}
4614 		} else {
4615 			cksumcmd = 0;
4616 			cksumfields = 0;
4617 		}
4618 
4619 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
4620 
4621 		/* Sync the DMA map. */
4622 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
4623 		    BUS_DMASYNC_PREWRITE);
4624 
4625 		/* Initialize the transmit descriptor. */
4626 		for (nexttx = sc->sc_txnext, seg = 0;
4627 		     seg < dmamap->dm_nsegs; seg++) {
4628 			for (seglen = dmamap->dm_segs[seg].ds_len,
4629 			     curaddr = dmamap->dm_segs[seg].ds_addr;
4630 			     seglen != 0;
4631 			     curaddr += curlen, seglen -= curlen,
4632 			     nexttx = WM_NEXTTX(sc, nexttx)) {
4633 				curlen = seglen;
4634 
4635 				/*
4636 				 * So says the Linux driver:
4637 				 * Work around for premature descriptor
4638 				 * write-backs in TSO mode.  Append a
4639 				 * 4-byte sentinel descriptor.
4640 				 */
4641 				if (use_tso &&
4642 				    seg == dmamap->dm_nsegs - 1 &&
4643 				    curlen > 8)
4644 					curlen -= 4;
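				/*
				 * Shortening the final chunk by 4 bytes
				 * leaves the loop one more pass, which
				 * emits those 4 bytes as the sentinel
				 * descriptor counted in segs_needed above.
				 */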
4645 
4646 				wm_set_dma_addr(
4647 				    &sc->sc_txdescs[nexttx].wtx_addr,
4648 				    curaddr);
4649 				sc->sc_txdescs[nexttx].wtx_cmdlen =
4650 				    htole32(cksumcmd | curlen);
4651 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
4652 				    0;
4653 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
4654 				    cksumfields;
4655 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
4656 				lasttx = nexttx;
4657 
4658 				DPRINTF(WM_DEBUG_TX,
4659 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
4660 				     "len %#04zx\n",
4661 				    device_xname(sc->sc_dev), nexttx,
4662 				    (uint64_t)curaddr, curlen));
4663 			}
4664 		}
4665 
4666 		KASSERT(lasttx != -1);
4667 
4668 		/*
4669 		 * Set up the command byte on the last descriptor of
4670 		 * the packet.  If we're in the interrupt delay window,
4671 		 * delay the interrupt.
4672 		 */
4673 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
4674 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
4675 
4676 		/*
4677 		 * If VLANs are enabled and the packet has a VLAN tag, set
4678 		 * up the descriptor to encapsulate the packet for us.
4679 		 *
4680 		 * This is only valid on the last descriptor of the packet.
4681 		 */
4682 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
4683 			sc->sc_txdescs[lasttx].wtx_cmdlen |=
4684 			    htole32(WTX_CMD_VLE);
4685 			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
4686 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
4687 		}
4688 
4689 		txs->txs_lastdesc = lasttx;
4690 
4691 		DPRINTF(WM_DEBUG_TX,
4692 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
4693 		    device_xname(sc->sc_dev),
4694 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
4695 
4696 		/* Sync the descriptors we're using. */
4697 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
4698 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4699 
4700 		/* Give the packet to the chip. */
4701 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
4702 
4703 		DPRINTF(WM_DEBUG_TX,
4704 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
4705 
4706 		DPRINTF(WM_DEBUG_TX,
4707 		    ("%s: TX: finished transmitting packet, job %d\n",
4708 		    device_xname(sc->sc_dev), sc->sc_txsnext));
4709 
4710 		/* Advance the tx pointer. */
4711 		sc->sc_txfree -= txs->txs_ndesc;
4712 		sc->sc_txnext = nexttx;
4713 
4714 		sc->sc_txsfree--;
4715 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
4716 
4717 		/* Pass the packet to any BPF listeners. */
4718 		bpf_mtap(ifp, m0);
4719 	}
4720 
4721 	if (m0 != NULL) {
4722 		ifp->if_flags |= IFF_OACTIVE;
4723 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
4724 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
4725 		m_freem(m0);
4726 	}
4727 
4728 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
4729 		/* No more slots; notify upper layer. */
4730 		ifp->if_flags |= IFF_OACTIVE;
4731 	}
4732 
4733 	if (sc->sc_txfree != ofree) {
4734 		/* Set a watchdog timer in case the chip flakes out. */
4735 		ifp->if_timer = 5;
4736 	}
4737 }
4738 
4739 /*
4740  * wm_nq_tx_offload:
4741  *
4742  *	Set up TCP/IP checksumming parameters for the
4743  *	specified packet, for NEWQUEUE devices
4744  */
4745 static int
4746 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
4747     uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
4748 {
4749 	struct mbuf *m0 = txs->txs_mbuf;
4750 	struct m_tag *mtag;
4751 	uint32_t vl_len, mssidx, cmdc;
4752 	struct ether_header *eh;
4753 	int offset, iphl;
4754 
4755 	/*
4756 	 * XXX It would be nice if the mbuf pkthdr had offset
4757 	 * fields for the protocol headers.
4758 	 */
4759 	*cmdlenp = 0;
4760 	*fieldsp = 0;
4761 
4762 	eh = mtod(m0, struct ether_header *);
4763 	switch (htons(eh->ether_type)) {
4764 	case ETHERTYPE_IP:
4765 	case ETHERTYPE_IPV6:
4766 		offset = ETHER_HDR_LEN;
4767 		break;
4768 
4769 	case ETHERTYPE_VLAN:
4770 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
4771 		break;
4772 
4773 	default:
4774 		/* Don't support this protocol or encapsulation. */
4775 		*do_csum = false;
4776 		return 0;
4777 	}
4778 	*do_csum = true;
4779 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
4780 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
4781 
4782 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
4783 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
4784 
4785 	if ((m0->m_pkthdr.csum_flags &
4786 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
4787 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
4788 	} else {
4789 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
4790 	}
4791 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
4792 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
4793 
4794 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
4795 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
4796 		     << NQTXC_VLLEN_VLAN_SHIFT);
4797 		*cmdlenp |= NQTX_CMD_VLE;
4798 	}
4799 
4800 	mssidx = 0;
4801 
4802 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
4803 		int hlen = offset + iphl;
4804 		int tcp_hlen;
4805 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
4806 
4807 		if (__predict_false(m0->m_len <
4808 				    (hlen + sizeof(struct tcphdr)))) {
4809 			/*
4810 			 * TCP/IP headers are not in the first mbuf; we need
4811 			 * to do this the slow and painful way.  Let's just
4812 			 * hope this doesn't happen very often.
4813 			 */
4814 			struct tcphdr th;
4815 
4816 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
4817 
4818 			m_copydata(m0, hlen, sizeof(th), &th);
4819 			if (v4) {
4820 				struct ip ip;
4821 
4822 				m_copydata(m0, offset, sizeof(ip), &ip);
4823 				ip.ip_len = 0;
4824 				m_copyback(m0,
4825 				    offset + offsetof(struct ip, ip_len),
4826 				    sizeof(ip.ip_len), &ip.ip_len);
4827 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
4828 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
4829 			} else {
4830 				struct ip6_hdr ip6;
4831 
4832 				m_copydata(m0, offset, sizeof(ip6), &ip6);
4833 				ip6.ip6_plen = 0;
4834 				m_copyback(m0,
4835 				    offset + offsetof(struct ip6_hdr, ip6_plen),
4836 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
4837 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
4838 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
4839 			}
4840 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
4841 			    sizeof(th.th_sum), &th.th_sum);
4842 
4843 			tcp_hlen = th.th_off << 2;
4844 		} else {
4845 			/*
4846 			 * TCP/IP headers are in the first mbuf; we can do
4847 			 * this the easy way.
4848 			 */
4849 			struct tcphdr *th;
4850 
4851 			if (v4) {
4852 				struct ip *ip =
4853 				    (void *)(mtod(m0, char *) + offset);
4854 				th = (void *)(mtod(m0, char *) + hlen);
4855 
4856 				ip->ip_len = 0;
4857 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
4858 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
4859 			} else {
4860 				struct ip6_hdr *ip6 =
4861 				    (void *)(mtod(m0, char *) + offset);
4862 				th = (void *)(mtod(m0, char *) + hlen);
4863 
4864 				ip6->ip6_plen = 0;
4865 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
4866 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
4867 			}
4868 			tcp_hlen = th->th_off << 2;
4869 		}
4870 		hlen += tcp_hlen;
4871 		*cmdlenp |= NQTX_CMD_TSE;
4872 
4873 		if (v4) {
4874 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
4875 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
4876 		} else {
4877 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
4878 			*fieldsp |= NQTXD_FIELDS_TUXSM;
4879 		}
4880 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
4881 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
4882 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
4883 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
4884 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
4885 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
4886 	} else {
4887 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
4888 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
4889 	}
4890 
4891 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
4892 		*fieldsp |= NQTXD_FIELDS_IXSM;
4893 		cmdc |= NQTXC_CMD_IP4;
4894 	}
4895 
4896 	if (m0->m_pkthdr.csum_flags &
4897 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
4898 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
4899 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
4900 			cmdc |= NQTXC_CMD_TCP;
4901 		} else {
4902 			cmdc |= NQTXC_CMD_UDP;
4903 		}
4904 		cmdc |= NQTXC_CMD_IP4;
4905 		*fieldsp |= NQTXD_FIELDS_TUXSM;
4906 	}
4907 	if (m0->m_pkthdr.csum_flags &
4908 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
4909 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
4910 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
4911 			cmdc |= NQTXC_CMD_TCP;
4912 		} else {
4913 			cmdc |= NQTXC_CMD_UDP;
4914 		}
4915 		cmdc |= NQTXC_CMD_IP6;
4916 		*fieldsp |= NQTXD_FIELDS_TUXSM;
4917 	}
4918 
4919 	/* Fill in the context descriptor. */
4920 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
4921 	    htole32(vl_len);
4922 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
4923 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
4924 	    htole32(cmdc);
4925 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
4926 	    htole32(mssidx);
4927 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
4928 	DPRINTF(WM_DEBUG_TX,
4929 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
4930 	    sc->sc_txnext, 0, vl_len));
4931 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
4932 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
4933 	txs->txs_ndesc++;
4934 	return 0;
4935 }
4936 
4937 /*
4938  * wm_nq_start:		[ifnet interface function]
4939  *
4940  *	Start packet transmission on the interface for NEWQUEUE devices
4941  */
4942 static void
4943 wm_nq_start(struct ifnet *ifp)
4944 {
4945 	struct wm_softc *sc = ifp->if_softc;
4946 
4947 	WM_TX_LOCK(sc);
4948 	if (!sc->sc_stopping)
4949 		wm_nq_start_locked(ifp);
4950 	WM_TX_UNLOCK(sc);
4951 }
4952 
4953 static void
4954 wm_nq_start_locked(struct ifnet *ifp)
4955 {
4956 	struct wm_softc *sc = ifp->if_softc;
4957 	struct mbuf *m0;
4958 	struct m_tag *mtag;
4959 	struct wm_txsoft *txs;
4960 	bus_dmamap_t dmamap;
4961 	int error, nexttx, lasttx = -1, seg, segs_needed;
4962 	bool do_csum, sent;
4963 
4964 	KASSERT(WM_TX_LOCKED(sc));
4965 
4966 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
4967 		return;
4968 
4969 	sent = false;
4970 
4971 	/*
4972 	 * Loop through the send queue, setting up transmit descriptors
4973 	 * until we drain the queue, or use up all available transmit
4974 	 * descriptors.
4975 	 */
4976 	for (;;) {
4977 		m0 = NULL;
4978 
4979 		/* Get a work queue entry. */
4980 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
4981 			wm_txintr(sc);
4982 			if (sc->sc_txsfree == 0) {
4983 				DPRINTF(WM_DEBUG_TX,
4984 				    ("%s: TX: no free job descriptors\n",
4985 					device_xname(sc->sc_dev)));
4986 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
4987 				break;
4988 			}
4989 		}
4990 
4991 		/* Grab a packet off the queue. */
4992 		IFQ_DEQUEUE(&ifp->if_snd, m0);
4993 		if (m0 == NULL)
4994 			break;
4995 
4996 		DPRINTF(WM_DEBUG_TX,
4997 		    ("%s: TX: have packet to transmit: %p\n",
4998 		    device_xname(sc->sc_dev), m0));
4999 
5000 		txs = &sc->sc_txsoft[sc->sc_txsnext];
5001 		dmamap = txs->txs_dmamap;
5002 
5003 		/*
5004 		 * Load the DMA map.  If this fails, the packet either
5005 		 * didn't fit in the allotted number of segments, or we
5006 		 * were short on resources.  For the too-many-segments
5007 		 * case, we simply report an error and drop the packet,
5008 		 * since we can't sanely copy a jumbo packet to a single
5009 		 * buffer.
5010 		 */
5011 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
5012 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
5013 		if (error) {
5014 			if (error == EFBIG) {
5015 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5016 				log(LOG_ERR, "%s: Tx packet consumes too many "
5017 				    "DMA segments, dropping...\n",
5018 				    device_xname(sc->sc_dev));
5019 				wm_dump_mbuf_chain(sc, m0);
5020 				m_freem(m0);
5021 				continue;
5022 			}
5023 			/* Short on resources, just stop for now. */
5024 			DPRINTF(WM_DEBUG_TX,
5025 			    ("%s: TX: dmamap load failed: %d\n",
5026 			    device_xname(sc->sc_dev), error));
5027 			break;
5028 		}
5029 
5030 		segs_needed = dmamap->dm_nsegs;
5031 
5032 		/*
5033 		 * Ensure we have enough descriptors free to describe
5034 		 * the packet.  Note, we always reserve one descriptor
5035 		 * at the end of the ring due to the semantics of the
5036 		 * TDT register, plus one more in the event we need
5037 		 * to load offload context.
5038 		 */
5039 		if (segs_needed > sc->sc_txfree - 2) {
5040 			/*
5041 			 * Not enough free descriptors to transmit this
5042 			 * packet.  We haven't committed anything yet,
5043 			 * so just unload the DMA map, put the packet
5044 			 * back on the queue, and punt.  Notify the upper
5045 			 * layer that there are no more slots left.
5046 			 */
5047 			DPRINTF(WM_DEBUG_TX,
5048 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
5049 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
5050 			    segs_needed, sc->sc_txfree - 1));
5051 			ifp->if_flags |= IFF_OACTIVE;
5052 			bus_dmamap_unload(sc->sc_dmat, dmamap);
5053 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
5054 			break;
5055 		}
5056 
5057 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
5058 
5059 		DPRINTF(WM_DEBUG_TX,
5060 		    ("%s: TX: packet has %d (%d) DMA segments\n",
5061 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
5062 
5063 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
5064 
5065 		/*
5066 		 * Store a pointer to the packet so that we can free it
5067 		 * later.
5068 		 *
5069 		 * Initially, we consider the number of descriptors the
5070 		 * packet uses the number of DMA segments.  This may be
5071 		 * incremented by 1 if we do checksum offload (a descriptor
5072 		 * is used to set the checksum context).
5073 		 */
5074 		txs->txs_mbuf = m0;
5075 		txs->txs_firstdesc = sc->sc_txnext;
5076 		txs->txs_ndesc = segs_needed;
5077 
5078 		/* Set up offload parameters for this packet. */
5079 		uint32_t cmdlen, fields, dcmdlen;
5080 		if (m0->m_pkthdr.csum_flags &
5081 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
5082 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
5083 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
5084 			if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
5085 			    &do_csum) != 0) {
5086 				/* Error message already displayed. */
5087 				bus_dmamap_unload(sc->sc_dmat, dmamap);
5088 				continue;
5089 			}
5090 		} else {
5091 			do_csum = false;
5092 			cmdlen = 0;
5093 			fields = 0;
5094 		}
5095 
5096 		/* Sync the DMA map. */
5097 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
5098 		    BUS_DMASYNC_PREWRITE);
5099 
5100 		/* Initialize the first transmit descriptor. */
5101 		nexttx = sc->sc_txnext;
5102 		if (!do_csum) {
5103 			/* setup a legacy descriptor */
5104 			wm_set_dma_addr(
5105 			    &sc->sc_txdescs[nexttx].wtx_addr,
5106 			    dmamap->dm_segs[0].ds_addr);
5107 			sc->sc_txdescs[nexttx].wtx_cmdlen =
5108 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
5109 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
5110 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
5111 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
5112 			    NULL) {
5113 				sc->sc_txdescs[nexttx].wtx_cmdlen |=
5114 				    htole32(WTX_CMD_VLE);
5115 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
5116 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
5117 			} else {
5118 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
5119 			}
5120 			dcmdlen = 0;
5121 		} else {
5122 			/* setup an advanced data descriptor */
5123 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5124 			    htole64(dmamap->dm_segs[0].ds_addr);
5125 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
5126 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5127 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
5128 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
5129 			    htole32(fields);
5130 			DPRINTF(WM_DEBUG_TX,
5131 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
5132 			    device_xname(sc->sc_dev), nexttx,
5133 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
5134 			DPRINTF(WM_DEBUG_TX,
5135 			    ("\t 0x%08x%08x\n", fields,
5136 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
5137 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
5138 		}
5139 
5140 		lasttx = nexttx;
5141 		nexttx = WM_NEXTTX(sc, nexttx);
5142 		/*
5143 		 * Fill in the next descriptors.  Legacy and advanced formats
5144 		 * are identical from here on (dcmdlen holds the type bits).
5145 		 */
5146 		for (seg = 1; seg < dmamap->dm_nsegs;
5147 		    seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
5148 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
5149 			    htole64(dmamap->dm_segs[seg].ds_addr);
5150 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
5151 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
5152 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
5153 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
5154 			lasttx = nexttx;
5155 
5156 			DPRINTF(WM_DEBUG_TX,
5157 			    ("%s: TX: desc %d: %#" PRIx64 ", "
5158 			     "len %#04zx\n",
5159 			    device_xname(sc->sc_dev), nexttx,
5160 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
5161 			    dmamap->dm_segs[seg].ds_len));
5162 		}
5163 
5164 		KASSERT(lasttx != -1);
5165 
5166 		/*
5167 		 * Set up the command byte on the last descriptor of
5168 		 * the packet.  If we're in the interrupt delay window,
5169 		 * delay the interrupt.
5170 		 */
5171 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
5172 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
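		/*
		 * The legacy and advanced descriptor layouts keep the
		 * command bits at the same offset (the KASSERT above
		 * checks this), so the legacy wtx_cmdlen view can set
		 * EOP/RS for either format.
		 */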
5173 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
5174 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
5175 
5176 		txs->txs_lastdesc = lasttx;
5177 
5178 		DPRINTF(WM_DEBUG_TX,
5179 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
5180 		    device_xname(sc->sc_dev),
5181 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
5182 
5183 		/* Sync the descriptors we're using. */
5184 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
5185 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5186 
5187 		/* Give the packet to the chip. */
5188 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
5189 		sent = true;
5190 
5191 		DPRINTF(WM_DEBUG_TX,
5192 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
5193 
5194 		DPRINTF(WM_DEBUG_TX,
5195 		    ("%s: TX: finished transmitting packet, job %d\n",
5196 		    device_xname(sc->sc_dev), sc->sc_txsnext));
5197 
5198 		/* Advance the tx pointer. */
5199 		sc->sc_txfree -= txs->txs_ndesc;
5200 		sc->sc_txnext = nexttx;
5201 
5202 		sc->sc_txsfree--;
5203 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
5204 
5205 		/* Pass the packet to any BPF listeners. */
5206 		bpf_mtap(ifp, m0);
5207 	}
5208 
5209 	if (m0 != NULL) {
5210 		ifp->if_flags |= IFF_OACTIVE;
5211 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
5212 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
5213 		m_freem(m0);
5214 	}
5215 
5216 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
5217 		/* No more slots; notify upper layer. */
5218 		ifp->if_flags |= IFF_OACTIVE;
5219 	}
5220 
5221 	if (sent) {
5222 		/* Set a watchdog timer in case the chip flakes out. */
5223 		ifp->if_timer = 5;
5224 	}
5225 }
5226 
5227 /* Interrupt */
5228 
5229 /*
5230  * wm_txintr:
5231  *
5232  *	Helper; handle transmit interrupts.
5233  */
5234 static void
5235 wm_txintr(struct wm_softc *sc)
5236 {
5237 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5238 	struct wm_txsoft *txs;
5239 	uint8_t status;
5240 	int i;
5241 
5242 	if (sc->sc_stopping)
5243 		return;
5244 
5245 	ifp->if_flags &= ~IFF_OACTIVE;
5246 
5247 	/*
5248 	 * Go through the Tx list and free mbufs for those
5249 	 * frames which have been transmitted.
5250 	 */
5251 	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
5252 	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
5253 		txs = &sc->sc_txsoft[i];
5254 
5255 		DPRINTF(WM_DEBUG_TX,
5256 		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
5257 
5258 		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
5259 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5260 
5261 		status =
5262 		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
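		/*
		 * The chip sets the descriptor-done (DD) bit when it has
		 * written the descriptor back; if it is still clear, this
		 * job and everything after it are still pending.
		 */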
5263 		if ((status & WTX_ST_DD) == 0) {
5264 			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
5265 			    BUS_DMASYNC_PREREAD);
5266 			break;
5267 		}
5268 
5269 		DPRINTF(WM_DEBUG_TX,
5270 		    ("%s: TX: job %d done: descs %d..%d\n",
5271 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
5272 		    txs->txs_lastdesc));
5273 
5274 		/*
5275 		 * XXX We should probably be using the statistics
5276 		 * XXX registers, but I don't know if they exist
5277 		 * XXX on chips before the i82544.
5278 		 */
5279 
5280 #ifdef WM_EVENT_COUNTERS
5281 		if (status & WTX_ST_TU)
5282 			WM_EVCNT_INCR(&sc->sc_ev_tu);
5283 #endif /* WM_EVENT_COUNTERS */
5284 
5285 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
5286 			ifp->if_oerrors++;
5287 			if (status & WTX_ST_LC)
5288 				log(LOG_WARNING, "%s: late collision\n",
5289 				    device_xname(sc->sc_dev));
5290 			else if (status & WTX_ST_EC) {
5291 				ifp->if_collisions += 16;
5292 				log(LOG_WARNING, "%s: excessive collisions\n",
5293 				    device_xname(sc->sc_dev));
5294 			}
5295 		} else
5296 			ifp->if_opackets++;
5297 
5298 		sc->sc_txfree += txs->txs_ndesc;
5299 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
5300 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
5301 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
5302 		m_freem(txs->txs_mbuf);
5303 		txs->txs_mbuf = NULL;
5304 	}
5305 
5306 	/* Update the dirty transmit buffer pointer. */
5307 	sc->sc_txsdirty = i;
5308 	DPRINTF(WM_DEBUG_TX,
5309 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
5310 
5311 	/*
5312 	 * If there are no more pending transmissions, cancel the watchdog
5313 	 * timer.
5314 	 */
5315 	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
5316 		ifp->if_timer = 0;
5317 }
5318 
5319 /*
5320  * wm_rxintr:
5321  *
5322  *	Helper; handle receive interrupts.
5323  */
5324 static void
5325 wm_rxintr(struct wm_softc *sc)
5326 {
5327 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5328 	struct wm_rxsoft *rxs;
5329 	struct mbuf *m;
5330 	int i, len;
5331 	uint8_t status, errors;
5332 	uint16_t vlantag;
5333 
5334 	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
5335 		rxs = &sc->sc_rxsoft[i];
5336 
5337 		DPRINTF(WM_DEBUG_RX,
5338 		    ("%s: RX: checking descriptor %d\n",
5339 		    device_xname(sc->sc_dev), i));
5340 
5341 		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
5342 
5343 		status = sc->sc_rxdescs[i].wrx_status;
5344 		errors = sc->sc_rxdescs[i].wrx_errors;
5345 		len = le16toh(sc->sc_rxdescs[i].wrx_len);
5346 		vlantag = sc->sc_rxdescs[i].wrx_special;
5347 
5348 		if ((status & WRX_ST_DD) == 0) {
5349 			/* We have processed all of the receive descriptors. */
5350 			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
5351 			break;
5352 		}
5353 
5354 		if (__predict_false(sc->sc_rxdiscard)) {
5355 			DPRINTF(WM_DEBUG_RX,
5356 			    ("%s: RX: discarding contents of descriptor %d\n",
5357 			    device_xname(sc->sc_dev), i));
5358 			WM_INIT_RXDESC(sc, i);
5359 			if (status & WRX_ST_EOP) {
5360 				/* Reset our state. */
5361 				DPRINTF(WM_DEBUG_RX,
5362 				    ("%s: RX: resetting rxdiscard -> 0\n",
5363 				    device_xname(sc->sc_dev)));
5364 				sc->sc_rxdiscard = 0;
5365 			}
5366 			continue;
5367 		}
5368 
5369 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5370 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
5371 
5372 		m = rxs->rxs_mbuf;
5373 
5374 		/*
5375 		 * Add a new receive buffer to the ring, unless of
5376 		 * course the length is zero. Treat the latter as a
5377 		 * failed mapping.
5378 		 */
5379 		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
5380 			/*
5381 			 * Failed, throw away what we've done so
5382 			 * far, and discard the rest of the packet.
5383 			 */
5384 			ifp->if_ierrors++;
5385 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5386 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5387 			WM_INIT_RXDESC(sc, i);
5388 			if ((status & WRX_ST_EOP) == 0)
5389 				sc->sc_rxdiscard = 1;
5390 			if (sc->sc_rxhead != NULL)
5391 				m_freem(sc->sc_rxhead);
5392 			WM_RXCHAIN_RESET(sc);
5393 			DPRINTF(WM_DEBUG_RX,
5394 			    ("%s: RX: Rx buffer allocation failed, "
5395 			    "dropping packet%s\n", device_xname(sc->sc_dev),
5396 			    sc->sc_rxdiscard ? " (discard)" : ""));
5397 			continue;
5398 		}
5399 
5400 		m->m_len = len;
5401 		sc->sc_rxlen += len;
5402 		DPRINTF(WM_DEBUG_RX,
5403 		    ("%s: RX: buffer at %p len %d\n",
5404 		    device_xname(sc->sc_dev), m->m_data, len));
5405 
5406 		/* If this is not the end of the packet, keep looking. */
5407 		if ((status & WRX_ST_EOP) == 0) {
5408 			WM_RXCHAIN_LINK(sc, m);
5409 			DPRINTF(WM_DEBUG_RX,
5410 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
5411 			    device_xname(sc->sc_dev), sc->sc_rxlen));
5412 			continue;
5413 		}
5414 
5415 		/*
5416 		 * Okay, we have the entire packet now.  Except on the
5417 		 * I350, I354 and I21[01], the chip is configured to
5418 		 * include the FCS (not all chips can strip it), so we
5419 		 * need to trim it here.  We may also have to shorten the
5420 		 * previous mbuf in the chain if the current one is too
5421 		 * short.  Due to an erratum, the RCTL_SECRC bit in the
5422 		 * RCTL register is always set on the I350, so the FCS is
5423 		 * already stripped there and must not be trimmed again.
5424 		 */
5425 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
5426 		    && (sc->sc_type != WM_T_I210)
5427 		    && (sc->sc_type != WM_T_I211)) {
5428 			if (m->m_len < ETHER_CRC_LEN) {
5429 				sc->sc_rxtail->m_len
5430 				    -= (ETHER_CRC_LEN - m->m_len);
5431 				m->m_len = 0;
5432 			} else
5433 				m->m_len -= ETHER_CRC_LEN;
5434 			len = sc->sc_rxlen - ETHER_CRC_LEN;
5435 		} else
5436 			len = sc->sc_rxlen;
5437 
5438 		WM_RXCHAIN_LINK(sc, m);
5439 
5440 		*sc->sc_rxtailp = NULL;
5441 		m = sc->sc_rxhead;
5442 
5443 		WM_RXCHAIN_RESET(sc);
5444 
5445 		DPRINTF(WM_DEBUG_RX,
5446 		    ("%s: RX: have entire packet, len -> %d\n",
5447 		    device_xname(sc->sc_dev), len));
5448 
5449 		/* If an error occurred, update stats and drop the packet. */
5450 		if (errors &
5451 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
5452 			if (errors & WRX_ER_SE)
5453 				log(LOG_WARNING, "%s: symbol error\n",
5454 				    device_xname(sc->sc_dev));
5455 			else if (errors & WRX_ER_SEQ)
5456 				log(LOG_WARNING, "%s: receive sequence error\n",
5457 				    device_xname(sc->sc_dev));
5458 			else if (errors & WRX_ER_CE)
5459 				log(LOG_WARNING, "%s: CRC error\n",
5460 				    device_xname(sc->sc_dev));
5461 			m_freem(m);
5462 			continue;
5463 		}
5464 
5465 		/* No errors.  Receive the packet. */
5466 		m->m_pkthdr.rcvif = ifp;
5467 		m->m_pkthdr.len = len;
5468 
5469 		/*
5470 		 * If VLANs are enabled, VLAN packets have been unwrapped
5471 		 * for us.  Associate the tag with the packet.
5472 		 */
5473 		/* XXXX should check for i350 and i354 */
5474 		if ((status & WRX_ST_VP) != 0) {
5475 			VLAN_INPUT_TAG(ifp, m,
5476 			    le16toh(vlantag),
5477 			    continue);
5478 		}
5479 
5480 		/* Set up checksum info for this packet. */
5481 		if ((status & WRX_ST_IXSM) == 0) {
5482 			if (status & WRX_ST_IPCS) {
5483 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
5484 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
5485 				if (errors & WRX_ER_IPE)
5486 					m->m_pkthdr.csum_flags |=
5487 					    M_CSUM_IPv4_BAD;
5488 			}
5489 			if (status & WRX_ST_TCPCS) {
5490 				/*
5491 				 * Note: we don't know if this was TCP or UDP,
5492 				 * so we just set both bits, and expect the
5493 				 * upper layers to deal.
5494 				 */
5495 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
5496 				m->m_pkthdr.csum_flags |=
5497 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
5498 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
5499 				if (errors & WRX_ER_TCPE)
5500 					m->m_pkthdr.csum_flags |=
5501 					    M_CSUM_TCP_UDP_BAD;
5502 			}
5503 		}
5504 
5505 		ifp->if_ipackets++;
5506 
5507 		WM_RX_UNLOCK(sc);
5508 
5509 		/* Pass this up to any BPF listeners. */
5510 		bpf_mtap(ifp, m);
5511 
5512 		/* Pass it on. */
5513 		(*ifp->if_input)(ifp, m);
5514 
5515 		WM_RX_LOCK(sc);
5516 
5517 		if (sc->sc_stopping)
5518 			break;
5519 	}
5520 
5521 	/* Update the receive pointer. */
5522 	sc->sc_rxptr = i;
5523 
5524 	DPRINTF(WM_DEBUG_RX,
5525 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
5526 }
5527 
5528 /*
5529  * wm_linkintr_gmii:
5530  *
5531  *	Helper; handle link interrupts for GMII.
5532  */
5533 static void
5534 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
5535 {
5536 
5537 	KASSERT(WM_TX_LOCKED(sc));
5538 
5539 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5540 		__func__));
5541 
5542 	if (icr & ICR_LSC) {
5543 		DPRINTF(WM_DEBUG_LINK,
5544 		    ("%s: LINK: LSC -> mii_pollstat\n",
5545 			device_xname(sc->sc_dev)));
5546 		mii_pollstat(&sc->sc_mii);
5547 		if (sc->sc_type == WM_T_82543) {
5548 			int miistatus, active;
5549 
5550 			/*
5551 			 * With 82543, we need to force speed and
5552 			 * duplex on the MAC equal to what the PHY
5553 			 * speed and duplex configuration is.
5554 			 */
5555 			miistatus = sc->sc_mii.mii_media_status;
5556 
5557 			if (miistatus & IFM_ACTIVE) {
5558 				active = sc->sc_mii.mii_media_active;
5559 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
5560 				switch (IFM_SUBTYPE(active)) {
5561 				case IFM_10_T:
5562 					sc->sc_ctrl |= CTRL_SPEED_10;
5563 					break;
5564 				case IFM_100_TX:
5565 					sc->sc_ctrl |= CTRL_SPEED_100;
5566 					break;
5567 				case IFM_1000_T:
5568 					sc->sc_ctrl |= CTRL_SPEED_1000;
5569 					break;
5570 				default:
5571 					/*
5572 					 * fiber?
5573 					 * Should not enter here.
5574 					 */
5575 					printf("unknown media (%x)\n",
5576 					    active);
5577 					break;
5578 				}
5579 				if (active & IFM_FDX)
5580 					sc->sc_ctrl |= CTRL_FD;
5581 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5582 			}
5583 		} else if ((sc->sc_type == WM_T_ICH8)
5584 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
5585 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
5586 		} else if (sc->sc_type == WM_T_PCH) {
5587 			wm_k1_gig_workaround_hv(sc,
5588 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
5589 		}
5590 
5591 		if ((sc->sc_phytype == WMPHY_82578)
5592 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
5593 			== IFM_1000_T)) {
5594 
5595 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
5596 				delay(200*1000); /* XXX too big */
5597 
5598 				/* Link stall fix for link up */
5599 				wm_gmii_hv_writereg(sc->sc_dev, 1,
5600 				    HV_MUX_DATA_CTRL,
5601 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
5602 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
5603 				wm_gmii_hv_writereg(sc->sc_dev, 1,
5604 				    HV_MUX_DATA_CTRL,
5605 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
5606 			}
5607 		}
5608 	} else if (icr & ICR_RXSEQ) {
5609 		DPRINTF(WM_DEBUG_LINK,
5610 		    ("%s: LINK: Receive sequence error\n",
5611 			device_xname(sc->sc_dev)));
5612 	}
5613 }
5614 
5615 /*
5616  * wm_linkintr_tbi:
5617  *
5618  *	Helper; handle link interrupts for TBI mode.
5619  */
5620 static void
5621 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
5622 {
5623 	uint32_t status;
5624 
5625 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
5626 		__func__));
5627 
5628 	status = CSR_READ(sc, WMREG_STATUS);
5629 	if (icr & ICR_LSC) {
5630 		if (status & STATUS_LU) {
5631 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
5632 			    device_xname(sc->sc_dev),
5633 			    (status & STATUS_FD) ? "FDX" : "HDX"));
5634 			/*
5635 			 * NOTE: CTRL will update TFCE and RFCE automatically,
5636 			 * so we should update sc->sc_ctrl
5637 			 */
5638 
5639 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5640 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5641 			sc->sc_fcrtl &= ~FCRTL_XONE;
5642 			if (status & STATUS_FD)
5643 				sc->sc_tctl |=
5644 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5645 			else
5646 				sc->sc_tctl |=
5647 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5648 			if (sc->sc_ctrl & CTRL_TFCE)
5649 				sc->sc_fcrtl |= FCRTL_XONE;
5650 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5651 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
5652 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
5653 				      sc->sc_fcrtl);
5654 			sc->sc_tbi_linkup = 1;
5655 		} else {
5656 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
5657 			    device_xname(sc->sc_dev)));
5658 			sc->sc_tbi_linkup = 0;
5659 		}
5660 		wm_tbi_set_linkled(sc);
5661 	} else if (icr & ICR_RXSEQ) {
5662 		DPRINTF(WM_DEBUG_LINK,
5663 		    ("%s: LINK: Receive sequence error\n",
5664 		    device_xname(sc->sc_dev)));
5665 	}
5666 }
5667 
5668 /*
5669  * wm_linkintr:
5670  *
5671  *	Helper; handle link interrupts.
5672  */
5673 static void
5674 wm_linkintr(struct wm_softc *sc, uint32_t icr)
5675 {
5676 
5677 	if (sc->sc_flags & WM_F_HAS_MII)
5678 		wm_linkintr_gmii(sc, icr);
5679 	else
5680 		wm_linkintr_tbi(sc, icr);
5681 }
5682 
5683 /*
5684  * wm_intr:
5685  *
5686  *	Interrupt service routine.
5687  */
5688 static int
5689 wm_intr(void *arg)
5690 {
5691 	struct wm_softc *sc = arg;
5692 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5693 	uint32_t icr;
5694 	int handled = 0;
5695 
5696 	while (1 /* CONSTCOND */) {
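		/*
		 * Reading ICR fetches the pending interrupt causes and,
		 * on this hardware family, acknowledges (clears) them;
		 * loop until no cause we care about remains asserted.
		 */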
5697 		icr = CSR_READ(sc, WMREG_ICR);
5698 		if ((icr & sc->sc_icr) == 0)
5699 			break;
5700 		rnd_add_uint32(&sc->rnd_source, icr);
5701 
5702 		WM_RX_LOCK(sc);
5703 
5704 		if (sc->sc_stopping) {
5705 			WM_RX_UNLOCK(sc);
5706 			break;
5707 		}
5708 
5709 		handled = 1;
5710 
5711 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
5712 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
5713 			DPRINTF(WM_DEBUG_RX,
5714 			    ("%s: RX: got Rx intr 0x%08x\n",
5715 			    device_xname(sc->sc_dev),
5716 			    icr & (ICR_RXDMT0|ICR_RXT0)));
5717 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
5718 		}
5719 #endif
5720 		wm_rxintr(sc);
5721 
5722 		WM_RX_UNLOCK(sc);
5723 		WM_TX_LOCK(sc);
5724 
5725 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
5726 		if (icr & ICR_TXDW) {
5727 			DPRINTF(WM_DEBUG_TX,
5728 			    ("%s: TX: got TXDW interrupt\n",
5729 			    device_xname(sc->sc_dev)));
5730 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
5731 		}
5732 #endif
5733 		wm_txintr(sc);
5734 
5735 		if (icr & (ICR_LSC|ICR_RXSEQ)) {
5736 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
5737 			wm_linkintr(sc, icr);
5738 		}
5739 
5740 		WM_TX_UNLOCK(sc);
5741 
5742 		if (icr & ICR_RXO) {
5743 #if defined(WM_DEBUG)
5744 			log(LOG_WARNING, "%s: Receive overrun\n",
5745 			    device_xname(sc->sc_dev));
5746 #endif /* defined(WM_DEBUG) */
5747 		}
5748 	}
5749 
5750 	if (handled) {
5751 		/* Try to get more packets going. */
5752 		ifp->if_start(ifp);
5753 	}
5754 
5755 	return handled;
5756 }
5757 
5758 /*
5759  * Media related.
5760  * GMII, SGMII, TBI (and SERDES)
5761  */
5762 
5763 /* GMII related */
5764 
5765 /*
5766  * wm_gmii_reset:
5767  *
5768  *	Reset the PHY.
5769  */
5770 static void
5771 wm_gmii_reset(struct wm_softc *sc)
5772 {
5773 	uint32_t reg;
5774 	int rv;
5775 
5776 	/* get phy semaphore */
5777 	switch (sc->sc_type) {
5778 	case WM_T_82571:
5779 	case WM_T_82572:
5780 	case WM_T_82573:
5781 	case WM_T_82574:
5782 	case WM_T_82583:
5783 		 /* XXX should get sw semaphore, too */
5784 		rv = wm_get_swsm_semaphore(sc);
5785 		break;
5786 	case WM_T_82575:
5787 	case WM_T_82576:
5788 	case WM_T_82580:
5789 	case WM_T_82580ER:
5790 	case WM_T_I350:
5791 	case WM_T_I354:
5792 	case WM_T_I210:
5793 	case WM_T_I211:
5794 	case WM_T_80003:
5795 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5796 		break;
5797 	case WM_T_ICH8:
5798 	case WM_T_ICH9:
5799 	case WM_T_ICH10:
5800 	case WM_T_PCH:
5801 	case WM_T_PCH2:
5802 	case WM_T_PCH_LPT:
5803 		rv = wm_get_swfwhw_semaphore(sc);
5804 		break;
5805 	default:
5806 		/* Nothing to do */
5807 		rv = 0;
5808 		break;
5809 	}
5810 	if (rv != 0) {
5811 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5812 		    __func__);
5813 		return;
5814 	}
5815 
5816 	switch (sc->sc_type) {
5817 	case WM_T_82542_2_0:
5818 	case WM_T_82542_2_1:
5819 		/* null */
5820 		break;
5821 	case WM_T_82543:
5822 		/*
5823 		 * With 82543, we need to force speed and duplex on the MAC
5824 		 * equal to what the PHY speed and duplex configuration is.
5825 		 * In addition, we need to perform a hardware reset on the PHY
5826 		 * to take it out of reset.
5827 		 */
5828 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5829 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5830 
5831 		/* The PHY reset pin is active-low. */
5832 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
5833 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
5834 		    CTRL_EXT_SWDPIN(4));
5835 		reg |= CTRL_EXT_SWDPIO(4);
5836 
5837 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5838 		CSR_WRITE_FLUSH(sc);
5839 		delay(10*1000);
5840 
5841 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
5842 		CSR_WRITE_FLUSH(sc);
5843 		delay(150);
5844 #if 0
5845 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
5846 #endif
5847 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
5848 		break;
5849 	case WM_T_82544:	/* reset 10000us */
5850 	case WM_T_82540:
5851 	case WM_T_82545:
5852 	case WM_T_82545_3:
5853 	case WM_T_82546:
5854 	case WM_T_82546_3:
5855 	case WM_T_82541:
5856 	case WM_T_82541_2:
5857 	case WM_T_82547:
5858 	case WM_T_82547_2:
5859 	case WM_T_82571:	/* reset 100us */
5860 	case WM_T_82572:
5861 	case WM_T_82573:
5862 	case WM_T_82574:
5863 	case WM_T_82575:
5864 	case WM_T_82576:
5865 	case WM_T_82580:
5866 	case WM_T_82580ER:
5867 	case WM_T_I350:
5868 	case WM_T_I354:
5869 	case WM_T_I210:
5870 	case WM_T_I211:
5871 	case WM_T_82583:
5872 	case WM_T_80003:
5873 		/* generic reset */
5874 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5875 		CSR_WRITE_FLUSH(sc);
5876 		delay(20000);
5877 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5878 		CSR_WRITE_FLUSH(sc);
5879 		delay(20000);
5880 
5881 		if ((sc->sc_type == WM_T_82541)
5882 		    || (sc->sc_type == WM_T_82541_2)
5883 		    || (sc->sc_type == WM_T_82547)
5884 		    || (sc->sc_type == WM_T_82547_2)) {
5885 			/* Workarounds for the IGP are done in igp_reset() */
5886 			/* XXX add code to set LED after phy reset */
5887 		}
5888 		break;
5889 	case WM_T_ICH8:
5890 	case WM_T_ICH9:
5891 	case WM_T_ICH10:
5892 	case WM_T_PCH:
5893 	case WM_T_PCH2:
5894 	case WM_T_PCH_LPT:
5895 		/* generic reset */
5896 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5897 		CSR_WRITE_FLUSH(sc);
5898 		delay(100);
5899 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5900 		CSR_WRITE_FLUSH(sc);
5901 		delay(150);
5902 		break;
5903 	default:
5904 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
5905 		    __func__);
5906 		break;
5907 	}
5908 
5909 	/* release PHY semaphore */
5910 	switch (sc->sc_type) {
5911 	case WM_T_82571:
5912 	case WM_T_82572:
5913 	case WM_T_82573:
5914 	case WM_T_82574:
5915 	case WM_T_82583:
5916 		 /* XXX should put sw semaphore, too */
5917 		wm_put_swsm_semaphore(sc);
5918 		break;
5919 	case WM_T_82575:
5920 	case WM_T_82576:
5921 	case WM_T_82580:
5922 	case WM_T_82580ER:
5923 	case WM_T_I350:
5924 	case WM_T_I354:
5925 	case WM_T_I210:
5926 	case WM_T_I211:
5927 	case WM_T_80003:
5928 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5929 		break;
5930 	case WM_T_ICH8:
5931 	case WM_T_ICH9:
5932 	case WM_T_ICH10:
5933 	case WM_T_PCH:
5934 	case WM_T_PCH2:
5935 	case WM_T_PCH_LPT:
5936 		wm_put_swfwhw_semaphore(sc);
5937 		break;
5938 	default:
5939 		/* Nothing to do */
5941 		break;
5942 	}
5943 
5944 	/* get_cfg_done */
5945 	wm_get_cfg_done(sc);
5946 
5947 	/* extra setup */
5948 	switch (sc->sc_type) {
5949 	case WM_T_82542_2_0:
5950 	case WM_T_82542_2_1:
5951 	case WM_T_82543:
5952 	case WM_T_82544:
5953 	case WM_T_82540:
5954 	case WM_T_82545:
5955 	case WM_T_82545_3:
5956 	case WM_T_82546:
5957 	case WM_T_82546_3:
5958 	case WM_T_82541_2:
5959 	case WM_T_82547_2:
5960 	case WM_T_82571:
5961 	case WM_T_82572:
5962 	case WM_T_82573:
5963 	case WM_T_82574:
5964 	case WM_T_82575:
5965 	case WM_T_82576:
5966 	case WM_T_82580:
5967 	case WM_T_82580ER:
5968 	case WM_T_I350:
5969 	case WM_T_I354:
5970 	case WM_T_I210:
5971 	case WM_T_I211:
5972 	case WM_T_82583:
5973 	case WM_T_80003:
5974 		/* null */
5975 		break;
5976 	case WM_T_82541:
5977 	case WM_T_82547:
5978 		/* XXX Actively configure the LED after PHY reset */
5979 		break;
5980 	case WM_T_ICH8:
5981 	case WM_T_ICH9:
5982 	case WM_T_ICH10:
5983 	case WM_T_PCH:
5984 	case WM_T_PCH2:
5985 	case WM_T_PCH_LPT:
5986 		/* Allow time for h/w to get to a quiescent state after reset */
5987 		delay(10*1000);
5988 
5989 		if (sc->sc_type == WM_T_PCH)
5990 			wm_hv_phy_workaround_ich8lan(sc);
5991 
5992 		if (sc->sc_type == WM_T_PCH2)
5993 			wm_lv_phy_workaround_ich8lan(sc);
5994 
5995 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
5996 			/*
5997 			 * Dummy read to clear the PHY wakeup bit after LCD
5998 			 * reset
5999 			 */
6000 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6001 		}
6002 
6003 		/*
6004 		 * XXX Configure the LCD with the extended configuration region
6005 		 * in NVM
6006 		 */
6007 
6008 		/* Configure the LCD with the OEM bits in NVM */
6009 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
6010 		    || (sc->sc_type == WM_T_PCH_LPT)) {
6011 			/*
6012 			 * Disable LPLU.
6013 			 * XXX It seems that 82567 has LPLU, too.
6014 			 */
6015 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6016 			reg &= ~(HV_OEM_BITS_A1KDIS| HV_OEM_BITS_LPLU);
6017 			reg |= HV_OEM_BITS_ANEGNOW;
6018 			wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6019 		}
6020 		break;
6021 	default:
6022 		panic("%s: unknown type\n", __func__);
6023 		break;
6024 	}
6025 }
6026 
6027 /*
6028  * wm_get_phy_id_82575:
6029  *
6030  * Return PHY ID. Return -1 if it failed.
6031  */
6032 static int
6033 wm_get_phy_id_82575(struct wm_softc *sc)
6034 {
6035 	uint32_t reg;
6036 	int phyid = -1;
6037 
6038 	/* XXX */
6039 	if ((sc->sc_flags & WM_F_SGMII) == 0)
6040 		return -1;
6041 
6042 	if (wm_sgmii_uses_mdio(sc)) {
6043 		switch (sc->sc_type) {
6044 		case WM_T_82575:
6045 		case WM_T_82576:
6046 			reg = CSR_READ(sc, WMREG_MDIC);
6047 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
6048 			break;
6049 		case WM_T_82580:
6050 		case WM_T_I350:
6051 		case WM_T_I354:
6052 		case WM_T_I210:
6053 		case WM_T_I211:
6054 			reg = CSR_READ(sc, WMREG_MDICNFG);
6055 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
6056 			break;
6057 		default:
6058 			return -1;
6059 		}
6060 	}
6061 
6062 	return phyid;
6063 }
6064 
6065 
6066 /*
6067  * wm_gmii_mediainit:
6068  *
6069  *	Initialize media for use on 1000BASE-T devices.
6070  */
6071 static void
6072 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6073 {
6074 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6075 	struct mii_data *mii = &sc->sc_mii;
6076 	uint32_t reg;
6077 
6078 	/* We have MII. */
6079 	sc->sc_flags |= WM_F_HAS_MII;
6080 
6081 	if (sc->sc_type == WM_T_80003)
6082 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
6083 	else
6084 		sc->sc_tipg = TIPG_1000T_DFLT;
6085 
6086 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
6087 	if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
6088 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
6089 	    || (sc->sc_type == WM_T_I211)) {
6090 		reg = CSR_READ(sc, WMREG_PHPM);
6091 		reg &= ~PHPM_GO_LINK_D;
6092 		CSR_WRITE(sc, WMREG_PHPM, reg);
6093 	}
6094 
6095 	/*
6096 	 * Let the chip set speed/duplex on its own based on
6097 	 * signals from the PHY.
6098 	 * XXXbouyer - I'm not sure this is right for the 80003,
6099 	 * the em driver only sets CTRL_SLU here - but it seems to work.
6100 	 */
6101 	sc->sc_ctrl |= CTRL_SLU;
6102 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6103 
6104 	/* Initialize our media structures and probe the GMII. */
6105 	mii->mii_ifp = ifp;
6106 
6107 	/*
6108 	 * Determine the PHY access method.
6109 	 *
6110 	 *  For SGMII, use the SGMII-specific method.
6111 	 *
6112 	 *  For some devices, the PHY access method can be determined
6113 	 * from sc_type alone.
6114 	 *
6115 	 *  For ICH8 variants, it's difficult to determine the PHY access
6116 	 * method from sc_type, so use the PCI product ID for some devices.
6117 	 * For other ICH8 variants, try the IGP method first; if the PHY
6118 	 * can't be detected, fall back to the BM method.
6119 	 */
6120 	switch (prodid) {
6121 	case PCI_PRODUCT_INTEL_PCH_M_LM:
6122 	case PCI_PRODUCT_INTEL_PCH_M_LC:
6123 		/* 82577 */
6124 		sc->sc_phytype = WMPHY_82577;
6125 		mii->mii_readreg = wm_gmii_hv_readreg;
6126 		mii->mii_writereg = wm_gmii_hv_writereg;
6127 		break;
6128 	case PCI_PRODUCT_INTEL_PCH_D_DM:
6129 	case PCI_PRODUCT_INTEL_PCH_D_DC:
6130 		/* 82578 */
6131 		sc->sc_phytype = WMPHY_82578;
6132 		mii->mii_readreg = wm_gmii_hv_readreg;
6133 		mii->mii_writereg = wm_gmii_hv_writereg;
6134 		break;
6135 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6136 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
6137 		/* 82579 */
6138 		sc->sc_phytype = WMPHY_82579;
6139 		mii->mii_readreg = wm_gmii_hv_readreg;
6140 		mii->mii_writereg = wm_gmii_hv_writereg;
6141 		break;
6142 	case PCI_PRODUCT_INTEL_I217_LM:
6143 	case PCI_PRODUCT_INTEL_I217_V:
6144 	case PCI_PRODUCT_INTEL_I218_LM:
6145 	case PCI_PRODUCT_INTEL_I218_V:
6146 		/* I21[78] */
6147 		mii->mii_readreg = wm_gmii_hv_readreg;
6148 		mii->mii_writereg = wm_gmii_hv_writereg;
6149 		break;
6150 	case PCI_PRODUCT_INTEL_82801I_BM:
6151 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6152 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6153 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6154 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6155 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6156 		/* 82567 */
6157 		sc->sc_phytype = WMPHY_BM;
6158 		mii->mii_readreg = wm_gmii_bm_readreg;
6159 		mii->mii_writereg = wm_gmii_bm_writereg;
6160 		break;
6161 	default:
6162 		if (((sc->sc_flags & WM_F_SGMII) != 0)
6163 		    && !wm_sgmii_uses_mdio(sc)) {
6164 			mii->mii_readreg = wm_sgmii_readreg;
6165 			mii->mii_writereg = wm_sgmii_writereg;
6166 		} else if (sc->sc_type >= WM_T_80003) {
6167 			mii->mii_readreg = wm_gmii_i80003_readreg;
6168 			mii->mii_writereg = wm_gmii_i80003_writereg;
6169 		} else if (sc->sc_type >= WM_T_I210) {
6170 			mii->mii_readreg = wm_gmii_i82544_readreg;
6171 			mii->mii_writereg = wm_gmii_i82544_writereg;
6172 		} else if (sc->sc_type >= WM_T_82580) {
6173 			sc->sc_phytype = WMPHY_82580;
6174 			mii->mii_readreg = wm_gmii_82580_readreg;
6175 			mii->mii_writereg = wm_gmii_82580_writereg;
6176 		} else if (sc->sc_type >= WM_T_82544) {
6177 			mii->mii_readreg = wm_gmii_i82544_readreg;
6178 			mii->mii_writereg = wm_gmii_i82544_writereg;
6179 		} else {
6180 			mii->mii_readreg = wm_gmii_i82543_readreg;
6181 			mii->mii_writereg = wm_gmii_i82543_writereg;
6182 		}
6183 		break;
6184 	}
6185 	mii->mii_statchg = wm_gmii_statchg;
6186 
6187 	wm_gmii_reset(sc);
6188 
6189 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
6190 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
6191 	    wm_gmii_mediastatus);
6192 
6193 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6194 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
6195 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
6196 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
6197 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
6198 			/* Attach only one port */
6199 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6200 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
6201 		} else {
6202 			int i, id;
6203 			uint32_t ctrl_ext;
6204 
6205 			id = wm_get_phy_id_82575(sc);
6206 			if (id != -1) {
6207 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6208 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
6209 			}
6210 			if ((id == -1)
6211 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
6212 				/* Power on sgmii phy if it is disabled */
6213 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6214 				CSR_WRITE(sc, WMREG_CTRL_EXT,
6215 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
6216 				CSR_WRITE_FLUSH(sc);
6217 				delay(300*1000); /* XXX too long */
6218 
6219 				/* Try PHY addresses 1 through 7 */
6220 				for (i = 1; i < 8; i++)
6221 					mii_attach(sc->sc_dev, &sc->sc_mii,
6222 					    0xffffffff, i, MII_OFFSET_ANY,
6223 					    MIIF_DOPAUSE);
6224 
6225 				/* restore previous sfp cage power state */
6226 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6227 			}
6228 		}
6229 	} else {
6230 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6231 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
6232 	}
6233 
6234 	/*
6235 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
6236 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
6237 	 */
6238 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
6239 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
6240 		wm_set_mdio_slow_mode_hv(sc);
6241 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6242 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
6243 	}
6244 
6245 	/*
6246 	 * (For ICH8 variants)
6247 	 * If PHY detection failed, use BM's r/w function and retry.
6248 	 */
6249 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
6250 		/* if failed, retry with *_bm_* */
6251 		mii->mii_readreg = wm_gmii_bm_readreg;
6252 		mii->mii_writereg = wm_gmii_bm_writereg;
6253 
6254 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6255 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
6256 	}
6257 
6258 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
6259 		/* No PHY was found. */
6260 		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6261 		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
6262 		sc->sc_phytype = WMPHY_NONE;
6263 	} else {
6264 		/*
6265 		 * PHY Found!
6266 		 * Check PHY type.
6267 		 */
6268 		uint32_t model;
6269 		struct mii_softc *child;
6270 
6271 		child = LIST_FIRST(&mii->mii_phys);
6272 		if (device_is_a(child->mii_dev, "igphy")) {
6273 			struct igphy_softc *isc = (struct igphy_softc *)child;
6274 
6275 			model = isc->sc_mii.mii_mpd_model;
6276 			if (model == MII_MODEL_yyINTEL_I82566)
6277 				sc->sc_phytype = WMPHY_IGP_3;
6278 		}
6279 
6280 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
6281 	}
6282 }
6283 
6284 /*
6285  * wm_gmii_mediastatus:	[ifmedia interface function]
6286  *
6287  *	Get the current interface media status on a 1000BASE-T device.
6288  */
6289 static void
6290 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6291 {
6292 	struct wm_softc *sc = ifp->if_softc;
6293 
6294 	ether_mediastatus(ifp, ifmr);
6295 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6296 	    | sc->sc_flowflags;
6297 }
6298 
6299 /*
6300  * wm_gmii_mediachange:	[ifmedia interface function]
6301  *
6302  *	Set hardware to newly-selected media on a 1000BASE-T device.
6303  */
6304 static int
6305 wm_gmii_mediachange(struct ifnet *ifp)
6306 {
6307 	struct wm_softc *sc = ifp->if_softc;
6308 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6309 	int rc;
6310 
6311 	if ((ifp->if_flags & IFF_UP) == 0)
6312 		return 0;
6313 
6314 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6315 	sc->sc_ctrl |= CTRL_SLU;
6316 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6317 	    || (sc->sc_type > WM_T_82543)) {
6318 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6319 	} else {
6320 		sc->sc_ctrl &= ~CTRL_ASDE;
6321 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6322 		if (ife->ifm_media & IFM_FDX)
6323 			sc->sc_ctrl |= CTRL_FD;
6324 		switch (IFM_SUBTYPE(ife->ifm_media)) {
6325 		case IFM_10_T:
6326 			sc->sc_ctrl |= CTRL_SPEED_10;
6327 			break;
6328 		case IFM_100_TX:
6329 			sc->sc_ctrl |= CTRL_SPEED_100;
6330 			break;
6331 		case IFM_1000_T:
6332 			sc->sc_ctrl |= CTRL_SPEED_1000;
6333 			break;
6334 		default:
6335 			panic("wm_gmii_mediachange: bad media 0x%x",
6336 			    ife->ifm_media);
6337 		}
6338 	}
6339 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6340 	if (sc->sc_type <= WM_T_82543)
6341 		wm_gmii_reset(sc);
6342 
6343 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6344 		return 0;
6345 	return rc;
6346 }
6347 
6348 #define	MDI_IO		CTRL_SWDPIN(2)
6349 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
6350 #define	MDI_CLK		CTRL_SWDPIN(3)
6351 
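/*
 * The i82543 has no MDIC register, so MII management frames are
 * bit-banged over software-controlled pins: each bit is driven on
 * MDI_IO (MSB first) and latched by pulsing MDI_CLK, with roughly
 * 10us per clock phase.
 */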
6352 static void
6353 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6354 {
6355 	uint32_t i, v;
6356 
6357 	v = CSR_READ(sc, WMREG_CTRL);
6358 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6359 	v |= MDI_DIR | CTRL_SWDPIO(3);
6360 
6361 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6362 		if (data & i)
6363 			v |= MDI_IO;
6364 		else
6365 			v &= ~MDI_IO;
6366 		CSR_WRITE(sc, WMREG_CTRL, v);
6367 		CSR_WRITE_FLUSH(sc);
6368 		delay(10);
6369 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6370 		CSR_WRITE_FLUSH(sc);
6371 		delay(10);
6372 		CSR_WRITE(sc, WMREG_CTRL, v);
6373 		CSR_WRITE_FLUSH(sc);
6374 		delay(10);
6375 	}
6376 }
6377 
6378 static uint32_t
6379 wm_i82543_mii_recvbits(struct wm_softc *sc)
6380 {
6381 	uint32_t v, i, data = 0;
6382 
6383 	v = CSR_READ(sc, WMREG_CTRL);
6384 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6385 	v |= CTRL_SWDPIO(3);
6386 
6387 	CSR_WRITE(sc, WMREG_CTRL, v);
6388 	CSR_WRITE_FLUSH(sc);
6389 	delay(10);
6390 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6391 	CSR_WRITE_FLUSH(sc);
6392 	delay(10);
6393 	CSR_WRITE(sc, WMREG_CTRL, v);
6394 	CSR_WRITE_FLUSH(sc);
6395 	delay(10);
6396 
6397 	for (i = 0; i < 16; i++) {
6398 		data <<= 1;
6399 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6400 		CSR_WRITE_FLUSH(sc);
6401 		delay(10);
6402 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
6403 			data |= 1;
6404 		CSR_WRITE(sc, WMREG_CTRL, v);
6405 		CSR_WRITE_FLUSH(sc);
6406 		delay(10);
6407 	}
6408 
6409 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6410 	CSR_WRITE_FLUSH(sc);
6411 	delay(10);
6412 	CSR_WRITE(sc, WMREG_CTRL, v);
6413 	CSR_WRITE_FLUSH(sc);
6414 	delay(10);
6415 
6416 	return data;
6417 }
6418 
6419 #undef MDI_IO
6420 #undef MDI_DIR
6421 #undef MDI_CLK
6422 
6423 /*
6424  * wm_gmii_i82543_readreg:	[mii interface function]
6425  *
6426  *	Read a PHY register on the GMII (i82543 version).
6427  */
6428 static int
6429 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
6430 {
6431 	struct wm_softc *sc = device_private(self);
6432 	int rv;
6433 
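	/* 32 bits of preamble, then the 14-bit read command frame. */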
6434 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6435 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
6436 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6437 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
6438 
6439 	DPRINTF(WM_DEBUG_GMII,
6440 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6441 	    device_xname(sc->sc_dev), phy, reg, rv));
6442 
6443 	return rv;
6444 }
6445 
6446 /*
6447  * wm_gmii_i82543_writereg:	[mii interface function]
6448  *
6449  *	Write a PHY register on the GMII (i82543 version).
6450  */
6451 static void
6452 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6453 {
6454 	struct wm_softc *sc = device_private(self);
6455 
6456 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
6457 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6458 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6459 	    (MII_COMMAND_START << 30), 32);
6460 }
6461 
6462 /*
6463  * wm_gmii_i82544_readreg:	[mii interface function]
6464  *
6465  *	Read a PHY register on the GMII.
6466  */
6467 static int
6468 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6469 {
6470 	struct wm_softc *sc = device_private(self);
6471 	uint32_t mdic = 0;
6472 	int i, rv;
6473 
6474 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6475 	    MDIC_REGADD(reg));
6476 
6477 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6478 		mdic = CSR_READ(sc, WMREG_MDIC);
6479 		if (mdic & MDIC_READY)
6480 			break;
6481 		delay(50);
6482 	}
6483 
6484 	if ((mdic & MDIC_READY) == 0) {
6485 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6486 		    device_xname(sc->sc_dev), phy, reg);
6487 		rv = 0;
6488 	} else if (mdic & MDIC_E) {
6489 #if 0 /* This is normal if no PHY is present. */
6490 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6491 		    device_xname(sc->sc_dev), phy, reg);
6492 #endif
6493 		rv = 0;
6494 	} else {
6495 		rv = MDIC_DATA(mdic);
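		/*
		 * An all-ones value usually means no PHY responded at
		 * this address, so treat it as a failed read.
		 */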
6496 		if (rv == 0xffff)
6497 			rv = 0;
6498 	}
6499 
6500 	return rv;
6501 }
6502 
6503 /*
6504  * wm_gmii_i82544_writereg:	[mii interface function]
6505  *
6506  *	Write a PHY register on the GMII.
6507  */
6508 static void
6509 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6510 {
6511 	struct wm_softc *sc = device_private(self);
6512 	uint32_t mdic = 0;
6513 	int i;
6514 
6515 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6516 	    MDIC_REGADD(reg) | MDIC_DATA(val));
6517 
6518 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6519 		mdic = CSR_READ(sc, WMREG_MDIC);
6520 		if (mdic & MDIC_READY)
6521 			break;
6522 		delay(50);
6523 	}
6524 
6525 	if ((mdic & MDIC_READY) == 0)
6526 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6527 		    device_xname(sc->sc_dev), phy, reg);
6528 	else if (mdic & MDIC_E)
6529 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6530 		    device_xname(sc->sc_dev), phy, reg);
6531 }
6532 
6533 /*
6534  * wm_gmii_i80003_readreg:	[mii interface function]
6535  *
6536  *	Read a PHY register on the kumeran
6537  * This could be handled by the PHY layer if we didn't have to lock the
6538  * resource ...
6539  */
6540 static int
6541 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6542 {
6543 	struct wm_softc *sc = device_private(self);
6544 	int sem;
6545 	int rv;
6546 
6547 	if (phy != 1) /* only one PHY on kumeran bus */
6548 		return 0;
6549 
6550 	sem = swfwphysem[sc->sc_funcid];
6551 	if (wm_get_swfw_semaphore(sc, sem)) {
6552 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6553 		    __func__);
6554 		return 0;
6555 	}
6556 
6557 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6558 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6559 		    reg >> GG82563_PAGE_SHIFT);
6560 	} else {
6561 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6562 		    reg >> GG82563_PAGE_SHIFT);
6563 	}
6564 	/* Wait another 200us to work around a bug in the MDIC ready bit */
6565 	delay(200);
6566 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6567 	delay(200);
6568 
6569 	wm_put_swfw_semaphore(sc, sem);
6570 	return rv;
6571 }
6572 
6573 /*
6574  * wm_gmii_i80003_writereg:	[mii interface function]
6575  *
6576  *	Write a PHY register on the kumeran.
6577  * This could be handled by the PHY layer if we didn't have to lock the
6578  * resource ...
6579  */
6580 static void
6581 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6582 {
6583 	struct wm_softc *sc = device_private(self);
6584 	int sem;
6585 
6586 	if (phy != 1) /* only one PHY on kumeran bus */
6587 		return;
6588 
6589 	sem = swfwphysem[sc->sc_funcid];
6590 	if (wm_get_swfw_semaphore(sc, sem)) {
6591 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6592 		    __func__);
6593 		return;
6594 	}
6595 
6596 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6597 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6598 		    reg >> GG82563_PAGE_SHIFT);
6599 	} else {
6600 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6601 		    reg >> GG82563_PAGE_SHIFT);
6602 	}
6603 	/* Wait another 200us to work around a bug in the MDIC ready bit */
6604 	delay(200);
6605 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6606 	delay(200);
6607 
6608 	wm_put_swfw_semaphore(sc, sem);
6609 }
6610 
6611 /*
6612  * wm_gmii_bm_readreg:	[mii interface function]
6613  *
6614  *	Read a PHY register on the BM PHY.
6615  * This could be handled by the PHY layer if we didn't have to lock the
6616  * resource ...
6617  */
6618 static int
6619 wm_gmii_bm_readreg(device_t self, int phy, int reg)
6620 {
6621 	struct wm_softc *sc = device_private(self);
6622 	int sem;
6623 	int rv;
6624 
6625 	sem = swfwphysem[sc->sc_funcid];
6626 	if (wm_get_swfw_semaphore(sc, sem)) {
6627 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6628 		    __func__);
6629 		return 0;
6630 	}
6631 
6632 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6633 		if (phy == 1)
6634 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6635 			    reg);
6636 		else
6637 			wm_gmii_i82544_writereg(self, phy,
6638 			    GG82563_PHY_PAGE_SELECT,
6639 			    reg >> GG82563_PAGE_SHIFT);
6640 	}
6641 
6642 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6643 	wm_put_swfw_semaphore(sc, sem);
6644 	return rv;
6645 }
6646 
6647 /*
6648  * wm_gmii_bm_writereg:	[mii interface function]
6649  *
6650  *	Write a PHY register on the BM PHY.
6651  * This could be handled by the PHY layer if we didn't have to lock the
6652  * resource ...
6653  */
6654 static void
6655 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6656 {
6657 	struct wm_softc *sc = device_private(self);
6658 	int sem;
6659 
6660 	sem = swfwphysem[sc->sc_funcid];
6661 	if (wm_get_swfw_semaphore(sc, sem)) {
6662 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6663 		    __func__);
6664 		return;
6665 	}
6666 
6667 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6668 		if (phy == 1)
6669 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6670 			    reg);
6671 		else
6672 			wm_gmii_i82544_writereg(self, phy,
6673 			    GG82563_PHY_PAGE_SELECT,
6674 			    reg >> GG82563_PAGE_SHIFT);
6675 	}
6676 
6677 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6678 	wm_put_swfw_semaphore(sc, sem);
6679 }
6680 
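/*
 * Access to the BM wakeup registers (page 800) requires a fixed
 * sequence: select page 769 and set the enable bit in
 * BM_WUC_ENABLE_REG, select page 800 and latch the register number
 * through BM_WUC_ADDRESS_OPCODE, move the data through
 * BM_WUC_DATA_OPCODE, and finally restore the page 769 enable state.
 */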
6681 static void
6682 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
6683 {
6684 	struct wm_softc *sc = device_private(self);
6685 	uint16_t regnum = BM_PHY_REG_NUM(offset);
6686 	uint16_t wuce;
6687 
6688 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
6689 	if (sc->sc_type == WM_T_PCH) {
6690 		/* XXX The e1000 driver does nothing here... why? */
6691 	}
6692 
6693 	/* Set page 769 */
6694 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6695 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6696 
6697 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
6698 
6699 	wuce &= ~BM_WUC_HOST_WU_BIT;
6700 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
6701 	    wuce | BM_WUC_ENABLE_BIT);
6702 
6703 	/* Select page 800 */
6704 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6705 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
6706 
6707 	/* Write page 800 */
6708 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
6709 
6710 	if (rd)
6711 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
6712 	else
6713 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
6714 
6715 	/* Set page 769 */
6716 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6717 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6718 
6719 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
6720 }
6721 
6722 /*
6723  * wm_gmii_hv_readreg:	[mii interface function]
6724  *
6725  *	Read a PHY register on the HV (PCH) PHY.
6726  * This could be handled by the PHY layer if we didn't have to lock the
6727  * resource ...
6728  */
6729 static int
6730 wm_gmii_hv_readreg(device_t self, int phy, int reg)
6731 {
6732 	struct wm_softc *sc = device_private(self);
6733 	uint16_t page = BM_PHY_REG_PAGE(reg);
6734 	uint16_t regnum = BM_PHY_REG_NUM(reg);
6735 	uint16_t val;
6736 	int rv;
6737 
6738 	if (wm_get_swfwhw_semaphore(sc)) {
6739 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6740 		    __func__);
6741 		return 0;
6742 	}
6743 
6744 	/* XXX Workaround failure in MDIO access while cable is disconnected */
6745 	if (sc->sc_phytype == WMPHY_82577) {
6746 		/* XXX must write */
6747 	}
6748 
6749 	/* Page 800 works differently than the rest so it has its own func */
6750 	if (page == BM_WUC_PAGE) {
6751 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_put_swfwhw_semaphore(sc);
6752 		return val;
6753 	}
6754 
6755 	/*
6756 	 * Pages below 768 work differently from the rest, so they have
6757 	 * their own function
6758 	 */
6759 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6760 		printf("gmii_hv_readreg!!!\n");
		wm_put_swfwhw_semaphore(sc);
6761 		return 0;
6762 	}
6763 
6764 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6765 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6766 		    page << BME1000_PAGE_SHIFT);
6767 	}
6768 
6769 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
6770 	wm_put_swfwhw_semaphore(sc);
6771 	return rv;
6772 }
6773 
6774 /*
6775  * wm_gmii_hv_writereg:	[mii interface function]
6776  *
6777  *	Write a PHY register on the HV (PCH) PHY.
6778  * This could be handled by the PHY layer if we didn't have to lock the
6779  * resource ...
6780  */
6781 static void
6782 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
6783 {
6784 	struct wm_softc *sc = device_private(self);
6785 	uint16_t page = BM_PHY_REG_PAGE(reg);
6786 	uint16_t regnum = BM_PHY_REG_NUM(reg);
6787 
6788 	if (wm_get_swfwhw_semaphore(sc)) {
6789 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6790 		    __func__);
6791 		return;
6792 	}
6793 
6794 	/* XXX Workaround failure in MDIO access while cable is disconnected */
6795 
6796 	/* Page 800 works differently than the rest so it has its own func */
6797 	if (page == BM_WUC_PAGE) {
6798 		uint16_t tmp;
6799 
6800 		tmp = val;
6801 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		wm_put_swfwhw_semaphore(sc);
6802 		return;
6803 	}
6804 
6805 	/*
6806 	 * Pages below 768 work differently from the rest, so they have
6807 	 * their own function
6808 	 */
6809 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6810 		printf("gmii_hv_writereg!!!\n");
		wm_put_swfwhw_semaphore(sc);
6811 		return;
6812 	}
6813 
6814 	/*
6815 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
6816 	 * Power Down (whenever bit 11 of the PHY control register is set)
6817 	 */
6818 
6819 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6820 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6821 		    page << BME1000_PAGE_SHIFT);
6822 	}
6823 
6824 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
6825 	wm_put_swfwhw_semaphore(sc);
6826 }
6827 
6828 /*
6829  * wm_gmii_82580_readreg:	[mii interface function]
6830  *
6831  *	Read a PHY register on the 82580 and I350.
6832  * This could be handled by the PHY layer if we didn't have to lock the
6833  * resource ...
6834  */
6835 static int
6836 wm_gmii_82580_readreg(device_t self, int phy, int reg)
6837 {
6838 	struct wm_softc *sc = device_private(self);
6839 	int sem;
6840 	int rv;
6841 
6842 	sem = swfwphysem[sc->sc_funcid];
6843 	if (wm_get_swfw_semaphore(sc, sem)) {
6844 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6845 		    __func__);
6846 		return 0;
6847 	}
6848 
6849 	rv = wm_gmii_i82544_readreg(self, phy, reg);
6850 
6851 	wm_put_swfw_semaphore(sc, sem);
6852 	return rv;
6853 }
6854 
6855 /*
6856  * wm_gmii_82580_writereg:	[mii interface function]
6857  *
6858  *	Write a PHY register on the 82580 and I350.
6859  * This could be handled by the PHY layer if we didn't have to lock the
6860  * resource ...
6861  */
6862 static void
6863 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
6864 {
6865 	struct wm_softc *sc = device_private(self);
6866 	int sem;
6867 
6868 	sem = swfwphysem[sc->sc_funcid];
6869 	if (wm_get_swfw_semaphore(sc, sem)) {
6870 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6871 		    __func__);
6872 		return;
6873 	}
6874 
6875 	wm_gmii_i82544_writereg(self, phy, reg, val);
6876 
6877 	wm_put_swfw_semaphore(sc, sem);
6878 }
6879 
6880 /*
6881  * wm_gmii_statchg:	[mii interface function]
6882  *
6883  *	Callback from MII layer when media changes.
6884  */
6885 static void
6886 wm_gmii_statchg(struct ifnet *ifp)
6887 {
6888 	struct wm_softc *sc = ifp->if_softc;
6889 	struct mii_data *mii = &sc->sc_mii;
6890 
6891 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
6892 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6893 	sc->sc_fcrtl &= ~FCRTL_XONE;
6894 
6895 	/*
6896 	 * Get flow control negotiation result.
6897 	 */
6898 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
6899 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
6900 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
6901 		mii->mii_media_active &= ~IFM_ETH_FMASK;
6902 	}
6903 
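	/*
	 * Map the negotiated pause flags onto the MAC: TXPAUSE enables
	 * sending pause frames (CTRL_TFCE, with XON via FCRTL_XONE),
	 * RXPAUSE enables honouring received pause frames (CTRL_RFCE).
	 */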
6904 	if (sc->sc_flowflags & IFM_FLOW) {
6905 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
6906 			sc->sc_ctrl |= CTRL_TFCE;
6907 			sc->sc_fcrtl |= FCRTL_XONE;
6908 		}
6909 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
6910 			sc->sc_ctrl |= CTRL_RFCE;
6911 	}
6912 
6913 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
6914 		DPRINTF(WM_DEBUG_LINK,
6915 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
6916 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6917 	} else {
6918 		DPRINTF(WM_DEBUG_LINK,
6919 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
6920 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6921 	}
6922 
6923 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6924 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6925 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
6926 						 : WMREG_FCRTL, sc->sc_fcrtl);
6927 	if (sc->sc_type == WM_T_80003) {
6928 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
6929 		case IFM_1000_T:
6930 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
6931 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
6932 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
6933 			break;
6934 		default:
6935 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
6936 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
6937 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
6938 			break;
6939 		}
6940 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
6941 	}
6942 }
6943 
6944 /*
6945  * wm_kmrn_readreg:
6946  *
6947  *	Read a kumeran register
6948  */
6949 static int
6950 wm_kmrn_readreg(struct wm_softc *sc, int reg)
6951 {
6952 	int rv;
6953 
6954 	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0) {
6955 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
6956 			aprint_error_dev(sc->sc_dev,
6957 			    "%s: failed to get semaphore\n", __func__);
6958 			return 0;
6959 		}
6960 	} else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0) {
6961 		if (wm_get_swfwhw_semaphore(sc)) {
6962 			aprint_error_dev(sc->sc_dev,
6963 			    "%s: failed to get semaphore\n", __func__);
6964 			return 0;
6965 		}
6966 	}
6967 
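	/*
	 * Writing the offset with KUMCTRLSTA_REN set triggers a read
	 * cycle; the result appears in the data field of the same
	 * register shortly afterwards.
	 */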
6968 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
6969 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
6970 	    KUMCTRLSTA_REN);
6971 	CSR_WRITE_FLUSH(sc);
6972 	delay(2);
6973 
6974 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
6975 
6976 	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0)
6977 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
6978 	else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0)
6979 		wm_put_swfwhw_semaphore(sc);
6980 
6981 	return rv;
6982 }
6983 
6984 /*
6985  * wm_kmrn_writereg:
6986  *
6987  *	Write a kumeran register
6988  */
6989 static void
6990 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
6991 {
6992 
6993 	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0) {
6994 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
6995 			aprint_error_dev(sc->sc_dev,
6996 			    "%s: failed to get semaphore\n", __func__);
6997 			return;
6998 		}
6999 	} else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0) {
7000 		if (wm_get_swfwhw_semaphore(sc)) {
7001 			aprint_error_dev(sc->sc_dev,
7002 			    "%s: failed to get semaphore\n", __func__);
7003 			return;
7004 		}
7005 	}
7006 
7007 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7008 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7009 	    (val & KUMCTRLSTA_MASK));
7010 
7011 	if ((sc->sc_flags & WM_F_LOCK_SWFW) != 0)
7012 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7013 	else if ((sc->sc_flags & WM_F_LOCK_EXTCNF) != 0)
7014 		wm_put_swfwhw_semaphore(sc);
7015 }
7016 
7017 /* SGMII related */
7018 
7019 /*
7020  * wm_sgmii_uses_mdio
7021  *
7022  * Check whether the transaction is to the internal PHY or the external
7023  * MDIO interface. Return true if it's MDIO.
7024  */
7025 static bool
7026 wm_sgmii_uses_mdio(struct wm_softc *sc)
7027 {
7028 	uint32_t reg;
7029 	bool ismdio = false;
7030 
7031 	switch (sc->sc_type) {
7032 	case WM_T_82575:
7033 	case WM_T_82576:
7034 		reg = CSR_READ(sc, WMREG_MDIC);
7035 		ismdio = ((reg & MDIC_DEST) != 0);
7036 		break;
7037 	case WM_T_82580:
7038 	case WM_T_82580ER:
7039 	case WM_T_I350:
7040 	case WM_T_I354:
7041 	case WM_T_I210:
7042 	case WM_T_I211:
7043 		reg = CSR_READ(sc, WMREG_MDICNFG);
7044 		ismdio = ((reg & MDICNFG_DEST) != 0);
7045 		break;
7046 	default:
7047 		break;
7048 	}
7049 
7050 	return ismdio;
7051 }
7052 
7053 /*
7054  * wm_sgmii_readreg:	[mii interface function]
7055  *
7056  *	Read a PHY register on the SGMII
7057  * This could be handled by the PHY layer if we didn't have to lock the
7058  * resource ...
7059  */
7060 static int
7061 wm_sgmii_readreg(device_t self, int phy, int reg)
7062 {
7063 	struct wm_softc *sc = device_private(self);
7064 	uint32_t i2ccmd;
7065 	int i, rv;
7066 
7067 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7068 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7069 		    __func__);
7070 		return 0;
7071 	}
7072 
7073 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7074 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
7075 	    | I2CCMD_OPCODE_READ;
7076 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7077 
7078 	/* Poll the ready bit */
7079 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7080 		delay(50);
7081 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7082 		if (i2ccmd & I2CCMD_READY)
7083 			break;
7084 	}
7085 	if ((i2ccmd & I2CCMD_READY) == 0)
7086 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
7087 	if ((i2ccmd & I2CCMD_ERROR) != 0)
7088 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7089 
7090 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
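	/*
	 * The data byte order in I2CCMD is swapped with respect to the
	 * PHY register, so swap the two bytes of the 16-bit result.
	 */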
7091 
7092 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7093 	return rv;
7094 }
7095 
7096 /*
7097  * wm_sgmii_writereg:	[mii interface function]
7098  *
7099  *	Write a PHY register on the SGMII.
7100  * This could be handled by the PHY layer if we didn't have to lock the
7101  * resource ...
7102  */
7103 static void
7104 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
7105 {
7106 	struct wm_softc *sc = device_private(self);
7107 	uint32_t i2ccmd;
7108 	int i;
7109 
7110 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7111 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7112 		    __func__);
7113 		return;
7114 	}
7115 
7116 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7117 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
7118 	    | I2CCMD_OPCODE_WRITE;
7119 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7120 
7121 	/* Poll the ready bit */
7122 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7123 		delay(50);
7124 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7125 		if (i2ccmd & I2CCMD_READY)
7126 			break;
7127 	}
7128 	if ((i2ccmd & I2CCMD_READY) == 0)
7129 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
7130 	if ((i2ccmd & I2CCMD_ERROR) != 0)
7131 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7132 
7133 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7134 }
7135 
7136 /* TBI related */
7137 
7138 /* XXX Currently TBI only */
7139 static int
7140 wm_check_for_link(struct wm_softc *sc)
7141 {
7142 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7143 	uint32_t rxcw;
7144 	uint32_t ctrl;
7145 	uint32_t status;
7146 	uint32_t sig;
7147 
7148 	if (sc->sc_wmp->wmp_flags & WMP_F_SERDES) {
7149 		sc->sc_tbi_linkup = 1;
7150 		return 0;
7151 	}
7152 
7153 	rxcw = CSR_READ(sc, WMREG_RXCW);
7154 	ctrl = CSR_READ(sc, WMREG_CTRL);
7155 	status = CSR_READ(sc, WMREG_STATUS);
7156 
7157 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
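	/*
	 * Signal-present polarity of SWDPIN(1): chips newer than the
	 * 82544 read 1 when the optics detect light, older parts read 0.
	 */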
7158 
7159 	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
7160 		device_xname(sc->sc_dev), __func__,
7161 		((ctrl & CTRL_SWDPIN(1)) == sig),
7162 		((status & STATUS_LU) != 0),
7163 		((rxcw & RXCW_C) != 0)
7164 		    ));
7165 
7166 	/*
7167 	 * SWDPIN   LU RXCW
7168 	 *      0    0    0
7169 	 *      0    0    1	(should not happen)
7170 	 *      0    1    0	(should not happen)
7171 	 *      0    1    1	(should not happen)
7172 	 *      1    0    0	Disable autonegotiation and force link up
7173 	 *      1    0    1	Got /C/ ordered sets but no link yet
7174 	 *      1    1    0	(link up)
7175 	 *      1    1    1	If IFM_AUTO, go back to autonegotiation
7176 	 *
7177 	 */
7178 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
7179 	    && ((status & STATUS_LU) == 0)
7180 	    && ((rxcw & RXCW_C) == 0)) {
7181 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
7182 			__func__));
7183 		sc->sc_tbi_linkup = 0;
7184 		/* Disable auto-negotiation in the TXCW register */
7185 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
7186 
7187 		/*
7188 		 * Force link-up and also force full-duplex.
7189 		 *
7190 		 * NOTE: the hardware updates the TFCE and RFCE bits in CTRL
7191 		 * automatically, so refresh sc->sc_ctrl from the register.
7192 		 */
7193 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
7194 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7195 	} else if (((status & STATUS_LU) != 0)
7196 	    && ((rxcw & RXCW_C) != 0)
7197 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
7198 		sc->sc_tbi_linkup = 1;
7199 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
7200 			__func__));
7201 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7202 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
7203 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
7204 	    && ((rxcw & RXCW_C) != 0)) {
7205 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
7206 	} else {
7207 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
7208 			status));
7209 	}
7210 
7211 	return 0;
7212 }
7213 
7214 /*
7215  * wm_tbi_mediainit:
7216  *
7217  *	Initialize media for use on 1000BASE-X devices.
7218  */
7219 static void
7220 wm_tbi_mediainit(struct wm_softc *sc)
7221 {
7222 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7223 	const char *sep = "";
7224 
7225 	if (sc->sc_type < WM_T_82543)
7226 		sc->sc_tipg = TIPG_WM_DFLT;
7227 	else
7228 		sc->sc_tipg = TIPG_LG_DFLT;
7229 
7230 	sc->sc_tbi_anegticks = 5;
7231 
7232 	/* Initialize our media structures */
7233 	sc->sc_mii.mii_ifp = ifp;
7234 
7235 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
7236 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
7237 	    wm_tbi_mediastatus);
7238 
7239 	/*
7240 	 * SWD Pins:
7241 	 *
7242 	 *	0 = Link LED (output)
7243 	 *	1 = Loss Of Signal (input)
7244 	 */
7245 	sc->sc_ctrl |= CTRL_SWDPIO(0);
7246 	sc->sc_ctrl &= ~CTRL_SWDPIO(1);
7247 	if (sc->sc_wmp->wmp_flags & WMP_F_SERDES)
7248 		sc->sc_ctrl &= ~CTRL_LRST;
7249 
7250 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7251 
7252 #define	ADD(ss, mm, dd)							\
7253 do {									\
7254 	aprint_normal("%s%s", sep, ss);					\
7255 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
7256 	sep = ", ";							\
7257 } while (/*CONSTCOND*/0)
7258 
7259 	aprint_normal_dev(sc->sc_dev, "");
7260 
7261 	/* Only 82545 is LX */
7262 	if (sc->sc_type == WM_T_82545) {
7263 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
7264 		ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
7265 	} else {
7266 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
7267 		ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
7268 	}
7269 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
7270 	aprint_normal("\n");
7271 
7272 #undef ADD
7273 
7274 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
7275 }
7276 
7277 /*
7278  * wm_tbi_mediastatus:	[ifmedia interface function]
7279  *
7280  *	Get the current interface media status on a 1000BASE-X device.
7281  */
7282 static void
7283 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
7284 {
7285 	struct wm_softc *sc = ifp->if_softc;
7286 	uint32_t ctrl, status;
7287 
7288 	ifmr->ifm_status = IFM_AVALID;
7289 	ifmr->ifm_active = IFM_ETHER;
7290 
7291 	status = CSR_READ(sc, WMREG_STATUS);
7292 	if ((status & STATUS_LU) == 0) {
7293 		ifmr->ifm_active |= IFM_NONE;
7294 		return;
7295 	}
7296 
7297 	ifmr->ifm_status |= IFM_ACTIVE;
7298 	/* Only 82545 is LX */
7299 	if (sc->sc_type == WM_T_82545)
7300 		ifmr->ifm_active |= IFM_1000_LX;
7301 	else
7302 		ifmr->ifm_active |= IFM_1000_SX;
7303 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
7304 		ifmr->ifm_active |= IFM_FDX;
7305 	else
7306 		ifmr->ifm_active |= IFM_HDX;
7307 	ctrl = CSR_READ(sc, WMREG_CTRL);
7308 	if (ctrl & CTRL_RFCE)
7309 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
7310 	if (ctrl & CTRL_TFCE)
7311 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
7312 }
7313 
7314 /*
7315  * wm_tbi_mediachange:	[ifmedia interface function]
7316  *
7317  *	Set hardware to newly-selected media on a 1000BASE-X device.
7318  */
7319 static int
7320 wm_tbi_mediachange(struct ifnet *ifp)
7321 {
7322 	struct wm_softc *sc = ifp->if_softc;
7323 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7324 	uint32_t status;
7325 	int i;
7326 
7327 	if (sc->sc_wmp->wmp_flags & WMP_F_SERDES)
7328 		return 0;
7329 
7330 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
7331 	    || (sc->sc_type >= WM_T_82575))
7332 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
7333 
7334 	/* XXX power_up_serdes_link_82575() */
7335 
7336 	sc->sc_ctrl &= ~CTRL_LRST;
7337 	sc->sc_txcw = TXCW_ANE;
7338 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
7339 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
7340 	else if (ife->ifm_media & IFM_FDX)
7341 		sc->sc_txcw |= TXCW_FD;
7342 	else
7343 		sc->sc_txcw |= TXCW_HD;
7344 
7345 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
7346 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
7347 
7348 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
7349 		    device_xname(sc->sc_dev), sc->sc_txcw));
7350 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7351 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7352 	CSR_WRITE_FLUSH(sc);
7353 	delay(1000);
7354 
7355 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
7356 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
7357 
7358 	/*
7359 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
7360 	 * if the optics detect a signal, 0 if they don't.
7361 	 */
7362 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
7363 		/* Have signal; wait for the link to come up. */
7364 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
7365 			delay(10000);
7366 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
7367 				break;
7368 		}
7369 
7370 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
7371 			    device_xname(sc->sc_dev),i));
7372 
7373 		status = CSR_READ(sc, WMREG_STATUS);
7374 		DPRINTF(WM_DEBUG_LINK,
7375 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
7376 			device_xname(sc->sc_dev),status, STATUS_LU));
7377 		if (status & STATUS_LU) {
7378 			/* Link is up. */
7379 			DPRINTF(WM_DEBUG_LINK,
7380 			    ("%s: LINK: set media -> link up %s\n",
7381 			    device_xname(sc->sc_dev),
7382 			    (status & STATUS_FD) ? "FDX" : "HDX"));
7383 
7384 			/*
7385 			 * NOTE: the hardware updates the TFCE and RFCE bits
7386 			 * in CTRL automatically, so refresh sc->sc_ctrl here.
7387 			 */
7388 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7389 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7390 			sc->sc_fcrtl &= ~FCRTL_XONE;
7391 			if (status & STATUS_FD)
7392 				sc->sc_tctl |=
7393 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7394 			else
7395 				sc->sc_tctl |=
7396 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7397 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
7398 				sc->sc_fcrtl |= FCRTL_XONE;
7399 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7400 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7401 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
7402 				      sc->sc_fcrtl);
7403 			sc->sc_tbi_linkup = 1;
7404 		} else {
7405 			if (i == WM_LINKUP_TIMEOUT)
7406 				wm_check_for_link(sc);
7407 			/* Link is down. */
7408 			DPRINTF(WM_DEBUG_LINK,
7409 			    ("%s: LINK: set media -> link down\n",
7410 			    device_xname(sc->sc_dev)));
7411 			sc->sc_tbi_linkup = 0;
7412 		}
7413 	} else {
7414 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
7415 		    device_xname(sc->sc_dev)));
7416 		sc->sc_tbi_linkup = 0;
7417 	}
7418 
7419 	wm_tbi_set_linkled(sc);
7420 
7421 	return 0;
7422 }
7423 
7424 /*
7425  * wm_tbi_set_linkled:
7426  *
7427  *	Update the link LED on 1000BASE-X devices.
7428  */
7429 static void
7430 wm_tbi_set_linkled(struct wm_softc *sc)
7431 {
7432 
7433 	if (sc->sc_tbi_linkup)
7434 		sc->sc_ctrl |= CTRL_SWDPIN(0);
7435 	else
7436 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
7437 
7438 	/* 82540 or newer devices are active low */
7439 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
7440 
7441 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7442 }
7443 
7444 /*
7445  * wm_tbi_check_link:
7446  *
7447  *	Check the link on 1000BASE-X devices.
7448  */
7449 static void
7450 wm_tbi_check_link(struct wm_softc *sc)
7451 {
7452 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7453 	uint32_t status;
7454 
7455 	KASSERT(WM_TX_LOCKED(sc));
7456 
7457 	if (sc->sc_wmp->wmp_flags & WMP_F_SERDES) {
7458 		sc->sc_tbi_linkup = 1;
7459 		return;
7460 	}
7461 
7462 	status = CSR_READ(sc, WMREG_STATUS);
7463 
7464 	/* XXX is this needed? */
7465 	(void)CSR_READ(sc, WMREG_RXCW);
7466 	(void)CSR_READ(sc, WMREG_CTRL);
7467 
7468 	/* set link status */
7469 	if ((status & STATUS_LU) == 0) {
7470 		DPRINTF(WM_DEBUG_LINK,
7471 		    ("%s: LINK: checklink -> down\n",
7472 			device_xname(sc->sc_dev)));
7473 		sc->sc_tbi_linkup = 0;
7474 	} else if (sc->sc_tbi_linkup == 0) {
7475 		DPRINTF(WM_DEBUG_LINK,
7476 		    ("%s: LINK: checklink -> up %s\n",
7477 			device_xname(sc->sc_dev),
7478 			(status & STATUS_FD) ? "FDX" : "HDX"));
7479 		sc->sc_tbi_linkup = 1;
7480 	}
7481 
7482 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
7483 	    && ((status & STATUS_LU) == 0)) {
7484 		sc->sc_tbi_linkup = 0;
7485 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7486 			/* If the timer expired, retry autonegotiation */
7487 			if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
7488 				DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
7489 				sc->sc_tbi_ticks = 0;
7490 				/*
7491 				 * Reset the link, and let autonegotiation do
7492 				 * its thing
7493 				 */
7494 				sc->sc_ctrl |= CTRL_LRST;
7495 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7496 				CSR_WRITE_FLUSH(sc);
7497 				delay(1000);
7498 				sc->sc_ctrl &= ~CTRL_LRST;
7499 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7500 				CSR_WRITE_FLUSH(sc);
7501 				delay(1000);
7502 				CSR_WRITE(sc, WMREG_TXCW,
7503 				    sc->sc_txcw & ~TXCW_ANE);
7504 				CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7505 			}
7506 		}
7507 	}
7508 
7509 	wm_tbi_set_linkled(sc);
7510 }
7511 
7512 /*
7513  * NVM related.
7514  * Microwire, SPI (w/wo EERD) and Flash.
7515  */
7516 
7517 /* Both spi and uwire */
7518 
7519 /*
7520  * wm_eeprom_sendbits:
7521  *
7522  *	Send a series of bits to the EEPROM.
7523  */
7524 static void
7525 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
7526 {
7527 	uint32_t reg;
7528 	int x;
7529 
7530 	reg = CSR_READ(sc, WMREG_EECD);
7531 
7532 	for (x = nbits; x > 0; x--) {
7533 		if (bits & (1U << (x - 1)))
7534 			reg |= EECD_DI;
7535 		else
7536 			reg &= ~EECD_DI;
7537 		CSR_WRITE(sc, WMREG_EECD, reg);
7538 		CSR_WRITE_FLUSH(sc);
7539 		delay(2);
7540 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
7541 		CSR_WRITE_FLUSH(sc);
7542 		delay(2);
7543 		CSR_WRITE(sc, WMREG_EECD, reg);
7544 		CSR_WRITE_FLUSH(sc);
7545 		delay(2);
7546 	}
7547 }
7548 
7549 /*
7550  * wm_eeprom_recvbits:
7551  *
7552  *	Receive a series of bits from the EEPROM.
7553  */
7554 static void
7555 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
7556 {
7557 	uint32_t reg, val;
7558 	int x;
7559 
7560 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
7561 
7562 	val = 0;
7563 	for (x = nbits; x > 0; x--) {
7564 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
7565 		CSR_WRITE_FLUSH(sc);
7566 		delay(2);
7567 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
7568 			val |= (1U << (x - 1));
7569 		CSR_WRITE(sc, WMREG_EECD, reg);
7570 		CSR_WRITE_FLUSH(sc);
7571 		delay(2);
7572 	}
7573 	*valp = val;
7574 }
7575 
7576 /* Microwire */
7577 
7578 /*
7579  * wm_nvm_read_uwire:
7580  *
7581  *	Read a word from the EEPROM using the MicroWire protocol.
7582  */
7583 static int
7584 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
7585 {
7586 	uint32_t reg, val;
7587 	int i;
7588 
7589 	for (i = 0; i < wordcnt; i++) {
7590 		/* Clear SK and DI. */
7591 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
7592 		CSR_WRITE(sc, WMREG_EECD, reg);
7593 
7594 		/*
7595 		 * XXX: workaround for a bug in qemu-0.12.x and prior
7596 		 * and Xen.
7597 		 *
7598 		 * We use this workaround only for 82540 because qemu's
7599 		 * e1000 acts as an 82540.
7600 		 */
7601 		if (sc->sc_type == WM_T_82540) {
7602 			reg |= EECD_SK;
7603 			CSR_WRITE(sc, WMREG_EECD, reg);
7604 			reg &= ~EECD_SK;
7605 			CSR_WRITE(sc, WMREG_EECD, reg);
7606 			CSR_WRITE_FLUSH(sc);
7607 			delay(2);
7608 		}
7609 		/* XXX: end of workaround */
7610 
7611 		/* Set CHIP SELECT. */
7612 		reg |= EECD_CS;
7613 		CSR_WRITE(sc, WMREG_EECD, reg);
7614 		CSR_WRITE_FLUSH(sc);
7615 		delay(2);
7616 
7617 		/* Shift in the READ command. */
7618 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
7619 
7620 		/* Shift in address. */
7621 		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
7622 
7623 		/* Shift out the data. */
7624 		wm_eeprom_recvbits(sc, &val, 16);
7625 		data[i] = val & 0xffff;
7626 
7627 		/* Clear CHIP SELECT. */
7628 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
7629 		CSR_WRITE(sc, WMREG_EECD, reg);
7630 		CSR_WRITE_FLUSH(sc);
7631 		delay(2);
7632 	}
7633 
7634 	return 0;
7635 }
7636 
7637 /* SPI */
7638 
7639 /* Set SPI related information */
7640 static void
7641 wm_set_spiaddrbits(struct wm_softc *sc)
7642 {
7643 	uint32_t reg;
7644 
7645 	sc->sc_flags |= WM_F_EEPROM_SPI;
7646 	reg = CSR_READ(sc, WMREG_EECD);
7647 	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
7648 }
7649 
7650 /*
7651  * wm_nvm_ready_spi:
7652  *
7653  *	Wait for a SPI EEPROM to be ready for commands.
7654  */
7655 static int
7656 wm_nvm_ready_spi(struct wm_softc *sc)
7657 {
7658 	uint32_t val;
7659 	int usec;
7660 
7661 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
7662 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
7663 		wm_eeprom_recvbits(sc, &val, 8);
7664 		if ((val & SPI_SR_RDY) == 0)
7665 			break;
7666 	}
7667 	if (usec >= SPI_MAX_RETRIES) {
7668 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
7669 		return 1;
7670 	}
7671 	return 0;
7672 }
7673 
7674 /*
7675  * wm_nvm_read_spi:
7676  *
7677  *	Read a word from the EEPROM using the SPI protocol.
7678  */
7679 static int
7680 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
7681 {
7682 	uint32_t reg, val;
7683 	int i;
7684 	uint8_t opc;
7685 
7686 	/* Clear SK and CS. */
7687 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
7688 	CSR_WRITE(sc, WMREG_EECD, reg);
7689 	CSR_WRITE_FLUSH(sc);
7690 	delay(2);
7691 
7692 	if (wm_nvm_ready_spi(sc))
7693 		return 1;
7694 
7695 	/* Toggle CS to flush commands. */
7696 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
7697 	CSR_WRITE_FLUSH(sc);
7698 	delay(2);
7699 	CSR_WRITE(sc, WMREG_EECD, reg);
7700 	CSR_WRITE_FLUSH(sc);
7701 	delay(2);
7702 
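	/*
	 * Small SPI parts use 8 address bits and carry the ninth address
	 * bit (A8) in the opcode, so set it when reading the upper half
	 * of a 256-word device.
	 */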
7703 	opc = SPI_OPC_READ;
7704 	if (sc->sc_ee_addrbits == 8 && word >= 128)
7705 		opc |= SPI_OPC_A8;
7706 
7707 	wm_eeprom_sendbits(sc, opc, 8);
7708 	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
7709 
7710 	for (i = 0; i < wordcnt; i++) {
7711 		wm_eeprom_recvbits(sc, &val, 16);
7712 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
7713 	}
7714 
7715 	/* Raise CS and clear SK. */
7716 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
7717 	CSR_WRITE(sc, WMREG_EECD, reg);
7718 	CSR_WRITE_FLUSH(sc);
7719 	delay(2);
7720 
7721 	return 0;
7722 }
7723 
7724 /* Using with EERD */
7725 
7726 static int
7727 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
7728 {
7729 	uint32_t attempts = 100000;
7730 	uint32_t i, reg = 0;
7731 	int32_t done = -1;
7732 
7733 	for (i = 0; i < attempts; i++) {
7734 		reg = CSR_READ(sc, rw);
7735 
7736 		if (reg & EERD_DONE) {
7737 			done = 0;
7738 			break;
7739 		}
7740 		delay(5);
7741 	}
7742 
7743 	return done;
7744 }
7745 
7746 static int
7747 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
7748     uint16_t *data)
7749 {
7750 	int i, eerd = 0;
7751 	int error = 0;
7752 
7753 	for (i = 0; i < wordcnt; i++) {
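	/*
	 * Each word is read by programming the word address into EERD and
	 * setting START, then polling DONE and pulling the result out of
	 * the data field of the same register.
	 */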
7754 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
7755 
7756 		CSR_WRITE(sc, WMREG_EERD, eerd);
7757 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
7758 		if (error != 0)
7759 			break;
7760 
7761 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
7762 	}
7763 
7764 	return error;
7765 }
7766 
7767 /* Flash */
7768 
7769 static int
7770 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
7771 {
7772 	uint32_t eecd;
7773 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
7774 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
7775 	uint8_t sig_byte = 0;
7776 
7777 	switch (sc->sc_type) {
7778 	case WM_T_ICH8:
7779 	case WM_T_ICH9:
7780 		eecd = CSR_READ(sc, WMREG_EECD);
7781 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
7782 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
7783 			return 0;
7784 		}
7785 		/* FALLTHROUGH */
7786 	default:
7787 		/* Default to 0 */
7788 		*bank = 0;
7789 
7790 		/* Check bank 0 */
7791 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
7792 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
7793 			*bank = 0;
7794 			return 0;
7795 		}
7796 
7797 		/* Check bank 1 */
7798 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
7799 		    &sig_byte);
7800 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
7801 			*bank = 1;
7802 			return 0;
7803 		}
7804 	}
7805 
7806 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
7807 		device_xname(sc->sc_dev)));
7808 	return -1;
7809 }
7810 
7811 /******************************************************************************
7812  * This function does initial flash setup so that a new read/write/erase cycle
7813  * can be started.
7814  *
7815  * sc - The pointer to the hw structure
7816  ****************************************************************************/
7817 static int32_t
7818 wm_ich8_cycle_init(struct wm_softc *sc)
7819 {
7820 	uint16_t hsfsts;
7821 	int32_t error = 1;
7822 	int32_t i     = 0;
7823 
7824 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7825 
7826 	/* Check the Flash Descriptor Valid bit in the HW status register */
7827 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
7828 		return error;
7829 	}
7830 
7831 	/* Clear FCERR in Hw status by writing 1 */
7832 	/* Clear DAEL in Hw status by writing a 1 */
7833 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
7834 
7835 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7836 
7837 	/*
7838 	 * Either we should have a hardware SPI cycle-in-progress bit to
7839 	 * check against before starting a new cycle, or the FDONE bit
7840 	 * should be changed in the hardware so that it is 1 after hardware
7841 	 * reset; it could then be used to tell whether a cycle is in
7842 	 * progress or has completed.  We should also have a software
7843 	 * semaphore mechanism guarding FDONE or the cycle-in-progress bit,
7844 	 * so that accesses by two threads are serialized and two threads
7845 	 * don't start a cycle at the same time.
7846 	 */
7847 
7848 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7849 		/*
7850 		 * There is no cycle running at present, so we can start a
7851 		 * cycle
7852 		 */
7853 
7854 		/* Begin by setting Flash Cycle Done. */
7855 		hsfsts |= HSFSTS_DONE;
7856 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7857 		error = 0;
7858 	} else {
7859 		/*
7860 		 * Otherwise, poll for some time so the current cycle has a
7861 		 * chance to end before giving up.
7862 		 */
7863 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
7864 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7865 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7866 				error = 0;
7867 				break;
7868 			}
7869 			delay(1);
7870 		}
7871 		if (error == 0) {
7872 			/*
7873 			 * The previous cycle ended before the timeout; now
7874 			 * set the Flash Cycle Done bit.
7875 			 */
7876 			hsfsts |= HSFSTS_DONE;
7877 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7878 		}
7879 	}
7880 	return error;
7881 }
7882 
7883 /******************************************************************************
7884  * This function starts a flash cycle and waits for its completion
7885  *
7886  * sc - The pointer to the hw structure
7887  ****************************************************************************/
7888 static int32_t
7889 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
7890 {
7891 	uint16_t hsflctl;
7892 	uint16_t hsfsts;
7893 	int32_t error = 1;
7894 	uint32_t i = 0;
7895 
7896 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
7897 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7898 	hsflctl |= HSFCTL_GO;
7899 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7900 
7901 	/* Wait till FDONE bit is set to 1 */
7902 	do {
7903 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7904 		if (hsfsts & HSFSTS_DONE)
7905 			break;
7906 		delay(1);
7907 		i++;
7908 	} while (i < timeout);
7909 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
7910 		error = 0;
7911 
7912 	return error;
7913 }
7914 
7915 /******************************************************************************
7916  * Reads a byte or word from the NVM using the ICH8 flash access registers.
7917  *
7918  * sc - The pointer to the hw structure
7919  * index - The index of the byte or word to read.
7920  * size - Size of data to read, 1=byte 2=word
7921  * data - Pointer to the word to store the value read.
7922  *****************************************************************************/
7923 static int32_t
7924 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
7925     uint32_t size, uint16_t *data)
7926 {
7927 	uint16_t hsfsts;
7928 	uint16_t hsflctl;
7929 	uint32_t flash_linear_address;
7930 	uint32_t flash_data = 0;
7931 	int32_t error = 1;
7932 	int32_t count = 0;
7933 
7934 	if (size < 1 || size > 2 || data == NULL ||
7935 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
7936 		return error;
7937 
7938 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
7939 	    sc->sc_ich8_flash_base;
7940 
7941 	do {
7942 		delay(1);
7943 		/* Steps: init the cycle, set size/opcode and address, go */
7944 		error = wm_ich8_cycle_init(sc);
7945 		if (error)
7946 			break;
7947 
7948 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7949 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
7950 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
7951 		    & HSFCTL_BCOUNT_MASK;
7952 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
7953 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7954 
7955 		/*
7956 		 * Write the last 24 bits of index into Flash Linear address
7957 		 * field in Flash Address
7958 		 */
7959 		/* TODO: TBD maybe check the index against the size of flash */
7960 
7961 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
7962 
7963 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
7964 
7965 		/*
7966 		 * If FCERR is set, clear it and retry the whole
7967 		 * sequence up to ICH_FLASH_CYCLE_REPEAT_COUNT times;
7968 		 * otherwise read the result out of the Flash Data0
7969 		 * register, least significant byte first.
7970 		 */
7971 		if (error == 0) {
7972 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
7973 			if (size == 1)
7974 				*data = (uint8_t)(flash_data & 0x000000FF);
7975 			else if (size == 2)
7976 				*data = (uint16_t)(flash_data & 0x0000FFFF);
7977 			break;
7978 		} else {
7979 			/*
7980 			 * If we've gotten here, then things are probably
7981 			 * completely hosed, but if the error condition is
7982 			 * detected, it won't hurt to give it another try...
7983 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
7984 			 */
7985 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7986 			if (hsfsts & HSFSTS_ERR) {
7987 				/* Repeat for some time before giving up. */
7988 				continue;
7989 			} else if ((hsfsts & HSFSTS_DONE) == 0)
7990 				break;
7991 		}
7992 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
7993 
7994 	return error;
7995 }
7996 
7997 /******************************************************************************
7998  * Reads a single byte from the NVM using the ICH8 flash access registers.
7999  *
8000  * sc - pointer to wm_hw structure
8001  * index - The index of the byte to read.
8002  * data - Pointer to a byte to store the value read.
8003  *****************************************************************************/
8004 static int32_t
8005 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
8006 {
8007 	int32_t status;
8008 	uint16_t word = 0;
8009 
8010 	status = wm_read_ich8_data(sc, index, 1, &word);
8011 	if (status == 0)
8012 		*data = (uint8_t)word;
8013 	else
8014 		*data = 0;
8015 
8016 	return status;
8017 }
8018 
8019 /******************************************************************************
8020  * Reads a word from the NVM using the ICH8 flash access registers.
8021  *
8022  * sc - pointer to wm_hw structure
8023  * index - The starting byte index of the word to read.
8024  * data - Pointer to a word to store the value read.
8025  *****************************************************************************/
8026 static int32_t
8027 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
8028 {
8029 	int32_t status;
8030 
8031 	status = wm_read_ich8_data(sc, index, 2, data);
8032 	return status;
8033 }
8034 
8035 /******************************************************************************
8036  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
8037  * register.
8038  *
8039  * sc - Struct containing variables accessed by shared code
8040  * offset - offset of word in the EEPROM to read
8041  * data - word read from the EEPROM
8042  * words - number of words to read
8043  *****************************************************************************/
8044 static int
8045 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
8046 {
8047 	int32_t  error = 0;
8048 	uint32_t flash_bank = 0;
8049 	uint32_t act_offset = 0;
8050 	uint32_t bank_offset = 0;
8051 	uint16_t word = 0;
8052 	uint16_t i = 0;
8053 
8054 	/*
8055 	 * We need to know which is the valid flash bank.  In the event
8056 	 * that we didn't allocate eeprom_shadow_ram, we may not be
8057 	 * managing flash_bank.  So it cannot be trusted and needs
8058 	 * to be updated with each read.
8059 	 */
8060 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
8061 	if (error) {
8062 		aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
8063 		    __func__);
8064 		flash_bank = 0;
8065 	}
8066 
8067 	/*
8068 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
8069 	 * size
8070 	 */
8071 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
8072 
8073 	error = wm_get_swfwhw_semaphore(sc);
8074 	if (error) {
8075 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8076 		    __func__);
8077 		return error;
8078 	}
8079 
8080 	for (i = 0; i < words; i++) {
8081 		/* The NVM part needs a byte offset, hence * 2 */
8082 		act_offset = bank_offset + ((offset + i) * 2);
8083 		error = wm_read_ich8_word(sc, act_offset, &word);
8084 		if (error) {
8085 			aprint_error_dev(sc->sc_dev,
8086 			    "%s: failed to read NVM\n", __func__);
8087 			break;
8088 		}
8089 		data[i] = word;
8090 	}
8091 
8092 	wm_put_swfwhw_semaphore(sc);
8093 	return error;
8094 }
8095 
8096 /* Lock, detecting NVM type, validate checksum and read */
8097 
8098 /*
8099  * wm_nvm_acquire:
8100  *
8101  *	Perform the EEPROM handshake required on some chips.
8102  */
8103 static int
8104 wm_nvm_acquire(struct wm_softc *sc)
8105 {
8106 	uint32_t reg;
8107 	int x;
8108 	int ret = 0;
8109 
8110 	/* Flash parts need no handshake; always succeeds */
8111 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
8112 		return 0;
8113 
8114 	if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8115 		ret = wm_get_swfwhw_semaphore(sc);
8116 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
8117 		/* This will also do wm_get_swsm_semaphore() if needed */
8118 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
8119 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
8120 		ret = wm_get_swsm_semaphore(sc);
8121 	}
8122 
8123 	if (ret) {
8124 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8125 			__func__);
8126 		return 1;
8127 	}
8128 
8129 	if (sc->sc_flags & WM_F_LOCK_EECD) {
8130 		reg = CSR_READ(sc, WMREG_EECD);
8131 
8132 		/* Request EEPROM access. */
8133 		reg |= EECD_EE_REQ;
8134 		CSR_WRITE(sc, WMREG_EECD, reg);
8135 
8136 		/* ..and wait for it to be granted. */
8137 		for (x = 0; x < 1000; x++) {
8138 			reg = CSR_READ(sc, WMREG_EECD);
8139 			if (reg & EECD_EE_GNT)
8140 				break;
8141 			delay(5);
8142 		}
8143 		if ((reg & EECD_EE_GNT) == 0) {
8144 			aprint_error_dev(sc->sc_dev,
8145 			    "could not acquire EEPROM GNT\n");
8146 			reg &= ~EECD_EE_REQ;
8147 			CSR_WRITE(sc, WMREG_EECD, reg);
8148 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8149 				wm_put_swfwhw_semaphore(sc);
8150 			if (sc->sc_flags & WM_F_LOCK_SWFW)
8151 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
8152 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
8153 				wm_put_swsm_semaphore(sc);
8154 			return 1;
8155 		}
8156 	}
8157 
8158 	return 0;
8159 }
8160 
8161 /*
8162  * wm_nvm_release:
8163  *
8164  *	Release the EEPROM mutex.
8165  */
8166 static void
8167 wm_nvm_release(struct wm_softc *sc)
8168 {
8169 	uint32_t reg;
8170 
8171 	/* Flash parts took no lock; nothing to release */
8172 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
8173 		return;
8174 
8175 	if (sc->sc_flags & WM_F_LOCK_EECD) {
8176 		reg = CSR_READ(sc, WMREG_EECD);
8177 		reg &= ~EECD_EE_REQ;
8178 		CSR_WRITE(sc, WMREG_EECD, reg);
8179 	}
8180 
8181 	if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8182 		wm_put_swfwhw_semaphore(sc);
8183 	if (sc->sc_flags & WM_F_LOCK_SWFW)
8184 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
8185 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
8186 		wm_put_swsm_semaphore(sc);
8187 }
8188 
8189 static int
8190 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
8191 {
8192 	uint32_t eecd = 0;
8193 
8194 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
8195 	    || sc->sc_type == WM_T_82583) {
8196 		eecd = CSR_READ(sc, WMREG_EECD);
8197 
8198 		/* Isolate bits 15 & 16 */
8199 		eecd = ((eecd >> 15) & 0x03);
8200 
8201 		/* If both bits are set, device is Flash type */
8202 		if (eecd == 0x03)
8203 			return 0;
8204 	}
8205 	return 1;
8206 }
8207 
8208 #define NVM_CHECKSUM			0xBABA
8209 #define EEPROM_SIZE			0x0040
8210 #define NVM_COMPAT			0x0003
8211 #define NVM_COMPAT_VALID_CHECKSUM	0x0001
8212 #define NVM_FUTURE_INIT_WORD1			0x0019
8213 #define NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM	0x0040
8214 
8215 /*
8216  * wm_nvm_validate_checksum
8217  *
8218  * The checksum is defined as the sum of the first 64 (16 bit) words.
8219  */
8220 static int
8221 wm_nvm_validate_checksum(struct wm_softc *sc)
8222 {
8223 	uint16_t checksum;
8224 	uint16_t eeprom_data;
8225 #ifdef WM_DEBUG
8226 	uint16_t csum_wordaddr, valid_checksum;
8227 #endif
8228 	int i;
8229 
8230 	checksum = 0;
8231 
8232 	/* Don't check for I211 */
8233 	if (sc->sc_type == WM_T_I211)
8234 		return 0;
8235 
8236 #ifdef WM_DEBUG
8237 	if (sc->sc_type == WM_T_PCH_LPT) {
8238 		csum_wordaddr = NVM_COMPAT;
8239 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
8240 	} else {
8241 		csum_wordaddr = NVM_FUTURE_INIT_WORD1;
8242 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
8243 	}
8244 
8245 	/* Dump EEPROM image for debug */
8246 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8247 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8248 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
8249 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
8250 		if ((eeprom_data & valid_checksum) == 0) {
8251 			DPRINTF(WM_DEBUG_NVM,
8252 			    ("%s: NVM need to be updated (%04x != %04x)\n",
8253 				device_xname(sc->sc_dev), eeprom_data,
8254 				    valid_checksum));
8255 		}
8256 	}
8257 
8258 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
8259 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
8260 		for (i = 0; i < EEPROM_SIZE; i++) {
8261 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
8262 				printf("XX ");
8263 			else
8264 				printf("%04x ", eeprom_data);
8265 			if (i % 8 == 7)
8266 				printf("\n");
8267 		}
8268 	}
8269 
8270 #endif /* WM_DEBUG */
8271 
8272 	for (i = 0; i < EEPROM_SIZE; i++) {
8273 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
8274 			return 1;
8275 		checksum += eeprom_data;
8276 	}
8277 
8278 	if (checksum != (uint16_t) NVM_CHECKSUM) {
8279 #ifdef WM_DEBUG
8280 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
8281 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
8282 #endif
8283 	}
8284 
8285 	return 0;
8286 }
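
/*
 * Example (illustrative sketch, assuming a 64-word image in "image[]"):
 * tools that modify the NVM keep it valid by rewriting the final word of
 * the checksummed region so that the 64-word sum comes out to
 * NVM_CHECKSUM:
 */
#if 0
	uint16_t sum = 0;
	int j;

	for (j = 0; j < EEPROM_SIZE - 1; j++)
		sum += image[j];
	image[EEPROM_SIZE - 1] = NVM_CHECKSUM - sum;	/* fix-up word */
#endif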
8287 
8288 /*
8289  * wm_nvm_read:
8290  *
8291  *	Read data from the serial EEPROM.
8292  */
8293 static int
8294 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
8295 {
8296 	int rv;
8297 
8298 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
8299 		return 1;
8300 
8301 	if (wm_nvm_acquire(sc))
8302 		return 1;
8303 
8304 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8305 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8306 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
8307 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
8308 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
8309 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
8310 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
8311 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
8312 	else
8313 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
8314 
8315 	wm_nvm_release(sc);
8316 	return rv;
8317 }
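
/*
 * Example (illustrative sketch): callers fetch NVM words through
 * wm_nvm_read() without knowing which backend is used, e.g. the first
 * three words, which commonly hold the Ethernet address:
 */
#if 0
	uint16_t myea[3];

	if (wm_nvm_read(sc, 0, 3, myea) != 0)
		aprint_error_dev(sc->sc_dev, "unable to read NVM\n");
#endif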
8318 
8319 /*
8320  * Hardware semaphores.
8321  * Very complex...
8322  */
8323 
8324 static int
8325 wm_get_swsm_semaphore(struct wm_softc *sc)
8326 {
8327 	int32_t timeout;
8328 	uint32_t swsm;
8329 
8330 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
8331 		/* Get the SW semaphore. */
8332 		timeout = 1000 + 1; /* XXX */
8333 		while (timeout) {
8334 			swsm = CSR_READ(sc, WMREG_SWSM);
8335 
8336 			if ((swsm & SWSM_SMBI) == 0)
8337 				break;
8338 
8339 			delay(50);
8340 			timeout--;
8341 		}
8342 
8343 		if (timeout == 0) {
8344 			aprint_error_dev(sc->sc_dev,
8345 			    "could not acquire SWSM SMBI\n");
8346 			return 1;
8347 		}
8348 	}
8349 
8350 	/* Get the FW semaphore. */
8351 	timeout = 1000 + 1; /* XXX */
8352 	while (timeout) {
8353 		swsm = CSR_READ(sc, WMREG_SWSM);
8354 		swsm |= SWSM_SWESMBI;
8355 		CSR_WRITE(sc, WMREG_SWSM, swsm);
8356 		/* If we managed to set the bit we got the semaphore. */
8357 		swsm = CSR_READ(sc, WMREG_SWSM);
8358 		if (swsm & SWSM_SWESMBI)
8359 			break;
8360 
8361 		delay(50);
8362 		timeout--;
8363 	}
8364 
8365 	if (timeout == 0) {
8366 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
8367 		/* Release semaphores */
8368 		wm_put_swsm_semaphore(sc);
8369 		return 1;
8370 	}
8371 	return 0;
8372 }
8373 
8374 static void
8375 wm_put_swsm_semaphore(struct wm_softc *sc)
8376 {
8377 	uint32_t swsm;
8378 
8379 	swsm = CSR_READ(sc, WMREG_SWSM);
8380 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
8381 	CSR_WRITE(sc, WMREG_SWSM, swsm);
8382 }
8383 
8384 static int
8385 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
8386 {
8387 	uint32_t swfw_sync;
8388 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
8389 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
8390 	int timeout;
8391 
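	/*
	 * Protocol: take the SWSM semaphore for exclusive access to
	 * SW_FW_SYNC, then claim our software bit only if neither
	 * software nor firmware currently owns this resource.
	 */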
8392 	for (timeout = 0; timeout < 200; timeout++) {
8393 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
8394 			if (wm_get_swsm_semaphore(sc)) {
8395 				aprint_error_dev(sc->sc_dev,
8396 				    "%s: failed to get semaphore\n",
8397 				    __func__);
8398 				return 1;
8399 			}
8400 		}
8401 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
8402 		if ((swfw_sync & (swmask | fwmask)) == 0) {
8403 			swfw_sync |= swmask;
8404 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
8405 			if (sc->sc_flags & WM_F_LOCK_SWSM)
8406 				wm_put_swsm_semaphore(sc);
8407 			return 0;
8408 		}
8409 		if (sc->sc_flags & WM_F_LOCK_SWSM)
8410 			wm_put_swsm_semaphore(sc);
8411 		delay(5000);
8412 	}
8413 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
8414 	    device_xname(sc->sc_dev), mask, swfw_sync);
8415 	return 1;
8416 }
8417 
8418 static void
8419 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
8420 {
8421 	uint32_t swfw_sync;
8422 
8423 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
8424 		while (wm_get_swsm_semaphore(sc) != 0)
8425 			continue;
8426 	}
8427 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
8428 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
8429 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
8430 	if (sc->sc_flags & WM_F_LOCK_SWSM)
8431 		wm_put_swsm_semaphore(sc);
8432 }
8433 
8434 static int
8435 wm_get_swfwhw_semaphore(struct wm_softc *sc)
8436 {
8437 	uint32_t ext_ctrl;
8438 	int timeout;
8439 
8440 	for (timeout = 0; timeout < 200; timeout++) {
8441 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8442 		ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
8443 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
8444 
8445 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8446 		if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
8447 			return 0;
8448 		delay(5000);
8449 	}
8450 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
8451 	    device_xname(sc->sc_dev), ext_ctrl);
8452 	return 1;
8453 }
8454 
8455 static void
8456 wm_put_swfwhw_semaphore(struct wm_softc *sc)
8457 {
8458 	uint32_t ext_ctrl;
8459 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
8460 	ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
8461 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
8462 }
8463 
8464 static int
8465 wm_get_hw_semaphore_82573(struct wm_softc *sc)
8466 {
8467 	int i = 0;
8468 	uint32_t reg;
8469 
8470 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8471 	do {
8472 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
8473 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
8474 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8475 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
8476 			break;
8477 		delay(2*1000);
8478 		i++;
8479 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
8480 
8481 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
8482 		wm_put_hw_semaphore_82573(sc);
8483 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
8484 		    device_xname(sc->sc_dev));
8485 		return -1;
8486 	}
8487 
8488 	return 0;
8489 }
8490 
8491 static void
8492 wm_put_hw_semaphore_82573(struct wm_softc *sc)
8493 {
8494 	uint32_t reg;
8495 
8496 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8497 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
8498 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
8499 }
8500 
8501 /*
8502  * Management mode and power management related subroutines.
8503  * BMC, AMT, suspend/resume and EEE.
8504  */
8505 
8506 static int
8507 wm_check_mng_mode(struct wm_softc *sc)
8508 {
8509 	int rv;
8510 
8511 	switch (sc->sc_type) {
8512 	case WM_T_ICH8:
8513 	case WM_T_ICH9:
8514 	case WM_T_ICH10:
8515 	case WM_T_PCH:
8516 	case WM_T_PCH2:
8517 	case WM_T_PCH_LPT:
8518 		rv = wm_check_mng_mode_ich8lan(sc);
8519 		break;
8520 	case WM_T_82574:
8521 	case WM_T_82583:
8522 		rv = wm_check_mng_mode_82574(sc);
8523 		break;
8524 	case WM_T_82571:
8525 	case WM_T_82572:
8526 	case WM_T_82573:
8527 	case WM_T_80003:
8528 		rv = wm_check_mng_mode_generic(sc);
8529 		break;
8530 	default:
8531 		/* noting to do */
8532 		/* nothing to do */
8533 		break;
8534 	}
8535 
8536 	return rv;
8537 }
8538 
8539 static int
8540 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
8541 {
8542 	uint32_t fwsm;
8543 
8544 	fwsm = CSR_READ(sc, WMREG_FWSM);
8545 
8546 	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
8547 		return 1;
8548 
8549 	return 0;
8550 }
8551 
8552 static int
8553 wm_check_mng_mode_82574(struct wm_softc *sc)
8554 {
8555 	uint16_t data;
8556 
8557 	wm_nvm_read(sc, EEPROM_OFF_CFG2, 1, &data);
8558 
8559 	if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
8560 		return 1;
8561 
8562 	return 0;
8563 }
8564 
8565 static int
8566 wm_check_mng_mode_generic(struct wm_softc *sc)
8567 {
8568 	uint32_t fwsm;
8569 
8570 	fwsm = CSR_READ(sc, WMREG_FWSM);
8571 
8572 	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
8573 		return 1;
8574 
8575 	return 0;
8576 }
8577 
8578 static int
8579 wm_enable_mng_pass_thru(struct wm_softc *sc)
8580 {
8581 	uint32_t manc, fwsm, factps;
8582 
8583 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
8584 		return 0;
8585 
8586 	manc = CSR_READ(sc, WMREG_MANC);
8587 
8588 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
8589 		device_xname(sc->sc_dev), manc));
8590 	if ((manc & MANC_RECV_TCO_EN) == 0)
8591 		return 0;
8592 
8593 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
8594 		fwsm = CSR_READ(sc, WMREG_FWSM);
8595 		factps = CSR_READ(sc, WMREG_FACTPS);
8596 		if (((factps & FACTPS_MNGCG) == 0)
8597 		    && ((fwsm & FWSM_MODE_MASK)
8598 			== (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
8599 			return 1;
8600 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
8601 		uint16_t data;
8602 
8603 		factps = CSR_READ(sc, WMREG_FACTPS);
8604 		wm_nvm_read(sc, EEPROM_OFF_CFG2, 1, &data);
8605 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
8606 			device_xname(sc->sc_dev), factps, data));
8607 		if (((factps & FACTPS_MNGCG) == 0)
8608 		    && ((data & EEPROM_CFG2_MNGM_MASK)
8609 			== (EEPROM_CFG2_MNGM_PT << EEPROM_CFG2_MNGM_SHIFT)))
8610 			return 1;
8611 	} else if (((manc & MANC_SMBUS_EN) != 0)
8612 	    && ((manc & MANC_ASF_EN) == 0))
8613 		return 1;
8614 
8615 	return 0;
8616 }
8617 
8618 static int
8619 wm_check_reset_block(struct wm_softc *sc)
8620 {
8621 	uint32_t reg;
8622 
8623 	switch (sc->sc_type) {
8624 	case WM_T_ICH8:
8625 	case WM_T_ICH9:
8626 	case WM_T_ICH10:
8627 	case WM_T_PCH:
8628 	case WM_T_PCH2:
8629 	case WM_T_PCH_LPT:
8630 		reg = CSR_READ(sc, WMREG_FWSM);
8631 		if ((reg & FWSM_RSPCIPHY) != 0)
8632 			return 0;
8633 		else
8634 			return -1;
8635 		break;
8636 	case WM_T_82571:
8637 	case WM_T_82572:
8638 	case WM_T_82573:
8639 	case WM_T_82574:
8640 	case WM_T_82583:
8641 	case WM_T_80003:
8642 		reg = CSR_READ(sc, WMREG_MANC);
8643 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
8644 			return -1;
8645 		else
8646 			return 0;
8647 		break;
8648 	default:
8649 		/* no problem */
8650 		break;
8651 	}
8652 
8653 	return 0;
8654 }
8655 
8656 static void
8657 wm_get_hw_control(struct wm_softc *sc)
8658 {
8659 	uint32_t reg;
8660 
8661 	switch (sc->sc_type) {
8662 	case WM_T_82573:
8663 		reg = CSR_READ(sc, WMREG_SWSM);
8664 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
8665 		break;
8666 	case WM_T_82571:
8667 	case WM_T_82572:
8668 	case WM_T_82574:
8669 	case WM_T_82583:
8670 	case WM_T_80003:
8671 	case WM_T_ICH8:
8672 	case WM_T_ICH9:
8673 	case WM_T_ICH10:
8674 	case WM_T_PCH:
8675 	case WM_T_PCH2:
8676 	case WM_T_PCH_LPT:
8677 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
8678 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
8679 		break;
8680 	default:
8681 		break;
8682 	}
8683 }
8684 
8685 static void
8686 wm_release_hw_control(struct wm_softc *sc)
8687 {
8688 	uint32_t reg;
8689 
8690 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
8691 		return;
8692 
8693 	if (sc->sc_type == WM_T_82573) {
8694 		reg = CSR_READ(sc, WMREG_SWSM);
8695 		reg &= ~SWSM_DRV_LOAD;
8696 		CSR_WRITE(sc, WMREG_SWSM, reg);
8697 	} else {
8698 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
8699 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
8700 	}
8701 }
8702 
8703 static void
8704 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
8705 {
8706 	uint32_t reg;
8707 
8708 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
8709 
8710 	if (on != 0)
8711 		reg |= EXTCNFCTR_GATE_PHY_CFG;
8712 	else
8713 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
8714 
8715 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
8716 }
8717 
8718 static void
8719 wm_smbustopci(struct wm_softc *sc)
8720 {
8721 	uint32_t fwsm;
8722 
8723 	fwsm = CSR_READ(sc, WMREG_FWSM);
8724 	if (((fwsm & FWSM_FW_VALID) == 0)
8725 	    && ((wm_check_reset_block(sc) == 0))) {
8726 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
8727 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
8728 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8729 		CSR_WRITE_FLUSH(sc);
8730 		delay(10);
8731 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
8732 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8733 		CSR_WRITE_FLUSH(sc);
8734 		delay(50*1000);
8735 
8736 		/*
8737 		 * Gate automatic PHY configuration by hardware on non-managed
8738 		 * 82579
8739 		 */
8740 		if (sc->sc_type == WM_T_PCH2)
8741 			wm_gate_hw_phy_config_ich8lan(sc, 1);
8742 	}
8743 }
8744 
8745 static void
8746 wm_init_manageability(struct wm_softc *sc)
8747 {
8748 
8749 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
8750 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
8751 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
8752 
8753 		/* Disable hardware interception of ARP */
8754 		manc &= ~MANC_ARP_EN;
8755 
8756 		/* Enable receiving management packets to the host */
8757 		if (sc->sc_type >= WM_T_82571) {
8758 			manc |= MANC_EN_MNG2HOST;
8759 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
8760 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
8761 
8762 		}
8763 
8764 		CSR_WRITE(sc, WMREG_MANC, manc);
8765 	}
8766 }
8767 
8768 static void
8769 wm_release_manageability(struct wm_softc *sc)
8770 {
8771 
8772 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
8773 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
8774 
8775 		manc |= MANC_ARP_EN;
8776 		if (sc->sc_type >= WM_T_82571)
8777 			manc &= ~MANC_EN_MNG2HOST;
8778 
8779 		CSR_WRITE(sc, WMREG_MANC, manc);
8780 	}
8781 }
8782 
8783 static void
8784 wm_get_wakeup(struct wm_softc *sc)
8785 {
8786 
8787 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
8788 	switch (sc->sc_type) {
8789 	case WM_T_82573:
8790 	case WM_T_82583:
8791 		sc->sc_flags |= WM_F_HAS_AMT;
8792 		/* FALLTHROUGH */
8793 	case WM_T_80003:
8794 	case WM_T_82541:
8795 	case WM_T_82547:
8796 	case WM_T_82571:
8797 	case WM_T_82572:
8798 	case WM_T_82574:
8799 	case WM_T_82575:
8800 	case WM_T_82576:
8801 	case WM_T_82580:
8802 	case WM_T_82580ER:
8803 	case WM_T_I350:
8804 	case WM_T_I354:
8805 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
8806 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
8807 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8808 		break;
8809 	case WM_T_ICH8:
8810 	case WM_T_ICH9:
8811 	case WM_T_ICH10:
8812 	case WM_T_PCH:
8813 	case WM_T_PCH2:
8814 	case WM_T_PCH_LPT:
8815 		sc->sc_flags |= WM_F_HAS_AMT;
8816 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8817 		break;
8818 	default:
8819 		break;
8820 	}
8821 
8822 	/* 1: HAS_MANAGE */
8823 	if (wm_enable_mng_pass_thru(sc) != 0)
8824 		sc->sc_flags |= WM_F_HAS_MANAGE;
8825 
8826 #ifdef WM_DEBUG
8827 	printf("\n");
8828 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
8829 		printf("HAS_AMT,");
8830 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
8831 		printf("ARC_SUBSYS_VALID,");
8832 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
8833 		printf("ASF_FIRMWARE_PRES,");
8834 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
8835 		printf("HAS_MANAGE,");
8836 	printf("\n");
8837 #endif
8838 	/*
8839 	 * Note that the WOL flags are set after the EEPROM-related setup
8840 	 * above has completed.
8841 	 */
8842 }
8843 
8844 #ifdef WM_WOL
8845 /* WOL in the newer chipset interfaces (pchlan) */
8846 static void
8847 wm_enable_phy_wakeup(struct wm_softc *sc)
8848 {
8849 #if 0
8850 	uint16_t preg;
8851 
8852 	/* Copy MAC RARs to PHY RARs */
8853 
8854 	/* Copy MAC MTA to PHY MTA */
8855 
8856 	/* Configure PHY Rx Control register */
8857 
8858 	/* Enable PHY wakeup in MAC register */
8859 
8860 	/* Configure and enable PHY wakeup in PHY registers */
8861 
8862 	/* Activate PHY wakeup */
8863 
8864 	/* XXX */
8865 #endif
8866 }
8867 
8868 /* Power down workaround on D3 */
8869 static void
8870 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
8871 {
8872 	uint32_t reg;
8873 	int i;
8874 
8875 	for (i = 0; i < 2; i++) {
8876 		/* Disable link */
8877 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
8878 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
8879 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8880 
8881 		/*
8882 		 * Call gig speed drop workaround on Gig disable before
8883 		 * accessing any PHY registers
8884 		 */
8885 		if (sc->sc_type == WM_T_ICH8)
8886 			wm_gig_downshift_workaround_ich8lan(sc);
8887 
8888 		/* Write VR power-down enable */
8889 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
8890 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
8891 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
8892 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
8893 
8894 		/* Read it back and test */
8895 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
8896 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
8897 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
8898 			break;
8899 
8900 		/* Issue PHY reset and repeat at most one more time */
8901 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
8902 	}
8903 }
8904 
8905 static void
8906 wm_enable_wakeup(struct wm_softc *sc)
8907 {
8908 	uint32_t reg, pmreg;
8909 	pcireg_t pmode;
8910 
8911 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
8912 		&pmreg, NULL) == 0)
8913 		return;
8914 
8915 	/* Advertise the wakeup capability */
8916 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
8917 	    | CTRL_SWDPIN(3));
8918 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
8919 
8920 	/* ICH workaround */
8921 	switch (sc->sc_type) {
8922 	case WM_T_ICH8:
8923 	case WM_T_ICH9:
8924 	case WM_T_ICH10:
8925 	case WM_T_PCH:
8926 	case WM_T_PCH2:
8927 	case WM_T_PCH_LPT:
8928 		/* Disable gig during WOL */
8929 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
8930 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
8931 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8932 		if (sc->sc_type == WM_T_PCH)
8933 			wm_gmii_reset(sc);
8934 
8935 		/* Power down workaround */
8936 		if (sc->sc_phytype == WMPHY_82577) {
8937 			struct mii_softc *child;
8938 
8939 			/* Assume that the PHY is copper */
8940 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
8941 			if (child->mii_mpd_rev <= 2)
8942 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
8943 				    (768 << 5) | 25, 0x0444); /* magic num */
8944 		}
8945 		break;
8946 	default:
8947 		break;
8948 	}
8949 
8950 	/* Keep the laser running on fiber adapters */
8951 	if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
8952 	    || (sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0) {
8953 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
8954 		reg |= CTRL_EXT_SWDPIN(3);
8955 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
8956 	}
8957 
8958 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
8959 #if 0	/* for the multicast packet */
8960 	reg |= WUFC_MC;
8961 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
8962 #endif
8963 
8964 	if (sc->sc_type == WM_T_PCH) {
8965 		wm_enable_phy_wakeup(sc);
8966 	} else {
8967 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
8968 		CSR_WRITE(sc, WMREG_WUFC, reg);
8969 	}
8970 
8971 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8972 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8973 		|| (sc->sc_type == WM_T_PCH2))
8974 		    && (sc->sc_phytype == WMPHY_IGP_3))
8975 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
8976 
8977 	/* Request PME */
8978 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
8979 #if 0
8980 	/* Disable WOL */
8981 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
8982 #else
8983 	/* For WOL */
8984 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
8985 #endif
8986 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
8987 }
8988 #endif /* WM_WOL */
8989 
8990 /* EEE */
8991 
8992 static void
8993 wm_set_eee_i350(struct wm_softc *sc)
8994 {
8995 	uint32_t ipcnfg, eeer;
8996 
8997 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
8998 	eeer = CSR_READ(sc, WMREG_EEER);
8999 
9000 	if ((sc->sc_flags & WM_F_EEE) != 0) {
9001 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9002 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
9003 		    | EEER_LPI_FC);
9004 	} else {
9005 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9006 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
9007 		    | EEER_LPI_FC);
9008 	}
9009 
9010 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
9011 	CSR_WRITE(sc, WMREG_EEER, eeer);
9012 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
9013 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
9014 }
9015 
9016 /*
9017  * Workarounds (mainly PHY related).
9018  * Basically, PHY's workarounds are in the PHY drivers.
9019  */
9020 
9021 /* Work-around for 82566 Kumeran PCS lock loss */
9022 static void
9023 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
9024 {
9025 	int miistatus, active, i;
9026 	int reg;
9027 
9028 	miistatus = sc->sc_mii.mii_media_status;
9029 
9030 	/* If the link is not up, do nothing */
9031 	if ((miistatus & IFM_ACTIVE) == 0)
9032 		return;
9033 
9034 	active = sc->sc_mii.mii_media_active;
9035 
9036 	/* Nothing to do if the link is anything other than 1Gbps */
9037 	if (IFM_SUBTYPE(active) != IFM_1000_T)
9038 		return;
9039 
9040 	for (i = 0; i < 10; i++) {
9041 		/* read twice */
9042 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
9043 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
9044 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
9045 			goto out;	/* GOOD! */
9046 
9047 		/* Reset the PHY */
9048 		wm_gmii_reset(sc);
9049 		delay(5*1000);
9050 	}
9051 
9052 	/* Disable GigE link negotiation */
9053 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
9054 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
9055 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
9056 
9057 	/*
9058 	 * Call gig speed drop workaround on Gig disable before accessing
9059 	 * any PHY registers.
9060 	 */
9061 	wm_gig_downshift_workaround_ich8lan(sc);
9062 
9063 out:
9064 	return;
9065 }
9066 
9067 /* Workaround: after a gig downshift, WOL from S5 stops working */
9068 static void
9069 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
9070 {
9071 	uint16_t kmrn_reg;
9072 
9073 	/* Only for igp3 */
9074 	if (sc->sc_phytype == WMPHY_IGP_3) {
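		/*
		 * Pulse the Kumeran near-end loopback bit: set it, then
		 * clear it again.  This is what recovers WOL from S5
		 * after a gigabit downshift.
		 */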
9075 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
9076 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
9077 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9078 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
9079 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
9080 	}
9081 }
9082 
9083 /*
9084  * Workaround for the PCH PHYs (82577/82578)
9085  * XXX should be moved to a new PHY driver?
9086  */
9087 static void
9088 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
9089 {
9090 	if (sc->sc_phytype == WMPHY_82577)
9091 		wm_set_mdio_slow_mode_hv(sc);
9092 
9093 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
9094 
9095 	/* (82577 && (phy rev 1 or 2)) || (82578 && (phy rev 1)) */
9096 
9097 	/* 82578 */
9098 	if (sc->sc_phytype == WMPHY_82578) {
9099 		/* PCH rev. < 3 */
9100 		if (sc->sc_rev < 3) {
9101 			/* XXX 6 bit shift? Why? Is it page2? */
9102 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
9103 			    0x66c0);
9104 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
9105 			    0xffff);
9106 		}
9107 
9108 		/* XXX phy rev. < 2 */
9109 	}
9110 
9111 	/* Select page 0 */
9112 
9113 	/* XXX acquire semaphore */
9114 	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
9115 	/* XXX release semaphore */
9116 
9117 	/*
9118 	 * Configure the K1 Si workaround during PHY reset, assuming the
9119 	 * link is up, so that K1 is disabled while the link runs at 1Gbps.
9120 	 */
9121 	wm_k1_gig_workaround_hv(sc, 1);
9122 }
9123 
9124 static void
9125 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
9126 {
9127 
9128 	wm_set_mdio_slow_mode_hv(sc);
9129 }
9130 
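/*
 * K1 is a Kumeran power-save state that must not stay enabled while the
 * link runs at 1Gbps: force it off when the link is up, and restore the
 * NVM-configured default when it is down.
 */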
9131 static void
9132 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
9133 {
9134 	int k1_enable = sc->sc_nvm_k1_enabled;
9135 
9136 	/* XXX acquire semaphore */
9137 
9138 	if (link) {
9139 		k1_enable = 0;
9140 
9141 		/* Link stall fix for link up */
9142 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
9143 	} else {
9144 		/* Link stall fix for link down */
9145 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
9146 	}
9147 
9148 	wm_configure_k1_ich8lan(sc, k1_enable);
9149 
9150 	/* XXX release semaphore */
9151 }
9152 
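/* Slow down MDIO accesses on HV (82577/82578) PHYs. */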
9153 static void
9154 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
9155 {
9156 	uint32_t reg;
9157 
9158 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
9159 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
9160 	    reg | HV_KMRN_MDIO_SLOW);
9161 }
9162 
9163 static void
9164 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
9165 {
9166 	uint32_t ctrl, ctrl_ext, tmp;
9167 	uint16_t kmrn_reg;
9168 
9169 	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
9170 
9171 	if (k1_enable)
9172 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
9173 	else
9174 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
9175 
9176 	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
9177 
9178 	delay(20);
9179 
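	/*
	 * Briefly force the MAC speed (clear the 1000/100 speed bits, set
	 * force-speed, and set the speed-bypass bit), then restore the
	 * original CTRL/CTRL_EXT values.  The Intel code does the same
	 * dance after updating the K1 configuration.
	 */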
9180 	ctrl = CSR_READ(sc, WMREG_CTRL);
9181 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9182 
9183 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
9184 	tmp |= CTRL_FRCSPD;
9185 
9186 	CSR_WRITE(sc, WMREG_CTRL, tmp);
9187 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
9188 	CSR_WRITE_FLUSH(sc);
9189 	delay(20);
9190 
9191 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
9192 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
9193 	CSR_WRITE_FLUSH(sc);
9194 	delay(20);
9195 }
9196 
9197 /* Special case - the 82575 needs a manual init script ... */
9198 static void
9199 wm_reset_init_script_82575(struct wm_softc *sc)
9200 {
9201 	/*
9202 	 * Remark: this is untested code - we have no board without EEPROM.
9203 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
9204 	 */
9205 
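	/*
	 * Each wm_82575_write_8bit_ctlr_reg() call writes one 8-bit value
	 * at a sub-register offset: (softc, controller register, offset,
	 * value).
	 */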
9206 	/* SerDes configuration via SERDESCTRL */
9207 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
9208 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
9209 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
9210 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
9211 
9212 	/* CCM configuration via CCMCTL register */
9213 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
9214 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
9215 
9216 	/* PCIe lanes configuration */
9217 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
9218 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
9219 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
9220 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
9221 
9222 	/* PCIe PLL Configuration */
9223 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
9224 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
9225 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
9226 }
9227