/*	$NetBSD: if_wm.c,v 1.272 2014/07/01 10:35:18 ozaki-r Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.272 2014/07/01 10:35:18 ozaki-r Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rnd.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
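
/*
 * Usage sketch for DPRINTF (illustrative comment, not driver code): the
 * second argument carries its own parentheses so the varargs survive the
 * macro expansion, e.g.
 *
 *	DPRINTF(WM_DEBUG_TX, ("%s: TDT updated\n", device_xname(sc->sc_dev)));
 *
 * which expands to "if (wm_debug & WM_DEBUG_TX) printf(...)" under
 * WM_DEBUG and to nothing otherwise.
 */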

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#endif

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
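
/*
 * Worked example (illustrative comment, not driver code): because the
 * ring sizes are powers of two, "add one and mask" implements wrap-around.
 * With WM_NTXDESC(sc) == 256 the mask is 0xff, so WM_NEXTTX(sc, 254) == 255
 * and WM_NEXTTX(sc, 255) == (256 & 0xff) == 0, returning to the head of
 * the ring without a division or comparison.
 */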

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
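
/*
 * Arithmetic behind the comment above (illustrative; the exact jumbo
 * frame size is an assumption): a ~9KB jumbo frame split across 2K
 * (MCLBYTES) buffers needs ceil(9018 / 2048) = 5 descriptors, so the
 * 256-entry ring holds roughly 256 / 5 = 51 full-sized jumbo packets --
 * hence "room for 50" above.
 */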

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t      wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability register offset */

	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_ee_addrbits;		/* EEPROM address bits */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int	sc_txfree;		/* number of free Tx descriptors */
	int	sc_txnext;		/* next ready Tx descriptor */

	int	sc_txsfree;		/* number of free Tx jobs */
	int	sc_txsnext;		/* next free Tx job */
	int	sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int	sc_txfifo_size;		/* Tx FIFO size */
	int	sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int	sc_txfifo_stall;	/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
	int	sc_rxdiscard;
	int	sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	kmutex_t *sc_txrx_lock;		/* lock for tx/rx operations */
					/* XXX need separation? */
};

#define WM_LOCK(_sc)	if ((_sc)->sc_txrx_lock) mutex_enter((_sc)->sc_txrx_lock)
#define WM_UNLOCK(_sc)	if ((_sc)->sc_txrx_lock) mutex_exit((_sc)->sc_txrx_lock)
#define WM_LOCKED(_sc)	(!(_sc)->sc_txrx_lock || mutex_owned((_sc)->sc_txrx_lock))

#ifdef WM_MPSAFE
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
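
/*
 * How the chain macros fit together (illustrative comment): sc_rxtailp
 * always points at the pointer through which the next mbuf should be
 * stored.  After WM_RXCHAIN_RESET() it points at sc_rxhead, so the first
 * WM_RXCHAIN_LINK(sc, m) sets sc_rxhead = m; each later link stores
 * through the previous mbuf's m_next, appending in O(1) without walking
 * the chain.
 */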

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg) \
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE32(sc, reg, data) \
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define ICH8_FLASH_READ16(sc, reg) \
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE16(sc, reg, data) \
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
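
/*
 * Worked example for WM_CDTXSYNC (illustrative comment): with a 256-entry
 * ring, WM_CDTXSYNC(sc, 254, 4, ops) first syncs the two descriptors up
 * to the end of the ring (254 and 255), then resets __x to 0 and syncs
 * the remaining two (0 and 1).  A range that does not cross the end of
 * the ring falls straight through to the second bus_dmamap_sync() call.
 */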

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
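
/*
 * Why the 2-byte scoot works (illustrative comment): an Ethernet header
 * is 14 bytes, so a frame starting at a 4-byte-aligned address leaves
 * the IP header at offset 14, which is only 2-byte aligned.  Starting
 * the frame at ext_buf + 2 puts the IP header at offset 16, a 4-byte
 * boundary, so strict-alignment CPUs can load 32-bit header fields
 * directly.
 */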

static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static int	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);
static void	wm_set_vlan(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(struct ifnet *);

static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

static void	wm_set_spiaddrbits(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		     uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
#ifdef WM_WOL
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
#endif
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
static void	wm_init_manageability(struct wm_softc *);
static void	wm_set_eee_i350(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
#define	WMP_F_SERDES		0x04
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
#if 0
	/*
	 * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so
	 * disabled for now ...
	 */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_1000T },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Connection",
	  WM_T_I354,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
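
/*
 * Sketch of the indirect access pattern above (illustrative comment; the
 * IOADDR/IODATA names follow the i8254x documentation rather than this
 * file): the I/O BAR is a two-register window -- the CSR offset is
 * written to IOADDR at BAR offset 0, then the data is transferred
 * through IODATA at BAR offset 4.  wm_io_write(sc, WMREG_CTRL, v) is
 * thus equivalent to CSR_WRITE(sc, WMREG_CTRL, v), only via I/O space
 * instead of the memory-mapped BAR.
 */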

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not"
		    " indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}
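
/*
 * Example of the 64-bit split (illustrative comment): for
 * v == 0x0000000123456789, wa_low becomes htole32(0x23456789) and
 * wa_high becomes htole32(0x00000001).  When bus_addr_t is 32 bits the
 * sizeof() test is constant-folded and wa_high is simply 0, so the same
 * code is correct on both flavors of platform.
 */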

static void
wm_set_spiaddrbits(struct wm_softc *sc)
{
	uint32_t reg;

	sc->sc_flags |= WM_F_EEPROM_SPI;
	reg = CSR_READ(sc, WMREG_EECD);
	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
}

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	uint32_t reg;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
	sc->sc_stopping = false;

	sc->sc_wmp = wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
			if (memtype == PCI_MAPREG_TYPE_IO)
				break;
			if (PCI_MAPREG_MEM_TYPE(memtype) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				i += 4;	/* skip high bits, too */
		}
		if (i < PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
			 * that's not a problem, because the newer chips
			 * don't have the bugs this BAR is needed to work
			 * around.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it hasn't
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
					0, &sc->sc_iot, &sc->sc_ioh,
					NULL, &sc->sc_ios) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
#ifdef WM_MPSAFE
	pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
#endif
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
			callout_setfunc(&sc->sc_txfifo_ch,
					wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
		    && (sc->sc_type != WM_T_ICH10)
		    && (sc->sc_type != WM_T_PCH)
		    && (sc->sc_type != WM_T_PCH2)
		    && (sc->sc_type != WM_T_PCH_LPT)) {
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
			/* ICH* and PCH* have no PCIe capability registers */
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
				NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIe capability\n");
		}
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if ((reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
				 sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
				    PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
				    PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose_dev(sc->sc_dev,
					    "resetting PCI-X MMRBC: %d -> %d\n",
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCIX_CMD_BYTECNT_MASK) |
					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcixe_capoff + PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
								      : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error_dev(sc->sc_dev,
				    "unknown PCIXSPD %d; assuming 66MHz\n",
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
				break;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
1489 	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1490 	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
1491 	sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1492 	    sizeof(struct wm_control_data_82542) :
1493 	    sizeof(struct wm_control_data_82544);
1494 	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1495 		    (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1496 		    &sc->sc_cd_rseg, 0)) != 0) {
1497 		aprint_error_dev(sc->sc_dev,
1498 		    "unable to allocate control data, error = %d\n",
1499 		    error);
1500 		goto fail_0;
1501 	}
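	/*
	 * The (bus_size_t)0x100000000ULL boundary argument above is what
	 * enforces the 4G constraint noted earlier: bus_dmamem_alloc()
	 * will not return a segment that crosses a 4GB address boundary,
	 * so the Tx and Rx descriptors end up within one 4G window.
	 */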
1502 
1503 	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1504 		    sc->sc_cd_rseg, sc->sc_cd_size,
1505 		    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1506 		aprint_error_dev(sc->sc_dev,
1507 		    "unable to map control data, error = %d\n", error);
1508 		goto fail_1;
1509 	}
1510 
1511 	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1512 		    sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1513 		aprint_error_dev(sc->sc_dev,
1514 		    "unable to create control data DMA map, error = %d\n",
1515 		    error);
1516 		goto fail_2;
1517 	}
1518 
1519 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1520 		    sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1521 		aprint_error_dev(sc->sc_dev,
1522 		    "unable to load control data DMA map, error = %d\n",
1523 		    error);
1524 		goto fail_3;
1525 	}
1526 
1527 	/*
1528 	 * Create the transmit buffer DMA maps.
1529 	 */
1530 	WM_TXQUEUELEN(sc) =
1531 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1532 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1533 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1534 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1535 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1536 			    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1537 			aprint_error_dev(sc->sc_dev,
1538 			    "unable to create Tx DMA map %d, error = %d\n",
1539 			    i, error);
1540 			goto fail_4;
1541 		}
1542 	}
1543 
1544 	/*
1545 	 * Create the receive buffer DMA maps.
1546 	 */
1547 	for (i = 0; i < WM_NRXDESC; i++) {
1548 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1549 			    MCLBYTES, 0, 0,
1550 			    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1551 			aprint_error_dev(sc->sc_dev,
1552 			    "unable to create Rx DMA map %d error = %d\n",
1553 			    i, error);
1554 			goto fail_5;
1555 		}
1556 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
1557 	}
1558 
1559 	/* clear interesting stat counters */
1560 	CSR_READ(sc, WMREG_COLC);
1561 	CSR_READ(sc, WMREG_RXERRC);
1562 
1563 	/* get PHY control from SMBus to PCIe */
1564 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1565 	    || (sc->sc_type == WM_T_PCH_LPT))
1566 		wm_smbustopci(sc);
1567 
1568 	/*
1569 	 * Reset the chip to a known state.
1570 	 */
1571 	wm_reset(sc);
1572 
1573 	/*
1574 	 * Get some information about the EEPROM.
1575 	 */
1576 	switch (sc->sc_type) {
1577 	case WM_T_82542_2_0:
1578 	case WM_T_82542_2_1:
1579 	case WM_T_82543:
1580 	case WM_T_82544:
1581 		/* Microwire */
1582 		sc->sc_ee_addrbits = 6;
1583 		break;
1584 	case WM_T_82540:
1585 	case WM_T_82545:
1586 	case WM_T_82545_3:
1587 	case WM_T_82546:
1588 	case WM_T_82546_3:
1589 		/* Microwire */
1590 		reg = CSR_READ(sc, WMREG_EECD);
1591 		if (reg & EECD_EE_SIZE)
1592 			sc->sc_ee_addrbits = 8;
1593 		else
1594 			sc->sc_ee_addrbits = 6;
1595 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1596 		break;
1597 	case WM_T_82541:
1598 	case WM_T_82541_2:
1599 	case WM_T_82547:
1600 	case WM_T_82547_2:
1601 		reg = CSR_READ(sc, WMREG_EECD);
1602 		if (reg & EECD_EE_TYPE) {
1603 			/* SPI */
1604 			wm_set_spiaddrbits(sc);
1605 		} else
1606 			/* Microwire */
1607 			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
1608 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1609 		break;
1610 	case WM_T_82571:
1611 	case WM_T_82572:
1612 		/* SPI */
1613 		wm_set_spiaddrbits(sc);
1614 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1615 		break;
1616 	case WM_T_82573:
1617 	case WM_T_82574:
1618 	case WM_T_82583:
1619 		if (wm_is_onboard_nvm_eeprom(sc) == 0)
1620 			sc->sc_flags |= WM_F_EEPROM_FLASH;
1621 		else {
1622 			/* SPI */
1623 			wm_set_spiaddrbits(sc);
1624 		}
1625 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1626 		break;
1627 	case WM_T_82575:
1628 	case WM_T_82576:
1629 	case WM_T_82580:
1630 	case WM_T_82580ER:
1631 	case WM_T_I350:
1632 	case WM_T_I354: /* XXXX ok? */
1633 	case WM_T_80003:
1634 		/* SPI */
1635 		wm_set_spiaddrbits(sc);
1636 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
1637 		break;
1638 	case WM_T_ICH8:
1639 	case WM_T_ICH9:
1640 	case WM_T_ICH10:
1641 	case WM_T_PCH:
1642 	case WM_T_PCH2:
1643 	case WM_T_PCH_LPT:
1644 		/* FLASH */
1645 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
1646 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1647 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1648 		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1649 			aprint_error_dev(sc->sc_dev,
1650 			    "can't map FLASH registers\n");
1651 			return;
1652 		}
1653 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1654 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1655 						ICH_FLASH_SECTOR_SIZE;
1656 		sc->sc_ich8_flash_bank_size =
1657 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1658 		sc->sc_ich8_flash_bank_size -=
1659 		    (reg & ICH_GFPREG_BASE_MASK);
1660 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1661 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
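		/*
		 * Arithmetic sketch: GFPREG holds the NVM region's first
		 * and last flash sectors (base in the low bits, limit in
		 * bits 16 and up).  (limit + 1 - base) sectors, times
		 * ICH_FLASH_SECTOR_SIZE bytes, divided between two banks
		 * of 16-bit words, gives the per-bank size in words
		 * computed above.
		 */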
1662 		break;
1663 	case WM_T_I210:
1664 	case WM_T_I211:
1665 		sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1666 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
1667 		break;
1668 	default:
1669 		break;
1670 	}
1671 
1672 	/*
1673 	 * Defer printing the EEPROM type until after verifying the checksum.
1674 	 * This allows the EEPROM type to be printed correctly in the case
1675 	 * that no EEPROM is attached.
1676 	 */
1677 	/*
1678 	 * Validate the EEPROM checksum. If the checksum fails, flag
1679 	 * this for later, so we can fail future reads from the EEPROM.
1680 	 */
1681 	if (wm_validate_eeprom_checksum(sc)) {
1682 		/*
1683 		 * Check a second time, because some PCIe parts fail the
1684 		 * first attempt while the link is still in a sleep state.
1685 		 */
1686 		if (wm_validate_eeprom_checksum(sc))
1687 			sc->sc_flags |= WM_F_EEPROM_INVALID;
1688 	}
1689 
1690 	/* Set device properties (macflags) */
1691 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1692 
1693 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
1694 		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1695 	else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) {
1696 		aprint_verbose_dev(sc->sc_dev, "FLASH(HW)\n");
1697 	} else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1698 		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1699 	} else {
1700 		if (sc->sc_flags & WM_F_EEPROM_SPI)
1701 			eetype = "SPI";
1702 		else
1703 			eetype = "MicroWire";
1704 		aprint_verbose_dev(sc->sc_dev,
1705 		    "%u word (%d address bits) %s EEPROM\n",
1706 		    1U << sc->sc_ee_addrbits,
1707 		    sc->sc_ee_addrbits, eetype);
1708 	}
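	/*
	 * E.g. a MicroWire part with 6 address bits is reported as a
	 * "64 word (6 address bits) MicroWire EEPROM", the word count
	 * being 1 << sc_ee_addrbits.
	 */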
1709 
1710 	switch (sc->sc_type) {
1711 	case WM_T_82571:
1712 	case WM_T_82572:
1713 	case WM_T_82573:
1714 	case WM_T_82574:
1715 	case WM_T_82583:
1716 	case WM_T_80003:
1717 	case WM_T_ICH8:
1718 	case WM_T_ICH9:
1719 	case WM_T_ICH10:
1720 	case WM_T_PCH:
1721 	case WM_T_PCH2:
1722 	case WM_T_PCH_LPT:
1723 		if (wm_check_mng_mode(sc) != 0)
1724 			wm_get_hw_control(sc);
1725 		break;
1726 	default:
1727 		break;
1728 	}
1729 	wm_get_wakeup(sc);
1730 	/*
1731 	 * Read the Ethernet address from the EEPROM, unless it was
1732 	 * already found in the device properties.
1733 	 */
1734 	ea = prop_dictionary_get(dict, "mac-address");
1735 	if (ea != NULL) {
1736 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1737 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1738 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1739 	} else {
1740 		if (wm_read_mac_addr(sc, enaddr) != 0) {
1741 			aprint_error_dev(sc->sc_dev,
1742 			    "unable to read Ethernet address\n");
1743 			return;
1744 		}
1745 	}
1746 
1747 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1748 	    ether_sprintf(enaddr));
1749 
1750 	/*
1751 	 * Read the config info from the EEPROM, and set up various
1752 	 * bits in the control registers based on their contents.
1753 	 */
1754 	pn = prop_dictionary_get(dict, "i82543-cfg1");
1755 	if (pn != NULL) {
1756 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1757 		cfg1 = (uint16_t) prop_number_integer_value(pn);
1758 	} else {
1759 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1760 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1761 			return;
1762 		}
1763 	}
1764 
1765 	pn = prop_dictionary_get(dict, "i82543-cfg2");
1766 	if (pn != NULL) {
1767 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1768 		cfg2 = (uint16_t) prop_number_integer_value(pn);
1769 	} else {
1770 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1771 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1772 			return;
1773 		}
1774 	}
1775 
1776 	/* check for WM_F_WOL */
1777 	switch (sc->sc_type) {
1778 	case WM_T_82542_2_0:
1779 	case WM_T_82542_2_1:
1780 	case WM_T_82543:
1781 		/* dummy? */
1782 		eeprom_data = 0;
1783 		apme_mask = EEPROM_CFG3_APME;
1784 		break;
1785 	case WM_T_82544:
1786 		apme_mask = EEPROM_CFG2_82544_APM_EN;
1787 		eeprom_data = cfg2;
1788 		break;
1789 	case WM_T_82546:
1790 	case WM_T_82546_3:
1791 	case WM_T_82571:
1792 	case WM_T_82572:
1793 	case WM_T_82573:
1794 	case WM_T_82574:
1795 	case WM_T_82583:
1796 	case WM_T_80003:
1797 	default:
1798 		apme_mask = EEPROM_CFG3_APME;
1799 		wm_read_eeprom(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
1800 		    : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
1801 		break;
1802 	case WM_T_82575:
1803 	case WM_T_82576:
1804 	case WM_T_82580:
1805 	case WM_T_82580ER:
1806 	case WM_T_I350:
1807 	case WM_T_I354: /* XXX ok? */
1808 	case WM_T_ICH8:
1809 	case WM_T_ICH9:
1810 	case WM_T_ICH10:
1811 	case WM_T_PCH:
1812 	case WM_T_PCH2:
1813 	case WM_T_PCH_LPT:
1814 		/* XXX The funcid should be checked on some devices */
1815 		apme_mask = WUC_APME;
1816 		eeprom_data = CSR_READ(sc, WMREG_WUC);
1817 		break;
1818 	}
1819 
1820 	/* Set the WM_F_WOL flag once the EEPROM/WUC data has been read */
1821 	if ((eeprom_data & apme_mask) != 0)
1822 		sc->sc_flags |= WM_F_WOL;
1823 #ifdef WM_DEBUG
1824 	if ((sc->sc_flags & WM_F_WOL) != 0)
1825 		printf("WOL\n");
1826 #endif
1827 
1828 	/*
1829 	 * XXX need special handling for some multiple-port cards
1830 	 * to disable a particular port.
1831 	 */
1832 
1833 	if (sc->sc_type >= WM_T_82544) {
1834 		pn = prop_dictionary_get(dict, "i82543-swdpin");
1835 		if (pn != NULL) {
1836 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1837 			swdpin = (uint16_t) prop_number_integer_value(pn);
1838 		} else {
1839 			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1840 				aprint_error_dev(sc->sc_dev,
1841 				    "unable to read SWDPIN\n");
1842 				return;
1843 			}
1844 		}
1845 	}
1846 
1847 	if (cfg1 & EEPROM_CFG1_ILOS)
1848 		sc->sc_ctrl |= CTRL_ILOS;
1849 	if (sc->sc_type >= WM_T_82544) {
1850 		sc->sc_ctrl |=
1851 		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1852 		    CTRL_SWDPIO_SHIFT;
1853 		sc->sc_ctrl |=
1854 		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1855 		    CTRL_SWDPINS_SHIFT;
1856 	} else {
1857 		sc->sc_ctrl |=
1858 		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1859 		    CTRL_SWDPIO_SHIFT;
1860 	}
1861 
1862 #if 0
1863 	if (sc->sc_type >= WM_T_82544) {
1864 		if (cfg1 & EEPROM_CFG1_IPS0)
1865 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1866 		if (cfg1 & EEPROM_CFG1_IPS1)
1867 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1868 		sc->sc_ctrl_ext |=
1869 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1870 		    CTRL_EXT_SWDPIO_SHIFT;
1871 		sc->sc_ctrl_ext |=
1872 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1873 		    CTRL_EXT_SWDPINS_SHIFT;
1874 	} else {
1875 		sc->sc_ctrl_ext |=
1876 		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1877 		    CTRL_EXT_SWDPIO_SHIFT;
1878 	}
1879 #endif
1880 
1881 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1882 #if 0
1883 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1884 #endif
1885 
1886 	/*
1887 	 * Set up some register offsets that are different between
1888 	 * the i82542 and the i82543 and later chips.
1889 	 */
1890 	if (sc->sc_type < WM_T_82543) {
1891 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
1892 		sc->sc_tdt_reg = WMREG_OLD_TDT;
1893 	} else {
1894 		sc->sc_rdt_reg = WMREG_RDT;
1895 		sc->sc_tdt_reg = WMREG_TDT;
1896 	}
1897 
1898 	if (sc->sc_type == WM_T_PCH) {
1899 		uint16_t val;
1900 
1901 		/* Save the NVM K1 bit setting */
1902 		wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
1903 
1904 		if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
1905 			sc->sc_nvm_k1_enabled = 1;
1906 		else
1907 			sc->sc_nvm_k1_enabled = 0;
1908 	}
1909 
1910 	/*
1911 	 * Determine whether we're in TBI, GMII or SGMII mode, and initialize the
1912 	 * media structures accordingly.
1913 	 */
1914 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1915 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
1916 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
1917 	    || sc->sc_type == WM_T_82573
1918 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
1919 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
1920 		wm_gmii_mediainit(sc, wmp->wmp_product);
1921 	} else if (sc->sc_type < WM_T_82543 ||
1922 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1923 		if (wmp->wmp_flags & WMP_F_1000T)
1924 			aprint_error_dev(sc->sc_dev,
1925 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
1926 		wm_tbi_mediainit(sc);
1927 	} else {
1928 		switch (sc->sc_type) {
1929 		case WM_T_82575:
1930 		case WM_T_82576:
1931 		case WM_T_82580:
1932 		case WM_T_82580ER:
1933 		case WM_T_I350:
1934 		case WM_T_I354:
1935 		case WM_T_I210:
1936 		case WM_T_I211:
1937 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
1938 			switch (reg & CTRL_EXT_LINK_MODE_MASK) {
1939 			case CTRL_EXT_LINK_MODE_1000KX:
1940 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
1941 				CSR_WRITE(sc, WMREG_CTRL_EXT,
1942 				    reg | CTRL_EXT_I2C_ENA);
1943 				panic("not supported yet\n");
1944 				break;
1945 			case CTRL_EXT_LINK_MODE_SGMII:
1946 				if (wm_sgmii_uses_mdio(sc)) {
1947 					aprint_verbose_dev(sc->sc_dev,
1948 					    "SGMII(MDIO)\n");
1949 					sc->sc_flags |= WM_F_SGMII;
1950 					wm_gmii_mediainit(sc,
1951 					    wmp->wmp_product);
1952 					break;
1953 				}
1954 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
1955 				/*FALLTHROUGH*/
1956 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
1957 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
1958 				CSR_WRITE(sc, WMREG_CTRL_EXT,
1959 				    reg | CTRL_EXT_I2C_ENA);
1960 				panic("not supported yet\n");
1961 				break;
1962 			case CTRL_EXT_LINK_MODE_GMII:
1963 			default:
1964 				CSR_WRITE(sc, WMREG_CTRL_EXT,
1965 				    reg & ~CTRL_EXT_I2C_ENA);
1966 				wm_gmii_mediainit(sc, wmp->wmp_product);
1967 				break;
1968 			}
1969 			break;
1970 		default:
1971 			if (wmp->wmp_flags & WMP_F_1000X)
1972 				aprint_error_dev(sc->sc_dev,
1973 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
1974 			wm_gmii_mediainit(sc, wmp->wmp_product);
1975 		}
1976 	}
1977 
1978 	ifp = &sc->sc_ethercom.ec_if;
1979 	xname = device_xname(sc->sc_dev);
1980 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
1981 	ifp->if_softc = sc;
1982 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1983 	ifp->if_ioctl = wm_ioctl;
1984 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
1985 		ifp->if_start = wm_nq_start;
1986 	else
1987 		ifp->if_start = wm_start;
1988 	ifp->if_watchdog = wm_watchdog;
1989 	ifp->if_init = wm_init;
1990 	ifp->if_stop = wm_stop;
1991 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1992 	IFQ_SET_READY(&ifp->if_snd);
1993 
1994 	/* Check for jumbo frame */
1995 	switch (sc->sc_type) {
1996 	case WM_T_82573:
1997 		/* XXX limited to 9234 if ASPM is disabled */
1998 		wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
1999 		if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
2000 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2001 		break;
2002 	case WM_T_82571:
2003 	case WM_T_82572:
2004 	case WM_T_82574:
2005 	case WM_T_82575:
2006 	case WM_T_82576:
2007 	case WM_T_82580:
2008 	case WM_T_82580ER:
2009 	case WM_T_I350:
2010 	case WM_T_I354: /* XXXX ok? */
2011 	case WM_T_I210:
2012 	case WM_T_I211:
2013 	case WM_T_80003:
2014 	case WM_T_ICH9:
2015 	case WM_T_ICH10:
2016 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
2017 	case WM_T_PCH_LPT:
2018 		/* XXX limited to 9234 */
2019 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2020 		break;
2021 	case WM_T_PCH:
2022 		/* XXX limited to 4096 */
2023 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2024 		break;
2025 	case WM_T_82542_2_0:
2026 	case WM_T_82542_2_1:
2027 	case WM_T_82583:
2028 	case WM_T_ICH8:
2029 		/* No support for jumbo frame */
2030 		break;
2031 	default:
2032 		/* ETHER_MAX_LEN_JUMBO */
2033 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2034 		break;
2035 	}
2036 
2037 	/*
2038 	 * If we're an i82543 or greater, we can support VLANs.
2039 	 */
2040 	if (sc->sc_type >= WM_T_82543)
2041 		sc->sc_ethercom.ec_capabilities |=
2042 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2043 
2044 	/*
2045 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
2046 	 * on i82543 and later.
2047 	 */
2048 	if (sc->sc_type >= WM_T_82543) {
2049 		ifp->if_capabilities |=
2050 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2051 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2052 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2053 		    IFCAP_CSUM_TCPv6_Tx |
2054 		    IFCAP_CSUM_UDPv6_Tx;
2055 	}
2056 
2057 	/*
2058 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
2059 	 *
2060 	 *	82541GI (8086:1076) ... no
2061 	 *	82572EI (8086:10b9) ... yes
2062 	 */
2063 	if (sc->sc_type >= WM_T_82571) {
2064 		ifp->if_capabilities |=
2065 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2066 	}
2067 
2068 	/*
2069 	 * If we're an i82544 or greater (except i82547), we can do
2070 	 * TCP segmentation offload.
2071 	 */
2072 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2073 		ifp->if_capabilities |= IFCAP_TSOv4;
2074 	}
2075 
2076 	if (sc->sc_type >= WM_T_82571) {
2077 		ifp->if_capabilities |= IFCAP_TSOv6;
2078 	}
2079 
2080 #ifdef WM_MPSAFE
2081 	sc->sc_txrx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2082 #else
2083 	sc->sc_txrx_lock = NULL;
2084 #endif
2085 
2086 	/*
2087 	 * Attach the interface.
2088 	 */
2089 	if_attach(ifp);
2090 	ether_ifattach(ifp, enaddr);
2091 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2092 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
2093 
2094 #ifdef WM_EVENT_COUNTERS
2095 	/* Attach event counters. */
2096 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2097 	    NULL, xname, "txsstall");
2098 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2099 	    NULL, xname, "txdstall");
2100 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2101 	    NULL, xname, "txfifo_stall");
2102 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2103 	    NULL, xname, "txdw");
2104 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2105 	    NULL, xname, "txqe");
2106 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2107 	    NULL, xname, "rxintr");
2108 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2109 	    NULL, xname, "linkintr");
2110 
2111 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2112 	    NULL, xname, "rxipsum");
2113 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2114 	    NULL, xname, "rxtusum");
2115 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2116 	    NULL, xname, "txipsum");
2117 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2118 	    NULL, xname, "txtusum");
2119 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2120 	    NULL, xname, "txtusum6");
2121 
2122 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2123 	    NULL, xname, "txtso");
2124 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2125 	    NULL, xname, "txtso6");
2126 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2127 	    NULL, xname, "txtsopain");
2128 
2129 	for (i = 0; i < WM_NTXSEGS; i++) {
2130 		snprintf(wm_txseg_evcnt_names[i],
2131 		    sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2132 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2133 		    NULL, xname, wm_txseg_evcnt_names[i]);
2134 	}
2135 
2136 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2137 	    NULL, xname, "txdrop");
2138 
2139 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2140 	    NULL, xname, "tu");
2141 
2142 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2143 	    NULL, xname, "tx_xoff");
2144 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2145 	    NULL, xname, "tx_xon");
2146 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2147 	    NULL, xname, "rx_xoff");
2148 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2149 	    NULL, xname, "rx_xon");
2150 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2151 	    NULL, xname, "rx_macctl");
2152 #endif /* WM_EVENT_COUNTERS */
2153 
2154 	if (pmf_device_register(self, wm_suspend, wm_resume))
2155 		pmf_class_network_register(self, ifp);
2156 	else
2157 		aprint_error_dev(self, "couldn't establish power handler\n");
2158 
2159 	return;
2160 
2161 	/*
2162 	 * Free any resources we've allocated during the failed attach
2163 	 * attempt.  Do this in reverse order and fall through.
2164 	 */
2165  fail_5:
2166 	for (i = 0; i < WM_NRXDESC; i++) {
2167 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2168 			bus_dmamap_destroy(sc->sc_dmat,
2169 			    sc->sc_rxsoft[i].rxs_dmamap);
2170 	}
2171  fail_4:
2172 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2173 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
2174 			bus_dmamap_destroy(sc->sc_dmat,
2175 			    sc->sc_txsoft[i].txs_dmamap);
2176 	}
2177 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2178  fail_3:
2179 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2180  fail_2:
2181 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2182 	    sc->sc_cd_size);
2183  fail_1:
2184 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2185  fail_0:
2186 	return;
2187 }
2188 
2189 static int
2190 wm_detach(device_t self, int flags __unused)
2191 {
2192 	struct wm_softc *sc = device_private(self);
2193 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2194 	int i;
2195 #ifndef WM_MPSAFE
2196 	int s;
2197 
2198 	s = splnet();
2199 #endif
2200 	/* Stop the interface; its callouts are stopped in wm_stop(). */
2201 	wm_stop(ifp, 1);
2202 
2203 #ifndef WM_MPSAFE
2204 	splx(s);
2205 #endif
2206 
2207 	pmf_device_deregister(self);
2208 
2209 	/* Tell the firmware about the release */
2210 	WM_LOCK(sc);
2211 	wm_release_manageability(sc);
2212 	wm_release_hw_control(sc);
2213 	WM_UNLOCK(sc);
2214 
2215 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2216 
2217 	/* Delete all remaining media. */
2218 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2219 
2220 	ether_ifdetach(ifp);
2221 	if_detach(ifp);
2222 
2223 
2224 	/* Unload RX dmamaps and free mbufs */
2225 	WM_LOCK(sc);
2226 	wm_rxdrain(sc);
2227 	WM_UNLOCK(sc);
2228 	/* Must unlock here */
2229 
2230 	/* Free the DMA maps, mirroring the failure path of wm_attach() */
2231 	for (i = 0; i < WM_NRXDESC; i++) {
2232 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2233 			bus_dmamap_destroy(sc->sc_dmat,
2234 			    sc->sc_rxsoft[i].rxs_dmamap);
2235 	}
2236 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2237 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
2238 			bus_dmamap_destroy(sc->sc_dmat,
2239 			    sc->sc_txsoft[i].txs_dmamap);
2240 	}
2241 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2242 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2243 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2244 	    sc->sc_cd_size);
2245 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2246 
2247 	/* Disestablish the interrupt handler */
2248 	if (sc->sc_ih != NULL) {
2249 		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2250 		sc->sc_ih = NULL;
2251 	}
2252 
2253 	/* Unmap the registers */
2254 	if (sc->sc_ss) {
2255 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2256 		sc->sc_ss = 0;
2257 	}
2258 
2259 	if (sc->sc_ios) {
2260 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2261 		sc->sc_ios = 0;
2262 	}
2263 
2264 	if (sc->sc_txrx_lock)
2265 		mutex_obj_free(sc->sc_txrx_lock);
2266 
2267 	return 0;
2268 }
2269 
2270 /*
2271  * wm_tx_offload:
2272  *
2273  *	Set up TCP/IP checksumming parameters for the
2274  *	specified packet.
2275  */
2276 static int
2277 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
2278     uint8_t *fieldsp)
2279 {
2280 	struct mbuf *m0 = txs->txs_mbuf;
2281 	struct livengood_tcpip_ctxdesc *t;
2282 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
2283 	uint32_t ipcse;
2284 	struct ether_header *eh;
2285 	int offset, iphl;
2286 	uint8_t fields;
2287 
2288 	/*
2289 	 * XXX It would be nice if the mbuf pkthdr had offset
2290 	 * fields for the protocol headers.
2291 	 */
2292 
2293 	eh = mtod(m0, struct ether_header *);
2294 	switch (htons(eh->ether_type)) {
2295 	case ETHERTYPE_IP:
2296 	case ETHERTYPE_IPV6:
2297 		offset = ETHER_HDR_LEN;
2298 		break;
2299 
2300 	case ETHERTYPE_VLAN:
2301 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2302 		break;
2303 
2304 	default:
2305 		/*
2306 		 * Don't support this protocol or encapsulation.
2307 		 */
2308 		*fieldsp = 0;
2309 		*cmdp = 0;
2310 		return 0;
2311 	}
2312 
2313 	if ((m0->m_pkthdr.csum_flags &
2314 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
2315 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2316 	} else {
2317 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2318 	}
2319 	ipcse = offset + iphl - 1;
2320 
2321 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
2322 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
2323 	seg = 0;
2324 	fields = 0;
2325 
2326 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2327 		int hlen = offset + iphl;
2328 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2329 
2330 		if (__predict_false(m0->m_len <
2331 				    (hlen + sizeof(struct tcphdr)))) {
2332 			/*
2333 			 * TCP/IP headers are not in the first mbuf; we need
2334 			 * to do this the slow and painful way.  Let's just
2335 			 * hope this doesn't happen very often.
2336 			 */
2337 			struct tcphdr th;
2338 
2339 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2340 
2341 			m_copydata(m0, hlen, sizeof(th), &th);
2342 			if (v4) {
2343 				struct ip ip;
2344 
2345 				m_copydata(m0, offset, sizeof(ip), &ip);
2346 				ip.ip_len = 0;
2347 				m_copyback(m0,
2348 				    offset + offsetof(struct ip, ip_len),
2349 				    sizeof(ip.ip_len), &ip.ip_len);
2350 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2351 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2352 			} else {
2353 				struct ip6_hdr ip6;
2354 
2355 				m_copydata(m0, offset, sizeof(ip6), &ip6);
2356 				ip6.ip6_plen = 0;
2357 				m_copyback(m0,
2358 				    offset + offsetof(struct ip6_hdr, ip6_plen),
2359 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2360 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2361 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2362 			}
2363 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2364 			    sizeof(th.th_sum), &th.th_sum);
2365 
2366 			hlen += th.th_off << 2;
2367 		} else {
2368 			/*
2369 			 * TCP/IP headers are in the first mbuf; we can do
2370 			 * this the easy way.
2371 			 */
2372 			struct tcphdr *th;
2373 
2374 			if (v4) {
2375 				struct ip *ip =
2376 				    (void *)(mtod(m0, char *) + offset);
2377 				th = (void *)(mtod(m0, char *) + hlen);
2378 
2379 				ip->ip_len = 0;
2380 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2381 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2382 			} else {
2383 				struct ip6_hdr *ip6 =
2384 				    (void *)(mtod(m0, char *) + offset);
2385 				th = (void *)(mtod(m0, char *) + hlen);
2386 
2387 				ip6->ip6_plen = 0;
2388 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2389 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2390 			}
2391 			hlen += th->th_off << 2;
2392 		}
2393 
2394 		if (v4) {
2395 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
2396 			cmdlen |= WTX_TCPIP_CMD_IP;
2397 		} else {
2398 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2399 			ipcse = 0;
2400 		}
2401 		cmd |= WTX_TCPIP_CMD_TSE;
2402 		cmdlen |= WTX_TCPIP_CMD_TSE |
2403 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
2404 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
2405 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
2406 	}
2407 
2408 	/*
2409 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
2410 	 * offload feature, if we load the context descriptor, we
2411 	 * MUST provide valid values for IPCSS and TUCSS fields.
2412 	 */
2413 
2414 	ipcs = WTX_TCPIP_IPCSS(offset) |
2415 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
2416 	    WTX_TCPIP_IPCSE(ipcse);
2417 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
2418 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
2419 		fields |= WTX_IXSM;
2420 	}
2421 
2422 	offset += iphl;
2423 
2424 	if (m0->m_pkthdr.csum_flags &
2425 	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
2426 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2427 		fields |= WTX_TXSM;
2428 		tucs = WTX_TCPIP_TUCSS(offset) |
2429 		    WTX_TCPIP_TUCSO(offset +
2430 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
2431 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
2432 	} else if ((m0->m_pkthdr.csum_flags &
2433 	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
2434 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
2435 		fields |= WTX_TXSM;
2436 		tucs = WTX_TCPIP_TUCSS(offset) |
2437 		    WTX_TCPIP_TUCSO(offset +
2438 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
2439 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
2440 	} else {
2441 		/* Just initialize it to a valid TCP context. */
2442 		tucs = WTX_TCPIP_TUCSS(offset) |
2443 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
2444 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
2445 	}
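	/*
	 * Worked example (illustrative): for an untagged IPv4 TCP packet
	 * with a 20-byte IP header, offset is now 34, giving IPCSS 14,
	 * IPCSO 14 + 10 = 24, IPCSE 33, TUCSS 34 and TUCSO 34 + 16 = 50
	 * (offsetof(struct tcphdr, th_sum) is 16).
	 */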
2446 
2447 	/* Fill in the context descriptor. */
2448 	t = (struct livengood_tcpip_ctxdesc *)
2449 	    &sc->sc_txdescs[sc->sc_txnext];
2450 	t->tcpip_ipcs = htole32(ipcs);
2451 	t->tcpip_tucs = htole32(tucs);
2452 	t->tcpip_cmdlen = htole32(cmdlen);
2453 	t->tcpip_seg = htole32(seg);
2454 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
2455 
2456 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
2457 	txs->txs_ndesc++;
2458 
2459 	*cmdp = cmd;
2460 	*fieldsp = fields;
2461 
2462 	return 0;
2463 }
2464 
2465 static void
2466 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
2467 {
2468 	struct mbuf *m;
2469 	int i;
2470 
2471 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
2472 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
2473 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
2474 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
2475 		    m->m_data, m->m_len, m->m_flags);
2476 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
2477 	    i, i == 1 ? "" : "s");
2478 }
2479 
2480 /*
2481  * wm_82547_txfifo_stall:
2482  *
2483  *	Callout used to wait for the 82547 Tx FIFO to drain,
2484  *	reset the FIFO pointers, and restart packet transmission.
2485  */
2486 static void
2487 wm_82547_txfifo_stall(void *arg)
2488 {
2489 	struct wm_softc *sc = arg;
2490 #ifndef WM_MPSAFE
2491 	int s;
2492 
2493 	s = splnet();
2494 #endif
2495 	WM_LOCK(sc);
2496 
2497 	if (sc->sc_stopping)
2498 		goto out;
2499 
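	/*
	 * The drain test below compares head/tail pairs: TDH/TDT are the
	 * descriptor ring head and tail, TDFH/TDFT the internal Tx data
	 * FIFO head and tail, and TDFHS/TDFTS their saved copies.  Only
	 * when every pair matches has all packet data left the FIFO.
	 */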
2500 	if (sc->sc_txfifo_stall) {
2501 		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
2502 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
2503 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
2504 			/*
2505 			 * Packets have drained.  Stop transmitter, reset
2506 			 * FIFO pointers, restart transmitter, and kick
2507 			 * the packet queue.
2508 			 */
2509 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
2510 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
2511 			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
2512 			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
2513 			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
2514 			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
2515 			CSR_WRITE(sc, WMREG_TCTL, tctl);
2516 			CSR_WRITE_FLUSH(sc);
2517 
2518 			sc->sc_txfifo_head = 0;
2519 			sc->sc_txfifo_stall = 0;
2520 			wm_start_locked(&sc->sc_ethercom.ec_if);
2521 		} else {
2522 			/*
2523 			 * Still waiting for packets to drain; try again in
2524 			 * another tick.
2525 			 */
2526 			callout_schedule(&sc->sc_txfifo_ch, 1);
2527 		}
2528 	}
2529 
2530 out:
2531 	WM_UNLOCK(sc);
2532 #ifndef WM_MPSAFE
2533 	splx(s);
2534 #endif
2535 }
2536 
2537 static void
2538 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
2539 {
2540 	uint32_t reg;
2541 
2542 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
2543 
2544 	if (on != 0)
2545 		reg |= EXTCNFCTR_GATE_PHY_CFG;
2546 	else
2547 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
2548 
2549 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
2550 }
2551 
2552 /*
2553  * wm_82547_txfifo_bugchk:
2554  *
2555  *	Check for bug condition in the 82547 Tx FIFO.  We need to
2556  *	prevent enqueueing a packet that would wrap around the end
2557 	 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
2558  *
2559  *	We do this by checking the amount of space before the end
2560  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
2561  *	the Tx FIFO, wait for all remaining packets to drain, reset
2562  *	the internal FIFO pointers to the beginning, and restart
2563  *	transmission on the interface.
2564  */
2565 #define	WM_FIFO_HDR		0x10
2566 #define	WM_82547_PAD_LEN	0x3e0
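/*
 * Illustrative accounting for the check below: the FIFO consumes each
 * packet rounded up to a multiple of WM_FIFO_HDR plus a WM_FIFO_HDR
 * header, so a 1514-byte frame costs roundup(1514 + 16, 16) = 1536
 * bytes; we stall whenever len >= WM_82547_PAD_LEN + space, where
 * space is the room left before the end of the FIFO.
 */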
2567 static int
2568 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2569 {
2570 	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2571 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2572 
2573 	/* Just return if already stalled. */
2574 	if (sc->sc_txfifo_stall)
2575 		return 1;
2576 
2577 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
2578 		/* Stall only occurs in half-duplex mode. */
2579 		goto send_packet;
2580 	}
2581 
2582 	if (len >= WM_82547_PAD_LEN + space) {
2583 		sc->sc_txfifo_stall = 1;
2584 		callout_schedule(&sc->sc_txfifo_ch, 1);
2585 		return 1;
2586 	}
2587 
2588  send_packet:
2589 	sc->sc_txfifo_head += len;
2590 	if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2591 		sc->sc_txfifo_head -= sc->sc_txfifo_size;
2592 
2593 	return 0;
2594 }
2595 
2596 /*
2597  * wm_start:		[ifnet interface function]
2598  *
2599  *	Start packet transmission on the interface.
2600  */
2601 static void
2602 wm_start(struct ifnet *ifp)
2603 {
2604 	struct wm_softc *sc = ifp->if_softc;
2605 
2606 	WM_LOCK(sc);
2607 	if (!sc->sc_stopping)
2608 		wm_start_locked(ifp);
2609 	WM_UNLOCK(sc);
2610 }
2611 
2612 static void
2613 wm_start_locked(struct ifnet *ifp)
2614 {
2615 	struct wm_softc *sc = ifp->if_softc;
2616 	struct mbuf *m0;
2617 	struct m_tag *mtag;
2618 	struct wm_txsoft *txs;
2619 	bus_dmamap_t dmamap;
2620 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2621 	bus_addr_t curaddr;
2622 	bus_size_t seglen, curlen;
2623 	uint32_t cksumcmd;
2624 	uint8_t cksumfields;
2625 
2626 	KASSERT(WM_LOCKED(sc));
2627 
2628 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2629 		return;
2630 
2631 	/*
2632 	 * Remember the previous number of free descriptors.
2633 	 */
2634 	ofree = sc->sc_txfree;
2635 
2636 	/*
2637 	 * Loop through the send queue, setting up transmit descriptors
2638 	 * until we drain the queue, or use up all available transmit
2639 	 * descriptors.
2640 	 */
2641 	for (;;) {
2642 		m0 = NULL;
2643 
2644 		/* Get a work queue entry. */
2645 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2646 			wm_txintr(sc);
2647 			if (sc->sc_txsfree == 0) {
2648 				DPRINTF(WM_DEBUG_TX,
2649 				    ("%s: TX: no free job descriptors\n",
2650 					device_xname(sc->sc_dev)));
2651 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2652 				break;
2653 			}
2654 		}
2655 
2656 		/* Grab a packet off the queue. */
2657 		IFQ_DEQUEUE(&ifp->if_snd, m0);
2658 		if (m0 == NULL)
2659 			break;
2660 
2661 		DPRINTF(WM_DEBUG_TX,
2662 		    ("%s: TX: have packet to transmit: %p\n",
2663 		    device_xname(sc->sc_dev), m0));
2664 
2665 		txs = &sc->sc_txsoft[sc->sc_txsnext];
2666 		dmamap = txs->txs_dmamap;
2667 
2668 		use_tso = (m0->m_pkthdr.csum_flags &
2669 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2670 
2671 		/*
2672 		 * So says the Linux driver:
2673 		 * The controller does a simple calculation to make sure
2674 		 * there is enough room in the FIFO before initiating the
2675 		 * DMA for each buffer.  The calc is:
2676 		 *	4 = ceil(buffer len / MSS)
2677 		 * To make sure we don't overrun the FIFO, adjust the max
2678 		 * buffer len if the MSS drops.
2679 		 */
2680 		dmamap->dm_maxsegsz =
2681 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2682 		    ? m0->m_pkthdr.segsz << 2
2683 		    : WTX_MAX_LEN;
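		/*
		 * E.g. (illustrative) with a TSO MSS of 1460 the
		 * per-segment ceiling becomes 1460 << 2 = 5840 bytes,
		 * well below WTX_MAX_LEN, so no single DMA buffer can
		 * overrun the controller's FIFO estimate described above.
		 */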
2684 
2685 		/*
2686 		 * Load the DMA map.  If this fails, the packet either
2687 		 * didn't fit in the allotted number of segments, or we
2688 		 * were short on resources.  For the too-many-segments
2689 		 * case, we simply report an error and drop the packet,
2690 		 * since we can't sanely copy a jumbo packet to a single
2691 		 * buffer.
2692 		 */
2693 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2694 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2695 		if (error) {
2696 			if (error == EFBIG) {
2697 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2698 				log(LOG_ERR, "%s: Tx packet consumes too many "
2699 				    "DMA segments, dropping...\n",
2700 				    device_xname(sc->sc_dev));
2701 				wm_dump_mbuf_chain(sc, m0);
2702 				m_freem(m0);
2703 				continue;
2704 			}
2705 			/*
2706 			 * Short on resources, just stop for now.
2707 			 */
2708 			DPRINTF(WM_DEBUG_TX,
2709 			    ("%s: TX: dmamap load failed: %d\n",
2710 			    device_xname(sc->sc_dev), error));
2711 			break;
2712 		}
2713 
2714 		segs_needed = dmamap->dm_nsegs;
2715 		if (use_tso) {
2716 			/* For sentinel descriptor; see below. */
2717 			segs_needed++;
2718 		}
2719 
2720 		/*
2721 		 * Ensure we have enough descriptors free to describe
2722 		 * the packet.  Note, we always reserve one descriptor
2723 		 * at the end of the ring due to the semantics of the
2724 		 * TDT register, plus one more in the event we need
2725 		 * to load offload context.
2726 		 */
2727 		if (segs_needed > sc->sc_txfree - 2) {
2728 			/*
2729 			 * Not enough free descriptors to transmit this
2730 			 * packet.  We haven't committed anything yet,
2731 			 * so just unload the DMA map, put the packet
2732 			 * back on the queue, and punt.  Notify the upper
2733 			 * layer that there are no more slots left.
2734 			 */
2735 			DPRINTF(WM_DEBUG_TX,
2736 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
2737 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
2738 			    segs_needed, sc->sc_txfree - 1));
2739 			ifp->if_flags |= IFF_OACTIVE;
2740 			bus_dmamap_unload(sc->sc_dmat, dmamap);
2741 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2742 			break;
2743 		}
2744 
2745 		/*
2746 		 * Check for 82547 Tx FIFO bug.  We need to do this
2747 		 * once we know we can transmit the packet, since we
2748 		 * do some internal FIFO space accounting here.
2749 		 */
2750 		if (sc->sc_type == WM_T_82547 &&
2751 		    wm_82547_txfifo_bugchk(sc, m0)) {
2752 			DPRINTF(WM_DEBUG_TX,
2753 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
2754 			    device_xname(sc->sc_dev)));
2755 			ifp->if_flags |= IFF_OACTIVE;
2756 			bus_dmamap_unload(sc->sc_dmat, dmamap);
2757 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2758 			break;
2759 		}
2760 
2761 		/*
2762 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2763 		 */
2764 
2765 		DPRINTF(WM_DEBUG_TX,
2766 		    ("%s: TX: packet has %d (%d) DMA segments\n",
2767 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2768 
2769 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2770 
2771 		/*
2772 		 * Store a pointer to the packet so that we can free it
2773 		 * later.
2774 		 *
2775 		 * Initially, we assume the number of descriptors the
2776 		 * packet uses equals the number of DMA segments.  This may be
2777 		 * incremented by 1 if we do checksum offload (a descriptor
2778 		 * is used to set the checksum context).
2779 		 */
2780 		txs->txs_mbuf = m0;
2781 		txs->txs_firstdesc = sc->sc_txnext;
2782 		txs->txs_ndesc = segs_needed;
2783 
2784 		/* Set up offload parameters for this packet. */
2785 		if (m0->m_pkthdr.csum_flags &
2786 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
2787 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2788 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2789 			if (wm_tx_offload(sc, txs, &cksumcmd,
2790 					  &cksumfields) != 0) {
2791 				/* Error message already displayed. */
2792 				bus_dmamap_unload(sc->sc_dmat, dmamap);
2793 				continue;
2794 			}
2795 		} else {
2796 			cksumcmd = 0;
2797 			cksumfields = 0;
2798 		}
2799 
2800 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2801 
2802 		/* Sync the DMA map. */
2803 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2804 		    BUS_DMASYNC_PREWRITE);
2805 
2806 		/*
2807 		 * Initialize the transmit descriptor.
2808 		 */
2809 		for (nexttx = sc->sc_txnext, seg = 0;
2810 		     seg < dmamap->dm_nsegs; seg++) {
2811 			for (seglen = dmamap->dm_segs[seg].ds_len,
2812 			     curaddr = dmamap->dm_segs[seg].ds_addr;
2813 			     seglen != 0;
2814 			     curaddr += curlen, seglen -= curlen,
2815 			     nexttx = WM_NEXTTX(sc, nexttx)) {
2816 				curlen = seglen;
2817 
2818 				/*
2819 				 * So says the Linux driver:
2820 				 * Work around for premature descriptor
2821 				 * write-backs in TSO mode.  Append a
2822 				 * 4-byte sentinel descriptor.
2823 				 */
2824 				if (use_tso &&
2825 				    seg == dmamap->dm_nsegs - 1 &&
2826 				    curlen > 8)
2827 					curlen -= 4;
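				/*
				 * The 4 bytes held back become a separate
				 * final (sentinel) descriptor on the next
				 * loop pass, since seglen stays nonzero;
				 * this is why segs_needed was incremented
				 * for the TSO case earlier.
				 */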
2828 
2829 				wm_set_dma_addr(
2830 				    &sc->sc_txdescs[nexttx].wtx_addr,
2831 				    curaddr);
2832 				sc->sc_txdescs[nexttx].wtx_cmdlen =
2833 				    htole32(cksumcmd | curlen);
2834 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2835 				    0;
2836 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2837 				    cksumfields;
2838 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2839 				lasttx = nexttx;
2840 
2841 				DPRINTF(WM_DEBUG_TX,
2842 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
2843 				     "len %#04zx\n",
2844 				    device_xname(sc->sc_dev), nexttx,
2845 				    (uint64_t)curaddr, curlen));
2846 			}
2847 		}
2848 
2849 		KASSERT(lasttx != -1);
2850 
2851 		/*
2852 		 * Set up the command byte on the last descriptor of
2853 		 * the packet.  If we're in the interrupt delay window,
2854 		 * delay the interrupt.
2855 		 */
2856 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
2857 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
2858 
2859 		/*
2860 		 * If VLANs are enabled and the packet has a VLAN tag, set
2861 		 * up the descriptor to encapsulate the packet for us.
2862 		 *
2863 		 * This is only valid on the last descriptor of the packet.
2864 		 */
2865 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2866 			sc->sc_txdescs[lasttx].wtx_cmdlen |=
2867 			    htole32(WTX_CMD_VLE);
2868 			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2869 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2870 		}
2871 
2872 		txs->txs_lastdesc = lasttx;
2873 
2874 		DPRINTF(WM_DEBUG_TX,
2875 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
2876 		    device_xname(sc->sc_dev),
2877 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2878 
2879 		/* Sync the descriptors we're using. */
2880 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2881 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2882 
2883 		/* Give the packet to the chip. */
2884 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2885 
2886 		DPRINTF(WM_DEBUG_TX,
2887 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2888 
2889 		DPRINTF(WM_DEBUG_TX,
2890 		    ("%s: TX: finished transmitting packet, job %d\n",
2891 		    device_xname(sc->sc_dev), sc->sc_txsnext));
2892 
2893 		/* Advance the tx pointer. */
2894 		sc->sc_txfree -= txs->txs_ndesc;
2895 		sc->sc_txnext = nexttx;
2896 
2897 		sc->sc_txsfree--;
2898 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2899 
2900 		/* Pass the packet to any BPF listeners. */
2901 		bpf_mtap(ifp, m0);
2902 	}
2903 
2904 	if (m0 != NULL) {
2905 		ifp->if_flags |= IFF_OACTIVE;
2906 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2907 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
2908 		m_freem(m0);
2909 	}
2910 
2911 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2912 		/* No more slots; notify upper layer. */
2913 		ifp->if_flags |= IFF_OACTIVE;
2914 	}
2915 
2916 	if (sc->sc_txfree != ofree) {
2917 		/* Set a watchdog timer in case the chip flakes out. */
2918 		ifp->if_timer = 5;
2919 	}
2920 }
2921 
2922 /*
2923  * wm_nq_tx_offload:
2924  *
2925  *	Set up TCP/IP checksumming parameters for the
2926  *	specified packet, for NEWQUEUE devices
2927  */
2928 static int
2929 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
2930     uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
2931 {
2932 	struct mbuf *m0 = txs->txs_mbuf;
2933 	struct m_tag *mtag;
2934 	uint32_t vl_len, mssidx, cmdc;
2935 	struct ether_header *eh;
2936 	int offset, iphl;
2937 
2938 	/*
2939 	 * XXX It would be nice if the mbuf pkthdr had offset
2940 	 * fields for the protocol headers.
2941 	 */
2942 	*cmdlenp = 0;
2943 	*fieldsp = 0;
2944 
2945 	eh = mtod(m0, struct ether_header *);
2946 	switch (htons(eh->ether_type)) {
2947 	case ETHERTYPE_IP:
2948 	case ETHERTYPE_IPV6:
2949 		offset = ETHER_HDR_LEN;
2950 		break;
2951 
2952 	case ETHERTYPE_VLAN:
2953 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2954 		break;
2955 
2956 	default:
2957 		/*
2958 		 * Don't support this protocol or encapsulation.
2959 		 */
2960 		*do_csum = false;
2961 		return 0;
2962 	}
2963 	*do_csum = true;
2964 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
2965 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
2966 
2967 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
2968 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
2969 
2970 	if ((m0->m_pkthdr.csum_flags &
2971 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
2972 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2973 	} else {
2974 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2975 	}
2976 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
2977 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
2978 
2979 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2980 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
2981 		     << NQTXC_VLLEN_VLAN_SHIFT);
2982 		*cmdlenp |= NQTX_CMD_VLE;
2983 	}
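	/*
	 * At this point vl_len carries the parsing lengths the advanced
	 * context descriptor uses in place of the legacy CSS/CSO offsets:
	 * e.g. (illustrative) an untagged IPv4 frame with a 20-byte
	 * header packs MACLEN 14, IPLEN 20 and a zero VLAN tag.
	 */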
2984 
2985 	mssidx = 0;
2986 
2987 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2988 		int hlen = offset + iphl;
2989 		int tcp_hlen;
2990 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2991 
2992 		if (__predict_false(m0->m_len <
2993 				    (hlen + sizeof(struct tcphdr)))) {
2994 			/*
2995 			 * TCP/IP headers are not in the first mbuf; we need
2996 			 * to do this the slow and painful way.  Let's just
2997 			 * hope this doesn't happen very often.
2998 			 */
2999 			struct tcphdr th;
3000 
3001 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
3002 
3003 			m_copydata(m0, hlen, sizeof(th), &th);
3004 			if (v4) {
3005 				struct ip ip;
3006 
3007 				m_copydata(m0, offset, sizeof(ip), &ip);
3008 				ip.ip_len = 0;
3009 				m_copyback(m0,
3010 				    offset + offsetof(struct ip, ip_len),
3011 				    sizeof(ip.ip_len), &ip.ip_len);
3012 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
3013 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
3014 			} else {
3015 				struct ip6_hdr ip6;
3016 
3017 				m_copydata(m0, offset, sizeof(ip6), &ip6);
3018 				ip6.ip6_plen = 0;
3019 				m_copyback(m0,
3020 				    offset + offsetof(struct ip6_hdr, ip6_plen),
3021 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
3022 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
3023 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
3024 			}
3025 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
3026 			    sizeof(th.th_sum), &th.th_sum);
3027 
3028 			tcp_hlen = th.th_off << 2;
3029 		} else {
3030 			/*
3031 			 * TCP/IP headers are in the first mbuf; we can do
3032 			 * this the easy way.
3033 			 */
3034 			struct tcphdr *th;
3035 
3036 			if (v4) {
3037 				struct ip *ip =
3038 				    (void *)(mtod(m0, char *) + offset);
3039 				th = (void *)(mtod(m0, char *) + hlen);
3040 
3041 				ip->ip_len = 0;
3042 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
3043 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
3044 			} else {
3045 				struct ip6_hdr *ip6 =
3046 				    (void *)(mtod(m0, char *) + offset);
3047 				th = (void *)(mtod(m0, char *) + hlen);
3048 
3049 				ip6->ip6_plen = 0;
3050 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
3051 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
3052 			}
3053 			tcp_hlen = th->th_off << 2;
3054 		}
3055 		hlen += tcp_hlen;
3056 		*cmdlenp |= NQTX_CMD_TSE;
3057 
3058 		if (v4) {
3059 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
3060 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
3061 		} else {
3062 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
3063 			*fieldsp |= NQTXD_FIELDS_TUXSM;
3064 		}
3065 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
3066 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
3067 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
3068 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
3069 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
3070 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
3071 	} else {
3072 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
3073 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
3074 	}
3075 
3076 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
3077 		*fieldsp |= NQTXD_FIELDS_IXSM;
3078 		cmdc |= NQTXC_CMD_IP4;
3079 	}
3080 
3081 	if (m0->m_pkthdr.csum_flags &
3082 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
3083 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
3084 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
3085 			cmdc |= NQTXC_CMD_TCP;
3086 		} else {
3087 			cmdc |= NQTXC_CMD_UDP;
3088 		}
3089 		cmdc |= NQTXC_CMD_IP4;
3090 		*fieldsp |= NQTXD_FIELDS_TUXSM;
3091 	}
3092 	if (m0->m_pkthdr.csum_flags &
3093 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
3094 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
3095 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
3096 			cmdc |= NQTXC_CMD_TCP;
3097 		} else {
3098 			cmdc |= NQTXC_CMD_UDP;
3099 		}
3100 		cmdc |= NQTXC_CMD_IP6;
3101 		*fieldsp |= NQTXD_FIELDS_TUXSM;
3102 	}
3103 
3104 	/* Fill in the context descriptor. */
3105 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
3106 	    htole32(vl_len);
3107 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
3108 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
3109 	    htole32(cmdc);
3110 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
3111 	    htole32(mssidx);
3112 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
3113 	DPRINTF(WM_DEBUG_TX,
3114 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
3115 	    sc->sc_txnext, 0, vl_len));
3116 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
3117 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
3118 	txs->txs_ndesc++;
3119 	return 0;
3120 }
3121 
3122 /*
3123  * wm_nq_start:		[ifnet interface function]
3124  *
3125  *	Start packet transmission on the interface for NEWQUEUE devices
3126  */
3127 static void
3128 wm_nq_start(struct ifnet *ifp)
3129 {
3130 	struct wm_softc *sc = ifp->if_softc;
3131 
3132 	WM_LOCK(sc);
3133 	if (!sc->sc_stopping)
3134 		wm_nq_start_locked(ifp);
3135 	WM_UNLOCK(sc);
3136 }
3137 
3138 static void
3139 wm_nq_start_locked(struct ifnet *ifp)
3140 {
3141 	struct wm_softc *sc = ifp->if_softc;
3142 	struct mbuf *m0;
3143 	struct m_tag *mtag;
3144 	struct wm_txsoft *txs;
3145 	bus_dmamap_t dmamap;
3146 	int error, nexttx, lasttx = -1, seg, segs_needed;
3147 	bool do_csum, sent;
3148 
3149 	KASSERT(WM_LOCKED(sc));
3150 
3151 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
3152 		return;
3153 
3154 	sent = false;
3155 
3156 	/*
3157 	 * Loop through the send queue, setting up transmit descriptors
3158 	 * until we drain the queue, or use up all available transmit
3159 	 * descriptors.
3160 	 */
3161 	for (;;) {
3162 		m0 = NULL;
3163 
3164 		/* Get a work queue entry. */
3165 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
3166 			wm_txintr(sc);
3167 			if (sc->sc_txsfree == 0) {
3168 				DPRINTF(WM_DEBUG_TX,
3169 				    ("%s: TX: no free job descriptors\n",
3170 					device_xname(sc->sc_dev)));
3171 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
3172 				break;
3173 			}
3174 		}
3175 
3176 		/* Grab a packet off the queue. */
3177 		IFQ_DEQUEUE(&ifp->if_snd, m0);
3178 		if (m0 == NULL)
3179 			break;
3180 
3181 		DPRINTF(WM_DEBUG_TX,
3182 		    ("%s: TX: have packet to transmit: %p\n",
3183 		    device_xname(sc->sc_dev), m0));
3184 
3185 		txs = &sc->sc_txsoft[sc->sc_txsnext];
3186 		dmamap = txs->txs_dmamap;
3187 
3188 		/*
3189 		 * Load the DMA map.  If this fails, the packet either
3190 		 * didn't fit in the allotted number of segments, or we
3191 		 * were short on resources.  For the too-many-segments
3192 		 * case, we simply report an error and drop the packet,
3193 		 * since we can't sanely copy a jumbo packet to a single
3194 		 * buffer.
3195 		 */
3196 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
3197 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
3198 		if (error) {
3199 			if (error == EFBIG) {
3200 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
3201 				log(LOG_ERR, "%s: Tx packet consumes too many "
3202 				    "DMA segments, dropping...\n",
3203 				    device_xname(sc->sc_dev));
3204 				wm_dump_mbuf_chain(sc, m0);
3205 				m_freem(m0);
3206 				continue;
3207 			}
3208 			/*
3209 			 * Short on resources, just stop for now.
3210 			 */
3211 			DPRINTF(WM_DEBUG_TX,
3212 			    ("%s: TX: dmamap load failed: %d\n",
3213 			    device_xname(sc->sc_dev), error));
3214 			break;
3215 		}
3216 
3217 		segs_needed = dmamap->dm_nsegs;
3218 
3219 		/*
3220 		 * Ensure we have enough descriptors free to describe
3221 		 * the packet.  Note, we always reserve one descriptor
3222 		 * at the end of the ring due to the semantics of the
3223 		 * TDT register, plus one more in the event we need
3224 		 * to load offload context.
3225 		 */
3226 		if (segs_needed > sc->sc_txfree - 2) {
3227 			/*
3228 			 * Not enough free descriptors to transmit this
3229 			 * packet.  We haven't committed anything yet,
3230 			 * so just unload the DMA map, put the packet
3231 			 * back on the queue, and punt.  Notify the upper
3232 			 * layer that there are no more slots left.
3233 			 */
3234 			DPRINTF(WM_DEBUG_TX,
3235 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
3236 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
3237 			    segs_needed, sc->sc_txfree - 1));
3238 			ifp->if_flags |= IFF_OACTIVE;
3239 			bus_dmamap_unload(sc->sc_dmat, dmamap);
3240 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
3241 			break;
3242 		}
3243 
3244 		/*
3245 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
3246 		 */
3247 
3248 		DPRINTF(WM_DEBUG_TX,
3249 		    ("%s: TX: packet has %d (%d) DMA segments\n",
3250 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
3251 
3252 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
3253 
3254 		/*
3255 		 * Store a pointer to the packet so that we can free it
3256 		 * later.
3257 		 *
3258 		 * Initially, we assume the number of descriptors the
3259 		 * packet uses equals the number of DMA segments.  This may be
3260 		 * incremented by 1 if we do checksum offload (a descriptor
3261 		 * is used to set the checksum context).
3262 		 */
3263 		txs->txs_mbuf = m0;
3264 		txs->txs_firstdesc = sc->sc_txnext;
3265 		txs->txs_ndesc = segs_needed;
3266 
3267 		/* Set up offload parameters for this packet. */
3268 		uint32_t cmdlen, fields, dcmdlen;
3269 		if (m0->m_pkthdr.csum_flags &
3270 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
3271 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
3272 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
3273 			if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
3274 			    &do_csum) != 0) {
3275 				/* Error message already displayed. */
3276 				bus_dmamap_unload(sc->sc_dmat, dmamap);
3277 				continue;
3278 			}
3279 		} else {
3280 			do_csum = false;
3281 			cmdlen = 0;
3282 			fields = 0;
3283 		}
3284 
3285 		/* Sync the DMA map. */
3286 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
3287 		    BUS_DMASYNC_PREWRITE);
3288 
3289 		/*
3290 		 * Initialize the first transmit descriptor.
3291 		 */
3292 		nexttx = sc->sc_txnext;
3293 		if (!do_csum) {
3294 			/* setup a legacy descriptor */
3295 			wm_set_dma_addr(
3296 			    &sc->sc_txdescs[nexttx].wtx_addr,
3297 			    dmamap->dm_segs[0].ds_addr);
3298 			sc->sc_txdescs[nexttx].wtx_cmdlen =
3299 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
3300 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
3301 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
3302 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
3303 			    NULL) {
3304 				sc->sc_txdescs[nexttx].wtx_cmdlen |=
3305 				    htole32(WTX_CMD_VLE);
3306 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
3307 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
3308 			} else {
3309 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
3310 			}
3311 			dcmdlen = 0;
3312 		} else {
3313 			/* setup an advanced data descriptor */
3314 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3315 			    htole64(dmamap->dm_segs[0].ds_addr);
3316 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
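			/*
			 * The segment length occupies the low bits of
			 * nqtxd_cmdlen and the command/type flags the high
			 * bits; the KASSERT above verifies that they don't
			 * overlap before they are OR'd together below.
			 */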
3317 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3318 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen );
3319 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
3320 			    htole32(fields);
3321 			DPRINTF(WM_DEBUG_TX,
3322 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
3323 			    device_xname(sc->sc_dev), nexttx,
3324 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
3325 			DPRINTF(WM_DEBUG_TX,
3326 			    ("\t 0x%08x%08x\n", fields,
3327 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
3328 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
3329 		}
3330 
3331 		lasttx = nexttx;
3332 		nexttx = WM_NEXTTX(sc, nexttx);
3333 		/*
3334 		 * fill in the next descriptors. legacy or adcanced format
3335 		 * is the same here
3336 		 */
3337 		for (seg = 1; seg < dmamap->dm_nsegs;
3338 		    seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
3339 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3340 			    htole64(dmamap->dm_segs[seg].ds_addr);
3341 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3342 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
3343 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
3344 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
3345 			lasttx = nexttx;
3346 
3347 			DPRINTF(WM_DEBUG_TX,
3348 			    ("%s: TX: desc %d: %#" PRIx64 ", "
3349 			     "len %#04zx\n",
3350 			    device_xname(sc->sc_dev), nexttx,
3351 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
3352 			    dmamap->dm_segs[seg].ds_len));
3353 		}
3354 
3355 		KASSERT(lasttx != -1);
3356 
3357 		/*
3358 		 * Set up the command byte on the last descriptor of
3359 		 * the packet.  If we're in the interrupt delay window,
3360 		 * delay the interrupt.
3361 		 */
3362 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
3363 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
3364 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
3365 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
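		/*
		 * The KASSERT above is what makes this safe: EOP and RS
		 * occupy the same bit positions in the legacy and advanced
		 * descriptor layouts, so the legacy view of the descriptor
		 * works for both formats.
		 */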
3366 
3367 		txs->txs_lastdesc = lasttx;
3368 
3369 		DPRINTF(WM_DEBUG_TX,
3370 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
3371 		    device_xname(sc->sc_dev),
3372 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
3373 
3374 		/* Sync the descriptors we're using. */
3375 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
3376 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3377 
3378 		/* Give the packet to the chip. */
3379 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
3380 		sent = true;
3381 
3382 		DPRINTF(WM_DEBUG_TX,
3383 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
3384 
3385 		DPRINTF(WM_DEBUG_TX,
3386 		    ("%s: TX: finished transmitting packet, job %d\n",
3387 		    device_xname(sc->sc_dev), sc->sc_txsnext));
3388 
3389 		/* Advance the tx pointer. */
3390 		sc->sc_txfree -= txs->txs_ndesc;
3391 		sc->sc_txnext = nexttx;
3392 
3393 		sc->sc_txsfree--;
3394 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
3395 
3396 		/* Pass the packet to any BPF listeners. */
3397 		bpf_mtap(ifp, m0);
3398 	}
3399 
3400 	if (m0 != NULL) {
3401 		ifp->if_flags |= IFF_OACTIVE;
3402 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
3403 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
3404 		m_freem(m0);
3405 	}
3406 
3407 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
3408 		/* No more slots; notify upper layer. */
3409 		ifp->if_flags |= IFF_OACTIVE;
3410 	}
3411 
3412 	if (sent) {
3413 		/* Set a watchdog timer in case the chip flakes out. */
3414 		ifp->if_timer = 5;
3415 	}
3416 }
3417 
3418 /*
3419  * wm_watchdog:		[ifnet interface function]
3420  *
3421  *	Watchdog timer handler.
3422  */
3423 static void
3424 wm_watchdog(struct ifnet *ifp)
3425 {
3426 	struct wm_softc *sc = ifp->if_softc;
3427 
3428 	/*
3429 	 * Since we're using delayed interrupts, sweep up
3430 	 * before we report an error.
3431 	 */
3432 	WM_LOCK(sc);
3433 	wm_txintr(sc);
3434 	WM_UNLOCK(sc);
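
	/*
	 * If the sweep above reclaimed every descriptor, the timeout was
	 * only delayed-interrupt latency; in that case we skip the reset
	 * below and simply restart transmission.
	 */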
3435 
3436 	if (sc->sc_txfree != WM_NTXDESC(sc)) {
3437 #ifdef WM_DEBUG
3438 		int i, j;
3439 		struct wm_txsoft *txs;
3440 #endif
3441 		log(LOG_ERR,
3442 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3443 		    device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
3444 		    sc->sc_txnext);
3445 		ifp->if_oerrors++;
3446 #ifdef WM_DEBUG
3447 		for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
3448 		    i = WM_NEXTTXS(sc, i)) {
3449 		    txs = &sc->sc_txsoft[i];
3450 		    printf("txs %d tx %d -> %d\n",
3451 			i, txs->txs_firstdesc, txs->txs_lastdesc);
3452 		    for (j = txs->txs_firstdesc; ;
3453 			j = WM_NEXTTX(sc, j)) {
3454 			printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3455 			    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
3456 			printf("\t %#08x%08x\n",
3457 			    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
3458 			    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
3459 			if (j == txs->txs_lastdesc)
3460 				break;
3461 			}
3462 		}
3463 #endif
3464 		/* Reset the interface. */
3465 		(void) wm_init(ifp);
3466 	}
3467 
3468 	/* Try to get more packets going. */
3469 	ifp->if_start(ifp);
3470 }
3471 
3472 static int
3473 wm_ifflags_cb(struct ethercom *ec)
3474 {
3475 	struct ifnet *ifp = &ec->ec_if;
3476 	struct wm_softc *sc = ifp->if_softc;
3477 	int change = ifp->if_flags ^ sc->sc_if_flags;
3478 	int rc = 0;
3479 
3480 	WM_LOCK(sc);
3481 
3482 	if (change != 0)
3483 		sc->sc_if_flags = ifp->if_flags;
3484 
3485 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
3486 		rc = ENETRESET;
3487 		goto out;
3488 	}
3489 
3490 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
3491 		wm_set_filter(sc);
3492 
3493 	wm_set_vlan(sc);
3494 
3495 out:
3496 	WM_UNLOCK(sc);
3497 
3498 	return rc;
3499 }
3500 
3501 /*
3502  * wm_ioctl:		[ifnet interface function]
3503  *
3504  *	Handle control requests from the operator.
3505  */
3506 static int
3507 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3508 {
3509 	struct wm_softc *sc = ifp->if_softc;
3510 	struct ifreq *ifr = (struct ifreq *) data;
3511 	struct ifaddr *ifa = (struct ifaddr *)data;
3512 	struct sockaddr_dl *sdl;
3513 	int s, error;
3514 
3515 #ifndef WM_MPSAFE
3516 	s = splnet();
3517 #endif
3518 	WM_LOCK(sc);
3519 
3520 	switch (cmd) {
3521 	case SIOCSIFMEDIA:
3522 	case SIOCGIFMEDIA:
3523 		/* Flow control requires full-duplex mode. */
3524 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3525 		    (ifr->ifr_media & IFM_FDX) == 0)
3526 			ifr->ifr_media &= ~IFM_ETH_FMASK;
3527 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3528 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3529 				/* We can do both TXPAUSE and RXPAUSE. */
3530 				ifr->ifr_media |=
3531 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3532 			}
3533 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3534 		}
3535 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
3536 		break;
3537 	case SIOCINITIFADDR:
3538 		if (ifa->ifa_addr->sa_family == AF_LINK) {
3539 			sdl = satosdl(ifp->if_dl->ifa_addr);
3540 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3541 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3542 			/* unicast address is first multicast entry */
3543 			wm_set_filter(sc);
3544 			error = 0;
3545 			break;
3546 		}
3547 		/*FALLTHROUGH*/
3548 	default:
3549 		WM_UNLOCK(sc);
3550 #ifdef WM_MPSAFE
3551 		s = splnet();
3552 #endif
		/* ether_ioctl() may call wm_start, so unlock here */
3554 		error = ether_ioctl(ifp, cmd, data);
3555 #ifdef WM_MPSAFE
3556 		splx(s);
3557 #endif
3558 		WM_LOCK(sc);
3559 
3560 		if (error != ENETRESET)
3561 			break;
3562 
3563 		error = 0;
3564 
3565 		if (cmd == SIOCSIFCAP) {
3566 			WM_UNLOCK(sc);
3567 			error = (*ifp->if_init)(ifp);
3568 			WM_LOCK(sc);
3569 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3570 			;
3571 		else if (ifp->if_flags & IFF_RUNNING) {
3572 			/*
3573 			 * Multicast list has changed; set the hardware filter
3574 			 * accordingly.
3575 			 */
3576 			wm_set_filter(sc);
3577 		}
3578 		break;
3579 	}
3580 
3581 	WM_UNLOCK(sc);
3582 
3583 	/* Try to get more packets going. */
3584 	ifp->if_start(ifp);
3585 
3586 #ifndef WM_MPSAFE
3587 	splx(s);
3588 #endif
3589 	return error;
3590 }
3591 
3592 /*
3593  * wm_intr:
3594  *
3595  *	Interrupt service routine.
3596  */
3597 static int
3598 wm_intr(void *arg)
3599 {
3600 	struct wm_softc *sc = arg;
3601 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3602 	uint32_t icr;
3603 	int handled = 0;
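
	/*
	 * ICR is clear-on-read, so each pass through this loop both
	 * fetches and acknowledges the pending interrupt causes; we keep
	 * looping until none of the causes we asked for remain asserted.
	 */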
3604 
3605 	while (1 /* CONSTCOND */) {
3606 		icr = CSR_READ(sc, WMREG_ICR);
3607 		if ((icr & sc->sc_icr) == 0)
3608 			break;
3609 		rnd_add_uint32(&sc->rnd_source, icr);
3610 
3611 		WM_LOCK(sc);
3612 
3613 		if (sc->sc_stopping) {
3614 			WM_UNLOCK(sc);
3615 			break;
3616 		}
3617 
3618 		handled = 1;
3619 
3620 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
3621 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
3622 			DPRINTF(WM_DEBUG_RX,
3623 			    ("%s: RX: got Rx intr 0x%08x\n",
3624 			    device_xname(sc->sc_dev),
3625 			    icr & (ICR_RXDMT0|ICR_RXT0)));
3626 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
3627 		}
3628 #endif
3629 		wm_rxintr(sc);
3630 
3631 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
3632 		if (icr & ICR_TXDW) {
3633 			DPRINTF(WM_DEBUG_TX,
3634 			    ("%s: TX: got TXDW interrupt\n",
3635 			    device_xname(sc->sc_dev)));
3636 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
3637 		}
3638 #endif
3639 		wm_txintr(sc);
3640 
3641 		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
3642 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
3643 			wm_linkintr(sc, icr);
3644 		}
3645 
3646 		WM_UNLOCK(sc);
3647 
3648 		if (icr & ICR_RXO) {
3649 #if defined(WM_DEBUG)
3650 			log(LOG_WARNING, "%s: Receive overrun\n",
3651 			    device_xname(sc->sc_dev));
3652 #endif /* defined(WM_DEBUG) */
3653 		}
3654 	}
3655 
3656 	if (handled) {
3657 		/* Try to get more packets going. */
3658 		ifp->if_start(ifp);
3659 	}
3660 
3661 	return handled;
3662 }
3663 
3664 /*
3665  * wm_txintr:
3666  *
3667  *	Helper; handle transmit interrupts.
3668  */
3669 static void
3670 wm_txintr(struct wm_softc *sc)
3671 {
3672 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3673 	struct wm_txsoft *txs;
3674 	uint8_t status;
3675 	int i;
3676 
3677 	if (sc->sc_stopping)
3678 		return;
3679 
3680 	ifp->if_flags &= ~IFF_OACTIVE;
3681 
3682 	/*
3683 	 * Go through the Tx list and free mbufs for those
3684 	 * frames which have been transmitted.
3685 	 */
3686 	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
3687 	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
3688 		txs = &sc->sc_txsoft[i];
3689 
3690 		DPRINTF(WM_DEBUG_TX,
3691 		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
3692 
3693 		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
3694 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3695 
3696 		status =
3697 		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
3698 		if ((status & WTX_ST_DD) == 0) {
3699 			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
3700 			    BUS_DMASYNC_PREREAD);
3701 			break;
3702 		}
3703 
3704 		DPRINTF(WM_DEBUG_TX,
3705 		    ("%s: TX: job %d done: descs %d..%d\n",
3706 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
3707 		    txs->txs_lastdesc));
3708 
3709 		/*
3710 		 * XXX We should probably be using the statistics
3711 		 * XXX registers, but I don't know if they exist
3712 		 * XXX on chips before the i82544.
3713 		 */
3714 
3715 #ifdef WM_EVENT_COUNTERS
3716 		if (status & WTX_ST_TU)
3717 			WM_EVCNT_INCR(&sc->sc_ev_tu);
3718 #endif /* WM_EVENT_COUNTERS */
3719 
3720 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
3721 			ifp->if_oerrors++;
3722 			if (status & WTX_ST_LC)
3723 				log(LOG_WARNING, "%s: late collision\n",
3724 				    device_xname(sc->sc_dev));
3725 			else if (status & WTX_ST_EC) {
3726 				ifp->if_collisions += 16;
3727 				log(LOG_WARNING, "%s: excessive collisions\n",
3728 				    device_xname(sc->sc_dev));
3729 			}
3730 		} else
3731 			ifp->if_opackets++;
3732 
3733 		sc->sc_txfree += txs->txs_ndesc;
3734 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
3735 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3736 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3737 		m_freem(txs->txs_mbuf);
3738 		txs->txs_mbuf = NULL;
3739 	}
3740 
3741 	/* Update the dirty transmit buffer pointer. */
3742 	sc->sc_txsdirty = i;
3743 	DPRINTF(WM_DEBUG_TX,
3744 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
3745 
3746 	/*
3747 	 * If there are no more pending transmissions, cancel the watchdog
3748 	 * timer.
3749 	 */
3750 	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
3751 		ifp->if_timer = 0;
3752 }
3753 
3754 /*
3755  * wm_rxintr:
3756  *
3757  *	Helper; handle receive interrupts.
3758  */
3759 static void
3760 wm_rxintr(struct wm_softc *sc)
3761 {
3762 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3763 	struct wm_rxsoft *rxs;
3764 	struct mbuf *m;
3765 	int i, len;
3766 	uint8_t status, errors;
3767 	uint16_t vlantag;
3768 
3769 	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
3770 		rxs = &sc->sc_rxsoft[i];
3771 
3772 		DPRINTF(WM_DEBUG_RX,
3773 		    ("%s: RX: checking descriptor %d\n",
3774 		    device_xname(sc->sc_dev), i));
3775 
3776 		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3777 
3778 		status = sc->sc_rxdescs[i].wrx_status;
3779 		errors = sc->sc_rxdescs[i].wrx_errors;
3780 		len = le16toh(sc->sc_rxdescs[i].wrx_len);
3781 		vlantag = sc->sc_rxdescs[i].wrx_special;
3782 
3783 		if ((status & WRX_ST_DD) == 0) {
3784 			/*
3785 			 * We have processed all of the receive descriptors.
3786 			 */
3787 			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
3788 			break;
3789 		}
3790 
3791 		if (__predict_false(sc->sc_rxdiscard)) {
3792 			DPRINTF(WM_DEBUG_RX,
3793 			    ("%s: RX: discarding contents of descriptor %d\n",
3794 			    device_xname(sc->sc_dev), i));
3795 			WM_INIT_RXDESC(sc, i);
3796 			if (status & WRX_ST_EOP) {
3797 				/* Reset our state. */
3798 				DPRINTF(WM_DEBUG_RX,
3799 				    ("%s: RX: resetting rxdiscard -> 0\n",
3800 				    device_xname(sc->sc_dev)));
3801 				sc->sc_rxdiscard = 0;
3802 			}
3803 			continue;
3804 		}
3805 
3806 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3807 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3808 
3809 		m = rxs->rxs_mbuf;
3810 
3811 		/*
3812 		 * Add a new receive buffer to the ring, unless of
3813 		 * course the length is zero. Treat the latter as a
3814 		 * failed mapping.
3815 		 */
3816 		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
3817 			/*
3818 			 * Failed, throw away what we've done so
3819 			 * far, and discard the rest of the packet.
3820 			 */
3821 			ifp->if_ierrors++;
3822 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3823 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3824 			WM_INIT_RXDESC(sc, i);
3825 			if ((status & WRX_ST_EOP) == 0)
3826 				sc->sc_rxdiscard = 1;
3827 			if (sc->sc_rxhead != NULL)
3828 				m_freem(sc->sc_rxhead);
3829 			WM_RXCHAIN_RESET(sc);
3830 			DPRINTF(WM_DEBUG_RX,
3831 			    ("%s: RX: Rx buffer allocation failed, "
3832 			    "dropping packet%s\n", device_xname(sc->sc_dev),
3833 			    sc->sc_rxdiscard ? " (discard)" : ""));
3834 			continue;
3835 		}
3836 
3837 		m->m_len = len;
3838 		sc->sc_rxlen += len;
3839 		DPRINTF(WM_DEBUG_RX,
3840 		    ("%s: RX: buffer at %p len %d\n",
3841 		    device_xname(sc->sc_dev), m->m_data, len));
3842 
3843 		/*
3844 		 * If this is not the end of the packet, keep
3845 		 * looking.
3846 		 */
3847 		if ((status & WRX_ST_EOP) == 0) {
3848 			WM_RXCHAIN_LINK(sc, m);
3849 			DPRINTF(WM_DEBUG_RX,
3850 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
3851 			    device_xname(sc->sc_dev), sc->sc_rxlen));
3852 			continue;
3853 		}
3854 
3855 		/*
		 * Okay, we have the entire packet now.  The chip is
		 * configured to include the FCS except on the I350,
		 * I354, I210 and I211 (not all chips can be configured
		 * to strip it), so we need to trim it.  We may also
		 * need to adjust the length of the previous mbuf in
		 * the chain if the current mbuf is too short.
		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
		 * register is always set on the I350, so the FCS is
		 * already stripped there and we don't trim it.
3864 		 */
3865 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
3866 		    && (sc->sc_type != WM_T_I210)
3867 		    && (sc->sc_type != WM_T_I211)) {
3868 			if (m->m_len < ETHER_CRC_LEN) {
3869 				sc->sc_rxtail->m_len
3870 				    -= (ETHER_CRC_LEN - m->m_len);
3871 				m->m_len = 0;
3872 			} else
3873 				m->m_len -= ETHER_CRC_LEN;
3874 			len = sc->sc_rxlen - ETHER_CRC_LEN;
3875 		} else
3876 			len = sc->sc_rxlen;
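		/*
		 * Example of the short-tail case above: if the final mbuf
		 * holds only 2 bytes, they are FCS, as are the last 2
		 * bytes of the previous mbuf, so we trim 2 bytes from
		 * sc_rxtail and zero the final mbuf's length.
		 */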
3877 
3878 		WM_RXCHAIN_LINK(sc, m);
3879 
3880 		*sc->sc_rxtailp = NULL;
3881 		m = sc->sc_rxhead;
3882 
3883 		WM_RXCHAIN_RESET(sc);
3884 
3885 		DPRINTF(WM_DEBUG_RX,
3886 		    ("%s: RX: have entire packet, len -> %d\n",
3887 		    device_xname(sc->sc_dev), len));
3888 
3889 		/*
3890 		 * If an error occurred, update stats and drop the packet.
3891 		 */
3892 		if (errors &
3893 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
3894 			if (errors & WRX_ER_SE)
3895 				log(LOG_WARNING, "%s: symbol error\n",
3896 				    device_xname(sc->sc_dev));
3897 			else if (errors & WRX_ER_SEQ)
3898 				log(LOG_WARNING, "%s: receive sequence error\n",
3899 				    device_xname(sc->sc_dev));
3900 			else if (errors & WRX_ER_CE)
3901 				log(LOG_WARNING, "%s: CRC error\n",
3902 				    device_xname(sc->sc_dev));
3903 			m_freem(m);
3904 			continue;
3905 		}
3906 
3907 		/*
3908 		 * No errors.  Receive the packet.
3909 		 */
3910 		m->m_pkthdr.rcvif = ifp;
3911 		m->m_pkthdr.len = len;
3912 
3913 		/*
3914 		 * If VLANs are enabled, VLAN packets have been unwrapped
3915 		 * for us.  Associate the tag with the packet.
3916 		 */
3917 		/* XXXX should check for i350 and i354 */
3918 		if ((status & WRX_ST_VP) != 0) {
3919 			VLAN_INPUT_TAG(ifp, m,
3920 			    le16toh(vlantag),
3921 			    continue);
3922 		}
3923 
3924 		/*
3925 		 * Set up checksum info for this packet.
3926 		 */
3927 		if ((status & WRX_ST_IXSM) == 0) {
3928 			if (status & WRX_ST_IPCS) {
3929 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
3930 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
3931 				if (errors & WRX_ER_IPE)
3932 					m->m_pkthdr.csum_flags |=
3933 					    M_CSUM_IPv4_BAD;
3934 			}
3935 			if (status & WRX_ST_TCPCS) {
3936 				/*
3937 				 * Note: we don't know if this was TCP or UDP,
3938 				 * so we just set both bits, and expect the
3939 				 * upper layers to deal.
3940 				 */
3941 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
3942 				m->m_pkthdr.csum_flags |=
3943 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
3944 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
3945 				if (errors & WRX_ER_TCPE)
3946 					m->m_pkthdr.csum_flags |=
3947 					    M_CSUM_TCP_UDP_BAD;
3948 			}
3949 		}
3950 
3951 		ifp->if_ipackets++;
3952 
3953 		WM_UNLOCK(sc);
3954 
3955 		/* Pass this up to any BPF listeners. */
3956 		bpf_mtap(ifp, m);
3957 
3958 		/* Pass it on. */
3959 		(*ifp->if_input)(ifp, m);
3960 
3961 		WM_LOCK(sc);
3962 
3963 		if (sc->sc_stopping)
3964 			break;
3965 	}
3966 
3967 	/* Update the receive pointer. */
3968 	sc->sc_rxptr = i;
3969 
3970 	DPRINTF(WM_DEBUG_RX,
3971 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
3972 }
3973 
3974 /*
3975  * wm_linkintr_gmii:
3976  *
3977  *	Helper; handle link interrupts for GMII.
3978  */
3979 static void
3980 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
3981 {
3982 
3983 	KASSERT(WM_LOCKED(sc));
3984 
3985 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3986 		__func__));
3987 
3988 	if (icr & ICR_LSC) {
3989 		DPRINTF(WM_DEBUG_LINK,
3990 		    ("%s: LINK: LSC -> mii_pollstat\n",
3991 			device_xname(sc->sc_dev)));
3992 		mii_pollstat(&sc->sc_mii);
3993 		if (sc->sc_type == WM_T_82543) {
3994 			int miistatus, active;
3995 
3996 			/*
3997 			 * With 82543, we need to force speed and
3998 			 * duplex on the MAC equal to what the PHY
3999 			 * speed and duplex configuration is.
4000 			 */
4001 			miistatus = sc->sc_mii.mii_media_status;
4002 
4003 			if (miistatus & IFM_ACTIVE) {
4004 				active = sc->sc_mii.mii_media_active;
4005 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
4006 				switch (IFM_SUBTYPE(active)) {
4007 				case IFM_10_T:
4008 					sc->sc_ctrl |= CTRL_SPEED_10;
4009 					break;
4010 				case IFM_100_TX:
4011 					sc->sc_ctrl |= CTRL_SPEED_100;
4012 					break;
4013 				case IFM_1000_T:
4014 					sc->sc_ctrl |= CTRL_SPEED_1000;
4015 					break;
4016 				default:
4017 					/*
4018 					 * fiber?
					 * Should not enter here.
4020 					 */
4021 					printf("unknown media (%x)\n",
4022 					    active);
4023 					break;
4024 				}
4025 				if (active & IFM_FDX)
4026 					sc->sc_ctrl |= CTRL_FD;
4027 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4028 			}
4029 		} else if ((sc->sc_type == WM_T_ICH8)
4030 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
4031 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
4032 		} else if (sc->sc_type == WM_T_PCH) {
4033 			wm_k1_gig_workaround_hv(sc,
4034 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
4035 		}
4036 
4037 		if ((sc->sc_phytype == WMPHY_82578)
4038 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
4039 			== IFM_1000_T)) {
4040 
4041 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
4042 				delay(200*1000); /* XXX too big */
4043 
4044 				/* Link stall fix for link up */
4045 				wm_gmii_hv_writereg(sc->sc_dev, 1,
4046 				    HV_MUX_DATA_CTRL,
4047 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
4048 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
4049 				wm_gmii_hv_writereg(sc->sc_dev, 1,
4050 				    HV_MUX_DATA_CTRL,
4051 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
4052 			}
4053 		}
4054 	} else if (icr & ICR_RXSEQ) {
4055 		DPRINTF(WM_DEBUG_LINK,
4056 		    ("%s: LINK Receive sequence error\n",
4057 			device_xname(sc->sc_dev)));
4058 	}
4059 }
4060 
4061 /*
4062  * wm_linkintr_tbi:
4063  *
4064  *	Helper; handle link interrupts for TBI mode.
4065  */
4066 static void
4067 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
4068 {
4069 	uint32_t status;
4070 
4071 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
4072 		__func__));
4073 
4074 	status = CSR_READ(sc, WMREG_STATUS);
4075 	if (icr & ICR_LSC) {
4076 		if (status & STATUS_LU) {
4077 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
4078 			    device_xname(sc->sc_dev),
4079 			    (status & STATUS_FD) ? "FDX" : "HDX"));
4080 			/*
			 * NOTE: the hardware updates the TFCE and RFCE bits
			 * in CTRL automatically, so we re-read the register
			 * to keep sc->sc_ctrl in sync.
4083 			 */
4084 
4085 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4086 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4087 			sc->sc_fcrtl &= ~FCRTL_XONE;
4088 			if (status & STATUS_FD)
4089 				sc->sc_tctl |=
4090 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4091 			else
4092 				sc->sc_tctl |=
4093 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4094 			if (sc->sc_ctrl & CTRL_TFCE)
4095 				sc->sc_fcrtl |= FCRTL_XONE;
4096 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4097 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
4098 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
4099 				      sc->sc_fcrtl);
4100 			sc->sc_tbi_linkup = 1;
4101 		} else {
4102 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
4103 			    device_xname(sc->sc_dev)));
4104 			sc->sc_tbi_linkup = 0;
4105 		}
4106 		wm_tbi_set_linkled(sc);
4107 	} else if (icr & ICR_RXCFG) {
4108 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
4109 		    device_xname(sc->sc_dev)));
4110 		sc->sc_tbi_nrxcfg++;
4111 		wm_check_for_link(sc);
4112 	} else if (icr & ICR_RXSEQ) {
4113 		DPRINTF(WM_DEBUG_LINK,
4114 		    ("%s: LINK: Receive sequence error\n",
4115 		    device_xname(sc->sc_dev)));
4116 	}
4117 }
4118 
4119 /*
4120  * wm_linkintr:
4121  *
4122  *	Helper; handle link interrupts.
4123  */
4124 static void
4125 wm_linkintr(struct wm_softc *sc, uint32_t icr)
4126 {
4127 
4128 	if (sc->sc_flags & WM_F_HAS_MII)
4129 		wm_linkintr_gmii(sc, icr);
4130 	else
4131 		wm_linkintr_tbi(sc, icr);
4132 }
4133 
4134 /*
4135  * wm_tick:
4136  *
4137  *	One second timer, used to check link status, sweep up
4138  *	completed transmit jobs, etc.
4139  */
4140 static void
4141 wm_tick(void *arg)
4142 {
4143 	struct wm_softc *sc = arg;
4144 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4145 #ifndef WM_MPSAFE
4146 	int s;
4147 
4148 	s = splnet();
4149 #endif
4150 
4151 	WM_LOCK(sc);
4152 
4153 	if (sc->sc_stopping)
4154 		goto out;
4155 
4156 	if (sc->sc_type >= WM_T_82542_2_1) {
4157 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
4158 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
4159 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
4160 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
4161 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
4162 	}
4163 
4164 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4165 	ifp->if_ierrors += 0ULL + /* ensure quad_t */
4166 	    + CSR_READ(sc, WMREG_CRCERRS)
4167 	    + CSR_READ(sc, WMREG_ALGNERRC)
4168 	    + CSR_READ(sc, WMREG_SYMERRC)
4169 	    + CSR_READ(sc, WMREG_RXERRC)
4170 	    + CSR_READ(sc, WMREG_SEC)
4171 	    + CSR_READ(sc, WMREG_CEXTERR)
4172 	    + CSR_READ(sc, WMREG_RLEC);
4173 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
4174 
4175 	if (sc->sc_flags & WM_F_HAS_MII)
4176 		mii_tick(&sc->sc_mii);
4177 	else
4178 		wm_tbi_check_link(sc);
4179 
4180 out:
4181 	WM_UNLOCK(sc);
4182 #ifndef WM_MPSAFE
4183 	splx(s);
4184 #endif
4185 
4186 	if (!sc->sc_stopping)
4187 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4188 }
4189 
4190 /*
4191  * wm_reset:
4192  *
4193  *	Reset the i82542 chip.
4194  */
4195 static void
4196 wm_reset(struct wm_softc *sc)
4197 {
4198 	int phy_reset = 0;
4199 	uint32_t reg, mask;
4200 
4201 	/*
4202 	 * Allocate on-chip memory according to the MTU size.
4203 	 * The Packet Buffer Allocation register must be written
4204 	 * before the chip is reset.
4205 	 */
4206 	switch (sc->sc_type) {
4207 	case WM_T_82547:
4208 	case WM_T_82547_2:
4209 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4210 		    PBA_22K : PBA_30K;
4211 		sc->sc_txfifo_head = 0;
4212 		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
4213 		sc->sc_txfifo_size =
4214 		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
4215 		sc->sc_txfifo_stall = 0;
4216 		break;
4217 	case WM_T_82571:
4218 	case WM_T_82572:
	case WM_T_82575:	/* XXX need special handling for jumbo frames */
4220 	case WM_T_I350:
4221 	case WM_T_I354:
4222 	case WM_T_80003:
4223 		sc->sc_pba = PBA_32K;
4224 		break;
4225 	case WM_T_82580:
4226 	case WM_T_82580ER:
4227 		sc->sc_pba = PBA_35K;
4228 		break;
4229 	case WM_T_I210:
4230 	case WM_T_I211:
4231 		sc->sc_pba = PBA_34K;
4232 		break;
4233 	case WM_T_82576:
4234 		sc->sc_pba = PBA_64K;
4235 		break;
4236 	case WM_T_82573:
4237 		sc->sc_pba = PBA_12K;
4238 		break;
4239 	case WM_T_82574:
4240 	case WM_T_82583:
4241 		sc->sc_pba = PBA_20K;
4242 		break;
4243 	case WM_T_ICH8:
4244 		sc->sc_pba = PBA_8K;
4245 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
4246 		break;
4247 	case WM_T_ICH9:
4248 	case WM_T_ICH10:
4249 		sc->sc_pba = PBA_10K;
4250 		break;
4251 	case WM_T_PCH:
4252 	case WM_T_PCH2:
4253 	case WM_T_PCH_LPT:
4254 		sc->sc_pba = PBA_26K;
4255 		break;
4256 	default:
4257 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4258 		    PBA_40K : PBA_48K;
4259 		break;
4260 	}
4261 	CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
4262 
	/*
	 * Prevent the PCI-E bus from sticking: disable master accesses
	 * and wait for any pending transactions to drain before the
	 * reset below.
	 */
4264 	if (sc->sc_flags & WM_F_PCIE) {
4265 		int timeout = 800;
4266 
4267 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
4268 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4269 
4270 		while (timeout--) {
4271 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
4272 			    == 0)
4273 				break;
4274 			delay(100);
4275 		}
4276 	}
4277 
4278 	/* Set the completion timeout for interface */
4279 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
4280 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4281 		wm_set_pcie_completion_timeout(sc);
4282 
4283 	/* Clear interrupt */
4284 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4285 
4286 	/* Stop the transmit and receive processes. */
4287 	CSR_WRITE(sc, WMREG_RCTL, 0);
4288 	sc->sc_rctl &= ~RCTL_EN;
4289 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
4290 	CSR_WRITE_FLUSH(sc);
4291 
4292 	/* XXX set_tbi_sbp_82543() */
4293 
4294 	delay(10*1000);
4295 
4296 	/* Must acquire the MDIO ownership before MAC reset */
4297 	switch (sc->sc_type) {
4298 	case WM_T_82573:
4299 	case WM_T_82574:
4300 	case WM_T_82583:
4301 		wm_get_hw_semaphore_82573(sc);
4302 		break;
4303 	default:
4304 		break;
4305 	}
4306 
4307 	/*
4308 	 * 82541 Errata 29? & 82547 Errata 28?
4309 	 * See also the description about PHY_RST bit in CTRL register
4310 	 * in 8254x_GBe_SDM.pdf.
4311 	 */
4312 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
4313 		CSR_WRITE(sc, WMREG_CTRL,
4314 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
4315 		CSR_WRITE_FLUSH(sc);
4316 		delay(5000);
4317 	}
4318 
4319 	switch (sc->sc_type) {
4320 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
4321 	case WM_T_82541:
4322 	case WM_T_82541_2:
4323 	case WM_T_82547:
4324 	case WM_T_82547_2:
4325 		/*
4326 		 * On some chipsets, a reset through a memory-mapped write
4327 		 * cycle can cause the chip to reset before completing the
		 * write cycle.  This causes a major headache that can be
4329 		 * avoided by issuing the reset via indirect register writes
4330 		 * through I/O space.
4331 		 *
4332 		 * So, if we successfully mapped the I/O BAR at attach time,
4333 		 * use that.  Otherwise, try our luck with a memory-mapped
4334 		 * reset.
4335 		 */
4336 		if (sc->sc_flags & WM_F_IOH_VALID)
4337 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
4338 		else
4339 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
4340 		break;
4341 	case WM_T_82545_3:
4342 	case WM_T_82546_3:
4343 		/* Use the shadow control register on these chips. */
4344 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
4345 		break;
4346 	case WM_T_80003:
4347 		mask = swfwphysem[sc->sc_funcid];
4348 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4349 		wm_get_swfw_semaphore(sc, mask);
4350 		CSR_WRITE(sc, WMREG_CTRL, reg);
4351 		wm_put_swfw_semaphore(sc, mask);
4352 		break;
4353 	case WM_T_ICH8:
4354 	case WM_T_ICH9:
4355 	case WM_T_ICH10:
4356 	case WM_T_PCH:
4357 	case WM_T_PCH2:
4358 	case WM_T_PCH_LPT:
4359 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4360 		if (wm_check_reset_block(sc) == 0) {
4361 			/*
4362 			 * Gate automatic PHY configuration by hardware on
4363 			 * non-managed 82579
4364 			 */
4365 			if ((sc->sc_type == WM_T_PCH2)
4366 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
4367 				!= 0))
				wm_gate_hw_phy_config_ich8lan(sc, 1);

4371 			reg |= CTRL_PHY_RESET;
4372 			phy_reset = 1;
4373 		}
4374 		wm_get_swfwhw_semaphore(sc);
4375 		CSR_WRITE(sc, WMREG_CTRL, reg);
		/* Don't insert a completion barrier while resetting */
4377 		delay(20*1000);
4378 		wm_put_swfwhw_semaphore(sc);
4379 		break;
4380 	case WM_T_82542_2_0:
4381 	case WM_T_82542_2_1:
4382 	case WM_T_82543:
4383 	case WM_T_82540:
4384 	case WM_T_82545:
4385 	case WM_T_82546:
4386 	case WM_T_82571:
4387 	case WM_T_82572:
4388 	case WM_T_82573:
4389 	case WM_T_82574:
4390 	case WM_T_82575:
4391 	case WM_T_82576:
4392 	case WM_T_82580:
4393 	case WM_T_82580ER:
4394 	case WM_T_82583:
4395 	case WM_T_I350:
4396 	case WM_T_I354:
4397 	case WM_T_I210:
4398 	case WM_T_I211:
4399 	default:
4400 		/* Everything else can safely use the documented method. */
4401 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4402 		break;
4403 	}
4404 
4405 	/* Must release the MDIO ownership after MAC reset */
4406 	switch (sc->sc_type) {
4407 	case WM_T_82574:
4408 	case WM_T_82583:
4409 		wm_put_hw_semaphore_82573(sc);
4410 		break;
4411 	default:
4412 		break;
4413 	}
4414 
4415 	if (phy_reset != 0)
4416 		wm_get_cfg_done(sc);
4417 
4418 	/* reload EEPROM */
4419 	switch (sc->sc_type) {
4420 	case WM_T_82542_2_0:
4421 	case WM_T_82542_2_1:
4422 	case WM_T_82543:
4423 	case WM_T_82544:
4424 		delay(10);
4425 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4426 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4427 		CSR_WRITE_FLUSH(sc);
4428 		delay(2000);
4429 		break;
4430 	case WM_T_82540:
4431 	case WM_T_82545:
4432 	case WM_T_82545_3:
4433 	case WM_T_82546:
4434 	case WM_T_82546_3:
4435 		delay(5*1000);
4436 		/* XXX Disable HW ARPs on ASF enabled adapters */
4437 		break;
4438 	case WM_T_82541:
4439 	case WM_T_82541_2:
4440 	case WM_T_82547:
4441 	case WM_T_82547_2:
4442 		delay(20000);
4443 		/* XXX Disable HW ARPs on ASF enabled adapters */
4444 		break;
4445 	case WM_T_82571:
4446 	case WM_T_82572:
4447 	case WM_T_82573:
4448 	case WM_T_82574:
4449 	case WM_T_82583:
4450 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
4451 			delay(10);
4452 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4453 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4454 			CSR_WRITE_FLUSH(sc);
4455 		}
4456 		/* check EECD_EE_AUTORD */
4457 		wm_get_auto_rd_done(sc);
4458 		/*
		 * PHY configuration from the NVM starts only after
		 * EECD_AUTO_RD is set.
4461 		 */
4462 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4463 		    || (sc->sc_type == WM_T_82583))
4464 			delay(25*1000);
4465 		break;
4466 	case WM_T_82575:
4467 	case WM_T_82576:
4468 	case WM_T_82580:
4469 	case WM_T_82580ER:
4470 	case WM_T_I350:
4471 	case WM_T_I354:
4472 	case WM_T_I210:
4473 	case WM_T_I211:
4474 	case WM_T_80003:
4475 		/* check EECD_EE_AUTORD */
4476 		wm_get_auto_rd_done(sc);
4477 		break;
4478 	case WM_T_ICH8:
4479 	case WM_T_ICH9:
4480 	case WM_T_ICH10:
4481 	case WM_T_PCH:
4482 	case WM_T_PCH2:
4483 	case WM_T_PCH_LPT:
4484 		break;
4485 	default:
4486 		panic("%s: unknown type\n", __func__);
4487 	}
4488 
4489 	/* Check whether EEPROM is present or not */
4490 	switch (sc->sc_type) {
4491 	case WM_T_82575:
4492 	case WM_T_82576:
4493 #if 0 /* XXX */
4494 	case WM_T_82580:
4495 	case WM_T_82580ER:
4496 #endif
4497 	case WM_T_I350:
4498 	case WM_T_I354:
4499 	case WM_T_ICH8:
4500 	case WM_T_ICH9:
4501 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4502 			/* Not found */
4503 			sc->sc_flags |= WM_F_EEPROM_INVALID;
4504 			if ((sc->sc_type == WM_T_82575)
4505 			    || (sc->sc_type == WM_T_82576)
4506 			    || (sc->sc_type == WM_T_82580)
4507 			    || (sc->sc_type == WM_T_82580ER)
4508 			    || (sc->sc_type == WM_T_I350)
4509 			    || (sc->sc_type == WM_T_I354))
4510 				wm_reset_init_script_82575(sc);
4511 		}
4512 		break;
4513 	default:
4514 		break;
4515 	}
4516 
4517 	if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
4518 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
4519 		/* clear global device reset status bit */
4520 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4521 	}
4522 
4523 	/* Clear any pending interrupt events. */
4524 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4525 	reg = CSR_READ(sc, WMREG_ICR);
4526 
4527 	/* reload sc_ctrl */
4528 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4529 
4530 	if (sc->sc_type == WM_T_I350)
4531 		wm_set_eee_i350(sc);
4532 
4533 	/* dummy read from WUC */
4534 	if (sc->sc_type == WM_T_PCH)
4535 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
4536 	/*
4537 	 * For PCH, this write will make sure that any noise will be detected
4538 	 * as a CRC error and be dropped rather than show up as a bad packet
4539 	 * to the DMA engine
4540 	 */
4541 	if (sc->sc_type == WM_T_PCH)
4542 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4543 
4544 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4545 		CSR_WRITE(sc, WMREG_WUC, 0);
4546 
4547 	/* XXX need special handling for 82580 */
4548 }
4549 
4550 static void
4551 wm_set_vlan(struct wm_softc *sc)
4552 {
4553 	/* Deal with VLAN enables. */
4554 	if (VLAN_ATTACHED(&sc->sc_ethercom))
4555 		sc->sc_ctrl |= CTRL_VME;
4556 	else
4557 		sc->sc_ctrl &= ~CTRL_VME;
4558 
4559 	/* Write the control registers. */
4560 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4561 }
4562 
4563 /*
4564  * wm_init:		[ifnet interface function]
4565  *
4566  *	Initialize the interface.
4567  */
4568 static int
4569 wm_init(struct ifnet *ifp)
4570 {
4571 	struct wm_softc *sc = ifp->if_softc;
4572 	int ret;
4573 
4574 	WM_LOCK(sc);
4575 	ret = wm_init_locked(ifp);
4576 	WM_UNLOCK(sc);
4577 
4578 	return ret;
4579 }
4580 
4581 static int
4582 wm_init_locked(struct ifnet *ifp)
4583 {
4584 	struct wm_softc *sc = ifp->if_softc;
4585 	struct wm_rxsoft *rxs;
4586 	int i, j, trynum, error = 0;
4587 	uint32_t reg;
4588 
4589 	KASSERT(WM_LOCKED(sc));
4590 	/*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
4593 	 * of the descriptor so that the headers are aligned, for normal mtu,
4594 	 * on such platforms.  One possibility is that the DMA itself is
4595 	 * slightly more efficient if the front of the entire packet (instead
4596 	 * of the front of the headers) is aligned.
4597 	 *
4598 	 * Note we must always set align_tweak to 0 if we are using
4599 	 * jumbo frames.
4600 	 */
4601 #ifdef __NO_STRICT_ALIGNMENT
4602 	sc->sc_align_tweak = 0;
4603 #else
4604 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4605 		sc->sc_align_tweak = 0;
4606 	else
4607 		sc->sc_align_tweak = 2;
4608 #endif /* __NO_STRICT_ALIGNMENT */
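	/*
	 * The 2 byte tweak offsets the 14 byte Ethernet header so that
	 * the IP header following it lands on a 4 byte boundary.  With
	 * jumbo frames the received data can fill the whole cluster,
	 * leaving no room for the 2 byte offset, hence 0 above.
	 */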
4609 
4610 	/* Cancel any pending I/O. */
4611 	wm_stop_locked(ifp, 0);
4612 
4613 	/* update statistics before reset */
4614 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4615 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4616 
4617 	/* Reset the chip to a known state. */
4618 	wm_reset(sc);
4619 
4620 	switch (sc->sc_type) {
4621 	case WM_T_82571:
4622 	case WM_T_82572:
4623 	case WM_T_82573:
4624 	case WM_T_82574:
4625 	case WM_T_82583:
4626 	case WM_T_80003:
4627 	case WM_T_ICH8:
4628 	case WM_T_ICH9:
4629 	case WM_T_ICH10:
4630 	case WM_T_PCH:
4631 	case WM_T_PCH2:
4632 	case WM_T_PCH_LPT:
4633 		if (wm_check_mng_mode(sc) != 0)
4634 			wm_get_hw_control(sc);
4635 		break;
4636 	default:
4637 		break;
4638 	}
4639 
4640 	/* Reset the PHY. */
4641 	if (sc->sc_flags & WM_F_HAS_MII)
4642 		wm_gmii_reset(sc);
4643 
4644 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
4645 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
4646 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
4647 	    || (sc->sc_type == WM_T_PCH_LPT))
4648 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
4649 
4650 	/* Initialize the transmit descriptor ring. */
4651 	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
4652 	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
4653 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4654 	sc->sc_txfree = WM_NTXDESC(sc);
4655 	sc->sc_txnext = 0;
4656 
4657 	if (sc->sc_type < WM_T_82543) {
4658 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
4659 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
4660 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
4661 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
4662 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
4663 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
4664 	} else {
4665 		CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
4666 		CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
4667 		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
4668 		CSR_WRITE(sc, WMREG_TDH, 0);
4669 		CSR_WRITE(sc, WMREG_TIDV, 375);		/* ITR / 4 */
4670 		CSR_WRITE(sc, WMREG_TADV, 375);		/* should be same */
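		/*
		 * 375 is sc_itr / 4 (1500 / 4): TIDV and TADV tick in
		 * 1024ns units versus ITR's 256ns units, so dividing by
		 * four keeps the delay timers in step with the throttling
		 * rate (the "simple timer" behavior described at the ITR
		 * setup further down).
		 */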
4671 
4672 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4673 			/*
4674 			 * Don't write TDT before TCTL.EN is set.
			 * See the documentation.
4676 			 */
4677 			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
4678 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
4679 			    | TXDCTL_WTHRESH(0));
4680 		else {
4681 			CSR_WRITE(sc, WMREG_TDT, 0);
4682 			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
4683 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
4684 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
4685 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
4686 		}
4687 	}
4688 	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
4689 	CSR_WRITE(sc, WMREG_TQSA_HI, 0);
4690 
4691 	/* Initialize the transmit job descriptors. */
4692 	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
4693 		sc->sc_txsoft[i].txs_mbuf = NULL;
4694 	sc->sc_txsfree = WM_TXQUEUELEN(sc);
4695 	sc->sc_txsnext = 0;
4696 	sc->sc_txsdirty = 0;
4697 
4698 	/*
4699 	 * Initialize the receive descriptor and receive job
4700 	 * descriptor rings.
4701 	 */
4702 	if (sc->sc_type < WM_T_82543) {
4703 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
4704 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
4705 		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
4706 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
4707 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
4708 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
4709 
4710 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
4711 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
4712 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
4713 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
4714 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
4715 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
4716 	} else {
4717 		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
4718 		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
4719 		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
4720 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4721 			CSR_WRITE(sc, WMREG_EITR(0), 450);
4722 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
4723 				panic("%s: MCLBYTES %d unsupported for i2575 or higher\n", __func__, MCLBYTES);
4724 			CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
4725 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
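			/*
			 * SRRCTL expresses the buffer size in units of
			 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, which is why
			 * MCLBYTES must be a multiple of that unit, as
			 * checked just above.
			 */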
4726 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
4727 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
4728 			    | RXDCTL_WTHRESH(1));
4729 		} else {
4730 			CSR_WRITE(sc, WMREG_RDH, 0);
4731 			CSR_WRITE(sc, WMREG_RDT, 0);
4732 			CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
4733 			CSR_WRITE(sc, WMREG_RADV, 375);	/* MUST be same */
4734 		}
4735 	}
4736 	for (i = 0; i < WM_NRXDESC; i++) {
4737 		rxs = &sc->sc_rxsoft[i];
4738 		if (rxs->rxs_mbuf == NULL) {
4739 			if ((error = wm_add_rxbuf(sc, i)) != 0) {
4740 				log(LOG_ERR, "%s: unable to allocate or map "
4741 				    "rx buffer %d, error = %d\n",
4742 				    device_xname(sc->sc_dev), i, error);
4743 				/*
4744 				 * XXX Should attempt to run with fewer receive
4745 				 * XXX buffers instead of just failing.
4746 				 */
4747 				wm_rxdrain(sc);
4748 				goto out;
4749 			}
4750 		} else {
4751 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4752 				WM_INIT_RXDESC(sc, i);
4753 			/*
			 * For 82575 and newer devices, the RX descriptors
			 * must be initialized after RCTL.EN is set in
			 * wm_set_filter().
4757 			 */
4758 		}
4759 	}
4760 	sc->sc_rxptr = 0;
4761 	sc->sc_rxdiscard = 0;
4762 	WM_RXCHAIN_RESET(sc);
4763 
4764 	/*
4765 	 * Clear out the VLAN table -- we don't use it (yet).
4766 	 */
4767 	CSR_WRITE(sc, WMREG_VET, 0);
4768 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4769 		trynum = 10; /* Due to hw errata */
4770 	else
4771 		trynum = 1;
4772 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
4773 		for (j = 0; j < trynum; j++)
4774 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4775 
4776 	/*
4777 	 * Set up flow-control parameters.
4778 	 *
4779 	 * XXX Values could probably stand some tuning.
4780 	 */
4781 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4782 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4783 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
4784 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4785 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4786 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4787 	}
4788 
4789 	sc->sc_fcrtl = FCRTL_DFLT;
4790 	if (sc->sc_type < WM_T_82543) {
4791 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4792 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4793 	} else {
4794 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4795 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4796 	}
4797 
4798 	if (sc->sc_type == WM_T_80003)
4799 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4800 	else
4801 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4802 
4803 	/* Writes the control register. */
4804 	wm_set_vlan(sc);
4805 
4806 	if (sc->sc_flags & WM_F_HAS_MII) {
4807 		int val;
4808 
4809 		switch (sc->sc_type) {
4810 		case WM_T_80003:
4811 		case WM_T_ICH8:
4812 		case WM_T_ICH9:
4813 		case WM_T_ICH10:
4814 		case WM_T_PCH:
4815 		case WM_T_PCH2:
4816 		case WM_T_PCH_LPT:
4817 			/*
4818 			 * Set the mac to wait the maximum time between each
4819 			 * iteration and increase the max iterations when
4820 			 * polling the phy; this fixes erroneous timeouts at
4821 			 * 10Mbps.
4822 			 */
4823 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4824 			    0xFFFF);
4825 			val = wm_kmrn_readreg(sc,
4826 			    KUMCTRLSTA_OFFSET_INB_PARAM);
4827 			val |= 0x3F;
4828 			wm_kmrn_writereg(sc,
4829 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
4830 			break;
4831 		default:
4832 			break;
4833 		}
4834 
4835 		if (sc->sc_type == WM_T_80003) {
4836 			val = CSR_READ(sc, WMREG_CTRL_EXT);
4837 			val &= ~CTRL_EXT_LINK_MODE_MASK;
4838 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4839 
4840 			/* Bypass RX and TX FIFO's */
4841 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4842 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4843 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4844 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4845 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4846 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4847 		}
4848 	}
4849 #if 0
4850 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4851 #endif
4852 
4853 	/*
4854 	 * Set up checksum offload parameters.
4855 	 */
4856 	reg = CSR_READ(sc, WMREG_RXCSUM);
4857 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4858 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4859 		reg |= RXCSUM_IPOFL;
4860 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4861 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4862 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4863 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4864 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
4865 
4866 	/* Reset TBI's RXCFG count */
4867 	sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
4868 
4869 	/*
4870 	 * Set up the interrupt registers.
4871 	 */
4872 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4873 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4874 	    ICR_RXO | ICR_RXT0;
4875 	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
4876 		sc->sc_icr |= ICR_RXCFG;
4877 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4878 
4879 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4880 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4881 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
4882 		reg = CSR_READ(sc, WMREG_KABGTXD);
4883 		reg |= KABGTXD_BGSQLBIAS;
4884 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
4885 	}
4886 
4887 	/* Set up the inter-packet gap. */
4888 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4889 
4890 	if (sc->sc_type >= WM_T_82543) {
4891 		/*
4892 		 * Set up the interrupt throttling register (units of 256ns)
4893 		 * Note that a footnote in Intel's documentation says this
4894 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
		 * or 10Mbit mode.  Empirically, this also appears to be
		 * true for the 1024ns units of the other interrupt-related
		 * timer registers -- so, really, we ought
4898 		 * to divide this value by 4 when the link speed is low.
4899 		 *
4900 		 * XXX implement this division at link speed change!
4901 		 */
4902 
4903 		 /*
4904 		  * For N interrupts/sec, set this value to:
4905 		  * 1000000000 / (N * 256).  Note that we set the
4906 		  * absolute and packet timer values to this value
4907 		  * divided by 4 to get "simple timer" behavior.
4908 		  */
4909 
4910 		sc->sc_itr = 1500;		/* 2604 ints/sec */
4911 		CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4912 	}
4913 
4914 	/* Set the VLAN ethernetype. */
4915 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4916 
4917 	/*
4918 	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
4920 	 * we resolve the media type.
4921 	 */
4922 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4923 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
4924 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4925 	if (sc->sc_type >= WM_T_82571)
4926 		sc->sc_tctl |= TCTL_MULR;
4927 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4928 
4929 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4930 		/*
4931 		 * Write TDT after TCTL.EN is set.
		 * See the documentation.
4933 		 */
4934 		CSR_WRITE(sc, WMREG_TDT, 0);
4935 	}
4936 
4937 	if (sc->sc_type == WM_T_80003) {
4938 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
4939 		reg &= ~TCTL_EXT_GCEX_MASK;
4940 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4941 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4942 	}
4943 
4944 	/* Set the media. */
4945 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4946 		goto out;
4947 
4948 	/* Configure for OS presence */
4949 	wm_init_manageability(sc);
4950 
4951 	/*
4952 	 * Set up the receive control register; we actually program
4953 	 * the register when we set the receive filter.  Use multicast
4954 	 * address offset type 0.
4955 	 *
4956 	 * Only the i82544 has the ability to strip the incoming
4957 	 * CRC, so we don't enable that feature.
4958 	 */
4959 	sc->sc_mchash_type = 0;
4960 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4961 	    | RCTL_MO(sc->sc_mchash_type);
4962 
4963 	/*
4964 	 * The I350 has a bug where it always strips the CRC whether
	 * asked to or not, so ask for stripped CRC here and cope with
	 * it in wm_rxintr().
4966 	 */
4967 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4968 	    || (sc->sc_type == WM_T_I210))
4969 		sc->sc_rctl |= RCTL_SECRC;
4970 
4971 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4972 	    && (ifp->if_mtu > ETHERMTU)) {
4973 		sc->sc_rctl |= RCTL_LPE;
4974 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4975 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4976 	}
4977 
4978 	if (MCLBYTES == 2048) {
4979 		sc->sc_rctl |= RCTL_2k;
4980 	} else {
4981 		if (sc->sc_type >= WM_T_82543) {
4982 			switch (MCLBYTES) {
4983 			case 4096:
4984 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4985 				break;
4986 			case 8192:
4987 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4988 				break;
4989 			case 16384:
4990 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4991 				break;
4992 			default:
4993 				panic("wm_init: MCLBYTES %d unsupported",
4994 				    MCLBYTES);
4995 				break;
4996 			}
4997 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
4998 	}
4999 
5000 	/* Set the receive filter. */
5001 	wm_set_filter(sc);
5002 
5003 	/* Enable ECC */
5004 	switch (sc->sc_type) {
5005 	case WM_T_82571:
5006 		reg = CSR_READ(sc, WMREG_PBA_ECC);
5007 		reg |= PBA_ECC_CORR_EN;
5008 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
5009 		break;
5010 	case WM_T_PCH_LPT:
5011 		reg = CSR_READ(sc, WMREG_PBECCSTS);
5012 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
5013 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
5014 
5015 		reg = CSR_READ(sc, WMREG_CTRL);
5016 		reg |= CTRL_MEHE;
5017 		CSR_WRITE(sc, WMREG_CTRL, reg);
5018 		break;
5019 	default:
5020 		break;
5021 	}
5022 
	/* On 82575 and later, set RDT only once RX is enabled */
5024 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5025 		for (i = 0; i < WM_NRXDESC; i++)
5026 			WM_INIT_RXDESC(sc, i);
5027 
5028 	sc->sc_stopping = false;
5029 
5030 	/* Start the one second link check clock. */
5031 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
5032 
5033 	/* ...all done! */
5034 	ifp->if_flags |= IFF_RUNNING;
5035 	ifp->if_flags &= ~IFF_OACTIVE;
5036 
5037  out:
5038 	sc->sc_if_flags = ifp->if_flags;
5039 	if (error)
5040 		log(LOG_ERR, "%s: interface not running\n",
5041 		    device_xname(sc->sc_dev));
5042 	return error;
5043 }
5044 
5045 /*
5046  * wm_rxdrain:
5047  *
5048  *	Drain the receive queue.
5049  */
5050 static void
5051 wm_rxdrain(struct wm_softc *sc)
5052 {
5053 	struct wm_rxsoft *rxs;
5054 	int i;
5055 
5056 	KASSERT(WM_LOCKED(sc));
5057 
5058 	for (i = 0; i < WM_NRXDESC; i++) {
5059 		rxs = &sc->sc_rxsoft[i];
5060 		if (rxs->rxs_mbuf != NULL) {
5061 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5062 			m_freem(rxs->rxs_mbuf);
5063 			rxs->rxs_mbuf = NULL;
5064 		}
5065 	}
5066 }
5067 
5068 /*
5069  * wm_stop:		[ifnet interface function]
5070  *
5071  *	Stop transmission on the interface.
5072  */
5073 static void
5074 wm_stop(struct ifnet *ifp, int disable)
5075 {
5076 	struct wm_softc *sc = ifp->if_softc;
5077 
5078 	WM_LOCK(sc);
5079 	wm_stop_locked(ifp, disable);
5080 	WM_UNLOCK(sc);
5081 }
5082 
5083 static void
5084 wm_stop_locked(struct ifnet *ifp, int disable)
5085 {
5086 	struct wm_softc *sc = ifp->if_softc;
5087 	struct wm_txsoft *txs;
5088 	int i;
5089 
5090 	KASSERT(WM_LOCKED(sc));
5091 
5092 	sc->sc_stopping = true;
5093 
5094 	/* Stop the one second clock. */
5095 	callout_stop(&sc->sc_tick_ch);
5096 
5097 	/* Stop the 82547 Tx FIFO stall check timer. */
5098 	if (sc->sc_type == WM_T_82547)
5099 		callout_stop(&sc->sc_txfifo_ch);
5100 
5101 	if (sc->sc_flags & WM_F_HAS_MII) {
5102 		/* Down the MII. */
5103 		mii_down(&sc->sc_mii);
5104 	} else {
5105 #if 0
5106 		/* Should we clear PHY's status properly? */
5107 		wm_reset(sc);
5108 #endif
5109 	}
5110 
5111 	/* Stop the transmit and receive processes. */
5112 	CSR_WRITE(sc, WMREG_TCTL, 0);
5113 	CSR_WRITE(sc, WMREG_RCTL, 0);
5114 	sc->sc_rctl &= ~RCTL_EN;
5115 
5116 	/*
5117 	 * Clear the interrupt mask to ensure the device cannot assert its
5118 	 * interrupt line.
5119 	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
5120 	 * any currently pending or shared interrupt.
5121 	 */
5122 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5123 	sc->sc_icr = 0;
5124 
5125 	/* Release any queued transmit buffers. */
5126 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
5127 		txs = &sc->sc_txsoft[i];
5128 		if (txs->txs_mbuf != NULL) {
5129 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
5130 			m_freem(txs->txs_mbuf);
5131 			txs->txs_mbuf = NULL;
5132 		}
5133 	}
5134 
5135 	/* Mark the interface as down and cancel the watchdog timer. */
5136 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
5137 	ifp->if_timer = 0;
5138 
5139 	if (disable)
5140 		wm_rxdrain(sc);
5141 
5142 #if 0 /* notyet */
5143 	if (sc->sc_type >= WM_T_82544)
5144 		CSR_WRITE(sc, WMREG_WUC, 0);
5145 #endif
5146 }
5147 
5148 void
5149 wm_get_auto_rd_done(struct wm_softc *sc)
5150 {
5151 	int i;
5152 
5153 	/* wait for eeprom to reload */
5154 	switch (sc->sc_type) {
5155 	case WM_T_82571:
5156 	case WM_T_82572:
5157 	case WM_T_82573:
5158 	case WM_T_82574:
5159 	case WM_T_82583:
5160 	case WM_T_82575:
5161 	case WM_T_82576:
5162 	case WM_T_82580:
5163 	case WM_T_82580ER:
5164 	case WM_T_I350:
5165 	case WM_T_I354:
5166 	case WM_T_I210:
5167 	case WM_T_I211:
5168 	case WM_T_80003:
5169 	case WM_T_ICH8:
5170 	case WM_T_ICH9:
5171 		for (i = 0; i < 10; i++) {
5172 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
5173 				break;
5174 			delay(1000);
5175 		}
5176 		if (i == 10) {
5177 			log(LOG_ERR, "%s: auto read from eeprom failed to "
5178 			    "complete\n", device_xname(sc->sc_dev));
5179 		}
5180 		break;
5181 	default:
5182 		break;
5183 	}
5184 }
5185 
5186 void
5187 wm_lan_init_done(struct wm_softc *sc)
5188 {
5189 	uint32_t reg = 0;
5190 	int i;
5191 
5192 	/* wait for the LAN init sequence to complete */
5193 	switch (sc->sc_type) {
5194 	case WM_T_ICH10:
5195 	case WM_T_PCH:
5196 	case WM_T_PCH2:
5197 	case WM_T_PCH_LPT:
5198 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
5199 			reg = CSR_READ(sc, WMREG_STATUS);
5200 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
5201 				break;
5202 			delay(100);
5203 		}
5204 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
5205 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
5206 			    "complete\n", device_xname(sc->sc_dev), __func__);
5207 		}
5208 		break;
5209 	default:
5210 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
5211 		    __func__);
5212 		break;
5213 	}
5214 
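	/* Clear LAN_INIT_DONE so the next initialization can be observed. */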
5215 	reg &= ~STATUS_LAN_INIT_DONE;
5216 	CSR_WRITE(sc, WMREG_STATUS, reg);
5217 }
5218 
5219 void
5220 wm_get_cfg_done(struct wm_softc *sc)
5221 {
5222 	int mask;
5223 	uint32_t reg;
5224 	int i;
5225 
5226 	/* wait for eeprom to reload */
5227 	switch (sc->sc_type) {
5228 	case WM_T_82542_2_0:
5229 	case WM_T_82542_2_1:
5230 		/* null */
5231 		break;
5232 	case WM_T_82543:
5233 	case WM_T_82544:
5234 	case WM_T_82540:
5235 	case WM_T_82545:
5236 	case WM_T_82545_3:
5237 	case WM_T_82546:
5238 	case WM_T_82546_3:
5239 	case WM_T_82541:
5240 	case WM_T_82541_2:
5241 	case WM_T_82547:
5242 	case WM_T_82547_2:
5243 	case WM_T_82573:
5244 	case WM_T_82574:
5245 	case WM_T_82583:
5246 		/* generic */
5247 		delay(10*1000);
5248 		break;
5249 	case WM_T_80003:
5250 	case WM_T_82571:
5251 	case WM_T_82572:
5252 	case WM_T_82575:
5253 	case WM_T_82576:
5254 	case WM_T_82580:
5255 	case WM_T_82580ER:
5256 	case WM_T_I350:
5257 	case WM_T_I354:
5258 	case WM_T_I210:
5259 	case WM_T_I211:
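		/* Each function has its own CFGDONE bit in EEMNGCTL. */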
5260 		if (sc->sc_type == WM_T_82571) {
5261 			/* All 82571 functions share port 0's CFGDONE bit */
5262 			mask = EEMNGCTL_CFGDONE_0;
5263 		} else
5264 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
5265 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
5266 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
5267 				break;
5268 			delay(1000);
5269 		}
5270 		if (i >= WM_PHY_CFG_TIMEOUT) {
5271 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
5272 				device_xname(sc->sc_dev), __func__));
5273 		}
5274 		break;
5275 	case WM_T_ICH8:
5276 	case WM_T_ICH9:
5277 	case WM_T_ICH10:
5278 	case WM_T_PCH:
5279 	case WM_T_PCH2:
5280 	case WM_T_PCH_LPT:
5281 		delay(10*1000);
5282 		if (sc->sc_type >= WM_T_ICH10)
5283 			wm_lan_init_done(sc);
5284 		else
5285 			wm_get_auto_rd_done(sc);
5286 
5287 		reg = CSR_READ(sc, WMREG_STATUS);
5288 		if ((reg & STATUS_PHYRA) != 0)
5289 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
5290 		break;
5291 	default:
5292 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
5293 		    __func__);
5294 		break;
5295 	}
5296 }
5297 
5298 /*
5299  * wm_acquire_eeprom:
5300  *
5301  *	Perform the EEPROM handshake required on some chips.
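 *
 *	Depending on chip flags this takes a firmware/hardware semaphore
 *	first and, where required, asserts EECD_EE_REQ and waits for
 *	EECD_EE_GNT before software may drive the EEPROM pins.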
5302  */
5303 static int
5304 wm_acquire_eeprom(struct wm_softc *sc)
5305 {
5306 	uint32_t reg;
5307 	int x;
5308 	int ret = 0;
5309 
5310 	/* Always succeeds; flash-backed NVM needs no handshake. */
5311 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
5312 		return 0;
5313 
5314 	if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
5315 		ret = wm_get_swfwhw_semaphore(sc);
5316 	} else if (sc->sc_flags & WM_F_SWFW_SYNC) {
5317 		/* this will also do wm_get_swsm_semaphore() if needed */
5318 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
5319 	} else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5320 		ret = wm_get_swsm_semaphore(sc);
5321 	}
5322 
5323 	if (ret) {
5324 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5325 			__func__);
5326 		return 1;
5327 	}
5328 
5329 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
5330 		reg = CSR_READ(sc, WMREG_EECD);
5331 
5332 		/* Request EEPROM access. */
5333 		reg |= EECD_EE_REQ;
5334 		CSR_WRITE(sc, WMREG_EECD, reg);
5335 
5336 		/* ...and wait for it to be granted. */
5337 		for (x = 0; x < 1000; x++) {
5338 			reg = CSR_READ(sc, WMREG_EECD);
5339 			if (reg & EECD_EE_GNT)
5340 				break;
5341 			delay(5);
5342 		}
5343 		if ((reg & EECD_EE_GNT) == 0) {
5344 			aprint_error_dev(sc->sc_dev,
5345 			    "could not acquire EEPROM GNT\n");
5346 			reg &= ~EECD_EE_REQ;
5347 			CSR_WRITE(sc, WMREG_EECD, reg);
5348 			if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5349 				wm_put_swfwhw_semaphore(sc);
5350 			if (sc->sc_flags & WM_F_SWFW_SYNC)
5351 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5352 			else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5353 				wm_put_swsm_semaphore(sc);
5354 			return 1;
5355 		}
5356 	}
5357 
5358 	return 0;
5359 }
5360 
5361 /*
5362  * wm_release_eeprom:
5363  *
5364  *	Release the EEPROM mutex.
5365  */
5366 static void
5367 wm_release_eeprom(struct wm_softc *sc)
5368 {
5369 	uint32_t reg;
5370 
5371 	/* Always succeeds; flash-backed NVM needs no handshake. */
5372 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
5373 		return;
5374 
5375 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
5376 		reg = CSR_READ(sc, WMREG_EECD);
5377 		reg &= ~EECD_EE_REQ;
5378 		CSR_WRITE(sc, WMREG_EECD, reg);
5379 	}
5380 
5381 	if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5382 		wm_put_swfwhw_semaphore(sc);
5383 	if (sc->sc_flags & WM_F_SWFW_SYNC)
5384 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5385 	else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5386 		wm_put_swsm_semaphore(sc);
5387 }
5388 
5389 /*
5390  * wm_eeprom_sendbits:
5391  *
5392  *	Send a series of bits to the EEPROM.
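 *
 *	Bits go out MSB first: each bit is placed on the DI line, then
 *	SK is pulsed high and low, with a 2us settle time around each
 *	clock edge.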
5393  */
5394 static void
5395 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
5396 {
5397 	uint32_t reg;
5398 	int x;
5399 
5400 	reg = CSR_READ(sc, WMREG_EECD);
5401 
5402 	for (x = nbits; x > 0; x--) {
5403 		if (bits & (1U << (x - 1)))
5404 			reg |= EECD_DI;
5405 		else
5406 			reg &= ~EECD_DI;
5407 		CSR_WRITE(sc, WMREG_EECD, reg);
5408 		CSR_WRITE_FLUSH(sc);
5409 		delay(2);
5410 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
5411 		CSR_WRITE_FLUSH(sc);
5412 		delay(2);
5413 		CSR_WRITE(sc, WMREG_EECD, reg);
5414 		CSR_WRITE_FLUSH(sc);
5415 		delay(2);
5416 	}
5417 }
5418 
5419 /*
5420  * wm_eeprom_recvbits:
5421  *
5422  *	Receive a series of bits from the EEPROM.
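 *
 *	The mirror image of wm_eeprom_sendbits(): SK is raised, DO is
 *	sampled while the clock is high, and bits accumulate MSB first.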
5423  */
5424 static void
5425 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
5426 {
5427 	uint32_t reg, val;
5428 	int x;
5429 
5430 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
5431 
5432 	val = 0;
5433 	for (x = nbits; x > 0; x--) {
5434 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
5435 		CSR_WRITE_FLUSH(sc);
5436 		delay(2);
5437 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
5438 			val |= (1U << (x - 1));
5439 		CSR_WRITE(sc, WMREG_EECD, reg);
5440 		CSR_WRITE_FLUSH(sc);
5441 		delay(2);
5442 	}
5443 	*valp = val;
5444 }
5445 
5446 /*
5447  * wm_read_eeprom_uwire:
5448  *
5449  *	Read a word from the EEPROM using the MicroWire protocol.
5450  */
5451 static int
5452 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5453 {
5454 	uint32_t reg, val;
5455 	int i;
5456 
5457 	for (i = 0; i < wordcnt; i++) {
5458 		/* Clear SK and DI. */
5459 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
5460 		CSR_WRITE(sc, WMREG_EECD, reg);
5461 
5462 		/*
5463 		 * XXX: workaround for a bug in qemu-0.12.x and prior
5464 		 * and Xen.
5465 		 *
5466 		 * We use this workaround only for the 82540 because qemu's
5467 		 * e1000 acts as an 82540.
5468 		 */
5469 		if (sc->sc_type == WM_T_82540) {
5470 			reg |= EECD_SK;
5471 			CSR_WRITE(sc, WMREG_EECD, reg);
5472 			reg &= ~EECD_SK;
5473 			CSR_WRITE(sc, WMREG_EECD, reg);
5474 			CSR_WRITE_FLUSH(sc);
5475 			delay(2);
5476 		}
5477 		/* XXX: end of workaround */
5478 
5479 		/* Set CHIP SELECT. */
5480 		reg |= EECD_CS;
5481 		CSR_WRITE(sc, WMREG_EECD, reg);
5482 		CSR_WRITE_FLUSH(sc);
5483 		delay(2);
5484 
5485 		/* Shift in the READ command. */
5486 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
5487 
5488 		/* Shift in address. */
5489 		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
5490 
5491 		/* Shift out the data. */
5492 		wm_eeprom_recvbits(sc, &val, 16);
5493 		data[i] = val & 0xffff;
5494 
5495 		/* Clear CHIP SELECT. */
5496 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
5497 		CSR_WRITE(sc, WMREG_EECD, reg);
5498 		CSR_WRITE_FLUSH(sc);
5499 		delay(2);
5500 	}
5501 
5502 	return 0;
5503 }
5504 
5505 /*
5506  * wm_spi_eeprom_ready:
5507  *
5508  *	Wait for a SPI EEPROM to be ready for commands.
5509  */
5510 static int
5511 wm_spi_eeprom_ready(struct wm_softc *sc)
5512 {
5513 	uint32_t val;
5514 	int usec;
5515 
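	/* Poll the status register until SPI_SR_RDY (the busy bit) clears. */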
5516 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
5517 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
5518 		wm_eeprom_recvbits(sc, &val, 8);
5519 		if ((val & SPI_SR_RDY) == 0)
5520 			break;
5521 	}
5522 	if (usec >= SPI_MAX_RETRIES) {
5523 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
5524 		return 1;
5525 	}
5526 	return 0;
5527 }
5528 
5529 /*
5530  * wm_read_eeprom_spi:
5531  *
5532  *	Read a word from the EEPROM using the SPI protocol.
5533  */
5534 static int
5535 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5536 {
5537 	uint32_t reg, val;
5538 	int i;
5539 	uint8_t opc;
5540 
5541 	/* Clear SK and CS. */
5542 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
5543 	CSR_WRITE(sc, WMREG_EECD, reg);
5544 	CSR_WRITE_FLUSH(sc);
5545 	delay(2);
5546 
5547 	if (wm_spi_eeprom_ready(sc))
5548 		return 1;
5549 
5550 	/* Toggle CS to flush commands. */
5551 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
5552 	CSR_WRITE_FLUSH(sc);
5553 	delay(2);
5554 	CSR_WRITE(sc, WMREG_EECD, reg);
5555 	CSR_WRITE_FLUSH(sc);
5556 	delay(2);
5557 
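	/*
	 * Small SPI parts use 8 address bits; the ninth address bit (A8)
	 * is carried in the opcode itself, which appears to be what
	 * SPI_OPC_A8 selects for words at offset 128 and above.
	 */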
5558 	opc = SPI_OPC_READ;
5559 	if (sc->sc_ee_addrbits == 8 && word >= 128)
5560 		opc |= SPI_OPC_A8;
5561 
5562 	wm_eeprom_sendbits(sc, opc, 8);
5563 	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
5564 
5565 	for (i = 0; i < wordcnt; i++) {
5566 		wm_eeprom_recvbits(sc, &val, 16);
5567 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
5568 	}
5569 
5570 	/* Raise CS and clear SK. */
5571 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
5572 	CSR_WRITE(sc, WMREG_EECD, reg);
5573 	CSR_WRITE_FLUSH(sc);
5574 	delay(2);
5575 
5576 	return 0;
5577 }
5578 
5579 #define NVM_CHECKSUM			0xBABA
5580 #define EEPROM_SIZE			0x0040
5581 #define NVM_COMPAT			0x0003
5582 #define NVM_COMPAT_VALID_CHECKSUM	0x0001
5583 #define NVM_FUTURE_INIT_WORD1			0x0019
5584 #define NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM	0x0040
5585 
5586 /*
5587  * wm_validate_eeprom_checksum
5588  *
5589  * The checksum is defined as the sum of the first 64 (16 bit) words.
5590  */
5591 static int
5592 wm_validate_eeprom_checksum(struct wm_softc *sc)
5593 {
5594 	uint16_t checksum;
5595 	uint16_t eeprom_data;
5596 #ifdef WM_DEBUG
5597 	uint16_t csum_wordaddr, valid_checksum;
5598 #endif
5599 	int i;
5600 
5601 	checksum = 0;
5602 
5603 	/* Don't check for I211 */
5604 	if (sc->sc_type == WM_T_I211)
5605 		return 0;
5606 
5607 #ifdef WM_DEBUG
5608 	if (sc->sc_type == WM_T_PCH_LPT) {
5609 		csum_wordaddr = NVM_COMPAT;
5610 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
5611 	} else {
5612 		csum_wordaddr = NVM_FUTURE_INIT_WORD1;
5613 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
5614 	}
5615 
5616 	/* Dump EEPROM image for debug */
5617 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5618 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5619 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
5620 		wm_read_eeprom(sc, csum_wordaddr, 1, &eeprom_data);
5621 		if ((eeprom_data & valid_checksum) == 0) {
5622 			DPRINTF(WM_DEBUG_NVM,
5623 			    ("%s: NVM needs to be updated (%04x != %04x)\n",
5624 				device_xname(sc->sc_dev), eeprom_data,
5625 				    valid_checksum));
5626 		}
5627 	}
5628 
5629 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
5630 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
5631 		for (i = 0; i < EEPROM_SIZE; i++) {
5632 			if (wm_read_eeprom(sc, i, 1, &eeprom_data))
5633 				printf("XX ");
5634 			else
5635 				printf("%04x ", eeprom_data);
5636 			if (i % 8 == 7)
5637 				printf("\n");
5638 		}
5639 	}
5640 
5641 #endif /* WM_DEBUG */
5642 
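	/* Sum all 64 words; a valid image totals NVM_CHECKSUM (0xBABA). */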
5643 	for (i = 0; i < EEPROM_SIZE; i++) {
5644 		if (wm_read_eeprom(sc, i, 1, &eeprom_data))
5645 			return 1;
5646 		checksum += eeprom_data;
5647 	}
5648 
5649 	if (checksum != (uint16_t) NVM_CHECKSUM) {
5650 #ifdef WM_DEBUG
5651 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
5652 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
5653 #endif
5654 	}
5655 
5656 	return 0;
5657 }
5658 
5659 /*
5660  * wm_read_eeprom:
5661  *
5662  *	Read data from the serial EEPROM.
5663  */
5664 static int
5665 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5666 {
5667 	int rv;
5668 
5669 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
5670 		return 1;
5671 
5672 	if (wm_acquire_eeprom(sc))
5673 		return 1;
5674 
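	/* Dispatch on access method: ICH8 flash, EERD register, SPI or MicroWire. */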
5675 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5676 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5677 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
5678 		rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
5679 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
5680 		rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
5681 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
5682 		rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
5683 	else
5684 		rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
5685 
5686 	wm_release_eeprom(sc);
5687 	return rv;
5688 }
5689 
5690 static int
5691 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
5692     uint16_t *data)
5693 {
5694 	int i, eerd = 0;
5695 	int error = 0;
5696 
5697 	for (i = 0; i < wordcnt; i++) {
5698 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
5699 
5700 		CSR_WRITE(sc, WMREG_EERD, eerd);
5701 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
5702 		if (error != 0)
5703 			break;
5704 
5705 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
5706 	}
5707 
5708 	return error;
5709 }
5710 
5711 static int
5712 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
5713 {
5714 	uint32_t attempts = 100000;
5715 	uint32_t i, reg = 0;
5716 	int32_t done = -1;
5717 
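	/* Poll the done bit for up to 100000 iterations of 5us (~500ms). */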
5718 	for (i = 0; i < attempts; i++) {
5719 		reg = CSR_READ(sc, rw);
5720 
5721 		if (reg & EERD_DONE) {
5722 			done = 0;
5723 			break;
5724 		}
5725 		delay(5);
5726 	}
5727 
5728 	return done;
5729 }
5730 
5731 static int
5732 wm_check_alt_mac_addr(struct wm_softc *sc)
5733 {
5734 	uint16_t myea[ETHER_ADDR_LEN / 2];
5735 	uint16_t offset = EEPROM_OFF_MACADDR;
5736 
5737 	/* Try to read alternative MAC address pointer */
5738 	if (wm_read_eeprom(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
5739 		return -1;
5740 
5741 	/* Check pointer */
5742 	if (offset == 0xffff)
5743 		return -1;
5744 
5745 	/*
5746 	 * Check whether the alternative MAC address is valid.
5747 	 * Some cards have a non-0xffff pointer but don't actually
5748 	 * use an alternative MAC address.
5749 	 *
5750 	 * A valid unicast address has the multicast (group) bit clear.
5751 	 */
5752 	if (wm_read_eeprom(sc, offset, 1, myea) == 0)
5753 		if (((myea[0] & 0xff) & 0x01) == 0)
5754 			return 0; /* found! */
5755 
5756 	/* not found */
5757 	return -1;
5758 }
5759 
5760 static int
5761 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
5762 {
5763 	uint16_t myea[ETHER_ADDR_LEN / 2];
5764 	uint16_t offset = EEPROM_OFF_MACADDR;
5765 	int do_invert = 0;
5766 
5767 	switch (sc->sc_type) {
5768 	case WM_T_82580:
5769 	case WM_T_82580ER:
5770 	case WM_T_I350:
5771 	case WM_T_I354:
5772 		switch (sc->sc_funcid) {
5773 		case 0:
5774 			/* default value (== EEPROM_OFF_MACADDR) */
5775 			break;
5776 		case 1:
5777 			offset = EEPROM_OFF_LAN1;
5778 			break;
5779 		case 2:
5780 			offset = EEPROM_OFF_LAN2;
5781 			break;
5782 		case 3:
5783 			offset = EEPROM_OFF_LAN3;
5784 			break;
5785 		default:
5786 			goto bad;
5787 			/* NOTREACHED */
5788 			break;
5789 		}
5790 		break;
5791 	case WM_T_82571:
5792 	case WM_T_82575:
5793 	case WM_T_82576:
5794 	case WM_T_80003:
5795 	case WM_T_I210:
5796 	case WM_T_I211:
5797 		if (wm_check_alt_mac_addr(sc) != 0) {
5798 			/* reset the offset to LAN0 */
5799 			offset = EEPROM_OFF_MACADDR;
5800 			if ((sc->sc_funcid & 0x01) == 1)
5801 				do_invert = 1;
5802 			goto do_read;
5803 		}
5804 		switch (sc->sc_funcid) {
5805 		case 0:
5806 			/*
5807 			 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR
5808 			 * itself.
5809 			 */
5810 			break;
5811 		case 1:
5812 			offset += EEPROM_OFF_MACADDR_LAN1;
5813 			break;
5814 		case 2:
5815 			offset += EEPROM_OFF_MACADDR_LAN2;
5816 			break;
5817 		case 3:
5818 			offset += EEPROM_OFF_MACADDR_LAN3;
5819 			break;
5820 		default:
5821 			goto bad;
5822 			/* NOTREACHED */
5823 			break;
5824 		}
5825 		break;
5826 	default:
5827 		if ((sc->sc_funcid & 0x01) == 1)
5828 			do_invert = 1;
5829 		break;
5830 	}
5831 
5832  do_read:
5833 	if (wm_read_eeprom(sc, offset, sizeof(myea) / sizeof(myea[0]),
5834 		myea) != 0) {
5835 		goto bad;
5836 	}
5837 
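	/* Each 16-bit EEPROM word holds two address octets, low byte first. */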
5838 	enaddr[0] = myea[0] & 0xff;
5839 	enaddr[1] = myea[0] >> 8;
5840 	enaddr[2] = myea[1] & 0xff;
5841 	enaddr[3] = myea[1] >> 8;
5842 	enaddr[4] = myea[2] & 0xff;
5843 	enaddr[5] = myea[2] >> 8;
5844 
5845 	/*
5846 	 * Toggle the LSB of the MAC address on the second port
5847 	 * of some dual port cards.
5848 	 */
5849 	if (do_invert != 0)
5850 		enaddr[5] ^= 1;
5851 
5852 	return 0;
5853 
5854  bad:
5855 	aprint_error_dev(sc->sc_dev, "unable to read Ethernet address\n");
5856 
5857 	return -1;
5858 }
5859 
5860 /*
5861  * wm_add_rxbuf:
5862  *
5863  *	Add a receive buffer to the indicated descriptor.
5864  */
5865 static int
5866 wm_add_rxbuf(struct wm_softc *sc, int idx)
5867 {
5868 	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
5869 	struct mbuf *m;
5870 	int error;
5871 
5872 	KASSERT(WM_LOCKED(sc));
5873 
5874 	MGETHDR(m, M_DONTWAIT, MT_DATA);
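	/* Allocate an mbuf header and attach a cluster for the frame data. */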
5875 	if (m == NULL)
5876 		return ENOBUFS;
5877 
5878 	MCLGET(m, M_DONTWAIT);
5879 	if ((m->m_flags & M_EXT) == 0) {
5880 		m_freem(m);
5881 		return ENOBUFS;
5882 	}
5883 
5884 	if (rxs->rxs_mbuf != NULL)
5885 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5886 
5887 	rxs->rxs_mbuf = m;
5888 
5889 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5890 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
5891 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
5892 	if (error) {
5893 		/* XXX XXX XXX */
5894 		aprint_error_dev(sc->sc_dev,
5895 		    "unable to load rx DMA map %d, error = %d\n",
5896 		    idx, error);
5897 		panic("wm_add_rxbuf");
5898 	}
5899 
5900 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5901 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5902 
5903 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5904 		if ((sc->sc_rctl & RCTL_EN) != 0)
5905 			WM_INIT_RXDESC(sc, idx);
5906 	} else
5907 		WM_INIT_RXDESC(sc, idx);
5908 
5909 	return 0;
5910 }
5911 
5912 /*
5913  * wm_set_ral:
5914  *
5915  *	Set an entry in the receive address list.
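 *
 *	RAL_AV in the high word marks the entry as valid; passing a
 *	NULL enaddr clears the slot.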
5916  */
5917 static void
5918 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
5919 {
5920 	uint32_t ral_lo, ral_hi;
5921 
5922 	if (enaddr != NULL) {
5923 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
5924 		    (enaddr[3] << 24);
5925 		ral_hi = enaddr[4] | (enaddr[5] << 8);
5926 		ral_hi |= RAL_AV;
5927 	} else {
5928 		ral_lo = 0;
5929 		ral_hi = 0;
5930 	}
5931 
5932 	if (sc->sc_type >= WM_T_82544) {
5933 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
5934 		    ral_lo);
5935 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
5936 		    ral_hi);
5937 	} else {
5938 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
5939 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
5940 	}
5941 }
5942 
5943 /*
5944  * wm_mchash:
5945  *
5946  *	Compute the hash of the multicast address for the 4096-bit
5947  *	multicast filter.
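 *
 *	The hash is built from the last two octets of the address;
 *	sc_mchash_type selects which bit offsets are used.  ICH8-family
 *	chips have a smaller filter, hence the 10-bit (0x3ff) result.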
5948  */
5949 static uint32_t
5950 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
5951 {
5952 	static const int lo_shift[4] = { 4, 3, 2, 0 };
5953 	static const int hi_shift[4] = { 4, 5, 6, 8 };
5954 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
5955 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
5956 	uint32_t hash;
5957 
5958 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5959 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5960 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
5961 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
5962 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
5963 		return (hash & 0x3ff);
5964 	}
5965 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
5966 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
5967 
5968 	return (hash & 0xfff);
5969 }
5970 
5971 /*
5972  * wm_set_filter:
5973  *
5974  *	Set up the receive filter.
5975  */
5976 static void
5977 wm_set_filter(struct wm_softc *sc)
5978 {
5979 	struct ethercom *ec = &sc->sc_ethercom;
5980 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5981 	struct ether_multi *enm;
5982 	struct ether_multistep step;
5983 	bus_addr_t mta_reg;
5984 	uint32_t hash, reg, bit;
5985 	int i, size;
5986 
5987 	if (sc->sc_type >= WM_T_82544)
5988 		mta_reg = WMREG_CORDOVA_MTA;
5989 	else
5990 		mta_reg = WMREG_MTA;
5991 
5992 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
5993 
5994 	if (ifp->if_flags & IFF_BROADCAST)
5995 		sc->sc_rctl |= RCTL_BAM;
5996 	if (ifp->if_flags & IFF_PROMISC) {
5997 		sc->sc_rctl |= RCTL_UPE;
5998 		goto allmulti;
5999 	}
6000 
6001 	/*
6002 	 * Set the station address in the first RAL slot, and
6003 	 * clear the remaining slots.
6004 	 */
6005 	if (sc->sc_type == WM_T_ICH8)
6006 		size = WM_RAL_TABSIZE_ICH8 - 1;
6007 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
6008 	    || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
6009 	    || (sc->sc_type == WM_T_PCH_LPT))
6010 		size = WM_RAL_TABSIZE_ICH8;
6011 	else if (sc->sc_type == WM_T_82575)
6012 		size = WM_RAL_TABSIZE_82575;
6013 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
6014 		size = WM_RAL_TABSIZE_82576;
6015 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
6016 		size = WM_RAL_TABSIZE_I350;
6017 	else
6018 		size = WM_RAL_TABSIZE;
6019 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
6020 	for (i = 1; i < size; i++)
6021 		wm_set_ral(sc, NULL, i);
6022 
6023 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
6024 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
6025 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
6026 		size = WM_ICH8_MC_TABSIZE;
6027 	else
6028 		size = WM_MC_TABSIZE;
6029 	/* Clear out the multicast table. */
6030 	for (i = 0; i < size; i++)
6031 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
6032 
6033 	ETHER_FIRST_MULTI(step, ec, enm);
6034 	while (enm != NULL) {
6035 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
6036 			/*
6037 			 * We must listen to a range of multicast addresses.
6038 			 * For now, just accept all multicasts, rather than
6039 			 * trying to set only those filter bits needed to match
6040 			 * the range.  (At this time, the only use of address
6041 			 * ranges is for IP multicast routing, for which the
6042 			 * range is big enough to require all bits set.)
6043 			 */
6044 			goto allmulti;
6045 		}
6046 
6047 		hash = wm_mchash(sc, enm->enm_addrlo);
6048 
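		/*
		 * The upper bits of the hash select a 32-bit word in the
		 * multicast table; the low 5 bits select a bit within it.
		 */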
6049 		reg = (hash >> 5);
6050 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
6051 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
6052 		    || (sc->sc_type == WM_T_PCH2)
6053 		    || (sc->sc_type == WM_T_PCH_LPT))
6054 			reg &= 0x1f;
6055 		else
6056 			reg &= 0x7f;
6057 		bit = hash & 0x1f;
6058 
6059 		hash = CSR_READ(sc, mta_reg + (reg << 2));
6060 		hash |= 1U << bit;
6061 
6062 		/* XXX Hardware bug?? */
6063 		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
6064 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
6065 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
6066 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
6067 		} else
6068 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
6069 
6070 		ETHER_NEXT_MULTI(step, enm);
6071 	}
6072 
6073 	ifp->if_flags &= ~IFF_ALLMULTI;
6074 	goto setit;
6075 
6076  allmulti:
6077 	ifp->if_flags |= IFF_ALLMULTI;
6078 	sc->sc_rctl |= RCTL_MPE;
6079 
6080  setit:
6081 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
6082 }
6083 
6084 /*
6085  * wm_tbi_mediainit:
6086  *
6087  *	Initialize media for use on 1000BASE-X devices.
6088  */
6089 static void
6090 wm_tbi_mediainit(struct wm_softc *sc)
6091 {
6092 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6093 	const char *sep = "";
6094 
6095 	if (sc->sc_type < WM_T_82543)
6096 		sc->sc_tipg = TIPG_WM_DFLT;
6097 	else
6098 		sc->sc_tipg = TIPG_LG_DFLT;
6099 
6100 	sc->sc_tbi_anegticks = 5;
6101 
6102 	/* Initialize our media structures */
6103 	sc->sc_mii.mii_ifp = ifp;
6104 
6105 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
6106 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
6107 	    wm_tbi_mediastatus);
6108 
6109 	/*
6110 	 * SWD Pins:
6111 	 *
6112 	 *	0 = Link LED (output)
6113 	 *	1 = Loss Of Signal (input)
6114 	 */
6115 	sc->sc_ctrl |= CTRL_SWDPIO(0);
6116 	sc->sc_ctrl &= ~CTRL_SWDPIO(1);
6117 
6118 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6119 
6120 #define	ADD(ss, mm, dd)							\
6121 do {									\
6122 	aprint_normal("%s%s", sep, ss);					\
6123 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
6124 	sep = ", ";							\
6125 } while (/*CONSTCOND*/0)
6126 
6127 	aprint_normal_dev(sc->sc_dev, "");
6128 	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
6129 	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
6130 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
6131 	aprint_normal("\n");
6132 
6133 #undef ADD
6134 
6135 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
6136 }
6137 
6138 /*
6139  * wm_tbi_mediastatus:	[ifmedia interface function]
6140  *
6141  *	Get the current interface media status on a 1000BASE-X device.
6142  */
6143 static void
6144 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6145 {
6146 	struct wm_softc *sc = ifp->if_softc;
6147 	uint32_t ctrl, status;
6148 
6149 	ifmr->ifm_status = IFM_AVALID;
6150 	ifmr->ifm_active = IFM_ETHER;
6151 
6152 	status = CSR_READ(sc, WMREG_STATUS);
6153 	if ((status & STATUS_LU) == 0) {
6154 		ifmr->ifm_active |= IFM_NONE;
6155 		return;
6156 	}
6157 
6158 	ifmr->ifm_status |= IFM_ACTIVE;
6159 	ifmr->ifm_active |= IFM_1000_SX;
6160 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
6161 		ifmr->ifm_active |= IFM_FDX;
6162 	else
6163 		ifmr->ifm_active |= IFM_HDX;
6164 	ctrl = CSR_READ(sc, WMREG_CTRL);
6165 	if (ctrl & CTRL_RFCE)
6166 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
6167 	if (ctrl & CTRL_TFCE)
6168 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
6169 }
6170 
6171 /*
6172  * wm_tbi_mediachange:	[ifmedia interface function]
6173  *
6174  *	Set hardware to newly-selected media on a 1000BASE-X device.
6175  */
6176 static int
6177 wm_tbi_mediachange(struct ifnet *ifp)
6178 {
6179 	struct wm_softc *sc = ifp->if_softc;
6180 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6181 	uint32_t status;
6182 	int i;
6183 
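	/*
	 * Build the transmit configuration word: advertise symmetric and
	 * asymmetric pause when flow control or autonegotiation is
	 * selected, and enable autonegotiation (TXCW_ANE) only for
	 * IFM_AUTO.
	 */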
6184 	sc->sc_txcw = 0;
6185 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
6186 	    (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
6187 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
6188 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
6189 		sc->sc_txcw |= TXCW_ANE;
6190 	} else {
6191 		/*
6192 		 * If autonegotiation is turned off, force link up and turn on
6193 		 * full duplex
6194 		 */
6195 		sc->sc_txcw &= ~TXCW_ANE;
6196 		sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
6197 		sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
6198 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6199 		CSR_WRITE_FLUSH(sc);
6200 		delay(1000);
6201 	}
6202 
6203 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
6204 		    device_xname(sc->sc_dev),sc->sc_txcw));
6205 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
6206 	CSR_WRITE_FLUSH(sc);
6207 	delay(10000);
6208 
6209 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
6210 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
6211 
6212 	/*
6213 	 * On chips newer than the 82544, CTRL_SWDPIN(1) is set when the
6214 	 * optics detect a signal; on the 82544 and earlier, 0 means signal.
6215 	 */
6216 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
6217 		/* Have signal; wait for the link to come up. */
6218 
6219 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
6220 			/*
6221 			 * Reset the link, and let autonegotiation do its thing
6222 			 */
6223 			sc->sc_ctrl |= CTRL_LRST;
6224 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6225 			CSR_WRITE_FLUSH(sc);
6226 			delay(1000);
6227 			sc->sc_ctrl &= ~CTRL_LRST;
6228 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6229 			CSR_WRITE_FLUSH(sc);
6230 			delay(1000);
6231 		}
6232 
6233 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
6234 			delay(10000);
6235 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
6236 				break;
6237 		}
6238 
6239 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
6240 			    device_xname(sc->sc_dev),i));
6241 
6242 		status = CSR_READ(sc, WMREG_STATUS);
6243 		DPRINTF(WM_DEBUG_LINK,
6244 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
6245 			device_xname(sc->sc_dev),status, STATUS_LU));
6246 		if (status & STATUS_LU) {
6247 			/* Link is up. */
6248 			DPRINTF(WM_DEBUG_LINK,
6249 			    ("%s: LINK: set media -> link up %s\n",
6250 			    device_xname(sc->sc_dev),
6251 			    (status & STATUS_FD) ? "FDX" : "HDX"));
6252 
6253 			/*
6254 			 * NOTE: CTRL will update TFCE and RFCE automatically,
6255 			 * so we should update sc->sc_ctrl
6256 			 */
6257 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
6258 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6259 			sc->sc_fcrtl &= ~FCRTL_XONE;
6260 			if (status & STATUS_FD)
6261 				sc->sc_tctl |=
6262 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6263 			else
6264 				sc->sc_tctl |=
6265 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6266 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
6267 				sc->sc_fcrtl |= FCRTL_XONE;
6268 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6269 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
6270 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
6271 				      sc->sc_fcrtl);
6272 			sc->sc_tbi_linkup = 1;
6273 		} else {
6274 			if (i == WM_LINKUP_TIMEOUT)
6275 				wm_check_for_link(sc);
6276 			/* Link is down. */
6277 			DPRINTF(WM_DEBUG_LINK,
6278 			    ("%s: LINK: set media -> link down\n",
6279 			    device_xname(sc->sc_dev)));
6280 			sc->sc_tbi_linkup = 0;
6281 		}
6282 	} else {
6283 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
6284 		    device_xname(sc->sc_dev)));
6285 		sc->sc_tbi_linkup = 0;
6286 	}
6287 
6288 	wm_tbi_set_linkled(sc);
6289 
6290 	return 0;
6291 }
6292 
6293 /*
6294  * wm_tbi_set_linkled:
6295  *
6296  *	Update the link LED on 1000BASE-X devices.
6297  */
6298 static void
6299 wm_tbi_set_linkled(struct wm_softc *sc)
6300 {
6301 
6302 	if (sc->sc_tbi_linkup)
6303 		sc->sc_ctrl |= CTRL_SWDPIN(0);
6304 	else
6305 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
6306 
6307 	/* On 82540 and newer devices the link LED is active low */
6308 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
6309 
6310 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6311 }
6312 
6313 /*
6314  * wm_tbi_check_link:
6315  *
6316  *	Check the link on 1000BASE-X devices.
6317  */
6318 static void
6319 wm_tbi_check_link(struct wm_softc *sc)
6320 {
6321 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6322 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6323 	uint32_t status;
6324 
6325 	KASSERT(WM_LOCKED(sc));
6326 
6327 	status = CSR_READ(sc, WMREG_STATUS);
6328 
6329 	/* XXX is this needed? */
6330 	(void)CSR_READ(sc, WMREG_RXCW);
6331 	(void)CSR_READ(sc, WMREG_CTRL);
6332 
6333 	/* set link status */
6334 	if ((status & STATUS_LU) == 0) {
6335 		DPRINTF(WM_DEBUG_LINK,
6336 		    ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
6337 		sc->sc_tbi_linkup = 0;
6338 	} else if (sc->sc_tbi_linkup == 0) {
6339 		DPRINTF(WM_DEBUG_LINK,
6340 		    ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
6341 		    (status & STATUS_FD) ? "FDX" : "HDX"));
6342 		sc->sc_tbi_linkup = 1;
6343 	}
6344 
6345 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
6346 	    && ((status & STATUS_LU) == 0)) {
6347 		sc->sc_tbi_linkup = 0;
6348 		if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
6349 			/* RXCFG storm! */
6350 			DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
6351 				sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
6352 			wm_init_locked(ifp);
6353 			WM_UNLOCK(sc);
6354 			ifp->if_start(ifp);
6355 			WM_LOCK(sc);
6356 		} else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
6357 			/* If the timer expired, retry autonegotiation */
6358 			if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
6359 				DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
6360 				sc->sc_tbi_ticks = 0;
6361 				/*
6362 				 * Reset the link, and let autonegotiation do
6363 				 * its thing
6364 				 */
6365 				sc->sc_ctrl |= CTRL_LRST;
6366 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6367 				CSR_WRITE_FLUSH(sc);
6368 				delay(1000);
6369 				sc->sc_ctrl &= ~CTRL_LRST;
6370 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6371 				CSR_WRITE_FLUSH(sc);
6372 				delay(1000);
6373 				CSR_WRITE(sc, WMREG_TXCW,
6374 				    sc->sc_txcw & ~TXCW_ANE);
6375 				CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
6376 			}
6377 		}
6378 	}
6379 
6380 	wm_tbi_set_linkled(sc);
6381 }
6382 
6383 /*
6384  * wm_gmii_reset:
6385  *
6386  *	Reset the PHY.
6387  */
6388 static void
6389 wm_gmii_reset(struct wm_softc *sc)
6390 {
6391 	uint32_t reg;
6392 	int rv;
6393 
6394 	/* get phy semaphore */
6395 	switch (sc->sc_type) {
6396 	case WM_T_82571:
6397 	case WM_T_82572:
6398 	case WM_T_82573:
6399 	case WM_T_82574:
6400 	case WM_T_82583:
6401 		 /* XXX should get sw semaphore, too */
6402 		rv = wm_get_swsm_semaphore(sc);
6403 		break;
6404 	case WM_T_82575:
6405 	case WM_T_82576:
6406 	case WM_T_82580:
6407 	case WM_T_82580ER:
6408 	case WM_T_I350:
6409 	case WM_T_I354:
6410 	case WM_T_I210:
6411 	case WM_T_I211:
6412 	case WM_T_80003:
6413 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6414 		break;
6415 	case WM_T_ICH8:
6416 	case WM_T_ICH9:
6417 	case WM_T_ICH10:
6418 	case WM_T_PCH:
6419 	case WM_T_PCH2:
6420 	case WM_T_PCH_LPT:
6421 		rv = wm_get_swfwhw_semaphore(sc);
6422 		break;
6423 	default:
6424 		/* nothing to do */
6425 		rv = 0;
6426 		break;
6427 	}
6428 	if (rv != 0) {
6429 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6430 		    __func__);
6431 		return;
6432 	}
6433 
6434 	switch (sc->sc_type) {
6435 	case WM_T_82542_2_0:
6436 	case WM_T_82542_2_1:
6437 		/* null */
6438 		break;
6439 	case WM_T_82543:
6440 		/*
6441 		 * With 82543, we need to force speed and duplex on the MAC
6442 		 * equal to what the PHY speed and duplex configuration is.
6443 		 * In addition, we need to perform a hardware reset on the PHY
6444 		 * to take it out of reset.
6445 		 */
6446 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6447 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6448 
6449 		/* The PHY reset pin is active-low. */
6450 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
6451 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
6452 		    CTRL_EXT_SWDPIN(4));
6453 		reg |= CTRL_EXT_SWDPIO(4);
6454 
6455 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6456 		CSR_WRITE_FLUSH(sc);
6457 		delay(10*1000);
6458 
6459 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
6460 		CSR_WRITE_FLUSH(sc);
6461 		delay(150);
6462 #if 0
6463 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
6464 #endif
6465 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
6466 		break;
6467 	case WM_T_82544:	/* reset 10000us */
6468 	case WM_T_82540:
6469 	case WM_T_82545:
6470 	case WM_T_82545_3:
6471 	case WM_T_82546:
6472 	case WM_T_82546_3:
6473 	case WM_T_82541:
6474 	case WM_T_82541_2:
6475 	case WM_T_82547:
6476 	case WM_T_82547_2:
6477 	case WM_T_82571:	/* reset 100us */
6478 	case WM_T_82572:
6479 	case WM_T_82573:
6480 	case WM_T_82574:
6481 	case WM_T_82575:
6482 	case WM_T_82576:
6483 	case WM_T_82580:
6484 	case WM_T_82580ER:
6485 	case WM_T_I350:
6486 	case WM_T_I354:
6487 	case WM_T_I210:
6488 	case WM_T_I211:
6489 	case WM_T_82583:
6490 	case WM_T_80003:
6491 		/* generic reset */
6492 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6493 		CSR_WRITE_FLUSH(sc);
6494 		delay(20000);
6495 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6496 		CSR_WRITE_FLUSH(sc);
6497 		delay(20000);
6498 
6499 		if ((sc->sc_type == WM_T_82541)
6500 		    || (sc->sc_type == WM_T_82541_2)
6501 		    || (sc->sc_type == WM_T_82547)
6502 		    || (sc->sc_type == WM_T_82547_2)) {
6503 			/* workarounds for igp are done in igp_reset() */
6504 			/* XXX add code to set LED after phy reset */
6505 		}
6506 		break;
6507 	case WM_T_ICH8:
6508 	case WM_T_ICH9:
6509 	case WM_T_ICH10:
6510 	case WM_T_PCH:
6511 	case WM_T_PCH2:
6512 	case WM_T_PCH_LPT:
6513 		/* generic reset */
6514 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6515 		CSR_WRITE_FLUSH(sc);
6516 		delay(100);
6517 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6518 		CSR_WRITE_FLUSH(sc);
6519 		delay(150);
6520 		break;
6521 	default:
6522 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6523 		    __func__);
6524 		break;
6525 	}
6526 
6527 	/* release PHY semaphore */
6528 	switch (sc->sc_type) {
6529 	case WM_T_82571:
6530 	case WM_T_82572:
6531 	case WM_T_82573:
6532 	case WM_T_82574:
6533 	case WM_T_82583:
6534 		 /* XXX should put sw semaphore, too */
6535 		wm_put_swsm_semaphore(sc);
6536 		break;
6537 	case WM_T_82575:
6538 	case WM_T_82576:
6539 	case WM_T_82580:
6540 	case WM_T_82580ER:
6541 	case WM_T_I350:
6542 	case WM_T_I354:
6543 	case WM_T_I210:
6544 	case WM_T_I211:
6545 	case WM_T_80003:
6546 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6547 		break;
6548 	case WM_T_ICH8:
6549 	case WM_T_ICH9:
6550 	case WM_T_ICH10:
6551 	case WM_T_PCH:
6552 	case WM_T_PCH2:
6553 	case WM_T_PCH_LPT:
6554 		wm_put_swfwhw_semaphore(sc);
6555 		break;
6556 	default:
6557 		/* nothing to do */
6558 		rv = 0;
6559 		break;
6560 	}
6561 
6562 	/* get_cfg_done */
6563 	wm_get_cfg_done(sc);
6564 
6565 	/* extra setup */
6566 	switch (sc->sc_type) {
6567 	case WM_T_82542_2_0:
6568 	case WM_T_82542_2_1:
6569 	case WM_T_82543:
6570 	case WM_T_82544:
6571 	case WM_T_82540:
6572 	case WM_T_82545:
6573 	case WM_T_82545_3:
6574 	case WM_T_82546:
6575 	case WM_T_82546_3:
6576 	case WM_T_82541_2:
6577 	case WM_T_82547_2:
6578 	case WM_T_82571:
6579 	case WM_T_82572:
6580 	case WM_T_82573:
6581 	case WM_T_82574:
6582 	case WM_T_82575:
6583 	case WM_T_82576:
6584 	case WM_T_82580:
6585 	case WM_T_82580ER:
6586 	case WM_T_I350:
6587 	case WM_T_I354:
6588 	case WM_T_I210:
6589 	case WM_T_I211:
6590 	case WM_T_82583:
6591 	case WM_T_80003:
6592 		/* null */
6593 		break;
6594 	case WM_T_82541:
6595 	case WM_T_82547:
6596 		/* XXX Configure the activity LED after PHY reset */
6597 		break;
6598 	case WM_T_ICH8:
6599 	case WM_T_ICH9:
6600 	case WM_T_ICH10:
6601 	case WM_T_PCH:
6602 	case WM_T_PCH2:
6603 	case WM_T_PCH_LPT:
6604 		/* Allow time for h/w to get to a quiescent state after reset */
6605 		delay(10*1000);
6606 
6607 		if (sc->sc_type == WM_T_PCH)
6608 			wm_hv_phy_workaround_ich8lan(sc);
6609 
6610 		if (sc->sc_type == WM_T_PCH2)
6611 			wm_lv_phy_workaround_ich8lan(sc);
6612 
6613 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6614 			/*
6615 			 * dummy read to clear the phy wakeup bit after lcd
6616 			 * reset
6617 			 */
6618 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6619 		}
6620 
6621 		/*
6622 		 * XXX Configure the LCD with the extended configuration region
6623 		 * in NVM
6624 		 */
6625 
6626 		/* Configure the LCD with the OEM bits in NVM */
6627 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
6628 		    || (sc->sc_type == WM_T_PCH_LPT)) {
6629 			/*
6630 			 * Disable LPLU.
6631 			 * XXX It seems that 82567 has LPLU, too.
6632 			 */
6633 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6634 			reg &= ~(HV_OEM_BITS_A1KDIS| HV_OEM_BITS_LPLU);
6635 			reg |= HV_OEM_BITS_ANEGNOW;
6636 			wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6637 		}
6638 		break;
6639 	default:
6640 		panic("%s: unknown type\n", __func__);
6641 		break;
6642 	}
6643 }
6644 
6645 /*
6646  * wm_get_phy_id_82575:
6647  *
6648  * Return the PHY ID, or -1 on failure.
6649  */
6650 static int
6651 wm_get_phy_id_82575(struct wm_softc *sc)
6652 {
6653 	uint32_t reg;
6654 	int phyid = -1;
6655 
6656 	/* XXX */
6657 	if ((sc->sc_flags & WM_F_SGMII) == 0)
6658 		return -1;
6659 
6660 	if (wm_sgmii_uses_mdio(sc)) {
6661 		switch (sc->sc_type) {
6662 		case WM_T_82575:
6663 		case WM_T_82576:
6664 			reg = CSR_READ(sc, WMREG_MDIC);
6665 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
6666 			break;
6667 		case WM_T_82580:
6668 		case WM_T_I350:
6669 		case WM_T_I354:
6670 		case WM_T_I210:
6671 		case WM_T_I211:
6672 			reg = CSR_READ(sc, WMREG_MDICNFG);
6673 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
6674 			break;
6675 		default:
6676 			return -1;
6677 		}
6678 	}
6679 
6680 	return phyid;
6681 }
6682 
6683 
6684 /*
6685  * wm_gmii_mediainit:
6686  *
6687  *	Initialize media for use on 1000BASE-T devices.
6688  */
6689 static void
6690 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6691 {
6692 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6693 	struct mii_data *mii = &sc->sc_mii;
6694 
6695 	/* We have MII. */
6696 	sc->sc_flags |= WM_F_HAS_MII;
6697 
6698 	if (sc->sc_type == WM_T_80003)
6699 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
6700 	else
6701 		sc->sc_tipg = TIPG_1000T_DFLT;
6702 
6703 	/*
6704 	 * Let the chip set speed/duplex on its own based on
6705 	 * signals from the PHY.
6706 	 * XXXbouyer - I'm not sure this is right for the 80003,
6707 	 * the em driver only sets CTRL_SLU here - but it seems to work.
6708 	 */
6709 	sc->sc_ctrl |= CTRL_SLU;
6710 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6711 
6712 	/* Initialize our media structures and probe the GMII. */
6713 	mii->mii_ifp = ifp;
6714 
6715 	/*
6716 	 * Determine the PHY access method.
6717 	 *
6718 	 *  For SGMII, use SGMII specific method.
6719 	 *
6720 	 *  For some devices, we can determine the PHY access method
6721 	 * from sc_type.
6722 	 *
6723 	 *  For ICH8 variants, it's difficult to determine the PHY access
6724 	 * method from sc_type, so use the PCI product ID for some devices.
6725 	 * For other ICH8 variants, try igp's method first; if the PHY
6726 	 * can't be detected, fall back to bm's method.
6727 	 */
6728 	switch (prodid) {
6729 	case PCI_PRODUCT_INTEL_PCH_M_LM:
6730 	case PCI_PRODUCT_INTEL_PCH_M_LC:
6731 		/* 82577 */
6732 		sc->sc_phytype = WMPHY_82577;
6733 		mii->mii_readreg = wm_gmii_hv_readreg;
6734 		mii->mii_writereg = wm_gmii_hv_writereg;
6735 		break;
6736 	case PCI_PRODUCT_INTEL_PCH_D_DM:
6737 	case PCI_PRODUCT_INTEL_PCH_D_DC:
6738 		/* 82578 */
6739 		sc->sc_phytype = WMPHY_82578;
6740 		mii->mii_readreg = wm_gmii_hv_readreg;
6741 		mii->mii_writereg = wm_gmii_hv_writereg;
6742 		break;
6743 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6744 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
6745 		/* 82579 */
6746 		sc->sc_phytype = WMPHY_82579;
6747 		mii->mii_readreg = wm_gmii_hv_readreg;
6748 		mii->mii_writereg = wm_gmii_hv_writereg;
6749 		break;
6750 	case PCI_PRODUCT_INTEL_I217_LM:
6751 	case PCI_PRODUCT_INTEL_I217_V:
6752 	case PCI_PRODUCT_INTEL_I218_LM:
6753 	case PCI_PRODUCT_INTEL_I218_V:
6754 		/* I21[78] */
6755 		mii->mii_readreg = wm_gmii_hv_readreg;
6756 		mii->mii_writereg = wm_gmii_hv_writereg;
6757 		break;
6758 	case PCI_PRODUCT_INTEL_82801I_BM:
6759 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6760 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6761 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6762 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6763 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6764 		/* 82567 */
6765 		sc->sc_phytype = WMPHY_BM;
6766 		mii->mii_readreg = wm_gmii_bm_readreg;
6767 		mii->mii_writereg = wm_gmii_bm_writereg;
6768 		break;
6769 	default:
6770 		if (((sc->sc_flags & WM_F_SGMII) != 0)
6771 		    && !wm_sgmii_uses_mdio(sc)) {
6772 			mii->mii_readreg = wm_sgmii_readreg;
6773 			mii->mii_writereg = wm_sgmii_writereg;
6774 		} else if (sc->sc_type >= WM_T_80003) {
6775 			mii->mii_readreg = wm_gmii_i80003_readreg;
6776 			mii->mii_writereg = wm_gmii_i80003_writereg;
6777 		} else if (sc->sc_type >= WM_T_I210) {
6778 			mii->mii_readreg = wm_gmii_i82544_readreg;
6779 			mii->mii_writereg = wm_gmii_i82544_writereg;
6780 		} else if (sc->sc_type >= WM_T_82580) {
6781 			sc->sc_phytype = WMPHY_82580;
6782 			mii->mii_readreg = wm_gmii_82580_readreg;
6783 			mii->mii_writereg = wm_gmii_82580_writereg;
6784 		} else if (sc->sc_type >= WM_T_82544) {
6785 			mii->mii_readreg = wm_gmii_i82544_readreg;
6786 			mii->mii_writereg = wm_gmii_i82544_writereg;
6787 		} else {
6788 			mii->mii_readreg = wm_gmii_i82543_readreg;
6789 			mii->mii_writereg = wm_gmii_i82543_writereg;
6790 		}
6791 		break;
6792 	}
6793 	mii->mii_statchg = wm_gmii_statchg;
6794 
6795 	wm_gmii_reset(sc);
6796 
6797 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
6798 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
6799 	    wm_gmii_mediastatus);
6800 
6801 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6802 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
6803 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
6804 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
6805 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
6806 			/* Attach only one port */
6807 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6808 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
6809 		} else {
6810 			int i, id;
6811 			uint32_t ctrl_ext;
6812 
6813 			id = wm_get_phy_id_82575(sc);
6814 			if (id != -1) {
6815 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6816 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
6817 			}
6818 			if ((id == -1)
6819 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
6820 				/* Power on sgmii phy if it is disabled */
6821 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6822 				CSR_WRITE(sc, WMREG_CTRL_EXT,
6823 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
6824 				CSR_WRITE_FLUSH(sc);
6825 				delay(300*1000); /* XXX too long */
6826 
6827 				/* try PHY addresses 1 through 7 */
6828 				for (i = 1; i < 8; i++)
6829 					mii_attach(sc->sc_dev, &sc->sc_mii,
6830 					    0xffffffff, i, MII_OFFSET_ANY,
6831 					    MIIF_DOPAUSE);
6832 
6833 				/* restore previous sfp cage power state */
6834 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6835 			}
6836 		}
6837 	} else {
6838 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6839 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
6840 	}
6841 
6842 	/*
6843 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
6844 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
6845 	 */
6846 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
6847 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
6848 		wm_set_mdio_slow_mode_hv(sc);
6849 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6850 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
6851 	}
6852 
6853 	/*
6854 	 * (For ICH8 variants)
6855 	 * If PHY detection failed, use BM's r/w function and retry.
6856 	 */
6857 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
6858 		/* if failed, retry with *_bm_* */
6859 		mii->mii_readreg = wm_gmii_bm_readreg;
6860 		mii->mii_writereg = wm_gmii_bm_writereg;
6861 
6862 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6863 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
6864 	}
6865 
6866 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
6867 		/* No PHY was found */
6868 		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6869 		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
6870 		sc->sc_phytype = WMPHY_NONE;
6871 	} else {
6872 		/*
6873 		 * PHY Found!
6874 		 * Check PHY type.
6875 		 */
6876 		uint32_t model;
6877 		struct mii_softc *child;
6878 
6879 		child = LIST_FIRST(&mii->mii_phys);
6880 		if (device_is_a(child->mii_dev, "igphy")) {
6881 			struct igphy_softc *isc = (struct igphy_softc *)child;
6882 
6883 			model = isc->sc_mii.mii_mpd_model;
6884 			if (model == MII_MODEL_yyINTEL_I82566)
6885 				sc->sc_phytype = WMPHY_IGP_3;
6886 		}
6887 
6888 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
6889 	}
6890 }
6891 
6892 /*
6893  * wm_gmii_mediastatus:	[ifmedia interface function]
6894  *
6895  *	Get the current interface media status on a 1000BASE-T device.
6896  */
6897 static void
6898 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6899 {
6900 	struct wm_softc *sc = ifp->if_softc;
6901 
6902 	ether_mediastatus(ifp, ifmr);
6903 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6904 	    | sc->sc_flowflags;
6905 }
6906 
6907 /*
6908  * wm_gmii_mediachange:	[ifmedia interface function]
6909  *
6910  *	Set hardware to newly-selected media on a 1000BASE-T device.
6911  */
6912 static int
6913 wm_gmii_mediachange(struct ifnet *ifp)
6914 {
6915 	struct wm_softc *sc = ifp->if_softc;
6916 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6917 	int rc;
6918 
6919 	if ((ifp->if_flags & IFF_UP) == 0)
6920 		return 0;
6921 
6922 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6923 	sc->sc_ctrl |= CTRL_SLU;
6924 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6925 	    || (sc->sc_type > WM_T_82543)) {
6926 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6927 	} else {
6928 		sc->sc_ctrl &= ~CTRL_ASDE;
6929 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6930 		if (ife->ifm_media & IFM_FDX)
6931 			sc->sc_ctrl |= CTRL_FD;
6932 		switch (IFM_SUBTYPE(ife->ifm_media)) {
6933 		case IFM_10_T:
6934 			sc->sc_ctrl |= CTRL_SPEED_10;
6935 			break;
6936 		case IFM_100_TX:
6937 			sc->sc_ctrl |= CTRL_SPEED_100;
6938 			break;
6939 		case IFM_1000_T:
6940 			sc->sc_ctrl |= CTRL_SPEED_1000;
6941 			break;
6942 		default:
6943 			panic("wm_gmii_mediachange: bad media 0x%x",
6944 			    ife->ifm_media);
6945 		}
6946 	}
6947 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6948 	if (sc->sc_type <= WM_T_82543)
6949 		wm_gmii_reset(sc);
6950 
6951 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6952 		return 0;
6953 	return rc;
6954 }
6955 
6956 #define	MDI_IO		CTRL_SWDPIN(2)
6957 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
6958 #define	MDI_CLK		CTRL_SWDPIN(3)
6959 
6960 static void
6961 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6962 {
6963 	uint32_t i, v;
6964 
6965 	v = CSR_READ(sc, WMREG_CTRL);
6966 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6967 	v |= MDI_DIR | CTRL_SWDPIO(3);
6968 
6969 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6970 		if (data & i)
6971 			v |= MDI_IO;
6972 		else
6973 			v &= ~MDI_IO;
6974 		CSR_WRITE(sc, WMREG_CTRL, v);
6975 		CSR_WRITE_FLUSH(sc);
6976 		delay(10);
6977 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6978 		CSR_WRITE_FLUSH(sc);
6979 		delay(10);
6980 		CSR_WRITE(sc, WMREG_CTRL, v);
6981 		CSR_WRITE_FLUSH(sc);
6982 		delay(10);
6983 	}
6984 }
6985 
6986 static uint32_t
6987 i82543_mii_recvbits(struct wm_softc *sc)
6988 {
6989 	uint32_t v, i, data = 0;
6990 
6991 	v = CSR_READ(sc, WMREG_CTRL);
6992 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6993 	v |= CTRL_SWDPIO(3);
6994 
6995 	CSR_WRITE(sc, WMREG_CTRL, v);
6996 	CSR_WRITE_FLUSH(sc);
6997 	delay(10);
6998 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6999 	CSR_WRITE_FLUSH(sc);
7000 	delay(10);
7001 	CSR_WRITE(sc, WMREG_CTRL, v);
7002 	CSR_WRITE_FLUSH(sc);
7003 	delay(10);
7004 
7005 	for (i = 0; i < 16; i++) {
7006 		data <<= 1;
7007 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
7008 		CSR_WRITE_FLUSH(sc);
7009 		delay(10);
7010 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
7011 			data |= 1;
7012 		CSR_WRITE(sc, WMREG_CTRL, v);
7013 		CSR_WRITE_FLUSH(sc);
7014 		delay(10);
7015 	}
7016 
7017 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
7018 	CSR_WRITE_FLUSH(sc);
7019 	delay(10);
7020 	CSR_WRITE(sc, WMREG_CTRL, v);
7021 	CSR_WRITE_FLUSH(sc);
7022 	delay(10);
7023 
7024 	return data;
7025 }
7026 
7027 #undef MDI_IO
7028 #undef MDI_DIR
7029 #undef MDI_CLK
7030 
7031 /*
7032  * wm_gmii_i82543_readreg:	[mii interface function]
7033  *
7034  *	Read a PHY register on the GMII (i82543 version).
7035  */
7036 static int
7037 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
7038 {
7039 	struct wm_softc *sc = device_private(self);
7040 	int rv;
7041 
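	/*
	 * Bit-bang an IEEE 802.3 clause 22 read frame on the
	 * software-definable pins: a 32-bit preamble of ones, the 14-bit
	 * start/opcode/PHY/register header, then 16 data bits clocked
	 * back in.
	 */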
7042 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
7043 	i82543_mii_sendbits(sc, reg | (phy << 5) |
7044 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
7045 	rv = i82543_mii_recvbits(sc) & 0xffff;
7046 
7047 	DPRINTF(WM_DEBUG_GMII,
7048 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
7049 	    device_xname(sc->sc_dev), phy, reg, rv));
7050 
7051 	return rv;
7052 }
7053 
7054 /*
7055  * wm_gmii_i82543_writereg:	[mii interface function]
7056  *
7057  *	Write a PHY register on the GMII (i82543 version).
7058  */
7059 static void
7060 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
7061 {
7062 	struct wm_softc *sc = device_private(self);
7063 
7064 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
7065 	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
7066 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
7067 	    (MII_COMMAND_START << 30), 32);
7068 }
7069 
7070 /*
7071  * wm_gmii_i82544_readreg:	[mii interface function]
7072  *
7073  *	Read a PHY register on the GMII.
7074  */
7075 static int
7076 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
7077 {
7078 	struct wm_softc *sc = device_private(self);
7079 	uint32_t mdic = 0;
7080 	int i, rv;
7081 
7082 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
7083 	    MDIC_REGADD(reg));
7084 
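	/* Wait for the MDI read cycle to complete; each poll waits 50us. */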
7085 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
7086 		mdic = CSR_READ(sc, WMREG_MDIC);
7087 		if (mdic & MDIC_READY)
7088 			break;
7089 		delay(50);
7090 	}
7091 
7092 	if ((mdic & MDIC_READY) == 0) {
7093 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
7094 		    device_xname(sc->sc_dev), phy, reg);
7095 		rv = 0;
7096 	} else if (mdic & MDIC_E) {
7097 #if 0 /* This is normal if no PHY is present. */
7098 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
7099 		    device_xname(sc->sc_dev), phy, reg);
7100 #endif
7101 		rv = 0;
7102 	} else {
7103 		rv = MDIC_DATA(mdic);
7104 		if (rv == 0xffff)
7105 			rv = 0;
7106 	}
7107 
7108 	return rv;
7109 }
7110 
7111 /*
7112  * wm_gmii_i82544_writereg:	[mii interface function]
7113  *
7114  *	Write a PHY register on the GMII.
7115  */
7116 static void
7117 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
7118 {
7119 	struct wm_softc *sc = device_private(self);
7120 	uint32_t mdic = 0;
7121 	int i;
7122 
7123 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
7124 	    MDIC_REGADD(reg) | MDIC_DATA(val));
7125 
7126 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
7127 		mdic = CSR_READ(sc, WMREG_MDIC);
7128 		if (mdic & MDIC_READY)
7129 			break;
7130 		delay(50);
7131 	}
7132 
7133 	if ((mdic & MDIC_READY) == 0)
7134 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
7135 		    device_xname(sc->sc_dev), phy, reg);
7136 	else if (mdic & MDIC_E)
7137 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
7138 		    device_xname(sc->sc_dev), phy, reg);
7139 }
7140 
7141 /*
7142  * wm_gmii_i80003_readreg:	[mii interface function]
7143  *
7144  *	Read a PHY register on the Kumeran interface.
7145  * This could be handled by the PHY layer if we didn't have to lock the
7146  * resource ...
7147  */
7148 static int
7149 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
7150 {
7151 	struct wm_softc *sc = device_private(self);
7152 	int sem;
7153 	int rv;
7154 
7155 	if (phy != 1) /* only one PHY on kumeran bus */
7156 		return 0;
7157 
7158 	sem = swfwphysem[sc->sc_funcid];
7159 	if (wm_get_swfw_semaphore(sc, sem)) {
7160 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7161 		    __func__);
7162 		return 0;
7163 	}
7164 
7165 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
7166 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
7167 		    reg >> GG82563_PAGE_SHIFT);
7168 	} else {
7169 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
7170 		    reg >> GG82563_PAGE_SHIFT);
7171 	}
7172 	/* Wait another 200us to work around a bug in the MDIC ready bit */
7173 	delay(200);
7174 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
7175 	delay(200);
7176 
7177 	wm_put_swfw_semaphore(sc, sem);
7178 	return rv;
7179 }
7180 
7181 /*
7182  * wm_gmii_i80003_writereg:	[mii interface function]
7183  *
7184  *	Write a PHY register on the kumeran.
7185  * This could be handled by the PHY layer if we didn't have to lock the
7186  * resource ...
7187  */
7188 static void
7189 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
7190 {
7191 	struct wm_softc *sc = device_private(self);
7192 	int sem;
7193 
7194 	if (phy != 1) /* only one PHY on kumeran bus */
7195 		return;
7196 
7197 	sem = swfwphysem[sc->sc_funcid];
7198 	if (wm_get_swfw_semaphore(sc, sem)) {
7199 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7200 		    __func__);
7201 		return;
7202 	}
7203 
7204 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
7205 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
7206 		    reg >> GG82563_PAGE_SHIFT);
7207 	} else {
7208 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
7209 		    reg >> GG82563_PAGE_SHIFT);
7210 	}
7211 	/* Wait another 200us to work around a bug in the MDIC ready bit */
7212 	delay(200);
7213 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
7214 	delay(200);
7215 
7216 	wm_put_swfw_semaphore(sc, sem);
7217 }
7218 
7219 /*
7220  * wm_gmii_bm_readreg:	[mii interface function]
7221  *
7222  *	Read a PHY register on the BM (BME1000-style) PHY.
7223  * This could be handled by the PHY layer if we didn't have to lock the
7224  * resource ...
7225  */
7226 static int
7227 wm_gmii_bm_readreg(device_t self, int phy, int reg)
7228 {
7229 	struct wm_softc *sc = device_private(self);
7230 	int sem;
7231 	int rv;
7232 
7233 	sem = swfwphysem[sc->sc_funcid];
7234 	if (wm_get_swfw_semaphore(sc, sem)) {
7235 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7236 		    __func__);
7237 		return 0;
7238 	}
7239 
7240 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
7241 		if (phy == 1)
7242 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
7243 			    reg);
7244 		else
7245 			wm_gmii_i82544_writereg(self, phy,
7246 			    GG82563_PHY_PAGE_SELECT,
7247 			    reg >> GG82563_PAGE_SHIFT);
7248 	}
7249 
7250 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
7251 	wm_put_swfw_semaphore(sc, sem);
7252 	return rv;
7253 }
7254 
7255 /*
7256  * wm_gmii_bm_writereg:	[mii interface function]
7257  *
7258  *	Write a PHY register on the BM (BME1000-style) PHY.
7259  * This could be handled by the PHY layer if we didn't have to lock the
7260  * resource ...
7261  */
7262 static void
7263 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
7264 {
7265 	struct wm_softc *sc = device_private(self);
7266 	int sem;
7267 
7268 	sem = swfwphysem[sc->sc_funcid];
7269 	if (wm_get_swfw_semaphore(sc, sem)) {
7270 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7271 		    __func__);
7272 		return;
7273 	}
7274 
7275 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
7276 		if (phy == 1)
7277 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
7278 			    reg);
7279 		else
7280 			wm_gmii_i82544_writereg(self, phy,
7281 			    GG82563_PHY_PAGE_SELECT,
7282 			    reg >> GG82563_PAGE_SHIFT);
7283 	}
7284 
7285 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
7286 	wm_put_swfw_semaphore(sc, sem);
7287 }
7288 
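/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read or write a BM PHY wakeup register on page 800.  Page 800 is
 *	reached indirectly: enable wakeup-register access from page 769
 *	(BM_WUC_ENABLE_REG), select page 800, go through the address/data
 *	opcode pair, and finally restore the original page 769 bits.
 */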
7289 static void
7290 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
7291 {
7292 	struct wm_softc *sc = device_private(self);
7293 	uint16_t regnum = BM_PHY_REG_NUM(offset);
7294 	uint16_t wuce;
7295 
7296 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
7297 	if (sc->sc_type == WM_T_PCH) {
7298 		/* XXX The e1000 driver does nothing here... why? */
7299 	}
7300 
7301 	/* Set page 769 */
7302 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7303 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7304 
7305 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
7306 
7307 	wuce &= ~BM_WUC_HOST_WU_BIT;
7308 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
7309 	    wuce | BM_WUC_ENABLE_BIT);
7310 
7311 	/* Select page 800 */
7312 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7313 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
7314 
7315 	/* Write page 800 */
7316 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
7317 
7318 	if (rd)
7319 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
7320 	else
7321 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
7322 
7323 	/* Set page 769 */
7324 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7325 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7326 
7327 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
7328 }
7329 
7330 /*
7331  * wm_gmii_hv_readreg:	[mii interface function]
7332  *
7333  *	Read a PHY register on the HV (82577 and later) PHY.
7334  * This could be handled by the PHY layer if we didn't have to lock the
7335  * resource ...
7336  */
7337 static int
7338 wm_gmii_hv_readreg(device_t self, int phy, int reg)
7339 {
7340 	struct wm_softc *sc = device_private(self);
7341 	uint16_t page = BM_PHY_REG_PAGE(reg);
7342 	uint16_t regnum = BM_PHY_REG_NUM(reg);
7343 	uint16_t val;
7344 	int rv;
7345 
7346 	if (wm_get_swfwhw_semaphore(sc)) {
7347 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7348 		    __func__);
7349 		return 0;
7350 	}
7351 
7352 	/* XXX Workaround failure in MDIO access while cable is disconnected */
7353 	if (sc->sc_phytype == WMPHY_82577) {
7354 		/* XXX must write */
7355 	}
7356 
7357 	/* Page 800 works differently than the rest so it has its own func */
7358 	if (page == BM_WUC_PAGE) {
7359 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_put_swfwhw_semaphore(sc);
7360 		return val;
7361 	}
7362 
7363 	/*
7364 	 * Pages lower than 768 work differently than the rest and would
7365 	 * need their own function (not handled here; see below)
7366 	 */
7367 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7368 		printf("gmii_hv_readreg!!!\n");
		wm_put_swfwhw_semaphore(sc);
7369 		return 0;
7370 	}
7371 
7372 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7373 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7374 		    page << BME1000_PAGE_SHIFT);
7375 	}
7376 
7377 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
7378 	wm_put_swfwhw_semaphore(sc);
7379 	return rv;
7380 }
7381 
7382 /*
7383  * wm_gmii_hv_writereg:	[mii interface function]
7384  *
7385  *	Write a PHY register on the HV (82577 and later) PHY.
7386  * This could be handled by the PHY layer if we didn't have to lock the
7387  * resource ...
7388  */
7389 static void
7390 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
7391 {
7392 	struct wm_softc *sc = device_private(self);
7393 	uint16_t page = BM_PHY_REG_PAGE(reg);
7394 	uint16_t regnum = BM_PHY_REG_NUM(reg);
7395 
7396 	if (wm_get_swfwhw_semaphore(sc)) {
7397 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7398 		    __func__);
7399 		return;
7400 	}
7401 
7402 	/* XXX Workaround failure in MDIO access while cable is disconnected */
7403 
7404 	/* Page 800 works differently than the rest so it has its own func */
7405 	if (page == BM_WUC_PAGE) {
7406 		uint16_t tmp;
7407 
7408 		tmp = val;
7409 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		wm_put_swfwhw_semaphore(sc);
7410 		return;
7411 	}
7412 
7413 	/*
7414 	 * Pages lower than 768 work differently than the rest and would
7415 	 * need their own function (not handled here; see below)
7416 	 */
7417 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7418 		printf("gmii_hv_writereg!!!\n");
		wm_put_swfwhw_semaphore(sc);
7419 		return;
7420 	}
7421 
7422 	/*
7423 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
7424 	 * Power Down (whenever bit 11 of the PHY control register is set)
7425 	 */
7426 
7427 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7428 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7429 		    page << BME1000_PAGE_SHIFT);
7430 	}
7431 
7432 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
7433 	wm_put_swfwhw_semaphore(sc);
7434 }
7435 
7436 /*
7437  * wm_sgmii_uses_mdio
7438  *
7439  * Check whether the transaction is to the internal PHY or the external
7440  * MDIO interface. Return true if it's MDIO.
7441  */
7442 static bool
7443 wm_sgmii_uses_mdio(struct wm_softc *sc)
7444 {
7445 	uint32_t reg;
7446 	bool ismdio = false;
7447 
7448 	switch (sc->sc_type) {
7449 	case WM_T_82575:
7450 	case WM_T_82576:
7451 		reg = CSR_READ(sc, WMREG_MDIC);
7452 		ismdio = ((reg & MDIC_DEST) != 0);
7453 		break;
7454 	case WM_T_82580:
7455 	case WM_T_82580ER:
7456 	case WM_T_I350:
7457 	case WM_T_I354:
7458 	case WM_T_I210:
7459 	case WM_T_I211:
7460 		reg = CSR_READ(sc, WMREG_MDICNFG);
7461 		ismdio = ((reg & MDICNFG_DEST) != 0);
7462 		break;
7463 	default:
7464 		break;
7465 	}
7466 
7467 	return ismdio;
7468 }
7469 
7470 /*
7471  * wm_sgmii_readreg:	[mii interface function]
7472  *
7473  *	Read a PHY register on the SGMII
7474  * This could be handled by the PHY layer if we didn't have to lock the
7475  * resource ...
7476  */
7477 static int
7478 wm_sgmii_readreg(device_t self, int phy, int reg)
7479 {
7480 	struct wm_softc *sc = device_private(self);
7481 	uint32_t i2ccmd;
7482 	int i, rv;
7483 
7484 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7485 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7486 		    __func__);
7487 		return 0;
7488 	}
7489 
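	/*
	 * SGMII PHY registers are reached through the I2C command
	 * interface: pack the register and PHY addresses and a read
	 * opcode into I2CCMD, poll for I2CCMD_READY, then byte-swap the
	 * 16-bit result (hence the shifts below).
	 */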
7490 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7491 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
7492 	    | I2CCMD_OPCODE_READ;
7493 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7494 
7495 	/* Poll the ready bit */
7496 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7497 		delay(50);
7498 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7499 		if (i2ccmd & I2CCMD_READY)
7500 			break;
7501 	}
7502 	if ((i2ccmd & I2CCMD_READY) == 0)
7503 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
7504 	if ((i2ccmd & I2CCMD_ERROR) != 0)
7505 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7506 
7507 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
7508 
7509 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7510 	return rv;
7511 }
7512 
7513 /*
7514  * wm_sgmii_writereg:	[mii interface function]
7515  *
7516  *	Write a PHY register on the SGMII.
7517  * This could be handled by the PHY layer if we didn't have to lock the
7518  * resource ...
7519  */
7520 static void
7521 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
7522 {
7523 	struct wm_softc *sc = device_private(self);
7524 	uint32_t i2ccmd;
7525 	int i;
7526 
7527 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7528 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7529 		    __func__);
7530 		return;
7531 	}
7532 
7533 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7534 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
7535 	    | I2CCMD_OPCODE_WRITE;
7536 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7537 
7538 	/* Poll the ready bit */
7539 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7540 		delay(50);
7541 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7542 		if (i2ccmd & I2CCMD_READY)
7543 			break;
7544 	}
7545 	if ((i2ccmd & I2CCMD_READY) == 0)
7546 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
7547 	if ((i2ccmd & I2CCMD_ERROR) != 0)
7548 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7549 
7550 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7551 }
7552 
7553 /*
7554  * wm_gmii_82580_readreg:	[mii interface function]
7555  *
7556  *	Read a PHY register on the 82580 and I350.
7557  * This could be handled by the PHY layer if we didn't have to lock the
7558  * resource ...
7559  */
7560 static int
7561 wm_gmii_82580_readreg(device_t self, int phy, int reg)
7562 {
7563 	struct wm_softc *sc = device_private(self);
7564 	int sem;
7565 	int rv;
7566 
7567 	sem = swfwphysem[sc->sc_funcid];
7568 	if (wm_get_swfw_semaphore(sc, sem)) {
7569 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7570 		    __func__);
7571 		return 0;
7572 	}
7573 
7574 	rv = wm_gmii_i82544_readreg(self, phy, reg);
7575 
7576 	wm_put_swfw_semaphore(sc, sem);
7577 	return rv;
7578 }
7579 
7580 /*
7581  * wm_gmii_82580_writereg:	[mii interface function]
7582  *
7583  *	Write a PHY register on the 82580 and I350.
7584  * This could be handled by the PHY layer if we didn't have to lock the
7585  * resource ...
7586  */
7587 static void
7588 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
7589 {
7590 	struct wm_softc *sc = device_private(self);
7591 	int sem;
7592 
7593 	sem = swfwphysem[sc->sc_funcid];
7594 	if (wm_get_swfw_semaphore(sc, sem)) {
7595 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7596 		    __func__);
7597 		return;
7598 	}
7599 
7600 	wm_gmii_i82544_writereg(self, phy, reg, val);
7601 
7602 	wm_put_swfw_semaphore(sc, sem);
7603 }
7604 
7605 /*
7606  * wm_gmii_statchg:	[mii interface function]
7607  *
7608  *	Callback from MII layer when media changes.
7609  */
7610 static void
7611 wm_gmii_statchg(struct ifnet *ifp)
7612 {
7613 	struct wm_softc *sc = ifp->if_softc;
7614 	struct mii_data *mii = &sc->sc_mii;
7615 
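	/*
	 * Start from a clean slate: clear the flow-control enables in
	 * CTRL, the collision distance in TCTL and the XON enable in
	 * FCRTL, then rebuild them below from the negotiated media.
	 */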
7616 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
7617 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7618 	sc->sc_fcrtl &= ~FCRTL_XONE;
7619 
7620 	/*
7621 	 * Get flow control negotiation result.
7622 	 */
7623 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7624 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7625 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7626 		mii->mii_media_active &= ~IFM_ETH_FMASK;
7627 	}
7628 
7629 	if (sc->sc_flowflags & IFM_FLOW) {
7630 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7631 			sc->sc_ctrl |= CTRL_TFCE;
7632 			sc->sc_fcrtl |= FCRTL_XONE;
7633 		}
7634 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7635 			sc->sc_ctrl |= CTRL_RFCE;
7636 	}
7637 
7638 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
7639 		DPRINTF(WM_DEBUG_LINK,
7640 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7641 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7642 	} else {
7643 		DPRINTF(WM_DEBUG_LINK,
7644 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7645 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7646 	}
7647 
7648 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7649 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7650 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7651 						 : WMREG_FCRTL, sc->sc_fcrtl);
7652 	if (sc->sc_type == WM_T_80003) {
7653 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7654 		case IFM_1000_T:
7655 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7656 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7657 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
7658 			break;
7659 		default:
7660 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7661 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7662 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
7663 			break;
7664 		}
7665 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7666 	}
7667 }
7668 
7669 /*
7670  * wm_kmrn_readreg:
7671  *
7672  *	Read a kumeran register
7673  */
7674 static int
7675 wm_kmrn_readreg(struct wm_softc *sc, int reg)
7676 {
7677 	int rv;
7678 
7679 	if (sc->sc_flags & WM_F_SWFW_SYNC) {
7680 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7681 			aprint_error_dev(sc->sc_dev,
7682 			    "%s: failed to get semaphore\n", __func__);
7683 			return 0;
7684 		}
7685 	} else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
7686 		if (wm_get_swfwhw_semaphore(sc)) {
7687 			aprint_error_dev(sc->sc_dev,
7688 			    "%s: failed to get semaphore\n", __func__);
7689 			return 0;
7690 		}
7691 	}
7692 
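	/*
	 * Kumeran registers go through the single KUMCTRLSTA window:
	 * write the register offset with the read-enable (REN) bit set,
	 * wait briefly, and the data comes back in the low 16 bits of
	 * the same register.
	 */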
7693 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7694 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7695 	    KUMCTRLSTA_REN);
7696 	CSR_WRITE_FLUSH(sc);
7697 	delay(2);
7698 
7699 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7700 
7701 	if (sc->sc_flags & WM_F_SWFW_SYNC)
7702 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7703 	else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
7704 		wm_put_swfwhw_semaphore(sc);
7705 
7706 	return rv;
7707 }
7708 
7709 /*
7710  * wm_kmrn_writereg:
7711  *
7712  *	Write a kumeran register
7713  */
7714 static void
7715 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7716 {
7717 
7718 	if (sc->sc_flags & WM_F_SWFW_SYNC) {
7719 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7720 			aprint_error_dev(sc->sc_dev,
7721 			    "%s: failed to get semaphore\n", __func__);
7722 			return;
7723 		}
7724 	} else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
7725 		if (wm_get_swfwhw_semaphore(sc)) {
7726 			aprint_error_dev(sc->sc_dev,
7727 			    "%s: failed to get semaphore\n", __func__);
7728 			return;
7729 		}
7730 	}
7731 
7732 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7733 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7734 	    (val & KUMCTRLSTA_MASK));
7735 
7736 	if (sc->sc_flags & WM_F_SWFW_SYNC)
7737 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7738 	else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
7739 		wm_put_swfwhw_semaphore(sc);
7740 }
7741 
7742 static int
7743 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
7744 {
7745 	uint32_t eecd = 0;
7746 
7747 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
7748 	    || sc->sc_type == WM_T_82583) {
7749 		eecd = CSR_READ(sc, WMREG_EECD);
7750 
7751 		/* Isolate bits 15 & 16 */
7752 		eecd = ((eecd >> 15) & 0x03);
7753 
7754 		/* If both bits are set, device is Flash type */
7755 		if (eecd == 0x03)
7756 			return 0;
7757 	}
7758 	return 1;
7759 }
7760 
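/*
 * wm_get_swsm_semaphore:
 *
 *	Acquire the two-stage SWSM semaphore: wait for the hardware to
 *	release SMBI, then claim SWESMBI by writing it and reading it
 *	back; if the bit stuck, we own the software/firmware semaphore.
 */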
7761 static int
7762 wm_get_swsm_semaphore(struct wm_softc *sc)
7763 {
7764 	int32_t timeout;
7765 	uint32_t swsm;
7766 
7767 	/* Get the SW semaphore. */
7768 	timeout = 1000 + 1; /* XXX */
7769 	while (timeout) {
7770 		swsm = CSR_READ(sc, WMREG_SWSM);
7771 
7772 		if ((swsm & SWSM_SMBI) == 0)
7773 			break;
7774 
7775 		delay(50);
7776 		timeout--;
7777 	}
7778 
7779 	if (timeout == 0) {
7780 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
7781 		return 1;
7782 	}
7783 
7784 	/* Get the FW semaphore. */
7785 	timeout = 1000 + 1; /* XXX */
7786 	while (timeout) {
7787 		swsm = CSR_READ(sc, WMREG_SWSM);
7788 		swsm |= SWSM_SWESMBI;
7789 		CSR_WRITE(sc, WMREG_SWSM, swsm);
7790 		/* If we managed to set the bit, we got the semaphore. */
7791 		swsm = CSR_READ(sc, WMREG_SWSM);
7792 		if (swsm & SWSM_SWESMBI)
7793 			break;
7794 
7795 		delay(50);
7796 		timeout--;
7797 	}
7798 
7799 	if (timeout == 0) {
7800 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
7801 		/* Release semaphores */
7802 		wm_put_swsm_semaphore(sc);
7803 		return 1;
7804 	}
7805 	return 0;
7806 }
7807 
7808 static void
7809 wm_put_swsm_semaphore(struct wm_softc *sc)
7810 {
7811 	uint32_t swsm;
7812 
7813 	swsm = CSR_READ(sc, WMREG_SWSM);
7814 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
7815 	CSR_WRITE(sc, WMREG_SWSM, swsm);
7816 }
7817 
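/*
 * wm_get_swfw_semaphore:
 *
 *	Acquire one of the per-resource software/firmware semaphores in
 *	SW_FW_SYNC.  Each resource has a software bit (mask shifted by
 *	SWFW_SOFT_SHIFT) and a firmware bit (mask shifted by
 *	SWFW_FIRM_SHIFT); the resource is free only while both are clear,
 *	and SW_FW_SYNC itself is guarded by the SWSM semaphore on parts
 *	that have one.
 */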
7818 static int
7819 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
7820 {
7821 	uint32_t swfw_sync;
7822 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
7823 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
7824 	int timeout = 200;
7825 
7826 	for (timeout = 0; timeout < 200; timeout++) {
7827 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
7828 			if (wm_get_swsm_semaphore(sc)) {
7829 				aprint_error_dev(sc->sc_dev,
7830 				    "%s: failed to get semaphore\n",
7831 				    __func__);
7832 				return 1;
7833 			}
7834 		}
7835 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
7836 		if ((swfw_sync & (swmask | fwmask)) == 0) {
7837 			swfw_sync |= swmask;
7838 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
7839 			if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7840 				wm_put_swsm_semaphore(sc);
7841 			return 0;
7842 		}
7843 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7844 			wm_put_swsm_semaphore(sc);
7845 		delay(5000);
7846 	}
7847 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
7848 	    device_xname(sc->sc_dev), mask, swfw_sync);
7849 	return 1;
7850 }
7851 
7852 static void
7853 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
7854 {
7855 	uint32_t swfw_sync;
7856 
7857 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
7858 		while (wm_get_swsm_semaphore(sc) != 0)
7859 			continue;
7860 	}
7861 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
7862 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
7863 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
7864 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7865 		wm_put_swsm_semaphore(sc);
7866 }
7867 
7868 static int
7869 wm_get_swfwhw_semaphore(struct wm_softc *sc)
7870 {
7871 	uint32_t ext_ctrl;
7872 	int timeout = 200;
7873 
7874 	for (timeout = 0; timeout < 200; timeout++) {
7875 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7876 		ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
7877 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7878 
7879 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7880 		if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
7881 			return 0;
7882 		delay(5000);
7883 	}
7884 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
7885 	    device_xname(sc->sc_dev), ext_ctrl);
7886 	return 1;
7887 }
7888 
7889 static void
7890 wm_put_swfwhw_semaphore(struct wm_softc *sc)
7891 {
7892 	uint32_t ext_ctrl;
7893 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7894 	ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
7895 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7896 }
7897 
7898 static int
7899 wm_get_hw_semaphore_82573(struct wm_softc *sc)
7900 {
7901 	int i = 0;
7902 	uint32_t reg;
7903 
7904 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
7905 	do {
7906 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
7907 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
7908 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
7909 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
7910 			break;
7911 		delay(2*1000);
7912 		i++;
7913 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
7914 
7915 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
7916 		wm_put_hw_semaphore_82573(sc);
7917 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
7918 		    device_xname(sc->sc_dev));
7919 		return -1;
7920 	}
7921 
7922 	return 0;
7923 }
7924 
7925 static void
7926 wm_put_hw_semaphore_82573(struct wm_softc *sc)
7927 {
7928 	uint32_t reg;
7929 
7930 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
7931 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
7932 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
7933 }
7934 
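/*
 * wm_valid_nvm_bank_detect_ich8lan:
 *
 *	Work out which of the two NVM banks is valid.  On ICH8 and ICH9
 *	the EECD register may tell us directly; otherwise read the
 *	signature byte of each bank and pick the one carrying the valid
 *	NVM signature.
 */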
7935 static int
7936 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
7937 {
7938 	uint32_t eecd;
7939 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
7940 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
7941 	uint8_t sig_byte = 0;
7942 
7943 	switch (sc->sc_type) {
7944 	case WM_T_ICH8:
7945 	case WM_T_ICH9:
7946 		eecd = CSR_READ(sc, WMREG_EECD);
7947 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
7948 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
7949 			return 0;
7950 		}
7951 		/* FALLTHROUGH */
7952 	default:
7953 		/* Default to 0 */
7954 		*bank = 0;
7955 
7956 		/* Check bank 0 */
7957 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
7958 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
7959 			*bank = 0;
7960 			return 0;
7961 		}
7962 
7963 		/* Check bank 1 */
7964 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
7965 		    &sig_byte);
7966 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
7967 			*bank = 1;
7968 			return 0;
7969 		}
7970 	}
7971 
7972 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
7973 		device_xname(sc->sc_dev)));
7974 	return -1;
7975 }
7976 
7977 /******************************************************************************
7978  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
7979  * register.
7980  *
7981  * sc - Struct containing variables accessed by shared code
7982  * offset - offset of word in the EEPROM to read
7983  * data - word read from the EEPROM
7984  * words - number of words to read
7985  *****************************************************************************/
7986 static int
7987 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
7988 {
7989 	int32_t  error = 0;
7990 	uint32_t flash_bank = 0;
7991 	uint32_t act_offset = 0;
7992 	uint32_t bank_offset = 0;
7993 	uint16_t word = 0;
7994 	uint16_t i = 0;
7995 
7996 	/* We need to know which is the valid flash bank.  In the event
7997 	 * that we didn't allocate eeprom_shadow_ram, we may not be
7998 	 * managing flash_bank.  So it cannot be trusted and needs
7999 	 * to be updated with each read.
8000 	 */
8001 	error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
8002 	if (error) {
8003 		aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
8004 		    __func__);
8005 		flash_bank = 0;
8006 	}
8007 
8008 	/*
8009 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
8010 	 * size
8011 	 */
8012 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
8013 
8014 	error = wm_get_swfwhw_semaphore(sc);
8015 	if (error) {
8016 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8017 		    __func__);
8018 		return error;
8019 	}
8020 
8021 	for (i = 0; i < words; i++) {
8022 		/* The NVM part needs a byte offset, hence * 2 */
8023 		act_offset = bank_offset + ((offset + i) * 2);
8024 		error = wm_read_ich8_word(sc, act_offset, &word);
8025 		if (error) {
8026 			aprint_error_dev(sc->sc_dev,
8027 			    "%s: failed to read NVM\n", __func__);
8028 			break;
8029 		}
8030 		data[i] = word;
8031 	}
8032 
8033 	wm_put_swfwhw_semaphore(sc);
8034 	return error;
8035 }
8036 
8037 /******************************************************************************
8038  * This function does initial flash setup so that a new read/write/erase cycle
8039  * can be started.
8040  *
8041  * sc - The pointer to the hw structure
8042  ****************************************************************************/
8043 static int32_t
8044 wm_ich8_cycle_init(struct wm_softc *sc)
8045 {
8046 	uint16_t hsfsts;
8047 	int32_t error = 1;
8048 	int32_t i     = 0;
8049 
8050 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8051 
8052 	/* Maybe check the Flash Descriptor Valid bit in HW status */
8053 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
8054 		return error;
8055 	}
8056 
8057 	/* Clear FCERR in Hw status by writing 1 */
8058 	/* Clear DAEL in Hw status by writing a 1 */
8059 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
8060 
8061 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8062 
8063 	/*
8064 	 * Either we should have a hardware SPI cycle-in-progress bit to
8065 	 * check against in order to start a new cycle, or the FDONE bit
8066 	 * should be changed in the hardware so that it is 1 after hardware
8067 	 * reset, which can then be used to tell whether a cycle is in
8068 	 * progress or has been completed.  We should also have some software
8069 	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit
8070 	 * so that two threads' accesses to those bits can be serialized,
8071 	 * or some way to keep two threads from starting a cycle at once.
8072 	 */
8073 
8074 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
8075 		/*
8076 		 * There is no cycle running at present, so we can start a
8077 		 * cycle
8078 		 */
8079 
8080 		/* Begin by setting Flash Cycle Done. */
8081 		hsfsts |= HSFSTS_DONE;
8082 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8083 		error = 0;
8084 	} else {
8085 		/*
8086 		 * Otherwise poll for some time so the current cycle has a
8087 		 * chance to end before giving up.
8088 		 */
8089 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
8090 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8091 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
8092 				error = 0;
8093 				break;
8094 			}
8095 			delay(1);
8096 		}
8097 		if (error == 0) {
8098 			/*
8099 			 * The previous cycle ended in time; now set the
8100 			 * Flash Cycle Done bit.
8101 			 */
8102 			hsfsts |= HSFSTS_DONE;
8103 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
8104 		}
8105 	}
8106 	return error;
8107 }
8108 
8109 /******************************************************************************
8110  * This function starts a flash cycle and waits for its completion
8111  *
8112  * sc - The pointer to the hw structure
8113  ****************************************************************************/
8114 static int32_t
8115 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
8116 {
8117 	uint16_t hsflctl;
8118 	uint16_t hsfsts;
8119 	int32_t error = 1;
8120 	uint32_t i = 0;
8121 
8122 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
8123 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
8124 	hsflctl |= HSFCTL_GO;
8125 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
8126 
8127 	/* wait till FDONE bit is set to 1 */
8128 	do {
8129 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8130 		if (hsfsts & HSFSTS_DONE)
8131 			break;
8132 		delay(1);
8133 		i++;
8134 	} while (i < timeout);
8135 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
8136 		error = 0;
8137 
8138 	return error;
8139 }
8140 
8141 /******************************************************************************
8142  * Reads a byte or word from the NVM using the ICH8 flash access registers.
8143  *
8144  * sc - The pointer to the hw structure
8145  * index - The index of the byte or word to read.
8146  * size - Size of data to read, 1=byte 2=word
8147  * data - Pointer to the word to store the value read.
8148  *****************************************************************************/
8149 static int32_t
8150 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
8151     uint32_t size, uint16_t* data)
8152 {
8153 	uint16_t hsfsts;
8154 	uint16_t hsflctl;
8155 	uint32_t flash_linear_address;
8156 	uint32_t flash_data = 0;
8157 	int32_t error = 1;
8158 	int32_t count = 0;
8159 
8160 	if (size < 1 || size > 2 || data == NULL ||
8161 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
8162 		return error;
8163 
8164 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
8165 	    sc->sc_ich8_flash_base;
8166 
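	/*
	 * Each attempt is one ICH flash cycle: get the controller into a
	 * startable state, program the byte count and READ opcode into
	 * HSFCTL, point FADDR at the linear flash address, fire the GO
	 * bit, and on success pull the result out of FDATA0.  On an
	 * error the sequence is retried a bounded number of times.
	 */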
8167 	do {
8168 		delay(1);
8169 		/* Steps */
8170 		error = wm_ich8_cycle_init(sc);
8171 		if (error)
8172 			break;
8173 
8174 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
8175 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
8176 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
8177 		    & HSFCTL_BCOUNT_MASK;
8178 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
8179 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
8180 
8181 		/*
8182 		 * Write the last 24 bits of index into Flash Linear address
8183 		 * field in Flash Address
8184 		 */
8185 		/* TODO: maybe check the index against the size of the flash */
8186 
8187 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
8188 
8189 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
8190 
8191 		/*
8192 		 * Check if FCERR is set to 1; if so, clear it and try the
8193 		 * whole sequence a few more times.  Otherwise read in the
8194 		 * data from the Flash Data0 register, least significant
8195 		 * byte first.
8196 		 */
8197 		if (error == 0) {
8198 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
8199 			if (size == 1)
8200 				*data = (uint8_t)(flash_data & 0x000000FF);
8201 			else if (size == 2)
8202 				*data = (uint16_t)(flash_data & 0x0000FFFF);
8203 			break;
8204 		} else {
8205 			/*
8206 			 * If we've gotten here, then things are probably
8207 			 * completely hosed, but if the error condition is
8208 			 * detected, it won't hurt to give it another try...
8209 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
8210 			 */
8211 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
8212 			if (hsfsts & HSFSTS_ERR) {
8213 				/* Repeat for some time before giving up. */
8214 				continue;
8215 			} else if ((hsfsts & HSFSTS_DONE) == 0)
8216 				break;
8217 		}
8218 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
8219 
8220 	return error;
8221 }
8222 
8223 /******************************************************************************
8224  * Reads a single byte from the NVM using the ICH8 flash access registers.
8225  *
8226  * sc - pointer to wm_hw structure
8227  * index - The index of the byte to read.
8228  * data - Pointer to a byte to store the value read.
8229  *****************************************************************************/
8230 static int32_t
8231 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
8232 {
8233 	int32_t status;
8234 	uint16_t word = 0;
8235 
8236 	status = wm_read_ich8_data(sc, index, 1, &word);
8237 	if (status == 0)
8238 		*data = (uint8_t)word;
8239 	else
8240 		*data = 0;
8241 
8242 	return status;
8243 }
8244 
8245 /******************************************************************************
8246  * Reads a word from the NVM using the ICH8 flash access registers.
8247  *
8248  * sc - pointer to wm_hw structure
8249  * index - The starting byte index of the word to read.
8250  * data - Pointer to a word to store the value read.
8251  *****************************************************************************/
8252 static int32_t
8253 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
8254 {
8255 	int32_t status;
8256 
8257 	status = wm_read_ich8_data(sc, index, 2, data);
8258 	return status;
8259 }
8260 
8261 static int
8262 wm_check_mng_mode(struct wm_softc *sc)
8263 {
8264 	int rv;
8265 
8266 	switch (sc->sc_type) {
8267 	case WM_T_ICH8:
8268 	case WM_T_ICH9:
8269 	case WM_T_ICH10:
8270 	case WM_T_PCH:
8271 	case WM_T_PCH2:
8272 	case WM_T_PCH_LPT:
8273 		rv = wm_check_mng_mode_ich8lan(sc);
8274 		break;
8275 	case WM_T_82574:
8276 	case WM_T_82583:
8277 		rv = wm_check_mng_mode_82574(sc);
8278 		break;
8279 	case WM_T_82571:
8280 	case WM_T_82572:
8281 	case WM_T_82573:
8282 	case WM_T_80003:
8283 		rv = wm_check_mng_mode_generic(sc);
8284 		break;
8285 	default:
8286 		/* nothing to do */
8287 		rv = 0;
8288 		break;
8289 	}
8290 
8291 	return rv;
8292 }
8293 
8294 static int
8295 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
8296 {
8297 	uint32_t fwsm;
8298 
8299 	fwsm = CSR_READ(sc, WMREG_FWSM);
8300 
8301 	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
8302 		return 1;
8303 
8304 	return 0;
8305 }
8306 
8307 static int
8308 wm_check_mng_mode_82574(struct wm_softc *sc)
8309 {
8310 	uint16_t data;
8311 
8312 	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
8313 
8314 	if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
8315 		return 1;
8316 
8317 	return 0;
8318 }
8319 
8320 static int
8321 wm_check_mng_mode_generic(struct wm_softc *sc)
8322 {
8323 	uint32_t fwsm;
8324 
8325 	fwsm = CSR_READ(sc, WMREG_FWSM);
8326 
8327 	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
8328 		return 1;
8329 
8330 	return 0;
8331 }
8332 
8333 static int
8334 wm_enable_mng_pass_thru(struct wm_softc *sc)
8335 {
8336 	uint32_t manc, fwsm, factps;
8337 
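	/*
	 * Manageability pass-through needs ASF firmware present, the TCO
	 * receive path enabled in MANC, and a management mode that
	 * forwards packets to the host; which mode check applies depends
	 * on whether the part reports a valid ARC subsystem.
	 */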
8338 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
8339 		return 0;
8340 
8341 	manc = CSR_READ(sc, WMREG_MANC);
8342 
8343 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
8344 		device_xname(sc->sc_dev), manc));
8345 	if ((manc & MANC_RECV_TCO_EN) == 0)
8346 		return 0;
8347 
8348 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
8349 		fwsm = CSR_READ(sc, WMREG_FWSM);
8350 		factps = CSR_READ(sc, WMREG_FACTPS);
8351 		if (((factps & FACTPS_MNGCG) == 0)
8352 		    && ((fwsm & FWSM_MODE_MASK)
8353 			== (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
8354 			return 1;
8355 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
8356 		uint16_t data;
8357 
8358 		factps = CSR_READ(sc, WMREG_FACTPS);
8359 		wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
8360 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
8361 			device_xname(sc->sc_dev), factps, data));
8362 		if (((factps & FACTPS_MNGCG) == 0)
8363 		    && ((data & EEPROM_CFG2_MNGM_MASK)
8364 			== (EEPROM_CFG2_MNGM_PT << EEPROM_CFG2_MNGM_SHIFT)))
8365 			return 1;
8366 	} else if (((manc & MANC_SMBUS_EN) != 0)
8367 	    && ((manc & MANC_ASF_EN) == 0))
8368 		return 1;
8369 
8370 	return 0;
8371 }
8372 
8373 static int
8374 wm_check_reset_block(struct wm_softc *sc)
8375 {
8376 	uint32_t reg;
8377 
8378 	switch (sc->sc_type) {
8379 	case WM_T_ICH8:
8380 	case WM_T_ICH9:
8381 	case WM_T_ICH10:
8382 	case WM_T_PCH:
8383 	case WM_T_PCH2:
8384 	case WM_T_PCH_LPT:
8385 		reg = CSR_READ(sc, WMREG_FWSM);
8386 		if ((reg & FWSM_RSPCIPHY) != 0)
8387 			return 0;
8388 		else
8389 			return -1;
8390 		break;
8391 	case WM_T_82571:
8392 	case WM_T_82572:
8393 	case WM_T_82573:
8394 	case WM_T_82574:
8395 	case WM_T_82583:
8396 	case WM_T_80003:
8397 		reg = CSR_READ(sc, WMREG_MANC);
8398 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
8399 			return -1;
8400 		else
8401 			return 0;
8402 		break;
8403 	default:
8404 		/* no problem */
8405 		break;
8406 	}
8407 
8408 	return 0;
8409 }
8410 
8411 static void
8412 wm_get_hw_control(struct wm_softc *sc)
8413 {
8414 	uint32_t reg;
8415 
8416 	switch (sc->sc_type) {
8417 	case WM_T_82573:
8418 		reg = CSR_READ(sc, WMREG_SWSM);
8419 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
8420 		break;
8421 	case WM_T_82571:
8422 	case WM_T_82572:
8423 	case WM_T_82574:
8424 	case WM_T_82583:
8425 	case WM_T_80003:
8426 	case WM_T_ICH8:
8427 	case WM_T_ICH9:
8428 	case WM_T_ICH10:
8429 	case WM_T_PCH:
8430 	case WM_T_PCH2:
8431 	case WM_T_PCH_LPT:
8432 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
8433 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
8434 		break;
8435 	default:
8436 		break;
8437 	}
8438 }
8439 
8440 static void
8441 wm_release_hw_control(struct wm_softc *sc)
8442 {
8443 	uint32_t reg;
8444 
8445 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
8446 		return;
8447 
8448 	if (sc->sc_type == WM_T_82573) {
8449 		reg = CSR_READ(sc, WMREG_SWSM);
8450 		reg &= ~SWSM_DRV_LOAD;
8451 		CSR_WRITE(sc, WMREG_SWSM, reg);
8452 	} else {
8453 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
8454 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
8455 	}
8456 }
8457 
8458 /* XXX Currently TBI only */
8459 static int
8460 wm_check_for_link(struct wm_softc *sc)
8461 {
8462 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8463 	uint32_t rxcw;
8464 	uint32_t ctrl;
8465 	uint32_t status;
8466 	uint32_t sig;
8467 
8468 	rxcw = CSR_READ(sc, WMREG_RXCW);
8469 	ctrl = CSR_READ(sc, WMREG_CTRL);
8470 	status = CSR_READ(sc, WMREG_STATUS);
8471 
8472 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
8473 
8474 	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
8475 		device_xname(sc->sc_dev), __func__,
8476 		((ctrl & CTRL_SWDPIN(1)) == sig),
8477 		((status & STATUS_LU) != 0),
8478 		((rxcw & RXCW_C) != 0)
8479 		    ));
8480 
8481 	/*
8482 	 * SWDPIN   LU RXCW
8483 	 *      0    0    0
8484 	 *      0    0    1	(should not happen)
8485 	 *      0    1    0	(should not happen)
8486 	 *      0    1    1	(should not happen)
8487 	 *      1    0    0	Disable autonego and force linkup
8488 	 *      1    0    1	got /C/ but not linkup yet
8489 	 *      1    1    0	(linkup)
8490 	 *      1    1    1	If IFM_AUTO, back to autonego
8491 	 *
8492 	 */
8493 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
8494 	    && ((status & STATUS_LU) == 0)
8495 	    && ((rxcw & RXCW_C) == 0)) {
8496 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
8497 			__func__));
8498 		sc->sc_tbi_linkup = 0;
8499 		/* Disable auto-negotiation in the TXCW register */
8500 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
8501 
8502 		/*
8503 		 * Force link-up and also force full-duplex.
8504 		 *
8505 		 * NOTE: the TFCE and RFCE bits in CTRL were updated
8506 		 * automatically, so we should update sc->sc_ctrl
8507 		 */
8508 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
8509 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8510 	} else if (((status & STATUS_LU) != 0)
8511 	    && ((rxcw & RXCW_C) != 0)
8512 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
8513 		sc->sc_tbi_linkup = 1;
8514 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
8515 			__func__));
8516 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
8517 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
8518 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
8519 	    && ((rxcw & RXCW_C) != 0)) {
8520 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
8521 	} else {
8522 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
8523 			status));
8524 	}
8525 
8526 	return 0;
8527 }
8528 
8529 /* Work-around for 82566 Kumeran PCS lock loss */
8530 static void
8531 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
8532 {
8533 	int miistatus, active, i;
8534 	int reg;
8535 
8536 	miistatus = sc->sc_mii.mii_media_status;
8537 
8538 	/* If the link is not up, do nothing */
8539 	if ((miistatus & IFM_ACTIVE) == 0)
8540 		return;
8541 
8542 	active = sc->sc_mii.mii_media_active;
8543 
8544 	/* Nothing to do if the link is other than 1Gbps */
8545 	if (IFM_SUBTYPE(active) != IFM_1000_T)
8546 		return;
8547 
8548 	for (i = 0; i < 10; i++) {
8549 		/* read twice */
8550 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
8551 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
8552 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
8553 			goto out;	/* GOOD! */
8554 
8555 		/* Reset the PHY */
8556 		wm_gmii_reset(sc);
8557 		delay(5*1000);
8558 	}
8559 
8560 	/* Disable GigE link negotiation */
8561 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
8562 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
8563 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8564 
8565 	/*
8566 	 * Call gig speed drop workaround on Gig disable before accessing
8567 	 * any PHY registers.
8568 	 */
8569 	wm_gig_downshift_workaround_ich8lan(sc);
8570 
8571 out:
8572 	return;
8573 }
8574 
8575 /* WOL from S5 stops working */
8576 static void
8577 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
8578 {
8579 	uint16_t kmrn_reg;
8580 
8581 	/* Only for igp3 */
8582 	if (sc->sc_phytype == WMPHY_IGP_3) {
8583 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
8584 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
8585 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
8586 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
8587 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
8588 	}
8589 }
8590 
8591 #ifdef WM_WOL
8592 /* Power down workaround on D3 */
8593 static void
8594 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
8595 {
8596 	uint32_t reg;
8597 	int i;
8598 
8599 	for (i = 0; i < 2; i++) {
8600 		/* Disable link */
8601 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
8602 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
8603 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8604 
8605 		/*
8606 		 * Call gig speed drop workaround on Gig disable before
8607 		 * accessing any PHY registers
8608 		 */
8609 		if (sc->sc_type == WM_T_ICH8)
8610 			wm_gig_downshift_workaround_ich8lan(sc);
8611 
8612 		/* Write VR power-down enable */
8613 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
8614 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
8615 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
8616 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
8617 
8618 		/* Read it back and test */
8619 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
8620 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
8621 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
8622 			break;
8623 
8624 		/* Issue PHY reset and repeat at most one more time */
8625 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
8626 	}
8627 }
8628 #endif /* WM_WOL */
8629 
8630 /*
8631  * Workaround for pch's PHYs
8632  * XXX should be moved to new PHY driver?
8633  */
8634 static void
8635 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
8636 {
8637 	if (sc->sc_phytype == WMPHY_82577)
8638 		wm_set_mdio_slow_mode_hv(sc);
8639 
8640 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
8641 
8642 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
8643 
8644 	/* 82578 */
8645 	if (sc->sc_phytype == WMPHY_82578) {
8646 		/* PCH rev. < 3 */
8647 		if (sc->sc_rev < 3) {
8648 			/* XXX 6 bit shift? Why? Is it page2? */
8649 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
8650 			    0x66c0);
8651 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
8652 			    0xffff);
8653 		}
8654 
8655 		/* XXX phy rev. < 2 */
8656 	}
8657 
8658 	/* Select page 0 */
8659 
8660 	/* XXX acquire semaphore */
8661 	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
8662 	/* XXX release semaphore */
8663 
8664 	/*
8665 	 * Configure the K1 Si workaround during phy reset assuming there is
8666 	 * link so that it disables K1 if link is in 1Gbps.
8667 	 */
8668 	wm_k1_gig_workaround_hv(sc, 1);
8669 }
8670 
8671 static void
8672 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
8673 {
8674 
8675 	wm_set_mdio_slow_mode_hv(sc);
8676 }
8677 
8678 static void
8679 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
8680 {
8681 	int k1_enable = sc->sc_nvm_k1_enabled;
8682 
8683 	/* XXX acquire semaphore */
8684 
8685 	if (link) {
8686 		k1_enable = 0;
8687 
8688 		/* Link stall fix for link up */
8689 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
8690 	} else {
8691 		/* Link stall fix for link down */
8692 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
8693 	}
8694 
8695 	wm_configure_k1_ich8lan(sc, k1_enable);
8696 
8697 	/* XXX release semaphore */
8698 }
8699 
8700 static void
8701 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
8702 {
8703 	uint32_t reg;
8704 
8705 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
8706 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
8707 	    reg | HV_KMRN_MDIO_SLOW);
8708 }
8709 
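/*
 * wm_configure_k1_ich8lan:
 *
 *	Enable or disable K1 (apparently a power-saving state of the
 *	Kumeran interface, going by Intel's e1000 sources) via
 *	KUMCTRLSTA, then briefly force the speed bits in CTRL/CTRL_EXT
 *	so the new setting takes effect.
 */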
8710 static void
8711 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
8712 {
8713 	uint32_t ctrl, ctrl_ext, tmp;
8714 	uint16_t kmrn_reg;
8715 
8716 	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
8717 
8718 	if (k1_enable)
8719 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
8720 	else
8721 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
8722 
8723 	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
8724 
8725 	delay(20);
8726 
8727 	ctrl = CSR_READ(sc, WMREG_CTRL);
8728 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8729 
8730 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
8731 	tmp |= CTRL_FRCSPD;
8732 
8733 	CSR_WRITE(sc, WMREG_CTRL, tmp);
8734 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
8735 	CSR_WRITE_FLUSH(sc);
8736 	delay(20);
8737 
8738 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
8739 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8740 	CSR_WRITE_FLUSH(sc);
8741 	delay(20);
8742 }
8743 
8744 static void
8745 wm_smbustopci(struct wm_softc *sc)
8746 {
8747 	uint32_t fwsm;
8748 
8749 	fwsm = CSR_READ(sc, WMREG_FWSM);
8750 	if (((fwsm & FWSM_FW_VALID) == 0)
8751 	    && ((wm_check_reset_block(sc) == 0))) {
8752 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
8753 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
8754 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8755 		CSR_WRITE_FLUSH(sc);
8756 		delay(10);
8757 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
8758 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8759 		CSR_WRITE_FLUSH(sc);
8760 		delay(50*1000);
8761 
8762 		/*
8763 		 * Gate automatic PHY configuration by hardware on non-managed
8764 		 * 82579
8765 		 */
8766 		if (sc->sc_type == WM_T_PCH2)
8767 			wm_gate_hw_phy_config_ich8lan(sc, 1);
8768 	}
8769 }
8770 
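/*
 * wm_set_pcie_completion_timeout:
 *
 *	If the PCIe completion timeout in GCR is still at its reset
 *	default, set it: 10ms through GCR on parts without the version 2
 *	capability, or 16ms through the PCIe Device Control 2 register
 *	on parts that have it.  Completion timeout resend is disabled
 *	either way.
 */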
8771 static void
8772 wm_set_pcie_completion_timeout(struct wm_softc *sc)
8773 {
8774 	uint32_t gcr;
8775 	pcireg_t ctrl2;
8776 
8777 	gcr = CSR_READ(sc, WMREG_GCR);
8778 
8779 	/* Only take action if timeout value is defaulted to 0 */
8780 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
8781 		goto out;
8782 
8783 	if ((gcr & GCR_CAP_VER2) == 0) {
8784 		gcr |= GCR_CMPL_TMOUT_10MS;
8785 		goto out;
8786 	}
8787 
8788 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
8789 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
8790 	ctrl2 |= WM_PCIE_DCSR2_16MS;
8791 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
8792 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
8793 
8794 out:
8795 	/* Disable completion timeout resend */
8796 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
8797 
8798 	CSR_WRITE(sc, WMREG_GCR, gcr);
8799 }
8800 
8801 /* special case - for 82575 - need to do manual init ... */
8802 static void
8803 wm_reset_init_script_82575(struct wm_softc *sc)
8804 {
8805 	/*
8806 	 * remark: this is untested code - we have no board without EEPROM
8807 	 *  same setup as mentioned in the FreeBSD driver for the i82575
8808 	 */
8809 
8810 	/* SerDes configuration via SERDESCTRL */
8811 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
8812 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
8813 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
8814 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
8815 
8816 	/* CCM configuration via CCMCTL register */
8817 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
8818 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
8819 
8820 	/* PCIe lanes configuration */
8821 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
8822 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
8823 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
8824 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
8825 
8826 	/* PCIe PLL Configuration */
8827 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
8828 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
8829 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
8830 }
8831 
8832 static void
8833 wm_init_manageability(struct wm_softc *sc)
8834 {
8835 
8836 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
8837 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
8838 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
8839 
8840 		/* Disable hardware interception of ARP */
8841 		manc &= ~MANC_ARP_EN;
8842 
8843 		/* enable receiving management packets to the host */
8844 		if (sc->sc_type >= WM_T_82571) {
8845 			manc |= MANC_EN_MNG2HOST;
8846 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
8847 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
8848 
8849 		}
8850 
8851 		CSR_WRITE(sc, WMREG_MANC, manc);
8852 	}
8853 }
8854 
8855 static void
8856 wm_release_manageability(struct wm_softc *sc)
8857 {
8858 
8859 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
8860 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
8861 
8862 		manc |= MANC_ARP_EN;
8863 		if (sc->sc_type >= WM_T_82571)
8864 			manc &= ~MANC_EN_MNG2HOST;
8865 
8866 		CSR_WRITE(sc, WMREG_MANC, manc);
8867 	}
8868 }
8869 
8870 static void
8871 wm_get_wakeup(struct wm_softc *sc)
8872 {
8873 
8874 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
8875 	switch (sc->sc_type) {
8876 	case WM_T_82573:
8877 	case WM_T_82583:
8878 		sc->sc_flags |= WM_F_HAS_AMT;
8879 		/* FALLTHROUGH */
8880 	case WM_T_80003:
8881 	case WM_T_82541:
8882 	case WM_T_82547:
8883 	case WM_T_82571:
8884 	case WM_T_82572:
8885 	case WM_T_82574:
8886 	case WM_T_82575:
8887 	case WM_T_82576:
8888 	case WM_T_82580:
8889 	case WM_T_82580ER:
8890 	case WM_T_I350:
8891 	case WM_T_I354:
8892 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
8893 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
8894 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8895 		break;
8896 	case WM_T_ICH8:
8897 	case WM_T_ICH9:
8898 	case WM_T_ICH10:
8899 	case WM_T_PCH:
8900 	case WM_T_PCH2:
8901 	case WM_T_PCH_LPT:
8902 		sc->sc_flags |= WM_F_HAS_AMT;
8903 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8904 		break;
8905 	default:
8906 		break;
8907 	}
8908 
8909 	/* 1: HAS_MANAGE */
8910 	if (wm_enable_mng_pass_thru(sc) != 0)
8911 		sc->sc_flags |= WM_F_HAS_MANAGE;
8912 
8913 #ifdef WM_DEBUG
8914 	printf("\n");
8915 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
8916 		printf("HAS_AMT,");
8917 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
8918 		printf("ARC_SUBSYS_VALID,");
8919 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
8920 		printf("ASF_FIRMWARE_PRES,");
8921 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
8922 		printf("HAS_MANAGE,");
8923 	printf("\n");
8924 #endif
8925 	/*
8926 	 * Note that the WOL flags are set after the resetting of the EEPROM
8927 	 * stuff.
8928 	 */
8929 }
8930 
8931 #ifdef WM_WOL
8932 /* WOL in the newer chipset interfaces (pchlan) */
8933 static void
8934 wm_enable_phy_wakeup(struct wm_softc *sc)
8935 {
8936 #if 0
8937 	uint16_t preg;
8938 
8939 	/* Copy MAC RARs to PHY RARs */
8940 
8941 	/* Copy MAC MTA to PHY MTA */
8942 
8943 	/* Configure PHY Rx Control register */
8944 
8945 	/* Enable PHY wakeup in MAC register */
8946 
8947 	/* Configure and enable PHY wakeup in PHY registers */
8948 
8949 	/* Activate PHY wakeup */
8950 
8951 	/* XXX */
8952 #endif
8953 }
8954 
8955 static void
8956 wm_enable_wakeup(struct wm_softc *sc)
8957 {
8958 	uint32_t reg, pmreg;
8959 	pcireg_t pmode;
8960 
8961 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
8962 		&pmreg, NULL) == 0)
8963 		return;
8964 
8965 	/* Advertise the wakeup capability */
8966 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
8967 	    | CTRL_SWDPIN(3));
8968 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
8969 
8970 	/* ICH workaround */
8971 	switch (sc->sc_type) {
8972 	case WM_T_ICH8:
8973 	case WM_T_ICH9:
8974 	case WM_T_ICH10:
8975 	case WM_T_PCH:
8976 	case WM_T_PCH2:
8977 	case WM_T_PCH_LPT:
8978 		/* Disable gig during WOL */
8979 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
8980 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
8981 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8982 		if (sc->sc_type == WM_T_PCH)
8983 			wm_gmii_reset(sc);
8984 
8985 		/* Power down workaround */
8986 		if (sc->sc_phytype == WMPHY_82577) {
8987 			struct mii_softc *child;
8988 
8989 			/* Assume that the PHY is copper */
8990 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
8991 			if (child->mii_mpd_rev <= 2)
8992 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
8993 				    (768 << 5) | 25, 0x0444); /* magic num */
8994 		}
8995 		break;
8996 	default:
8997 		break;
8998 	}
8999 
9000 	/* Keep the laser running on fiber adapters */
9001 	if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
9002 	    || (sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0) {
9003 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
9004 		reg |= CTRL_EXT_SWDPIN(3);
9005 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9006 	}
9007 
9008 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
9009 #if 0	/* for the multicast packet */
9010 	reg |= WUFC_MC;
9011 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
9012 #endif
9013 
9014 	if (sc->sc_type == WM_T_PCH) {
9015 		wm_enable_phy_wakeup(sc);
9016 	} else {
9017 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
9018 		CSR_WRITE(sc, WMREG_WUFC, reg);
9019 	}
9020 
9021 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
9022 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
9023 		|| (sc->sc_type == WM_T_PCH2))
9024 		    && (sc->sc_phytype == WMPHY_IGP_3))
9025 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
9026 
9027 	/* Request PME */
9028 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
9029 #if 0
9030 	/* Disable WOL */
9031 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
9032 #else
9033 	/* For WOL */
9034 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
9035 #endif
9036 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
9037 }
9038 #endif /* WM_WOL */
9039 
9040 static bool
9041 wm_suspend(device_t self, const pmf_qual_t *qual)
9042 {
9043 	struct wm_softc *sc = device_private(self);
9044 
9045 	wm_release_manageability(sc);
9046 	wm_release_hw_control(sc);
9047 #ifdef WM_WOL
9048 	wm_enable_wakeup(sc);
9049 #endif
9050 
9051 	return true;
9052 }
9053 
9054 static bool
9055 wm_resume(device_t self, const pmf_qual_t *qual)
9056 {
9057 	struct wm_softc *sc = device_private(self);
9058 
9059 	wm_init_manageability(sc);
9060 
9061 	return true;
9062 }
9063 
9064 static void
9065 wm_set_eee_i350(struct wm_softc * sc)
9066 {
9067 	uint32_t ipcnfg, eeer;
9068 
9069 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
9070 	eeer = CSR_READ(sc, WMREG_EEER);
9071 
9072 	if ((sc->sc_flags & WM_F_EEE) != 0) {
9073 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9074 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
9075 		    | EEER_LPI_FC);
9076 	} else {
9077 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
9078 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
9079 		    | EEER_LPI_FC);
9080 	}
9081 
9082 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
9083 	CSR_WRITE(sc, WMREG_EEER, eeer);
9084 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
9085 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
9086 }
9087