/*	$NetBSD: if_wm.c,v 1.223 2011/07/01 07:45:39 matt Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.223 2011/07/01 07:45:39 matt Exp $");

#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
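
/*
 * Usage sketch (illustrative only): the second DPRINTF argument carries
 * its own parentheses so an entire printf() argument list can be passed
 * through the macro, e.g.
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link up\n", device_xname(sc->sc_dev)));
 */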

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */

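#if 0	/* Illustrative sketch only; not compiled. */
	/*
	 * Because the ring sizes are powers of two, the index macros above
	 * wrap with a mask rather than a modulo.  Advancing the producer:
	 */
	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);	/* 4095 -> 0 */
#endif
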
/*
 * Receive descriptor list size.  We have one Rx buffer for normal-sized
 * packets.  Jumbo packets consume 5 Rx buffers for a full-sized packet.
 * We allocate 256 receive descriptors, each with a 2k buffer
 * (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability register offset */

	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */

	int sc_ee_addrbits;		/* EEPROM address bits */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int	sc_txfree;		/* number of free Tx descriptors */
	int	sc_txnext;		/* next ready Tx descriptor */

	int	sc_txsfree;		/* number of free Tx jobs */
	int	sc_txsnext;		/* next free Tx job */
	int	sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int	sc_txfifo_size;		/* Tx FIFO size */
	int	sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int	sc_txfifo_stall;	/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
	int	sc_rxdiscard;
	int	sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

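/*
 * Note (illustrative): after WM_RXCHAIN_RESET, sc_rxtailp points at
 * sc_rxhead, so the first WM_RXCHAIN_LINK stores its mbuf into sc_rxhead
 * itself; later links append through the previous mbuf's m_next.  The
 * pointer-to-pointer tail avoids special-casing the empty chain.
 */
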
#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg) \
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE32(sc, reg, data) \
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define ICH8_FLASH_READ16(sc, reg) \
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE16(sc, reg, data) \
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

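/*
 * Worked example (illustrative): with WM_NTXDESC(sc) == 4096, syncing
 * 4 descriptors starting at index 4094 first syncs entries 4094-4095
 * (the tail of the ring), then wraps and syncs entries 0-1.
 */
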
#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)

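/*
 * Alignment example (illustrative): with sc_align_tweak == 2, the 14-byte
 * Ethernet header starts at buffer offset 2, so the IP header that follows
 * lands at offset 16, a 4-byte boundary, as required on strict-alignment
 * platforms.
 */
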
static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static int	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);
static void	wm_set_vlan(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);

static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);

static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(device_t);

static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

static void	wm_set_spiaddrbits(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		     uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
#ifdef WM_WOL
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
#endif
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
static void	wm_init_manageability(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
#define	WMP_F_SERDES		0x04
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
#if 0
	/*
	 * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so
	 * disabled for now ...
	 */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

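/*
 * Example (illustrative): with a 64-bit bus_addr_t,
 * wm_set_dma_addr(&d, 0x123456789abcULL) stores wa_low = 0x56789abc and
 * wa_high = 0x1234, both little-endian as the descriptor format expects.
 */
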
static void
wm_set_spiaddrbits(struct wm_softc *sc)
{
	uint32_t reg;

	sc->sc_flags |= WM_F_EEPROM_SPI;
	reg = CSR_READ(sc, WMREG_EECD);
	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
}

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, 0);

	sc->sc_wmp = wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, sc->sc_rev);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	wm_get_wakeup(sc);

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i != PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
			 * that is not a problem here, because those newer
			 * chips don't have this bug.
			 *
			 * The i8254x doesn't apparently respond when the
			 * I/O BAR is 0, which looks somewhat like it hasn't
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
					0, &sc->sc_iot, &sc->sc_ioh,
					NULL, &sc->sc_ios) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
					wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
		    && (sc->sc_type != WM_T_ICH10)
		    && (sc->sc_type != WM_T_PCH)
		    && (sc->sc_type != WM_T_PCH2)) {
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
			/* ICH* and PCH* have no PCIe capability registers */
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
				NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIe capability\n");
		}
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if ((reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
				 sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose_dev(sc->sc_dev,
					    "resetting PCI-X MMRBC: %d -> %d\n",
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					   (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcixe_capoff + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
								      : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error_dev(sc->sc_dev,
				    "unknown PCIXSPD %d; assuming 66MHz\n",
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
				break;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
		    (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
		    &sc->sc_cd_rseg, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
		    sc->sc_cd_rseg, sc->sc_cd_size,
		    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
		    sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
		    sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
			    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, 0,
			    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Rx DMA map %d, error = %d\n",
1465 			    i, error);
1466 			goto fail_5;
1467 		}
1468 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
1469 	}
1470 
1471 	/* clear interesting stat counters */
1472 	CSR_READ(sc, WMREG_COLC);
1473 	CSR_READ(sc, WMREG_RXERRC);
1474 
1475 	/* get PHY control from SMBus to PCIe */
1476 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2))
1477 		wm_smbustopci(sc);
1478 
1479 	/*
1480 	 * Reset the chip to a known state.
1481 	 */
1482 	wm_reset(sc);
1483 
1484 	switch (sc->sc_type) {
1485 	case WM_T_82571:
1486 	case WM_T_82572:
1487 	case WM_T_82573:
1488 	case WM_T_82574:
1489 	case WM_T_82583:
1490 	case WM_T_80003:
1491 	case WM_T_ICH8:
1492 	case WM_T_ICH9:
1493 	case WM_T_ICH10:
1494 	case WM_T_PCH:
1495 	case WM_T_PCH2:
1496 		if (wm_check_mng_mode(sc) != 0)
1497 			wm_get_hw_control(sc);
1498 		break;
1499 	default:
1500 		break;
1501 	}
1502 
1503 	/*
1504 	 * Get some information about the EEPROM.
1505 	 */
1506 	switch (sc->sc_type) {
1507 	case WM_T_82542_2_0:
1508 	case WM_T_82542_2_1:
1509 	case WM_T_82543:
1510 	case WM_T_82544:
1511 		/* Microwire */
1512 		sc->sc_ee_addrbits = 6;
1513 		break;
1514 	case WM_T_82540:
1515 	case WM_T_82545:
1516 	case WM_T_82545_3:
1517 	case WM_T_82546:
1518 	case WM_T_82546_3:
1519 		/* Microwire */
1520 		reg = CSR_READ(sc, WMREG_EECD);
1521 		if (reg & EECD_EE_SIZE)
1522 			sc->sc_ee_addrbits = 8;
1523 		else
1524 			sc->sc_ee_addrbits = 6;
1525 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1526 		break;
1527 	case WM_T_82541:
1528 	case WM_T_82541_2:
1529 	case WM_T_82547:
1530 	case WM_T_82547_2:
1531 		reg = CSR_READ(sc, WMREG_EECD);
1532 		if (reg & EECD_EE_TYPE) {
1533 			/* SPI */
1534 			wm_set_spiaddrbits(sc);
1535 		} else
1536 			/* Microwire */
1537 			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
1538 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1539 		break;
1540 	case WM_T_82571:
1541 	case WM_T_82572:
1542 		/* SPI */
1543 		wm_set_spiaddrbits(sc);
1544 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1545 		break;
1546 	case WM_T_82573:
1547 	case WM_T_82574:
1548 	case WM_T_82583:
1549 		if (wm_is_onboard_nvm_eeprom(sc) == 0)
1550 			sc->sc_flags |= WM_F_EEPROM_FLASH;
1551 		else {
1552 			/* SPI */
1553 			wm_set_spiaddrbits(sc);
1554 		}
1555 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1556 		break;
1557 	case WM_T_82575:
1558 	case WM_T_82576:
1559 	case WM_T_82580:
1560 	case WM_T_82580ER:
1561 	case WM_T_80003:
1562 		/* SPI */
1563 		wm_set_spiaddrbits(sc);
1564 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
1565 		break;
1566 	case WM_T_ICH8:
1567 	case WM_T_ICH9:
1568 	case WM_T_ICH10:
1569 	case WM_T_PCH:
1570 	case WM_T_PCH2:
1571 		/* FLASH */
1572 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
1573 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1574 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1575 		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1576 			aprint_error_dev(sc->sc_dev,
1577 			    "can't map FLASH registers\n");
1578 			return;
1579 		}
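		/*
		 * GFPREG describes the flash region in sector units: the
		 * bits under ICH_GFPREG_BASE_MASK hold the first sector,
		 * the same mask at bit 16 the last.  The code below
		 * converts that to a byte base and a per-bank size in
		 * 16-bit words; the region holds two NVM banks, hence
		 * the division by 2 * sizeof(uint16_t).  For example, a
		 * hypothetical reg of 0x001f0001 would give a base of
		 * one sector and 0x1f sectors' worth of bytes split
		 * across the two banks.
		 */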
1580 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1581 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1582 						ICH_FLASH_SECTOR_SIZE;
1583 		sc->sc_ich8_flash_bank_size =
1584 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1585 		sc->sc_ich8_flash_bank_size -=
1586 		    (reg & ICH_GFPREG_BASE_MASK);
1587 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1588 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1589 		break;
1590 	default:
1591 		break;
1592 	}
1593 
1594 	/*
1595 	 * Defer printing the EEPROM type until after verifying the checksum.
1596 	 * This allows the EEPROM type to be printed correctly in the case
1597 	 * that no EEPROM is attached.
1598 	 */
1599 	/*
1600 	 * Validate the EEPROM checksum. If the checksum fails, flag
1601 	 * this for later, so we can fail future reads from the EEPROM.
1602 	 */
1603 	if (wm_validate_eeprom_checksum(sc)) {
1604 		/*
1605 		 * Retry the check: some PCIe parts fail the first
1606 		 * attempt because the link is still in a sleep state.
1607 		 */
1608 		if (wm_validate_eeprom_checksum(sc))
1609 			sc->sc_flags |= WM_F_EEPROM_INVALID;
1610 	}
1611 
1612 	/* Set device properties (macflags) */
1613 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1614 
1615 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
1616 		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1617 	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1618 		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1619 	} else {
1620 		if (sc->sc_flags & WM_F_EEPROM_SPI)
1621 			eetype = "SPI";
1622 		else
1623 			eetype = "MicroWire";
1624 		aprint_verbose_dev(sc->sc_dev,
1625 		    "%u word (%d address bits) %s EEPROM\n",
1626 		    1U << sc->sc_ee_addrbits,
1627 		    sc->sc_ee_addrbits, eetype);
1628 	}
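	/*
	 * The word count follows from the address width: a Microwire
	 * part with 6 address bits holds 1 << 6 == 64 16-bit words,
	 * one with 8 address bits holds 256.
	 */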
1629 
1630 	/*
1631 	 * Read the Ethernet address from the EEPROM, unless it was
1632 	 * already found in the device properties.
1633 	 */
1634 	ea = prop_dictionary_get(dict, "mac-address");
1635 	if (ea != NULL) {
1636 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1637 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1638 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1639 	} else {
1640 		if (wm_read_mac_addr(sc, enaddr) != 0) {
1641 			aprint_error_dev(sc->sc_dev,
1642 			    "unable to read Ethernet address\n");
1643 			return;
1644 		}
1645 	}
1646 
1647 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1648 	    ether_sprintf(enaddr));
1649 
1650 	/*
1651 	 * Read the config info from the EEPROM, and set up various
1652 	 * bits in the control registers based on their contents.
1653 	 */
1654 	pn = prop_dictionary_get(dict, "i82543-cfg1");
1655 	if (pn != NULL) {
1656 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1657 		cfg1 = (uint16_t) prop_number_integer_value(pn);
1658 	} else {
1659 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1660 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1661 			return;
1662 		}
1663 	}
1664 
1665 	pn = prop_dictionary_get(dict, "i82543-cfg2");
1666 	if (pn != NULL) {
1667 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1668 		cfg2 = (uint16_t) prop_number_integer_value(pn);
1669 	} else {
1670 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1671 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1672 			return;
1673 		}
1674 	}
1675 
1676 	/* check for WM_F_WOL */
1677 	switch (sc->sc_type) {
1678 	case WM_T_82542_2_0:
1679 	case WM_T_82542_2_1:
1680 	case WM_T_82543:
1681 		/* No APME bit on these chips; WOL stays disabled */
1682 		eeprom_data = 0;
1683 		apme_mask = EEPROM_CFG3_APME;
1684 		break;
1685 	case WM_T_82544:
1686 		apme_mask = EEPROM_CFG2_82544_APM_EN;
1687 		eeprom_data = cfg2;
1688 		break;
1689 	case WM_T_82546:
1690 	case WM_T_82546_3:
1691 	case WM_T_82571:
1692 	case WM_T_82572:
1693 	case WM_T_82573:
1694 	case WM_T_82574:
1695 	case WM_T_82583:
1696 	case WM_T_80003:
1697 	default:
1698 		apme_mask = EEPROM_CFG3_APME;
1699 		wm_read_eeprom(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
1700 		    : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
1701 		break;
1702 	case WM_T_82575:
1703 	case WM_T_82576:
1704 	case WM_T_82580:
1705 	case WM_T_82580ER:
1706 	case WM_T_ICH8:
1707 	case WM_T_ICH9:
1708 	case WM_T_ICH10:
1709 	case WM_T_PCH:
1710 	case WM_T_PCH2:
1711 		apme_mask = WUC_APME;
1712 		eeprom_data = CSR_READ(sc, WMREG_WUC);
1713 		break;
1714 	}
1715 
1716 	/* Set the WM_F_WOL flag now that the EEPROM/WUC data has been read */
1717 	if ((eeprom_data & apme_mask) != 0)
1718 		sc->sc_flags |= WM_F_WOL;
1719 #ifdef WM_DEBUG
1720 	if ((sc->sc_flags & WM_F_WOL) != 0)
1721 		printf("WOL\n");
1722 #endif
1723 
1724 	/*
1725 	 * XXX need special handling for some multiple port cards
1726 	 * to disable a particular port.
1727 	 */
1728 
1729 	if (sc->sc_type >= WM_T_82544) {
1730 		pn = prop_dictionary_get(dict, "i82543-swdpin");
1731 		if (pn != NULL) {
1732 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1733 			swdpin = (uint16_t) prop_number_integer_value(pn);
1734 		} else {
1735 			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1736 				aprint_error_dev(sc->sc_dev,
1737 				    "unable to read SWDPIN\n");
1738 				return;
1739 			}
1740 		}
1741 	}
1742 
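	/*
	 * Seed CTRL from the EEPROM: ILOS (invert loss-of-signal) from
	 * CFG1, then the software-definable pin directions (SWDPIO) and
	 * values (SWDPIN).  On i82544 and later both 4-bit fields come
	 * from the SWDPIN word; older chips keep SWDPIO in CFG1.
	 */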
1743 	if (cfg1 & EEPROM_CFG1_ILOS)
1744 		sc->sc_ctrl |= CTRL_ILOS;
1745 	if (sc->sc_type >= WM_T_82544) {
1746 		sc->sc_ctrl |=
1747 		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1748 		    CTRL_SWDPIO_SHIFT;
1749 		sc->sc_ctrl |=
1750 		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1751 		    CTRL_SWDPINS_SHIFT;
1752 	} else {
1753 		sc->sc_ctrl |=
1754 		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1755 		    CTRL_SWDPIO_SHIFT;
1756 	}
1757 
1758 #if 0
1759 	if (sc->sc_type >= WM_T_82544) {
1760 		if (cfg1 & EEPROM_CFG1_IPS0)
1761 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1762 		if (cfg1 & EEPROM_CFG1_IPS1)
1763 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1764 		sc->sc_ctrl_ext |=
1765 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1766 		    CTRL_EXT_SWDPIO_SHIFT;
1767 		sc->sc_ctrl_ext |=
1768 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1769 		    CTRL_EXT_SWDPINS_SHIFT;
1770 	} else {
1771 		sc->sc_ctrl_ext |=
1772 		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1773 		    CTRL_EXT_SWDPIO_SHIFT;
1774 	}
1775 #endif
1776 
1777 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1778 #if 0
1779 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1780 #endif
1781 
1782 	/*
1783 	 * Set up some register offsets that are different between
1784 	 * the i82542 and the i82543 and later chips.
1785 	 */
1786 	if (sc->sc_type < WM_T_82543) {
1787 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
1788 		sc->sc_tdt_reg = WMREG_OLD_TDT;
1789 	} else {
1790 		sc->sc_rdt_reg = WMREG_RDT;
1791 		sc->sc_tdt_reg = WMREG_TDT;
1792 	}
1793 
1794 	if (sc->sc_type == WM_T_PCH) {
1795 		uint16_t val;
1796 
1797 		/* Save the NVM K1 bit setting */
1798 		wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
1799 
1800 		if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
1801 			sc->sc_nvm_k1_enabled = 1;
1802 		else
1803 			sc->sc_nvm_k1_enabled = 0;
1804 	}
1805 
1806 	/*
1807 	 * Determine whether we're in TBI, GMII or SGMII mode, and
1808 	 * initialize the media structures accordingly.
1809 	 */
1810 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1811 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
1812 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_82573
1813 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
1814 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
1815 		wm_gmii_mediainit(sc, wmp->wmp_product);
1816 	} else if (sc->sc_type < WM_T_82543 ||
1817 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1818 		if (wmp->wmp_flags & WMP_F_1000T)
1819 			aprint_error_dev(sc->sc_dev,
1820 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
1821 		wm_tbi_mediainit(sc);
1822 	} else {
1823 		switch (sc->sc_type) {
1824 		case WM_T_82575:
1825 		case WM_T_82576:
1826 		case WM_T_82580:
1827 		case WM_T_82580ER:
1828 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
1829 			switch (reg & CTRL_EXT_LINK_MODE_MASK) {
1830 			case CTRL_EXT_LINK_MODE_SGMII:
1831 				aprint_verbose_dev(sc->sc_dev, "SGMII\n");
1832 				sc->sc_flags |= WM_F_SGMII;
1833 				CSR_WRITE(sc, WMREG_CTRL_EXT,
1834 				    reg | CTRL_EXT_I2C_ENA);
1835 				wm_gmii_mediainit(sc, wmp->wmp_product);
1836 				break;
1837 			case CTRL_EXT_LINK_MODE_1000KX:
1838 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
1839 				aprint_verbose_dev(sc->sc_dev, "1000KX or SERDES\n");
1840 				CSR_WRITE(sc, WMREG_CTRL_EXT,
1841 				    reg | CTRL_EXT_I2C_ENA);
1842 				panic("not supported yet");
1843 				break;
1844 			case CTRL_EXT_LINK_MODE_GMII:
1845 			default:
1846 				CSR_WRITE(sc, WMREG_CTRL_EXT,
1847 				    reg & ~CTRL_EXT_I2C_ENA);
1848 				wm_gmii_mediainit(sc, wmp->wmp_product);
1849 				break;
1850 			}
1851 			break;
1852 		default:
1853 			if (wmp->wmp_flags & WMP_F_1000X)
1854 				aprint_error_dev(sc->sc_dev,
1855 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
1856 			wm_gmii_mediainit(sc, wmp->wmp_product);
1857 		}
1858 	}
1859 
1860 	ifp = &sc->sc_ethercom.ec_if;
1861 	xname = device_xname(sc->sc_dev);
1862 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
1863 	ifp->if_softc = sc;
1864 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1865 	ifp->if_ioctl = wm_ioctl;
1866 	ifp->if_start = wm_start;
1867 	ifp->if_watchdog = wm_watchdog;
1868 	ifp->if_init = wm_init;
1869 	ifp->if_stop = wm_stop;
1870 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1871 	IFQ_SET_READY(&ifp->if_snd);
1872 
1873 	/* Check for jumbo frame support */
1874 	switch (sc->sc_type) {
1875 	case WM_T_82573:
1876 		/* XXX limited to 9234 if ASPM is disabled */
1877 		wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
1878 		if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
1879 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1880 		break;
1881 	case WM_T_82571:
1882 	case WM_T_82572:
1883 	case WM_T_82574:
1884 	case WM_T_82575:
1885 	case WM_T_82576:
1886 	case WM_T_82580:
1887 	case WM_T_82580ER:
1888 	case WM_T_80003:
1889 	case WM_T_ICH9:
1890 	case WM_T_ICH10:
1891 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
1892 		/* XXX limited to 9234 */
1893 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1894 		break;
1895 	case WM_T_PCH:
1896 		/* XXX limited to 4096 */
1897 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1898 		break;
1899 	case WM_T_82542_2_0:
1900 	case WM_T_82542_2_1:
1901 	case WM_T_82583:
1902 	case WM_T_ICH8:
1903 		/* No support for jumbo frame */
1904 		break;
1905 	default:
1906 		/* ETHER_MAX_LEN_JUMBO */
1907 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1908 		break;
1909 	}
1910 
1911 	/*
1912 	 * If we're an i82543 or greater, we can support VLANs.
1913 	 */
1914 	if (sc->sc_type >= WM_T_82543)
1915 		sc->sc_ethercom.ec_capabilities |=
1916 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
1917 
1918 	/*
1919 	 * We can perform TCPv4 and UDPv4 checksum offload.  Only
1920 	 * on i82543 and later.
1921 	 */
1922 	if (sc->sc_type >= WM_T_82543) {
1923 		ifp->if_capabilities |=
1924 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1925 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1926 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1927 		    IFCAP_CSUM_TCPv6_Tx |
1928 		    IFCAP_CSUM_UDPv6_Tx;
1929 	}
1930 
1931 	/*
1932 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
1933 	 *
1934 	 *	82541GI (8086:1076) ... no
1935 	 *	82572EI (8086:10b9) ... yes
1936 	 */
1937 	if (sc->sc_type >= WM_T_82571) {
1938 		ifp->if_capabilities |=
1939 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1940 	}
1941 
1942 	/*
1943 	 * If we're an i82544 or greater (except i82547), we can do
1944 	 * TCP segmentation offload.
1945 	 */
1946 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
1947 		ifp->if_capabilities |= IFCAP_TSOv4;
1948 	}
1949 
1950 	if (sc->sc_type >= WM_T_82571) {
1951 		ifp->if_capabilities |= IFCAP_TSOv6;
1952 	}
1953 
1954 	/*
1955 	 * Attach the interface.
1956 	 */
1957 	if_attach(ifp);
1958 	ether_ifattach(ifp, enaddr);
1959 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
1960 #if NRND > 0
1961 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
1962 #endif
1963 
1964 #ifdef WM_EVENT_COUNTERS
1965 	/* Attach event counters. */
1966 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1967 	    NULL, xname, "txsstall");
1968 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1969 	    NULL, xname, "txdstall");
1970 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1971 	    NULL, xname, "txfifo_stall");
1972 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1973 	    NULL, xname, "txdw");
1974 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1975 	    NULL, xname, "txqe");
1976 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1977 	    NULL, xname, "rxintr");
1978 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
1979 	    NULL, xname, "linkintr");
1980 
1981 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1982 	    NULL, xname, "rxipsum");
1983 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
1984 	    NULL, xname, "rxtusum");
1985 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1986 	    NULL, xname, "txipsum");
1987 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
1988 	    NULL, xname, "txtusum");
1989 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
1990 	    NULL, xname, "txtusum6");
1991 
1992 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
1993 	    NULL, xname, "txtso");
1994 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
1995 	    NULL, xname, "txtso6");
1996 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
1997 	    NULL, xname, "txtsopain");
1998 
1999 	for (i = 0; i < WM_NTXSEGS; i++) {
2000 		snprintf(wm_txseg_evcnt_names[i], sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2001 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2002 		    NULL, xname, wm_txseg_evcnt_names[i]);
2003 	}
2004 
2005 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2006 	    NULL, xname, "txdrop");
2007 
2008 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2009 	    NULL, xname, "tu");
2010 
2011 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2012 	    NULL, xname, "tx_xoff");
2013 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2014 	    NULL, xname, "tx_xon");
2015 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2016 	    NULL, xname, "rx_xoff");
2017 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2018 	    NULL, xname, "rx_xon");
2019 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2020 	    NULL, xname, "rx_macctl");
2021 #endif /* WM_EVENT_COUNTERS */
2022 
2023 	if (pmf_device_register(self, wm_suspend, wm_resume))
2024 		pmf_class_network_register(self, ifp);
2025 	else
2026 		aprint_error_dev(self, "couldn't establish power handler\n");
2027 
2028 	return;
2029 
2030 	/*
2031 	 * Free any resources we've allocated during the failed attach
2032 	 * attempt.  Do this in reverse order and fall through.
2033 	 */
2034  fail_5:
2035 	for (i = 0; i < WM_NRXDESC; i++) {
2036 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2037 			bus_dmamap_destroy(sc->sc_dmat,
2038 			    sc->sc_rxsoft[i].rxs_dmamap);
2039 	}
2040  fail_4:
2041 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2042 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
2043 			bus_dmamap_destroy(sc->sc_dmat,
2044 			    sc->sc_txsoft[i].txs_dmamap);
2045 	}
2046 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2047  fail_3:
2048 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2049  fail_2:
2050 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2051 	    sc->sc_cd_size);
2052  fail_1:
2053 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2054  fail_0:
2055 	return;
2056 }
2057 
2058 static int
2059 wm_detach(device_t self, int flags __unused)
2060 {
2061 	struct wm_softc *sc = device_private(self);
2062 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2063 	int i, s;
2064 
2065 	s = splnet();
2066 	/* Stop the interface; the callouts are stopped inside wm_stop() */
2067 	wm_stop(ifp, 1);
2068 	splx(s);
2069 
2070 	pmf_device_deregister(self);
2071 
2072 	/* Tell the firmware about the release */
2073 	wm_release_manageability(sc);
2074 	wm_release_hw_control(sc);
2075 
2076 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2077 
2078 	/* Delete all remaining media. */
2079 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2080 
2081 	ether_ifdetach(ifp);
2082 	if_detach(ifp);
2083 
2085 	/* Unload RX dmamaps and free mbufs */
2086 	wm_rxdrain(sc);
2087 
2088 	/* Free the DMA maps; this mirrors the cleanup at the end of wm_attach() */
2089 	for (i = 0; i < WM_NRXDESC; i++) {
2090 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2091 			bus_dmamap_destroy(sc->sc_dmat,
2092 			    sc->sc_rxsoft[i].rxs_dmamap);
2093 	}
2094 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2095 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
2096 			bus_dmamap_destroy(sc->sc_dmat,
2097 			    sc->sc_txsoft[i].txs_dmamap);
2098 	}
2099 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2100 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2101 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2102 	    sc->sc_cd_size);
2103 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2104 
2105 	/* Disestablish the interrupt handler */
2106 	if (sc->sc_ih != NULL) {
2107 		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2108 		sc->sc_ih = NULL;
2109 	}
2110 
2111 	/* Unmap the registers */
2112 	if (sc->sc_ss) {
2113 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2114 		sc->sc_ss = 0;
2115 	}
2116 
2117 	if (sc->sc_ios) {
2118 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2119 		sc->sc_ios = 0;
2120 	}
2121 
2122 	return 0;
2123 }
2124 
2125 /*
2126  * wm_tx_offload:
2127  *
2128  *	Set up TCP/IP checksumming parameters for the
2129  *	specified packet.
2130  */
2131 static int
2132 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
2133     uint8_t *fieldsp)
2134 {
2135 	struct mbuf *m0 = txs->txs_mbuf;
2136 	struct livengood_tcpip_ctxdesc *t;
2137 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
2138 	uint32_t ipcse;
2139 	struct ether_header *eh;
2140 	int offset, iphl;
2141 	uint8_t fields;
2142 
2143 	/*
2144 	 * XXX It would be nice if the mbuf pkthdr had offset
2145 	 * fields for the protocol headers.
2146 	 */
2147 
2148 	eh = mtod(m0, struct ether_header *);
2149 	switch (ntohs(eh->ether_type)) {
2150 	case ETHERTYPE_IP:
2151 	case ETHERTYPE_IPV6:
2152 		offset = ETHER_HDR_LEN;
2153 		break;
2154 
2155 	case ETHERTYPE_VLAN:
2156 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2157 		break;
2158 
2159 	default:
2160 		/*
2161 		 * Don't support this protocol or encapsulation.
2162 		 */
2163 		*fieldsp = 0;
2164 		*cmdp = 0;
2165 		return 0;
2166 	}
2167 
2168 	if ((m0->m_pkthdr.csum_flags &
2169 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
2170 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2171 	} else {
2172 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2173 	}
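	/* IPCSE is the inclusive offset of the last IP header byte. */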
2174 	ipcse = offset + iphl - 1;
2175 
2176 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
2177 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
2178 	seg = 0;
2179 	fields = 0;
2180 
2181 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2182 		int hlen = offset + iphl;
2183 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2184 
2185 		if (__predict_false(m0->m_len <
2186 				    (hlen + sizeof(struct tcphdr)))) {
2187 			/*
2188 			 * TCP/IP headers are not in the first mbuf; we need
2189 			 * to do this the slow and painful way.  Let's just
2190 			 * hope this doesn't happen very often.
2191 			 */
2192 			struct tcphdr th;
2193 
2194 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2195 
2196 			m_copydata(m0, hlen, sizeof(th), &th);
2197 			if (v4) {
2198 				struct ip ip;
2199 
2200 				m_copydata(m0, offset, sizeof(ip), &ip);
2201 				ip.ip_len = 0;
2202 				m_copyback(m0,
2203 				    offset + offsetof(struct ip, ip_len),
2204 				    sizeof(ip.ip_len), &ip.ip_len);
2205 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2206 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2207 			} else {
2208 				struct ip6_hdr ip6;
2209 
2210 				m_copydata(m0, offset, sizeof(ip6), &ip6);
2211 				ip6.ip6_plen = 0;
2212 				m_copyback(m0,
2213 				    offset + offsetof(struct ip6_hdr, ip6_plen),
2214 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2215 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2216 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2217 			}
2218 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2219 			    sizeof(th.th_sum), &th.th_sum);
2220 
2221 			hlen += th.th_off << 2;
2222 		} else {
2223 			/*
2224 			 * TCP/IP headers are in the first mbuf; we can do
2225 			 * this the easy way.
2226 			 */
2227 			struct tcphdr *th;
2228 
2229 			if (v4) {
2230 				struct ip *ip =
2231 				    (void *)(mtod(m0, char *) + offset);
2232 				th = (void *)(mtod(m0, char *) + hlen);
2233 
2234 				ip->ip_len = 0;
2235 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2236 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2237 			} else {
2238 				struct ip6_hdr *ip6 =
2239 				    (void *)(mtod(m0, char *) + offset);
2240 				th = (void *)(mtod(m0, char *) + hlen);
2241 
2242 				ip6->ip6_plen = 0;
2243 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2244 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2245 			}
2246 			hlen += th->th_off << 2;
2247 		}
2248 
2249 		if (v4) {
2250 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
2251 			cmdlen |= WTX_TCPIP_CMD_IP;
2252 		} else {
2253 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2254 			ipcse = 0;
2255 		}
2256 		cmd |= WTX_TCPIP_CMD_TSE;
2257 		cmdlen |= WTX_TCPIP_CMD_TSE |
2258 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
2259 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
2260 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
2261 	}
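	/*
	 * For TSO we have now zeroed ip_len/ip6_plen and seeded th_sum
	 * with the pseudo-header checksum (addresses and protocol
	 * only); the hardware fills in the per-segment lengths and
	 * completes the checksum for each segment it emits.
	 */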
2262 
2263 	/*
2264 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
2265 	 * offload feature, if we load the context descriptor, we
2266 	 * MUST provide valid values for IPCSS and TUCSS fields.
2267 	 */
2268 
2269 	ipcs = WTX_TCPIP_IPCSS(offset) |
2270 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
2271 	    WTX_TCPIP_IPCSE(ipcse);
2272 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
2273 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
2274 		fields |= WTX_IXSM;
2275 	}
2276 
2277 	offset += iphl;
2278 
2279 	if (m0->m_pkthdr.csum_flags &
2280 	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
2281 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2282 		fields |= WTX_TXSM;
2283 		tucs = WTX_TCPIP_TUCSS(offset) |
2284 		    WTX_TCPIP_TUCSO(offset +
2285 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
2286 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
2287 	} else if ((m0->m_pkthdr.csum_flags &
2288 	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
2289 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
2290 		fields |= WTX_TXSM;
2291 		tucs = WTX_TCPIP_TUCSS(offset) |
2292 		    WTX_TCPIP_TUCSO(offset +
2293 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
2294 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
2295 	} else {
2296 		/* Just initialize it to a valid TCP context. */
2297 		tucs = WTX_TCPIP_TUCSS(offset) |
2298 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
2299 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
2300 	}
2301 
2302 	/* Fill in the context descriptor. */
2303 	t = (struct livengood_tcpip_ctxdesc *)
2304 	    &sc->sc_txdescs[sc->sc_txnext];
2305 	t->tcpip_ipcs = htole32(ipcs);
2306 	t->tcpip_tucs = htole32(tucs);
2307 	t->tcpip_cmdlen = htole32(cmdlen);
2308 	t->tcpip_seg = htole32(seg);
2309 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
2310 
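	/* The context descriptor consumes one Tx ring slot of its own. */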
2311 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
2312 	txs->txs_ndesc++;
2313 
2314 	*cmdp = cmd;
2315 	*fieldsp = fields;
2316 
2317 	return 0;
2318 }
2319 
2320 static void
2321 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
2322 {
2323 	struct mbuf *m;
2324 	int i;
2325 
2326 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
2327 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
2328 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
2329 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
2330 		    m->m_data, m->m_len, m->m_flags);
2331 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
2332 	    i, i == 1 ? "" : "s");
2333 }
2334 
2335 /*
2336  * wm_82547_txfifo_stall:
2337  *
2338  *	Callout used to wait for the 82547 Tx FIFO to drain,
2339  *	reset the FIFO pointers, and restart packet transmission.
2340  */
2341 static void
2342 wm_82547_txfifo_stall(void *arg)
2343 {
2344 	struct wm_softc *sc = arg;
2345 	int s;
2346 
2347 	s = splnet();
2348 
2349 	if (sc->sc_txfifo_stall) {
2350 		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
2351 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
2352 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
2353 			/*
2354 			 * Packets have drained.  Stop transmitter, reset
2355 			 * FIFO pointers, restart transmitter, and kick
2356 			 * the packet queue.
2357 			 */
2358 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
2359 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
2360 			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
2361 			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
2362 			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
2363 			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
2364 			CSR_WRITE(sc, WMREG_TCTL, tctl);
2365 			CSR_WRITE_FLUSH(sc);
2366 
2367 			sc->sc_txfifo_head = 0;
2368 			sc->sc_txfifo_stall = 0;
2369 			wm_start(&sc->sc_ethercom.ec_if);
2370 		} else {
2371 			/*
2372 			 * Still waiting for packets to drain; try again in
2373 			 * another tick.
2374 			 */
2375 			callout_schedule(&sc->sc_txfifo_ch, 1);
2376 		}
2377 	}
2378 
2379 	splx(s);
2380 }
2381 
2382 static void
2383 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
2384 {
2385 	uint32_t reg;
2386 
2387 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
2388 
2389 	if (on != 0)
2390 		reg |= EXTCNFCTR_GATE_PHY_CFG;
2391 	else
2392 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
2393 
2394 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
2395 }
2396 
2397 /*
2398  * wm_82547_txfifo_bugchk:
2399  *
2400  *	Check for bug condition in the 82547 Tx FIFO.  We need to
2401  *	prevent enqueueing a packet that would wrap around the end
2402  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
2403  *
2404  *	We do this by checking the amount of space before the end
2405  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
2406  *	the Tx FIFO, wait for all remaining packets to drain, reset
2407  *	the internal FIFO pointers to the beginning, and restart
2408  *	transmission on the interface.
2409  */
2410 #define	WM_FIFO_HDR		0x10
2411 #define	WM_82547_PAD_LEN	0x3e0
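/*
 * Note that lengths are rounded up to WM_FIFO_HDR (16-byte) units and
 * include the FIFO header itself; sc_txfifo_head is our own model of
 * the hardware FIFO write pointer, wrapping modulo sc_txfifo_size.
 */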
2412 static int
2413 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2414 {
2415 	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2416 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2417 
2418 	/* Just return if already stalled. */
2419 	if (sc->sc_txfifo_stall)
2420 		return 1;
2421 
2422 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
2423 		/* Stall only occurs in half-duplex mode. */
2424 		goto send_packet;
2425 	}
2426 
2427 	if (len >= WM_82547_PAD_LEN + space) {
2428 		sc->sc_txfifo_stall = 1;
2429 		callout_schedule(&sc->sc_txfifo_ch, 1);
2430 		return 1;
2431 	}
2432 
2433  send_packet:
2434 	sc->sc_txfifo_head += len;
2435 	if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2436 		sc->sc_txfifo_head -= sc->sc_txfifo_size;
2437 
2438 	return 0;
2439 }
2440 
2441 /*
2442  * wm_start:		[ifnet interface function]
2443  *
2444  *	Start packet transmission on the interface.
2445  */
2446 static void
2447 wm_start(struct ifnet *ifp)
2448 {
2449 	struct wm_softc *sc = ifp->if_softc;
2450 	struct mbuf *m0;
2451 	struct m_tag *mtag;
2452 	struct wm_txsoft *txs;
2453 	bus_dmamap_t dmamap;
2454 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2455 	bus_addr_t curaddr;
2456 	bus_size_t seglen, curlen;
2457 	uint32_t cksumcmd;
2458 	uint8_t cksumfields;
2459 
2460 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2461 		return;
2462 
2463 	/*
2464 	 * Remember the previous number of free descriptors.
2465 	 */
2466 	ofree = sc->sc_txfree;
2467 
2468 	/*
2469 	 * Loop through the send queue, setting up transmit descriptors
2470 	 * until we drain the queue, or use up all available transmit
2471 	 * descriptors.
2472 	 */
2473 	for (;;) {
2474 		/* Grab a packet off the queue. */
2475 		IFQ_POLL(&ifp->if_snd, m0);
2476 		if (m0 == NULL)
2477 			break;
2478 
2479 		DPRINTF(WM_DEBUG_TX,
2480 		    ("%s: TX: have packet to transmit: %p\n",
2481 		    device_xname(sc->sc_dev), m0));
2482 
2483 		/* Get a work queue entry. */
2484 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2485 			wm_txintr(sc);
2486 			if (sc->sc_txsfree == 0) {
2487 				DPRINTF(WM_DEBUG_TX,
2488 				    ("%s: TX: no free job descriptors\n",
2489 					device_xname(sc->sc_dev)));
2490 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2491 				break;
2492 			}
2493 		}
2494 
2495 		txs = &sc->sc_txsoft[sc->sc_txsnext];
2496 		dmamap = txs->txs_dmamap;
2497 
2498 		use_tso = (m0->m_pkthdr.csum_flags &
2499 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2500 
2501 		/*
2502 		 * So says the Linux driver:
2503 		 * The controller does a simple calculation to make sure
2504 		 * there is enough room in the FIFO before initiating the
2505 		 * DMA for each buffer.  The calc is:
2506 		 *	4 = ceil(buffer len / MSS)
2507 		 * To make sure we don't overrun the FIFO, adjust the max
2508 		 * buffer len if the MSS drops.
2509 		 */
2510 		dmamap->dm_maxsegsz =
2511 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2512 		    ? m0->m_pkthdr.segsz << 2
2513 		    : WTX_MAX_LEN;
2514 
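		/*
		 * I.e., when doing TSO, cap each DMA segment at 4 * MSS
		 * so the controller's per-buffer ceil(len / MSS)
		 * estimate never exceeds the four it budgets for.
		 */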
2515 		/*
2516 		 * Load the DMA map.  If this fails, the packet either
2517 		 * didn't fit in the allotted number of segments, or we
2518 		 * were short on resources.  For the too-many-segments
2519 		 * case, we simply report an error and drop the packet,
2520 		 * since we can't sanely copy a jumbo packet to a single
2521 		 * buffer.
2522 		 */
2523 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2524 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2525 		if (error) {
2526 			if (error == EFBIG) {
2527 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2528 				log(LOG_ERR, "%s: Tx packet consumes too many "
2529 				    "DMA segments, dropping...\n",
2530 				    device_xname(sc->sc_dev));
2531 				IFQ_DEQUEUE(&ifp->if_snd, m0);
2532 				wm_dump_mbuf_chain(sc, m0);
2533 				m_freem(m0);
2534 				continue;
2535 			}
2536 			/*
2537 			 * Short on resources, just stop for now.
2538 			 */
2539 			DPRINTF(WM_DEBUG_TX,
2540 			    ("%s: TX: dmamap load failed: %d\n",
2541 			    device_xname(sc->sc_dev), error));
2542 			break;
2543 		}
2544 
2545 		segs_needed = dmamap->dm_nsegs;
2546 		if (use_tso) {
2547 			/* For sentinel descriptor; see below. */
2548 			segs_needed++;
2549 		}
2550 
2551 		/*
2552 		 * Ensure we have enough descriptors free to describe
2553 		 * the packet.  Note, we always reserve one descriptor
2554 		 * at the end of the ring due to the semantics of the
2555 		 * TDT register, plus one more in the event we need
2556 		 * to load offload context.
2557 		 */
2558 		if (segs_needed > sc->sc_txfree - 2) {
2559 			/*
2560 			 * Not enough free descriptors to transmit this
2561 			 * packet.  We haven't committed anything yet,
2562 			 * so just unload the DMA map, put the packet
2563 			 * back on the queue, and punt.  Notify the upper
2564 			 * layer that there are no more slots left.
2565 			 */
2566 			DPRINTF(WM_DEBUG_TX,
2567 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
2568 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
2569 			    segs_needed, sc->sc_txfree - 1));
2570 			ifp->if_flags |= IFF_OACTIVE;
2571 			bus_dmamap_unload(sc->sc_dmat, dmamap);
2572 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2573 			break;
2574 		}
2575 
2576 		/*
2577 		 * Check for 82547 Tx FIFO bug.  We need to do this
2578 		 * once we know we can transmit the packet, since we
2579 		 * do some internal FIFO space accounting here.
2580 		 */
2581 		if (sc->sc_type == WM_T_82547 &&
2582 		    wm_82547_txfifo_bugchk(sc, m0)) {
2583 			DPRINTF(WM_DEBUG_TX,
2584 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
2585 			    device_xname(sc->sc_dev)));
2586 			ifp->if_flags |= IFF_OACTIVE;
2587 			bus_dmamap_unload(sc->sc_dmat, dmamap);
2588 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2589 			break;
2590 		}
2591 
2592 		IFQ_DEQUEUE(&ifp->if_snd, m0);
2593 
2594 		/*
2595 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2596 		 */
2597 
2598 		DPRINTF(WM_DEBUG_TX,
2599 		    ("%s: TX: packet has %d (%d) DMA segments\n",
2600 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2601 
2602 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2603 
2604 		/*
2605 		 * Store a pointer to the packet so that we can free it
2606 		 * later.
2607 		 *
2608 		 * Initially, we consider the number of descriptors the
2609 		 * packet uses to be the number of DMA segments.  This
2610 		 * may be incremented by one if we do checksum offload
2611 		 * (a descriptor is used to set the checksum context).
2612 		 */
2613 		txs->txs_mbuf = m0;
2614 		txs->txs_firstdesc = sc->sc_txnext;
2615 		txs->txs_ndesc = segs_needed;
2616 
2617 		/* Set up offload parameters for this packet. */
2618 		if (m0->m_pkthdr.csum_flags &
2619 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
2620 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2621 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2622 			if (wm_tx_offload(sc, txs, &cksumcmd,
2623 					  &cksumfields) != 0) {
2624 				/* Error message already displayed. */
2625 				bus_dmamap_unload(sc->sc_dmat, dmamap);
2626 				continue;
2627 			}
2628 		} else {
2629 			cksumcmd = 0;
2630 			cksumfields = 0;
2631 		}
2632 
2633 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2634 
2635 		/* Sync the DMA map. */
2636 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2637 		    BUS_DMASYNC_PREWRITE);
2638 
2639 		/*
2640 		 * Initialize the transmit descriptor.
2641 		 */
2642 		for (nexttx = sc->sc_txnext, seg = 0;
2643 		     seg < dmamap->dm_nsegs; seg++) {
2644 			for (seglen = dmamap->dm_segs[seg].ds_len,
2645 			     curaddr = dmamap->dm_segs[seg].ds_addr;
2646 			     seglen != 0;
2647 			     curaddr += curlen, seglen -= curlen,
2648 			     nexttx = WM_NEXTTX(sc, nexttx)) {
2649 				curlen = seglen;
2650 
2651 				/*
2652 				 * So says the Linux driver:
2653 				 * Work around for premature descriptor
2654 				 * write-backs in TSO mode.  Append a
2655 				 * 4-byte sentinel descriptor.
2656 				 */
2657 				if (use_tso &&
2658 				    seg == dmamap->dm_nsegs - 1 &&
2659 				    curlen > 8)
2660 					curlen -= 4;
2661 
2662 				wm_set_dma_addr(
2663 				    &sc->sc_txdescs[nexttx].wtx_addr,
2664 				    curaddr);
2665 				sc->sc_txdescs[nexttx].wtx_cmdlen =
2666 				    htole32(cksumcmd | curlen);
2667 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2668 				    0;
2669 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2670 				    cksumfields;
2671 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2672 				lasttx = nexttx;
2673 
2674 				DPRINTF(WM_DEBUG_TX,
2675 				    ("%s: TX: desc %d: low %#" PRIxPADDR ", "
2676 				     "len %#04zx\n",
2677 				    device_xname(sc->sc_dev), nexttx,
2678 				    curaddr & 0xffffffffUL, curlen));
2679 			}
2680 		}
2681 
2682 		KASSERT(lasttx != -1);
2683 
2684 		/*
2685 		 * Set up the command byte on the last descriptor of
2686 		 * the packet.  If we're in the interrupt delay window,
2687 		 * delay the interrupt.
2688 		 */
2689 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
2690 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
2691 
2692 		/*
2693 		 * If VLANs are enabled and the packet has a VLAN tag, set
2694 		 * up the descriptor to encapsulate the packet for us.
2695 		 *
2696 		 * This is only valid on the last descriptor of the packet.
2697 		 */
2698 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2699 			sc->sc_txdescs[lasttx].wtx_cmdlen |=
2700 			    htole32(WTX_CMD_VLE);
2701 			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2702 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2703 		}
2704 
2705 		txs->txs_lastdesc = lasttx;
2706 
2707 		DPRINTF(WM_DEBUG_TX,
2708 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
2709 		    device_xname(sc->sc_dev),
2710 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2711 
2712 		/* Sync the descriptors we're using. */
2713 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2714 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2715 
2716 		/* Give the packet to the chip. */
2717 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2718 
2719 		DPRINTF(WM_DEBUG_TX,
2720 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2721 
2722 		DPRINTF(WM_DEBUG_TX,
2723 		    ("%s: TX: finished transmitting packet, job %d\n",
2724 		    device_xname(sc->sc_dev), sc->sc_txsnext));
2725 
2726 		/* Advance the tx pointer. */
2727 		sc->sc_txfree -= txs->txs_ndesc;
2728 		sc->sc_txnext = nexttx;
2729 
2730 		sc->sc_txsfree--;
2731 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2732 
2733 		/* Pass the packet to any BPF listeners. */
2734 		bpf_mtap(ifp, m0);
2735 	}
2736 
2737 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2738 		/* No more slots; notify upper layer. */
2739 		ifp->if_flags |= IFF_OACTIVE;
2740 	}
2741 
2742 	if (sc->sc_txfree != ofree) {
2743 		/* Set a watchdog timer in case the chip flakes out. */
2744 		ifp->if_timer = 5;
2745 	}
2746 }
2747 
2748 /*
2749  * wm_watchdog:		[ifnet interface function]
2750  *
2751  *	Watchdog timer handler.
2752  */
2753 static void
2754 wm_watchdog(struct ifnet *ifp)
2755 {
2756 	struct wm_softc *sc = ifp->if_softc;
2757 
2758 	/*
2759 	 * Since we're using delayed interrupts, sweep up
2760 	 * before we report an error.
2761 	 */
2762 	wm_txintr(sc);
2763 
2764 	if (sc->sc_txfree != WM_NTXDESC(sc)) {
2765 		log(LOG_ERR,
2766 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2767 		    device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2768 		    sc->sc_txnext);
2769 		ifp->if_oerrors++;
2770 
2771 		/* Reset the interface. */
2772 		(void) wm_init(ifp);
2773 	}
2774 
2775 	/* Try to get more packets going. */
2776 	wm_start(ifp);
2777 }
2778 
2779 static int
2780 wm_ifflags_cb(struct ethercom *ec)
2781 {
2782 	struct ifnet *ifp = &ec->ec_if;
2783 	struct wm_softc *sc = ifp->if_softc;
2784 	int change = ifp->if_flags ^ sc->sc_if_flags;
2785 
2786 	if (change != 0)
2787 		sc->sc_if_flags = ifp->if_flags;
2788 
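	/*
	 * Any flag change we cannot apply on the fly forces a full
	 * reinit through the ENETRESET convention.
	 */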
2789 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
2790 		return ENETRESET;
2791 
2792 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2793 		wm_set_filter(sc);
2794 
2795 	wm_set_vlan(sc);
2796 
2797 	return 0;
2798 }
2799 
2800 /*
2801  * wm_ioctl:		[ifnet interface function]
2802  *
2803  *	Handle control requests from the operator.
2804  */
2805 static int
2806 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2807 {
2808 	struct wm_softc *sc = ifp->if_softc;
2809 	struct ifreq *ifr = (struct ifreq *) data;
2810 	struct ifaddr *ifa = (struct ifaddr *)data;
2811 	struct sockaddr_dl *sdl;
2812 	int s, error;
2813 
2814 	s = splnet();
2815 
2816 	switch (cmd) {
2817 	case SIOCSIFMEDIA:
2818 	case SIOCGIFMEDIA:
2819 		/* Flow control requires full-duplex mode. */
2820 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2821 		    (ifr->ifr_media & IFM_FDX) == 0)
2822 			ifr->ifr_media &= ~IFM_ETH_FMASK;
2823 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2824 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2825 				/* We can do both TXPAUSE and RXPAUSE. */
2826 				ifr->ifr_media |=
2827 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2828 			}
2829 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2830 		}
2831 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2832 		break;
2833 	case SIOCINITIFADDR:
2834 		if (ifa->ifa_addr->sa_family == AF_LINK) {
2835 			sdl = satosdl(ifp->if_dl->ifa_addr);
2836 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2837 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2838 			/* unicast address is first multicast entry */
2839 			wm_set_filter(sc);
2840 			error = 0;
2841 			break;
2842 		}
2843 		/*FALLTHROUGH*/
2844 	default:
2845 		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
2846 			break;
2847 
2848 		error = 0;
2849 
2850 		if (cmd == SIOCSIFCAP)
2851 			error = (*ifp->if_init)(ifp);
2852 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2853 			;
2854 		else if (ifp->if_flags & IFF_RUNNING) {
2855 			/*
2856 			 * Multicast list has changed; set the hardware filter
2857 			 * accordingly.
2858 			 */
2859 			wm_set_filter(sc);
2860 		}
2861 		break;
2862 	}
2863 
2864 	/* Try to get more packets going. */
2865 	wm_start(ifp);
2866 
2867 	splx(s);
2868 	return error;
2869 }
2870 
2871 /*
2872  * wm_intr:
2873  *
2874  *	Interrupt service routine.
2875  */
2876 static int
2877 wm_intr(void *arg)
2878 {
2879 	struct wm_softc *sc = arg;
2880 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2881 	uint32_t icr;
2882 	int handled = 0;
2883 
2884 	while (1 /* CONSTCOND */) {
2885 		icr = CSR_READ(sc, WMREG_ICR);
2886 		if ((icr & sc->sc_icr) == 0)
2887 			break;
2888 #if 0 /*NRND > 0*/
2889 		if (RND_ENABLED(&sc->rnd_source))
2890 			rnd_add_uint32(&sc->rnd_source, icr);
2891 #endif
2892 
2893 		handled = 1;
2894 
2895 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2896 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
2897 			DPRINTF(WM_DEBUG_RX,
2898 			    ("%s: RX: got Rx intr 0x%08x\n",
2899 			    device_xname(sc->sc_dev),
2900 			    icr & (ICR_RXDMT0|ICR_RXT0)));
2901 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
2902 		}
2903 #endif
2904 		wm_rxintr(sc);
2905 
2906 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2907 		if (icr & ICR_TXDW) {
2908 			DPRINTF(WM_DEBUG_TX,
2909 			    ("%s: TX: got TXDW interrupt\n",
2910 			    device_xname(sc->sc_dev)));
2911 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
2912 		}
2913 #endif
2914 		wm_txintr(sc);
2915 
2916 		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
2917 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
2918 			wm_linkintr(sc, icr);
2919 		}
2920 
2921 		if (icr & ICR_RXO) {
2922 #if defined(WM_DEBUG)
2923 			log(LOG_WARNING, "%s: Receive overrun\n",
2924 			    device_xname(sc->sc_dev));
2925 #endif /* defined(WM_DEBUG) */
2926 		}
2927 	}
2928 
2929 	if (handled) {
2930 		/* Try to get more packets going. */
2931 		wm_start(ifp);
2932 	}
2933 
2934 	return handled;
2935 }
2936 
2937 /*
2938  * wm_txintr:
2939  *
2940  *	Helper; handle transmit interrupts.
2941  */
2942 static void
2943 wm_txintr(struct wm_softc *sc)
2944 {
2945 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2946 	struct wm_txsoft *txs;
2947 	uint8_t status;
2948 	int i;
2949 
2950 	ifp->if_flags &= ~IFF_OACTIVE;
2951 
2952 	/*
2953 	 * Go through the Tx list and free mbufs for those
2954 	 * frames which have been transmitted.
2955 	 */
2956 	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
2957 	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
2958 		txs = &sc->sc_txsoft[i];
2959 
2960 		DPRINTF(WM_DEBUG_TX,
2961 		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
2962 
2963 		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
2964 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2965 
2966 		status =
2967 		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
2968 		if ((status & WTX_ST_DD) == 0) {
2969 			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
2970 			    BUS_DMASYNC_PREREAD);
2971 			break;
2972 		}
2973 
2974 		DPRINTF(WM_DEBUG_TX,
2975 		    ("%s: TX: job %d done: descs %d..%d\n",
2976 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
2977 		    txs->txs_lastdesc));
2978 
2979 		/*
2980 		 * XXX We should probably be using the statistics
2981 		 * XXX registers, but I don't know if they exist
2982 		 * XXX on chips before the i82544.
2983 		 */
2984 
2985 #ifdef WM_EVENT_COUNTERS
2986 		if (status & WTX_ST_TU)
2987 			WM_EVCNT_INCR(&sc->sc_ev_tu);
2988 #endif /* WM_EVENT_COUNTERS */
2989 
2990 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
2991 			ifp->if_oerrors++;
2992 			if (status & WTX_ST_LC)
2993 				log(LOG_WARNING, "%s: late collision\n",
2994 				    device_xname(sc->sc_dev));
2995 			else if (status & WTX_ST_EC) {
2996 				ifp->if_collisions += 16;
2997 				log(LOG_WARNING, "%s: excessive collisions\n",
2998 				    device_xname(sc->sc_dev));
2999 			}
3000 		} else
3001 			ifp->if_opackets++;
3002 
3003 		sc->sc_txfree += txs->txs_ndesc;
3004 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
3005 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3006 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3007 		m_freem(txs->txs_mbuf);
3008 		txs->txs_mbuf = NULL;
3009 	}
3010 
3011 	/* Update the dirty transmit buffer pointer. */
3012 	sc->sc_txsdirty = i;
3013 	DPRINTF(WM_DEBUG_TX,
3014 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
3015 
3016 	/*
3017 	 * If there are no more pending transmissions, cancel the watchdog
3018 	 * timer.
3019 	 */
3020 	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
3021 		ifp->if_timer = 0;
3022 }
3023 
3024 /*
3025  * wm_rxintr:
3026  *
3027  *	Helper; handle receive interrupts.
3028  */
3029 static void
3030 wm_rxintr(struct wm_softc *sc)
3031 {
3032 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3033 	struct wm_rxsoft *rxs;
3034 	struct mbuf *m;
3035 	int i, len;
3036 	uint8_t status, errors;
3037 	uint16_t vlantag;
3038 
3039 	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
3040 		rxs = &sc->sc_rxsoft[i];
3041 
3042 		DPRINTF(WM_DEBUG_RX,
3043 		    ("%s: RX: checking descriptor %d\n",
3044 		    device_xname(sc->sc_dev), i));
3045 
3046 		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3047 
3048 		status = sc->sc_rxdescs[i].wrx_status;
3049 		errors = sc->sc_rxdescs[i].wrx_errors;
3050 		len = le16toh(sc->sc_rxdescs[i].wrx_len);
3051 		vlantag = sc->sc_rxdescs[i].wrx_special;
3052 
3053 		if ((status & WRX_ST_DD) == 0) {
3054 			/*
3055 			 * We have processed all of the receive descriptors.
3056 			 */
3057 			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
3058 			break;
3059 		}
3060 
3061 		if (__predict_false(sc->sc_rxdiscard)) {
3062 			DPRINTF(WM_DEBUG_RX,
3063 			    ("%s: RX: discarding contents of descriptor %d\n",
3064 			    device_xname(sc->sc_dev), i));
3065 			WM_INIT_RXDESC(sc, i);
3066 			if (status & WRX_ST_EOP) {
3067 				/* Reset our state. */
3068 				DPRINTF(WM_DEBUG_RX,
3069 				    ("%s: RX: resetting rxdiscard -> 0\n",
3070 				    device_xname(sc->sc_dev)));
3071 				sc->sc_rxdiscard = 0;
3072 			}
3073 			continue;
3074 		}
3075 
3076 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3077 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3078 
3079 		m = rxs->rxs_mbuf;
3080 
3081 		/*
3082 		 * Add a new receive buffer to the ring, unless of
3083 		 * course the length is zero. Treat the latter as a
3084 		 * failed mapping.
3085 		 */
3086 		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
3087 			/*
3088 			 * Failed, throw away what we've done so
3089 			 * far, and discard the rest of the packet.
3090 			 */
3091 			ifp->if_ierrors++;
3092 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3093 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3094 			WM_INIT_RXDESC(sc, i);
3095 			if ((status & WRX_ST_EOP) == 0)
3096 				sc->sc_rxdiscard = 1;
3097 			if (sc->sc_rxhead != NULL)
3098 				m_freem(sc->sc_rxhead);
3099 			WM_RXCHAIN_RESET(sc);
3100 			DPRINTF(WM_DEBUG_RX,
3101 			    ("%s: RX: Rx buffer allocation failed, "
3102 			    "dropping packet%s\n", device_xname(sc->sc_dev),
3103 			    sc->sc_rxdiscard ? " (discard)" : ""));
3104 			continue;
3105 		}
3106 
3107 		m->m_len = len;
3108 		sc->sc_rxlen += len;
3109 		DPRINTF(WM_DEBUG_RX,
3110 		    ("%s: RX: buffer at %p len %d\n",
3111 		    device_xname(sc->sc_dev), m->m_data, len));
3112 
3113 		/*
3114 		 * If this is not the end of the packet, keep
3115 		 * looking.
3116 		 */
3117 		if ((status & WRX_ST_EOP) == 0) {
3118 			WM_RXCHAIN_LINK(sc, m);
3119 			DPRINTF(WM_DEBUG_RX,
3120 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
3121 			    device_xname(sc->sc_dev), sc->sc_rxlen));
3122 			continue;
3123 		}
3124 
3125 		/*
3126 		 * Okay, we have the entire packet now.  The chip is
3127 		 * configured to include the FCS (not all chips can
3128 		 * be configured to strip it), so we need to trim it.
3129 		 * May need to adjust length of previous mbuf in the
3130 		 * chain if the current mbuf is too short.
3131 		 */
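		/*
		 * For example, if only two bytes of the 4-byte FCS
		 * landed in this mbuf, zero its length and trim the
		 * other two bytes from the tail of the previous mbuf.
		 */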
3132 		if (m->m_len < ETHER_CRC_LEN) {
3133 			sc->sc_rxtail->m_len -= (ETHER_CRC_LEN - m->m_len);
3134 			m->m_len = 0;
3135 		} else {
3136 			m->m_len -= ETHER_CRC_LEN;
3137 		}
3138 		len = sc->sc_rxlen - ETHER_CRC_LEN;
3139 
3140 		WM_RXCHAIN_LINK(sc, m);
3141 
3142 		*sc->sc_rxtailp = NULL;
3143 		m = sc->sc_rxhead;
3144 
3145 		WM_RXCHAIN_RESET(sc);
3146 
3147 		DPRINTF(WM_DEBUG_RX,
3148 		    ("%s: RX: have entire packet, len -> %d\n",
3149 		    device_xname(sc->sc_dev), len));
3150 
3151 		/*
3152 		 * If an error occurred, update stats and drop the packet.
3153 		 */
3154 		if (errors &
3155 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
3156 			if (errors & WRX_ER_SE)
3157 				log(LOG_WARNING, "%s: symbol error\n",
3158 				    device_xname(sc->sc_dev));
3159 			else if (errors & WRX_ER_SEQ)
3160 				log(LOG_WARNING, "%s: receive sequence error\n",
3161 				    device_xname(sc->sc_dev));
3162 			else if (errors & WRX_ER_CE)
3163 				log(LOG_WARNING, "%s: CRC error\n",
3164 				    device_xname(sc->sc_dev));
3165 			m_freem(m);
3166 			continue;
3167 		}
3168 
3169 		/*
3170 		 * No errors.  Receive the packet.
3171 		 */
3172 		m->m_pkthdr.rcvif = ifp;
3173 		m->m_pkthdr.len = len;
3174 
3175 		/*
3176 		 * If VLANs are enabled, VLAN packets have been unwrapped
3177 		 * for us.  Associate the tag with the packet.
3178 		 */
3179 		if ((status & WRX_ST_VP) != 0) {
3180 			VLAN_INPUT_TAG(ifp, m,
3181 			    le16toh(vlantag),
3182 			    continue);
3183 		}
3184 
3185 		/*
3186 		 * Set up checksum info for this packet.
3187 		 */
3188 		if ((status & WRX_ST_IXSM) == 0) {
3189 			if (status & WRX_ST_IPCS) {
3190 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
3191 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
3192 				if (errors & WRX_ER_IPE)
3193 					m->m_pkthdr.csum_flags |=
3194 					    M_CSUM_IPv4_BAD;
3195 			}
3196 			if (status & WRX_ST_TCPCS) {
3197 				/*
3198 				 * Note: we don't know if this was TCP or UDP,
3199 				 * so we just set both bits, and expect the
3200 				 * upper layers to deal.
3201 				 */
3202 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
3203 				m->m_pkthdr.csum_flags |=
3204 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
3205 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
3206 				if (errors & WRX_ER_TCPE)
3207 					m->m_pkthdr.csum_flags |=
3208 					    M_CSUM_TCP_UDP_BAD;
3209 			}
3210 		}
3211 
3212 		ifp->if_ipackets++;
3213 
3214 		/* Pass this up to any BPF listeners. */
3215 		bpf_mtap(ifp, m);
3216 
3217 		/* Pass it on. */
3218 		(*ifp->if_input)(ifp, m);
3219 	}
3220 
3221 	/* Update the receive pointer. */
3222 	sc->sc_rxptr = i;
3223 
3224 	DPRINTF(WM_DEBUG_RX,
3225 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
3226 }
3227 
3228 /*
3229  * wm_linkintr_gmii:
3230  *
3231  *	Helper; handle link interrupts for GMII.
3232  */
3233 static void
3234 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
3235 {
3236 
3237 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3238 		__func__));
3239 
3240 	if (icr & ICR_LSC) {
3241 		DPRINTF(WM_DEBUG_LINK,
3242 		    ("%s: LINK: LSC -> mii_tick\n",
3243 			device_xname(sc->sc_dev)));
3244 		mii_tick(&sc->sc_mii);
3245 		if (sc->sc_type == WM_T_82543) {
3246 			int miistatus, active;
3247 
3248 			/*
3249 			 * With the 82543 we must force the MAC's speed
3250 			 * and duplex settings to match the PHY's
3251 			 * configuration.
3252 			 */
3253 			miistatus = sc->sc_mii.mii_media_status;
3254 
3255 			if (miistatus & IFM_ACTIVE) {
3256 				active = sc->sc_mii.mii_media_active;
3257 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
3258 				switch (IFM_SUBTYPE(active)) {
3259 				case IFM_10_T:
3260 					sc->sc_ctrl |= CTRL_SPEED_10;
3261 					break;
3262 				case IFM_100_TX:
3263 					sc->sc_ctrl |= CTRL_SPEED_100;
3264 					break;
3265 				case IFM_1000_T:
3266 					sc->sc_ctrl |= CTRL_SPEED_1000;
3267 					break;
3268 				default:
3269 					/*
3270 					 * fiber?
3271 					 * Fiber?
3272 					 * Should not get here.
3273 					printf("unknown media (%x)\n",
3274 					    active);
3275 					break;
3276 				}
3277 				if (active & IFM_FDX)
3278 					sc->sc_ctrl |= CTRL_FD;
3279 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3280 			}
3281 		} else if ((sc->sc_type == WM_T_ICH8)
3282 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
3283 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
3284 		} else if (sc->sc_type == WM_T_PCH) {
3285 			wm_k1_gig_workaround_hv(sc,
3286 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
3287 		}
3288 
3289 		if ((sc->sc_phytype == WMPHY_82578)
3290 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
3291 			== IFM_1000_T)) {
3292 
3293 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
3294 				delay(200*1000); /* XXX too big */
3295 
3296 				/* Link stall fix for link up */
3297 				wm_gmii_hv_writereg(sc->sc_dev, 1,
3298 				    HV_MUX_DATA_CTRL,
3299 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
3300 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
3301 				wm_gmii_hv_writereg(sc->sc_dev, 1,
3302 				    HV_MUX_DATA_CTRL,
3303 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
3304 			}
3305 		}
3306 	} else if (icr & ICR_RXSEQ) {
3307 		DPRINTF(WM_DEBUG_LINK,
3308 		    ("%s: LINK Receive sequence error\n",
3309 			device_xname(sc->sc_dev)));
3310 	}
3311 }
3312 
3313 /*
3314  * wm_linkintr_tbi:
3315  *
3316  *	Helper; handle link interrupts for TBI mode.
3317  */
3318 static void
3319 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
3320 {
3321 	uint32_t status;
3322 
3323 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3324 		__func__));
3325 
3326 	status = CSR_READ(sc, WMREG_STATUS);
3327 	if (icr & ICR_LSC) {
3328 		if (status & STATUS_LU) {
3329 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
3330 			    device_xname(sc->sc_dev),
3331 			    (status & STATUS_FD) ? "FDX" : "HDX"));
3332 			/*
3333 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
3334 			 * automatically, so re-read CTRL into sc->sc_ctrl.
3335 			 */
3336 
3337 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3338 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3339 			sc->sc_fcrtl &= ~FCRTL_XONE;
3340 			if (status & STATUS_FD)
3341 				sc->sc_tctl |=
3342 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3343 			else
3344 				sc->sc_tctl |=
3345 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3346 			if (sc->sc_ctrl & CTRL_TFCE)
3347 				sc->sc_fcrtl |= FCRTL_XONE;
3348 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3349 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3350 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
3351 				      sc->sc_fcrtl);
3352 			sc->sc_tbi_linkup = 1;
3353 		} else {
3354 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
3355 			    device_xname(sc->sc_dev)));
3356 			sc->sc_tbi_linkup = 0;
3357 		}
3358 		wm_tbi_set_linkled(sc);
3359 	} else if (icr & ICR_RXCFG) {
3360 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
3361 		    device_xname(sc->sc_dev)));
3362 		sc->sc_tbi_nrxcfg++;
3363 		wm_check_for_link(sc);
3364 	} else if (icr & ICR_RXSEQ) {
3365 		DPRINTF(WM_DEBUG_LINK,
3366 		    ("%s: LINK: Receive sequence error\n",
3367 		    device_xname(sc->sc_dev)));
3368 	}
3369 }
3370 
3371 /*
3372  * wm_linkintr:
3373  *
3374  *	Helper; handle link interrupts.
3375  */
3376 static void
3377 wm_linkintr(struct wm_softc *sc, uint32_t icr)
3378 {
3379 
3380 	if (sc->sc_flags & WM_F_HAS_MII)
3381 		wm_linkintr_gmii(sc, icr);
3382 	else
3383 		wm_linkintr_tbi(sc, icr);
3384 }
3385 
3386 /*
3387  * wm_tick:
3388  *
3389  *	One second timer, used to check link status, sweep up
3390  *	completed transmit jobs, etc.
3391  */
3392 static void
3393 wm_tick(void *arg)
3394 {
3395 	struct wm_softc *sc = arg;
3396 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3397 	int s;
3398 
3399 	s = splnet();
3400 
3401 	if (sc->sc_type >= WM_T_82542_2_1) {
3402 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3403 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3404 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3405 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3406 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3407 	}
3408 
3409 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3410 	ifp->if_ierrors += 0ULL /* ensure quad_t */
3411 	    + CSR_READ(sc, WMREG_CRCERRS)
3412 	    + CSR_READ(sc, WMREG_ALGNERRC)
3413 	    + CSR_READ(sc, WMREG_SYMERRC)
3414 	    + CSR_READ(sc, WMREG_RXERRC)
3415 	    + CSR_READ(sc, WMREG_SEC)
3416 	    + CSR_READ(sc, WMREG_CEXTERR)
3417 	    + CSR_READ(sc, WMREG_RLEC);
3418 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
3419 
3420 	if (sc->sc_flags & WM_F_HAS_MII)
3421 		mii_tick(&sc->sc_mii);
3422 	else
3423 		wm_tbi_check_link(sc);
3424 
3425 	splx(s);
3426 
3427 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3428 }
3429 
3430 /*
3431  * wm_reset:
3432  *
3433  *	Reset the i82542 chip.
3434  */
3435 static void
3436 wm_reset(struct wm_softc *sc)
3437 {
3438 	int phy_reset = 0;
3439 	uint32_t reg, mask;
3440 	int i;
3441 
3442 	/*
3443 	 * Allocate on-chip memory according to the MTU size.
3444 	 * The Packet Buffer Allocation register must be written
3445 	 * before the chip is reset.
3446 	 */
3447 	switch (sc->sc_type) {
3448 	case WM_T_82547:
3449 	case WM_T_82547_2:
3450 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3451 		    PBA_22K : PBA_30K;
3452 		sc->sc_txfifo_head = 0;
3453 		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3454 		sc->sc_txfifo_size =
3455 		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3456 		sc->sc_txfifo_stall = 0;
3457 		break;
3458 	case WM_T_82571:
3459 	case WM_T_82572:
3460 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
3461 	case WM_T_80003:
3462 		sc->sc_pba = PBA_32K;
3463 		break;
3464 	case WM_T_82580:
3465 	case WM_T_82580ER:
3466 		sc->sc_pba = PBA_35K;
3467 		break;
3468 	case WM_T_82576:
3469 		sc->sc_pba = PBA_64K;
3470 		break;
3471 	case WM_T_82573:
3472 		sc->sc_pba = PBA_12K;
3473 		break;
3474 	case WM_T_82574:
3475 	case WM_T_82583:
3476 		sc->sc_pba = PBA_20K;
3477 		break;
3478 	case WM_T_ICH8:
3479 		sc->sc_pba = PBA_8K;
3480 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3481 		break;
3482 	case WM_T_ICH9:
3483 	case WM_T_ICH10:
3484 		sc->sc_pba = PBA_10K;
3485 		break;
3486 	case WM_T_PCH:
3487 	case WM_T_PCH2:
3488 		sc->sc_pba = PBA_26K;
3489 		break;
3490 	default:
3491 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3492 		    PBA_40K : PBA_48K;
3493 		break;
3494 	}
3495 	CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
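
	/*
	 * Worked example (numbers ours, assuming the PBA_* constants count
	 * 1KB blocks, as PBA_BYTE_SHIFT == 10 suggests): on an 82547 at
	 * standard MTU the switch above picks PBA_30K, i.e. 30KB of the
	 * 40KB packet buffer for receive.  sc_txfifo_size then becomes
	 * (PBA_40K - PBA_30K) blocks shifted into bytes -- 10KB for the
	 * transmit FIFO -- and sc_txfifo_addr points at the first byte
	 * after the receive region.
	 */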
3496 
3497 	/* Prevent the PCI-E bus from sticking */
3498 	if (sc->sc_flags & WM_F_PCIE) {
3499 		int timeout = 800;
3500 
3501 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
3502 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3503 
3504 		while (timeout--) {
3505 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
3506 				break;
3507 			delay(100);
3508 		}
3509 	}
3510 
3511 	/* Set the completion timeout for interface */
3512 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
3513 		wm_set_pcie_completion_timeout(sc);
3514 
3515 	/* Clear interrupt */
3516 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3517 
3518 	/* Stop the transmit and receive processes. */
3519 	CSR_WRITE(sc, WMREG_RCTL, 0);
3520 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3521 	sc->sc_rctl &= ~RCTL_EN;
3522 
3523 	/* XXX set_tbi_sbp_82543() */
3524 
3525 	delay(10*1000);
3526 
3527 	/* Must acquire the MDIO ownership before MAC reset */
3528 	switch (sc->sc_type) {
3529 	case WM_T_82573:
3530 	case WM_T_82574:
3531 	case WM_T_82583:
3532 		i = 0;
3533 		reg = CSR_READ(sc, WMREG_EXTCNFCTR)
3534 		    | EXTCNFCTR_MDIO_SW_OWNERSHIP;
3535 		do {
3536 			CSR_WRITE(sc, WMREG_EXTCNFCTR,
3537 			    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
3538 			reg = CSR_READ(sc, WMREG_EXTCNFCTR);
3539 			if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
3540 				break;
3541 			reg |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
3542 			delay(2*1000);
3543 			i++;
3544 		} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
3545 		break;
3546 	default:
3547 		break;
3548 	}
3549 
3550 	/*
3551 	 * 82541 Errata 29? & 82547 Errata 28?
3552 	 * See also the description about PHY_RST bit in CTRL register
3553 	 * in 8254x_GBe_SDM.pdf.
3554 	 */
3555 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3556 		CSR_WRITE(sc, WMREG_CTRL,
3557 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3558 		delay(5000);
3559 	}
3560 
3561 	switch (sc->sc_type) {
3562 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3563 	case WM_T_82541:
3564 	case WM_T_82541_2:
3565 	case WM_T_82547:
3566 	case WM_T_82547_2:
3567 		/*
3568 		 * On some chipsets, a reset through a memory-mapped write
3569 		 * cycle can cause the chip to reset before completing the
3570 		 * write cycle.  This causes major headache that can be
3571 		 * avoided by issuing the reset via indirect register writes
3572 		 * through I/O space.
3573 		 *
3574 		 * So, if we successfully mapped the I/O BAR at attach time,
3575 		 * use that.  Otherwise, try our luck with a memory-mapped
3576 		 * reset.
3577 		 */
3578 		if (sc->sc_flags & WM_F_IOH_VALID)
3579 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3580 		else
3581 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3582 		break;
3583 	case WM_T_82545_3:
3584 	case WM_T_82546_3:
3585 		/* Use the shadow control register on these chips. */
3586 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3587 		break;
3588 	case WM_T_80003:
3589 		mask = swfwphysem[sc->sc_funcid];
3590 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3591 		wm_get_swfw_semaphore(sc, mask);
3592 		CSR_WRITE(sc, WMREG_CTRL, reg);
3593 		wm_put_swfw_semaphore(sc, mask);
3594 		break;
3595 	case WM_T_ICH8:
3596 	case WM_T_ICH9:
3597 	case WM_T_ICH10:
3598 	case WM_T_PCH:
3599 	case WM_T_PCH2:
3600 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3601 		if (wm_check_reset_block(sc) == 0) {
3602 			/*
3603 			 * Gate automatic PHY configuration by hardware on
3604 			 * managed 82579
3605 			 */
3606 			if ((sc->sc_type == WM_T_PCH2)
3607 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3608 				!= 0))
3609 				wm_gate_hw_phy_config_ich8lan(sc, 1);
3610 
3612 			reg |= CTRL_PHY_RESET;
3613 			phy_reset = 1;
3614 		}
3615 		wm_get_swfwhw_semaphore(sc);
3616 		CSR_WRITE(sc, WMREG_CTRL, reg);
3617 		delay(20*1000);
3618 		wm_put_swfwhw_semaphore(sc);
3619 		break;
3620 	case WM_T_82542_2_0:
3621 	case WM_T_82542_2_1:
3622 	case WM_T_82543:
3623 	case WM_T_82540:
3624 	case WM_T_82545:
3625 	case WM_T_82546:
3626 	case WM_T_82571:
3627 	case WM_T_82572:
3628 	case WM_T_82573:
3629 	case WM_T_82574:
3630 	case WM_T_82575:
3631 	case WM_T_82576:
3632 	case WM_T_82580:
3633 	case WM_T_82580ER:
3634 	case WM_T_82583:
3635 	default:
3636 		/* Everything else can safely use the documented method. */
3637 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3638 		break;
3639 	}
3640 
3641 	if (phy_reset != 0)
3642 		wm_get_cfg_done(sc);
3643 
3644 	/* reload EEPROM */
3645 	switch (sc->sc_type) {
3646 	case WM_T_82542_2_0:
3647 	case WM_T_82542_2_1:
3648 	case WM_T_82543:
3649 	case WM_T_82544:
3650 		delay(10);
3651 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3652 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3653 		delay(2000);
3654 		break;
3655 	case WM_T_82540:
3656 	case WM_T_82545:
3657 	case WM_T_82545_3:
3658 	case WM_T_82546:
3659 	case WM_T_82546_3:
3660 		delay(5*1000);
3661 		/* XXX Disable HW ARPs on ASF enabled adapters */
3662 		break;
3663 	case WM_T_82541:
3664 	case WM_T_82541_2:
3665 	case WM_T_82547:
3666 	case WM_T_82547_2:
3667 		delay(20000);
3668 		/* XXX Disable HW ARPs on ASF enabled adapters */
3669 		break;
3670 	case WM_T_82571:
3671 	case WM_T_82572:
3672 	case WM_T_82573:
3673 	case WM_T_82574:
3674 	case WM_T_82583:
3675 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3676 			delay(10);
3677 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3678 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3679 		}
3680 		/* check EECD_EE_AUTORD */
3681 		wm_get_auto_rd_done(sc);
3682 		/*
3683 		 * PHY configuration from the NVM starts only after EECD_AUTO_RD
3684 		 * is set.
3685 		 */
3686 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3687 		    || (sc->sc_type == WM_T_82583))
3688 			delay(25*1000);
3689 		break;
3690 	case WM_T_82575:
3691 	case WM_T_82576:
3692 	case WM_T_82580:
3693 	case WM_T_82580ER:
3694 	case WM_T_80003:
3695 	case WM_T_ICH8:
3696 	case WM_T_ICH9:
3697 		/* check EECD_EE_AUTORD */
3698 		wm_get_auto_rd_done(sc);
3699 		break;
3700 	case WM_T_ICH10:
3701 	case WM_T_PCH:
3702 	case WM_T_PCH2:
3703 		wm_lan_init_done(sc);
3704 		break;
3705 	default:
3706 		panic("%s: unknown type\n", __func__);
3707 	}
3708 
3709 	/* Check whether EEPROM is present or not */
3710 	switch (sc->sc_type) {
3711 	case WM_T_82575:
3712 	case WM_T_82576:
3713 #if 0 /* XXX */
3714 	case WM_T_82580:
3715 	case WM_T_82580ER:
3716 #endif
3717 	case WM_T_ICH8:
3718 	case WM_T_ICH9:
3719 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3720 			/* Not found */
3721 			sc->sc_flags |= WM_F_EEPROM_INVALID;
3722 			if ((sc->sc_type == WM_T_82575)
3723 			    || (sc->sc_type == WM_T_82576)
3724 			    || (sc->sc_type == WM_T_82580)
3725 			    || (sc->sc_type == WM_T_82580ER))
3726 				wm_reset_init_script_82575(sc);
3727 		}
3728 		break;
3729 	default:
3730 		break;
3731 	}
3732 
3733 	if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)) {
3734 		/* clear global device reset status bit */
3735 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
3736 	}
3737 
3738 	/* Clear any pending interrupt events. */
3739 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3740 	reg = CSR_READ(sc, WMREG_ICR);
3741 
3742 	/* reload sc_ctrl */
3743 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3744 
3745 	/* dummy read from WUC */
3746 	if (sc->sc_type == WM_T_PCH)
3747 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3748 	/*
3749 	 * For PCH, this write ensures that any noise is detected as a
3750 	 * CRC error and dropped, rather than presented to the DMA engine
3751 	 * as a bad packet.
3752 	 */
3753 	if (sc->sc_type == WM_T_PCH)
3754 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3755 
3756 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3757 		CSR_WRITE(sc, WMREG_WUC, 0);
3758 
3759 	/* XXX need special handling for 82580 */
3760 }
3761 
3762 static void
3763 wm_set_vlan(struct wm_softc *sc)
3764 {
3765 	/* Deal with VLAN enables. */
3766 	if (VLAN_ATTACHED(&sc->sc_ethercom))
3767 		sc->sc_ctrl |= CTRL_VME;
3768 	else
3769 		sc->sc_ctrl &= ~CTRL_VME;
3770 
3771 	/* Write the control registers. */
3772 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3773 }
3774 
3775 /*
3776  * wm_init:		[ifnet interface function]
3777  *
3778  *	Initialize the interface.  Must be called at splnet().
3779  */
3780 static int
3781 wm_init(struct ifnet *ifp)
3782 {
3783 	struct wm_softc *sc = ifp->if_softc;
3784 	struct wm_rxsoft *rxs;
3785 	int i, error = 0;
3786 	uint32_t reg;
3787 
3788 	/*
3789 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3790 	 * There is a small but measurable benefit to avoiding the adjustment
3791 	 * of the descriptor so that the headers are aligned, for normal mtu,
3792 	 * on such platforms.  One possibility is that the DMA itself is
3793 	 * slightly more efficient if the front of the entire packet (instead
3794 	 * of the front of the headers) is aligned.
3795 	 *
3796 	 * Note we must always set align_tweak to 0 if we are using
3797 	 * jumbo frames.
3798 	 */
3799 #ifdef __NO_STRICT_ALIGNMENT
3800 	sc->sc_align_tweak = 0;
3801 #else
3802 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3803 		sc->sc_align_tweak = 0;
3804 	else
3805 		sc->sc_align_tweak = 2;
3806 #endif /* __NO_STRICT_ALIGNMENT */
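
	/*
	 * Concrete effect of the tweak: mbuf cluster data is at least
	 * 4-byte aligned, and the 14-byte Ethernet header would leave the
	 * IP header on a 2-byte boundary.  Starting the frame 2 bytes in
	 * (sc_align_tweak == 2) makes 14 + 2 == 16, so the IP header lands
	 * 4-byte aligned on strict-alignment machines.
	 */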
3807 
3808 	/* Cancel any pending I/O. */
3809 	wm_stop(ifp, 0);
3810 
3811 	/* update statistics before reset */
3812 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3813 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3814 
3815 	/* Reset the chip to a known state. */
3816 	wm_reset(sc);
3817 
3818 	switch (sc->sc_type) {
3819 	case WM_T_82571:
3820 	case WM_T_82572:
3821 	case WM_T_82573:
3822 	case WM_T_82574:
3823 	case WM_T_82583:
3824 	case WM_T_80003:
3825 	case WM_T_ICH8:
3826 	case WM_T_ICH9:
3827 	case WM_T_ICH10:
3828 	case WM_T_PCH:
3829 	case WM_T_PCH2:
3830 		if (wm_check_mng_mode(sc) != 0)
3831 			wm_get_hw_control(sc);
3832 		break;
3833 	default:
3834 		break;
3835 	}
3836 
3837 	/* Reset the PHY. */
3838 	if (sc->sc_flags & WM_F_HAS_MII)
3839 		wm_gmii_reset(sc);
3840 
3841 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
3842 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
3843 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2))
3844 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
3845 
3846 	/* Initialize the transmit descriptor ring. */
3847 	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3848 	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3849 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3850 	sc->sc_txfree = WM_NTXDESC(sc);
3851 	sc->sc_txnext = 0;
3852 
3853 	if (sc->sc_type < WM_T_82543) {
3854 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
3855 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
3856 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3857 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3858 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3859 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3860 	} else {
3861 		CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
3862 		CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
3863 		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3864 		CSR_WRITE(sc, WMREG_TDH, 0);
3865 		CSR_WRITE(sc, WMREG_TIDV, 375);		/* ITR / 4 */
3866 		CSR_WRITE(sc, WMREG_TADV, 375);		/* should be same */
3867 
3868 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3869 			/*
3870 			 * Don't write TDT before TCTL.EN is set.
3871 			 * See the documentation.
3872 			 */
3873 			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
3874 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
3875 			    | TXDCTL_WTHRESH(0));
3876 		else {
3877 			CSR_WRITE(sc, WMREG_TDT, 0);
3878 			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3879 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3880 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3881 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3882 		}
3883 	}
3884 	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3885 	CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3886 
3887 	/* Initialize the transmit job descriptors. */
3888 	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3889 		sc->sc_txsoft[i].txs_mbuf = NULL;
3890 	sc->sc_txsfree = WM_TXQUEUELEN(sc);
3891 	sc->sc_txsnext = 0;
3892 	sc->sc_txsdirty = 0;
3893 
3894 	/*
3895 	 * Initialize the receive descriptor and receive job
3896 	 * descriptor rings.
3897 	 */
3898 	if (sc->sc_type < WM_T_82543) {
3899 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3900 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3901 		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3902 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3903 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3904 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3905 
3906 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3907 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3908 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3909 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3910 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3911 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3912 	} else {
3913 		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3914 		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3915 		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3916 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3917 			CSR_WRITE(sc, WMREG_EITR(0), 450);
3918 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
3919 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
3920 			CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
3921 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
3922 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
3923 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
3924 			    | RXDCTL_WTHRESH(1));
3925 		} else {
3926 			CSR_WRITE(sc, WMREG_RDH, 0);
3927 			CSR_WRITE(sc, WMREG_RDT, 0);
3928 			CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD);	/* ITR/4 */
3929 			CSR_WRITE(sc, WMREG_RADV, 375);		/* MUST be same */
3930 		}
3931 	}
3932 	for (i = 0; i < WM_NRXDESC; i++) {
3933 		rxs = &sc->sc_rxsoft[i];
3934 		if (rxs->rxs_mbuf == NULL) {
3935 			if ((error = wm_add_rxbuf(sc, i)) != 0) {
3936 				log(LOG_ERR, "%s: unable to allocate or map rx "
3937 				    "buffer %d, error = %d\n",
3938 				    device_xname(sc->sc_dev), i, error);
3939 				/*
3940 				 * XXX Should attempt to run with fewer receive
3941 				 * XXX buffers instead of just failing.
3942 				 */
3943 				wm_rxdrain(sc);
3944 				goto out;
3945 			}
3946 		} else {
3947 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3948 				WM_INIT_RXDESC(sc, i);
3949 			/*
3950 			 * For 82575 and newer devices, the RX descriptors
3951 			 * must be initialized after RCTL.EN is set in
3952 			 * wm_set_filter().
3953 			 */
3954 		}
3955 	}
3956 	sc->sc_rxptr = 0;
3957 	sc->sc_rxdiscard = 0;
3958 	WM_RXCHAIN_RESET(sc);
3959 
3960 	/*
3961 	 * Clear out the VLAN table -- we don't use it (yet).
3962 	 */
3963 	CSR_WRITE(sc, WMREG_VET, 0);
3964 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
3965 		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3966 
3967 	/*
3968 	 * Set up flow-control parameters.
3969 	 *
3970 	 * XXX Values could probably stand some tuning.
3971 	 */
3972 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
3973 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
3974 	    && (sc->sc_type != WM_T_PCH2)) {
3975 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3976 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3977 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3978 	}
3979 
3980 	sc->sc_fcrtl = FCRTL_DFLT;
3981 	if (sc->sc_type < WM_T_82543) {
3982 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3983 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3984 	} else {
3985 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3986 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3987 	}
3988 
3989 	if (sc->sc_type == WM_T_80003)
3990 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
3991 	else
3992 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3993 
3994 	/* Writes the control register. */
3995 	wm_set_vlan(sc);
3996 
3997 	if (sc->sc_flags & WM_F_HAS_MII) {
3998 		int val;
3999 
4000 		switch (sc->sc_type) {
4001 		case WM_T_80003:
4002 		case WM_T_ICH8:
4003 		case WM_T_ICH9:
4004 		case WM_T_ICH10:
4005 		case WM_T_PCH:
4006 		case WM_T_PCH2:
4007 			/*
4008 			 * Set the mac to wait the maximum time between each
4009 			 * iteration and increase the max iterations when
4010 			 * polling the phy; this fixes erroneous timeouts at
4011 			 * 10Mbps.
4012 			 */
4013 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4014 			    0xFFFF);
4015 			val = wm_kmrn_readreg(sc,
4016 			    KUMCTRLSTA_OFFSET_INB_PARAM);
4017 			val |= 0x3F;
4018 			wm_kmrn_writereg(sc,
4019 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
4020 			break;
4021 		default:
4022 			break;
4023 		}
4024 
4025 		if (sc->sc_type == WM_T_80003) {
4026 			val = CSR_READ(sc, WMREG_CTRL_EXT);
4027 			val &= ~CTRL_EXT_LINK_MODE_MASK;
4028 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4029 
4030 			/* Bypass RX and TX FIFO's */
4031 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4032 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4033 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4034 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4035 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4036 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4037 		}
4038 	}
4039 #if 0
4040 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4041 #endif
4042 
4043 	/*
4044 	 * Set up checksum offload parameters.
4045 	 */
4046 	reg = CSR_READ(sc, WMREG_RXCSUM);
4047 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4048 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4049 		reg |= RXCSUM_IPOFL;
4050 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4051 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4052 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4053 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4054 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
4055 
4056 	/* Reset TBI's RXCFG count */
4057 	sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
4058 
4059 	/*
4060 	 * Set up the interrupt registers.
4061 	 */
4062 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4063 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4064 	    ICR_RXO | ICR_RXT0;
4065 	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
4066 		sc->sc_icr |= ICR_RXCFG;
4067 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4068 
4069 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4070 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4071 		 || (sc->sc_type == WM_T_PCH2)) {
4072 		reg = CSR_READ(sc, WMREG_KABGTXD);
4073 		reg |= KABGTXD_BGSQLBIAS;
4074 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
4075 	}
4076 
4077 	/* Set up the inter-packet gap. */
4078 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4079 
4080 	if (sc->sc_type >= WM_T_82543) {
4081 		/*
4082 		 * Set up the interrupt throttling register (units of 256ns)
4083 		 * Note that a footnote in Intel's documentation says this
4084 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4085 		 * or 10Mbit mode.  Empirically, it appears to be the case
4086 		 * that that is also true for the 1024ns units of the other
4087 		 * interrupt-related timer registers -- so, really, we ought
4088 		 * to divide this value by 4 when the link speed is low.
4089 		 *
4090 		 * XXX implement this division at link speed change!
4091 		 */
4092 
4093 		 /*
4094 		  * For N interrupts/sec, set this value to:
4095 		  * 1000000000 / (N * 256).  Note that we set the
4096 		  * absolute and packet timer values to this value
4097 		  * divided by 4 to get "simple timer" behavior.
4098 		  */
4099 
4100 		sc->sc_itr = 1500;		/* 2604 ints/sec */
4101 		CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4102 	}
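
	/*
	 * Worked numbers for the formula above (ours): sc_itr == 1500
	 * gives 1000000000 / (1500 * 256) =~ 2604 interrupts/sec, and
	 * 1500 / 4 == 375 is exactly the TIDV/TADV and RDTR/RADV value
	 * programmed earlier for "simple timer" behavior.
	 */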
4103 
4104 	/* Set the VLAN ethernetype. */
4105 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4106 
4107 	/*
4108 	 * Set up the transmit control register; we start out with
4109 	 * a collision distance suitable for FDX, but update it when
4110 	 * we resolve the media type.
4111 	 */
4112 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4113 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
4114 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4115 	if (sc->sc_type >= WM_T_82571)
4116 		sc->sc_tctl |= TCTL_MULR;
4117 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4118 
4119 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4120 		/*
4121 		 * Write TDT after TCTL.EN is set.
4122 		 * See the documentation.
4123 		 */
4124 		CSR_WRITE(sc, WMREG_TDT, 0);
4125 	}
4126 
4127 	if (sc->sc_type == WM_T_80003) {
4128 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
4129 		reg &= ~TCTL_EXT_GCEX_MASK;
4130 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4131 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4132 	}
4133 
4134 	/* Set the media. */
4135 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4136 		goto out;
4137 
4138 	/* Configure for OS presence */
4139 	wm_init_manageability(sc);
4140 
4141 	/*
4142 	 * Set up the receive control register; we actually program
4143 	 * the register when we set the receive filter.  Use multicast
4144 	 * address offset type 0.
4145 	 *
4146 	 * Only the i82544 has the ability to strip the incoming
4147 	 * CRC, so we don't enable that feature.
4148 	 */
4149 	sc->sc_mchash_type = 0;
4150 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4151 	    | RCTL_MO(sc->sc_mchash_type);
4152 
4153 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4154 	    && (ifp->if_mtu > ETHERMTU)) {
4155 		sc->sc_rctl |= RCTL_LPE;
4156 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4157 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4158 	}
4159 
4160 	if (MCLBYTES == 2048) {
4161 		sc->sc_rctl |= RCTL_2k;
4162 	} else {
4163 		if (sc->sc_type >= WM_T_82543) {
4164 			switch (MCLBYTES) {
4165 			case 4096:
4166 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4167 				break;
4168 			case 8192:
4169 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4170 				break;
4171 			case 16384:
4172 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4173 				break;
4174 			default:
4175 				panic("wm_init: MCLBYTES %d unsupported",
4176 				    MCLBYTES);
4177 				break;
4178 			}
4179 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
4180 	}
4181 
4182 	/* Set the receive filter. */
4183 	wm_set_filter(sc);
4184 
4185 	/* On 82575 and later, set RDT only after RX is enabled */
4186 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4187 		for (i = 0; i < WM_NRXDESC; i++)
4188 			WM_INIT_RXDESC(sc, i);
4189 
4190 	/* Start the one second link check clock. */
4191 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4192 
4193 	/* ...all done! */
4194 	ifp->if_flags |= IFF_RUNNING;
4195 	ifp->if_flags &= ~IFF_OACTIVE;
4196 
4197  out:
4198 	sc->sc_if_flags = ifp->if_flags;
4199 	if (error)
4200 		log(LOG_ERR, "%s: interface not running\n",
4201 		    device_xname(sc->sc_dev));
4202 	return error;
4203 }
4204 
4205 /*
4206  * wm_rxdrain:
4207  *
4208  *	Drain the receive queue.
4209  */
4210 static void
4211 wm_rxdrain(struct wm_softc *sc)
4212 {
4213 	struct wm_rxsoft *rxs;
4214 	int i;
4215 
4216 	for (i = 0; i < WM_NRXDESC; i++) {
4217 		rxs = &sc->sc_rxsoft[i];
4218 		if (rxs->rxs_mbuf != NULL) {
4219 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4220 			m_freem(rxs->rxs_mbuf);
4221 			rxs->rxs_mbuf = NULL;
4222 		}
4223 	}
4224 }
4225 
4226 /*
4227  * wm_stop:		[ifnet interface function]
4228  *
4229  *	Stop transmission on the interface.
4230  */
4231 static void
4232 wm_stop(struct ifnet *ifp, int disable)
4233 {
4234 	struct wm_softc *sc = ifp->if_softc;
4235 	struct wm_txsoft *txs;
4236 	int i;
4237 
4238 	/* Stop the one second clock. */
4239 	callout_stop(&sc->sc_tick_ch);
4240 
4241 	/* Stop the 82547 Tx FIFO stall check timer. */
4242 	if (sc->sc_type == WM_T_82547)
4243 		callout_stop(&sc->sc_txfifo_ch);
4244 
4245 	if (sc->sc_flags & WM_F_HAS_MII) {
4246 		/* Down the MII. */
4247 		mii_down(&sc->sc_mii);
4248 	} else {
4249 #if 0
4250 		/* Should we clear PHY's status properly? */
4251 		wm_reset(sc);
4252 #endif
4253 	}
4254 
4255 	/* Stop the transmit and receive processes. */
4256 	CSR_WRITE(sc, WMREG_TCTL, 0);
4257 	CSR_WRITE(sc, WMREG_RCTL, 0);
4258 	sc->sc_rctl &= ~RCTL_EN;
4259 
4260 	/*
4261 	 * Clear the interrupt mask to ensure the device cannot assert its
4262 	 * interrupt line.
4263 	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4264 	 * any currently pending or shared interrupt.
4265 	 */
4266 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4267 	sc->sc_icr = 0;
4268 
4269 	/* Release any queued transmit buffers. */
4270 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4271 		txs = &sc->sc_txsoft[i];
4272 		if (txs->txs_mbuf != NULL) {
4273 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4274 			m_freem(txs->txs_mbuf);
4275 			txs->txs_mbuf = NULL;
4276 		}
4277 	}
4278 
4279 	/* Mark the interface as down and cancel the watchdog timer. */
4280 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4281 	ifp->if_timer = 0;
4282 
4283 	if (disable)
4284 		wm_rxdrain(sc);
4285 
4286 #if 0 /* notyet */
4287 	if (sc->sc_type >= WM_T_82544)
4288 		CSR_WRITE(sc, WMREG_WUC, 0);
4289 #endif
4290 }
4291 
4292 void
4293 wm_get_auto_rd_done(struct wm_softc *sc)
4294 {
4295 	int i;
4296 
4297 	/* wait for eeprom to reload */
4298 	switch (sc->sc_type) {
4299 	case WM_T_82571:
4300 	case WM_T_82572:
4301 	case WM_T_82573:
4302 	case WM_T_82574:
4303 	case WM_T_82583:
4304 	case WM_T_82575:
4305 	case WM_T_82576:
4306 	case WM_T_82580:
4307 	case WM_T_82580ER:
4308 	case WM_T_80003:
4309 	case WM_T_ICH8:
4310 	case WM_T_ICH9:
4311 		for (i = 0; i < 10; i++) {
4312 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4313 				break;
4314 			delay(1000);
4315 		}
4316 		if (i == 10) {
4317 			log(LOG_ERR, "%s: auto read from eeprom failed to "
4318 			    "complete\n", device_xname(sc->sc_dev));
4319 		}
4320 		break;
4321 	default:
4322 		break;
4323 	}
4324 }
4325 
4326 void
4327 wm_lan_init_done(struct wm_softc *sc)
4328 {
4329 	uint32_t reg = 0;
4330 	int i;
4331 
4332 	/* wait for eeprom to reload */
4333 	switch (sc->sc_type) {
4334 	case WM_T_ICH10:
4335 	case WM_T_PCH:
4336 	case WM_T_PCH2:
4337 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4338 			reg = CSR_READ(sc, WMREG_STATUS);
4339 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
4340 				break;
4341 			delay(100);
4342 		}
4343 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4344 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
4345 			    "complete\n", device_xname(sc->sc_dev), __func__);
4346 		}
4347 		break;
4348 	default:
4349 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4350 		    __func__);
4351 		break;
4352 	}
4353 
4354 	reg &= ~STATUS_LAN_INIT_DONE;
4355 	CSR_WRITE(sc, WMREG_STATUS, reg);
4356 }
4357 
4358 void
4359 wm_get_cfg_done(struct wm_softc *sc)
4360 {
4361 	int mask;
4362 	uint32_t reg;
4363 	int i;
4364 
4365 	/* wait for eeprom to reload */
4366 	switch (sc->sc_type) {
4367 	case WM_T_82542_2_0:
4368 	case WM_T_82542_2_1:
4369 		/* null */
4370 		break;
4371 	case WM_T_82543:
4372 	case WM_T_82544:
4373 	case WM_T_82540:
4374 	case WM_T_82545:
4375 	case WM_T_82545_3:
4376 	case WM_T_82546:
4377 	case WM_T_82546_3:
4378 	case WM_T_82541:
4379 	case WM_T_82541_2:
4380 	case WM_T_82547:
4381 	case WM_T_82547_2:
4382 	case WM_T_82573:
4383 	case WM_T_82574:
4384 	case WM_T_82583:
4385 		/* generic */
4386 		delay(10*1000);
4387 		break;
4388 	case WM_T_80003:
4389 	case WM_T_82571:
4390 	case WM_T_82572:
4391 	case WM_T_82575:
4392 	case WM_T_82576:
4393 	case WM_T_82580:
4394 	case WM_T_82580ER:
4395 		if (sc->sc_type == WM_T_82571) {
4396 			/* Only 82571 shares port 0 */
4397 			mask = EEMNGCTL_CFGDONE_0;
4398 		} else
4399 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
4400 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
4401 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
4402 				break;
4403 			delay(1000);
4404 		}
4405 		if (i >= WM_PHY_CFG_TIMEOUT) {
4406 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
4407 				device_xname(sc->sc_dev), __func__));
4408 		}
4409 		break;
4410 	case WM_T_ICH8:
4411 	case WM_T_ICH9:
4412 	case WM_T_ICH10:
4413 	case WM_T_PCH:
4414 	case WM_T_PCH2:
4415 		if (sc->sc_type >= WM_T_PCH) {
4416 			reg = CSR_READ(sc, WMREG_STATUS);
4417 			if ((reg & STATUS_PHYRA) != 0)
4418 				CSR_WRITE(sc, WMREG_STATUS,
4419 				    reg & ~STATUS_PHYRA);
4420 		}
4421 		delay(10*1000);
4422 		break;
4423 	default:
4424 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4425 		    __func__);
4426 		break;
4427 	}
4428 }
4429 
4430 /*
4431  * wm_acquire_eeprom:
4432  *
4433  *	Perform the EEPROM handshake required on some chips.
4434  */
4435 static int
4436 wm_acquire_eeprom(struct wm_softc *sc)
4437 {
4438 	uint32_t reg;
4439 	int x;
4440 	int ret = 0;
4441 
4442 	/* Always succeeds for flash-based NVM */
4443 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
4444 		return 0;
4445 
4446 	if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
4447 		ret = wm_get_swfwhw_semaphore(sc);
4448 	} else if (sc->sc_flags & WM_F_SWFW_SYNC) {
4449 		/* this will also do wm_get_swsm_semaphore() if needed */
4450 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
4451 	} else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
4452 		ret = wm_get_swsm_semaphore(sc);
4453 	}
4454 
4455 	if (ret) {
4456 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4457 			__func__);
4458 		return 1;
4459 	}
4460 
4461 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
4462 		reg = CSR_READ(sc, WMREG_EECD);
4463 
4464 		/* Request EEPROM access. */
4465 		reg |= EECD_EE_REQ;
4466 		CSR_WRITE(sc, WMREG_EECD, reg);
4467 
4468 		/* ..and wait for it to be granted. */
4469 		for (x = 0; x < 1000; x++) {
4470 			reg = CSR_READ(sc, WMREG_EECD);
4471 			if (reg & EECD_EE_GNT)
4472 				break;
4473 			delay(5);
4474 		}
4475 		if ((reg & EECD_EE_GNT) == 0) {
4476 			aprint_error_dev(sc->sc_dev,
4477 			    "could not acquire EEPROM GNT\n");
4478 			reg &= ~EECD_EE_REQ;
4479 			CSR_WRITE(sc, WMREG_EECD, reg);
4480 			if (sc->sc_flags & WM_F_SWFWHW_SYNC)
4481 				wm_put_swfwhw_semaphore(sc);
4482 			if (sc->sc_flags & WM_F_SWFW_SYNC)
4483 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
4484 			else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4485 				wm_put_swsm_semaphore(sc);
4486 			return 1;
4487 		}
4488 	}
4489 
4490 	return 0;
4491 }
4492 
4493 /*
4494  * wm_release_eeprom:
4495  *
4496  *	Release the EEPROM mutex.
4497  */
4498 static void
4499 wm_release_eeprom(struct wm_softc *sc)
4500 {
4501 	uint32_t reg;
4502 
4503 	/* Nothing to release for flash-based NVM */
4504 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
4505 		return;
4506 
4507 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
4508 		reg = CSR_READ(sc, WMREG_EECD);
4509 		reg &= ~EECD_EE_REQ;
4510 		CSR_WRITE(sc, WMREG_EECD, reg);
4511 	}
4512 
4513 	if (sc->sc_flags & WM_F_SWFWHW_SYNC)
4514 		wm_put_swfwhw_semaphore(sc);
4515 	if (sc->sc_flags & WM_F_SWFW_SYNC)
4516 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
4517 	else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4518 		wm_put_swsm_semaphore(sc);
4519 }
4520 
4521 /*
4522  * wm_eeprom_sendbits:
4523  *
4524  *	Send a series of bits to the EEPROM.
4525  */
4526 static void
4527 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
4528 {
4529 	uint32_t reg;
4530 	int x;
4531 
4532 	reg = CSR_READ(sc, WMREG_EECD);
4533 
4534 	for (x = nbits; x > 0; x--) {
4535 		if (bits & (1U << (x - 1)))
4536 			reg |= EECD_DI;
4537 		else
4538 			reg &= ~EECD_DI;
4539 		CSR_WRITE(sc, WMREG_EECD, reg);
4540 		delay(2);
4541 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4542 		delay(2);
4543 		CSR_WRITE(sc, WMREG_EECD, reg);
4544 		delay(2);
4545 	}
4546 }
4547 
4548 /*
4549  * wm_eeprom_recvbits:
4550  *
4551  *	Receive a series of bits from the EEPROM.
4552  */
4553 static void
4554 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
4555 {
4556 	uint32_t reg, val;
4557 	int x;
4558 
4559 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
4560 
4561 	val = 0;
4562 	for (x = nbits; x > 0; x--) {
4563 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4564 		delay(2);
4565 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
4566 			val |= (1U << (x - 1));
4567 		CSR_WRITE(sc, WMREG_EECD, reg);
4568 		delay(2);
4569 	}
4570 	*valp = val;
4571 }
4572 
4573 /*
4574  * wm_read_eeprom_uwire:
4575  *
4576  *	Read a word from the EEPROM using the MicroWire protocol.
4577  */
4578 static int
4579 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4580 {
4581 	uint32_t reg, val;
4582 	int i;
4583 
4584 	for (i = 0; i < wordcnt; i++) {
4585 		/* Clear SK and DI. */
4586 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
4587 		CSR_WRITE(sc, WMREG_EECD, reg);
4588 
4589 		/* Set CHIP SELECT. */
4590 		reg |= EECD_CS;
4591 		CSR_WRITE(sc, WMREG_EECD, reg);
4592 		delay(2);
4593 
4594 		/* Shift in the READ command. */
4595 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
4596 
4597 		/* Shift in address. */
4598 		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
4599 
4600 		/* Shift out the data. */
4601 		wm_eeprom_recvbits(sc, &val, 16);
4602 		data[i] = val & 0xffff;
4603 
4604 		/* Clear CHIP SELECT. */
4605 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
4606 		CSR_WRITE(sc, WMREG_EECD, reg);
4607 		delay(2);
4608 	}
4609 
4610 	return 0;
4611 }
4612 
4613 /*
4614  * wm_spi_eeprom_ready:
4615  *
4616  *	Wait for a SPI EEPROM to be ready for commands.
4617  */
4618 static int
4619 wm_spi_eeprom_ready(struct wm_softc *sc)
4620 {
4621 	uint32_t val;
4622 	int usec;
4623 
4624 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
4625 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
4626 		wm_eeprom_recvbits(sc, &val, 8);
4627 		if ((val & SPI_SR_RDY) == 0)
4628 			break;
4629 	}
4630 	if (usec >= SPI_MAX_RETRIES) {
4631 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
4632 		return 1;
4633 	}
4634 	return 0;
4635 }
4636 
4637 /*
4638  * wm_read_eeprom_spi:
4639  *
4640  *	Read a word from the EEPROM using the SPI protocol.
4641  */
4642 static int
4643 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4644 {
4645 	uint32_t reg, val;
4646 	int i;
4647 	uint8_t opc;
4648 
4649 	/* Clear SK and CS. */
4650 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
4651 	CSR_WRITE(sc, WMREG_EECD, reg);
4652 	delay(2);
4653 
4654 	if (wm_spi_eeprom_ready(sc))
4655 		return 1;
4656 
4657 	/* Toggle CS to flush commands. */
4658 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
4659 	delay(2);
4660 	CSR_WRITE(sc, WMREG_EECD, reg);
4661 	delay(2);
4662 
4663 	opc = SPI_OPC_READ;
4664 	if (sc->sc_ee_addrbits == 8 && word >= 128)
4665 		opc |= SPI_OPC_A8;
4666 
4667 	wm_eeprom_sendbits(sc, opc, 8);
4668 	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
4669 
4670 	for (i = 0; i < wordcnt; i++) {
4671 		wm_eeprom_recvbits(sc, &val, 16);
4672 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
4673 	}
4674 
4675 	/* Raise CS and clear SK. */
4676 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
4677 	CSR_WRITE(sc, WMREG_EECD, reg);
4678 	delay(2);
4679 
4680 	return 0;
4681 }
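
/*
 * Note on the byte swap in wm_read_eeprom_spi(): wm_eeprom_recvbits()
 * packs the first bit received into the most significant position, so
 * the byte the SPI part shifts out first lands in the high half of
 * "val".  The swap moves it to the low half, so the SPI and MicroWire
 * paths return words in the same byte order.
 */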
4682 
4683 #define EEPROM_CHECKSUM		0xBABA
4684 #define EEPROM_SIZE		0x0040
4685 
4686 /*
4687  * wm_validate_eeprom_checksum
4688  *
4689  * The checksum is defined as the sum of the first 64 (16 bit) words.
4690  */
4691 static int
4692 wm_validate_eeprom_checksum(struct wm_softc *sc)
4693 {
4694 	uint16_t checksum;
4695 	uint16_t eeprom_data;
4696 	int i;
4697 
4698 	checksum = 0;
4699 
4700 	for (i = 0; i < EEPROM_SIZE; i++) {
4701 		if (wm_read_eeprom(sc, i, 1, &eeprom_data))
4702 			return 1;
4703 		checksum += eeprom_data;
4704 	}
4705 
4706 	if (checksum != (uint16_t) EEPROM_CHECKSUM)
4707 		return 1;
4708 
4709 	return 0;
4710 }
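
/*
 * Example of the convention being checked: the vendor programs one word
 * of the NVM so that the 16-bit sum of words 0x00-0x3F wraps to exactly
 * 0xBABA; if any word is corrupted, the sum misses that value and the
 * contents are rejected.
 */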
4711 
4712 /*
4713  * wm_read_eeprom:
4714  *
4715  *	Read data from the serial EEPROM.
4716  */
4717 static int
4718 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4719 {
4720 	int rv;
4721 
4722 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
4723 		return 1;
4724 
4725 	if (wm_acquire_eeprom(sc))
4726 		return 1;
4727 
4728 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4729 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4730 		 || (sc->sc_type == WM_T_PCH2))
4731 		rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
4732 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
4733 		rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
4734 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
4735 		rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
4736 	else
4737 		rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
4738 
4739 	wm_release_eeprom(sc);
4740 	return rv;
4741 }
4742 
4743 static int
4744 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
4745     uint16_t *data)
4746 {
4747 	int i, eerd = 0;
4748 	int error = 0;
4749 
4750 	for (i = 0; i < wordcnt; i++) {
4751 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
4752 
4753 		CSR_WRITE(sc, WMREG_EERD, eerd);
4754 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
4755 		if (error != 0)
4756 			break;
4757 
4758 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
4759 	}
4760 
4761 	return error;
4762 }
4763 
4764 static int
4765 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
4766 {
4767 	uint32_t attempts = 100000;
4768 	uint32_t i, reg = 0;
4769 	int32_t done = -1;
4770 
4771 	for (i = 0; i < attempts; i++) {
4772 		reg = CSR_READ(sc, rw);
4773 
4774 		if (reg & EERD_DONE) {
4775 			done = 0;
4776 			break;
4777 		}
4778 		delay(5);
4779 	}
4780 
4781 	return done;
4782 }
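
/*
 * The loop above polls for up to 100000 * 5us, i.e. roughly half a
 * second, before giving up and returning -1; a normal EERD/EEWR word
 * access completes far sooner than that.
 */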
4783 
4784 static int
4785 wm_check_alt_mac_addr(struct wm_softc *sc)
4786 {
4787 	uint16_t myea[ETHER_ADDR_LEN / 2];
4788 	uint16_t offset = EEPROM_OFF_MACADDR;
4789 
4790 	/* Try to read alternative MAC address pointer */
4791 	if (wm_read_eeprom(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
4792 		return -1;
4793 
4794 	/* Check pointer */
4795 	if (offset == 0xffff)
4796 		return -1;
4797 
4798 	/*
4799 	 * Check whether the alternative MAC address is valid.  Some
4800 	 * cards have a non-0xffff pointer but don't actually use an
4801 	 * alternative MAC address.
4802 	 *
4803 	 * A valid address must not have the multicast/broadcast bit set.
4804 	 */
4805 	if (wm_read_eeprom(sc, offset, 1, myea) == 0)
4806 		if (((myea[0] & 0xff) & 0x01) == 0)
4807 			return 0; /* found! */
4808 
4809 	/* not found */
4810 	return -1;
4811 }
4812 
4813 static int
4814 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
4815 {
4816 	uint16_t myea[ETHER_ADDR_LEN / 2];
4817 	uint16_t offset = EEPROM_OFF_MACADDR;
4818 	int do_invert = 0;
4819 
4820 	switch (sc->sc_type) {
4821 	case WM_T_82580:
4822 	case WM_T_82580ER:
4823 		switch (sc->sc_funcid) {
4824 		case 0:
4825 			/* default value (== EEPROM_OFF_MACADDR) */
4826 			break;
4827 		case 1:
4828 			offset = EEPROM_OFF_LAN1;
4829 			break;
4830 		case 2:
4831 			offset = EEPROM_OFF_LAN2;
4832 			break;
4833 		case 3:
4834 			offset = EEPROM_OFF_LAN3;
4835 			break;
4836 		default:
4837 			goto bad;
4838 			/* NOTREACHED */
4839 			break;
4840 		}
4841 		break;
4842 	case WM_T_82571:
4843 	case WM_T_82575:
4844 	case WM_T_82576:
4845 	case WM_T_80003:
4846 		if (wm_check_alt_mac_addr(sc) != 0) {
4847 			/* reset the offset to LAN0 */
4848 			offset = EEPROM_OFF_MACADDR;
4849 			if ((sc->sc_funcid & 0x01) == 1)
4850 				do_invert = 1;
4851 			goto do_read;
4852 		}
4853 		switch (sc->sc_funcid) {
4854 		case 0:
4855 			/*
4856 			 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR
4857 			 * itself.
4858 			 */
4859 			break;
4860 		case 1:
4861 			offset += EEPROM_OFF_MACADDR_LAN1;
4862 			break;
4863 		case 2:
4864 			offset += EEPROM_OFF_MACADDR_LAN2;
4865 			break;
4866 		case 3:
4867 			offset += EEPROM_OFF_MACADDR_LAN3;
4868 			break;
4869 		default:
4870 			goto bad;
4871 			/* NOTREACHED */
4872 			break;
4873 		}
4874 		break;
4875 	default:
4876 		if ((sc->sc_funcid & 0x01) == 1)
4877 			do_invert = 1;
4878 		break;
4879 	}
4880 
4881  do_read:
4882 	if (wm_read_eeprom(sc, offset, sizeof(myea) / sizeof(myea[0]),
4883 		myea) != 0) {
4884 		goto bad;
4885 	}
4886 
4887 	enaddr[0] = myea[0] & 0xff;
4888 	enaddr[1] = myea[0] >> 8;
4889 	enaddr[2] = myea[1] & 0xff;
4890 	enaddr[3] = myea[1] >> 8;
4891 	enaddr[4] = myea[2] & 0xff;
4892 	enaddr[5] = myea[2] >> 8;
4893 
4894 	/*
4895 	 * Toggle the LSB of the MAC address on the second port
4896 	 * of some dual port cards.
4897 	 */
4898 	if (do_invert != 0)
4899 		enaddr[5] ^= 1;
4900 
4901 	return 0;
4902 
4903  bad:
4904 	aprint_error_dev(sc->sc_dev, "unable to read Ethernet address\n");
4905 
4906 	return -1;
4907 }
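
/*
 * Worked example (values ours): EEPROM words myea[] = { 0x1100, 0x3322,
 * 0x5544 } unpack low byte first into the station address
 * 00:11:22:33:44:55; with do_invert set, the last byte would become
 * 0x54 instead.
 */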
4908 
4909 /*
4910  * wm_add_rxbuf:
4911  *
4912  *	Add a receive buffer to the indicated descriptor.
4913  */
4914 static int
4915 wm_add_rxbuf(struct wm_softc *sc, int idx)
4916 {
4917 	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
4918 	struct mbuf *m;
4919 	int error;
4920 
4921 	MGETHDR(m, M_DONTWAIT, MT_DATA);
4922 	if (m == NULL)
4923 		return ENOBUFS;
4924 
4925 	MCLGET(m, M_DONTWAIT);
4926 	if ((m->m_flags & M_EXT) == 0) {
4927 		m_freem(m);
4928 		return ENOBUFS;
4929 	}
4930 
4931 	if (rxs->rxs_mbuf != NULL)
4932 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4933 
4934 	rxs->rxs_mbuf = m;
4935 
4936 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4937 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4938 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
4939 	if (error) {
4940 		/* XXX XXX XXX */
4941 		aprint_error_dev(sc->sc_dev,
4942 		    "unable to load rx DMA map %d, error = %d\n",
4943 		    idx, error);
4944 		panic("wm_add_rxbuf");
4945 	}
4946 
4947 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4948 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4949 
4950 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4951 		if ((sc->sc_rctl & RCTL_EN) != 0)
4952 			WM_INIT_RXDESC(sc, idx);
4953 	} else
4954 		WM_INIT_RXDESC(sc, idx);
4955 
4956 	return 0;
4957 }
4958 
4959 /*
4960  * wm_set_ral:
4961  *
4962  *	Set an entry in the receive address list.
4963  */
4964 static void
4965 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
4966 {
4967 	uint32_t ral_lo, ral_hi;
4968 
4969 	if (enaddr != NULL) {
4970 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
4971 		    (enaddr[3] << 24);
4972 		ral_hi = enaddr[4] | (enaddr[5] << 8);
4973 		ral_hi |= RAL_AV;
4974 	} else {
4975 		ral_lo = 0;
4976 		ral_hi = 0;
4977 	}
4978 
4979 	if (sc->sc_type >= WM_T_82544) {
4980 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
4981 		    ral_lo);
4982 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
4983 		    ral_hi);
4984 	} else {
4985 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
4986 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
4987 	}
4988 }
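
/*
 * Packing example (address ours): for 00:11:22:33:44:55 the code above
 * produces ral_lo == 0x33221100 and ral_hi == 0x5544 | RAL_AV, i.e. the
 * address bytes in transmission order, low register first.
 */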
4989 
4990 /*
4991  * wm_mchash:
4992  *
4993  *	Compute the hash of the multicast address for the 4096-bit
4994  *	multicast filter.
4995  */
4996 static uint32_t
4997 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
4998 {
4999 	static const int lo_shift[4] = { 4, 3, 2, 0 };
5000 	static const int hi_shift[4] = { 4, 5, 6, 8 };
5001 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
5002 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
5003 	uint32_t hash;
5004 
5005 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5006 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5007 	    || (sc->sc_type == WM_T_PCH2)) {
5008 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
5009 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
5010 		return (hash & 0x3ff);
5011 	}
5012 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
5013 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
5014 
5015 	return (hash & 0xfff);
5016 }
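
/*
 * Worked example (ours), non-ICH path with sc_mchash_type == 0: for the
 * all-hosts group 01:00:5e:00:00:01, enaddr[4] == 0x00 and enaddr[5] ==
 * 0x01, so hash == (0x00 >> 4) | (0x01 << 4) == 0x010.  wm_set_filter()
 * below then sets bit (0x010 & 0x1f) == 16 of MTA register
 * (0x010 >> 5) == 0.
 */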
5017 
5018 /*
5019  * wm_set_filter:
5020  *
5021  *	Set up the receive filter.
5022  */
5023 static void
5024 wm_set_filter(struct wm_softc *sc)
5025 {
5026 	struct ethercom *ec = &sc->sc_ethercom;
5027 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5028 	struct ether_multi *enm;
5029 	struct ether_multistep step;
5030 	bus_addr_t mta_reg;
5031 	uint32_t hash, reg, bit;
5032 	int i, size;
5033 
5034 	if (sc->sc_type >= WM_T_82544)
5035 		mta_reg = WMREG_CORDOVA_MTA;
5036 	else
5037 		mta_reg = WMREG_MTA;
5038 
5039 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
5040 
5041 	if (ifp->if_flags & IFF_BROADCAST)
5042 		sc->sc_rctl |= RCTL_BAM;
5043 	if (ifp->if_flags & IFF_PROMISC) {
5044 		sc->sc_rctl |= RCTL_UPE;
5045 		goto allmulti;
5046 	}
5047 
5048 	/*
5049 	 * Set the station address in the first RAL slot, and
5050 	 * clear the remaining slots.
5051 	 */
5052 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5053 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5054 	    || (sc->sc_type == WM_T_PCH2))
5055 		size = WM_ICH8_RAL_TABSIZE;
5056 	else
5057 		size = WM_RAL_TABSIZE;
5058 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
5059 	for (i = 1; i < size; i++)
5060 		wm_set_ral(sc, NULL, i);
5061 
5062 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5063 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5064 	    || (sc->sc_type == WM_T_PCH2))
5065 		size = WM_ICH8_MC_TABSIZE;
5066 	else
5067 		size = WM_MC_TABSIZE;
5068 	/* Clear out the multicast table. */
5069 	for (i = 0; i < size; i++)
5070 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
5071 
5072 	ETHER_FIRST_MULTI(step, ec, enm);
5073 	while (enm != NULL) {
5074 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
5075 			/*
5076 			 * We must listen to a range of multicast addresses.
5077 			 * For now, just accept all multicasts, rather than
5078 			 * trying to set only those filter bits needed to match
5079 			 * the range.  (At this time, the only use of address
5080 			 * ranges is for IP multicast routing, for which the
5081 			 * range is big enough to require all bits set.)
5082 			 */
5083 			goto allmulti;
5084 		}
5085 
5086 		hash = wm_mchash(sc, enm->enm_addrlo);
5087 
5088 		reg = (hash >> 5);
5089 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5090 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5091 		    || (sc->sc_type == WM_T_PCH2))
5092 			reg &= 0x1f;
5093 		else
5094 			reg &= 0x7f;
5095 		bit = hash & 0x1f;
5096 
5097 		hash = CSR_READ(sc, mta_reg + (reg << 2));
5098 		hash |= 1U << bit;
5099 
5100 		/* XXX Hardware bug?? */
5101 		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
5102 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
5103 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5104 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
5105 		} else
5106 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5107 
5108 		ETHER_NEXT_MULTI(step, enm);
5109 	}
5110 
5111 	ifp->if_flags &= ~IFF_ALLMULTI;
5112 	goto setit;
5113 
5114  allmulti:
5115 	ifp->if_flags |= IFF_ALLMULTI;
5116 	sc->sc_rctl |= RCTL_MPE;
5117 
5118  setit:
5119 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
5120 }
5121 
5122 /*
5123  * wm_tbi_mediainit:
5124  *
5125  *	Initialize media for use on 1000BASE-X devices.
5126  */
5127 static void
5128 wm_tbi_mediainit(struct wm_softc *sc)
5129 {
5130 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5131 	const char *sep = "";
5132 
5133 	if (sc->sc_type < WM_T_82543)
5134 		sc->sc_tipg = TIPG_WM_DFLT;
5135 	else
5136 		sc->sc_tipg = TIPG_LG_DFLT;
5137 
5138 	sc->sc_tbi_anegticks = 5;
5139 
5140 	/* Initialize our media structures */
5141 	sc->sc_mii.mii_ifp = ifp;
5142 
5143 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
5144 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
5145 	    wm_tbi_mediastatus);
5146 
5147 	/*
5148 	 * SWD Pins:
5149 	 *
5150 	 *	0 = Link LED (output)
5151 	 *	1 = Loss Of Signal (input)
5152 	 */
5153 	sc->sc_ctrl |= CTRL_SWDPIO(0);
5154 	sc->sc_ctrl &= ~CTRL_SWDPIO(1);
5155 
5156 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5157 
5158 #define	ADD(ss, mm, dd)							\
5159 do {									\
5160 	aprint_normal("%s%s", sep, ss);					\
5161 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
5162 	sep = ", ";							\
5163 } while (/*CONSTCOND*/0)
5164 
5165 	aprint_normal_dev(sc->sc_dev, "");
5166 	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
5167 	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
5168 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
5169 	aprint_normal("\n");
5170 
5171 #undef ADD
5172 
5173 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
5174 }
5175 
5176 /*
5177  * wm_tbi_mediastatus:	[ifmedia interface function]
5178  *
5179  *	Get the current interface media status on a 1000BASE-X device.
5180  */
5181 static void
5182 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5183 {
5184 	struct wm_softc *sc = ifp->if_softc;
5185 	uint32_t ctrl, status;
5186 
5187 	ifmr->ifm_status = IFM_AVALID;
5188 	ifmr->ifm_active = IFM_ETHER;
5189 
5190 	status = CSR_READ(sc, WMREG_STATUS);
5191 	if ((status & STATUS_LU) == 0) {
5192 		ifmr->ifm_active |= IFM_NONE;
5193 		return;
5194 	}
5195 
5196 	ifmr->ifm_status |= IFM_ACTIVE;
5197 	ifmr->ifm_active |= IFM_1000_SX;
5198 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
5199 		ifmr->ifm_active |= IFM_FDX;
5200 	ctrl = CSR_READ(sc, WMREG_CTRL);
5201 	if (ctrl & CTRL_RFCE)
5202 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
5203 	if (ctrl & CTRL_TFCE)
5204 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
5205 }
5206 
5207 /*
5208  * wm_tbi_mediachange:	[ifmedia interface function]
5209  *
5210  *	Set hardware to newly-selected media on a 1000BASE-X device.
5211  */
5212 static int
5213 wm_tbi_mediachange(struct ifnet *ifp)
5214 {
5215 	struct wm_softc *sc = ifp->if_softc;
5216 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5217 	uint32_t status;
5218 	int i;
5219 
5220 	sc->sc_txcw = 0;
5221 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
5222 	    (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
5223 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
5224 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5225 		sc->sc_txcw |= TXCW_ANE;
5226 	} else {
5227 		/*
5228 		 * If autonegotiation is turned off, force link up and turn on
5229 		 * full duplex
5230 		 */
5231 		sc->sc_txcw &= ~TXCW_ANE;
5232 		sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
5233 		sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
5234 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5235 		delay(1000);
5236 	}
5237 
5238 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
5239 		    device_xname(sc->sc_dev),sc->sc_txcw));
5240 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5241 	delay(10000);
5242 
5243 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
5244 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
5245 
5246 	/*
5247 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
5248 	 * optics detect a signal, 0 if they don't.
5249 	 */
5250 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
5251 		/* Have signal; wait for the link to come up. */
5252 
5253 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5254 			/*
5255 			 * Reset the link, and let autonegotiation do its thing
5256 			 */
5257 			sc->sc_ctrl |= CTRL_LRST;
5258 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5259 			delay(1000);
5260 			sc->sc_ctrl &= ~CTRL_LRST;
5261 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5262 			delay(1000);
5263 		}
5264 
5265 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
5266 			delay(10000);
5267 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
5268 				break;
5269 		}
5270 
5271 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
5272 			    device_xname(sc->sc_dev),i));
5273 
5274 		status = CSR_READ(sc, WMREG_STATUS);
5275 		DPRINTF(WM_DEBUG_LINK,
5276 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
5277 			device_xname(sc->sc_dev),status, STATUS_LU));
5278 		if (status & STATUS_LU) {
5279 			/* Link is up. */
5280 			DPRINTF(WM_DEBUG_LINK,
5281 			    ("%s: LINK: set media -> link up %s\n",
5282 			    device_xname(sc->sc_dev),
5283 			    (status & STATUS_FD) ? "FDX" : "HDX"));
5284 
5285 			/*
5286 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
5287 			 * automatically, so we must re-read sc_ctrl here.
5288 			 */
5289 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5290 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5291 			sc->sc_fcrtl &= ~FCRTL_XONE;
5292 			if (status & STATUS_FD)
5293 				sc->sc_tctl |=
5294 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5295 			else
5296 				sc->sc_tctl |=
5297 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5298 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
5299 				sc->sc_fcrtl |= FCRTL_XONE;
5300 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5301 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
5302 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
5303 				      sc->sc_fcrtl);
5304 			sc->sc_tbi_linkup = 1;
5305 		} else {
5306 			if (i == WM_LINKUP_TIMEOUT)
5307 				wm_check_for_link(sc);
5308 			/* Link is down. */
5309 			DPRINTF(WM_DEBUG_LINK,
5310 			    ("%s: LINK: set media -> link down\n",
5311 			    device_xname(sc->sc_dev)));
5312 			sc->sc_tbi_linkup = 0;
5313 		}
5314 	} else {
5315 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
5316 		    device_xname(sc->sc_dev)));
5317 		sc->sc_tbi_linkup = 0;
5318 	}
5319 
5320 	wm_tbi_set_linkled(sc);
5321 
5322 	return 0;
5323 }
5324 
5325 /*
5326  * wm_tbi_set_linkled:
5327  *
5328  *	Update the link LED on 1000BASE-X devices.
5329  */
5330 static void
5331 wm_tbi_set_linkled(struct wm_softc *sc)
5332 {
5333 
5334 	if (sc->sc_tbi_linkup)
5335 		sc->sc_ctrl |= CTRL_SWDPIN(0);
5336 	else
5337 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
5338 
5339 	/* 82540 or newer devices are active low */
5340 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
5341 
5342 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5343 }
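
/*
 * A note on the XOR above: on WM_T_82540 and newer parts the link LED pin
 * is active low, so CTRL_SWDPIN(0) must be *clear* to light the LED.
 * Setting the bit for "link up" first and then XORing it back out on those
 * chips keeps the linkup test in one place.  Illustrative truth table:
 *
 *	linkup	type >= 82540	SWDPIN(0) written
 *	  0	     no			0
 *	  1	     no			1
 *	  0	     yes		1
 *	  1	     yes		0
 */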
5344 
5345 /*
5346  * wm_tbi_check_link:
5347  *
5348  *	Check the link on 1000BASE-X devices.
5349  */
5350 static void
5351 wm_tbi_check_link(struct wm_softc *sc)
5352 {
5353 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5354 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5355 	uint32_t rxcw, ctrl, status;
5356 
5357 	status = CSR_READ(sc, WMREG_STATUS);
5358 
5359 	rxcw = CSR_READ(sc, WMREG_RXCW);
5360 	ctrl = CSR_READ(sc, WMREG_CTRL);
5361 
5362 	/* set link status */
5363 	if ((status & STATUS_LU) == 0) {
5364 		DPRINTF(WM_DEBUG_LINK,
5365 		    ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
5366 		sc->sc_tbi_linkup = 0;
5367 	} else if (sc->sc_tbi_linkup == 0) {
5368 		DPRINTF(WM_DEBUG_LINK,
5369 		    ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
5370 		    (status & STATUS_FD) ? "FDX" : "HDX"));
5371 		sc->sc_tbi_linkup = 1;
5372 	}
5373 
5374 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
5375 	    && ((status & STATUS_LU) == 0)) {
5376 		sc->sc_tbi_linkup = 0;
5377 		if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
5378 			/* RXCFG storm! */
5379 			DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
5380 				sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
5381 			wm_init(ifp);
5382 			wm_start(ifp);
5383 		} else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5384 			/* If the timer expired, retry autonegotiation */
5385 			if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
5386 				DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
5387 				sc->sc_tbi_ticks = 0;
5388 				/*
5389 				 * Reset the link, and let autonegotiation do
5390 				 * its thing
5391 				 */
5392 				sc->sc_ctrl |= CTRL_LRST;
5393 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5394 				delay(1000);
5395 				sc->sc_ctrl &= ~CTRL_LRST;
5396 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5397 				delay(1000);
5398 				CSR_WRITE(sc, WMREG_TXCW,
5399 				    sc->sc_txcw & ~TXCW_ANE);
5400 				CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5401 			}
5402 		}
5403 	}
5404 
5405 	wm_tbi_set_linkled(sc);
5406 }
5407 
5408 /*
5409  * wm_gmii_reset:
5410  *
5411  *	Reset the PHY.
5412  */
5413 static void
5414 wm_gmii_reset(struct wm_softc *sc)
5415 {
5416 	uint32_t reg;
5417 	int rv;
5418 
5419 	/* get phy semaphore */
5420 	switch (sc->sc_type) {
5421 	case WM_T_82571:
5422 	case WM_T_82572:
5423 	case WM_T_82573:
5424 	case WM_T_82574:
5425 	case WM_T_82583:
5426 		 /* XXX should get sw semaphore, too */
5427 		rv = wm_get_swsm_semaphore(sc);
5428 		break;
5429 	case WM_T_82575:
5430 	case WM_T_82576:
5431 	case WM_T_82580:
5432 	case WM_T_82580ER:
5433 	case WM_T_80003:
5434 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5435 		break;
5436 	case WM_T_ICH8:
5437 	case WM_T_ICH9:
5438 	case WM_T_ICH10:
5439 	case WM_T_PCH:
5440 	case WM_T_PCH2:
5441 		rv = wm_get_swfwhw_semaphore(sc);
5442 		break;
5443 	default:
		/* nothing to do */
5445 		rv = 0;
5446 		break;
5447 	}
5448 	if (rv != 0) {
5449 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5450 		    __func__);
5451 		return;
5452 	}
5453 
5454 	switch (sc->sc_type) {
5455 	case WM_T_82542_2_0:
5456 	case WM_T_82542_2_1:
5457 		/* null */
5458 		break;
5459 	case WM_T_82543:
5460 		/*
5461 		 * With 82543, we need to force speed and duplex on the MAC
5462 		 * equal to what the PHY speed and duplex configuration is.
5463 		 * In addition, we need to perform a hardware reset on the PHY
5464 		 * to take it out of reset.
5465 		 */
5466 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5467 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5468 
5469 		/* The PHY reset pin is active-low. */
5470 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
5471 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
5472 		    CTRL_EXT_SWDPIN(4));
5473 		reg |= CTRL_EXT_SWDPIO(4);
5474 
5475 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5476 		delay(10*1000);
5477 
5478 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
5479 		delay(150);
5480 #if 0
5481 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
5482 #endif
5483 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
5484 		break;
5485 	case WM_T_82544:	/* reset 10000us */
5486 	case WM_T_82540:
5487 	case WM_T_82545:
5488 	case WM_T_82545_3:
5489 	case WM_T_82546:
5490 	case WM_T_82546_3:
5491 	case WM_T_82541:
5492 	case WM_T_82541_2:
5493 	case WM_T_82547:
5494 	case WM_T_82547_2:
5495 	case WM_T_82571:	/* reset 100us */
5496 	case WM_T_82572:
5497 	case WM_T_82573:
5498 	case WM_T_82574:
5499 	case WM_T_82575:
5500 	case WM_T_82576:
5501 	case WM_T_82580:
5502 	case WM_T_82580ER:
5503 	case WM_T_82583:
5504 	case WM_T_80003:
5505 		/* generic reset */
5506 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5507 		delay(20000);
5508 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5509 		delay(20000);
5510 
5511 		if ((sc->sc_type == WM_T_82541)
5512 		    || (sc->sc_type == WM_T_82541_2)
5513 		    || (sc->sc_type == WM_T_82547)
5514 		    || (sc->sc_type == WM_T_82547_2)) {
			/* workarounds for IGP are done in igp_reset() */
5516 			/* XXX add code to set LED after phy reset */
5517 		}
5518 		break;
5519 	case WM_T_ICH8:
5520 	case WM_T_ICH9:
5521 	case WM_T_ICH10:
5522 	case WM_T_PCH:
5523 	case WM_T_PCH2:
5524 		/* generic reset */
5525 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5526 		delay(100);
5527 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5528 		delay(150);
5529 		break;
5530 	default:
5531 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
5532 		    __func__);
5533 		break;
5534 	}
5535 
5536 	/* release PHY semaphore */
5537 	switch (sc->sc_type) {
5538 	case WM_T_82571:
5539 	case WM_T_82572:
5540 	case WM_T_82573:
5541 	case WM_T_82574:
5542 	case WM_T_82583:
5543 		 /* XXX should put sw semaphore, too */
5544 		wm_put_swsm_semaphore(sc);
5545 		break;
5546 	case WM_T_82575:
5547 	case WM_T_82576:
5548 	case WM_T_82580:
5549 	case WM_T_82580ER:
5550 	case WM_T_80003:
5551 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5552 		break;
5553 	case WM_T_ICH8:
5554 	case WM_T_ICH9:
5555 	case WM_T_ICH10:
5556 	case WM_T_PCH:
5557 	case WM_T_PCH2:
5558 		wm_put_swfwhw_semaphore(sc);
5559 		break;
5560 	default:
		/* nothing to do */
5562 		rv = 0;
5563 		break;
5564 	}
5565 
5566 	/* get_cfg_done */
5567 	wm_get_cfg_done(sc);
5568 
5569 	/* extra setup */
5570 	switch (sc->sc_type) {
5571 	case WM_T_82542_2_0:
5572 	case WM_T_82542_2_1:
5573 	case WM_T_82543:
5574 	case WM_T_82544:
5575 	case WM_T_82540:
5576 	case WM_T_82545:
5577 	case WM_T_82545_3:
5578 	case WM_T_82546:
5579 	case WM_T_82546_3:
5580 	case WM_T_82541_2:
5581 	case WM_T_82547_2:
5582 	case WM_T_82571:
5583 	case WM_T_82572:
5584 	case WM_T_82573:
5585 	case WM_T_82574:
5586 	case WM_T_82575:
5587 	case WM_T_82576:
5588 	case WM_T_82580:
5589 	case WM_T_82580ER:
5590 	case WM_T_82583:
5591 	case WM_T_80003:
5592 		/* null */
5593 		break;
5594 	case WM_T_82541:
5595 	case WM_T_82547:
		/* XXX Actively configure the LED after PHY reset */
5597 		break;
5598 	case WM_T_ICH8:
5599 	case WM_T_ICH9:
5600 	case WM_T_ICH10:
5601 	case WM_T_PCH:
5602 	case WM_T_PCH2:
		/* Allow time for h/w to get to a quiescent state after reset */
5604 		delay(10*1000);
5605 
5606 		if (sc->sc_type == WM_T_PCH)
5607 			wm_hv_phy_workaround_ich8lan(sc);
5608 
5609 		if (sc->sc_type == WM_T_PCH2)
5610 			wm_lv_phy_workaround_ich8lan(sc);
5611 
5612 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
5613 			/*
5614 			 * dummy read to clear the phy wakeup bit after lcd
5615 			 * reset
5616 			 */
5617 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
5618 		}
5619 
		/*
		 * XXX Configure the LCD with the extended configuration
		 * region in NVM
		 */
5624 
5625 		/* Configure the LCD with the OEM bits in NVM */
5626 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
5627 			/*
5628 			 * Disable LPLU.
5629 			 * XXX It seems that 82567 has LPLU, too.
5630 			 */
5631 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
5633 			reg |= HV_OEM_BITS_ANEGNOW;
5634 			wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
5635 		}
5636 		break;
5637 	default:
5638 		panic("%s: unknown type\n", __func__);
5639 		break;
5640 	}
5641 }
5642 
5643 /*
5644  * wm_gmii_mediainit:
5645  *
5646  *	Initialize media for use on 1000BASE-T devices.
5647  */
5648 static void
5649 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
5650 {
5651 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5652 
5653 	/* We have MII. */
5654 	sc->sc_flags |= WM_F_HAS_MII;
5655 
5656 	if (sc->sc_type == WM_T_80003)
5657 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
5658 	else
5659 		sc->sc_tipg = TIPG_1000T_DFLT;
5660 
5661 	/*
5662 	 * Let the chip set speed/duplex on its own based on
5663 	 * signals from the PHY.
5664 	 * XXXbouyer - I'm not sure this is right for the 80003,
5665 	 * the em driver only sets CTRL_SLU here - but it seems to work.
5666 	 */
5667 	sc->sc_ctrl |= CTRL_SLU;
5668 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5669 
5670 	/* Initialize our media structures and probe the GMII. */
5671 	sc->sc_mii.mii_ifp = ifp;
5672 
5673 	switch (prodid) {
5674 	case PCI_PRODUCT_INTEL_PCH_M_LM:
5675 	case PCI_PRODUCT_INTEL_PCH_M_LC:
5676 		/* 82577 */
5677 		sc->sc_phytype = WMPHY_82577;
5678 		sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
5679 		sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
5680 		break;
5681 	case PCI_PRODUCT_INTEL_PCH_D_DM:
5682 	case PCI_PRODUCT_INTEL_PCH_D_DC:
5683 		/* 82578 */
5684 		sc->sc_phytype = WMPHY_82578;
5685 		sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
5686 		sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
5687 		break;
5688 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
5689 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
		/* 82579 */
5691 		sc->sc_phytype = WMPHY_82579;
5692 		sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
5693 		sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
5694 		break;
5695 	case PCI_PRODUCT_INTEL_82801I_BM:
5696 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
5697 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
5698 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
5699 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
5700 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
5701 		/* 82567 */
5702 		sc->sc_phytype = WMPHY_BM;
5703 		sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
5704 		sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
5705 		break;
5706 	default:
5707 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
5708 			sc->sc_mii.mii_readreg = wm_sgmii_readreg;
5709 			sc->sc_mii.mii_writereg = wm_sgmii_writereg;
5710 		} else if (sc->sc_type >= WM_T_80003) {
5711 			sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
5712 			sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
5713 		} else if (sc->sc_type >= WM_T_82544) {
5714 			sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
5715 			sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
5716 		} else {
5717 			sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
5718 			sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
5719 		}
5720 		break;
5721 	}
5722 	sc->sc_mii.mii_statchg = wm_gmii_statchg;
5723 
5724 	wm_gmii_reset(sc);
5725 
5726 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
5727 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
5728 	    wm_gmii_mediastatus);
5729 
5730 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
5731 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)) {
5732 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
5733 			/* Attach only one port */
5734 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
5735 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
5736 		} else {
5737 			int i;
5738 			uint32_t ctrl_ext;
5739 
5740 			/* Power on sgmii phy if it is disabled */
5741 			ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
5742 			CSR_WRITE(sc, WMREG_CTRL_EXT,
5743 			    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
5744 			CSR_WRITE_FLUSH(sc);
5745 			delay(300*1000); /* XXX too long */
5746 
5747 			/* from 1 to 8 */
5748 			for (i = 1; i < 8; i++)
5749 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
5750 				    i, MII_OFFSET_ANY, MIIF_DOPAUSE);
5751 
5752 			/* restore previous sfp cage power state */
5753 			CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
5754 		}
5755 	} else {
5756 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
5757 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
5758 	}
5759 
5760 	if ((sc->sc_type == WM_T_PCH2) &&
5761 	    (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL)) {
5762 		wm_set_mdio_slow_mode_hv(sc);
5763 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
5764 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
5765 	}
5766 
5767 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
5768 		/* if failed, retry with *_bm_* */
5769 		sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
5770 		sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
5771 
5772 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
5773 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
5774 	}
5775 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
5776 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
5777 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
5778 		sc->sc_phytype = WMPHY_NONE;
5779 	} else {
5780 		/* Check PHY type */
5781 		uint32_t model;
5782 		struct mii_softc *child;
5783 
5784 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
5785 		if (device_is_a(child->mii_dev, "igphy")) {
5786 			struct igphy_softc *isc = (struct igphy_softc *)child;
5787 
5788 			model = isc->sc_mii.mii_mpd_model;
5789 			if (model == MII_MODEL_yyINTEL_I82566)
5790 				sc->sc_phytype = WMPHY_IGP_3;
5791 		}
5792 
5793 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
5794 	}
5795 }
5796 
5797 /*
5798  * wm_gmii_mediastatus:	[ifmedia interface function]
5799  *
5800  *	Get the current interface media status on a 1000BASE-T device.
5801  */
5802 static void
5803 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5804 {
5805 	struct wm_softc *sc = ifp->if_softc;
5806 
5807 	ether_mediastatus(ifp, ifmr);
5808 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
5809 	    | sc->sc_flowflags;
5810 }
5811 
5812 /*
5813  * wm_gmii_mediachange:	[ifmedia interface function]
5814  *
5815  *	Set hardware to newly-selected media on a 1000BASE-T device.
5816  */
5817 static int
5818 wm_gmii_mediachange(struct ifnet *ifp)
5819 {
5820 	struct wm_softc *sc = ifp->if_softc;
5821 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5822 	int rc;
5823 
5824 	if ((ifp->if_flags & IFF_UP) == 0)
5825 		return 0;
5826 
5827 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
5828 	sc->sc_ctrl |= CTRL_SLU;
5829 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
5830 	    || (sc->sc_type > WM_T_82543)) {
5831 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
5832 	} else {
5833 		sc->sc_ctrl &= ~CTRL_ASDE;
5834 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5835 		if (ife->ifm_media & IFM_FDX)
5836 			sc->sc_ctrl |= CTRL_FD;
5837 		switch (IFM_SUBTYPE(ife->ifm_media)) {
5838 		case IFM_10_T:
5839 			sc->sc_ctrl |= CTRL_SPEED_10;
5840 			break;
5841 		case IFM_100_TX:
5842 			sc->sc_ctrl |= CTRL_SPEED_100;
5843 			break;
5844 		case IFM_1000_T:
5845 			sc->sc_ctrl |= CTRL_SPEED_1000;
5846 			break;
5847 		default:
5848 			panic("wm_gmii_mediachange: bad media 0x%x",
5849 			    ife->ifm_media);
5850 		}
5851 	}
5852 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5853 	if (sc->sc_type <= WM_T_82543)
5854 		wm_gmii_reset(sc);
5855 
5856 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
5857 		return 0;
5858 	return rc;
5859 }
5860 
5861 #define	MDI_IO		CTRL_SWDPIN(2)
5862 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
5863 #define	MDI_CLK		CTRL_SWDPIN(3)
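
/*
 * The two helpers below bit-bang IEEE 802.3 clause 22 management frames
 * over the software-definable pins: MDI_DIR selects the data direction,
 * MDI_IO carries the data bit, and MDI_CLK is toggled around each bit with
 * ~10us of setup/hold time.  A read, as issued by wm_gmii_i82543_readreg()
 * below, shifts out a 32-bit preamble of ones followed by a 14-bit header,
 * MSB first:
 *
 *	bits 13-12	start delimiter (MII_COMMAND_START)
 *	bits 11-10	opcode (MII_COMMAND_READ)
 *	bits  9-5	PHY address
 *	bits  4-0	register address
 *
 * after which the bus is turned around and 16 data bits are clocked in by
 * i82543_mii_recvbits().
 */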
5864 
5865 static void
5866 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
5867 {
5868 	uint32_t i, v;
5869 
5870 	v = CSR_READ(sc, WMREG_CTRL);
5871 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
5872 	v |= MDI_DIR | CTRL_SWDPIO(3);
5873 
5874 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
5875 		if (data & i)
5876 			v |= MDI_IO;
5877 		else
5878 			v &= ~MDI_IO;
5879 		CSR_WRITE(sc, WMREG_CTRL, v);
5880 		delay(10);
5881 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5882 		delay(10);
5883 		CSR_WRITE(sc, WMREG_CTRL, v);
5884 		delay(10);
5885 	}
5886 }
5887 
5888 static uint32_t
5889 i82543_mii_recvbits(struct wm_softc *sc)
5890 {
5891 	uint32_t v, i, data = 0;
5892 
5893 	v = CSR_READ(sc, WMREG_CTRL);
5894 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
5895 	v |= CTRL_SWDPIO(3);
5896 
5897 	CSR_WRITE(sc, WMREG_CTRL, v);
5898 	delay(10);
5899 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5900 	delay(10);
5901 	CSR_WRITE(sc, WMREG_CTRL, v);
5902 	delay(10);
5903 
5904 	for (i = 0; i < 16; i++) {
5905 		data <<= 1;
5906 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5907 		delay(10);
5908 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
5909 			data |= 1;
5910 		CSR_WRITE(sc, WMREG_CTRL, v);
5911 		delay(10);
5912 	}
5913 
5914 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5915 	delay(10);
5916 	CSR_WRITE(sc, WMREG_CTRL, v);
5917 	delay(10);
5918 
5919 	return data;
5920 }
5921 
5922 #undef MDI_IO
5923 #undef MDI_DIR
5924 #undef MDI_CLK
5925 
5926 /*
5927  * wm_gmii_i82543_readreg:	[mii interface function]
5928  *
5929  *	Read a PHY register on the GMII (i82543 version).
5930  */
5931 static int
5932 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
5933 {
5934 	struct wm_softc *sc = device_private(self);
5935 	int rv;
5936 
5937 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
5938 	i82543_mii_sendbits(sc, reg | (phy << 5) |
5939 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
5940 	rv = i82543_mii_recvbits(sc) & 0xffff;
5941 
5942 	DPRINTF(WM_DEBUG_GMII,
5943 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
5944 	    device_xname(sc->sc_dev), phy, reg, rv));
5945 
5946 	return rv;
5947 }
5948 
5949 /*
5950  * wm_gmii_i82543_writereg:	[mii interface function]
5951  *
5952  *	Write a PHY register on the GMII (i82543 version).
5953  */
5954 static void
5955 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
5956 {
5957 	struct wm_softc *sc = device_private(self);
5958 
5959 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
5960 	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
5961 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
5962 	    (MII_COMMAND_START << 30), 32);
5963 }
5964 
5965 /*
5966  * wm_gmii_i82544_readreg:	[mii interface function]
5967  *
5968  *	Read a PHY register on the GMII.
5969  */
5970 static int
5971 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
5972 {
5973 	struct wm_softc *sc = device_private(self);
5974 	uint32_t mdic = 0;
5975 	int i, rv;
5976 
5977 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
5978 	    MDIC_REGADD(reg));
5979 
5980 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
5981 		mdic = CSR_READ(sc, WMREG_MDIC);
5982 		if (mdic & MDIC_READY)
5983 			break;
5984 		delay(50);
5985 	}
5986 
5987 	if ((mdic & MDIC_READY) == 0) {
5988 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
5989 		    device_xname(sc->sc_dev), phy, reg);
5990 		rv = 0;
5991 	} else if (mdic & MDIC_E) {
5992 #if 0 /* This is normal if no PHY is present. */
5993 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
5994 		    device_xname(sc->sc_dev), phy, reg);
5995 #endif
5996 		rv = 0;
5997 	} else {
5998 		rv = MDIC_DATA(mdic);
5999 		if (rv == 0xffff)
6000 			rv = 0;
6001 	}
6002 
6003 	return rv;
6004 }
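
/*
 * In contrast to the bit-banged i82543 path above, the i82544 and newer
 * parts expose MDIO through the single MDIC register: software writes one
 * command word combining the opcode (MDIC_OP_READ or MDIC_OP_WRITE), the
 * PHY and register addresses (MDIC_PHYADD/MDIC_REGADD) and, for writes,
 * the data (MDIC_DATA), then polls for MDIC_READY; MDIC_E reports an
 * access error.  A minimal sketch of the idiom used in the accessors
 * above and below (locals as in those functions, timeout handling
 * omitted):
 */
#if 0	/* illustrative only */
	CSR_WRITE(sc, WMREG_MDIC,
	    MDIC_OP_READ | MDIC_PHYADD(phy) | MDIC_REGADD(reg));
	do {
		delay(50);
		mdic = CSR_READ(sc, WMREG_MDIC);
	} while ((mdic & MDIC_READY) == 0);
	rv = MDIC_DATA(mdic);
#endif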
6005 
6006 /*
6007  * wm_gmii_i82544_writereg:	[mii interface function]
6008  *
6009  *	Write a PHY register on the GMII.
6010  */
6011 static void
6012 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6013 {
6014 	struct wm_softc *sc = device_private(self);
6015 	uint32_t mdic = 0;
6016 	int i;
6017 
6018 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6019 	    MDIC_REGADD(reg) | MDIC_DATA(val));
6020 
6021 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6022 		mdic = CSR_READ(sc, WMREG_MDIC);
6023 		if (mdic & MDIC_READY)
6024 			break;
6025 		delay(50);
6026 	}
6027 
6028 	if ((mdic & MDIC_READY) == 0)
6029 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6030 		    device_xname(sc->sc_dev), phy, reg);
6031 	else if (mdic & MDIC_E)
6032 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6033 		    device_xname(sc->sc_dev), phy, reg);
6034 }
6035 
6036 /*
6037  * wm_gmii_i80003_readreg:	[mii interface function]
6038  *
 *	Read a PHY register on the Kumeran bus.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6042  */
6043 static int
6044 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6045 {
6046 	struct wm_softc *sc = device_private(self);
6047 	int sem;
6048 	int rv;
6049 
6050 	if (phy != 1) /* only one PHY on kumeran bus */
6051 		return 0;
6052 
6053 	sem = swfwphysem[sc->sc_funcid];
6054 	if (wm_get_swfw_semaphore(sc, sem)) {
6055 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6056 		    __func__);
6057 		return 0;
6058 	}
6059 
6060 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6061 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6062 		    reg >> GG82563_PAGE_SHIFT);
6063 	} else {
6064 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6065 		    reg >> GG82563_PAGE_SHIFT);
6066 	}
	/* Wait another 200us to work around a bug in the MDIC ready bit */
6068 	delay(200);
6069 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6070 	delay(200);
6071 
6072 	wm_put_swfw_semaphore(sc, sem);
6073 	return rv;
6074 }
6075 
6076 /*
6077  * wm_gmii_i80003_writereg:	[mii interface function]
6078  *
 *	Write a PHY register on the Kumeran bus.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6082  */
6083 static void
6084 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6085 {
6086 	struct wm_softc *sc = device_private(self);
6087 	int sem;
6088 
6089 	if (phy != 1) /* only one PHY on kumeran bus */
6090 		return;
6091 
6092 	sem = swfwphysem[sc->sc_funcid];
6093 	if (wm_get_swfw_semaphore(sc, sem)) {
6094 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6095 		    __func__);
6096 		return;
6097 	}
6098 
6099 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6100 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6101 		    reg >> GG82563_PAGE_SHIFT);
6102 	} else {
6103 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6104 		    reg >> GG82563_PAGE_SHIFT);
6105 	}
	/* Wait another 200us to work around a bug in the MDIC ready bit */
6107 	delay(200);
6108 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6109 	delay(200);
6110 
6111 	wm_put_swfw_semaphore(sc, sem);
6112 }
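
/*
 * To summarize the GG82563 paging scheme used above: callers pass a flat
 * "reg" value whose high bits carry the page number and whose low bits
 * (reg & GG82563_MAX_REG_ADDRESS) carry the offset within that page.  The
 * page is first latched by writing reg >> GG82563_PAGE_SHIFT to
 * GG82563_PHY_PAGE_SELECT (or to the alternate select register when the
 * in-page offset is at or above GG82563_MIN_ALT_REG), and only then is the
 * in-page offset accessed through the normal MDIC path, so every access is
 * really two MDIC transactions plus the 200us settle delays.
 */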
6113 
6114 /*
6115  * wm_gmii_bm_readreg:	[mii interface function]
6116  *
 *	Read a PHY register on the BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6120  */
6121 static int
6122 wm_gmii_bm_readreg(device_t self, int phy, int reg)
6123 {
6124 	struct wm_softc *sc = device_private(self);
6125 	int sem;
6126 	int rv;
6127 
6128 	sem = swfwphysem[sc->sc_funcid];
6129 	if (wm_get_swfw_semaphore(sc, sem)) {
6130 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6131 		    __func__);
6132 		return 0;
6133 	}
6134 
6135 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6136 		if (phy == 1)
6137 			wm_gmii_i82544_writereg(self, phy, 0x1f,
6138 			    reg);
6139 		else
6140 			wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6141 			    reg >> GG82563_PAGE_SHIFT);
6142 
6143 	}
6144 
6145 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6146 	wm_put_swfw_semaphore(sc, sem);
6147 	return rv;
6148 }
6149 
6150 /*
6151  * wm_gmii_bm_writereg:	[mii interface function]
6152  *
 *	Write a PHY register on the BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6156  */
6157 static void
6158 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6159 {
6160 	struct wm_softc *sc = device_private(self);
6161 	int sem;
6162 
6163 	sem = swfwphysem[sc->sc_funcid];
6164 	if (wm_get_swfw_semaphore(sc, sem)) {
6165 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6166 		    __func__);
6167 		return;
6168 	}
6169 
6170 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6171 		if (phy == 1)
6172 			wm_gmii_i82544_writereg(self, phy, 0x1f,
6173 			    reg);
6174 		else
6175 			wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6176 			    reg >> GG82563_PAGE_SHIFT);
6177 
6178 	}
6179 
6180 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6181 	wm_put_swfw_semaphore(sc, sem);
6182 }
6183 
6184 static void
6185 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
6186 {
6187 	struct wm_softc *sc = device_private(self);
6188 	uint16_t regnum = BM_PHY_REG_NUM(offset);
6189 	uint16_t wuce;
6190 
6191 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
6192 	if (sc->sc_type == WM_T_PCH) {
		/* XXX the e1000 driver does nothing here... why? */
6194 	}
6195 
6196 	/* Set page 769 */
6197 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6198 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6199 
6200 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
6201 
6202 	wuce &= ~BM_WUC_HOST_WU_BIT;
6203 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
6204 	    wuce | BM_WUC_ENABLE_BIT);
6205 
6206 	/* Select page 800 */
6207 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6208 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
6209 
6210 	/* Write page 800 */
6211 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
6212 
6213 	if (rd)
6214 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
6215 	else
6216 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
6217 
6218 	/* Set page 769 */
6219 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6220 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6221 
6222 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
6223 }
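
/*
 * To summarize the dance above, every access to a wakeup (page 800)
 * register is a fixed sequence of MDIC transactions:
 *
 *	1. select page 769 and set BM_WUC_ENABLE_BIT (with
 *	   BM_WUC_HOST_WU_BIT cleared) to unlock page 800;
 *	2. select page 800;
 *	3. write the target register number to BM_WUC_ADDRESS_OPCODE;
 *	4. read or write the value through BM_WUC_DATA_OPCODE;
 *	5. select page 769 again and restore the saved enable bits.
 */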
6224 
6225 /*
6226  * wm_gmii_hv_readreg:	[mii interface function]
6227  *
 *	Read a PHY register on the HV (PCH) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6231  */
6232 static int
6233 wm_gmii_hv_readreg(device_t self, int phy, int reg)
6234 {
6235 	struct wm_softc *sc = device_private(self);
6236 	uint16_t page = BM_PHY_REG_PAGE(reg);
6237 	uint16_t regnum = BM_PHY_REG_NUM(reg);
6238 	uint16_t val;
6239 	int rv;
6240 
6241 	if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6242 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6243 		    __func__);
6244 		return 0;
6245 	}
6246 
6247 	/* XXX Workaround failure in MDIO access while cable is disconnected */
6248 	if (sc->sc_phytype == WMPHY_82577) {
6249 		/* XXX must write */
6250 	}
6251 
6252 	/* Page 800 works differently than the rest so it has its own func */
6253 	if (page == BM_WUC_PAGE) {
6254 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
6255 		return val;
6256 	}
6257 
	/*
	 * Pages lower than 768 work differently from the rest and would
	 * need their own function (not implemented yet)
	 */
6262 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6263 		printf("gmii_hv_readreg!!!\n");
6264 		return 0;
6265 	}
6266 
6267 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6268 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6269 		    page << BME1000_PAGE_SHIFT);
6270 	}
6271 
6272 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
6273 	wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6274 	return rv;
6275 }
6276 
6277 /*
6278  * wm_gmii_hv_writereg:	[mii interface function]
6279  *
 *	Write a PHY register on the HV (PCH) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6283  */
6284 static void
6285 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
6286 {
6287 	struct wm_softc *sc = device_private(self);
6288 	uint16_t page = BM_PHY_REG_PAGE(reg);
6289 	uint16_t regnum = BM_PHY_REG_NUM(reg);
6290 
6291 	if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6292 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6293 		    __func__);
6294 		return;
6295 	}
6296 
6297 	/* XXX Workaround failure in MDIO access while cable is disconnected */
6298 
6299 	/* Page 800 works differently than the rest so it has its own func */
6300 	if (page == BM_WUC_PAGE) {
6301 		uint16_t tmp;
6302 
6303 		tmp = val;
6304 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
6305 		return;
6306 	}
6307 
	/*
	 * Pages lower than 768 work differently from the rest and would
	 * need their own function (not implemented yet)
	 */
6312 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6313 		printf("gmii_hv_writereg!!!\n");
6314 		return;
6315 	}
6316 
6317 	/*
6318 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
6319 	 * Power Down (whenever bit 11 of the PHY control register is set)
6320 	 */
6321 
6322 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6323 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6324 		    page << BME1000_PAGE_SHIFT);
6325 	}
6326 
6327 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
6328 	wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6329 }
6330 
6331 /*
 * wm_sgmii_readreg:	[mii interface function]
 *
 *	Read a PHY register on the SGMII bus via the I2CCMD register.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6337  */
6338 static int
6339 wm_sgmii_readreg(device_t self, int phy, int reg)
6340 {
6341 	struct wm_softc *sc = device_private(self);
6342 	uint32_t i2ccmd;
6343 	int i, rv;
6344 
6345 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
6346 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6347 		    __func__);
6348 		return 0;
6349 	}
6350 
6351 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
6352 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
6353 	    | I2CCMD_OPCODE_READ;
6354 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
6355 
6356 	/* Poll the ready bit */
6357 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
6358 		delay(50);
6359 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
6360 		if (i2ccmd & I2CCMD_READY)
6361 			break;
6362 	}
6363 	if ((i2ccmd & I2CCMD_READY) == 0)
6364 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
6365 	if ((i2ccmd & I2CCMD_ERROR) != 0)
6366 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
6367 
6368 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
6369 
6370 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6371 	return rv;
6372 }
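
/*
 * Note the byte swap when extracting the result above: the I2CCMD data
 * field holds the register value as it arrived on the wire, presumably
 * most significant byte first, so the two bytes are exchanged to yield a
 * host-order 16-bit value.
 */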
6373 
6374 /*
 * wm_sgmii_writereg:	[mii interface function]
 *
 *	Write a PHY register on the SGMII bus via the I2CCMD register.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6380  */
6381 static void
6382 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
6383 {
6384 	struct wm_softc *sc = device_private(self);
6385 	uint32_t i2ccmd;
6386 	int i;
6387 
6388 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
6389 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6390 		    __func__);
6391 		return;
6392 	}
6393 
6394 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
6395 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
6396 	    | I2CCMD_OPCODE_WRITE;
6397 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
6398 
6399 	/* Poll the ready bit */
6400 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
6401 		delay(50);
6402 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
6403 		if (i2ccmd & I2CCMD_READY)
6404 			break;
6405 	}
6406 	if ((i2ccmd & I2CCMD_READY) == 0)
6407 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
6408 	if ((i2ccmd & I2CCMD_ERROR) != 0)
6409 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
6410 
	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6412 }
6413 
6414 /*
6415  * wm_gmii_statchg:	[mii interface function]
6416  *
6417  *	Callback from MII layer when media changes.
6418  */
6419 static void
6420 wm_gmii_statchg(device_t self)
6421 {
6422 	struct wm_softc *sc = device_private(self);
6423 	struct mii_data *mii = &sc->sc_mii;
6424 
6425 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
6426 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6427 	sc->sc_fcrtl &= ~FCRTL_XONE;
6428 
6429 	/*
6430 	 * Get flow control negotiation result.
6431 	 */
6432 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
6433 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
6434 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
6435 		mii->mii_media_active &= ~IFM_ETH_FMASK;
6436 	}
6437 
6438 	if (sc->sc_flowflags & IFM_FLOW) {
6439 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
6440 			sc->sc_ctrl |= CTRL_TFCE;
6441 			sc->sc_fcrtl |= FCRTL_XONE;
6442 		}
6443 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
6444 			sc->sc_ctrl |= CTRL_RFCE;
6445 	}
6446 
6447 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
6448 		DPRINTF(WM_DEBUG_LINK,
6449 		    ("%s: LINK: statchg: FDX\n", device_xname(sc->sc_dev)));
6450 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6451 	} else {
6452 		DPRINTF(WM_DEBUG_LINK,
6453 		    ("%s: LINK: statchg: HDX\n", device_xname(sc->sc_dev)));
6454 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6455 	}
6456 
6457 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6458 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6459 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
6460 						 : WMREG_FCRTL, sc->sc_fcrtl);
6461 	if (sc->sc_type == WM_T_80003) {
6462 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
6463 		case IFM_1000_T:
6464 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
6465 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
6466 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
6467 			break;
6468 		default:
6469 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
6470 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
6471 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
6472 			break;
6473 		}
6474 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
6475 	}
6476 }
6477 
6478 /*
6479  * wm_kmrn_readreg:
6480  *
6481  *	Read a kumeran register
6482  */
6483 static int
6484 wm_kmrn_readreg(struct wm_softc *sc, int reg)
6485 {
6486 	int rv;
6487 
	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0) {
6489 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
6490 			aprint_error_dev(sc->sc_dev,
6491 			    "%s: failed to get semaphore\n", __func__);
6492 			return 0;
6493 		}
	} else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0) {
6495 		if (wm_get_swfwhw_semaphore(sc)) {
6496 			aprint_error_dev(sc->sc_dev,
6497 			    "%s: failed to get semaphore\n", __func__);
6498 			return 0;
6499 		}
6500 	}
6501 
6502 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
6503 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
6504 	    KUMCTRLSTA_REN);
6505 	delay(2);
6506 
6507 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
6508 
	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0)
6512 		wm_put_swfwhw_semaphore(sc);
6513 
6514 	return rv;
6515 }
6516 
6517 /*
6518  * wm_kmrn_writereg:
6519  *
6520  *	Write a kumeran register
6521  */
6522 static void
6523 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
6524 {
6525 
	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0) {
6527 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
6528 			aprint_error_dev(sc->sc_dev,
6529 			    "%s: failed to get semaphore\n", __func__);
6530 			return;
6531 		}
	} else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0) {
6533 		if (wm_get_swfwhw_semaphore(sc)) {
6534 			aprint_error_dev(sc->sc_dev,
6535 			    "%s: failed to get semaphore\n", __func__);
6536 			return;
6537 		}
6538 	}
6539 
6540 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
6541 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
6542 	    (val & KUMCTRLSTA_MASK));
6543 
	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0)
6547 		wm_put_swfwhw_semaphore(sc);
6548 }
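
/*
 * The Kumeran side-band registers accessed above share the single
 * KUMCTRLSTA register: the target offset is shifted into the
 * KUMCTRLSTA_OFFSET field, the low KUMCTRLSTA_MASK bits carry the data,
 * and setting KUMCTRLSTA_REN turns the operation into a read whose result
 * shows up in the same data bits after a short delay.
 */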
6549 
6550 static int
6551 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
6552 {
6553 	uint32_t eecd = 0;
6554 
6555 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
6556 	    || sc->sc_type == WM_T_82583) {
6557 		eecd = CSR_READ(sc, WMREG_EECD);
6558 
6559 		/* Isolate bits 15 & 16 */
6560 		eecd = ((eecd >> 15) & 0x03);
6561 
6562 		/* If both bits are set, device is Flash type */
6563 		if (eecd == 0x03)
6564 			return 0;
6565 	}
6566 	return 1;
6567 }
6568 
6569 static int
6570 wm_get_swsm_semaphore(struct wm_softc *sc)
6571 {
6572 	int32_t timeout;
6573 	uint32_t swsm;
6574 
6575 	/* Get the FW semaphore. */
6576 	timeout = 1000 + 1; /* XXX */
6577 	while (timeout) {
6578 		swsm = CSR_READ(sc, WMREG_SWSM);
6579 		swsm |= SWSM_SWESMBI;
6580 		CSR_WRITE(sc, WMREG_SWSM, swsm);
6581 		/* if we managed to set the bit we got the semaphore. */
6582 		swsm = CSR_READ(sc, WMREG_SWSM);
6583 		if (swsm & SWSM_SWESMBI)
6584 			break;
6585 
6586 		delay(50);
6587 		timeout--;
6588 	}
6589 
6590 	if (timeout == 0) {
		aprint_error_dev(sc->sc_dev, "could not acquire SWSM semaphore\n");
6592 		/* Release semaphores */
6593 		wm_put_swsm_semaphore(sc);
6594 		return 1;
6595 	}
6596 	return 0;
6597 }
6598 
6599 static void
6600 wm_put_swsm_semaphore(struct wm_softc *sc)
6601 {
6602 	uint32_t swsm;
6603 
6604 	swsm = CSR_READ(sc, WMREG_SWSM);
6605 	swsm &= ~(SWSM_SWESMBI);
6606 	CSR_WRITE(sc, WMREG_SWSM, swsm);
6607 }
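
/*
 * The SWSM semaphore is a set-and-test-back design: software sets
 * SWSM_SWESMBI and reads the register back; seeing the bit still set
 * means the semaphore was won, otherwise firmware holds it and the write
 * did not stick.  A minimal usage sketch of the acquire/use/release
 * pattern (illustrative only):
 */
#if 0
	if (wm_get_swsm_semaphore(sc) == 0) {
		/* ... access the shared EEPROM/PHY resource ... */
		wm_put_swsm_semaphore(sc);
	}
#endif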
6608 
6609 static int
6610 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
6611 {
6612 	uint32_t swfw_sync;
6613 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
6614 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;
6616 
6617 	for (timeout = 0; timeout < 200; timeout++) {
6618 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
6619 			if (wm_get_swsm_semaphore(sc)) {
6620 				aprint_error_dev(sc->sc_dev,
6621 				    "%s: failed to get semaphore\n",
6622 				    __func__);
6623 				return 1;
6624 			}
6625 		}
6626 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
6627 		if ((swfw_sync & (swmask | fwmask)) == 0) {
6628 			swfw_sync |= swmask;
6629 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
6630 			if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6631 				wm_put_swsm_semaphore(sc);
6632 			return 0;
6633 		}
6634 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6635 			wm_put_swsm_semaphore(sc);
6636 		delay(5000);
6637 	}
6638 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
6639 	    device_xname(sc->sc_dev), mask, swfw_sync);
6640 	return 1;
6641 }
6642 
6643 static void
6644 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
6645 {
6646 	uint32_t swfw_sync;
6647 
6648 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
6649 		while (wm_get_swsm_semaphore(sc) != 0)
6650 			continue;
6651 	}
6652 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
6653 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
6654 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
6655 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6656 		wm_put_swsm_semaphore(sc);
6657 }
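
/*
 * SW_FW_SYNC arbitration, as implemented above: each shared resource has
 * a software-owned bit (mask << SWFW_SOFT_SHIFT) and a firmware-owned bit
 * (mask << SWFW_FIRM_SHIFT).  The register itself is guarded by the SWSM
 * semaphore on devices that have one, so acquisition is: take SWSM, check
 * that both bits for the resource are clear, set the software bit, drop
 * SWSM.  Release clears only the software bit; the firmware bit is never
 * touched by the driver.
 */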
6658 
6659 static int
6660 wm_get_swfwhw_semaphore(struct wm_softc *sc)
6661 {
6662 	uint32_t ext_ctrl;
	int timeout;
6664 
6665 	for (timeout = 0; timeout < 200; timeout++) {
6666 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6667 		ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
6668 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
6669 
6670 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6671 		if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
6672 			return 0;
6673 		delay(5000);
6674 	}
6675 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
6676 	    device_xname(sc->sc_dev), ext_ctrl);
6677 	return 1;
6678 }
6679 
6680 static void
6681 wm_put_swfwhw_semaphore(struct wm_softc *sc)
6682 {
	uint32_t ext_ctrl;

	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6685 	ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
6686 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
6687 }
6688 
6689 static int
6690 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
6691 {
6692 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
6693 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
6694 
6695 	if ((sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
6696 		/* Value of bit 22 corresponds to the flash bank we're on. */
6697 		*bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
6698 	} else {
6699 		uint8_t bank_high_byte;
6700 		wm_read_ich8_byte(sc, act_offset, &bank_high_byte);
6701 		if ((bank_high_byte & 0xc0) == 0x80)
6702 			*bank = 0;
6703 		else {
6704 			wm_read_ich8_byte(sc, act_offset + bank1_offset,
6705 			    &bank_high_byte);
6706 			if ((bank_high_byte & 0xc0) == 0x80)
6707 				*bank = 1;
6708 			else {
6709 				aprint_error_dev(sc->sc_dev,
6710 				    "EEPROM not present\n");
6711 				return -1;
6712 			}
6713 		}
6714 	}
6715 
6716 	return 0;
6717 }
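
/*
 * On ICH10 and PCH the valid bank is found by probing the NVM signature
 * word of each bank directly: the high byte of ICH_NVM_SIG_WORD must have
 * bits 7:6 equal to 10b (i.e. (byte & 0xc0) == 0x80) for the bank to be
 * valid.  Bank 0 is checked first, then bank 1 at sc_ich8_flash_bank_size
 * words past it (the sizeof(uint16_t) scaling converts words to a byte
 * offset).  Older ICH parts instead report the bank in EECD_SEC1VAL.
 */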
6718 
6719 /******************************************************************************
6720  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
6721  * register.
6722  *
6723  * sc - Struct containing variables accessed by shared code
6724  * offset - offset of word in the EEPROM to read
6725  * data - word read from the EEPROM
6726  * words - number of words to read
6727  *****************************************************************************/
6728 static int
6729 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
6730 {
6731 	int32_t  error = 0;
6732 	uint32_t flash_bank = 0;
6733 	uint32_t act_offset = 0;
6734 	uint32_t bank_offset = 0;
6735 	uint16_t word = 0;
6736 	uint16_t i = 0;
6737 
6738 	/* We need to know which is the valid flash bank.  In the event
6739 	 * that we didn't allocate eeprom_shadow_ram, we may not be
6740 	 * managing flash_bank.  So it cannot be trusted and needs
6741 	 * to be updated with each read.
6742 	 */
6743 	error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
6744 	if (error) {
6745 		aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
6746 		    __func__);
6747 		return error;
6748 	}
6749 
6750 	/* Adjust offset appropriately if we're on bank 1 - adjust for word size */
6751 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
6752 
6753 	error = wm_get_swfwhw_semaphore(sc);
6754 	if (error) {
6755 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6756 		    __func__);
6757 		return error;
6758 	}
6759 
6760 	for (i = 0; i < words; i++) {
6761 		/* The NVM part needs a byte offset, hence * 2 */
6762 		act_offset = bank_offset + ((offset + i) * 2);
6763 		error = wm_read_ich8_word(sc, act_offset, &word);
6764 		if (error) {
6765 			aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
6766 			    __func__);
6767 			break;
6768 		}
6769 		data[i] = word;
6770 	}
6771 
6772 	wm_put_swfwhw_semaphore(sc);
6773 	return error;
6774 }
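
/*
 * A worked example of the offset arithmetic above, with illustrative
 * numbers: with a flash bank size of 0x1000 words and flash_bank == 1,
 * bank_offset is 0x1000 * 2 = 0x2000 bytes; reading EEPROM word 0x10 then
 * accesses flash byte offset 0x2000 + 0x10 * 2 = 0x2020, since the ICH
 * flash interface is byte-addressed while EEPROM offsets are counted in
 * 16-bit words.
 */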
6775 
6776 /******************************************************************************
6777  * This function does initial flash setup so that a new read/write/erase cycle
6778  * can be started.
6779  *
6780  * sc - The pointer to the hw structure
6781  ****************************************************************************/
6782 static int32_t
6783 wm_ich8_cycle_init(struct wm_softc *sc)
6784 {
6785 	uint16_t hsfsts;
6786 	int32_t error = 1;
6787 	int32_t i     = 0;
6788 
6789 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6790 
	/* Maybe check the Flash Descriptor Valid bit in hw status */
6792 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
6793 		return error;
6794 	}
6795 
	/* Clear FCERR and DAEL in hw status by writing a 1 to each */
6798 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
6799 
6800 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6801 
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against in order to start a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it reads as 1 after a
	 * hardware reset, which could then be used to tell whether a cycle
	 * is in progress or has been completed.  We should also have some
	 * software semaphore mechanism to guard FDONE or the
	 * cycle-in-progress bit so that accesses to those bits by two
	 * threads are serialized, or some way to keep two threads from
	 * starting a cycle at the same time.
	 */
6812 
6813 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
6814 		/*
6815 		 * There is no cycle running at present, so we can start a
6816 		 * cycle
6817 		 */
6818 
6819 		/* Begin by setting Flash Cycle Done. */
6820 		hsfsts |= HSFSTS_DONE;
6821 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6822 		error = 0;
6823 	} else {
		/*
		 * Otherwise poll for some time so the current cycle has a
		 * chance to end before giving up.
		 */
6828 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
6829 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6830 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
6831 				error = 0;
6832 				break;
6833 			}
6834 			delay(1);
6835 		}
6836 		if (error == 0) {
			/*
			 * The previous cycle ended before the timeout, so
			 * now set the Flash Cycle Done bit.
			 */
6841 			hsfsts |= HSFSTS_DONE;
6842 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6843 		}
6844 	}
6845 	return error;
6846 }
6847 
6848 /******************************************************************************
6849  * This function starts a flash cycle and waits for its completion
6850  *
6851  * sc - The pointer to the hw structure
6852  ****************************************************************************/
6853 static int32_t
6854 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
6855 {
6856 	uint16_t hsflctl;
6857 	uint16_t hsfsts;
6858 	int32_t error = 1;
6859 	uint32_t i = 0;
6860 
6861 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
6862 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
6863 	hsflctl |= HSFCTL_GO;
6864 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
6865 
6866 	/* wait till FDONE bit is set to 1 */
6867 	do {
6868 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6869 		if (hsfsts & HSFSTS_DONE)
6870 			break;
6871 		delay(1);
6872 		i++;
6873 	} while (i < timeout);
	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
6875 		error = 0;
6876 
6877 	return error;
6878 }
6879 
6880 /******************************************************************************
6881  * Reads a byte or word from the NVM using the ICH8 flash access registers.
6882  *
6883  * sc - The pointer to the hw structure
6884  * index - The index of the byte or word to read.
6885  * size - Size of data to read, 1=byte 2=word
6886  * data - Pointer to the word to store the value read.
6887  *****************************************************************************/
6888 static int32_t
6889 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
6890     uint32_t size, uint16_t* data)
6891 {
6892 	uint16_t hsfsts;
6893 	uint16_t hsflctl;
6894 	uint32_t flash_linear_address;
6895 	uint32_t flash_data = 0;
6896 	int32_t error = 1;
6897 	int32_t count = 0;
6898 
	if (size < 1 || size > 2 || data == NULL ||
6900 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
6901 		return error;
6902 
6903 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
6904 	    sc->sc_ich8_flash_base;
6905 
6906 	do {
6907 		delay(1);
6908 		/* Steps */
6909 		error = wm_ich8_cycle_init(sc);
6910 		if (error)
6911 			break;
6912 
6913 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
6914 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
6915 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
6916 		    & HSFCTL_BCOUNT_MASK;
6917 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
6918 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
6919 
6920 		/*
6921 		 * Write the last 24 bits of index into Flash Linear address
6922 		 * field in Flash Address
6923 		 */
6924 		/* TODO: TBD maybe check the index against the size of flash */
6925 
6926 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
6927 
6928 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
6929 
		/*
		 * If FCERR is set, clear it and retry the whole sequence a
		 * few more times; otherwise read the result out of the
		 * Flash Data0 register, least significant byte first.
		 */
6936 		if (error == 0) {
6937 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
6938 			if (size == 1)
6939 				*data = (uint8_t)(flash_data & 0x000000FF);
6940 			else if (size == 2)
6941 				*data = (uint16_t)(flash_data & 0x0000FFFF);
6942 			break;
6943 		} else {
6944 			/*
6945 			 * If we've gotten here, then things are probably
6946 			 * completely hosed, but if the error condition is
6947 			 * detected, it won't hurt to give it another try...
6948 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
6949 			 */
6950 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6951 			if (hsfsts & HSFSTS_ERR) {
6952 				/* Repeat for some time before giving up. */
6953 				continue;
6954 			} else if ((hsfsts & HSFSTS_DONE) == 0)
6955 				break;
6956 		}
6957 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
6958 
6959 	return error;
6960 }
6961 
6962 /******************************************************************************
6963  * Reads a single byte from the NVM using the ICH8 flash access registers.
6964  *
6965  * sc - pointer to wm_hw structure
6966  * index - The index of the byte to read.
6967  * data - Pointer to a byte to store the value read.
6968  *****************************************************************************/
6969 static int32_t
6970 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
6971 {
6972 	int32_t status;
6973 	uint16_t word = 0;
6974 
6975 	status = wm_read_ich8_data(sc, index, 1, &word);
6976 	if (status == 0)
6977 		*data = (uint8_t)word;
6978 	else
6979 		*data = 0;
6980 
6981 	return status;
6982 }
6983 
6984 /******************************************************************************
6985  * Reads a word from the NVM using the ICH8 flash access registers.
6986  *
6987  * sc - pointer to wm_hw structure
6988  * index - The starting byte index of the word to read.
6989  * data - Pointer to a word to store the value read.
6990  *****************************************************************************/
6991 static int32_t
6992 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
6993 {
6994 	int32_t status;
6995 
6996 	status = wm_read_ich8_data(sc, index, 2, data);
6997 	return status;
6998 }
6999 
7000 static int
7001 wm_check_mng_mode(struct wm_softc *sc)
7002 {
7003 	int rv;
7004 
7005 	switch (sc->sc_type) {
7006 	case WM_T_ICH8:
7007 	case WM_T_ICH9:
7008 	case WM_T_ICH10:
7009 	case WM_T_PCH:
7010 	case WM_T_PCH2:
7011 		rv = wm_check_mng_mode_ich8lan(sc);
7012 		break;
7013 	case WM_T_82574:
7014 	case WM_T_82583:
7015 		rv = wm_check_mng_mode_82574(sc);
7016 		break;
7017 	case WM_T_82571:
7018 	case WM_T_82572:
7019 	case WM_T_82573:
7020 	case WM_T_80003:
7021 		rv = wm_check_mng_mode_generic(sc);
7022 		break;
7023 	default:
		/* nothing to do */
7025 		rv = 0;
7026 		break;
7027 	}
7028 
7029 	return rv;
7030 }
7031 
7032 static int
7033 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
7034 {
7035 	uint32_t fwsm;
7036 
7037 	fwsm = CSR_READ(sc, WMREG_FWSM);
7038 
7039 	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
7040 		return 1;
7041 
7042 	return 0;
7043 }
7044 
7045 static int
7046 wm_check_mng_mode_82574(struct wm_softc *sc)
7047 {
7048 	uint16_t data;
7049 
7050 	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
7051 
7052 	if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
7053 		return 1;
7054 
7055 	return 0;
7056 }
7057 
7058 static int
7059 wm_check_mng_mode_generic(struct wm_softc *sc)
7060 {
7061 	uint32_t fwsm;
7062 
7063 	fwsm = CSR_READ(sc, WMREG_FWSM);
7064 
7065 	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
7066 		return 1;
7067 
7068 	return 0;
7069 }
7070 
7071 static int
7072 wm_enable_mng_pass_thru(struct wm_softc *sc)
7073 {
7074 	uint32_t manc, fwsm, factps;
7075 
7076 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
7077 		return 0;
7078 
7079 	manc = CSR_READ(sc, WMREG_MANC);
7080 
7081 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
7082 		device_xname(sc->sc_dev), manc));
7083 	if (((manc & MANC_RECV_TCO_EN) == 0)
7084 	    || ((manc & MANC_EN_MAC_ADDR_FILTER) == 0))
7085 		return 0;
7086 
7087 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
7088 		fwsm = CSR_READ(sc, WMREG_FWSM);
7089 		factps = CSR_READ(sc, WMREG_FACTPS);
7090 		if (((factps & FACTPS_MNGCG) == 0)
7091 		    && ((fwsm & FWSM_MODE_MASK)
7092 			== (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
7093 			return 1;
7094 	} else if (((manc & MANC_SMBUS_EN) != 0)
7095 	    && ((manc & MANC_ASF_EN) == 0))
7096 		return 1;
7097 
7098 	return 0;
7099 }
7100 
7101 static int
7102 wm_check_reset_block(struct wm_softc *sc)
7103 {
7104 	uint32_t reg;
7105 
7106 	switch (sc->sc_type) {
7107 	case WM_T_ICH8:
7108 	case WM_T_ICH9:
7109 	case WM_T_ICH10:
7110 	case WM_T_PCH:
7111 	case WM_T_PCH2:
7112 		reg = CSR_READ(sc, WMREG_FWSM);
7113 		if ((reg & FWSM_RSPCIPHY) != 0)
7114 			return 0;
7115 		else
7116 			return -1;
7117 		break;
7118 	case WM_T_82571:
7119 	case WM_T_82572:
7120 	case WM_T_82573:
7121 	case WM_T_82574:
7122 	case WM_T_82583:
7123 	case WM_T_80003:
7124 		reg = CSR_READ(sc, WMREG_MANC);
7125 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
7126 			return -1;
7127 		else
7128 			return 0;
7129 		break;
7130 	default:
7131 		/* no problem */
7132 		break;
7133 	}
7134 
7135 	return 0;
7136 }
7137 
7138 static void
7139 wm_get_hw_control(struct wm_softc *sc)
7140 {
7141 	uint32_t reg;
7142 
7143 	switch (sc->sc_type) {
7144 	case WM_T_82573:
7145 		reg = CSR_READ(sc, WMREG_SWSM);
7146 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
7147 		break;
7148 	case WM_T_82571:
7149 	case WM_T_82572:
7150 	case WM_T_82574:
7151 	case WM_T_82583:
7152 	case WM_T_80003:
7153 	case WM_T_ICH8:
7154 	case WM_T_ICH9:
7155 	case WM_T_ICH10:
7156 	case WM_T_PCH:
7157 	case WM_T_PCH2:
7158 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
7159 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
7160 		break;
7161 	default:
7162 		break;
7163 	}
7164 }
7165 
7166 static void
7167 wm_release_hw_control(struct wm_softc *sc)
7168 {
7169 	uint32_t reg;
7170 
7171 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
7172 		return;
7173 
7174 	if (sc->sc_type == WM_T_82573) {
7175 		reg = CSR_READ(sc, WMREG_SWSM);
		reg &= ~SWSM_DRV_LOAD;
		CSR_WRITE(sc, WMREG_SWSM, reg);
7178 	} else {
7179 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
7180 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
7181 	}
7182 }
7183 
7184 /* XXX Currently TBI only */
7185 static int
7186 wm_check_for_link(struct wm_softc *sc)
7187 {
7188 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7189 	uint32_t rxcw;
7190 	uint32_t ctrl;
7191 	uint32_t status;
7192 	uint32_t sig;
7193 
7194 	rxcw = CSR_READ(sc, WMREG_RXCW);
7195 	ctrl = CSR_READ(sc, WMREG_CTRL);
7196 	status = CSR_READ(sc, WMREG_STATUS);
7197 
7198 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
7199 
7200 	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
7201 		device_xname(sc->sc_dev), __func__,
7202 		((ctrl & CTRL_SWDPIN(1)) == sig),
7203 		((status & STATUS_LU) != 0),
7204 		((rxcw & RXCW_C) != 0)
7205 		    ));
7206 
7207 	/*
7208 	 * SWDPIN   LU RXCW
7209 	 *      0    0    0
7210 	 *      0    0    1	(should not happen)
7211 	 *      0    1    0	(should not happen)
7212 	 *      0    1    1	(should not happen)
7213 	 *      1    0    0	Disable autonego and force linkup
7214 	 *      1    0    1	got /C/ but not linkup yet
7215 	 *      1    1    0	(linkup)
7216 	 *      1    1    1	If IFM_AUTO, back to autonego
7217 	 *
7218 	 */
7219 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
7220 	    && ((status & STATUS_LU) == 0)
7221 	    && ((rxcw & RXCW_C) == 0)) {
7222 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
7223 			__func__));
7224 		sc->sc_tbi_linkup = 0;
7225 		/* Disable auto-negotiation in the TXCW register */
7226 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
7227 
7228 		/*
7229 		 * Force link-up and also force full-duplex.
7230 		 *
7231 		 * NOTE: the hardware updates CTRL's TFCE and RFCE bits
7232 		 * automatically, so refresh sc->sc_ctrl from the value just read.
7233 		 */
7234 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
7235 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7236 	} else if (((status & STATUS_LU) != 0)
7237 	    && ((rxcw & RXCW_C) != 0)
7238 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
7239 		sc->sc_tbi_linkup = 1;
7240 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
7241 			__func__));
7242 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7243 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
7244 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
7245 	    && ((rxcw & RXCW_C) != 0)) {
7246 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
7247 	} else {
7248 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
7249 			status));
7250 	}
7251 
7252 	return 0;
7253 }
7254 
7255 /* Work-around for 82566 Kumeran PCS lock loss */
7256 static void
7257 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
7258 {
7259 	int miistatus, active, i;
7260 	int reg;
7261 
7262 	miistatus = sc->sc_mii.mii_media_status;
7263 
7264 	/* If the link is not up, do nothing */
7265 	if ((miistatus & IFM_ACTIVE) == 0)
7266 		return;
7267 
7268 	active = sc->sc_mii.mii_media_active;
7269 
7270 	/* Nothing to do if the link speed is not 1Gbps */
7271 	if (IFM_SUBTYPE(active) != IFM_1000_T)
7272 		return;
7273 
7274 	for (i = 0; i < 10; i++) {
7275 		/* read twice */
7276 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
7277 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
7278 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
7279 			goto out;	/* GOOD! */
7280 
7281 		/* Reset the PHY */
7282 		wm_gmii_reset(sc);
7283 		delay(5*1000);
7284 	}
7285 
7286 	/* Disable GigE link negotiation */
7287 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
7288 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
7289 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7290 
7291 	/*
7292 	 * Call gig speed drop workaround on Gig disable before accessing
7293 	 * any PHY registers.
7294 	 */
7295 	wm_gig_downshift_workaround_ich8lan(sc);
7296 
7297 out:
7298 	return;
7299 }
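
/*
 * Sketch of the assumed call site (not compiled): the workaround above
 * is expected to run from the GMII link-state handler on ICH8 after the
 * MII status has been refreshed, which is why it simply returns when
 * the link is down or not at 1Gbps.
 */
#if 0
	if (sc->sc_type == WM_T_ICH8)
		wm_kmrn_lock_loss_workaround_ich8lan(sc);
#endif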
7300 
7301 /* WOL from S5 stops working */
7302 static void
7303 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
7304 {
7305 	uint16_t kmrn_reg;
7306 
7307 	/* Only for igp3 */
7308 	if (sc->sc_phytype == WMPHY_IGP_3) {
7309 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
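		/*
		 * Pulse the near-end loopback (NELPBK) diagnostic bit:
		 * write it set, then write it clear again.
		 */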
7310 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
7311 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
7312 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
7313 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
7314 	}
7315 }
7316 
7317 #ifdef WM_WOL
7318 /* Power down workaround on D3 */
7319 static void
7320 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
7321 {
7322 	uint32_t reg;
7323 	int i;
7324 
7325 	for (i = 0; i < 2; i++) {
7326 		/* Disable link */
7327 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
7328 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
7329 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7330 
7331 		/*
7332 		 * Call gig speed drop workaround on Gig disable before
7333 		 * accessing any PHY registers
7334 		 */
7335 		if (sc->sc_type == WM_T_ICH8)
7336 			wm_gig_downshift_workaround_ich8lan(sc);
7337 
7338 		/* Write VR power-down enable */
7339 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
7340 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
7341 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
7342 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
7343 
7344 		/* Read it back and test */
7345 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
7346 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
7347 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
7348 			break;
7349 
7350 		/* Issue PHY reset and repeat at most one more time */
7351 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7352 	}
7353 }
7354 #endif /* WM_WOL */
7355 
7356 /*
7357  * Workaround for pch's PHYs
7358  * XXX should be moved to new PHY driver?
7359  */
7360 static void
7361 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
7362 {
7363 	if (sc->sc_phytype == WMPHY_82577)
7364 		wm_set_mdio_slow_mode_hv(sc);
7365 
7366 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
7367 
7368 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
7369 
7370 	/* 82578 */
7371 	if (sc->sc_phytype == WMPHY_82578) {
7372 		/* PCH rev. < 3 */
7373 		if (sc->sc_rev < 3) {
7374 			/* XXX 6 bit shift? Why? Is it page2? */
7375 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
7376 			    0x66c0);
7377 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
7378 			    0xffff);
7379 		}
7380 
7381 		/* XXX phy rev. < 2 */
7382 	}
7383 
7384 	/* Select page 0 */
7385 
7386 	/* XXX acquire semaphore */
7387 	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
7388 	/* XXX release semaphore */
7389 
7390 	/*
7391 	 * Configure the K1 Si workaround during PHY reset, assuming the
7392 	 * link is up, so that K1 is disabled while the link runs at 1Gbps.
7393 	 */
7394 	wm_k1_gig_workaround_hv(sc, 1);
7395 }
7396 
7397 static void
7398 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
7399 {
7400 
7401 	wm_set_mdio_slow_mode_hv(sc);
7402 }
7403 
7404 static void
7405 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
7406 {
7407 	int k1_enable = sc->sc_nvm_k1_enabled;
7408 
7409 	/* XXX acquire semaphore */
7410 
7411 	if (link) {
7412 		k1_enable = 0;
7413 
7414 		/* Link stall fix for link up */
7415 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
7416 	} else {
7417 		/* Link stall fix for link down */
7418 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
7419 	}
7420 
7421 	wm_configure_k1_ich8lan(sc, k1_enable);
7422 
7423 	/* XXX release semaphore */
7424 }
7425 
7426 static void
7427 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
7428 {
7429 	uint32_t reg;
7430 
7431 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
7432 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
7433 	    reg | HV_KMRN_MDIO_SLOW);
7434 }
7435 
7436 static void
7437 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
7438 {
7439 	uint32_t ctrl, ctrl_ext, tmp;
7440 	uint16_t kmrn_reg;
7441 
7442 	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
7443 
7444 	if (k1_enable)
7445 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
7446 	else
7447 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
7448 
7449 	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
7450 
7451 	delay(20);
7452 
7453 	ctrl = CSR_READ(sc, WMREG_CTRL);
7454 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
7455 
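	/*
	 * Briefly force the MAC speed (with the speed-select bypass set
	 * in CTRL_EXT) so the K1 change takes effect, then restore the
	 * original CTRL and CTRL_EXT values below.
	 */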
7456 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
7457 	tmp |= CTRL_FRCSPD;
7458 
7459 	CSR_WRITE(sc, WMREG_CTRL, tmp);
7460 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
7461 	delay(20);
7462 
7463 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
7464 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
7465 	delay(20);
7466 }
7467 
7468 static void
7469 wm_smbustopci(struct wm_softc *sc)
7470 {
7471 	uint32_t fwsm;
7472 
7473 	fwsm = CSR_READ(sc, WMREG_FWSM);
7474 	if (((fwsm & FWSM_FW_VALID) == 0)
7475 	    && (wm_check_reset_block(sc) == 0)) {
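		/*
		 * Toggle the LANPHYPC value pin to force the PHY's
		 * interface from SMBus mode back to PCIe mode; this is
		 * done only when no valid firmware is present and PHY
		 * resets are not blocked.
		 */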
7476 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
7477 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
7478 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7479 		delay(10);
7480 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
7481 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7482 		delay(50*1000);
7483 
7484 		/*
7485 		 * Gate automatic PHY configuration by hardware on non-managed
7486 		 * 82579
7487 		 */
7488 		if (sc->sc_type == WM_T_PCH2)
7489 			wm_gate_hw_phy_config_ich8lan(sc, 1);
7490 	}
7491 }
7492 
7493 static void
7494 wm_set_pcie_completion_timeout(struct wm_softc *sc)
7495 {
7496 	uint32_t gcr;
7497 	pcireg_t ctrl2;
7498 
7499 	gcr = CSR_READ(sc, WMREG_GCR);
7500 
7501 	/* Only take action if the timeout value is at its default of 0 */
7502 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
7503 		goto out;
7504 
7505 	if ((gcr & GCR_CAP_VER2) == 0) {
7506 		gcr |= GCR_CMPL_TMOUT_10MS;
7507 		goto out;
7508 	}
7509 
7510 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
7511 	    sc->sc_pcixe_capoff + PCI_PCIE_DCSR2);
7512 	ctrl2 |= WM_PCI_PCIE_DCSR2_16MS;
7513 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
7514 	    sc->sc_pcixe_capoff + PCI_PCIE_DCSR2, ctrl2);
7515 
7516 out:
7517 	/* Disable completion timeout resend */
7518 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
7519 
7520 	CSR_WRITE(sc, WMREG_GCR, gcr);
7521 }
7522 
7523 /* special case - for 82575 - need to do manual init ... */
7524 static void
7525 wm_reset_init_script_82575(struct wm_softc *sc)
7526 {
7527 	/*
7528 	 * Remark: this is untested code - we have no board without EEPROM;
7529 	 * same setup as mentioned in the FreeBSD driver for the i82575.
7530 	 */
7531 
7532 	/* SerDes configuration via SERDESCTRL */
7533 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
7534 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
7535 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
7536 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
7537 
7538 	/* CCM configuration via CCMCTL register */
7539 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
7540 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
7541 
7542 	/* PCIe lanes configuration */
7543 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
7544 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
7545 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
7546 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
7547 
7548 	/* PCIe PLL Configuration */
7549 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
7550 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
7551 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
7552 }
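
/*
 * A sketch, under stated assumptions, of the indirect-write idiom used
 * above (not compiled).  wm_82575_write_8bit_ctlr_reg() is defined
 * elsewhere in this file; the field layout below is illustrative only:
 * the sub-register address and 8-bit datum are assumed to be packed
 * into a single 32-bit register write, followed by polling a ready bit.
 */
#if 0
	CSR_WRITE(sc, reg, (addr << ADDR_SHIFT) | (data & 0xff));
	/* ... poll a ready/done bit until the controller latches it ... */
#endif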
7553 
7554 static void
7555 wm_init_manageability(struct wm_softc *sc)
7556 {
7557 
7558 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
7559 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
7560 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
7561 
7562 		/* disable hardware interception of ARP */
7563 		manc &= ~MANC_ARP_EN;
7564 
7565 		/* enable receiving management packets to the host */
7566 		if (sc->sc_type >= WM_T_82571) {
7567 			manc |= MANC_EN_MNG2HOST;
7568 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
7569 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
7570 
7571 		}
7572 
7573 		CSR_WRITE(sc, WMREG_MANC, manc);
7574 	}
7575 }
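
/*
 * Sketch of the assumed pairing (not compiled): manageability is
 * (re)configured when the interface is initialized and released when it
 * stops, mirroring the suspend hook at the end of this file.
 */
#if 0
	wm_init_manageability(sc);	/* interface init */
	/* ... */
	wm_release_manageability(sc);	/* interface stop / suspend */
#endif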
7576 
7577 static void
7578 wm_release_manageability(struct wm_softc *sc)
7579 {
7580 
7581 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
7582 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
7583 
7584 		if (sc->sc_type >= WM_T_82571)
7585 			manc &= ~MANC_EN_MNG2HOST;
7586 
7587 		CSR_WRITE(sc, WMREG_MANC, manc);
7588 	}
7589 }
7590 
7591 static void
7592 wm_get_wakeup(struct wm_softc *sc)
7593 {
7594 
7595 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
7596 	switch (sc->sc_type) {
7597 	case WM_T_82573:
7598 	case WM_T_82583:
7599 		sc->sc_flags |= WM_F_HAS_AMT;
7600 		/* FALLTHROUGH */
7601 	case WM_T_80003:
7602 	case WM_T_82541:
7603 	case WM_T_82547:
7604 	case WM_T_82571:
7605 	case WM_T_82572:
7606 	case WM_T_82574:
7607 	case WM_T_82575:
7608 	case WM_T_82576:
7609 #if 0 /* XXX */
7610 	case WM_T_82580:
7611 	case WM_T_82580ER:
7612 #endif
7613 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
7614 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
7615 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
7616 		break;
7617 	case WM_T_ICH8:
7618 	case WM_T_ICH9:
7619 	case WM_T_ICH10:
7620 	case WM_T_PCH:
7621 	case WM_T_PCH2:
7622 		sc->sc_flags |= WM_F_HAS_AMT;
7623 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
7624 		break;
7625 	default:
7626 		break;
7627 	}
7628 
7629 	/* 1: HAS_MANAGE */
7630 	if (wm_enable_mng_pass_thru(sc) != 0)
7631 		sc->sc_flags |= WM_F_HAS_MANAGE;
7632 
7633 #ifdef WM_DEBUG
7634 	printf("\n");
7635 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
7636 		printf("HAS_AMT,");
7637 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
7638 		printf("ARC_SUBSYS_VALID,");
7639 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
7640 		printf("ASF_FIRMWARE_PRES,");
7641 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
7642 		printf("HAS_MANAGE,");
7643 	printf("\n");
7644 #endif
7645 	/*
7646 	 * Note that the WOL flag gets set later, after the EEPROM-related
7647 	 * reset code has run.
7648 	 */
7649 }
7650 
7651 #ifdef WM_WOL
7652 /* WOL in the newer chipset interfaces (pchlan) */
7653 static void
7654 wm_enable_phy_wakeup(struct wm_softc *sc)
7655 {
7656 #if 0
7657 	uint16_t preg;
7658 
7659 	/* Copy MAC RARs to PHY RARs */
7660 
7661 	/* Copy MAC MTA to PHY MTA */
7662 
7663 	/* Configure PHY Rx Control register */
7664 
7665 	/* Enable PHY wakeup in MAC register */
7666 
7667 	/* Configure and enable PHY wakeup in PHY registers */
7668 
7669 	/* Activate PHY wakeup */
7670 
7671 	/* XXX */
7672 #endif
7673 }
7674 
7675 static void
7676 wm_enable_wakeup(struct wm_softc *sc)
7677 {
7678 	uint32_t reg, pmreg;
7679 	pcireg_t pmode;
7680 
7681 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
7682 		&pmreg, NULL) == 0)
7683 		return;
7684 
7685 	/* Advertise the wakeup capability */
7686 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
7687 	    | CTRL_SWDPIN(3));
7688 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
7689 
7690 	/* ICH workaround */
7691 	switch (sc->sc_type) {
7692 	case WM_T_ICH8:
7693 	case WM_T_ICH9:
7694 	case WM_T_ICH10:
7695 	case WM_T_PCH:
7696 	case WM_T_PCH2:
7697 		/* Disable gig during WOL */
7698 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
7699 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
7700 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7701 		if (sc->sc_type == WM_T_PCH)
7702 			wm_gmii_reset(sc);
7703 
7704 		/* Power down workaround */
7705 		if (sc->sc_phytype == WMPHY_82577) {
7706 			struct mii_softc *child;
7707 
7708 			/* Assume that the PHY is copper */
7709 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
7710 			if (child->mii_mpd_rev <= 2)
7711 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
7712 				    (768 << 5) | 25, 0x0444); /* magic num */
7713 		}
7714 		break;
7715 	default:
7716 		break;
7717 	}
7718 
7719 	/* Keep the laser running on fiber adapters */
7720 	if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
7721 	    || ((sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0)) {
7722 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
7723 		reg |= CTRL_EXT_SWDPIN(3);
7724 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7725 	}
7726 
7727 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
7728 #if 0	/* for the multicast packet */
7729 	reg |= WUFC_MC;
7730 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
7731 #endif
7732 
7733 	if (sc->sc_type == WM_T_PCH) {
7734 		wm_enable_phy_wakeup(sc);
7735 	} else {
7736 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
7737 		CSR_WRITE(sc, WMREG_WUFC, reg);
7738 	}
7739 
7740 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
7741 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
7742 		|| (sc->sc_type == WM_T_PCH2))
7743 		    && (sc->sc_phytype == WMPHY_IGP_3))
7744 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
7745 
7746 	/* Request PME */
7747 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
7748 #if 0
7749 	/* Disable WOL */
7750 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
7751 #else
7752 	/* For WOL */
7753 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
7754 #endif
7755 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
7756 }
7757 #endif /* WM_WOL */
7758 
7759 static bool
7760 wm_suspend(device_t self, const pmf_qual_t *qual)
7761 {
7762 	struct wm_softc *sc = device_private(self);
7763 
7764 	wm_release_manageability(sc);
7765 	wm_release_hw_control(sc);
7766 #ifdef WM_WOL
7767 	wm_enable_wakeup(sc);
7768 #endif
7769 
7770 	return true;
7771 }
7772 
7773 static bool
7774 wm_resume(device_t self, const pmf_qual_t *qual)
7775 {
7776 	struct wm_softc *sc = device_private(self);
7777 
7778 	wm_init_manageability(sc);
7779 
7780 	return true;
7781 }
7782
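
/*
 * Illustrative sketch (not compiled): wm_suspend() and wm_resume() are
 * assumed to be registered with the pmf(9) framework at attach time
 * along these conventional lines; the error message text is
 * illustrative.
 */
#if 0
	if (pmf_device_register(self, wm_suspend, wm_resume))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");
#endif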