1 /*	$NetBSD: if_wm.c,v 1.225 2011/11/28 18:21:46 bouyer Exp $	*/
2 
3 /*
4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*******************************************************************************
39 
40   Copyright (c) 2001-2005, Intel Corporation
41   All rights reserved.
42 
43   Redistribution and use in source and binary forms, with or without
44   modification, are permitted provided that the following conditions are met:
45 
46    1. Redistributions of source code must retain the above copyright notice,
47       this list of conditions and the following disclaimer.
48 
49    2. Redistributions in binary form must reproduce the above copyright
50       notice, this list of conditions and the following disclaimer in the
51       documentation and/or other materials provided with the distribution.
52 
53    3. Neither the name of the Intel Corporation nor the names of its
54       contributors may be used to endorse or promote products derived from
55       this software without specific prior written permission.
56 
57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67   POSSIBILITY OF SUCH DAMAGE.
68 
69 *******************************************************************************/
70 /*
71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72  *
73  * TODO (in order of importance):
74  *
75  *	- Rework how parameters are loaded from the EEPROM.
76  */
77 
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.225 2011/11/28 18:21:46 bouyer Exp $");
80 
81 #include "rnd.h"
82 
83 #include <sys/param.h>
84 #include <sys/systm.h>
85 #include <sys/callout.h>
86 #include <sys/mbuf.h>
87 #include <sys/malloc.h>
88 #include <sys/kernel.h>
89 #include <sys/socket.h>
90 #include <sys/ioctl.h>
91 #include <sys/errno.h>
92 #include <sys/device.h>
93 #include <sys/queue.h>
94 #include <sys/syslog.h>
95 
96 #if NRND > 0
97 #include <sys/rnd.h>
98 #endif
99 
100 #include <net/if.h>
101 #include <net/if_dl.h>
102 #include <net/if_media.h>
103 #include <net/if_ether.h>
104 
105 #include <net/bpf.h>
106 
107 #include <netinet/in.h>			/* XXX for struct ip */
108 #include <netinet/in_systm.h>		/* XXX for struct ip */
109 #include <netinet/ip.h>			/* XXX for struct ip */
110 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
111 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
112 
113 #include <sys/bus.h>
114 #include <sys/intr.h>
115 #include <machine/endian.h>
116 
117 #include <dev/mii/mii.h>
118 #include <dev/mii/miivar.h>
119 #include <dev/mii/miidevs.h>
120 #include <dev/mii/mii_bitbang.h>
121 #include <dev/mii/ikphyreg.h>
122 #include <dev/mii/igphyreg.h>
123 #include <dev/mii/igphyvar.h>
124 #include <dev/mii/inbmphyreg.h>
125 
126 #include <dev/pci/pcireg.h>
127 #include <dev/pci/pcivar.h>
128 #include <dev/pci/pcidevs.h>
129 
130 #include <dev/pci/if_wmreg.h>
131 #include <dev/pci/if_wmvar.h>
132 
133 #ifdef WM_DEBUG
134 #define	WM_DEBUG_LINK		0x01
135 #define	WM_DEBUG_TX		0x02
136 #define	WM_DEBUG_RX		0x04
137 #define	WM_DEBUG_GMII		0x08
138 #define	WM_DEBUG_MANAGE		0x10
139 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
140     | WM_DEBUG_MANAGE;
141 
142 #define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
143 #else
144 #define	DPRINTF(x, y)	/* nothing */
145 #endif /* WM_DEBUG */
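
/*
 * Illustrative example (not from the original source): the second
 * argument to DPRINTF() is a complete, parenthesized printf() argument
 * list, so a typical call looks like:
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link is up\n",
 *	    device_xname(sc->sc_dev)));
 *
 * When WM_DEBUG is not defined, the whole call compiles away.
 */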
146 
147 /*
148  * Transmit descriptor list size.  Due to errata, we can only have
149  * 256 hardware descriptors in the ring on < 82544, but we use 4096
150  * on >= 82544.  We tell the upper layers that they can queue a lot
151  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
152  * of them at a time.
153  *
154  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
155  * chains containing many small mbufs have been observed in zero-copy
156  * situations with jumbo frames.
157  */
158 #define	WM_NTXSEGS		256
159 #define	WM_IFQUEUELEN		256
160 #define	WM_TXQUEUELEN_MAX	64
161 #define	WM_TXQUEUELEN_MAX_82547	16
162 #define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
163 #define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
164 #define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
165 #define	WM_NTXDESC_82542	256
166 #define	WM_NTXDESC_82544	4096
167 #define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
168 #define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
169 #define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
170 #define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
171 #define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
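
/*
 * Illustrative sketch: because WM_NTXDESC(sc) is a power of two, the
 * ring index advances with a mask rather than a modulo.  For a
 * 256-entry ring (WM_NTXDESC_82542):
 *
 *	WM_NEXTTX(sc, 254) == 255
 *	WM_NEXTTX(sc, 255) == 0		(wraps around)
 */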
172 
173 #define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */
174 
175 /*
176  * Receive descriptor list size.  We have one Rx buffer for normal
177  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
178  * packet.  We allocate 256 receive descriptors, each with a 2k
179  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
180  */
181 #define	WM_NRXDESC		256
182 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
183 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
184 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
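
/*
 * Worked example (illustrative, assuming a ~9000-byte jumbo frame):
 * splitting 9000 bytes into 2K (MCLBYTES) buffers needs
 * howmany(9000, 2048) == 5 descriptors, so a 256-descriptor ring holds
 * roughly 256 / 5 == 51 such frames -- the "room for 50 jumbo packets"
 * mentioned above.
 */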
185 
186 /*
187  * Control structures are DMA'd to the i82542 chip.  We allocate them in
188  * a single clump that maps to a single DMA segment to make several things
189  * easier.
190  */
191 struct wm_control_data_82544 {
192 	/*
193 	 * The receive descriptors.
194 	 */
195 	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
196 
197 	/*
198 	 * The transmit descriptors.  Put these at the end, because
199 	 * we might use a smaller number of them.
200 	 */
201 	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
202 };
203 
204 struct wm_control_data_82542 {
205 	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
206 	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
207 };
208 
209 #define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
210 #define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
211 #define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
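
/*
 * Illustrative sketch (assuming 16-byte wiseman descriptors): since
 * wcd_rxdescs precede wcd_txdescs in struct wm_control_data_82544,
 *
 *	WM_CDRXOFF(0) == 0
 *	WM_CDRXOFF(1) == 16
 *	WM_CDTXOFF(0) == WM_NRXDESC * 16 == 4096
 *
 * so a single DMA segment covers both rings at fixed offsets.
 */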
212 
213 /*
214  * Software state for transmit jobs.
215  */
216 struct wm_txsoft {
217 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
218 	bus_dmamap_t txs_dmamap;	/* our DMA map */
219 	int txs_firstdesc;		/* first descriptor in packet */
220 	int txs_lastdesc;		/* last descriptor in packet */
221 	int txs_ndesc;			/* # of descriptors used */
222 };
223 
224 /*
225  * Software state for receive buffers.  Each descriptor gets a
226  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
227  * more than one buffer, we chain them together.
228  */
229 struct wm_rxsoft {
230 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
231 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
232 };
233 
234 #define WM_LINKUP_TIMEOUT	50
235 
236 static uint16_t swfwphysem[] = {
237 	SWFW_PHY0_SM,
238 	SWFW_PHY1_SM,
239 	SWFW_PHY2_SM,
240 	SWFW_PHY3_SM
241 };
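
/*
 * Illustrative usage: the table is indexed by the chip's function ID,
 * so a multi-port device acquires the semaphore for its own PHY with
 * something like
 *
 *	wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
 */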
242 
243 /*
244  * Software state per device.
245  */
246 struct wm_softc {
247 	device_t sc_dev;		/* generic device information */
248 	bus_space_tag_t sc_st;		/* bus space tag */
249 	bus_space_handle_t sc_sh;	/* bus space handle */
250 	bus_size_t sc_ss;		/* bus space size */
251 	bus_space_tag_t sc_iot;		/* I/O space tag */
252 	bus_space_handle_t sc_ioh;	/* I/O space handle */
253 	bus_size_t sc_ios;		/* I/O space size */
254 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
255 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
256 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
257 
258 	struct ethercom sc_ethercom;	/* ethernet common data */
259 	struct mii_data sc_mii;		/* MII/media information */
260 
261 	pci_chipset_tag_t sc_pc;
262 	pcitag_t sc_pcitag;
263 	int sc_bus_speed;		/* PCI/PCIX bus speed */
264 	int sc_pcixe_capoff;		/* PCI[Xe] capability register offset */
265 
266 	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
267 	wm_chip_type sc_type;		/* MAC type */
268 	int sc_rev;			/* MAC revision */
269 	wm_phy_type sc_phytype;		/* PHY type */
270 	int sc_funcid;			/* unit number of the chip (0 to 3) */
271 	int sc_flags;			/* flags; see below */
272 	int sc_if_flags;		/* last if_flags */
273 	int sc_flowflags;		/* 802.3x flow control flags */
274 	int sc_align_tweak;
275 
276 	void *sc_ih;			/* interrupt cookie */
277 	callout_t sc_tick_ch;		/* tick callout */
278 
279 	int sc_ee_addrbits;		/* EEPROM address bits */
280 	int sc_ich8_flash_base;
281 	int sc_ich8_flash_bank_size;
282 	int sc_nvm_k1_enabled;
283 
284 	/*
285 	 * Software state for the transmit and receive descriptors.
286 	 */
287 	int sc_txnum;			/* must be a power of two */
288 	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
289 	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];
290 
291 	/*
292 	 * Control data structures.
293 	 */
294 	int sc_ntxdesc;			/* must be a power of two */
295 	struct wm_control_data_82544 *sc_control_data;
296 	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
297 	bus_dma_segment_t sc_cd_seg;	/* control data segment */
298 	int sc_cd_rseg;			/* real number of control segment */
299 	size_t sc_cd_size;		/* control data size */
300 #define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
301 #define	sc_txdescs	sc_control_data->wcd_txdescs
302 #define	sc_rxdescs	sc_control_data->wcd_rxdescs
303 
304 #ifdef WM_EVENT_COUNTERS
305 	/* Event counters. */
306 	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
307 	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
308 	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
309 	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
310 	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
311 	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
312 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
313 
314 	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
315 	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
316 	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
317 	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
318 	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
319 	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
320 	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
321 	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */
322 
323 	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
324 	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */
325 
326 	struct evcnt sc_ev_tu;		/* Tx underrun */
327 
328 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
329 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
330 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
331 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
332 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
333 #endif /* WM_EVENT_COUNTERS */
334 
335 	bus_addr_t sc_tdt_reg;		/* offset of TDT register */
336 
337 	int	sc_txfree;		/* number of free Tx descriptors */
338 	int	sc_txnext;		/* next ready Tx descriptor */
339 
340 	int	sc_txsfree;		/* number of free Tx jobs */
341 	int	sc_txsnext;		/* next free Tx job */
342 	int	sc_txsdirty;		/* dirty Tx jobs */
343 
344 	/* These 5 variables are used only on the 82547. */
345 	int	sc_txfifo_size;		/* Tx FIFO size */
346 	int	sc_txfifo_head;		/* current head of FIFO */
347 	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
348 	int	sc_txfifo_stall;	/* Tx FIFO is stalled */
349 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
350 
351 	bus_addr_t sc_rdt_reg;		/* offset of RDT register */
352 
353 	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
354 	int	sc_rxdiscard;
355 	int	sc_rxlen;
356 	struct mbuf *sc_rxhead;
357 	struct mbuf *sc_rxtail;
358 	struct mbuf **sc_rxtailp;
359 
360 	uint32_t sc_ctrl;		/* prototype CTRL register */
361 #if 0
362 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
363 #endif
364 	uint32_t sc_icr;		/* prototype interrupt bits */
365 	uint32_t sc_itr;		/* prototype intr throttling reg */
366 	uint32_t sc_tctl;		/* prototype TCTL register */
367 	uint32_t sc_rctl;		/* prototype RCTL register */
368 	uint32_t sc_txcw;		/* prototype TXCW register */
369 	uint32_t sc_tipg;		/* prototype TIPG register */
370 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
371 	uint32_t sc_pba;		/* prototype PBA register */
372 
373 	int sc_tbi_linkup;		/* TBI link status */
374 	int sc_tbi_anegticks;		/* autonegotiation ticks */
375 	int sc_tbi_ticks;		/* tbi ticks */
376 	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
377 	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */
378 
379 	int sc_mchash_type;		/* multicast filter offset */
380 
381 #if NRND > 0
382 	krndsource_t rnd_source;	/* random source */
383 #endif
384 };
385 
386 #define	WM_RXCHAIN_RESET(sc)						\
387 do {									\
388 	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
389 	*(sc)->sc_rxtailp = NULL;					\
390 	(sc)->sc_rxlen = 0;						\
391 } while (/*CONSTCOND*/0)
392 
393 #define	WM_RXCHAIN_LINK(sc, m)						\
394 do {									\
395 	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
396 	(sc)->sc_rxtailp = &(m)->m_next;				\
397 } while (/*CONSTCOND*/0)
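
/*
 * Illustrative sketch: sc_rxtailp always points at the m_next field of
 * the last mbuf in the chain (or at sc_rxhead when the chain is empty),
 * so appending is O(1) with no list walk:
 *
 *	WM_RXCHAIN_RESET(sc);	  head == NULL,     tailp == &head
 *	WM_RXCHAIN_LINK(sc, m1);  head == m1,       tailp == &m1->m_next
 *	WM_RXCHAIN_LINK(sc, m2);  m1->m_next == m2, tailp == &m2->m_next
 */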
398 
399 #ifdef WM_EVENT_COUNTERS
400 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
401 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
402 #else
403 #define	WM_EVCNT_INCR(ev)	/* nothing */
404 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
405 #endif
406 
407 #define	CSR_READ(sc, reg)						\
408 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
409 #define	CSR_WRITE(sc, reg, val)						\
410 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
411 #define	CSR_WRITE_FLUSH(sc)						\
412 	(void) CSR_READ((sc), WMREG_STATUS)
413 
414 #define ICH8_FLASH_READ32(sc, reg) \
415 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
416 #define ICH8_FLASH_WRITE32(sc, reg, data) \
417 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
418 
419 #define ICH8_FLASH_READ16(sc, reg) \
420 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
421 #define ICH8_FLASH_WRITE16(sc, reg, data) \
422 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
423 
424 #define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
425 #define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))
426 
427 #define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
428 #define	WM_CDTXADDR_HI(sc, x)						\
429 	(sizeof(bus_addr_t) == 8 ?					\
430 	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)
431 
432 #define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
433 #define	WM_CDRXADDR_HI(sc, x)						\
434 	(sizeof(bus_addr_t) == 8 ?					\
435 	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
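
/*
 * Worked example (illustrative): descriptor addresses are split into
 * 32-bit halves for the chip's 64-bit descriptor base registers.  If
 * WM_CDTXADDR(sc, 0) were 0x123456000 on a 64-bit bus_addr_t:
 *
 *	WM_CDTXADDR_LO(sc, 0) == 0x23456000
 *	WM_CDTXADDR_HI(sc, 0) == 0x1
 *
 * On a 32-bit bus_addr_t the high half is simply 0.
 */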
436 
437 #define	WM_CDTXSYNC(sc, x, n, ops)					\
438 do {									\
439 	int __x, __n;							\
440 									\
441 	__x = (x);							\
442 	__n = (n);							\
443 									\
444 	/* If it will wrap around, sync to the end of the ring. */	\
445 	if ((__x + __n) > WM_NTXDESC(sc)) {				\
446 		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
447 		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
448 		    (WM_NTXDESC(sc) - __x), (ops));			\
449 		__n -= (WM_NTXDESC(sc) - __x);				\
450 		__x = 0;						\
451 	}								\
452 									\
453 	/* Now sync whatever is left. */				\
454 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
455 	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
456 } while (/*CONSTCOND*/0)
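
/*
 * Illustrative example: syncing a range that crosses the end of the
 * ring is done in two pieces.  In a 256-descriptor ring,
 *
 *	WM_CDTXSYNC(sc, 250, 10, BUS_DMASYNC_PREWRITE);
 *
 * syncs descriptors 250-255 first, then descriptors 0-3.
 */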
457 
458 #define	WM_CDRXSYNC(sc, x, ops)						\
459 do {									\
460 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
461 	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
462 } while (/*CONSTCOND*/0)
463 
464 #define	WM_INIT_RXDESC(sc, x)						\
465 do {									\
466 	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
467 	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
468 	struct mbuf *__m = __rxs->rxs_mbuf;				\
469 									\
470 	/*								\
471 	 * Note: We scoot the packet forward 2 bytes in the buffer	\
472 	 * so that the payload after the Ethernet header is aligned	\
473 	 * to a 4-byte boundary.					\
474 	 *								\
475 	 * XXX BRAINDAMAGE ALERT!					\
476 	 * The stupid chip uses the same size for every buffer, which	\
477 	 * is set in the Receive Control register.  We are using the 2K	\
478 	 * size option, but what we REALLY want is (2K - 2)!  For this	\
479 	 * reason, we can't "scoot" packets longer than the standard	\
480 	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
481 	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
482 	 * the upper layer copy the headers.				\
483 	 */								\
484 	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
485 									\
486 	wm_set_dma_addr(&__rxd->wrx_addr,				\
487 	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
488 	__rxd->wrx_len = 0;						\
489 	__rxd->wrx_cksum = 0;						\
490 	__rxd->wrx_status = 0;						\
491 	__rxd->wrx_errors = 0;						\
492 	__rxd->wrx_special = 0;						\
493 	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
494 									\
495 	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
496 } while (/*CONSTCOND*/0)
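
/*
 * Worked example (illustrative): with sc_align_tweak == 2, the 14-byte
 * Ethernet header starts 2 bytes into the buffer, so the IP header
 * that follows begins at offset 16, a 4-byte boundary.  The cost is
 * that only 2048 - 2 bytes of the cluster remain usable, which is why
 * the tweak is disabled for frames that could fill the whole 2K buffer.
 */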
497 
498 static void	wm_start(struct ifnet *);
499 static void	wm_watchdog(struct ifnet *);
500 static int	wm_ifflags_cb(struct ethercom *);
501 static int	wm_ioctl(struct ifnet *, u_long, void *);
502 static int	wm_init(struct ifnet *);
503 static void	wm_stop(struct ifnet *, int);
504 static bool	wm_suspend(device_t, const pmf_qual_t *);
505 static bool	wm_resume(device_t, const pmf_qual_t *);
506 
507 static void	wm_reset(struct wm_softc *);
508 static void	wm_rxdrain(struct wm_softc *);
509 static int	wm_add_rxbuf(struct wm_softc *, int);
510 static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
511 static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
512 static int	wm_validate_eeprom_checksum(struct wm_softc *);
513 static int	wm_check_alt_mac_addr(struct wm_softc *);
514 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
515 static void	wm_tick(void *);
516 
517 static void	wm_set_filter(struct wm_softc *);
518 static void	wm_set_vlan(struct wm_softc *);
519 
520 static int	wm_intr(void *);
521 static void	wm_txintr(struct wm_softc *);
522 static void	wm_rxintr(struct wm_softc *);
523 static void	wm_linkintr(struct wm_softc *, uint32_t);
524 
525 static void	wm_tbi_mediainit(struct wm_softc *);
526 static int	wm_tbi_mediachange(struct ifnet *);
527 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
528 
529 static void	wm_tbi_set_linkled(struct wm_softc *);
530 static void	wm_tbi_check_link(struct wm_softc *);
531 
532 static void	wm_gmii_reset(struct wm_softc *);
533 
534 static int	wm_gmii_i82543_readreg(device_t, int, int);
535 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
536 
537 static int	wm_gmii_i82544_readreg(device_t, int, int);
538 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
539 
540 static int	wm_gmii_i80003_readreg(device_t, int, int);
541 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
542 static int	wm_gmii_bm_readreg(device_t, int, int);
543 static void	wm_gmii_bm_writereg(device_t, int, int, int);
544 static int	wm_gmii_hv_readreg(device_t, int, int);
545 static void	wm_gmii_hv_writereg(device_t, int, int, int);
546 static int	wm_sgmii_readreg(device_t, int, int);
547 static void	wm_sgmii_writereg(device_t, int, int, int);
548 
549 static void	wm_gmii_statchg(device_t);
550 
551 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
552 static int	wm_gmii_mediachange(struct ifnet *);
553 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
554 
555 static int	wm_kmrn_readreg(struct wm_softc *, int);
556 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
557 
558 static void	wm_set_spiaddrbits(struct wm_softc *);
559 static int	wm_match(device_t, cfdata_t, void *);
560 static void	wm_attach(device_t, device_t, void *);
561 static int	wm_detach(device_t, int);
562 static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
563 static void	wm_get_auto_rd_done(struct wm_softc *);
564 static void	wm_lan_init_done(struct wm_softc *);
565 static void	wm_get_cfg_done(struct wm_softc *);
566 static int	wm_get_swsm_semaphore(struct wm_softc *);
567 static void	wm_put_swsm_semaphore(struct wm_softc *);
568 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
569 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
570 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
571 static int	wm_get_swfwhw_semaphore(struct wm_softc *);
572 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
573 
574 static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
575 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
576 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
577 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
578 		     uint32_t, uint16_t *);
579 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
580 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
581 static void	wm_82547_txfifo_stall(void *);
582 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
583 static int	wm_check_mng_mode(struct wm_softc *);
584 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
585 static int	wm_check_mng_mode_82574(struct wm_softc *);
586 static int	wm_check_mng_mode_generic(struct wm_softc *);
587 static int	wm_enable_mng_pass_thru(struct wm_softc *);
588 static int	wm_check_reset_block(struct wm_softc *);
589 static void	wm_get_hw_control(struct wm_softc *);
590 static int	wm_check_for_link(struct wm_softc *);
591 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
592 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
593 #ifdef WM_WOL
594 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
595 #endif
596 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
597 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
598 static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
599 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
600 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
601 static void	wm_smbustopci(struct wm_softc *);
602 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
603 static void	wm_reset_init_script_82575(struct wm_softc *);
604 static void	wm_release_manageability(struct wm_softc *);
605 static void	wm_release_hw_control(struct wm_softc *);
606 static void	wm_get_wakeup(struct wm_softc *);
607 #ifdef WM_WOL
608 static void	wm_enable_phy_wakeup(struct wm_softc *);
609 static void	wm_enable_wakeup(struct wm_softc *);
610 #endif
611 static void	wm_init_manageability(struct wm_softc *);
612 
613 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
614     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
615 
616 /*
617  * Devices supported by this driver.
618  */
619 static const struct wm_product {
620 	pci_vendor_id_t		wmp_vendor;
621 	pci_product_id_t	wmp_product;
622 	const char		*wmp_name;
623 	wm_chip_type		wmp_type;
624 	int			wmp_flags;
625 #define	WMP_F_1000X		0x01
626 #define	WMP_F_1000T		0x02
627 #define	WMP_F_SERDES		0x04
628 } wm_products[] = {
629 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
630 	  "Intel i82542 1000BASE-X Ethernet",
631 	  WM_T_82542_2_1,	WMP_F_1000X },
632 
633 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
634 	  "Intel i82543GC 1000BASE-X Ethernet",
635 	  WM_T_82543,		WMP_F_1000X },
636 
637 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
638 	  "Intel i82543GC 1000BASE-T Ethernet",
639 	  WM_T_82543,		WMP_F_1000T },
640 
641 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
642 	  "Intel i82544EI 1000BASE-T Ethernet",
643 	  WM_T_82544,		WMP_F_1000T },
644 
645 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
646 	  "Intel i82544EI 1000BASE-X Ethernet",
647 	  WM_T_82544,		WMP_F_1000X },
648 
649 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
650 	  "Intel i82544GC 1000BASE-T Ethernet",
651 	  WM_T_82544,		WMP_F_1000T },
652 
653 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
654 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
655 	  WM_T_82544,		WMP_F_1000T },
656 
657 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
658 	  "Intel i82540EM 1000BASE-T Ethernet",
659 	  WM_T_82540,		WMP_F_1000T },
660 
661 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
662 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
663 	  WM_T_82540,		WMP_F_1000T },
664 
665 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
666 	  "Intel i82540EP 1000BASE-T Ethernet",
667 	  WM_T_82540,		WMP_F_1000T },
668 
669 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
670 	  "Intel i82540EP 1000BASE-T Ethernet",
671 	  WM_T_82540,		WMP_F_1000T },
672 
673 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
674 	  "Intel i82540EP 1000BASE-T Ethernet",
675 	  WM_T_82540,		WMP_F_1000T },
676 
677 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
678 	  "Intel i82545EM 1000BASE-T Ethernet",
679 	  WM_T_82545,		WMP_F_1000T },
680 
681 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
682 	  "Intel i82545GM 1000BASE-T Ethernet",
683 	  WM_T_82545_3,		WMP_F_1000T },
684 
685 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
686 	  "Intel i82545GM 1000BASE-X Ethernet",
687 	  WM_T_82545_3,		WMP_F_1000X },
688 #if 0
689 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
690 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
691 	  WM_T_82545_3,		WMP_F_SERDES },
692 #endif
693 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
694 	  "Intel i82546EB 1000BASE-T Ethernet",
695 	  WM_T_82546,		WMP_F_1000T },
696 
697 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
698 	  "Intel i82546EB 1000BASE-T Ethernet",
699 	  WM_T_82546,		WMP_F_1000T },
700 
701 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
702 	  "Intel i82545EM 1000BASE-X Ethernet",
703 	  WM_T_82545,		WMP_F_1000X },
704 
705 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
706 	  "Intel i82546EB 1000BASE-X Ethernet",
707 	  WM_T_82546,		WMP_F_1000X },
708 
709 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
710 	  "Intel i82546GB 1000BASE-T Ethernet",
711 	  WM_T_82546_3,		WMP_F_1000T },
712 
713 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
714 	  "Intel i82546GB 1000BASE-X Ethernet",
715 	  WM_T_82546_3,		WMP_F_1000X },
716 #if 0
717 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
718 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
719 	  WM_T_82546_3,		WMP_F_SERDES },
720 #endif
721 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
722 	  "i82546GB quad-port Gigabit Ethernet",
723 	  WM_T_82546_3,		WMP_F_1000T },
724 
725 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
726 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
727 	  WM_T_82546_3,		WMP_F_1000T },
728 
729 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
730 	  "Intel PRO/1000MT (82546GB)",
731 	  WM_T_82546_3,		WMP_F_1000T },
732 
733 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
734 	  "Intel i82541EI 1000BASE-T Ethernet",
735 	  WM_T_82541,		WMP_F_1000T },
736 
737 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
738 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
739 	  WM_T_82541,		WMP_F_1000T },
740 
741 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
742 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
743 	  WM_T_82541,		WMP_F_1000T },
744 
745 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
746 	  "Intel i82541ER 1000BASE-T Ethernet",
747 	  WM_T_82541_2,		WMP_F_1000T },
748 
749 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
750 	  "Intel i82541GI 1000BASE-T Ethernet",
751 	  WM_T_82541_2,		WMP_F_1000T },
752 
753 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
754 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
755 	  WM_T_82541_2,		WMP_F_1000T },
756 
757 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
758 	  "Intel i82541PI 1000BASE-T Ethernet",
759 	  WM_T_82541_2,		WMP_F_1000T },
760 
761 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
762 	  "Intel i82547EI 1000BASE-T Ethernet",
763 	  WM_T_82547,		WMP_F_1000T },
764 
765 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
766 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
767 	  WM_T_82547,		WMP_F_1000T },
768 
769 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
770 	  "Intel i82547GI 1000BASE-T Ethernet",
771 	  WM_T_82547_2,		WMP_F_1000T },
772 
773 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
774 	  "Intel PRO/1000 PT (82571EB)",
775 	  WM_T_82571,		WMP_F_1000T },
776 
777 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
778 	  "Intel PRO/1000 PF (82571EB)",
779 	  WM_T_82571,		WMP_F_1000X },
780 #if 0
781 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
782 	  "Intel PRO/1000 PB (82571EB)",
783 	  WM_T_82571,		WMP_F_SERDES },
784 #endif
785 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
786 	  "Intel PRO/1000 QT (82571EB)",
787 	  WM_T_82571,		WMP_F_1000T },
788 
789 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
790 	  "Intel i82572EI 1000baseT Ethernet",
791 	  WM_T_82572,		WMP_F_1000T },
792 
793 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
794 	  "Intel PRO/1000 PT Quad Port Server Adapter",
795 	  WM_T_82571,		WMP_F_1000T, },
796 
797 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
798 	  "Intel i82572EI 1000baseX Ethernet",
799 	  WM_T_82572,		WMP_F_1000X },
800 #if 0
801 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
802 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
803 	  WM_T_82572,		WMP_F_SERDES },
804 #endif
805 
806 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
807 	  "Intel i82572EI 1000baseT Ethernet",
808 	  WM_T_82572,		WMP_F_1000T },
809 
810 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
811 	  "Intel i82573E",
812 	  WM_T_82573,		WMP_F_1000T },
813 
814 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
815 	  "Intel i82573E IAMT",
816 	  WM_T_82573,		WMP_F_1000T },
817 
818 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
819 	  "Intel i82573L Gigabit Ethernet",
820 	  WM_T_82573,		WMP_F_1000T },
821 
822 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
823 	  "Intel i82574L",
824 	  WM_T_82574,		WMP_F_1000T },
825 
826 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
827 	  "Intel i82583V",
828 	  WM_T_82583,		WMP_F_1000T },
829 
830 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
831 	  "i80003 dual 1000baseT Ethernet",
832 	  WM_T_80003,		WMP_F_1000T },
833 
834 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
835 	  "i80003 dual 1000baseX Ethernet",
836 	  WM_T_80003,		WMP_F_1000T },
837 #if 0
838 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
839 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
840 	  WM_T_80003,		WMP_F_SERDES },
841 #endif
842 
843 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
844 	  "Intel i80003 1000baseT Ethernet",
845 	  WM_T_80003,		WMP_F_1000T },
846 #if 0
847 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
848 	  "Intel i80003 Gigabit Ethernet (SERDES)",
849 	  WM_T_80003,		WMP_F_SERDES },
850 #endif
851 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
852 	  "Intel i82801H (M_AMT) LAN Controller",
853 	  WM_T_ICH8,		WMP_F_1000T },
854 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
855 	  "Intel i82801H (AMT) LAN Controller",
856 	  WM_T_ICH8,		WMP_F_1000T },
857 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
858 	  "Intel i82801H LAN Controller",
859 	  WM_T_ICH8,		WMP_F_1000T },
860 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
861 	  "Intel i82801H (IFE) LAN Controller",
862 	  WM_T_ICH8,		WMP_F_1000T },
863 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
864 	  "Intel i82801H (M) LAN Controller",
865 	  WM_T_ICH8,		WMP_F_1000T },
866 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
867 	  "Intel i82801H IFE (GT) LAN Controller",
868 	  WM_T_ICH8,		WMP_F_1000T },
869 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
870 	  "Intel i82801H IFE (G) LAN Controller",
871 	  WM_T_ICH8,		WMP_F_1000T },
872 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
873 	  "82801I (AMT) LAN Controller",
874 	  WM_T_ICH9,		WMP_F_1000T },
875 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
876 	  "82801I LAN Controller",
877 	  WM_T_ICH9,		WMP_F_1000T },
878 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
879 	  "82801I (G) LAN Controller",
880 	  WM_T_ICH9,		WMP_F_1000T },
881 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
882 	  "82801I (GT) LAN Controller",
883 	  WM_T_ICH9,		WMP_F_1000T },
884 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
885 	  "82801I (C) LAN Controller",
886 	  WM_T_ICH9,		WMP_F_1000T },
887 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
888 	  "82801I mobile LAN Controller",
889 	  WM_T_ICH9,		WMP_F_1000T },
890 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
891 	  "82801I mobile (V) LAN Controller",
892 	  WM_T_ICH9,		WMP_F_1000T },
893 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
894 	  "82801I mobile (AMT) LAN Controller",
895 	  WM_T_ICH9,		WMP_F_1000T },
896 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
897 	  "82567LM-4 LAN Controller",
898 	  WM_T_ICH9,		WMP_F_1000T },
899 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
900 	  "82567V-3 LAN Controller",
901 	  WM_T_ICH9,		WMP_F_1000T },
902 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
903 	  "82567LM-2 LAN Controller",
904 	  WM_T_ICH10,		WMP_F_1000T },
905 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
906 	  "82567LF-2 LAN Controller",
907 	  WM_T_ICH10,		WMP_F_1000T },
908 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
909 	  "82567LM-3 LAN Controller",
910 	  WM_T_ICH10,		WMP_F_1000T },
911 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
912 	  "82567LF-3 LAN Controller",
913 	  WM_T_ICH10,		WMP_F_1000T },
914 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
915 	  "82567V-2 LAN Controller",
916 	  WM_T_ICH10,		WMP_F_1000T },
917 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
918 	  "82567V-3? LAN Controller",
919 	  WM_T_ICH10,		WMP_F_1000T },
920 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
921 	  "HANKSVILLE LAN Controller",
922 	  WM_T_ICH10,		WMP_F_1000T },
923 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
924 	  "PCH LAN (82577LM) Controller",
925 	  WM_T_PCH,		WMP_F_1000T },
926 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
927 	  "PCH LAN (82577LC) Controller",
928 	  WM_T_PCH,		WMP_F_1000T },
929 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
930 	  "PCH LAN (82578DM) Controller",
931 	  WM_T_PCH,		WMP_F_1000T },
932 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
933 	  "PCH LAN (82578DC) Controller",
934 	  WM_T_PCH,		WMP_F_1000T },
935 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
936 	  "PCH2 LAN (82579LM) Controller",
937 	  WM_T_PCH2,		WMP_F_1000T },
938 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
939 	  "PCH2 LAN (82579V) Controller",
940 	  WM_T_PCH2,		WMP_F_1000T },
941 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
942 	  "82575EB dual-1000baseT Ethernet",
943 	  WM_T_82575,		WMP_F_1000T },
944 #if 0
945 	/*
946 	 * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so
947 	 * disabled for now ...
948 	 */
949 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
950 	  "82575EB dual-1000baseX Ethernet (SERDES)",
951 	  WM_T_82575,		WMP_F_SERDES },
952 #endif
953 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
954 	  "82575GB quad-1000baseT Ethernet",
955 	  WM_T_82575,		WMP_F_1000T },
956 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
957 	  "82575GB quad-1000baseT Ethernet (PM)",
958 	  WM_T_82575,		WMP_F_1000T },
959 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
960 	  "82576 1000BaseT Ethernet",
961 	  WM_T_82576,		WMP_F_1000T },
962 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
963 	  "82576 1000BaseX Ethernet",
964 	  WM_T_82576,		WMP_F_1000X },
965 #if 0
966 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
967 	  "82576 gigabit Ethernet (SERDES)",
968 	  WM_T_82576,		WMP_F_SERDES },
969 #endif
970 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
971 	  "82576 quad-1000BaseT Ethernet",
972 	  WM_T_82576,		WMP_F_1000T },
973 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
974 	  "82576 gigabit Ethernet",
975 	  WM_T_82576,		WMP_F_1000T },
976 #if 0
977 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
978 	  "82576 gigabit Ethernet (SERDES)",
979 	  WM_T_82576,		WMP_F_SERDES },
980 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
981 	  "82576 quad-gigabit Ethernet (SERDES)",
982 	  WM_T_82576,		WMP_F_SERDES },
983 #endif
984 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
985 	  "82580 1000BaseT Ethernet",
986 	  WM_T_82580,		WMP_F_1000T },
987 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
988 	  "82580 1000BaseX Ethernet",
989 	  WM_T_82580,		WMP_F_1000X },
990 #if 0
991 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
992 	  "82580 1000BaseT Ethernet (SERDES)",
993 	  WM_T_82580,		WMP_F_SERDES },
994 #endif
995 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
996 	  "82580 gigabit Ethernet (SGMII)",
997 	  WM_T_82580,		WMP_F_1000T },
998 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
999 	  "82580 dual-1000BaseT Ethernet",
1000 	  WM_T_82580,		WMP_F_1000T },
1001 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER,
1002 	  "82580 1000BaseT Ethernet",
1003 	  WM_T_82580ER,		WMP_F_1000T },
1004 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER_DUAL,
1005 	  "82580 dual-1000BaseT Ethernet",
1006 	  WM_T_82580ER,		WMP_F_1000T },
1007 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1008 	  "82580 quad-1000BaseX Ethernet",
1009 	  WM_T_82580,		WMP_F_1000X },
1010 	{ 0,			0,
1011 	  NULL,
1012 	  0,			0 },
1013 };
1014 
1015 #ifdef WM_EVENT_COUNTERS
1016 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
1017 #endif /* WM_EVENT_COUNTERS */
1018 
1019 #if 0 /* Not currently used */
1020 static inline uint32_t
1021 wm_io_read(struct wm_softc *sc, int reg)
1022 {
1023 
1024 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1025 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1026 }
1027 #endif
1028 
1029 static inline void
1030 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1031 {
1032 
1033 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1034 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1035 }
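
/*
 * Illustrative note: the I/O BAR is a two-register window pair, not a
 * flat mapping; the register offset is written at byte 0 (the address
 * window) and the data at byte 4 (the data window).  So, for example,
 * forcing a reset through I/O space would look like
 *
 *	wm_io_write(sc, WMREG_CTRL, CTRL_RST);
 *
 * (register names as used elsewhere in this driver).
 */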
1036 
1037 static inline void
1038 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1039     uint32_t data)
1040 {
1041 	uint32_t regval;
1042 	int i;
1043 
1044 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1045 
1046 	CSR_WRITE(sc, reg, regval);
1047 
1048 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1049 		delay(5);
1050 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1051 			break;
1052 	}
1053 	if (i == SCTL_CTL_POLL_TIMEOUT) {
1054 		aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not indicate ready\n",
1055 		    device_xname(sc->sc_dev), reg);
1056 	}
1057 }
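
/*
 * Illustrative usage (assuming the WMREG_SCTL register offset from
 * if_wmreg.h): writing one byte of an 8-bit controller register and
 * polling for completion is a single call:
 *
 *	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, off, data);
 */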
1058 
1059 static inline void
1060 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1061 {
1062 	wa->wa_low = htole32(v & 0xffffffffU);
1063 	if (sizeof(bus_addr_t) == 8)
1064 		wa->wa_high = htole32((uint64_t) v >> 32);
1065 	else
1066 		wa->wa_high = 0;
1067 }
1068 
1069 static void
1070 wm_set_spiaddrbits(struct wm_softc *sc)
1071 {
1072 	uint32_t reg;
1073 
1074 	sc->sc_flags |= WM_F_EEPROM_SPI;
1075 	reg = CSR_READ(sc, WMREG_EECD);
1076 	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
1077 }
1078 
1079 static const struct wm_product *
1080 wm_lookup(const struct pci_attach_args *pa)
1081 {
1082 	const struct wm_product *wmp;
1083 
1084 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1085 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1086 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1087 			return wmp;
1088 	}
1089 	return NULL;
1090 }
1091 
1092 static int
1093 wm_match(device_t parent, cfdata_t cf, void *aux)
1094 {
1095 	struct pci_attach_args *pa = aux;
1096 
1097 	if (wm_lookup(pa) != NULL)
1098 		return 1;
1099 
1100 	return 0;
1101 }
1102 
1103 static void
1104 wm_attach(device_t parent, device_t self, void *aux)
1105 {
1106 	struct wm_softc *sc = device_private(self);
1107 	struct pci_attach_args *pa = aux;
1108 	prop_dictionary_t dict;
1109 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1110 	pci_chipset_tag_t pc = pa->pa_pc;
1111 	pci_intr_handle_t ih;
1112 	const char *intrstr = NULL;
1113 	const char *eetype, *xname;
1114 	bus_space_tag_t memt;
1115 	bus_space_handle_t memh;
1116 	bus_size_t memsize;
1117 	int memh_valid;
1118 	int i, error;
1119 	const struct wm_product *wmp;
1120 	prop_data_t ea;
1121 	prop_number_t pn;
1122 	uint8_t enaddr[ETHER_ADDR_LEN];
1123 	uint16_t cfg1, cfg2, swdpin, io3;
1124 	pcireg_t preg, memtype;
1125 	uint16_t eeprom_data, apme_mask;
1126 	uint32_t reg;
1127 
1128 	sc->sc_dev = self;
1129 	callout_init(&sc->sc_tick_ch, 0);
1130 
1131 	sc->sc_wmp = wmp = wm_lookup(pa);
1132 	if (wmp == NULL) {
1133 		printf("\n");
1134 		panic("wm_attach: impossible");
1135 	}
1136 
1137 	sc->sc_pc = pa->pa_pc;
1138 	sc->sc_pcitag = pa->pa_tag;
1139 
1140 	if (pci_dma64_available(pa))
1141 		sc->sc_dmat = pa->pa_dmat64;
1142 	else
1143 		sc->sc_dmat = pa->pa_dmat;
1144 
1145 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
1146 	aprint_naive(": Ethernet controller\n");
1147 	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, sc->sc_rev);
1148 
1149 	sc->sc_type = wmp->wmp_type;
1150 	if (sc->sc_type < WM_T_82543) {
1151 		if (sc->sc_rev < 2) {
1152 			aprint_error_dev(sc->sc_dev,
1153 			    "i82542 must be at least rev. 2\n");
1154 			return;
1155 		}
1156 		if (sc->sc_rev < 3)
1157 			sc->sc_type = WM_T_82542_2_0;
1158 	}
1159 
1160 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1161 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER))
1162 		sc->sc_flags |= WM_F_NEWQUEUE;
1163 
1164 	/* Set device properties (mactype) */
1165 	dict = device_properties(sc->sc_dev);
1166 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1167 
1168 	/*
1169 	 * Map the device.  All devices support memory-mapped access,
1170 	 * and it is really required for normal operation.
1171 	 */
1172 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1173 	switch (memtype) {
1174 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1175 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1176 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1177 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1178 		break;
1179 	default:
1180 		memh_valid = 0;
1181 		break;
1182 	}
1183 
1184 	if (memh_valid) {
1185 		sc->sc_st = memt;
1186 		sc->sc_sh = memh;
1187 		sc->sc_ss = memsize;
1188 	} else {
1189 		aprint_error_dev(sc->sc_dev,
1190 		    "unable to map device registers\n");
1191 		return;
1192 	}
1193 
1194 	wm_get_wakeup(sc);
1195 
1196 	/*
1197 	 * In addition, i82544 and later support I/O mapped indirect
1198 	 * register access.  It is not desirable (nor supported in
1199 	 * this driver) to use it for normal operation, though it is
1200 	 * required to work around bugs in some chip versions.
1201 	 */
1202 	if (sc->sc_type >= WM_T_82544) {
1203 		/* First we have to find the I/O BAR. */
1204 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1205 			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
1206 			    PCI_MAPREG_TYPE_IO)
1207 				break;
1208 		}
1209 		if (i != PCI_MAPREG_END) {
1210 			/*
1211 			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
1212 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
1213 			 * That is no problem, because those newer chips
1214 			 * don't have this bug in the first place.
1215 			 *
1216 			 * The i8254x apparently doesn't respond when the
1217 			 * I/O BAR is 0, which looks somewhat like it hasn't
1218 			 * been configured.
1219 			 */
1220 			preg = pci_conf_read(pc, pa->pa_tag, i);
1221 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1222 				aprint_error_dev(sc->sc_dev,
1223 				    "WARNING: I/O BAR at zero.\n");
1224 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1225 					0, &sc->sc_iot, &sc->sc_ioh,
1226 					NULL, &sc->sc_ios) == 0) {
1227 				sc->sc_flags |= WM_F_IOH_VALID;
1228 			} else {
1229 				aprint_error_dev(sc->sc_dev,
1230 				    "WARNING: unable to map I/O space\n");
1231 			}
1232 		}
1233 
1234 	}
1235 
1236 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
1237 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1238 	preg |= PCI_COMMAND_MASTER_ENABLE;
1239 	if (sc->sc_type < WM_T_82542_2_1)
1240 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1241 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1242 
1243 	/* power up chip */
1244 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1245 	    NULL)) && error != EOPNOTSUPP) {
1246 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1247 		return;
1248 	}
1249 
1250 	/*
1251 	 * Map and establish our interrupt.
1252 	 */
1253 	if (pci_intr_map(pa, &ih)) {
1254 		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
1255 		return;
1256 	}
1257 	intrstr = pci_intr_string(pc, ih);
1258 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
1259 	if (sc->sc_ih == NULL) {
1260 		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
1261 		if (intrstr != NULL)
1262 			aprint_error(" at %s", intrstr);
1263 		aprint_error("\n");
1264 		return;
1265 	}
1266 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
1267 
1268 	/*
1269 	 * Check the function ID (unit number of the chip).
1270 	 */
1271 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1272 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
1273 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1274 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER))
1275 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1276 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1277 	else
1278 		sc->sc_funcid = 0;
1279 
1280 	/*
1281 	 * Determine a few things about the bus we're connected to.
1282 	 */
1283 	if (sc->sc_type < WM_T_82543) {
1284 		/* We don't really know the bus characteristics here. */
1285 		sc->sc_bus_speed = 33;
1286 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1287 		/*
1288 		 * CSA (Communication Streaming Architecture) is about as fast
1289 		 * as a 32-bit 66MHz PCI bus.
1290 		 */
1291 		sc->sc_flags |= WM_F_CSA;
1292 		sc->sc_bus_speed = 66;
1293 		aprint_verbose_dev(sc->sc_dev,
1294 		    "Communication Streaming Architecture\n");
1295 		if (sc->sc_type == WM_T_82547) {
1296 			callout_init(&sc->sc_txfifo_ch, 0);
1297 			callout_setfunc(&sc->sc_txfifo_ch,
1298 					wm_82547_txfifo_stall, sc);
1299 			aprint_verbose_dev(sc->sc_dev,
1300 			    "using 82547 Tx FIFO stall work-around\n");
1301 		}
1302 	} else if (sc->sc_type >= WM_T_82571) {
1303 		sc->sc_flags |= WM_F_PCIE;
1304 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1305 		    && (sc->sc_type != WM_T_ICH10)
1306 		    && (sc->sc_type != WM_T_PCH)
1307 		    && (sc->sc_type != WM_T_PCH2)) {
1308 			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
1309 			/* ICH* and PCH* have no PCIe capability registers */
1310 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1311 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1312 				NULL) == 0)
1313 				aprint_error_dev(sc->sc_dev,
1314 				    "unable to find PCIe capability\n");
1315 		}
1316 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1317 	} else {
1318 		reg = CSR_READ(sc, WMREG_STATUS);
1319 		if (reg & STATUS_BUS64)
1320 			sc->sc_flags |= WM_F_BUS64;
1321 		if ((reg & STATUS_PCIX_MODE) != 0) {
1322 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1323 
1324 			sc->sc_flags |= WM_F_PCIX;
1325 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1326 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1327 				aprint_error_dev(sc->sc_dev,
1328 				    "unable to find PCIX capability\n");
1329 			else if (sc->sc_type != WM_T_82545_3 &&
1330 				 sc->sc_type != WM_T_82546_3) {
1331 				/*
1332 				 * Work around a problem caused by the BIOS
1333 				 * setting the max memory read byte count
1334 				 * incorrectly.
1335 				 */
1336 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1337 				    sc->sc_pcixe_capoff + PCI_PCIX_CMD);
1338 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1339 				    sc->sc_pcixe_capoff + PCI_PCIX_STATUS);
1340 
1341 				bytecnt =
1342 				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
1343 				    PCI_PCIX_CMD_BYTECNT_SHIFT;
1344 				maxb =
1345 				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
1346 				    PCI_PCIX_STATUS_MAXB_SHIFT;
1347 				if (bytecnt > maxb) {
1348 					aprint_verbose_dev(sc->sc_dev,
1349 					    "resetting PCI-X MMRBC: %d -> %d\n",
1350 					    512 << bytecnt, 512 << maxb);
1351 					pcix_cmd = (pcix_cmd &
1352 					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
1353 					   (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
1354 					pci_conf_write(pa->pa_pc, pa->pa_tag,
1355 					    sc->sc_pcixe_capoff + PCI_PCIX_CMD,
1356 					    pcix_cmd);
1357 				}
1358 			}
1359 		}
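		/*
		 * Worked example (illustrative): the MMRBC fields encode a
		 * byte count of 512 << n, so bytecnt == 3 means 4096 bytes.
		 * If the status register only allows maxb == 2 (2048 bytes),
		 * the command register is rewritten to match.
		 */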
1360 		/*
1361 		 * The quad port adapter is special; it has a PCIX-PCIX
1362 		 * bridge on the board, and can run the secondary bus at
1363 		 * a higher speed.
1364 		 */
1365 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1366 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1367 								      : 66;
1368 		} else if (sc->sc_flags & WM_F_PCIX) {
1369 			switch (reg & STATUS_PCIXSPD_MASK) {
1370 			case STATUS_PCIXSPD_50_66:
1371 				sc->sc_bus_speed = 66;
1372 				break;
1373 			case STATUS_PCIXSPD_66_100:
1374 				sc->sc_bus_speed = 100;
1375 				break;
1376 			case STATUS_PCIXSPD_100_133:
1377 				sc->sc_bus_speed = 133;
1378 				break;
1379 			default:
1380 				aprint_error_dev(sc->sc_dev,
1381 				    "unknown PCIXSPD %d; assuming 66MHz\n",
1382 				    reg & STATUS_PCIXSPD_MASK);
1383 				sc->sc_bus_speed = 66;
1384 				break;
1385 			}
1386 		} else
1387 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1388 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1389 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1390 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1391 	}
1392 
1393 	/*
1394 	 * Allocate the control data structures, and create and load the
1395 	 * DMA map for it.
1396 	 *
1397 	 * NOTE: All Tx descriptors must be in the same 4G segment of
1398 	 * memory.  So must Rx descriptors.  We simplify by allocating
1399 	 * both sets within the same 4G segment.
1400 	 */
1401 	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1402 	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
1403 	sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1404 	    sizeof(struct wm_control_data_82542) :
1405 	    sizeof(struct wm_control_data_82544);
1406 	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1407 		    (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1408 		    &sc->sc_cd_rseg, 0)) != 0) {
1409 		aprint_error_dev(sc->sc_dev,
1410 		    "unable to allocate control data, error = %d\n",
1411 		    error);
1412 		goto fail_0;
1413 	}
1414 
1415 	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1416 		    sc->sc_cd_rseg, sc->sc_cd_size,
1417 		    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1418 		aprint_error_dev(sc->sc_dev,
1419 		    "unable to map control data, error = %d\n", error);
1420 		goto fail_1;
1421 	}
1422 
1423 	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1424 		    sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1425 		aprint_error_dev(sc->sc_dev,
1426 		    "unable to create control data DMA map, error = %d\n",
1427 		    error);
1428 		goto fail_2;
1429 	}
1430 
1431 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1432 		    sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1433 		aprint_error_dev(sc->sc_dev,
1434 		    "unable to load control data DMA map, error = %d\n",
1435 		    error);
1436 		goto fail_3;
1437 	}
1438 
1439 	/*
1440 	 * Create the transmit buffer DMA maps.
1441 	 */
1442 	WM_TXQUEUELEN(sc) =
1443 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1444 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1445 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1446 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1447 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1448 			    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1449 			aprint_error_dev(sc->sc_dev,
1450 			    "unable to create Tx DMA map %d, error = %d\n",
1451 			    i, error);
1452 			goto fail_4;
1453 		}
1454 	}
1455 
1456 	/*
1457 	 * Create the receive buffer DMA maps.
1458 	 */
1459 	for (i = 0; i < WM_NRXDESC; i++) {
1460 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1461 			    MCLBYTES, 0, 0,
1462 			    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1463 			aprint_error_dev(sc->sc_dev,
1464 			    "unable to create Rx DMA map %d error = %d\n",
1465 			    i, error);
1466 			goto fail_5;
1467 		}
1468 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
1469 	}
1470 
1471 	/* clear interesting stat counters */
1472 	CSR_READ(sc, WMREG_COLC);
1473 	CSR_READ(sc, WMREG_RXERRC);
1474 
1475 	/* get PHY control from SMBus to PCIe */
1476 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2))
1477 		wm_smbustopci(sc);
1478 
1479 	/*
1480 	 * Reset the chip to a known state.
1481 	 */
1482 	wm_reset(sc);
1483 
1484 	switch (sc->sc_type) {
1485 	case WM_T_82571:
1486 	case WM_T_82572:
1487 	case WM_T_82573:
1488 	case WM_T_82574:
1489 	case WM_T_82583:
1490 	case WM_T_80003:
1491 	case WM_T_ICH8:
1492 	case WM_T_ICH9:
1493 	case WM_T_ICH10:
1494 	case WM_T_PCH:
1495 	case WM_T_PCH2:
1496 		if (wm_check_mng_mode(sc) != 0)
1497 			wm_get_hw_control(sc);
1498 		break;
1499 	default:
1500 		break;
1501 	}
1502 
1503 	/*
1504 	 * Get some information about the EEPROM.
1505 	 */
1506 	switch (sc->sc_type) {
1507 	case WM_T_82542_2_0:
1508 	case WM_T_82542_2_1:
1509 	case WM_T_82543:
1510 	case WM_T_82544:
1511 		/* Microwire */
1512 		sc->sc_ee_addrbits = 6;
1513 		break;
1514 	case WM_T_82540:
1515 	case WM_T_82545:
1516 	case WM_T_82545_3:
1517 	case WM_T_82546:
1518 	case WM_T_82546_3:
1519 		/* Microwire */
1520 		reg = CSR_READ(sc, WMREG_EECD);
1521 		if (reg & EECD_EE_SIZE)
1522 			sc->sc_ee_addrbits = 8;
1523 		else
1524 			sc->sc_ee_addrbits = 6;
1525 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1526 		break;
1527 	case WM_T_82541:
1528 	case WM_T_82541_2:
1529 	case WM_T_82547:
1530 	case WM_T_82547_2:
1531 		reg = CSR_READ(sc, WMREG_EECD);
1532 		if (reg & EECD_EE_TYPE) {
1533 			/* SPI */
1534 			wm_set_spiaddrbits(sc);
1535 		} else
1536 			/* Microwire */
1537 			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
1538 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1539 		break;
1540 	case WM_T_82571:
1541 	case WM_T_82572:
1542 		/* SPI */
1543 		wm_set_spiaddrbits(sc);
1544 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1545 		break;
1546 	case WM_T_82573:
1547 	case WM_T_82574:
1548 	case WM_T_82583:
1549 		if (wm_is_onboard_nvm_eeprom(sc) == 0)
1550 			sc->sc_flags |= WM_F_EEPROM_FLASH;
1551 		else {
1552 			/* SPI */
1553 			wm_set_spiaddrbits(sc);
1554 		}
1555 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1556 		break;
1557 	case WM_T_82575:
1558 	case WM_T_82576:
1559 	case WM_T_82580:
1560 	case WM_T_82580ER:
1561 	case WM_T_80003:
1562 		/* SPI */
1563 		wm_set_spiaddrbits(sc);
1564 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
1565 		break;
1566 	case WM_T_ICH8:
1567 	case WM_T_ICH9:
1568 	case WM_T_ICH10:
1569 	case WM_T_PCH:
1570 	case WM_T_PCH2:
1571 		/* FLASH */
1572 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
1573 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1574 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1575 		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1576 			aprint_error_dev(sc->sc_dev,
1577 			    "can't map FLASH registers\n");
1578 			return;
1579 		}
1580 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1581 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1582 						ICH_FLASH_SECTOR_SIZE;
1583 		sc->sc_ich8_flash_bank_size =
1584 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1585 		sc->sc_ich8_flash_bank_size -=
1586 		    (reg & ICH_GFPREG_BASE_MASK);
1587 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1588 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
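
		/*
		 * Worked example (illustrative; assumes ICH_FLASH_SECTOR_SIZE
		 * is 4KB): if GFPREG reports base = 1 and limit = 3, the NVM
		 * area starts 1 * 4096 = 4096 bytes into the flash and spans
		 * (3 + 1) - 1 = 3 sectors = 12288 bytes; two banks share that
		 * area, so one bank is 12288 / 2 = 6144 bytes = 3072 16-bit
		 * words, which is what the arithmetic above computes.
		 */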
1589 		break;
1590 	default:
1591 		break;
1592 	}
1593 
1594 	/*
1595 	 * Defer printing the EEPROM type until after verifying the checksum.
1596 	 * This allows the EEPROM type to be printed correctly in the case
1597 	 * that no EEPROM is attached.
1598 	 */
1599 	/*
1600 	 * Validate the EEPROM checksum. If the checksum fails, flag
1601 	 * this for later, so we can fail future reads from the EEPROM.
1602 	 */
1603 	if (wm_validate_eeprom_checksum(sc)) {
1604 		/*
1605 		 * Check the checksum again, because some PCI-e parts fail
1606 		 * the first check when the link is still in a sleep state.
1607 		 */
1608 		if (wm_validate_eeprom_checksum(sc))
1609 			sc->sc_flags |= WM_F_EEPROM_INVALID;
1610 	}
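
	/*
	 * A sketch of the check (see wm_validate_eeprom_checksum() for the
	 * authoritative version): the 16-bit words of the NVM header area
	 * are summed and the total must equal a fixed signature, roughly:
	 *
	 *	sum = 0;
	 *	for (i = 0; i < nwords; i++) {
	 *		wm_read_eeprom(sc, i, 1, &val);
	 *		sum += val;
	 *	}
	 *	if (sum != EEPROM_CHECKSUM)
	 *		return 1;
	 *
	 * where nwords and EEPROM_CHECKSUM are the driver's constants for
	 * this NVM layout.
	 */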
1611 
1612 	/* Set device properties (macflags) */
1613 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1614 
1615 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
1616 		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1617 	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1618 		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1619 	} else {
1620 		if (sc->sc_flags & WM_F_EEPROM_SPI)
1621 			eetype = "SPI";
1622 		else
1623 			eetype = "MicroWire";
1624 		aprint_verbose_dev(sc->sc_dev,
1625 		    "%u word (%d address bits) %s EEPROM\n",
1626 		    1U << sc->sc_ee_addrbits,
1627 		    sc->sc_ee_addrbits, eetype);
1628 	}
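
	/*
	 * The word count printed above follows directly from the address
	 * width: 6 address bits select 1 << 6 = 64 words, 8 bits 256.
	 */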
1629 
1630 	/*
1631 	 * Read the Ethernet address from the EEPROM, unless it was
1632 	 * already found in the device properties.
1633 	 */
1634 	ea = prop_dictionary_get(dict, "mac-address");
1635 	if (ea != NULL) {
1636 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1637 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1638 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1639 	} else {
1640 		if (wm_read_mac_addr(sc, enaddr) != 0) {
1641 			aprint_error_dev(sc->sc_dev,
1642 			    "unable to read Ethernet address\n");
1643 			return;
1644 		}
1645 	}
1646 
1647 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1648 	    ether_sprintf(enaddr));
1649 
1650 	/*
1651 	 * Read the config info from the EEPROM, and set up various
1652 	 * bits in the control registers based on their contents.
1653 	 */
1654 	pn = prop_dictionary_get(dict, "i82543-cfg1");
1655 	if (pn != NULL) {
1656 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1657 		cfg1 = (uint16_t) prop_number_integer_value(pn);
1658 	} else {
1659 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1660 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1661 			return;
1662 		}
1663 	}
1664 
1665 	pn = prop_dictionary_get(dict, "i82543-cfg2");
1666 	if (pn != NULL) {
1667 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1668 		cfg2 = (uint16_t) prop_number_integer_value(pn);
1669 	} else {
1670 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1671 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1672 			return;
1673 		}
1674 	}
1675 
1676 	/* check for WM_F_WOL */
1677 	switch (sc->sc_type) {
1678 	case WM_T_82542_2_0:
1679 	case WM_T_82542_2_1:
1680 	case WM_T_82543:
1681 		/* dummy? */
1682 		eeprom_data = 0;
1683 		apme_mask = EEPROM_CFG3_APME;
1684 		break;
1685 	case WM_T_82544:
1686 		apme_mask = EEPROM_CFG2_82544_APM_EN;
1687 		eeprom_data = cfg2;
1688 		break;
1689 	case WM_T_82546:
1690 	case WM_T_82546_3:
1691 	case WM_T_82571:
1692 	case WM_T_82572:
1693 	case WM_T_82573:
1694 	case WM_T_82574:
1695 	case WM_T_82583:
1696 	case WM_T_80003:
1697 	default:
1698 		apme_mask = EEPROM_CFG3_APME;
1699 		wm_read_eeprom(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
1700 		    : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
1701 		break;
1702 	case WM_T_82575:
1703 	case WM_T_82576:
1704 	case WM_T_82580:
1705 	case WM_T_82580ER:
1706 	case WM_T_ICH8:
1707 	case WM_T_ICH9:
1708 	case WM_T_ICH10:
1709 	case WM_T_PCH:
1710 	case WM_T_PCH2:
1711 		apme_mask = WUC_APME;
1712 		eeprom_data = CSR_READ(sc, WMREG_WUC);
1713 		break;
1714 	}
1715 
1716 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
1717 	if ((eeprom_data & apme_mask) != 0)
1718 		sc->sc_flags |= WM_F_WOL;
1719 #ifdef WM_DEBUG
1720 	if ((sc->sc_flags & WM_F_WOL) != 0)
1721 		printf("WOL\n");
1722 #endif
1723 
1724 	/*
1725 	 * XXX need special handling for some multiple port cards
1726 	 * to disable a particular port.
1727 	 */
1728 
1729 	if (sc->sc_type >= WM_T_82544) {
1730 		pn = prop_dictionary_get(dict, "i82543-swdpin");
1731 		if (pn != NULL) {
1732 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1733 			swdpin = (uint16_t) prop_number_integer_value(pn);
1734 		} else {
1735 			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1736 				aprint_error_dev(sc->sc_dev,
1737 				    "unable to read SWDPIN\n");
1738 				return;
1739 			}
1740 		}
1741 	}
1742 
1743 	if (cfg1 & EEPROM_CFG1_ILOS)
1744 		sc->sc_ctrl |= CTRL_ILOS;
1745 	if (sc->sc_type >= WM_T_82544) {
1746 		sc->sc_ctrl |=
1747 		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1748 		    CTRL_SWDPIO_SHIFT;
1749 		sc->sc_ctrl |=
1750 		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1751 		    CTRL_SWDPINS_SHIFT;
1752 	} else {
1753 		sc->sc_ctrl |=
1754 		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1755 		    CTRL_SWDPIO_SHIFT;
1756 	}
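
	/*
	 * Illustrative example (pin semantics not re-checked against the
	 * datasheet): if the SWDPIO nibble taken from cfg1 is 0x5, then
	 * bits 0 and 2 are set once it is shifted to CTRL_SWDPIO_SHIFT,
	 * presumably configuring software-definable pins 0 and 2.
	 */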
1757 
1758 #if 0
1759 	if (sc->sc_type >= WM_T_82544) {
1760 		if (cfg1 & EEPROM_CFG1_IPS0)
1761 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1762 		if (cfg1 & EEPROM_CFG1_IPS1)
1763 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1764 		sc->sc_ctrl_ext |=
1765 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1766 		    CTRL_EXT_SWDPIO_SHIFT;
1767 		sc->sc_ctrl_ext |=
1768 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1769 		    CTRL_EXT_SWDPINS_SHIFT;
1770 	} else {
1771 		sc->sc_ctrl_ext |=
1772 		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1773 		    CTRL_EXT_SWDPIO_SHIFT;
1774 	}
1775 #endif
1776 
1777 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1778 #if 0
1779 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1780 #endif
1781 
1782 	/*
1783 	 * Set up some register offsets that are different between
1784 	 * the i82542 and the i82543 and later chips.
1785 	 */
1786 	if (sc->sc_type < WM_T_82543) {
1787 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
1788 		sc->sc_tdt_reg = WMREG_OLD_TDT;
1789 	} else {
1790 		sc->sc_rdt_reg = WMREG_RDT;
1791 		sc->sc_tdt_reg = WMREG_TDT;
1792 	}
1793 
1794 	if (sc->sc_type == WM_T_PCH) {
1795 		uint16_t val;
1796 
1797 		/* Save the NVM K1 bit setting */
1798 		wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
1799 
1800 		if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
1801 			sc->sc_nvm_k1_enabled = 1;
1802 		else
1803 			sc->sc_nvm_k1_enabled = 0;
1804 	}
1805 
1806 	/*
1807 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
1808 	 * media structures accordingly.
1809 	 */
1810 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1811 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
1812 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_82573
1813 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
1814 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
1815 		wm_gmii_mediainit(sc, wmp->wmp_product);
1816 	} else if (sc->sc_type < WM_T_82543 ||
1817 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1818 		if (wmp->wmp_flags & WMP_F_1000T)
1819 			aprint_error_dev(sc->sc_dev,
1820 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
1821 		wm_tbi_mediainit(sc);
1822 	} else {
1823 		switch (sc->sc_type) {
1824 		case WM_T_82575:
1825 		case WM_T_82576:
1826 		case WM_T_82580:
1827 		case WM_T_82580ER:
1828 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
1829 			switch (reg & CTRL_EXT_LINK_MODE_MASK) {
1830 			case CTRL_EXT_LINK_MODE_SGMII:
1831 				aprint_verbose_dev(sc->sc_dev, "SGMII\n");
1832 				sc->sc_flags |= WM_F_SGMII;
1833 				CSR_WRITE(sc, WMREG_CTRL_EXT,
1834 				    reg | CTRL_EXT_I2C_ENA);
1835 				wm_gmii_mediainit(sc, wmp->wmp_product);
1836 				break;
1837 			case CTRL_EXT_LINK_MODE_1000KX:
1838 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
1839 				aprint_verbose_dev(sc->sc_dev, "1000KX or SERDES\n");
1840 				CSR_WRITE(sc, WMREG_CTRL_EXT,
1841 				    reg | CTRL_EXT_I2C_ENA);
1842 				panic("not supported yet\n");
1843 				break;
1844 			case CTRL_EXT_LINK_MODE_GMII:
1845 			default:
1846 				CSR_WRITE(sc, WMREG_CTRL_EXT,
1847 				    reg & ~CTRL_EXT_I2C_ENA);
1848 				wm_gmii_mediainit(sc, wmp->wmp_product);
1849 				break;
1850 			}
1851 			break;
1852 		default:
1853 			if (wmp->wmp_flags & WMP_F_1000X)
1854 				aprint_error_dev(sc->sc_dev,
1855 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
1856 			wm_gmii_mediainit(sc, wmp->wmp_product);
1857 		}
1858 	}
1859 
1860 	ifp = &sc->sc_ethercom.ec_if;
1861 	xname = device_xname(sc->sc_dev);
1862 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
1863 	ifp->if_softc = sc;
1864 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1865 	ifp->if_ioctl = wm_ioctl;
1866 	ifp->if_start = wm_start;
1867 	ifp->if_watchdog = wm_watchdog;
1868 	ifp->if_init = wm_init;
1869 	ifp->if_stop = wm_stop;
1870 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1871 	IFQ_SET_READY(&ifp->if_snd);
1872 
1873 	/* Check for jumbo frame */
1874 	switch (sc->sc_type) {
1875 	case WM_T_82573:
1876 		/* XXX limited to 9234 if ASPM is disabled */
1877 		wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
1878 		if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
1879 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1880 		break;
1881 	case WM_T_82571:
1882 	case WM_T_82572:
1883 	case WM_T_82574:
1884 	case WM_T_82575:
1885 	case WM_T_82576:
1886 	case WM_T_82580:
1887 	case WM_T_82580ER:
1888 	case WM_T_80003:
1889 	case WM_T_ICH9:
1890 	case WM_T_ICH10:
1891 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
1892 		/* XXX limited to 9234 */
1893 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1894 		break;
1895 	case WM_T_PCH:
1896 		/* XXX limited to 4096 */
1897 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1898 		break;
1899 	case WM_T_82542_2_0:
1900 	case WM_T_82542_2_1:
1901 	case WM_T_82583:
1902 	case WM_T_ICH8:
1903 		/* No support for jumbo frame */
1904 		break;
1905 	default:
1906 		/* ETHER_MAX_LEN_JUMBO */
1907 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1908 		break;
1909 	}
1910 
1911 	/*
1912 	 * If we're an i82543 or greater, we can support VLANs.
1913 	 */
1914 	if (sc->sc_type == WM_T_82575 || sc->sc_type == WM_T_82576)
1915 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
1916 	else if (sc->sc_type >= WM_T_82543)
1917 		sc->sc_ethercom.ec_capabilities |=
1918 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
1919 
1920 	/*
1921 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
1922 	 * on i82543 and later.
1923 	 */
1924 	if (sc->sc_type >= WM_T_82543) {
1925 		ifp->if_capabilities |=
1926 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1927 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1928 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1929 		    IFCAP_CSUM_TCPv6_Tx |
1930 		    IFCAP_CSUM_UDPv6_Tx;
1931 	}
1932 
1933 	/*
1934 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
1935 	 *
1936 	 *	82541GI (8086:1076) ... no
1937 	 *	82572EI (8086:10b9) ... yes
1938 	 */
1939 	if (sc->sc_type >= WM_T_82571) {
1940 		ifp->if_capabilities |=
1941 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1942 	}
1943 
1944 	/*
1945 	 * If we're an i82544 or greater (except i82547), we can do
1946 	 * TCP segmentation offload.
1947 	 */
1948 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
1949 		ifp->if_capabilities |= IFCAP_TSOv4;
1950 	}
1951 
1952 	if (sc->sc_type >= WM_T_82571) {
1953 		ifp->if_capabilities |= IFCAP_TSOv6;
1954 	}
1955 
1956 	/*
1957 	 * Attach the interface.
1958 	 */
1959 	if_attach(ifp);
1960 	ether_ifattach(ifp, enaddr);
1961 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
1962 #if NRND > 0
1963 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
1964 #endif
1965 
1966 #ifdef WM_EVENT_COUNTERS
1967 	/* Attach event counters. */
1968 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1969 	    NULL, xname, "txsstall");
1970 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1971 	    NULL, xname, "txdstall");
1972 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1973 	    NULL, xname, "txfifo_stall");
1974 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1975 	    NULL, xname, "txdw");
1976 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1977 	    NULL, xname, "txqe");
1978 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1979 	    NULL, xname, "rxintr");
1980 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
1981 	    NULL, xname, "linkintr");
1982 
1983 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1984 	    NULL, xname, "rxipsum");
1985 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
1986 	    NULL, xname, "rxtusum");
1987 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1988 	    NULL, xname, "txipsum");
1989 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
1990 	    NULL, xname, "txtusum");
1991 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
1992 	    NULL, xname, "txtusum6");
1993 
1994 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
1995 	    NULL, xname, "txtso");
1996 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
1997 	    NULL, xname, "txtso6");
1998 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
1999 	    NULL, xname, "txtsopain");
2000 
2001 	for (i = 0; i < WM_NTXSEGS; i++) {
2002 		sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
2003 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2004 		    NULL, xname, wm_txseg_evcnt_names[i]);
2005 	}
2006 
2007 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2008 	    NULL, xname, "txdrop");
2009 
2010 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2011 	    NULL, xname, "tu");
2012 
2013 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2014 	    NULL, xname, "tx_xoff");
2015 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2016 	    NULL, xname, "tx_xon");
2017 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2018 	    NULL, xname, "rx_xoff");
2019 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2020 	    NULL, xname, "rx_xon");
2021 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2022 	    NULL, xname, "rx_macctl");
2023 #endif /* WM_EVENT_COUNTERS */
2024 
2025 	if (pmf_device_register(self, wm_suspend, wm_resume))
2026 		pmf_class_network_register(self, ifp);
2027 	else
2028 		aprint_error_dev(self, "couldn't establish power handler\n");
2029 
2030 	return;
2031 
2032 	/*
2033 	 * Free any resources we've allocated during the failed attach
2034 	 * attempt.  Do this in reverse order and fall through.
2035 	 */
2036  fail_5:
2037 	for (i = 0; i < WM_NRXDESC; i++) {
2038 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2039 			bus_dmamap_destroy(sc->sc_dmat,
2040 			    sc->sc_rxsoft[i].rxs_dmamap);
2041 	}
2042  fail_4:
2043 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2044 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
2045 			bus_dmamap_destroy(sc->sc_dmat,
2046 			    sc->sc_txsoft[i].txs_dmamap);
2047 	}
2048 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2049  fail_3:
2050 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2051  fail_2:
2052 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2053 	    sc->sc_cd_size);
2054  fail_1:
2055 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2056  fail_0:
2057 	return;
2058 }
2059 
2060 static int
2061 wm_detach(device_t self, int flags __unused)
2062 {
2063 	struct wm_softc *sc = device_private(self);
2064 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2065 	int i, s;
2066 
2067 	s = splnet();
2068 	/* Stop the interface; callouts are stopped in wm_stop(). */
2069 	wm_stop(ifp, 1);
2070 	splx(s);
2071 
2072 	pmf_device_deregister(self);
2073 
2074 	/* Tell the firmware about the release */
2075 	wm_release_manageability(sc);
2076 	wm_release_hw_control(sc);
2077 
2078 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2079 
2080 	/* Delete all remaining media. */
2081 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2082 
2083 	ether_ifdetach(ifp);
2084 	if_detach(ifp);
2085 
2087 	/* Unload RX dmamaps and free mbufs */
2088 	wm_rxdrain(sc);
2089 
2090 	/* Free DMA maps; this mirrors the failure path at the end of wm_attach() */
2091 	for (i = 0; i < WM_NRXDESC; i++) {
2092 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2093 			bus_dmamap_destroy(sc->sc_dmat,
2094 			    sc->sc_rxsoft[i].rxs_dmamap);
2095 	}
2096 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2097 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
2098 			bus_dmamap_destroy(sc->sc_dmat,
2099 			    sc->sc_txsoft[i].txs_dmamap);
2100 	}
2101 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2102 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2103 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2104 	    sc->sc_cd_size);
2105 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2106 
2107 	/* Disestablish the interrupt handler */
2108 	if (sc->sc_ih != NULL) {
2109 		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2110 		sc->sc_ih = NULL;
2111 	}
2112 
2113 	/* Unmap the registers */
2114 	if (sc->sc_ss) {
2115 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2116 		sc->sc_ss = 0;
2117 	}
2118 
2119 	if (sc->sc_ios) {
2120 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2121 		sc->sc_ios = 0;
2122 	}
2123 
2124 	return 0;
2125 }
2126 
2127 /*
2128  * wm_tx_offload:
2129  *
2130  *	Set up TCP/IP checksumming parameters for the
2131  *	specified packet.
2132  */
2133 static int
2134 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
2135     uint8_t *fieldsp)
2136 {
2137 	struct mbuf *m0 = txs->txs_mbuf;
2138 	struct livengood_tcpip_ctxdesc *t;
2139 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
2140 	uint32_t ipcse;
2141 	struct ether_header *eh;
2142 	int offset, iphl;
2143 	uint8_t fields;
2144 
2145 	/*
2146 	 * XXX It would be nice if the mbuf pkthdr had offset
2147 	 * fields for the protocol headers.
2148 	 */
2149 
2150 	eh = mtod(m0, struct ether_header *);
2151 	switch (htons(eh->ether_type)) {
2152 	case ETHERTYPE_IP:
2153 	case ETHERTYPE_IPV6:
2154 		offset = ETHER_HDR_LEN;
2155 		break;
2156 
2157 	case ETHERTYPE_VLAN:
2158 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2159 		break;
2160 
2161 	default:
2162 		/*
2163 		 * Don't support this protocol or encapsulation.
2164 		 */
2165 		*fieldsp = 0;
2166 		*cmdp = 0;
2167 		return 0;
2168 	}
2169 
2170 	if ((m0->m_pkthdr.csum_flags &
2171 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
2172 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2173 	} else {
2174 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2175 	}
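
	/*
	 * ipcse is the inclusive offset of the last byte of the IP header.
	 * For example (untagged IPv4, 20-byte header): offset = 14 and
	 * iphl = 20, so ipcse = 14 + 20 - 1 = 33.
	 */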
2176 	ipcse = offset + iphl - 1;
2177 
2178 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
2179 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
2180 	seg = 0;
2181 	fields = 0;
2182 
2183 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2184 		int hlen = offset + iphl;
2185 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2186 
2187 		if (__predict_false(m0->m_len <
2188 				    (hlen + sizeof(struct tcphdr)))) {
2189 			/*
2190 			 * TCP/IP headers are not in the first mbuf; we need
2191 			 * to do this the slow and painful way.  Let's just
2192 			 * hope this doesn't happen very often.
2193 			 */
2194 			struct tcphdr th;
2195 
2196 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2197 
2198 			m_copydata(m0, hlen, sizeof(th), &th);
2199 			if (v4) {
2200 				struct ip ip;
2201 
2202 				m_copydata(m0, offset, sizeof(ip), &ip);
2203 				ip.ip_len = 0;
2204 				m_copyback(m0,
2205 				    offset + offsetof(struct ip, ip_len),
2206 				    sizeof(ip.ip_len), &ip.ip_len);
2207 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2208 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2209 			} else {
2210 				struct ip6_hdr ip6;
2211 
2212 				m_copydata(m0, offset, sizeof(ip6), &ip6);
2213 				ip6.ip6_plen = 0;
2214 				m_copyback(m0,
2215 				    offset + offsetof(struct ip6_hdr, ip6_plen),
2216 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2217 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2218 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2219 			}
2220 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2221 			    sizeof(th.th_sum), &th.th_sum);
2222 
2223 			hlen += th.th_off << 2;
2224 		} else {
2225 			/*
2226 			 * TCP/IP headers are in the first mbuf; we can do
2227 			 * this the easy way.
2228 			 */
2229 			struct tcphdr *th;
2230 
2231 			if (v4) {
2232 				struct ip *ip =
2233 				    (void *)(mtod(m0, char *) + offset);
2234 				th = (void *)(mtod(m0, char *) + hlen);
2235 
2236 				ip->ip_len = 0;
2237 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2238 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2239 			} else {
2240 				struct ip6_hdr *ip6 =
2241 				    (void *)(mtod(m0, char *) + offset);
2242 				th = (void *)(mtod(m0, char *) + hlen);
2243 
2244 				ip6->ip6_plen = 0;
2245 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2246 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2247 			}
2248 			hlen += th->th_off << 2;
2249 		}
2250 
2251 		if (v4) {
2252 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
2253 			cmdlen |= WTX_TCPIP_CMD_IP;
2254 		} else {
2255 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2256 			ipcse = 0;
2257 		}
2258 		cmd |= WTX_TCPIP_CMD_TSE;
2259 		cmdlen |= WTX_TCPIP_CMD_TSE |
2260 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
2261 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
2262 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
2263 	}
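
	/*
	 * The fixups above are needed because the MAC rewrites the length
	 * fields for every segment it generates: the IP total length (or
	 * IPv6 payload length) is zeroed, and th_sum is pre-seeded with a
	 * pseudo-header sum over only the addresses and protocol, so the
	 * hardware can fold each segment's length in afterwards.
	 */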
2264 
2265 	/*
2266 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
2267 	 * offload feature, if we load the context descriptor, we
2268 	 * MUST provide valid values for IPCSS and TUCSS fields.
2269 	 */
2270 
2271 	ipcs = WTX_TCPIP_IPCSS(offset) |
2272 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
2273 	    WTX_TCPIP_IPCSE(ipcse);
2274 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
2275 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
2276 		fields |= WTX_IXSM;
2277 	}
2278 
2279 	offset += iphl;
2280 
2281 	if (m0->m_pkthdr.csum_flags &
2282 	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
2283 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2284 		fields |= WTX_TXSM;
2285 		tucs = WTX_TCPIP_TUCSS(offset) |
2286 		    WTX_TCPIP_TUCSO(offset +
2287 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
2288 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
2289 	} else if ((m0->m_pkthdr.csum_flags &
2290 	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
2291 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
2292 		fields |= WTX_TXSM;
2293 		tucs = WTX_TCPIP_TUCSS(offset) |
2294 		    WTX_TCPIP_TUCSO(offset +
2295 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
2296 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
2297 	} else {
2298 		/* Just initialize it to a valid TCP context. */
2299 		tucs = WTX_TCPIP_TUCSS(offset) |
2300 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
2301 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
2302 	}
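
	/*
	 * Worked example: for TCP over untagged IPv4 with a 20-byte IP
	 * header, offset is now 14 + 20 = 34, so TUCSS = 34 and TUCSO =
	 * 34 + offsetof(struct tcphdr, th_sum) = 34 + 16 = 50; for UDP
	 * the checksum sits at offset 6 in the header, giving TUCSO = 40.
	 */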
2303 
2304 	/* Fill in the context descriptor. */
2305 	t = (struct livengood_tcpip_ctxdesc *)
2306 	    &sc->sc_txdescs[sc->sc_txnext];
2307 	t->tcpip_ipcs = htole32(ipcs);
2308 	t->tcpip_tucs = htole32(tucs);
2309 	t->tcpip_cmdlen = htole32(cmdlen);
2310 	t->tcpip_seg = htole32(seg);
2311 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
2312 
2313 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
2314 	txs->txs_ndesc++;
2315 
2316 	*cmdp = cmd;
2317 	*fieldsp = fields;
2318 
2319 	return 0;
2320 }
2321 
2322 static void
2323 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
2324 {
2325 	struct mbuf *m;
2326 	int i;
2327 
2328 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
2329 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
2330 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
2331 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
2332 		    m->m_data, m->m_len, m->m_flags);
2333 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
2334 	    i, i == 1 ? "" : "s");
2335 }
2336 
2337 /*
2338  * wm_82547_txfifo_stall:
2339  *
2340  *	Callout used to wait for the 82547 Tx FIFO to drain,
2341  *	reset the FIFO pointers, and restart packet transmission.
2342  */
2343 static void
2344 wm_82547_txfifo_stall(void *arg)
2345 {
2346 	struct wm_softc *sc = arg;
2347 	int s;
2348 
2349 	s = splnet();
2350 
2351 	if (sc->sc_txfifo_stall) {
2352 		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
2353 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
2354 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
2355 			/*
2356 			 * Packets have drained.  Stop transmitter, reset
2357 			 * FIFO pointers, restart transmitter, and kick
2358 			 * the packet queue.
2359 			 */
2360 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
2361 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
2362 			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
2363 			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
2364 			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
2365 			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
2366 			CSR_WRITE(sc, WMREG_TCTL, tctl);
2367 			CSR_WRITE_FLUSH(sc);
2368 
2369 			sc->sc_txfifo_head = 0;
2370 			sc->sc_txfifo_stall = 0;
2371 			wm_start(&sc->sc_ethercom.ec_if);
2372 		} else {
2373 			/*
2374 			 * Still waiting for packets to drain; try again in
2375 			 * another tick.
2376 			 */
2377 			callout_schedule(&sc->sc_txfifo_ch, 1);
2378 		}
2379 	}
2380 
2381 	splx(s);
2382 }
2383 
2384 static void
2385 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
2386 {
2387 	uint32_t reg;
2388 
2389 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
2390 
2391 	if (on != 0)
2392 		reg |= EXTCNFCTR_GATE_PHY_CFG;
2393 	else
2394 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
2395 
2396 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
2397 }
2398 
2399 /*
2400  * wm_82547_txfifo_bugchk:
2401  *
2402  *	Check for bug condition in the 82547 Tx FIFO.  We need to
2403  *	prevent enqueueing a packet that would wrap around the end
2404  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
2405  *
2406  *	We do this by checking the amount of space before the end
2407  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
2408  *	the Tx FIFO, wait for all remaining packets to drain, reset
2409  *	the internal FIFO pointers to the beginning, and restart
2410  *	transmission on the interface.
2411  */
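/*
 * Worked example (illustrative numbers): a 1514-byte frame rounds up to
 * len = roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536 bytes of FIFO
 * space.  If only space = 512 bytes remain before the end of the FIFO,
 * then len >= WM_82547_PAD_LEN (992) + 512 = 1504 holds, so the packet
 * is held back and the drain callout is scheduled instead.
 */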
2412 #define	WM_FIFO_HDR		0x10
2413 #define	WM_82547_PAD_LEN	0x3e0
2414 static int
2415 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2416 {
2417 	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2418 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2419 
2420 	/* Just return if already stalled. */
2421 	if (sc->sc_txfifo_stall)
2422 		return 1;
2423 
2424 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
2425 		/* Stall only occurs in half-duplex mode. */
2426 		goto send_packet;
2427 	}
2428 
2429 	if (len >= WM_82547_PAD_LEN + space) {
2430 		sc->sc_txfifo_stall = 1;
2431 		callout_schedule(&sc->sc_txfifo_ch, 1);
2432 		return 1;
2433 	}
2434 
2435  send_packet:
2436 	sc->sc_txfifo_head += len;
2437 	if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2438 		sc->sc_txfifo_head -= sc->sc_txfifo_size;
2439 
2440 	return 0;
2441 }
2442 
2443 /*
2444  * wm_start:		[ifnet interface function]
2445  *
2446  *	Start packet transmission on the interface.
2447  */
2448 static void
2449 wm_start(struct ifnet *ifp)
2450 {
2451 	struct wm_softc *sc = ifp->if_softc;
2452 	struct mbuf *m0;
2453 	struct m_tag *mtag;
2454 	struct wm_txsoft *txs;
2455 	bus_dmamap_t dmamap;
2456 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2457 	bus_addr_t curaddr;
2458 	bus_size_t seglen, curlen;
2459 	uint32_t cksumcmd;
2460 	uint8_t cksumfields;
2461 
2462 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2463 		return;
2464 
2465 	/*
2466 	 * Remember the previous number of free descriptors.
2467 	 */
2468 	ofree = sc->sc_txfree;
2469 
2470 	/*
2471 	 * Loop through the send queue, setting up transmit descriptors
2472 	 * until we drain the queue, or use up all available transmit
2473 	 * descriptors.
2474 	 */
2475 	for (;;) {
2476 		/* Grab a packet off the queue. */
2477 		IFQ_POLL(&ifp->if_snd, m0);
2478 		if (m0 == NULL)
2479 			break;
2480 
2481 		DPRINTF(WM_DEBUG_TX,
2482 		    ("%s: TX: have packet to transmit: %p\n",
2483 		    device_xname(sc->sc_dev), m0));
2484 
2485 		/* Get a work queue entry. */
2486 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2487 			wm_txintr(sc);
2488 			if (sc->sc_txsfree == 0) {
2489 				DPRINTF(WM_DEBUG_TX,
2490 				    ("%s: TX: no free job descriptors\n",
2491 					device_xname(sc->sc_dev)));
2492 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2493 				break;
2494 			}
2495 		}
2496 
2497 		txs = &sc->sc_txsoft[sc->sc_txsnext];
2498 		dmamap = txs->txs_dmamap;
2499 
2500 		use_tso = (m0->m_pkthdr.csum_flags &
2501 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2502 
2503 		/*
2504 		 * So says the Linux driver:
2505 		 * The controller does a simple calculation to make sure
2506 		 * there is enough room in the FIFO before initiating the
2507 		 * DMA for each buffer.  The calc is:
2508 		 *	4 = ceil(buffer len / MSS)
2509 		 * To make sure we don't overrun the FIFO, adjust the max
2510 		 * buffer len if the MSS drops.
2511 		 */
2512 		dmamap->dm_maxsegsz =
2513 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2514 		    ? m0->m_pkthdr.segsz << 2
2515 		    : WTX_MAX_LEN;
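
		/*
		 * Illustrative example: with an MSS of 576 the cap above
		 * becomes 576 << 2 = 2304 bytes per DMA segment (assuming
		 * that is below WTX_MAX_LEN); larger MSS values simply
		 * fall back to the WTX_MAX_LEN limit.
		 */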
2516 
2517 		/*
2518 		 * Load the DMA map.  If this fails, the packet either
2519 		 * didn't fit in the allotted number of segments, or we
2520 		 * were short on resources.  For the too-many-segments
2521 		 * case, we simply report an error and drop the packet,
2522 		 * since we can't sanely copy a jumbo packet to a single
2523 		 * buffer.
2524 		 */
2525 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2526 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2527 		if (error) {
2528 			if (error == EFBIG) {
2529 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2530 				log(LOG_ERR, "%s: Tx packet consumes too many "
2531 				    "DMA segments, dropping...\n",
2532 				    device_xname(sc->sc_dev));
2533 				IFQ_DEQUEUE(&ifp->if_snd, m0);
2534 				wm_dump_mbuf_chain(sc, m0);
2535 				m_freem(m0);
2536 				continue;
2537 			}
2538 			/*
2539 			 * Short on resources, just stop for now.
2540 			 */
2541 			DPRINTF(WM_DEBUG_TX,
2542 			    ("%s: TX: dmamap load failed: %d\n",
2543 			    device_xname(sc->sc_dev), error));
2544 			break;
2545 		}
2546 
2547 		segs_needed = dmamap->dm_nsegs;
2548 		if (use_tso) {
2549 			/* For sentinel descriptor; see below. */
2550 			segs_needed++;
2551 		}
2552 
2553 		/*
2554 		 * Ensure we have enough descriptors free to describe
2555 		 * the packet.  Note, we always reserve one descriptor
2556 		 * at the end of the ring due to the semantics of the
2557 		 * TDT register, plus one more in the event we need
2558 		 * to load offload context.
2559 		 */
2560 		if (segs_needed > sc->sc_txfree - 2) {
2561 			/*
2562 			 * Not enough free descriptors to transmit this
2563 			 * packet.  We haven't committed anything yet,
2564 			 * so just unload the DMA map, put the packet
2565 			 * back on the queue, and punt.  Notify the upper
2566 			 * layer that there are no more slots left.
2567 			 */
2568 			DPRINTF(WM_DEBUG_TX,
2569 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
2570 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
2571 			    segs_needed, sc->sc_txfree - 1));
2572 			ifp->if_flags |= IFF_OACTIVE;
2573 			bus_dmamap_unload(sc->sc_dmat, dmamap);
2574 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2575 			break;
2576 		}
2577 
2578 		/*
2579 		 * Check for 82547 Tx FIFO bug.  We need to do this
2580 		 * once we know we can transmit the packet, since we
2581 		 * do some internal FIFO space accounting here.
2582 		 */
2583 		if (sc->sc_type == WM_T_82547 &&
2584 		    wm_82547_txfifo_bugchk(sc, m0)) {
2585 			DPRINTF(WM_DEBUG_TX,
2586 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
2587 			    device_xname(sc->sc_dev)));
2588 			ifp->if_flags |= IFF_OACTIVE;
2589 			bus_dmamap_unload(sc->sc_dmat, dmamap);
2590 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2591 			break;
2592 		}
2593 
2594 		IFQ_DEQUEUE(&ifp->if_snd, m0);
2595 
2596 		/*
2597 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2598 		 */
2599 
2600 		DPRINTF(WM_DEBUG_TX,
2601 		    ("%s: TX: packet has %d (%d) DMA segments\n",
2602 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2603 
2604 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2605 
2606 		/*
2607 		 * Store a pointer to the packet so that we can free it
2608 		 * later.
2609 		 *
2610 		 * Initially, we consider the number of descriptors the
2611 		 * packet uses the number of DMA segments.  This may be
2612 		 * incremented by 1 if we do checksum offload (a descriptor
2613 		 * is used to set the checksum context).
2614 		 */
2615 		txs->txs_mbuf = m0;
2616 		txs->txs_firstdesc = sc->sc_txnext;
2617 		txs->txs_ndesc = segs_needed;
2618 
2619 		/* Set up offload parameters for this packet. */
2620 		if (m0->m_pkthdr.csum_flags &
2621 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
2622 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2623 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2624 			if (wm_tx_offload(sc, txs, &cksumcmd,
2625 					  &cksumfields) != 0) {
2626 				/* Error message already displayed. */
2627 				bus_dmamap_unload(sc->sc_dmat, dmamap);
2628 				continue;
2629 			}
2630 		} else {
2631 			cksumcmd = 0;
2632 			cksumfields = 0;
2633 		}
2634 
2635 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2636 
2637 		/* Sync the DMA map. */
2638 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2639 		    BUS_DMASYNC_PREWRITE);
2640 
2641 		/*
2642 		 * Initialize the transmit descriptor.
2643 		 */
2644 		for (nexttx = sc->sc_txnext, seg = 0;
2645 		     seg < dmamap->dm_nsegs; seg++) {
2646 			for (seglen = dmamap->dm_segs[seg].ds_len,
2647 			     curaddr = dmamap->dm_segs[seg].ds_addr;
2648 			     seglen != 0;
2649 			     curaddr += curlen, seglen -= curlen,
2650 			     nexttx = WM_NEXTTX(sc, nexttx)) {
2651 				curlen = seglen;
2652 
2653 				/*
2654 				 * So says the Linux driver:
2655 				 * Work around for premature descriptor
2656 				 * write-backs in TSO mode.  Append a
2657 				 * 4-byte sentinel descriptor.
2658 				 */
2659 				if (use_tso &&
2660 				    seg == dmamap->dm_nsegs - 1 &&
2661 				    curlen > 8)
2662 					curlen -= 4;
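
				/*
				 * Shaving 4 bytes here leaves seglen == 4
				 * after this pass, so the loop runs once
				 * more and emits a trailing 4-byte
				 * descriptor: the sentinel counted by the
				 * extra segs_needed++ above.
				 */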
2663 
2664 				wm_set_dma_addr(
2665 				    &sc->sc_txdescs[nexttx].wtx_addr,
2666 				    curaddr);
2667 				sc->sc_txdescs[nexttx].wtx_cmdlen =
2668 				    htole32(cksumcmd | curlen);
2669 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2670 				    0;
2671 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2672 				    cksumfields;
2673 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2674 				lasttx = nexttx;
2675 
2676 				DPRINTF(WM_DEBUG_TX,
2677 				    ("%s: TX: desc %d: low %#" PRIxPADDR ", "
2678 				     "len %#04zx\n",
2679 				    device_xname(sc->sc_dev), nexttx,
2680 				    curaddr & 0xffffffffUL, curlen));
2681 			}
2682 		}
2683 
2684 		KASSERT(lasttx != -1);
2685 
2686 		/*
2687 		 * Set up the command byte on the last descriptor of
2688 		 * the packet.  If we're in the interrupt delay window,
2689 		 * delay the interrupt.
2690 		 */
2691 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
2692 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
2693 
2694 		/*
2695 		 * If VLANs are enabled and the packet has a VLAN tag, set
2696 		 * up the descriptor to encapsulate the packet for us.
2697 		 *
2698 		 * This is only valid on the last descriptor of the packet.
2699 		 */
2700 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2701 			sc->sc_txdescs[lasttx].wtx_cmdlen |=
2702 			    htole32(WTX_CMD_VLE);
2703 			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2704 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2705 		}
2706 
2707 		txs->txs_lastdesc = lasttx;
2708 
2709 		DPRINTF(WM_DEBUG_TX,
2710 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
2711 		    device_xname(sc->sc_dev),
2712 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2713 
2714 		/* Sync the descriptors we're using. */
2715 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2716 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2717 
2718 		/* Give the packet to the chip. */
2719 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2720 
2721 		DPRINTF(WM_DEBUG_TX,
2722 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2723 
2724 		DPRINTF(WM_DEBUG_TX,
2725 		    ("%s: TX: finished transmitting packet, job %d\n",
2726 		    device_xname(sc->sc_dev), sc->sc_txsnext));
2727 
2728 		/* Advance the tx pointer. */
2729 		sc->sc_txfree -= txs->txs_ndesc;
2730 		sc->sc_txnext = nexttx;
2731 
2732 		sc->sc_txsfree--;
2733 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2734 
2735 		/* Pass the packet to any BPF listeners. */
2736 		bpf_mtap(ifp, m0);
2737 	}
2738 
2739 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2740 		/* No more slots; notify upper layer. */
2741 		ifp->if_flags |= IFF_OACTIVE;
2742 	}
2743 
2744 	if (sc->sc_txfree != ofree) {
2745 		/* Set a watchdog timer in case the chip flakes out. */
2746 		ifp->if_timer = 5;
2747 	}
2748 }
2749 
2750 /*
2751  * wm_watchdog:		[ifnet interface function]
2752  *
2753  *	Watchdog timer handler.
2754  */
2755 static void
2756 wm_watchdog(struct ifnet *ifp)
2757 {
2758 	struct wm_softc *sc = ifp->if_softc;
2759 
2760 	/*
2761 	 * Since we're using delayed interrupts, sweep up
2762 	 * before we report an error.
2763 	 */
2764 	wm_txintr(sc);
2765 
2766 	if (sc->sc_txfree != WM_NTXDESC(sc)) {
2767 		log(LOG_ERR,
2768 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2769 		    device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2770 		    sc->sc_txnext);
2771 		ifp->if_oerrors++;
2772 
2773 		/* Reset the interface. */
2774 		(void) wm_init(ifp);
2775 	}
2776 
2777 	/* Try to get more packets going. */
2778 	wm_start(ifp);
2779 }
2780 
2781 static int
2782 wm_ifflags_cb(struct ethercom *ec)
2783 {
2784 	struct ifnet *ifp = &ec->ec_if;
2785 	struct wm_softc *sc = ifp->if_softc;
2786 	int change = ifp->if_flags ^ sc->sc_if_flags;
2787 
2788 	if (change != 0)
2789 		sc->sc_if_flags = ifp->if_flags;
2790 
2791 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
2792 		return ENETRESET;
2793 
2794 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2795 		wm_set_filter(sc);
2796 
2797 	wm_set_vlan(sc);
2798 
2799 	return 0;
2800 }
2801 
2802 /*
2803  * wm_ioctl:		[ifnet interface function]
2804  *
2805  *	Handle control requests from the operator.
2806  */
2807 static int
2808 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2809 {
2810 	struct wm_softc *sc = ifp->if_softc;
2811 	struct ifreq *ifr = (struct ifreq *) data;
2812 	struct ifaddr *ifa = (struct ifaddr *)data;
2813 	struct sockaddr_dl *sdl;
2814 	int s, error;
2815 
2816 	s = splnet();
2817 
2818 	switch (cmd) {
2819 	case SIOCSIFMEDIA:
2820 	case SIOCGIFMEDIA:
2821 		/* Flow control requires full-duplex mode. */
2822 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2823 		    (ifr->ifr_media & IFM_FDX) == 0)
2824 			ifr->ifr_media &= ~IFM_ETH_FMASK;
2825 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2826 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2827 				/* We can do both TXPAUSE and RXPAUSE. */
2828 				ifr->ifr_media |=
2829 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2830 			}
2831 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2832 		}
2833 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2834 		break;
2835 	case SIOCINITIFADDR:
2836 		if (ifa->ifa_addr->sa_family == AF_LINK) {
2837 			sdl = satosdl(ifp->if_dl->ifa_addr);
2838 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2839 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2840 			/* unicast address is first multicast entry */
2841 			wm_set_filter(sc);
2842 			error = 0;
2843 			break;
2844 		}
2845 		/*FALLTHROUGH*/
2846 	default:
2847 		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
2848 			break;
2849 
2850 		error = 0;
2851 
2852 		if (cmd == SIOCSIFCAP)
2853 			error = (*ifp->if_init)(ifp);
2854 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2855 			;
2856 		else if (ifp->if_flags & IFF_RUNNING) {
2857 			/*
2858 			 * Multicast list has changed; set the hardware filter
2859 			 * accordingly.
2860 			 */
2861 			wm_set_filter(sc);
2862 		}
2863 		break;
2864 	}
2865 
2866 	/* Try to get more packets going. */
2867 	wm_start(ifp);
2868 
2869 	splx(s);
2870 	return error;
2871 }
2872 
2873 /*
2874  * wm_intr:
2875  *
2876  *	Interrupt service routine.
2877  */
2878 static int
2879 wm_intr(void *arg)
2880 {
2881 	struct wm_softc *sc = arg;
2882 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2883 	uint32_t icr;
2884 	int handled = 0;
2885 
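	/*
	 * On these MACs, reading ICR acknowledges (clears) the asserted
	 * interrupt-cause bits, so each pass of this loop both fetches
	 * and acks pending causes; we loop until none of the causes we
	 * enabled (sc_icr) remain set.
	 */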
2886 	while (1 /* CONSTCOND */) {
2887 		icr = CSR_READ(sc, WMREG_ICR);
2888 		if ((icr & sc->sc_icr) == 0)
2889 			break;
2890 #if 0 /*NRND > 0*/
2891 		if (RND_ENABLED(&sc->rnd_source))
2892 			rnd_add_uint32(&sc->rnd_source, icr);
2893 #endif
2894 
2895 		handled = 1;
2896 
2897 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2898 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
2899 			DPRINTF(WM_DEBUG_RX,
2900 			    ("%s: RX: got Rx intr 0x%08x\n",
2901 			    device_xname(sc->sc_dev),
2902 			    icr & (ICR_RXDMT0|ICR_RXT0)));
2903 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
2904 		}
2905 #endif
2906 		wm_rxintr(sc);
2907 
2908 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2909 		if (icr & ICR_TXDW) {
2910 			DPRINTF(WM_DEBUG_TX,
2911 			    ("%s: TX: got TXDW interrupt\n",
2912 			    device_xname(sc->sc_dev)));
2913 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
2914 		}
2915 #endif
2916 		wm_txintr(sc);
2917 
2918 		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
2919 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
2920 			wm_linkintr(sc, icr);
2921 		}
2922 
2923 		if (icr & ICR_RXO) {
2924 #if defined(WM_DEBUG)
2925 			log(LOG_WARNING, "%s: Receive overrun\n",
2926 			    device_xname(sc->sc_dev));
2927 #endif /* defined(WM_DEBUG) */
2928 		}
2929 	}
2930 
2931 	if (handled) {
2932 		/* Try to get more packets going. */
2933 		wm_start(ifp);
2934 	}
2935 
2936 	return handled;
2937 }
2938 
2939 /*
2940  * wm_txintr:
2941  *
2942  *	Helper; handle transmit interrupts.
2943  */
2944 static void
2945 wm_txintr(struct wm_softc *sc)
2946 {
2947 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2948 	struct wm_txsoft *txs;
2949 	uint8_t status;
2950 	int i;
2951 
2952 	ifp->if_flags &= ~IFF_OACTIVE;
2953 
2954 	/*
2955 	 * Go through the Tx list and free mbufs for those
2956 	 * frames which have been transmitted.
2957 	 */
2958 	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
2959 	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
2960 		txs = &sc->sc_txsoft[i];
2961 
2962 		DPRINTF(WM_DEBUG_TX,
2963 		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
2964 
2965 		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
2966 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2967 
2968 		status =
2969 		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
2970 		if ((status & WTX_ST_DD) == 0) {
2971 			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
2972 			    BUS_DMASYNC_PREREAD);
2973 			break;
2974 		}
2975 
2976 		DPRINTF(WM_DEBUG_TX,
2977 		    ("%s: TX: job %d done: descs %d..%d\n",
2978 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
2979 		    txs->txs_lastdesc));
2980 
2981 		/*
2982 		 * XXX We should probably be using the statistics
2983 		 * XXX registers, but I don't know if they exist
2984 		 * XXX on chips before the i82544.
2985 		 */
2986 
2987 #ifdef WM_EVENT_COUNTERS
2988 		if (status & WTX_ST_TU)
2989 			WM_EVCNT_INCR(&sc->sc_ev_tu);
2990 #endif /* WM_EVENT_COUNTERS */
2991 
2992 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
2993 			ifp->if_oerrors++;
2994 			if (status & WTX_ST_LC)
2995 				log(LOG_WARNING, "%s: late collision\n",
2996 				    device_xname(sc->sc_dev));
2997 			else if (status & WTX_ST_EC) {
2998 				ifp->if_collisions += 16;
2999 				log(LOG_WARNING, "%s: excessive collisions\n",
3000 				    device_xname(sc->sc_dev));
3001 			}
3002 		} else
3003 			ifp->if_opackets++;
3004 
3005 		sc->sc_txfree += txs->txs_ndesc;
3006 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
3007 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3008 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3009 		m_freem(txs->txs_mbuf);
3010 		txs->txs_mbuf = NULL;
3011 	}
3012 
3013 	/* Update the dirty transmit buffer pointer. */
3014 	sc->sc_txsdirty = i;
3015 	DPRINTF(WM_DEBUG_TX,
3016 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
3017 
3018 	/*
3019 	 * If there are no more pending transmissions, cancel the watchdog
3020 	 * timer.
3021 	 */
3022 	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
3023 		ifp->if_timer = 0;
3024 }
3025 
3026 /*
3027  * wm_rxintr:
3028  *
3029  *	Helper; handle receive interrupts.
3030  */
3031 static void
3032 wm_rxintr(struct wm_softc *sc)
3033 {
3034 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3035 	struct wm_rxsoft *rxs;
3036 	struct mbuf *m;
3037 	int i, len;
3038 	uint8_t status, errors;
3039 	uint16_t vlantag;
3040 
3041 	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
3042 		rxs = &sc->sc_rxsoft[i];
3043 
3044 		DPRINTF(WM_DEBUG_RX,
3045 		    ("%s: RX: checking descriptor %d\n",
3046 		    device_xname(sc->sc_dev), i));
3047 
3048 		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3049 
3050 		status = sc->sc_rxdescs[i].wrx_status;
3051 		errors = sc->sc_rxdescs[i].wrx_errors;
3052 		len = le16toh(sc->sc_rxdescs[i].wrx_len);
3053 		vlantag = sc->sc_rxdescs[i].wrx_special;
3054 
3055 		if ((status & WRX_ST_DD) == 0) {
3056 			/*
3057 			 * We have processed all of the receive descriptors.
3058 			 */
3059 			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
3060 			break;
3061 		}
3062 
3063 		if (__predict_false(sc->sc_rxdiscard)) {
3064 			DPRINTF(WM_DEBUG_RX,
3065 			    ("%s: RX: discarding contents of descriptor %d\n",
3066 			    device_xname(sc->sc_dev), i));
3067 			WM_INIT_RXDESC(sc, i);
3068 			if (status & WRX_ST_EOP) {
3069 				/* Reset our state. */
3070 				DPRINTF(WM_DEBUG_RX,
3071 				    ("%s: RX: resetting rxdiscard -> 0\n",
3072 				    device_xname(sc->sc_dev)));
3073 				sc->sc_rxdiscard = 0;
3074 			}
3075 			continue;
3076 		}
3077 
3078 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3079 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3080 
3081 		m = rxs->rxs_mbuf;
3082 
3083 		/*
3084 		 * Add a new receive buffer to the ring, unless of
3085 		 * course the length is zero. Treat the latter as a
3086 		 * failed mapping.
3087 		 */
3088 		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
3089 			/*
3090 			 * Failed, throw away what we've done so
3091 			 * far, and discard the rest of the packet.
3092 			 */
3093 			ifp->if_ierrors++;
3094 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3095 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3096 			WM_INIT_RXDESC(sc, i);
3097 			if ((status & WRX_ST_EOP) == 0)
3098 				sc->sc_rxdiscard = 1;
3099 			if (sc->sc_rxhead != NULL)
3100 				m_freem(sc->sc_rxhead);
3101 			WM_RXCHAIN_RESET(sc);
3102 			DPRINTF(WM_DEBUG_RX,
3103 			    ("%s: RX: Rx buffer allocation failed, "
3104 			    "dropping packet%s\n", device_xname(sc->sc_dev),
3105 			    sc->sc_rxdiscard ? " (discard)" : ""));
3106 			continue;
3107 		}
3108 
3109 		m->m_len = len;
3110 		sc->sc_rxlen += len;
3111 		DPRINTF(WM_DEBUG_RX,
3112 		    ("%s: RX: buffer at %p len %d\n",
3113 		    device_xname(sc->sc_dev), m->m_data, len));
3114 
3115 		/*
3116 		 * If this is not the end of the packet, keep
3117 		 * looking.
3118 		 */
3119 		if ((status & WRX_ST_EOP) == 0) {
3120 			WM_RXCHAIN_LINK(sc, m);
3121 			DPRINTF(WM_DEBUG_RX,
3122 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
3123 			    device_xname(sc->sc_dev), sc->sc_rxlen));
3124 			continue;
3125 		}
3126 
3127 		/*
3128 		 * Okay, we have the entire packet now.  The chip is
3129 		 * configured to include the FCS (not all chips can
3130 		 * be configured to strip it), so we need to trim it.
3131 		 * May need to adjust length of previous mbuf in the
3132 		 * chain if the current mbuf is too short.
3133 		 */
3134 		if (m->m_len < ETHER_CRC_LEN) {
3135 			sc->sc_rxtail->m_len -= (ETHER_CRC_LEN - m->m_len);
3136 			m->m_len = 0;
3137 		} else {
3138 			m->m_len -= ETHER_CRC_LEN;
3139 		}
3140 		len = sc->sc_rxlen - ETHER_CRC_LEN;
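
		/*
		 * Worked example (illustrative): a 68-byte frame (64 data
		 * bytes + 4-byte FCS) that arrived as 66- and 2-byte
		 * buffers: the final mbuf is shorter than ETHER_CRC_LEN,
		 * so the tail mbuf is trimmed by 2 (66 -> 64), the final
		 * mbuf's length drops to 0, and len = 68 - 4 = 64.
		 */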
3141 
3142 		WM_RXCHAIN_LINK(sc, m);
3143 
3144 		*sc->sc_rxtailp = NULL;
3145 		m = sc->sc_rxhead;
3146 
3147 		WM_RXCHAIN_RESET(sc);
3148 
3149 		DPRINTF(WM_DEBUG_RX,
3150 		    ("%s: RX: have entire packet, len -> %d\n",
3151 		    device_xname(sc->sc_dev), len));
3152 
3153 		/*
3154 		 * If an error occurred, update stats and drop the packet.
3155 		 */
3156 		if (errors &
3157 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
3158 			if (errors & WRX_ER_SE)
3159 				log(LOG_WARNING, "%s: symbol error\n",
3160 				    device_xname(sc->sc_dev));
3161 			else if (errors & WRX_ER_SEQ)
3162 				log(LOG_WARNING, "%s: receive sequence error\n",
3163 				    device_xname(sc->sc_dev));
3164 			else if (errors & WRX_ER_CE)
3165 				log(LOG_WARNING, "%s: CRC error\n",
3166 				    device_xname(sc->sc_dev));
3167 			m_freem(m);
3168 			continue;
3169 		}
3170 
3171 		/*
3172 		 * No errors.  Receive the packet.
3173 		 */
3174 		m->m_pkthdr.rcvif = ifp;
3175 		m->m_pkthdr.len = len;
3176 
3177 		/*
3178 		 * If VLANs are enabled, VLAN packets have been unwrapped
3179 		 * for us.  Associate the tag with the packet.
3180 		 */
3181 		if ((status & WRX_ST_VP) != 0) {
3182 			VLAN_INPUT_TAG(ifp, m,
3183 			    le16toh(vlantag),
3184 			    continue);
3185 		}
3186 
3187 		/*
3188 		 * Set up checksum info for this packet.
3189 		 */
3190 		if ((status & WRX_ST_IXSM) == 0) {
3191 			if (status & WRX_ST_IPCS) {
3192 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
3193 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
3194 				if (errors & WRX_ER_IPE)
3195 					m->m_pkthdr.csum_flags |=
3196 					    M_CSUM_IPv4_BAD;
3197 			}
3198 			if (status & WRX_ST_TCPCS) {
3199 				/*
3200 				 * Note: we don't know if this was TCP or UDP,
3201 				 * so we just set both bits, and expect the
3202 				 * upper layers to deal.
3203 				 */
3204 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
3205 				m->m_pkthdr.csum_flags |=
3206 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
3207 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
3208 				if (errors & WRX_ER_TCPE)
3209 					m->m_pkthdr.csum_flags |=
3210 					    M_CSUM_TCP_UDP_BAD;
3211 			}
3212 		}
3213 
3214 		ifp->if_ipackets++;
3215 
3216 		/* Pass this up to any BPF listeners. */
3217 		bpf_mtap(ifp, m);
3218 
3219 		/* Pass it on. */
3220 		(*ifp->if_input)(ifp, m);
3221 	}
3222 
3223 	/* Update the receive pointer. */
3224 	sc->sc_rxptr = i;
3225 
3226 	DPRINTF(WM_DEBUG_RX,
3227 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
3228 }
3229 
3230 /*
3231  * wm_linkintr_gmii:
3232  *
3233  *	Helper; handle link interrupts for GMII.
3234  */
3235 static void
3236 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
3237 {
3238 
3239 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3240 		__func__));
3241 
3242 	if (icr & ICR_LSC) {
3243 		DPRINTF(WM_DEBUG_LINK,
3244 		    ("%s: LINK: LSC -> mii_tick\n",
3245 			device_xname(sc->sc_dev)));
3246 		mii_tick(&sc->sc_mii);
3247 		if (sc->sc_type == WM_T_82543) {
3248 			int miistatus, active;
3249 
3250 			/*
3251 			 * With 82543, we need to force speed and
3252 			 * duplex on the MAC equal to what the PHY
3253 			 * speed and duplex configuration is.
3254 			 */
3255 			miistatus = sc->sc_mii.mii_media_status;
3256 
3257 			if (miistatus & IFM_ACTIVE) {
3258 				active = sc->sc_mii.mii_media_active;
3259 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
3260 				switch (IFM_SUBTYPE(active)) {
3261 				case IFM_10_T:
3262 					sc->sc_ctrl |= CTRL_SPEED_10;
3263 					break;
3264 				case IFM_100_TX:
3265 					sc->sc_ctrl |= CTRL_SPEED_100;
3266 					break;
3267 				case IFM_1000_T:
3268 					sc->sc_ctrl |= CTRL_SPEED_1000;
3269 					break;
3270 				default:
3271 					/*
3272 					 * fiber?
3273 					 * Should not enter here.
3274 					 */
3275 					printf("unknown media (%x)\n",
3276 					    active);
3277 					break;
3278 				}
3279 				if (active & IFM_FDX)
3280 					sc->sc_ctrl |= CTRL_FD;
3281 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3282 			}
3283 		} else if ((sc->sc_type == WM_T_ICH8)
3284 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
3285 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
3286 		} else if (sc->sc_type == WM_T_PCH) {
3287 			wm_k1_gig_workaround_hv(sc,
3288 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
3289 		}
3290 
3291 		if ((sc->sc_phytype == WMPHY_82578)
3292 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
3293 			== IFM_1000_T)) {
3294 
3295 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
3296 				delay(200*1000); /* XXX too big */
3297 
3298 				/* Link stall fix for link up */
3299 				wm_gmii_hv_writereg(sc->sc_dev, 1,
3300 				    HV_MUX_DATA_CTRL,
3301 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
3302 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
3303 				wm_gmii_hv_writereg(sc->sc_dev, 1,
3304 				    HV_MUX_DATA_CTRL,
3305 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
3306 			}
3307 		}
3308 	} else if (icr & ICR_RXSEQ) {
3309 		DPRINTF(WM_DEBUG_LINK,
3310 		    ("%s: LINK Receive sequence error\n",
3311 			device_xname(sc->sc_dev)));
3312 	}
3313 }
3314 
3315 /*
3316  * wm_linkintr_tbi:
3317  *
3318  *	Helper; handle link interrupts for TBI mode.
3319  */
3320 static void
3321 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
3322 {
3323 	uint32_t status;
3324 
3325 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3326 		__func__));
3327 
3328 	status = CSR_READ(sc, WMREG_STATUS);
3329 	if (icr & ICR_LSC) {
3330 		if (status & STATUS_LU) {
3331 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
3332 			    device_xname(sc->sc_dev),
3333 			    (status & STATUS_FD) ? "FDX" : "HDX"));
3334 			/*
3335 			 * NOTE: CTRL will update TFCE and RFCE automatically,
3336 			 * so we should update sc->sc_ctrl
3337 			 */
3338 
3339 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3340 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3341 			sc->sc_fcrtl &= ~FCRTL_XONE;
3342 			if (status & STATUS_FD)
3343 				sc->sc_tctl |=
3344 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3345 			else
3346 				sc->sc_tctl |=
3347 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3348 			if (sc->sc_ctrl & CTRL_TFCE)
3349 				sc->sc_fcrtl |= FCRTL_XONE;
3350 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3351 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3352 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
3353 				      sc->sc_fcrtl);
3354 			sc->sc_tbi_linkup = 1;
3355 		} else {
3356 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
3357 			    device_xname(sc->sc_dev)));
3358 			sc->sc_tbi_linkup = 0;
3359 		}
3360 		wm_tbi_set_linkled(sc);
3361 	} else if (icr & ICR_RXCFG) {
3362 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
3363 		    device_xname(sc->sc_dev)));
3364 		sc->sc_tbi_nrxcfg++;
3365 		wm_check_for_link(sc);
3366 	} else if (icr & ICR_RXSEQ) {
3367 		DPRINTF(WM_DEBUG_LINK,
3368 		    ("%s: LINK: Receive sequence error\n",
3369 		    device_xname(sc->sc_dev)));
3370 	}
3371 }
3372 
3373 /*
3374  * wm_linkintr:
3375  *
3376  *	Helper; handle link interrupts.
3377  */
3378 static void
3379 wm_linkintr(struct wm_softc *sc, uint32_t icr)
3380 {
3381 
3382 	if (sc->sc_flags & WM_F_HAS_MII)
3383 		wm_linkintr_gmii(sc, icr);
3384 	else
3385 		wm_linkintr_tbi(sc, icr);
3386 }
3387 
3388 /*
3389  * wm_tick:
3390  *
3391  *	One second timer, used to check link status, sweep up
3392  *	completed transmit jobs, etc.
3393  */
3394 static void
3395 wm_tick(void *arg)
3396 {
3397 	struct wm_softc *sc = arg;
3398 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3399 	int s;
3400 
3401 	s = splnet();
3402 
3403 	if (sc->sc_type >= WM_T_82542_2_1) {
3404 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3405 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3406 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3407 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3408 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3409 	}
3410 
3411 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3412 	ifp->if_ierrors += 0ULL /* ensure quad_t */
3413 	    + CSR_READ(sc, WMREG_CRCERRS)
3414 	    + CSR_READ(sc, WMREG_ALGNERRC)
3415 	    + CSR_READ(sc, WMREG_SYMERRC)
3416 	    + CSR_READ(sc, WMREG_RXERRC)
3417 	    + CSR_READ(sc, WMREG_SEC)
3418 	    + CSR_READ(sc, WMREG_CEXTERR)
3419 	    + CSR_READ(sc, WMREG_RLEC);
3420 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
3421 
3422 	if (sc->sc_flags & WM_F_HAS_MII)
3423 		mii_tick(&sc->sc_mii);
3424 	else
3425 		wm_tbi_check_link(sc);
3426 
3427 	splx(s);
3428 
3429 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3430 }
3431 
3432 /*
3433  * wm_reset:
3434  *
3435  *	Reset the i82542 chip.
3436  */
3437 static void
3438 wm_reset(struct wm_softc *sc)
3439 {
3440 	int phy_reset = 0;
3441 	uint32_t reg, mask;
3442 	int i;
3443 
3444 	/*
3445 	 * Allocate on-chip memory according to the MTU size.
3446 	 * The Packet Buffer Allocation register must be written
3447 	 * before the chip is reset.
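	 * PBA sets the receive share of the on-chip packet buffer; the
	 * remainder is left to transmit.  On the 82547 the sc_txfifo_*
	 * fields below are sized from that remainder of the 40K buffer.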
3448 	 */
3449 	switch (sc->sc_type) {
3450 	case WM_T_82547:
3451 	case WM_T_82547_2:
3452 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3453 		    PBA_22K : PBA_30K;
3454 		sc->sc_txfifo_head = 0;
3455 		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3456 		sc->sc_txfifo_size =
3457 		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3458 		sc->sc_txfifo_stall = 0;
3459 		break;
3460 	case WM_T_82571:
3461 	case WM_T_82572:
3462 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
3463 	case WM_T_80003:
3464 		sc->sc_pba = PBA_32K;
3465 		break;
3466 	case WM_T_82580:
3467 	case WM_T_82580ER:
3468 		sc->sc_pba = PBA_35K;
3469 		break;
3470 	case WM_T_82576:
3471 		sc->sc_pba = PBA_64K;
3472 		break;
3473 	case WM_T_82573:
3474 		sc->sc_pba = PBA_12K;
3475 		break;
3476 	case WM_T_82574:
3477 	case WM_T_82583:
3478 		sc->sc_pba = PBA_20K;
3479 		break;
3480 	case WM_T_ICH8:
3481 		sc->sc_pba = PBA_8K;
3482 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3483 		break;
3484 	case WM_T_ICH9:
3485 	case WM_T_ICH10:
3486 		sc->sc_pba = PBA_10K;
3487 		break;
3488 	case WM_T_PCH:
3489 	case WM_T_PCH2:
3490 		sc->sc_pba = PBA_26K;
3491 		break;
3492 	default:
3493 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3494 		    PBA_40K : PBA_48K;
3495 		break;
3496 	}
3497 	CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3498 
3499 	/* Prevent the PCI-E bus from sticking */
3500 	if (sc->sc_flags & WM_F_PCIE) {
3501 		int timeout = 800;
3502 
3503 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
3504 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3505 
3506 		while (timeout--) {
3507 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
3508 				break;
3509 			delay(100);
3510 		}
3511 	}
3512 
3513 	/* Set the completion timeout for interface */
3514 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
3515 		wm_set_pcie_completion_timeout(sc);
3516 
3517 	/* Clear interrupt */
3518 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3519 
3520 	/* Stop the transmit and receive processes. */
3521 	CSR_WRITE(sc, WMREG_RCTL, 0);
3522 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3523 	sc->sc_rctl &= ~RCTL_EN;
3524 
3525 	/* XXX set_tbi_sbp_82543() */
3526 
3527 	delay(10*1000);
3528 
3529 	/* Must acquire the MDIO ownership before MAC reset */
3530 	switch (sc->sc_type) {
3531 	case WM_T_82573:
3532 	case WM_T_82574:
3533 	case WM_T_82583:
3534 		i = 0;
3535 		reg = CSR_READ(sc, WMREG_EXTCNFCTR)
3536 		    | EXTCNFCTR_MDIO_SW_OWNERSHIP;
3537 		do {
3538 			CSR_WRITE(sc, WMREG_EXTCNFCTR,
3539 			    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
3540 			reg = CSR_READ(sc, WMREG_EXTCNFCTR);
3541 			if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
3542 				break;
3543 			reg |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
3544 			delay(2*1000);
3545 			i++;
3546 		} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
3547 		break;
3548 	default:
3549 		break;
3550 	}
3551 
3552 	/*
3553 	 * 82541 Errata 29? & 82547 Errata 28?
3554 	 * See also the description about PHY_RST bit in CTRL register
3555 	 * in 8254x_GBe_SDM.pdf.
3556 	 */
3557 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3558 		CSR_WRITE(sc, WMREG_CTRL,
3559 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3560 		delay(5000);
3561 	}
3562 
3563 	switch (sc->sc_type) {
3564 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3565 	case WM_T_82541:
3566 	case WM_T_82541_2:
3567 	case WM_T_82547:
3568 	case WM_T_82547_2:
3569 		/*
3570 		 * On some chipsets, a reset through a memory-mapped write
3571 		 * cycle can cause the chip to reset before completing the
3572 		 * write cycle.  This causes major headaches that can be
3573 		 * avoided by issuing the reset via indirect register writes
3574 		 * through I/O space.
3575 		 *
3576 		 * So, if we successfully mapped the I/O BAR at attach time,
3577 		 * use that.  Otherwise, try our luck with a memory-mapped
3578 		 * reset.
3579 		 */
3580 		if (sc->sc_flags & WM_F_IOH_VALID)
3581 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3582 		else
3583 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3584 		break;
3585 	case WM_T_82545_3:
3586 	case WM_T_82546_3:
3587 		/* Use the shadow control register on these chips. */
3588 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3589 		break;
3590 	case WM_T_80003:
3591 		mask = swfwphysem[sc->sc_funcid];
3592 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3593 		wm_get_swfw_semaphore(sc, mask);
3594 		CSR_WRITE(sc, WMREG_CTRL, reg);
3595 		wm_put_swfw_semaphore(sc, mask);
3596 		break;
3597 	case WM_T_ICH8:
3598 	case WM_T_ICH9:
3599 	case WM_T_ICH10:
3600 	case WM_T_PCH:
3601 	case WM_T_PCH2:
3602 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3603 		if (wm_check_reset_block(sc) == 0) {
3604 			/*
3605 			 * Gate automatic PHY configuration by hardware on
3606 			 * managed 82579
3607 			 */
3608 			if ((sc->sc_type == WM_T_PCH2)
3609 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3610 				!= 0))
3611 				wm_gate_hw_phy_config_ich8lan(sc, 1);
3612 
3614 			reg |= CTRL_PHY_RESET;
3615 			phy_reset = 1;
3616 		}
3617 		wm_get_swfwhw_semaphore(sc);
3618 		CSR_WRITE(sc, WMREG_CTRL, reg);
3619 		delay(20*1000);
3620 		wm_put_swfwhw_semaphore(sc);
3621 		break;
3622 	case WM_T_82542_2_0:
3623 	case WM_T_82542_2_1:
3624 	case WM_T_82543:
3625 	case WM_T_82540:
3626 	case WM_T_82545:
3627 	case WM_T_82546:
3628 	case WM_T_82571:
3629 	case WM_T_82572:
3630 	case WM_T_82573:
3631 	case WM_T_82574:
3632 	case WM_T_82575:
3633 	case WM_T_82576:
3634 	case WM_T_82580:
3635 	case WM_T_82580ER:
3636 	case WM_T_82583:
3637 	default:
3638 		/* Everything else can safely use the documented method. */
3639 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3640 		break;
3641 	}
3642 
3643 	if (phy_reset != 0)
3644 		wm_get_cfg_done(sc);
3645 
3646 	/* reload EEPROM */
3647 	switch (sc->sc_type) {
3648 	case WM_T_82542_2_0:
3649 	case WM_T_82542_2_1:
3650 	case WM_T_82543:
3651 	case WM_T_82544:
3652 		delay(10);
3653 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3654 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3655 		delay(2000);
3656 		break;
3657 	case WM_T_82540:
3658 	case WM_T_82545:
3659 	case WM_T_82545_3:
3660 	case WM_T_82546:
3661 	case WM_T_82546_3:
3662 		delay(5*1000);
3663 		/* XXX Disable HW ARPs on ASF enabled adapters */
3664 		break;
3665 	case WM_T_82541:
3666 	case WM_T_82541_2:
3667 	case WM_T_82547:
3668 	case WM_T_82547_2:
3669 		delay(20000);
3670 		/* XXX Disable HW ARPs on ASF enabled adapters */
3671 		break;
3672 	case WM_T_82571:
3673 	case WM_T_82572:
3674 	case WM_T_82573:
3675 	case WM_T_82574:
3676 	case WM_T_82583:
3677 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3678 			delay(10);
3679 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3680 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3681 		}
3682 		/* check EECD_EE_AUTORD */
3683 		wm_get_auto_rd_done(sc);
3684 		/*
3685 		 * PHY configuration from the NVM starts only after
3686 		 * EECD_AUTO_RD is set.
3687 		 */
3688 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3689 		    || (sc->sc_type == WM_T_82583))
3690 			delay(25*1000);
3691 		break;
3692 	case WM_T_82575:
3693 	case WM_T_82576:
3694 	case WM_T_82580:
3695 	case WM_T_82580ER:
3696 	case WM_T_80003:
3697 	case WM_T_ICH8:
3698 	case WM_T_ICH9:
3699 		/* check EECD_EE_AUTORD */
3700 		wm_get_auto_rd_done(sc);
3701 		break;
3702 	case WM_T_ICH10:
3703 	case WM_T_PCH:
3704 	case WM_T_PCH2:
3705 		wm_lan_init_done(sc);
3706 		break;
3707 	default:
3708 		panic("%s: unknown type\n", __func__);
3709 	}
3710 
3711 	/* Check whether EEPROM is present or not */
3712 	switch (sc->sc_type) {
3713 	case WM_T_82575:
3714 	case WM_T_82576:
3715 #if 0 /* XXX */
3716 	case WM_T_82580:
3717 	case WM_T_82580ER:
3718 #endif
3719 	case WM_T_ICH8:
3720 	case WM_T_ICH9:
3721 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3722 			/* Not found */
3723 			sc->sc_flags |= WM_F_EEPROM_INVALID;
3724 			if ((sc->sc_type == WM_T_82575)
3725 			    || (sc->sc_type == WM_T_82576)
3726 			    || (sc->sc_type == WM_T_82580)
3727 			    || (sc->sc_type == WM_T_82580ER))
3728 				wm_reset_init_script_82575(sc);
3729 		}
3730 		break;
3731 	default:
3732 		break;
3733 	}
3734 
3735 	if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)) {
3736 		/* clear global device reset status bit */
3737 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
3738 	}
3739 
3740 	/* Clear any pending interrupt events. */
3741 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3742 	reg = CSR_READ(sc, WMREG_ICR);
3743 
3744 	/* reload sc_ctrl */
3745 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3746 
3747 	/* dummy read from WUC */
3748 	if (sc->sc_type == WM_T_PCH)
3749 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3750 	/*
3751 	 * For PCH, this write will make sure that any noise will be detected
3752 	 * as a CRC error and be dropped rather than show up as a bad packet
3753 	 * to the DMA engine
3754 	 */
3755 	if (sc->sc_type == WM_T_PCH)
3756 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3757 
3758 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3759 		CSR_WRITE(sc, WMREG_WUC, 0);
3760 
3761 	/* XXX need special handling for 82580 */
3762 }
3763 
3764 static void
3765 wm_set_vlan(struct wm_softc *sc)
3766 {
3767 	/* Deal with VLAN enables. */
3768 	if (VLAN_ATTACHED(&sc->sc_ethercom))
3769 		sc->sc_ctrl |= CTRL_VME;
3770 	else
3771 		sc->sc_ctrl &= ~CTRL_VME;
3772 
3773 	/* Write the control registers. */
3774 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3775 }
3776 
3777 /*
3778  * wm_init:		[ifnet interface function]
3779  *
3780  *	Initialize the interface.  Must be called at splnet().
3781  */
3782 static int
3783 wm_init(struct ifnet *ifp)
3784 {
3785 	struct wm_softc *sc = ifp->if_softc;
3786 	struct wm_rxsoft *rxs;
3787 	int i, error = 0;
3788 	uint32_t reg;
3789 
3790 	/*
3791 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3792 	 * There is a small but measurable benefit to avoiding the adjustment
3793 	 * of the descriptor so that the headers are aligned, for normal mtu,
3794 	 * on such platforms.  One possibility is that the DMA itself is
3795 	 * slightly more efficient if the front of the entire packet (instead
3796 	 * of the front of the headers) is aligned.
3797 	 *
3798 	 * Note we must always set align_tweak to 0 if we are using
3799 	 * jumbo frames.
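	 *
	 * (A 2 byte tweak, as chosen below, pushes the 14 byte Ethernet
	 * header so that the IP header behind it lands on a 4-byte
	 * boundary.)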
3800 	 */
3801 #ifdef __NO_STRICT_ALIGNMENT
3802 	sc->sc_align_tweak = 0;
3803 #else
3804 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3805 		sc->sc_align_tweak = 0;
3806 	else
3807 		sc->sc_align_tweak = 2;
3808 #endif /* __NO_STRICT_ALIGNMENT */
3809 
3810 	/* Cancel any pending I/O. */
3811 	wm_stop(ifp, 0);
3812 
3813 	/* update statistics before reset */
3814 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3815 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3816 
3817 	/* Reset the chip to a known state. */
3818 	wm_reset(sc);
3819 
3820 	switch (sc->sc_type) {
3821 	case WM_T_82571:
3822 	case WM_T_82572:
3823 	case WM_T_82573:
3824 	case WM_T_82574:
3825 	case WM_T_82583:
3826 	case WM_T_80003:
3827 	case WM_T_ICH8:
3828 	case WM_T_ICH9:
3829 	case WM_T_ICH10:
3830 	case WM_T_PCH:
3831 	case WM_T_PCH2:
3832 		if (wm_check_mng_mode(sc) != 0)
3833 			wm_get_hw_control(sc);
3834 		break;
3835 	default:
3836 		break;
3837 	}
3838 
3839 	/* Reset the PHY. */
3840 	if (sc->sc_flags & WM_F_HAS_MII)
3841 		wm_gmii_reset(sc);
3842 
3843 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
3844 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
3845 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2))
3846 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
3847 
3848 	/* Initialize the transmit descriptor ring. */
3849 	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3850 	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3851 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3852 	sc->sc_txfree = WM_NTXDESC(sc);
3853 	sc->sc_txnext = 0;
3854 
3855 	if (sc->sc_type < WM_T_82543) {
3856 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
3857 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
3858 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3859 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3860 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3861 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3862 	} else {
3863 		CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
3864 		CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
3865 		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3866 		CSR_WRITE(sc, WMREG_TDH, 0);
3867 		CSR_WRITE(sc, WMREG_TIDV, 375);		/* ITR / 4 */
3868 		CSR_WRITE(sc, WMREG_TADV, 375);		/* should be same */
3869 
3870 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3871 			/*
3872 			 * Don't write TDT before TCTL.EN is set.
3873 			 * See the documentation.
3874 			 */
3875 			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
3876 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
3877 			    | TXDCTL_WTHRESH(0));
3878 		else {
3879 			CSR_WRITE(sc, WMREG_TDT, 0);
3880 			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3881 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3882 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3883 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3884 		}
3885 	}
3886 	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3887 	CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3888 
3889 	/* Initialize the transmit job descriptors. */
3890 	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3891 		sc->sc_txsoft[i].txs_mbuf = NULL;
3892 	sc->sc_txsfree = WM_TXQUEUELEN(sc);
3893 	sc->sc_txsnext = 0;
3894 	sc->sc_txsdirty = 0;
3895 
3896 	/*
3897 	 * Initialize the receive descriptor and receive job
3898 	 * descriptor rings.
3899 	 */
3900 	if (sc->sc_type < WM_T_82543) {
3901 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3902 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3903 		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3904 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3905 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3906 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3907 
3908 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3909 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3910 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3911 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3912 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3913 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3914 	} else {
3915 		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3916 		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3917 		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3918 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3919 			CSR_WRITE(sc, WMREG_EITR(0), 450);
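			/*
			 * SRRCTL expresses the receive buffer size in
			 * (1 << SRRCTL_BSIZEPKT_SHIFT) byte units, so
			 * MCLBYTES must be a multiple of that unit.
			 */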
3920 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
3921 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
3922 			CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
3923 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
3924 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
3925 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
3926 			    | RXDCTL_WTHRESH(1));
3927 		} else {
3928 			CSR_WRITE(sc, WMREG_RDH, 0);
3929 			CSR_WRITE(sc, WMREG_RDT, 0);
3930 			CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD);	/* ITR/4 */
3931 			CSR_WRITE(sc, WMREG_RADV, 375);		/* MUST be same */
3932 		}
3933 	}
3934 	for (i = 0; i < WM_NRXDESC; i++) {
3935 		rxs = &sc->sc_rxsoft[i];
3936 		if (rxs->rxs_mbuf == NULL) {
3937 			if ((error = wm_add_rxbuf(sc, i)) != 0) {
3938 				log(LOG_ERR, "%s: unable to allocate or map rx "
3939 				    "buffer %d, error = %d\n",
3940 				    device_xname(sc->sc_dev), i, error);
3941 				/*
3942 				 * XXX Should attempt to run with fewer receive
3943 				 * XXX buffers instead of just failing.
3944 				 */
3945 				wm_rxdrain(sc);
3946 				goto out;
3947 			}
3948 		} else {
3949 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3950 				WM_INIT_RXDESC(sc, i);
3951 			/*
3952 			 * For 82575 and newer devices, the RX descriptors
3953 			 * must be initialized after RCTL.EN is set in
3954 			 * wm_set_filter().
3955 			 */
3956 		}
3957 	}
3958 	sc->sc_rxptr = 0;
3959 	sc->sc_rxdiscard = 0;
3960 	WM_RXCHAIN_RESET(sc);
3961 
3962 	/*
3963 	 * Clear out the VLAN table -- we don't use it (yet).
3964 	 */
3965 	CSR_WRITE(sc, WMREG_VET, 0);
3966 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
3967 		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3968 
3969 	/*
3970 	 * Set up flow-control parameters.
3971 	 *
3972 	 * XXX Values could probably stand some tuning.
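	 *
	 * FCAL/FCAH carry the reserved PAUSE multicast address
	 * (01:80:c2:00:00:01) and FCT the MAC control Ethertype
	 * (0x8808) that received PAUSE frames must match.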
3973 	 */
3974 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
3975 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
3976 	    && (sc->sc_type != WM_T_PCH2)) {
3977 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3978 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3979 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3980 	}
3981 
3982 	sc->sc_fcrtl = FCRTL_DFLT;
3983 	if (sc->sc_type < WM_T_82543) {
3984 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3985 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3986 	} else {
3987 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3988 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3989 	}
3990 
3991 	if (sc->sc_type == WM_T_80003)
3992 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
3993 	else
3994 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3995 
3996 	/* Writes the control register. */
3997 	wm_set_vlan(sc);
3998 
3999 	if (sc->sc_flags & WM_F_HAS_MII) {
4000 		int val;
4001 
4002 		switch (sc->sc_type) {
4003 		case WM_T_80003:
4004 		case WM_T_ICH8:
4005 		case WM_T_ICH9:
4006 		case WM_T_ICH10:
4007 		case WM_T_PCH:
4008 		case WM_T_PCH2:
4009 			/*
4010 			 * Set the MAC to wait the maximum time between each
4011 			 * iteration and to increase the max iterations when
4012 			 * polling the PHY; this fixes erroneous timeouts at
4013 			 * 10Mbps.
4014 			 */
4015 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4016 			    0xFFFF);
4017 			val = wm_kmrn_readreg(sc,
4018 			    KUMCTRLSTA_OFFSET_INB_PARAM);
4019 			val |= 0x3F;
4020 			wm_kmrn_writereg(sc,
4021 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
4022 			break;
4023 		default:
4024 			break;
4025 		}
4026 
4027 		if (sc->sc_type == WM_T_80003) {
4028 			val = CSR_READ(sc, WMREG_CTRL_EXT);
4029 			val &= ~CTRL_EXT_LINK_MODE_MASK;
4030 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4031 
4032 			/* Bypass RX and TX FIFO's */
4033 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4034 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4035 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4036 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4037 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4038 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4039 		}
4040 	}
4041 #if 0
4042 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4043 #endif
4044 
4045 	/*
4046 	 * Set up checksum offload parameters.
4047 	 */
4048 	reg = CSR_READ(sc, WMREG_RXCSUM);
4049 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4050 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4051 		reg |= RXCSUM_IPOFL;
4052 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4053 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4054 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4055 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4056 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
4057 
4058 	/* Reset TBI's RXCFG count */
4059 	sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
4060 
4061 	/*
4062 	 * Set up the interrupt registers.
4063 	 */
4064 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4065 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4066 	    ICR_RXO | ICR_RXT0;
4067 	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
4068 		sc->sc_icr |= ICR_RXCFG;
4069 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4070 
4071 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4072 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4073 		 || (sc->sc_type == WM_T_PCH2)) {
4074 		reg = CSR_READ(sc, WMREG_KABGTXD);
4075 		reg |= KABGTXD_BGSQLBIAS;
4076 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
4077 	}
4078 
4079 	/* Set up the inter-packet gap. */
4080 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4081 
4082 	if (sc->sc_type >= WM_T_82543) {
4083 		/*
4084 		 * Set up the interrupt throttling register (units of 256ns)
4085 		 * Note that a footnote in Intel's documentation says this
4086 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4087 		 * or 10Mbit mode.  Empirically, it appears to be the case
4088 		 * that that is also true for the 1024ns units of the other
4089 		 * interrupt-related timer registers -- so, really, we ought
4090 		 * to divide this value by 4 when the link speed is low.
4091 		 *
4092 		 * XXX implement this division at link speed change!
4093 		 */
4094 
4095 		 /*
4096 		  * For N interrupts/sec, set this value to:
4097 		  * 1000000000 / (N * 256).  Note that we set the
4098 		  * absolute and packet timer values to this value
4099 		  * divided by 4 to get "simple timer" behavior.
4100 		  */
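		 /*
		  * Worked example for the value used below: sc_itr = 1500
		  * gives 1000000000 / (1500 * 256) ~= 2604 interrupts/sec,
		  * and 1500 / 4 = 375 is the TIDV/TADV (and RDTR/RADV)
		  * "simple timer" value programmed above.
		  */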
4101 
4102 		sc->sc_itr = 1500;		/* 2604 ints/sec */
4103 		CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4104 	}
4105 
4106 	/* Set the VLAN ethernetype. */
4107 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4108 
4109 	/*
4110 	 * Set up the transmit control register; we start out with
4111 	 * a collision distance suitable for FDX, but update it when
4112 	 * we resolve the media type.
4113 	 */
4114 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4115 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
4116 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4117 	if (sc->sc_type >= WM_T_82571)
4118 		sc->sc_tctl |= TCTL_MULR;
4119 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4120 
4121 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4122 		/*
4123 		 * Write TDT after TCTL.EN is set.
4124 		 * See the documentation.
4125 		 */
4126 		CSR_WRITE(sc, WMREG_TDT, 0);
4127 	}
4128 
4129 	if (sc->sc_type == WM_T_80003) {
4130 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
4131 		reg &= ~TCTL_EXT_GCEX_MASK;
4132 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4133 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4134 	}
4135 
4136 	/* Set the media. */
4137 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4138 		goto out;
4139 
4140 	/* Configure for OS presence */
4141 	wm_init_manageability(sc);
4142 
4143 	/*
4144 	 * Set up the receive control register; we actually program
4145 	 * the register when we set the receive filter.  Use multicast
4146 	 * address offset type 0.
4147 	 *
4148 	 * Only the i82544 has the ability to strip the incoming
4149 	 * CRC, so we don't enable that feature.
4150 	 */
4151 	sc->sc_mchash_type = 0;
4152 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4153 	    | RCTL_MO(sc->sc_mchash_type);
4154 
4155 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4156 	    && (ifp->if_mtu > ETHERMTU)) {
4157 		sc->sc_rctl |= RCTL_LPE;
4158 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4159 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4160 	}
4161 
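	/*
	 * The receive buffer size follows the mbuf cluster size.  The
	 * RCTL BSEX bit scales the encoded buffer size by 16, which is
	 * how the 4k/8k/16k settings below are expressed.
	 */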
4162 	if (MCLBYTES == 2048) {
4163 		sc->sc_rctl |= RCTL_2k;
4164 	} else {
4165 		if (sc->sc_type >= WM_T_82543) {
4166 			switch (MCLBYTES) {
4167 			case 4096:
4168 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4169 				break;
4170 			case 8192:
4171 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4172 				break;
4173 			case 16384:
4174 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4175 				break;
4176 			default:
4177 				panic("wm_init: MCLBYTES %d unsupported",
4178 				    MCLBYTES);
4179 				break;
4180 			}
4181 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
4182 	}
4183 
4184 	/* Set the receive filter. */
4185 	wm_set_filter(sc);
4186 
4187 	/* On 82575 and later, set RDT only if RX is enabled */
4188 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4189 		for (i = 0; i < WM_NRXDESC; i++)
4190 			WM_INIT_RXDESC(sc, i);
4191 
4192 	/* Start the one second link check clock. */
4193 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4194 
4195 	/* ...all done! */
4196 	ifp->if_flags |= IFF_RUNNING;
4197 	ifp->if_flags &= ~IFF_OACTIVE;
4198 
4199  out:
4200 	sc->sc_if_flags = ifp->if_flags;
4201 	if (error)
4202 		log(LOG_ERR, "%s: interface not running\n",
4203 		    device_xname(sc->sc_dev));
4204 	return error;
4205 }
4206 
4207 /*
4208  * wm_rxdrain:
4209  *
4210  *	Drain the receive queue.
4211  */
4212 static void
4213 wm_rxdrain(struct wm_softc *sc)
4214 {
4215 	struct wm_rxsoft *rxs;
4216 	int i;
4217 
4218 	for (i = 0; i < WM_NRXDESC; i++) {
4219 		rxs = &sc->sc_rxsoft[i];
4220 		if (rxs->rxs_mbuf != NULL) {
4221 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4222 			m_freem(rxs->rxs_mbuf);
4223 			rxs->rxs_mbuf = NULL;
4224 		}
4225 	}
4226 }
4227 
4228 /*
4229  * wm_stop:		[ifnet interface function]
4230  *
4231  *	Stop transmission on the interface.
4232  */
4233 static void
4234 wm_stop(struct ifnet *ifp, int disable)
4235 {
4236 	struct wm_softc *sc = ifp->if_softc;
4237 	struct wm_txsoft *txs;
4238 	int i;
4239 
4240 	/* Stop the one second clock. */
4241 	callout_stop(&sc->sc_tick_ch);
4242 
4243 	/* Stop the 82547 Tx FIFO stall check timer. */
4244 	if (sc->sc_type == WM_T_82547)
4245 		callout_stop(&sc->sc_txfifo_ch);
4246 
4247 	if (sc->sc_flags & WM_F_HAS_MII) {
4248 		/* Down the MII. */
4249 		mii_down(&sc->sc_mii);
4250 	} else {
4251 #if 0
4252 		/* Should we clear PHY's status properly? */
4253 		wm_reset(sc);
4254 #endif
4255 	}
4256 
4257 	/* Stop the transmit and receive processes. */
4258 	CSR_WRITE(sc, WMREG_TCTL, 0);
4259 	CSR_WRITE(sc, WMREG_RCTL, 0);
4260 	sc->sc_rctl &= ~RCTL_EN;
4261 
4262 	/*
4263 	 * Clear the interrupt mask to ensure the device cannot assert its
4264 	 * interrupt line.
4265 	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4266 	 * any currently pending or shared interrupt.
4267 	 */
4268 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4269 	sc->sc_icr = 0;
4270 
4271 	/* Release any queued transmit buffers. */
4272 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4273 		txs = &sc->sc_txsoft[i];
4274 		if (txs->txs_mbuf != NULL) {
4275 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4276 			m_freem(txs->txs_mbuf);
4277 			txs->txs_mbuf = NULL;
4278 		}
4279 	}
4280 
4281 	/* Mark the interface as down and cancel the watchdog timer. */
4282 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4283 	ifp->if_timer = 0;
4284 
4285 	if (disable)
4286 		wm_rxdrain(sc);
4287 
4288 #if 0 /* notyet */
4289 	if (sc->sc_type >= WM_T_82544)
4290 		CSR_WRITE(sc, WMREG_WUC, 0);
4291 #endif
4292 }
4293 
4294 void
4295 wm_get_auto_rd_done(struct wm_softc *sc)
4296 {
4297 	int i;
4298 
4299 	/* wait for eeprom to reload */
4300 	switch (sc->sc_type) {
4301 	case WM_T_82571:
4302 	case WM_T_82572:
4303 	case WM_T_82573:
4304 	case WM_T_82574:
4305 	case WM_T_82583:
4306 	case WM_T_82575:
4307 	case WM_T_82576:
4308 	case WM_T_82580:
4309 	case WM_T_82580ER:
4310 	case WM_T_80003:
4311 	case WM_T_ICH8:
4312 	case WM_T_ICH9:
4313 		for (i = 0; i < 10; i++) {
4314 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4315 				break;
4316 			delay(1000);
4317 		}
4318 		if (i == 10) {
4319 			log(LOG_ERR, "%s: auto read from eeprom failed to "
4320 			    "complete\n", device_xname(sc->sc_dev));
4321 		}
4322 		break;
4323 	default:
4324 		break;
4325 	}
4326 }
4327 
4328 void
4329 wm_lan_init_done(struct wm_softc *sc)
4330 {
4331 	uint32_t reg = 0;
4332 	int i;
4333 
4334 	/* wait for eeprom to reload */
4335 	switch (sc->sc_type) {
4336 	case WM_T_ICH10:
4337 	case WM_T_PCH:
4338 	case WM_T_PCH2:
4339 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4340 			reg = CSR_READ(sc, WMREG_STATUS);
4341 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
4342 				break;
4343 			delay(100);
4344 		}
4345 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4346 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
4347 			    "complete\n", device_xname(sc->sc_dev), __func__);
4348 		}
4349 		break;
4350 	default:
4351 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4352 		    __func__);
4353 		break;
4354 	}
4355 
4356 	reg &= ~STATUS_LAN_INIT_DONE;
4357 	CSR_WRITE(sc, WMREG_STATUS, reg);
4358 }
4359 
4360 void
4361 wm_get_cfg_done(struct wm_softc *sc)
4362 {
4363 	int mask;
4364 	uint32_t reg;
4365 	int i;
4366 
4367 	/* wait for eeprom to reload */
4368 	switch (sc->sc_type) {
4369 	case WM_T_82542_2_0:
4370 	case WM_T_82542_2_1:
4371 		/* null */
4372 		break;
4373 	case WM_T_82543:
4374 	case WM_T_82544:
4375 	case WM_T_82540:
4376 	case WM_T_82545:
4377 	case WM_T_82545_3:
4378 	case WM_T_82546:
4379 	case WM_T_82546_3:
4380 	case WM_T_82541:
4381 	case WM_T_82541_2:
4382 	case WM_T_82547:
4383 	case WM_T_82547_2:
4384 	case WM_T_82573:
4385 	case WM_T_82574:
4386 	case WM_T_82583:
4387 		/* generic */
4388 		delay(10*1000);
4389 		break;
4390 	case WM_T_80003:
4391 	case WM_T_82571:
4392 	case WM_T_82572:
4393 	case WM_T_82575:
4394 	case WM_T_82576:
4395 	case WM_T_82580:
4396 	case WM_T_82580ER:
4397 		if (sc->sc_type == WM_T_82571) {
4398 			/* 82571 uses the port 0 CFGDONE bit for all functions */
4399 			mask = EEMNGCTL_CFGDONE_0;
4400 		} else
4401 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
4402 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
4403 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
4404 				break;
4405 			delay(1000);
4406 		}
4407 		if (i >= WM_PHY_CFG_TIMEOUT) {
4408 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
4409 				device_xname(sc->sc_dev), __func__));
4410 		}
4411 		break;
4412 	case WM_T_ICH8:
4413 	case WM_T_ICH9:
4414 	case WM_T_ICH10:
4415 	case WM_T_PCH:
4416 	case WM_T_PCH2:
4417 		if (sc->sc_type >= WM_T_PCH) {
4418 			reg = CSR_READ(sc, WMREG_STATUS);
4419 			if ((reg & STATUS_PHYRA) != 0)
4420 				CSR_WRITE(sc, WMREG_STATUS,
4421 				    reg & ~STATUS_PHYRA);
4422 		}
4423 		delay(10*1000);
4424 		break;
4425 	default:
4426 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4427 		    __func__);
4428 		break;
4429 	}
4430 }
4431 
4432 /*
4433  * wm_acquire_eeprom:
4434  *
4435  *	Perform the EEPROM handshake required on some chips.
4436  */
4437 static int
4438 wm_acquire_eeprom(struct wm_softc *sc)
4439 {
4440 	uint32_t reg;
4441 	int x;
4442 	int ret = 0;
4443 
4444 	/* flash-type EEPROM needs no handshake; always succeeds */
4445 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
4446 		return 0;
4447 
4448 	if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
4449 		ret = wm_get_swfwhw_semaphore(sc);
4450 	} else if (sc->sc_flags & WM_F_SWFW_SYNC) {
4451 		/* this will also do wm_get_swsm_semaphore() if needed */
4452 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
4453 	} else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
4454 		ret = wm_get_swsm_semaphore(sc);
4455 	}
4456 
4457 	if (ret) {
4458 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4459 			__func__);
4460 		return 1;
4461 	}
4462 
4463 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
4464 		reg = CSR_READ(sc, WMREG_EECD);
4465 
4466 		/* Request EEPROM access. */
4467 		reg |= EECD_EE_REQ;
4468 		CSR_WRITE(sc, WMREG_EECD, reg);
4469 
4470 		/* ...and wait for it to be granted. */
4471 		for (x = 0; x < 1000; x++) {
4472 			reg = CSR_READ(sc, WMREG_EECD);
4473 			if (reg & EECD_EE_GNT)
4474 				break;
4475 			delay(5);
4476 		}
4477 		if ((reg & EECD_EE_GNT) == 0) {
4478 			aprint_error_dev(sc->sc_dev,
4479 			    "could not acquire EEPROM GNT\n");
4480 			reg &= ~EECD_EE_REQ;
4481 			CSR_WRITE(sc, WMREG_EECD, reg);
4482 			if (sc->sc_flags & WM_F_SWFWHW_SYNC)
4483 				wm_put_swfwhw_semaphore(sc);
4484 			if (sc->sc_flags & WM_F_SWFW_SYNC)
4485 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
4486 			else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4487 				wm_put_swsm_semaphore(sc);
4488 			return 1;
4489 		}
4490 	}
4491 
4492 	return 0;
4493 }
4494 
4495 /*
4496  * wm_release_eeprom:
4497  *
4498  *	Release the EEPROM mutex.
4499  */
4500 static void
4501 wm_release_eeprom(struct wm_softc *sc)
4502 {
4503 	uint32_t reg;
4504 
4505 	/* flash-type EEPROM needs no handshake; nothing to release */
4506 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
4507 		return;
4508 
4509 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
4510 		reg = CSR_READ(sc, WMREG_EECD);
4511 		reg &= ~EECD_EE_REQ;
4512 		CSR_WRITE(sc, WMREG_EECD, reg);
4513 	}
4514 
4515 	if (sc->sc_flags & WM_F_SWFWHW_SYNC)
4516 		wm_put_swfwhw_semaphore(sc);
4517 	if (sc->sc_flags & WM_F_SWFW_SYNC)
4518 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
4519 	else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4520 		wm_put_swsm_semaphore(sc);
4521 }
4522 
4523 /*
4524  * wm_eeprom_sendbits:
4525  *
4526  *	Send a series of bits to the EEPROM.
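 *	Bits go out MSB-first on EECD_DI; the EEPROM latches each bit
 *	on the rising edge of the EECD_SK pulse generated here.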
4527  */
4528 static void
4529 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
4530 {
4531 	uint32_t reg;
4532 	int x;
4533 
4534 	reg = CSR_READ(sc, WMREG_EECD);
4535 
4536 	for (x = nbits; x > 0; x--) {
4537 		if (bits & (1U << (x - 1)))
4538 			reg |= EECD_DI;
4539 		else
4540 			reg &= ~EECD_DI;
4541 		CSR_WRITE(sc, WMREG_EECD, reg);
4542 		delay(2);
4543 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4544 		delay(2);
4545 		CSR_WRITE(sc, WMREG_EECD, reg);
4546 		delay(2);
4547 	}
4548 }
4549 
4550 /*
4551  * wm_eeprom_recvbits:
4552  *
4553  *	Receive a series of bits from the EEPROM.
4554  */
4555 static void
4556 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
4557 {
4558 	uint32_t reg, val;
4559 	int x;
4560 
4561 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
4562 
4563 	val = 0;
4564 	for (x = nbits; x > 0; x--) {
4565 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4566 		delay(2);
4567 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
4568 			val |= (1U << (x - 1));
4569 		CSR_WRITE(sc, WMREG_EECD, reg);
4570 		delay(2);
4571 	}
4572 	*valp = val;
4573 }
4574 
4575 /*
4576  * wm_read_eeprom_uwire:
4577  *
4578  *	Read a word from the EEPROM using the MicroWire protocol.
4579  */
4580 static int
4581 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4582 {
4583 	uint32_t reg, val;
4584 	int i;
4585 
4586 	for (i = 0; i < wordcnt; i++) {
4587 		/* Clear SK and DI. */
4588 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
4589 		CSR_WRITE(sc, WMREG_EECD, reg);
4590 
4591 		/* Set CHIP SELECT. */
4592 		reg |= EECD_CS;
4593 		CSR_WRITE(sc, WMREG_EECD, reg);
4594 		delay(2);
4595 
4596 		/* Shift in the READ command. */
4597 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
4598 
4599 		/* Shift in address. */
4600 		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
4601 
4602 		/* Shift out the data. */
4603 		wm_eeprom_recvbits(sc, &val, 16);
4604 		data[i] = val & 0xffff;
4605 
4606 		/* Clear CHIP SELECT. */
4607 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
4608 		CSR_WRITE(sc, WMREG_EECD, reg);
4609 		delay(2);
4610 	}
4611 
4612 	return 0;
4613 }
4614 
4615 /*
4616  * wm_spi_eeprom_ready:
4617  *
4618  *	Wait for a SPI EEPROM to be ready for commands.
4619  */
4620 static int
4621 wm_spi_eeprom_ready(struct wm_softc *sc)
4622 {
4623 	uint32_t val;
4624 	int usec;
4625 
4626 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
4627 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
4628 		wm_eeprom_recvbits(sc, &val, 8);
4629 		if ((val & SPI_SR_RDY) == 0)
4630 			break;
4631 	}
4632 	if (usec >= SPI_MAX_RETRIES) {
4633 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
4634 		return 1;
4635 	}
4636 	return 0;
4637 }
4638 
4639 /*
4640  * wm_read_eeprom_spi:
4641  *
4642  *	Read a word from the EEPROM using the SPI protocol.
4643  */
4644 static int
4645 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4646 {
4647 	uint32_t reg, val;
4648 	int i;
4649 	uint8_t opc;
4650 
4651 	/* Clear SK and CS. */
4652 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
4653 	CSR_WRITE(sc, WMREG_EECD, reg);
4654 	delay(2);
4655 
4656 	if (wm_spi_eeprom_ready(sc))
4657 		return 1;
4658 
4659 	/* Toggle CS to flush commands. */
4660 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
4661 	delay(2);
4662 	CSR_WRITE(sc, WMREG_EECD, reg);
4663 	delay(2);
4664 
4665 	opc = SPI_OPC_READ;
4666 	if (sc->sc_ee_addrbits == 8 && word >= 128)
4667 		opc |= SPI_OPC_A8;
4668 
4669 	wm_eeprom_sendbits(sc, opc, 8);
4670 	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
4671 
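	/*
	 * wm_eeprom_recvbits() assembles the bits MSB-first, but the
	 * SPI part shifts out the low byte of each 16-bit word first,
	 * so swap the two bytes of every word received.
	 */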
4672 	for (i = 0; i < wordcnt; i++) {
4673 		wm_eeprom_recvbits(sc, &val, 16);
4674 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
4675 	}
4676 
4677 	/* Raise CS and clear SK. */
4678 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
4679 	CSR_WRITE(sc, WMREG_EECD, reg);
4680 	delay(2);
4681 
4682 	return 0;
4683 }
4684 
4685 #define EEPROM_CHECKSUM		0xBABA
4686 #define EEPROM_SIZE		0x0040
4687 
4688 /*
4689  * wm_validate_eeprom_checksum
4690  *
4691  * The checksum is defined as the sum of the first 64 (16 bit) words.
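 * The NVM image carries a compensation word chosen so that this sum
 * comes out to EEPROM_CHECKSUM (0xBABA); any other result means the
 * contents are corrupt or missing.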
4692  */
4693 static int
4694 wm_validate_eeprom_checksum(struct wm_softc *sc)
4695 {
4696 	uint16_t checksum;
4697 	uint16_t eeprom_data;
4698 	int i;
4699 
4700 	checksum = 0;
4701 
4702 	for (i = 0; i < EEPROM_SIZE; i++) {
4703 		if (wm_read_eeprom(sc, i, 1, &eeprom_data))
4704 			return 1;
4705 		checksum += eeprom_data;
4706 	}
4707 
4708 	if (checksum != (uint16_t) EEPROM_CHECKSUM)
4709 		return 1;
4710 
4711 	return 0;
4712 }
4713 
4714 /*
4715  * wm_read_eeprom:
4716  *
4717  *	Read data from the serial EEPROM.
4718  */
4719 static int
4720 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4721 {
4722 	int rv;
4723 
4724 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
4725 		return 1;
4726 
4727 	if (wm_acquire_eeprom(sc))
4728 		return 1;
4729 
4730 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4731 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4732 		 || (sc->sc_type == WM_T_PCH2))
4733 		rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
4734 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
4735 		rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
4736 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
4737 		rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
4738 	else
4739 		rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
4740 
4741 	wm_release_eeprom(sc);
4742 	return rv;
4743 }
4744 
4745 static int
4746 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
4747     uint16_t *data)
4748 {
4749 	int i, eerd = 0;
4750 	int error = 0;
4751 
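	/*
	 * EERD is a self-contained read port: write the word address
	 * together with the START bit, poll for DONE, then pull the
	 * result out of the data field.
	 */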
4752 	for (i = 0; i < wordcnt; i++) {
4753 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
4754 
4755 		CSR_WRITE(sc, WMREG_EERD, eerd);
4756 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
4757 		if (error != 0)
4758 			break;
4759 
4760 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
4761 	}
4762 
4763 	return error;
4764 }
4765 
4766 static int
4767 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
4768 {
4769 	uint32_t attempts = 100000;
4770 	uint32_t i, reg = 0;
4771 	int32_t done = -1;
4772 
4773 	for (i = 0; i < attempts; i++) {
4774 		reg = CSR_READ(sc, rw);
4775 
4776 		if (reg & EERD_DONE) {
4777 			done = 0;
4778 			break;
4779 		}
4780 		delay(5);
4781 	}
4782 
4783 	return done;
4784 }
4785 
4786 static int
4787 wm_check_alt_mac_addr(struct wm_softc *sc)
4788 {
4789 	uint16_t myea[ETHER_ADDR_LEN / 2];
4790 	uint16_t offset = EEPROM_OFF_MACADDR;
4791 
4792 	/* Try to read alternative MAC address pointer */
4793 	if (wm_read_eeprom(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
4794 		return -1;
4795 
4796 	/* Check pointer */
4797 	if (offset == 0xffff)
4798 		return -1;
4799 
4800 	/*
4801 	 * Check whether alternative MAC address is valid or not.
4802 	 * Some cards have a non-0xffff pointer but don't actually
4803 	 * use an alternative MAC address.
4804 	 *
4805 	 * Check whether the broadcast bit is set or not.
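	 * (Bit 0 of the first octet is the multicast/broadcast bit,
	 * which is never set in a valid unicast station address.)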
4806 	 */
4807 	if (wm_read_eeprom(sc, offset, 1, myea) == 0)
4808 		if (((myea[0] & 0xff) & 0x01) == 0)
4809 			return 0; /* found! */
4810 
4811 	/* not found */
4812 	return -1;
4813 }
4814 
4815 static int
4816 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
4817 {
4818 	uint16_t myea[ETHER_ADDR_LEN / 2];
4819 	uint16_t offset = EEPROM_OFF_MACADDR;
4820 	int do_invert = 0;
4821 
4822 	switch (sc->sc_type) {
4823 	case WM_T_82580:
4824 	case WM_T_82580ER:
4825 		switch (sc->sc_funcid) {
4826 		case 0:
4827 			/* default value (== EEPROM_OFF_MACADDR) */
4828 			break;
4829 		case 1:
4830 			offset = EEPROM_OFF_LAN1;
4831 			break;
4832 		case 2:
4833 			offset = EEPROM_OFF_LAN2;
4834 			break;
4835 		case 3:
4836 			offset = EEPROM_OFF_LAN3;
4837 			break;
4838 		default:
4839 			goto bad;
4840 			/* NOTREACHED */
4841 			break;
4842 		}
4843 		break;
4844 	case WM_T_82571:
4845 	case WM_T_82575:
4846 	case WM_T_82576:
4847 	case WM_T_80003:
4848 		if (wm_check_alt_mac_addr(sc) != 0) {
4849 			/* reset the offset to LAN0 */
4850 			offset = EEPROM_OFF_MACADDR;
4851 			if ((sc->sc_funcid & 0x01) == 1)
4852 				do_invert = 1;
4853 			goto do_read;
4854 		}
4855 		switch (sc->sc_funcid) {
4856 		case 0:
4857 			/*
4858 			 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR
4859 			 * itself.
4860 			 */
4861 			break;
4862 		case 1:
4863 			offset += EEPROM_OFF_MACADDR_LAN1;
4864 			break;
4865 		case 2:
4866 			offset += EEPROM_OFF_MACADDR_LAN2;
4867 			break;
4868 		case 3:
4869 			offset += EEPROM_OFF_MACADDR_LAN3;
4870 			break;
4871 		default:
4872 			goto bad;
4873 			/* NOTREACHED */
4874 			break;
4875 		}
4876 		break;
4877 	default:
4878 		if ((sc->sc_funcid & 0x01) == 1)
4879 			do_invert = 1;
4880 		break;
4881 	}
4882 
4883  do_read:
4884 	if (wm_read_eeprom(sc, offset, sizeof(myea) / sizeof(myea[0]),
4885 		myea) != 0) {
4886 		goto bad;
4887 	}
4888 
4889 	enaddr[0] = myea[0] & 0xff;
4890 	enaddr[1] = myea[0] >> 8;
4891 	enaddr[2] = myea[1] & 0xff;
4892 	enaddr[3] = myea[1] >> 8;
4893 	enaddr[4] = myea[2] & 0xff;
4894 	enaddr[5] = myea[2] >> 8;
4895 
4896 	/*
4897 	 * Toggle the LSB of the MAC address on the second port
4898 	 * of some dual port cards.
4899 	 */
4900 	if (do_invert != 0)
4901 		enaddr[5] ^= 1;
4902 
4903 	return 0;
4904 
4905  bad:
4906 	aprint_error_dev(sc->sc_dev, "unable to read Ethernet address\n");
4907 
4908 	return -1;
4909 }
4910 
4911 /*
4912  * wm_add_rxbuf:
4913  *
4914  *	Add a receive buffer to the indicated descriptor.
4915  */
4916 static int
4917 wm_add_rxbuf(struct wm_softc *sc, int idx)
4918 {
4919 	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
4920 	struct mbuf *m;
4921 	int error;
4922 
4923 	MGETHDR(m, M_DONTWAIT, MT_DATA);
4924 	if (m == NULL)
4925 		return ENOBUFS;
4926 
4927 	MCLGET(m, M_DONTWAIT);
4928 	if ((m->m_flags & M_EXT) == 0) {
4929 		m_freem(m);
4930 		return ENOBUFS;
4931 	}
4932 
4933 	if (rxs->rxs_mbuf != NULL)
4934 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4935 
4936 	rxs->rxs_mbuf = m;
4937 
4938 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4939 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4940 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
4941 	if (error) {
4942 		/* XXX XXX XXX */
4943 		aprint_error_dev(sc->sc_dev,
4944 		    "unable to load rx DMA map %d, error = %d\n",
4945 		    idx, error);
4946 		panic("wm_add_rxbuf");
4947 	}
4948 
4949 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4950 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4951 
4952 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4953 		if ((sc->sc_rctl & RCTL_EN) != 0)
4954 			WM_INIT_RXDESC(sc, idx);
4955 	} else
4956 		WM_INIT_RXDESC(sc, idx);
4957 
4958 	return 0;
4959 }
4960 
4961 /*
4962  * wm_set_ral:
4963  *
4964  *	Set an entry in the receive address list.
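 *
 *	For example (address purely illustrative), 00:0e:0c:12:34:56 is
 *	packed little-endian: ral_lo = 0x120c0e00, and
 *	ral_hi = 0x5634 | RAL_AV.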
4965  */
4966 static void
4967 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
4968 {
4969 	uint32_t ral_lo, ral_hi;
4970 
4971 	if (enaddr != NULL) {
4972 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
4973 		    (enaddr[3] << 24);
4974 		ral_hi = enaddr[4] | (enaddr[5] << 8);
4975 		ral_hi |= RAL_AV;
4976 	} else {
4977 		ral_lo = 0;
4978 		ral_hi = 0;
4979 	}
4980 
4981 	if (sc->sc_type >= WM_T_82544) {
4982 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
4983 		    ral_lo);
4984 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
4985 		    ral_hi);
4986 	} else {
4987 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
4988 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
4989 	}
4990 }
4991 
4992 /*
4993  * wm_mchash:
4994  *
4995  *	Compute the hash of the multicast address for the 4096-bit
4996  *	multicast filter (1024-bit on the ICH/PCH variants).
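 *
 *	The hash is built from the last two octets of the address; the
 *	shift tables below select which bits are used, according to the
 *	multicast offset type (0-3) in sc_mchash_type.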
4997  */
4998 static uint32_t
4999 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
5000 {
5001 	static const int lo_shift[4] = { 4, 3, 2, 0 };
5002 	static const int hi_shift[4] = { 4, 5, 6, 8 };
5003 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
5004 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
5005 	uint32_t hash;
5006 
5007 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5008 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5009 	    || (sc->sc_type == WM_T_PCH2)) {
5010 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
5011 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
5012 		return (hash & 0x3ff);
5013 	}
5014 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
5015 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
5016 
5017 	return (hash & 0xfff);
5018 }
5019 
5020 /*
5021  * wm_set_filter:
5022  *
5023  *	Set up the receive filter.
5024  */
5025 static void
5026 wm_set_filter(struct wm_softc *sc)
5027 {
5028 	struct ethercom *ec = &sc->sc_ethercom;
5029 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5030 	struct ether_multi *enm;
5031 	struct ether_multistep step;
5032 	bus_addr_t mta_reg;
5033 	uint32_t hash, reg, bit;
5034 	int i, size;
5035 
5036 	if (sc->sc_type >= WM_T_82544)
5037 		mta_reg = WMREG_CORDOVA_MTA;
5038 	else
5039 		mta_reg = WMREG_MTA;
5040 
5041 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
5042 
5043 	if (ifp->if_flags & IFF_BROADCAST)
5044 		sc->sc_rctl |= RCTL_BAM;
5045 	if (ifp->if_flags & IFF_PROMISC) {
5046 		sc->sc_rctl |= RCTL_UPE;
5047 		goto allmulti;
5048 	}
5049 
5050 	/*
5051 	 * Set the station address in the first RAL slot, and
5052 	 * clear the remaining slots.
5053 	 */
5054 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5055 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5056 	    || (sc->sc_type == WM_T_PCH2))
5057 		size = WM_ICH8_RAL_TABSIZE;
5058 	else
5059 		size = WM_RAL_TABSIZE;
5060 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
5061 	for (i = 1; i < size; i++)
5062 		wm_set_ral(sc, NULL, i);
5063 
5064 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5065 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5066 	    || (sc->sc_type == WM_T_PCH2))
5067 		size = WM_ICH8_MC_TABSIZE;
5068 	else
5069 		size = WM_MC_TABSIZE;
5070 	/* Clear out the multicast table. */
5071 	for (i = 0; i < size; i++)
5072 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
5073 
5074 	ETHER_FIRST_MULTI(step, ec, enm);
5075 	while (enm != NULL) {
5076 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
5077 			/*
5078 			 * We must listen to a range of multicast addresses.
5079 			 * For now, just accept all multicasts, rather than
5080 			 * trying to set only those filter bits needed to match
5081 			 * the range.  (At this time, the only use of address
5082 			 * ranges is for IP multicast routing, for which the
5083 			 * range is big enough to require all bits set.)
5084 			 */
5085 			goto allmulti;
5086 		}
5087 
5088 		hash = wm_mchash(sc, enm->enm_addrlo);
5089 
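		/*
		 * The low 5 bits of the hash select a bit within a
		 * 32-bit MTA register; the remaining bits select the
		 * register itself, masked to the table size (32
		 * registers on ICH/PCH, 128 otherwise).
		 */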
5090 		reg = (hash >> 5);
5091 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5092 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5093 		    || (sc->sc_type == WM_T_PCH2))
5094 			reg &= 0x1f;
5095 		else
5096 			reg &= 0x7f;
5097 		bit = hash & 0x1f;
5098 
5099 		hash = CSR_READ(sc, mta_reg + (reg << 2));
5100 		hash |= 1U << bit;
5101 
5102 		/* XXX Hardware bug?? */
5103 		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
5104 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
5105 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5106 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
5107 		} else
5108 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5109 
5110 		ETHER_NEXT_MULTI(step, enm);
5111 	}
5112 
5113 	ifp->if_flags &= ~IFF_ALLMULTI;
5114 	goto setit;
5115 
5116  allmulti:
5117 	ifp->if_flags |= IFF_ALLMULTI;
5118 	sc->sc_rctl |= RCTL_MPE;
5119 
5120  setit:
5121 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
5122 }
5123 
5124 /*
5125  * wm_tbi_mediainit:
5126  *
5127  *	Initialize media for use on 1000BASE-X devices.
5128  */
5129 static void
5130 wm_tbi_mediainit(struct wm_softc *sc)
5131 {
5132 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5133 	const char *sep = "";
5134 
5135 	if (sc->sc_type < WM_T_82543)
5136 		sc->sc_tipg = TIPG_WM_DFLT;
5137 	else
5138 		sc->sc_tipg = TIPG_LG_DFLT;
5139 
5140 	sc->sc_tbi_anegticks = 5;
5141 
5142 	/* Initialize our media structures */
5143 	sc->sc_mii.mii_ifp = ifp;
5144 
5145 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
5146 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
5147 	    wm_tbi_mediastatus);
5148 
5149 	/*
5150 	 * SWD Pins:
5151 	 *
5152 	 *	0 = Link LED (output)
5153 	 *	1 = Loss Of Signal (input)
5154 	 */
5155 	sc->sc_ctrl |= CTRL_SWDPIO(0);
5156 	sc->sc_ctrl &= ~CTRL_SWDPIO(1);
5157 
5158 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5159 
5160 #define	ADD(ss, mm, dd)							\
5161 do {									\
5162 	aprint_normal("%s%s", sep, ss);					\
5163 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
5164 	sep = ", ";							\
5165 } while (/*CONSTCOND*/0)
5166 
5167 	aprint_normal_dev(sc->sc_dev, "");
5168 	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
5169 	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
5170 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
5171 	aprint_normal("\n");
5172 
5173 #undef ADD
5174 
5175 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
5176 }
5177 
5178 /*
5179  * wm_tbi_mediastatus:	[ifmedia interface function]
5180  *
5181  *	Get the current interface media status on a 1000BASE-X device.
5182  */
5183 static void
5184 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5185 {
5186 	struct wm_softc *sc = ifp->if_softc;
5187 	uint32_t ctrl, status;
5188 
5189 	ifmr->ifm_status = IFM_AVALID;
5190 	ifmr->ifm_active = IFM_ETHER;
5191 
5192 	status = CSR_READ(sc, WMREG_STATUS);
5193 	if ((status & STATUS_LU) == 0) {
5194 		ifmr->ifm_active |= IFM_NONE;
5195 		return;
5196 	}
5197 
5198 	ifmr->ifm_status |= IFM_ACTIVE;
5199 	ifmr->ifm_active |= IFM_1000_SX;
5200 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
5201 		ifmr->ifm_active |= IFM_FDX;
5202 	ctrl = CSR_READ(sc, WMREG_CTRL);
5203 	if (ctrl & CTRL_RFCE)
5204 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
5205 	if (ctrl & CTRL_TFCE)
5206 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
5207 }
5208 
5209 /*
5210  * wm_tbi_mediachange:	[ifmedia interface function]
5211  *
5212  *	Set hardware to newly-selected media on a 1000BASE-X device.
5213  */
5214 static int
5215 wm_tbi_mediachange(struct ifnet *ifp)
5216 {
5217 	struct wm_softc *sc = ifp->if_softc;
5218 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5219 	uint32_t status;
5220 	int i;
5221 
5222 	sc->sc_txcw = 0;
5223 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
5224 	    (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
5225 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
5226 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5227 		sc->sc_txcw |= TXCW_ANE;
5228 	} else {
5229 		/*
5230 		 * If autonegotiation is turned off, force link up and turn on
5231 		 * full duplex
5232 		 */
5233 		sc->sc_txcw &= ~TXCW_ANE;
5234 		sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
5235 		sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
5236 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5237 		delay(1000);
5238 	}
5239 
5240 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
5241 		    device_xname(sc->sc_dev),sc->sc_txcw));
5242 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5243 	delay(10000);
5244 
5245 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
5246 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
5247 
5248 	/*
5249 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
5250 	 * optics detect a signal, 0 if they don't.
5251 	 */
5252 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
5253 		/* Have signal; wait for the link to come up. */
5254 
5255 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5256 			/*
5257 			 * Reset the link, and let autonegotiation do its thing
5258 			 */
5259 			sc->sc_ctrl |= CTRL_LRST;
5260 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5261 			delay(1000);
5262 			sc->sc_ctrl &= ~CTRL_LRST;
5263 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5264 			delay(1000);
5265 		}
5266 
5267 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
5268 			delay(10000);
5269 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
5270 				break;
5271 		}
5272 
5273 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
5274 			    device_xname(sc->sc_dev),i));
5275 
5276 		status = CSR_READ(sc, WMREG_STATUS);
5277 		DPRINTF(WM_DEBUG_LINK,
5278 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
5279 			device_xname(sc->sc_dev),status, STATUS_LU));
5280 		if (status & STATUS_LU) {
5281 			/* Link is up. */
5282 			DPRINTF(WM_DEBUG_LINK,
5283 			    ("%s: LINK: set media -> link up %s\n",
5284 			    device_xname(sc->sc_dev),
5285 			    (status & STATUS_FD) ? "FDX" : "HDX"));
5286 
5287 			/*
5288 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
5289 			 * automatically, so re-read CTRL into sc->sc_ctrl.
5290 			 */
5291 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5292 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5293 			sc->sc_fcrtl &= ~FCRTL_XONE;
5294 			if (status & STATUS_FD)
5295 				sc->sc_tctl |=
5296 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5297 			else
5298 				sc->sc_tctl |=
5299 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5300 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
5301 				sc->sc_fcrtl |= FCRTL_XONE;
5302 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5303 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
5304 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
5305 				      sc->sc_fcrtl);
5306 			sc->sc_tbi_linkup = 1;
5307 		} else {
5308 			if (i == WM_LINKUP_TIMEOUT)
5309 				wm_check_for_link(sc);
5310 			/* Link is down. */
5311 			DPRINTF(WM_DEBUG_LINK,
5312 			    ("%s: LINK: set media -> link down\n",
5313 			    device_xname(sc->sc_dev)));
5314 			sc->sc_tbi_linkup = 0;
5315 		}
5316 	} else {
5317 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
5318 		    device_xname(sc->sc_dev)));
5319 		sc->sc_tbi_linkup = 0;
5320 	}
5321 
5322 	wm_tbi_set_linkled(sc);
5323 
5324 	return 0;
5325 }
5326 
5327 /*
5328  * wm_tbi_set_linkled:
5329  *
5330  *	Update the link LED on 1000BASE-X devices.
5331  */
5332 static void
5333 wm_tbi_set_linkled(struct wm_softc *sc)
5334 {
5335 
5336 	if (sc->sc_tbi_linkup)
5337 		sc->sc_ctrl |= CTRL_SWDPIN(0);
5338 	else
5339 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
5340 
5341 	/* 82540 or newer devices are active low */
5342 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
5343 
5344 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5345 }
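
/*
 * Illustrative sketch (not compiled): the XOR in wm_tbi_set_linkled()
 * above is a compact way of inverting the LED bit on chips where
 * SWDPIN(0) is active low.  An equivalent, more explicit form:
 */
#if 0
	if (sc->sc_type >= WM_T_82540)
		sc->sc_ctrl ^= CTRL_SWDPIN(0);	/* active low: invert */
#endif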
5346 
5347 /*
5348  * wm_tbi_check_link:
5349  *
5350  *	Check the link on 1000BASE-X devices.
5351  */
5352 static void
5353 wm_tbi_check_link(struct wm_softc *sc)
5354 {
5355 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5356 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5357 	uint32_t rxcw, ctrl, status;
5358 
5359 	status = CSR_READ(sc, WMREG_STATUS);
5360 
5361 	rxcw = CSR_READ(sc, WMREG_RXCW);
5362 	ctrl = CSR_READ(sc, WMREG_CTRL);
5363 
5364 	/* set link status */
5365 	if ((status & STATUS_LU) == 0) {
5366 		DPRINTF(WM_DEBUG_LINK,
5367 		    ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
5368 		sc->sc_tbi_linkup = 0;
5369 	} else if (sc->sc_tbi_linkup == 0) {
5370 		DPRINTF(WM_DEBUG_LINK,
5371 		    ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
5372 		    (status & STATUS_FD) ? "FDX" : "HDX"));
5373 		sc->sc_tbi_linkup = 1;
5374 	}
5375 
5376 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
5377 	    && ((status & STATUS_LU) == 0)) {
5378 		sc->sc_tbi_linkup = 0;
5379 		if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
5380 			/* RXCFG storm! */
5381 			DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
5382 				sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
5383 			wm_init(ifp);
5384 			wm_start(ifp);
5385 		} else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5386 			/* If the timer expired, retry autonegotiation */
5387 			if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
5388 				DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
5389 				sc->sc_tbi_ticks = 0;
5390 				/*
5391 				 * Reset the link, and let autonegotiation do
5392 				 * its thing
5393 				 */
5394 				sc->sc_ctrl |= CTRL_LRST;
5395 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5396 				delay(1000);
5397 				sc->sc_ctrl &= ~CTRL_LRST;
5398 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5399 				delay(1000);
5400 				CSR_WRITE(sc, WMREG_TXCW,
5401 				    sc->sc_txcw & ~TXCW_ANE);
5402 				CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5403 			}
5404 		}
5405 	}
5406 
5407 	wm_tbi_set_linkled(sc);
5408 }
5409 
5410 /*
5411  * wm_gmii_reset:
5412  *
5413  *	Reset the PHY.
5414  */
5415 static void
5416 wm_gmii_reset(struct wm_softc *sc)
5417 {
5418 	uint32_t reg;
5419 	int rv;
5420 
5421 	/* get phy semaphore */
5422 	switch (sc->sc_type) {
5423 	case WM_T_82571:
5424 	case WM_T_82572:
5425 	case WM_T_82573:
5426 	case WM_T_82574:
5427 	case WM_T_82583:
5428 		 /* XXX should get sw semaphore, too */
5429 		rv = wm_get_swsm_semaphore(sc);
5430 		break;
5431 	case WM_T_82575:
5432 	case WM_T_82576:
5433 	case WM_T_82580:
5434 	case WM_T_82580ER:
5435 	case WM_T_80003:
5436 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5437 		break;
5438 	case WM_T_ICH8:
5439 	case WM_T_ICH9:
5440 	case WM_T_ICH10:
5441 	case WM_T_PCH:
5442 	case WM_T_PCH2:
5443 		rv = wm_get_swfwhw_semaphore(sc);
5444 		break;
5445 	default:
5446 		/* nothing to do */
5447 		rv = 0;
5448 		break;
5449 	}
5450 	if (rv != 0) {
5451 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5452 		    __func__);
5453 		return;
5454 	}
5455 
5456 	switch (sc->sc_type) {
5457 	case WM_T_82542_2_0:
5458 	case WM_T_82542_2_1:
5459 		/* null */
5460 		break;
5461 	case WM_T_82543:
5462 		/*
5463 		 * With the 82543, we need to force the MAC's speed and
5464 		 * duplex to match the PHY's speed and duplex configuration.
5465 		 * In addition, we need to pulse the PHY's hardware reset pin
5466 		 * to take the PHY out of reset.
5467 		 */
5468 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5469 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5470 
5471 		/* The PHY reset pin is active-low. */
5472 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
5473 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
5474 		    CTRL_EXT_SWDPIN(4));
5475 		reg |= CTRL_EXT_SWDPIO(4);
5476 
5477 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5478 		delay(10*1000);
5479 
5480 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
5481 		delay(150);
5482 #if 0
5483 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
5484 #endif
5485 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
5486 		break;
5487 	case WM_T_82544:	/* reset 10000us */
5488 	case WM_T_82540:
5489 	case WM_T_82545:
5490 	case WM_T_82545_3:
5491 	case WM_T_82546:
5492 	case WM_T_82546_3:
5493 	case WM_T_82541:
5494 	case WM_T_82541_2:
5495 	case WM_T_82547:
5496 	case WM_T_82547_2:
5497 	case WM_T_82571:	/* reset 100us */
5498 	case WM_T_82572:
5499 	case WM_T_82573:
5500 	case WM_T_82574:
5501 	case WM_T_82575:
5502 	case WM_T_82576:
5503 	case WM_T_82580:
5504 	case WM_T_82580ER:
5505 	case WM_T_82583:
5506 	case WM_T_80003:
5507 		/* generic reset */
5508 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5509 		delay(20000);
5510 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5511 		delay(20000);
5512 
5513 		if ((sc->sc_type == WM_T_82541)
5514 		    || (sc->sc_type == WM_T_82541_2)
5515 		    || (sc->sc_type == WM_T_82547)
5516 		    || (sc->sc_type == WM_T_82547_2)) {
5517 			/* workarounds for igp PHYs are done in igp_reset() */
5518 			/* XXX add code to set the LED after PHY reset */
5519 		}
5520 		break;
5521 	case WM_T_ICH8:
5522 	case WM_T_ICH9:
5523 	case WM_T_ICH10:
5524 	case WM_T_PCH:
5525 	case WM_T_PCH2:
5526 		/* generic reset */
5527 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5528 		delay(100);
5529 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5530 		delay(150);
5531 		break;
5532 	default:
5533 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
5534 		    __func__);
5535 		break;
5536 	}
5537 
5538 	/* release PHY semaphore */
5539 	switch (sc->sc_type) {
5540 	case WM_T_82571:
5541 	case WM_T_82572:
5542 	case WM_T_82573:
5543 	case WM_T_82574:
5544 	case WM_T_82583:
5545 		 /* XXX should put sw semaphore, too */
5546 		wm_put_swsm_semaphore(sc);
5547 		break;
5548 	case WM_T_82575:
5549 	case WM_T_82576:
5550 	case WM_T_82580:
5551 	case WM_T_82580ER:
5552 	case WM_T_80003:
5553 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5554 		break;
5555 	case WM_T_ICH8:
5556 	case WM_T_ICH9:
5557 	case WM_T_ICH10:
5558 	case WM_T_PCH:
5559 	case WM_T_PCH2:
5560 		wm_put_swfwhw_semaphore(sc);
5561 		break;
5562 	default:
5563 		/* nothing to do */
5564 		rv = 0;
5565 		break;
5566 	}
5567 
5568 	/* get_cfg_done */
5569 	wm_get_cfg_done(sc);
5570 
5571 	/* extra setup */
5572 	switch (sc->sc_type) {
5573 	case WM_T_82542_2_0:
5574 	case WM_T_82542_2_1:
5575 	case WM_T_82543:
5576 	case WM_T_82544:
5577 	case WM_T_82540:
5578 	case WM_T_82545:
5579 	case WM_T_82545_3:
5580 	case WM_T_82546:
5581 	case WM_T_82546_3:
5582 	case WM_T_82541_2:
5583 	case WM_T_82547_2:
5584 	case WM_T_82571:
5585 	case WM_T_82572:
5586 	case WM_T_82573:
5587 	case WM_T_82574:
5588 	case WM_T_82575:
5589 	case WM_T_82576:
5590 	case WM_T_82580:
5591 	case WM_T_82580ER:
5592 	case WM_T_82583:
5593 	case WM_T_80003:
5594 		/* null */
5595 		break;
5596 	case WM_T_82541:
5597 	case WM_T_82547:
5598 		/* XXX Actively configure the LED after PHY reset */
5599 		break;
5600 	case WM_T_ICH8:
5601 	case WM_T_ICH9:
5602 	case WM_T_ICH10:
5603 	case WM_T_PCH:
5604 	case WM_T_PCH2:
5605 		/* Allow time for h/w to get to a quiescent state after reset */
5606 		delay(10*1000);
5607 
5608 		if (sc->sc_type == WM_T_PCH)
5609 			wm_hv_phy_workaround_ich8lan(sc);
5610 
5611 		if (sc->sc_type == WM_T_PCH2)
5612 			wm_lv_phy_workaround_ich8lan(sc);
5613 
5614 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
5615 			/*
5616 			 * dummy read to clear the phy wakeup bit after lcd
5617 			 * reset
5618 			 */
5619 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
5620 		}
5621 
5622 		/*
5623 		 * XXX Configure the LCD with the extended configuration region
5624 		 * in NVM
5625 		 */
5626 
5627 		/* Configure the LCD with the OEM bits in NVM */
5628 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
5629 			/*
5630 			 * Disable LPLU.
5631 			 * XXX It seems that 82567 has LPLU, too.
5632 			 */
5633 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
5634 			reg &= ~(HV_OEM_BITS_A1KDIS| HV_OEM_BITS_LPLU);
5635 			reg |= HV_OEM_BITS_ANEGNOW;
5636 			wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
5637 		}
5638 		break;
5639 	default:
5640 		panic("%s: unknown type\n", __func__);
5641 		break;
5642 	}
5643 }
5644 
5645 /*
5646  * wm_gmii_mediainit:
5647  *
5648  *	Initialize media for use on 1000BASE-T devices.
5649  */
5650 static void
5651 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
5652 {
5653 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5654 
5655 	/* We have MII. */
5656 	sc->sc_flags |= WM_F_HAS_MII;
5657 
5658 	if (sc->sc_type == WM_T_80003)
5659 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
5660 	else
5661 		sc->sc_tipg = TIPG_1000T_DFLT;
5662 
5663 	/*
5664 	 * Let the chip set speed/duplex on its own based on
5665 	 * signals from the PHY.
5666 	 * XXXbouyer - I'm not sure this is right for the 80003,
5667 	 * the em driver only sets CTRL_SLU here - but it seems to work.
5668 	 */
5669 	sc->sc_ctrl |= CTRL_SLU;
5670 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5671 
5672 	/* Initialize our media structures and probe the GMII. */
5673 	sc->sc_mii.mii_ifp = ifp;
5674 
5675 	switch (prodid) {
5676 	case PCI_PRODUCT_INTEL_PCH_M_LM:
5677 	case PCI_PRODUCT_INTEL_PCH_M_LC:
5678 		/* 82577 */
5679 		sc->sc_phytype = WMPHY_82577;
5680 		sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
5681 		sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
5682 		break;
5683 	case PCI_PRODUCT_INTEL_PCH_D_DM:
5684 	case PCI_PRODUCT_INTEL_PCH_D_DC:
5685 		/* 82578 */
5686 		sc->sc_phytype = WMPHY_82578;
5687 		sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
5688 		sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
5689 		break;
5690 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
5691 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
5692 		/* 82579 */
5693 		sc->sc_phytype = WMPHY_82579;
5694 		sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
5695 		sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
5696 		break;
5697 	case PCI_PRODUCT_INTEL_82801I_BM:
5698 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
5699 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
5700 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
5701 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
5702 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
5703 		/* 82567 */
5704 		sc->sc_phytype = WMPHY_BM;
5705 		sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
5706 		sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
5707 		break;
5708 	default:
5709 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
5710 			sc->sc_mii.mii_readreg = wm_sgmii_readreg;
5711 			sc->sc_mii.mii_writereg = wm_sgmii_writereg;
5712 		} else if (sc->sc_type >= WM_T_80003) {
5713 			sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
5714 			sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
5715 		} else if (sc->sc_type >= WM_T_82544) {
5716 			sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
5717 			sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
5718 		} else {
5719 			sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
5720 			sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
5721 		}
5722 		break;
5723 	}
5724 	sc->sc_mii.mii_statchg = wm_gmii_statchg;
5725 
5726 	wm_gmii_reset(sc);
5727 
5728 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
5729 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
5730 	    wm_gmii_mediastatus);
5731 
5732 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
5733 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)) {
5734 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
5735 			/* Attach only one port */
5736 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
5737 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
5738 		} else {
5739 			int i;
5740 			uint32_t ctrl_ext;
5741 
5742 			/* Power on sgmii phy if it is disabled */
5743 			ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
5744 			CSR_WRITE(sc, WMREG_CTRL_EXT,
5745 			    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
5746 			CSR_WRITE_FLUSH(sc);
5747 			delay(300*1000); /* XXX too long */
5748 
5749 			/* try PHY addresses 1 through 7 */
5750 			for (i = 1; i < 8; i++)
5751 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
5752 				    i, MII_OFFSET_ANY, MIIF_DOPAUSE);
5753 
5754 			/* restore previous sfp cage power state */
5755 			CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
5756 		}
5757 	} else {
5758 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
5759 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
5760 	}
5761 
5762 	if ((sc->sc_type == WM_T_PCH2) &&
5763 	    (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL)) {
5764 		wm_set_mdio_slow_mode_hv(sc);
5765 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
5766 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
5767 	}
5768 
5769 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
5770 		/* if failed, retry with *_bm_* */
5771 		sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
5772 		sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
5773 
5774 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
5775 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
5776 	}
5777 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
5778 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
5779 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
5780 		sc->sc_phytype = WMPHY_NONE;
5781 	} else {
5782 		/* Check PHY type */
5783 		uint32_t model;
5784 		struct mii_softc *child;
5785 
5786 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
5787 		if (device_is_a(child->mii_dev, "igphy")) {
5788 			struct igphy_softc *isc = (struct igphy_softc *)child;
5789 
5790 			model = isc->sc_mii.mii_mpd_model;
5791 			if (model == MII_MODEL_yyINTEL_I82566)
5792 				sc->sc_phytype = WMPHY_IGP_3;
5793 		}
5794 
5795 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
5796 	}
5797 }
5798 
5799 /*
5800  * wm_gmii_mediastatus:	[ifmedia interface function]
5801  *
5802  *	Get the current interface media status on a 1000BASE-T device.
5803  */
5804 static void
5805 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5806 {
5807 	struct wm_softc *sc = ifp->if_softc;
5808 
5809 	ether_mediastatus(ifp, ifmr);
5810 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
5811 	    | sc->sc_flowflags;
5812 }
5813 
5814 /*
5815  * wm_gmii_mediachange:	[ifmedia interface function]
5816  *
5817  *	Set hardware to newly-selected media on a 1000BASE-T device.
5818  */
5819 static int
5820 wm_gmii_mediachange(struct ifnet *ifp)
5821 {
5822 	struct wm_softc *sc = ifp->if_softc;
5823 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5824 	int rc;
5825 
5826 	if ((ifp->if_flags & IFF_UP) == 0)
5827 		return 0;
5828 
5829 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
5830 	sc->sc_ctrl |= CTRL_SLU;
5831 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
5832 	    || (sc->sc_type > WM_T_82543)) {
5833 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
5834 	} else {
5835 		sc->sc_ctrl &= ~CTRL_ASDE;
5836 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5837 		if (ife->ifm_media & IFM_FDX)
5838 			sc->sc_ctrl |= CTRL_FD;
5839 		switch (IFM_SUBTYPE(ife->ifm_media)) {
5840 		case IFM_10_T:
5841 			sc->sc_ctrl |= CTRL_SPEED_10;
5842 			break;
5843 		case IFM_100_TX:
5844 			sc->sc_ctrl |= CTRL_SPEED_100;
5845 			break;
5846 		case IFM_1000_T:
5847 			sc->sc_ctrl |= CTRL_SPEED_1000;
5848 			break;
5849 		default:
5850 			panic("wm_gmii_mediachange: bad media 0x%x",
5851 			    ife->ifm_media);
5852 		}
5853 	}
5854 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5855 	if (sc->sc_type <= WM_T_82543)
5856 		wm_gmii_reset(sc);
5857 
5858 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
5859 		return 0;
5860 	return rc;
5861 }
5862 
5863 #define	MDI_IO		CTRL_SWDPIN(2)
5864 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
5865 #define	MDI_CLK		CTRL_SWDPIN(3)
5866 
5867 static void
5868 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
5869 {
5870 	uint32_t i, v;
5871 
5872 	v = CSR_READ(sc, WMREG_CTRL);
5873 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
5874 	v |= MDI_DIR | CTRL_SWDPIO(3);
5875 
5876 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
5877 		if (data & i)
5878 			v |= MDI_IO;
5879 		else
5880 			v &= ~MDI_IO;
5881 		CSR_WRITE(sc, WMREG_CTRL, v);
5882 		delay(10);
5883 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5884 		delay(10);
5885 		CSR_WRITE(sc, WMREG_CTRL, v);
5886 		delay(10);
5887 	}
5888 }
5889 
5890 static uint32_t
5891 i82543_mii_recvbits(struct wm_softc *sc)
5892 {
5893 	uint32_t v, i, data = 0;
5894 
5895 	v = CSR_READ(sc, WMREG_CTRL);
5896 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
5897 	v |= CTRL_SWDPIO(3);
5898 
5899 	CSR_WRITE(sc, WMREG_CTRL, v);
5900 	delay(10);
5901 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5902 	delay(10);
5903 	CSR_WRITE(sc, WMREG_CTRL, v);
5904 	delay(10);
5905 
5906 	for (i = 0; i < 16; i++) {
5907 		data <<= 1;
5908 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5909 		delay(10);
5910 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
5911 			data |= 1;
5912 		CSR_WRITE(sc, WMREG_CTRL, v);
5913 		delay(10);
5914 	}
5915 
5916 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5917 	delay(10);
5918 	CSR_WRITE(sc, WMREG_CTRL, v);
5919 	delay(10);
5920 
5921 	return data;
5922 }
5923 
5924 #undef MDI_IO
5925 #undef MDI_DIR
5926 #undef MDI_CLK
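
/*
 * A sketch (not compiled) of how the 14-bit read command passed to
 * i82543_mii_sendbits() by wm_gmii_i82543_readreg() below is packed.
 * The layout follows the IEEE 802.3 clause 22 MDIO frame; the helper
 * name is hypothetical.
 */
#if 0
static inline uint32_t
i82543_mii_read_cmd(int phy, int reg)
{

	/* <start:2><opcode:2><phyaddr:5><regaddr:5>, MSB shifted out first */
	return (MII_COMMAND_START << 12) | (MII_COMMAND_READ << 10) |
	    (phy << 5) | reg;
}
#endif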
5927 
5928 /*
5929  * wm_gmii_i82543_readreg:	[mii interface function]
5930  *
5931  *	Read a PHY register on the GMII (i82543 version).
5932  */
5933 static int
5934 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
5935 {
5936 	struct wm_softc *sc = device_private(self);
5937 	int rv;
5938 
5939 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
5940 	i82543_mii_sendbits(sc, reg | (phy << 5) |
5941 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
5942 	rv = i82543_mii_recvbits(sc) & 0xffff;
5943 
5944 	DPRINTF(WM_DEBUG_GMII,
5945 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
5946 	    device_xname(sc->sc_dev), phy, reg, rv));
5947 
5948 	return rv;
5949 }
5950 
5951 /*
5952  * wm_gmii_i82543_writereg:	[mii interface function]
5953  *
5954  *	Write a PHY register on the GMII (i82543 version).
5955  */
5956 static void
5957 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
5958 {
5959 	struct wm_softc *sc = device_private(self);
5960 
5961 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
5962 	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
5963 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
5964 	    (MII_COMMAND_START << 30), 32);
5965 }
5966 
5967 /*
5968  * wm_gmii_i82544_readreg:	[mii interface function]
5969  *
5970  *	Read a PHY register on the GMII.
5971  */
5972 static int
5973 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
5974 {
5975 	struct wm_softc *sc = device_private(self);
5976 	uint32_t mdic = 0;
5977 	int i, rv;
5978 
5979 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
5980 	    MDIC_REGADD(reg));
5981 
5982 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
5983 		mdic = CSR_READ(sc, WMREG_MDIC);
5984 		if (mdic & MDIC_READY)
5985 			break;
5986 		delay(50);
5987 	}
5988 
5989 	if ((mdic & MDIC_READY) == 0) {
5990 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
5991 		    device_xname(sc->sc_dev), phy, reg);
5992 		rv = 0;
5993 	} else if (mdic & MDIC_E) {
5994 #if 0 /* This is normal if no PHY is present. */
5995 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
5996 		    device_xname(sc->sc_dev), phy, reg);
5997 #endif
5998 		rv = 0;
5999 	} else {
6000 		rv = MDIC_DATA(mdic);
6001 		if (rv == 0xffff)
6002 			rv = 0;
6003 	}
6004 
6005 	return rv;
6006 }
6007 
6008 /*
6009  * wm_gmii_i82544_writereg:	[mii interface function]
6010  *
6011  *	Write a PHY register on the GMII.
6012  */
6013 static void
6014 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6015 {
6016 	struct wm_softc *sc = device_private(self);
6017 	uint32_t mdic = 0;
6018 	int i;
6019 
6020 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6021 	    MDIC_REGADD(reg) | MDIC_DATA(val));
6022 
6023 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6024 		mdic = CSR_READ(sc, WMREG_MDIC);
6025 		if (mdic & MDIC_READY)
6026 			break;
6027 		delay(50);
6028 	}
6029 
6030 	if ((mdic & MDIC_READY) == 0)
6031 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6032 		    device_xname(sc->sc_dev), phy, reg);
6033 	else if (mdic & MDIC_E)
6034 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6035 		    device_xname(sc->sc_dev), phy, reg);
6036 }
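
/*
 * Both MDIC accessors above poll MDIC_READY with the same bounded
 * loop.  A minimal sketch of that poll factored into a helper; the
 * name wm_mdic_wait_ready() is hypothetical, not part of this driver.
 */
#if 0
static int
wm_mdic_wait_ready(struct wm_softc *sc, uint32_t *mdicp)
{
	uint32_t mdic = 0;
	int i;

	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(50);
	}
	*mdicp = mdic;
	return (mdic & MDIC_READY) ? 0 : -1;	/* -1: timed out */
}
#endif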
6037 
6038 /*
6039  * wm_gmii_i80003_readreg:	[mii interface function]
6040  *
6041  *	Read a PHY register on the kumeran
6042  *	Read a PHY register on the kumeran bus.
6043  * This could be handled by the PHY layer if we didn't have to lock the
6044  * resource ...
6045 static int
6046 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6047 {
6048 	struct wm_softc *sc = device_private(self);
6049 	int sem;
6050 	int rv;
6051 
6052 	if (phy != 1) /* only one PHY on kumeran bus */
6053 		return 0;
6054 
6055 	sem = swfwphysem[sc->sc_funcid];
6056 	if (wm_get_swfw_semaphore(sc, sem)) {
6057 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6058 		    __func__);
6059 		return 0;
6060 	}
6061 
6062 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6063 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6064 		    reg >> GG82563_PAGE_SHIFT);
6065 	} else {
6066 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6067 		    reg >> GG82563_PAGE_SHIFT);
6068 	}
6069 	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
6070 	delay(200);
6071 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6072 	delay(200);
6073 
6074 	wm_put_swfw_semaphore(sc, sem);
6075 	return rv;
6076 }
6077 
6078 /*
6079  * wm_gmii_i80003_writereg:	[mii interface function]
6080  *
6081  *	Write a PHY register on the kumeran bus.
6082  * This could be handled by the PHY layer if we didn't have to lock the
6083  * resource ...
6084  */
6085 static void
6086 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6087 {
6088 	struct wm_softc *sc = device_private(self);
6089 	int sem;
6090 
6091 	if (phy != 1) /* only one PHY on kumeran bus */
6092 		return;
6093 
6094 	sem = swfwphysem[sc->sc_funcid];
6095 	if (wm_get_swfw_semaphore(sc, sem)) {
6096 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6097 		    __func__);
6098 		return;
6099 	}
6100 
6101 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6102 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6103 		    reg >> GG82563_PAGE_SHIFT);
6104 	} else {
6105 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6106 		    reg >> GG82563_PAGE_SHIFT);
6107 	}
6108 	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
6109 	delay(200);
6110 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6111 	delay(200);
6112 
6113 	wm_put_swfw_semaphore(sc, sem);
6114 }
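
/*
 * Sketch of the paged-register convention the two i80003 accessors
 * above assume: callers encode the GG82563 page number in the bits
 * above GG82563_PAGE_SHIFT and the in-page offset below it.  The
 * GG82563_REG() macro here is hypothetical, shown only to make the
 * encoding explicit.
 */
#if 0
#define	GG82563_REG(page, off)	(((page) << GG82563_PAGE_SHIFT) | (off))
	/* e.g.: wm_gmii_i80003_readreg(self, 1, GG82563_REG(0, MII_BMSR)); */
#endif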
6115 
6116 /*
6117  * wm_gmii_bm_readreg:	[mii interface function]
6118  *
6119  *	Read a PHY register on the BM PHY.
6120  * This could be handled by the PHY layer if we didn't have to lock the
6121  * resource ...
6122  */
6123 static int
6124 wm_gmii_bm_readreg(device_t self, int phy, int reg)
6125 {
6126 	struct wm_softc *sc = device_private(self);
6127 	int sem;
6128 	int rv;
6129 
6130 	sem = swfwphysem[sc->sc_funcid];
6131 	if (wm_get_swfw_semaphore(sc, sem)) {
6132 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6133 		    __func__);
6134 		return 0;
6135 	}
6136 
6137 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6138 		if (phy == 1)
6139 			wm_gmii_i82544_writereg(self, phy, 0x1f,
6140 			    reg);
6141 		else
6142 			wm_gmii_i82544_writereg(self, phy,
6143 			    GG82563_PHY_PAGE_SELECT,
6144 			    reg >> GG82563_PAGE_SHIFT);
6145 	}
6146 
6147 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6148 	wm_put_swfw_semaphore(sc, sem);
6149 	return rv;
6150 }
6151 
6152 /*
6153  * wm_gmii_bm_writereg:	[mii interface function]
6154  *
6155  *	Write a PHY register on the BM PHY.
6156  * This could be handled by the PHY layer if we didn't have to lock the
6157  * resource ...
6158  */
6159 static void
6160 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6161 {
6162 	struct wm_softc *sc = device_private(self);
6163 	int sem;
6164 
6165 	sem = swfwphysem[sc->sc_funcid];
6166 	if (wm_get_swfw_semaphore(sc, sem)) {
6167 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6168 		    __func__);
6169 		return;
6170 	}
6171 
6172 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6173 		if (phy == 1)
6174 			wm_gmii_i82544_writereg(self, phy, 0x1f,
6175 			    reg);
6176 		else
6177 			wm_gmii_i82544_writereg(self, phy,
6178 			    GG82563_PHY_PAGE_SELECT,
6179 			    reg >> GG82563_PAGE_SHIFT);
6180 	}
6181 
6182 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6183 	wm_put_swfw_semaphore(sc, sem);
6184 }
6185 
6186 static void
6187 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
6188 {
6189 	struct wm_softc *sc = device_private(self);
6190 	uint16_t regnum = BM_PHY_REG_NUM(offset);
6191 	uint16_t wuce;
6192 
6193 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
6194 	if (sc->sc_type == WM_T_PCH) {
6195 		/* XXX the e1000 driver does nothing here... why? */
6196 	}
6197 
6198 	/* Set page 769 */
6199 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6200 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6201 
6202 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
6203 
6204 	wuce &= ~BM_WUC_HOST_WU_BIT;
6205 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
6206 	    wuce | BM_WUC_ENABLE_BIT);
6207 
6208 	/* Select page 800 */
6209 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6210 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
6211 
6212 	/* Write page 800 */
6213 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
6214 
6215 	if (rd)
6216 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
6217 	else
6218 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
6219 
6220 	/* Set page 769 */
6221 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6222 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6223 
6224 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
6225 }
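
/*
 * Usage sketch for the helper above, assuming the BM_WUC register used
 * elsewhere in this file: the rd argument selects a read (1) or write
 * (0) of the page-800 wakeup register.  Illustrative only; the driver
 * itself reaches this helper through wm_gmii_hv_readreg()/_writereg().
 */
#if 0
	int16_t wuc;

	wm_access_phy_wakeup_reg_bm(self, BM_WUC, &wuc, 1);	/* read */
#endif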
6226 
6227 /*
6228  * wm_gmii_hv_readreg:	[mii interface function]
6229  *
6230  *	Read a PHY register on the HV PHY (PCH).
6231  * This could be handled by the PHY layer if we didn't have to lock the
6232  * resource ...
6233  */
6234 static int
6235 wm_gmii_hv_readreg(device_t self, int phy, int reg)
6236 {
6237 	struct wm_softc *sc = device_private(self);
6238 	uint16_t page = BM_PHY_REG_PAGE(reg);
6239 	uint16_t regnum = BM_PHY_REG_NUM(reg);
6240 	uint16_t val;
6241 	int rv;
6242 
6243 	if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6244 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6245 		    __func__);
6246 		return 0;
6247 	}
6248 
6249 	/* XXX Workaround failure in MDIO access while cable is disconnected */
6250 	if (sc->sc_phytype == WMPHY_82577) {
6251 		/* XXX must write */
6252 	}
6253 
6254 	/* Page 800 works differently than the rest so it has its own func */
6255 	if (page == BM_WUC_PAGE) {
6256 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
6257 		return val;
6258 	}
6259 
6260 	/*
6261 	 * Pages lower than 768 work differently than the rest, so they
6262 	 * would need their own function (not implemented here)
6263 	 */
6264 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6265 		printf("gmii_hv_readreg!!!\n");
6266 		return 0;
6267 	}
6268 
6269 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6270 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6271 		    page << BME1000_PAGE_SHIFT);
6272 	}
6273 
6274 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
6275 	wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6276 	return rv;
6277 }
6278 
6279 /*
6280  * wm_gmii_hv_writereg:	[mii interface function]
6281  *
6282  *	Write a PHY register on the HV PHY (PCH).
6283  * This could be handled by the PHY layer if we didn't have to lock the
6284  * resource ...
6285  */
6286 static void
6287 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
6288 {
6289 	struct wm_softc *sc = device_private(self);
6290 	uint16_t page = BM_PHY_REG_PAGE(reg);
6291 	uint16_t regnum = BM_PHY_REG_NUM(reg);
6292 
6293 	if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6294 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6295 		    __func__);
6296 		return;
6297 	}
6298 
6299 	/* XXX Workaround failure in MDIO access while cable is disconnected */
6300 
6301 	/* Page 800 works differently than the rest so it has its own func */
6302 	if (page == BM_WUC_PAGE) {
6303 		uint16_t tmp;
6304 
6305 		tmp = val;
6306 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
6307 		return;
6308 	}
6309 
6310 	/*
6311 	 * Pages lower than 768 work differently than the rest, so they
6312 	 * would need their own function (not implemented here)
6313 	 */
6314 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6315 		printf("gmii_hv_writereg!!!\n");
6316 		return;
6317 	}
6318 
6319 	/*
6320 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
6321 	 * Power Down (whenever bit 11 of the PHY control register is set)
6322 	 */
6323 
6324 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6325 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6326 		    page << BME1000_PAGE_SHIFT);
6327 	}
6328 
6329 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
6330 	wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6331 }
6332 
6333 /*
6334  * wm_sgmii_readreg:	[mii interface function]
6335  *
6336  *	Read a PHY register on the SGMII
6337  * This could be handled by the PHY layer if we didn't have to lock the
6338  * resource ...
6339  */
6340 static int
6341 wm_sgmii_readreg(device_t self, int phy, int reg)
6342 {
6343 	struct wm_softc *sc = device_private(self);
6344 	uint32_t i2ccmd;
6345 	int i, rv;
6346 
6347 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
6348 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6349 		    __func__);
6350 		return 0;
6351 	}
6352 
6353 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
6354 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
6355 	    | I2CCMD_OPCODE_READ;
6356 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
6357 
6358 	/* Poll the ready bit */
6359 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
6360 		delay(50);
6361 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
6362 		if (i2ccmd & I2CCMD_READY)
6363 			break;
6364 	}
6365 	if ((i2ccmd & I2CCMD_READY) == 0)
6366 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
6367 	if ((i2ccmd & I2CCMD_ERROR) != 0)
6368 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
6369 
6370 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
6371 
6372 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6373 	return rv;
6374 }
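
/*
 * The byte swap computing rv above reflects that I2CCMD returns the
 * PHY data big-endian in its low 16 bits.  The same operation as a
 * helper; the name is hypothetical.
 */
#if 0
static inline uint16_t
wm_i2ccmd_data(uint32_t i2ccmd)
{

	/* Swap the two data bytes into host order. */
	return ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
}
#endif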
6375 
6376 /*
6377  * wm_sgmii_writereg:	[mii interface function]
6378  *
6379  *	Write a PHY register on the SGMII.
6380  * This could be handled by the PHY layer if we didn't have to lock the
6381  * resource ...
6382  */
6383 static void
6384 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
6385 {
6386 	struct wm_softc *sc = device_private(self);
6387 	uint32_t i2ccmd;
6388 	int i;
6389 
6390 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
6391 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6392 		    __func__);
6393 		return;
6394 	}
6395 
6396 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
6397 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
6398 	    | I2CCMD_OPCODE_WRITE;
6399 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
6400 
6401 	/* Poll the ready bit */
6402 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
6403 		delay(50);
6404 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
6405 		if (i2ccmd & I2CCMD_READY)
6406 			break;
6407 	}
6408 	if ((i2ccmd & I2CCMD_READY) == 0)
6409 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
6410 	if ((i2ccmd & I2CCMD_ERROR) != 0)
6411 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
6412 
6413 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6414 }
6415 
6416 /*
6417  * wm_gmii_statchg:	[mii interface function]
6418  *
6419  *	Callback from MII layer when media changes.
6420  */
6421 static void
6422 wm_gmii_statchg(device_t self)
6423 {
6424 	struct wm_softc *sc = device_private(self);
6425 	struct mii_data *mii = &sc->sc_mii;
6426 
6427 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
6428 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6429 	sc->sc_fcrtl &= ~FCRTL_XONE;
6430 
6431 	/*
6432 	 * Get flow control negotiation result.
6433 	 */
6434 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
6435 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
6436 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
6437 		mii->mii_media_active &= ~IFM_ETH_FMASK;
6438 	}
6439 
6440 	if (sc->sc_flowflags & IFM_FLOW) {
6441 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
6442 			sc->sc_ctrl |= CTRL_TFCE;
6443 			sc->sc_fcrtl |= FCRTL_XONE;
6444 		}
6445 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
6446 			sc->sc_ctrl |= CTRL_RFCE;
6447 	}
6448 
6449 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
6450 		DPRINTF(WM_DEBUG_LINK,
6451 		    ("%s: LINK: statchg: FDX\n", device_xname(sc->sc_dev)));
6452 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6453 	} else {
6454 		DPRINTF(WM_DEBUG_LINK,
6455 		    ("%s: LINK: statchg: HDX\n", device_xname(sc->sc_dev)));
6456 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6457 	}
6458 
6459 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6460 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6461 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
6462 						 : WMREG_FCRTL, sc->sc_fcrtl);
6463 	if (sc->sc_type == WM_T_80003) {
6464 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
6465 		case IFM_1000_T:
6466 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
6467 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
6468 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
6469 			break;
6470 		default:
6471 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
6472 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
6473 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
6474 			break;
6475 		}
6476 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
6477 	}
6478 }
6479 
6480 /*
6481  * wm_kmrn_readreg:
6482  *
6483  *	Read a kumeran register
6484  */
6485 static int
6486 wm_kmrn_readreg(struct wm_softc *sc, int reg)
6487 {
6488 	int rv;
6489 
6490 	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0) {
6491 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
6492 			aprint_error_dev(sc->sc_dev,
6493 			    "%s: failed to get semaphore\n", __func__);
6494 			return 0;
6495 		}
6496 	} else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0) {
6497 		if (wm_get_swfwhw_semaphore(sc)) {
6498 			aprint_error_dev(sc->sc_dev,
6499 			    "%s: failed to get semaphore\n", __func__);
6500 			return 0;
6501 		}
6502 	}
6503 
6504 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
6505 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
6506 	    KUMCTRLSTA_REN);
6507 	delay(2);
6508 
6509 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
6510 
6511 	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0)
6512 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
6513 	else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0)
6514 		wm_put_swfwhw_semaphore(sc);
6515 
6516 	return rv;
6517 }
6518 
6519 /*
6520  * wm_kmrn_writereg:
6521  *
6522  *	Write a kumeran register
6523  */
6524 static void
6525 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
6526 {
6527 
6528 	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0) {
6529 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
6530 			aprint_error_dev(sc->sc_dev,
6531 			    "%s: failed to get semaphore\n", __func__);
6532 			return;
6533 		}
6534 	} else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0) {
6535 		if (wm_get_swfwhw_semaphore(sc)) {
6536 			aprint_error_dev(sc->sc_dev,
6537 			    "%s: failed to get semaphore\n", __func__);
6538 			return;
6539 		}
6540 	}
6541 
6542 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
6543 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
6544 	    (val & KUMCTRLSTA_MASK));
6545 
6546 	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0)
6547 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
6548 	else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0)
6549 		wm_put_swfwhw_semaphore(sc);
6550 }
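
/*
 * Usage sketch for the Kumeran accessors above: a read-modify-write
 * of the diagnostic register, much like the downshift workaround
 * later in this file.  Illustrative only.
 */
#if 0
	int diag;

	diag = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG,
	    diag | KUMCTRLSTA_DIAG_NELPBK);
#endif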
6551 
6552 static int
6553 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
6554 {
6555 	uint32_t eecd = 0;
6556 
6557 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
6558 	    || sc->sc_type == WM_T_82583) {
6559 		eecd = CSR_READ(sc, WMREG_EECD);
6560 
6561 		/* Isolate bits 15 & 16 */
6562 		eecd = ((eecd >> 15) & 0x03);
6563 
6564 		/* If both bits are set, device is Flash type */
6565 		if (eecd == 0x03)
6566 			return 0;
6567 	}
6568 	return 1;
6569 }
6570 
6571 static int
6572 wm_get_swsm_semaphore(struct wm_softc *sc)
6573 {
6574 	int32_t timeout;
6575 	uint32_t swsm;
6576 
6577 	/* Get the FW semaphore. */
6578 	timeout = 1000 + 1; /* XXX */
6579 	while (timeout) {
6580 		swsm = CSR_READ(sc, WMREG_SWSM);
6581 		swsm |= SWSM_SWESMBI;
6582 		CSR_WRITE(sc, WMREG_SWSM, swsm);
6583 		/* if we managed to set the bit we got the semaphore. */
6584 		swsm = CSR_READ(sc, WMREG_SWSM);
6585 		if (swsm & SWSM_SWESMBI)
6586 			break;
6587 
6588 		delay(50);
6589 		timeout--;
6590 	}
6591 
6592 	if (timeout == 0) {
6593 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM semaphore\n");
6594 		/* Release semaphores */
6595 		wm_put_swsm_semaphore(sc);
6596 		return 1;
6597 	}
6598 	return 0;
6599 }
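
/*
 * The loop above relies on a set-and-verify handshake: software sets
 * SWESMBI and owns the semaphore only if the bit reads back as set
 * (the write does not stick while the other side holds it).  The
 * test in isolation, as a sketch:
 */
#if 0
	int owned;

	CSR_WRITE(sc, WMREG_SWSM, CSR_READ(sc, WMREG_SWSM) | SWSM_SWESMBI);
	owned = (CSR_READ(sc, WMREG_SWSM) & SWSM_SWESMBI) != 0;
#endif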
6600 
6601 static void
6602 wm_put_swsm_semaphore(struct wm_softc *sc)
6603 {
6604 	uint32_t swsm;
6605 
6606 	swsm = CSR_READ(sc, WMREG_SWSM);
6607 	swsm &= ~(SWSM_SWESMBI);
6608 	CSR_WRITE(sc, WMREG_SWSM, swsm);
6609 }
6610 
6611 static int
6612 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
6613 {
6614 	uint32_t swfw_sync;
6615 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
6616 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
6617 	int timeout;
6618 
6619 	for (timeout = 0; timeout < 200; timeout++) {
6620 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
6621 			if (wm_get_swsm_semaphore(sc)) {
6622 				aprint_error_dev(sc->sc_dev,
6623 				    "%s: failed to get semaphore\n",
6624 				    __func__);
6625 				return 1;
6626 			}
6627 		}
6628 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
6629 		if ((swfw_sync & (swmask | fwmask)) == 0) {
6630 			swfw_sync |= swmask;
6631 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
6632 			if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6633 				wm_put_swsm_semaphore(sc);
6634 			return 0;
6635 		}
6636 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6637 			wm_put_swsm_semaphore(sc);
6638 		delay(5000);
6639 	}
6640 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
6641 	    device_xname(sc->sc_dev), mask, swfw_sync);
6642 	return 1;
6643 }
6644 
6645 static void
6646 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
6647 {
6648 	uint32_t swfw_sync;
6649 
6650 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
6651 		while (wm_get_swsm_semaphore(sc) != 0)
6652 			continue;
6653 	}
6654 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
6655 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
6656 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
6657 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6658 		wm_put_swsm_semaphore(sc);
6659 }
6660 
6661 static int
6662 wm_get_swfwhw_semaphore(struct wm_softc *sc)
6663 {
6664 	uint32_t ext_ctrl;
6665 	int timeout;
6666 
6667 	for (timeout = 0; timeout < 200; timeout++) {
6668 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6669 		ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
6670 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
6671 
6672 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6673 		if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
6674 			return 0;
6675 		delay(5000);
6676 	}
6677 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
6678 	    device_xname(sc->sc_dev), ext_ctrl);
6679 	return 1;
6680 }
6681 
6682 static void
6683 wm_put_swfwhw_semaphore(struct wm_softc *sc)
6684 {
6685 	uint32_t ext_ctrl;
6686 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6687 	ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
6688 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
6689 }
6690 
6691 static int
6692 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
6693 {
6694 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
6695 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
6696 
6697 	if ((sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
6698 		/* Value of bit 22 corresponds to the flash bank we're on. */
6699 		*bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
6700 	} else {
6701 		uint8_t bank_high_byte;
6702 		wm_read_ich8_byte(sc, act_offset, &bank_high_byte);
6703 		if ((bank_high_byte & 0xc0) == 0x80)
6704 			*bank = 0;
6705 		else {
6706 			wm_read_ich8_byte(sc, act_offset + bank1_offset,
6707 			    &bank_high_byte);
6708 			if ((bank_high_byte & 0xc0) == 0x80)
6709 				*bank = 1;
6710 			else {
6711 				aprint_error_dev(sc->sc_dev,
6712 				    "EEPROM not present\n");
6713 				return -1;
6714 			}
6715 		}
6716 	}
6717 
6718 	return 0;
6719 }
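
/*
 * The signature test above, isolated as a predicate: a valid ICH8 NVM
 * bank has the top two bits of its signature byte reading 10b (0x80
 * after masking with 0xc0).  The helper name is hypothetical.
 */
#if 0
static bool
wm_ich8_bank_sig_valid(uint8_t sig_byte)
{

	return (sig_byte & 0xc0) == 0x80;
}
#endif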
6720 
6721 /******************************************************************************
6722  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
6723  * register.
6724  *
6725  * sc - Struct containing variables accessed by shared code
6726  * offset - offset of word in the EEPROM to read
6727  * data - word read from the EEPROM
6728  * words - number of words to read
6729  *****************************************************************************/
6730 static int
6731 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
6732 {
6733 	int32_t  error = 0;
6734 	uint32_t flash_bank = 0;
6735 	uint32_t act_offset = 0;
6736 	uint32_t bank_offset = 0;
6737 	uint16_t word = 0;
6738 	uint16_t i = 0;
6739 
6740 	/*
6741 	 * We need to know which is the valid flash bank.  In the event
6742 	 * that we didn't allocate eeprom_shadow_ram, we may not be managing
6743 	 * flash_bank, so it cannot be trusted; update it on each read.
6744 	 */
6745 	error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
6746 	if (error) {
6747 		aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
6748 		    __func__);
6749 		return error;
6750 	}
6751 
6752 	/* Adjust offset appropriately if we're on bank 1 - adjust for word size */
6753 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
6754 
6755 	error = wm_get_swfwhw_semaphore(sc);
6756 	if (error) {
6757 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6758 		    __func__);
6759 		return error;
6760 	}
6761 
6762 	for (i = 0; i < words; i++) {
6763 		/* The NVM part needs a byte offset, hence * 2 */
6764 		act_offset = bank_offset + ((offset + i) * 2);
6765 		error = wm_read_ich8_word(sc, act_offset, &word);
6766 		if (error) {
6767 			aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
6768 			    __func__);
6769 			break;
6770 		}
6771 		data[i] = word;
6772 	}
6773 
6774 	wm_put_swfwhw_semaphore(sc);
6775 	return error;
6776 }
6777 
6778 /******************************************************************************
6779  * This function does initial flash setup so that a new read/write/erase cycle
6780  * can be started.
6781  *
6782  * sc - The pointer to the hw structure
6783  ****************************************************************************/
6784 static int32_t
6785 wm_ich8_cycle_init(struct wm_softc *sc)
6786 {
6787 	uint16_t hsfsts;
6788 	int32_t error = 1;
6789 	int32_t i     = 0;
6790 
6791 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6792 
6793 	/* Check the Flash Descriptor Valid bit in Hw status */
6794 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
6795 		return error;
6796 	}
6797 
6798 	/* Clear FCERR in Hw status by writing 1 */
6799 	/* Clear DAEL in Hw status by writing a 1 */
6800 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
6801 
6802 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6803 
6804 	/*
6805 	 * Either we should have a hardware SPI cycle-in-progress bit to check
6806 	 * against before starting a new cycle, or the FDONE bit should be
6807 	 * changed in the hardware so that it is 1 after hardware reset, which
6808 	 * could then be used to tell whether a cycle is in progress or has
6809 	 * been completed.  We should also have some software semaphore
6810 	 * mechanism to guard FDONE or the cycle-in-progress bit so that
6811 	 * accesses by two threads are serialized, or some way to keep two
6812 	 * threads from starting a cycle at the same time.
6813 	 */
6814 
6815 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
6816 		/*
6817 		 * There is no cycle running at present, so we can start a
6818 		 * cycle
6819 		 */
6820 
6821 		/* Begin by setting Flash Cycle Done. */
6822 		hsfsts |= HSFSTS_DONE;
6823 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6824 		error = 0;
6825 	} else {
6826 		/*
6827 		 * otherwise poll for sometime so the current cycle has a
6828 		 * Otherwise, poll for some time so the current cycle has a
6829 		 */
6830 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
6831 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6832 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
6833 				error = 0;
6834 				break;
6835 			}
6836 			delay(1);
6837 		}
6838 		if (error == 0) {
6839 			/*
6840 			 * The previous cycle finished in time; now set the
6841 			 * Flash Cycle Done bit.
6842 			 */
6843 			hsfsts |= HSFSTS_DONE;
6844 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6845 		}
6846 	}
6847 	return error;
6848 }
6849 
6850 /******************************************************************************
6851  * This function starts a flash cycle and waits for its completion
6852  *
6853  * sc - The pointer to the hw structure
6854  ****************************************************************************/
6855 static int32_t
6856 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
6857 {
6858 	uint16_t hsflctl;
6859 	uint16_t hsfsts;
6860 	int32_t error = 1;
6861 	uint32_t i = 0;
6862 
6863 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
6864 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
6865 	hsflctl |= HSFCTL_GO;
6866 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
6867 
6868 	/* wait till FDONE bit is set to 1 */
6869 	do {
6870 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6871 		if (hsfsts & HSFSTS_DONE)
6872 			break;
6873 		delay(1);
6874 		i++;
6875 	} while (i < timeout);
6876 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
6877 		error = 0;
6878 
6879 	return error;
6880 }
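
/*
 * The two helpers above are used as a pair: wm_ich8_cycle_init()
 * clears error state and claims the cycle, wm_ich8_flash_cycle()
 * starts the programmed cycle and waits for FDONE.  A minimal sketch
 * of that pairing, as wm_read_ich8_data() below performs it:
 */
#if 0
	if (wm_ich8_cycle_init(sc) == 0) {
		/* ... program the HSFCTL cycle type and FADDR here ... */
		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
	}
#endif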
6881 
6882 /******************************************************************************
6883  * Reads a byte or word from the NVM using the ICH8 flash access registers.
6884  *
6885  * sc - The pointer to the hw structure
6886  * index - The index of the byte or word to read.
6887  * size - Size of data to read, 1=byte 2=word
6888  * data - Pointer to the word to store the value read.
6889  *****************************************************************************/
6890 static int32_t
6891 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
6892     uint32_t size, uint16_t* data)
6893 {
6894 	uint16_t hsfsts;
6895 	uint16_t hsflctl;
6896 	uint32_t flash_linear_address;
6897 	uint32_t flash_data = 0;
6898 	int32_t error = 1;
6899 	int32_t count = 0;
6900 
6901 	if (size < 1 || size > 2 || data == NULL ||
6902 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
6903 		return error;
6904 
6905 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
6906 	    sc->sc_ich8_flash_base;
6907 
6908 	do {
6909 		delay(1);
6910 		/* Steps */
6911 		error = wm_ich8_cycle_init(sc);
6912 		if (error)
6913 			break;
6914 
6915 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
6916 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
6917 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
6918 		    & HSFCTL_BCOUNT_MASK;
6919 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
6920 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
6921 
6922 		/*
6923 		 * Write the last 24 bits of index into Flash Linear address
6924 		 * field in Flash Address
6925 		 */
6926 		/* TODO: TBD maybe check the index against the size of flash */
6927 
6928 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
6929 
6930 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
6931 
6932 		/*
6933 		 * If FCERR is set, clear it and retry the whole sequence a
6934 		 * few more times; otherwise read the data out of (shift in)
6935 		 * Flash Data0.  The data is returned least-significant byte
6936 		 * first.
6937 		 */
6938 		if (error == 0) {
6939 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
6940 			if (size == 1)
6941 				*data = (uint8_t)(flash_data & 0x000000FF);
6942 			else if (size == 2)
6943 				*data = (uint16_t)(flash_data & 0x0000FFFF);
6944 			break;
6945 		} else {
6946 			/*
6947 			 * If we've gotten here, then things are probably
6948 			 * completely hosed, but if the error condition is
6949 			 * detected, it won't hurt to give it another try...
6950 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
6951 			 */
6952 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6953 			if (hsfsts & HSFSTS_ERR) {
6954 				/* Repeat for some time before giving up. */
6955 				continue;
6956 			} else if ((hsfsts & HSFSTS_DONE) == 0)
6957 				break;
6958 		}
6959 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
6960 
6961 	return error;
6962 }
6963 
6964 /******************************************************************************
6965  * Reads a single byte from the NVM using the ICH8 flash access registers.
6966  *
6967  * sc - pointer to wm_hw structure
6968  * index - The index of the byte to read.
6969  * data - Pointer to a byte to store the value read.
6970  *****************************************************************************/
6971 static int32_t
6972 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
6973 {
6974 	int32_t status;
6975 	uint16_t word = 0;
6976 
6977 	status = wm_read_ich8_data(sc, index, 1, &word);
6978 	if (status == 0)
6979 		*data = (uint8_t)word;
6980 	else
6981 		*data = 0;
6982 
6983 	return status;
6984 }
6985 
6986 /******************************************************************************
6987  * Reads a word from the NVM using the ICH8 flash access registers.
6988  *
6989  * sc - pointer to wm_hw structure
6990  * index - The starting byte index of the word to read.
6991  * data - Pointer to a word to store the value read.
6992  *****************************************************************************/
6993 static int32_t
6994 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
6995 {
6996 	int32_t status;
6997 
6998 	status = wm_read_ich8_data(sc, index, 2, data);
6999 	return status;
7000 }
7001 
7002 static int
7003 wm_check_mng_mode(struct wm_softc *sc)
7004 {
7005 	int rv;
7006 
7007 	switch (sc->sc_type) {
7008 	case WM_T_ICH8:
7009 	case WM_T_ICH9:
7010 	case WM_T_ICH10:
7011 	case WM_T_PCH:
7012 	case WM_T_PCH2:
7013 		rv = wm_check_mng_mode_ich8lan(sc);
7014 		break;
7015 	case WM_T_82574:
7016 	case WM_T_82583:
7017 		rv = wm_check_mng_mode_82574(sc);
7018 		break;
7019 	case WM_T_82571:
7020 	case WM_T_82572:
7021 	case WM_T_82573:
7022 	case WM_T_80003:
7023 		rv = wm_check_mng_mode_generic(sc);
7024 		break;
7025 	default:
7026 		/* nothing to do */
7027 		rv = 0;
7028 		break;
7029 	}
7030 
7031 	return rv;
7032 }
7033 
7034 static int
7035 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
7036 {
7037 	uint32_t fwsm;
7038 
7039 	fwsm = CSR_READ(sc, WMREG_FWSM);
7040 
7041 	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
7042 		return 1;
7043 
7044 	return 0;
7045 }
7046 
7047 static int
7048 wm_check_mng_mode_82574(struct wm_softc *sc)
7049 {
7050 	uint16_t data;
7051 
7052 	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
7053 
7054 	if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
7055 		return 1;
7056 
7057 	return 0;
7058 }
7059 
7060 static int
7061 wm_check_mng_mode_generic(struct wm_softc *sc)
7062 {
7063 	uint32_t fwsm;
7064 
7065 	fwsm = CSR_READ(sc, WMREG_FWSM);
7066 
7067 	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
7068 		return 1;
7069 
7070 	return 0;
7071 }
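
/*
 * Sketch of the FWSM mode test shared by the management checks above:
 * the firmware mode field is masked out of FWSM and compared against
 * a mode code shifted into place.
 */
#if 0
	uint32_t mode = CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK;
	bool iamt = (mode == (MNG_IAMT_MODE << FWSM_MODE_SHIFT));
#endif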
7072 
7073 static int
7074 wm_enable_mng_pass_thru(struct wm_softc *sc)
7075 {
7076 	uint32_t manc, fwsm, factps;
7077 
7078 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
7079 		return 0;
7080 
7081 	manc = CSR_READ(sc, WMREG_MANC);
7082 
7083 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
7084 		device_xname(sc->sc_dev), manc));
7085 	if (((manc & MANC_RECV_TCO_EN) == 0)
7086 	    || ((manc & MANC_EN_MAC_ADDR_FILTER) == 0))
7087 		return 0;
7088 
7089 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
7090 		fwsm = CSR_READ(sc, WMREG_FWSM);
7091 		factps = CSR_READ(sc, WMREG_FACTPS);
7092 		if (((factps & FACTPS_MNGCG) == 0)
7093 		    && ((fwsm & FWSM_MODE_MASK)
7094 			== (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
7095 			return 1;
7096 	} else if (((manc & MANC_SMBUS_EN) != 0)
7097 	    && ((manc & MANC_ASF_EN) == 0))
7098 		return 1;
7099 
7100 	return 0;
7101 }
7102 
7103 static int
7104 wm_check_reset_block(struct wm_softc *sc)
7105 {
7106 	uint32_t reg;
7107 
7108 	switch (sc->sc_type) {
7109 	case WM_T_ICH8:
7110 	case WM_T_ICH9:
7111 	case WM_T_ICH10:
7112 	case WM_T_PCH:
7113 	case WM_T_PCH2:
7114 		reg = CSR_READ(sc, WMREG_FWSM);
7115 		if ((reg & FWSM_RSPCIPHY) != 0)
7116 			return 0;
7117 		else
7118 			return -1;
7119 		break;
7120 	case WM_T_82571:
7121 	case WM_T_82572:
7122 	case WM_T_82573:
7123 	case WM_T_82574:
7124 	case WM_T_82583:
7125 	case WM_T_80003:
7126 		reg = CSR_READ(sc, WMREG_MANC);
7127 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
7128 			return -1;
7129 		else
7130 			return 0;
7131 		break;
7132 	default:
7133 		/* no problem */
7134 		break;
7135 	}
7136 
7137 	return 0;
7138 }
7139 
7140 static void
7141 wm_get_hw_control(struct wm_softc *sc)
7142 {
7143 	uint32_t reg;
7144 
7145 	switch (sc->sc_type) {
7146 	case WM_T_82573:
7147 		reg = CSR_READ(sc, WMREG_SWSM);
7148 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
7149 		break;
7150 	case WM_T_82571:
7151 	case WM_T_82572:
7152 	case WM_T_82574:
7153 	case WM_T_82583:
7154 	case WM_T_80003:
7155 	case WM_T_ICH8:
7156 	case WM_T_ICH9:
7157 	case WM_T_ICH10:
7158 	case WM_T_PCH:
7159 	case WM_T_PCH2:
7160 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
7161 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
7162 		break;
7163 	default:
7164 		break;
7165 	}
7166 }
7167 
7168 static void
7169 wm_release_hw_control(struct wm_softc *sc)
7170 {
7171 	uint32_t reg;
7172 
7173 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
7174 		return;
7175 
7176 	if (sc->sc_type == WM_T_82573) {
7177 		reg = CSR_READ(sc, WMREG_SWSM);
7178 		reg &= ~SWSM_DRV_LOAD;
7179 		CSR_WRITE(sc, WMREG_SWSM, reg);
7180 	} else {
7181 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
7182 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
7183 	}
7184 }
7185 
7186 /* XXX Currently TBI only */
7187 static int
7188 wm_check_for_link(struct wm_softc *sc)
7189 {
7190 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7191 	uint32_t rxcw;
7192 	uint32_t ctrl;
7193 	uint32_t status;
7194 	uint32_t sig;
7195 
7196 	rxcw = CSR_READ(sc, WMREG_RXCW);
7197 	ctrl = CSR_READ(sc, WMREG_CTRL);
7198 	status = CSR_READ(sc, WMREG_STATUS);
7199 
7200 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
7201 
7202 	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
7203 		device_xname(sc->sc_dev), __func__,
7204 		((ctrl & CTRL_SWDPIN(1)) == sig),
7205 		((status & STATUS_LU) != 0),
7206 		((rxcw & RXCW_C) != 0)
7207 		    ));
7208 
7209 	/*
7210 	 * SWDPIN   LU RXCW
7211 	 *      0    0    0
7212 	 *      0    0    1	(should not happen)
7213 	 *      0    1    0	(should not happen)
7214 	 *      0    1    1	(should not happen)
7215 	 *      1    0    0	Disable autonegotiation and force link-up
7216 	 *      1    0    1	got /C/ ordered sets but no link yet
7217 	 *      1    1    0	(link up)
7218 	 *      1    1    1	If IFM_AUTO, restart autonegotiation
7219 	 *
7220 	 */
7221 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
7222 	    && ((status & STATUS_LU) == 0)
7223 	    && ((rxcw & RXCW_C) == 0)) {
7224 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
7225 			__func__));
7226 		sc->sc_tbi_linkup = 0;
7227 		/* Disable auto-negotiation in the TXCW register */
7228 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
7229 
7230 		/*
7231 		 * Force link-up and also force full-duplex.
7232 		 *
7233 		 * NOTE: the hardware updates TFCE and RFCE in CTRL
7234 		 * automatically, so keep sc->sc_ctrl in sync with it.
7235 		 */
7236 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
7237 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7238 	} else if (((status & STATUS_LU) != 0)
7239 	    && ((rxcw & RXCW_C) != 0)
7240 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
7241 		sc->sc_tbi_linkup = 1;
7242 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
7243 			__func__));
7244 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7245 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
7246 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
7247 	    && ((rxcw & RXCW_C) != 0)) {
7248 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
7249 	} else {
7250 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
7251 			status));
7252 	}
7253 
7254 	return 0;
7255 }
7256 
7257 /* Work-around for 82566 Kumeran PCS lock loss */
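/*
 * After a 1Gbps link comes up, the IGP3 PHY can lose the Kumeran PCS
 * lock.  Poll the diag register, resetting the PHY up to ten times in
 * between; if the lock never returns, give up and disable gigabit
 * operation entirely.
 */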
7258 static void
7259 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
7260 {
7261 	int miistatus, active, i;
7262 	int reg;
7263 
7264 	miistatus = sc->sc_mii.mii_media_status;
7265 
7266 	/* If the link is not up, do nothing */
7267 	if ((miistatus & IFM_ACTIVE) == 0)
7268 		return;
7269 
7270 	active = sc->sc_mii.mii_media_active;
7271 
7272 	/* Nothing to do unless the link is running at 1Gbps */
7273 	if (IFM_SUBTYPE(active) != IFM_1000_T)
7274 		return;
7275 
7276 	for (i = 0; i < 10; i++) {
7277 		/* Read twice; the first read can return stale data */
7278 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
7279 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
7280 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
7281 			goto out;	/* GOOD! */
7282 
7283 		/* Reset the PHY */
7284 		wm_gmii_reset(sc);
7285 		delay(5*1000);
7286 	}
7287 
7288 	/* Disable GigE link negotiation */
7289 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
7290 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
7291 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7292 
7293 	/*
7294 	 * Call gig speed drop workaround on Gig disable before accessing
7295 	 * any PHY registers.
7296 	 */
7297 	wm_gig_downshift_workaround_ich8lan(sc);
7298 
7299 out:
7300 	return;
7301 }
7302 
7303 /* WOL from S5 stops working */
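/*
 * Per the Intel shared code, pulsing the Kumeran near-end loopback bit
 * (set, then clear) works around a gig-downshift problem that otherwise
 * breaks WOL from the S5 state on IGP3 PHYs.
 */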
7304 static void
7305 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
7306 {
7307 	uint16_t kmrn_reg;
7308 
7309 	/* Only for igp3 */
7310 	if (sc->sc_phytype == WMPHY_IGP_3) {
7311 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
7312 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
7313 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
7314 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
7315 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
7316 	}
7317 }
7318 
7319 #ifdef WM_WOL
7320 /* Power down workaround on D3 */
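/*
 * Before entering D3, force the IGP3 voltage regulator into shutdown.
 * If the first attempt does not stick, issue a full PHY reset and try
 * exactly once more.
 */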
7321 static void
7322 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
7323 {
7324 	uint32_t reg;
7325 	int i;
7326 
7327 	for (i = 0; i < 2; i++) {
7328 		/* Disable link */
7329 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
7330 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
7331 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7332 
7333 		/*
7334 		 * Call gig speed drop workaround on Gig disable before
7335 		 * accessing any PHY registers
7336 		 */
7337 		if (sc->sc_type == WM_T_ICH8)
7338 			wm_gig_downshift_workaround_ich8lan(sc);
7339 
7340 		/* Write VR power-down enable */
7341 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
7342 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
7343 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
7344 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
7345 
7346 		/* Read it back and test */
7347 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
7348 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
7349 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
7350 			break;
7351 
7352 		/* Issue PHY reset and repeat at most one more time */
7353 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7354 	}
7355 }
7356 #endif /* WM_WOL */
7357 
7358 /*
7359  * Workaround for pch's PHYs
7360  * XXX should be moved to new PHY driver?
7361  */
7362 static void
7363 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
7364 {
7365 	if (sc->sc_phytype == WMPHY_82577)
7366 		wm_set_mdio_slow_mode_hv(sc);
7367 
7368 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
7369 
7370 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
7371 
7372 	/* 82578 */
7373 	if (sc->sc_phytype == WMPHY_82578) {
7374 		/* PCH rev. < 3 */
7375 		if (sc->sc_rev < 3) {
7376 			/* XXX 6 bit shift? Why? Is it page2? */
7377 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
7378 			    0x66c0);
7379 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
7380 			    0xffff);
7381 		}
7382 
7383 		/* XXX phy rev. < 2 */
7384 	}
7385 
7386 	/* Select page 0 */
7387 
7388 	/* XXX acquire semaphore */
7389 	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
7390 	/* XXX release semaphore */
7391 
7392 	/*
7393 	 * Configure the K1 Si workaround during PHY reset, assuming the
7394 	 * link is up, so that K1 is disabled if the link runs at 1Gbps.
7395 	 */
7396 	wm_k1_gig_workaround_hv(sc, 1);
7397 }
7398 
7399 static void
7400 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
7401 {
7402 
7403 	wm_set_mdio_slow_mode_hv(sc);
7404 }
7405 
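/*
 * K1 is a power-save state of the Kumeran MAC/PHY interconnect that
 * cannot be used while a 1Gbps link is up, so it is disabled when a
 * link is assumed up and restored to the NVM default otherwise.
 */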
7406 static void
7407 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
7408 {
7409 	int k1_enable = sc->sc_nvm_k1_enabled;
7410 
7411 	/* XXX acquire semaphore */
7412 
7413 	if (link) {
7414 		k1_enable = 0;
7415 
7416 		/* Link stall fix for link up */
7417 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
7418 	} else {
7419 		/* Link stall fix for link down */
7420 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
7421 	}
7422 
7423 	wm_configure_k1_ich8lan(sc, k1_enable);
7424 
7425 	/* XXX release semaphore */
7426 }
7427 
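/*
 * Per the Intel shared code, some 82577/82578 PHY registers are only
 * accessed reliably in slow MDIO mode, so turn it on before running the
 * workarounds that need it.
 */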
7428 static void
7429 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
7430 {
7431 	uint32_t reg;
7432 
7433 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
7434 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
7435 	    reg | HV_KMRN_MDIO_SLOW);
7436 }
7437 
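/*
 * Write the new K1 setting to the Kumeran K1_CONFIG register.  While
 * the change takes effect, the MAC speed is briefly forced (FRCSPD plus
 * the SPD_BYPS bypass); the original CTRL/CTRL_EXT values are then
 * restored.
 */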
7438 static void
7439 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
7440 {
7441 	uint32_t ctrl, ctrl_ext, tmp;
7442 	uint16_t kmrn_reg;
7443 
7444 	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
7445 
7446 	if (k1_enable)
7447 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
7448 	else
7449 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
7450 
7451 	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
7452 
7453 	delay(20);
7454 
7455 	ctrl = CSR_READ(sc, WMREG_CTRL);
7456 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
7457 
7458 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
7459 	tmp |= CTRL_FRCSPD;
7460 
7461 	CSR_WRITE(sc, WMREG_CTRL, tmp);
7462 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
7463 	delay(20);
7464 
7465 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
7466 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
7467 	delay(20);
7468 }
7469 
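/*
 * Switch the PHY interconnect from SMBus to PCIe.  With no valid
 * firmware present and no reset block, toggling the LANPHYPC value
 * override power-cycles the PHY so that it comes back up in PCIe mode.
 */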
7470 static void
7471 wm_smbustopci(struct wm_softc *sc)
7472 {
7473 	uint32_t fwsm;
7474 
7475 	fwsm = CSR_READ(sc, WMREG_FWSM);
7476 	if (((fwsm & FWSM_FW_VALID) == 0)
7477 	    && ((wm_check_reset_block(sc) == 0))) {
7478 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
7479 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
7480 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7481 		delay(10);
7482 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
7483 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7484 		delay(50*1000);
7485 
7486 		/*
7487 		 * Gate automatic PHY configuration by hardware on non-managed
7488 		 * 82579
7489 		 */
7490 		if (sc->sc_type == WM_T_PCH2)
7491 			wm_gate_hw_phy_config_ich8lan(sc, 1);
7492 	}
7493 }
7494 
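/*
 * Bump the PCIe completion timeout if it is still at the hardware
 * default: to 16ms via the DCSR2 config register when completion
 * timeout capability version 2 is present, otherwise to the 10ms range
 * via GCR.  Completion timeout resend is disabled either way.
 */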
7495 static void
7496 wm_set_pcie_completion_timeout(struct wm_softc *sc)
7497 {
7498 	uint32_t gcr;
7499 	pcireg_t ctrl2;
7500 
7501 	gcr = CSR_READ(sc, WMREG_GCR);
7502 
7503 	/* Only take action if timeout value is defaulted to 0 */
7504 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
7505 		goto out;
7506 
7507 	if ((gcr & GCR_CAP_VER2) == 0) {
7508 		gcr |= GCR_CMPL_TMOUT_10MS;
7509 		goto out;
7510 	}
7511 
7512 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
7513 	    sc->sc_pcixe_capoff + PCI_PCIE_DCSR2);
7514 	ctrl2 |= WM_PCI_PCIE_DCSR2_16MS;
7515 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
7516 	    sc->sc_pcixe_capoff + PCI_PCIE_DCSR2, ctrl2);
7517 
7518 out:
7519 	/* Disable completion timeout resend */
7520 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
7521 
7522 	CSR_WRITE(sc, WMREG_GCR, gcr);
7523 }
7524 
7525 /* Special case: the 82575 needs this manual init sequence after reset */
7526 static void
7527 wm_reset_init_script_82575(struct wm_softc *sc)
7528 {
7529 	/*
7530 	 * remark: this is untested code - we have no board without EEPROM
7531 	 *  same setup as mentioned in the FreeBSD driver for the i82575
7532 	 */
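	/*
	 * Each wm_82575_write_8bit_ctlr_reg() call below writes one
	 * (address, value) pair through an indirect 8-bit register
	 * window; the values themselves are opaque magic taken from
	 * that driver.
	 */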
7533 
7534 	/* SerDes configuration via SERDESCTRL */
7535 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
7536 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
7537 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
7538 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
7539 
7540 	/* CCM configuration via CCMCTL register */
7541 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
7542 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
7543 
7544 	/* PCIe lanes configuration */
7545 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
7546 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
7547 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
7548 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
7549 
7550 	/* PCIe PLL Configuration */
7551 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
7552 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
7553 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
7554 }
7555 
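/*
 * While the driver manages the interface, firmware must not intercept
 * ARP, and the management ports selected in MANC2H below are forwarded
 * up to the host as well as to the firmware.
 */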
7556 static void
7557 wm_init_manageability(struct wm_softc *sc)
7558 {
7559 
7560 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
7561 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
7562 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
7563 
7564 		/* disable hardware interception of ARP */
7565 		manc &= ~MANC_ARP_EN;
7566 
7567 		/* enable receiving management packets to the host */
7568 		if (sc->sc_type >= WM_T_82571) {
7569 			manc |= MANC_EN_MNG2HOST;
7570 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
7571 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
7572 
7573 		}
7574 
7575 		CSR_WRITE(sc, WMREG_MANC, manc);
7576 	}
7577 }
7578 
7579 static void
7580 wm_release_manageability(struct wm_softc *sc)
7581 {
7582 
7583 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
7584 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
7585 
7586 		if (sc->sc_type >= WM_T_82571)
7587 			manc &= ~MANC_EN_MNG2HOST;
7588 
7589 		CSR_WRITE(sc, WMREG_MANC, manc);
7590 	}
7591 }
7592 
7593 static void
7594 wm_get_wakeup(struct wm_softc *sc)
7595 {
7596 
7597 	/* Step 0: decide HAS_AMT, ARC_SUBSYS_VALID and ASF_FIRMWARE_PRES */
7598 	switch (sc->sc_type) {
7599 	case WM_T_82573:
7600 	case WM_T_82583:
7601 		sc->sc_flags |= WM_F_HAS_AMT;
7602 		/* FALLTHROUGH */
7603 	case WM_T_80003:
7604 	case WM_T_82541:
7605 	case WM_T_82547:
7606 	case WM_T_82571:
7607 	case WM_T_82572:
7608 	case WM_T_82574:
7609 	case WM_T_82575:
7610 	case WM_T_82576:
7611 #if 0 /* XXX */
7612 	case WM_T_82580:
7613 	case WM_T_82580ER:
7614 #endif
7615 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
7616 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
7617 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
7618 		break;
7619 	case WM_T_ICH8:
7620 	case WM_T_ICH9:
7621 	case WM_T_ICH10:
7622 	case WM_T_PCH:
7623 	case WM_T_PCH2:
7624 		sc->sc_flags |= WM_F_HAS_AMT;
7625 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
7626 		break;
7627 	default:
7628 		break;
7629 	}
7630 
7631 	/* Step 1: set HAS_MANAGE if management pass-through can be enabled */
7632 	if (wm_enable_mng_pass_thru(sc) != 0)
7633 		sc->sc_flags |= WM_F_HAS_MANAGE;
7634 
7635 #ifdef WM_DEBUG
7636 	printf("\n");
7637 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
7638 		printf("HAS_AMT,");
7639 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
7640 		printf("ARC_SUBSYS_VALID,");
7641 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
7642 		printf("ASF_FIRMWARE_PRES,");
7643 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
7644 		printf("HAS_MANAGE,");
7645 	printf("\n");
7646 #endif
7647 	/*
7648 	 * Note that the WOL flags are set later, after the EEPROM
7649 	 * handling code has run.
7650 	 */
7651 }
7652 
7653 #ifdef WM_WOL
7654 /* WOL in the newer chipset interfaces (pchlan) */
7655 static void
7656 wm_enable_phy_wakeup(struct wm_softc *sc)
7657 {
7658 #if 0
7659 	uint16_t preg;
7660 
7661 	/* Copy MAC RARs to PHY RARs */
7662 
7663 	/* Copy MAC MTA to PHY MTA */
7664 
7665 	/* Configure PHY Rx Control register */
7666 
7667 	/* Enable PHY wakeup in MAC register */
7668 
7669 	/* Configure and enable PHY wakeup in PHY registers */
7670 
7671 	/* Activate PHY wakeup */
7672 
7673 	/* XXX */
7674 #endif
7675 }
7676 
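/*
 * Arm the chip for wake-on-LAN: advertise the wakeup capability via
 * SWDPIN/WUC, apply the ICH/PCH-specific workarounds, program the
 * wakeup filters (magic packet at minimum), and finally set PME_EN in
 * the PCI power-management capability so a wake event reaches the
 * system.
 */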
7677 static void
7678 wm_enable_wakeup(struct wm_softc *sc)
7679 {
7680 	uint32_t reg, pmreg;
7681 	pcireg_t pmode;
7682 
7683 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
7684 		&pmreg, NULL) == 0)
7685 		return;
7686 
7687 	/* Advertise the wakeup capability */
7688 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
7689 	    | CTRL_SWDPIN(3));
7690 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
7691 
7692 	/* ICH workaround */
7693 	switch (sc->sc_type) {
7694 	case WM_T_ICH8:
7695 	case WM_T_ICH9:
7696 	case WM_T_ICH10:
7697 	case WM_T_PCH:
7698 	case WM_T_PCH2:
7699 		/* Disable gig during WOL */
7700 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
7701 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
7702 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7703 		if (sc->sc_type == WM_T_PCH)
7704 			wm_gmii_reset(sc);
7705 
7706 		/* Power down workaround */
7707 		if (sc->sc_phytype == WMPHY_82577) {
7708 			struct mii_softc *child;
7709 
7710 			/* Assume that the PHY is copper */
7711 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
7712 			if (child->mii_mpd_rev <= 2)
7713 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
7714 				    (768 << 5) | 25, 0x0444); /* magic num */
7715 		}
7716 		break;
7717 	default:
7718 		break;
7719 	}
7720 
7721 	/* Keep the laser running on fiber adapters */
7722 	if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
7723 	    || (sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0) {
7724 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
7725 		reg |= CTRL_EXT_SWDPIN(3);
7726 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7727 	}
7728 
7729 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
7730 #if 0	/* for the multicast packet */
7731 	reg |= WUFC_MC;
7732 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
7733 #endif
7734 
7735 	if (sc->sc_type == WM_T_PCH) {
7736 		wm_enable_phy_wakeup(sc);
7737 	} else {
7738 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
7739 		CSR_WRITE(sc, WMREG_WUFC, reg);
7740 	}
7741 
7742 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
7743 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
7744 		|| (sc->sc_type == WM_T_PCH2))
7745 		    && (sc->sc_phytype == WMPHY_IGP_3))
7746 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
7747 
7748 	/* Request PME */
7749 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
7750 #if 0
7751 	/* Disable WOL */
7752 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
7753 #else
7754 	/* For WOL */
7755 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
7756 #endif
7757 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
7758 }
7759 #endif /* WM_WOL */
7760 
7761 static bool
7762 wm_suspend(device_t self, const pmf_qual_t *qual)
7763 {
7764 	struct wm_softc *sc = device_private(self);
7765 
7766 	wm_release_manageability(sc);
7767 	wm_release_hw_control(sc);
7768 #ifdef WM_WOL
7769 	wm_enable_wakeup(sc);
7770 #endif
7771 
7772 	return true;
7773 }
7774 
7775 static bool
7776 wm_resume(device_t self, const pmf_qual_t *qual)
7777 {
7778 	struct wm_softc *sc = device_private(self);
7779 
7780 	wm_init_manageability(sc);
7781 
7782 	return true;
7783 }
7784