1 /*	$NetBSD: if_wm.c,v 1.219 2011/02/06 16:23:00 bouyer Exp $	*/
2 
3 /*
4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*******************************************************************************
39 
40   Copyright (c) 2001-2005, Intel Corporation
41   All rights reserved.
42 
43   Redistribution and use in source and binary forms, with or without
44   modification, are permitted provided that the following conditions are met:
45 
46    1. Redistributions of source code must retain the above copyright notice,
47       this list of conditions and the following disclaimer.
48 
49    2. Redistributions in binary form must reproduce the above copyright
50       notice, this list of conditions and the following disclaimer in the
51       documentation and/or other materials provided with the distribution.
52 
53    3. Neither the name of the Intel Corporation nor the names of its
54       contributors may be used to endorse or promote products derived from
55       this software without specific prior written permission.
56 
57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67   POSSIBILITY OF SUCH DAMAGE.
68 
69 *******************************************************************************/
70 /*
71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72  *
73  * TODO (in order of importance):
74  *
75  *	- Rework how parameters are loaded from the EEPROM.
76  */
77 
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.219 2011/02/06 16:23:00 bouyer Exp $");
80 
81 #include "rnd.h"
82 
83 #include <sys/param.h>
84 #include <sys/systm.h>
85 #include <sys/callout.h>
86 #include <sys/mbuf.h>
87 #include <sys/malloc.h>
88 #include <sys/kernel.h>
89 #include <sys/socket.h>
90 #include <sys/ioctl.h>
91 #include <sys/errno.h>
92 #include <sys/device.h>
93 #include <sys/queue.h>
94 #include <sys/syslog.h>
95 
96 #if NRND > 0
97 #include <sys/rnd.h>
98 #endif
99 
100 #include <net/if.h>
101 #include <net/if_dl.h>
102 #include <net/if_media.h>
103 #include <net/if_ether.h>
104 
105 #include <net/bpf.h>
106 
107 #include <netinet/in.h>			/* XXX for struct ip */
108 #include <netinet/in_systm.h>		/* XXX for struct ip */
109 #include <netinet/ip.h>			/* XXX for struct ip */
110 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
111 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
112 
113 #include <sys/bus.h>
114 #include <sys/intr.h>
115 #include <machine/endian.h>
116 
117 #include <dev/mii/mii.h>
118 #include <dev/mii/miivar.h>
119 #include <dev/mii/miidevs.h>
120 #include <dev/mii/mii_bitbang.h>
121 #include <dev/mii/ikphyreg.h>
122 #include <dev/mii/igphyreg.h>
123 #include <dev/mii/igphyvar.h>
124 #include <dev/mii/inbmphyreg.h>
125 
126 #include <dev/pci/pcireg.h>
127 #include <dev/pci/pcivar.h>
128 #include <dev/pci/pcidevs.h>
129 
130 #include <dev/pci/if_wmreg.h>
131 #include <dev/pci/if_wmvar.h>
132 
133 #ifdef WM_DEBUG
134 #define	WM_DEBUG_LINK		0x01
135 #define	WM_DEBUG_TX		0x02
136 #define	WM_DEBUG_RX		0x04
137 #define	WM_DEBUG_GMII		0x08
138 #define	WM_DEBUG_MANAGE		0x10
139 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
140     | WM_DEBUG_MANAGE;
141 
142 #define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
143 #else
144 #define	DPRINTF(x, y)	/* nothing */
145 #endif /* WM_DEBUG */
146 
147 /*
148  * Transmit descriptor list size.  Due to errata, we can only have
149  * 256 hardware descriptors in the ring on < 82544, but we use 4096
150  * on >= 82544.  We tell the upper layers that they can queue a lot
151  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
152  * of them at a time.
153  *
154  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
155  * chains containing many small mbufs have been observed in zero-copy
156  * situations with jumbo frames.
157  */
158 #define	WM_NTXSEGS		256
159 #define	WM_IFQUEUELEN		256
160 #define	WM_TXQUEUELEN_MAX	64
161 #define	WM_TXQUEUELEN_MAX_82547	16
162 #define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
163 #define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
164 #define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
165 #define	WM_NTXDESC_82542	256
166 #define	WM_NTXDESC_82544	4096
167 #define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
168 #define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
169 #define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
170 #define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
171 #define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
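
/*
 * Illustrative note (not from the original comments): because both ring
 * sizes are powers of two, the masks above let the indices wrap without
 * a modulus.  For example, with WM_NTXDESC(sc) == 4096:
 *
 *	WM_NEXTTX(sc, 4095) == ((4095 + 1) & 4095) == 0
 */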
172 
173 #define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */
174 
175 /*
176  * Receive descriptor list size.  We have one Rx buffer for normal-sized
177  * packets.  Jumbo packets consume 5 Rx buffers for a full-sized
178  * packet.  We allocate 256 receive descriptors, each with a 2k
179  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
180  */
181 #define	WM_NRXDESC		256
182 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
183 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
184 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
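
/*
 * Illustrative arithmetic (assuming a 9014-byte jumbo frame): each Rx
 * buffer holds MCLBYTES (2048) bytes, so one such frame spans
 * howmany(9014, 2048) == 5 buffers, and 256 descriptors hold about
 * 256 / 5 == 51 of them, matching the "room for 50" figure above.
 */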
185 
186 /*
187  * Control structures are DMA'd to the i82542 chip.  We allocate them in
188  * a single clump that maps to a single DMA segment to make several things
189  * easier.
190  */
191 struct wm_control_data_82544 {
192 	/*
193 	 * The receive descriptors.
194 	 */
195 	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
196 
197 	/*
198 	 * The transmit descriptors.  Put these at the end, because
199 	 * we might use a smaller number of them.
200 	 */
201 	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
202 };
203 
204 struct wm_control_data_82542 {
205 	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
206 	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
207 };
208 
209 #define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
210 #define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
211 #define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
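
/*
 * Example (illustrative only): the Rx descriptors come first in
 * struct wm_control_data_82544, so WM_CDTXOFF(0) is simply the size of
 * the Rx ring.  Assuming the usual 16-byte wiseman descriptors, that is
 * 256 * 16 == 4096 bytes, and WM_CDRXOFF(10) == 10 * 16 == 160.
 */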
212 
213 /*
214  * Software state for transmit jobs.
215  */
216 struct wm_txsoft {
217 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
218 	bus_dmamap_t txs_dmamap;	/* our DMA map */
219 	int txs_firstdesc;		/* first descriptor in packet */
220 	int txs_lastdesc;		/* last descriptor in packet */
221 	int txs_ndesc;			/* # of descriptors used */
222 };
223 
224 /*
225  * Software state for receive buffers.  Each descriptor gets a
226  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
227  * more than one buffer, we chain them together.
228  */
229 struct wm_rxsoft {
230 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
231 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
232 };
233 
234 #define WM_LINKUP_TIMEOUT	50
235 
236 static uint16_t swfwphysem[] = {
237 	SWFW_PHY0_SM,
238 	SWFW_PHY1_SM,
239 	SWFW_PHY2_SM,
240 	SWFW_PHY3_SM
241 };
242 
243 /*
244  * Software state per device.
245  */
246 struct wm_softc {
247 	device_t sc_dev;		/* generic device information */
248 	bus_space_tag_t sc_st;		/* bus space tag */
249 	bus_space_handle_t sc_sh;	/* bus space handle */
250 	bus_size_t sc_ss;		/* bus space size */
251 	bus_space_tag_t sc_iot;		/* I/O space tag */
252 	bus_space_handle_t sc_ioh;	/* I/O space handle */
253 	bus_size_t sc_ios;		/* I/O space size */
254 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
255 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
256 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
257 
258 	struct ethercom sc_ethercom;	/* ethernet common data */
259 	struct mii_data sc_mii;		/* MII/media information */
260 
261 	pci_chipset_tag_t sc_pc;
262 	pcitag_t sc_pcitag;
263 	int sc_bus_speed;		/* PCI/PCIX bus speed */
264 	int sc_pcixe_capoff;		/* PCI[Xe] capability register offset */
265 
266 	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
267 	wm_chip_type sc_type;		/* MAC type */
268 	int sc_rev;			/* MAC revision */
269 	wm_phy_type sc_phytype;		/* PHY type */
270 	int sc_funcid;			/* unit number of the chip (0 to 3) */
271 	int sc_flags;			/* flags; see below */
272 	int sc_if_flags;		/* last if_flags */
273 	int sc_flowflags;		/* 802.3x flow control flags */
274 	int sc_align_tweak;
275 
276 	void *sc_ih;			/* interrupt cookie */
277 	callout_t sc_tick_ch;		/* tick callout */
278 
279 	int sc_ee_addrbits;		/* EEPROM address bits */
280 	int sc_ich8_flash_base;
281 	int sc_ich8_flash_bank_size;
282 	int sc_nvm_k1_enabled;
283 
284 	/*
285 	 * Software state for the transmit and receive descriptors.
286 	 */
287 	int sc_txnum;			/* must be a power of two */
288 	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
289 	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];
290 
291 	/*
292 	 * Control data structures.
293 	 */
294 	int sc_ntxdesc;			/* must be a power of two */
295 	struct wm_control_data_82544 *sc_control_data;
296 	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
297 	bus_dma_segment_t sc_cd_seg;	/* control data segment */
298 	int sc_cd_rseg;			/* actual number of control data segments */
299 	size_t sc_cd_size;		/* control data size */
300 #define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
301 #define	sc_txdescs	sc_control_data->wcd_txdescs
302 #define	sc_rxdescs	sc_control_data->wcd_rxdescs
303 
304 #ifdef WM_EVENT_COUNTERS
305 	/* Event counters. */
306 	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
307 	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
308 	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
309 	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
310 	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
311 	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
312 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
313 
314 	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
315 	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
316 	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
317 	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
318 	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
319 	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
320 	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
321 	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */
322 
323 	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
324 	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */
325 
326 	struct evcnt sc_ev_tu;		/* Tx underrun */
327 
328 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
329 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
330 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
331 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
332 	struct evcnt sc_ev_rx_macctl;	/* Rx unsupported flow-control frames */
333 #endif /* WM_EVENT_COUNTERS */
334 
335 	bus_addr_t sc_tdt_reg;		/* offset of TDT register */
336 
337 	int	sc_txfree;		/* number of free Tx descriptors */
338 	int	sc_txnext;		/* next ready Tx descriptor */
339 
340 	int	sc_txsfree;		/* number of free Tx jobs */
341 	int	sc_txsnext;		/* next free Tx job */
342 	int	sc_txsdirty;		/* dirty Tx jobs */
343 
344 	/* These 5 variables are used only on the 82547. */
345 	int	sc_txfifo_size;		/* Tx FIFO size */
346 	int	sc_txfifo_head;		/* current head of FIFO */
347 	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
348 	int	sc_txfifo_stall;	/* Tx FIFO is stalled */
349 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
350 
351 	bus_addr_t sc_rdt_reg;		/* offset of RDT register */
352 
353 	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
354 	int	sc_rxdiscard;
355 	int	sc_rxlen;
356 	struct mbuf *sc_rxhead;
357 	struct mbuf *sc_rxtail;
358 	struct mbuf **sc_rxtailp;
359 
360 	uint32_t sc_ctrl;		/* prototype CTRL register */
361 #if 0
362 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
363 #endif
364 	uint32_t sc_icr;		/* prototype interrupt bits */
365 	uint32_t sc_itr;		/* prototype intr throttling reg */
366 	uint32_t sc_tctl;		/* prototype TCTL register */
367 	uint32_t sc_rctl;		/* prototype RCTL register */
368 	uint32_t sc_txcw;		/* prototype TXCW register */
369 	uint32_t sc_tipg;		/* prototype TIPG register */
370 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
371 	uint32_t sc_pba;		/* prototype PBA register */
372 
373 	int sc_tbi_linkup;		/* TBI link status */
374 	int sc_tbi_anegticks;		/* autonegotiation ticks */
375 	int sc_tbi_ticks;		/* tbi ticks */
376 	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
377 	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */
378 
379 	int sc_mchash_type;		/* multicast filter offset */
380 
381 #if NRND > 0
382 	rndsource_element_t rnd_source;	/* random source */
383 #endif
384 };
385 
386 #define	WM_RXCHAIN_RESET(sc)						\
387 do {									\
388 	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
389 	*(sc)->sc_rxtailp = NULL;					\
390 	(sc)->sc_rxlen = 0;						\
391 } while (/*CONSTCOND*/0)
392 
393 #define	WM_RXCHAIN_LINK(sc, m)						\
394 do {									\
395 	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
396 	(sc)->sc_rxtailp = &(m)->m_next;				\
397 } while (/*CONSTCOND*/0)
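
/*
 * Usage sketch (illustrative, not the actual Rx path): after
 * WM_RXCHAIN_RESET(sc), linking mbufs m1 and m2 in turn leaves
 * sc_rxhead == m1, m1->m_next == m2 and sc_rxtail == m2, so a
 * multi-buffer packet accumulates into a single mbuf chain.
 */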
398 
399 #ifdef WM_EVENT_COUNTERS
400 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
401 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
402 #else
403 #define	WM_EVCNT_INCR(ev)	/* nothing */
404 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
405 #endif
406 
407 #define	CSR_READ(sc, reg)						\
408 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
409 #define	CSR_WRITE(sc, reg, val)						\
410 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
411 #define	CSR_WRITE_FLUSH(sc)						\
412 	(void) CSR_READ((sc), WMREG_STATUS)
413 
414 #define ICH8_FLASH_READ32(sc, reg) \
415 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
416 #define ICH8_FLASH_WRITE32(sc, reg, data) \
417 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
418 
419 #define ICH8_FLASH_READ16(sc, reg) \
420 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
421 #define ICH8_FLASH_WRITE16(sc, reg, data) \
422 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
423 
424 #define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
425 #define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))
426 
427 #define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
428 #define	WM_CDTXADDR_HI(sc, x)						\
429 	(sizeof(bus_addr_t) == 8 ?					\
430 	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)
431 
432 #define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
433 #define	WM_CDRXADDR_HI(sc, x)						\
434 	(sizeof(bus_addr_t) == 8 ?					\
435 	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
436 
437 #define	WM_CDTXSYNC(sc, x, n, ops)					\
438 do {									\
439 	int __x, __n;							\
440 									\
441 	__x = (x);							\
442 	__n = (n);							\
443 									\
444 	/* If it will wrap around, sync to the end of the ring. */	\
445 	if ((__x + __n) > WM_NTXDESC(sc)) {				\
446 		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
447 		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
448 		    (WM_NTXDESC(sc) - __x), (ops));			\
449 		__n -= (WM_NTXDESC(sc) - __x);				\
450 		__x = 0;						\
451 	}								\
452 									\
453 	/* Now sync whatever is left. */				\
454 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
455 	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
456 } while (/*CONSTCOND*/0)
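
/*
 * Worked example (illustrative only): with WM_NTXDESC(sc) == 4096,
 * WM_CDTXSYNC(sc, 4094, 4, ops) wraps and therefore issues two
 * bus_dmamap_sync() calls: one covering descriptors 4094-4095 and a
 * second covering descriptors 0-1.
 */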
457 
458 #define	WM_CDRXSYNC(sc, x, ops)						\
459 do {									\
460 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
461 	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
462 } while (/*CONSTCOND*/0)
463 
464 #define	WM_INIT_RXDESC(sc, x)						\
465 do {									\
466 	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
467 	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
468 	struct mbuf *__m = __rxs->rxs_mbuf;				\
469 									\
470 	/*								\
471 	 * Note: We scoot the packet forward 2 bytes in the buffer	\
472 	 * so that the payload after the Ethernet header is aligned	\
473 	 * to a 4-byte boundary.					\
474 	 *								\
475 	 * XXX BRAINDAMAGE ALERT!					\
476 	 * The stupid chip uses the same size for every buffer, which	\
477 	 * is set in the Receive Control register.  We are using the 2K	\
478 	 * size option, but what we REALLY want is (2K - 2)!  For this	\
479 	 * reason, we can't "scoot" packets longer than the standard	\
480 	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
481 	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
482 	 * the upper layer copy the headers.				\
483 	 */								\
484 	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
485 									\
486 	wm_set_dma_addr(&__rxd->wrx_addr,				\
487 	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
488 	__rxd->wrx_len = 0;						\
489 	__rxd->wrx_cksum = 0;						\
490 	__rxd->wrx_status = 0;						\
491 	__rxd->wrx_errors = 0;						\
492 	__rxd->wrx_special = 0;						\
493 	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
494 									\
495 	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
496 } while (/*CONSTCOND*/0)
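
/*
 * Illustrative note on sc_align_tweak: an Ethernet header is 14 bytes
 * long, so starting the frame 2 bytes into the buffer places the IP
 * header at offset 16, a 4-byte boundary.  With a tweak of 0 the IP
 * header would land at offset 14, which strict-alignment platforms
 * cannot dereference directly.
 */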
497 
498 static void	wm_start(struct ifnet *);
499 static void	wm_watchdog(struct ifnet *);
500 static int	wm_ifflags_cb(struct ethercom *);
501 static int	wm_ioctl(struct ifnet *, u_long, void *);
502 static int	wm_init(struct ifnet *);
503 static void	wm_stop(struct ifnet *, int);
504 static bool	wm_suspend(device_t, const pmf_qual_t *);
505 static bool	wm_resume(device_t, const pmf_qual_t *);
506 
507 static void	wm_reset(struct wm_softc *);
508 static void	wm_rxdrain(struct wm_softc *);
509 static int	wm_add_rxbuf(struct wm_softc *, int);
510 static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
511 static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
512 static int	wm_validate_eeprom_checksum(struct wm_softc *);
513 static int	wm_check_alt_mac_addr(struct wm_softc *);
514 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
515 static void	wm_tick(void *);
516 
517 static void	wm_set_filter(struct wm_softc *);
518 static void	wm_set_vlan(struct wm_softc *);
519 
520 static int	wm_intr(void *);
521 static void	wm_txintr(struct wm_softc *);
522 static void	wm_rxintr(struct wm_softc *);
523 static void	wm_linkintr(struct wm_softc *, uint32_t);
524 
525 static void	wm_tbi_mediainit(struct wm_softc *);
526 static int	wm_tbi_mediachange(struct ifnet *);
527 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
528 
529 static void	wm_tbi_set_linkled(struct wm_softc *);
530 static void	wm_tbi_check_link(struct wm_softc *);
531 
532 static void	wm_gmii_reset(struct wm_softc *);
533 
534 static int	wm_gmii_i82543_readreg(device_t, int, int);
535 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
536 
537 static int	wm_gmii_i82544_readreg(device_t, int, int);
538 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
539 
540 static int	wm_gmii_i80003_readreg(device_t, int, int);
541 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
542 static int	wm_gmii_bm_readreg(device_t, int, int);
543 static void	wm_gmii_bm_writereg(device_t, int, int, int);
544 static int	wm_gmii_hv_readreg(device_t, int, int);
545 static void	wm_gmii_hv_writereg(device_t, int, int, int);
546 static int	wm_sgmii_readreg(device_t, int, int);
547 static void	wm_sgmii_writereg(device_t, int, int, int);
548 
549 static void	wm_gmii_statchg(device_t);
550 
551 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
552 static int	wm_gmii_mediachange(struct ifnet *);
553 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
554 
555 static int	wm_kmrn_readreg(struct wm_softc *, int);
556 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
557 
558 static void	wm_set_spiaddrbits(struct wm_softc *);
559 static int	wm_match(device_t, cfdata_t, void *);
560 static void	wm_attach(device_t, device_t, void *);
561 static int	wm_detach(device_t, int);
562 static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
563 static void	wm_get_auto_rd_done(struct wm_softc *);
564 static void	wm_lan_init_done(struct wm_softc *);
565 static void	wm_get_cfg_done(struct wm_softc *);
566 static int	wm_get_swsm_semaphore(struct wm_softc *);
567 static void	wm_put_swsm_semaphore(struct wm_softc *);
568 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
569 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
570 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
571 static int	wm_get_swfwhw_semaphore(struct wm_softc *);
572 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
573 
574 static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
575 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
576 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
577 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
578 		     uint32_t, uint16_t *);
579 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
580 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
581 static void	wm_82547_txfifo_stall(void *);
582 static int	wm_check_mng_mode(struct wm_softc *);
583 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
584 static int	wm_check_mng_mode_82574(struct wm_softc *);
585 static int	wm_check_mng_mode_generic(struct wm_softc *);
586 static int	wm_enable_mng_pass_thru(struct wm_softc *);
587 static int	wm_check_reset_block(struct wm_softc *);
588 static void	wm_get_hw_control(struct wm_softc *);
589 static int	wm_check_for_link(struct wm_softc *);
590 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
591 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
592 #ifdef WM_WOL
593 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
594 #endif
595 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
596 static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
597 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
598 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
599 static void	wm_reset_init_script_82575(struct wm_softc *);
600 static void	wm_release_manageability(struct wm_softc *);
601 static void	wm_release_hw_control(struct wm_softc *);
602 static void	wm_get_wakeup(struct wm_softc *);
603 #ifdef WM_WOL
604 static void	wm_enable_phy_wakeup(struct wm_softc *);
605 static void	wm_enable_wakeup(struct wm_softc *);
606 #endif
607 static void	wm_init_manageability(struct wm_softc *);
608 
609 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
610     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
611 
612 /*
613  * Devices supported by this driver.
614  */
615 static const struct wm_product {
616 	pci_vendor_id_t		wmp_vendor;
617 	pci_product_id_t	wmp_product;
618 	const char		*wmp_name;
619 	wm_chip_type		wmp_type;
620 	int			wmp_flags;
621 #define	WMP_F_1000X		0x01
622 #define	WMP_F_1000T		0x02
623 #define	WMP_F_SERDES		0x04
624 } wm_products[] = {
625 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
626 	  "Intel i82542 1000BASE-X Ethernet",
627 	  WM_T_82542_2_1,	WMP_F_1000X },
628 
629 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
630 	  "Intel i82543GC 1000BASE-X Ethernet",
631 	  WM_T_82543,		WMP_F_1000X },
632 
633 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
634 	  "Intel i82543GC 1000BASE-T Ethernet",
635 	  WM_T_82543,		WMP_F_1000T },
636 
637 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
638 	  "Intel i82544EI 1000BASE-T Ethernet",
639 	  WM_T_82544,		WMP_F_1000T },
640 
641 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
642 	  "Intel i82544EI 1000BASE-X Ethernet",
643 	  WM_T_82544,		WMP_F_1000X },
644 
645 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
646 	  "Intel i82544GC 1000BASE-T Ethernet",
647 	  WM_T_82544,		WMP_F_1000T },
648 
649 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
650 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
651 	  WM_T_82544,		WMP_F_1000T },
652 
653 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
654 	  "Intel i82540EM 1000BASE-T Ethernet",
655 	  WM_T_82540,		WMP_F_1000T },
656 
657 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
658 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
659 	  WM_T_82540,		WMP_F_1000T },
660 
661 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
662 	  "Intel i82540EP 1000BASE-T Ethernet",
663 	  WM_T_82540,		WMP_F_1000T },
664 
665 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
666 	  "Intel i82540EP 1000BASE-T Ethernet",
667 	  WM_T_82540,		WMP_F_1000T },
668 
669 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
670 	  "Intel i82540EP 1000BASE-T Ethernet",
671 	  WM_T_82540,		WMP_F_1000T },
672 
673 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
674 	  "Intel i82545EM 1000BASE-T Ethernet",
675 	  WM_T_82545,		WMP_F_1000T },
676 
677 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
678 	  "Intel i82545GM 1000BASE-T Ethernet",
679 	  WM_T_82545_3,		WMP_F_1000T },
680 
681 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
682 	  "Intel i82545GM 1000BASE-X Ethernet",
683 	  WM_T_82545_3,		WMP_F_1000X },
684 #if 0
685 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
686 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
687 	  WM_T_82545_3,		WMP_F_SERDES },
688 #endif
689 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
690 	  "Intel i82546EB 1000BASE-T Ethernet",
691 	  WM_T_82546,		WMP_F_1000T },
692 
693 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
694 	  "Intel i82546EB 1000BASE-T Ethernet",
695 	  WM_T_82546,		WMP_F_1000T },
696 
697 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
698 	  "Intel i82545EM 1000BASE-X Ethernet",
699 	  WM_T_82545,		WMP_F_1000X },
700 
701 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
702 	  "Intel i82546EB 1000BASE-X Ethernet",
703 	  WM_T_82546,		WMP_F_1000X },
704 
705 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
706 	  "Intel i82546GB 1000BASE-T Ethernet",
707 	  WM_T_82546_3,		WMP_F_1000T },
708 
709 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
710 	  "Intel i82546GB 1000BASE-X Ethernet",
711 	  WM_T_82546_3,		WMP_F_1000X },
712 #if 0
713 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
714 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
715 	  WM_T_82546_3,		WMP_F_SERDES },
716 #endif
717 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
718 	  "i82546GB quad-port Gigabit Ethernet",
719 	  WM_T_82546_3,		WMP_F_1000T },
720 
721 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
722 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
723 	  WM_T_82546_3,		WMP_F_1000T },
724 
725 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
726 	  "Intel PRO/1000MT (82546GB)",
727 	  WM_T_82546_3,		WMP_F_1000T },
728 
729 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
730 	  "Intel i82541EI 1000BASE-T Ethernet",
731 	  WM_T_82541,		WMP_F_1000T },
732 
733 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
734 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
735 	  WM_T_82541,		WMP_F_1000T },
736 
737 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
738 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
739 	  WM_T_82541,		WMP_F_1000T },
740 
741 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
742 	  "Intel i82541ER 1000BASE-T Ethernet",
743 	  WM_T_82541_2,		WMP_F_1000T },
744 
745 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
746 	  "Intel i82541GI 1000BASE-T Ethernet",
747 	  WM_T_82541_2,		WMP_F_1000T },
748 
749 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
750 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
751 	  WM_T_82541_2,		WMP_F_1000T },
752 
753 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
754 	  "Intel i82541PI 1000BASE-T Ethernet",
755 	  WM_T_82541_2,		WMP_F_1000T },
756 
757 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
758 	  "Intel i82547EI 1000BASE-T Ethernet",
759 	  WM_T_82547,		WMP_F_1000T },
760 
761 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
762 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
763 	  WM_T_82547,		WMP_F_1000T },
764 
765 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
766 	  "Intel i82547GI 1000BASE-T Ethernet",
767 	  WM_T_82547_2,		WMP_F_1000T },
768 
769 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
770 	  "Intel PRO/1000 PT (82571EB)",
771 	  WM_T_82571,		WMP_F_1000T },
772 
773 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
774 	  "Intel PRO/1000 PF (82571EB)",
775 	  WM_T_82571,		WMP_F_1000X },
776 #if 0
777 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
778 	  "Intel PRO/1000 PB (82571EB)",
779 	  WM_T_82571,		WMP_F_SERDES },
780 #endif
781 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
782 	  "Intel PRO/1000 QT (82571EB)",
783 	  WM_T_82571,		WMP_F_1000T },
784 
785 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
786 	  "Intel i82572EI 1000baseT Ethernet",
787 	  WM_T_82572,		WMP_F_1000T },
788 
789 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
790 	  "Intel PRO/1000 PT Quad Port Server Adapter",
791 	  WM_T_82571,		WMP_F_1000T, },
792 
793 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
794 	  "Intel i82572EI 1000baseX Ethernet",
795 	  WM_T_82572,		WMP_F_1000X },
796 #if 0
797 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
798 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
799 	  WM_T_82572,		WMP_F_SERDES },
800 #endif
801 
802 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
803 	  "Intel i82572EI 1000baseT Ethernet",
804 	  WM_T_82572,		WMP_F_1000T },
805 
806 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
807 	  "Intel i82573E",
808 	  WM_T_82573,		WMP_F_1000T },
809 
810 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
811 	  "Intel i82573E IAMT",
812 	  WM_T_82573,		WMP_F_1000T },
813 
814 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
815 	  "Intel i82573L Gigabit Ethernet",
816 	  WM_T_82573,		WMP_F_1000T },
817 
818 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
819 	  "Intel i82574L",
820 	  WM_T_82574,		WMP_F_1000T },
821 
822 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
823 	  "Intel i82583V",
824 	  WM_T_82583,		WMP_F_1000T },
825 
826 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
827 	  "i80003 dual 1000baseT Ethernet",
828 	  WM_T_80003,		WMP_F_1000T },
829 
830 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
831 	  "i80003 dual 1000baseX Ethernet",
832 	  WM_T_80003,		WMP_F_1000T },
833 #if 0
834 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
835 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
836 	  WM_T_80003,		WMP_F_SERDES },
837 #endif
838 
839 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
840 	  "Intel i80003 1000baseT Ethernet",
841 	  WM_T_80003,		WMP_F_1000T },
842 #if 0
843 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
844 	  "Intel i80003 Gigabit Ethernet (SERDES)",
845 	  WM_T_80003,		WMP_F_SERDES },
846 #endif
847 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
848 	  "Intel i82801H (M_AMT) LAN Controller",
849 	  WM_T_ICH8,		WMP_F_1000T },
850 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
851 	  "Intel i82801H (AMT) LAN Controller",
852 	  WM_T_ICH8,		WMP_F_1000T },
853 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
854 	  "Intel i82801H LAN Controller",
855 	  WM_T_ICH8,		WMP_F_1000T },
856 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
857 	  "Intel i82801H (IFE) LAN Controller",
858 	  WM_T_ICH8,		WMP_F_1000T },
859 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
860 	  "Intel i82801H (M) LAN Controller",
861 	  WM_T_ICH8,		WMP_F_1000T },
862 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
863 	  "Intel i82801H IFE (GT) LAN Controller",
864 	  WM_T_ICH8,		WMP_F_1000T },
865 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
866 	  "Intel i82801H IFE (G) LAN Controller",
867 	  WM_T_ICH8,		WMP_F_1000T },
868 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
869 	  "82801I (AMT) LAN Controller",
870 	  WM_T_ICH9,		WMP_F_1000T },
871 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
872 	  "82801I LAN Controller",
873 	  WM_T_ICH9,		WMP_F_1000T },
874 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
875 	  "82801I (G) LAN Controller",
876 	  WM_T_ICH9,		WMP_F_1000T },
877 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
878 	  "82801I (GT) LAN Controller",
879 	  WM_T_ICH9,		WMP_F_1000T },
880 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
881 	  "82801I (C) LAN Controller",
882 	  WM_T_ICH9,		WMP_F_1000T },
883 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
884 	  "82801I mobile LAN Controller",
885 	  WM_T_ICH9,		WMP_F_1000T },
886 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
887 	  "82801I mobile (V) LAN Controller",
888 	  WM_T_ICH9,		WMP_F_1000T },
889 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
890 	  "82801I mobile (AMT) LAN Controller",
891 	  WM_T_ICH9,		WMP_F_1000T },
892 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
893 	  "82567LM-4 LAN Controller",
894 	  WM_T_ICH9,		WMP_F_1000T },
895 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
896 	  "82567V-3 LAN Controller",
897 	  WM_T_ICH9,		WMP_F_1000T },
898 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
899 	  "82567LM-2 LAN Controller",
900 	  WM_T_ICH10,		WMP_F_1000T },
901 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
902 	  "82567LF-2 LAN Controller",
903 	  WM_T_ICH10,		WMP_F_1000T },
904 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
905 	  "82567LM-3 LAN Controller",
906 	  WM_T_ICH10,		WMP_F_1000T },
907 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
908 	  "82567LF-3 LAN Controller",
909 	  WM_T_ICH10,		WMP_F_1000T },
910 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
911 	  "82567V-2 LAN Controller",
912 	  WM_T_ICH10,		WMP_F_1000T },
913 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
914 	  "PCH LAN (82577LM) Controller",
915 	  WM_T_PCH,		WMP_F_1000T },
916 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
917 	  "PCH LAN (82577LC) Controller",
918 	  WM_T_PCH,		WMP_F_1000T },
919 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
920 	  "PCH LAN (82578DM) Controller",
921 	  WM_T_PCH,		WMP_F_1000T },
922 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
923 	  "PCH LAN (82578DC) Controller",
924 	  WM_T_PCH,		WMP_F_1000T },
925 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
926 	  "82575EB dual-1000baseT Ethernet",
927 	  WM_T_82575,		WMP_F_1000T },
928 #if 0
929 	/*
930 	 * Not sure whether this is WMP_F_1000X or WMP_F_SERDES; we do not
931 	 * have the hardware, so it is disabled for now ...
932 	 */
933 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
934 	  "82575EB dual-1000baseX Ethernet (SERDES)",
935 	  WM_T_82575,		WMP_F_SERDES },
936 #endif
937 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
938 	  "82575GB quad-1000baseT Ethernet",
939 	  WM_T_82575,		WMP_F_1000T },
940 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
941 	  "82575GB quad-1000baseT Ethernet (PM)",
942 	  WM_T_82575,		WMP_F_1000T },
943 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
944 	  "82576 1000BaseT Ethernet",
945 	  WM_T_82576,		WMP_F_1000T },
946 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
947 	  "82576 1000BaseX Ethernet",
948 	  WM_T_82576,		WMP_F_1000X },
949 #if 0
950 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
951 	  "82576 gigabit Ethernet (SERDES)",
952 	  WM_T_82576,		WMP_F_SERDES },
953 #endif
954 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
955 	  "82576 quad-1000BaseT Ethernet",
956 	  WM_T_82576,		WMP_F_1000T },
957 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
958 	  "82576 gigabit Ethernet",
959 	  WM_T_82576,		WMP_F_1000T },
960 #if 0
961 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
962 	  "82576 gigabit Ethernet (SERDES)",
963 	  WM_T_82576,		WMP_F_SERDES },
964 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
965 	  "82576 quad-gigabit Ethernet (SERDES)",
966 	  WM_T_82576,		WMP_F_SERDES },
967 #endif
968 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
969 	  "82580 1000BaseT Ethernet",
970 	  WM_T_82580,		WMP_F_1000T },
971 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
972 	  "82580 1000BaseX Ethernet",
973 	  WM_T_82580,		WMP_F_1000X },
974 #if 0
975 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
976 	  "82580 1000BaseT Ethernet (SERDES)",
977 	  WM_T_82580,		WMP_F_SERDES },
978 #endif
979 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
980 	  "82580 gigabit Ethernet (SGMII)",
981 	  WM_T_82580,		WMP_F_1000T },
982 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
983 	  "82580 dual-1000BaseT Ethernet",
984 	  WM_T_82580,		WMP_F_1000T },
985 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER,
986 	  "82580 1000BaseT Ethernet",
987 	  WM_T_82580ER,		WMP_F_1000T },
988 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER_DUAL,
989 	  "82580 dual-1000BaseT Ethernet",
990 	  WM_T_82580ER,		WMP_F_1000T },
991 	{ 0,			0,
992 	  NULL,
993 	  0,			0 },
994 };
995 
996 #ifdef WM_EVENT_COUNTERS
997 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
998 #endif /* WM_EVENT_COUNTERS */
999 
1000 #if 0 /* Not currently used */
1001 static inline uint32_t
1002 wm_io_read(struct wm_softc *sc, int reg)
1003 {
1004 
1005 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1006 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1007 }
1008 #endif
1009 
1010 static inline void
1011 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1012 {
1013 
1014 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1015 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1016 }
1017 
1018 static inline void
1019 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1020     uint32_t data)
1021 {
1022 	uint32_t regval;
1023 	int i;
1024 
1025 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1026 
1027 	CSR_WRITE(sc, reg, regval);
1028 
1029 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1030 		delay(5);
1031 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1032 			break;
1033 	}
1034 	if (i == SCTL_CTL_POLL_TIMEOUT) {
1035 		aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not indicate ready\n",
1036 		    device_xname(sc->sc_dev), reg);
1037 	}
1038 }
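
/*
 * Hypothetical usage sketch (the register name below is an assumption,
 * not taken from this file): writing one byte of an 8-bit controller
 * register via the SerDes control register might look like
 *
 *	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, offset, data);
 *
 * The helper busy-waits up to SCTL_CTL_POLL_TIMEOUT iterations, 5us
 * apart, for the hardware to assert SCTL_CTL_READY.
 */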
1039 
1040 static inline void
1041 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1042 {
1043 	wa->wa_low = htole32(v & 0xffffffffU);
1044 	if (sizeof(bus_addr_t) == 8)
1045 		wa->wa_high = htole32((uint64_t) v >> 32);
1046 	else
1047 		wa->wa_high = 0;
1048 }
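
/*
 * Example (illustrative only): with a 64-bit bus_addr_t,
 * wm_set_dma_addr(wa, 0x123456789abcULL) stores htole32(0x56789abc)
 * in wa_low and htole32(0x1234) in wa_high; with a 32-bit bus_addr_t,
 * wa_high is simply 0.
 */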
1049 
1050 static void
1051 wm_set_spiaddrbits(struct wm_softc *sc)
1052 {
1053 	uint32_t reg;
1054 
1055 	sc->sc_flags |= WM_F_EEPROM_SPI;
1056 	reg = CSR_READ(sc, WMREG_EECD);
1057 	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
1058 }
1059 
1060 static const struct wm_product *
1061 wm_lookup(const struct pci_attach_args *pa)
1062 {
1063 	const struct wm_product *wmp;
1064 
1065 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1066 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1067 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1068 			return wmp;
1069 	}
1070 	return NULL;
1071 }
1072 
1073 static int
1074 wm_match(device_t parent, cfdata_t cf, void *aux)
1075 {
1076 	struct pci_attach_args *pa = aux;
1077 
1078 	if (wm_lookup(pa) != NULL)
1079 		return 1;
1080 
1081 	return 0;
1082 }
1083 
1084 static void
1085 wm_attach(device_t parent, device_t self, void *aux)
1086 {
1087 	struct wm_softc *sc = device_private(self);
1088 	struct pci_attach_args *pa = aux;
1089 	prop_dictionary_t dict;
1090 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1091 	pci_chipset_tag_t pc = pa->pa_pc;
1092 	pci_intr_handle_t ih;
1093 	const char *intrstr = NULL;
1094 	const char *eetype, *xname;
1095 	bus_space_tag_t memt;
1096 	bus_space_handle_t memh;
1097 	bus_size_t memsize;
1098 	int memh_valid;
1099 	int i, error;
1100 	const struct wm_product *wmp;
1101 	prop_data_t ea;
1102 	prop_number_t pn;
1103 	uint8_t enaddr[ETHER_ADDR_LEN];
1104 	uint16_t cfg1, cfg2, swdpin, io3;
1105 	pcireg_t preg, memtype;
1106 	uint16_t eeprom_data, apme_mask;
1107 	uint32_t reg;
1108 
1109 	sc->sc_dev = self;
1110 	callout_init(&sc->sc_tick_ch, 0);
1111 
1112 	sc->sc_wmp = wmp = wm_lookup(pa);
1113 	if (wmp == NULL) {
1114 		printf("\n");
1115 		panic("wm_attach: impossible");
1116 	}
1117 
1118 	sc->sc_pc = pa->pa_pc;
1119 	sc->sc_pcitag = pa->pa_tag;
1120 
1121 	if (pci_dma64_available(pa))
1122 		sc->sc_dmat = pa->pa_dmat64;
1123 	else
1124 		sc->sc_dmat = pa->pa_dmat;
1125 
1126 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
1127 	aprint_naive(": Ethernet controller\n");
1128 	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, sc->sc_rev);
1129 
1130 	sc->sc_type = wmp->wmp_type;
1131 	if (sc->sc_type < WM_T_82543) {
1132 		if (sc->sc_rev < 2) {
1133 			aprint_error_dev(sc->sc_dev,
1134 			    "i82542 must be at least rev. 2\n");
1135 			return;
1136 		}
1137 		if (sc->sc_rev < 3)
1138 			sc->sc_type = WM_T_82542_2_0;
1139 	}
1140 
1141 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1142 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER))
1143 		sc->sc_flags |= WM_F_NEWQUEUE;
1144 
1145 	/* Set device properties (mactype) */
1146 	dict = device_properties(sc->sc_dev);
1147 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1148 
1149 	/*
1150 	 * Map the device.  All devices support memory-mapped access,
1151 	 * which is required for normal operation.
1152 	 */
1153 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1154 	switch (memtype) {
1155 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1156 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1157 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1158 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1159 		break;
1160 	default:
1161 		memh_valid = 0;
1162 		break;
1163 	}
1164 
1165 	if (memh_valid) {
1166 		sc->sc_st = memt;
1167 		sc->sc_sh = memh;
1168 		sc->sc_ss = memsize;
1169 	} else {
1170 		aprint_error_dev(sc->sc_dev,
1171 		    "unable to map device registers\n");
1172 		return;
1173 	}
1174 
1175 	wm_get_wakeup(sc);
1176 
1177 	/*
1178 	 * In addition, i82544 and later support I/O mapped indirect
1179 	 * register access.  It is not desirable (nor supported in
1180 	 * this driver) to use it for normal operation, though it is
1181 	 * required to work around bugs in some chip versions.
1182 	 */
1183 	if (sc->sc_type >= WM_T_82544) {
1184 		/* First we have to find the I/O BAR. */
1185 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1186 			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
1187 			    PCI_MAPREG_TYPE_IO)
1188 				break;
1189 		}
1190 		if (i != PCI_MAPREG_END) {
1191 			/*
1192 			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
1193 			 * (and possibly newer) chips have no I/O BAR, but
1194 			 * that is not a problem, because those chips do not
1195 			 * have this bug.
1196 			 *
1197 			 * The i8254x apparently doesn't respond when the
1198 			 * I/O BAR is 0, which suggests that it has not
1199 			 * been configured.
1200 			 */
1201 			preg = pci_conf_read(pc, pa->pa_tag, i);
1202 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1203 				aprint_error_dev(sc->sc_dev,
1204 				    "WARNING: I/O BAR at zero.\n");
1205 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1206 					0, &sc->sc_iot, &sc->sc_ioh,
1207 					NULL, &sc->sc_ios) == 0) {
1208 				sc->sc_flags |= WM_F_IOH_VALID;
1209 			} else {
1210 				aprint_error_dev(sc->sc_dev,
1211 				    "WARNING: unable to map I/O space\n");
1212 			}
1213 		}
1214 
1215 	}
1216 
1217 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
1218 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1219 	preg |= PCI_COMMAND_MASTER_ENABLE;
1220 	if (sc->sc_type < WM_T_82542_2_1)
1221 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1222 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1223 
1224 	/* power up chip */
1225 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1226 	    NULL)) && error != EOPNOTSUPP) {
1227 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1228 		return;
1229 	}
1230 
1231 	/*
1232 	 * Map and establish our interrupt.
1233 	 */
1234 	if (pci_intr_map(pa, &ih)) {
1235 		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
1236 		return;
1237 	}
1238 	intrstr = pci_intr_string(pc, ih);
1239 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
1240 	if (sc->sc_ih == NULL) {
1241 		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
1242 		if (intrstr != NULL)
1243 			aprint_error(" at %s", intrstr);
1244 		aprint_error("\n");
1245 		return;
1246 	}
1247 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
1248 
1249 	/*
1250 	 * Check the function ID (unit number of the chip).
1251 	 */
1252 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1253 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
1254 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1255 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER))
1256 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1257 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1258 	else
1259 		sc->sc_funcid = 0;
1260 
1261 	/*
1262 	 * Determine a few things about the bus we're connected to.
1263 	 */
1264 	if (sc->sc_type < WM_T_82543) {
1265 		/* We don't really know the bus characteristics here. */
1266 		sc->sc_bus_speed = 33;
1267 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1268 		/*
1269 		 * CSA (Communication Streaming Architecture) is about as fast
1270 		 * as a 32-bit 66MHz PCI bus.
1271 		 */
1272 		sc->sc_flags |= WM_F_CSA;
1273 		sc->sc_bus_speed = 66;
1274 		aprint_verbose_dev(sc->sc_dev,
1275 		    "Communication Streaming Architecture\n");
1276 		if (sc->sc_type == WM_T_82547) {
1277 			callout_init(&sc->sc_txfifo_ch, 0);
1278 			callout_setfunc(&sc->sc_txfifo_ch,
1279 					wm_82547_txfifo_stall, sc);
1280 			aprint_verbose_dev(sc->sc_dev,
1281 			    "using 82547 Tx FIFO stall work-around\n");
1282 		}
1283 	} else if (sc->sc_type >= WM_T_82571) {
1284 		sc->sc_flags |= WM_F_PCIE;
1285 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1286 		    && (sc->sc_type != WM_T_ICH10)
1287 		    && (sc->sc_type != WM_T_PCH)) {
1288 			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
1289 			/* ICH* and PCH have no PCIe capability registers */
1290 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1291 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1292 				NULL) == 0)
1293 				aprint_error_dev(sc->sc_dev,
1294 				    "unable to find PCIe capability\n");
1295 		}
1296 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1297 	} else {
1298 		reg = CSR_READ(sc, WMREG_STATUS);
1299 		if (reg & STATUS_BUS64)
1300 			sc->sc_flags |= WM_F_BUS64;
1301 		if ((reg & STATUS_PCIX_MODE) != 0) {
1302 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1303 
1304 			sc->sc_flags |= WM_F_PCIX;
1305 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1306 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1307 				aprint_error_dev(sc->sc_dev,
1308 				    "unable to find PCIX capability\n");
1309 			else if (sc->sc_type != WM_T_82545_3 &&
1310 				 sc->sc_type != WM_T_82546_3) {
1311 				/*
1312 				 * Work around a problem caused by the BIOS
1313 				 * setting the max memory read byte count
1314 				 * incorrectly.
1315 				 */
1316 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1317 				    sc->sc_pcixe_capoff + PCI_PCIX_CMD);
1318 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1319 				    sc->sc_pcixe_capoff + PCI_PCIX_STATUS);
1320 
1321 				bytecnt =
1322 				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
1323 				    PCI_PCIX_CMD_BYTECNT_SHIFT;
1324 				maxb =
1325 				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
1326 				    PCI_PCIX_STATUS_MAXB_SHIFT;
1327 				if (bytecnt > maxb) {
1328 					aprint_verbose_dev(sc->sc_dev,
1329 					    "resetting PCI-X MMRBC: %d -> %d\n",
1330 					    512 << bytecnt, 512 << maxb);
1331 					pcix_cmd = (pcix_cmd &
1332 					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
1333 					   (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
1334 					pci_conf_write(pa->pa_pc, pa->pa_tag,
1335 					    sc->sc_pcixe_capoff + PCI_PCIX_CMD,
1336 					    pcix_cmd);
1337 				}
1338 			}
1339 		}
1340 		/*
1341 		 * The quad port adapter is special; it has a PCIX-PCIX
1342 		 * bridge on the board, and can run the secondary bus at
1343 		 * a higher speed.
1344 		 */
1345 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1346 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1347 								      : 66;
1348 		} else if (sc->sc_flags & WM_F_PCIX) {
1349 			switch (reg & STATUS_PCIXSPD_MASK) {
1350 			case STATUS_PCIXSPD_50_66:
1351 				sc->sc_bus_speed = 66;
1352 				break;
1353 			case STATUS_PCIXSPD_66_100:
1354 				sc->sc_bus_speed = 100;
1355 				break;
1356 			case STATUS_PCIXSPD_100_133:
1357 				sc->sc_bus_speed = 133;
1358 				break;
1359 			default:
1360 				aprint_error_dev(sc->sc_dev,
1361 				    "unknown PCIXSPD %d; assuming 66MHz\n",
1362 				    reg & STATUS_PCIXSPD_MASK);
1363 				sc->sc_bus_speed = 66;
1364 				break;
1365 			}
1366 		} else
1367 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1368 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1369 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1370 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1371 	}
1372 
1373 	/*
1374 	 * Allocate the control data structures, and create and load the
1375 	 * DMA map for it.
1376 	 *
1377 	 * NOTE: All Tx descriptors must be in the same 4G segment of
1378 	 * memory.  So must Rx descriptors.  We simplify by allocating
1379 	 * both sets within the same 4G segment.
1380 	 */
1381 	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1382 	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
1383 	sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1384 	    sizeof(struct wm_control_data_82542) :
1385 	    sizeof(struct wm_control_data_82544);
1386 	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1387 		    (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1388 		    &sc->sc_cd_rseg, 0)) != 0) {
1389 		aprint_error_dev(sc->sc_dev,
1390 		    "unable to allocate control data, error = %d\n",
1391 		    error);
1392 		goto fail_0;
1393 	}
1394 
1395 	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1396 		    sc->sc_cd_rseg, sc->sc_cd_size,
1397 		    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1398 		aprint_error_dev(sc->sc_dev,
1399 		    "unable to map control data, error = %d\n", error);
1400 		goto fail_1;
1401 	}
1402 
1403 	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1404 		    sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1405 		aprint_error_dev(sc->sc_dev,
1406 		    "unable to create control data DMA map, error = %d\n",
1407 		    error);
1408 		goto fail_2;
1409 	}
1410 
1411 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1412 		    sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1413 		aprint_error_dev(sc->sc_dev,
1414 		    "unable to load control data DMA map, error = %d\n",
1415 		    error);
1416 		goto fail_3;
1417 	}
1418 
1419 	/*
1420 	 * Create the transmit buffer DMA maps.
1421 	 */
1422 	WM_TXQUEUELEN(sc) =
1423 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1424 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1425 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1426 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1427 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1428 			    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1429 			aprint_error_dev(sc->sc_dev,
1430 			    "unable to create Tx DMA map %d, error = %d\n",
1431 			    i, error);
1432 			goto fail_4;
1433 		}
1434 	}
1435 
1436 	/*
1437 	 * Create the receive buffer DMA maps.
1438 	 */
1439 	for (i = 0; i < WM_NRXDESC; i++) {
1440 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1441 			    MCLBYTES, 0, 0,
1442 			    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1443 			aprint_error_dev(sc->sc_dev,
1444 			    "unable to create Rx DMA map %d error = %d\n",
1445 			    i, error);
1446 			goto fail_5;
1447 		}
1448 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
1449 	}
1450 
1451 	/* clear interesting stat counters */
1452 	CSR_READ(sc, WMREG_COLC);
1453 	CSR_READ(sc, WMREG_RXERRC);
1454 
1455 	/*
1456 	 * Reset the chip to a known state.
1457 	 */
1458 	wm_reset(sc);
1459 
1460 	switch (sc->sc_type) {
1461 	case WM_T_82571:
1462 	case WM_T_82572:
1463 	case WM_T_82573:
1464 	case WM_T_82574:
1465 	case WM_T_82583:
1466 	case WM_T_80003:
1467 	case WM_T_ICH8:
1468 	case WM_T_ICH9:
1469 	case WM_T_ICH10:
1470 	case WM_T_PCH:
1471 		if (wm_check_mng_mode(sc) != 0)
1472 			wm_get_hw_control(sc);
1473 		break;
1474 	default:
1475 		break;
1476 	}
1477 
1478 	/*
1479 	 * Get some information about the EEPROM.
1480 	 */
1481 	switch (sc->sc_type) {
1482 	case WM_T_82542_2_0:
1483 	case WM_T_82542_2_1:
1484 	case WM_T_82543:
1485 	case WM_T_82544:
1486 		/* Microwire */
1487 		sc->sc_ee_addrbits = 6;
1488 		break;
1489 	case WM_T_82540:
1490 	case WM_T_82545:
1491 	case WM_T_82545_3:
1492 	case WM_T_82546:
1493 	case WM_T_82546_3:
1494 		/* Microwire */
1495 		reg = CSR_READ(sc, WMREG_EECD);
1496 		if (reg & EECD_EE_SIZE)
1497 			sc->sc_ee_addrbits = 8;
1498 		else
1499 			sc->sc_ee_addrbits = 6;
1500 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1501 		break;
1502 	case WM_T_82541:
1503 	case WM_T_82541_2:
1504 	case WM_T_82547:
1505 	case WM_T_82547_2:
1506 		reg = CSR_READ(sc, WMREG_EECD);
1507 		if (reg & EECD_EE_TYPE) {
1508 			/* SPI */
1509 			wm_set_spiaddrbits(sc);
1510 		} else
1511 			/* Microwire */
1512 			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
1513 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1514 		break;
1515 	case WM_T_82571:
1516 	case WM_T_82572:
1517 		/* SPI */
1518 		wm_set_spiaddrbits(sc);
1519 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1520 		break;
1521 	case WM_T_82573:
1522 	case WM_T_82574:
1523 	case WM_T_82583:
1524 		if (wm_is_onboard_nvm_eeprom(sc) == 0)
1525 			sc->sc_flags |= WM_F_EEPROM_FLASH;
1526 		else {
1527 			/* SPI */
1528 			wm_set_spiaddrbits(sc);
1529 		}
1530 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1531 		break;
1532 	case WM_T_82575:
1533 	case WM_T_82576:
1534 	case WM_T_82580:
1535 	case WM_T_82580ER:
1536 	case WM_T_80003:
1537 		/* SPI */
1538 		wm_set_spiaddrbits(sc);
1539 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
1540 		break;
1541 	case WM_T_ICH8:
1542 	case WM_T_ICH9:
1543 	case WM_T_ICH10:
1544 	case WM_T_PCH:
1545 		/* FLASH */
1546 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
1547 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1548 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1549 		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1550 			aprint_error_dev(sc->sc_dev,
1551 			    "can't map FLASH registers\n");
1552 			return;
1553 		}
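		/*
		 * The GFPREG flash register gives the region bounds in
		 * units of ICH_FLASH_SECTOR_SIZE sectors: the base
		 * sector in the low bits and the limit sector in bits
		 * 16 and up.  The region holds two NVM banks, so the
		 * per-bank size computed below is half the region,
		 * converted from bytes into 16-bit words for the flash
		 * read routines.
		 */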
1554 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1555 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1556 						ICH_FLASH_SECTOR_SIZE;
1557 		sc->sc_ich8_flash_bank_size =
1558 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1559 		sc->sc_ich8_flash_bank_size -=
1560 		    (reg & ICH_GFPREG_BASE_MASK);
1561 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1562 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1563 		break;
1564 	default:
1565 		break;
1566 	}
1567 
1568 	/*
1569 	 * Defer printing the EEPROM type until after verifying the checksum
1570 	 * Defer printing the EEPROM type until after verifying the checksum.
1571 	 * that no EEPROM is attached.
1572 	 */
1573 	/*
1574 	 * Validate the EEPROM checksum. If the checksum fails, flag
1575 	 * this for later, so we can fail future reads from the EEPROM.
1576 	 */
1577 	if (wm_validate_eeprom_checksum(sc)) {
1578 		/*
1579 		 * Try the read again, because some PCI-e parts fail the
1580 		 * first check while the link is in a sleep state.
1581 		 */
1582 		if (wm_validate_eeprom_checksum(sc))
1583 			sc->sc_flags |= WM_F_EEPROM_INVALID;
1584 	}
1585 
1586 	/* Set device properties (macflags) */
1587 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1588 
1589 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
1590 		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1591 	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1592 		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1593 	} else {
1594 		if (sc->sc_flags & WM_F_EEPROM_SPI)
1595 			eetype = "SPI";
1596 		else
1597 			eetype = "MicroWire";
1598 		aprint_verbose_dev(sc->sc_dev,
1599 		    "%u word (%d address bits) %s EEPROM\n",
1600 		    1U << sc->sc_ee_addrbits,
1601 		    sc->sc_ee_addrbits, eetype);
1602 	}
1603 
1604 	/*
1605 	 * Read the Ethernet address from the EEPROM, unless it was
1606 	 * already provided via device properties.
1607 	 */
1608 	ea = prop_dictionary_get(dict, "mac-address");
1609 	if (ea != NULL) {
1610 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1611 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1612 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1613 	} else {
1614 		if (wm_read_mac_addr(sc, enaddr) != 0) {
1615 			aprint_error_dev(sc->sc_dev,
1616 			    "unable to read Ethernet address\n");
1617 			return;
1618 		}
1619 	}
1620 
1621 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1622 	    ether_sprintf(enaddr));
1623 
1624 	/*
1625 	 * Read the config info from the EEPROM, and set up various
1626 	 * bits in the control registers based on their contents.
1627 	 */
1628 	pn = prop_dictionary_get(dict, "i82543-cfg1");
1629 	if (pn != NULL) {
1630 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1631 		cfg1 = (uint16_t) prop_number_integer_value(pn);
1632 	} else {
1633 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1634 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1635 			return;
1636 		}
1637 	}
1638 
1639 	pn = prop_dictionary_get(dict, "i82543-cfg2");
1640 	if (pn != NULL) {
1641 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1642 		cfg2 = (uint16_t) prop_number_integer_value(pn);
1643 	} else {
1644 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1645 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1646 			return;
1647 		}
1648 	}
1649 
1650 	/* check for WM_F_WOL */
1651 	switch (sc->sc_type) {
1652 	case WM_T_82542_2_0:
1653 	case WM_T_82542_2_1:
1654 	case WM_T_82543:
1655 		/* dummy? */
1656 		eeprom_data = 0;
1657 		apme_mask = EEPROM_CFG3_APME;
1658 		break;
1659 	case WM_T_82544:
1660 		apme_mask = EEPROM_CFG2_82544_APM_EN;
1661 		eeprom_data = cfg2;
1662 		break;
1663 	case WM_T_82546:
1664 	case WM_T_82546_3:
1665 	case WM_T_82571:
1666 	case WM_T_82572:
1667 	case WM_T_82573:
1668 	case WM_T_82574:
1669 	case WM_T_82583:
1670 	case WM_T_80003:
1671 	default:
1672 		apme_mask = EEPROM_CFG3_APME;
1673 		wm_read_eeprom(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
1674 		    : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
1675 		break;
1676 	case WM_T_82575:
1677 	case WM_T_82576:
1678 	case WM_T_82580:
1679 	case WM_T_82580ER:
1680 	case WM_T_ICH8:
1681 	case WM_T_ICH9:
1682 	case WM_T_ICH10:
1683 	case WM_T_PCH:
1684 		apme_mask = WUC_APME;
1685 		eeprom_data = CSR_READ(sc, WMREG_WUC);
1686 		break;
1687 	}
1688 
1689 	/* Check for the WM_F_WOL flag now that the EEPROM values are set up */
1690 	if ((eeprom_data & apme_mask) != 0)
1691 		sc->sc_flags |= WM_F_WOL;
1692 #ifdef WM_DEBUG
1693 	if ((sc->sc_flags & WM_F_WOL) != 0)
1694 		printf("WOL\n");
1695 #endif
1696 
1697 	/*
1698 	 * XXX need special handling for some multiple-port cards
1699 	 * to disable a particular port.
1700 	 */
1701 
1702 	if (sc->sc_type >= WM_T_82544) {
1703 		pn = prop_dictionary_get(dict, "i82543-swdpin");
1704 		if (pn != NULL) {
1705 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1706 			swdpin = (uint16_t) prop_number_integer_value(pn);
1707 		} else {
1708 			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1709 				aprint_error_dev(sc->sc_dev,
1710 				    "unable to read SWDPIN\n");
1711 				return;
1712 			}
1713 		}
1714 	}
1715 
1716 	if (cfg1 & EEPROM_CFG1_ILOS)
1717 		sc->sc_ctrl |= CTRL_ILOS;
1718 	if (sc->sc_type >= WM_T_82544) {
1719 		sc->sc_ctrl |=
1720 		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1721 		    CTRL_SWDPIO_SHIFT;
1722 		sc->sc_ctrl |=
1723 		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1724 		    CTRL_SWDPINS_SHIFT;
1725 	} else {
1726 		sc->sc_ctrl |=
1727 		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1728 		    CTRL_SWDPIO_SHIFT;
1729 	}
1730 
1731 #if 0
1732 	if (sc->sc_type >= WM_T_82544) {
1733 		if (cfg1 & EEPROM_CFG1_IPS0)
1734 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1735 		if (cfg1 & EEPROM_CFG1_IPS1)
1736 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1737 		sc->sc_ctrl_ext |=
1738 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1739 		    CTRL_EXT_SWDPIO_SHIFT;
1740 		sc->sc_ctrl_ext |=
1741 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1742 		    CTRL_EXT_SWDPINS_SHIFT;
1743 	} else {
1744 		sc->sc_ctrl_ext |=
1745 		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1746 		    CTRL_EXT_SWDPIO_SHIFT;
1747 	}
1748 #endif
1749 
1750 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1751 #if 0
1752 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1753 #endif
1754 
1755 	/*
1756 	 * Set up some register offsets that are different between
1757 	 * the i82542 and the i82543 and later chips.
1758 	 */
1759 	if (sc->sc_type < WM_T_82543) {
1760 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
1761 		sc->sc_tdt_reg = WMREG_OLD_TDT;
1762 	} else {
1763 		sc->sc_rdt_reg = WMREG_RDT;
1764 		sc->sc_tdt_reg = WMREG_TDT;
1765 	}
1766 
1767 	if (sc->sc_type == WM_T_PCH) {
1768 		uint16_t val;
1769 
1770 		/* Save the NVM K1 bit setting */
1771 		wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
1772 
1773 		if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
1774 			sc->sc_nvm_k1_enabled = 1;
1775 		else
1776 			sc->sc_nvm_k1_enabled = 0;
1777 	}
1778 
1779 	/*
1780 	 * Determine whether we're in TBI, GMII or SGMII mode, and initialize the
1781 	 * media structures accordingly.
1782 	 */
1783 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1784 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
1785 	    || sc->sc_type == WM_T_82573
1786 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
1787 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
1788 		wm_gmii_mediainit(sc, wmp->wmp_product);
1789 	} else if (sc->sc_type < WM_T_82543 ||
1790 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1791 		if (wmp->wmp_flags & WMP_F_1000T)
1792 			aprint_error_dev(sc->sc_dev,
1793 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
1794 		wm_tbi_mediainit(sc);
1795 	} else {
1796 		switch (sc->sc_type) {
1797 		case WM_T_82575:
1798 		case WM_T_82576:
1799 		case WM_T_82580:
1800 		case WM_T_82580ER:
1801 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
1802 			switch (reg & CTRL_EXT_LINK_MODE_MASK) {
1803 			case CTRL_EXT_LINK_MODE_SGMII:
1804 				aprint_verbose_dev(sc->sc_dev, "SGMII\n");
1805 				sc->sc_flags |= WM_F_SGMII;
1806 				CSR_WRITE(sc, WMREG_CTRL_EXT,
1807 				    reg | CTRL_EXT_I2C_ENA);
1808 				wm_gmii_mediainit(sc, wmp->wmp_product);
1809 				break;
1810 			case CTRL_EXT_LINK_MODE_1000KX:
1811 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
1812 				aprint_verbose_dev(sc->sc_dev, "1000KX or SERDES\n");
1813 				CSR_WRITE(sc, WMREG_CTRL_EXT,
1814 				    reg | CTRL_EXT_I2C_ENA);
1815 				panic("not supported yet\n");
1816 				break;
1817 			case CTRL_EXT_LINK_MODE_GMII:
1818 			default:
1819 				CSR_WRITE(sc, WMREG_CTRL_EXT,
1820 				    reg & ~CTRL_EXT_I2C_ENA);
1821 				wm_gmii_mediainit(sc, wmp->wmp_product);
1822 				break;
1823 			}
1824 			break;
1825 		default:
1826 			if (wmp->wmp_flags & WMP_F_1000X)
1827 				aprint_error_dev(sc->sc_dev,
1828 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
1829 			wm_gmii_mediainit(sc, wmp->wmp_product);
1830 		}
1831 	}
1832 
1833 	ifp = &sc->sc_ethercom.ec_if;
1834 	xname = device_xname(sc->sc_dev);
1835 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
1836 	ifp->if_softc = sc;
1837 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1838 	ifp->if_ioctl = wm_ioctl;
1839 	ifp->if_start = wm_start;
1840 	ifp->if_watchdog = wm_watchdog;
1841 	ifp->if_init = wm_init;
1842 	ifp->if_stop = wm_stop;
1843 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1844 	IFQ_SET_READY(&ifp->if_snd);
1845 
1846 	/* Check for jumbo frame */
1847 	switch (sc->sc_type) {
1848 	case WM_T_82573:
1849 		/* XXX limited to 9234 if ASPM is disabled */
1850 		wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
1851 		if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
1852 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1853 		break;
1854 	case WM_T_82571:
1855 	case WM_T_82572:
1856 	case WM_T_82574:
1857 	case WM_T_82575:
1858 	case WM_T_82576:
1859 	case WM_T_82580:
1860 	case WM_T_82580ER:
1861 	case WM_T_80003:
1862 	case WM_T_ICH9:
1863 	case WM_T_ICH10:
1864 		/* XXX limited to 9234 */
1865 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1866 		break;
1867 	case WM_T_PCH:
1868 		/* XXX limited to 4096 */
1869 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1870 		break;
1871 	case WM_T_82542_2_0:
1872 	case WM_T_82542_2_1:
1873 	case WM_T_82583:
1874 	case WM_T_ICH8:
1875 		/* No support for jumbo frame */
1876 		break;
1877 	default:
1878 		/* ETHER_MAX_LEN_JUMBO */
1879 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1880 		break;
1881 	}
1882 
1883 	/*
1884 	 * If we're an i82543 or greater, we can support VLANs.
1885 	 */
1886 	if (sc->sc_type >= WM_T_82543)
1887 		sc->sc_ethercom.ec_capabilities |=
1888 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
1889 
1890 	/*
1891 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
1892 	 * on i82543 and later.
1893 	 */
1894 	if (sc->sc_type >= WM_T_82543) {
1895 		ifp->if_capabilities |=
1896 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1897 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1898 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1899 		    IFCAP_CSUM_TCPv6_Tx |
1900 		    IFCAP_CSUM_UDPv6_Tx;
1901 	}
1902 
1903 	/*
1904 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
1905 	 *
1906 	 *	82541GI (8086:1076) ... no
1907 	 *	82572EI (8086:10b9) ... yes
1908 	 */
1909 	if (sc->sc_type >= WM_T_82571) {
1910 		ifp->if_capabilities |=
1911 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1912 	}
1913 
1914 	/*
1915 	 * If we're an i82544 or greater (except i82547), we can do
1916 	 * TCP segmentation offload.
1917 	 */
1918 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
1919 		ifp->if_capabilities |= IFCAP_TSOv4;
1920 	}
1921 
1922 	if (sc->sc_type >= WM_T_82571) {
1923 		ifp->if_capabilities |= IFCAP_TSOv6;
1924 	}
1925 
1926 	/*
1927 	 * Attach the interface.
1928 	 */
1929 	if_attach(ifp);
1930 	ether_ifattach(ifp, enaddr);
1931 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
1932 #if NRND > 0
1933 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
1934 #endif
1935 
1936 #ifdef WM_EVENT_COUNTERS
1937 	/* Attach event counters. */
1938 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1939 	    NULL, xname, "txsstall");
1940 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1941 	    NULL, xname, "txdstall");
1942 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1943 	    NULL, xname, "txfifo_stall");
1944 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1945 	    NULL, xname, "txdw");
1946 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1947 	    NULL, xname, "txqe");
1948 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1949 	    NULL, xname, "rxintr");
1950 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
1951 	    NULL, xname, "linkintr");
1952 
1953 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1954 	    NULL, xname, "rxipsum");
1955 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
1956 	    NULL, xname, "rxtusum");
1957 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1958 	    NULL, xname, "txipsum");
1959 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
1960 	    NULL, xname, "txtusum");
1961 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
1962 	    NULL, xname, "txtusum6");
1963 
1964 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
1965 	    NULL, xname, "txtso");
1966 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
1967 	    NULL, xname, "txtso6");
1968 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
1969 	    NULL, xname, "txtsopain");
1970 
1971 	for (i = 0; i < WM_NTXSEGS; i++) {
1972 		sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
1973 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
1974 		    NULL, xname, wm_txseg_evcnt_names[i]);
1975 	}
1976 
1977 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
1978 	    NULL, xname, "txdrop");
1979 
1980 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
1981 	    NULL, xname, "tu");
1982 
1983 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
1984 	    NULL, xname, "tx_xoff");
1985 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
1986 	    NULL, xname, "tx_xon");
1987 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
1988 	    NULL, xname, "rx_xoff");
1989 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
1990 	    NULL, xname, "rx_xon");
1991 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
1992 	    NULL, xname, "rx_macctl");
1993 #endif /* WM_EVENT_COUNTERS */
1994 
1995 	if (pmf_device_register(self, wm_suspend, wm_resume))
1996 		pmf_class_network_register(self, ifp);
1997 	else
1998 		aprint_error_dev(self, "couldn't establish power handler\n");
1999 
2000 	return;
2001 
2002 	/*
2003 	 * Free any resources we've allocated during the failed attach
2004 	 * attempt.  Do this in reverse order and fall through.
2005 	 */
2006  fail_5:
2007 	for (i = 0; i < WM_NRXDESC; i++) {
2008 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2009 			bus_dmamap_destroy(sc->sc_dmat,
2010 			    sc->sc_rxsoft[i].rxs_dmamap);
2011 	}
2012  fail_4:
2013 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2014 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
2015 			bus_dmamap_destroy(sc->sc_dmat,
2016 			    sc->sc_txsoft[i].txs_dmamap);
2017 	}
2018 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2019  fail_3:
2020 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2021  fail_2:
2022 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2023 	    sc->sc_cd_size);
2024  fail_1:
2025 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2026  fail_0:
2027 	return;
2028 }
2029 
2030 static int
2031 wm_detach(device_t self, int flags __unused)
2032 {
2033 	struct wm_softc *sc = device_private(self);
2034 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2035 	int i, s;
2036 
2037 	s = splnet();
2038 	/* Stop the interface.  Callouts are stopped inside wm_stop(). */
2039 	wm_stop(ifp, 1);
2040 	splx(s);
2041 
2042 	pmf_device_deregister(self);
2043 
2044 	/* Tell the firmware about the release */
2045 	wm_release_manageability(sc);
2046 	wm_release_hw_control(sc);
2047 
2048 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2049 
2050 	/* Delete all remaining media. */
2051 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2052 
2053 	ether_ifdetach(ifp);
2054 	if_detach(ifp);
2055 
2057 	/* Unload RX dmamaps and free mbufs */
2058 	wm_rxdrain(sc);
2059 
2060 	/* Free dmamap. It's the same as the end of the wm_attach() function */
2061 	for (i = 0; i < WM_NRXDESC; i++) {
2062 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2063 			bus_dmamap_destroy(sc->sc_dmat,
2064 			    sc->sc_rxsoft[i].rxs_dmamap);
2065 	}
2066 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2067 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
2068 			bus_dmamap_destroy(sc->sc_dmat,
2069 			    sc->sc_txsoft[i].txs_dmamap);
2070 	}
2071 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2072 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2073 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2074 	    sc->sc_cd_size);
2075 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2076 
2077 	/* Disestablish the interrupt handler */
2078 	if (sc->sc_ih != NULL) {
2079 		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2080 		sc->sc_ih = NULL;
2081 	}
2082 
2083 	/* Unmap the registers */
2084 	if (sc->sc_ss) {
2085 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2086 		sc->sc_ss = 0;
2087 	}
2088 
2089 	if (sc->sc_ios) {
2090 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2091 		sc->sc_ios = 0;
2092 	}
2093 
2094 	return 0;
2095 }
2096 
2097 /*
2098  * wm_tx_offload:
2099  *
2100  *	Set up TCP/IP checksumming parameters for the
2101  *	specified packet.
2102  */
2103 static int
2104 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
2105     uint8_t *fieldsp)
2106 {
2107 	struct mbuf *m0 = txs->txs_mbuf;
2108 	struct livengood_tcpip_ctxdesc *t;
2109 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
2110 	uint32_t ipcse;
2111 	struct ether_header *eh;
2112 	int offset, iphl;
2113 	uint8_t fields;
2114 
2115 	/*
2116 	 * XXX It would be nice if the mbuf pkthdr had offset
2117 	 * fields for the protocol headers.
2118 	 */
2119 
2120 	eh = mtod(m0, struct ether_header *);
2121 	switch (htons(eh->ether_type)) {
2122 	case ETHERTYPE_IP:
2123 	case ETHERTYPE_IPV6:
2124 		offset = ETHER_HDR_LEN;
2125 		break;
2126 
2127 	case ETHERTYPE_VLAN:
2128 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2129 		break;
2130 
2131 	default:
2132 		/*
2133 		 * Don't support this protocol or encapsulation.
2134 		 */
2135 		*fieldsp = 0;
2136 		*cmdp = 0;
2137 		return 0;
2138 	}
2139 
2140 	if ((m0->m_pkthdr.csum_flags &
2141 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
2142 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2143 	} else {
2144 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2145 	}
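	/*
	 * IPCSE is the (inclusive) offset of the last byte covered by
	 * the IP header checksum, hence the -1 below.
	 */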
2146 	ipcse = offset + iphl - 1;
2147 
2148 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
2149 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
2150 	seg = 0;
2151 	fields = 0;
2152 
2153 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2154 		int hlen = offset + iphl;
2155 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2156 
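		/*
		 * For TSO, the IP total-length field is zeroed and the
		 * TCP checksum is seeded with a pseudo-header sum
		 * computed with a zero length; the hardware then fixes
		 * up the lengths and checksums of each segment it
		 * generates.
		 */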
2157 		if (__predict_false(m0->m_len <
2158 				    (hlen + sizeof(struct tcphdr)))) {
2159 			/*
2160 			 * TCP/IP headers are not in the first mbuf; we need
2161 			 * to do this the slow and painful way.  Let's just
2162 			 * hope this doesn't happen very often.
2163 			 */
2164 			struct tcphdr th;
2165 
2166 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2167 
2168 			m_copydata(m0, hlen, sizeof(th), &th);
2169 			if (v4) {
2170 				struct ip ip;
2171 
2172 				m_copydata(m0, offset, sizeof(ip), &ip);
2173 				ip.ip_len = 0;
2174 				m_copyback(m0,
2175 				    offset + offsetof(struct ip, ip_len),
2176 				    sizeof(ip.ip_len), &ip.ip_len);
2177 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2178 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2179 			} else {
2180 				struct ip6_hdr ip6;
2181 
2182 				m_copydata(m0, offset, sizeof(ip6), &ip6);
2183 				ip6.ip6_plen = 0;
2184 				m_copyback(m0,
2185 				    offset + offsetof(struct ip6_hdr, ip6_plen),
2186 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2187 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2188 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2189 			}
2190 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2191 			    sizeof(th.th_sum), &th.th_sum);
2192 
2193 			hlen += th.th_off << 2;
2194 		} else {
2195 			/*
2196 			 * TCP/IP headers are in the first mbuf; we can do
2197 			 * this the easy way.
2198 			 */
2199 			struct tcphdr *th;
2200 
2201 			if (v4) {
2202 				struct ip *ip =
2203 				    (void *)(mtod(m0, char *) + offset);
2204 				th = (void *)(mtod(m0, char *) + hlen);
2205 
2206 				ip->ip_len = 0;
2207 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2208 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2209 			} else {
2210 				struct ip6_hdr *ip6 =
2211 				    (void *)(mtod(m0, char *) + offset);
2212 				th = (void *)(mtod(m0, char *) + hlen);
2213 
2214 				ip6->ip6_plen = 0;
2215 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2216 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2217 			}
2218 			hlen += th->th_off << 2;
2219 		}
2220 
2221 		if (v4) {
2222 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
2223 			cmdlen |= WTX_TCPIP_CMD_IP;
2224 		} else {
2225 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2226 			ipcse = 0;
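			/* IPv6 has no IP header checksum, so IPCSE stays 0. */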
2227 		}
2228 		cmd |= WTX_TCPIP_CMD_TSE;
2229 		cmdlen |= WTX_TCPIP_CMD_TSE |
2230 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
2231 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
2232 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
2233 	}
2234 
2235 	/*
2236 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
2237 	 * offload feature, if we load the context descriptor, we
2238 	 * MUST provide valid values for IPCSS and TUCSS fields.
2239 	 */
2240 
2241 	ipcs = WTX_TCPIP_IPCSS(offset) |
2242 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
2243 	    WTX_TCPIP_IPCSE(ipcse);
2244 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
2245 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
2246 		fields |= WTX_IXSM;
2247 	}
2248 
2249 	offset += iphl;
2250 
2251 	if (m0->m_pkthdr.csum_flags &
2252 	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
2253 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2254 		fields |= WTX_TXSM;
2255 		tucs = WTX_TCPIP_TUCSS(offset) |
2256 		    WTX_TCPIP_TUCSO(offset +
2257 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
2258 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
2259 	} else if ((m0->m_pkthdr.csum_flags &
2260 	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
2261 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
2262 		fields |= WTX_TXSM;
2263 		tucs = WTX_TCPIP_TUCSS(offset) |
2264 		    WTX_TCPIP_TUCSO(offset +
2265 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
2266 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
2267 	} else {
2268 		/* Just initialize it to a valid TCP context. */
2269 		tucs = WTX_TCPIP_TUCSS(offset) |
2270 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
2271 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
2272 	}
2273 
2274 	/* Fill in the context descriptor. */
2275 	t = (struct livengood_tcpip_ctxdesc *)
2276 	    &sc->sc_txdescs[sc->sc_txnext];
2277 	t->tcpip_ipcs = htole32(ipcs);
2278 	t->tcpip_tucs = htole32(tucs);
2279 	t->tcpip_cmdlen = htole32(cmdlen);
2280 	t->tcpip_seg = htole32(seg);
2281 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
2282 
2283 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
2284 	txs->txs_ndesc++;
2285 
2286 	*cmdp = cmd;
2287 	*fieldsp = fields;
2288 
2289 	return 0;
2290 }
2291 
2292 static void
2293 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
2294 {
2295 	struct mbuf *m;
2296 	int i;
2297 
2298 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
2299 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
2300 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
2301 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
2302 		    m->m_data, m->m_len, m->m_flags);
2303 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
2304 	    i, i == 1 ? "" : "s");
2305 }
2306 
2307 /*
2308  * wm_82547_txfifo_stall:
2309  *
2310  *	Callout used to wait for the 82547 Tx FIFO to drain,
2311  *	reset the FIFO pointers, and restart packet transmission.
2312  */
2313 static void
2314 wm_82547_txfifo_stall(void *arg)
2315 {
2316 	struct wm_softc *sc = arg;
2317 	int s;
2318 
2319 	s = splnet();
2320 
2321 	if (sc->sc_txfifo_stall) {
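		/*
		 * The FIFO has drained once the descriptor ring head
		 * has caught up with the tail and the internal FIFO
		 * head/tail pointers (and their saved copies) agree.
		 */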
2322 		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
2323 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
2324 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
2325 			/*
2326 			 * Packets have drained.  Stop transmitter, reset
2327 			 * FIFO pointers, restart transmitter, and kick
2328 			 * the packet queue.
2329 			 */
2330 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
2331 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
2332 			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
2333 			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
2334 			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
2335 			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
2336 			CSR_WRITE(sc, WMREG_TCTL, tctl);
2337 			CSR_WRITE_FLUSH(sc);
2338 
2339 			sc->sc_txfifo_head = 0;
2340 			sc->sc_txfifo_stall = 0;
2341 			wm_start(&sc->sc_ethercom.ec_if);
2342 		} else {
2343 			/*
2344 			 * Still waiting for packets to drain; try again in
2345 			 * another tick.
2346 			 */
2347 			callout_schedule(&sc->sc_txfifo_ch, 1);
2348 		}
2349 	}
2350 
2351 	splx(s);
2352 }
2353 
2354 /*
2355  * wm_82547_txfifo_bugchk:
2356  *
2357  *	Check for bug condition in the 82547 Tx FIFO.  We need to
2358  *	prevent enqueueing a packet that would wrap around the end
2359  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
2360  *
2361  *	We do this by checking the amount of space before the end
2362  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
2363  *	the Tx FIFO, wait for all remaining packets to drain, reset
2364  *	the internal FIFO pointers to the beginning, and restart
2365  *	transmission on the interface.
2366  */
2367 #define	WM_FIFO_HDR		0x10
2368 #define	WM_82547_PAD_LEN	0x3e0
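/*
 * Each packet occupies its length plus one WM_FIFO_HDR of FIFO
 * header, rounded up to a WM_FIFO_HDR boundary; sc_txfifo_head
 * mirrors the hardware's FIFO write pointer so that we can predict
 * when a packet would trip the wraparound erratum.
 */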
2369 static int
2370 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2371 {
2372 	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2373 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2374 
2375 	/* Just return if already stalled. */
2376 	if (sc->sc_txfifo_stall)
2377 		return 1;
2378 
2379 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
2380 		/* Stall only occurs in half-duplex mode. */
2381 		goto send_packet;
2382 	}
2383 
2384 	if (len >= WM_82547_PAD_LEN + space) {
2385 		sc->sc_txfifo_stall = 1;
2386 		callout_schedule(&sc->sc_txfifo_ch, 1);
2387 		return 1;
2388 	}
2389 
2390  send_packet:
2391 	sc->sc_txfifo_head += len;
2392 	if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2393 		sc->sc_txfifo_head -= sc->sc_txfifo_size;
2394 
2395 	return 0;
2396 }
2397 
2398 /*
2399  * wm_start:		[ifnet interface function]
2400  *
2401  *	Start packet transmission on the interface.
2402  */
2403 static void
2404 wm_start(struct ifnet *ifp)
2405 {
2406 	struct wm_softc *sc = ifp->if_softc;
2407 	struct mbuf *m0;
2408 	struct m_tag *mtag;
2409 	struct wm_txsoft *txs;
2410 	bus_dmamap_t dmamap;
2411 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2412 	bus_addr_t curaddr;
2413 	bus_size_t seglen, curlen;
2414 	uint32_t cksumcmd;
2415 	uint8_t cksumfields;
2416 
2417 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2418 		return;
2419 
2420 	/*
2421 	 * Remember the previous number of free descriptors.
2422 	 */
2423 	ofree = sc->sc_txfree;
2424 
2425 	/*
2426 	 * Loop through the send queue, setting up transmit descriptors
2427 	 * until we drain the queue, or use up all available transmit
2428 	 * descriptors.
2429 	 */
2430 	for (;;) {
2431 		/* Grab a packet off the queue. */
2432 		IFQ_POLL(&ifp->if_snd, m0);
2433 		if (m0 == NULL)
2434 			break;
2435 
2436 		DPRINTF(WM_DEBUG_TX,
2437 		    ("%s: TX: have packet to transmit: %p\n",
2438 		    device_xname(sc->sc_dev), m0));
2439 
2440 		/* Get a work queue entry. */
2441 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2442 			wm_txintr(sc);
2443 			if (sc->sc_txsfree == 0) {
2444 				DPRINTF(WM_DEBUG_TX,
2445 				    ("%s: TX: no free job descriptors\n",
2446 					device_xname(sc->sc_dev)));
2447 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2448 				break;
2449 			}
2450 		}
2451 
2452 		txs = &sc->sc_txsoft[sc->sc_txsnext];
2453 		dmamap = txs->txs_dmamap;
2454 
2455 		use_tso = (m0->m_pkthdr.csum_flags &
2456 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2457 
2458 		/*
2459 		 * So says the Linux driver:
2460 		 * The controller does a simple calculation to make sure
2461 		 * there is enough room in the FIFO before initiating the
2462 		 * DMA for each buffer.  The calc is:
2463 		 *	4 = ceil(buffer len / MSS)
2464 		 * To make sure we don't overrun the FIFO, adjust the max
2465 		 * buffer len if the MSS drops.
2466 		 */
2467 		dmamap->dm_maxsegsz =
2468 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2469 		    ? m0->m_pkthdr.segsz << 2
2470 		    : WTX_MAX_LEN;
2471 
2472 		/*
2473 		 * Load the DMA map.  If this fails, the packet either
2474 		 * didn't fit in the allotted number of segments, or we
2475 		 * were short on resources.  For the too-many-segments
2476 		 * case, we simply report an error and drop the packet,
2477 		 * since we can't sanely copy a jumbo packet to a single
2478 		 * buffer.
2479 		 */
2480 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2481 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2482 		if (error) {
2483 			if (error == EFBIG) {
2484 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2485 				log(LOG_ERR, "%s: Tx packet consumes too many "
2486 				    "DMA segments, dropping...\n",
2487 				    device_xname(sc->sc_dev));
2488 				IFQ_DEQUEUE(&ifp->if_snd, m0);
2489 				wm_dump_mbuf_chain(sc, m0);
2490 				m_freem(m0);
2491 				continue;
2492 			}
2493 			/*
2494 			 * Short on resources, just stop for now.
2495 			 */
2496 			DPRINTF(WM_DEBUG_TX,
2497 			    ("%s: TX: dmamap load failed: %d\n",
2498 			    device_xname(sc->sc_dev), error));
2499 			break;
2500 		}
2501 
2502 		segs_needed = dmamap->dm_nsegs;
2503 		if (use_tso) {
2504 			/* For sentinel descriptor; see below. */
2505 			segs_needed++;
2506 		}
2507 
2508 		/*
2509 		 * Ensure we have enough descriptors free to describe
2510 		 * the packet.  Note, we always reserve one descriptor
2511 		 * at the end of the ring due to the semantics of the
2512 		 * TDT register, plus one more in the event we need
2513 		 * to load offload context.
2514 		 */
2515 		if (segs_needed > sc->sc_txfree - 2) {
2516 			/*
2517 			 * Not enough free descriptors to transmit this
2518 			 * packet.  We haven't committed anything yet,
2519 			 * so just unload the DMA map, put the packet
2520 			 * back on the queue, and punt.  Notify the upper
2521 			 * layer that there are no more slots left.
2522 			 */
2523 			DPRINTF(WM_DEBUG_TX,
2524 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
2525 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
2526 			    segs_needed, sc->sc_txfree - 1));
2527 			ifp->if_flags |= IFF_OACTIVE;
2528 			bus_dmamap_unload(sc->sc_dmat, dmamap);
2529 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2530 			break;
2531 		}
2532 
2533 		/*
2534 		 * Check for 82547 Tx FIFO bug.  We need to do this
2535 		 * once we know we can transmit the packet, since we
2536 		 * do some internal FIFO space accounting here.
2537 		 */
2538 		if (sc->sc_type == WM_T_82547 &&
2539 		    wm_82547_txfifo_bugchk(sc, m0)) {
2540 			DPRINTF(WM_DEBUG_TX,
2541 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
2542 			    device_xname(sc->sc_dev)));
2543 			ifp->if_flags |= IFF_OACTIVE;
2544 			bus_dmamap_unload(sc->sc_dmat, dmamap);
2545 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2546 			break;
2547 		}
2548 
2549 		IFQ_DEQUEUE(&ifp->if_snd, m0);
2550 
2551 		/*
2552 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2553 		 */
2554 
2555 		DPRINTF(WM_DEBUG_TX,
2556 		    ("%s: TX: packet has %d (%d) DMA segments\n",
2557 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2558 
2559 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2560 
2561 		/*
2562 		 * Store a pointer to the packet so that we can free it
2563 		 * later.
2564 		 *
2565 		 * Initially, we consider the number of descriptors the
2566 		 * packet uses to be the number of DMA segments.  This may be
2567 		 * incremented by 1 if we do checksum offload (a descriptor
2568 		 * is used to set the checksum context).
2569 		 */
2570 		txs->txs_mbuf = m0;
2571 		txs->txs_firstdesc = sc->sc_txnext;
2572 		txs->txs_ndesc = segs_needed;
2573 
2574 		/* Set up offload parameters for this packet. */
2575 		if (m0->m_pkthdr.csum_flags &
2576 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
2577 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2578 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2579 			if (wm_tx_offload(sc, txs, &cksumcmd,
2580 					  &cksumfields) != 0) {
2581 				/* Error message already displayed. */
2582 				bus_dmamap_unload(sc->sc_dmat, dmamap);
2583 				continue;
2584 			}
2585 		} else {
2586 			cksumcmd = 0;
2587 			cksumfields = 0;
2588 		}
2589 
2590 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2591 
2592 		/* Sync the DMA map. */
2593 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2594 		    BUS_DMASYNC_PREWRITE);
2595 
2596 		/*
2597 		 * Initialize the transmit descriptor.
2598 		 */
2599 		for (nexttx = sc->sc_txnext, seg = 0;
2600 		     seg < dmamap->dm_nsegs; seg++) {
2601 			for (seglen = dmamap->dm_segs[seg].ds_len,
2602 			     curaddr = dmamap->dm_segs[seg].ds_addr;
2603 			     seglen != 0;
2604 			     curaddr += curlen, seglen -= curlen,
2605 			     nexttx = WM_NEXTTX(sc, nexttx)) {
2606 				curlen = seglen;
2607 
2608 				/*
2609 				 * So says the Linux driver:
2610 				 * Work around for premature descriptor
2611 				 * write-backs in TSO mode.  Append a
2612 				 * 4-byte sentinel descriptor.
2613 				 */
2614 				if (use_tso &&
2615 				    seg == dmamap->dm_nsegs - 1 &&
2616 				    curlen > 8)
2617 					curlen -= 4;
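				/*
				 * Shaving 4 bytes off the final segment
				 * leaves a 4-byte remainder, so the loop
				 * emits one extra short descriptor: the
				 * sentinel.
				 */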
2618 
2619 				wm_set_dma_addr(
2620 				    &sc->sc_txdescs[nexttx].wtx_addr,
2621 				    curaddr);
2622 				sc->sc_txdescs[nexttx].wtx_cmdlen =
2623 				    htole32(cksumcmd | curlen);
2624 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2625 				    0;
2626 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2627 				    cksumfields;
2628 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2629 				lasttx = nexttx;
2630 
2631 				DPRINTF(WM_DEBUG_TX,
2632 				    ("%s: TX: desc %d: low %#" PRIxPADDR ", "
2633 				     "len %#04zx\n",
2634 				    device_xname(sc->sc_dev), nexttx,
2635 				    curaddr & 0xffffffffUL, curlen));
2636 			}
2637 		}
2638 
2639 		KASSERT(lasttx != -1);
2640 
2641 		/*
2642 		 * Set up the command byte on the last descriptor of
2643 		 * the packet.  If we're in the interrupt delay window,
2644 		 * delay the interrupt.
2645 		 */
2646 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
2647 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
2648 
2649 		/*
2650 		 * If VLANs are enabled and the packet has a VLAN tag, set
2651 		 * up the descriptor to encapsulate the packet for us.
2652 		 *
2653 		 * This is only valid on the last descriptor of the packet.
2654 		 */
2655 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2656 			sc->sc_txdescs[lasttx].wtx_cmdlen |=
2657 			    htole32(WTX_CMD_VLE);
2658 			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2659 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2660 		}
2661 
2662 		txs->txs_lastdesc = lasttx;
2663 
2664 		DPRINTF(WM_DEBUG_TX,
2665 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
2666 		    device_xname(sc->sc_dev),
2667 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2668 
2669 		/* Sync the descriptors we're using. */
2670 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2671 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2672 
2673 		/* Give the packet to the chip. */
2674 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
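		/*
		 * Writing TDT hands the hardware every descriptor up
		 * to, but not including, slot nexttx.
		 */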
2675 
2676 		DPRINTF(WM_DEBUG_TX,
2677 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2678 
2679 		DPRINTF(WM_DEBUG_TX,
2680 		    ("%s: TX: finished transmitting packet, job %d\n",
2681 		    device_xname(sc->sc_dev), sc->sc_txsnext));
2682 
2683 		/* Advance the tx pointer. */
2684 		sc->sc_txfree -= txs->txs_ndesc;
2685 		sc->sc_txnext = nexttx;
2686 
2687 		sc->sc_txsfree--;
2688 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2689 
2690 		/* Pass the packet to any BPF listeners. */
2691 		bpf_mtap(ifp, m0);
2692 	}
2693 
2694 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2695 		/* No more slots; notify upper layer. */
2696 		ifp->if_flags |= IFF_OACTIVE;
2697 	}
2698 
2699 	if (sc->sc_txfree != ofree) {
2700 		/* Set a watchdog timer in case the chip flakes out. */
2701 		ifp->if_timer = 5;
2702 	}
2703 }
2704 
2705 /*
2706  * wm_watchdog:		[ifnet interface function]
2707  *
2708  *	Watchdog timer handler.
2709  */
2710 static void
2711 wm_watchdog(struct ifnet *ifp)
2712 {
2713 	struct wm_softc *sc = ifp->if_softc;
2714 
2715 	/*
2716 	 * Since we're using delayed interrupts, sweep up
2717 	 * before we report an error.
2718 	 */
2719 	wm_txintr(sc);
2720 
2721 	if (sc->sc_txfree != WM_NTXDESC(sc)) {
2722 		log(LOG_ERR,
2723 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2724 		    device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2725 		    sc->sc_txnext);
2726 		ifp->if_oerrors++;
2727 
2728 		/* Reset the interface. */
2729 		(void) wm_init(ifp);
2730 	}
2731 
2732 	/* Try to get more packets going. */
2733 	wm_start(ifp);
2734 }
2735 
2736 static int
2737 wm_ifflags_cb(struct ethercom *ec)
2738 {
2739 	struct ifnet *ifp = &ec->ec_if;
2740 	struct wm_softc *sc = ifp->if_softc;
2741 	int change = ifp->if_flags ^ sc->sc_if_flags;
2742 
2743 	if (change != 0)
2744 		sc->sc_if_flags = ifp->if_flags;
2745 
2746 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
2747 		return ENETRESET;
2748 
2749 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2750 		wm_set_filter(sc);
2751 
2752 	wm_set_vlan(sc);
2753 
2754 	return 0;
2755 }
2756 
2757 /*
2758  * wm_ioctl:		[ifnet interface function]
2759  *
2760  *	Handle control requests from the operator.
2761  */
2762 static int
2763 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2764 {
2765 	struct wm_softc *sc = ifp->if_softc;
2766 	struct ifreq *ifr = (struct ifreq *) data;
2767 	struct ifaddr *ifa = (struct ifaddr *)data;
2768 	struct sockaddr_dl *sdl;
2769 	int s, error;
2770 
2771 	s = splnet();
2772 
2773 	switch (cmd) {
2774 	case SIOCSIFMEDIA:
2775 	case SIOCGIFMEDIA:
2776 		/* Flow control requires full-duplex mode. */
2777 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2778 		    (ifr->ifr_media & IFM_FDX) == 0)
2779 			ifr->ifr_media &= ~IFM_ETH_FMASK;
2780 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2781 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2782 				/* We can do both TXPAUSE and RXPAUSE. */
2783 				ifr->ifr_media |=
2784 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2785 			}
2786 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2787 		}
2788 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2789 		break;
2790 	case SIOCINITIFADDR:
2791 		if (ifa->ifa_addr->sa_family == AF_LINK) {
2792 			sdl = satosdl(ifp->if_dl->ifa_addr);
2793 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2794 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2795 			/* unicast address is first multicast entry */
2796 			wm_set_filter(sc);
2797 			error = 0;
2798 			break;
2799 		}
2800 		/* Fall through for rest */
2801 	default:
2802 		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
2803 			break;
2804 
2805 		error = 0;
2806 
2807 		if (cmd == SIOCSIFCAP)
2808 			error = (*ifp->if_init)(ifp);
2809 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2810 			;
2811 		else if (ifp->if_flags & IFF_RUNNING) {
2812 			/*
2813 			 * Multicast list has changed; set the hardware filter
2814 			 * accordingly.
2815 			 */
2816 			wm_set_filter(sc);
2817 		}
2818 		break;
2819 	}
2820 
2821 	/* Try to get more packets going. */
2822 	wm_start(ifp);
2823 
2824 	splx(s);
2825 	return error;
2826 }
2827 
2828 /*
2829  * wm_intr:
2830  *
2831  *	Interrupt service routine.
2832  */
2833 static int
2834 wm_intr(void *arg)
2835 {
2836 	struct wm_softc *sc = arg;
2837 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2838 	uint32_t icr;
2839 	int handled = 0;
2840 
2841 	while (1 /* CONSTCOND */) {
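		/*
		 * ICR is read-to-clear: each read acknowledges the
		 * causes it returns, so keep reading until nothing we
		 * care about remains asserted.
		 */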
2842 		icr = CSR_READ(sc, WMREG_ICR);
2843 		if ((icr & sc->sc_icr) == 0)
2844 			break;
2845 #if 0 /*NRND > 0*/
2846 		if (RND_ENABLED(&sc->rnd_source))
2847 			rnd_add_uint32(&sc->rnd_source, icr);
2848 #endif
2849 
2850 		handled = 1;
2851 
2852 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2853 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
2854 			DPRINTF(WM_DEBUG_RX,
2855 			    ("%s: RX: got Rx intr 0x%08x\n",
2856 			    device_xname(sc->sc_dev),
2857 			    icr & (ICR_RXDMT0|ICR_RXT0)));
2858 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
2859 		}
2860 #endif
2861 		wm_rxintr(sc);
2862 
2863 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2864 		if (icr & ICR_TXDW) {
2865 			DPRINTF(WM_DEBUG_TX,
2866 			    ("%s: TX: got TXDW interrupt\n",
2867 			    device_xname(sc->sc_dev)));
2868 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
2869 		}
2870 #endif
2871 		wm_txintr(sc);
2872 
2873 		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
2874 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
2875 			wm_linkintr(sc, icr);
2876 		}
2877 
2878 		if (icr & ICR_RXO) {
2879 #if defined(WM_DEBUG)
2880 			log(LOG_WARNING, "%s: Receive overrun\n",
2881 			    device_xname(sc->sc_dev));
2882 #endif /* defined(WM_DEBUG) */
2883 		}
2884 	}
2885 
2886 	if (handled) {
2887 		/* Try to get more packets going. */
2888 		wm_start(ifp);
2889 	}
2890 
2891 	return handled;
2892 }
2893 
2894 /*
2895  * wm_txintr:
2896  *
2897  *	Helper; handle transmit interrupts.
2898  */
2899 static void
2900 wm_txintr(struct wm_softc *sc)
2901 {
2902 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2903 	struct wm_txsoft *txs;
2904 	uint8_t status;
2905 	int i;
2906 
2907 	ifp->if_flags &= ~IFF_OACTIVE;
2908 
2909 	/*
2910 	 * Go through the Tx list and free mbufs for those
2911 	 * frames which have been transmitted.
2912 	 */
2913 	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
2914 	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
2915 		txs = &sc->sc_txsoft[i];
2916 
2917 		DPRINTF(WM_DEBUG_TX,
2918 		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
2919 
2920 		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
2921 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2922 
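		/*
		 * The chip sets DD (descriptor done) in the job's last
		 * descriptor once the whole packet has been handled;
		 * while it is clear, the job is still in flight.
		 */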
2923 		status =
2924 		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
2925 		if ((status & WTX_ST_DD) == 0) {
2926 			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
2927 			    BUS_DMASYNC_PREREAD);
2928 			break;
2929 		}
2930 
2931 		DPRINTF(WM_DEBUG_TX,
2932 		    ("%s: TX: job %d done: descs %d..%d\n",
2933 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
2934 		    txs->txs_lastdesc));
2935 
2936 		/*
2937 		 * XXX We should probably be using the statistics
2938 		 * XXX registers, but I don't know if they exist
2939 		 * XXX on chips before the i82544.
2940 		 */
2941 
2942 #ifdef WM_EVENT_COUNTERS
2943 		if (status & WTX_ST_TU)
2944 			WM_EVCNT_INCR(&sc->sc_ev_tu);
2945 #endif /* WM_EVENT_COUNTERS */
2946 
2947 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
2948 			ifp->if_oerrors++;
2949 			if (status & WTX_ST_LC)
2950 				log(LOG_WARNING, "%s: late collision\n",
2951 				    device_xname(sc->sc_dev));
2952 			else if (status & WTX_ST_EC) {
2953 				ifp->if_collisions += 16;
2954 				log(LOG_WARNING, "%s: excessive collisions\n",
2955 				    device_xname(sc->sc_dev));
2956 			}
2957 		} else
2958 			ifp->if_opackets++;
2959 
2960 		sc->sc_txfree += txs->txs_ndesc;
2961 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
2962 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2963 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2964 		m_freem(txs->txs_mbuf);
2965 		txs->txs_mbuf = NULL;
2966 	}
2967 
2968 	/* Update the dirty transmit buffer pointer. */
2969 	sc->sc_txsdirty = i;
2970 	DPRINTF(WM_DEBUG_TX,
2971 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
2972 
2973 	/*
2974 	 * If there are no more pending transmissions, cancel the watchdog
2975 	 * timer.
2976 	 */
2977 	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
2978 		ifp->if_timer = 0;
2979 }
2980 
2981 /*
2982  * wm_rxintr:
2983  *
2984  *	Helper; handle receive interrupts.
2985  */
2986 static void
2987 wm_rxintr(struct wm_softc *sc)
2988 {
2989 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2990 	struct wm_rxsoft *rxs;
2991 	struct mbuf *m;
2992 	int i, len;
2993 	uint8_t status, errors;
2994 	uint16_t vlantag;
2995 
2996 	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
2997 		rxs = &sc->sc_rxsoft[i];
2998 
2999 		DPRINTF(WM_DEBUG_RX,
3000 		    ("%s: RX: checking descriptor %d\n",
3001 		    device_xname(sc->sc_dev), i));
3002 
3003 		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3004 
3005 		status = sc->sc_rxdescs[i].wrx_status;
3006 		errors = sc->sc_rxdescs[i].wrx_errors;
3007 		len = le16toh(sc->sc_rxdescs[i].wrx_len);
3008 		vlantag = sc->sc_rxdescs[i].wrx_special;
3009 
3010 		if ((status & WRX_ST_DD) == 0) {
3011 			/*
3012 			 * We have processed all of the receive descriptors.
3013 			 */
3014 			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
3015 			break;
3016 		}
3017 
3018 		if (__predict_false(sc->sc_rxdiscard)) {
3019 			DPRINTF(WM_DEBUG_RX,
3020 			    ("%s: RX: discarding contents of descriptor %d\n",
3021 			    device_xname(sc->sc_dev), i));
3022 			WM_INIT_RXDESC(sc, i);
3023 			if (status & WRX_ST_EOP) {
3024 				/* Reset our state. */
3025 				DPRINTF(WM_DEBUG_RX,
3026 				    ("%s: RX: resetting rxdiscard -> 0\n",
3027 				    device_xname(sc->sc_dev)));
3028 				sc->sc_rxdiscard = 0;
3029 			}
3030 			continue;
3031 		}
3032 
3033 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3034 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3035 
3036 		m = rxs->rxs_mbuf;
3037 
3038 		/*
3039 		 * Add a new receive buffer to the ring, unless of
3040 		 * course the length is zero. Treat the latter as a
3041 		 * failed mapping.
3042 		 */
3043 		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
3044 			/*
3045 			 * Failed, throw away what we've done so
3046 			 * far, and discard the rest of the packet.
3047 			 */
3048 			ifp->if_ierrors++;
3049 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3050 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3051 			WM_INIT_RXDESC(sc, i);
3052 			if ((status & WRX_ST_EOP) == 0)
3053 				sc->sc_rxdiscard = 1;
3054 			if (sc->sc_rxhead != NULL)
3055 				m_freem(sc->sc_rxhead);
3056 			WM_RXCHAIN_RESET(sc);
3057 			DPRINTF(WM_DEBUG_RX,
3058 			    ("%s: RX: Rx buffer allocation failed, "
3059 			    "dropping packet%s\n", device_xname(sc->sc_dev),
3060 			    sc->sc_rxdiscard ? " (discard)" : ""));
3061 			continue;
3062 		}
3063 
3064 		m->m_len = len;
3065 		sc->sc_rxlen += len;
3066 		DPRINTF(WM_DEBUG_RX,
3067 		    ("%s: RX: buffer at %p len %d\n",
3068 		    device_xname(sc->sc_dev), m->m_data, len));
3069 
3070 		/*
3071 		 * If this is not the end of the packet, keep
3072 		 * looking.
3073 		 */
3074 		if ((status & WRX_ST_EOP) == 0) {
3075 			WM_RXCHAIN_LINK(sc, m);
3076 			DPRINTF(WM_DEBUG_RX,
3077 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
3078 			    device_xname(sc->sc_dev), sc->sc_rxlen));
3079 			continue;
3080 		}
3081 
3082 		/*
3083 		 * Okay, we have the entire packet now.  The chip is
3084 		 * configured to include the FCS (not all chips can
3085 		 * be configured to strip it), so we need to trim it.
3086 		 * May need to adjust length of previous mbuf in the
3087 		 * chain if the current mbuf is too short.
3088 		 */
3089 		if (m->m_len < ETHER_CRC_LEN) {
3090 			sc->sc_rxtail->m_len -= (ETHER_CRC_LEN - m->m_len);
3091 			m->m_len = 0;
3092 		} else {
3093 			m->m_len -= ETHER_CRC_LEN;
3094 		}
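		/*
		 * E.g., if the final mbuf holds only 1 byte, that byte
		 * and the last 3 bytes of the previous mbuf are all FCS,
		 * so the previous mbuf is shortened by 3 as well.
		 */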
3095 		len = sc->sc_rxlen - ETHER_CRC_LEN;
3096 
3097 		WM_RXCHAIN_LINK(sc, m);
3098 
3099 		*sc->sc_rxtailp = NULL;
3100 		m = sc->sc_rxhead;
3101 
3102 		WM_RXCHAIN_RESET(sc);
3103 
3104 		DPRINTF(WM_DEBUG_RX,
3105 		    ("%s: RX: have entire packet, len -> %d\n",
3106 		    device_xname(sc->sc_dev), len));
3107 
3108 		/*
3109 		 * If an error occurred, update stats and drop the packet.
3110 		 */
3111 		if (errors &
3112 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
3113 			if (errors & WRX_ER_SE)
3114 				log(LOG_WARNING, "%s: symbol error\n",
3115 				    device_xname(sc->sc_dev));
3116 			else if (errors & WRX_ER_SEQ)
3117 				log(LOG_WARNING, "%s: receive sequence error\n",
3118 				    device_xname(sc->sc_dev));
3119 			else if (errors & WRX_ER_CE)
3120 				log(LOG_WARNING, "%s: CRC error\n",
3121 				    device_xname(sc->sc_dev));
3122 			m_freem(m);
3123 			continue;
3124 		}
3125 
3126 		/*
3127 		 * No errors.  Receive the packet.
3128 		 */
3129 		m->m_pkthdr.rcvif = ifp;
3130 		m->m_pkthdr.len = len;
3131 
3132 		/*
3133 		 * If VLANs are enabled, VLAN packets have been unwrapped
3134 		 * for us.  Associate the tag with the packet.
3135 		 */
3136 		if ((status & WRX_ST_VP) != 0) {
3137 			VLAN_INPUT_TAG(ifp, m,
3138 			    le16toh(vlantag),
3139 			    continue);
3140 		}
3141 
3142 		/*
3143 		 * Set up checksum info for this packet.
3144 		 */
3145 		if ((status & WRX_ST_IXSM) == 0) {
3146 			if (status & WRX_ST_IPCS) {
3147 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
3148 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
3149 				if (errors & WRX_ER_IPE)
3150 					m->m_pkthdr.csum_flags |=
3151 					    M_CSUM_IPv4_BAD;
3152 			}
3153 			if (status & WRX_ST_TCPCS) {
3154 				/*
3155 				 * Note: we don't know if this was TCP or UDP,
3156 				 * so we just set both bits, and expect the
3157 				 * upper layers to deal.
3158 				 */
3159 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
3160 				m->m_pkthdr.csum_flags |=
3161 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
3162 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
3163 				if (errors & WRX_ER_TCPE)
3164 					m->m_pkthdr.csum_flags |=
3165 					    M_CSUM_TCP_UDP_BAD;
3166 			}
3167 		}
3168 
3169 		ifp->if_ipackets++;
3170 
3171 		/* Pass this up to any BPF listeners. */
3172 		bpf_mtap(ifp, m);
3173 
3174 		/* Pass it on. */
3175 		(*ifp->if_input)(ifp, m);
3176 	}
3177 
3178 	/* Update the receive pointer. */
3179 	sc->sc_rxptr = i;
3180 
3181 	DPRINTF(WM_DEBUG_RX,
3182 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
3183 }
3184 
3185 /*
3186  * wm_linkintr_gmii:
3187  *
3188  *	Helper; handle link interrupts for GMII.
3189  */
3190 static void
3191 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
3192 {
3193 
3194 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3195 		__func__));
3196 
3197 	if (icr & ICR_LSC) {
3198 		DPRINTF(WM_DEBUG_LINK,
3199 		    ("%s: LINK: LSC -> mii_tick\n",
3200 			device_xname(sc->sc_dev)));
3201 		mii_tick(&sc->sc_mii);
3202 		if (sc->sc_type == WM_T_82543) {
3203 			int miistatus, active;
3204 
3205 			/*
3206 			 * With the 82543, we need to force the MAC's
3207 			 * speed and duplex to match the PHY's current
3208 			 * configuration.
3209 			 */
3210 			miistatus = sc->sc_mii.mii_media_status;
3211 
3212 			if (miistatus & IFM_ACTIVE) {
3213 				active = sc->sc_mii.mii_media_active;
3214 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
3215 				switch (IFM_SUBTYPE(active)) {
3216 				case IFM_10_T:
3217 					sc->sc_ctrl |= CTRL_SPEED_10;
3218 					break;
3219 				case IFM_100_TX:
3220 					sc->sc_ctrl |= CTRL_SPEED_100;
3221 					break;
3222 				case IFM_1000_T:
3223 					sc->sc_ctrl |= CTRL_SPEED_1000;
3224 					break;
3225 				default:
3226 					/*
3227 					 * fiber?
3228 					 * Should not enter here.
3229 					 */
3230 					printf("unknown media (%x)\n",
3231 					    active);
3232 					break;
3233 				}
3234 				if (active & IFM_FDX)
3235 					sc->sc_ctrl |= CTRL_FD;
3236 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3237 			}
3238 		} else if ((sc->sc_type == WM_T_ICH8)
3239 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
3240 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
3241 		} else if (sc->sc_type == WM_T_PCH) {
3242 			wm_k1_gig_workaround_hv(sc,
3243 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
3244 		}
3245 
3246 		if ((sc->sc_phytype == WMPHY_82578)
3247 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
3248 			== IFM_1000_T)) {
3249 
3250 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
3251 				delay(200*1000); /* XXX too big */
3252 
3253 				/* Link stall fix for link up */
3254 				wm_gmii_hv_writereg(sc->sc_dev, 1,
3255 				    HV_MUX_DATA_CTRL,
3256 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
3257 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
3258 				wm_gmii_hv_writereg(sc->sc_dev, 1,
3259 				    HV_MUX_DATA_CTRL,
3260 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
3261 			}
3262 		}
3263 	} else if (icr & ICR_RXSEQ) {
3264 		DPRINTF(WM_DEBUG_LINK,
3265 		    ("%s: LINK: Receive sequence error\n",
3266 			device_xname(sc->sc_dev)));
3267 	}
3268 }
3269 
3270 /*
3271  * wm_linkintr_tbi:
3272  *
3273  *	Helper; handle link interrupts for TBI mode.
3274  */
3275 static void
3276 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
3277 {
3278 	uint32_t status;
3279 
3280 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3281 		__func__));
3282 
3283 	status = CSR_READ(sc, WMREG_STATUS);
3284 	if (icr & ICR_LSC) {
3285 		if (status & STATUS_LU) {
3286 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
3287 			    device_xname(sc->sc_dev),
3288 			    (status & STATUS_FD) ? "FDX" : "HDX"));
3289 			/*
3290 			 * NOTE: CTRL will update TFCE and RFCE automatically,
3291 			 * so we should update sc->sc_ctrl
3292 			 */
3293 
3294 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3295 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3296 			sc->sc_fcrtl &= ~FCRTL_XONE;
3297 			if (status & STATUS_FD)
3298 				sc->sc_tctl |=
3299 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3300 			else
3301 				sc->sc_tctl |=
3302 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3303 			if (sc->sc_ctrl & CTRL_TFCE)
3304 				sc->sc_fcrtl |= FCRTL_XONE;
3305 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3306 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3307 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
3308 				      sc->sc_fcrtl);
3309 			sc->sc_tbi_linkup = 1;
3310 		} else {
3311 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
3312 			    device_xname(sc->sc_dev)));
3313 			sc->sc_tbi_linkup = 0;
3314 		}
3315 		wm_tbi_set_linkled(sc);
3316 	} else if (icr & ICR_RXCFG) {
3317 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
3318 		    device_xname(sc->sc_dev)));
3319 		sc->sc_tbi_nrxcfg++;
3320 		wm_check_for_link(sc);
3321 	} else if (icr & ICR_RXSEQ) {
3322 		DPRINTF(WM_DEBUG_LINK,
3323 		    ("%s: LINK: Receive sequence error\n",
3324 		    device_xname(sc->sc_dev)));
3325 	}
3326 }
3327 
3328 /*
3329  * wm_linkintr:
3330  *
3331  *	Helper; handle link interrupts.
3332  */
3333 static void
3334 wm_linkintr(struct wm_softc *sc, uint32_t icr)
3335 {
3336 
3337 	if (sc->sc_flags & WM_F_HAS_MII)
3338 		wm_linkintr_gmii(sc, icr);
3339 	else
3340 		wm_linkintr_tbi(sc, icr);
3341 }
3342 
3343 /*
3344  * wm_tick:
3345  *
3346  *	One second timer, used to check link status, sweep up
3347  *	completed transmit jobs, etc.
3348  */
3349 static void
3350 wm_tick(void *arg)
3351 {
3352 	struct wm_softc *sc = arg;
3353 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3354 	int s;
3355 
3356 	s = splnet();
3357 
3358 	if (sc->sc_type >= WM_T_82542_2_1) {
3359 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3360 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3361 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3362 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3363 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3364 	}
3365 
3366 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
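	/*
	 * Fold the hardware receive error counters (CRC, alignment,
	 * symbol, sequence, carrier extension and length errors) into
	 * if_ierrors.
	 */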
3367 	ifp->if_ierrors += 0ULL /* ensure quad_t */
3368 	    + CSR_READ(sc, WMREG_CRCERRS)
3369 	    + CSR_READ(sc, WMREG_ALGNERRC)
3370 	    + CSR_READ(sc, WMREG_SYMERRC)
3371 	    + CSR_READ(sc, WMREG_RXERRC)
3372 	    + CSR_READ(sc, WMREG_SEC)
3373 	    + CSR_READ(sc, WMREG_CEXTERR)
3374 	    + CSR_READ(sc, WMREG_RLEC);
3375 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
3376 
3377 	if (sc->sc_flags & WM_F_HAS_MII)
3378 		mii_tick(&sc->sc_mii);
3379 	else
3380 		wm_tbi_check_link(sc);
3381 
3382 	splx(s);
3383 
3384 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3385 }
3386 
3387 /*
3388  * wm_reset:
3389  *
3390  *	Reset the i82542 chip.
3391  */
3392 static void
3393 wm_reset(struct wm_softc *sc)
3394 {
3395 	int phy_reset = 0;
3396 	uint32_t reg, mask;
3397 	int i;
3398 
3399 	/*
3400 	 * Allocate on-chip memory according to the MTU size.
3401 	 * The Packet Buffer Allocation register must be written
3402 	 * before the chip is reset.
3403 	 */
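	/*
	 * On the 82547, the on-chip buffer above the receive allocation
	 * (up to PBA_40K) is managed as a Tx FIFO, hence the txfifo_*
	 * bookkeeping in the first case below.
	 */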
3404 	switch (sc->sc_type) {
3405 	case WM_T_82547:
3406 	case WM_T_82547_2:
3407 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3408 		    PBA_22K : PBA_30K;
3409 		sc->sc_txfifo_head = 0;
3410 		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3411 		sc->sc_txfifo_size =
3412 		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3413 		sc->sc_txfifo_stall = 0;
3414 		break;
3415 	case WM_T_82571:
3416 	case WM_T_82572:
3417 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
3418 	case WM_T_80003:
3419 		sc->sc_pba = PBA_32K;
3420 		break;
3421 	case WM_T_82580:
3422 	case WM_T_82580ER:
3423 		sc->sc_pba = PBA_35K;
3424 		break;
3425 	case WM_T_82576:
3426 		sc->sc_pba = PBA_64K;
3427 		break;
3428 	case WM_T_82573:
3429 		sc->sc_pba = PBA_12K;
3430 		break;
3431 	case WM_T_82574:
3432 	case WM_T_82583:
3433 		sc->sc_pba = PBA_20K;
3434 		break;
3435 	case WM_T_ICH8:
3436 		sc->sc_pba = PBA_8K;
3437 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3438 		break;
3439 	case WM_T_ICH9:
3440 	case WM_T_ICH10:
3441 	case WM_T_PCH:
3442 		sc->sc_pba = PBA_10K;
3443 		break;
3444 	default:
3445 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3446 		    PBA_40K : PBA_48K;
3447 		break;
3448 	}
3449 	CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3450 
3451 	/* Prevent the PCI-E bus from sticking */
3452 	if (sc->sc_flags & WM_F_PCIE) {
3453 		int timeout = 800;
3454 
3455 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
3456 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3457 
3458 		while (timeout--) {
3459 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
3460 				break;
3461 			delay(100);
3462 		}
3463 	}
3464 
3465 	/* Set the completion timeout for the interface */
3466 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
3467 		wm_set_pcie_completion_timeout(sc);
3468 
3469 	/* Clear interrupt */
3470 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3471 
3472 	/* Stop the transmit and receive processes. */
3473 	CSR_WRITE(sc, WMREG_RCTL, 0);
3474 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3475 	sc->sc_rctl &= ~RCTL_EN;
3476 
3477 	/* XXX set_tbi_sbp_82543() */
3478 
3479 	delay(10*1000);
3480 
3481 	/* Must acquire the MDIO ownership before MAC reset */
3482 	switch (sc->sc_type) {
3483 	case WM_T_82573:
3484 	case WM_T_82574:
3485 	case WM_T_82583:
3486 		i = 0;
3487 		reg = CSR_READ(sc, WMREG_EXTCNFCTR)
3488 		    | EXTCNFCTR_MDIO_SW_OWNERSHIP;
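		/*
		 * Request software ownership of the MDIO interface and
		 * poll until the hardware latches the bit, retrying
		 * every 2ms up to WM_MDIO_OWNERSHIP_TIMEOUT times.
		 */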
3489 		do {
3490 			CSR_WRITE(sc, WMREG_EXTCNFCTR,
3491 			    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
3492 			reg = CSR_READ(sc, WMREG_EXTCNFCTR);
3493 			if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
3494 				break;
3495 			reg |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
3496 			delay(2*1000);
3497 			i++;
3498 		} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
3499 		break;
3500 	default:
3501 		break;
3502 	}
3503 
3504 	/*
3505 	 * 82541 Errata 29? & 82547 Errata 28?
3506 	 * See also the description about PHY_RST bit in CTRL register
3507 	 * in 8254x_GBe_SDM.pdf.
3508 	 */
3509 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3510 		CSR_WRITE(sc, WMREG_CTRL,
3511 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3512 		delay(5000);
3513 	}
3514 
3515 	switch (sc->sc_type) {
3516 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3517 	case WM_T_82541:
3518 	case WM_T_82541_2:
3519 	case WM_T_82547:
3520 	case WM_T_82547_2:
3521 		/*
3522 		 * On some chipsets, a reset through a memory-mapped write
3523 		 * cycle can cause the chip to reset before completing the
3524 		 * write cycle.  This causes a major headache that can be
3525 		 * avoided by issuing the reset via indirect register writes
3526 		 * through I/O space.
3527 		 *
3528 		 * So, if we successfully mapped the I/O BAR at attach time,
3529 		 * use that.  Otherwise, try our luck with a memory-mapped
3530 		 * reset.
3531 		 */
3532 		if (sc->sc_flags & WM_F_IOH_VALID)
3533 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3534 		else
3535 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3536 		break;
3537 	case WM_T_82545_3:
3538 	case WM_T_82546_3:
3539 		/* Use the shadow control register on these chips. */
3540 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3541 		break;
3542 	case WM_T_80003:
3543 		mask = swfwphysem[sc->sc_funcid];
3544 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3545 		wm_get_swfw_semaphore(sc, mask);
3546 		CSR_WRITE(sc, WMREG_CTRL, reg);
3547 		wm_put_swfw_semaphore(sc, mask);
3548 		break;
3549 	case WM_T_ICH8:
3550 	case WM_T_ICH9:
3551 	case WM_T_ICH10:
3552 	case WM_T_PCH:
3553 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3554 		if (wm_check_reset_block(sc) == 0) {
3555 			if (sc->sc_type >= WM_T_PCH) {
3556 				uint32_t status;
3557 
3558 				status = CSR_READ(sc, WMREG_STATUS);
3559 				CSR_WRITE(sc, WMREG_STATUS,
3560 				    status & ~STATUS_PHYRA);
3561 			}
3562 
3563 			reg |= CTRL_PHY_RESET;
3564 			phy_reset = 1;
3565 		}
3566 		wm_get_swfwhw_semaphore(sc);
3567 		CSR_WRITE(sc, WMREG_CTRL, reg);
3568 		delay(20*1000);
3569 		wm_put_swfwhw_semaphore(sc);
3570 		break;
3571 	case WM_T_82542_2_0:
3572 	case WM_T_82542_2_1:
3573 	case WM_T_82543:
3574 	case WM_T_82540:
3575 	case WM_T_82545:
3576 	case WM_T_82546:
3577 	case WM_T_82571:
3578 	case WM_T_82572:
3579 	case WM_T_82573:
3580 	case WM_T_82574:
3581 	case WM_T_82575:
3582 	case WM_T_82576:
3583 	case WM_T_82580:
3584 	case WM_T_82580ER:
3585 	case WM_T_82583:
3586 	default:
3587 		/* Everything else can safely use the documented method. */
3588 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3589 		break;
3590 	}
3591 
3592 	if (phy_reset != 0)
3593 		wm_get_cfg_done(sc);
3594 
3595 	/* reload EEPROM */
3596 	switch (sc->sc_type) {
3597 	case WM_T_82542_2_0:
3598 	case WM_T_82542_2_1:
3599 	case WM_T_82543:
3600 	case WM_T_82544:
3601 		delay(10);
3602 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3603 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3604 		delay(2000);
3605 		break;
3606 	case WM_T_82540:
3607 	case WM_T_82545:
3608 	case WM_T_82545_3:
3609 	case WM_T_82546:
3610 	case WM_T_82546_3:
3611 		delay(5*1000);
3612 		/* XXX Disable HW ARPs on ASF enabled adapters */
3613 		break;
3614 	case WM_T_82541:
3615 	case WM_T_82541_2:
3616 	case WM_T_82547:
3617 	case WM_T_82547_2:
3618 		delay(20000);
3619 		/* XXX Disable HW ARPs on ASF enabled adapters */
3620 		break;
3621 	case WM_T_82571:
3622 	case WM_T_82572:
3623 	case WM_T_82573:
3624 	case WM_T_82574:
3625 	case WM_T_82583:
3626 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3627 			delay(10);
3628 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3629 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3630 		}
3631 		/* check EECD_EE_AUTORD */
3632 		wm_get_auto_rd_done(sc);
3633 		/*
3634 		 * PHY configuration from the NVM starts only after
3635 		 * EECD_AUTO_RD is set.
3636 		 */
3637 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3638 		    || (sc->sc_type == WM_T_82583))
3639 			delay(25*1000);
3640 		break;
3641 	case WM_T_82575:
3642 	case WM_T_82576:
3643 	case WM_T_82580:
3644 	case WM_T_82580ER:
3645 	case WM_T_80003:
3646 	case WM_T_ICH8:
3647 	case WM_T_ICH9:
3648 		/* check EECD_EE_AUTORD */
3649 		wm_get_auto_rd_done(sc);
3650 		break;
3651 	case WM_T_ICH10:
3652 	case WM_T_PCH:
3653 		wm_lan_init_done(sc);
3654 		break;
3655 	default:
3656 		panic("%s: unknown type\n", __func__);
3657 	}
3658 
3659 	/* Check whether EEPROM is present or not */
3660 	switch (sc->sc_type) {
3661 	case WM_T_82575:
3662 	case WM_T_82576:
3663 #if 0 /* XXX */
3664 	case WM_T_82580:
3665 	case WM_T_82580ER:
3666 #endif
3667 	case WM_T_ICH8:
3668 	case WM_T_ICH9:
3669 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3670 			/* Not found */
3671 			sc->sc_flags |= WM_F_EEPROM_INVALID;
3672 			if ((sc->sc_type == WM_T_82575)
3673 			    || (sc->sc_type == WM_T_82576)
3674 			    || (sc->sc_type == WM_T_82580)
3675 			    || (sc->sc_type == WM_T_82580ER))
3676 				wm_reset_init_script_82575(sc);
3677 		}
3678 		break;
3679 	default:
3680 		break;
3681 	}
3682 
3683 	if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)) {
3684 		/* clear global device reset status bit */
3685 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
3686 	}
3687 
3688 	/* Clear any pending interrupt events. */
3689 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3690 	reg = CSR_READ(sc, WMREG_ICR);
3691 
3692 	/* reload sc_ctrl */
3693 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3694 
3695 	/* dummy read from WUC */
3696 	if (sc->sc_type == WM_T_PCH)
3697 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3698 	/*
3699 	 * For PCH, this write will make sure that any noise will be detected
3700 	 * as a CRC error and be dropped rather than show up as a bad packet
3701 	 * to the DMA engine
3702 	 */
3703 	if (sc->sc_type == WM_T_PCH)
3704 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3705 
3706 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3707 		CSR_WRITE(sc, WMREG_WUC, 0);
3708 
3709 	/* XXX need special handling for 82580 */
3710 }
3711 
3712 static void
3713 wm_set_vlan(struct wm_softc *sc)
3714 {
3715 	/* Deal with VLAN enables. */
3716 	if (VLAN_ATTACHED(&sc->sc_ethercom))
3717 		sc->sc_ctrl |= CTRL_VME;
3718 	else
3719 		sc->sc_ctrl &= ~CTRL_VME;
3720 
3721 	/* Write the control registers. */
3722 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3723 }
3724 
3725 /*
3726  * wm_init:		[ifnet interface function]
3727  *
3728  *	Initialize the interface.  Must be called at splnet().
3729  */
3730 static int
3731 wm_init(struct ifnet *ifp)
3732 {
3733 	struct wm_softc *sc = ifp->if_softc;
3734 	struct wm_rxsoft *rxs;
3735 	int i, error = 0;
3736 	uint32_t reg;
3737 
3738 	/*
3739 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3740 	 * There is a small but measurable benefit to avoiding the adjustment
3741 	 * of the descriptor so that the headers are aligned, for normal mtu,
3742 	 * on such platforms.  One possibility is that the DMA itself is
3743 	 * slightly more efficient if the front of the entire packet (instead
3744 	 * of the front of the headers) is aligned.
3745 	 *
3746 	 * Note we must always set align_tweak to 0 if we are using
3747 	 * jumbo frames.
3748 	 */
3749 #ifdef __NO_STRICT_ALIGNMENT
3750 	sc->sc_align_tweak = 0;
3751 #else
3752 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3753 		sc->sc_align_tweak = 0;
3754 	else
3755 		sc->sc_align_tweak = 2;
3756 #endif /* __NO_STRICT_ALIGNMENT */
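	/*
	 * (A tweak of 2 pushes the 14-byte Ethernet header forward so
	 * that the IP header which follows it lands on a 32-bit
	 * boundary.)
	 */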
3757 
3758 	/* Cancel any pending I/O. */
3759 	wm_stop(ifp, 0);
3760 
3761 	/* update statistics before reset */
3762 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3763 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3764 
3765 	/* Reset the chip to a known state. */
3766 	wm_reset(sc);
3767 
3768 	switch (sc->sc_type) {
3769 	case WM_T_82571:
3770 	case WM_T_82572:
3771 	case WM_T_82573:
3772 	case WM_T_82574:
3773 	case WM_T_82583:
3774 	case WM_T_80003:
3775 	case WM_T_ICH8:
3776 	case WM_T_ICH9:
3777 	case WM_T_ICH10:
3778 	case WM_T_PCH:
3779 		if (wm_check_mng_mode(sc) != 0)
3780 			wm_get_hw_control(sc);
3781 		break;
3782 	default:
3783 		break;
3784 	}
3785 
3786 	/* Reset the PHY. */
3787 	if (sc->sc_flags & WM_F_HAS_MII)
3788 		wm_gmii_reset(sc);
3789 
3790 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
3791 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
3792 	if (sc->sc_type == WM_T_PCH)
3793 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
3794 
3795 	/* Initialize the transmit descriptor ring. */
3796 	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3797 	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3798 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3799 	sc->sc_txfree = WM_NTXDESC(sc);
3800 	sc->sc_txnext = 0;
3801 
3802 	if (sc->sc_type < WM_T_82543) {
3803 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
3804 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
3805 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3806 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3807 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3808 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3809 	} else {
3810 		CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
3811 		CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
3812 		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3813 		CSR_WRITE(sc, WMREG_TDH, 0);
3814 		CSR_WRITE(sc, WMREG_TIDV, 375);		/* ITR / 4 */
3815 		CSR_WRITE(sc, WMREG_TADV, 375);		/* should be same */
3816 
3817 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3818 			/*
3819 			 * Don't write TDT before TCTL.EN is set.
3820 			 * See the document.
3821 			 */
3822 			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
3823 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
3824 			    | TXDCTL_WTHRESH(0));
3825 		else {
3826 			CSR_WRITE(sc, WMREG_TDT, 0);
3827 			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3828 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3829 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3830 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3831 		}
3832 	}
3833 	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3834 	CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3835 
3836 	/* Initialize the transmit job descriptors. */
3837 	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3838 		sc->sc_txsoft[i].txs_mbuf = NULL;
3839 	sc->sc_txsfree = WM_TXQUEUELEN(sc);
3840 	sc->sc_txsnext = 0;
3841 	sc->sc_txsdirty = 0;
3842 
3843 	/*
3844 	 * Initialize the receive descriptor and receive job
3845 	 * descriptor rings.
3846 	 */
3847 	if (sc->sc_type < WM_T_82543) {
3848 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3849 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3850 		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3851 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3852 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3853 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3854 
3855 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3856 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3857 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3858 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3859 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3860 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3861 	} else {
3862 		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3863 		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3864 		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3865 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3866 			CSR_WRITE(sc, WMREG_EITR(0), 450);
3867 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
3868 				panic("%s: MCLBYTES %d unsupported for i82575 or higher\n", __func__, MCLBYTES);
3869 			CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
3870 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
3871 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
3872 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
3873 			    | RXDCTL_WTHRESH(1));
3874 		} else {
3875 			CSR_WRITE(sc, WMREG_RDH, 0);
3876 			CSR_WRITE(sc, WMREG_RDT, 0);
3877 			CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD);	/* ITR/4 */
3878 			CSR_WRITE(sc, WMREG_RADV, 375);		/* MUST be same */
3879 		}
3880 	}
3881 	for (i = 0; i < WM_NRXDESC; i++) {
3882 		rxs = &sc->sc_rxsoft[i];
3883 		if (rxs->rxs_mbuf == NULL) {
3884 			if ((error = wm_add_rxbuf(sc, i)) != 0) {
3885 				log(LOG_ERR, "%s: unable to allocate or map rx "
3886 				    "buffer %d, error = %d\n",
3887 				    device_xname(sc->sc_dev), i, error);
3888 				/*
3889 				 * XXX Should attempt to run with fewer receive
3890 				 * XXX buffers instead of just failing.
3891 				 */
3892 				wm_rxdrain(sc);
3893 				goto out;
3894 			}
3895 		} else {
3896 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3897 				WM_INIT_RXDESC(sc, i);
3898 			/*
3899 			 * For 82575 and newer devices, the RX descriptors
3900 			 * must be initialized after the setting of RCTL.EN in
3901 			 * wm_set_filter()
3902 			 */
3903 		}
3904 	}
3905 	sc->sc_rxptr = 0;
3906 	sc->sc_rxdiscard = 0;
3907 	WM_RXCHAIN_RESET(sc);
3908 
3909 	/*
3910 	 * Clear out the VLAN table -- we don't use it (yet).
3911 	 */
3912 	CSR_WRITE(sc, WMREG_VET, 0);
3913 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
3914 		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3915 
3916 	/*
3917 	 * Set up flow-control parameters.
3918 	 *
3919 	 * XXX Values could probably stand some tuning.
3920 	 */
3921 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
3922 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
3923 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3924 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3925 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3926 	}
3927 
3928 	sc->sc_fcrtl = FCRTL_DFLT;
3929 	if (sc->sc_type < WM_T_82543) {
3930 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3931 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3932 	} else {
3933 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3934 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3935 	}
3936 
3937 	if (sc->sc_type == WM_T_80003)
3938 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
3939 	else
3940 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3941 
3942 	/* Writes the control register. */
3943 	wm_set_vlan(sc);
3944 
3945 	if (sc->sc_flags & WM_F_HAS_MII) {
3946 		int val;
3947 
3948 		switch (sc->sc_type) {
3949 		case WM_T_80003:
3950 		case WM_T_ICH8:
3951 		case WM_T_ICH9:
3952 		case WM_T_ICH10:
3953 		case WM_T_PCH:
3954 			/*
3955 			 * Set the MAC to wait the maximum time between each
3956 			 * iteration and increase the max iterations when
3957 			 * polling the phy; this fixes erroneous timeouts at
3958 			 * 10Mbps.
3959 			 */
3960 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
3961 			    0xFFFF);
3962 			val = wm_kmrn_readreg(sc,
3963 			    KUMCTRLSTA_OFFSET_INB_PARAM);
3964 			val |= 0x3F;
3965 			wm_kmrn_writereg(sc,
3966 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
3967 			break;
3968 		default:
3969 			break;
3970 		}
3971 
3972 		if (sc->sc_type == WM_T_80003) {
3973 			val = CSR_READ(sc, WMREG_CTRL_EXT);
3974 			val &= ~CTRL_EXT_LINK_MODE_MASK;
3975 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3976 
3977 			/* Bypass RX and TX FIFO's */
3978 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3979 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
3980 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3981 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3982 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3983 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3984 		}
3985 	}
3986 #if 0
3987 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3988 #endif
3989 
3990 	/*
3991 	 * Set up checksum offload parameters.
3992 	 */
3993 	reg = CSR_READ(sc, WMREG_RXCSUM);
3994 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3995 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3996 		reg |= RXCSUM_IPOFL;
3997 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3998 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3999 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4000 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4001 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
4002 
4003 	/* Reset TBI's RXCFG count */
4004 	sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
4005 
4006 	/*
4007 	 * Set up the interrupt registers.
4008 	 */
4009 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4010 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4011 	    ICR_RXO | ICR_RXT0;
4012 	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
4013 		sc->sc_icr |= ICR_RXCFG;
4014 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4015 
4016 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4017 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)) {
4018 		reg = CSR_READ(sc, WMREG_KABGTXD);
4019 		reg |= KABGTXD_BGSQLBIAS;
4020 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
4021 	}
4022 
4023 	/* Set up the inter-packet gap. */
4024 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4025 
4026 	if (sc->sc_type >= WM_T_82543) {
4027 		/*
4028 		 * Set up the interrupt throttling register (units of 256ns)
4029 		 * Note that a footnote in Intel's documentation says this
4030 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4031 		 * or 10Mbit mode.  Empirically, it appears to be the case
4032 		 * that that is also true for the 1024ns units of the other
4033 		 * interrupt-related timer registers -- so, really, we ought
4034 		 * to divide this value by 4 when the link speed is low.
4035 		 *
4036 		 * XXX implement this division at link speed change!
4037 		 */
4038 
4039 		 /*
4040 		  * For N interrupts/sec, set this value to:
4041 		  * 1000000000 / (N * 256).  Note that we set the
4042 		  * absolute and packet timer values to this value
4043 		  * divided by 4 to get "simple timer" behavior.
4044 		  */
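		 /*
		  * For example, the value 1500 programmed below gives
		  * 1000000000 / (1500 * 256) ~= 2604 interrupts/sec.
		  */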
4045 
4046 		sc->sc_itr = 1500;		/* 2604 ints/sec */
4047 		CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4048 	}
4049 
4050 	/* Set the VLAN ethernetype. */
4051 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4052 
4053 	/*
4054 	 * Set up the transmit control register; we start out with
4055 	 * a collision distance suitable for FDX, but update it when
4056 	 * we resolve the media type.
4057 	 */
4058 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4059 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
4060 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4061 	if (sc->sc_type >= WM_T_82571)
4062 		sc->sc_tctl |= TCTL_MULR;
4063 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4064 
4065 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4066 		/*
4067 		 * Write TDT after TCTL.EN is set.
4068 		 * See the document.
4069 		 */
4070 		CSR_WRITE(sc, WMREG_TDT, 0);
4071 	}
4072 
4073 	if (sc->sc_type == WM_T_80003) {
4074 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
4075 		reg &= ~TCTL_EXT_GCEX_MASK;
4076 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4077 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4078 	}
4079 
4080 	/* Set the media. */
4081 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4082 		goto out;
4083 
4084 	/* Configure for OS presence */
4085 	wm_init_manageability(sc);
4086 
4087 	/*
4088 	 * Set up the receive control register; we actually program
4089 	 * the register when we set the receive filter.  Use multicast
4090 	 * address offset type 0.
4091 	 *
4092 	 * Only the i82544 has the ability to strip the incoming
4093 	 * CRC, so we don't enable that feature.
4094 	 */
4095 	sc->sc_mchash_type = 0;
4096 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4097 	    | RCTL_MO(sc->sc_mchash_type);
4098 
4099 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4100 	    && (ifp->if_mtu > ETHERMTU)) {
4101 		sc->sc_rctl |= RCTL_LPE;
4102 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4103 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4104 	}
4105 
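	/*
	 * Program a receive buffer size that matches the mbuf cluster
	 * size; sizes larger than 2KB need the buffer size extension
	 * (BSEX) encodings.
	 */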
4106 	if (MCLBYTES == 2048) {
4107 		sc->sc_rctl |= RCTL_2k;
4108 	} else {
4109 		if (sc->sc_type >= WM_T_82543) {
4110 			switch (MCLBYTES) {
4111 			case 4096:
4112 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4113 				break;
4114 			case 8192:
4115 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4116 				break;
4117 			case 16384:
4118 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4119 				break;
4120 			default:
4121 				panic("wm_init: MCLBYTES %d unsupported",
4122 				    MCLBYTES);
4123 				break;
4124 			}
4125 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
4126 	}
4127 
4128 	/* Set the receive filter. */
4129 	wm_set_filter(sc);
4130 
4131 	/* On 82575 and later, set RDT only if RX is enabled */
4132 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4133 		for (i = 0; i < WM_NRXDESC; i++)
4134 			WM_INIT_RXDESC(sc, i);
4135 
4136 	/* Start the one second link check clock. */
4137 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4138 
4139 	/* ...all done! */
4140 	ifp->if_flags |= IFF_RUNNING;
4141 	ifp->if_flags &= ~IFF_OACTIVE;
4142 
4143  out:
4144 	sc->sc_if_flags = ifp->if_flags;
4145 	if (error)
4146 		log(LOG_ERR, "%s: interface not running\n",
4147 		    device_xname(sc->sc_dev));
4148 	return error;
4149 }
4150 
4151 /*
4152  * wm_rxdrain:
4153  *
4154  *	Drain the receive queue.
4155  */
4156 static void
4157 wm_rxdrain(struct wm_softc *sc)
4158 {
4159 	struct wm_rxsoft *rxs;
4160 	int i;
4161 
4162 	for (i = 0; i < WM_NRXDESC; i++) {
4163 		rxs = &sc->sc_rxsoft[i];
4164 		if (rxs->rxs_mbuf != NULL) {
4165 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4166 			m_freem(rxs->rxs_mbuf);
4167 			rxs->rxs_mbuf = NULL;
4168 		}
4169 	}
4170 }
4171 
4172 /*
4173  * wm_stop:		[ifnet interface function]
4174  *
4175  *	Stop transmission on the interface.
4176  */
4177 static void
4178 wm_stop(struct ifnet *ifp, int disable)
4179 {
4180 	struct wm_softc *sc = ifp->if_softc;
4181 	struct wm_txsoft *txs;
4182 	int i;
4183 
4184 	/* Stop the one second clock. */
4185 	callout_stop(&sc->sc_tick_ch);
4186 
4187 	/* Stop the 82547 Tx FIFO stall check timer. */
4188 	if (sc->sc_type == WM_T_82547)
4189 		callout_stop(&sc->sc_txfifo_ch);
4190 
4191 	if (sc->sc_flags & WM_F_HAS_MII) {
4192 		/* Down the MII. */
4193 		mii_down(&sc->sc_mii);
4194 	} else {
4195 #if 0
4196 		/* Should we clear PHY's status properly? */
4197 		wm_reset(sc);
4198 #endif
4199 	}
4200 
4201 	/* Stop the transmit and receive processes. */
4202 	CSR_WRITE(sc, WMREG_TCTL, 0);
4203 	CSR_WRITE(sc, WMREG_RCTL, 0);
4204 	sc->sc_rctl &= ~RCTL_EN;
4205 
4206 	/*
4207 	 * Clear the interrupt mask to ensure the device cannot assert its
4208 	 * interrupt line.
4209 	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4210 	 * any currently pending or shared interrupt.
4211 	 */
4212 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4213 	sc->sc_icr = 0;
4214 
4215 	/* Release any queued transmit buffers. */
4216 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4217 		txs = &sc->sc_txsoft[i];
4218 		if (txs->txs_mbuf != NULL) {
4219 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4220 			m_freem(txs->txs_mbuf);
4221 			txs->txs_mbuf = NULL;
4222 		}
4223 	}
4224 
4225 	/* Mark the interface as down and cancel the watchdog timer. */
4226 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4227 	ifp->if_timer = 0;
4228 
4229 	if (disable)
4230 		wm_rxdrain(sc);
4231 
4232 #if 0 /* notyet */
4233 	if (sc->sc_type >= WM_T_82544)
4234 		CSR_WRITE(sc, WMREG_WUC, 0);
4235 #endif
4236 }
4237 
4238 void
4239 wm_get_auto_rd_done(struct wm_softc *sc)
4240 {
4241 	int i;
4242 
4243 	/* wait for eeprom to reload */
4244 	switch (sc->sc_type) {
4245 	case WM_T_82571:
4246 	case WM_T_82572:
4247 	case WM_T_82573:
4248 	case WM_T_82574:
4249 	case WM_T_82583:
4250 	case WM_T_82575:
4251 	case WM_T_82576:
4252 	case WM_T_82580:
4253 	case WM_T_82580ER:
4254 	case WM_T_80003:
4255 	case WM_T_ICH8:
4256 	case WM_T_ICH9:
4257 		for (i = 0; i < 10; i++) {
4258 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4259 				break;
4260 			delay(1000);
4261 		}
4262 		if (i == 10) {
4263 			log(LOG_ERR, "%s: auto read from eeprom failed to "
4264 			    "complete\n", device_xname(sc->sc_dev));
4265 		}
4266 		break;
4267 	default:
4268 		break;
4269 	}
4270 }
4271 
4272 void
4273 wm_lan_init_done(struct wm_softc *sc)
4274 {
4275 	uint32_t reg = 0;
4276 	int i;
4277 
4278 	/* Wait for the hardware to signal LAN init done */
4279 	switch (sc->sc_type) {
4280 	case WM_T_ICH10:
4281 	case WM_T_PCH:
4282 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4283 			reg = CSR_READ(sc, WMREG_STATUS);
4284 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
4285 				break;
4286 			delay(100);
4287 		}
4288 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4289 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
4290 			    "complete\n", device_xname(sc->sc_dev), __func__);
4291 		}
4292 		break;
4293 	default:
4294 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4295 		    __func__);
4296 		break;
4297 	}
4298 
4299 	reg &= ~STATUS_LAN_INIT_DONE;
4300 	CSR_WRITE(sc, WMREG_STATUS, reg);
4301 }
4302 
4303 void
4304 wm_get_cfg_done(struct wm_softc *sc)
4305 {
4306 	int mask;
4307 	uint32_t reg;
4308 	int i;
4309 
4310 	/* Wait for the PHY configuration to complete */
4311 	switch (sc->sc_type) {
4312 	case WM_T_82542_2_0:
4313 	case WM_T_82542_2_1:
4314 		/* null */
4315 		break;
4316 	case WM_T_82543:
4317 	case WM_T_82544:
4318 	case WM_T_82540:
4319 	case WM_T_82545:
4320 	case WM_T_82545_3:
4321 	case WM_T_82546:
4322 	case WM_T_82546_3:
4323 	case WM_T_82541:
4324 	case WM_T_82541_2:
4325 	case WM_T_82547:
4326 	case WM_T_82547_2:
4327 	case WM_T_82573:
4328 	case WM_T_82574:
4329 	case WM_T_82583:
4330 		/* generic */
4331 		delay(10*1000);
4332 		break;
4333 	case WM_T_80003:
4334 	case WM_T_82571:
4335 	case WM_T_82572:
4336 	case WM_T_82575:
4337 	case WM_T_82576:
4338 	case WM_T_82580:
4339 	case WM_T_82580ER:
4340 		if (sc->sc_type == WM_T_82571) {
4341 			/* On the 82571 all functions share the port-0 bit */
4342 			mask = EEMNGCTL_CFGDONE_0;
4343 		} else
4344 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
4345 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
4346 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
4347 				break;
4348 			delay(1000);
4349 		}
4350 		if (i >= WM_PHY_CFG_TIMEOUT) {
4351 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
4352 				device_xname(sc->sc_dev), __func__));
4353 		}
4354 		break;
4355 	case WM_T_ICH8:
4356 	case WM_T_ICH9:
4357 	case WM_T_ICH10:
4358 	case WM_T_PCH:
4359 		if (sc->sc_type >= WM_T_PCH) {
4360 			reg = CSR_READ(sc, WMREG_STATUS);
4361 			if ((reg & STATUS_PHYRA) != 0)
4362 				CSR_WRITE(sc, WMREG_STATUS,
4363 				    reg & ~STATUS_PHYRA);
4364 		}
4365 		delay(10*1000);
4366 		break;
4367 	default:
4368 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4369 		    __func__);
4370 		break;
4371 	}
4372 }
4373 
4374 /*
4375  * wm_acquire_eeprom:
4376  *
4377  *	Perform the EEPROM handshake required on some chips.
4378  */
4379 static int
4380 wm_acquire_eeprom(struct wm_softc *sc)
4381 {
4382 	uint32_t reg;
4383 	int x;
4384 	int ret = 0;
4385 
4386 	/* Flash-type EEPROM needs no handshake; always succeeds */
4387 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
4388 		return 0;
4389 
4390 	if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
4391 		ret = wm_get_swfwhw_semaphore(sc);
4392 	} else if (sc->sc_flags & WM_F_SWFW_SYNC) {
4393 		/* this will also do wm_get_swsm_semaphore() if needed */
4394 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
4395 	} else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
4396 		ret = wm_get_swsm_semaphore(sc);
4397 	}
4398 
4399 	if (ret) {
4400 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4401 			__func__);
4402 		return 1;
4403 	}
4404 
4405 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
4406 		reg = CSR_READ(sc, WMREG_EECD);
4407 
4408 		/* Request EEPROM access. */
4409 		reg |= EECD_EE_REQ;
4410 		CSR_WRITE(sc, WMREG_EECD, reg);
4411 
4412 		/* ... and wait for it to be granted. */
4413 		for (x = 0; x < 1000; x++) {
4414 			reg = CSR_READ(sc, WMREG_EECD);
4415 			if (reg & EECD_EE_GNT)
4416 				break;
4417 			delay(5);
4418 		}
4419 		if ((reg & EECD_EE_GNT) == 0) {
4420 			aprint_error_dev(sc->sc_dev,
4421 			    "could not acquire EEPROM GNT\n");
4422 			reg &= ~EECD_EE_REQ;
4423 			CSR_WRITE(sc, WMREG_EECD, reg);
4424 			if (sc->sc_flags & WM_F_SWFWHW_SYNC)
4425 				wm_put_swfwhw_semaphore(sc);
4426 			if (sc->sc_flags & WM_F_SWFW_SYNC)
4427 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
4428 			else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4429 				wm_put_swsm_semaphore(sc);
4430 			return 1;
4431 		}
4432 	}
4433 
4434 	return 0;
4435 }
4436 
4437 /*
4438  * wm_release_eeprom:
4439  *
4440  *	Release the EEPROM mutex.
4441  */
4442 static void
4443 wm_release_eeprom(struct wm_softc *sc)
4444 {
4445 	uint32_t reg;
4446 
4447 	/* Flash-type EEPROM took no handshake; nothing to release */
4448 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
4449 		return;
4450 
4451 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
4452 		reg = CSR_READ(sc, WMREG_EECD);
4453 		reg &= ~EECD_EE_REQ;
4454 		CSR_WRITE(sc, WMREG_EECD, reg);
4455 	}
4456 
4457 	if (sc->sc_flags & WM_F_SWFWHW_SYNC)
4458 		wm_put_swfwhw_semaphore(sc);
4459 	if (sc->sc_flags & WM_F_SWFW_SYNC)
4460 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
4461 	else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4462 		wm_put_swsm_semaphore(sc);
4463 }
4464 
4465 /*
4466  * wm_eeprom_sendbits:
4467  *
4468  *	Send a series of bits to the EEPROM.
4469  */
4470 static void
4471 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
4472 {
4473 	uint32_t reg;
4474 	int x;
4475 
4476 	reg = CSR_READ(sc, WMREG_EECD);
4477 
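	/* Shift the bits out MSB first: present each on DI, then pulse SK. */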
4478 	for (x = nbits; x > 0; x--) {
4479 		if (bits & (1U << (x - 1)))
4480 			reg |= EECD_DI;
4481 		else
4482 			reg &= ~EECD_DI;
4483 		CSR_WRITE(sc, WMREG_EECD, reg);
4484 		delay(2);
4485 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4486 		delay(2);
4487 		CSR_WRITE(sc, WMREG_EECD, reg);
4488 		delay(2);
4489 	}
4490 }
4491 
4492 /*
4493  * wm_eeprom_recvbits:
4494  *
4495  *	Receive a series of bits from the EEPROM.
4496  */
4497 static void
4498 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
4499 {
4500 	uint32_t reg, val;
4501 	int x;
4502 
4503 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
4504 
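	/* Shift the bits in MSB first: raise SK, sample DO, lower SK. */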
4505 	val = 0;
4506 	for (x = nbits; x > 0; x--) {
4507 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4508 		delay(2);
4509 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
4510 			val |= (1U << (x - 1));
4511 		CSR_WRITE(sc, WMREG_EECD, reg);
4512 		delay(2);
4513 	}
4514 	*valp = val;
4515 }
4516 
4517 /*
4518  * wm_read_eeprom_uwire:
4519  *
4520  *	Read a word from the EEPROM using the MicroWire protocol.
4521  */
4522 static int
4523 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4524 {
4525 	uint32_t reg, val;
4526 	int i;
4527 
4528 	for (i = 0; i < wordcnt; i++) {
4529 		/* Clear SK and DI. */
4530 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
4531 		CSR_WRITE(sc, WMREG_EECD, reg);
4532 
4533 		/* Set CHIP SELECT. */
4534 		reg |= EECD_CS;
4535 		CSR_WRITE(sc, WMREG_EECD, reg);
4536 		delay(2);
4537 
4538 		/* Shift in the READ command. */
4539 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
4540 
4541 		/* Shift in address. */
4542 		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
4543 
4544 		/* Shift out the data. */
4545 		wm_eeprom_recvbits(sc, &val, 16);
4546 		data[i] = val & 0xffff;
4547 
4548 		/* Clear CHIP SELECT. */
4549 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
4550 		CSR_WRITE(sc, WMREG_EECD, reg);
4551 		delay(2);
4552 	}
4553 
4554 	return 0;
4555 }
4556 
4557 /*
4558  * wm_spi_eeprom_ready:
4559  *
4560  *	Wait for a SPI EEPROM to be ready for commands.
4561  */
4562 static int
4563 wm_spi_eeprom_ready(struct wm_softc *sc)
4564 {
4565 	uint32_t val;
4566 	int usec;
4567 
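	/* Poll the status register until the busy (RDY) bit clears. */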
4568 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
4569 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
4570 		wm_eeprom_recvbits(sc, &val, 8);
4571 		if ((val & SPI_SR_RDY) == 0)
4572 			break;
4573 	}
4574 	if (usec >= SPI_MAX_RETRIES) {
4575 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
4576 		return 1;
4577 	}
4578 	return 0;
4579 }
4580 
4581 /*
4582  * wm_read_eeprom_spi:
4583  *
4584  *	Read a word from the EEPROM using the SPI protocol.
4585  */
4586 static int
4587 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4588 {
4589 	uint32_t reg, val;
4590 	int i;
4591 	uint8_t opc;
4592 
4593 	/* Clear SK and CS. */
4594 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
4595 	CSR_WRITE(sc, WMREG_EECD, reg);
4596 	delay(2);
4597 
4598 	if (wm_spi_eeprom_ready(sc))
4599 		return 1;
4600 
4601 	/* Toggle CS to flush commands. */
4602 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
4603 	delay(2);
4604 	CSR_WRITE(sc, WMREG_EECD, reg);
4605 	delay(2);
4606 
4607 	opc = SPI_OPC_READ;
4608 	if (sc->sc_ee_addrbits == 8 && word >= 128)
4609 		opc |= SPI_OPC_A8;
4610 
4611 	wm_eeprom_sendbits(sc, opc, 8);
4612 	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
4613 
4614 	for (i = 0; i < wordcnt; i++) {
4615 		wm_eeprom_recvbits(sc, &val, 16);
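		/* Each word arrives high byte first; swap the bytes. */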
4616 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
4617 	}
4618 
4619 	/* Raise CS and clear SK. */
4620 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
4621 	CSR_WRITE(sc, WMREG_EECD, reg);
4622 	delay(2);
4623 
4624 	return 0;
4625 }
4626 
4627 #define EEPROM_CHECKSUM		0xBABA
4628 #define EEPROM_SIZE		0x0040
4629 
4630 /*
4631  * wm_validate_eeprom_checksum
4632  *
4633  * The checksum is defined as the sum of the first 64 (16 bit) words.
4634  */
4635 static int
4636 wm_validate_eeprom_checksum(struct wm_softc *sc)
4637 {
4638 	uint16_t checksum;
4639 	uint16_t eeprom_data;
4640 	int i;
4641 
4642 	checksum = 0;
4643 
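	/*
	 * Sum words 0x00-0x3F; the NVM's checksum word is programmed so
	 * that the 16-bit total comes out to EEPROM_CHECKSUM (0xBABA).
	 */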
4644 	for (i = 0; i < EEPROM_SIZE; i++) {
4645 		if (wm_read_eeprom(sc, i, 1, &eeprom_data))
4646 			return 1;
4647 		checksum += eeprom_data;
4648 	}
4649 
4650 	if (checksum != (uint16_t) EEPROM_CHECKSUM)
4651 		return 1;
4652 
4653 	return 0;
4654 }
4655 
4656 /*
4657  * wm_read_eeprom:
4658  *
4659  *	Read data from the serial EEPROM.
4660  */
4661 static int
4662 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4663 {
4664 	int rv;
4665 
4666 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
4667 		return 1;
4668 
4669 	if (wm_acquire_eeprom(sc))
4670 		return 1;
4671 
4672 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4673 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4674 		rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
4675 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
4676 		rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
4677 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
4678 		rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
4679 	else
4680 		rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
4681 
4682 	wm_release_eeprom(sc);
4683 	return rv;
4684 }
4685 
4686 static int
4687 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
4688     uint16_t *data)
4689 {
4690 	int i, eerd = 0;
4691 	int error = 0;
4692 
4693 	for (i = 0; i < wordcnt; i++) {
4694 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
4695 
4696 		CSR_WRITE(sc, WMREG_EERD, eerd);
4697 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
4698 		if (error != 0)
4699 			break;
4700 
4701 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
4702 	}
4703 
4704 	return error;
4705 }
4706 
4707 static int
4708 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
4709 {
4710 	uint32_t attempts = 100000;
4711 	uint32_t i, reg = 0;
4712 	int32_t done = -1;
4713 
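	/* Poll for the DONE bit; 100000 tries at 5us gives a 500ms budget. */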
4714 	for (i = 0; i < attempts; i++) {
4715 		reg = CSR_READ(sc, rw);
4716 
4717 		if (reg & EERD_DONE) {
4718 			done = 0;
4719 			break;
4720 		}
4721 		delay(5);
4722 	}
4723 
4724 	return done;
4725 }
4726 
4727 static int
4728 wm_check_alt_mac_addr(struct wm_softc *sc)
4729 {
4730 	uint16_t myea[ETHER_ADDR_LEN / 2];
4731 	uint16_t offset = EEPROM_OFF_MACADDR;
4732 
4733 	/* Try to read alternative MAC address pointer */
4734 	if (wm_read_eeprom(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
4735 		return -1;
4736 
4737 	/* Check pointer */
4738 	if (offset == 0xffff)
4739 		return -1;
4740 
4741 	/*
4742 	 * Check whether alternative MAC address is valid or not.
4743 	 * Some cards have a non-0xffff pointer but don't actually use
4744 	 * an alternative MAC address.
4745 	 *
4746 	 * Check whether the broadcast bit is set or not.
4747 	 */
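	/*
	 * Bit 0 of the first octet is the group (multicast) bit; it
	 * must be clear for a valid unicast station address.
	 */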
4748 	if (wm_read_eeprom(sc, offset, 1, myea) == 0)
4749 		if (((myea[0] & 0xff) & 0x01) == 0)
4750 			return 0; /* found! */
4751 
4752 	/* not found */
4753 	return -1;
4754 }
4755 
4756 static int
4757 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
4758 {
4759 	uint16_t myea[ETHER_ADDR_LEN / 2];
4760 	uint16_t offset = EEPROM_OFF_MACADDR;
4761 	int do_invert = 0;
4762 
4763 	switch (sc->sc_type) {
4764 	case WM_T_82580:
4765 	case WM_T_82580ER:
4766 		switch (sc->sc_funcid) {
4767 		case 0:
4768 			/* default value (== EEPROM_OFF_MACADDR) */
4769 			break;
4770 		case 1:
4771 			offset = EEPROM_OFF_LAN1;
4772 			break;
4773 		case 2:
4774 			offset = EEPROM_OFF_LAN2;
4775 			break;
4776 		case 3:
4777 			offset = EEPROM_OFF_LAN3;
4778 			break;
4779 		default:
4780 			goto bad;
4781 			/* NOTREACHED */
4782 			break;
4783 		}
4784 		break;
4785 	case WM_T_82571:
4786 	case WM_T_82575:
4787 	case WM_T_82576:
4788 	case WM_T_80003:
4789 		if (wm_check_alt_mac_addr(sc) != 0) {
4790 			/* reset the offset to LAN0 */
4791 			offset = EEPROM_OFF_MACADDR;
4792 			if ((sc->sc_funcid & 0x01) == 1)
4793 				do_invert = 1;
4794 			goto do_read;
4795 		}
4796 		switch (sc->sc_funcid) {
4797 		case 0:
4798 			/*
4799 			 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR
4800 			 * itself.
4801 			 */
4802 			break;
4803 		case 1:
4804 			offset += EEPROM_OFF_MACADDR_LAN1;
4805 			break;
4806 		case 2:
4807 			offset += EEPROM_OFF_MACADDR_LAN2;
4808 			break;
4809 		case 3:
4810 			offset += EEPROM_OFF_MACADDR_LAN3;
4811 			break;
4812 		default:
4813 			goto bad;
4814 			/* NOTREACHED */
4815 			break;
4816 		}
4817 		break;
4818 	default:
4819 		if ((sc->sc_funcid & 0x01) == 1)
4820 			do_invert = 1;
4821 		break;
4822 	}
4823 
4824  do_read:
4825 	if (wm_read_eeprom(sc, offset, sizeof(myea) / sizeof(myea[0]),
4826 		myea) != 0) {
4827 		goto bad;
4828 	}
4829 
4830 	enaddr[0] = myea[0] & 0xff;
4831 	enaddr[1] = myea[0] >> 8;
4832 	enaddr[2] = myea[1] & 0xff;
4833 	enaddr[3] = myea[1] >> 8;
4834 	enaddr[4] = myea[2] & 0xff;
4835 	enaddr[5] = myea[2] >> 8;
4836 
4837 	/*
4838 	 * Toggle the LSB of the MAC address on the second port
4839 	 * of some dual port cards.
4840 	 */
4841 	if (do_invert != 0)
4842 		enaddr[5] ^= 1;
4843 
4844 	return 0;
4845 
4846  bad:
4847 	aprint_error_dev(sc->sc_dev, "unable to read Ethernet address\n");
4848 
4849 	return -1;
4850 }
4851 
4852 /*
4853  * wm_add_rxbuf:
4854  *
4855  *	Add a receive buffer to the indicated descriptor.
4856  */
4857 static int
4858 wm_add_rxbuf(struct wm_softc *sc, int idx)
4859 {
4860 	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
4861 	struct mbuf *m;
4862 	int error;
4863 
4864 	MGETHDR(m, M_DONTWAIT, MT_DATA);
4865 	if (m == NULL)
4866 		return ENOBUFS;
4867 
4868 	MCLGET(m, M_DONTWAIT);
4869 	if ((m->m_flags & M_EXT) == 0) {
4870 		m_freem(m);
4871 		return ENOBUFS;
4872 	}
4873 
4874 	if (rxs->rxs_mbuf != NULL)
4875 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4876 
4877 	rxs->rxs_mbuf = m;
4878 
4879 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4880 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4881 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
4882 	if (error) {
4883 		/* XXX XXX XXX */
4884 		aprint_error_dev(sc->sc_dev,
4885 		    "unable to load rx DMA map %d, error = %d\n",
4886 		    idx, error);
4887 		panic("wm_add_rxbuf");
4888 	}
4889 
4890 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4891 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4892 
4893 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4894 		if ((sc->sc_rctl & RCTL_EN) != 0)
4895 			WM_INIT_RXDESC(sc, idx);
4896 	} else
4897 		WM_INIT_RXDESC(sc, idx);
4898 
4899 	return 0;
4900 }
4901 
4902 /*
4903  * wm_set_ral:
4904  *
4905  *	Set an entry in the receive address list.
4906  */
4907 static void
4908 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
4909 {
4910 	uint32_t ral_lo, ral_hi;
4911 
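	/*
	 * Pack the 6-byte address into the RAL low/high register pair;
	 * RAL_AV in the high word marks the entry valid.
	 */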
4912 	if (enaddr != NULL) {
4913 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
4914 		    (enaddr[3] << 24);
4915 		ral_hi = enaddr[4] | (enaddr[5] << 8);
4916 		ral_hi |= RAL_AV;
4917 	} else {
4918 		ral_lo = 0;
4919 		ral_hi = 0;
4920 	}
4921 
4922 	if (sc->sc_type >= WM_T_82544) {
4923 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
4924 		    ral_lo);
4925 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
4926 		    ral_hi);
4927 	} else {
4928 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
4929 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
4930 	}
4931 }
4932 
4933 /*
4934  * wm_mchash:
4935  *
4936  *	Compute the hash of the multicast address for the 4096-bit
4937  *	multicast filter.
4938  */
4939 static uint32_t
4940 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
4941 {
4942 	static const int lo_shift[4] = { 4, 3, 2, 0 };
4943 	static const int hi_shift[4] = { 4, 5, 6, 8 };
4944 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
4945 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
4946 	uint32_t hash;
4947 
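	/*
	 * The hash is drawn from the last two octets of the address;
	 * sc_mchash_type selects which 12 bits (10 on ICH/PCH) are used.
	 */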
4948 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4949 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)) {
4950 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
4951 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
4952 		return (hash & 0x3ff);
4953 	}
4954 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
4955 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
4956 
4957 	return (hash & 0xfff);
4958 }
4959 
4960 /*
4961  * wm_set_filter:
4962  *
4963  *	Set up the receive filter.
4964  */
4965 static void
4966 wm_set_filter(struct wm_softc *sc)
4967 {
4968 	struct ethercom *ec = &sc->sc_ethercom;
4969 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4970 	struct ether_multi *enm;
4971 	struct ether_multistep step;
4972 	bus_addr_t mta_reg;
4973 	uint32_t hash, reg, bit;
4974 	int i, size;
4975 
4976 	if (sc->sc_type >= WM_T_82544)
4977 		mta_reg = WMREG_CORDOVA_MTA;
4978 	else
4979 		mta_reg = WMREG_MTA;
4980 
4981 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
4982 
4983 	if (ifp->if_flags & IFF_BROADCAST)
4984 		sc->sc_rctl |= RCTL_BAM;
4985 	if (ifp->if_flags & IFF_PROMISC) {
4986 		sc->sc_rctl |= RCTL_UPE;
4987 		goto allmulti;
4988 	}
4989 
4990 	/*
4991 	 * Set the station address in the first RAL slot, and
4992 	 * clear the remaining slots.
4993 	 */
4994 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4995 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4996 		size = WM_ICH8_RAL_TABSIZE;
4997 	else
4998 		size = WM_RAL_TABSIZE;
4999 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
5000 	for (i = 1; i < size; i++)
5001 		wm_set_ral(sc, NULL, i);
5002 
5003 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5004 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
5005 		size = WM_ICH8_MC_TABSIZE;
5006 	else
5007 		size = WM_MC_TABSIZE;
5008 	/* Clear out the multicast table. */
5009 	for (i = 0; i < size; i++)
5010 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
5011 
5012 	ETHER_FIRST_MULTI(step, ec, enm);
5013 	while (enm != NULL) {
5014 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
5015 			/*
5016 			 * We must listen to a range of multicast addresses.
5017 			 * For now, just accept all multicasts, rather than
5018 			 * trying to set only those filter bits needed to match
5019 			 * the range.  (At this time, the only use of address
5020 			 * ranges is for IP multicast routing, for which the
5021 			 * range is big enough to require all bits set.)
5022 			 */
5023 			goto allmulti;
5024 		}
5025 
5026 		hash = wm_mchash(sc, enm->enm_addrlo);
5027 
5028 		reg = (hash >> 5);
5029 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5030 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
5031 			reg &= 0x1f;
5032 		else
5033 			reg &= 0x7f;
5034 		bit = hash & 0x1f;
5035 
5036 		hash = CSR_READ(sc, mta_reg + (reg << 2));
5037 		hash |= 1U << bit;
5038 
5039 		/* XXX Hardware bug?? */
5040 		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
5041 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
5042 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5043 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
5044 		} else
5045 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5046 
5047 		ETHER_NEXT_MULTI(step, enm);
5048 	}
5049 
5050 	ifp->if_flags &= ~IFF_ALLMULTI;
5051 	goto setit;
5052 
5053  allmulti:
5054 	ifp->if_flags |= IFF_ALLMULTI;
5055 	sc->sc_rctl |= RCTL_MPE;
5056 
5057  setit:
5058 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
5059 }
5060 
5061 /*
5062  * wm_tbi_mediainit:
5063  *
5064  *	Initialize media for use on 1000BASE-X devices.
5065  */
5066 static void
5067 wm_tbi_mediainit(struct wm_softc *sc)
5068 {
5069 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5070 	const char *sep = "";
5071 
5072 	if (sc->sc_type < WM_T_82543)
5073 		sc->sc_tipg = TIPG_WM_DFLT;
5074 	else
5075 		sc->sc_tipg = TIPG_LG_DFLT;
5076 
5077 	sc->sc_tbi_anegticks = 5;
5078 
5079 	/* Initialize our media structures */
5080 	sc->sc_mii.mii_ifp = ifp;
5081 
5082 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
5083 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
5084 	    wm_tbi_mediastatus);
5085 
5086 	/*
5087 	 * SWD Pins:
5088 	 *
5089 	 *	0 = Link LED (output)
5090 	 *	1 = Loss Of Signal (input)
5091 	 */
5092 	sc->sc_ctrl |= CTRL_SWDPIO(0);
5093 	sc->sc_ctrl &= ~CTRL_SWDPIO(1);
5094 
5095 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5096 
5097 #define	ADD(ss, mm, dd)							\
5098 do {									\
5099 	aprint_normal("%s%s", sep, ss);					\
5100 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
5101 	sep = ", ";							\
5102 } while (/*CONSTCOND*/0)
5103 
5104 	aprint_normal_dev(sc->sc_dev, "");
5105 	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
5106 	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
5107 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
5108 	aprint_normal("\n");
5109 
5110 #undef ADD
5111 
5112 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
5113 }
5114 
5115 /*
5116  * wm_tbi_mediastatus:	[ifmedia interface function]
5117  *
5118  *	Get the current interface media status on a 1000BASE-X device.
5119  */
5120 static void
5121 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5122 {
5123 	struct wm_softc *sc = ifp->if_softc;
5124 	uint32_t ctrl, status;
5125 
5126 	ifmr->ifm_status = IFM_AVALID;
5127 	ifmr->ifm_active = IFM_ETHER;
5128 
5129 	status = CSR_READ(sc, WMREG_STATUS);
5130 	if ((status & STATUS_LU) == 0) {
5131 		ifmr->ifm_active |= IFM_NONE;
5132 		return;
5133 	}
5134 
5135 	ifmr->ifm_status |= IFM_ACTIVE;
5136 	ifmr->ifm_active |= IFM_1000_SX;
5137 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
5138 		ifmr->ifm_active |= IFM_FDX;
5139 	ctrl = CSR_READ(sc, WMREG_CTRL);
5140 	if (ctrl & CTRL_RFCE)
5141 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
5142 	if (ctrl & CTRL_TFCE)
5143 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
5144 }
5145 
5146 /*
5147  * wm_tbi_mediachange:	[ifmedia interface function]
5148  *
5149  *	Set hardware to newly-selected media on a 1000BASE-X device.
5150  */
5151 static int
5152 wm_tbi_mediachange(struct ifnet *ifp)
5153 {
5154 	struct wm_softc *sc = ifp->if_softc;
5155 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5156 	uint32_t status;
5157 	int i;
5158 
5159 	sc->sc_txcw = 0;
5160 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
5161 	    (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
5162 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
5163 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5164 		sc->sc_txcw |= TXCW_ANE;
5165 	} else {
5166 		/*
5167 		 * If autonegotiation is turned off, force link up and turn on
5168 		 * full duplex
5169 		 */
5170 		sc->sc_txcw &= ~TXCW_ANE;
5171 		sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
5172 		sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
5173 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5174 		delay(1000);
5175 	}
5176 
5177 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
5178 		    device_xname(sc->sc_dev),sc->sc_txcw));
5179 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5180 	delay(10000);
5181 
5182 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
5183 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
5184 
5185 	/*
5186 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
5187 	 * optics detect a signal, 0 if they don't.
5188 	 */
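	/*
	 * That is: proceed when the pin reports signal on chips that
	 * implement it, or unconditionally when it reads zero.
	 */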
5189 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
5190 		/* Have signal; wait for the link to come up. */
5191 
5192 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5193 			/*
5194 			 * Reset the link, and let autonegotiation do its thing
5195 			 */
5196 			sc->sc_ctrl |= CTRL_LRST;
5197 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5198 			delay(1000);
5199 			sc->sc_ctrl &= ~CTRL_LRST;
5200 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5201 			delay(1000);
5202 		}
5203 
5204 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
5205 			delay(10000);
5206 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
5207 				break;
5208 		}
5209 
5210 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
5211 			    device_xname(sc->sc_dev),i));
5212 
5213 		status = CSR_READ(sc, WMREG_STATUS);
5214 		DPRINTF(WM_DEBUG_LINK,
5215 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
5216 			device_xname(sc->sc_dev),status, STATUS_LU));
5217 		if (status & STATUS_LU) {
5218 			/* Link is up. */
5219 			DPRINTF(WM_DEBUG_LINK,
5220 			    ("%s: LINK: set media -> link up %s\n",
5221 			    device_xname(sc->sc_dev),
5222 			    (status & STATUS_FD) ? "FDX" : "HDX"));
5223 
5224 			/*
5225 			 * NOTE: CTRL will update TFCE and RFCE automatically,
5226 			 * so we should update sc->sc_ctrl
5227 			 */
5228 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5229 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5230 			sc->sc_fcrtl &= ~FCRTL_XONE;
5231 			if (status & STATUS_FD)
5232 				sc->sc_tctl |=
5233 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5234 			else
5235 				sc->sc_tctl |=
5236 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5237 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
5238 				sc->sc_fcrtl |= FCRTL_XONE;
5239 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5240 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
5241 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
5242 				      sc->sc_fcrtl);
5243 			sc->sc_tbi_linkup = 1;
5244 		} else {
5245 			if (i == WM_LINKUP_TIMEOUT)
5246 				wm_check_for_link(sc);
5247 			/* Link is down. */
5248 			DPRINTF(WM_DEBUG_LINK,
5249 			    ("%s: LINK: set media -> link down\n",
5250 			    device_xname(sc->sc_dev)));
5251 			sc->sc_tbi_linkup = 0;
5252 		}
5253 	} else {
5254 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
5255 		    device_xname(sc->sc_dev)));
5256 		sc->sc_tbi_linkup = 0;
5257 	}
5258 
5259 	wm_tbi_set_linkled(sc);
5260 
5261 	return 0;
5262 }
5263 
5264 /*
5265  * wm_tbi_set_linkled:
5266  *
5267  *	Update the link LED on 1000BASE-X devices.
5268  */
5269 static void
5270 wm_tbi_set_linkled(struct wm_softc *sc)
5271 {
5272 
5273 	if (sc->sc_tbi_linkup)
5274 		sc->sc_ctrl |= CTRL_SWDPIN(0);
5275 	else
5276 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
5277 
5278 	/* 82540 or newer devices are active low */
5279 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
5280 
5281 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5282 }
5283 
5284 /*
5285  * wm_tbi_check_link:
5286  *
5287  *	Check the link on 1000BASE-X devices.
5288  */
5289 static void
5290 wm_tbi_check_link(struct wm_softc *sc)
5291 {
5292 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5293 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5294 	uint32_t rxcw, ctrl, status;
5295 
5296 	status = CSR_READ(sc, WMREG_STATUS);
5297 
5298 	rxcw = CSR_READ(sc, WMREG_RXCW);
5299 	ctrl = CSR_READ(sc, WMREG_CTRL);
5300 
5301 	/* set link status */
5302 	if ((status & STATUS_LU) == 0) {
5303 		DPRINTF(WM_DEBUG_LINK,
5304 		    ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
5305 		sc->sc_tbi_linkup = 0;
5306 	} else if (sc->sc_tbi_linkup == 0) {
5307 		DPRINTF(WM_DEBUG_LINK,
5308 		    ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
5309 		    (status & STATUS_FD) ? "FDX" : "HDX"));
5310 		sc->sc_tbi_linkup = 1;
5311 	}
5312 
5313 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
5314 	    && ((status & STATUS_LU) == 0)) {
5315 		sc->sc_tbi_linkup = 0;
5316 		if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
5317 			/* RXCFG storm: flood of /C/ ordered sets; reinitialize */
5318 			DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
5319 				sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
5320 			wm_init(ifp);
5321 			wm_start(ifp);
5322 		} else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5323 			/* If the timer expired, retry autonegotiation */
5324 			if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
5325 				DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
5326 				sc->sc_tbi_ticks = 0;
5327 				/*
5328 				 * Reset the link, and let autonegotiation do
5329 				 * its thing
5330 				 */
5331 				sc->sc_ctrl |= CTRL_LRST;
5332 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5333 				delay(1000);
5334 				sc->sc_ctrl &= ~CTRL_LRST;
5335 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5336 				delay(1000);
5337 				CSR_WRITE(sc, WMREG_TXCW,
5338 				    sc->sc_txcw & ~TXCW_ANE);
5339 				CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5340 			}
5341 		}
5342 	}
5343 
5344 	wm_tbi_set_linkled(sc);
5345 }
5346 
5347 /*
5348  * wm_gmii_reset:
5349  *
5350  *	Reset the PHY.
5351  */
5352 static void
5353 wm_gmii_reset(struct wm_softc *sc)
5354 {
5355 	uint32_t reg;
5356 	int rv;
5357 
5358 	/* get phy semaphore */
5359 	switch (sc->sc_type) {
5360 	case WM_T_82571:
5361 	case WM_T_82572:
5362 	case WM_T_82573:
5363 	case WM_T_82574:
5364 	case WM_T_82583:
5365 		 /* XXX should get sw semaphore, too */
5366 		rv = wm_get_swsm_semaphore(sc);
5367 		break;
5368 	case WM_T_82575:
5369 	case WM_T_82576:
5370 	case WM_T_82580:
5371 	case WM_T_82580ER:
5372 	case WM_T_80003:
5373 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5374 		break;
5375 	case WM_T_ICH8:
5376 	case WM_T_ICH9:
5377 	case WM_T_ICH10:
5378 	case WM_T_PCH:
5379 		rv = wm_get_swfwhw_semaphore(sc);
5380 		break;
5381 	default:
5382 		/* nothing to do */
5383 		rv = 0;
5384 		break;
5385 	}
5386 	if (rv != 0) {
5387 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5388 		    __func__);
5389 		return;
5390 	}
5391 
5392 	switch (sc->sc_type) {
5393 	case WM_T_82542_2_0:
5394 	case WM_T_82542_2_1:
5395 		/* null */
5396 		break;
5397 	case WM_T_82543:
5398 		/*
5399 		 * With 82543, we need to force speed and duplex on the MAC
5400 		 * equal to what the PHY speed and duplex configuration is.
5401 		 * In addition, we need to perform a hardware reset on the PHY
5402 		 * to take it out of reset.
5403 		 */
5404 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5405 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5406 
5407 		/* The PHY reset pin is active-low. */
5408 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
5409 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
5410 		    CTRL_EXT_SWDPIN(4));
5411 		reg |= CTRL_EXT_SWDPIO(4);
5412 
5413 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5414 		delay(10*1000);
5415 
5416 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
5417 		delay(150);
5418 #if 0
5419 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
5420 #endif
5421 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
5422 		break;
5423 	case WM_T_82544:	/* reset 10000us */
5424 	case WM_T_82540:
5425 	case WM_T_82545:
5426 	case WM_T_82545_3:
5427 	case WM_T_82546:
5428 	case WM_T_82546_3:
5429 	case WM_T_82541:
5430 	case WM_T_82541_2:
5431 	case WM_T_82547:
5432 	case WM_T_82547_2:
5433 	case WM_T_82571:	/* reset 100us */
5434 	case WM_T_82572:
5435 	case WM_T_82573:
5436 	case WM_T_82574:
5437 	case WM_T_82575:
5438 	case WM_T_82576:
5439 	case WM_T_82580:
5440 	case WM_T_82580ER:
5441 	case WM_T_82583:
5442 	case WM_T_80003:
5443 		/* generic reset */
5444 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5445 		delay(20000);
5446 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5447 		delay(20000);
5448 
5449 		if ((sc->sc_type == WM_T_82541)
5450 		    || (sc->sc_type == WM_T_82541_2)
5451 		    || (sc->sc_type == WM_T_82547)
5452 		    || (sc->sc_type == WM_T_82547_2)) {
5453 			/* workarounds for igp are done in igp_reset() */
5454 			/* XXX add code to set LED after phy reset */
5455 		}
5456 		break;
5457 	case WM_T_ICH8:
5458 	case WM_T_ICH9:
5459 	case WM_T_ICH10:
5460 	case WM_T_PCH:
5461 		/* generic reset */
5462 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5463 		delay(100);
5464 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5465 		delay(150);
5466 		break;
5467 	default:
5468 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
5469 		    __func__);
5470 		break;
5471 	}
5472 
5473 	/* release PHY semaphore */
5474 	switch (sc->sc_type) {
5475 	case WM_T_82571:
5476 	case WM_T_82572:
5477 	case WM_T_82573:
5478 	case WM_T_82574:
5479 	case WM_T_82583:
5480 		 /* XXX should put sw semaphore, too */
5481 		wm_put_swsm_semaphore(sc);
5482 		break;
5483 	case WM_T_82575:
5484 	case WM_T_82576:
5485 	case WM_T_82580:
5486 	case WM_T_82580ER:
5487 	case WM_T_80003:
5488 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5489 		break;
5490 	case WM_T_ICH8:
5491 	case WM_T_ICH9:
5492 	case WM_T_ICH10:
5493 	case WM_T_PCH:
5494 		wm_put_swfwhw_semaphore(sc);
5495 		break;
5496 	default:
5497 		/* nothing to do */
5499 		break;
5500 	}
5501 
5502 	/* get_cfg_done */
5503 	wm_get_cfg_done(sc);
5504 
5505 	/* extra setup */
5506 	switch (sc->sc_type) {
5507 	case WM_T_82542_2_0:
5508 	case WM_T_82542_2_1:
5509 	case WM_T_82543:
5510 	case WM_T_82544:
5511 	case WM_T_82540:
5512 	case WM_T_82545:
5513 	case WM_T_82545_3:
5514 	case WM_T_82546:
5515 	case WM_T_82546_3:
5516 	case WM_T_82541_2:
5517 	case WM_T_82547_2:
5518 	case WM_T_82571:
5519 	case WM_T_82572:
5520 	case WM_T_82573:
5521 	case WM_T_82574:
5522 	case WM_T_82575:
5523 	case WM_T_82576:
5524 	case WM_T_82580:
5525 	case WM_T_82580ER:
5526 	case WM_T_82583:
5527 	case WM_T_80003:
5528 		/* null */
5529 		break;
5530 	case WM_T_82541:
5531 	case WM_T_82547:
5532 		/* XXX Actively configure the LED after PHY reset */
5533 		break;
5534 	case WM_T_ICH8:
5535 	case WM_T_ICH9:
5536 	case WM_T_ICH10:
5537 	case WM_T_PCH:
5538 		/* Allow time for h/w to get to a quiescent state after reset */
5539 		delay(10*1000);
5540 
5541 		if (sc->sc_type == WM_T_PCH) {
5542 			wm_hv_phy_workaround_ich8lan(sc);
5543 
5544 			/*
5545 			 * dummy read to clear the phy wakeup bit after lcd
5546 			 * reset
5547 			 */
5548 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
5549 		}
5550 
5551 		/*
5552 		 * XXX Configure the LCD with the extended configuration region
5553 		 * in NVM
5554 		 */
5555 
5556 		/* Configure the LCD with the OEM bits in NVM */
5557 		if (sc->sc_type == WM_T_PCH) {
5558 			/*
5559 			 * Disable LPLU.
5560 			 * XXX It seems that 82567 has LPLU, too.
5561 			 */
5562 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
5563 			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
5564 			reg |= HV_OEM_BITS_ANEGNOW;
5565 			wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
5566 		}
5567 		break;
5568 	default:
5569 		panic("%s: unknown type\n", __func__);
5570 		break;
5571 	}
5572 }
5573 
5574 /*
5575  * wm_gmii_mediainit:
5576  *
5577  *	Initialize media for use on 1000BASE-T devices.
5578  */
5579 static void
5580 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
5581 {
5582 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5583 
5584 	/* We have MII. */
5585 	sc->sc_flags |= WM_F_HAS_MII;
5586 
5587 	if (sc->sc_type == WM_T_80003)
5588 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
5589 	else
5590 		sc->sc_tipg = TIPG_1000T_DFLT;
5591 
5592 	/*
5593 	 * Let the chip set speed/duplex on its own based on
5594 	 * signals from the PHY.
5595 	 * XXXbouyer - I'm not sure this is right for the 80003,
5596 	 * the em driver only sets CTRL_SLU here - but it seems to work.
5597 	 */
5598 	sc->sc_ctrl |= CTRL_SLU;
5599 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5600 
5601 	/* Initialize our media structures and probe the GMII. */
5602 	sc->sc_mii.mii_ifp = ifp;
5603 
5604 	switch (prodid) {
5605 	case PCI_PRODUCT_INTEL_PCH_M_LM:
5606 	case PCI_PRODUCT_INTEL_PCH_M_LC:
5607 		/* 82577 */
5608 		sc->sc_phytype = WMPHY_82577;
5609 		sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
5610 		sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
5611 		break;
5612 	case PCI_PRODUCT_INTEL_PCH_D_DM:
5613 	case PCI_PRODUCT_INTEL_PCH_D_DC:
5614 		/* 82578 */
5615 		sc->sc_phytype = WMPHY_82578;
5616 		sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
5617 		sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
5618 		break;
5619 	case PCI_PRODUCT_INTEL_82801I_BM:
5620 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
5621 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
5622 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
5623 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
5624 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
5625 		/* 82567 */
5626 		sc->sc_phytype = WMPHY_BM;
5627 		sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
5628 		sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
5629 		break;
5630 	default:
5631 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
5632 			sc->sc_mii.mii_readreg = wm_sgmii_readreg;
5633 			sc->sc_mii.mii_writereg = wm_sgmii_writereg;
5634 		} else if (sc->sc_type >= WM_T_80003) {
5635 			sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
5636 			sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
5637 		} else if (sc->sc_type >= WM_T_82544) {
5638 			sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
5639 			sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
5640 		} else {
5641 			sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
5642 			sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
5643 		}
5644 		break;
5645 	}
5646 	sc->sc_mii.mii_statchg = wm_gmii_statchg;
5647 
5648 	wm_gmii_reset(sc);
5649 
5650 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
5651 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
5652 	    wm_gmii_mediastatus);
5653 
5654 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
5655 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)) {
5656 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
5657 			/* Attach only one port */
5658 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
5659 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
5660 		} else {
5661 			int i;
5662 			uint32_t ctrl_ext;
5663 
5664 			/* Power on the SGMII PHY if it is disabled */
5665 			ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
5666 			CSR_WRITE(sc, WMREG_CTRL_EXT,
5667 			    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
5668 			CSR_WRITE_FLUSH(sc);
5669 			delay(300*1000); /* XXX too long */
5670 
5671 			/* try PHY addresses 1 through 7 */
5672 			for (i = 1; i < 8; i++)
5673 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
5674 				    i, MII_OFFSET_ANY, MIIF_DOPAUSE);
5675 
5676 			/* restore previous SFP cage power state */
5677 			CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
5678 		}
5679 	} else {
5680 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
5681 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
5682 	}
5683 
5684 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
5685 		/* if failed, retry with *_bm_* */
5686 		sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
5687 		sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
5688 
5689 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
5690 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
5691 	}
5692 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
5693 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
5694 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
5695 		sc->sc_phytype = WMPHY_NONE;
5696 	} else {
5697 		/* Check PHY type */
5698 		uint32_t model;
5699 		struct mii_softc *child;
5700 
5701 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
5702 		if (device_is_a(child->mii_dev, "igphy")) {
5703 			struct igphy_softc *isc = (struct igphy_softc *)child;
5704 
5705 			model = isc->sc_mii.mii_mpd_model;
5706 			if (model == MII_MODEL_yyINTEL_I82566)
5707 				sc->sc_phytype = WMPHY_IGP_3;
5708 		}
5709 
5710 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
5711 	}
5712 }
5713 
5714 /*
5715  * wm_gmii_mediastatus:	[ifmedia interface function]
5716  *
5717  *	Get the current interface media status on a 1000BASE-T device.
5718  */
5719 static void
5720 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5721 {
5722 	struct wm_softc *sc = ifp->if_softc;
5723 
5724 	ether_mediastatus(ifp, ifmr);
5725 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
5726 	    | sc->sc_flowflags;
5727 }
5728 
5729 /*
5730  * wm_gmii_mediachange:	[ifmedia interface function]
5731  *
5732  *	Set hardware to newly-selected media on a 1000BASE-T device.
5733  */
5734 static int
5735 wm_gmii_mediachange(struct ifnet *ifp)
5736 {
5737 	struct wm_softc *sc = ifp->if_softc;
5738 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5739 	int rc;
5740 
5741 	if ((ifp->if_flags & IFF_UP) == 0)
5742 		return 0;
5743 
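	/*
	 * With autoselect, or on chips newer than the 82543, the MAC
	 * follows the PHY's speed/duplex; otherwise speed and duplex
	 * are forced below from the selected media.
	 */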
5744 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
5745 	sc->sc_ctrl |= CTRL_SLU;
5746 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
5747 	    || (sc->sc_type > WM_T_82543)) {
5748 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
5749 	} else {
5750 		sc->sc_ctrl &= ~CTRL_ASDE;
5751 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5752 		if (ife->ifm_media & IFM_FDX)
5753 			sc->sc_ctrl |= CTRL_FD;
5754 		switch (IFM_SUBTYPE(ife->ifm_media)) {
5755 		case IFM_10_T:
5756 			sc->sc_ctrl |= CTRL_SPEED_10;
5757 			break;
5758 		case IFM_100_TX:
5759 			sc->sc_ctrl |= CTRL_SPEED_100;
5760 			break;
5761 		case IFM_1000_T:
5762 			sc->sc_ctrl |= CTRL_SPEED_1000;
5763 			break;
5764 		default:
5765 			panic("wm_gmii_mediachange: bad media 0x%x",
5766 			    ife->ifm_media);
5767 		}
5768 	}
5769 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5770 	if (sc->sc_type <= WM_T_82543)
5771 		wm_gmii_reset(sc);
5772 
5773 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
5774 		return 0;
5775 	return rc;
5776 }
5777 
5778 #define	MDI_IO		CTRL_SWDPIN(2)
5779 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
5780 #define	MDI_CLK		CTRL_SWDPIN(3)
5781 
5782 static void
5783 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
5784 {
5785 	uint32_t i, v;
5786 
5787 	v = CSR_READ(sc, WMREG_CTRL);
5788 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
5789 	v |= MDI_DIR | CTRL_SWDPIO(3);
5790 
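	/*
	 * Shift the bits out MSB first.  For each bit: present it on
	 * MDI_IO, then pulse MDI_CLK high and low; the PHY samples
	 * MDI_IO on the rising clock edge.
	 */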
5791 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
5792 		if (data & i)
5793 			v |= MDI_IO;
5794 		else
5795 			v &= ~MDI_IO;
5796 		CSR_WRITE(sc, WMREG_CTRL, v);
5797 		delay(10);
5798 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5799 		delay(10);
5800 		CSR_WRITE(sc, WMREG_CTRL, v);
5801 		delay(10);
5802 	}
5803 }
5804 
5805 static uint32_t
5806 i82543_mii_recvbits(struct wm_softc *sc)
5807 {
5808 	uint32_t v, i, data = 0;
5809 
5810 	v = CSR_READ(sc, WMREG_CTRL);
5811 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
5812 	v |= CTRL_SWDPIO(3);
5813 
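	/*
	 * MDI_IO is now an input (MDI_DIR stays cleared above).  Clock
	 * once for the turnaround, then shift in the 16 data bits MSB
	 * first, sampling MDI_IO while the clock is high.
	 */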
5814 	CSR_WRITE(sc, WMREG_CTRL, v);
5815 	delay(10);
5816 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5817 	delay(10);
5818 	CSR_WRITE(sc, WMREG_CTRL, v);
5819 	delay(10);
5820 
5821 	for (i = 0; i < 16; i++) {
5822 		data <<= 1;
5823 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5824 		delay(10);
5825 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
5826 			data |= 1;
5827 		CSR_WRITE(sc, WMREG_CTRL, v);
5828 		delay(10);
5829 	}
5830 
5831 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5832 	delay(10);
5833 	CSR_WRITE(sc, WMREG_CTRL, v);
5834 	delay(10);
5835 
5836 	return data;
5837 }
5838 
5839 #undef MDI_IO
5840 #undef MDI_DIR
5841 #undef MDI_CLK
5842 
5843 /*
5844  * wm_gmii_i82543_readreg:	[mii interface function]
5845  *
5846  *	Read a PHY register on the GMII (i82543 version).
5847  */
5848 static int
5849 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
5850 {
5851 	struct wm_softc *sc = device_private(self);
5852 	int rv;
5853 
5854 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
5855 	i82543_mii_sendbits(sc, reg | (phy << 5) |
5856 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
5857 	rv = i82543_mii_recvbits(sc) & 0xffff;
5858 
5859 	DPRINTF(WM_DEBUG_GMII,
5860 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
5861 	    device_xname(sc->sc_dev), phy, reg, rv));
5862 
5863 	return rv;
5864 }
5865 
5866 /*
5867  * wm_gmii_i82543_writereg:	[mii interface function]
5868  *
5869  *	Write a PHY register on the GMII (i82543 version).
5870  */
5871 static void
5872 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
5873 {
5874 	struct wm_softc *sc = device_private(self);
5875 
5876 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
5877 	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
5878 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
5879 	    (MII_COMMAND_START << 30), 32);
5880 }
5881 
5882 /*
5883  * wm_gmii_i82544_readreg:	[mii interface function]
5884  *
5885  *	Read a PHY register on the GMII.
5886  */
5887 static int
5888 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
5889 {
5890 	struct wm_softc *sc = device_private(self);
5891 	uint32_t mdic = 0;
5892 	int i, rv;
5893 
5894 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
5895 	    MDIC_REGADD(reg));
5896 
5897 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
5898 		mdic = CSR_READ(sc, WMREG_MDIC);
5899 		if (mdic & MDIC_READY)
5900 			break;
5901 		delay(50);
5902 	}
5903 
5904 	if ((mdic & MDIC_READY) == 0) {
5905 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
5906 		    device_xname(sc->sc_dev), phy, reg);
5907 		rv = 0;
5908 	} else if (mdic & MDIC_E) {
5909 #if 0 /* This is normal if no PHY is present. */
5910 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
5911 		    device_xname(sc->sc_dev), phy, reg);
5912 #endif
5913 		rv = 0;
5914 	} else {
5915 		rv = MDIC_DATA(mdic);
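		/* All ones usually means no PHY answered at this address. */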
5916 		if (rv == 0xffff)
5917 			rv = 0;
5918 	}
5919 
5920 	return rv;
5921 }
5922 
5923 /*
5924  * wm_gmii_i82544_writereg:	[mii interface function]
5925  *
5926  *	Write a PHY register on the GMII.
5927  */
5928 static void
5929 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
5930 {
5931 	struct wm_softc *sc = device_private(self);
5932 	uint32_t mdic = 0;
5933 	int i;
5934 
5935 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
5936 	    MDIC_REGADD(reg) | MDIC_DATA(val));
5937 
5938 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
5939 		mdic = CSR_READ(sc, WMREG_MDIC);
5940 		if (mdic & MDIC_READY)
5941 			break;
5942 		delay(50);
5943 	}
5944 
5945 	if ((mdic & MDIC_READY) == 0)
5946 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
5947 		    device_xname(sc->sc_dev), phy, reg);
5948 	else if (mdic & MDIC_E)
5949 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
5950 		    device_xname(sc->sc_dev), phy, reg);
5951 }
5952 
5953 /*
5954  * wm_gmii_i80003_readreg:	[mii interface function]
5955  *
5956  *	Read a PHY register on the Kumeran bus (80003)
5957  * This could be handled by the PHY layer if we didn't have to lock the
5958  * resource ...
5959  */
5960 static int
5961 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
5962 {
5963 	struct wm_softc *sc = device_private(self);
5964 	int sem;
5965 	int rv;
5966 
5967 	if (phy != 1) /* only one PHY on kumeran bus */
5968 		return 0;
5969 
5970 	sem = swfwphysem[sc->sc_funcid];
5971 	if (wm_get_swfw_semaphore(sc, sem)) {
5972 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5973 		    __func__);
5974 		return 0;
5975 	}
5976 
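	/*
	 * GG82563 registers at or above GG82563_MIN_ALT_REG are reached
	 * through the alternate page-select register; the rest use the
	 * normal one.
	 */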
5977 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
5978 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5979 		    reg >> GG82563_PAGE_SHIFT);
5980 	} else {
5981 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
5982 		    reg >> GG82563_PAGE_SHIFT);
5983 	}
5984 	/* Wait an extra 200us to work around a bug with the ready bit in the MDIC register */
5985 	delay(200);
5986 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
5987 	delay(200);
5988 
5989 	wm_put_swfw_semaphore(sc, sem);
5990 	return rv;
5991 }
5992 
5993 /*
5994  * wm_gmii_i80003_writereg:	[mii interface function]
5995  *
5996  *	Write a PHY register on the Kumeran bus (80003).
5997  * This could be handled by the PHY layer if we didn't have to lock the
5998  * resource ...
5999  */
6000 static void
6001 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6002 {
6003 	struct wm_softc *sc = device_private(self);
6004 	int sem;
6005 
6006 	if (phy != 1) /* only one PHY on kumeran bus */
6007 		return;
6008 
6009 	sem = swfwphysem[sc->sc_funcid];
6010 	if (wm_get_swfw_semaphore(sc, sem)) {
6011 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6012 		    __func__);
6013 		return;
6014 	}
6015 
6016 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6017 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6018 		    reg >> GG82563_PAGE_SHIFT);
6019 	} else {
6020 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6021 		    reg >> GG82563_PAGE_SHIFT);
6022 	}
6023 	/* Wait an extra 200us to work around a bug with the ready bit in the MDIC register */
6024 	delay(200);
6025 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6026 	delay(200);
6027 
6028 	wm_put_swfw_semaphore(sc, sem);
6029 }
6030 
6031 /*
6032  * wm_gmii_bm_readreg:	[mii interface function]
6033  *
6034  *	Read a PHY register on the BM PHY (82567)
6035  * This could be handled by the PHY layer if we didn't have to lock the
6036  * resource ...
6037  */
6038 static int
6039 wm_gmii_bm_readreg(device_t self, int phy, int reg)
6040 {
6041 	struct wm_softc *sc = device_private(self);
6042 	int sem;
6043 	int rv;
6044 
6045 	sem = swfwphysem[sc->sc_funcid];
6046 	if (wm_get_swfw_semaphore(sc, sem)) {
6047 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6048 		    __func__);
6049 		return 0;
6050 	}
6051 
6052 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6053 		if (phy == 1)
6054 			wm_gmii_i82544_writereg(self, phy, 0x1f,
6055 			    reg);
6056 		else
6057 			wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6058 			    reg >> GG82563_PAGE_SHIFT);
6059 
6060 	}
6061 
6062 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6063 	wm_put_swfw_semaphore(sc, sem);
6064 	return rv;
6065 }
6066 
6067 /*
6068  * wm_gmii_bm_writereg:	[mii interface function]
6069  *
6070  *	Write a PHY register on the BM PHY (82567).
6071  * This could be handled by the PHY layer if we didn't have to lock the
6072  * resource ...
6073  */
6074 static void
6075 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6076 {
6077 	struct wm_softc *sc = device_private(self);
6078 	int sem;
6079 
6080 	sem = swfwphysem[sc->sc_funcid];
6081 	if (wm_get_swfw_semaphore(sc, sem)) {
6082 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6083 		    __func__);
6084 		return;
6085 	}
6086 
6087 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6088 		if (phy == 1)
6089 			wm_gmii_i82544_writereg(self, phy, 0x1f,
6090 			    reg);
6091 		else
6092 			wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6093 			    reg >> GG82563_PAGE_SHIFT);
6094 
6095 	}
6096 
6097 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6098 	wm_put_swfw_semaphore(sc, sem);
6099 }
6100 
6101 static void
6102 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
6103 {
6104 	struct wm_softc *sc = device_private(self);
6105 	uint16_t regnum = BM_PHY_REG_NUM(offset);
6106 	uint16_t wuce;
6107 
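	/*
	 * Access sequence for the BM wakeup (page 800) registers:
	 * select page 769 and set the wakeup-register enable bit (with
	 * host wakeup disabled), select page 800, write the register
	 * number to the address opcode, access the data opcode, then
	 * restore the original page 769 enable value.
	 */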
6108 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
6109 	if (sc->sc_type == WM_T_PCH) {
6110 		/* XXX the e1000 driver does nothing here... why? */
6111 	}
6112 
6113 	/* Set page 769 */
6114 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6115 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6116 
6117 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
6118 
6119 	wuce &= ~BM_WUC_HOST_WU_BIT;
6120 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
6121 	    wuce | BM_WUC_ENABLE_BIT);
6122 
6123 	/* Select page 800 */
6124 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6125 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
6126 
6127 	/* Write page 800 */
6128 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
6129 
6130 	if (rd)
6131 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
6132 	else
6133 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
6134 
6135 	/* Set page 769 */
6136 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6137 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6138 
6139 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
6140 }
6141 
6142 /*
6143  * wm_gmii_hv_readreg:	[mii interface function]
6144  *
6145  *	Read a PHY register on the HV PHY (82577/82578)
6146  * This could be handled by the PHY layer if we didn't have to lock the
6147  * resource ...
6148  */
6149 static int
6150 wm_gmii_hv_readreg(device_t self, int phy, int reg)
6151 {
6152 	struct wm_softc *sc = device_private(self);
6153 	uint16_t page = BM_PHY_REG_PAGE(reg);
6154 	uint16_t regnum = BM_PHY_REG_NUM(reg);
6155 	uint16_t val;
6156 	int rv;
6157 
6158 	if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6159 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6160 		    __func__);
6161 		return 0;
6162 	}
6163 
6164 	/* XXX Workaround failure in MDIO access while cable is disconnected */
6165 	if (sc->sc_phytype == WMPHY_82577) {
6166 		/* XXX must write */
6167 	}
6168 
6169 	/* Page 800 works differently than the rest so it has its own func */
6170 	if (page == BM_WUC_PAGE) {
6171 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM); /* don't leak the semaphore */
6172 		return val;
6173 	}
6174 
6175 	/*
6176 	 * Pages below 768 work differently than the rest, so they need
6177 	 * their own function
6178 	 */
6179 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6180 		printf("gmii_hv_readreg!!!\n");
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM); /* don't leak the semaphore */
6181 		return 0;
6182 	}
6183 
6184 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6185 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6186 		    page << BME1000_PAGE_SHIFT);
6187 	}
6188 
6189 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
6190 	wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6191 	return rv;
6192 }
6193 
6194 /*
6195  * wm_gmii_hv_writereg:	[mii interface function]
6196  *
6197  *	Write a PHY register on the HV PHY (82577/82578).
6198  * This could be handled by the PHY layer if we didn't have to lock the
6199  * resource ...
6200  */
6201 static void
6202 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
6203 {
6204 	struct wm_softc *sc = device_private(self);
6205 	uint16_t page = BM_PHY_REG_PAGE(reg);
6206 	uint16_t regnum = BM_PHY_REG_NUM(reg);
6207 
6208 	if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6209 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6210 		    __func__);
6211 		return;
6212 	}
6213 
6214 	/* XXX Workaround failure in MDIO access while cable is disconnected */
6215 
6216 	/* Page 800 works differently than the rest so it has its own func */
6217 	if (page == BM_WUC_PAGE) {
6218 		uint16_t tmp;
6219 
6220 		tmp = val;
6221 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM); /* don't leak the semaphore */
6222 		return;
6223 	}
6224 
6225 	/*
6226 	 * Pages below 768 work differently than the rest, so they need
6227 	 * their own function
6228 	 */
6229 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6230 		printf("gmii_hv_writereg!!!\n");
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM); /* don't leak the semaphore */
6231 		return;
6232 	}
6233 
6234 	/*
6235 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
6236 	 * Power Down (whenever bit 11 of the PHY control register is set)
6237 	 */
6238 
6239 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6240 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6241 		    page << BME1000_PAGE_SHIFT);
6242 	}
6243 
6244 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
6245 	wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6246 }
6247 
6248 /*
6249  * wm_sgmii_readreg:	[mii interface function]
6250  *
6251  *	Read a PHY register on the SGMII
6252  * This could be handled by the PHY layer if we didn't have to lock the
6253  * resource ...
6254  */
6255 static int
6256 wm_sgmii_readreg(device_t self, int phy, int reg)
6257 {
6258 	struct wm_softc *sc = device_private(self);
6259 	uint32_t i2ccmd;
6260 	int i, rv;
6261 
6262 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
6263 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6264 		    __func__);
6265 		return 0;
6266 	}
6267 
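	/*
	 * On SGMII the PHY is reached through the I2CCMD register:
	 * encode the register and PHY addresses along with a read
	 * opcode, then poll for I2CCMD_READY.
	 */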
6268 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
6269 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
6270 	    | I2CCMD_OPCODE_READ;
6271 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
6272 
6273 	/* Poll the ready bit */
6274 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
6275 		delay(50);
6276 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
6277 		if (i2ccmd & I2CCMD_READY)
6278 			break;
6279 	}
6280 	if ((i2ccmd & I2CCMD_READY) == 0)
6281 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
6282 	if ((i2ccmd & I2CCMD_ERROR) != 0)
6283 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
6284 
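	/* The 16-bit data arrives byte-swapped; restore host order. */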
6285 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
6286 
6287 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6288 	return rv;
6289 }
6290 
6291 /*
6292  * wm_sgmii_writereg:	[mii interface function]
6293  *
6294  *	Write a PHY register on the SGMII.
6295  * This could be handled by the PHY layer if we didn't have to lock the
6296  * resource ...
6297  */
6298 static void
6299 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
6300 {
6301 	struct wm_softc *sc = device_private(self);
6302 	uint32_t i2ccmd;
6303 	int i;
6304 
6305 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
6306 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6307 		    __func__);
6308 		return;
6309 	}
6310 
6311 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
6312 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
6313 	    | I2CCMD_OPCODE_WRITE;
6314 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
6315 
6316 	/* Poll the ready bit */
6317 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
6318 		delay(50);
6319 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
6320 		if (i2ccmd & I2CCMD_READY)
6321 			break;
6322 	}
6323 	if ((i2ccmd & I2CCMD_READY) == 0)
6324 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
6325 	if ((i2ccmd & I2CCMD_ERROR) != 0)
6326 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
6327 
6328 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6329 }
6330 
6331 /*
6332  * wm_gmii_statchg:	[mii interface function]
6333  *
6334  *	Callback from MII layer when media changes.
6335  */
6336 static void
6337 wm_gmii_statchg(device_t self)
6338 {
6339 	struct wm_softc *sc = device_private(self);
6340 	struct mii_data *mii = &sc->sc_mii;
6341 
6342 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
6343 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6344 	sc->sc_fcrtl &= ~FCRTL_XONE;
6345 
6346 	/*
6347 	 * Get flow control negotiation result.
6348 	 */
6349 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
6350 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
6351 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
6352 		mii->mii_media_active &= ~IFM_ETH_FMASK;
6353 	}
6354 
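	/*
	 * Map the negotiated pause flags onto the MAC: TX pause enables
	 * transmit flow control and XON frames, RX pause enables receive
	 * flow control.
	 */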
6355 	if (sc->sc_flowflags & IFM_FLOW) {
6356 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
6357 			sc->sc_ctrl |= CTRL_TFCE;
6358 			sc->sc_fcrtl |= FCRTL_XONE;
6359 		}
6360 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
6361 			sc->sc_ctrl |= CTRL_RFCE;
6362 	}
6363 
6364 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
6365 		DPRINTF(WM_DEBUG_LINK,
6366 		    ("%s: LINK: statchg: FDX\n", device_xname(sc->sc_dev)));
6367 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6368 	} else {
6369 		DPRINTF(WM_DEBUG_LINK,
6370 		    ("%s: LINK: statchg: HDX\n", device_xname(sc->sc_dev)));
6371 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6372 	}
6373 
6374 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6375 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6376 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
6377 						 : WMREG_FCRTL, sc->sc_fcrtl);
6378 	if (sc->sc_type == WM_T_80003) {
6379 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
6380 		case IFM_1000_T:
6381 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
6382 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
6383 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
6384 			break;
6385 		default:
6386 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
6387 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
6388 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
6389 			break;
6390 		}
6391 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
6392 	}
6393 }
6394 
6395 /*
6396  * wm_kmrn_readreg:
6397  *
6398  *	Read a Kumeran register
6399  */
6400 static int
6401 wm_kmrn_readreg(struct wm_softc *sc, int reg)
6402 {
6403 	int rv;
6404 
6405 	if (sc->sc_flags & WM_F_SWFW_SYNC) {
6406 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
6407 			aprint_error_dev(sc->sc_dev,
6408 			    "%s: failed to get semaphore\n", __func__);
6409 			return 0;
6410 		}
6411 	} else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
6412 		if (wm_get_swfwhw_semaphore(sc)) {
6413 			aprint_error_dev(sc->sc_dev,
6414 			    "%s: failed to get semaphore\n", __func__);
6415 			return 0;
6416 		}
6417 	}
6418 
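	/*
	 * Kumeran registers are read by writing the offset with the
	 * read-enable bit set, then reading the result back from the
	 * same KUMCTRLSTA register.
	 */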
6419 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
6420 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
6421 	    KUMCTRLSTA_REN);
6422 	delay(2);
6423 
6424 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
6425 
6426 	if (sc->sc_flags & WM_F_SWFW_SYNC)
6427 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
6428 	else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
6429 		wm_put_swfwhw_semaphore(sc);
6430 
6431 	return rv;
6432 }
6433 
6434 /*
6435  * wm_kmrn_writereg:
6436  *
6437  *	Write a Kumeran register
6438  */
6439 static void
6440 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
6441 {
6442 
6443 	if (sc->sc_flags & WM_F_SWFW_SYNC) {
6444 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
6445 			aprint_error_dev(sc->sc_dev,
6446 			    "%s: failed to get semaphore\n", __func__);
6447 			return;
6448 		}
6449 	} else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
6450 		if (wm_get_swfwhw_semaphore(sc)) {
6451 			aprint_error_dev(sc->sc_dev,
6452 			    "%s: failed to get semaphore\n", __func__);
6453 			return;
6454 		}
6455 	}
6456 
6457 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
6458 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
6459 	    (val & KUMCTRLSTA_MASK));
6460 
6461 	if (sc->sc_flags & WM_F_SWFW_SYNC)
6462 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
6463 	else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
6464 		wm_put_swfwhw_semaphore(sc);
6465 }
6466 
6467 static int
6468 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
6469 {
6470 	uint32_t eecd = 0;
6471 
6472 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
6473 	    || sc->sc_type == WM_T_82583) {
6474 		eecd = CSR_READ(sc, WMREG_EECD);
6475 
6476 		/* Isolate bits 15 & 16 */
6477 		eecd = ((eecd >> 15) & 0x03);
6478 
6479 		/* If both bits are set, device is Flash type */
6480 		if (eecd == 0x03)
6481 			return 0;
6482 	}
6483 	return 1;
6484 }
6485 
6486 static int
6487 wm_get_swsm_semaphore(struct wm_softc *sc)
6488 {
6489 	int32_t timeout;
6490 	uint32_t swsm;
6491 
6492 	/* Get the FW semaphore. */
6493 	timeout = 1000 + 1; /* XXX */
6494 	while (timeout) {
6495 		swsm = CSR_READ(sc, WMREG_SWSM);
6496 		swsm |= SWSM_SWESMBI;
6497 		CSR_WRITE(sc, WMREG_SWSM, swsm);
6498 		/* if we managed to set the bit we got the semaphore. */
6499 		swsm = CSR_READ(sc, WMREG_SWSM);
6500 		if (swsm & SWSM_SWESMBI)
6501 			break;
6502 
6503 		delay(50);
6504 		timeout--;
6505 	}
6506 
6507 	if (timeout == 0) {
6508 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM semaphore\n");
6509 		/* Release semaphores */
6510 		wm_put_swsm_semaphore(sc);
6511 		return 1;
6512 	}
6513 	return 0;
6514 }
6515 
6516 static void
6517 wm_put_swsm_semaphore(struct wm_softc *sc)
6518 {
6519 	uint32_t swsm;
6520 
6521 	swsm = CSR_READ(sc, WMREG_SWSM);
6522 	swsm &= ~(SWSM_SWESMBI);
6523 	CSR_WRITE(sc, WMREG_SWSM, swsm);
6524 }
6525 
6526 static int
6527 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
6528 {
6529 	uint32_t swfw_sync;
6530 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
6531 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
6532 	int timeout;
6533 
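	/*
	 * SW_FW_SYNC has one software and one firmware bit per resource;
	 * the resource is free only when both are clear, and we claim it
	 * by setting the software bit.  Access to SW_FW_SYNC itself is
	 * guarded by the SWSM semaphore where available.
	 */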
6534 	for (timeout = 0; timeout < 200; timeout++) {
6535 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
6536 			if (wm_get_swsm_semaphore(sc)) {
6537 				aprint_error_dev(sc->sc_dev,
6538 				    "%s: failed to get semaphore\n",
6539 				    __func__);
6540 				return 1;
6541 			}
6542 		}
6543 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
6544 		if ((swfw_sync & (swmask | fwmask)) == 0) {
6545 			swfw_sync |= swmask;
6546 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
6547 			if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6548 				wm_put_swsm_semaphore(sc);
6549 			return 0;
6550 		}
6551 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6552 			wm_put_swsm_semaphore(sc);
6553 		delay(5000);
6554 	}
6555 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
6556 	    device_xname(sc->sc_dev), mask, swfw_sync);
6557 	return 1;
6558 }
6559 
6560 static void
6561 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
6562 {
6563 	uint32_t swfw_sync;
6564 
6565 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
6566 		while (wm_get_swsm_semaphore(sc) != 0)
6567 			continue;
6568 	}
6569 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
6570 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
6571 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
6572 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6573 		wm_put_swsm_semaphore(sc);
6574 }
6575 
6576 static int
6577 wm_get_swfwhw_semaphore(struct wm_softc *sc)
6578 {
6579 	uint32_t ext_ctrl;
6580 	int timeout;
6581 
6582 	for (timeout = 0; timeout < 200; timeout++) {
6583 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6584 		ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
6585 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
6586 
6587 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6588 		if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
6589 			return 0;
6590 		delay(5000);
6591 	}
6592 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
6593 	    device_xname(sc->sc_dev), ext_ctrl);
6594 	return 1;
6595 }
6596 
6597 static void
6598 wm_put_swfwhw_semaphore(struct wm_softc *sc)
6599 {
6600 	uint32_t ext_ctrl;
6601 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6602 	ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
6603 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
6604 }
6605 
6606 static int
6607 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
6608 {
6609 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
6610 	uint8_t bank_high_byte;
6611 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
6612 
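	/*
	 * On ICH10/PCH the valid bank is identified by the high byte of
	 * the signature word in each bank: a value of 10 binary in the
	 * top two bits marks the valid one.
	 */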
6613 	if ((sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
6614 		/* Value of bit 22 corresponds to the flash bank we're on. */
6615 		*bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
6616 	} else {
6617 		wm_read_ich8_byte(sc, act_offset, &bank_high_byte);
6618 		if ((bank_high_byte & 0xc0) == 0x80)
6619 			*bank = 0;
6620 		else {
6621 			wm_read_ich8_byte(sc, act_offset + bank1_offset,
6622 			    &bank_high_byte);
6623 			if ((bank_high_byte & 0xc0) == 0x80)
6624 				*bank = 1;
6625 			else {
6626 				aprint_error_dev(sc->sc_dev,
6627 				    "EEPROM not present\n");
6628 				return -1;
6629 			}
6630 		}
6631 	}
6632 
6633 	return 0;
6634 }
6635 
6636 /******************************************************************************
6637  * Reads a 16-bit word or words from the EEPROM using the ICH8's flash access
6638  * register.
6639  *
6640  * sc - Struct containing variables accessed by shared code
6641  * offset - offset of word in the EEPROM to read
6642  * data - word read from the EEPROM
6643  * words - number of words to read
6644  *****************************************************************************/
6645 static int
6646 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
6647 {
6648 	int32_t  error = 0;
6649 	uint32_t flash_bank = 0;
6650 	uint32_t act_offset = 0;
6651 	uint32_t bank_offset = 0;
6652 	uint16_t word = 0;
6653 	uint16_t i = 0;
6654 
6655 	/* We need to know which is the valid flash bank.  In the event
6656 	 * that we didn't allocate eeprom_shadow_ram, we may not be
6657 	 * managing flash_bank.  So it cannot be trusted and needs
6658 	 * to be updated with each read.
6659 	 */
6660 	error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
6661 	if (error) {
6662 		aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
6663 		    __func__);
6664 		return error;
6665 	}
6666 
6667 	/* Adjust the offset if we're on bank 1; the bank size is in words, hence the * 2 */
6668 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
6669 
6670 	error = wm_get_swfwhw_semaphore(sc);
6671 	if (error) {
6672 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6673 		    __func__);
6674 		return error;
6675 	}
6676 
6677 	for (i = 0; i < words; i++) {
6678 		/* The NVM part needs a byte offset, hence * 2 */
6679 		act_offset = bank_offset + ((offset + i) * 2);
6680 		error = wm_read_ich8_word(sc, act_offset, &word);
6681 		if (error) {
6682 			aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
6683 			    __func__);
6684 			break;
6685 		}
6686 		data[i] = word;
6687 	}
6688 
6689 	wm_put_swfwhw_semaphore(sc);
6690 	return error;
6691 }
6692 
6693 /******************************************************************************
6694  * This function does initial flash setup so that a new read/write/erase cycle
6695  * can be started.
6696  *
6697  * sc - The pointer to the hw structure
6698  ****************************************************************************/
6699 static int32_t
6700 wm_ich8_cycle_init(struct wm_softc *sc)
6701 {
6702 	uint16_t hsfsts;
6703 	int32_t error = 1;
6704 	int32_t i     = 0;
6705 
6706 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6707 
6708 	/* Maybe check the Flash Descriptor Valid bit in Hw status */
6709 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
6710 		return error;
6711 	}
6712 
6713 	/* Clear FCERR in Hw status by writing 1 */
6714 	/* Clear DAEL in Hw status by writing a 1 */
6715 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
6716 
6717 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6718 
6719 	/*
6720 	 * Either we should have a hardware SPI cycle-in-progress bit to check
6721 	 * against, in order to start a new cycle, or the FDONE bit should be
6722 	 * changed in the hardware so that it is 1 after hardware reset, which
6723 	 * can then be used as an indication whether a cycle is in progress or
6724 	 * has been completed.  We should also have some software semaphore
6725 	 * mechanism to guard FDONE or the cycle-in-progress bit so that two
6726 	 * threads' accesses to those bits can be serialized, or a way so that
6727 	 * two threads don't start the cycle at the same time
6728 	 */
6729 
6730 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
6731 		/*
6732 		 * There is no cycle running at present, so we can start a
6733 		 * cycle
6734 		 */
6735 
6736 		/* Begin by setting Flash Cycle Done. */
6737 		hsfsts |= HSFSTS_DONE;
6738 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6739 		error = 0;
6740 	} else {
6741 		/*
6742 		 * otherwise poll for some time so the current cycle has a
6743 		 * chance to end before giving up.
6744 		 */
6745 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
6746 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6747 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
6748 				error = 0;
6749 				break;
6750 			}
6751 			delay(1);
6752 		}
6753 		if (error == 0) {
6754 			/*
6755 			 * The previous cycle finished before we gave up;
6756 			 * now set the Flash Cycle Done.
6757 			 */
6758 			hsfsts |= HSFSTS_DONE;
6759 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6760 		}
6761 	}
6762 	return error;
6763 }
6764 
6765 /******************************************************************************
6766  * This function starts a flash cycle and waits for its completion
6767  *
6768  * sc - The pointer to the hw structure
6769  ****************************************************************************/
6770 static int32_t
6771 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
6772 {
6773 	uint16_t hsflctl;
6774 	uint16_t hsfsts;
6775 	int32_t error = 1;
6776 	uint32_t i = 0;
6777 
6778 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
6779 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
6780 	hsflctl |= HSFCTL_GO;
6781 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
6782 
6783 	/* wait till FDONE bit is set to 1 */
6784 	do {
6785 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6786 		if (hsfsts & HSFSTS_DONE)
6787 			break;
6788 		delay(1);
6789 		i++;
6790 	} while (i < timeout);
6791 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
6792 		error = 0;
6793 
6794 	return error;
6795 }
6796 
6797 /******************************************************************************
6798  * Reads a byte or word from the NVM using the ICH8 flash access registers.
6799  *
6800  * sc - The pointer to the hw structure
6801  * index - The index of the byte or word to read.
6802  * size - Size of data to read, 1=byte 2=word
6803  * data - Pointer to the word to store the value read.
6804  *****************************************************************************/
6805 static int32_t
6806 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
6807     uint32_t size, uint16_t* data)
6808 {
6809 	uint16_t hsfsts;
6810 	uint16_t hsflctl;
6811 	uint32_t flash_linear_address;
6812 	uint32_t flash_data = 0;
6813 	int32_t error = 1;
6814 	int32_t count = 0;
6815 
6816 	if (size < 1 || size > 2 || data == NULL ||
6817 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
6818 		return error;
6819 
6820 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
6821 	    sc->sc_ich8_flash_base;
6822 
6823 	do {
6824 		delay(1);
6825 		/* Steps */
6826 		error = wm_ich8_cycle_init(sc);
6827 		if (error)
6828 			break;
6829 
6830 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
6831 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
6832 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
6833 		    & HSFCTL_BCOUNT_MASK;
6834 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
6835 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
6836 
6837 		/*
6838 		 * Write the last 24 bits of index into Flash Linear address
6839 		 * field in Flash Address
6840 		 */
6841 		/* TODO: TBD maybe check the index against the size of flash */
6842 
6843 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
6844 
6845 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
6846 
6847 		/*
6848 		 * If FCERR is set, clear it and retry the whole sequence
6849 		 * a few more times; otherwise read the result out of
6850 		 * Flash Data0, least significant byte first.
6852 		 */
6853 		if (error == 0) {
6854 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
6855 			if (size == 1)
6856 				*data = (uint8_t)(flash_data & 0x000000FF);
6857 			else if (size == 2)
6858 				*data = (uint16_t)(flash_data & 0x0000FFFF);
6859 			break;
6860 		} else {
6861 			/*
6862 			 * If we've gotten here, then things are probably
6863 			 * completely hosed, but if the error condition is
6864 			 * detected, it won't hurt to give it another try...
6865 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
6866 			 */
6867 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6868 			if (hsfsts & HSFSTS_ERR) {
6869 				/* Repeat for some time before giving up. */
6870 				continue;
6871 			} else if ((hsfsts & HSFSTS_DONE) == 0)
6872 				break;
6873 		}
6874 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
6875 
6876 	return error;
6877 }
6878 
6879 /******************************************************************************
6880  * Reads a single byte from the NVM using the ICH8 flash access registers.
6881  *
6882  * sc - pointer to wm_hw structure
6883  * index - The index of the byte to read.
6884  * data - Pointer to a byte to store the value read.
6885  *****************************************************************************/
6886 static int32_t
6887 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
6888 {
6889 	int32_t status;
6890 	uint16_t word = 0;
6891 
6892 	status = wm_read_ich8_data(sc, index, 1, &word);
6893 	if (status == 0)
6894 		*data = (uint8_t)word;
6895 
6896 	return status;
6897 }
6898 
6899 /******************************************************************************
6900  * Reads a word from the NVM using the ICH8 flash access registers.
6901  *
6902  * sc - pointer to wm_hw structure
6903  * index - The starting byte index of the word to read.
6904  * data - Pointer to a word to store the value read.
6905  *****************************************************************************/
6906 static int32_t
6907 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
6908 {
6909 	int32_t status;
6910 
6911 	status = wm_read_ich8_data(sc, index, 2, data);
6912 	return status;
6913 }
6914 
6915 static int
6916 wm_check_mng_mode(struct wm_softc *sc)
6917 {
6918 	int rv;
6919 
6920 	switch (sc->sc_type) {
6921 	case WM_T_ICH8:
6922 	case WM_T_ICH9:
6923 	case WM_T_ICH10:
6924 	case WM_T_PCH:
6925 		rv = wm_check_mng_mode_ich8lan(sc);
6926 		break;
6927 	case WM_T_82574:
6928 	case WM_T_82583:
6929 		rv = wm_check_mng_mode_82574(sc);
6930 		break;
6931 	case WM_T_82571:
6932 	case WM_T_82572:
6933 	case WM_T_82573:
6934 	case WM_T_80003:
6935 		rv = wm_check_mng_mode_generic(sc);
6936 		break;
6937 	default:
6938 		/* nothing to do */
6939 		rv = 0;
6940 		break;
6941 	}
6942 
6943 	return rv;
6944 }
6945 
6946 static int
6947 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
6948 {
6949 	uint32_t fwsm;
6950 
6951 	fwsm = CSR_READ(sc, WMREG_FWSM);
6952 
6953 	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
6954 		return 1;
6955 
6956 	return 0;
6957 }
6958 
6959 static int
6960 wm_check_mng_mode_82574(struct wm_softc *sc)
6961 {
6962 	uint16_t data;
6963 
6964 	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
6965 
6966 	if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
6967 		return 1;
6968 
6969 	return 0;
6970 }
6971 
6972 static int
6973 wm_check_mng_mode_generic(struct wm_softc *sc)
6974 {
6975 	uint32_t fwsm;
6976 
6977 	fwsm = CSR_READ(sc, WMREG_FWSM);
6978 
6979 	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
6980 		return 1;
6981 
6982 	return 0;
6983 }
6984 
6985 static int
6986 wm_enable_mng_pass_thru(struct wm_softc *sc)
6987 {
6988 	uint32_t manc, fwsm, factps;
6989 
6990 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
6991 		return 0;
6992 
6993 	manc = CSR_READ(sc, WMREG_MANC);
6994 
6995 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
6996 		device_xname(sc->sc_dev), manc));
6997 	if (((manc & MANC_RECV_TCO_EN) == 0)
6998 	    || ((manc & MANC_EN_MAC_ADDR_FILTER) == 0))
6999 		return 0;
7000 
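	/*
	 * Pass-through additionally requires either the ARC subsystem
	 * in iAMT mode with manageability clock gating off, or SMBus
	 * enabled without ASF.
	 */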
7001 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
7002 		fwsm = CSR_READ(sc, WMREG_FWSM);
7003 		factps = CSR_READ(sc, WMREG_FACTPS);
7004 		if (((factps & FACTPS_MNGCG) == 0)
7005 		    && ((fwsm & FWSM_MODE_MASK)
7006 			== (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
7007 			return 1;
7008 	} else if (((manc & MANC_SMBUS_EN) != 0)
7009 	    && ((manc & MANC_ASF_EN) == 0))
7010 		return 1;
7011 
7012 	return 0;
7013 }
7014 
7015 static int
7016 wm_check_reset_block(struct wm_softc *sc)
7017 {
7018 	uint32_t reg;
7019 
7020 	switch (sc->sc_type) {
7021 	case WM_T_ICH8:
7022 	case WM_T_ICH9:
7023 	case WM_T_ICH10:
7024 	case WM_T_PCH:
7025 		reg = CSR_READ(sc, WMREG_FWSM);
7026 		if ((reg & FWSM_RSPCIPHY) != 0)
7027 			return 0;
7028 		else
7029 			return -1;
7030 		break;
7031 	case WM_T_82571:
7032 	case WM_T_82572:
7033 	case WM_T_82573:
7034 	case WM_T_82574:
7035 	case WM_T_82583:
7036 	case WM_T_80003:
7037 		reg = CSR_READ(sc, WMREG_MANC);
7038 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
7039 			return -1;
7040 		else
7041 			return 0;
7042 		break;
7043 	default:
7044 		/* no problem */
7045 		break;
7046 	}
7047 
7048 	return 0;
7049 }
7050 
7051 static void
7052 wm_get_hw_control(struct wm_softc *sc)
7053 {
7054 	uint32_t reg;
7055 
7056 	switch (sc->sc_type) {
7057 	case WM_T_82573:
7058 		reg = CSR_READ(sc, WMREG_SWSM);
7059 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
7060 		break;
7061 	case WM_T_82571:
7062 	case WM_T_82572:
7063 	case WM_T_82574:
7064 	case WM_T_82583:
7065 	case WM_T_80003:
7066 	case WM_T_ICH8:
7067 	case WM_T_ICH9:
7068 	case WM_T_ICH10:
7069 	case WM_T_PCH:
7070 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
7071 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
7072 		break;
7073 	default:
7074 		break;
7075 	}
7076 }
7077 
7078 static void
7079 wm_release_hw_control(struct wm_softc *sc)
7080 {
7081 	uint32_t reg;
7082 
7083 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
7084 		return;
7085 
7086 	if (sc->sc_type == WM_T_82573) {
7087 		reg = CSR_READ(sc, WMREG_SWSM);
7088 		reg &= ~SWSM_DRV_LOAD;
7089 		CSR_WRITE(sc, WMREG_SWSM, reg);
7090 	} else {
7091 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
7092 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
7093 	}
7094 }
7095 
7096 /* XXX Currently TBI only */
7097 static int
7098 wm_check_for_link(struct wm_softc *sc)
7099 {
7100 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7101 	uint32_t rxcw;
7102 	uint32_t ctrl;
7103 	uint32_t status;
7104 	uint32_t sig;
7105 
7106 	rxcw = CSR_READ(sc, WMREG_RXCW);
7107 	ctrl = CSR_READ(sc, WMREG_CTRL);
7108 	status = CSR_READ(sc, WMREG_STATUS);
7109 
7110 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
7111 
7112 	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
7113 		device_xname(sc->sc_dev), __func__,
7114 		((ctrl & CTRL_SWDPIN(1)) == sig),
7115 		((status & STATUS_LU) != 0),
7116 		((rxcw & RXCW_C) != 0)
7117 		    ));
7118 
7119 	/*
7120 	 * SWDPIN   LU RXCW
7121 	 *      0    0    0
7122 	 *      0    0    1	(should not happen)
7123 	 *      0    1    0	(should not happen)
7124 	 *      0    1    1	(should not happen)
7125 	 *      1    0    0	Disable autonego and force linkup
7126 	 *      1    0    1	got /C/ but not linkup yet
7127 	 *      1    1    0	(linkup)
7128 	 *      1    1    1	If IFM_AUTO, back to autonego
7129 	 *
7130 	 */
7131 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
7132 	    && ((status & STATUS_LU) == 0)
7133 	    && ((rxcw & RXCW_C) == 0)) {
7134 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
7135 			__func__));
7136 		sc->sc_tbi_linkup = 0;
7137 		/* Disable auto-negotiation in the TXCW register */
7138 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
7139 
7140 		/*
7141 		 * Force link-up and also force full-duplex.
7142 		 *
7143 		 * NOTE: TFCE and RFCE in CTRL were updated automatically,
7144 		 * so we should update sc->sc_ctrl
7145 		 */
7146 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
7147 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7148 	} else if (((status & STATUS_LU) != 0)
7149 	    && ((rxcw & RXCW_C) != 0)
7150 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
7151 		sc->sc_tbi_linkup = 1;
7152 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
7153 			__func__));
7154 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7155 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
7156 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
7157 	    && ((rxcw & RXCW_C) != 0)) {
7158 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
7159 	} else {
7160 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
7161 			status));
7162 	}
7163 
7164 	return 0;
7165 }
7166 
7167 /* Work-around for 82566 Kumeran PCS lock loss */
7168 static void
7169 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
7170 {
7171 	int miistatus, active, i;
7172 	int reg;
7173 
7174 	miistatus = sc->sc_mii.mii_media_status;
7175 
7176 	/* If the link is not up, do nothing */
7177 	if ((miistatus & IFM_ACTIVE) == 0)
7178 		return;
7179 
7180 	active = sc->sc_mii.mii_media_active;
7181 
7182 	/* Nothing to do if the link is other than 1Gbps */
7183 	if (IFM_SUBTYPE(active) != IFM_1000_T)
7184 		return;
7185 
7186 	for (i = 0; i < 10; i++) {
7187 		/* read twice */
7188 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
7189 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
7190 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
7191 			goto out;	/* GOOD! */
7192 
7193 		/* Reset the PHY */
7194 		wm_gmii_reset(sc);
7195 		delay(5*1000);
7196 	}
7197 
7198 	/* Disable GigE link negotiation */
7199 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
7200 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
7201 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7202 
7203 	/*
7204 	 * Call gig speed drop workaround on Gig disable before accessing
7205 	 * any PHY registers.
7206 	 */
7207 	wm_gig_downshift_workaround_ich8lan(sc);
7208 
7209 out:
7210 	return;
7211 }
7212 
7213 /* WOL from S5 stops working */
7214 static void
7215 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
7216 {
7217 	uint16_t kmrn_reg;
7218 
7219 	/* Only for igp3 */
7220 	if (sc->sc_phytype == WMPHY_IGP_3) {
7221 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
7222 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
7223 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
7224 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
7225 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
7226 	}
7227 }
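
/*
 * The routine above pulses the near-end loopback diagnostic bit (set,
 * then clear) with a KMRN read-modify-write.  A minimal sketch of that
 * idiom factored into a helper; wm_kmrn_pulse_bit is hypothetical and
 * not part of this driver, so it is not compiled in.
 */
#if 0
static void
wm_kmrn_pulse_bit(struct wm_softc *sc, int offset, uint16_t bit)
{
	uint16_t reg;

	reg = wm_kmrn_readreg(sc, offset);
	wm_kmrn_writereg(sc, offset, reg | bit);	/* assert */
	wm_kmrn_writereg(sc, offset, reg & ~bit);	/* deassert */
}
#endif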
7228 
7229 #ifdef WM_WOL
7230 /* Power down workaround on D3 */
7231 static void
7232 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
7233 {
7234 	uint32_t reg;
7235 	int i;
7236 
7237 	for (i = 0; i < 2; i++) {
7238 		/* Disable link */
7239 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
7240 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
7241 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7242 
7243 		/*
7244 		 * Call gig speed drop workaround on Gig disable before
7245 		 * accessing any PHY registers
7246 		 */
7247 		if (sc->sc_type == WM_T_ICH8)
7248 			wm_gig_downshift_workaround_ich8lan(sc);
7249 
7250 		/* Write VR power-down enable */
7251 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
7252 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
7253 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
7254 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
7255 
7256 		/* Read it back and test */
7257 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
7258 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
7259 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
7260 			break;
7261 
7262 		/* Issue PHY reset and repeat at most one more time */
7263 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7264 	}
7265 }
7266 #endif /* WM_WOL */
7267 
7268 /*
7269  * Workaround for pch's PHYs
7270  * XXX should be moved to new PHY driver?
7271  */
7272 static void
7273 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
7274 {
7275 
7276 	/* XXX not yet implemented: (PCH rev. 2) && (82577 && (PHY rev 2 or 3)) */
7277 
7278 	/* XXX not yet implemented: (82577 && (PHY rev 1 or 2)) || (82578 && PHY rev 1) */
7279 
7280 	/* 82578 */
7281 	if (sc->sc_phytype == WMPHY_82578) {
7282 		/* PCH rev. < 3 */
7283 		if (sc->sc_rev < 3) {
7284 			/* XXX 6 bit shift? Why? Is it page2? */
7285 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
7286 			    0x66c0);
7287 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
7288 			    0xffff);
7289 		}
7290 
7291 		/* XXX phy rev. < 2 */
7292 	}
7293 
7294 	/* Select page 0 */
7295 
7296 	/* XXX acquire semaphore */
7297 	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
7298 	/* XXX release semaphore */
7299 
7300 	/*
7301 	 * Configure the K1 Si workaround during phy reset assuming there is
7302 	 * link so that it disables K1 if link is in 1Gbps.
7303 	 */
7304 	wm_k1_gig_workaround_hv(sc, 1);
7305 }
7306 
7307 static void
7308 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
7309 {
7310 	int k1_enable = sc->sc_nvm_k1_enabled;
7311 
7312 	/* XXX acquire semaphore */
7313 
7314 	if (link) {
7315 		k1_enable = 0;
7316 
7317 		/* Link stall fix for link up */
7318 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
7319 	} else {
7320 		/* Link stall fix for link down */
7321 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
7322 	}
7323 
7324 	wm_configure_k1_ich8lan(sc, k1_enable);
7325 
7326 	/* XXX release semaphore */
7327 }
7328 
7329 static void
7330 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
7331 {
7332 	uint32_t ctrl, ctrl_ext, tmp;
7333 	uint16_t kmrn_reg;
7334 
7335 	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
7336 
7337 	if (k1_enable)
7338 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
7339 	else
7340 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
7341 
7342 	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
7343 
7344 	delay(20);
7345 
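	/*
	 * Briefly force the MAC speed (speed-select bits cleared, the
	 * force-speed and speed-bypass bits set), then restore the
	 * original CTRL/CTRL_EXT values; the 20us delays let each step
	 * settle so the new K1 setting takes effect.
	 */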
7346 	ctrl = CSR_READ(sc, WMREG_CTRL);
7347 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
7348 
7349 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
7350 	tmp |= CTRL_FRCSPD;
7351 
7352 	CSR_WRITE(sc, WMREG_CTRL, tmp);
7353 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
7354 	delay(20);
7355 
7356 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
7357 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
7358 	delay(20);
7359 }
7360 
7361 static void
7362 wm_set_pcie_completion_timeout(struct wm_softc *sc)
7363 {
7364 	uint32_t gcr;
7365 	pcireg_t ctrl2;
7366 
7367 	gcr = CSR_READ(sc, WMREG_GCR);
7368 
7369 	/* Only take action if timeout value is defaulted to 0 */
7370 	/* Only take action if the timeout value is at its default of 0 */
7371 		goto out;
7372 
7373 	if ((gcr & GCR_CAP_VER2) == 0) {
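	/*
	 * Without PCIe capability version 2, the only knob is GCR, which
	 * supports at most a 10ms timeout.  With version 2, program a
	 * 16ms timeout through the Device Control 2 register in PCI
	 * config space instead.
	 */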
7374 		gcr |= GCR_CMPL_TMOUT_10MS;
7375 		goto out;
7376 	}
7377 
7378 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
7379 	    sc->sc_pcixe_capoff + PCI_PCIE_DCSR2);
7380 	ctrl2 |= WM_PCI_PCIE_DCSR2_16MS;
7381 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
7382 	    sc->sc_pcixe_capoff + PCI_PCIE_DCSR2, ctrl2);
7383 
7384 out:
7385 	/* Disable completion timeout resend */
7386 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
7387 
7388 	CSR_WRITE(sc, WMREG_GCR, gcr);
7389 }
7390 
7391 /* Special case: the 82575 needs extra manual initialization ... */
7392 static void
7393 wm_reset_init_script_82575(struct wm_softc *sc)
7394 {
7395 	/*
7396 	 * Remark: this is untested code - we have no board without EEPROM.
7397 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
7398 	 */
7399 
7400 	/* SerDes configuration via SERDESCTRL */
7401 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
7402 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
7403 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
7404 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
7405 
7406 	/* CCM configuration via CCMCTL register */
7407 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
7408 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
7409 
7410 	/* PCIe lanes configuration */
7411 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
7412 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
7413 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
7414 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
7415 
7416 	/* PCIe PLL Configuration */
7417 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
7418 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
7419 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
7420 }
7421 
7422 static void
7423 wm_init_manageability(struct wm_softc *sc)
7424 {
7425 
7426 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
7427 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
7428 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
7429 
7430 		/* Disable hardware interception of ARP */
7431 		manc &= ~MANC_ARP_EN;
7432 
7433 		/* Enable receiving management packets to the host */
7434 		if (sc->sc_type >= WM_T_82571) {
7435 			manc |= MANC_EN_MNG2HOST;
7436 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
7437 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
7438 
7439 		}
7440 
7441 		CSR_WRITE(sc, WMREG_MANC, manc);
7442 	}
7443 }
7444 
7445 static void
7446 wm_release_manageability(struct wm_softc *sc)
7447 {
7448 
7449 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
7450 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
7451 
7452 		if (sc->sc_type >= WM_T_82571)
7453 			manc &= ~MANC_EN_MNG2HOST;
7454 
7455 		CSR_WRITE(sc, WMREG_MANC, manc);
7456 	}
7457 }
7458 
7459 static void
7460 wm_get_wakeup(struct wm_softc *sc)
7461 {
7462 
7463 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
7464 	switch (sc->sc_type) {
7465 	case WM_T_82573:
7466 	case WM_T_82583:
7467 		sc->sc_flags |= WM_F_HAS_AMT;
7468 		/* FALLTHROUGH */
7469 	case WM_T_80003:
7470 	case WM_T_82541:
7471 	case WM_T_82547:
7472 	case WM_T_82571:
7473 	case WM_T_82572:
7474 	case WM_T_82574:
7475 	case WM_T_82575:
7476 	case WM_T_82576:
7477 #if 0 /* XXX */
7478 	case WM_T_82580:
7479 	case WM_T_82580ER:
7480 #endif
7481 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
7482 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
7483 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
7484 		break;
7485 	case WM_T_ICH8:
7486 	case WM_T_ICH9:
7487 	case WM_T_ICH10:
7488 	case WM_T_PCH:
7489 		sc->sc_flags |= WM_F_HAS_AMT;
7490 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
7491 		break;
7492 	default:
7493 		break;
7494 	}
7495 
7496 	/* 1: HAS_MANAGE */
7497 	if (wm_enable_mng_pass_thru(sc) != 0)
7498 		sc->sc_flags |= WM_F_HAS_MANAGE;
7499 
7500 #ifdef WM_DEBUG
7501 	printf("\n");
7502 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
7503 		printf("HAS_AMT,");
7504 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
7505 		printf("ARC_SUBSYS_VALID,");
7506 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
7507 		printf("ASF_FIRMWARE_PRES,");
7508 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
7509 		printf("HAS_MANAGE,");
7510 	printf("\n");
7511 #endif
7512 	/*
7513 	 * Note that the WOL flags are set after the EEPROM-related reset
7514 	 * code has run.
7515 	 */
7516 }
7517 
7518 #ifdef WM_WOL
7519 /* WOL in the newer chipset interfaces (pchlan) */
7520 static void
7521 wm_enable_phy_wakeup(struct wm_softc *sc)
7522 {
7523 #if 0
7524 	uint16_t preg;
7525 
7526 	/* Copy MAC RARs to PHY RARs */
7527 
7528 	/* Copy MAC MTA to PHY MTA */
7529 
7530 	/* Configure PHY Rx Control register */
7531 
7532 	/* Enable PHY wakeup in MAC register */
7533 
7534 	/* Configure and enable PHY wakeup in PHY registers */
7535 
7536 	/* Activate PHY wakeup */
7537 
7538 	/* XXX */
7539 #endif
7540 }
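
/*
 * A rough sketch of the sequence the stub above outlines, using the
 * PHY-side copies of the wakeup registers.  BM_WUFC and BM_WUC are
 * assumed register names that this revision does not define, so the
 * sketch is not compiled in.
 */
#if 0
static void
wm_enable_phy_wakeup_sketch(struct wm_softc *sc)
{
	int preg;

	/* Enable the magic-packet filter in the PHY's copy of WUFC */
	preg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, BM_WUFC);
	sc->sc_mii.mii_writereg(sc->sc_dev, 1, BM_WUFC, preg | WUFC_MAG);

	/* Activate PHY wakeup through the PHY's copy of WUC */
	sc->sc_mii.mii_writereg(sc->sc_dev, 1, BM_WUC, WUC_PME_EN);
}
#endif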
7541 
7542 static void
7543 wm_enable_wakeup(struct wm_softc *sc)
7544 {
7545 	uint32_t reg, pmreg;
7546 	pcireg_t pmode;
7547 
7548 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
7549 		&pmreg, NULL) == 0)
7550 		return;
7551 
7552 	/* Advertise the wakeup capability */
7553 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
7554 	    | CTRL_SWDPIN(3));
7555 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
7556 
7557 	/* ICH workaround */
7558 	switch (sc->sc_type) {
7559 	case WM_T_ICH8:
7560 	case WM_T_ICH9:
7561 	case WM_T_ICH10:
7562 	case WM_T_PCH:
7563 		/* Disable gig during WOL */
7564 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
7565 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
7566 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7567 		if (sc->sc_type == WM_T_PCH)
7568 			wm_gmii_reset(sc);
7569 
7570 		/* Power down workaround */
7571 		if (sc->sc_phytype == WMPHY_82577) {
7572 			struct mii_softc *child;
7573 
7574 			/* Assume that the PHY is copper */
7575 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
7576 			if (child->mii_mpd_rev <= 2)
7577 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
7578 				    (768 << 5) | 25, 0x0444); /* page 768, reg 25 */
7579 		}
7580 		break;
7581 	default:
7582 		break;
7583 	}
7584 
7585 	/* Keep the laser running on fiber adapters */
7586 	if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
7587 	    || ((sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0)) {
7588 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
7589 		reg |= CTRL_EXT_SWDPIN(3);
7590 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7591 	}
7592 
7593 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
7594 #if 0	/* for the multicast packet */
7595 	reg |= WUFC_MC;
7596 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
7597 #endif
7598 
7599 	if (sc->sc_type == WM_T_PCH) {
7600 		wm_enable_phy_wakeup(sc);
7601 	} else {
7602 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
7603 		CSR_WRITE(sc, WMREG_WUFC, reg);
7604 	}
7605 
7606 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
7607 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
7608 		    && (sc->sc_phytype == WMPHY_IGP_3))
7609 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
7610 
7611 	/* Request PME */
7612 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
7613 #if 0
7614 	/* Disable WOL */
7615 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
7616 #else
7617 	/* For WOL: enable PME (writing 1 to PME_STS clears stale status) */
7618 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
7619 #endif
7620 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
7621 }
7622 #endif /* WM_WOL */
7623 
7624 static bool
7625 wm_suspend(device_t self, const pmf_qual_t *qual)
7626 {
7627 	struct wm_softc *sc = device_private(self);
7628 
7629 	wm_release_manageability(sc);
7630 	wm_release_hw_control(sc);
7631 #ifdef WM_WOL
7632 	wm_enable_wakeup(sc);
7633 #endif
7634 
7635 	return true;
7636 }
7637 
7638 static bool
7639 wm_resume(device_t self, const pmf_qual_t *qual)
7640 {
7641 	struct wm_softc *sc = device_private(self);
7642 
7643 	wm_init_manageability(sc);
7644 
7645 	return true;
7646 }
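
/*
 * A sketch of how these handlers would typically be registered from the
 * attach routine with NetBSD's pmf(9) framework; the surrounding attach
 * context (the "self" device) is assumed here.
 */
#if 0
	if (!pmf_device_register(self, wm_suspend, wm_resume))
		aprint_error_dev(self, "couldn't establish power handler\n");
#endif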
7647