1 /*	$NetBSD: if_wm.c,v 1.217 2010/12/14 02:51:46 dyoung Exp $	*/
2 
3 /*
4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*******************************************************************************
39 
40   Copyright (c) 2001-2005, Intel Corporation
41   All rights reserved.
42 
43   Redistribution and use in source and binary forms, with or without
44   modification, are permitted provided that the following conditions are met:
45 
46    1. Redistributions of source code must retain the above copyright notice,
47       this list of conditions and the following disclaimer.
48 
49    2. Redistributions in binary form must reproduce the above copyright
50       notice, this list of conditions and the following disclaimer in the
51       documentation and/or other materials provided with the distribution.
52 
53    3. Neither the name of the Intel Corporation nor the names of its
54       contributors may be used to endorse or promote products derived from
55       this software without specific prior written permission.
56 
57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67   POSSIBILITY OF SUCH DAMAGE.
68 
69 *******************************************************************************/
70 /*
71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72  *
73  * TODO (in order of importance):
74  *
75  *	- Rework how parameters are loaded from the EEPROM.
76  */
77 
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.217 2010/12/14 02:51:46 dyoung Exp $");
80 
81 #include "rnd.h"
82 
83 #include <sys/param.h>
84 #include <sys/systm.h>
85 #include <sys/callout.h>
86 #include <sys/mbuf.h>
87 #include <sys/malloc.h>
88 #include <sys/kernel.h>
89 #include <sys/socket.h>
90 #include <sys/ioctl.h>
91 #include <sys/errno.h>
92 #include <sys/device.h>
93 #include <sys/queue.h>
94 #include <sys/syslog.h>
95 
96 #if NRND > 0
97 #include <sys/rnd.h>
98 #endif
99 
100 #include <net/if.h>
101 #include <net/if_dl.h>
102 #include <net/if_media.h>
103 #include <net/if_ether.h>
104 
105 #include <net/bpf.h>
106 
107 #include <netinet/in.h>			/* XXX for struct ip */
108 #include <netinet/in_systm.h>		/* XXX for struct ip */
109 #include <netinet/ip.h>			/* XXX for struct ip */
110 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
111 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
112 
113 #include <sys/bus.h>
114 #include <sys/intr.h>
115 #include <machine/endian.h>
116 
117 #include <dev/mii/mii.h>
118 #include <dev/mii/miivar.h>
119 #include <dev/mii/miidevs.h>
120 #include <dev/mii/mii_bitbang.h>
121 #include <dev/mii/ikphyreg.h>
122 #include <dev/mii/igphyreg.h>
123 #include <dev/mii/igphyvar.h>
124 #include <dev/mii/inbmphyreg.h>
125 
126 #include <dev/pci/pcireg.h>
127 #include <dev/pci/pcivar.h>
128 #include <dev/pci/pcidevs.h>
129 
130 #include <dev/pci/if_wmreg.h>
131 #include <dev/pci/if_wmvar.h>
132 
133 #ifdef WM_DEBUG
134 #define	WM_DEBUG_LINK		0x01
135 #define	WM_DEBUG_TX		0x02
136 #define	WM_DEBUG_RX		0x04
137 #define	WM_DEBUG_GMII		0x08
138 #define	WM_DEBUG_MANAGE		0x10
139 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
140     | WM_DEBUG_MANAGE;
141 
142 #define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
143 #else
144 #define	DPRINTF(x, y)	/* nothing */
145 #endif /* WM_DEBUG */
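/*
 * Example usage (illustrative only): the second argument carries its
 * own parentheses so DPRINTF can forward a complete printf argument
 * list:
 *
 *	DPRINTF(WM_DEBUG_TX, ("%s: TX: ...", device_xname(sc->sc_dev)));
 */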
146 
147 /*
148  * Transmit descriptor list size.  Due to errata, we can only have
149  * 256 hardware descriptors in the ring on < 82544, but we use 4096
150  * on >= 82544.  We tell the upper layers that they can queue a lot
151  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
152  * of them at a time.
153  *
154  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
155  * chains containing many small mbufs have been observed in zero-copy
156  * situations with jumbo frames.
157  */
158 #define	WM_NTXSEGS		256
159 #define	WM_IFQUEUELEN		256
160 #define	WM_TXQUEUELEN_MAX	64
161 #define	WM_TXQUEUELEN_MAX_82547	16
162 #define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
163 #define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
164 #define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
165 #define	WM_NTXDESC_82542	256
166 #define	WM_NTXDESC_82544	4096
167 #define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
168 #define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
169 #define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
170 #define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
171 #define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
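/*
 * A small worked example of the masking above: because the ring sizes
 * are powers of two, wrap-around is a single AND rather than a modulo.
 * With WM_NTXDESC(sc) == 4096:
 *
 *	WM_NEXTTX(sc, 4095) == ((4095 + 1) & 4095) == 0
 */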
172 
173 #define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */
174 
175 /*
176  * Receive descriptor list size.  We have one Rx buffer for normal
177  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
178  * packet.  We allocate 256 receive descriptors, each with a 2k
179  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
180  */
181 #define	WM_NRXDESC		256
182 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
183 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
184 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
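/*
 * Worked example of the sizing comment above, assuming a 9018-byte
 * maximum jumbo frame (9000-byte payload plus Ethernet header and
 * CRC): each frame needs howmany(9018, 2048) = 5 buffers, and a
 * 256-entry ring then holds 256 / 5 = 51 such frames -- hence "room
 * for 50", with one packet's worth of slack.
 */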
185 
186 /*
187  * Control structures are DMA'd to the i8254x chip.  We allocate them in
188  * a single clump that maps to a single DMA segment to make several things
189  * easier.
190  */
191 struct wm_control_data_82544 {
192 	/*
193 	 * The receive descriptors.
194 	 */
195 	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
196 
197 	/*
198 	 * The transmit descriptors.  Put these at the end, because
199 	 * we might use a smaller number of them.
200 	 */
201 	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
202 };
203 
204 struct wm_control_data_82542 {
205 	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
206 	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
207 };
208 
209 #define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
210 #define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
211 #define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
212 
213 /*
214  * Software state for transmit jobs.
215  */
216 struct wm_txsoft {
217 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
218 	bus_dmamap_t txs_dmamap;	/* our DMA map */
219 	int txs_firstdesc;		/* first descriptor in packet */
220 	int txs_lastdesc;		/* last descriptor in packet */
221 	int txs_ndesc;			/* # of descriptors used */
222 };
223 
224 /*
225  * Software state for receive buffers.  Each descriptor gets a
226  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
227  * more than one buffer, we chain them together.
228  */
229 struct wm_rxsoft {
230 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
231 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
232 };
233 
234 #define WM_LINKUP_TIMEOUT	50
235 
236 static uint16_t swfwphysem[] = {
237 	SWFW_PHY0_SM,
238 	SWFW_PHY1_SM,
239 	SWFW_PHY2_SM,
240 	SWFW_PHY3_SM
241 };
242 
243 /*
244  * Software state per device.
245  */
246 struct wm_softc {
247 	device_t sc_dev;		/* generic device information */
248 	bus_space_tag_t sc_st;		/* bus space tag */
249 	bus_space_handle_t sc_sh;	/* bus space handle */
250 	bus_size_t sc_ss;		/* bus space size */
251 	bus_space_tag_t sc_iot;		/* I/O space tag */
252 	bus_space_handle_t sc_ioh;	/* I/O space handle */
253 	bus_size_t sc_ios;		/* I/O space size */
254 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
255 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
256 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
257 
258 	struct ethercom sc_ethercom;	/* ethernet common data */
259 	struct mii_data sc_mii;		/* MII/media information */
260 
261 	pci_chipset_tag_t sc_pc;
262 	pcitag_t sc_pcitag;
263 	int sc_bus_speed;		/* PCI/PCIX bus speed */
264 	int sc_pcixe_capoff;		/* PCI[Xe] capability register offset */
265 
266 	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
267 	wm_chip_type sc_type;		/* MAC type */
268 	int sc_rev;			/* MAC revision */
269 	wm_phy_type sc_phytype;		/* PHY type */
270 	int sc_funcid;			/* unit number of the chip (0 to 3) */
271 	int sc_flags;			/* flags; see below */
272 	int sc_if_flags;		/* last if_flags */
273 	int sc_flowflags;		/* 802.3x flow control flags */
274 	int sc_align_tweak;
275 
276 	void *sc_ih;			/* interrupt cookie */
277 	callout_t sc_tick_ch;		/* tick callout */
278 
279 	int sc_ee_addrbits;		/* EEPROM address bits */
280 	int sc_ich8_flash_base;
281 	int sc_ich8_flash_bank_size;
282 	int sc_nvm_k1_enabled;
283 
284 	/*
285 	 * Software state for the transmit and receive descriptors.
286 	 */
287 	int sc_txnum;			/* must be a power of two */
288 	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
289 	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];
290 
291 	/*
292 	 * Control data structures.
293 	 */
294 	int sc_ntxdesc;			/* must be a power of two */
295 	struct wm_control_data_82544 *sc_control_data;
296 	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
297 	bus_dma_segment_t sc_cd_seg;	/* control data segment */
298 	int sc_cd_rseg;			/* real number of control segments */
299 	size_t sc_cd_size;		/* control data size */
300 #define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
301 #define	sc_txdescs	sc_control_data->wcd_txdescs
302 #define	sc_rxdescs	sc_control_data->wcd_rxdescs
303 
304 #ifdef WM_EVENT_COUNTERS
305 	/* Event counters. */
306 	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
307 	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
308 	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
309 	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
310 	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
311 	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
312 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
313 
314 	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
315 	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
316 	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
317 	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
318 	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
319 	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
320 	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
321 	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */
322 
323 	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
324 	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */
325 
326 	struct evcnt sc_ev_tu;		/* Tx underrun */
327 
328 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
329 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
330 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
331 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
332 	struct evcnt sc_ev_rx_macctl;	/* Rx unsupported (MAC control) frames */
333 #endif /* WM_EVENT_COUNTERS */
334 
335 	bus_addr_t sc_tdt_reg;		/* offset of TDT register */
336 
337 	int	sc_txfree;		/* number of free Tx descriptors */
338 	int	sc_txnext;		/* next ready Tx descriptor */
339 
340 	int	sc_txsfree;		/* number of free Tx jobs */
341 	int	sc_txsnext;		/* next free Tx job */
342 	int	sc_txsdirty;		/* dirty Tx jobs */
343 
344 	/* These 5 variables are used only on the 82547. */
345 	int	sc_txfifo_size;		/* Tx FIFO size */
346 	int	sc_txfifo_head;		/* current head of FIFO */
347 	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
348 	int	sc_txfifo_stall;	/* Tx FIFO is stalled */
349 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
350 
351 	bus_addr_t sc_rdt_reg;		/* offset of RDT register */
352 
353 	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
354 	int	sc_rxdiscard;
355 	int	sc_rxlen;
356 	struct mbuf *sc_rxhead;
357 	struct mbuf *sc_rxtail;
358 	struct mbuf **sc_rxtailp;
359 
360 	uint32_t sc_ctrl;		/* prototype CTRL register */
361 #if 0
362 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
363 #endif
364 	uint32_t sc_icr;		/* prototype interrupt bits */
365 	uint32_t sc_itr;		/* prototype intr throttling reg */
366 	uint32_t sc_tctl;		/* prototype TCTL register */
367 	uint32_t sc_rctl;		/* prototype RCTL register */
368 	uint32_t sc_txcw;		/* prototype TXCW register */
369 	uint32_t sc_tipg;		/* prototype TIPG register */
370 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
371 	uint32_t sc_pba;		/* prototype PBA register */
372 
373 	int sc_tbi_linkup;		/* TBI link status */
374 	int sc_tbi_anegticks;		/* autonegotiation ticks */
375 	int sc_tbi_ticks;		/* tbi ticks */
376 	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
377 	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */
378 
379 	int sc_mchash_type;		/* multicast filter offset */
380 
381 #if NRND > 0
382 	rndsource_element_t rnd_source;	/* random source */
383 #endif
384 };
385 
386 #define	WM_RXCHAIN_RESET(sc)						\
387 do {									\
388 	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
389 	*(sc)->sc_rxtailp = NULL;					\
390 	(sc)->sc_rxlen = 0;						\
391 } while (/*CONSTCOND*/0)
392 
393 #define	WM_RXCHAIN_LINK(sc, m)						\
394 do {									\
395 	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
396 	(sc)->sc_rxtailp = &(m)->m_next;				\
397 } while (/*CONSTCOND*/0)
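/*
 * A minimal sketch of how the Rx-chain macros cooperate in the receive
 * path (see wm_rxintr() for the real usage):
 *
 *	WM_RXCHAIN_RESET(sc);
 *	WM_RXCHAIN_LINK(sc, m1);
 *	WM_RXCHAIN_LINK(sc, m2);
 *
 * after which sc_rxhead == m1, m1->m_next == m2, and sc_rxtail == m2.
 */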
398 
399 #ifdef WM_EVENT_COUNTERS
400 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
401 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
402 #else
403 #define	WM_EVCNT_INCR(ev)	/* nothing */
404 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
405 #endif
406 
407 #define	CSR_READ(sc, reg)						\
408 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
409 #define	CSR_WRITE(sc, reg, val)						\
410 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
411 #define	CSR_WRITE_FLUSH(sc)						\
412 	(void) CSR_READ((sc), WMREG_STATUS)
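/*
 * CSR_WRITE_FLUSH() forces posted PCI writes out to the chip by doing
 * a harmless read of the STATUS register.  A typical (illustrative)
 * sequence:
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 *	CSR_WRITE_FLUSH(sc);		(reads STATUS, pushing the write)
 */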
413 
414 #define ICH8_FLASH_READ32(sc, reg) \
415 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
416 #define ICH8_FLASH_WRITE32(sc, reg, data) \
417 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
418 
419 #define ICH8_FLASH_READ16(sc, reg) \
420 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
421 #define ICH8_FLASH_WRITE16(sc, reg, data) \
422 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
423 
424 #define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
425 #define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))
426 
427 #define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
428 #define	WM_CDTXADDR_HI(sc, x)						\
429 	(sizeof(bus_addr_t) == 8 ?					\
430 	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)
431 
432 #define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
433 #define	WM_CDRXADDR_HI(sc, x)						\
434 	(sizeof(bus_addr_t) == 8 ?					\
435 	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
436 
437 #define	WM_CDTXSYNC(sc, x, n, ops)					\
438 do {									\
439 	int __x, __n;							\
440 									\
441 	__x = (x);							\
442 	__n = (n);							\
443 									\
444 	/* If it will wrap around, sync to the end of the ring. */	\
445 	if ((__x + __n) > WM_NTXDESC(sc)) {				\
446 		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
447 		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
448 		    (WM_NTXDESC(sc) - __x), (ops));			\
449 		__n -= (WM_NTXDESC(sc) - __x);				\
450 		__x = 0;						\
451 	}								\
452 									\
453 	/* Now sync whatever is left. */				\
454 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
455 	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
456 } while (/*CONSTCOND*/0)
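/*
 * Worked example of the wrap-around handling above: syncing 4
 * descriptors starting at index 4094 of a 4096-entry ring is split
 * into two bus_dmamap_sync() calls, one covering descriptors
 * 4094-4095 and one covering descriptors 0-1.
 */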
457 
458 #define	WM_CDRXSYNC(sc, x, ops)						\
459 do {									\
460 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
461 	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
462 } while (/*CONSTCOND*/0)
463 
464 #define	WM_INIT_RXDESC(sc, x)						\
465 do {									\
466 	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
467 	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
468 	struct mbuf *__m = __rxs->rxs_mbuf;				\
469 									\
470 	/*								\
471 	 * Note: We scoot the packet forward 2 bytes in the buffer	\
472 	 * so that the payload after the Ethernet header is aligned	\
473 	 * to a 4-byte boundary.					\
474 	 *								\
475 	 * XXX BRAINDAMAGE ALERT!					\
476 	 * The stupid chip uses the same size for every buffer, which	\
477 	 * is set in the Receive Control register.  We are using the 2K	\
478 	 * size option, but what we REALLY want is (2K - 2)!  For this	\
479 	 * reason, we can't "scoot" packets longer than the standard	\
480 	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
481 	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
482 	 * the upper layer copy the headers.				\
483 	 */								\
484 	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
485 									\
486 	wm_set_dma_addr(&__rxd->wrx_addr,				\
487 	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
488 	__rxd->wrx_len = 0;						\
489 	__rxd->wrx_cksum = 0;						\
490 	__rxd->wrx_status = 0;						\
491 	__rxd->wrx_errors = 0;						\
492 	__rxd->wrx_special = 0;						\
493 	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
494 									\
495 	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
496 } while (/*CONSTCOND*/0)
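/*
 * Alignment example for the "scoot" above: with sc_align_tweak == 2,
 * the 14-byte Ethernet header occupies buffer offsets 2-15, so the IP
 * header that follows starts at offset 16, a 4-byte boundary.  With a
 * tweak of 0 it would start at the misaligned offset 14.
 */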
497 
498 static void	wm_start(struct ifnet *);
499 static void	wm_watchdog(struct ifnet *);
500 static int	wm_ifflags_cb(struct ethercom *);
501 static int	wm_ioctl(struct ifnet *, u_long, void *);
502 static int	wm_init(struct ifnet *);
503 static void	wm_stop(struct ifnet *, int);
504 static bool	wm_suspend(device_t, const pmf_qual_t *);
505 static bool	wm_resume(device_t, const pmf_qual_t *);
506 
507 static void	wm_reset(struct wm_softc *);
508 static void	wm_rxdrain(struct wm_softc *);
509 static int	wm_add_rxbuf(struct wm_softc *, int);
510 static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
511 static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
512 static int	wm_validate_eeprom_checksum(struct wm_softc *);
513 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
514 static void	wm_tick(void *);
515 
516 static void	wm_set_filter(struct wm_softc *);
517 static void	wm_set_vlan(struct wm_softc *);
518 
519 static int	wm_intr(void *);
520 static void	wm_txintr(struct wm_softc *);
521 static void	wm_rxintr(struct wm_softc *);
522 static void	wm_linkintr(struct wm_softc *, uint32_t);
523 
524 static void	wm_tbi_mediainit(struct wm_softc *);
525 static int	wm_tbi_mediachange(struct ifnet *);
526 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
527 
528 static void	wm_tbi_set_linkled(struct wm_softc *);
529 static void	wm_tbi_check_link(struct wm_softc *);
530 
531 static void	wm_gmii_reset(struct wm_softc *);
532 
533 static int	wm_gmii_i82543_readreg(device_t, int, int);
534 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
535 
536 static int	wm_gmii_i82544_readreg(device_t, int, int);
537 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
538 
539 static int	wm_gmii_i80003_readreg(device_t, int, int);
540 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
541 static int	wm_gmii_bm_readreg(device_t, int, int);
542 static void	wm_gmii_bm_writereg(device_t, int, int, int);
543 static int	wm_gmii_hv_readreg(device_t, int, int);
544 static void	wm_gmii_hv_writereg(device_t, int, int, int);
545 static int	wm_sgmii_readreg(device_t, int, int);
546 static void	wm_sgmii_writereg(device_t, int, int, int);
547 
548 static void	wm_gmii_statchg(device_t);
549 
550 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
551 static int	wm_gmii_mediachange(struct ifnet *);
552 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
553 
554 static int	wm_kmrn_readreg(struct wm_softc *, int);
555 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
556 
557 static void	wm_set_spiaddrbits(struct wm_softc *);
558 static int	wm_match(device_t, cfdata_t, void *);
559 static void	wm_attach(device_t, device_t, void *);
560 static int	wm_detach(device_t, int);
561 static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
562 static void	wm_get_auto_rd_done(struct wm_softc *);
563 static void	wm_lan_init_done(struct wm_softc *);
564 static void	wm_get_cfg_done(struct wm_softc *);
565 static int	wm_get_swsm_semaphore(struct wm_softc *);
566 static void	wm_put_swsm_semaphore(struct wm_softc *);
567 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
568 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
569 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
570 static int	wm_get_swfwhw_semaphore(struct wm_softc *);
571 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
572 
573 static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
574 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
575 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
576 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
577 		     uint32_t, uint16_t *);
578 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
579 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
580 static void	wm_82547_txfifo_stall(void *);
581 static int	wm_check_mng_mode(struct wm_softc *);
582 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
583 static int	wm_check_mng_mode_82574(struct wm_softc *);
584 static int	wm_check_mng_mode_generic(struct wm_softc *);
585 static int	wm_enable_mng_pass_thru(struct wm_softc *);
586 static int	wm_check_reset_block(struct wm_softc *);
587 static void	wm_get_hw_control(struct wm_softc *);
588 static int	wm_check_for_link(struct wm_softc *);
589 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
590 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
591 #ifdef WM_WOL
592 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
593 #endif
594 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
595 static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
596 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
597 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
598 static void	wm_reset_init_script_82575(struct wm_softc *);
599 static void	wm_release_manageability(struct wm_softc *);
600 static void	wm_release_hw_control(struct wm_softc *);
601 static void	wm_get_wakeup(struct wm_softc *);
602 #ifdef WM_WOL
603 static void	wm_enable_phy_wakeup(struct wm_softc *);
604 static void	wm_enable_wakeup(struct wm_softc *);
605 #endif
606 static void	wm_init_manageability(struct wm_softc *);
607 
608 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
609     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
610 
611 /*
612  * Devices supported by this driver.
613  */
614 static const struct wm_product {
615 	pci_vendor_id_t		wmp_vendor;
616 	pci_product_id_t	wmp_product;
617 	const char		*wmp_name;
618 	wm_chip_type		wmp_type;
619 	int			wmp_flags;
620 #define	WMP_F_1000X		0x01
621 #define	WMP_F_1000T		0x02
622 #define	WMP_F_SERDES		0x04
623 } wm_products[] = {
624 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
625 	  "Intel i82542 1000BASE-X Ethernet",
626 	  WM_T_82542_2_1,	WMP_F_1000X },
627 
628 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
629 	  "Intel i82543GC 1000BASE-X Ethernet",
630 	  WM_T_82543,		WMP_F_1000X },
631 
632 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
633 	  "Intel i82543GC 1000BASE-T Ethernet",
634 	  WM_T_82543,		WMP_F_1000T },
635 
636 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
637 	  "Intel i82544EI 1000BASE-T Ethernet",
638 	  WM_T_82544,		WMP_F_1000T },
639 
640 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
641 	  "Intel i82544EI 1000BASE-X Ethernet",
642 	  WM_T_82544,		WMP_F_1000X },
643 
644 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
645 	  "Intel i82544GC 1000BASE-T Ethernet",
646 	  WM_T_82544,		WMP_F_1000T },
647 
648 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
649 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
650 	  WM_T_82544,		WMP_F_1000T },
651 
652 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
653 	  "Intel i82540EM 1000BASE-T Ethernet",
654 	  WM_T_82540,		WMP_F_1000T },
655 
656 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
657 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
658 	  WM_T_82540,		WMP_F_1000T },
659 
660 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
661 	  "Intel i82540EP 1000BASE-T Ethernet",
662 	  WM_T_82540,		WMP_F_1000T },
663 
664 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
665 	  "Intel i82540EP 1000BASE-T Ethernet",
666 	  WM_T_82540,		WMP_F_1000T },
667 
668 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
669 	  "Intel i82540EP 1000BASE-T Ethernet",
670 	  WM_T_82540,		WMP_F_1000T },
671 
672 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
673 	  "Intel i82545EM 1000BASE-T Ethernet",
674 	  WM_T_82545,		WMP_F_1000T },
675 
676 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
677 	  "Intel i82545GM 1000BASE-T Ethernet",
678 	  WM_T_82545_3,		WMP_F_1000T },
679 
680 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
681 	  "Intel i82545GM 1000BASE-X Ethernet",
682 	  WM_T_82545_3,		WMP_F_1000X },
683 #if 0
684 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
685 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
686 	  WM_T_82545_3,		WMP_F_SERDES },
687 #endif
688 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
689 	  "Intel i82546EB 1000BASE-T Ethernet",
690 	  WM_T_82546,		WMP_F_1000T },
691 
692 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
693 	  "Intel i82546EB 1000BASE-T Ethernet",
694 	  WM_T_82546,		WMP_F_1000T },
695 
696 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
697 	  "Intel i82545EM 1000BASE-X Ethernet",
698 	  WM_T_82545,		WMP_F_1000X },
699 
700 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
701 	  "Intel i82546EB 1000BASE-X Ethernet",
702 	  WM_T_82546,		WMP_F_1000X },
703 
704 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
705 	  "Intel i82546GB 1000BASE-T Ethernet",
706 	  WM_T_82546_3,		WMP_F_1000T },
707 
708 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
709 	  "Intel i82546GB 1000BASE-X Ethernet",
710 	  WM_T_82546_3,		WMP_F_1000X },
711 #if 0
712 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
713 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
714 	  WM_T_82546_3,		WMP_F_SERDES },
715 #endif
716 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
717 	  "i82546GB quad-port Gigabit Ethernet",
718 	  WM_T_82546_3,		WMP_F_1000T },
719 
720 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
721 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
722 	  WM_T_82546_3,		WMP_F_1000T },
723 
724 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
725 	  "Intel PRO/1000MT (82546GB)",
726 	  WM_T_82546_3,		WMP_F_1000T },
727 
728 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
729 	  "Intel i82541EI 1000BASE-T Ethernet",
730 	  WM_T_82541,		WMP_F_1000T },
731 
732 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
733 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
734 	  WM_T_82541,		WMP_F_1000T },
735 
736 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
737 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
738 	  WM_T_82541,		WMP_F_1000T },
739 
740 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
741 	  "Intel i82541ER 1000BASE-T Ethernet",
742 	  WM_T_82541_2,		WMP_F_1000T },
743 
744 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
745 	  "Intel i82541GI 1000BASE-T Ethernet",
746 	  WM_T_82541_2,		WMP_F_1000T },
747 
748 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
749 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
750 	  WM_T_82541_2,		WMP_F_1000T },
751 
752 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
753 	  "Intel i82541PI 1000BASE-T Ethernet",
754 	  WM_T_82541_2,		WMP_F_1000T },
755 
756 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
757 	  "Intel i82547EI 1000BASE-T Ethernet",
758 	  WM_T_82547,		WMP_F_1000T },
759 
760 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
761 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
762 	  WM_T_82547,		WMP_F_1000T },
763 
764 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
765 	  "Intel i82547GI 1000BASE-T Ethernet",
766 	  WM_T_82547_2,		WMP_F_1000T },
767 
768 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
769 	  "Intel PRO/1000 PT (82571EB)",
770 	  WM_T_82571,		WMP_F_1000T },
771 
772 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
773 	  "Intel PRO/1000 PF (82571EB)",
774 	  WM_T_82571,		WMP_F_1000X },
775 #if 0
776 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
777 	  "Intel PRO/1000 PB (82571EB)",
778 	  WM_T_82571,		WMP_F_SERDES },
779 #endif
780 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
781 	  "Intel PRO/1000 QT (82571EB)",
782 	  WM_T_82571,		WMP_F_1000T },
783 
784 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
785 	  "Intel i82572EI 1000baseT Ethernet",
786 	  WM_T_82572,		WMP_F_1000T },
787 
788 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
789 	  "Intel PRO/1000 PT Quad Port Server Adapter",
790 	  WM_T_82571,		WMP_F_1000T, },
791 
792 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
793 	  "Intel i82572EI 1000baseX Ethernet",
794 	  WM_T_82572,		WMP_F_1000X },
795 #if 0
796 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
797 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
798 	  WM_T_82572,		WMP_F_SERDES },
799 #endif
800 
801 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
802 	  "Intel i82572EI 1000baseT Ethernet",
803 	  WM_T_82572,		WMP_F_1000T },
804 
805 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
806 	  "Intel i82573E",
807 	  WM_T_82573,		WMP_F_1000T },
808 
809 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
810 	  "Intel i82573E IAMT",
811 	  WM_T_82573,		WMP_F_1000T },
812 
813 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
814 	  "Intel i82573L Gigabit Ethernet",
815 	  WM_T_82573,		WMP_F_1000T },
816 
817 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
818 	  "Intel i82574L",
819 	  WM_T_82574,		WMP_F_1000T },
820 
821 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
822 	  "Intel i82583V",
823 	  WM_T_82583,		WMP_F_1000T },
824 
825 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
826 	  "i80003 dual 1000baseT Ethernet",
827 	  WM_T_80003,		WMP_F_1000T },
828 
829 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
830 	  "i80003 dual 1000baseX Ethernet",
831 	  WM_T_80003,		WMP_F_1000T },
832 #if 0
833 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
834 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
835 	  WM_T_80003,		WMP_F_SERDES },
836 #endif
837 
838 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
839 	  "Intel i80003 1000baseT Ethernet",
840 	  WM_T_80003,		WMP_F_1000T },
841 #if 0
842 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
843 	  "Intel i80003 Gigabit Ethernet (SERDES)",
844 	  WM_T_80003,		WMP_F_SERDES },
845 #endif
846 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
847 	  "Intel i82801H (M_AMT) LAN Controller",
848 	  WM_T_ICH8,		WMP_F_1000T },
849 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
850 	  "Intel i82801H (AMT) LAN Controller",
851 	  WM_T_ICH8,		WMP_F_1000T },
852 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
853 	  "Intel i82801H LAN Controller",
854 	  WM_T_ICH8,		WMP_F_1000T },
855 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
856 	  "Intel i82801H (IFE) LAN Controller",
857 	  WM_T_ICH8,		WMP_F_1000T },
858 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
859 	  "Intel i82801H (M) LAN Controller",
860 	  WM_T_ICH8,		WMP_F_1000T },
861 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
862 	  "Intel i82801H IFE (GT) LAN Controller",
863 	  WM_T_ICH8,		WMP_F_1000T },
864 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
865 	  "Intel i82801H IFE (G) LAN Controller",
866 	  WM_T_ICH8,		WMP_F_1000T },
867 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
868 	  "82801I (AMT) LAN Controller",
869 	  WM_T_ICH9,		WMP_F_1000T },
870 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
871 	  "82801I LAN Controller",
872 	  WM_T_ICH9,		WMP_F_1000T },
873 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
874 	  "82801I (G) LAN Controller",
875 	  WM_T_ICH9,		WMP_F_1000T },
876 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
877 	  "82801I (GT) LAN Controller",
878 	  WM_T_ICH9,		WMP_F_1000T },
879 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
880 	  "82801I (C) LAN Controller",
881 	  WM_T_ICH9,		WMP_F_1000T },
882 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
883 	  "82801I mobile LAN Controller",
884 	  WM_T_ICH9,		WMP_F_1000T },
885 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
886 	  "82801I mobile (V) LAN Controller",
887 	  WM_T_ICH9,		WMP_F_1000T },
888 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
889 	  "82801I mobile (AMT) LAN Controller",
890 	  WM_T_ICH9,		WMP_F_1000T },
891 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
892 	  "82567LM-4 LAN Controller",
893 	  WM_T_ICH9,		WMP_F_1000T },
894 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
895 	  "82567V-3 LAN Controller",
896 	  WM_T_ICH9,		WMP_F_1000T },
897 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
898 	  "82567LM-2 LAN Controller",
899 	  WM_T_ICH10,		WMP_F_1000T },
900 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
901 	  "82567LF-2 LAN Controller",
902 	  WM_T_ICH10,		WMP_F_1000T },
903 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
904 	  "82567LM-3 LAN Controller",
905 	  WM_T_ICH10,		WMP_F_1000T },
906 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
907 	  "82567LF-3 LAN Controller",
908 	  WM_T_ICH10,		WMP_F_1000T },
909 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
910 	  "82567V-2 LAN Controller",
911 	  WM_T_ICH10,		WMP_F_1000T },
912 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
913 	  "PCH LAN (82577LM) Controller",
914 	  WM_T_PCH,		WMP_F_1000T },
915 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
916 	  "PCH LAN (82577LC) Controller",
917 	  WM_T_PCH,		WMP_F_1000T },
918 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
919 	  "PCH LAN (82578DM) Controller",
920 	  WM_T_PCH,		WMP_F_1000T },
921 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
922 	  "PCH LAN (82578DC) Controller",
923 	  WM_T_PCH,		WMP_F_1000T },
924 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
925 	  "82575EB dual-1000baseT Ethernet",
926 	  WM_T_82575,		WMP_F_1000T },
927 #if 0
928 	/*
929 	 * Not sure whether this is WMP_F_1000X or WMP_F_SERDES; we do not
930 	 * have the hardware, so it is disabled for now ...
931 	 */
932 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
933 	  "82575EB dual-1000baseX Ethernet (SERDES)",
934 	  WM_T_82575,		WMP_F_SERDES },
935 #endif
936 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
937 	  "82575GB quad-1000baseT Ethernet",
938 	  WM_T_82575,		WMP_F_1000T },
939 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
940 	  "82575GB quad-1000baseT Ethernet (PM)",
941 	  WM_T_82575,		WMP_F_1000T },
942 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
943 	  "82576 1000BaseT Ethernet",
944 	  WM_T_82576,		WMP_F_1000T },
945 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
946 	  "82576 1000BaseX Ethernet",
947 	  WM_T_82576,		WMP_F_1000X },
948 #if 0
949 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
950 	  "82576 gigabit Ethernet (SERDES)",
951 	  WM_T_82576,		WMP_F_SERDES },
952 #endif
953 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
954 	  "82576 quad-1000BaseT Ethernet",
955 	  WM_T_82576,		WMP_F_1000T },
956 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
957 	  "82576 gigabit Ethernet",
958 	  WM_T_82576,		WMP_F_1000T },
959 #if 0
960 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
961 	  "82576 gigabit Ethernet (SERDES)",
962 	  WM_T_82576,		WMP_F_SERDES },
963 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
964 	  "82576 quad-gigabit Ethernet (SERDES)",
965 	  WM_T_82576,		WMP_F_SERDES },
966 #endif
967 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
968 	  "82580 1000BaseT Ethernet",
969 	  WM_T_82580,		WMP_F_1000T },
970 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
971 	  "82580 1000BaseX Ethernet",
972 	  WM_T_82580,		WMP_F_1000X },
973 #if 0
974 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
975 	  "82580 1000BaseT Ethernet (SERDES)",
976 	  WM_T_82580,		WMP_F_SERDES },
977 #endif
978 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
979 	  "82580 gigabit Ethernet (SGMII)",
980 	  WM_T_82580,		WMP_F_1000T },
981 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
982 	  "82580 dual-1000BaseT Ethernet",
983 	  WM_T_82580,		WMP_F_1000T },
984 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER,
985 	  "82580 1000BaseT Ethernet",
986 	  WM_T_82580ER,		WMP_F_1000T },
987 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER_DUAL,
988 	  "82580 dual-1000BaseT Ethernet",
989 	  WM_T_82580ER,		WMP_F_1000T },
990 	{ 0,			0,
991 	  NULL,
992 	  0,			0 },
993 };
994 
995 #ifdef WM_EVENT_COUNTERS
996 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
997 #endif /* WM_EVENT_COUNTERS */
998 
999 #if 0 /* Not currently used */
1000 static inline uint32_t
1001 wm_io_read(struct wm_softc *sc, int reg)
1002 {
1003 
1004 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1005 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1006 }
1007 #endif
1008 
1009 static inline void
1010 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1011 {
1012 
1013 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1014 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1015 }
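/*
 * A minimal usage sketch: wm_io_write() goes through the indirect I/O
 * window, writing the register offset to BAR offset 0 and the data to
 * BAR offset 4 (mirroring wm_io_read() above).  It is only meaningful
 * once the I/O BAR has been mapped, i.e. when WM_F_IOH_VALID is set:
 *
 *	if (sc->sc_flags & WM_F_IOH_VALID)
 *		wm_io_write(sc, WMREG_CTRL, sc->sc_ctrl);
 */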
1016 
1017 static inline void
1018 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1019     uint32_t data)
1020 {
1021 	uint32_t regval;
1022 	int i;
1023 
1024 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1025 
1026 	CSR_WRITE(sc, reg, regval);
1027 
1028 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1029 		delay(5);
1030 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1031 			break;
1032 	}
1033 	if (i == SCTL_CTL_POLL_TIMEOUT) {
1034 		aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not indicate ready\n",
1035 		    device_xname(sc->sc_dev), reg);
1036 	}
1037 }
1038 
1039 static inline void
1040 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1041 {
1042 	wa->wa_low = htole32(v & 0xffffffffU);
1043 	if (sizeof(bus_addr_t) == 8)
1044 		wa->wa_high = htole32((uint64_t) v >> 32);
1045 	else
1046 		wa->wa_high = 0;
1047 }
1048 
1049 static void
1050 wm_set_spiaddrbits(struct wm_softc *sc)
1051 {
1052 	uint32_t reg;
1053 
1054 	sc->sc_flags |= WM_F_EEPROM_SPI;
1055 	reg = CSR_READ(sc, WMREG_EECD);
1056 	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
1057 }
1058 
1059 static const struct wm_product *
1060 wm_lookup(const struct pci_attach_args *pa)
1061 {
1062 	const struct wm_product *wmp;
1063 
1064 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1065 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1066 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1067 			return wmp;
1068 	}
1069 	return NULL;
1070 }
1071 
1072 static int
1073 wm_match(device_t parent, cfdata_t cf, void *aux)
1074 {
1075 	struct pci_attach_args *pa = aux;
1076 
1077 	if (wm_lookup(pa) != NULL)
1078 		return 1;
1079 
1080 	return 0;
1081 }
1082 
1083 static void
1084 wm_attach(device_t parent, device_t self, void *aux)
1085 {
1086 	struct wm_softc *sc = device_private(self);
1087 	struct pci_attach_args *pa = aux;
1088 	prop_dictionary_t dict;
1089 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1090 	pci_chipset_tag_t pc = pa->pa_pc;
1091 	pci_intr_handle_t ih;
1092 	const char *intrstr = NULL;
1093 	const char *eetype, *xname;
1094 	bus_space_tag_t memt;
1095 	bus_space_handle_t memh;
1096 	bus_size_t memsize;
1097 	int memh_valid;
1098 	int i, error;
1099 	const struct wm_product *wmp;
1100 	prop_data_t ea;
1101 	prop_number_t pn;
1102 	uint8_t enaddr[ETHER_ADDR_LEN];
1103 	uint16_t cfg1, cfg2, swdpin, io3;
1104 	pcireg_t preg, memtype;
1105 	uint16_t eeprom_data, apme_mask;
1106 	uint32_t reg;
1107 
1108 	sc->sc_dev = self;
1109 	callout_init(&sc->sc_tick_ch, 0);
1110 
1111 	sc->sc_wmp = wmp = wm_lookup(pa);
1112 	if (wmp == NULL) {
1113 		printf("\n");
1114 		panic("wm_attach: impossible");
1115 	}
1116 
1117 	sc->sc_pc = pa->pa_pc;
1118 	sc->sc_pcitag = pa->pa_tag;
1119 
1120 	if (pci_dma64_available(pa))
1121 		sc->sc_dmat = pa->pa_dmat64;
1122 	else
1123 		sc->sc_dmat = pa->pa_dmat;
1124 
1125 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
1126 	aprint_naive(": Ethernet controller\n");
1127 	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, sc->sc_rev);
1128 
1129 	sc->sc_type = wmp->wmp_type;
1130 	if (sc->sc_type < WM_T_82543) {
1131 		if (sc->sc_rev < 2) {
1132 			aprint_error_dev(sc->sc_dev,
1133 			    "i82542 must be at least rev. 2\n");
1134 			return;
1135 		}
1136 		if (sc->sc_rev < 3)
1137 			sc->sc_type = WM_T_82542_2_0;
1138 	}
1139 
1140 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1141 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER))
1142 		sc->sc_flags |= WM_F_NEWQUEUE;
1143 
1144 	/* Set device properties (mactype) */
1145 	dict = device_properties(sc->sc_dev);
1146 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1147 
1148 	/*
1149 	 * Map the device.  All devices support memory-mapped access,
1150 	 * and it is really required for normal operation.
1151 	 */
1152 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1153 	switch (memtype) {
1154 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1155 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1156 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1157 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1158 		break;
1159 	default:
1160 		memh_valid = 0;
1161 		break;
1162 	}
1163 
1164 	if (memh_valid) {
1165 		sc->sc_st = memt;
1166 		sc->sc_sh = memh;
1167 		sc->sc_ss = memsize;
1168 	} else {
1169 		aprint_error_dev(sc->sc_dev,
1170 		    "unable to map device registers\n");
1171 		return;
1172 	}
1173 
1174 	wm_get_wakeup(sc);
1175 
1176 	/*
1177 	 * In addition, i82544 and later support I/O mapped indirect
1178 	 * register access.  It is not desirable (nor supported in
1179 	 * this driver) to use it for normal operation, though it is
1180 	 * required to work around bugs in some chip versions.
1181 	 */
1182 	if (sc->sc_type >= WM_T_82544) {
1183 		/* First we have to find the I/O BAR. */
1184 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1185 			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
1186 			    PCI_MAPREG_TYPE_IO)
1187 				break;
1188 		}
1189 		if (i == PCI_MAPREG_END)
1190 			aprint_error_dev(sc->sc_dev,
1191 			    "WARNING: unable to find I/O BAR\n");
1192 		else {
1193 			/*
1194 			 * The i8254x apparently doesn't respond when the
1195 			 * I/O BAR is 0, which suggests that it hasn't
1196 			 * been configured.
1197 			 */
1198 			preg = pci_conf_read(pc, pa->pa_tag, i);
1199 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1200 				aprint_error_dev(sc->sc_dev,
1201 				    "WARNING: I/O BAR at zero.\n");
1202 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1203 					0, &sc->sc_iot, &sc->sc_ioh,
1204 					NULL, &sc->sc_ios) == 0) {
1205 				sc->sc_flags |= WM_F_IOH_VALID;
1206 			} else {
1207 				aprint_error_dev(sc->sc_dev,
1208 				    "WARNING: unable to map I/O space\n");
1209 			}
1210 		}
1211 
1212 	}
1213 
1214 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
1215 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1216 	preg |= PCI_COMMAND_MASTER_ENABLE;
1217 	if (sc->sc_type < WM_T_82542_2_1)
1218 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1219 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1220 
1221 	/* power up chip */
1222 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1223 	    NULL)) && error != EOPNOTSUPP) {
1224 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1225 		return;
1226 	}
1227 
1228 	/*
1229 	 * Map and establish our interrupt.
1230 	 */
1231 	if (pci_intr_map(pa, &ih)) {
1232 		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
1233 		return;
1234 	}
1235 	intrstr = pci_intr_string(pc, ih);
1236 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
1237 	if (sc->sc_ih == NULL) {
1238 		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
1239 		if (intrstr != NULL)
1240 			aprint_error(" at %s", intrstr);
1241 		aprint_error("\n");
1242 		return;
1243 	}
1244 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
1245 
1246 	/*
1247 	 * Check the function ID (unit number of the chip).
1248 	 */
1249 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1250 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
1251 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1252 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER))
1253 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1254 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1255 	else
1256 		sc->sc_funcid = 0;
1257 
1258 	/*
1259 	 * Determine a few things about the bus we're connected to.
1260 	 */
1261 	if (sc->sc_type < WM_T_82543) {
1262 		/* We don't really know the bus characteristics here. */
1263 		sc->sc_bus_speed = 33;
1264 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1265 		/*
1266 		 * CSA (Communication Streaming Architecture) is about as fast
1267 		 * as a 32-bit 66MHz PCI bus.
1268 		 */
1269 		sc->sc_flags |= WM_F_CSA;
1270 		sc->sc_bus_speed = 66;
1271 		aprint_verbose_dev(sc->sc_dev,
1272 		    "Communication Streaming Architecture\n");
1273 		if (sc->sc_type == WM_T_82547) {
1274 			callout_init(&sc->sc_txfifo_ch, 0);
1275 			callout_setfunc(&sc->sc_txfifo_ch,
1276 					wm_82547_txfifo_stall, sc);
1277 			aprint_verbose_dev(sc->sc_dev,
1278 			    "using 82547 Tx FIFO stall work-around\n");
1279 		}
1280 	} else if (sc->sc_type >= WM_T_82571) {
1281 		sc->sc_flags |= WM_F_PCIE;
1282 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1283 		    && (sc->sc_type != WM_T_ICH10)
1284 		    && (sc->sc_type != WM_T_PCH)) {
1285 			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
1286 			/* ICH* and PCH have no PCIe capability registers */
1287 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1288 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1289 				NULL) == 0)
1290 				aprint_error_dev(sc->sc_dev,
1291 				    "unable to find PCIe capability\n");
1292 		}
1293 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1294 	} else {
1295 		reg = CSR_READ(sc, WMREG_STATUS);
1296 		if (reg & STATUS_BUS64)
1297 			sc->sc_flags |= WM_F_BUS64;
1298 		if ((reg & STATUS_PCIX_MODE) != 0) {
1299 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1300 
1301 			sc->sc_flags |= WM_F_PCIX;
1302 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1303 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1304 				aprint_error_dev(sc->sc_dev,
1305 				    "unable to find PCIX capability\n");
1306 			else if (sc->sc_type != WM_T_82545_3 &&
1307 				 sc->sc_type != WM_T_82546_3) {
1308 				/*
1309 				 * Work around a problem caused by the BIOS
1310 				 * setting the max memory read byte count
1311 				 * incorrectly.
1312 				 */
1313 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1314 				    sc->sc_pcixe_capoff + PCI_PCIX_CMD);
1315 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1316 				    sc->sc_pcixe_capoff + PCI_PCIX_STATUS);
1317 
1318 				bytecnt =
1319 				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
1320 				    PCI_PCIX_CMD_BYTECNT_SHIFT;
1321 				maxb =
1322 				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
1323 				    PCI_PCIX_STATUS_MAXB_SHIFT;
1324 				if (bytecnt > maxb) {
1325 					aprint_verbose_dev(sc->sc_dev,
1326 					    "resetting PCI-X MMRBC: %d -> %d\n",
1327 					    512 << bytecnt, 512 << maxb);
1328 					pcix_cmd = (pcix_cmd &
1329 					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
1330 					   (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
1331 					pci_conf_write(pa->pa_pc, pa->pa_tag,
1332 					    sc->sc_pcixe_capoff + PCI_PCIX_CMD,
1333 					    pcix_cmd);
1334 				}
1335 			}
1336 		}
1337 		/*
1338 		 * The quad port adapter is special; it has a PCIX-PCIX
1339 		 * bridge on the board, and can run the secondary bus at
1340 		 * a higher speed.
1341 		 */
1342 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1343 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1344 								      : 66;
1345 		} else if (sc->sc_flags & WM_F_PCIX) {
1346 			switch (reg & STATUS_PCIXSPD_MASK) {
1347 			case STATUS_PCIXSPD_50_66:
1348 				sc->sc_bus_speed = 66;
1349 				break;
1350 			case STATUS_PCIXSPD_66_100:
1351 				sc->sc_bus_speed = 100;
1352 				break;
1353 			case STATUS_PCIXSPD_100_133:
1354 				sc->sc_bus_speed = 133;
1355 				break;
1356 			default:
1357 				aprint_error_dev(sc->sc_dev,
1358 				    "unknown PCIXSPD %d; assuming 66MHz\n",
1359 				    reg & STATUS_PCIXSPD_MASK);
1360 				sc->sc_bus_speed = 66;
1361 				break;
1362 			}
1363 		} else
1364 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1365 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1366 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1367 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1368 	}
1369 
1370 	/*
1371 	 * Allocate the control data structures, and create and load the
1372 	 * DMA map for it.
1373 	 *
1374 	 * NOTE: All Tx descriptors must be in the same 4G segment of
1375 	 * memory.  So must Rx descriptors.  We simplify by allocating
1376 	 * both sets within the same 4G segment.
1377 	 */
1378 	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1379 	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
1380 	sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1381 	    sizeof(struct wm_control_data_82542) :
1382 	    sizeof(struct wm_control_data_82544);
1383 	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1384 		    (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1385 		    &sc->sc_cd_rseg, 0)) != 0) {
1386 		aprint_error_dev(sc->sc_dev,
1387 		    "unable to allocate control data, error = %d\n",
1388 		    error);
1389 		goto fail_0;
1390 	}
1391 
1392 	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1393 		    sc->sc_cd_rseg, sc->sc_cd_size,
1394 		    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1395 		aprint_error_dev(sc->sc_dev,
1396 		    "unable to map control data, error = %d\n", error);
1397 		goto fail_1;
1398 	}
1399 
1400 	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1401 		    sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1402 		aprint_error_dev(sc->sc_dev,
1403 		    "unable to create control data DMA map, error = %d\n",
1404 		    error);
1405 		goto fail_2;
1406 	}
1407 
1408 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1409 		    sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1410 		aprint_error_dev(sc->sc_dev,
1411 		    "unable to load control data DMA map, error = %d\n",
1412 		    error);
1413 		goto fail_3;
1414 	}
1415 
1416 	/*
1417 	 * Create the transmit buffer DMA maps.
1418 	 */
1419 	WM_TXQUEUELEN(sc) =
1420 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1421 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1422 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1423 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1424 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1425 			    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1426 			aprint_error_dev(sc->sc_dev,
1427 			    "unable to create Tx DMA map %d, error = %d\n",
1428 			    i, error);
1429 			goto fail_4;
1430 		}
1431 	}
1432 
1433 	/*
1434 	 * Create the receive buffer DMA maps.
1435 	 */
1436 	for (i = 0; i < WM_NRXDESC; i++) {
1437 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1438 			    MCLBYTES, 0, 0,
1439 			    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1440 			aprint_error_dev(sc->sc_dev,
1441 			    "unable to create Rx DMA map %d error = %d\n",
1442 			    i, error);
1443 			goto fail_5;
1444 		}
1445 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
1446 	}
1447 
1448 	/* clear interesting stat counters */
1449 	CSR_READ(sc, WMREG_COLC);
1450 	CSR_READ(sc, WMREG_RXERRC);
1451 
1452 	/*
1453 	 * Reset the chip to a known state.
1454 	 */
1455 	wm_reset(sc);
1456 
1457 	switch (sc->sc_type) {
1458 	case WM_T_82571:
1459 	case WM_T_82572:
1460 	case WM_T_82573:
1461 	case WM_T_82574:
1462 	case WM_T_82583:
1463 	case WM_T_80003:
1464 	case WM_T_ICH8:
1465 	case WM_T_ICH9:
1466 	case WM_T_ICH10:
1467 	case WM_T_PCH:
1468 		if (wm_check_mng_mode(sc) != 0)
1469 			wm_get_hw_control(sc);
1470 		break;
1471 	default:
1472 		break;
1473 	}
1474 
1475 	/*
1476 	 * Get some information about the EEPROM.
1477 	 */
1478 	switch (sc->sc_type) {
1479 	case WM_T_82542_2_0:
1480 	case WM_T_82542_2_1:
1481 	case WM_T_82543:
1482 	case WM_T_82544:
1483 		/* Microwire */
1484 		sc->sc_ee_addrbits = 6;
1485 		break;
1486 	case WM_T_82540:
1487 	case WM_T_82545:
1488 	case WM_T_82545_3:
1489 	case WM_T_82546:
1490 	case WM_T_82546_3:
1491 		/* Microwire */
1492 		reg = CSR_READ(sc, WMREG_EECD);
1493 		if (reg & EECD_EE_SIZE)
1494 			sc->sc_ee_addrbits = 8;
1495 		else
1496 			sc->sc_ee_addrbits = 6;
1497 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1498 		break;
1499 	case WM_T_82541:
1500 	case WM_T_82541_2:
1501 	case WM_T_82547:
1502 	case WM_T_82547_2:
1503 		reg = CSR_READ(sc, WMREG_EECD);
1504 		if (reg & EECD_EE_TYPE) {
1505 			/* SPI */
1506 			wm_set_spiaddrbits(sc);
1507 		} else
1508 			/* Microwire */
1509 			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
1510 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1511 		break;
1512 	case WM_T_82571:
1513 	case WM_T_82572:
1514 		/* SPI */
1515 		wm_set_spiaddrbits(sc);
1516 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1517 		break;
1518 	case WM_T_82573:
1519 	case WM_T_82574:
1520 	case WM_T_82583:
1521 		if (wm_is_onboard_nvm_eeprom(sc) == 0)
1522 			sc->sc_flags |= WM_F_EEPROM_FLASH;
1523 		else {
1524 			/* SPI */
1525 			wm_set_spiaddrbits(sc);
1526 		}
1527 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1528 		break;
1529 	case WM_T_82575:
1530 	case WM_T_82576:
1531 	case WM_T_82580:
1532 	case WM_T_82580ER:
1533 	case WM_T_80003:
1534 		/* SPI */
1535 		wm_set_spiaddrbits(sc);
1536 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
1537 		break;
1538 	case WM_T_ICH8:
1539 	case WM_T_ICH9:
1540 	case WM_T_ICH10:
1541 	case WM_T_PCH:
1542 		/* FLASH */
1543 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
1544 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1545 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1546 		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1547 			aprint_error_dev(sc->sc_dev,
1548 			    "can't map FLASH registers\n");
1549 			return;
1550 		}
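		/*
		 * GFPREG describes the GbE region of the flash in
		 * sector units: the low field holds the first sector,
		 * the field at bit 16 the last.  Convert that into a
		 * byte offset and a per-bank size in 16-bit words;
		 * the region holds two NVM banks.
		 */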
1551 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1552 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1553 						ICH_FLASH_SECTOR_SIZE;
1554 		sc->sc_ich8_flash_bank_size =
1555 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1556 		sc->sc_ich8_flash_bank_size -=
1557 		    (reg & ICH_GFPREG_BASE_MASK);
1558 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1559 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1560 		break;
1561 	default:
1562 		break;
1563 	}
1564 
1565 	/*
1566 	 * Defer printing the EEPROM type until after verifying the checksum
1567 	 * Defer printing the EEPROM type until after verifying the checksum.
1568 	 * that no EEPROM is attached.
1569 	 */
1570 	/*
1571 	 * Validate the EEPROM checksum. If the checksum fails, flag
1572 	 * this for later, so we can fail future reads from the EEPROM.
1573 	 */
1574 	if (wm_validate_eeprom_checksum(sc)) {
1575 		/*
1576 		 * Validate again, because some PCI-e parts fail the
1577 		 * first check while the link is in a sleep state.
1578 		 */
1579 		if (wm_validate_eeprom_checksum(sc))
1580 			sc->sc_flags |= WM_F_EEPROM_INVALID;
1581 	}
1582 
1583 	/* Set device properties (macflags) */
1584 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1585 
1586 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
1587 		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1588 	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1589 		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1590 	} else {
1591 		if (sc->sc_flags & WM_F_EEPROM_SPI)
1592 			eetype = "SPI";
1593 		else
1594 			eetype = "MicroWire";
1595 		aprint_verbose_dev(sc->sc_dev,
1596 		    "%u word (%d address bits) %s EEPROM\n",
1597 		    1U << sc->sc_ee_addrbits,
1598 		    sc->sc_ee_addrbits, eetype);
1599 	}
1600 
1601 	/*
1602 	 * Read the Ethernet address from device properties, if present;
1603 	 * otherwise read it from the EEPROM.
1604 	 */
1605 	ea = prop_dictionary_get(dict, "mac-address");
1606 	if (ea != NULL) {
1607 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1608 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1609 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1610 	} else {
1611 		if (wm_read_mac_addr(sc, enaddr) != 0) {
1612 			aprint_error_dev(sc->sc_dev,
1613 			    "unable to read Ethernet address\n");
1614 			return;
1615 		}
1616 	}
1617 
1618 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1619 	    ether_sprintf(enaddr));
1620 
1621 	/*
1622 	 * Read the config info from the EEPROM, and set up various
1623 	 * bits in the control registers based on their contents.
1624 	 */
1625 	pn = prop_dictionary_get(dict, "i82543-cfg1");
1626 	if (pn != NULL) {
1627 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1628 		cfg1 = (uint16_t) prop_number_integer_value(pn);
1629 	} else {
1630 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1631 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1632 			return;
1633 		}
1634 	}
1635 
1636 	pn = prop_dictionary_get(dict, "i82543-cfg2");
1637 	if (pn != NULL) {
1638 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1639 		cfg2 = (uint16_t) prop_number_integer_value(pn);
1640 	} else {
1641 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1642 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1643 			return;
1644 		}
1645 	}
1646 
1647 	/* check for WM_F_WOL */
1648 	switch (sc->sc_type) {
1649 	case WM_T_82542_2_0:
1650 	case WM_T_82542_2_1:
1651 	case WM_T_82543:
1652 		/* dummy? */
1653 		eeprom_data = 0;
1654 		apme_mask = EEPROM_CFG3_APME;
1655 		break;
1656 	case WM_T_82544:
1657 		apme_mask = EEPROM_CFG2_82544_APM_EN;
1658 		eeprom_data = cfg2;
1659 		break;
1660 	case WM_T_82546:
1661 	case WM_T_82546_3:
1662 	case WM_T_82571:
1663 	case WM_T_82572:
1664 	case WM_T_82573:
1665 	case WM_T_82574:
1666 	case WM_T_82583:
1667 	case WM_T_80003:
1668 	default:
1669 		apme_mask = EEPROM_CFG3_APME;
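		/*
		 * CFG3 is stored per port in the EEPROM; function 1
		 * reads the port B copy, all others the port A copy.
		 */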
1670 		wm_read_eeprom(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
1671 		    : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
1672 		break;
1673 	case WM_T_82575:
1674 	case WM_T_82576:
1675 	case WM_T_82580:
1676 	case WM_T_82580ER:
1677 	case WM_T_ICH8:
1678 	case WM_T_ICH9:
1679 	case WM_T_ICH10:
1680 	case WM_T_PCH:
1681 		apme_mask = WUC_APME;
1682 		eeprom_data = CSR_READ(sc, WMREG_WUC);
1683 		break;
1684 	}
1685 
1686 	/* Set the WM_F_WOL flag if the APM enable bit read above is set */
1687 	if ((eeprom_data & apme_mask) != 0)
1688 		sc->sc_flags |= WM_F_WOL;
1689 #ifdef WM_DEBUG
1690 	if ((sc->sc_flags & WM_F_WOL) != 0)
1691 		printf("WOL\n");
1692 #endif
1693 
1694 	/*
1695 	 * XXX need special handling for some multiple port cards
1696 	 * to disable a particular port.
1697 	 */
1698 
1699 	if (sc->sc_type >= WM_T_82544) {
1700 		pn = prop_dictionary_get(dict, "i82543-swdpin");
1701 		if (pn != NULL) {
1702 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1703 			swdpin = (uint16_t) prop_number_integer_value(pn);
1704 		} else {
1705 			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1706 				aprint_error_dev(sc->sc_dev,
1707 				    "unable to read SWDPIN\n");
1708 				return;
1709 			}
1710 		}
1711 	}
1712 
1713 	if (cfg1 & EEPROM_CFG1_ILOS)
1714 		sc->sc_ctrl |= CTRL_ILOS;
1715 	if (sc->sc_type >= WM_T_82544) {
1716 		sc->sc_ctrl |=
1717 		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1718 		    CTRL_SWDPIO_SHIFT;
1719 		sc->sc_ctrl |=
1720 		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1721 		    CTRL_SWDPINS_SHIFT;
1722 	} else {
1723 		sc->sc_ctrl |=
1724 		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1725 		    CTRL_SWDPIO_SHIFT;
1726 	}
1727 
1728 #if 0
1729 	if (sc->sc_type >= WM_T_82544) {
1730 		if (cfg1 & EEPROM_CFG1_IPS0)
1731 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1732 		if (cfg1 & EEPROM_CFG1_IPS1)
1733 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1734 		sc->sc_ctrl_ext |=
1735 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1736 		    CTRL_EXT_SWDPIO_SHIFT;
1737 		sc->sc_ctrl_ext |=
1738 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1739 		    CTRL_EXT_SWDPINS_SHIFT;
1740 	} else {
1741 		sc->sc_ctrl_ext |=
1742 		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1743 		    CTRL_EXT_SWDPIO_SHIFT;
1744 	}
1745 #endif
1746 
1747 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1748 #if 0
1749 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1750 #endif
1751 
1752 	/*
1753 	 * Set up some register offsets that are different between
1754 	 * the i82542 and the i82543 and later chips.
1755 	 */
1756 	if (sc->sc_type < WM_T_82543) {
1757 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
1758 		sc->sc_tdt_reg = WMREG_OLD_TDT;
1759 	} else {
1760 		sc->sc_rdt_reg = WMREG_RDT;
1761 		sc->sc_tdt_reg = WMREG_TDT;
1762 	}
1763 
1764 	if (sc->sc_type == WM_T_PCH) {
1765 		uint16_t val;
1766 
1767 		/* Save the NVM K1 bit setting */
1768 		wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
1769 
1770 		if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
1771 			sc->sc_nvm_k1_enabled = 1;
1772 		else
1773 			sc->sc_nvm_k1_enabled = 0;
1774 	}
1775 
1776 	/*
1777 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
1778 	 * media structures accordingly.
1779 	 */
1780 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1781 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
1782 	    || sc->sc_type == WM_T_82573
1783 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
1784 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
1785 		wm_gmii_mediainit(sc, wmp->wmp_product);
1786 	} else if (sc->sc_type < WM_T_82543 ||
1787 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1788 		if (wmp->wmp_flags & WMP_F_1000T)
1789 			aprint_error_dev(sc->sc_dev,
1790 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
1791 		wm_tbi_mediainit(sc);
1792 	} else {
1793 		switch (sc->sc_type) {
1794 		case WM_T_82575:
1795 		case WM_T_82576:
1796 		case WM_T_82580:
1797 		case WM_T_82580ER:
1798 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
1799 			switch (reg & CTRL_EXT_LINK_MODE_MASK) {
1800 			case CTRL_EXT_LINK_MODE_SGMII:
1801 				aprint_verbose_dev(sc->sc_dev, "SGMII\n");
1802 				sc->sc_flags |= WM_F_SGMII;
1803 				CSR_WRITE(sc, WMREG_CTRL_EXT,
1804 				    reg | CTRL_EXT_I2C_ENA);
1805 				wm_gmii_mediainit(sc, wmp->wmp_product);
1806 				break;
1807 			case CTRL_EXT_LINK_MODE_1000KX:
1808 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
1809 				aprint_verbose_dev(sc->sc_dev, "1000KX or SERDES\n");
1810 				CSR_WRITE(sc, WMREG_CTRL_EXT,
1811 				    reg | CTRL_EXT_I2C_ENA);
1812 				panic("not supported yet\n");
1813 				break;
1814 			case CTRL_EXT_LINK_MODE_GMII:
1815 			default:
1816 				CSR_WRITE(sc, WMREG_CTRL_EXT,
1817 				    reg & ~CTRL_EXT_I2C_ENA);
1818 				wm_gmii_mediainit(sc, wmp->wmp_product);
1819 				break;
1820 			}
1821 			break;
1822 		default:
1823 			if (wmp->wmp_flags & WMP_F_1000X)
1824 				aprint_error_dev(sc->sc_dev,
1825 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
1826 			wm_gmii_mediainit(sc, wmp->wmp_product);
1827 		}
1828 	}
1829 
1830 	ifp = &sc->sc_ethercom.ec_if;
1831 	xname = device_xname(sc->sc_dev);
1832 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
1833 	ifp->if_softc = sc;
1834 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1835 	ifp->if_ioctl = wm_ioctl;
1836 	ifp->if_start = wm_start;
1837 	ifp->if_watchdog = wm_watchdog;
1838 	ifp->if_init = wm_init;
1839 	ifp->if_stop = wm_stop;
1840 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1841 	IFQ_SET_READY(&ifp->if_snd);
1842 
1843 	/* Check for jumbo frame support */
1844 	switch (sc->sc_type) {
1845 	case WM_T_82573:
1846 		/* XXX limited to 9234 if ASPM is disabled */
1847 		wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
1848 		if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
1849 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1850 		break;
1851 	case WM_T_82571:
1852 	case WM_T_82572:
1853 	case WM_T_82574:
1854 	case WM_T_82575:
1855 	case WM_T_82576:
1856 	case WM_T_82580:
1857 	case WM_T_82580ER:
1858 	case WM_T_80003:
1859 	case WM_T_ICH9:
1860 	case WM_T_ICH10:
1861 		/* XXX limited to 9234 */
1862 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1863 		break;
1864 	case WM_T_PCH:
1865 		/* XXX limited to 4096 */
1866 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1867 		break;
1868 	case WM_T_82542_2_0:
1869 	case WM_T_82542_2_1:
1870 	case WM_T_82583:
1871 	case WM_T_ICH8:
1872 		/* No support for jumbo frame */
1873 		break;
1874 	default:
1875 		/* ETHER_MAX_LEN_JUMBO */
1876 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1877 		break;
1878 	}
1879 
1880 	/*
1881 	 * If we're an i82543 or greater, we can support VLANs.
1882 	 */
1883 	if (sc->sc_type >= WM_T_82543)
1884 		sc->sc_ethercom.ec_capabilities |=
1885 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
1886 
1887 	/*
1888 	 * We can perform TCPv4 and UDPv4 checksums in hardware.  Only
1889 	 * on i82543 and later.
1890 	 */
1891 	if (sc->sc_type >= WM_T_82543) {
1892 		ifp->if_capabilities |=
1893 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1894 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1895 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1896 		    IFCAP_CSUM_TCPv6_Tx |
1897 		    IFCAP_CSUM_UDPv6_Tx;
1898 	}
1899 
1900 	/*
1901 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
1902 	 *
1903 	 *	82541GI (8086:1076) ... no
1904 	 *	82572EI (8086:10b9) ... yes
1905 	 */
1906 	if (sc->sc_type >= WM_T_82571) {
1907 		ifp->if_capabilities |=
1908 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1909 	}
1910 
1911 	/*
1912 	 * If we're an i82544 or greater (except i82547), we can do
1913 	 * TCP segmentation offload.
1914 	 */
1915 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
1916 		ifp->if_capabilities |= IFCAP_TSOv4;
1917 	}
1918 
1919 	if (sc->sc_type >= WM_T_82571) {
1920 		ifp->if_capabilities |= IFCAP_TSOv6;
1921 	}
1922 
1923 	/*
1924 	 * Attach the interface.
1925 	 */
1926 	if_attach(ifp);
1927 	ether_ifattach(ifp, enaddr);
1928 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
1929 #if NRND > 0
1930 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
1931 #endif
1932 
1933 #ifdef WM_EVENT_COUNTERS
1934 	/* Attach event counters. */
1935 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1936 	    NULL, xname, "txsstall");
1937 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1938 	    NULL, xname, "txdstall");
1939 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1940 	    NULL, xname, "txfifo_stall");
1941 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1942 	    NULL, xname, "txdw");
1943 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1944 	    NULL, xname, "txqe");
1945 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1946 	    NULL, xname, "rxintr");
1947 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
1948 	    NULL, xname, "linkintr");
1949 
1950 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1951 	    NULL, xname, "rxipsum");
1952 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
1953 	    NULL, xname, "rxtusum");
1954 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1955 	    NULL, xname, "txipsum");
1956 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
1957 	    NULL, xname, "txtusum");
1958 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
1959 	    NULL, xname, "txtusum6");
1960 
1961 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
1962 	    NULL, xname, "txtso");
1963 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
1964 	    NULL, xname, "txtso6");
1965 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
1966 	    NULL, xname, "txtsopain");
1967 
1968 	for (i = 0; i < WM_NTXSEGS; i++) {
1969 		sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
1970 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
1971 		    NULL, xname, wm_txseg_evcnt_names[i]);
1972 	}
1973 
1974 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
1975 	    NULL, xname, "txdrop");
1976 
1977 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
1978 	    NULL, xname, "tu");
1979 
1980 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
1981 	    NULL, xname, "tx_xoff");
1982 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
1983 	    NULL, xname, "tx_xon");
1984 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
1985 	    NULL, xname, "rx_xoff");
1986 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
1987 	    NULL, xname, "rx_xon");
1988 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
1989 	    NULL, xname, "rx_macctl");
1990 #endif /* WM_EVENT_COUNTERS */
1991 
1992 	if (pmf_device_register(self, wm_suspend, wm_resume))
1993 		pmf_class_network_register(self, ifp);
1994 	else
1995 		aprint_error_dev(self, "couldn't establish power handler\n");
1996 
1997 	return;
1998 
1999 	/*
2000 	 * Free any resources we've allocated during the failed attach
2001 	 * attempt.  Do this in reverse order and fall through.
2002 	 */
2003  fail_5:
2004 	for (i = 0; i < WM_NRXDESC; i++) {
2005 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2006 			bus_dmamap_destroy(sc->sc_dmat,
2007 			    sc->sc_rxsoft[i].rxs_dmamap);
2008 	}
2009  fail_4:
2010 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2011 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
2012 			bus_dmamap_destroy(sc->sc_dmat,
2013 			    sc->sc_txsoft[i].txs_dmamap);
2014 	}
2015 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2016  fail_3:
2017 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2018  fail_2:
2019 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2020 	    sc->sc_cd_size);
2021  fail_1:
2022 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2023  fail_0:
2024 	return;
2025 }
2026 
2027 static int
2028 wm_detach(device_t self, int flags __unused)
2029 {
2030 	struct wm_softc *sc = device_private(self);
2031 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2032 	int i, s;
2033 
2034 	s = splnet();
2035 	/* Stop the interface; callouts are stopped inside wm_stop(). */
2036 	wm_stop(ifp, 1);
2037 	splx(s);
2038 
2039 	pmf_device_deregister(self);
2040 
2041 	/* Tell the firmware about the release */
2042 	wm_release_manageability(sc);
2043 	wm_release_hw_control(sc);
2044 
2045 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2046 
2047 	/* Delete all remaining media. */
2048 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2049 
2050 	ether_ifdetach(ifp);
2051 	if_detach(ifp);
2052 
2054 	/* Unload RX dmamaps and free mbufs */
2055 	wm_rxdrain(sc);
2056 
2057 	/* Free DMA maps; this mirrors the failure path at the end of wm_attach() */
2058 	for (i = 0; i < WM_NRXDESC; i++) {
2059 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2060 			bus_dmamap_destroy(sc->sc_dmat,
2061 			    sc->sc_rxsoft[i].rxs_dmamap);
2062 	}
2063 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2064 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
2065 			bus_dmamap_destroy(sc->sc_dmat,
2066 			    sc->sc_txsoft[i].txs_dmamap);
2067 	}
2068 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2069 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2070 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2071 	    sc->sc_cd_size);
2072 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2073 
2074 	/* Disestablish the interrupt handler */
2075 	if (sc->sc_ih != NULL) {
2076 		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2077 		sc->sc_ih = NULL;
2078 	}
2079 
2080 	/* Unmap the registers */
2081 	if (sc->sc_ss) {
2082 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2083 		sc->sc_ss = 0;
2084 	}
2085 
2086 	if (sc->sc_ios) {
2087 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2088 		sc->sc_ios = 0;
2089 	}
2090 
2091 	return 0;
2092 }
2093 
2094 /*
2095  * wm_tx_offload:
2096  *
2097  *	Set up TCP/IP checksumming parameters for the
2098  *	specified packet.
2099  */
2100 static int
2101 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
2102     uint8_t *fieldsp)
2103 {
2104 	struct mbuf *m0 = txs->txs_mbuf;
2105 	struct livengood_tcpip_ctxdesc *t;
2106 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
2107 	uint32_t ipcse;
2108 	struct ether_header *eh;
2109 	int offset, iphl;
2110 	uint8_t fields;
2111 
2112 	/*
2113 	 * XXX It would be nice if the mbuf pkthdr had offset
2114 	 * fields for the protocol headers.
2115 	 */
2116 
2117 	eh = mtod(m0, struct ether_header *);
2118 	switch (ntohs(eh->ether_type)) {
2119 	case ETHERTYPE_IP:
2120 	case ETHERTYPE_IPV6:
2121 		offset = ETHER_HDR_LEN;
2122 		break;
2123 
2124 	case ETHERTYPE_VLAN:
2125 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2126 		break;
2127 
2128 	default:
2129 		/*
2130 		 * Don't support this protocol or encapsulation.
2131 		 */
2132 		*fieldsp = 0;
2133 		*cmdp = 0;
2134 		return 0;
2135 	}
2136 
2137 	if ((m0->m_pkthdr.csum_flags &
2138 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
2139 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2140 	} else {
2141 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2142 	}
2143 	ipcse = offset + iphl - 1;
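	/*
	 * IPCSE is the inclusive byte offset of the last byte of the
	 * IP header; the hardware checksums the span IPCSS..IPCSE.
	 */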
2144 
2145 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
2146 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
2147 	seg = 0;
2148 	fields = 0;
2149 
2150 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2151 		int hlen = offset + iphl;
2152 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2153 
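		/*
		 * The hardware replicates the IP/TCP headers for each
		 * TSO segment, so the IP length field must be zeroed
		 * and the TCP checksum seeded with a pseudo-header sum
		 * that excludes the payload length; the chip then fills
		 * both in per segment.
		 */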
2154 		if (__predict_false(m0->m_len <
2155 				    (hlen + sizeof(struct tcphdr)))) {
2156 			/*
2157 			 * TCP/IP headers are not in the first mbuf; we need
2158 			 * to do this the slow and painful way.  Let's just
2159 			 * hope this doesn't happen very often.
2160 			 */
2161 			struct tcphdr th;
2162 
2163 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2164 
2165 			m_copydata(m0, hlen, sizeof(th), &th);
2166 			if (v4) {
2167 				struct ip ip;
2168 
2169 				m_copydata(m0, offset, sizeof(ip), &ip);
2170 				ip.ip_len = 0;
2171 				m_copyback(m0,
2172 				    offset + offsetof(struct ip, ip_len),
2173 				    sizeof(ip.ip_len), &ip.ip_len);
2174 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2175 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2176 			} else {
2177 				struct ip6_hdr ip6;
2178 
2179 				m_copydata(m0, offset, sizeof(ip6), &ip6);
2180 				ip6.ip6_plen = 0;
2181 				m_copyback(m0,
2182 				    offset + offsetof(struct ip6_hdr, ip6_plen),
2183 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2184 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2185 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2186 			}
2187 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2188 			    sizeof(th.th_sum), &th.th_sum);
2189 
2190 			hlen += th.th_off << 2;
2191 		} else {
2192 			/*
2193 			 * TCP/IP headers are in the first mbuf; we can do
2194 			 * this the easy way.
2195 			 */
2196 			struct tcphdr *th;
2197 
2198 			if (v4) {
2199 				struct ip *ip =
2200 				    (void *)(mtod(m0, char *) + offset);
2201 				th = (void *)(mtod(m0, char *) + hlen);
2202 
2203 				ip->ip_len = 0;
2204 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2205 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2206 			} else {
2207 				struct ip6_hdr *ip6 =
2208 				    (void *)(mtod(m0, char *) + offset);
2209 				th = (void *)(mtod(m0, char *) + hlen);
2210 
2211 				ip6->ip6_plen = 0;
2212 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2213 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2214 			}
2215 			hlen += th->th_off << 2;
2216 		}
2217 
2218 		if (v4) {
2219 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
2220 			cmdlen |= WTX_TCPIP_CMD_IP;
2221 		} else {
2222 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2223 			ipcse = 0;
2224 		}
2225 		cmd |= WTX_TCPIP_CMD_TSE;
2226 		cmdlen |= WTX_TCPIP_CMD_TSE |
2227 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
2228 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
2229 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
2230 	}
2231 
2232 	/*
2233 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
2234 	 * offload feature, if we load the context descriptor, we
2235 	 * MUST provide valid values for IPCSS and TUCSS fields.
2236 	 */
2237 
2238 	ipcs = WTX_TCPIP_IPCSS(offset) |
2239 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
2240 	    WTX_TCPIP_IPCSE(ipcse);
2241 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
2242 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
2243 		fields |= WTX_IXSM;
2244 	}
2245 
2246 	offset += iphl;
2247 
2248 	if (m0->m_pkthdr.csum_flags &
2249 	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
2250 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2251 		fields |= WTX_TXSM;
2252 		tucs = WTX_TCPIP_TUCSS(offset) |
2253 		    WTX_TCPIP_TUCSO(offset +
2254 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
2255 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
2256 	} else if ((m0->m_pkthdr.csum_flags &
2257 	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
2258 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
2259 		fields |= WTX_TXSM;
2260 		tucs = WTX_TCPIP_TUCSS(offset) |
2261 		    WTX_TCPIP_TUCSO(offset +
2262 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
2263 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
2264 	} else {
2265 		/* Just initialize it to a valid TCP context. */
2266 		tucs = WTX_TCPIP_TUCSS(offset) |
2267 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
2268 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
2269 	}
2270 
2271 	/* Fill in the context descriptor. */
2272 	t = (struct livengood_tcpip_ctxdesc *)
2273 	    &sc->sc_txdescs[sc->sc_txnext];
2274 	t->tcpip_ipcs = htole32(ipcs);
2275 	t->tcpip_tucs = htole32(tucs);
2276 	t->tcpip_cmdlen = htole32(cmdlen);
2277 	t->tcpip_seg = htole32(seg);
2278 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
2279 
2280 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
2281 	txs->txs_ndesc++;
2282 
2283 	*cmdp = cmd;
2284 	*fieldsp = fields;
2285 
2286 	return 0;
2287 }
2288 
2289 static void
2290 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
2291 {
2292 	struct mbuf *m;
2293 	int i;
2294 
2295 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
2296 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
2297 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
2298 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
2299 		    m->m_data, m->m_len, m->m_flags);
2300 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
2301 	    i, i == 1 ? "" : "s");
2302 }
2303 
2304 /*
2305  * wm_82547_txfifo_stall:
2306  *
2307  *	Callout used to wait for the 82547 Tx FIFO to drain,
2308  *	reset the FIFO pointers, and restart packet transmission.
2309  */
2310 static void
2311 wm_82547_txfifo_stall(void *arg)
2312 {
2313 	struct wm_softc *sc = arg;
2314 	int s;
2315 
2316 	s = splnet();
2317 
2318 	if (sc->sc_txfifo_stall) {
2319 		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
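		/*
		 * The FIFO has drained once the Tx descriptor ring head
		 * and tail agree and the internal FIFO head and tail
		 * (and their saved copies) match as well.
		 */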
2320 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
2321 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
2322 			/*
2323 			 * Packets have drained.  Stop transmitter, reset
2324 			 * FIFO pointers, restart transmitter, and kick
2325 			 * the packet queue.
2326 			 */
2327 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
2328 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
2329 			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
2330 			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
2331 			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
2332 			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
2333 			CSR_WRITE(sc, WMREG_TCTL, tctl);
2334 			CSR_WRITE_FLUSH(sc);
2335 
2336 			sc->sc_txfifo_head = 0;
2337 			sc->sc_txfifo_stall = 0;
2338 			wm_start(&sc->sc_ethercom.ec_if);
2339 		} else {
2340 			/*
2341 			 * Still waiting for packets to drain; try again in
2342 			 * another tick.
2343 			 */
2344 			callout_schedule(&sc->sc_txfifo_ch, 1);
2345 		}
2346 	}
2347 
2348 	splx(s);
2349 }
2350 
2351 /*
2352  * wm_82547_txfifo_bugchk:
2353  *
2354  *	Check for bug condition in the 82547 Tx FIFO.  We need to
2355  *	prevent enqueueing a packet that would wrap around the end
2356  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
2357  *
2358  *	We do this by checking the amount of space before the end
2359  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
2360  *	the Tx FIFO, wait for all remaining packets to drain, reset
2361  *	the internal FIFO pointers to the beginning, and restart
2362  *	transmission on the interface.
2363  */
2364 #define	WM_FIFO_HDR		0x10
2365 #define	WM_82547_PAD_LEN	0x3e0
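/*
 * Each packet is charged against the FIFO as its length plus one
 * FIFO header, rounded up to WM_FIFO_HDR granularity.
 */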
2366 static int
2367 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2368 {
2369 	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2370 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2371 
2372 	/* Just return if already stalled. */
2373 	if (sc->sc_txfifo_stall)
2374 		return 1;
2375 
2376 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
2377 		/* Stall only occurs in half-duplex mode. */
2378 		goto send_packet;
2379 	}
2380 
2381 	if (len >= WM_82547_PAD_LEN + space) {
2382 		sc->sc_txfifo_stall = 1;
2383 		callout_schedule(&sc->sc_txfifo_ch, 1);
2384 		return 1;
2385 	}
2386 
2387  send_packet:
2388 	sc->sc_txfifo_head += len;
2389 	if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2390 		sc->sc_txfifo_head -= sc->sc_txfifo_size;
2391 
2392 	return 0;
2393 }
2394 
2395 /*
2396  * wm_start:		[ifnet interface function]
2397  *
2398  *	Start packet transmission on the interface.
2399  */
2400 static void
2401 wm_start(struct ifnet *ifp)
2402 {
2403 	struct wm_softc *sc = ifp->if_softc;
2404 	struct mbuf *m0;
2405 	struct m_tag *mtag;
2406 	struct wm_txsoft *txs;
2407 	bus_dmamap_t dmamap;
2408 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2409 	bus_addr_t curaddr;
2410 	bus_size_t seglen, curlen;
2411 	uint32_t cksumcmd;
2412 	uint8_t cksumfields;
2413 
2414 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2415 		return;
2416 
2417 	/*
2418 	 * Remember the previous number of free descriptors.
2419 	 */
2420 	ofree = sc->sc_txfree;
2421 
2422 	/*
2423 	 * Loop through the send queue, setting up transmit descriptors
2424 	 * until we drain the queue, or use up all available transmit
2425 	 * descriptors.
2426 	 */
2427 	for (;;) {
2428 		/* Grab a packet off the queue. */
2429 		IFQ_POLL(&ifp->if_snd, m0);
2430 		if (m0 == NULL)
2431 			break;
2432 
2433 		DPRINTF(WM_DEBUG_TX,
2434 		    ("%s: TX: have packet to transmit: %p\n",
2435 		    device_xname(sc->sc_dev), m0));
2436 
2437 		/* Get a work queue entry. */
2438 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2439 			wm_txintr(sc);
2440 			if (sc->sc_txsfree == 0) {
2441 				DPRINTF(WM_DEBUG_TX,
2442 				    ("%s: TX: no free job descriptors\n",
2443 					device_xname(sc->sc_dev)));
2444 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2445 				break;
2446 			}
2447 		}
2448 
2449 		txs = &sc->sc_txsoft[sc->sc_txsnext];
2450 		dmamap = txs->txs_dmamap;
2451 
2452 		use_tso = (m0->m_pkthdr.csum_flags &
2453 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2454 
2455 		/*
2456 		 * So says the Linux driver:
2457 		 * The controller does a simple calculation to make sure
2458 		 * there is enough room in the FIFO before initiating the
2459 		 * DMA for each buffer.  The calc is:
2460 		 *	4 = ceil(buffer len / MSS)
2461 		 * To make sure we don't overrun the FIFO, adjust the max
2462 		 * buffer len if the MSS drops.
2463 		 */
2464 		dmamap->dm_maxsegsz =
2465 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2466 		    ? m0->m_pkthdr.segsz << 2
2467 		    : WTX_MAX_LEN;
2468 
2469 		/*
2470 		 * Load the DMA map.  If this fails, the packet either
2471 		 * didn't fit in the allotted number of segments, or we
2472 		 * were short on resources.  For the too-many-segments
2473 		 * case, we simply report an error and drop the packet,
2474 		 * since we can't sanely copy a jumbo packet to a single
2475 		 * buffer.
2476 		 */
2477 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2478 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2479 		if (error) {
2480 			if (error == EFBIG) {
2481 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2482 				log(LOG_ERR, "%s: Tx packet consumes too many "
2483 				    "DMA segments, dropping...\n",
2484 				    device_xname(sc->sc_dev));
2485 				IFQ_DEQUEUE(&ifp->if_snd, m0);
2486 				wm_dump_mbuf_chain(sc, m0);
2487 				m_freem(m0);
2488 				continue;
2489 			}
2490 			/*
2491 			 * Short on resources, just stop for now.
2492 			 */
2493 			DPRINTF(WM_DEBUG_TX,
2494 			    ("%s: TX: dmamap load failed: %d\n",
2495 			    device_xname(sc->sc_dev), error));
2496 			break;
2497 		}
2498 
2499 		segs_needed = dmamap->dm_nsegs;
2500 		if (use_tso) {
2501 			/* For sentinel descriptor; see below. */
2502 			segs_needed++;
2503 		}
2504 
2505 		/*
2506 		 * Ensure we have enough descriptors free to describe
2507 		 * the packet.  Note, we always reserve one descriptor
2508 		 * at the end of the ring due to the semantics of the
2509 		 * TDT register, plus one more in the event we need
2510 		 * to load offload context.
2511 		 */
2512 		if (segs_needed > sc->sc_txfree - 2) {
2513 			/*
2514 			 * Not enough free descriptors to transmit this
2515 			 * packet.  We haven't committed anything yet,
2516 			 * so just unload the DMA map, put the packet
2517 			 * back on the queue, and punt.  Notify the upper
2518 			 * layer that there are no more slots left.
2519 			 */
2520 			DPRINTF(WM_DEBUG_TX,
2521 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
2522 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
2523 			    segs_needed, sc->sc_txfree - 1));
2524 			ifp->if_flags |= IFF_OACTIVE;
2525 			bus_dmamap_unload(sc->sc_dmat, dmamap);
2526 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2527 			break;
2528 		}
2529 
2530 		/*
2531 		 * Check for 82547 Tx FIFO bug.  We need to do this
2532 		 * once we know we can transmit the packet, since we
2533 		 * do some internal FIFO space accounting here.
2534 		 */
2535 		if (sc->sc_type == WM_T_82547 &&
2536 		    wm_82547_txfifo_bugchk(sc, m0)) {
2537 			DPRINTF(WM_DEBUG_TX,
2538 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
2539 			    device_xname(sc->sc_dev)));
2540 			ifp->if_flags |= IFF_OACTIVE;
2541 			bus_dmamap_unload(sc->sc_dmat, dmamap);
2542 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2543 			break;
2544 		}
2545 
2546 		IFQ_DEQUEUE(&ifp->if_snd, m0);
2547 
2548 		/*
2549 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2550 		 */
2551 
2552 		DPRINTF(WM_DEBUG_TX,
2553 		    ("%s: TX: packet has %d (%d) DMA segments\n",
2554 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2555 
2556 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2557 
2558 		/*
2559 		 * Store a pointer to the packet so that we can free it
2560 		 * later.
2561 		 *
2562 		 * Initially, we take the number of descriptors the packet
2563 		 * uses to be the number of DMA segments.  This may be
2564 		 * incremented by 1 if we do checksum offload (a descriptor
2565 		 * is used to set the checksum context).
2566 		 */
2567 		txs->txs_mbuf = m0;
2568 		txs->txs_firstdesc = sc->sc_txnext;
2569 		txs->txs_ndesc = segs_needed;
2570 
2571 		/* Set up offload parameters for this packet. */
2572 		if (m0->m_pkthdr.csum_flags &
2573 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
2574 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2575 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2576 			if (wm_tx_offload(sc, txs, &cksumcmd,
2577 					  &cksumfields) != 0) {
2578 				/* Error message already displayed. */
2579 				bus_dmamap_unload(sc->sc_dmat, dmamap);
2580 				continue;
2581 			}
2582 		} else {
2583 			cksumcmd = 0;
2584 			cksumfields = 0;
2585 		}
2586 
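		/*
		 * Every data descriptor asks for FCS insertion (IFCS)
		 * and interrupt delay (IDE).
		 */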
2587 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2588 
2589 		/* Sync the DMA map. */
2590 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2591 		    BUS_DMASYNC_PREWRITE);
2592 
2593 		/*
2594 		 * Initialize the transmit descriptor.
2595 		 */
2596 		for (nexttx = sc->sc_txnext, seg = 0;
2597 		     seg < dmamap->dm_nsegs; seg++) {
2598 			for (seglen = dmamap->dm_segs[seg].ds_len,
2599 			     curaddr = dmamap->dm_segs[seg].ds_addr;
2600 			     seglen != 0;
2601 			     curaddr += curlen, seglen -= curlen,
2602 			     nexttx = WM_NEXTTX(sc, nexttx)) {
2603 				curlen = seglen;
2604 
2605 				/*
2606 				 * So says the Linux driver:
2607 				 * Work around for premature descriptor
2608 				 * write-backs in TSO mode.  Append a
2609 				 * 4-byte sentinel descriptor.
2610 				 */
2611 				if (use_tso &&
2612 				    seg == dmamap->dm_nsegs - 1 &&
2613 				    curlen > 8)
2614 					curlen -= 4;
2615 
2616 				wm_set_dma_addr(
2617 				    &sc->sc_txdescs[nexttx].wtx_addr,
2618 				    curaddr);
2619 				sc->sc_txdescs[nexttx].wtx_cmdlen =
2620 				    htole32(cksumcmd | curlen);
2621 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2622 				    0;
2623 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2624 				    cksumfields;
2625 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2626 				lasttx = nexttx;
2627 
2628 				DPRINTF(WM_DEBUG_TX,
2629 				    ("%s: TX: desc %d: low %#" PRIxPADDR ", "
2630 				     "len %#04zx\n",
2631 				    device_xname(sc->sc_dev), nexttx,
2632 				    curaddr & 0xffffffffUL, curlen));
2633 			}
2634 		}
2635 
2636 		KASSERT(lasttx != -1);
2637 
2638 		/*
2639 		 * Set up the command byte on the last descriptor of
2640 		 * the packet.  If we're in the interrupt delay window,
2641 		 * delay the interrupt.
2642 		 */
2643 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
2644 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
2645 
2646 		/*
2647 		 * If VLANs are enabled and the packet has a VLAN tag, set
2648 		 * up the descriptor to encapsulate the packet for us.
2649 		 *
2650 		 * This is only valid on the last descriptor of the packet.
2651 		 */
2652 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2653 			sc->sc_txdescs[lasttx].wtx_cmdlen |=
2654 			    htole32(WTX_CMD_VLE);
2655 			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2656 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2657 		}
2658 
2659 		txs->txs_lastdesc = lasttx;
2660 
2661 		DPRINTF(WM_DEBUG_TX,
2662 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
2663 		    device_xname(sc->sc_dev),
2664 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2665 
2666 		/* Sync the descriptors we're using. */
2667 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2668 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2669 
2670 		/* Give the packet to the chip. */
2671 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2672 
2673 		DPRINTF(WM_DEBUG_TX,
2674 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2675 
2676 		DPRINTF(WM_DEBUG_TX,
2677 		    ("%s: TX: finished transmitting packet, job %d\n",
2678 		    device_xname(sc->sc_dev), sc->sc_txsnext));
2679 
2680 		/* Advance the tx pointer. */
2681 		sc->sc_txfree -= txs->txs_ndesc;
2682 		sc->sc_txnext = nexttx;
2683 
2684 		sc->sc_txsfree--;
2685 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2686 
2687 		/* Pass the packet to any BPF listeners. */
2688 		bpf_mtap(ifp, m0);
2689 	}
2690 
2691 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2692 		/* No more slots; notify upper layer. */
2693 		ifp->if_flags |= IFF_OACTIVE;
2694 	}
2695 
2696 	if (sc->sc_txfree != ofree) {
2697 		/* Set a watchdog timer in case the chip flakes out. */
2698 		ifp->if_timer = 5;
2699 	}
2700 }
2701 
2702 /*
2703  * wm_watchdog:		[ifnet interface function]
2704  *
2705  *	Watchdog timer handler.
2706  */
2707 static void
2708 wm_watchdog(struct ifnet *ifp)
2709 {
2710 	struct wm_softc *sc = ifp->if_softc;
2711 
2712 	/*
2713 	 * Since we're using delayed interrupts, sweep up
2714 	 * before we report an error.
2715 	 */
2716 	wm_txintr(sc);
2717 
2718 	if (sc->sc_txfree != WM_NTXDESC(sc)) {
2719 		log(LOG_ERR,
2720 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2721 		    device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2722 		    sc->sc_txnext);
2723 		ifp->if_oerrors++;
2724 
2725 		/* Reset the interface. */
2726 		(void) wm_init(ifp);
2727 	}
2728 
2729 	/* Try to get more packets going. */
2730 	wm_start(ifp);
2731 }
2732 
2733 static int
2734 wm_ifflags_cb(struct ethercom *ec)
2735 {
2736 	struct ifnet *ifp = &ec->ec_if;
2737 	struct wm_softc *sc = ifp->if_softc;
2738 	int change = ifp->if_flags ^ sc->sc_if_flags;
2739 
2740 	if (change != 0)
2741 		sc->sc_if_flags = ifp->if_flags;
2742 
2743 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
2744 		return ENETRESET;
2745 
2746 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2747 		wm_set_filter(sc);
2748 
2749 	wm_set_vlan(sc);
2750 
2751 	return 0;
2752 }
2753 
2754 /*
2755  * wm_ioctl:		[ifnet interface function]
2756  *
2757  *	Handle control requests from the operator.
2758  */
2759 static int
2760 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2761 {
2762 	struct wm_softc *sc = ifp->if_softc;
2763 	struct ifreq *ifr = (struct ifreq *) data;
2764 	struct ifaddr *ifa = (struct ifaddr *)data;
2765 	struct sockaddr_dl *sdl;
2766 	int s, error;
2767 
2768 	s = splnet();
2769 
2770 	switch (cmd) {
2771 	case SIOCSIFMEDIA:
2772 	case SIOCGIFMEDIA:
2773 		/* Flow control requires full-duplex mode. */
2774 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2775 		    (ifr->ifr_media & IFM_FDX) == 0)
2776 			ifr->ifr_media &= ~IFM_ETH_FMASK;
2777 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2778 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2779 				/* We can do both TXPAUSE and RXPAUSE. */
2780 				ifr->ifr_media |=
2781 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2782 			}
2783 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2784 		}
2785 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2786 		break;
2787 	case SIOCINITIFADDR:
2788 		if (ifa->ifa_addr->sa_family == AF_LINK) {
2789 			sdl = satosdl(ifp->if_dl->ifa_addr);
2790 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2791 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2792 			/* unicast address is first multicast entry */
2793 			wm_set_filter(sc);
2794 			error = 0;
2795 			break;
2796 		}
2797 		/* Fall through for rest */
2798 	default:
2799 		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
2800 			break;
2801 
2802 		error = 0;
2803 
2804 		if (cmd == SIOCSIFCAP)
2805 			error = (*ifp->if_init)(ifp);
2806 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2807 			;
2808 		else if (ifp->if_flags & IFF_RUNNING) {
2809 			/*
2810 			 * Multicast list has changed; set the hardware filter
2811 			 * accordingly.
2812 			 */
2813 			wm_set_filter(sc);
2814 		}
2815 		break;
2816 	}
2817 
2818 	/* Try to get more packets going. */
2819 	wm_start(ifp);
2820 
2821 	splx(s);
2822 	return error;
2823 }
2824 
2825 /*
2826  * wm_intr:
2827  *
2828  *	Interrupt service routine.
2829  */
2830 static int
2831 wm_intr(void *arg)
2832 {
2833 	struct wm_softc *sc = arg;
2834 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2835 	uint32_t icr;
2836 	int handled = 0;
2837 
2838 	while (1 /* CONSTCOND */) {
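		/* Reading ICR acknowledges and clears the asserted causes. */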
2839 		icr = CSR_READ(sc, WMREG_ICR);
2840 		if ((icr & sc->sc_icr) == 0)
2841 			break;
2842 #if 0 /*NRND > 0*/
2843 		if (RND_ENABLED(&sc->rnd_source))
2844 			rnd_add_uint32(&sc->rnd_source, icr);
2845 #endif
2846 
2847 		handled = 1;
2848 
2849 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2850 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
2851 			DPRINTF(WM_DEBUG_RX,
2852 			    ("%s: RX: got Rx intr 0x%08x\n",
2853 			    device_xname(sc->sc_dev),
2854 			    icr & (ICR_RXDMT0|ICR_RXT0)));
2855 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
2856 		}
2857 #endif
2858 		wm_rxintr(sc);
2859 
2860 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2861 		if (icr & ICR_TXDW) {
2862 			DPRINTF(WM_DEBUG_TX,
2863 			    ("%s: TX: got TXDW interrupt\n",
2864 			    device_xname(sc->sc_dev)));
2865 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
2866 		}
2867 #endif
2868 		wm_txintr(sc);
2869 
2870 		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
2871 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
2872 			wm_linkintr(sc, icr);
2873 		}
2874 
2875 		if (icr & ICR_RXO) {
2876 #if defined(WM_DEBUG)
2877 			log(LOG_WARNING, "%s: Receive overrun\n",
2878 			    device_xname(sc->sc_dev));
2879 #endif /* defined(WM_DEBUG) */
2880 		}
2881 	}
2882 
2883 	if (handled) {
2884 		/* Try to get more packets going. */
2885 		wm_start(ifp);
2886 	}
2887 
2888 	return handled;
2889 }
2890 
2891 /*
2892  * wm_txintr:
2893  *
2894  *	Helper; handle transmit interrupts.
2895  */
2896 static void
2897 wm_txintr(struct wm_softc *sc)
2898 {
2899 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2900 	struct wm_txsoft *txs;
2901 	uint8_t status;
2902 	int i;
2903 
2904 	ifp->if_flags &= ~IFF_OACTIVE;
2905 
2906 	/*
2907 	 * Go through the Tx list and free mbufs for those
2908 	 * frames which have been transmitted.
2909 	 */
2910 	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
2911 	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
2912 		txs = &sc->sc_txsoft[i];
2913 
2914 		DPRINTF(WM_DEBUG_TX,
2915 		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
2916 
2917 		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
2918 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2919 
2920 		status =
2921 		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
2922 		if ((status & WTX_ST_DD) == 0) {
2923 			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
2924 			    BUS_DMASYNC_PREREAD);
2925 			break;
2926 		}
2927 
2928 		DPRINTF(WM_DEBUG_TX,
2929 		    ("%s: TX: job %d done: descs %d..%d\n",
2930 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
2931 		    txs->txs_lastdesc));
2932 
2933 		/*
2934 		 * XXX We should probably be using the statistics
2935 		 * XXX registers, but I don't know if they exist
2936 		 * XXX on chips before the i82544.
2937 		 */
2938 
2939 #ifdef WM_EVENT_COUNTERS
2940 		if (status & WTX_ST_TU)
2941 			WM_EVCNT_INCR(&sc->sc_ev_tu);
2942 #endif /* WM_EVENT_COUNTERS */
2943 
2944 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
2945 			ifp->if_oerrors++;
2946 			if (status & WTX_ST_LC)
2947 				log(LOG_WARNING, "%s: late collision\n",
2948 				    device_xname(sc->sc_dev));
2949 			else if (status & WTX_ST_EC) {
2950 				ifp->if_collisions += 16;
2951 				log(LOG_WARNING, "%s: excessive collisions\n",
2952 				    device_xname(sc->sc_dev));
2953 			}
2954 		} else
2955 			ifp->if_opackets++;
2956 
2957 		sc->sc_txfree += txs->txs_ndesc;
2958 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
2959 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2960 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2961 		m_freem(txs->txs_mbuf);
2962 		txs->txs_mbuf = NULL;
2963 	}
2964 
2965 	/* Update the dirty transmit buffer pointer. */
2966 	sc->sc_txsdirty = i;
2967 	DPRINTF(WM_DEBUG_TX,
2968 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
2969 
2970 	/*
2971 	 * If there are no more pending transmissions, cancel the watchdog
2972 	 * timer.
2973 	 */
2974 	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
2975 		ifp->if_timer = 0;
2976 }
2977 
2978 /*
2979  * wm_rxintr:
2980  *
2981  *	Helper; handle receive interrupts.
2982  */
2983 static void
2984 wm_rxintr(struct wm_softc *sc)
2985 {
2986 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2987 	struct wm_rxsoft *rxs;
2988 	struct mbuf *m;
2989 	int i, len;
2990 	uint8_t status, errors;
2991 	uint16_t vlantag;
2992 
2993 	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
2994 		rxs = &sc->sc_rxsoft[i];
2995 
2996 		DPRINTF(WM_DEBUG_RX,
2997 		    ("%s: RX: checking descriptor %d\n",
2998 		    device_xname(sc->sc_dev), i));
2999 
3000 		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3001 
3002 		status = sc->sc_rxdescs[i].wrx_status;
3003 		errors = sc->sc_rxdescs[i].wrx_errors;
3004 		len = le16toh(sc->sc_rxdescs[i].wrx_len);
3005 		vlantag = sc->sc_rxdescs[i].wrx_special;
3006 
3007 		if ((status & WRX_ST_DD) == 0) {
3008 			/*
3009 			 * We have processed all of the receive descriptors.
3010 			 */
3011 			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
3012 			break;
3013 		}
3014 
3015 		if (__predict_false(sc->sc_rxdiscard)) {
3016 			DPRINTF(WM_DEBUG_RX,
3017 			    ("%s: RX: discarding contents of descriptor %d\n",
3018 			    device_xname(sc->sc_dev), i));
3019 			WM_INIT_RXDESC(sc, i);
3020 			if (status & WRX_ST_EOP) {
3021 				/* Reset our state. */
3022 				DPRINTF(WM_DEBUG_RX,
3023 				    ("%s: RX: resetting rxdiscard -> 0\n",
3024 				    device_xname(sc->sc_dev)));
3025 				sc->sc_rxdiscard = 0;
3026 			}
3027 			continue;
3028 		}
3029 
3030 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3031 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3032 
3033 		m = rxs->rxs_mbuf;
3034 
3035 		/*
3036 		 * Add a new receive buffer to the ring, unless of
3037 		 * course the length is zero. Treat the latter as a
3038 		 * failed mapping.
3039 		 */
3040 		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
3041 			/*
3042 			 * Failed, throw away what we've done so
3043 			 * far, and discard the rest of the packet.
3044 			 */
3045 			ifp->if_ierrors++;
3046 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3047 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3048 			WM_INIT_RXDESC(sc, i);
3049 			if ((status & WRX_ST_EOP) == 0)
3050 				sc->sc_rxdiscard = 1;
3051 			if (sc->sc_rxhead != NULL)
3052 				m_freem(sc->sc_rxhead);
3053 			WM_RXCHAIN_RESET(sc);
3054 			DPRINTF(WM_DEBUG_RX,
3055 			    ("%s: RX: Rx buffer allocation failed, "
3056 			    "dropping packet%s\n", device_xname(sc->sc_dev),
3057 			    sc->sc_rxdiscard ? " (discard)" : ""));
3058 			continue;
3059 		}
3060 
3061 		m->m_len = len;
3062 		sc->sc_rxlen += len;
3063 		DPRINTF(WM_DEBUG_RX,
3064 		    ("%s: RX: buffer at %p len %d\n",
3065 		    device_xname(sc->sc_dev), m->m_data, len));
3066 
3067 		/*
3068 		 * If this is not the end of the packet, keep
3069 		 * looking.
3070 		 */
3071 		if ((status & WRX_ST_EOP) == 0) {
3072 			WM_RXCHAIN_LINK(sc, m);
3073 			DPRINTF(WM_DEBUG_RX,
3074 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
3075 			    device_xname(sc->sc_dev), sc->sc_rxlen));
3076 			continue;
3077 		}
3078 
3079 		/*
3080 		 * Okay, we have the entire packet now.  The chip is
3081 		 * configured to include the FCS (not all chips can
3082 		 * be configured to strip it), so we need to trim it.
3083 		 * May need to adjust length of previous mbuf in the
3084 		 * chain if the current mbuf is too short.
3085 		 */
3086 		if (m->m_len < ETHER_CRC_LEN) {
3087 			sc->sc_rxtail->m_len -= (ETHER_CRC_LEN - m->m_len);
3088 			m->m_len = 0;
3089 		} else {
3090 			m->m_len -= ETHER_CRC_LEN;
3091 		}
3092 		len = sc->sc_rxlen - ETHER_CRC_LEN;
3093 
3094 		WM_RXCHAIN_LINK(sc, m);
3095 
3096 		*sc->sc_rxtailp = NULL;
3097 		m = sc->sc_rxhead;
3098 
3099 		WM_RXCHAIN_RESET(sc);
3100 
3101 		DPRINTF(WM_DEBUG_RX,
3102 		    ("%s: RX: have entire packet, len -> %d\n",
3103 		    device_xname(sc->sc_dev), len));
3104 
3105 		/*
3106 		 * If an error occurred, update stats and drop the packet.
3107 		 */
3108 		if (errors &
3109 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
3110 			if (errors & WRX_ER_SE)
3111 				log(LOG_WARNING, "%s: symbol error\n",
3112 				    device_xname(sc->sc_dev));
3113 			else if (errors & WRX_ER_SEQ)
3114 				log(LOG_WARNING, "%s: receive sequence error\n",
3115 				    device_xname(sc->sc_dev));
3116 			else if (errors & WRX_ER_CE)
3117 				log(LOG_WARNING, "%s: CRC error\n",
3118 				    device_xname(sc->sc_dev));
3119 			m_freem(m);
3120 			continue;
3121 		}
3122 
3123 		/*
3124 		 * No errors.  Receive the packet.
3125 		 */
3126 		m->m_pkthdr.rcvif = ifp;
3127 		m->m_pkthdr.len = len;
3128 
3129 		/*
3130 		 * If VLANs are enabled, VLAN packets have been unwrapped
3131 		 * for us.  Associate the tag with the packet.
3132 		 */
3133 		if ((status & WRX_ST_VP) != 0) {
3134 			VLAN_INPUT_TAG(ifp, m,
3135 			    le16toh(vlantag),
3136 			    continue);
3137 		}
3138 
3139 		/*
3140 		 * Set up checksum info for this packet.
3141 		 */
3142 		if ((status & WRX_ST_IXSM) == 0) {
3143 			if (status & WRX_ST_IPCS) {
3144 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
3145 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
3146 				if (errors & WRX_ER_IPE)
3147 					m->m_pkthdr.csum_flags |=
3148 					    M_CSUM_IPv4_BAD;
3149 			}
3150 			if (status & WRX_ST_TCPCS) {
3151 				/*
3152 				 * Note: we don't know if this was TCP or UDP,
3153 				 * so we just set both bits, and expect the
3154 				 * upper layers to deal.
3155 				 */
3156 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
3157 				m->m_pkthdr.csum_flags |=
3158 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
3159 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
3160 				if (errors & WRX_ER_TCPE)
3161 					m->m_pkthdr.csum_flags |=
3162 					    M_CSUM_TCP_UDP_BAD;
3163 			}
3164 		}
3165 
3166 		ifp->if_ipackets++;
3167 
3168 		/* Pass this up to any BPF listeners. */
3169 		bpf_mtap(ifp, m);
3170 
3171 		/* Pass it on. */
3172 		(*ifp->if_input)(ifp, m);
3173 	}
3174 
3175 	/* Update the receive pointer. */
3176 	sc->sc_rxptr = i;
3177 
3178 	DPRINTF(WM_DEBUG_RX,
3179 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
3180 }
3181 
3182 /*
3183  * wm_linkintr_gmii:
3184  *
3185  *	Helper; handle link interrupts for GMII.
3186  */
3187 static void
3188 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
3189 {
3190 
3191 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3192 		__func__));
3193 
3194 	if (icr & ICR_LSC) {
3195 		DPRINTF(WM_DEBUG_LINK,
3196 		    ("%s: LINK: LSC -> mii_tick\n",
3197 			device_xname(sc->sc_dev)));
3198 		mii_tick(&sc->sc_mii);
3199 		if (sc->sc_type == WM_T_82543) {
3200 			int miistatus, active;
3201 
3202 			/*
3203 			 * With the 82543, we need to force the MAC's
3204 			 * speed and duplex to match the PHY's current
3205 			 * configuration.
3206 			 */
3207 			miistatus = sc->sc_mii.mii_media_status;
3208 
3209 			if (miistatus & IFM_ACTIVE) {
3210 				active = sc->sc_mii.mii_media_active;
3211 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
3212 				switch (IFM_SUBTYPE(active)) {
3213 				case IFM_10_T:
3214 					sc->sc_ctrl |= CTRL_SPEED_10;
3215 					break;
3216 				case IFM_100_TX:
3217 					sc->sc_ctrl |= CTRL_SPEED_100;
3218 					break;
3219 				case IFM_1000_T:
3220 					sc->sc_ctrl |= CTRL_SPEED_1000;
3221 					break;
3222 				default:
3223 					/*
3224 					 * fiber?
3225 					 * Should not enter here.
3226 					 */
3227 					printf("unknown media (%x)\n",
3228 					    active);
3229 					break;
3230 				}
3231 				if (active & IFM_FDX)
3232 					sc->sc_ctrl |= CTRL_FD;
3233 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3234 			}
3235 		} else if ((sc->sc_type == WM_T_ICH8)
3236 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
3237 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
3238 		} else if (sc->sc_type == WM_T_PCH) {
3239 			wm_k1_gig_workaround_hv(sc,
3240 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
3241 		}
3242 
3243 		if ((sc->sc_phytype == WMPHY_82578)
3244 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
3245 			== IFM_1000_T)) {
3246 
3247 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
3248 				delay(200*1000); /* XXX too big */
3249 
3250 				/* Link stall fix for link up */
3251 				wm_gmii_hv_writereg(sc->sc_dev, 1,
3252 				    HV_MUX_DATA_CTRL,
3253 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
3254 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
3255 				wm_gmii_hv_writereg(sc->sc_dev, 1,
3256 				    HV_MUX_DATA_CTRL,
3257 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
3258 			}
3259 		}
3260 	} else if (icr & ICR_RXSEQ) {
3261 		DPRINTF(WM_DEBUG_LINK,
3262 		    ("%s: LINK: Receive sequence error\n",
3263 			device_xname(sc->sc_dev)));
3264 	}
3265 }
3266 
3267 /*
3268  * wm_linkintr_tbi:
3269  *
3270  *	Helper; handle link interrupts for TBI mode.
3271  */
3272 static void
3273 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
3274 {
3275 	uint32_t status;
3276 
3277 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3278 		__func__));
3279 
3280 	status = CSR_READ(sc, WMREG_STATUS);
3281 	if (icr & ICR_LSC) {
3282 		if (status & STATUS_LU) {
3283 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
3284 			    device_xname(sc->sc_dev),
3285 			    (status & STATUS_FD) ? "FDX" : "HDX"));
3286 			/*
3287 			 * NOTE: CTRL will update TFCE and RFCE automatically,
3288 			 * so we should update sc->sc_ctrl
3289 			 */
3290 
3291 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3292 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3293 			sc->sc_fcrtl &= ~FCRTL_XONE;
3294 			if (status & STATUS_FD)
3295 				sc->sc_tctl |=
3296 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3297 			else
3298 				sc->sc_tctl |=
3299 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3300 			if (sc->sc_ctrl & CTRL_TFCE)
3301 				sc->sc_fcrtl |= FCRTL_XONE;
3302 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3303 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3304 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
3305 				      sc->sc_fcrtl);
3306 			sc->sc_tbi_linkup = 1;
3307 		} else {
3308 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
3309 			    device_xname(sc->sc_dev)));
3310 			sc->sc_tbi_linkup = 0;
3311 		}
3312 		wm_tbi_set_linkled(sc);
3313 	} else if (icr & ICR_RXCFG) {
3314 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
3315 		    device_xname(sc->sc_dev)));
3316 		sc->sc_tbi_nrxcfg++;
3317 		wm_check_for_link(sc);
3318 	} else if (icr & ICR_RXSEQ) {
3319 		DPRINTF(WM_DEBUG_LINK,
3320 		    ("%s: LINK: Receive sequence error\n",
3321 		    device_xname(sc->sc_dev)));
3322 	}
3323 }
3324 
3325 /*
3326  * wm_linkintr:
3327  *
3328  *	Helper; handle link interrupts.
3329  */
3330 static void
3331 wm_linkintr(struct wm_softc *sc, uint32_t icr)
3332 {
3333 
3334 	if (sc->sc_flags & WM_F_HAS_MII)
3335 		wm_linkintr_gmii(sc, icr);
3336 	else
3337 		wm_linkintr_tbi(sc, icr);
3338 }
3339 
3340 /*
3341  * wm_tick:
3342  *
3343  *	One second timer, used to check link status, sweep up
3344  *	completed transmit jobs, etc.
3345  */
3346 static void
3347 wm_tick(void *arg)
3348 {
3349 	struct wm_softc *sc = arg;
3350 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3351 	int s;
3352 
3353 	s = splnet();
3354 
3355 	if (sc->sc_type >= WM_T_82542_2_1) {
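	/*
	 * The statistics registers are clear-on-read, so each read
	 * below picks up the count accumulated since the last tick.
	 */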
3356 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3357 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3358 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3359 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3360 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3361 	}
3362 
3363 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3364 	ifp->if_ierrors += 0ULL /* ensure quad_t */
3365 	    + CSR_READ(sc, WMREG_CRCERRS)
3366 	    + CSR_READ(sc, WMREG_ALGNERRC)
3367 	    + CSR_READ(sc, WMREG_SYMERRC)
3368 	    + CSR_READ(sc, WMREG_RXERRC)
3369 	    + CSR_READ(sc, WMREG_SEC)
3370 	    + CSR_READ(sc, WMREG_CEXTERR)
3371 	    + CSR_READ(sc, WMREG_RLEC);
3372 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
3373 
3374 	if (sc->sc_flags & WM_F_HAS_MII)
3375 		mii_tick(&sc->sc_mii);
3376 	else
3377 		wm_tbi_check_link(sc);
3378 
3379 	splx(s);
3380 
3381 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3382 }
3383 
3384 /*
3385  * wm_reset:
3386  *
3387  *	Reset the i82542 chip.
3388  */
3389 static void
3390 wm_reset(struct wm_softc *sc)
3391 {
3392 	int phy_reset = 0;
3393 	uint32_t reg, mask;
3394 	int i;
3395 
3396 	/*
3397 	 * Allocate on-chip memory according to the MTU size.
3398 	 * The Packet Buffer Allocation register must be written
3399 	 * before the chip is reset.
3400 	 */
3401 	switch (sc->sc_type) {
3402 	case WM_T_82547:
3403 	case WM_T_82547_2:
3404 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3405 		    PBA_22K : PBA_30K;
3406 		sc->sc_txfifo_head = 0;
3407 		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3408 		sc->sc_txfifo_size =
3409 		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3410 		sc->sc_txfifo_stall = 0;
3411 		break;
3412 	case WM_T_82571:
3413 	case WM_T_82572:
3414 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
3415 	case WM_T_80003:
3416 		sc->sc_pba = PBA_32K;
3417 		break;
3418 	case WM_T_82580:
3419 	case WM_T_82580ER:
3420 		sc->sc_pba = PBA_35K;
3421 		break;
3422 	case WM_T_82576:
3423 		sc->sc_pba = PBA_64K;
3424 		break;
3425 	case WM_T_82573:
3426 		sc->sc_pba = PBA_12K;
3427 		break;
3428 	case WM_T_82574:
3429 	case WM_T_82583:
3430 		sc->sc_pba = PBA_20K;
3431 		break;
3432 	case WM_T_ICH8:
3433 		sc->sc_pba = PBA_8K;
3434 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3435 		break;
3436 	case WM_T_ICH9:
3437 	case WM_T_ICH10:
3438 	case WM_T_PCH:
3439 		sc->sc_pba = PBA_10K;
3440 		break;
3441 	default:
3442 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3443 		    PBA_40K : PBA_48K;
3444 		break;
3445 	}
3446 	CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
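
	/*
	 * Worked example for the 82547 case above (a sketch; assumes the
	 * PBA_* constants count 1 KB blocks and PBA_BYTE_SHIFT converts
	 * blocks to bytes): with a standard MTU, sc_pba = PBA_30K, so the
	 * Rx side keeps 30 KB of the 40 KB packet buffer and the Tx FIFO
	 * gets the remaining (PBA_40K - PBA_30K) blocks, i.e.
	 * sc_txfifo_size = 10 KB starting at sc_txfifo_addr.
	 */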
3447 
3448 	/* Prevent the PCI-E bus from sticking */
3449 	if (sc->sc_flags & WM_F_PCIE) {
3450 		int timeout = 800;
3451 
3452 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
3453 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3454 
3455 		while (timeout--) {
3456 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
3457 				break;
3458 			delay(100);
3459 		}
3460 	}
3461 
3462 	/* Set the completion timeout for the interface */
3463 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
3464 		wm_set_pcie_completion_timeout(sc);
3465 
3466 	/* Clear interrupt */
3467 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3468 
3469 	/* Stop the transmit and receive processes. */
3470 	CSR_WRITE(sc, WMREG_RCTL, 0);
3471 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3472 	sc->sc_rctl &= ~RCTL_EN;
3473 
3474 	/* XXX set_tbi_sbp_82543() */
3475 
3476 	delay(10*1000);
3477 
3478 	/* Must acquire the MDIO ownership before MAC reset */
3479 	switch (sc->sc_type) {
3480 	case WM_T_82573:
3481 	case WM_T_82574:
3482 	case WM_T_82583:
3483 		i = 0;
3484 		reg = CSR_READ(sc, WMREG_EXTCNFCTR)
3485 		    | EXTCNFCTR_MDIO_SW_OWNERSHIP;
3486 		do {
3487 			CSR_WRITE(sc, WMREG_EXTCNFCTR,
3488 			    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
3489 			reg = CSR_READ(sc, WMREG_EXTCNFCTR);
3490 			if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
3491 				break;
3492 			reg |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
3493 			delay(2*1000);
3494 			i++;
3495 		} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
3496 		break;
3497 	default:
3498 		break;
3499 	}
3500 
3501 	/*
3502 	 * 82541 Errata 29? & 82547 Errata 28?
3503 	 * See also the description of the PHY_RST bit in the CTRL register
3504 	 * in 8254x_GBe_SDM.pdf.
3505 	 */
3506 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3507 		CSR_WRITE(sc, WMREG_CTRL,
3508 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3509 		delay(5000);
3510 	}
3511 
3512 	switch (sc->sc_type) {
3513 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3514 	case WM_T_82541:
3515 	case WM_T_82541_2:
3516 	case WM_T_82547:
3517 	case WM_T_82547_2:
3518 		/*
3519 		 * On some chipsets, a reset through a memory-mapped write
3520 		 * cycle can cause the chip to reset before completing the
3521 		 * write cycle.  This causes major headaches that can be
3522 		 * avoided by issuing the reset via indirect register writes
3523 		 * through I/O space.
3524 		 *
3525 		 * So, if we successfully mapped the I/O BAR at attach time,
3526 		 * use that.  Otherwise, try our luck with a memory-mapped
3527 		 * reset.
3528 		 */
3529 		if (sc->sc_flags & WM_F_IOH_VALID)
3530 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3531 		else
3532 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3533 		break;
3534 	case WM_T_82545_3:
3535 	case WM_T_82546_3:
3536 		/* Use the shadow control register on these chips. */
3537 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3538 		break;
3539 	case WM_T_80003:
3540 		mask = swfwphysem[sc->sc_funcid];
3541 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3542 		wm_get_swfw_semaphore(sc, mask);
3543 		CSR_WRITE(sc, WMREG_CTRL, reg);
3544 		wm_put_swfw_semaphore(sc, mask);
3545 		break;
3546 	case WM_T_ICH8:
3547 	case WM_T_ICH9:
3548 	case WM_T_ICH10:
3549 	case WM_T_PCH:
3550 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3551 		if (wm_check_reset_block(sc) == 0) {
3552 			if (sc->sc_type >= WM_T_PCH) {
3553 				uint32_t status;
3554 
3555 				status = CSR_READ(sc, WMREG_STATUS);
3556 				CSR_WRITE(sc, WMREG_STATUS,
3557 				    status & ~STATUS_PHYRA);
3558 			}
3559 
3560 			reg |= CTRL_PHY_RESET;
3561 			phy_reset = 1;
3562 		}
3563 		wm_get_swfwhw_semaphore(sc);
3564 		CSR_WRITE(sc, WMREG_CTRL, reg);
3565 		delay(20*1000);
3566 		wm_put_swfwhw_semaphore(sc);
3567 		break;
3568 	case WM_T_82542_2_0:
3569 	case WM_T_82542_2_1:
3570 	case WM_T_82543:
3571 	case WM_T_82540:
3572 	case WM_T_82545:
3573 	case WM_T_82546:
3574 	case WM_T_82571:
3575 	case WM_T_82572:
3576 	case WM_T_82573:
3577 	case WM_T_82574:
3578 	case WM_T_82575:
3579 	case WM_T_82576:
3580 	case WM_T_82580:
3581 	case WM_T_82580ER:
3582 	case WM_T_82583:
3583 	default:
3584 		/* Everything else can safely use the documented method. */
3585 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3586 		break;
3587 	}
3588 
3589 	if (phy_reset != 0)
3590 		wm_get_cfg_done(sc);
3591 
3592 	/* reload EEPROM */
3593 	switch (sc->sc_type) {
3594 	case WM_T_82542_2_0:
3595 	case WM_T_82542_2_1:
3596 	case WM_T_82543:
3597 	case WM_T_82544:
3598 		delay(10);
3599 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3600 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3601 		delay(2000);
3602 		break;
3603 	case WM_T_82540:
3604 	case WM_T_82545:
3605 	case WM_T_82545_3:
3606 	case WM_T_82546:
3607 	case WM_T_82546_3:
3608 		delay(5*1000);
3609 		/* XXX Disable HW ARPs on ASF enabled adapters */
3610 		break;
3611 	case WM_T_82541:
3612 	case WM_T_82541_2:
3613 	case WM_T_82547:
3614 	case WM_T_82547_2:
3615 		delay(20000);
3616 		/* XXX Disable HW ARPs on ASF enabled adapters */
3617 		break;
3618 	case WM_T_82571:
3619 	case WM_T_82572:
3620 	case WM_T_82573:
3621 	case WM_T_82574:
3622 	case WM_T_82583:
3623 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3624 			delay(10);
3625 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3626 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3627 		}
3628 		/* check EECD_EE_AUTORD */
3629 		wm_get_auto_rd_done(sc);
3630 		/*
3631 		 * PHY configuration from the NVM starts only after EECD_AUTO_RD
3632 		 * is set.
3633 		 */
3634 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3635 		    || (sc->sc_type == WM_T_82583))
3636 			delay(25*1000);
3637 		break;
3638 	case WM_T_82575:
3639 	case WM_T_82576:
3640 	case WM_T_82580:
3641 	case WM_T_82580ER:
3642 	case WM_T_80003:
3643 	case WM_T_ICH8:
3644 	case WM_T_ICH9:
3645 		/* check EECD_EE_AUTORD */
3646 		wm_get_auto_rd_done(sc);
3647 		break;
3648 	case WM_T_ICH10:
3649 	case WM_T_PCH:
3650 		wm_lan_init_done(sc);
3651 		break;
3652 	default:
3653 		panic("%s: unknown type\n", __func__);
3654 	}
3655 
3656 	/* Check whether EEPROM is present or not */
3657 	switch (sc->sc_type) {
3658 	case WM_T_82575:
3659 	case WM_T_82576:
3660 #if 0 /* XXX */
3661 	case WM_T_82580:
3662 	case WM_T_82580ER:
3663 #endif
3664 	case WM_T_ICH8:
3665 	case WM_T_ICH9:
3666 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3667 			/* Not found */
3668 			sc->sc_flags |= WM_F_EEPROM_INVALID;
3669 			if ((sc->sc_type == WM_T_82575)
3670 			    || (sc->sc_type == WM_T_82576)
3671 			    || (sc->sc_type == WM_T_82580)
3672 			    || (sc->sc_type == WM_T_82580ER))
3673 				wm_reset_init_script_82575(sc);
3674 		}
3675 		break;
3676 	default:
3677 		break;
3678 	}
3679 
3680 	if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)) {
3681 		/* clear global device reset status bit */
3682 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
3683 	}
3684 
3685 	/* Clear any pending interrupt events. */
3686 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3687 	reg = CSR_READ(sc, WMREG_ICR);
3688 
3689 	/* reload sc_ctrl */
3690 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3691 
3692 	/* dummy read from WUC */
3693 	if (sc->sc_type == WM_T_PCH)
3694 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3695 	/*
3696 	 * For PCH, this write ensures that any noise is detected as a
3697 	 * CRC error and dropped, rather than presented to the DMA engine
3698 	 * as a bad packet.
3699 	 */
3700 	if (sc->sc_type == WM_T_PCH)
3701 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3702 
3703 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3704 		CSR_WRITE(sc, WMREG_WUC, 0);
3705 
3706 	/* XXX need special handling for 82580 */
3707 }
3708 
3709 static void
3710 wm_set_vlan(struct wm_softc *sc)
3711 {
3712 	/* Deal with VLAN enables. */
3713 	if (VLAN_ATTACHED(&sc->sc_ethercom))
3714 		sc->sc_ctrl |= CTRL_VME;
3715 	else
3716 		sc->sc_ctrl &= ~CTRL_VME;
3717 
3718 	/* Write the control registers. */
3719 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3720 }
3721 
3722 /*
3723  * wm_init:		[ifnet interface function]
3724  *
3725  *	Initialize the interface.  Must be called at splnet().
3726  */
3727 static int
3728 wm_init(struct ifnet *ifp)
3729 {
3730 	struct wm_softc *sc = ifp->if_softc;
3731 	struct wm_rxsoft *rxs;
3732 	int i, error = 0;
3733 	uint32_t reg;
3734 
3735 	/*
3736 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3737 	 * There is a small but measurable benefit to avoiding the adjustment
3738 	 * of the descriptor so that the headers are aligned, for normal MTU,
3739 	 * on such platforms.  One possibility is that the DMA itself is
3740 	 * slightly more efficient if the front of the entire packet (instead
3741 	 * of the front of the headers) is aligned.
3742 	 *
3743 	 * Note we must always set align_tweak to 0 if we are using
3744 	 * jumbo frames.
3745 	 */
3746 #ifdef __NO_STRICT_ALIGNMENT
3747 	sc->sc_align_tweak = 0;
3748 #else
3749 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3750 		sc->sc_align_tweak = 0;
3751 	else
3752 		sc->sc_align_tweak = 2;
3753 #endif /* __NO_STRICT_ALIGNMENT */
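
	/*
	 * Why a 2-byte tweak (background note): the Ethernet header is
	 * ETHER_HDR_LEN (14) bytes, so starting the frame 2 bytes into
	 * the buffer places the IP header on a 4-byte boundary for
	 * strict-alignment platforms.  With jumbo frames the extra 2
	 * bytes could push a full frame past MCLBYTES, hence the size
	 * check above.
	 */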
3754 
3755 	/* Cancel any pending I/O. */
3756 	wm_stop(ifp, 0);
3757 
3758 	/* update statistics before reset */
3759 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3760 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3761 
3762 	/* Reset the chip to a known state. */
3763 	wm_reset(sc);
3764 
3765 	switch (sc->sc_type) {
3766 	case WM_T_82571:
3767 	case WM_T_82572:
3768 	case WM_T_82573:
3769 	case WM_T_82574:
3770 	case WM_T_82583:
3771 	case WM_T_80003:
3772 	case WM_T_ICH8:
3773 	case WM_T_ICH9:
3774 	case WM_T_ICH10:
3775 	case WM_T_PCH:
3776 		if (wm_check_mng_mode(sc) != 0)
3777 			wm_get_hw_control(sc);
3778 		break;
3779 	default:
3780 		break;
3781 	}
3782 
3783 	/* Reset the PHY. */
3784 	if (sc->sc_flags & WM_F_HAS_MII)
3785 		wm_gmii_reset(sc);
3786 
3787 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
3788 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
3789 	if (sc->sc_type == WM_T_PCH)
3790 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
3791 
3792 	/* Initialize the transmit descriptor ring. */
3793 	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3794 	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3795 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3796 	sc->sc_txfree = WM_NTXDESC(sc);
3797 	sc->sc_txnext = 0;
3798 
3799 	if (sc->sc_type < WM_T_82543) {
3800 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
3801 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
3802 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3803 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3804 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3805 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3806 	} else {
3807 		CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
3808 		CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
3809 		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3810 		CSR_WRITE(sc, WMREG_TDH, 0);
3811 		CSR_WRITE(sc, WMREG_TIDV, 375);		/* ITR / 4 */
3812 		CSR_WRITE(sc, WMREG_TADV, 375);		/* should be same */
3813 
3814 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3815 			/*
3816 			 * Don't write TDT before TCTL.EN is set.
3817 			 * See the documentation.
3818 			 */
3819 			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
3820 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
3821 			    | TXDCTL_WTHRESH(0));
3822 		else {
3823 			CSR_WRITE(sc, WMREG_TDT, 0);
3824 			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3825 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3826 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3827 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3828 		}
3829 	}
3830 	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3831 	CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3832 
3833 	/* Initialize the transmit job descriptors. */
3834 	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3835 		sc->sc_txsoft[i].txs_mbuf = NULL;
3836 	sc->sc_txsfree = WM_TXQUEUELEN(sc);
3837 	sc->sc_txsnext = 0;
3838 	sc->sc_txsdirty = 0;
3839 
3840 	/*
3841 	 * Initialize the receive descriptor and receive job
3842 	 * descriptor rings.
3843 	 */
3844 	if (sc->sc_type < WM_T_82543) {
3845 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3846 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3847 		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3848 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3849 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3850 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3851 
3852 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3853 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3854 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3855 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3856 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3857 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3858 	} else {
3859 		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3860 		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3861 		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3862 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3863 			CSR_WRITE(sc, WMREG_EITR(0), 450);
3864 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
3865 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
3866 			CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
3867 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
3868 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
3869 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
3870 			    | RXDCTL_WTHRESH(1));
3871 		} else {
3872 			CSR_WRITE(sc, WMREG_RDH, 0);
3873 			CSR_WRITE(sc, WMREG_RDT, 0);
3874 			CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD);	/* ITR/4 */
3875 			CSR_WRITE(sc, WMREG_RADV, 375);		/* MUST be same */
3876 		}
3877 	}
3878 	for (i = 0; i < WM_NRXDESC; i++) {
3879 		rxs = &sc->sc_rxsoft[i];
3880 		if (rxs->rxs_mbuf == NULL) {
3881 			if ((error = wm_add_rxbuf(sc, i)) != 0) {
3882 				log(LOG_ERR, "%s: unable to allocate or map rx "
3883 				    "buffer %d, error = %d\n",
3884 				    device_xname(sc->sc_dev), i, error);
3885 				/*
3886 				 * XXX Should attempt to run with fewer receive
3887 				 * XXX buffers instead of just failing.
3888 				 */
3889 				wm_rxdrain(sc);
3890 				goto out;
3891 			}
3892 		} else {
3893 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3894 				WM_INIT_RXDESC(sc, i);
3895 			/*
3896 			 * For 82575 and newer devices, the RX descriptors
3897 			 * must be initialized after RCTL.EN is set in
3898 			 * wm_set_filter().
3899 			 */
3900 		}
3901 	}
3902 	sc->sc_rxptr = 0;
3903 	sc->sc_rxdiscard = 0;
3904 	WM_RXCHAIN_RESET(sc);
3905 
3906 	/*
3907 	 * Clear out the VLAN table -- we don't use it (yet).
3908 	 */
3909 	CSR_WRITE(sc, WMREG_VET, 0);
3910 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
3911 		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3912 
3913 	/*
3914 	 * Set up flow-control parameters.
3915 	 *
3916 	 * XXX Values could probably stand some tuning.
3917 	 */
3918 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
3919 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
3920 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3921 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3922 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3923 	}
3924 
3925 	sc->sc_fcrtl = FCRTL_DFLT;
3926 	if (sc->sc_type < WM_T_82543) {
3927 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3928 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3929 	} else {
3930 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3931 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3932 	}
3933 
3934 	if (sc->sc_type == WM_T_80003)
3935 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
3936 	else
3937 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3938 
3939 	/* Writes the control register. */
3940 	wm_set_vlan(sc);
3941 
3942 	if (sc->sc_flags & WM_F_HAS_MII) {
3943 		int val;
3944 
3945 		switch (sc->sc_type) {
3946 		case WM_T_80003:
3947 		case WM_T_ICH8:
3948 		case WM_T_ICH9:
3949 		case WM_T_ICH10:
3950 		case WM_T_PCH:
3951 			/*
3952 			 * Set the MAC to wait the maximum time between each
3953 			 * iteration and to increase the max iterations when
3954 			 * polling the PHY; this fixes erroneous timeouts at
3955 			 * 10Mbps.
3956 			 */
3957 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
3958 			    0xFFFF);
3959 			val = wm_kmrn_readreg(sc,
3960 			    KUMCTRLSTA_OFFSET_INB_PARAM);
3961 			val |= 0x3F;
3962 			wm_kmrn_writereg(sc,
3963 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
3964 			break;
3965 		default:
3966 			break;
3967 		}
3968 
3969 		if (sc->sc_type == WM_T_80003) {
3970 			val = CSR_READ(sc, WMREG_CTRL_EXT);
3971 			val &= ~CTRL_EXT_LINK_MODE_MASK;
3972 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3973 
3974 			/* Bypass RX and TX FIFOs */
3975 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3976 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
3977 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3978 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3979 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3980 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3981 		}
3982 	}
3983 #if 0
3984 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3985 #endif
3986 
3987 	/*
3988 	 * Set up checksum offload parameters.
3989 	 */
3990 	reg = CSR_READ(sc, WMREG_RXCSUM);
3991 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3992 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3993 		reg |= RXCSUM_IPOFL;
3994 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3995 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3996 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3997 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3998 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
3999 
4000 	/* Reset TBI's RXCFG count */
4001 	sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
4002 
4003 	/*
4004 	 * Set up the interrupt registers.
4005 	 */
4006 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4007 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4008 	    ICR_RXO | ICR_RXT0;
4009 	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
4010 		sc->sc_icr |= ICR_RXCFG;
4011 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4012 
4013 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4014 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)) {
4015 		reg = CSR_READ(sc, WMREG_KABGTXD);
4016 		reg |= KABGTXD_BGSQLBIAS;
4017 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
4018 	}
4019 
4020 	/* Set up the inter-packet gap. */
4021 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4022 
4023 	if (sc->sc_type >= WM_T_82543) {
4024 		/*
4025 		 * Set up the interrupt throttling register (units of 256ns)
4026 		 * Note that a footnote in Intel's documentation says this
4027 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4028 		 * or 10Mbit mode.  Empirically, it appears to be the case
4029 		 * that this is also true for the 1024ns units of the other
4030 		 * interrupt-related timer registers -- so, really, we ought
4031 		 * to divide this value by 4 when the link speed is low.
4032 		 *
4033 		 * XXX implement this division at link speed change!
4034 		 */
4035 
4036 		 /*
4037 		  * For N interrupts/sec, set this value to:
4038 		  * 1000000000 / (N * 256).  Note that we set the
4039 		  * absolute and packet timer values to this value
4040 		  * divided by 4 to get "simple timer" behavior.
4041 		  */
4042 
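		 /*
		  * Worked example: with sc_itr = 1500 below,
		  * N = 1000000000 / (1500 * 256) ~= 2604 interrupts/sec,
		  * and the TIDV/TADV values written earlier (375 = 1500/4,
		  * in 1024ns units) follow the divide-by-4 rule above.
		  */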
4043 		sc->sc_itr = 1500;		/* 2604 ints/sec */
4044 		CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4045 	}
4046 
4047 	/* Set the VLAN ethernetype. */
4048 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4049 
4050 	/*
4051 	 * Set up the transmit control register; we start out with
4052  * a collision distance suitable for FDX, but update it when
4053 	 * we resolve the media type.
4054 	 */
4055 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4056 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
4057 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4058 	if (sc->sc_type >= WM_T_82571)
4059 		sc->sc_tctl |= TCTL_MULR;
4060 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4061 
4062 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4063 		/*
4064 		 * Write TDT after TCTL.EN is set.
4065 		 * See the documentation.
4066 		 */
4067 		CSR_WRITE(sc, WMREG_TDT, 0);
4068 	}
4069 
4070 	if (sc->sc_type == WM_T_80003) {
4071 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
4072 		reg &= ~TCTL_EXT_GCEX_MASK;
4073 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4074 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4075 	}
4076 
4077 	/* Set the media. */
4078 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4079 		goto out;
4080 
4081 	/* Configure for OS presence */
4082 	wm_init_manageability(sc);
4083 
4084 	/*
4085 	 * Set up the receive control register; we actually program
4086 	 * the register when we set the receive filter.  Use multicast
4087 	 * address offset type 0.
4088 	 *
4089 	 * Only the i82544 has the ability to strip the incoming
4090 	 * CRC, so we don't enable that feature.
4091 	 */
4092 	sc->sc_mchash_type = 0;
4093 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4094 	    | RCTL_MO(sc->sc_mchash_type);
4095 
4096 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4097 	    && (ifp->if_mtu > ETHERMTU)) {
4098 		sc->sc_rctl |= RCTL_LPE;
4099 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4100 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4101 	}
4102 
4103 	if (MCLBYTES == 2048) {
4104 		sc->sc_rctl |= RCTL_2k;
4105 	} else {
4106 		if (sc->sc_type >= WM_T_82543) {
4107 			switch (MCLBYTES) {
4108 			case 4096:
4109 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4110 				break;
4111 			case 8192:
4112 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4113 				break;
4114 			case 16384:
4115 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4116 				break;
4117 			default:
4118 				panic("wm_init: MCLBYTES %d unsupported",
4119 				    MCLBYTES);
4120 				break;
4121 			}
4122 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
4123 	}
4124 
4125 	/* Set the receive filter. */
4126 	wm_set_filter(sc);
4127 
4128 	/* On 82575 and later, set RDT only if RX is enabled */
4129 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4130 		for (i = 0; i < WM_NRXDESC; i++)
4131 			WM_INIT_RXDESC(sc, i);
4132 
4133 	/* Start the one second link check clock. */
4134 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4135 
4136 	/* ...all done! */
4137 	ifp->if_flags |= IFF_RUNNING;
4138 	ifp->if_flags &= ~IFF_OACTIVE;
4139 
4140  out:
4141 	sc->sc_if_flags = ifp->if_flags;
4142 	if (error)
4143 		log(LOG_ERR, "%s: interface not running\n",
4144 		    device_xname(sc->sc_dev));
4145 	return error;
4146 }
4147 
4148 /*
4149  * wm_rxdrain:
4150  *
4151  *	Drain the receive queue.
4152  */
4153 static void
4154 wm_rxdrain(struct wm_softc *sc)
4155 {
4156 	struct wm_rxsoft *rxs;
4157 	int i;
4158 
4159 	for (i = 0; i < WM_NRXDESC; i++) {
4160 		rxs = &sc->sc_rxsoft[i];
4161 		if (rxs->rxs_mbuf != NULL) {
4162 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4163 			m_freem(rxs->rxs_mbuf);
4164 			rxs->rxs_mbuf = NULL;
4165 		}
4166 	}
4167 }
4168 
4169 /*
4170  * wm_stop:		[ifnet interface function]
4171  *
4172  *	Stop transmission on the interface.
4173  */
4174 static void
4175 wm_stop(struct ifnet *ifp, int disable)
4176 {
4177 	struct wm_softc *sc = ifp->if_softc;
4178 	struct wm_txsoft *txs;
4179 	int i;
4180 
4181 	/* Stop the one second clock. */
4182 	callout_stop(&sc->sc_tick_ch);
4183 
4184 	/* Stop the 82547 Tx FIFO stall check timer. */
4185 	if (sc->sc_type == WM_T_82547)
4186 		callout_stop(&sc->sc_txfifo_ch);
4187 
4188 	if (sc->sc_flags & WM_F_HAS_MII) {
4189 		/* Down the MII. */
4190 		mii_down(&sc->sc_mii);
4191 	} else {
4192 #if 0
4193 		/* Should we clear PHY's status properly? */
4194 		wm_reset(sc);
4195 #endif
4196 	}
4197 
4198 	/* Stop the transmit and receive processes. */
4199 	CSR_WRITE(sc, WMREG_TCTL, 0);
4200 	CSR_WRITE(sc, WMREG_RCTL, 0);
4201 	sc->sc_rctl &= ~RCTL_EN;
4202 
4203 	/*
4204 	 * Clear the interrupt mask to ensure the device cannot assert its
4205 	 * interrupt line.
4206 	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4207 	 * any currently pending or shared interrupt.
4208 	 */
4209 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4210 	sc->sc_icr = 0;
4211 
4212 	/* Release any queued transmit buffers. */
4213 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4214 		txs = &sc->sc_txsoft[i];
4215 		if (txs->txs_mbuf != NULL) {
4216 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4217 			m_freem(txs->txs_mbuf);
4218 			txs->txs_mbuf = NULL;
4219 		}
4220 	}
4221 
4222 	/* Mark the interface as down and cancel the watchdog timer. */
4223 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4224 	ifp->if_timer = 0;
4225 
4226 	if (disable)
4227 		wm_rxdrain(sc);
4228 
4229 #if 0 /* notyet */
4230 	if (sc->sc_type >= WM_T_82544)
4231 		CSR_WRITE(sc, WMREG_WUC, 0);
4232 #endif
4233 }
4234 
4235 void
4236 wm_get_auto_rd_done(struct wm_softc *sc)
4237 {
4238 	int i;
4239 
4240 	/* wait for eeprom to reload */
4241 	switch (sc->sc_type) {
4242 	case WM_T_82571:
4243 	case WM_T_82572:
4244 	case WM_T_82573:
4245 	case WM_T_82574:
4246 	case WM_T_82583:
4247 	case WM_T_82575:
4248 	case WM_T_82576:
4249 	case WM_T_82580:
4250 	case WM_T_82580ER:
4251 	case WM_T_80003:
4252 	case WM_T_ICH8:
4253 	case WM_T_ICH9:
4254 		for (i = 0; i < 10; i++) {
4255 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4256 				break;
4257 			delay(1000);
4258 		}
4259 		if (i == 10) {
4260 			log(LOG_ERR, "%s: auto read from eeprom failed to "
4261 			    "complete\n", device_xname(sc->sc_dev));
4262 		}
4263 		break;
4264 	default:
4265 		break;
4266 	}
4267 }
4268 
4269 void
4270 wm_lan_init_done(struct wm_softc *sc)
4271 {
4272 	uint32_t reg = 0;
4273 	int i;
4274 
4275 	/* wait for eeprom to reload */
4276 	switch (sc->sc_type) {
4277 	case WM_T_ICH10:
4278 	case WM_T_PCH:
4279 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4280 			reg = CSR_READ(sc, WMREG_STATUS);
4281 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
4282 				break;
4283 			delay(100);
4284 		}
4285 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4286 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
4287 			    "complete\n", device_xname(sc->sc_dev), __func__);
4288 		}
4289 		break;
4290 	default:
4291 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4292 		    __func__);
4293 		break;
4294 	}
4295 
4296 	reg &= ~STATUS_LAN_INIT_DONE;
4297 	CSR_WRITE(sc, WMREG_STATUS, reg);
4298 }
4299 
4300 void
4301 wm_get_cfg_done(struct wm_softc *sc)
4302 {
4303 	int mask;
4304 	uint32_t reg;
4305 	int i;
4306 
4307 	/* wait for eeprom to reload */
4308 	switch (sc->sc_type) {
4309 	case WM_T_82542_2_0:
4310 	case WM_T_82542_2_1:
4311 		/* null */
4312 		break;
4313 	case WM_T_82543:
4314 	case WM_T_82544:
4315 	case WM_T_82540:
4316 	case WM_T_82545:
4317 	case WM_T_82545_3:
4318 	case WM_T_82546:
4319 	case WM_T_82546_3:
4320 	case WM_T_82541:
4321 	case WM_T_82541_2:
4322 	case WM_T_82547:
4323 	case WM_T_82547_2:
4324 	case WM_T_82573:
4325 	case WM_T_82574:
4326 	case WM_T_82583:
4327 		/* generic */
4328 		delay(10*1000);
4329 		break;
4330 	case WM_T_80003:
4331 	case WM_T_82571:
4332 	case WM_T_82572:
4333 	case WM_T_82575:
4334 	case WM_T_82576:
4335 	case WM_T_82580:
4336 	case WM_T_82580ER:
4337 		if (sc->sc_type == WM_T_82571) {
4338 			/* Only 82571 shares port 0 */
4339 			mask = EEMNGCTL_CFGDONE_0;
4340 		} else
4341 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
4342 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
4343 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
4344 				break;
4345 			delay(1000);
4346 		}
4347 		if (i >= WM_PHY_CFG_TIMEOUT) {
4348 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
4349 				device_xname(sc->sc_dev), __func__));
4350 		}
4351 		break;
4352 	case WM_T_ICH8:
4353 	case WM_T_ICH9:
4354 	case WM_T_ICH10:
4355 	case WM_T_PCH:
4356 		if (sc->sc_type >= WM_T_PCH) {
4357 			reg = CSR_READ(sc, WMREG_STATUS);
4358 			if ((reg & STATUS_PHYRA) != 0)
4359 				CSR_WRITE(sc, WMREG_STATUS,
4360 				    reg & ~STATUS_PHYRA);
4361 		}
4362 		delay(10*1000);
4363 		break;
4364 	default:
4365 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4366 		    __func__);
4367 		break;
4368 	}
4369 }
4370 
4371 /*
4372  * wm_acquire_eeprom:
4373  *
4374  *	Perform the EEPROM handshake required on some chips.
4375  */
4376 static int
4377 wm_acquire_eeprom(struct wm_softc *sc)
4378 {
4379 	uint32_t reg;
4380 	int x;
4381 	int ret = 0;
4382 
4383 	/* always succeeds */
4384 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
4385 		return 0;
4386 
4387 	if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
4388 		ret = wm_get_swfwhw_semaphore(sc);
4389 	} else if (sc->sc_flags & WM_F_SWFW_SYNC) {
4390 		/* this will also do wm_get_swsm_semaphore() if needed */
4391 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
4392 	} else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
4393 		ret = wm_get_swsm_semaphore(sc);
4394 	}
4395 
4396 	if (ret) {
4397 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4398 			__func__);
4399 		return 1;
4400 	}
4401 
4402 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
4403 		reg = CSR_READ(sc, WMREG_EECD);
4404 
4405 		/* Request EEPROM access. */
4406 		reg |= EECD_EE_REQ;
4407 		CSR_WRITE(sc, WMREG_EECD, reg);
4408 
4409 		/* ...and wait for it to be granted. */
4410 		for (x = 0; x < 1000; x++) {
4411 			reg = CSR_READ(sc, WMREG_EECD);
4412 			if (reg & EECD_EE_GNT)
4413 				break;
4414 			delay(5);
4415 		}
4416 		if ((reg & EECD_EE_GNT) == 0) {
4417 			aprint_error_dev(sc->sc_dev,
4418 			    "could not acquire EEPROM GNT\n");
4419 			reg &= ~EECD_EE_REQ;
4420 			CSR_WRITE(sc, WMREG_EECD, reg);
4421 			if (sc->sc_flags & WM_F_SWFWHW_SYNC)
4422 				wm_put_swfwhw_semaphore(sc);
4423 			if (sc->sc_flags & WM_F_SWFW_SYNC)
4424 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
4425 			else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4426 				wm_put_swsm_semaphore(sc);
4427 			return 1;
4428 		}
4429 	}
4430 
4431 	return 0;
4432 }
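
/*
 * Timing note for the handshake above: the grant loop polls EECD up to
 * 1000 times with a 5us delay, so software gives up on EECD_EE_GNT
 * after roughly 5ms before backing out the request and releasing any
 * semaphore it took.
 */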
4433 
4434 /*
4435  * wm_release_eeprom:
4436  *
4437  *	Release the EEPROM mutex.
4438  */
4439 static void
4440 wm_release_eeprom(struct wm_softc *sc)
4441 {
4442 	uint32_t reg;
4443 
4444 	/* always succeeds */
4445 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
4446 		return;
4447 
4448 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
4449 		reg = CSR_READ(sc, WMREG_EECD);
4450 		reg &= ~EECD_EE_REQ;
4451 		CSR_WRITE(sc, WMREG_EECD, reg);
4452 	}
4453 
4454 	if (sc->sc_flags & WM_F_SWFWHW_SYNC)
4455 		wm_put_swfwhw_semaphore(sc);
4456 	if (sc->sc_flags & WM_F_SWFW_SYNC)
4457 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
4458 	else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4459 		wm_put_swsm_semaphore(sc);
4460 }
4461 
4462 /*
4463  * wm_eeprom_sendbits:
4464  *
4465  *	Send a series of bits to the EEPROM.
4466  */
4467 static void
4468 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
4469 {
4470 	uint32_t reg;
4471 	int x;
4472 
4473 	reg = CSR_READ(sc, WMREG_EECD);
4474 
4475 	for (x = nbits; x > 0; x--) {
4476 		if (bits & (1U << (x - 1)))
4477 			reg |= EECD_DI;
4478 		else
4479 			reg &= ~EECD_DI;
4480 		CSR_WRITE(sc, WMREG_EECD, reg);
4481 		delay(2);
4482 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4483 		delay(2);
4484 		CSR_WRITE(sc, WMREG_EECD, reg);
4485 		delay(2);
4486 	}
4487 }
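
/*
 * Bit-order example: wm_eeprom_sendbits(sc, 0x5, 3) walks x = 3..1 and
 * tests bit (x - 1), so it drives DI = 1, 0, 1 -- most significant bit
 * first -- pulsing SK once per bit.
 */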
4488 
4489 /*
4490  * wm_eeprom_recvbits:
4491  *
4492  *	Receive a series of bits from the EEPROM.
4493  */
4494 static void
4495 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
4496 {
4497 	uint32_t reg, val;
4498 	int x;
4499 
4500 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
4501 
4502 	val = 0;
4503 	for (x = nbits; x > 0; x--) {
4504 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4505 		delay(2);
4506 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
4507 			val |= (1U << (x - 1));
4508 		CSR_WRITE(sc, WMREG_EECD, reg);
4509 		delay(2);
4510 	}
4511 	*valp = val;
4512 }
4513 
4514 /*
4515  * wm_read_eeprom_uwire:
4516  *
4517  *	Read a word from the EEPROM using the MicroWire protocol.
4518  */
4519 static int
4520 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4521 {
4522 	uint32_t reg, val;
4523 	int i;
4524 
4525 	for (i = 0; i < wordcnt; i++) {
4526 		/* Clear SK and DI. */
4527 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
4528 		CSR_WRITE(sc, WMREG_EECD, reg);
4529 
4530 		/* Set CHIP SELECT. */
4531 		reg |= EECD_CS;
4532 		CSR_WRITE(sc, WMREG_EECD, reg);
4533 		delay(2);
4534 
4535 		/* Shift in the READ command. */
4536 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
4537 
4538 		/* Shift in address. */
4539 		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
4540 
4541 		/* Shift out the data. */
4542 		wm_eeprom_recvbits(sc, &val, 16);
4543 		data[i] = val & 0xffff;
4544 
4545 		/* Clear CHIP SELECT. */
4546 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
4547 		CSR_WRITE(sc, WMREG_EECD, reg);
4548 		delay(2);
4549 	}
4550 
4551 	return 0;
4552 }
4553 
4554 /*
4555  * wm_spi_eeprom_ready:
4556  *
4557  *	Wait for a SPI EEPROM to be ready for commands.
4558  */
4559 static int
4560 wm_spi_eeprom_ready(struct wm_softc *sc)
4561 {
4562 	uint32_t val;
4563 	int usec;
4564 
4565 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
4566 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
4567 		wm_eeprom_recvbits(sc, &val, 8);
4568 		if ((val & SPI_SR_RDY) == 0)
4569 			break;
4570 	}
4571 	if (usec >= SPI_MAX_RETRIES) {
4572 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
4573 		return 1;
4574 	}
4575 	return 0;
4576 }
4577 
4578 /*
4579  * wm_read_eeprom_spi:
4580  *
4581  *	Read a word from the EEPROM using the SPI protocol.
4582  */
4583 static int
4584 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4585 {
4586 	uint32_t reg, val;
4587 	int i;
4588 	uint8_t opc;
4589 
4590 	/* Clear SK and CS. */
4591 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
4592 	CSR_WRITE(sc, WMREG_EECD, reg);
4593 	delay(2);
4594 
4595 	if (wm_spi_eeprom_ready(sc))
4596 		return 1;
4597 
4598 	/* Toggle CS to flush commands. */
4599 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
4600 	delay(2);
4601 	CSR_WRITE(sc, WMREG_EECD, reg);
4602 	delay(2);
4603 
4604 	opc = SPI_OPC_READ;
4605 	if (sc->sc_ee_addrbits == 8 && word >= 128)
4606 		opc |= SPI_OPC_A8;
4607 
4608 	wm_eeprom_sendbits(sc, opc, 8);
4609 	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
4610 
4611 	for (i = 0; i < wordcnt; i++) {
4612 		wm_eeprom_recvbits(sc, &val, 16);
4613 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
4614 	}
4615 
4616 	/* Raise CS and clear SK. */
4617 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
4618 	CSR_WRITE(sc, WMREG_EECD, reg);
4619 	delay(2);
4620 
4621 	return 0;
4622 }
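
/*
 * Note on SPI_OPC_A8 above: parts with 8-bit SPI addressing can still
 * hold 256 words.  The byte address sent is word << 1, so word 128
 * maps to byte 256, which no longer fits in 8 address bits; the A8
 * opcode bit supplies that ninth address bit.
 */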
4623 
4624 #define EEPROM_CHECKSUM		0xBABA
4625 #define EEPROM_SIZE		0x0040
4626 
4627 /*
4628  * wm_validate_eeprom_checksum
4629  *
4630  * The checksum is the sum of the first 64 16-bit words and must equal EEPROM_CHECKSUM.
4631  */
4632 static int
4633 wm_validate_eeprom_checksum(struct wm_softc *sc)
4634 {
4635 	uint16_t checksum;
4636 	uint16_t eeprom_data;
4637 	int i;
4638 
4639 	checksum = 0;
4640 
4641 	for (i = 0; i < EEPROM_SIZE; i++) {
4642 		if (wm_read_eeprom(sc, i, 1, &eeprom_data))
4643 			return 1;
4644 		checksum += eeprom_data;
4645 	}
4646 
4647 	if (checksum != (uint16_t) EEPROM_CHECKSUM)
4648 		return 1;
4649 
4650 	return 0;
4651 }
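
/*
 * The dual of the check above, as a sketch: a tool writing an EEPROM
 * image picks the final word so that the 64-word sum wraps to
 * EEPROM_CHECKSUM.  The helper below is illustrative only (its name is
 * hypothetical); the driver never writes the checksum itself.
 */
#if 0
static uint16_t
wm_eeprom_checksum_word(const uint16_t *words)
{
	uint16_t sum = 0;
	int i;

	/* Sum the first 63 words; 16-bit wraparound is intentional. */
	for (i = 0; i < EEPROM_SIZE - 1; i++)
		sum += words[i];
	return (uint16_t)(EEPROM_CHECKSUM - sum);
}
#endif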
4652 
4653 /*
4654  * wm_read_eeprom:
4655  *
4656  *	Read data from the serial EEPROM.
4657  */
4658 static int
4659 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4660 {
4661 	int rv;
4662 
4663 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
4664 		return 1;
4665 
4666 	if (wm_acquire_eeprom(sc))
4667 		return 1;
4668 
4669 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4670 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4671 		rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
4672 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
4673 		rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
4674 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
4675 		rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
4676 	else
4677 		rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
4678 
4679 	wm_release_eeprom(sc);
4680 	return rv;
4681 }
4682 
4683 static int
4684 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
4685     uint16_t *data)
4686 {
4687 	int i, eerd = 0;
4688 	int error = 0;
4689 
4690 	for (i = 0; i < wordcnt; i++) {
4691 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
4692 
4693 		CSR_WRITE(sc, WMREG_EERD, eerd);
4694 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
4695 		if (error != 0)
4696 			break;
4697 
4698 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
4699 	}
4700 
4701 	return error;
4702 }
4703 
4704 static int
4705 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
4706 {
4707 	uint32_t attempts = 100000;
4708 	uint32_t i, reg = 0;
4709 	int32_t done = -1;
4710 
4711 	for (i = 0; i < attempts; i++) {
4712 		reg = CSR_READ(sc, rw);
4713 
4714 		if (reg & EERD_DONE) {
4715 			done = 0;
4716 			break;
4717 		}
4718 		delay(5);
4719 	}
4720 
4721 	return done;
4722 }
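
/*
 * EERD usage sketch (restating the two functions above): reading word
 * 3 writes (3 << EERD_ADDR_SHIFT) | EERD_START to WMREG_EERD, polls
 * for EERD_DONE -- up to 100000 tries at 5us each, about half a
 * second -- and then takes CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT
 * as the word.
 */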
4723 
4724 static int
4725 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
4726 {
4727 	uint16_t myea[ETHER_ADDR_LEN / 2];
4728 	uint16_t offset = EEPROM_OFF_MACADDR;
4729 	int do_invert = 0;
4730 
4731 	if (sc->sc_funcid != 0)
4732 		switch (sc->sc_type) {
4733 		case WM_T_82580:
4734 		case WM_T_82580ER:
4735 			switch (sc->sc_funcid) {
4736 			case 1:
4737 				offset = EEPROM_OFF_LAN1;
4738 				break;
4739 			case 2:
4740 				offset = EEPROM_OFF_LAN2;
4741 				break;
4742 			case 3:
4743 				offset = EEPROM_OFF_LAN3;
4744 				break;
4745 			default:
4746 				goto bad;
4747 				/* NOTREACHED */
4748 				break;
4749 			}
4750 			break;
4751 		case WM_T_82571:
4752 		case WM_T_82575:
4753 		case WM_T_82576:
4754 		case WM_T_80003:
4755 			if (wm_read_eeprom(sc, EEPROM_ALT_MAC_ADDR_PTR, 1,
4756 				&offset) != 0) {
4757 				goto bad;
4758 			}
4759 
4760 			/* no pointer */
4761 			if (offset == 0xffff) {
4762 				/* reset the offset to LAN0 */
4763 				offset = EEPROM_OFF_MACADDR;
4764 				do_invert = 1;
4765 				goto do_read;
4766 			}
4767 
4768 			switch (sc->sc_funcid) {
4769 			case 1:
4770 				offset += EEPROM_OFF_MACADDR_LAN1;
4771 				break;
4772 			case 2:
4773 				offset += EEPROM_OFF_MACADDR_LAN2;
4774 				break;
4775 			case 3:
4776 				offset += EEPROM_OFF_MACADDR_LAN3;
4777 				break;
4778 			default:
4779 				goto bad;
4780 				/* NOTREACHED */
4781 				break;
4782 			}
4783 			break;
4784 		default:
4785 			do_invert = 1;
4786 			break;
4787 		}
4788 
4789  do_read:
4790 	if (wm_read_eeprom(sc, offset, sizeof(myea) / sizeof(myea[0]),
4791 		myea) != 0) {
4792 		goto bad;
4793 	}
4794 
4795 	enaddr[0] = myea[0] & 0xff;
4796 	enaddr[1] = myea[0] >> 8;
4797 	enaddr[2] = myea[1] & 0xff;
4798 	enaddr[3] = myea[1] >> 8;
4799 	enaddr[4] = myea[2] & 0xff;
4800 	enaddr[5] = myea[2] >> 8;
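
	/*
	 * Unpacking example: EEPROM words {0x2211, 0x4433, 0x6655} yield
	 * 11:22:33:44:55:66, since each 16-bit word carries the
	 * lower-numbered octet in its low byte.
	 */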
4801 
4802 	/*
4803 	 * Toggle the LSB of the MAC address on the second port
4804 	 * of some dual port cards.
4805 	 */
4806 	if (do_invert != 0)
4807 		enaddr[5] ^= 1;
4808 
4809 	return 0;
4810 
4811  bad:
4812 	aprint_error_dev(sc->sc_dev, "unable to read Ethernet address\n");
4813 
4814 	return -1;
4815 }
4816 
4817 /*
4818  * wm_add_rxbuf:
4819  *
4820  *	Add a receive buffer to the indicated descriptor.
4821  */
4822 static int
4823 wm_add_rxbuf(struct wm_softc *sc, int idx)
4824 {
4825 	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
4826 	struct mbuf *m;
4827 	int error;
4828 
4829 	MGETHDR(m, M_DONTWAIT, MT_DATA);
4830 	if (m == NULL)
4831 		return ENOBUFS;
4832 
4833 	MCLGET(m, M_DONTWAIT);
4834 	if ((m->m_flags & M_EXT) == 0) {
4835 		m_freem(m);
4836 		return ENOBUFS;
4837 	}
4838 
4839 	if (rxs->rxs_mbuf != NULL)
4840 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4841 
4842 	rxs->rxs_mbuf = m;
4843 
4844 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4845 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4846 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
4847 	if (error) {
4848 		/* XXX XXX XXX */
4849 		aprint_error_dev(sc->sc_dev,
4850 		    "unable to load rx DMA map %d, error = %d\n",
4851 		    idx, error);
4852 		panic("wm_add_rxbuf");
4853 	}
4854 
4855 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4856 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4857 
4858 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4859 		if ((sc->sc_rctl & RCTL_EN) != 0)
4860 			WM_INIT_RXDESC(sc, idx);
4861 	} else
4862 		WM_INIT_RXDESC(sc, idx);
4863 
4864 	return 0;
4865 }
4866 
4867 /*
4868  * wm_set_ral:
4869  *
4870  *	Set an entry in the receive address list.
4871  */
4872 static void
4873 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
4874 {
4875 	uint32_t ral_lo, ral_hi;
4876 
4877 	if (enaddr != NULL) {
4878 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
4879 		    (enaddr[3] << 24);
4880 		ral_hi = enaddr[4] | (enaddr[5] << 8);
4881 		ral_hi |= RAL_AV;
4882 	} else {
4883 		ral_lo = 0;
4884 		ral_hi = 0;
4885 	}
4886 
4887 	if (sc->sc_type >= WM_T_82544) {
4888 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
4889 		    ral_lo);
4890 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
4891 		    ral_hi);
4892 	} else {
4893 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
4894 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
4895 	}
4896 }
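
/*
 * Packing example for wm_set_ral(): the address 11:22:33:44:55:66
 * becomes ral_lo = 0x44332211 and ral_hi = 0x6655 | RAL_AV, i.e. the
 * six octets laid out little-endian across the two registers with the
 * address-valid bit set in the high word.
 */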
4897 
4898 /*
4899  * wm_mchash:
4900  *
4901  *	Compute the hash of the multicast address for the 4096-bit
4902  *	multicast filter (1024-bit on ICH and PCH devices).
4903  */
4904 static uint32_t
4905 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
4906 {
4907 	static const int lo_shift[4] = { 4, 3, 2, 0 };
4908 	static const int hi_shift[4] = { 4, 5, 6, 8 };
4909 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
4910 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
4911 	uint32_t hash;
4912 
4913 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4914 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)) {
4915 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
4916 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
4917 		return (hash & 0x3ff);
4918 	}
4919 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
4920 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
4921 
4922 	return (hash & 0xfff);
4923 }
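
/*
 * Worked example (non-ICH path, mchash_type 0 as set by wm_init()):
 * for a multicast address ending in a4:5f,
 * hash = (0xa4 >> 4) | (0x5f << 4) = 0x5fa.  wm_set_filter() then uses
 * hash >> 5 = 0x2f as the MTA register index and hash & 0x1f = 0x1a as
 * the bit within that 32-bit register, so the 4096-bit table is 128
 * registers of 32 bits.
 */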
4924 
4925 /*
4926  * wm_set_filter:
4927  *
4928  *	Set up the receive filter.
4929  */
4930 static void
4931 wm_set_filter(struct wm_softc *sc)
4932 {
4933 	struct ethercom *ec = &sc->sc_ethercom;
4934 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4935 	struct ether_multi *enm;
4936 	struct ether_multistep step;
4937 	bus_addr_t mta_reg;
4938 	uint32_t hash, reg, bit;
4939 	int i, size;
4940 
4941 	if (sc->sc_type >= WM_T_82544)
4942 		mta_reg = WMREG_CORDOVA_MTA;
4943 	else
4944 		mta_reg = WMREG_MTA;
4945 
4946 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
4947 
4948 	if (ifp->if_flags & IFF_BROADCAST)
4949 		sc->sc_rctl |= RCTL_BAM;
4950 	if (ifp->if_flags & IFF_PROMISC) {
4951 		sc->sc_rctl |= RCTL_UPE;
4952 		goto allmulti;
4953 	}
4954 
4955 	/*
4956 	 * Set the station address in the first RAL slot, and
4957 	 * clear the remaining slots.
4958 	 */
4959 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4960 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4961 		size = WM_ICH8_RAL_TABSIZE;
4962 	else
4963 		size = WM_RAL_TABSIZE;
4964 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
4965 	for (i = 1; i < size; i++)
4966 		wm_set_ral(sc, NULL, i);
4967 
4968 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4969 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4970 		size = WM_ICH8_MC_TABSIZE;
4971 	else
4972 		size = WM_MC_TABSIZE;
4973 	/* Clear out the multicast table. */
4974 	for (i = 0; i < size; i++)
4975 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
4976 
4977 	ETHER_FIRST_MULTI(step, ec, enm);
4978 	while (enm != NULL) {
4979 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
4980 			/*
4981 			 * We must listen to a range of multicast addresses.
4982 			 * For now, just accept all multicasts, rather than
4983 			 * trying to set only those filter bits needed to match
4984 			 * the range.  (At this time, the only use of address
4985 			 * ranges is for IP multicast routing, for which the
4986 			 * range is big enough to require all bits set.)
4987 			 */
4988 			goto allmulti;
4989 		}
4990 
4991 		hash = wm_mchash(sc, enm->enm_addrlo);
4992 
4993 		reg = (hash >> 5);
4994 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4995 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4996 			reg &= 0x1f;
4997 		else
4998 			reg &= 0x7f;
4999 		bit = hash & 0x1f;
5000 
5001 		hash = CSR_READ(sc, mta_reg + (reg << 2));
5002 		hash |= 1U << bit;
5003 
5004 		/* XXX Hardware bug?? */
5005 		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
5006 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
5007 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5008 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
5009 		} else
5010 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5011 
5012 		ETHER_NEXT_MULTI(step, enm);
5013 	}
5014 
5015 	ifp->if_flags &= ~IFF_ALLMULTI;
5016 	goto setit;
5017 
5018  allmulti:
5019 	ifp->if_flags |= IFF_ALLMULTI;
5020 	sc->sc_rctl |= RCTL_MPE;
5021 
5022  setit:
5023 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
5024 }
5025 
5026 /*
5027  * wm_tbi_mediainit:
5028  *
5029  *	Initialize media for use on 1000BASE-X devices.
5030  */
5031 static void
5032 wm_tbi_mediainit(struct wm_softc *sc)
5033 {
5034 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5035 	const char *sep = "";
5036 
5037 	if (sc->sc_type < WM_T_82543)
5038 		sc->sc_tipg = TIPG_WM_DFLT;
5039 	else
5040 		sc->sc_tipg = TIPG_LG_DFLT;
5041 
5042 	sc->sc_tbi_anegticks = 5;
5043 
5044 	/* Initialize our media structures */
5045 	sc->sc_mii.mii_ifp = ifp;
5046 
5047 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
5048 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
5049 	    wm_tbi_mediastatus);
5050 
5051 	/*
5052 	 * SWD Pins:
5053 	 *
5054 	 *	0 = Link LED (output)
5055 	 *	1 = Loss Of Signal (input)
5056 	 */
5057 	sc->sc_ctrl |= CTRL_SWDPIO(0);
5058 	sc->sc_ctrl &= ~CTRL_SWDPIO(1);
5059 
5060 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5061 
5062 #define	ADD(ss, mm, dd)							\
5063 do {									\
5064 	aprint_normal("%s%s", sep, ss);					\
5065 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
5066 	sep = ", ";							\
5067 } while (/*CONSTCOND*/0)
5068 
5069 	aprint_normal_dev(sc->sc_dev, "");
5070 	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
5071 	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
5072 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
5073 	aprint_normal("\n");
5074 
5075 #undef ADD
5076 
5077 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
5078 }
5079 
5080 /*
5081  * wm_tbi_mediastatus:	[ifmedia interface function]
5082  *
5083  *	Get the current interface media status on a 1000BASE-X device.
5084  */
5085 static void
5086 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5087 {
5088 	struct wm_softc *sc = ifp->if_softc;
5089 	uint32_t ctrl, status;
5090 
5091 	ifmr->ifm_status = IFM_AVALID;
5092 	ifmr->ifm_active = IFM_ETHER;
5093 
5094 	status = CSR_READ(sc, WMREG_STATUS);
5095 	if ((status & STATUS_LU) == 0) {
5096 		ifmr->ifm_active |= IFM_NONE;
5097 		return;
5098 	}
5099 
5100 	ifmr->ifm_status |= IFM_ACTIVE;
5101 	ifmr->ifm_active |= IFM_1000_SX;
5102 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
5103 		ifmr->ifm_active |= IFM_FDX;
5104 	ctrl = CSR_READ(sc, WMREG_CTRL);
5105 	if (ctrl & CTRL_RFCE)
5106 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
5107 	if (ctrl & CTRL_TFCE)
5108 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
5109 }
5110 
5111 /*
5112  * wm_tbi_mediachange:	[ifmedia interface function]
5113  *
5114  *	Set hardware to newly-selected media on a 1000BASE-X device.
5115  */
5116 static int
5117 wm_tbi_mediachange(struct ifnet *ifp)
5118 {
5119 	struct wm_softc *sc = ifp->if_softc;
5120 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5121 	uint32_t status;
5122 	int i;
5123 
5124 	sc->sc_txcw = 0;
5125 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
5126 	    (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
5127 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
5128 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5129 		sc->sc_txcw |= TXCW_ANE;
5130 	} else {
5131 		/*
5132 		 * If autonegotiation is turned off, force link up and turn on
5133 		 * full duplex
5134 		 */
5135 		sc->sc_txcw &= ~TXCW_ANE;
5136 		sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
5137 		sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
5138 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5139 		delay(1000);
5140 	}
5141 
5142 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
5143 		    device_xname(sc->sc_dev),sc->sc_txcw));
5144 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5145 	delay(10000);
5146 
5147 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
5148 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
5149 
5150 	/*
5151 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
5152 	 * optics detect a signal, 0 if they don't.
5153 	 */
5154 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
5155 		/* Have signal; wait for the link to come up. */
5156 
5157 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5158 			/*
5159 			 * Reset the link, and let autonegotiation do its thing
5160 			 */
5161 			sc->sc_ctrl |= CTRL_LRST;
5162 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5163 			delay(1000);
5164 			sc->sc_ctrl &= ~CTRL_LRST;
5165 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5166 			delay(1000);
5167 		}
5168 
5169 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
5170 			delay(10000);
5171 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
5172 				break;
5173 		}
5174 
5175 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
5176 			    device_xname(sc->sc_dev),i));
5177 
5178 		status = CSR_READ(sc, WMREG_STATUS);
5179 		DPRINTF(WM_DEBUG_LINK,
5180 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
5181 			device_xname(sc->sc_dev),status, STATUS_LU));
5182 		if (status & STATUS_LU) {
5183 			/* Link is up. */
5184 			DPRINTF(WM_DEBUG_LINK,
5185 			    ("%s: LINK: set media -> link up %s\n",
5186 			    device_xname(sc->sc_dev),
5187 			    (status & STATUS_FD) ? "FDX" : "HDX"));
5188 
5189 			/*
5190 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
5191 			 * automatically, so re-read CTRL to refresh sc->sc_ctrl.
5192 			 */
5193 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5194 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5195 			sc->sc_fcrtl &= ~FCRTL_XONE;
5196 			if (status & STATUS_FD)
5197 				sc->sc_tctl |=
5198 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5199 			else
5200 				sc->sc_tctl |=
5201 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5202 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
5203 				sc->sc_fcrtl |= FCRTL_XONE;
5204 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5205 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
5206 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
5207 				      sc->sc_fcrtl);
5208 			sc->sc_tbi_linkup = 1;
5209 		} else {
5210 			if (i == WM_LINKUP_TIMEOUT)
5211 				wm_check_for_link(sc);
5212 			/* Link is down. */
5213 			DPRINTF(WM_DEBUG_LINK,
5214 			    ("%s: LINK: set media -> link down\n",
5215 			    device_xname(sc->sc_dev)));
5216 			sc->sc_tbi_linkup = 0;
5217 		}
5218 	} else {
5219 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
5220 		    device_xname(sc->sc_dev)));
5221 		sc->sc_tbi_linkup = 0;
5222 	}
5223 
5224 	wm_tbi_set_linkled(sc);
5225 
5226 	return 0;
5227 }
5228 
5229 /*
5230  * wm_tbi_set_linkled:
5231  *
5232  *	Update the link LED on 1000BASE-X devices.
5233  */
5234 static void
5235 wm_tbi_set_linkled(struct wm_softc *sc)
5236 {
5237 
5238 	if (sc->sc_tbi_linkup)
5239 		sc->sc_ctrl |= CTRL_SWDPIN(0);
5240 	else
5241 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
5242 
5243 	/* 82540 or newer devices are active low */
5244 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
5245 
5246 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5247 }
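
/*
 * Polarity note: the XOR above inverts SWDPIN(0) on 82540-and-newer
 * parts, so with sc_tbi_linkup set the pin is driven low -- the link
 * LED on those boards is active low and lights when the pin is low.
 */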
5248 
5249 /*
5250  * wm_tbi_check_link:
5251  *
5252  *	Check the link on 1000BASE-X devices.
5253  */
5254 static void
5255 wm_tbi_check_link(struct wm_softc *sc)
5256 {
5257 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5258 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5259 	uint32_t rxcw, ctrl, status;
5260 
5261 	status = CSR_READ(sc, WMREG_STATUS);
5262 
5263 	rxcw = CSR_READ(sc, WMREG_RXCW);
5264 	ctrl = CSR_READ(sc, WMREG_CTRL);
5265 
5266 	/* set link status */
5267 	if ((status & STATUS_LU) == 0) {
5268 		DPRINTF(WM_DEBUG_LINK,
5269 		    ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
5270 		sc->sc_tbi_linkup = 0;
5271 	} else if (sc->sc_tbi_linkup == 0) {
5272 		DPRINTF(WM_DEBUG_LINK,
5273 		    ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
5274 		    (status & STATUS_FD) ? "FDX" : "HDX"));
5275 		sc->sc_tbi_linkup = 1;
5276 	}
5277 
5278 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
5279 	    && ((status & STATUS_LU) == 0)) {
5280 		sc->sc_tbi_linkup = 0;
5281 		if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
5282 			/* RXCFG storm! */
5283 			DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
5284 				sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
5285 			wm_init(ifp);
5286 			wm_start(ifp);
5287 		} else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5288 			/* If the timer expired, retry autonegotiation */
5289 			if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
5290 				DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
5291 				sc->sc_tbi_ticks = 0;
5292 				/*
5293 				 * Reset the link, and let autonegotiation do
5294 				 * its thing
5295 				 */
5296 				sc->sc_ctrl |= CTRL_LRST;
5297 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5298 				delay(1000);
5299 				sc->sc_ctrl &= ~CTRL_LRST;
5300 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5301 				delay(1000);
5302 				CSR_WRITE(sc, WMREG_TXCW,
5303 				    sc->sc_txcw & ~TXCW_ANE);
5304 				CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5305 			}
5306 		}
5307 	}
5308 
5309 	wm_tbi_set_linkled(sc);
5310 }
5311 
5312 /*
5313  * wm_gmii_reset:
5314  *
5315  *	Reset the PHY.
5316  */
5317 static void
5318 wm_gmii_reset(struct wm_softc *sc)
5319 {
5320 	uint32_t reg;
5321 	int rv;
5322 
5323 	/* get phy semaphore */
5324 	switch (sc->sc_type) {
5325 	case WM_T_82571:
5326 	case WM_T_82572:
5327 	case WM_T_82573:
5328 	case WM_T_82574:
5329 	case WM_T_82583:
		/* XXX should get sw semaphore, too */
5331 		rv = wm_get_swsm_semaphore(sc);
5332 		break;
5333 	case WM_T_82575:
5334 	case WM_T_82576:
5335 	case WM_T_82580:
5336 	case WM_T_82580ER:
5337 	case WM_T_80003:
5338 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5339 		break;
5340 	case WM_T_ICH8:
5341 	case WM_T_ICH9:
5342 	case WM_T_ICH10:
5343 	case WM_T_PCH:
5344 		rv = wm_get_swfwhw_semaphore(sc);
5345 		break;
5346 	default:
		/* nothing to do */
5348 		rv = 0;
5349 		break;
5350 	}
5351 	if (rv != 0) {
5352 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5353 		    __func__);
5354 		return;
5355 	}
5356 
5357 	switch (sc->sc_type) {
5358 	case WM_T_82542_2_0:
5359 	case WM_T_82542_2_1:
5360 		/* null */
5361 		break;
5362 	case WM_T_82543:
		/*
		 * With the 82543, we need to force the MAC's speed and
		 * duplex settings to match the PHY's speed and duplex
		 * configuration.  In addition, we need to toggle the PHY's
		 * hardware reset pin to take the PHY out of reset.
		 */
5369 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5370 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5371 
5372 		/* The PHY reset pin is active-low. */
5373 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
5374 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
5375 		    CTRL_EXT_SWDPIN(4));
5376 		reg |= CTRL_EXT_SWDPIO(4);
5377 
5378 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5379 		delay(10*1000);
5380 
5381 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
5382 		delay(150);
5383 #if 0
5384 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
5385 #endif
5386 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
5387 		break;
5388 	case WM_T_82544:	/* reset 10000us */
5389 	case WM_T_82540:
5390 	case WM_T_82545:
5391 	case WM_T_82545_3:
5392 	case WM_T_82546:
5393 	case WM_T_82546_3:
5394 	case WM_T_82541:
5395 	case WM_T_82541_2:
5396 	case WM_T_82547:
5397 	case WM_T_82547_2:
5398 	case WM_T_82571:	/* reset 100us */
5399 	case WM_T_82572:
5400 	case WM_T_82573:
5401 	case WM_T_82574:
5402 	case WM_T_82575:
5403 	case WM_T_82576:
5404 	case WM_T_82580:
5405 	case WM_T_82580ER:
5406 	case WM_T_82583:
5407 	case WM_T_80003:
5408 		/* generic reset */
5409 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5410 		delay((sc->sc_type >= WM_T_82571) ? 100 : 10*1000);
5411 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5412 		delay(150);
5413 
5414 		if ((sc->sc_type == WM_T_82541)
5415 		    || (sc->sc_type == WM_T_82541_2)
5416 		    || (sc->sc_type == WM_T_82547)
5417 		    || (sc->sc_type == WM_T_82547_2)) {
			/* workarounds for IGP are done in igp_reset() */
5419 			/* XXX add code to set LED after phy reset */
5420 		}
5421 		break;
5422 	case WM_T_ICH8:
5423 	case WM_T_ICH9:
5424 	case WM_T_ICH10:
5425 	case WM_T_PCH:
5426 		/* generic reset */
5427 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5428 		delay(100);
5429 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5430 		delay(150);
5431 		break;
5432 	default:
5433 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
5434 		    __func__);
5435 		break;
5436 	}
5437 
5438 	/* release PHY semaphore */
5439 	switch (sc->sc_type) {
5440 	case WM_T_82571:
5441 	case WM_T_82572:
5442 	case WM_T_82573:
5443 	case WM_T_82574:
5444 	case WM_T_82583:
		/* XXX should put sw semaphore, too */
5446 		wm_put_swsm_semaphore(sc);
5447 		break;
5448 	case WM_T_82575:
5449 	case WM_T_82576:
5450 	case WM_T_82580:
5451 	case WM_T_82580ER:
5452 	case WM_T_80003:
5453 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5454 		break;
5455 	case WM_T_ICH8:
5456 	case WM_T_ICH9:
5457 	case WM_T_ICH10:
5458 	case WM_T_PCH:
5459 		wm_put_swfwhw_semaphore(sc);
5460 		break;
5461 	default:
		/* nothing to do */
5463 		rv = 0;
5464 		break;
5465 	}
5466 
5467 	/* get_cfg_done */
5468 	wm_get_cfg_done(sc);
5469 
5470 	/* extra setup */
5471 	switch (sc->sc_type) {
5472 	case WM_T_82542_2_0:
5473 	case WM_T_82542_2_1:
5474 	case WM_T_82543:
5475 	case WM_T_82544:
5476 	case WM_T_82540:
5477 	case WM_T_82545:
5478 	case WM_T_82545_3:
5479 	case WM_T_82546:
5480 	case WM_T_82546_3:
5481 	case WM_T_82541_2:
5482 	case WM_T_82547_2:
5483 	case WM_T_82571:
5484 	case WM_T_82572:
5485 	case WM_T_82573:
5486 	case WM_T_82574:
5487 	case WM_T_82575:
5488 	case WM_T_82576:
5489 	case WM_T_82580:
5490 	case WM_T_82580ER:
5491 	case WM_T_82583:
5492 	case WM_T_80003:
5493 		/* null */
5494 		break;
5495 	case WM_T_82541:
5496 	case WM_T_82547:
		/* XXX Actively configure the LED after PHY reset */
5498 		break;
5499 	case WM_T_ICH8:
5500 	case WM_T_ICH9:
5501 	case WM_T_ICH10:
5502 	case WM_T_PCH:
		/* Allow time for h/w to get to a quiescent state after reset */
5504 		delay(10*1000);
5505 
5506 		if (sc->sc_type == WM_T_PCH) {
5507 			wm_hv_phy_workaround_ich8lan(sc);
5508 
5509 			/*
5510 			 * dummy read to clear the phy wakeup bit after lcd
5511 			 * reset
5512 			 */
5513 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
5514 		}
5515 
		/*
		 * XXX Configure the LCD with the extended configuration
		 * region in NVM
		 */
5520 
5521 		/* Configure the LCD with the OEM bits in NVM */
5522 		if (sc->sc_type == WM_T_PCH) {
5523 			/*
5524 			 * Disable LPLU.
5525 			 * XXX It seems that 82567 has LPLU, too.
5526 			 */
5527 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
5529 			reg |= HV_OEM_BITS_ANEGNOW;
5530 			wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
5531 		}
5532 		break;
5533 	default:
5534 		panic("%s: unknown type\n", __func__);
5535 		break;
5536 	}
5537 }
5538 
5539 /*
5540  * wm_gmii_mediainit:
5541  *
5542  *	Initialize media for use on 1000BASE-T devices.
5543  */
5544 static void
5545 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
5546 {
5547 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5548 
5549 	/* We have MII. */
5550 	sc->sc_flags |= WM_F_HAS_MII;
5551 
5552 	if (sc->sc_type == WM_T_80003)
		sc->sc_tipg = TIPG_1000T_80003_DFLT;
5554 	else
5555 		sc->sc_tipg = TIPG_1000T_DFLT;
5556 
5557 	/*
5558 	 * Let the chip set speed/duplex on its own based on
5559 	 * signals from the PHY.
5560 	 * XXXbouyer - I'm not sure this is right for the 80003,
5561 	 * the em driver only sets CTRL_SLU here - but it seems to work.
5562 	 */
5563 	sc->sc_ctrl |= CTRL_SLU;
5564 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5565 
5566 	/* Initialize our media structures and probe the GMII. */
5567 	sc->sc_mii.mii_ifp = ifp;
5568 
5569 	switch (prodid) {
5570 	case PCI_PRODUCT_INTEL_PCH_M_LM:
5571 	case PCI_PRODUCT_INTEL_PCH_M_LC:
5572 		/* 82577 */
5573 		sc->sc_phytype = WMPHY_82577;
5574 		sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
5575 		sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
5576 		break;
5577 	case PCI_PRODUCT_INTEL_PCH_D_DM:
5578 	case PCI_PRODUCT_INTEL_PCH_D_DC:
5579 		/* 82578 */
5580 		sc->sc_phytype = WMPHY_82578;
5581 		sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
5582 		sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
5583 		break;
5584 	case PCI_PRODUCT_INTEL_82801I_BM:
5585 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
5586 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
5587 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
5588 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
5589 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
5590 		/* 82567 */
5591 		sc->sc_phytype = WMPHY_BM;
5592 		sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
5593 		sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
5594 		break;
5595 	default:
5596 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
5597 			sc->sc_mii.mii_readreg = wm_sgmii_readreg;
5598 			sc->sc_mii.mii_writereg = wm_sgmii_writereg;
5599 		} else if (sc->sc_type >= WM_T_80003) {
5600 			sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
5601 			sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
5602 		} else if (sc->sc_type >= WM_T_82544) {
5603 			sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
5604 			sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
5605 		} else {
5606 			sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
5607 			sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
5608 		}
5609 		break;
5610 	}
5611 	sc->sc_mii.mii_statchg = wm_gmii_statchg;
5612 
5613 	wm_gmii_reset(sc);
5614 
5615 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
5616 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
5617 	    wm_gmii_mediastatus);
5618 
5619 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
5620 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)) {
5621 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
5622 			/* Attach only one port */
5623 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
5624 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
5625 		} else {
5626 			int i;
5627 			uint32_t ctrl_ext;
5628 
5629 			/* Power on sgmii phy if it is disabled */
5630 			ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
5631 			CSR_WRITE(sc, WMREG_CTRL_EXT,
5632 			    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
5633 			CSR_WRITE_FLUSH(sc);
5634 			delay(300*1000); /* XXX too long */
5635 
			/* try PHY addresses 1 through 7 */
5637 			for (i = 1; i < 8; i++)
5638 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
5639 				    i, MII_OFFSET_ANY, MIIF_DOPAUSE);
5640 
5641 			/* restore previous sfp cage power state */
5642 			CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
5643 		}
5644 	} else {
5645 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
5646 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
5647 	}
5648 
5649 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
5650 		/* if failed, retry with *_bm_* */
5651 		sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
5652 		sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
5653 
5654 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
5655 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
5656 	}
5657 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
5658 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
5659 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
5660 		sc->sc_phytype = WMPHY_NONE;
5661 	} else {
5662 		/* Check PHY type */
5663 		uint32_t model;
5664 		struct mii_softc *child;
5665 
5666 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
5667 		if (device_is_a(child->mii_dev, "igphy")) {
5668 			struct igphy_softc *isc = (struct igphy_softc *)child;
5669 
5670 			model = isc->sc_mii.mii_mpd_model;
5671 			if (model == MII_MODEL_yyINTEL_I82566)
5672 				sc->sc_phytype = WMPHY_IGP_3;
5673 		}
5674 
5675 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
5676 	}
5677 }
5678 
5679 /*
5680  * wm_gmii_mediastatus:	[ifmedia interface function]
5681  *
5682  *	Get the current interface media status on a 1000BASE-T device.
5683  */
5684 static void
5685 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5686 {
5687 	struct wm_softc *sc = ifp->if_softc;
5688 
5689 	ether_mediastatus(ifp, ifmr);
5690 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
5691 	    | sc->sc_flowflags;
5692 }
5693 
5694 /*
5695  * wm_gmii_mediachange:	[ifmedia interface function]
5696  *
5697  *	Set hardware to newly-selected media on a 1000BASE-T device.
5698  */
5699 static int
5700 wm_gmii_mediachange(struct ifnet *ifp)
5701 {
5702 	struct wm_softc *sc = ifp->if_softc;
5703 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5704 	int rc;
5705 
5706 	if ((ifp->if_flags & IFF_UP) == 0)
5707 		return 0;
5708 
5709 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
5710 	sc->sc_ctrl |= CTRL_SLU;
5711 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
5712 	    || (sc->sc_type > WM_T_82543)) {
5713 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
5714 	} else {
5715 		sc->sc_ctrl &= ~CTRL_ASDE;
5716 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5717 		if (ife->ifm_media & IFM_FDX)
5718 			sc->sc_ctrl |= CTRL_FD;
5719 		switch (IFM_SUBTYPE(ife->ifm_media)) {
5720 		case IFM_10_T:
5721 			sc->sc_ctrl |= CTRL_SPEED_10;
5722 			break;
5723 		case IFM_100_TX:
5724 			sc->sc_ctrl |= CTRL_SPEED_100;
5725 			break;
5726 		case IFM_1000_T:
5727 			sc->sc_ctrl |= CTRL_SPEED_1000;
5728 			break;
5729 		default:
5730 			panic("wm_gmii_mediachange: bad media 0x%x",
5731 			    ife->ifm_media);
5732 		}
5733 	}
5734 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5735 	if (sc->sc_type <= WM_T_82543)
5736 		wm_gmii_reset(sc);
5737 
5738 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
5739 		return 0;
5740 	return rc;
5741 }
5742 
5743 #define	MDI_IO		CTRL_SWDPIN(2)
5744 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
5745 #define	MDI_CLK		CTRL_SWDPIN(3)
5746 
5747 static void
5748 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
5749 {
5750 	uint32_t i, v;
5751 
5752 	v = CSR_READ(sc, WMREG_CTRL);
5753 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
5754 	v |= MDI_DIR | CTRL_SWDPIO(3);
5755 
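	/*
	 * Clock each bit out MSB first: drive MDI_IO, then pulse MDI_CLK
	 * high and back low, with ~10us per phase.
	 */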
5756 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
5757 		if (data & i)
5758 			v |= MDI_IO;
5759 		else
5760 			v &= ~MDI_IO;
5761 		CSR_WRITE(sc, WMREG_CTRL, v);
5762 		delay(10);
5763 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5764 		delay(10);
5765 		CSR_WRITE(sc, WMREG_CTRL, v);
5766 		delay(10);
5767 	}
5768 }
5769 
5770 static uint32_t
5771 i82543_mii_recvbits(struct wm_softc *sc)
5772 {
5773 	uint32_t v, i, data = 0;
5774 
5775 	v = CSR_READ(sc, WMREG_CTRL);
5776 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
5777 	v |= CTRL_SWDPIO(3);
5778 
5779 	CSR_WRITE(sc, WMREG_CTRL, v);
5780 	delay(10);
5781 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5782 	delay(10);
5783 	CSR_WRITE(sc, WMREG_CTRL, v);
5784 	delay(10);
5785 
5786 	for (i = 0; i < 16; i++) {
5787 		data <<= 1;
5788 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5789 		delay(10);
5790 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
5791 			data |= 1;
5792 		CSR_WRITE(sc, WMREG_CTRL, v);
5793 		delay(10);
5794 	}
5795 
5796 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5797 	delay(10);
5798 	CSR_WRITE(sc, WMREG_CTRL, v);
5799 	delay(10);
5800 
5801 	return data;
5802 }
5803 
5804 #undef MDI_IO
5805 #undef MDI_DIR
5806 #undef MDI_CLK
5807 
5808 /*
5809  * wm_gmii_i82543_readreg:	[mii interface function]
5810  *
5811  *	Read a PHY register on the GMII (i82543 version).
5812  */
5813 static int
5814 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
5815 {
5816 	struct wm_softc *sc = device_private(self);
5817 	int rv;
5818 
5819 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
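	/* 14-bit read frame, sent MSB first: ST(2) OP(2) PHY(5) REG(5) */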
5820 	i82543_mii_sendbits(sc, reg | (phy << 5) |
5821 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
5822 	rv = i82543_mii_recvbits(sc) & 0xffff;
5823 
5824 	DPRINTF(WM_DEBUG_GMII,
5825 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
5826 	    device_xname(sc->sc_dev), phy, reg, rv));
5827 
5828 	return rv;
5829 }
5830 
5831 /*
5832  * wm_gmii_i82543_writereg:	[mii interface function]
5833  *
5834  *	Write a PHY register on the GMII (i82543 version).
5835  */
5836 static void
5837 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
5838 {
5839 	struct wm_softc *sc = device_private(self);
5840 
5841 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
5842 	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
5843 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
5844 	    (MII_COMMAND_START << 30), 32);
5845 }
5846 
5847 /*
5848  * wm_gmii_i82544_readreg:	[mii interface function]
5849  *
5850  *	Read a PHY register on the GMII.
5851  */
5852 static int
5853 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
5854 {
5855 	struct wm_softc *sc = device_private(self);
5856 	uint32_t mdic = 0;
5857 	int i, rv;
5858 
5859 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
5860 	    MDIC_REGADD(reg));
5861 
5862 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
5863 		mdic = CSR_READ(sc, WMREG_MDIC);
5864 		if (mdic & MDIC_READY)
5865 			break;
5866 		delay(50);
5867 	}
5868 
5869 	if ((mdic & MDIC_READY) == 0) {
5870 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
5871 		    device_xname(sc->sc_dev), phy, reg);
5872 		rv = 0;
5873 	} else if (mdic & MDIC_E) {
5874 #if 0 /* This is normal if no PHY is present. */
5875 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
5876 		    device_xname(sc->sc_dev), phy, reg);
5877 #endif
5878 		rv = 0;
5879 	} else {
5880 		rv = MDIC_DATA(mdic);
5881 		if (rv == 0xffff)
5882 			rv = 0;
5883 	}
5884 
5885 	return rv;
5886 }
5887 
5888 /*
5889  * wm_gmii_i82544_writereg:	[mii interface function]
5890  *
5891  *	Write a PHY register on the GMII.
5892  */
5893 static void
5894 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
5895 {
5896 	struct wm_softc *sc = device_private(self);
5897 	uint32_t mdic = 0;
5898 	int i;
5899 
5900 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
5901 	    MDIC_REGADD(reg) | MDIC_DATA(val));
5902 
5903 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
5904 		mdic = CSR_READ(sc, WMREG_MDIC);
5905 		if (mdic & MDIC_READY)
5906 			break;
5907 		delay(50);
5908 	}
5909 
5910 	if ((mdic & MDIC_READY) == 0)
5911 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
5912 		    device_xname(sc->sc_dev), phy, reg);
5913 	else if (mdic & MDIC_E)
5914 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
5915 		    device_xname(sc->sc_dev), phy, reg);
5916 }
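
/*
 * The i82544 read and write paths above share the same MDIC_READY poll.
 * A sketch of a common helper both could call; the name is hypothetical
 * and the driver does not actually use it, so it is kept disabled:
 */
#if 0
static uint32_t
wm_i82544_mdic_wait(struct wm_softc *sc)
{
	uint32_t mdic = 0;
	int i;

	/* Poll until the MDI cycle completes or we give up. */
	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(50);
	}
	return mdic;	/* caller checks MDIC_READY and MDIC_E */
}
#endif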
5917 
5918 /*
5919  * wm_gmii_i80003_readreg:	[mii interface function]
5920  *
5921  *	Read a PHY register on the kumeran
5922  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
5924  */
5925 static int
5926 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
5927 {
5928 	struct wm_softc *sc = device_private(self);
5929 	int sem;
5930 	int rv;
5931 
5932 	if (phy != 1) /* only one PHY on kumeran bus */
5933 		return 0;
5934 
5935 	sem = swfwphysem[sc->sc_funcid];
5936 	if (wm_get_swfw_semaphore(sc, sem)) {
5937 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5938 		    __func__);
5939 		return 0;
5940 	}
5941 
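	/*
	 * Registers below GG82563_MIN_ALT_REG are paged via the normal
	 * page-select register; higher ones use the alternate one.
	 */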
5942 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
5943 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5944 		    reg >> GG82563_PAGE_SHIFT);
5945 	} else {
5946 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
5947 		    reg >> GG82563_PAGE_SHIFT);
5948 	}
	/* Wait another 200us to work around a ready-bit bug in MDIC */
5950 	delay(200);
5951 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
5952 	delay(200);
5953 
5954 	wm_put_swfw_semaphore(sc, sem);
5955 	return rv;
5956 }
5957 
5958 /*
5959  * wm_gmii_i80003_writereg:	[mii interface function]
5960  *
5961  *	Write a PHY register on the kumeran.
5962  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
5964  */
5965 static void
5966 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
5967 {
5968 	struct wm_softc *sc = device_private(self);
5969 	int sem;
5970 
5971 	if (phy != 1) /* only one PHY on kumeran bus */
5972 		return;
5973 
5974 	sem = swfwphysem[sc->sc_funcid];
5975 	if (wm_get_swfw_semaphore(sc, sem)) {
5976 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5977 		    __func__);
5978 		return;
5979 	}
5980 
5981 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
5982 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5983 		    reg >> GG82563_PAGE_SHIFT);
5984 	} else {
5985 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
5986 		    reg >> GG82563_PAGE_SHIFT);
5987 	}
	/* Wait another 200us to work around a ready-bit bug in MDIC */
5989 	delay(200);
5990 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
5991 	delay(200);
5992 
5993 	wm_put_swfw_semaphore(sc, sem);
5994 }
5995 
5996 /*
5997  * wm_gmii_bm_readreg:	[mii interface function]
5998  *
 *	Read a PHY register on the BM PHY (82567).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6002  */
6003 static int
6004 wm_gmii_bm_readreg(device_t self, int phy, int reg)
6005 {
6006 	struct wm_softc *sc = device_private(self);
6007 	int sem;
6008 	int rv;
6009 
6010 	sem = swfwphysem[sc->sc_funcid];
6011 	if (wm_get_swfw_semaphore(sc, sem)) {
6012 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6013 		    __func__);
6014 		return 0;
6015 	}
6016 
	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
		if (phy == 1)
			wm_gmii_i82544_writereg(self, phy, 0x1f, reg);
		else
			wm_gmii_i82544_writereg(self, phy,
			    GG82563_PHY_PAGE_SELECT,
			    reg >> GG82563_PAGE_SHIFT);
	}
6026 
6027 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6028 	wm_put_swfw_semaphore(sc, sem);
6029 	return rv;
6030 }
6031 
6032 /*
6033  * wm_gmii_bm_writereg:	[mii interface function]
6034  *
 *	Write a PHY register on the BM PHY (82567).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6038  */
6039 static void
6040 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6041 {
6042 	struct wm_softc *sc = device_private(self);
6043 	int sem;
6044 
6045 	sem = swfwphysem[sc->sc_funcid];
6046 	if (wm_get_swfw_semaphore(sc, sem)) {
6047 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6048 		    __func__);
6049 		return;
6050 	}
6051 
	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
		if (phy == 1)
			wm_gmii_i82544_writereg(self, phy, 0x1f, reg);
		else
			wm_gmii_i82544_writereg(self, phy,
			    GG82563_PHY_PAGE_SELECT,
			    reg >> GG82563_PAGE_SHIFT);
	}
6061 
6062 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6063 	wm_put_swfw_semaphore(sc, sem);
6064 }
6065 
6066 static void
6067 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
6068 {
6069 	struct wm_softc *sc = device_private(self);
6070 	uint16_t regnum = BM_PHY_REG_NUM(offset);
6071 	uint16_t wuce;
6072 
6073 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
6074 	if (sc->sc_type == WM_T_PCH) {
		/* XXX the e1000 driver does nothing here... why? */
6076 	}
6077 
6078 	/* Set page 769 */
6079 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6080 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6081 
6082 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
6083 
6084 	wuce &= ~BM_WUC_HOST_WU_BIT;
6085 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
6086 	    wuce | BM_WUC_ENABLE_BIT);
6087 
6088 	/* Select page 800 */
6089 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6090 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
6091 
	/* Write the register address within page 800 */
6093 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
6094 
6095 	if (rd)
6096 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
6097 	else
6098 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
6099 
6100 	/* Set page 769 */
6101 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6102 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6103 
6104 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
6105 }
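
/*
 * Illustrative (hypothetical) wrappers showing how the page-800 helper
 * above maps onto simple read/write calls; the driver invokes
 * wm_access_phy_wakeup_reg_bm() directly, so these stay disabled:
 */
#if 0
static int16_t
wm_bm_wuc_readreg_example(device_t self, int offset)
{
	int16_t val = 0;

	wm_access_phy_wakeup_reg_bm(self, offset, &val, 1);
	return val;
}

static void
wm_bm_wuc_writereg_example(device_t self, int offset, int16_t val)
{

	wm_access_phy_wakeup_reg_bm(self, offset, &val, 0);
}
#endif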
6106 
6107 /*
6108  * wm_gmii_hv_readreg:	[mii interface function]
6109  *
 *	Read a PHY register on the HV (PCH) PHY, i.e. 82577/82578.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6113  */
6114 static int
6115 wm_gmii_hv_readreg(device_t self, int phy, int reg)
6116 {
6117 	struct wm_softc *sc = device_private(self);
6118 	uint16_t page = BM_PHY_REG_PAGE(reg);
6119 	uint16_t regnum = BM_PHY_REG_NUM(reg);
6120 	uint16_t val;
6121 	int rv;
6122 
6123 	if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6124 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6125 		    __func__);
6126 		return 0;
6127 	}
6128 
6129 	/* XXX Workaround failure in MDIO access while cable is disconnected */
6130 	if (sc->sc_phytype == WMPHY_82577) {
6131 		/* XXX must write */
6132 	}
6133 
	/* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return val;
	}

	/*
	 * Pages lower than 768 work differently than the rest, too;
	 * that access method is not implemented here yet.
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_readreg!!!\n");
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return 0;
	}
6148 
6149 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6150 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6151 		    page << BME1000_PAGE_SHIFT);
6152 	}
6153 
6154 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
6155 	wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6156 	return rv;
6157 }
6158 
6159 /*
6160  * wm_gmii_hv_writereg:	[mii interface function]
6161  *
 *	Write a PHY register on the HV (PCH) PHY, i.e. 82577/82578.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6165  */
6166 static void
6167 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
6168 {
6169 	struct wm_softc *sc = device_private(self);
6170 	uint16_t page = BM_PHY_REG_PAGE(reg);
6171 	uint16_t regnum = BM_PHY_REG_NUM(reg);
6172 
6173 	if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6174 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6175 		    __func__);
6176 		return;
6177 	}
6178 
6179 	/* XXX Workaround failure in MDIO access while cable is disconnected */
6180 
	/* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		uint16_t tmp;

		tmp = val;
		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return;
	}

	/*
	 * Pages lower than 768 work differently than the rest, too;
	 * that access method is not implemented here yet.
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_writereg!!!\n");
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return;
	}
6198 
6199 	/*
6200 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
6201 	 * Power Down (whenever bit 11 of the PHY control register is set)
6202 	 */
6203 
6204 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6205 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6206 		    page << BME1000_PAGE_SHIFT);
6207 	}
6208 
6209 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
6210 	wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6211 }
6212 
6213 /*
 * wm_sgmii_readreg:	[mii interface function]
 *
 *	Read a PHY register on the SGMII, via the I2C command interface.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6219  */
6220 static int
6221 wm_sgmii_readreg(device_t self, int phy, int reg)
6222 {
6223 	struct wm_softc *sc = device_private(self);
6224 	uint32_t i2ccmd;
6225 	int i, rv;
6226 
6227 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
6228 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6229 		    __func__);
6230 		return 0;
6231 	}
6232 
6233 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
6234 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
6235 	    | I2CCMD_OPCODE_READ;
6236 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
6237 
6238 	/* Poll the ready bit */
6239 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
6240 		delay(50);
6241 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
6242 		if (i2ccmd & I2CCMD_READY)
6243 			break;
6244 	}
6245 	if ((i2ccmd & I2CCMD_READY) == 0)
6246 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
6247 	if ((i2ccmd & I2CCMD_ERROR) != 0)
6248 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
6249 
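	/* I2CCMD returns the 16-bit data byte-swapped; swap to host order. */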
6250 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
6251 
6252 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6253 	return rv;
6254 }
6255 
6256 /*
 * wm_sgmii_writereg:	[mii interface function]
 *
 *	Write a PHY register on the SGMII, via the I2C command interface.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6262  */
6263 static void
6264 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
6265 {
6266 	struct wm_softc *sc = device_private(self);
6267 	uint32_t i2ccmd;
6268 	int i;
6269 
6270 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
6271 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6272 		    __func__);
6273 		return;
6274 	}
6275 
6276 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
6277 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
6278 	    | I2CCMD_OPCODE_WRITE;
6279 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
6280 
6281 	/* Poll the ready bit */
6282 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
6283 		delay(50);
6284 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
6285 		if (i2ccmd & I2CCMD_READY)
6286 			break;
6287 	}
6288 	if ((i2ccmd & I2CCMD_READY) == 0)
6289 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
6290 	if ((i2ccmd & I2CCMD_ERROR) != 0)
6291 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
6292 
	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6294 }
6295 
6296 /*
6297  * wm_gmii_statchg:	[mii interface function]
6298  *
6299  *	Callback from MII layer when media changes.
6300  */
6301 static void
6302 wm_gmii_statchg(device_t self)
6303 {
6304 	struct wm_softc *sc = device_private(self);
6305 	struct mii_data *mii = &sc->sc_mii;
6306 
6307 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
6308 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6309 	sc->sc_fcrtl &= ~FCRTL_XONE;
6310 
6311 	/*
6312 	 * Get flow control negotiation result.
6313 	 */
6314 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
6315 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
6316 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
6317 		mii->mii_media_active &= ~IFM_ETH_FMASK;
6318 	}
6319 
6320 	if (sc->sc_flowflags & IFM_FLOW) {
6321 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
6322 			sc->sc_ctrl |= CTRL_TFCE;
6323 			sc->sc_fcrtl |= FCRTL_XONE;
6324 		}
6325 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
6326 			sc->sc_ctrl |= CTRL_RFCE;
6327 	}
6328 
6329 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
6330 		DPRINTF(WM_DEBUG_LINK,
6331 		    ("%s: LINK: statchg: FDX\n", device_xname(sc->sc_dev)));
6332 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6333 	} else {
6334 		DPRINTF(WM_DEBUG_LINK,
6335 		    ("%s: LINK: statchg: HDX\n", device_xname(sc->sc_dev)));
6336 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6337 	}
6338 
6339 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6340 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6341 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
6342 						 : WMREG_FCRTL, sc->sc_fcrtl);
6343 	if (sc->sc_type == WM_T_80003) {
6344 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
6345 		case IFM_1000_T:
6346 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
6347 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg = TIPG_1000T_80003_DFLT;
6349 			break;
6350 		default:
6351 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
6352 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg = TIPG_10_100_80003_DFLT;
6354 			break;
6355 		}
6356 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
6357 	}
6358 }
6359 
6360 /*
6361  * wm_kmrn_readreg:
6362  *
6363  *	Read a kumeran register
6364  */
6365 static int
6366 wm_kmrn_readreg(struct wm_softc *sc, int reg)
6367 {
6368 	int rv;
6369 
	if (sc->sc_flags & WM_F_SWFW_SYNC) {
6371 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
6372 			aprint_error_dev(sc->sc_dev,
6373 			    "%s: failed to get semaphore\n", __func__);
6374 			return 0;
6375 		}
	} else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
6377 		if (wm_get_swfwhw_semaphore(sc)) {
6378 			aprint_error_dev(sc->sc_dev,
6379 			    "%s: failed to get semaphore\n", __func__);
6380 			return 0;
6381 		}
6382 	}
6383 
6384 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
6385 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
6386 	    KUMCTRLSTA_REN);
6387 	delay(2);
6388 
6389 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
6390 
	if (sc->sc_flags & WM_F_SWFW_SYNC)
6392 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
6394 		wm_put_swfwhw_semaphore(sc);
6395 
6396 	return rv;
6397 }
6398 
6399 /*
6400  * wm_kmrn_writereg:
6401  *
6402  *	Write a kumeran register
6403  */
6404 static void
6405 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
6406 {
6407 
	if (sc->sc_flags & WM_F_SWFW_SYNC) {
6409 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
6410 			aprint_error_dev(sc->sc_dev,
6411 			    "%s: failed to get semaphore\n", __func__);
6412 			return;
6413 		}
	} else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
6415 		if (wm_get_swfwhw_semaphore(sc)) {
6416 			aprint_error_dev(sc->sc_dev,
6417 			    "%s: failed to get semaphore\n", __func__);
6418 			return;
6419 		}
6420 	}
6421 
6422 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
6423 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
6424 	    (val & KUMCTRLSTA_MASK));
6425 
	if (sc->sc_flags & WM_F_SWFW_SYNC)
6427 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
6429 		wm_put_swfwhw_semaphore(sc);
6430 }
6431 
6432 static int
6433 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
6434 {
6435 	uint32_t eecd = 0;
6436 
6437 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
6438 	    || sc->sc_type == WM_T_82583) {
6439 		eecd = CSR_READ(sc, WMREG_EECD);
6440 
6441 		/* Isolate bits 15 & 16 */
6442 		eecd = ((eecd >> 15) & 0x03);
6443 
6444 		/* If both bits are set, device is Flash type */
6445 		if (eecd == 0x03)
6446 			return 0;
6447 	}
6448 	return 1;
6449 }
6450 
6451 static int
6452 wm_get_swsm_semaphore(struct wm_softc *sc)
6453 {
6454 	int32_t timeout;
6455 	uint32_t swsm;
6456 
6457 	/* Get the FW semaphore. */
6458 	timeout = 1000 + 1; /* XXX */
6459 	while (timeout) {
6460 		swsm = CSR_READ(sc, WMREG_SWSM);
6461 		swsm |= SWSM_SWESMBI;
6462 		CSR_WRITE(sc, WMREG_SWSM, swsm);
6463 		/* if we managed to set the bit we got the semaphore. */
6464 		swsm = CSR_READ(sc, WMREG_SWSM);
6465 		if (swsm & SWSM_SWESMBI)
6466 			break;
6467 
6468 		delay(50);
6469 		timeout--;
6470 	}
6471 
6472 	if (timeout == 0) {
		aprint_error_dev(sc->sc_dev, "could not acquire SWSM semaphore\n");
6474 		/* Release semaphores */
6475 		wm_put_swsm_semaphore(sc);
6476 		return 1;
6477 	}
6478 	return 0;
6479 }
6480 
6481 static void
6482 wm_put_swsm_semaphore(struct wm_softc *sc)
6483 {
6484 	uint32_t swsm;
6485 
6486 	swsm = CSR_READ(sc, WMREG_SWSM);
6487 	swsm &= ~(SWSM_SWESMBI);
6488 	CSR_WRITE(sc, WMREG_SWSM, swsm);
6489 }
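
/*
 * Typical usage bracket for the software semaphore above; a minimal
 * illustrative sketch only (real callers add error reporting), with a
 * hypothetical name, kept disabled since the driver calls the get/put
 * pair directly:
 */
#if 0
static void
wm_swsm_bracket_example(struct wm_softc *sc)
{

	if (wm_get_swsm_semaphore(sc) != 0)
		return;		/* firmware side holds the semaphore */
	/* ... access the shared EEPROM/PHY resource here ... */
	wm_put_swsm_semaphore(sc);
}
#endif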
6490 
6491 static int
6492 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
6493 {
6494 	uint32_t swfw_sync;
6495 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
6496 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;
6498 
6499 	for (timeout = 0; timeout < 200; timeout++) {
6500 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
6501 			if (wm_get_swsm_semaphore(sc)) {
6502 				aprint_error_dev(sc->sc_dev,
6503 				    "%s: failed to get semaphore\n",
6504 				    __func__);
6505 				return 1;
6506 			}
6507 		}
6508 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
6509 		if ((swfw_sync & (swmask | fwmask)) == 0) {
6510 			swfw_sync |= swmask;
6511 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
6512 			if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6513 				wm_put_swsm_semaphore(sc);
6514 			return 0;
6515 		}
6516 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6517 			wm_put_swsm_semaphore(sc);
6518 		delay(5000);
6519 	}
6520 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
6521 	    device_xname(sc->sc_dev), mask, swfw_sync);
6522 	return 1;
6523 }
6524 
6525 static void
6526 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
6527 {
6528 	uint32_t swfw_sync;
6529 
6530 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
6531 		while (wm_get_swsm_semaphore(sc) != 0)
6532 			continue;
6533 	}
6534 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
6535 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
6536 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
6537 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6538 		wm_put_swsm_semaphore(sc);
6539 }
6540 
6541 static int
6542 wm_get_swfwhw_semaphore(struct wm_softc *sc)
6543 {
6544 	uint32_t ext_ctrl;
	int timeout;
6546 
6547 	for (timeout = 0; timeout < 200; timeout++) {
6548 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6549 		ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
6550 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
6551 
6552 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6553 		if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
6554 			return 0;
6555 		delay(5000);
6556 	}
6557 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
6558 	    device_xname(sc->sc_dev), ext_ctrl);
6559 	return 1;
6560 }
6561 
6562 static void
6563 wm_put_swfwhw_semaphore(struct wm_softc *sc)
6564 {
	uint32_t ext_ctrl;

	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6567 	ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
6568 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
6569 }
6570 
6571 static int
6572 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
6573 {
6574 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
6575 	uint8_t bank_high_byte;
6576 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
6577 
6578 	if ((sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
6579 		/* Value of bit 22 corresponds to the flash bank we're on. */
6580 		*bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
6581 	} else {
6582 		wm_read_ich8_byte(sc, act_offset, &bank_high_byte);
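		/* A valid bank's signature byte has bits 7:6 == 10b */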
6583 		if ((bank_high_byte & 0xc0) == 0x80)
6584 			*bank = 0;
6585 		else {
6586 			wm_read_ich8_byte(sc, act_offset + bank1_offset,
6587 			    &bank_high_byte);
6588 			if ((bank_high_byte & 0xc0) == 0x80)
6589 				*bank = 1;
6590 			else {
6591 				aprint_error_dev(sc->sc_dev,
6592 				    "EEPROM not present\n");
6593 				return -1;
6594 			}
6595 		}
6596 	}
6597 
6598 	return 0;
6599 }
6600 
6601 /******************************************************************************
6602  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
6603  * register.
6604  *
6605  * sc - Struct containing variables accessed by shared code
 * offset - offset of the first word in the EEPROM to read
 * words - number of words to read
 * data - words read from the EEPROM
6609  *****************************************************************************/
6610 static int
6611 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
6612 {
6613 	int32_t  error = 0;
6614 	uint32_t flash_bank = 0;
6615 	uint32_t act_offset = 0;
6616 	uint32_t bank_offset = 0;
6617 	uint16_t word = 0;
6618 	uint16_t i = 0;
6619 
	/*
	 * We need to know which is the valid flash bank.  In the event
	 * that we didn't allocate eeprom_shadow_ram, we may not be
	 * managing flash_bank.  So it cannot be trusted and needs
	 * to be updated with each read.
	 */
6625 	error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
6626 	if (error) {
6627 		aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
6628 		    __func__);
6629 		return error;
6630 	}
6631 
	/*
	 * Adjust the offset if we're on bank 1; the bank size is in
	 * words, so double it to get a byte offset.
	 */
6633 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
6634 
6635 	error = wm_get_swfwhw_semaphore(sc);
6636 	if (error) {
6637 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6638 		    __func__);
6639 		return error;
6640 	}
6641 
6642 	for (i = 0; i < words; i++) {
6643 		/* The NVM part needs a byte offset, hence * 2 */
6644 		act_offset = bank_offset + ((offset + i) * 2);
6645 		error = wm_read_ich8_word(sc, act_offset, &word);
6646 		if (error) {
6647 			aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
6648 			    __func__);
6649 			break;
6650 		}
6651 		data[i] = word;
6652 	}
6653 
6654 	wm_put_swfwhw_semaphore(sc);
6655 	return error;
6656 }
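
/*
 * Illustrative use of the ICH8 NVM read path above; the function name,
 * the offset and the printf are made up for the example, which is why
 * it stays disabled:
 */
#if 0
static void
wm_read_eeprom_ich8_example(struct wm_softc *sc)
{
	uint16_t words[2];

	/* Read two 16-bit words starting at NVM word offset 0x10. */
	if (wm_read_eeprom_ich8(sc, 0x10, 2, words) == 0)
		printf("%s: NVM[0x10]=0x%04x NVM[0x11]=0x%04x\n",
		    device_xname(sc->sc_dev), words[0], words[1]);
}
#endif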
6657 
6658 /******************************************************************************
6659  * This function does initial flash setup so that a new read/write/erase cycle
6660  * can be started.
6661  *
6662  * sc - The pointer to the hw structure
6663  ****************************************************************************/
6664 static int32_t
6665 wm_ich8_cycle_init(struct wm_softc *sc)
6666 {
6667 	uint16_t hsfsts;
6668 	int32_t error = 1;
6669 	int32_t i     = 0;
6670 
6671 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6672 
	/* Maybe we should check the Flash Descriptor Valid bit in HW status */
6674 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
6675 		return error;
6676 	}
6677 
	/* Clear FCERR and DAEL in HW status by writing 1s */
6680 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
6681 
6682 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6683 
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against in order to start a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it reads as 1 after a
	 * hardware reset, which could then be used to tell whether a cycle
	 * is in progress or has completed.  We should also have a software
	 * semaphore mechanism guarding FDONE or the cycle-in-progress bit,
	 * so that access to those bits by two threads is serialized and
	 * two threads don't start a cycle at the same time.
	 */
6694 
6695 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
6696 		/*
6697 		 * There is no cycle running at present, so we can start a
6698 		 * cycle
6699 		 */
6700 
6701 		/* Begin by setting Flash Cycle Done. */
6702 		hsfsts |= HSFSTS_DONE;
6703 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6704 		error = 0;
6705 	} else {
		/*
		 * Otherwise poll for some time so the current cycle has a
		 * chance to end before giving up.
		 */
6710 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
6711 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6712 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
6713 				error = 0;
6714 				break;
6715 			}
6716 			delay(1);
6717 		}
6718 		if (error == 0) {
			/*
			 * The previous cycle finished while we waited; now
			 * set the Flash Cycle Done bit.
			 */
6723 			hsfsts |= HSFSTS_DONE;
6724 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6725 		}
6726 	}
6727 	return error;
6728 }
6729 
6730 /******************************************************************************
6731  * This function starts a flash cycle and waits for its completion
6732  *
6733  * sc - The pointer to the hw structure
6734  ****************************************************************************/
6735 static int32_t
6736 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
6737 {
6738 	uint16_t hsflctl;
6739 	uint16_t hsfsts;
6740 	int32_t error = 1;
6741 	uint32_t i = 0;
6742 
6743 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
6744 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
6745 	hsflctl |= HSFCTL_GO;
6746 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
6747 
6748 	/* wait till FDONE bit is set to 1 */
6749 	do {
6750 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6751 		if (hsfsts & HSFSTS_DONE)
6752 			break;
6753 		delay(1);
6754 		i++;
6755 	} while (i < timeout);
	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
6757 		error = 0;
6758 
6759 	return error;
6760 }
6761 
6762 /******************************************************************************
6763  * Reads a byte or word from the NVM using the ICH8 flash access registers.
6764  *
6765  * sc - The pointer to the hw structure
6766  * index - The index of the byte or word to read.
6767  * size - Size of data to read, 1=byte 2=word
6768  * data - Pointer to the word to store the value read.
6769  *****************************************************************************/
6770 static int32_t
6771 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
6772     uint32_t size, uint16_t* data)
6773 {
6774 	uint16_t hsfsts;
6775 	uint16_t hsflctl;
6776 	uint32_t flash_linear_address;
6777 	uint32_t flash_data = 0;
6778 	int32_t error = 1;
6779 	int32_t count = 0;
6780 
	if (size < 1 || size > 2 || data == NULL ||
6782 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
6783 		return error;
6784 
6785 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
6786 	    sc->sc_ich8_flash_base;
6787 
6788 	do {
6789 		delay(1);
6790 		/* Steps */
6791 		error = wm_ich8_cycle_init(sc);
6792 		if (error)
6793 			break;
6794 
6795 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
6796 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
6797 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
6798 		    & HSFCTL_BCOUNT_MASK;
6799 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
6800 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
6801 
6802 		/*
6803 		 * Write the last 24 bits of index into Flash Linear address
6804 		 * field in Flash Address
6805 		 */
6806 		/* TODO: TBD maybe check the index against the size of flash */
6807 
6808 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
6809 
6810 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
6811 
		/*
		 * If FCERR is set, clear it and try the whole sequence a
		 * few more times; otherwise read the data out of the
		 * Flash Data0 register, least significant byte first.
		 */
6818 		if (error == 0) {
6819 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
6820 			if (size == 1)
6821 				*data = (uint8_t)(flash_data & 0x000000FF);
6822 			else if (size == 2)
6823 				*data = (uint16_t)(flash_data & 0x0000FFFF);
6824 			break;
6825 		} else {
6826 			/*
6827 			 * If we've gotten here, then things are probably
6828 			 * completely hosed, but if the error condition is
6829 			 * detected, it won't hurt to give it another try...
6830 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
6831 			 */
6832 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6833 			if (hsfsts & HSFSTS_ERR) {
6834 				/* Repeat for some time before giving up. */
6835 				continue;
6836 			} else if ((hsfsts & HSFSTS_DONE) == 0)
6837 				break;
6838 		}
6839 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
6840 
6841 	return error;
6842 }
6843 
6844 /******************************************************************************
6845  * Reads a single byte from the NVM using the ICH8 flash access registers.
6846  *
6847  * sc - pointer to wm_hw structure
6848  * index - The index of the byte to read.
6849  * data - Pointer to a byte to store the value read.
6850  *****************************************************************************/
6851 static int32_t
6852 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
6853 {
6854 	int32_t status;
6855 	uint16_t word = 0;
6856 
6857 	status = wm_read_ich8_data(sc, index, 1, &word);
6858 	if (status == 0)
6859 		*data = (uint8_t)word;
6860 
6861 	return status;
6862 }
6863 
6864 /******************************************************************************
6865  * Reads a word from the NVM using the ICH8 flash access registers.
6866  *
6867  * sc - pointer to wm_hw structure
6868  * index - The starting byte index of the word to read.
6869  * data - Pointer to a word to store the value read.
6870  *****************************************************************************/
6871 static int32_t
6872 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
6873 {
6874 	int32_t status;
6875 
6876 	status = wm_read_ich8_data(sc, index, 2, data);
6877 	return status;
6878 }
6879 
6880 static int
6881 wm_check_mng_mode(struct wm_softc *sc)
6882 {
6883 	int rv;
6884 
6885 	switch (sc->sc_type) {
6886 	case WM_T_ICH8:
6887 	case WM_T_ICH9:
6888 	case WM_T_ICH10:
6889 	case WM_T_PCH:
6890 		rv = wm_check_mng_mode_ich8lan(sc);
6891 		break;
6892 	case WM_T_82574:
6893 	case WM_T_82583:
6894 		rv = wm_check_mng_mode_82574(sc);
6895 		break;
6896 	case WM_T_82571:
6897 	case WM_T_82572:
6898 	case WM_T_82573:
6899 	case WM_T_80003:
6900 		rv = wm_check_mng_mode_generic(sc);
6901 		break;
6902 	default:
		/* nothing to do */
6904 		rv = 0;
6905 		break;
6906 	}
6907 
6908 	return rv;
6909 }
6910 
6911 static int
6912 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
6913 {
6914 	uint32_t fwsm;
6915 
6916 	fwsm = CSR_READ(sc, WMREG_FWSM);
6917 
6918 	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
6919 		return 1;
6920 
6921 	return 0;
6922 }
6923 
6924 static int
6925 wm_check_mng_mode_82574(struct wm_softc *sc)
6926 {
6927 	uint16_t data;
6928 
6929 	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
6930 
6931 	if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
6932 		return 1;
6933 
6934 	return 0;
6935 }
6936 
6937 static int
6938 wm_check_mng_mode_generic(struct wm_softc *sc)
6939 {
6940 	uint32_t fwsm;
6941 
6942 	fwsm = CSR_READ(sc, WMREG_FWSM);
6943 
6944 	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
6945 		return 1;
6946 
6947 	return 0;
6948 }
6949 
6950 static int
6951 wm_enable_mng_pass_thru(struct wm_softc *sc)
6952 {
6953 	uint32_t manc, fwsm, factps;
6954 
6955 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
6956 		return 0;
6957 
6958 	manc = CSR_READ(sc, WMREG_MANC);
6959 
6960 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
6961 		device_xname(sc->sc_dev), manc));
6962 	if (((manc & MANC_RECV_TCO_EN) == 0)
6963 	    || ((manc & MANC_EN_MAC_ADDR_FILTER) == 0))
6964 		return 0;
6965 
6966 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
6967 		fwsm = CSR_READ(sc, WMREG_FWSM);
6968 		factps = CSR_READ(sc, WMREG_FACTPS);
6969 		if (((factps & FACTPS_MNGCG) == 0)
6970 		    && ((fwsm & FWSM_MODE_MASK)
6971 			== (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
6972 			return 1;
6973 	} else if (((manc & MANC_SMBUS_EN) != 0)
6974 	    && ((manc & MANC_ASF_EN) == 0))
6975 		return 1;
6976 
6977 	return 0;
6978 }
6979 
6980 static int
6981 wm_check_reset_block(struct wm_softc *sc)
6982 {
6983 	uint32_t reg;
6984 
6985 	switch (sc->sc_type) {
6986 	case WM_T_ICH8:
6987 	case WM_T_ICH9:
6988 	case WM_T_ICH10:
6989 	case WM_T_PCH:
6990 		reg = CSR_READ(sc, WMREG_FWSM);
6991 		if ((reg & FWSM_RSPCIPHY) != 0)
6992 			return 0;
6993 		else
6994 			return -1;
6995 		break;
6996 	case WM_T_82571:
6997 	case WM_T_82572:
6998 	case WM_T_82573:
6999 	case WM_T_82574:
7000 	case WM_T_82583:
7001 	case WM_T_80003:
7002 		reg = CSR_READ(sc, WMREG_MANC);
7003 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
7004 			return -1;
7005 		else
7006 			return 0;
7007 		break;
7008 	default:
7009 		/* no problem */
7010 		break;
7011 	}
7012 
7013 	return 0;
7014 }
7015 
7016 static void
7017 wm_get_hw_control(struct wm_softc *sc)
7018 {
7019 	uint32_t reg;
7020 
7021 	switch (sc->sc_type) {
7022 	case WM_T_82573:
7023 		reg = CSR_READ(sc, WMREG_SWSM);
7024 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
7025 		break;
7026 	case WM_T_82571:
7027 	case WM_T_82572:
7028 	case WM_T_82574:
7029 	case WM_T_82583:
7030 	case WM_T_80003:
7031 	case WM_T_ICH8:
7032 	case WM_T_ICH9:
7033 	case WM_T_ICH10:
7034 	case WM_T_PCH:
7035 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
7036 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
7037 		break;
7038 	default:
7039 		break;
7040 	}
7041 }
7042 
7043 static void
7044 wm_release_hw_control(struct wm_softc *sc)
7045 {
7046 	uint32_t reg;
7047 
7048 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
7049 		return;
7050 
7051 	if (sc->sc_type == WM_T_82573) {
		reg = CSR_READ(sc, WMREG_SWSM);
		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
7055 	} else {
7056 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
7057 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
7058 	}
7059 }
7060 
7061 /* XXX Currently TBI only */
7062 static int
7063 wm_check_for_link(struct wm_softc *sc)
7064 {
7065 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7066 	uint32_t rxcw;
7067 	uint32_t ctrl;
7068 	uint32_t status;
7069 	uint32_t sig;
7070 
7071 	rxcw = CSR_READ(sc, WMREG_RXCW);
7072 	ctrl = CSR_READ(sc, WMREG_CTRL);
7073 	status = CSR_READ(sc, WMREG_STATUS);
7074 
7075 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
7076 
7077 	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
7078 		device_xname(sc->sc_dev), __func__,
7079 		((ctrl & CTRL_SWDPIN(1)) == sig),
7080 		((status & STATUS_LU) != 0),
7081 		((rxcw & RXCW_C) != 0)
7082 		    ));
7083 
7084 	/*
7085 	 * SWDPIN   LU RXCW
7086 	 *      0    0    0
7087 	 *      0    0    1	(should not happen)
7088 	 *      0    1    0	(should not happen)
7089 	 *      0    1    1	(should not happen)
7090 	 *      1    0    0	Disable autonego and force linkup
7091 	 *      1    0    1	got /C/ but not linkup yet
7092 	 *      1    1    0	(linkup)
7093 	 *      1    1    1	If IFM_AUTO, back to autonego
7094 	 *
7095 	 */
7096 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
7097 	    && ((status & STATUS_LU) == 0)
7098 	    && ((rxcw & RXCW_C) == 0)) {
7099 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
7100 			__func__));
7101 		sc->sc_tbi_linkup = 0;
7102 		/* Disable auto-negotiation in the TXCW register */
7103 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
7104 
		/*
		 * Force link-up and also force full-duplex.
		 *
		 * NOTE: the hardware may have updated TFCE and RFCE in
		 * CTRL automatically, so refresh sc->sc_ctrl from the
		 * value just read.
		 */
7111 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
7112 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7113 	} else if (((status & STATUS_LU) != 0)
7114 	    && ((rxcw & RXCW_C) != 0)
7115 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
7116 		sc->sc_tbi_linkup = 1;
7117 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
7118 			__func__));
7119 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
7120 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
7121 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
7122 	    && ((rxcw & RXCW_C) != 0)) {
7123 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
7124 	} else {
7125 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
7126 			status));
7127 	}
7128 
7129 	return 0;
7130 }
7131 
7132 /* Work-around for 82566 Kumeran PCS lock loss */
7133 static void
7134 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
7135 {
7136 	int miistatus, active, i;
7137 	int reg;
7138 
7139 	miistatus = sc->sc_mii.mii_media_status;
7140 
7141 	/* If the link is not up, do nothing */
	if ((miistatus & IFM_ACTIVE) == 0)
7143 		return;
7144 
7145 	active = sc->sc_mii.mii_media_active;
7146 
7147 	/* Nothing to do if the link is other than 1Gbps */
7148 	if (IFM_SUBTYPE(active) != IFM_1000_T)
7149 		return;
7150 
7151 	for (i = 0; i < 10; i++) {
		/* read twice: the first read clears the latched status */
		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
			goto out;	/* GOOD! */
7157 
7158 		/* Reset the PHY */
7159 		wm_gmii_reset(sc);
7160 		delay(5*1000);
7161 	}
7162 
7163 	/* Disable GigE link negotiation */
7164 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
7165 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
7166 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7167 
7168 	/*
7169 	 * Call gig speed drop workaround on Gig disable before accessing
7170 	 * any PHY registers.
7171 	 */
7172 	wm_gig_downshift_workaround_ich8lan(sc);
7173 
7174 out:
7175 	return;
7176 }
7177 
7178 /* WOL from S5 stops working */
7179 static void
7180 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
7181 {
7182 	uint16_t kmrn_reg;
7183 
7184 	/* Only for igp3 */
7185 	if (sc->sc_phytype == WMPHY_IGP_3) {
7186 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
7187 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
7188 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
7189 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
7190 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
7191 	}
7192 }
7193 
7194 #ifdef WM_WOL
7195 /* Power down workaround on D3 */
7196 static void
7197 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
7198 {
7199 	uint32_t reg;
7200 	int i;
7201 
7202 	for (i = 0; i < 2; i++) {
7203 		/* Disable link */
7204 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
7205 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
7206 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7207 
7208 		/*
7209 		 * Call gig speed drop workaround on Gig disable before
7210 		 * accessing any PHY registers
7211 		 */
7212 		if (sc->sc_type == WM_T_ICH8)
7213 			wm_gig_downshift_workaround_ich8lan(sc);
7214 
7215 		/* Write VR power-down enable */
7216 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
7217 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
7218 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
7219 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
7220 
7221 		/* Read it back and test */
7222 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
7223 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
7224 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
7225 			break;
7226 
7227 		/* Issue PHY reset and repeat at most one more time */
7228 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7229 	}
7230 }
7231 #endif /* WM_WOL */
7232 
7233 /*
7234  * Workaround for pch's PHYs
7235  * XXX should be moved to new PHY driver?
7236  */
7237 static void
7238 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
7239 {
7240 
7241 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
7242 
7243 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
7244 
7245 	/* 82578 */
7246 	if (sc->sc_phytype == WMPHY_82578) {
7247 		/* PCH rev. < 3 */
7248 		if (sc->sc_rev < 3) {
7249 			/* XXX 6 bit shift? Why? Is it page2? */
7250 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
7251 			    0x66c0);
7252 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
7253 			    0xffff);
7254 		}
7255 
7256 		/* XXX phy rev. < 2 */
7257 	}
7258 
7259 	/* Select page 0 */
7260 
7261 	/* XXX acquire semaphore */
7262 	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
7263 	/* XXX release semaphore */
7264 
7265 	/*
7266 	 * Configure the K1 Si workaround during PHY reset, assuming there
7267 	 * is a link, so that K1 is disabled if the link runs at 1Gbps.
7268 	 */
7269 	wm_k1_gig_workaround_hv(sc, 1);
7270 }
7271 
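/*
 * K1 is a power-save state of the Kumeran interface between the MAC
 * and the PHY.  On HV (82577/82578) PHYs it must not be left enabled
 * while a 1Gbps link is up, so it is disabled on link-up and restored
 * to the NVM default on link-down.  The IGP3_KMRN_DIAG values below
 * are the link stall fix from Intel's reference code.
 */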
7272 static void
7273 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
7274 {
7275 	int k1_enable = sc->sc_nvm_k1_enabled;
7276 
7277 	/* XXX acquire semaphore */
7278 
7279 	if (link) {
7280 		k1_enable = 0;
7281 
7282 		/* Link stall fix for link up */
7283 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
7284 	} else {
7285 		/* Link stall fix for link down */
7286 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
7287 	}
7288 
7289 	wm_configure_k1_ich8lan(sc, k1_enable);
7290 
7291 	/* XXX release semaphore */
7292 }
7293 
7294 static void
7295 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
7296 {
7297 	uint32_t ctrl, ctrl_ext, tmp;
7298 	uint16_t kmrn_reg;
7299 
7300 	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
7301 
7302 	if (k1_enable)
7303 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
7304 	else
7305 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
7306 
7307 	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
7308 
7309 	delay(20);
7310 
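	/*
	 * Briefly force the MAC speed with the speed select bypass bit
	 * set, then restore CTRL and CTRL_EXT.  XXX presumably this is
	 * what makes the new K1 setting take effect.
	 */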
7311 	ctrl = CSR_READ(sc, WMREG_CTRL);
7312 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
7313 
7314 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
7315 	tmp |= CTRL_FRCSPD;
7316 
7317 	CSR_WRITE(sc, WMREG_CTRL, tmp);
7318 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
7319 	delay(20);
7320 
7321 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
7322 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
7323 	delay(20);
7324 }
7325 
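/*
 * Bump the PCIe completion timeout if it is still at the hardware
 * default of 0: devices without capability version 2 get a 10ms
 * timeout via GCR, newer ones 16ms via the PCIe Device Control 2
 * register.  Completion timeout resend is disabled in either case.
 */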
7326 static void
7327 wm_set_pcie_completion_timeout(struct wm_softc *sc)
7328 {
7329 	uint32_t gcr;
7330 	pcireg_t ctrl2;
7331 
7332 	gcr = CSR_READ(sc, WMREG_GCR);
7333 
7334 	/* Only take action if timeout value is defaulted to 0 */
7335 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
7336 		goto out;
7337 
7338 	if ((gcr & GCR_CAP_VER2) == 0) {
7339 		gcr |= GCR_CMPL_TMOUT_10MS;
7340 		goto out;
7341 	}
7342 
7343 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
7344 	    sc->sc_pcixe_capoff + PCI_PCIE_DCSR2);
7345 	ctrl2 |= WM_PCI_PCIE_DCSR2_16MS;
7346 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
7347 	    sc->sc_pcixe_capoff + PCI_PCIE_DCSR2, ctrl2);
7348 
7349 out:
7350 	/* Disable completion timeout resend */
7351 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
7352 
7353 	CSR_WRITE(sc, WMREG_GCR, gcr);
7354 }
7355 
7356 /* Special case for the 82575: manual init is needed ... */
7357 static void
7358 wm_reset_init_script_82575(struct wm_softc *sc)
7359 {
7360 	/*
7361 	 * Remark: this is untested code - we have no board without EEPROM.
7362 	 * It is the same setup as mentioned in the FreeBSD driver for the i82575.
7363 	 */
7364 
7365 	/* SerDes configuration via SERDESCTRL */
7366 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
7367 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
7368 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
7369 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
7370 
7371 	/* CCM configuration via CCMCTL register */
7372 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
7373 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
7374 
7375 	/* PCIe lanes configuration */
7376 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
7377 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
7378 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
7379 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
7380 
7381 	/* PCIe PLL Configuration */
7382 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
7383 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
7384 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
7385 }
7386 
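/*
 * Manageability: if BMC/ASF firmware shares this port, keep ARP
 * interception out of the hardware and, on 82571 and newer chips,
 * pass management packets (the MANC2H_PORT_* UDP ports) through to
 * the host.
 */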
7387 static void
7388 wm_init_manageability(struct wm_softc *sc)
7389 {
7390 
7391 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
7392 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
7393 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
7394 
7395 		/* disable hardware interception of ARP */
7396 		manc &= ~MANC_ARP_EN;
7397 
7398 		/* enable receiving management packets to the host */
7399 		if (sc->sc_type >= WM_T_82571) {
7400 			manc |= MANC_EN_MNG2HOST;
7401 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
7402 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
7403 
7404 		}
7405 
7406 		CSR_WRITE(sc, WMREG_MANC, manc);
7407 	}
7408 }
7409 
7410 static void
7411 wm_release_manageability(struct wm_softc *sc)
7412 {
7413 
7414 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
7415 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
7416 
7417 		if (sc->sc_type >= WM_T_82571)
7418 			manc &= ~MANC_EN_MNG2HOST;
7419 
7420 		CSR_WRITE(sc, WMREG_MANC, manc);
7421 	}
7422 }
7423 
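/*
 * Determine the wakeup-related capabilities of the chip: whether AMT
 * or ASF firmware is present, whether the ARC subsystem is valid and
 * whether management pass-through should be enabled.
 */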
7424 static void
7425 wm_get_wakeup(struct wm_softc *sc)
7426 {
7427 
7428 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
7429 	switch (sc->sc_type) {
7430 	case WM_T_82573:
7431 	case WM_T_82583:
7432 		sc->sc_flags |= WM_F_HAS_AMT;
7433 		/* FALLTHROUGH */
7434 	case WM_T_80003:
7435 	case WM_T_82541:
7436 	case WM_T_82547:
7437 	case WM_T_82571:
7438 	case WM_T_82572:
7439 	case WM_T_82574:
7440 	case WM_T_82575:
7441 	case WM_T_82576:
7442 #if 0 /* XXX */
7443 	case WM_T_82580:
7444 	case WM_T_82580ER:
7445 #endif
7446 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
7447 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
7448 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
7449 		break;
7450 	case WM_T_ICH8:
7451 	case WM_T_ICH9:
7452 	case WM_T_ICH10:
7453 	case WM_T_PCH:
7454 		sc->sc_flags |= WM_F_HAS_AMT;
7455 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
7456 		break;
7457 	default:
7458 		break;
7459 	}
7460 
7461 	/* 1: HAS_MANAGE */
7462 	if (wm_enable_mng_pass_thru(sc) != 0)
7463 		sc->sc_flags |= WM_F_HAS_MANAGE;
7464 
7465 #ifdef WM_DEBUG
7466 	printf("\n");
7467 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
7468 		printf("HAS_AMT,");
7469 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
7470 		printf("ARC_SUBSYS_VALID,");
7471 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
7472 		printf("ASF_FIRMWARE_PRES,");
7473 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
7474 		printf("HAS_MANAGE,");
7475 	printf("\n");
7476 #endif
7477 	/*
7478 	 * Note that the WOL flags are set later, after the EEPROM has
7479 	 * been reset and read.
7480 	 */
7481 }
7482 
7483 #ifdef WM_WOL
7484 /* WOL in the newer chipset interfaces (pchlan) */
7485 static void
7486 wm_enable_phy_wakeup(struct wm_softc *sc)
7487 {
7488 #if 0
7489 	uint16_t preg;
7490 
7491 	/* Copy MAC RARs to PHY RARs */
7492 
7493 	/* Copy MAC MTA to PHY MTA */
7494 
7495 	/* Configure PHY Rx Control register */
7496 
7497 	/* Enable PHY wakeup in MAC register */
7498 
7499 	/* Configure and enable PHY wakeup in PHY registers */
7500 
7501 	/* Activate PHY wakeup */
7502 
7503 	/* XXX */
7504 #endif
7505 }
7506 
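/*
 * Arm the hardware for wake-on-LAN: advertise APM wakeup, apply the
 * ICH/PCH PHY workarounds, enable the magic packet filter (via
 * WUC/WUFC, or the PHY wakeup registers on PCH) and finally set
 * PME_EN in the PCI power management control/status register.
 */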
7507 static void
7508 wm_enable_wakeup(struct wm_softc *sc)
7509 {
7510 	uint32_t reg, pmreg;
7511 	pcireg_t pmode;
7512 
7513 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
7514 		&pmreg, NULL) == 0)
7515 		return;
7516 
7517 	/* Advertise the wakeup capability */
7518 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
7519 	    | CTRL_SWDPIN(3));
7520 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
7521 
7522 	/* ICH workaround */
7523 	switch (sc->sc_type) {
7524 	case WM_T_ICH8:
7525 	case WM_T_ICH9:
7526 	case WM_T_ICH10:
7527 	case WM_T_PCH:
7528 		/* Disable gig during WOL */
7529 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
7530 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
7531 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7532 		if (sc->sc_type == WM_T_PCH)
7533 			wm_gmii_reset(sc);
7534 
7535 		/* Power down workaround */
7536 		if (sc->sc_phytype == WMPHY_82577) {
7537 			struct mii_softc *child;
7538 
7539 			/* Assume that the PHY is copper */
7540 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
7541 			if (child->mii_mpd_rev <= 2)
7542 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
7543 				    (768 << 5) | 25, 0x0444); /* magic num */
7544 		}
7545 		break;
7546 	default:
7547 		break;
7548 	}
7549 
7550 	/* Keep the laser running on fiber adapters */
7551 	if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
7552 	    || ((sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0)) {
7553 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
7554 		reg |= CTRL_EXT_SWDPIN(3);
7555 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7556 	}
7557 
7558 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
7559 #if 0	/* for the multicast packet */
7560 	reg |= WUFC_MC;
7561 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
7562 #endif
7563 
7564 	if (sc->sc_type == WM_T_PCH) {
7565 		wm_enable_phy_wakeup(sc);
7566 	} else {
7567 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
7568 		CSR_WRITE(sc, WMREG_WUFC, reg);
7569 	}
7570 
7571 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
7572 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
7573 		    && (sc->sc_phytype == WMPHY_IGP_3))
7574 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
7575 
7576 	/* Request PME */
7577 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
7578 #if 0
7579 	/* Disable WOL */
7580 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
7581 #else
7582 	/* For WOL */
7583 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
7584 #endif
7585 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
7586 }
7587 #endif /* WM_WOL */
7588 
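/*
 * pmf suspend/resume hooks: on suspend, hand the hardware back to the
 * management firmware and, if WM_WOL is configured, arm the wakeup
 * logic; on resume, re-enable manageability filtering.
 */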
7589 static bool
7590 wm_suspend(device_t self, const pmf_qual_t *qual)
7591 {
7592 	struct wm_softc *sc = device_private(self);
7593 
7594 	wm_release_manageability(sc);
7595 	wm_release_hw_control(sc);
7596 #ifdef WM_WOL
7597 	wm_enable_wakeup(sc);
7598 #endif
7599 
7600 	return true;
7601 }
7602 
7603 static bool
7604 wm_resume(device_t self, const pmf_qual_t *qual)
7605 {
7606 	struct wm_softc *sc = device_private(self);
7607 
7608 	wm_init_manageability(sc);
7609 
7610 	return true;
7611 }
7612