1 /*	$NetBSD: if_wm.c,v 1.204 2010/03/07 10:11:04 msaitoh Exp $	*/
2 
3 /*
4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*******************************************************************************
39 
40   Copyright (c) 2001-2005, Intel Corporation
41   All rights reserved.
42 
43   Redistribution and use in source and binary forms, with or without
44   modification, are permitted provided that the following conditions are met:
45 
46    1. Redistributions of source code must retain the above copyright notice,
47       this list of conditions and the following disclaimer.
48 
49    2. Redistributions in binary form must reproduce the above copyright
50       notice, this list of conditions and the following disclaimer in the
51       documentation and/or other materials provided with the distribution.
52 
53    3. Neither the name of the Intel Corporation nor the names of its
54       contributors may be used to endorse or promote products derived from
55       this software without specific prior written permission.
56 
57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67   POSSIBILITY OF SUCH DAMAGE.
68 
69 *******************************************************************************/
70 /*
71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72  *
73  * TODO (in order of importance):
74  *
75  *	- Rework how parameters are loaded from the EEPROM.
76  */
77 
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.204 2010/03/07 10:11:04 msaitoh Exp $");
80 
81 #include "rnd.h"
82 
83 #include <sys/param.h>
84 #include <sys/systm.h>
85 #include <sys/callout.h>
86 #include <sys/mbuf.h>
87 #include <sys/malloc.h>
88 #include <sys/kernel.h>
89 #include <sys/socket.h>
90 #include <sys/ioctl.h>
91 #include <sys/errno.h>
92 #include <sys/device.h>
93 #include <sys/queue.h>
94 #include <sys/syslog.h>
95 
96 #include <uvm/uvm_extern.h>		/* for PAGE_SIZE */
97 
98 #if NRND > 0
99 #include <sys/rnd.h>
100 #endif
101 
102 #include <net/if.h>
103 #include <net/if_dl.h>
104 #include <net/if_media.h>
105 #include <net/if_ether.h>
106 
107 #include <net/bpf.h>
108 
109 #include <netinet/in.h>			/* XXX for struct ip */
110 #include <netinet/in_systm.h>		/* XXX for struct ip */
111 #include <netinet/ip.h>			/* XXX for struct ip */
112 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
113 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
114 
115 #include <sys/bus.h>
116 #include <sys/intr.h>
117 #include <machine/endian.h>
118 
119 #include <dev/mii/mii.h>
120 #include <dev/mii/miivar.h>
121 #include <dev/mii/miidevs.h>
122 #include <dev/mii/mii_bitbang.h>
123 #include <dev/mii/ikphyreg.h>
124 #include <dev/mii/igphyreg.h>
125 #include <dev/mii/igphyvar.h>
126 #include <dev/mii/inbmphyreg.h>
127 
128 #include <dev/pci/pcireg.h>
129 #include <dev/pci/pcivar.h>
130 #include <dev/pci/pcidevs.h>
131 
132 #include <dev/pci/if_wmreg.h>
133 #include <dev/pci/if_wmvar.h>
134 
135 #ifdef WM_DEBUG
136 #define	WM_DEBUG_LINK		0x01
137 #define	WM_DEBUG_TX		0x02
138 #define	WM_DEBUG_RX		0x04
139 #define	WM_DEBUG_GMII		0x08
140 #define	WM_DEBUG_MANAGE		0x10
141 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
142     | WM_DEBUG_MANAGE;
143 
144 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (/*CONSTCOND*/0)
145 #else
146 #define	DPRINTF(x, y)	/* nothing */
147 #endif /* WM_DEBUG */
148 
149 /*
150  * Transmit descriptor list size.  Due to errata, we can only have
151  * 256 hardware descriptors in the ring on < 82544, but we use 4096
152  * on >= 82544.  We tell the upper layers that they can queue a lot
153  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
154  * of them at a time.
155  *
156  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
157  * chains containing many small mbufs have been observed in zero-copy
158  * situations with jumbo frames.
159  */
160 #define	WM_NTXSEGS		256
161 #define	WM_IFQUEUELEN		256
162 #define	WM_TXQUEUELEN_MAX	64
163 #define	WM_TXQUEUELEN_MAX_82547	16
164 #define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
165 #define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
166 #define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
167 #define	WM_NTXDESC_82542	256
168 #define	WM_NTXDESC_82544	4096
169 #define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
170 #define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
171 #define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
172 #define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
173 #define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
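
/*
 * Illustrative sketch, not part of the driver proper: because the
 * descriptor and job counts above are powers of two, WM_NEXTTX()
 * and WM_NEXTTXS() wrap a ring index with a single AND instead of
 * a modulo.  For a 4096-entry ring, for example:
 */
#if 0
	int idx = 4095;

	idx = (idx + 1) & (4096 - 1);	/* wraps around to 0 */
#endif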
174 
175 #define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */
176 
177 /*
178  * Receive descriptor list size.  We have one Rx buffer for normal
179  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
180  * packet.  We allocate 256 receive descriptors, each with a 2k
181  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
182  */
183 #define	WM_NRXDESC		256
184 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
185 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
186 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
187 
188 /*
189  * Control structures are DMA'd to the i82542 chip.  We allocate them in
190  * a single clump that maps to a single DMA segment to make several things
191  * easier.
192  */
193 struct wm_control_data_82544 {
194 	/*
195 	 * The receive descriptors.
196 	 */
197 	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
198 
199 	/*
200 	 * The transmit descriptors.  Put these at the end, because
201 	 * we might use a smaller number of them.
202 	 */
203 	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
204 };
205 
206 struct wm_control_data_82542 {
207 	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
208 	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
209 };
210 
211 #define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
212 #define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
213 #define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
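
/*
 * A short example of what WM_CDTXOFF()/WM_CDRXOFF() buy us: since
 * both rings live in one clump mapped by a single DMA segment, the
 * bus address of any descriptor is just the base address of the
 * clump plus an offsetof() into the structure (this is exactly what
 * WM_CDTXADDR(), defined below, expands to):
 */
#if 0
	bus_addr_t addr = sc->sc_cddma +
	    offsetof(struct wm_control_data_82544, wcd_txdescs[5]);
#endif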
214 
215 /*
216  * Software state for transmit jobs.
217  */
218 struct wm_txsoft {
219 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
220 	bus_dmamap_t txs_dmamap;	/* our DMA map */
221 	int txs_firstdesc;		/* first descriptor in packet */
222 	int txs_lastdesc;		/* last descriptor in packet */
223 	int txs_ndesc;			/* # of descriptors used */
224 };
225 
226 /*
227  * Software state for receive buffers.  Each descriptor gets a
228  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
229  * more than one buffer, we chain them together.
230  */
231 struct wm_rxsoft {
232 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
233 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
234 };
235 
236 #define WM_LINKUP_TIMEOUT	50
237 
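/*
 * SW/FW semaphore bits, one per PHY instance (PHY 0 through PHY 3).
 */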
238 static uint16_t swfwphysem[] = {
239 	SWFW_PHY0_SM,
240 	SWFW_PHY1_SM,
241 	SWFW_PHY2_SM,
242 	SWFW_PHY3_SM
243 };
244 
245 /*
246  * Software state per device.
247  */
248 struct wm_softc {
249 	device_t sc_dev;		/* generic device information */
250 	bus_space_tag_t sc_st;		/* bus space tag */
251 	bus_space_handle_t sc_sh;	/* bus space handle */
252 	bus_size_t sc_ss;		/* bus space size */
253 	bus_space_tag_t sc_iot;		/* I/O space tag */
254 	bus_space_handle_t sc_ioh;	/* I/O space handle */
255 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
256 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
257 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
258 
259 	struct ethercom sc_ethercom;	/* ethernet common data */
260 	struct mii_data sc_mii;		/* MII/media information */
261 
262 	pci_chipset_tag_t sc_pc;
263 	pcitag_t sc_pcitag;
264 	int sc_bus_speed;		/* PCI/PCIX bus speed */
265 	int sc_pcixe_capoff;		/* PCI[Xe] capability register offset */
266 
267 	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
268 	wm_chip_type sc_type;		/* MAC type */
269 	int sc_rev;			/* MAC revision */
270 	wm_phy_type sc_phytype;		/* PHY type */
271 	int sc_funcid;			/* unit number of the chip (0 to 3) */
272 	int sc_flags;			/* flags; see below */
273 	int sc_if_flags;		/* last if_flags */
274 	int sc_flowflags;		/* 802.3x flow control flags */
275 	int sc_align_tweak;
276 
277 	void *sc_ih;			/* interrupt cookie */
278 	callout_t sc_tick_ch;		/* tick callout */
279 
280 	int sc_ee_addrbits;		/* EEPROM address bits */
281 	int sc_ich8_flash_base;
282 	int sc_ich8_flash_bank_size;
283 	int sc_nvm_k1_enabled;
284 
285 	/*
286 	 * Software state for the transmit and receive descriptors.
287 	 */
288 	int sc_txnum;			/* must be a power of two */
289 	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
290 	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];
291 
292 	/*
293 	 * Control data structures.
294 	 */
295 	int sc_ntxdesc;			/* must be a power of two */
296 	struct wm_control_data_82544 *sc_control_data;
297 	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
298 	bus_dma_segment_t sc_cd_seg;	/* control data segment */
299 	int sc_cd_rseg;			/* real number of control segments */
300 	size_t sc_cd_size;		/* control data size */
301 #define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
302 #define	sc_txdescs	sc_control_data->wcd_txdescs
303 #define	sc_rxdescs	sc_control_data->wcd_rxdescs
304 
305 #ifdef WM_EVENT_COUNTERS
306 	/* Event counters. */
307 	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
308 	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
309 	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
310 	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
311 	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
312 	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
313 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
314 
315 	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
316 	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
317 	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
318 	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
319 	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
320 	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
321 	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
322 	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */
323 
324 	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
325 	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */
326 
327 	struct evcnt sc_ev_tu;		/* Tx underrun */
328 
329 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
330 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
331 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
332 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
333 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
334 #endif /* WM_EVENT_COUNTERS */
335 
336 	bus_addr_t sc_tdt_reg;		/* offset of TDT register */
337 
338 	int	sc_txfree;		/* number of free Tx descriptors */
339 	int	sc_txnext;		/* next ready Tx descriptor */
340 
341 	int	sc_txsfree;		/* number of free Tx jobs */
342 	int	sc_txsnext;		/* next free Tx job */
343 	int	sc_txsdirty;		/* dirty Tx jobs */
344 
345 	/* These 5 variables are used only on the 82547. */
346 	int	sc_txfifo_size;		/* Tx FIFO size */
347 	int	sc_txfifo_head;		/* current head of FIFO */
348 	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
349 	int	sc_txfifo_stall;	/* Tx FIFO is stalled */
350 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
351 
352 	bus_addr_t sc_rdt_reg;		/* offset of RDT register */
353 
354 	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
355 	int	sc_rxdiscard;
356 	int	sc_rxlen;
357 	struct mbuf *sc_rxhead;
358 	struct mbuf *sc_rxtail;
359 	struct mbuf **sc_rxtailp;
360 
361 	uint32_t sc_ctrl;		/* prototype CTRL register */
362 #if 0
363 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
364 #endif
365 	uint32_t sc_icr;		/* prototype interrupt bits */
366 	uint32_t sc_itr;		/* prototype intr throttling reg */
367 	uint32_t sc_tctl;		/* prototype TCTL register */
368 	uint32_t sc_rctl;		/* prototype RCTL register */
369 	uint32_t sc_txcw;		/* prototype TXCW register */
370 	uint32_t sc_tipg;		/* prototype TIPG register */
371 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
372 	uint32_t sc_pba;		/* prototype PBA register */
373 
374 	int sc_tbi_linkup;		/* TBI link status */
375 	int sc_tbi_anegticks;		/* autonegotiation ticks */
376 	int sc_tbi_ticks;		/* tbi ticks */
377 	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
378 	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */
379 
380 	int sc_mchash_type;		/* multicast filter offset */
381 
382 #if NRND > 0
383 	rndsource_element_t rnd_source;	/* random source */
384 #endif
385 };
386 
387 #define	WM_RXCHAIN_RESET(sc)						\
388 do {									\
389 	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
390 	*(sc)->sc_rxtailp = NULL;					\
391 	(sc)->sc_rxlen = 0;						\
392 } while (/*CONSTCOND*/0)
393 
394 #define	WM_RXCHAIN_LINK(sc, m)						\
395 do {									\
396 	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
397 	(sc)->sc_rxtailp = &(m)->m_next;				\
398 } while (/*CONSTCOND*/0)
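
/*
 * A minimal usage sketch of the tail-pointer scheme above:
 * sc_rxtailp always points at the m_next slot to fill next, so
 * appending a buffer to a multi-buffer (jumbo) packet is O(1),
 * with no walking of the chain.
 */
#if 0
	WM_RXCHAIN_RESET(sc);
	WM_RXCHAIN_LINK(sc, m0);	/* sc_rxhead = m0 */
	WM_RXCHAIN_LINK(sc, m1);	/* m0->m_next = m1 */
	WM_RXCHAIN_LINK(sc, m2);	/* m1->m_next = m2 */
#endif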
399 
400 #ifdef WM_EVENT_COUNTERS
401 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
402 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
403 #else
404 #define	WM_EVCNT_INCR(ev)	/* nothing */
405 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
406 #endif
407 
408 #define	CSR_READ(sc, reg)						\
409 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
410 #define	CSR_WRITE(sc, reg, val)						\
411 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
412 #define	CSR_WRITE_FLUSH(sc)						\
413 	(void) CSR_READ((sc), WMREG_STATUS)
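
/*
 * CSR_WRITE_FLUSH() relies on the fact that a register read forces
 * any posted PCI writes to complete.  A typical sequence looks like
 * the sketch below (WMREG_IMC taken from if_wmreg.h):
 */
#if 0
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);	/* post the write ... */
	CSR_WRITE_FLUSH(sc);			/* ... and force it out */
	delay(10);
#endif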
414 
415 #define ICH8_FLASH_READ32(sc, reg) \
416 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
417 #define ICH8_FLASH_WRITE32(sc, reg, data) \
418 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
419 
420 #define ICH8_FLASH_READ16(sc, reg) \
421 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
422 #define ICH8_FLASH_WRITE16(sc, reg, data) \
423 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
424 
425 #define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
426 #define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))
427 
428 #define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
429 #define	WM_CDTXADDR_HI(sc, x)						\
430 	(sizeof(bus_addr_t) == 8 ?					\
431 	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)
432 
433 #define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
434 #define	WM_CDRXADDR_HI(sc, x)						\
435 	(sizeof(bus_addr_t) == 8 ?					\
436 	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
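
/*
 * The _LO/_HI pairs split a descriptor base address into the two
 * 32-bit halves the chip expects; on platforms with a 32-bit
 * bus_addr_t the high half constant-folds to 0.  A sketch of the
 * intended use (register names assumed from if_wmreg.h):
 */
#if 0
	CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
	CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
#endif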
437 
438 #define	WM_CDTXSYNC(sc, x, n, ops)					\
439 do {									\
440 	int __x, __n;							\
441 									\
442 	__x = (x);							\
443 	__n = (n);							\
444 									\
445 	/* If it will wrap around, sync to the end of the ring. */	\
446 	if ((__x + __n) > WM_NTXDESC(sc)) {				\
447 		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
448 		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
449 		    (WM_NTXDESC(sc) - __x), (ops));			\
450 		__n -= (WM_NTXDESC(sc) - __x);				\
451 		__x = 0;						\
452 	}								\
453 									\
454 	/* Now sync whatever is left. */				\
455 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
456 	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
457 } while (/*CONSTCOND*/0)
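
/*
 * Worked example of the wrap-around case above: in a 256-entry ring,
 * syncing 10 descriptors starting at index 250 issues two
 * bus_dmamap_sync() calls, one covering [250..255] and one covering
 * [0..3]:
 */
#if 0
	WM_CDTXSYNC(sc, 250, 10, BUS_DMASYNC_PREWRITE);
#endif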
458 
459 #define	WM_CDRXSYNC(sc, x, ops)						\
460 do {									\
461 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
462 	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
463 } while (/*CONSTCOND*/0)
464 
465 #define	WM_INIT_RXDESC(sc, x)						\
466 do {									\
467 	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
468 	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
469 	struct mbuf *__m = __rxs->rxs_mbuf;				\
470 									\
471 	/*								\
472 	 * Note: We scoot the packet forward 2 bytes in the buffer	\
473 	 * so that the payload after the Ethernet header is aligned	\
474 	 * to a 4-byte boundary.					\
475 	 *								\
476 	 * XXX BRAINDAMAGE ALERT!					\
477 	 * The stupid chip uses the same size for every buffer, which	\
478 	 * is set in the Receive Control register.  We are using the 2K	\
479 	 * size option, but what we REALLY want is (2K - 2)!  For this	\
480 	 * reason, we can't "scoot" packets longer than the standard	\
481 	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
482 	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
483 	 * the upper layer copy the headers.				\
484 	 */								\
485 	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
486 									\
487 	wm_set_dma_addr(&__rxd->wrx_addr,				\
488 	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
489 	__rxd->wrx_len = 0;						\
490 	__rxd->wrx_cksum = 0;						\
491 	__rxd->wrx_status = 0;						\
492 	__rxd->wrx_errors = 0;						\
493 	__rxd->wrx_special = 0;						\
494 	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
495 									\
496 	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
497 } while (/*CONSTCOND*/0)
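
/*
 * A sketch of the align_tweak policy described in the comment above
 * (the real decision happens at initialization time; max_frame_size
 * is a stand-in name for whatever frame limit applies):
 */
#if 0
	if (max_frame_size <= MCLBYTES - 2)
		sc->sc_align_tweak = 2;	/* payload 4-byte aligned */
	else
		sc->sc_align_tweak = 0;	/* upper layer copies headers */
#endif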
498 
499 static void	wm_start(struct ifnet *);
500 static void	wm_watchdog(struct ifnet *);
501 static int	wm_ioctl(struct ifnet *, u_long, void *);
502 static int	wm_init(struct ifnet *);
503 static void	wm_stop(struct ifnet *, int);
504 static bool	wm_suspend(device_t, const pmf_qual_t *);
505 static bool	wm_resume(device_t, const pmf_qual_t *);
506 
507 static void	wm_reset(struct wm_softc *);
508 static void	wm_rxdrain(struct wm_softc *);
509 static int	wm_add_rxbuf(struct wm_softc *, int);
510 static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
511 static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
512 static int	wm_validate_eeprom_checksum(struct wm_softc *);
513 static void	wm_tick(void *);
514 
515 static void	wm_set_filter(struct wm_softc *);
516 
517 static int	wm_intr(void *);
518 static void	wm_txintr(struct wm_softc *);
519 static void	wm_rxintr(struct wm_softc *);
520 static void	wm_linkintr(struct wm_softc *, uint32_t);
521 
522 static void	wm_tbi_mediainit(struct wm_softc *);
523 static int	wm_tbi_mediachange(struct ifnet *);
524 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
525 
526 static void	wm_tbi_set_linkled(struct wm_softc *);
527 static void	wm_tbi_check_link(struct wm_softc *);
528 
529 static void	wm_gmii_reset(struct wm_softc *);
530 
531 static int	wm_gmii_i82543_readreg(device_t, int, int);
532 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
533 
534 static int	wm_gmii_i82544_readreg(device_t, int, int);
535 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
536 
537 static int	wm_gmii_i80003_readreg(device_t, int, int);
538 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
539 static int	wm_gmii_bm_readreg(device_t, int, int);
540 static void	wm_gmii_bm_writereg(device_t, int, int, int);
541 static int	wm_gmii_hv_readreg(device_t, int, int);
542 static void	wm_gmii_hv_writereg(device_t, int, int, int);
543 static int	wm_sgmii_readreg(device_t, int, int);
544 static void	wm_sgmii_writereg(device_t, int, int, int);
545 
546 static void	wm_gmii_statchg(device_t);
547 
548 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
549 static int	wm_gmii_mediachange(struct ifnet *);
550 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
551 
552 static int	wm_kmrn_readreg(struct wm_softc *, int);
553 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
554 
555 static void	wm_set_spiaddrbits(struct wm_softc *);
556 static int	wm_match(device_t, cfdata_t, void *);
557 static void	wm_attach(device_t, device_t, void *);
558 static int	wm_detach(device_t, int);
559 static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
560 static void	wm_get_auto_rd_done(struct wm_softc *);
561 static void	wm_lan_init_done(struct wm_softc *);
562 static void	wm_get_cfg_done(struct wm_softc *);
563 static int	wm_get_swsm_semaphore(struct wm_softc *);
564 static void	wm_put_swsm_semaphore(struct wm_softc *);
565 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
566 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
567 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
568 static int	wm_get_swfwhw_semaphore(struct wm_softc *);
569 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
570 
571 static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
572 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
573 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
574 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
575 		     uint32_t, uint16_t *);
576 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
577 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
578 static void	wm_82547_txfifo_stall(void *);
579 static int	wm_check_mng_mode(struct wm_softc *);
580 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
581 static int	wm_check_mng_mode_82574(struct wm_softc *);
582 static int	wm_check_mng_mode_generic(struct wm_softc *);
583 static int	wm_enable_mng_pass_thru(struct wm_softc *);
584 static int	wm_check_reset_block(struct wm_softc *);
585 static void	wm_get_hw_control(struct wm_softc *);
586 static int	wm_check_for_link(struct wm_softc *);
587 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
588 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
589 #ifdef WM_WOL
590 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
591 #endif
592 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
593 static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
594 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
595 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
596 static void	wm_reset_init_script_82575(struct wm_softc *);
597 static void	wm_release_manageability(struct wm_softc *);
598 static void	wm_release_hw_control(struct wm_softc *);
599 static void	wm_get_wakeup(struct wm_softc *);
600 #ifdef WM_WOL
601 static void	wm_enable_phy_wakeup(struct wm_softc *);
602 static void	wm_enable_wakeup(struct wm_softc *);
603 #endif
604 static void	wm_init_manageability(struct wm_softc *);
605 
606 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
607     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
608 
609 /*
610  * Devices supported by this driver.
611  */
612 static const struct wm_product {
613 	pci_vendor_id_t		wmp_vendor;
614 	pci_product_id_t	wmp_product;
615 	const char		*wmp_name;
616 	wm_chip_type		wmp_type;
617 	int			wmp_flags;
618 #define	WMP_F_1000X		0x01
619 #define	WMP_F_1000T		0x02
620 #define	WMP_F_SERDES		0x04
621 } wm_products[] = {
622 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
623 	  "Intel i82542 1000BASE-X Ethernet",
624 	  WM_T_82542_2_1,	WMP_F_1000X },
625 
626 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
627 	  "Intel i82543GC 1000BASE-X Ethernet",
628 	  WM_T_82543,		WMP_F_1000X },
629 
630 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
631 	  "Intel i82543GC 1000BASE-T Ethernet",
632 	  WM_T_82543,		WMP_F_1000T },
633 
634 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
635 	  "Intel i82544EI 1000BASE-T Ethernet",
636 	  WM_T_82544,		WMP_F_1000T },
637 
638 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
639 	  "Intel i82544EI 1000BASE-X Ethernet",
640 	  WM_T_82544,		WMP_F_1000X },
641 
642 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
643 	  "Intel i82544GC 1000BASE-T Ethernet",
644 	  WM_T_82544,		WMP_F_1000T },
645 
646 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
647 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
648 	  WM_T_82544,		WMP_F_1000T },
649 
650 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
651 	  "Intel i82540EM 1000BASE-T Ethernet",
652 	  WM_T_82540,		WMP_F_1000T },
653 
654 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
655 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
656 	  WM_T_82540,		WMP_F_1000T },
657 
658 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
659 	  "Intel i82540EP 1000BASE-T Ethernet",
660 	  WM_T_82540,		WMP_F_1000T },
661 
662 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
663 	  "Intel i82540EP 1000BASE-T Ethernet",
664 	  WM_T_82540,		WMP_F_1000T },
665 
666 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
667 	  "Intel i82540EP 1000BASE-T Ethernet",
668 	  WM_T_82540,		WMP_F_1000T },
669 
670 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
671 	  "Intel i82545EM 1000BASE-T Ethernet",
672 	  WM_T_82545,		WMP_F_1000T },
673 
674 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
675 	  "Intel i82545GM 1000BASE-T Ethernet",
676 	  WM_T_82545_3,		WMP_F_1000T },
677 
678 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
679 	  "Intel i82545GM 1000BASE-X Ethernet",
680 	  WM_T_82545_3,		WMP_F_1000X },
681 #if 0
682 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
683 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
684 	  WM_T_82545_3,		WMP_F_SERDES },
685 #endif
686 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
687 	  "Intel i82546EB 1000BASE-T Ethernet",
688 	  WM_T_82546,		WMP_F_1000T },
689 
690 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
691 	  "Intel i82546EB 1000BASE-T Ethernet",
692 	  WM_T_82546,		WMP_F_1000T },
693 
694 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
695 	  "Intel i82545EM 1000BASE-X Ethernet",
696 	  WM_T_82545,		WMP_F_1000X },
697 
698 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
699 	  "Intel i82546EB 1000BASE-X Ethernet",
700 	  WM_T_82546,		WMP_F_1000X },
701 
702 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
703 	  "Intel i82546GB 1000BASE-T Ethernet",
704 	  WM_T_82546_3,		WMP_F_1000T },
705 
706 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
707 	  "Intel i82546GB 1000BASE-X Ethernet",
708 	  WM_T_82546_3,		WMP_F_1000X },
709 #if 0
710 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
711 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
712 	  WM_T_82546_3,		WMP_F_SERDES },
713 #endif
714 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
715 	  "i82546GB quad-port Gigabit Ethernet",
716 	  WM_T_82546_3,		WMP_F_1000T },
717 
718 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
719 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
720 	  WM_T_82546_3,		WMP_F_1000T },
721 
722 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
723 	  "Intel PRO/1000MT (82546GB)",
724 	  WM_T_82546_3,		WMP_F_1000T },
725 
726 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
727 	  "Intel i82541EI 1000BASE-T Ethernet",
728 	  WM_T_82541,		WMP_F_1000T },
729 
730 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
731 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
732 	  WM_T_82541,		WMP_F_1000T },
733 
734 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
735 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
736 	  WM_T_82541,		WMP_F_1000T },
737 
738 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
739 	  "Intel i82541ER 1000BASE-T Ethernet",
740 	  WM_T_82541_2,		WMP_F_1000T },
741 
742 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
743 	  "Intel i82541GI 1000BASE-T Ethernet",
744 	  WM_T_82541_2,		WMP_F_1000T },
745 
746 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
747 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
748 	  WM_T_82541_2,		WMP_F_1000T },
749 
750 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
751 	  "Intel i82541PI 1000BASE-T Ethernet",
752 	  WM_T_82541_2,		WMP_F_1000T },
753 
754 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
755 	  "Intel i82547EI 1000BASE-T Ethernet",
756 	  WM_T_82547,		WMP_F_1000T },
757 
758 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
759 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
760 	  WM_T_82547,		WMP_F_1000T },
761 
762 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
763 	  "Intel i82547GI 1000BASE-T Ethernet",
764 	  WM_T_82547_2,		WMP_F_1000T },
765 
766 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
767 	  "Intel PRO/1000 PT (82571EB)",
768 	  WM_T_82571,		WMP_F_1000T },
769 
770 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
771 	  "Intel PRO/1000 PF (82571EB)",
772 	  WM_T_82571,		WMP_F_1000X },
773 #if 0
774 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
775 	  "Intel PRO/1000 PB (82571EB)",
776 	  WM_T_82571,		WMP_F_SERDES },
777 #endif
778 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
779 	  "Intel PRO/1000 QT (82571EB)",
780 	  WM_T_82571,		WMP_F_1000T },
781 
782 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
783 	  "Intel i82572EI 1000baseT Ethernet",
784 	  WM_T_82572,		WMP_F_1000T },
785 
786 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
787 	  "Intel� PRO/1000 PT Quad Port Server Adapter",
788 	  WM_T_82571,		WMP_F_1000T, },
789 
790 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
791 	  "Intel i82572EI 1000baseX Ethernet",
792 	  WM_T_82572,		WMP_F_1000X },
793 #if 0
794 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
795 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
796 	  WM_T_82572,		WMP_F_SERDES },
797 #endif
798 
799 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
800 	  "Intel i82572EI 1000baseT Ethernet",
801 	  WM_T_82572,		WMP_F_1000T },
802 
803 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
804 	  "Intel i82573E",
805 	  WM_T_82573,		WMP_F_1000T },
806 
807 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
808 	  "Intel i82573E IAMT",
809 	  WM_T_82573,		WMP_F_1000T },
810 
811 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
812 	  "Intel i82573L Gigabit Ethernet",
813 	  WM_T_82573,		WMP_F_1000T },
814 
815 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
816 	  "Intel i82574L",
817 	  WM_T_82574,		WMP_F_1000T },
818 
819 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
820 	  "Intel i82583V",
821 	  WM_T_82583,		WMP_F_1000T },
822 
823 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
824 	  "i80003 dual 1000baseT Ethernet",
825 	  WM_T_80003,		WMP_F_1000T },
826 
827 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
828 	  "i80003 dual 1000baseX Ethernet",
829 	  WM_T_80003,		WMP_F_1000T },
830 #if 0
831 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
832 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
833 	  WM_T_80003,		WMP_F_SERDES },
834 #endif
835 
836 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
837 	  "Intel i80003 1000baseT Ethernet",
838 	  WM_T_80003,		WMP_F_1000T },
839 #if 0
840 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
841 	  "Intel i80003 Gigabit Ethernet (SERDES)",
842 	  WM_T_80003,		WMP_F_SERDES },
843 #endif
844 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
845 	  "Intel i82801H (M_AMT) LAN Controller",
846 	  WM_T_ICH8,		WMP_F_1000T },
847 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
848 	  "Intel i82801H (AMT) LAN Controller",
849 	  WM_T_ICH8,		WMP_F_1000T },
850 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
851 	  "Intel i82801H LAN Controller",
852 	  WM_T_ICH8,		WMP_F_1000T },
853 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
854 	  "Intel i82801H (IFE) LAN Controller",
855 	  WM_T_ICH8,		WMP_F_1000T },
856 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
857 	  "Intel i82801H (M) LAN Controller",
858 	  WM_T_ICH8,		WMP_F_1000T },
859 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
860 	  "Intel i82801H IFE (GT) LAN Controller",
861 	  WM_T_ICH8,		WMP_F_1000T },
862 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
863 	  "Intel i82801H IFE (G) LAN Controller",
864 	  WM_T_ICH8,		WMP_F_1000T },
865 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
866 	  "82801I (AMT) LAN Controller",
867 	  WM_T_ICH9,		WMP_F_1000T },
868 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
869 	  "82801I LAN Controller",
870 	  WM_T_ICH9,		WMP_F_1000T },
871 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
872 	  "82801I (G) LAN Controller",
873 	  WM_T_ICH9,		WMP_F_1000T },
874 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
875 	  "82801I (GT) LAN Controller",
876 	  WM_T_ICH9,		WMP_F_1000T },
877 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
878 	  "82801I (C) LAN Controller",
879 	  WM_T_ICH9,		WMP_F_1000T },
880 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
881 	  "82801I mobile LAN Controller",
882 	  WM_T_ICH9,		WMP_F_1000T },
883 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
884 	  "82801I mobile (V) LAN Controller",
885 	  WM_T_ICH9,		WMP_F_1000T },
886 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
887 	  "82801I mobile (AMT) LAN Controller",
888 	  WM_T_ICH9,		WMP_F_1000T },
889 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
890 	  "82567LM-4 LAN Controller",
891 	  WM_T_ICH9,		WMP_F_1000T },
892 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
893 	  "82567V-3 LAN Controller",
894 	  WM_T_ICH9,		WMP_F_1000T },
895 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
896 	  "82567LM-2 LAN Controller",
897 	  WM_T_ICH10,		WMP_F_1000T },
898 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
899 	  "82567LF-2 LAN Controller",
900 	  WM_T_ICH10,		WMP_F_1000T },
901 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
902 	  "82567LM-3 LAN Controller",
903 	  WM_T_ICH10,		WMP_F_1000T },
904 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
905 	  "82567LF-3 LAN Controller",
906 	  WM_T_ICH10,		WMP_F_1000T },
907 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
908 	  "82567V-2 LAN Controller",
909 	  WM_T_ICH10,		WMP_F_1000T },
910 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
911 	  "PCH LAN (82578LM) Controller",
912 	  WM_T_PCH,		WMP_F_1000T },
913 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
914 	  "PCH LAN (82578LC) Controller",
915 	  WM_T_PCH,		WMP_F_1000T },
916 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
917 	  "PCH LAN (82578DM) Controller",
918 	  WM_T_PCH,		WMP_F_1000T },
919 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
920 	  "PCH LAN (82578DC) Controller",
921 	  WM_T_PCH,		WMP_F_1000T },
922 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
923 	  "82575EB dual-1000baseT Ethernet",
924 	  WM_T_82575,		WMP_F_1000T },
925 #if 0
926 	/*
927 	 * Not sure whether this is WMP_F_1000X or WMP_F_SERDES; we don't
928 	 * have the hardware, so it's disabled for now.
929 	 */
930 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
931 	  "82575EB dual-1000baseX Ethernet (SERDES)",
932 	  WM_T_82575,		WMP_F_SERDES },
933 #endif
934 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
935 	  "82575GB quad-1000baseT Ethernet",
936 	  WM_T_82575,		WMP_F_1000T },
937 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
938 	  "82575GB quad-1000baseT Ethernet (PM)",
939 	  WM_T_82575,		WMP_F_1000T },
940 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
941 	  "82576 1000BaseT Ethernet",
942 	  WM_T_82576,		WMP_F_1000T },
943 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
944 	  "82576 1000BaseX Ethernet",
945 	  WM_T_82576,		WMP_F_1000X },
946 #if 0
947 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
948 	  "82576 gigabit Ethernet (SERDES)",
949 	  WM_T_82576,		WMP_F_SERDES },
950 #endif
951 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
952 	  "82576 quad-1000BaseT Ethernet",
953 	  WM_T_82576,		WMP_F_1000T },
954 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
955 	  "82576 gigabit Ethernet",
956 	  WM_T_82576,		WMP_F_1000T },
957 #if 0
958 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
959 	  "82576 gigabit Ethernet (SERDES)",
960 	  WM_T_82576,		WMP_F_SERDES },
961 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
962 	  "82576 quad-gigabit Ethernet (SERDES)",
963 	  WM_T_82576,		WMP_F_SERDES },
964 #endif
965 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
966 	  "82580 1000BaseT Ethernet",
967 	  WM_T_82580,		WMP_F_1000T },
968 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
969 	  "82580 1000BaseX Ethernet",
970 	  WM_T_82580,		WMP_F_1000X },
971 #if 0
972 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
973 	  "82580 1000BaseT Ethernet (SERDES)",
974 	  WM_T_82580,		WMP_F_SERDES },
975 #endif
976 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
977 	  "82580 gigabit Ethernet (SGMII)",
978 	  WM_T_82580,		WMP_F_1000T },
979 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
980 	  "82580 dual-1000BaseT Ethernet",
981 	  WM_T_82580,		WMP_F_1000T },
982 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER,
983 	  "82580 1000BaseT Ethernet",
984 	  WM_T_82580ER,		WMP_F_1000T },
985 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER_DUAL,
986 	  "82580 dual-1000BaseT Ethernet",
987 	  WM_T_82580ER,		WMP_F_1000T },
988 	{ 0,			0,
989 	  NULL,
990 	  0,			0 },
991 };
992 
993 #ifdef WM_EVENT_COUNTERS
994 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
995 #endif /* WM_EVENT_COUNTERS */
996 
997 #if 0 /* Not currently used */
998 static inline uint32_t
999 wm_io_read(struct wm_softc *sc, int reg)
1000 {
1001 
1002 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1003 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1004 }
1005 #endif
1006 
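/*
 * The i82544 and later expose an indirect register window in I/O
 * space: the target register offset is written at BAR offset 0 (the
 * address window), and the value is then transferred through BAR
 * offset 4 (the data window).
 */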
1007 static inline void
1008 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1009 {
1010 
1011 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1012 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1013 }
1014 
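/*
 * Write an 8-bit value to a controller register reached through an
 * SCTL-style indirection register: pack the data and register offset
 * into a single write, then poll until the hardware sets the READY
 * bit to acknowledge it.
 */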
1015 static inline void
1016 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1017     uint32_t data)
1018 {
1019 	uint32_t regval;
1020 	int i;
1021 
1022 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1023 
1024 	CSR_WRITE(sc, reg, regval);
1025 
1026 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1027 		delay(5);
1028 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1029 			break;
1030 	}
1031 	if (i == SCTL_CTL_POLL_TIMEOUT) {
1032 		aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not indicate ready\n",
1033 		    device_xname(sc->sc_dev), reg);
1034 	}
1035 }
1036 
1037 static inline void
1038 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1039 {
1040 	wa->wa_low = htole32(v & 0xffffffffU);
1041 	if (sizeof(bus_addr_t) == 8)
1042 		wa->wa_high = htole32((uint64_t) v >> 32);
1043 	else
1044 		wa->wa_high = 0;
1045 }
1046 
1047 static void
1048 wm_set_spiaddrbits(struct wm_softc *sc)
1049 {
1050 	uint32_t reg;
1051 
1052 	sc->sc_flags |= WM_F_EEPROM_SPI;
1053 	reg = CSR_READ(sc, WMREG_EECD);
1054 	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
1055 }
1056 
1057 static const struct wm_product *
1058 wm_lookup(const struct pci_attach_args *pa)
1059 {
1060 	const struct wm_product *wmp;
1061 
1062 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1063 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1064 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1065 			return wmp;
1066 	}
1067 	return NULL;
1068 }
1069 
1070 static int
1071 wm_match(device_t parent, cfdata_t cf, void *aux)
1072 {
1073 	struct pci_attach_args *pa = aux;
1074 
1075 	if (wm_lookup(pa) != NULL)
1076 		return 1;
1077 
1078 	return 0;
1079 }
1080 
1081 static void
1082 wm_attach(device_t parent, device_t self, void *aux)
1083 {
1084 	struct wm_softc *sc = device_private(self);
1085 	struct pci_attach_args *pa = aux;
1086 	prop_dictionary_t dict;
1087 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1088 	pci_chipset_tag_t pc = pa->pa_pc;
1089 	pci_intr_handle_t ih;
1090 	const char *intrstr = NULL;
1091 	const char *eetype, *xname;
1092 	bus_space_tag_t memt;
1093 	bus_space_handle_t memh;
1094 	bus_size_t memsize;
1095 	int memh_valid;
1096 	int i, error;
1097 	const struct wm_product *wmp;
1098 	prop_data_t ea;
1099 	prop_number_t pn;
1100 	uint8_t enaddr[ETHER_ADDR_LEN];
1101 	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin, io3;
1102 	pcireg_t preg, memtype;
1103 	uint16_t eeprom_data, apme_mask;
1104 	uint32_t reg;
1105 
1106 	sc->sc_dev = self;
1107 	callout_init(&sc->sc_tick_ch, 0);
1108 
1109 	sc->sc_wmp = wmp = wm_lookup(pa);
1110 	if (wmp == NULL) {
1111 		printf("\n");
1112 		panic("wm_attach: impossible");
1113 	}
1114 
1115 	sc->sc_pc = pa->pa_pc;
1116 	sc->sc_pcitag = pa->pa_tag;
1117 
1118 	if (pci_dma64_available(pa))
1119 		sc->sc_dmat = pa->pa_dmat64;
1120 	else
1121 		sc->sc_dmat = pa->pa_dmat;
1122 
1123 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
1124 	aprint_naive(": Ethernet controller\n");
1125 	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, sc->sc_rev);
1126 
1127 	sc->sc_type = wmp->wmp_type;
1128 	if (sc->sc_type < WM_T_82543) {
1129 		if (sc->sc_rev < 2) {
1130 			aprint_error_dev(sc->sc_dev,
1131 			    "i82542 must be at least rev. 2\n");
1132 			return;
1133 		}
1134 		if (sc->sc_rev < 3)
1135 			sc->sc_type = WM_T_82542_2_0;
1136 	}
1137 
1138 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1139 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER))
1140 		sc->sc_flags |= WM_F_NEWQUEUE;
1141 
1142 	/* Set device properties (mactype) */
1143 	dict = device_properties(sc->sc_dev);
1144 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1145 
1146 	/*
1147 	 * Map the device.  All devices support memory-mapped access,
1148 	 * and it is really required for normal operation.
1149 	 */
1150 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1151 	switch (memtype) {
1152 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1153 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1154 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1155 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1156 		break;
1157 	default:
1158 		memh_valid = 0;
1159 		break;
1160 	}
1161 
1162 	if (memh_valid) {
1163 		sc->sc_st = memt;
1164 		sc->sc_sh = memh;
1165 		sc->sc_ss = memsize;
1166 	} else {
1167 		aprint_error_dev(sc->sc_dev,
1168 		    "unable to map device registers\n");
1169 		return;
1170 	}
1171 
1172 	wm_get_wakeup(sc);
1173 
1174 	/*
1175 	 * In addition, i82544 and later support I/O mapped indirect
1176 	 * register access.  It is not desirable (nor supported in
1177 	 * this driver) to use it for normal operation, though it is
1178 	 * required to work around bugs in some chip versions.
1179 	 */
1180 	if (sc->sc_type >= WM_T_82544) {
1181 		/* First we have to find the I/O BAR. */
1182 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1183 			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
1184 			    PCI_MAPREG_TYPE_IO)
1185 				break;
1186 		}
1187 		if (i == PCI_MAPREG_END)
1188 			aprint_error_dev(sc->sc_dev,
1189 			    "WARNING: unable to find I/O BAR\n");
1190 		else {
1191 			/*
1192 			 * The i8254x apparently doesn't respond when the
1193 			 * I/O BAR is 0, which suggests it has never been
1194 			 * configured.
1195 			 */
1196 			preg = pci_conf_read(pc, pa->pa_tag, i);
1197 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1198 				aprint_error_dev(sc->sc_dev,
1199 				    "WARNING: I/O BAR at zero.\n");
1200 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1201 					0, &sc->sc_iot, &sc->sc_ioh,
1202 					NULL, NULL) == 0) {
1203 				sc->sc_flags |= WM_F_IOH_VALID;
1204 			} else {
1205 				aprint_error_dev(sc->sc_dev,
1206 				    "WARNING: unable to map I/O space\n");
1207 			}
1208 		}
1209 
1210 	}
1211 
1212 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
1213 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1214 	preg |= PCI_COMMAND_MASTER_ENABLE;
1215 	if (sc->sc_type < WM_T_82542_2_1)
1216 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1217 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1218 
1219 	/* power up chip */
1220 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1221 	    NULL)) && error != EOPNOTSUPP) {
1222 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1223 		return;
1224 	}
1225 
1226 	/*
1227 	 * Map and establish our interrupt.
1228 	 */
1229 	if (pci_intr_map(pa, &ih)) {
1230 		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
1231 		return;
1232 	}
1233 	intrstr = pci_intr_string(pc, ih);
1234 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
1235 	if (sc->sc_ih == NULL) {
1236 		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
1237 		if (intrstr != NULL)
1238 			aprint_error(" at %s", intrstr);
1239 		aprint_error("\n");
1240 		return;
1241 	}
1242 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
1243 
1244 	/*
1245 	 * Check the function ID (unit number of the chip).
1246 	 */
1247 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1248 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
1249 	    || (sc->sc_type == WM_T_82575))
1250 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1251 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1252 	else
1253 		sc->sc_funcid = 0;
1254 
1255 	/*
1256 	 * Determine a few things about the bus we're connected to.
1257 	 */
1258 	if (sc->sc_type < WM_T_82543) {
1259 		/* We don't really know the bus characteristics here. */
1260 		sc->sc_bus_speed = 33;
1261 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1262 		/*
1263 		 * CSA (Communication Streaming Architecture) is about as fast
1264 		 * as a 32-bit 66MHz PCI bus.
1265 		 */
1266 		sc->sc_flags |= WM_F_CSA;
1267 		sc->sc_bus_speed = 66;
1268 		aprint_verbose_dev(sc->sc_dev,
1269 		    "Communication Streaming Architecture\n");
1270 		if (sc->sc_type == WM_T_82547) {
1271 			callout_init(&sc->sc_txfifo_ch, 0);
1272 			callout_setfunc(&sc->sc_txfifo_ch,
1273 					wm_82547_txfifo_stall, sc);
1274 			aprint_verbose_dev(sc->sc_dev,
1275 			    "using 82547 Tx FIFO stall work-around\n");
1276 		}
1277 	} else if (sc->sc_type >= WM_T_82571) {
1278 		sc->sc_flags |= WM_F_PCIE;
1279 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1280 		    && (sc->sc_type != WM_T_ICH10)
1281 		    && (sc->sc_type != WM_T_PCH)) {
1282 			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
1283 			/* ICH* and PCH have no PCIe capability registers */
1284 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1285 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1286 				NULL) == 0)
1287 				aprint_error_dev(sc->sc_dev,
1288 				    "unable to find PCIe capability\n");
1289 		}
1290 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1291 	} else {
1292 		reg = CSR_READ(sc, WMREG_STATUS);
1293 		if (reg & STATUS_BUS64)
1294 			sc->sc_flags |= WM_F_BUS64;
1295 		if ((reg & STATUS_PCIX_MODE) != 0) {
1296 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1297 
1298 			sc->sc_flags |= WM_F_PCIX;
1299 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1300 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1301 				aprint_error_dev(sc->sc_dev,
1302 				    "unable to find PCIX capability\n");
1303 			else if (sc->sc_type != WM_T_82545_3 &&
1304 				 sc->sc_type != WM_T_82546_3) {
1305 				/*
1306 				 * Work around a problem caused by the BIOS
1307 				 * setting the max memory read byte count
1308 				 * incorrectly.
1309 				 */
1310 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1311 				    sc->sc_pcixe_capoff + PCI_PCIX_CMD);
1312 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1313 				    sc->sc_pcixe_capoff + PCI_PCIX_STATUS);
1314 
1315 				bytecnt =
1316 				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
1317 				    PCI_PCIX_CMD_BYTECNT_SHIFT;
1318 				maxb =
1319 				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
1320 				    PCI_PCIX_STATUS_MAXB_SHIFT;
1321 				if (bytecnt > maxb) {
1322 					aprint_verbose_dev(sc->sc_dev,
1323 					    "resetting PCI-X MMRBC: %d -> %d\n",
1324 					    512 << bytecnt, 512 << maxb);
1325 					pcix_cmd = (pcix_cmd &
1326 					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
1327 					   (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
1328 					pci_conf_write(pa->pa_pc, pa->pa_tag,
1329 					    sc->sc_pcixe_capoff + PCI_PCIX_CMD,
1330 					    pcix_cmd);
1331 				}
1332 			}
1333 		}
1334 		/*
1335 		 * The quad port adapter is special; it has a PCIX-PCIX
1336 		 * bridge on the board, and can run the secondary bus at
1337 		 * a higher speed.
1338 		 */
1339 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1340 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1341 								      : 66;
1342 		} else if (sc->sc_flags & WM_F_PCIX) {
1343 			switch (reg & STATUS_PCIXSPD_MASK) {
1344 			case STATUS_PCIXSPD_50_66:
1345 				sc->sc_bus_speed = 66;
1346 				break;
1347 			case STATUS_PCIXSPD_66_100:
1348 				sc->sc_bus_speed = 100;
1349 				break;
1350 			case STATUS_PCIXSPD_100_133:
1351 				sc->sc_bus_speed = 133;
1352 				break;
1353 			default:
1354 				aprint_error_dev(sc->sc_dev,
1355 				    "unknown PCIXSPD %d; assuming 66MHz\n",
1356 				    reg & STATUS_PCIXSPD_MASK);
1357 				sc->sc_bus_speed = 66;
1358 				break;
1359 			}
1360 		} else
1361 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1362 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1363 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1364 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1365 	}
1366 
1367 	/*
1368 	 * Allocate the control data structures, and create and load the
1369 	 * DMA map for it.
1370 	 *
1371 	 * NOTE: All Tx descriptors must be in the same 4G segment of
1372 	 * memory.  So must Rx descriptors.  We simplify by allocating
1373 	 * both sets within the same 4G segment.
1374 	 */
1375 	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1376 	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
1377 	sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1378 	    sizeof(struct wm_control_data_82542) :
1379 	    sizeof(struct wm_control_data_82544);
1380 	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1381 		    (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1382 		    &sc->sc_cd_rseg, 0)) != 0) {
1383 		aprint_error_dev(sc->sc_dev,
1384 		    "unable to allocate control data, error = %d\n",
1385 		    error);
1386 		goto fail_0;
1387 	}
1388 
1389 	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1390 		    sc->sc_cd_rseg, sc->sc_cd_size,
1391 		    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1392 		aprint_error_dev(sc->sc_dev,
1393 		    "unable to map control data, error = %d\n", error);
1394 		goto fail_1;
1395 	}
1396 
1397 	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1398 		    sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1399 		aprint_error_dev(sc->sc_dev,
1400 		    "unable to create control data DMA map, error = %d\n",
1401 		    error);
1402 		goto fail_2;
1403 	}
1404 
1405 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1406 		    sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1407 		aprint_error_dev(sc->sc_dev,
1408 		    "unable to load control data DMA map, error = %d\n",
1409 		    error);
1410 		goto fail_3;
1411 	}
1412 
1413 	/*
1414 	 * Create the transmit buffer DMA maps.
1415 	 */
1416 	WM_TXQUEUELEN(sc) =
1417 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1418 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1419 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1420 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1421 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1422 			    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1423 			aprint_error_dev(sc->sc_dev,
1424 			    "unable to create Tx DMA map %d, error = %d\n",
1425 			    i, error);
1426 			goto fail_4;
1427 		}
1428 	}
1429 
1430 	/*
1431 	 * Create the receive buffer DMA maps.
1432 	 */
1433 	for (i = 0; i < WM_NRXDESC; i++) {
1434 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1435 			    MCLBYTES, 0, 0,
1436 			    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1437 			aprint_error_dev(sc->sc_dev,
1438 			    "unable to create Rx DMA map %d error = %d\n",
1439 			    i, error);
1440 			goto fail_5;
1441 		}
1442 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
1443 	}
1444 
1445 	/* clear interesting stat counters */
1446 	CSR_READ(sc, WMREG_COLC);
1447 	CSR_READ(sc, WMREG_RXERRC);
1448 
1449 	/*
1450 	 * Reset the chip to a known state.
1451 	 */
1452 	wm_reset(sc);
1453 
1454 	switch (sc->sc_type) {
1455 	case WM_T_82571:
1456 	case WM_T_82572:
1457 	case WM_T_82573:
1458 	case WM_T_82574:
1459 	case WM_T_82583:
1460 	case WM_T_80003:
1461 	case WM_T_ICH8:
1462 	case WM_T_ICH9:
1463 	case WM_T_ICH10:
1464 	case WM_T_PCH:
1465 		if (wm_check_mng_mode(sc) != 0)
1466 			wm_get_hw_control(sc);
1467 		break;
1468 	default:
1469 		break;
1470 	}
1471 
1472 	/*
1473 	 * Get some information about the EEPROM.
1474 	 */
1475 	switch (sc->sc_type) {
1476 	case WM_T_82542_2_0:
1477 	case WM_T_82542_2_1:
1478 	case WM_T_82543:
1479 	case WM_T_82544:
1480 		/* Microwire */
1481 		sc->sc_ee_addrbits = 6;
1482 		break;
1483 	case WM_T_82540:
1484 	case WM_T_82545:
1485 	case WM_T_82545_3:
1486 	case WM_T_82546:
1487 	case WM_T_82546_3:
1488 		/* Microwire */
1489 		reg = CSR_READ(sc, WMREG_EECD);
1490 		if (reg & EECD_EE_SIZE)
1491 			sc->sc_ee_addrbits = 8;
1492 		else
1493 			sc->sc_ee_addrbits = 6;
1494 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1495 		break;
1496 	case WM_T_82541:
1497 	case WM_T_82541_2:
1498 	case WM_T_82547:
1499 	case WM_T_82547_2:
1500 		reg = CSR_READ(sc, WMREG_EECD);
1501 		if (reg & EECD_EE_TYPE) {
1502 			/* SPI */
1503 			wm_set_spiaddrbits(sc);
1504 		} else
1505 			/* Microwire */
1506 			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
1507 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1508 		break;
1509 	case WM_T_82571:
1510 	case WM_T_82572:
1511 		/* SPI */
1512 		wm_set_spiaddrbits(sc);
1513 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1514 		break;
1515 	case WM_T_82573:
1516 	case WM_T_82574:
1517 	case WM_T_82583:
1518 		if (wm_is_onboard_nvm_eeprom(sc) == 0)
1519 			sc->sc_flags |= WM_F_EEPROM_FLASH;
1520 		else {
1521 			/* SPI */
1522 			wm_set_spiaddrbits(sc);
1523 		}
1524 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1525 		break;
1526 	case WM_T_82575:
1527 	case WM_T_82576:
1528 	case WM_T_82580:
1529 	case WM_T_82580ER:
1530 	case WM_T_80003:
1531 		/* SPI */
1532 		wm_set_spiaddrbits(sc);
1533 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
1534 		break;
1535 	case WM_T_ICH8:
1536 	case WM_T_ICH9:
1537 	case WM_T_ICH10:
1538 	case WM_T_PCH:
1539 		/* FLASH */
1540 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
1541 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1542 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1543 		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1544 			aprint_error_dev(sc->sc_dev,
1545 			    "can't map FLASH registers\n");
1546 			return;
1547 		}
1548 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1549 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1550 						ICH_FLASH_SECTOR_SIZE;
1551 		sc->sc_ich8_flash_bank_size =
1552 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1553 		sc->sc_ich8_flash_bank_size -=
1554 		    (reg & ICH_GFPREG_BASE_MASK);
1555 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1556 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1557 		break;
1558 	default:
1559 		break;
1560 	}
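	/*
	 * Worked example of the GFPREG decoding above (a sketch with
	 * hypothetical field values, assuming the usual 4 KB
	 * ICH_FLASH_SECTOR_SIZE): a base field of 1 and a limit field
	 * of 4 give
	 *
	 *	sc_ich8_flash_base      = 1 * 4096 = 4096 bytes
	 *	sc_ich8_flash_bank_size = (4 + 1 - 1) * 4096 / (2 * 2)
	 *	                        = 4096
	 *
	 * i.e. each of the two NVM banks holds 4096 16-bit words.
	 */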
1561 
1562 	/*
1563 	 * Defer printing the EEPROM type until after verifying the checksum.
1564 	 * This allows the EEPROM type to be printed correctly in the case
1565 	 * that no EEPROM is attached.
1566 	 */
1567 	/*
1568 	 * Validate the EEPROM checksum. If the checksum fails, flag
1569 	 * this for later, so we can fail future reads from the EEPROM.
1570 	 */
1571 	if (wm_validate_eeprom_checksum(sc)) {
1572 		/*
1573 		 * Validate the checksum again, because some PCI-e parts
1574 		 * fail the first check while the link is in a sleep state.
1575 		 */
1576 		if (wm_validate_eeprom_checksum(sc))
1577 			sc->sc_flags |= WM_F_EEPROM_INVALID;
1578 	}
1579 
1580 	/* Set device properties (macflags) */
1581 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1582 
1583 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
1584 		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1585 	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1586 		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1587 	} else {
1588 		if (sc->sc_flags & WM_F_EEPROM_SPI)
1589 			eetype = "SPI";
1590 		else
1591 			eetype = "MicroWire";
1592 		aprint_verbose_dev(sc->sc_dev,
1593 		    "%u word (%d address bits) %s EEPROM\n",
1594 		    1U << sc->sc_ee_addrbits,
1595 		    sc->sc_ee_addrbits, eetype);
1596 	}
1597 
1598 	/*
1599 	 * Read the Ethernet address from the EEPROM if it was not
1600 	 * found first in the device properties.
1601 	 */
1602 	ea = prop_dictionary_get(dict, "mac-address");
1603 	if (ea != NULL) {
1604 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1605 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1606 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1607 	} else {
1608 		if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
1609 		    sizeof(myea) / sizeof(myea[0]), myea)) {
1610 			aprint_error_dev(sc->sc_dev,
1611 			    "unable to read Ethernet address\n");
1612 			return;
1613 		}
1614 		enaddr[0] = myea[0] & 0xff;
1615 		enaddr[1] = myea[0] >> 8;
1616 		enaddr[2] = myea[1] & 0xff;
1617 		enaddr[3] = myea[1] >> 8;
1618 		enaddr[4] = myea[2] & 0xff;
1619 		enaddr[5] = myea[2] >> 8;
1620 	}
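	/*
	 * The EEPROM stores the MAC address as three little-endian
	 * 16-bit words.  For example (hypothetical contents),
	 * myea[] = { 0x1200, 0x5634, 0x9a78 } unpacks to
	 * 00:12:34:56:78:9a: the low byte of each word supplies the
	 * even-numbered octet and the high byte the odd-numbered one.
	 */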
1621 
1622 	/*
1623 	 * Toggle the LSB of the MAC address on the second port
1624 	 * of the dual port controller.
1625 	 */
1626 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1627 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
1628 	    || (sc->sc_type == WM_T_82575)) {
1629 		if (sc->sc_funcid == 1)
1630 			enaddr[5] ^= 1;
1631 	}
1632 
1633 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1634 	    ether_sprintf(enaddr));
1635 
1636 	/*
1637 	 * Read the config info from the EEPROM, and set up various
1638 	 * bits in the control registers based on their contents.
1639 	 */
1640 	pn = prop_dictionary_get(dict, "i82543-cfg1");
1641 	if (pn != NULL) {
1642 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1643 		cfg1 = (uint16_t) prop_number_integer_value(pn);
1644 	} else {
1645 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1646 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1647 			return;
1648 		}
1649 	}
1650 
1651 	pn = prop_dictionary_get(dict, "i82543-cfg2");
1652 	if (pn != NULL) {
1653 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1654 		cfg2 = (uint16_t) prop_number_integer_value(pn);
1655 	} else {
1656 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1657 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1658 			return;
1659 		}
1660 	}
1661 
1662 	/* check for WM_F_WOL */
1663 	switch (sc->sc_type) {
1664 	case WM_T_82542_2_0:
1665 	case WM_T_82542_2_1:
1666 	case WM_T_82543:
1667 		/* dummy? */
1668 		eeprom_data = 0;
1669 		apme_mask = EEPROM_CFG3_APME;
1670 		break;
1671 	case WM_T_82544:
1672 		apme_mask = EEPROM_CFG2_82544_APM_EN;
1673 		eeprom_data = cfg2;
1674 		break;
1675 	case WM_T_82546:
1676 	case WM_T_82546_3:
1677 	case WM_T_82571:
1678 	case WM_T_82572:
1679 	case WM_T_82573:
1680 	case WM_T_82574:
1681 	case WM_T_82583:
1682 	case WM_T_80003:
1683 	default:
1684 		apme_mask = EEPROM_CFG3_APME;
1685 		wm_read_eeprom(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
1686 		    : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
1687 		break;
1688 	case WM_T_82575:
1689 	case WM_T_82576:
1690 	case WM_T_82580:
1691 	case WM_T_82580ER:
1692 	case WM_T_ICH8:
1693 	case WM_T_ICH9:
1694 	case WM_T_ICH10:
1695 	case WM_T_PCH:
1696 		apme_mask = WUC_APME;
1697 		eeprom_data = CSR_READ(sc, WMREG_WUC);
1698 		break;
1699 	}
1700 
1701 	/* Set the WM_F_WOL flag from the EEPROM/register data read above */
1702 	if ((eeprom_data & apme_mask) != 0)
1703 		sc->sc_flags |= WM_F_WOL;
1704 #ifdef WM_DEBUG
1705 	if ((sc->sc_flags & WM_F_WOL) != 0)
1706 		printf("WOL\n");
1707 #endif
1708 
1709 	/*
1710 	 * XXX need special handling for some multi-port cards
1711 	 * to disable a particular port.
1712 	 */
1713 
1714 	if (sc->sc_type >= WM_T_82544) {
1715 		pn = prop_dictionary_get(dict, "i82543-swdpin");
1716 		if (pn != NULL) {
1717 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1718 			swdpin = (uint16_t) prop_number_integer_value(pn);
1719 		} else {
1720 			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1721 				aprint_error_dev(sc->sc_dev,
1722 				    "unable to read SWDPIN\n");
1723 				return;
1724 			}
1725 		}
1726 	}
1727 
1728 	if (cfg1 & EEPROM_CFG1_ILOS)
1729 		sc->sc_ctrl |= CTRL_ILOS;
1730 	if (sc->sc_type >= WM_T_82544) {
1731 		sc->sc_ctrl |=
1732 		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1733 		    CTRL_SWDPIO_SHIFT;
1734 		sc->sc_ctrl |=
1735 		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1736 		    CTRL_SWDPINS_SHIFT;
1737 	} else {
1738 		sc->sc_ctrl |=
1739 		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1740 		    CTRL_SWDPIO_SHIFT;
1741 	}
1742 
1743 #if 0
1744 	if (sc->sc_type >= WM_T_82544) {
1745 		if (cfg1 & EEPROM_CFG1_IPS0)
1746 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1747 		if (cfg1 & EEPROM_CFG1_IPS1)
1748 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1749 		sc->sc_ctrl_ext |=
1750 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1751 		    CTRL_EXT_SWDPIO_SHIFT;
1752 		sc->sc_ctrl_ext |=
1753 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1754 		    CTRL_EXT_SWDPINS_SHIFT;
1755 	} else {
1756 		sc->sc_ctrl_ext |=
1757 		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1758 		    CTRL_EXT_SWDPIO_SHIFT;
1759 	}
1760 #endif
1761 
1762 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1763 #if 0
1764 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1765 #endif
1766 
1767 	/*
1768 	 * Set up some register offsets that are different between
1769 	 * the i82542 and the i82543 and later chips.
1770 	 */
1771 	if (sc->sc_type < WM_T_82543) {
1772 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
1773 		sc->sc_tdt_reg = WMREG_OLD_TDT;
1774 	} else {
1775 		sc->sc_rdt_reg = WMREG_RDT;
1776 		sc->sc_tdt_reg = WMREG_TDT;
1777 	}
1778 
1779 	if (sc->sc_type == WM_T_PCH) {
1780 		uint16_t val;
1781 
1782 		/* Save the NVM K1 bit setting */
1783 		wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
1784 
1785 		if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
1786 			sc->sc_nvm_k1_enabled = 1;
1787 		else
1788 			sc->sc_nvm_k1_enabled = 0;
1789 	}
1790 
1791 	/*
1792 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
1793 	 * media structures accordingly.
1794 	 */
1795 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1796 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
1797 	    || sc->sc_type == WM_T_82573
1798 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
1799 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
1800 		wm_gmii_mediainit(sc, wmp->wmp_product);
1801 	} else if (sc->sc_type < WM_T_82543 ||
1802 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1803 		if (wmp->wmp_flags & WMP_F_1000T)
1804 			aprint_error_dev(sc->sc_dev,
1805 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
1806 		wm_tbi_mediainit(sc);
1807 	} else {
1808 		switch (sc->sc_type) {
1809 		case WM_T_82575:
1810 		case WM_T_82576:
1811 		case WM_T_82580:
1812 		case WM_T_82580ER:
1813 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
1814 			switch (reg & CTRL_EXT_LINK_MODE_MASK) {
1815 			case CTRL_EXT_LINK_MODE_SGMII:
1816 				aprint_verbose_dev(sc->sc_dev, "SGMII\n");
1817 				sc->sc_flags |= WM_F_SGMII;
1818 				CSR_WRITE(sc, WMREG_CTRL_EXT,
1819 				    reg | CTRL_EXT_I2C_ENA);
1820 				wm_gmii_mediainit(sc, wmp->wmp_product);
1821 				break;
1822 			case CTRL_EXT_LINK_MODE_1000KX:
1823 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
1824 				aprint_verbose_dev(sc->sc_dev, "1000KX or SERDES\n");
1825 				CSR_WRITE(sc, WMREG_CTRL_EXT,
1826 				    reg | CTRL_EXT_I2C_ENA);
1827 				panic("not supported yet\n");
1828 				break;
1829 			case CTRL_EXT_LINK_MODE_GMII:
1830 			default:
1831 				CSR_WRITE(sc, WMREG_CTRL_EXT,
1832 				    reg & ~CTRL_EXT_I2C_ENA);
1833 				wm_gmii_mediainit(sc, wmp->wmp_product);
1834 				break;
1835 			}
1836 			break;
1837 		default:
1838 			if (wmp->wmp_flags & WMP_F_1000X)
1839 				aprint_error_dev(sc->sc_dev,
1840 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
1841 			wm_gmii_mediainit(sc, wmp->wmp_product);
1842 		}
1843 	}
1844 
1845 	ifp = &sc->sc_ethercom.ec_if;
1846 	xname = device_xname(sc->sc_dev);
1847 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
1848 	ifp->if_softc = sc;
1849 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1850 	ifp->if_ioctl = wm_ioctl;
1851 	ifp->if_start = wm_start;
1852 	ifp->if_watchdog = wm_watchdog;
1853 	ifp->if_init = wm_init;
1854 	ifp->if_stop = wm_stop;
1855 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1856 	IFQ_SET_READY(&ifp->if_snd);
1857 
1858 	/* Check for jumbo frame */
1859 	switch (sc->sc_type) {
1860 	case WM_T_82573:
1861 		/* XXX limited to 9234 if ASPM is disabled */
1862 		wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
1863 		if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
1864 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1865 		break;
1866 	case WM_T_82571:
1867 	case WM_T_82572:
1868 	case WM_T_82574:
1869 	case WM_T_82575:
1870 	case WM_T_82576:
1871 	case WM_T_82580:
1872 	case WM_T_82580ER:
1873 	case WM_T_80003:
1874 	case WM_T_ICH9:
1875 	case WM_T_ICH10:
1876 		/* XXX limited to 9234 */
1877 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1878 		break;
1879 	case WM_T_PCH:
1880 		/* XXX limited to 4096 */
1881 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1882 		break;
1883 	case WM_T_82542_2_0:
1884 	case WM_T_82542_2_1:
1885 	case WM_T_82583:
1886 	case WM_T_ICH8:
1887 		/* No support for jumbo frame */
1888 		/* No support for jumbo frames */
1889 	default:
1890 		/* ETHER_MAX_LEN_JUMBO */
1891 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1892 		break;
1893 	}
1894 
1895 	/*
1896 	 * If we're an i82543 or greater, we can support VLANs.
1897 	 */
1898 	if (sc->sc_type >= WM_T_82543)
1899 		sc->sc_ethercom.ec_capabilities |=
1900 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
1901 
1902 	/*
1903 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
1904 	 * on i82543 and later.
1905 	 */
1906 	if (sc->sc_type >= WM_T_82543) {
1907 		ifp->if_capabilities |=
1908 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1909 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1910 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1911 		    IFCAP_CSUM_TCPv6_Tx |
1912 		    IFCAP_CSUM_UDPv6_Tx;
1913 	}
1914 
1915 	/*
1916 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
1917 	 *
1918 	 *	82541GI (8086:1076) ... no
1919 	 *	82572EI (8086:10b9) ... yes
1920 	 */
1921 	if (sc->sc_type >= WM_T_82571) {
1922 		ifp->if_capabilities |=
1923 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1924 	}
1925 
1926 	/*
1927 	 * If we're an i82544 or greater (except i82547), we can do
1928 	 * TCP segmentation offload.
1929 	 */
1930 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
1931 		ifp->if_capabilities |= IFCAP_TSOv4;
1932 	}
1933 
1934 	if (sc->sc_type >= WM_T_82571) {
1935 		ifp->if_capabilities |= IFCAP_TSOv6;
1936 	}
1937 
1938 	/*
1939 	 * Attach the interface.
1940 	 */
1941 	if_attach(ifp);
1942 	ether_ifattach(ifp, enaddr);
1943 #if NRND > 0
1944 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
1945 #endif
1946 
1947 #ifdef WM_EVENT_COUNTERS
1948 	/* Attach event counters. */
1949 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1950 	    NULL, xname, "txsstall");
1951 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1952 	    NULL, xname, "txdstall");
1953 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1954 	    NULL, xname, "txfifo_stall");
1955 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1956 	    NULL, xname, "txdw");
1957 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1958 	    NULL, xname, "txqe");
1959 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1960 	    NULL, xname, "rxintr");
1961 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
1962 	    NULL, xname, "linkintr");
1963 
1964 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1965 	    NULL, xname, "rxipsum");
1966 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
1967 	    NULL, xname, "rxtusum");
1968 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1969 	    NULL, xname, "txipsum");
1970 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
1971 	    NULL, xname, "txtusum");
1972 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
1973 	    NULL, xname, "txtusum6");
1974 
1975 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
1976 	    NULL, xname, "txtso");
1977 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
1978 	    NULL, xname, "txtso6");
1979 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
1980 	    NULL, xname, "txtsopain");
1981 
1982 	for (i = 0; i < WM_NTXSEGS; i++) {
1983 		sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
1984 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
1985 		    NULL, xname, wm_txseg_evcnt_names[i]);
1986 	}
1987 
1988 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
1989 	    NULL, xname, "txdrop");
1990 
1991 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
1992 	    NULL, xname, "tu");
1993 
1994 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
1995 	    NULL, xname, "tx_xoff");
1996 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
1997 	    NULL, xname, "tx_xon");
1998 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
1999 	    NULL, xname, "rx_xoff");
2000 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2001 	    NULL, xname, "rx_xon");
2002 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2003 	    NULL, xname, "rx_macctl");
2004 #endif /* WM_EVENT_COUNTERS */
2005 
2006 	if (pmf_device_register(self, wm_suspend, wm_resume))
2007 		pmf_class_network_register(self, ifp);
2008 	else
2009 		aprint_error_dev(self, "couldn't establish power handler\n");
2010 
2011 	return;
2012 
2013 	/*
2014 	 * Free any resources we've allocated during the failed attach
2015 	 * attempt.  Do this in reverse order and fall through.
2016 	 */
2017  fail_5:
2018 	for (i = 0; i < WM_NRXDESC; i++) {
2019 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2020 			bus_dmamap_destroy(sc->sc_dmat,
2021 			    sc->sc_rxsoft[i].rxs_dmamap);
2022 	}
2023  fail_4:
2024 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2025 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
2026 			bus_dmamap_destroy(sc->sc_dmat,
2027 			    sc->sc_txsoft[i].txs_dmamap);
2028 	}
2029 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2030  fail_3:
2031 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2032  fail_2:
2033 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2034 	    sc->sc_cd_size);
2035  fail_1:
2036 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2037  fail_0:
2038 	return;
2039 }
2040 
2041 static int
2042 wm_detach(device_t self, int flags __unused)
2043 {
2044 	struct wm_softc *sc = device_private(self);
2045 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2046 	int i, s;
2047 
2048 	s = splnet();
2049 	/* Stop the interface. The callouts are stopped by wm_stop(). */
2050 	wm_stop(ifp, 1);
2051 	splx(s);
2052 
2053 	pmf_device_deregister(self);
2054 
2055 	/* Tell the firmware about the release */
2056 	wm_release_manageability(sc);
2057 
2058 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2059 
2060 	/* Delete all remaining media. */
2061 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2062 
2063 	ether_ifdetach(ifp);
2064 	if_detach(ifp);
2065 
2067 	/* Unload RX dmamaps and free mbufs */
2068 	wm_rxdrain(sc);
2069 
2070 	/* Free the DMA maps, mirroring the failure path at the end of wm_attach() */
2071 	for (i = 0; i < WM_NRXDESC; i++) {
2072 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2073 			bus_dmamap_destroy(sc->sc_dmat,
2074 			    sc->sc_rxsoft[i].rxs_dmamap);
2075 	}
2076 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2077 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
2078 			bus_dmamap_destroy(sc->sc_dmat,
2079 			    sc->sc_txsoft[i].txs_dmamap);
2080 	}
2081 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2082 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2083 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2084 	    sc->sc_cd_size);
2085 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2086 
2087 	/* Disestablish the interrupt handler */
2088 	if (sc->sc_ih != NULL) {
2089 		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2090 		sc->sc_ih = NULL;
2091 	}
2092 
2093 	/* Unmap the register */
2094 	if (sc->sc_ss) {
2095 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2096 		sc->sc_ss = 0;
2097 	}
2098 
2099 	wm_release_hw_control(sc);
2100 
2101 	return 0;
2102 }
2103 
2104 /*
2105  * wm_tx_offload:
2106  *
2107  *	Set up TCP/IP checksumming parameters for the
2108  *	specified packet.
2109  */
2110 static int
2111 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
2112     uint8_t *fieldsp)
2113 {
2114 	struct mbuf *m0 = txs->txs_mbuf;
2115 	struct livengood_tcpip_ctxdesc *t;
2116 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
2117 	uint32_t ipcse;
2118 	struct ether_header *eh;
2119 	int offset, iphl;
2120 	uint8_t fields;
2121 
2122 	/*
2123 	 * XXX It would be nice if the mbuf pkthdr had offset
2124 	 * fields for the protocol headers.
2125 	 */
2126 
2127 	eh = mtod(m0, struct ether_header *);
2128 	switch (htons(eh->ether_type)) {
2129 	case ETHERTYPE_IP:
2130 	case ETHERTYPE_IPV6:
2131 		offset = ETHER_HDR_LEN;
2132 		break;
2133 
2134 	case ETHERTYPE_VLAN:
2135 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2136 		break;
2137 
2138 	default:
2139 		/*
2140 		 * Don't support this protocol or encapsulation.
2141 		 */
2142 		*fieldsp = 0;
2143 		*cmdp = 0;
2144 		return 0;
2145 	}
2146 
2147 	if ((m0->m_pkthdr.csum_flags &
2148 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
2149 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2150 	} else {
2151 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2152 	}
2153 	ipcse = offset + iphl - 1;
2154 
2155 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
2156 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
2157 	seg = 0;
2158 	fields = 0;
2159 
2160 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2161 		int hlen = offset + iphl;
2162 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2163 
2164 		if (__predict_false(m0->m_len <
2165 				    (hlen + sizeof(struct tcphdr)))) {
2166 			/*
2167 			 * TCP/IP headers are not in the first mbuf; we need
2168 			 * to do this the slow and painful way.  Let's just
2169 			 * hope this doesn't happen very often.
2170 			 */
2171 			struct tcphdr th;
2172 
2173 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2174 
2175 			m_copydata(m0, hlen, sizeof(th), &th);
2176 			if (v4) {
2177 				struct ip ip;
2178 
2179 				m_copydata(m0, offset, sizeof(ip), &ip);
2180 				ip.ip_len = 0;
2181 				m_copyback(m0,
2182 				    offset + offsetof(struct ip, ip_len),
2183 				    sizeof(ip.ip_len), &ip.ip_len);
2184 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2185 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2186 			} else {
2187 				struct ip6_hdr ip6;
2188 
2189 				m_copydata(m0, offset, sizeof(ip6), &ip6);
2190 				ip6.ip6_plen = 0;
2191 				m_copyback(m0,
2192 				    offset + offsetof(struct ip6_hdr, ip6_plen),
2193 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2194 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2195 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2196 			}
2197 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2198 			    sizeof(th.th_sum), &th.th_sum);
2199 
2200 			hlen += th.th_off << 2;
2201 		} else {
2202 			/*
2203 			 * TCP/IP headers are in the first mbuf; we can do
2204 			 * this the easy way.
2205 			 */
2206 			struct tcphdr *th;
2207 
2208 			if (v4) {
2209 				struct ip *ip =
2210 				    (void *)(mtod(m0, char *) + offset);
2211 				th = (void *)(mtod(m0, char *) + hlen);
2212 
2213 				ip->ip_len = 0;
2214 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2215 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2216 			} else {
2217 				struct ip6_hdr *ip6 =
2218 				    (void *)(mtod(m0, char *) + offset);
2219 				th = (void *)(mtod(m0, char *) + hlen);
2220 
2221 				ip6->ip6_plen = 0;
2222 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2223 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2224 			}
2225 			hlen += th->th_off << 2;
2226 		}
2227 
2228 		if (v4) {
2229 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
2230 			cmdlen |= WTX_TCPIP_CMD_IP;
2231 		} else {
2232 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2233 			ipcse = 0;
2234 		}
2235 		cmd |= WTX_TCPIP_CMD_TSE;
2236 		cmdlen |= WTX_TCPIP_CMD_TSE |
2237 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
2238 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
2239 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
2240 	}
2241 
2242 	/*
2243 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
2244 	 * offload feature, if we load the context descriptor, we
2245 	 * MUST provide valid values for IPCSS and TUCSS fields.
2246 	 */
2247 
2248 	ipcs = WTX_TCPIP_IPCSS(offset) |
2249 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
2250 	    WTX_TCPIP_IPCSE(ipcse);
2251 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
2252 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
2253 		fields |= WTX_IXSM;
2254 	}
2255 
2256 	offset += iphl;
2257 
2258 	if (m0->m_pkthdr.csum_flags &
2259 	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
2260 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2261 		fields |= WTX_TXSM;
2262 		tucs = WTX_TCPIP_TUCSS(offset) |
2263 		    WTX_TCPIP_TUCSO(offset +
2264 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
2265 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
2266 	} else if ((m0->m_pkthdr.csum_flags &
2267 	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
2268 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
2269 		fields |= WTX_TXSM;
2270 		tucs = WTX_TCPIP_TUCSS(offset) |
2271 		    WTX_TCPIP_TUCSO(offset +
2272 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
2273 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
2274 	} else {
2275 		/* Just initialize it to a valid TCP context. */
2276 		tucs = WTX_TCPIP_TUCSS(offset) |
2277 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
2278 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
2279 	}
2280 
2281 	/* Fill in the context descriptor. */
2282 	t = (struct livengood_tcpip_ctxdesc *)
2283 	    &sc->sc_txdescs[sc->sc_txnext];
2284 	t->tcpip_ipcs = htole32(ipcs);
2285 	t->tcpip_tucs = htole32(tucs);
2286 	t->tcpip_cmdlen = htole32(cmdlen);
2287 	t->tcpip_seg = htole32(seg);
2288 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
2289 
2290 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
2291 	txs->txs_ndesc++;
2292 
2293 	*cmdp = cmd;
2294 	*fieldsp = fields;
2295 
2296 	return 0;
2297 }
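/*
 * Worked example for wm_tx_offload() above (a sketch): for an untagged
 * IPv4/TCP frame with a 20-byte IP header and M_CSUM_TCPv4 set,
 *
 *	offset = ETHER_HDR_LEN = 14
 *	ipcse  = 14 + 20 - 1   = 33
 *	IPCSS = 14, IPCSO = 14 + 10 = 24, IPCSE = 33
 *	TUCSS = 34, TUCSO = 34 + 16 = 50, TUCSE = 0 (to end of packet)
 *
 * so the chip inserts the IP header checksum at byte 24 and the TCP
 * checksum at byte 50, counting from the start of the frame.
 */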
2298 
2299 static void
2300 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
2301 {
2302 	struct mbuf *m;
2303 	int i;
2304 
2305 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
2306 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
2307 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
2308 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
2309 		    m->m_data, m->m_len, m->m_flags);
2310 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
2311 	    i, i == 1 ? "" : "s");
2312 }
2313 
2314 /*
2315  * wm_82547_txfifo_stall:
2316  *
2317  *	Callout used to wait for the 82547 Tx FIFO to drain,
2318  *	reset the FIFO pointers, and restart packet transmission.
2319  */
2320 static void
2321 wm_82547_txfifo_stall(void *arg)
2322 {
2323 	struct wm_softc *sc = arg;
2324 	int s;
2325 
2326 	s = splnet();
2327 
2328 	if (sc->sc_txfifo_stall) {
2329 		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
2330 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
2331 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
2332 			/*
2333 			 * Packets have drained.  Stop transmitter, reset
2334 			 * FIFO pointers, restart transmitter, and kick
2335 			 * the packet queue.
2336 			 */
2337 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
2338 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
2339 			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
2340 			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
2341 			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
2342 			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
2343 			CSR_WRITE(sc, WMREG_TCTL, tctl);
2344 			CSR_WRITE_FLUSH(sc);
2345 
2346 			sc->sc_txfifo_head = 0;
2347 			sc->sc_txfifo_stall = 0;
2348 			wm_start(&sc->sc_ethercom.ec_if);
2349 		} else {
2350 			/*
2351 			 * Still waiting for packets to drain; try again in
2352 			 * another tick.
2353 			 */
2354 			callout_schedule(&sc->sc_txfifo_ch, 1);
2355 		}
2356 	}
2357 
2358 	splx(s);
2359 }
2360 
2361 /*
2362  * wm_82547_txfifo_bugchk:
2363  *
2364  *	Check for bug condition in the 82547 Tx FIFO.  We need to
2365  *	prevent enqueueing a packet that would wrap around the end
2366  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
2367  *
2368  *	We do this by checking the amount of space before the end
2369  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
2370  *	the Tx FIFO, wait for all remaining packets to drain, reset
2371  *	the internal FIFO pointers to the beginning, and restart
2372  *	transmission on the interface.
2373  */
2374 #define	WM_FIFO_HDR		0x10
2375 #define	WM_82547_PAD_LEN	0x3e0
2376 static int
2377 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2378 {
2379 	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2380 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2381 
2382 	/* Just return if already stalled. */
2383 	if (sc->sc_txfifo_stall)
2384 		return 1;
2385 
2386 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
2387 		/* Stall only occurs in half-duplex mode. */
2388 		goto send_packet;
2389 	}
2390 
2391 	if (len >= WM_82547_PAD_LEN + space) {
2392 		sc->sc_txfifo_stall = 1;
2393 		callout_schedule(&sc->sc_txfifo_ch, 1);
2394 		return 1;
2395 	}
2396 
2397  send_packet:
2398 	sc->sc_txfifo_head += len;
2399 	if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2400 		sc->sc_txfifo_head -= sc->sc_txfifo_size;
2401 
2402 	return 0;
2403 }
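/*
 * Worked example for wm_82547_txfifo_bugchk() (a sketch with
 * hypothetical numbers): assume a 10 KB Tx FIFO (sc_txfifo_size =
 * 0x2800), a half-duplex link, and sc_txfifo_head = 0x2780, leaving
 * 0x80 bytes before the wrap.  A 1500-byte packet rounds up to
 * len = 0x5f0; since 0x5f0 >= WM_82547_PAD_LEN + 0x80 = 0x460 it is
 * held back and the stall callout is scheduled.  A 200-byte packet
 * (len = 0xe0) is sent, and sc_txfifo_head advances to 0x2860, which
 * wraps to 0x60.
 */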
2404 
2405 /*
2406  * wm_start:		[ifnet interface function]
2407  *
2408  *	Start packet transmission on the interface.
2409  */
2410 static void
2411 wm_start(struct ifnet *ifp)
2412 {
2413 	struct wm_softc *sc = ifp->if_softc;
2414 	struct mbuf *m0;
2415 	struct m_tag *mtag;
2416 	struct wm_txsoft *txs;
2417 	bus_dmamap_t dmamap;
2418 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2419 	bus_addr_t curaddr;
2420 	bus_size_t seglen, curlen;
2421 	uint32_t cksumcmd;
2422 	uint8_t cksumfields;
2423 
2424 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2425 		return;
2426 
2427 	/*
2428 	 * Remember the previous number of free descriptors.
2429 	 */
2430 	ofree = sc->sc_txfree;
2431 
2432 	/*
2433 	 * Loop through the send queue, setting up transmit descriptors
2434 	 * until we drain the queue, or use up all available transmit
2435 	 * descriptors.
2436 	 */
2437 	for (;;) {
2438 		/* Grab a packet off the queue. */
2439 		IFQ_POLL(&ifp->if_snd, m0);
2440 		if (m0 == NULL)
2441 			break;
2442 
2443 		DPRINTF(WM_DEBUG_TX,
2444 		    ("%s: TX: have packet to transmit: %p\n",
2445 		    device_xname(sc->sc_dev), m0));
2446 
2447 		/* Get a work queue entry. */
2448 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2449 			wm_txintr(sc);
2450 			if (sc->sc_txsfree == 0) {
2451 				DPRINTF(WM_DEBUG_TX,
2452 				    ("%s: TX: no free job descriptors\n",
2453 					device_xname(sc->sc_dev)));
2454 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2455 				break;
2456 			}
2457 		}
2458 
2459 		txs = &sc->sc_txsoft[sc->sc_txsnext];
2460 		dmamap = txs->txs_dmamap;
2461 
2462 		use_tso = (m0->m_pkthdr.csum_flags &
2463 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2464 
2465 		/*
2466 		 * So says the Linux driver:
2467 		 * The controller does a simple calculation to make sure
2468 		 * there is enough room in the FIFO before initiating the
2469 		 * DMA for each buffer.  The calc is:
2470 		 *	4 = ceil(buffer len / MSS)
2471 		 * To make sure we don't overrun the FIFO, adjust the max
2472 		 * buffer len if the MSS drops.
2473 		 */
2474 		dmamap->dm_maxsegsz =
2475 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2476 		    ? m0->m_pkthdr.segsz << 2
2477 		    : WTX_MAX_LEN;
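		/*
		 * Example (a sketch): with TSO and an MSS of 1448,
		 * dm_maxsegsz is clamped to 1448 << 2 = 5792 bytes; with
		 * an MSS of 536 it drops to 2144.  Either way each DMA
		 * segment spans at most four MSS-sized chunks, keeping
		 * the chip's ceil(len / MSS) FIFO estimate within
		 * bounds.  Non-TSO packets keep the full WTX_MAX_LEN
		 * limit.
		 */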
2478 
2479 		/*
2480 		 * Load the DMA map.  If this fails, the packet either
2481 		 * didn't fit in the allotted number of segments, or we
2482 		 * were short on resources.  For the too-many-segments
2483 		 * case, we simply report an error and drop the packet,
2484 		 * since we can't sanely copy a jumbo packet to a single
2485 		 * buffer.
2486 		 */
2487 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2488 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2489 		if (error) {
2490 			if (error == EFBIG) {
2491 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2492 				log(LOG_ERR, "%s: Tx packet consumes too many "
2493 				    "DMA segments, dropping...\n",
2494 				    device_xname(sc->sc_dev));
2495 				IFQ_DEQUEUE(&ifp->if_snd, m0);
2496 				wm_dump_mbuf_chain(sc, m0);
2497 				m_freem(m0);
2498 				continue;
2499 			}
2500 			/*
2501 			 * Short on resources, just stop for now.
2502 			 */
2503 			DPRINTF(WM_DEBUG_TX,
2504 			    ("%s: TX: dmamap load failed: %d\n",
2505 			    device_xname(sc->sc_dev), error));
2506 			break;
2507 		}
2508 
2509 		segs_needed = dmamap->dm_nsegs;
2510 		if (use_tso) {
2511 			/* For sentinel descriptor; see below. */
2512 			segs_needed++;
2513 		}
2514 
2515 		/*
2516 		 * Ensure we have enough descriptors free to describe
2517 		 * the packet.  Note, we always reserve one descriptor
2518 		 * at the end of the ring due to the semantics of the
2519 		 * TDT register, plus one more in the event we need
2520 		 * to load offload context.
2521 		 */
2522 		if (segs_needed > sc->sc_txfree - 2) {
2523 			/*
2524 			 * Not enough free descriptors to transmit this
2525 			 * packet.  We haven't committed anything yet,
2526 			 * so just unload the DMA map, put the packet
2527 			 * back on the queue, and punt.  Notify the upper
2528 			 * layer that there are no more slots left.
2529 			 */
2530 			DPRINTF(WM_DEBUG_TX,
2531 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
2532 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
2533 			    segs_needed, sc->sc_txfree - 1));
2534 			ifp->if_flags |= IFF_OACTIVE;
2535 			bus_dmamap_unload(sc->sc_dmat, dmamap);
2536 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2537 			break;
2538 		}
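		/*
		 * Example (a sketch): a TSO packet mapped into 6 DMA
		 * segments has segs_needed = 7 (one extra for the
		 * sentinel) and is only accepted while sc_txfree >= 9:
		 * 7 data descriptors, plus one that wm_tx_offload() may
		 * consume for the context descriptor, plus the one
		 * always left unused so that TDT never catches up with
		 * TDH.
		 */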
2539 
2540 		/*
2541 		 * Check for 82547 Tx FIFO bug.  We need to do this
2542 		 * once we know we can transmit the packet, since we
2543 		 * do some internal FIFO space accounting here.
2544 		 */
2545 		if (sc->sc_type == WM_T_82547 &&
2546 		    wm_82547_txfifo_bugchk(sc, m0)) {
2547 			DPRINTF(WM_DEBUG_TX,
2548 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
2549 			    device_xname(sc->sc_dev)));
2550 			ifp->if_flags |= IFF_OACTIVE;
2551 			bus_dmamap_unload(sc->sc_dmat, dmamap);
2552 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2553 			break;
2554 		}
2555 
2556 		IFQ_DEQUEUE(&ifp->if_snd, m0);
2557 
2558 		/*
2559 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2560 		 */
2561 
2562 		DPRINTF(WM_DEBUG_TX,
2563 		    ("%s: TX: packet has %d (%d) DMA segments\n",
2564 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2565 
2566 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2567 
2568 		/*
2569 		 * Store a pointer to the packet so that we can free it
2570 		 * later.
2571 		 *
2572 		 * Initially, we consider the number of descriptors the
2573 		 * packet uses to be the number of DMA segments.  This may be
2574 		 * incremented by 1 if we do checksum offload (a descriptor
2575 		 * is used to set the checksum context).
2576 		 */
2577 		txs->txs_mbuf = m0;
2578 		txs->txs_firstdesc = sc->sc_txnext;
2579 		txs->txs_ndesc = segs_needed;
2580 
2581 		/* Set up offload parameters for this packet. */
2582 		if (m0->m_pkthdr.csum_flags &
2583 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
2584 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2585 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2586 			if (wm_tx_offload(sc, txs, &cksumcmd,
2587 					  &cksumfields) != 0) {
2588 				/* Error message already displayed. */
2589 				bus_dmamap_unload(sc->sc_dmat, dmamap);
2590 				continue;
2591 			}
2592 		} else {
2593 			cksumcmd = 0;
2594 			cksumfields = 0;
2595 		}
2596 
2597 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2598 
2599 		/* Sync the DMA map. */
2600 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2601 		    BUS_DMASYNC_PREWRITE);
2602 
2603 		/*
2604 		 * Initialize the transmit descriptor.
2605 		 */
2606 		for (nexttx = sc->sc_txnext, seg = 0;
2607 		     seg < dmamap->dm_nsegs; seg++) {
2608 			for (seglen = dmamap->dm_segs[seg].ds_len,
2609 			     curaddr = dmamap->dm_segs[seg].ds_addr;
2610 			     seglen != 0;
2611 			     curaddr += curlen, seglen -= curlen,
2612 			     nexttx = WM_NEXTTX(sc, nexttx)) {
2613 				curlen = seglen;
2614 
2615 				/*
2616 				 * So says the Linux driver:
2617 				 * Workaround for premature descriptor
2618 				 * write-backs in TSO mode.  Append a
2619 				 * 4-byte sentinel descriptor.
2620 				 */
2621 				if (use_tso &&
2622 				    seg == dmamap->dm_nsegs - 1 &&
2623 				    curlen > 8)
2624 					curlen -= 4;
2625 
2626 				wm_set_dma_addr(
2627 				    &sc->sc_txdescs[nexttx].wtx_addr,
2628 				    curaddr);
2629 				sc->sc_txdescs[nexttx].wtx_cmdlen =
2630 				    htole32(cksumcmd | curlen);
2631 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2632 				    0;
2633 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2634 				    cksumfields;
2635 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2636 				lasttx = nexttx;
2637 
2638 				DPRINTF(WM_DEBUG_TX,
2639 				    ("%s: TX: desc %d: low 0x%08lx, "
2640 				     "len 0x%04x\n",
2641 				    device_xname(sc->sc_dev), nexttx,
2642 				    curaddr & 0xffffffffUL, (unsigned)curlen));
2643 			}
2644 		}
2645 
2646 		KASSERT(lasttx != -1);
2647 
2648 		/*
2649 		 * Set up the command byte on the last descriptor of
2650 		 * the packet.  If we're in the interrupt delay window,
2651 		 * delay the interrupt.
2652 		 */
2653 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
2654 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
2655 
2656 		/*
2657 		 * If VLANs are enabled and the packet has a VLAN tag, set
2658 		 * up the descriptor to encapsulate the packet for us.
2659 		 *
2660 		 * This is only valid on the last descriptor of the packet.
2661 		 */
2662 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2663 			sc->sc_txdescs[lasttx].wtx_cmdlen |=
2664 			    htole32(WTX_CMD_VLE);
2665 			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2666 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2667 		}
2668 
2669 		txs->txs_lastdesc = lasttx;
2670 
2671 		DPRINTF(WM_DEBUG_TX,
2672 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
2673 		    device_xname(sc->sc_dev),
2674 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2675 
2676 		/* Sync the descriptors we're using. */
2677 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2678 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2679 
2680 		/* Give the packet to the chip. */
2681 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2682 
2683 		DPRINTF(WM_DEBUG_TX,
2684 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2685 
2686 		DPRINTF(WM_DEBUG_TX,
2687 		    ("%s: TX: finished transmitting packet, job %d\n",
2688 		    device_xname(sc->sc_dev), sc->sc_txsnext));
2689 
2690 		/* Advance the tx pointer. */
2691 		sc->sc_txfree -= txs->txs_ndesc;
2692 		sc->sc_txnext = nexttx;
2693 
2694 		sc->sc_txsfree--;
2695 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2696 
2697 		/* Pass the packet to any BPF listeners. */
2698 		if (ifp->if_bpf)
2699 			bpf_ops->bpf_mtap(ifp->if_bpf, m0);
2700 	}
2701 
2702 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2703 		/* No more slots; notify upper layer. */
2704 		ifp->if_flags |= IFF_OACTIVE;
2705 	}
2706 
2707 	if (sc->sc_txfree != ofree) {
2708 		/* Set a watchdog timer in case the chip flakes out. */
2709 		ifp->if_timer = 5;
2710 	}
2711 }
2712 
2713 /*
2714  * wm_watchdog:		[ifnet interface function]
2715  *
2716  *	Watchdog timer handler.
2717  */
2718 static void
2719 wm_watchdog(struct ifnet *ifp)
2720 {
2721 	struct wm_softc *sc = ifp->if_softc;
2722 
2723 	/*
2724 	 * Since we're using delayed interrupts, sweep up
2725 	 * before we report an error.
2726 	 */
2727 	wm_txintr(sc);
2728 
2729 	if (sc->sc_txfree != WM_NTXDESC(sc)) {
2730 		log(LOG_ERR,
2731 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2732 		    device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2733 		    sc->sc_txnext);
2734 		ifp->if_oerrors++;
2735 
2736 		/* Reset the interface. */
2737 		(void) wm_init(ifp);
2738 	}
2739 
2740 	/* Try to get more packets going. */
2741 	wm_start(ifp);
2742 }
2743 
2744 /*
2745  * wm_ioctl:		[ifnet interface function]
2746  *
2747  *	Handle control requests from the operator.
2748  */
2749 static int
2750 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2751 {
2752 	struct wm_softc *sc = ifp->if_softc;
2753 	struct ifreq *ifr = (struct ifreq *) data;
2754 	struct ifaddr *ifa = (struct ifaddr *)data;
2755 	struct sockaddr_dl *sdl;
2756 	int diff, s, error;
2757 
2758 	s = splnet();
2759 
2760 	switch (cmd) {
2761 	case SIOCSIFFLAGS:
2762 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
2763 			break;
2764 		if (ifp->if_flags & IFF_UP) {
2765 			diff = (ifp->if_flags ^ sc->sc_if_flags)
2766 			    & (IFF_PROMISC | IFF_ALLMULTI);
2767 			if ((diff & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2768 				/*
2769 				 * If the difference between the old and
2770 				 * new flags is only IFF_PROMISC or
2771 				 * IFF_ALLMULTI, set multicast filter only
2772 				 * (don't reset to prevent link down).
2773 				 */
2774 				wm_set_filter(sc);
2775 			} else {
2776 				/*
2777 				 * Reset the interface to pick up changes in
2778 				 * any other flags that affect the hardware
2779 				 * state.
2780 				 */
2781 				wm_init(ifp);
2782 			}
2783 		} else {
2784 			if (ifp->if_flags & IFF_RUNNING)
2785 				wm_stop(ifp, 1);
2786 		}
2787 		sc->sc_if_flags = ifp->if_flags;
2788 		error = 0;
2789 		break;
2790 	case SIOCSIFMEDIA:
2791 	case SIOCGIFMEDIA:
2792 		/* Flow control requires full-duplex mode. */
2793 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2794 		    (ifr->ifr_media & IFM_FDX) == 0)
2795 			ifr->ifr_media &= ~IFM_ETH_FMASK;
2796 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2797 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2798 				/* We can do both TXPAUSE and RXPAUSE. */
2799 				ifr->ifr_media |=
2800 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2801 			}
2802 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2803 		}
2804 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2805 		break;
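		/*
		 * Example of the masking above (a sketch): a request for
		 * "media 100baseTX mediaopt full-duplex,flowcontrol"
		 * keeps IFM_FLOW and expands it to TXPAUSE|RXPAUSE,
		 * while the same request at half-duplex, or with
		 * "media auto", has IFM_ETH_FMASK stripped before
		 * ifmedia_ioctl() sees it.
		 */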
2806 	case SIOCINITIFADDR:
2807 		if (ifa->ifa_addr->sa_family == AF_LINK) {
2808 			sdl = satosdl(ifp->if_dl->ifa_addr);
2809 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2810 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2811 			/* unicast address is first multicast entry */
2812 			wm_set_filter(sc);
2813 			error = 0;
2814 			break;
2815 		}
2816 		/* Fall through for rest */
2817 	default:
2818 		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
2819 			break;
2820 
2821 		error = 0;
2822 
2823 		if (cmd == SIOCSIFCAP)
2824 			error = (*ifp->if_init)(ifp);
2825 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2826 			;
2827 		else if (ifp->if_flags & IFF_RUNNING) {
2828 			/*
2829 			 * Multicast list has changed; set the hardware filter
2830 			 * accordingly.
2831 			 */
2832 			wm_set_filter(sc);
2833 		}
2834 		break;
2835 	}
2836 
2837 	/* Try to get more packets going. */
2838 	wm_start(ifp);
2839 
2840 	splx(s);
2841 	return error;
2842 }
2843 
2844 /*
2845  * wm_intr:
2846  *
2847  *	Interrupt service routine.
2848  */
2849 static int
2850 wm_intr(void *arg)
2851 {
2852 	struct wm_softc *sc = arg;
2853 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2854 	uint32_t icr;
2855 	int handled = 0;
2856 
2857 	while (1 /* CONSTCOND */) {
2858 		icr = CSR_READ(sc, WMREG_ICR);
2859 		if ((icr & sc->sc_icr) == 0)
2860 			break;
2861 #if 0 /*NRND > 0*/
2862 		if (RND_ENABLED(&sc->rnd_source))
2863 			rnd_add_uint32(&sc->rnd_source, icr);
2864 #endif
2865 
2866 		handled = 1;
2867 
2868 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2869 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
2870 			DPRINTF(WM_DEBUG_RX,
2871 			    ("%s: RX: got Rx intr 0x%08x\n",
2872 			    device_xname(sc->sc_dev),
2873 			    icr & (ICR_RXDMT0|ICR_RXT0)));
2874 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
2875 		}
2876 #endif
2877 		wm_rxintr(sc);
2878 
2879 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2880 		if (icr & ICR_TXDW) {
2881 			DPRINTF(WM_DEBUG_TX,
2882 			    ("%s: TX: got TXDW interrupt\n",
2883 			    device_xname(sc->sc_dev)));
2884 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
2885 		}
2886 #endif
2887 		wm_txintr(sc);
2888 
2889 		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
2890 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
2891 			wm_linkintr(sc, icr);
2892 		}
2893 
2894 		if (icr & ICR_RXO) {
2895 #if defined(WM_DEBUG)
2896 			log(LOG_WARNING, "%s: Receive overrun\n",
2897 			    device_xname(sc->sc_dev));
2898 #endif /* defined(WM_DEBUG) */
2899 		}
2900 	}
2901 
2902 	if (handled) {
2903 		/* Try to get more packets going. */
2904 		wm_start(ifp);
2905 	}
2906 
2907 	return handled;
2908 }
2909 
2910 /*
2911  * wm_txintr:
2912  *
2913  *	Helper; handle transmit interrupts.
2914  */
2915 static void
2916 wm_txintr(struct wm_softc *sc)
2917 {
2918 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2919 	struct wm_txsoft *txs;
2920 	uint8_t status;
2921 	int i;
2922 
2923 	ifp->if_flags &= ~IFF_OACTIVE;
2924 
2925 	/*
2926 	 * Go through the Tx list and free mbufs for those
2927 	 * frames which have been transmitted.
2928 	 */
2929 	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
2930 	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
2931 		txs = &sc->sc_txsoft[i];
2932 
2933 		DPRINTF(WM_DEBUG_TX,
2934 		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
2935 
2936 		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
2937 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2938 
2939 		status =
2940 		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
2941 		if ((status & WTX_ST_DD) == 0) {
2942 			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
2943 			    BUS_DMASYNC_PREREAD);
2944 			break;
2945 		}
2946 
2947 		DPRINTF(WM_DEBUG_TX,
2948 		    ("%s: TX: job %d done: descs %d..%d\n",
2949 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
2950 		    txs->txs_lastdesc));
2951 
2952 		/*
2953 		 * XXX We should probably be using the statistics
2954 		 * XXX registers, but I don't know if they exist
2955 		 * XXX on chips before the i82544.
2956 		 */
2957 
2958 #ifdef WM_EVENT_COUNTERS
2959 		if (status & WTX_ST_TU)
2960 			WM_EVCNT_INCR(&sc->sc_ev_tu);
2961 #endif /* WM_EVENT_COUNTERS */
2962 
2963 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
2964 			ifp->if_oerrors++;
2965 			if (status & WTX_ST_LC)
2966 				log(LOG_WARNING, "%s: late collision\n",
2967 				    device_xname(sc->sc_dev));
2968 			else if (status & WTX_ST_EC) {
2969 				ifp->if_collisions += 16;
2970 				log(LOG_WARNING, "%s: excessive collisions\n",
2971 				    device_xname(sc->sc_dev));
2972 			}
2973 		} else
2974 			ifp->if_opackets++;
2975 
2976 		sc->sc_txfree += txs->txs_ndesc;
2977 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
2978 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2979 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2980 		m_freem(txs->txs_mbuf);
2981 		txs->txs_mbuf = NULL;
2982 	}
2983 
2984 	/* Update the dirty transmit buffer pointer. */
2985 	sc->sc_txsdirty = i;
2986 	DPRINTF(WM_DEBUG_TX,
2987 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
2988 
2989 	/*
2990 	 * If there are no more pending transmissions, cancel the watchdog
2991 	 * timer.
2992 	 */
2993 	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
2994 		ifp->if_timer = 0;
2995 }
2996 
2997 /*
2998  * wm_rxintr:
2999  *
3000  *	Helper; handle receive interrupts.
3001  */
3002 static void
3003 wm_rxintr(struct wm_softc *sc)
3004 {
3005 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3006 	struct wm_rxsoft *rxs;
3007 	struct mbuf *m;
3008 	int i, len;
3009 	uint8_t status, errors;
3010 	uint16_t vlantag;
3011 
3012 	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
3013 		rxs = &sc->sc_rxsoft[i];
3014 
3015 		DPRINTF(WM_DEBUG_RX,
3016 		    ("%s: RX: checking descriptor %d\n",
3017 		    device_xname(sc->sc_dev), i));
3018 
3019 		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3020 
3021 		status = sc->sc_rxdescs[i].wrx_status;
3022 		errors = sc->sc_rxdescs[i].wrx_errors;
3023 		len = le16toh(sc->sc_rxdescs[i].wrx_len);
3024 		vlantag = sc->sc_rxdescs[i].wrx_special;
3025 
3026 		if ((status & WRX_ST_DD) == 0) {
3027 			/*
3028 			 * We have processed all of the receive descriptors.
3029 			 */
3030 			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
3031 			break;
3032 		}
3033 
3034 		if (__predict_false(sc->sc_rxdiscard)) {
3035 			DPRINTF(WM_DEBUG_RX,
3036 			    ("%s: RX: discarding contents of descriptor %d\n",
3037 			    device_xname(sc->sc_dev), i));
3038 			WM_INIT_RXDESC(sc, i);
3039 			if (status & WRX_ST_EOP) {
3040 				/* Reset our state. */
3041 				DPRINTF(WM_DEBUG_RX,
3042 				    ("%s: RX: resetting rxdiscard -> 0\n",
3043 				    device_xname(sc->sc_dev)));
3044 				sc->sc_rxdiscard = 0;
3045 			}
3046 			continue;
3047 		}
3048 
3049 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3050 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3051 
3052 		m = rxs->rxs_mbuf;
3053 
3054 		/*
3055 		 * Add a new receive buffer to the ring, unless of
3056 		 * course the length is zero. Treat the latter as a
3057 		 * failed mapping.
3058 		 */
3059 		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
3060 			/*
3061 			 * Failed, throw away what we've done so
3062 			 * far, and discard the rest of the packet.
3063 			 */
3064 			ifp->if_ierrors++;
3065 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3066 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3067 			WM_INIT_RXDESC(sc, i);
3068 			if ((status & WRX_ST_EOP) == 0)
3069 				sc->sc_rxdiscard = 1;
3070 			if (sc->sc_rxhead != NULL)
3071 				m_freem(sc->sc_rxhead);
3072 			WM_RXCHAIN_RESET(sc);
3073 			DPRINTF(WM_DEBUG_RX,
3074 			    ("%s: RX: Rx buffer allocation failed, "
3075 			    "dropping packet%s\n", device_xname(sc->sc_dev),
3076 			    sc->sc_rxdiscard ? " (discard)" : ""));
3077 			continue;
3078 		}
3079 
3080 		m->m_len = len;
3081 		sc->sc_rxlen += len;
3082 		DPRINTF(WM_DEBUG_RX,
3083 		    ("%s: RX: buffer at %p len %d\n",
3084 		    device_xname(sc->sc_dev), m->m_data, len));
3085 
3086 		/*
3087 		 * If this is not the end of the packet, keep
3088 		 * looking.
3089 		 */
3090 		if ((status & WRX_ST_EOP) == 0) {
3091 			WM_RXCHAIN_LINK(sc, m);
3092 			DPRINTF(WM_DEBUG_RX,
3093 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
3094 			    device_xname(sc->sc_dev), sc->sc_rxlen));
3095 			continue;
3096 		}
3097 
3098 		/*
3099 		 * Okay, we have the entire packet now.  The chip is
3100 		 * configured to include the FCS (not all chips can
3101 		 * be configured to strip it), so we need to trim it.
3102 		 * We may need to adjust the length of the previous mbuf
3103 		 * in the chain if the current mbuf is too short.
3104 		 */
3105 		if (m->m_len < ETHER_CRC_LEN) {
3106 			sc->sc_rxtail->m_len -= (ETHER_CRC_LEN - m->m_len);
3107 			m->m_len = 0;
3108 		} else {
3109 			m->m_len -= ETHER_CRC_LEN;
3110 		}
3111 		len = sc->sc_rxlen - ETHER_CRC_LEN;
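		/*
		 * Worked example (a sketch): if a 64-byte frame arrives
		 * split as a 62-byte mbuf followed by a 2-byte mbuf, the
		 * 4-byte FCS straddles the two: the final mbuf's m_len
		 * drops to 0, the previous mbuf is trimmed by the
		 * remaining 2 bytes, and len becomes 64 - 4 = 60 bytes
		 * of real data.
		 */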
3112 
3113 		WM_RXCHAIN_LINK(sc, m);
3114 
3115 		*sc->sc_rxtailp = NULL;
3116 		m = sc->sc_rxhead;
3117 
3118 		WM_RXCHAIN_RESET(sc);
3119 
3120 		DPRINTF(WM_DEBUG_RX,
3121 		    ("%s: RX: have entire packet, len -> %d\n",
3122 		    device_xname(sc->sc_dev), len));
3123 
3124 		/*
3125 		 * If an error occurred, update stats and drop the packet.
3126 		 */
3127 		if (errors &
3128 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
3129 			if (errors & WRX_ER_SE)
3130 				log(LOG_WARNING, "%s: symbol error\n",
3131 				    device_xname(sc->sc_dev));
3132 			else if (errors & WRX_ER_SEQ)
3133 				log(LOG_WARNING, "%s: receive sequence error\n",
3134 				    device_xname(sc->sc_dev));
3135 			else if (errors & WRX_ER_CE)
3136 				log(LOG_WARNING, "%s: CRC error\n",
3137 				    device_xname(sc->sc_dev));
3138 			m_freem(m);
3139 			continue;
3140 		}
3141 
3142 		/*
3143 		 * No errors.  Receive the packet.
3144 		 */
3145 		m->m_pkthdr.rcvif = ifp;
3146 		m->m_pkthdr.len = len;
3147 
3148 		/*
3149 		 * If VLANs are enabled, VLAN packets have been unwrapped
3150 		 * for us.  Associate the tag with the packet.
3151 		 */
3152 		if ((status & WRX_ST_VP) != 0) {
3153 			VLAN_INPUT_TAG(ifp, m,
3154 			    le16toh(vlantag),
3155 			    continue);
3156 		}
3157 
3158 		/*
3159 		 * Set up checksum info for this packet.
3160 		 */
3161 		if ((status & WRX_ST_IXSM) == 0) {
3162 			if (status & WRX_ST_IPCS) {
3163 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
3164 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
3165 				if (errors & WRX_ER_IPE)
3166 					m->m_pkthdr.csum_flags |=
3167 					    M_CSUM_IPv4_BAD;
3168 			}
3169 			if (status & WRX_ST_TCPCS) {
3170 				/*
3171 				 * Note: we don't know if this was TCP or UDP,
3172 				 * so we just set both bits, and expect the
3173 				 * upper layers to deal.
3174 				 */
3175 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
3176 				m->m_pkthdr.csum_flags |=
3177 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
3178 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
3179 				if (errors & WRX_ER_TCPE)
3180 					m->m_pkthdr.csum_flags |=
3181 					    M_CSUM_TCP_UDP_BAD;
3182 			}
3183 		}
3184 
3185 		ifp->if_ipackets++;
3186 
3187 		/* Pass this up to any BPF listeners. */
3188 		if (ifp->if_bpf)
3189 			bpf_ops->bpf_mtap(ifp->if_bpf, m);
3190 
3191 		/* Pass it on. */
3192 		(*ifp->if_input)(ifp, m);
3193 	}
3194 
3195 	/* Update the receive pointer. */
3196 	sc->sc_rxptr = i;
3197 
3198 	DPRINTF(WM_DEBUG_RX,
3199 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
3200 }
3201 
3202 /*
3203  * wm_linkintr_gmii:
3204  *
3205  *	Helper; handle link interrupts for GMII.
3206  */
3207 static void
3208 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
3209 {
3210 
3211 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3212 		__func__));
3213 
3214 	if (icr & ICR_LSC) {
3215 		DPRINTF(WM_DEBUG_LINK,
3216 		    ("%s: LINK: LSC -> mii_tick\n",
3217 			device_xname(sc->sc_dev)));
3218 		mii_tick(&sc->sc_mii);
3219 		if (sc->sc_type == WM_T_82543) {
3220 			int miistatus, active;
3221 
3222 			/*
3223 			 * With 82543, we need to force speed and
3224 			 * duplex on the MAC equal to what the PHY
3225 			 * speed and duplex configuration is.
3226 			 */
3227 			miistatus = sc->sc_mii.mii_media_status;
3228 
3229 			if (miistatus & IFM_ACTIVE) {
3230 				active = sc->sc_mii.mii_media_active;
3231 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
3232 				switch (IFM_SUBTYPE(active)) {
3233 				case IFM_10_T:
3234 					sc->sc_ctrl |= CTRL_SPEED_10;
3235 					break;
3236 				case IFM_100_TX:
3237 					sc->sc_ctrl |= CTRL_SPEED_100;
3238 					break;
3239 				case IFM_1000_T:
3240 					sc->sc_ctrl |= CTRL_SPEED_1000;
3241 					break;
3242 				default:
3243 					/*
3244 					 * fiber?
3245 					 * Should not enter here.
3246 					 */
3247 					printf("unknown media (%x)\n",
3248 					    active);
3249 					break;
3250 				}
3251 				if (active & IFM_FDX)
3252 					sc->sc_ctrl |= CTRL_FD;
3253 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3254 			}
3255 		} else if ((sc->sc_type == WM_T_ICH8)
3256 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
3257 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
3258 		} else if (sc->sc_type == WM_T_PCH) {
3259 			wm_k1_gig_workaround_hv(sc,
3260 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
3261 		}
3262 
3263 		if ((sc->sc_phytype == WMPHY_82578)
3264 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
3265 			== IFM_1000_T)) {
3266 
3267 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
3268 				delay(200*1000); /* XXX too big */
3269 
3270 				/* Link stall fix for link up */
3271 				wm_gmii_hv_writereg(sc->sc_dev, 1,
3272 				    HV_MUX_DATA_CTRL,
3273 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
3274 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
3275 				wm_gmii_hv_writereg(sc->sc_dev, 1,
3276 				    HV_MUX_DATA_CTRL,
3277 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
3278 			}
3279 		}
3280 	} else if (icr & ICR_RXSEQ) {
3281 		DPRINTF(WM_DEBUG_LINK,
3282 		    ("%s: LINK Receive sequence error\n",
3283 			device_xname(sc->sc_dev)));
3284 	}
3285 }
3286 
3287 /*
3288  * wm_linkintr_tbi:
3289  *
3290  *	Helper; handle link interrupts for TBI mode.
3291  */
3292 static void
3293 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
3294 {
3295 	uint32_t status;
3296 
3297 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3298 		__func__));
3299 
3300 	status = CSR_READ(sc, WMREG_STATUS);
3301 	if (icr & ICR_LSC) {
3302 		if (status & STATUS_LU) {
3303 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
3304 			    device_xname(sc->sc_dev),
3305 			    (status & STATUS_FD) ? "FDX" : "HDX"));
3306 			/*
3307 			 * NOTE: CTRL will update TFCE and RFCE automatically,
3308 			 * so we should update sc->sc_ctrl
3309 			 */
3310 
3311 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3312 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3313 			sc->sc_fcrtl &= ~FCRTL_XONE;
3314 			if (status & STATUS_FD)
3315 				sc->sc_tctl |=
3316 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3317 			else
3318 				sc->sc_tctl |=
3319 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3320 			if (sc->sc_ctrl & CTRL_TFCE)
3321 				sc->sc_fcrtl |= FCRTL_XONE;
3322 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3323 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3324 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
3325 				      sc->sc_fcrtl);
3326 			sc->sc_tbi_linkup = 1;
3327 		} else {
3328 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
3329 			    device_xname(sc->sc_dev)));
3330 			sc->sc_tbi_linkup = 0;
3331 		}
3332 		wm_tbi_set_linkled(sc);
3333 	} else if (icr & ICR_RXCFG) {
3334 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
3335 		    device_xname(sc->sc_dev)));
3336 		sc->sc_tbi_nrxcfg++;
3337 		wm_check_for_link(sc);
3338 	} else if (icr & ICR_RXSEQ) {
3339 		DPRINTF(WM_DEBUG_LINK,
3340 		    ("%s: LINK: Receive sequence error\n",
3341 		    device_xname(sc->sc_dev)));
3342 	}
3343 }
3344 
3345 /*
3346  * wm_linkintr:
3347  *
3348  *	Helper; handle link interrupts.
3349  */
3350 static void
3351 wm_linkintr(struct wm_softc *sc, uint32_t icr)
3352 {
3353 
3354 	if (sc->sc_flags & WM_F_HAS_MII)
3355 		wm_linkintr_gmii(sc, icr);
3356 	else
3357 		wm_linkintr_tbi(sc, icr);
3358 }
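
/*
 * Example (a sketch, not the actual wm_intr() body): this is roughly how
 * the interrupt handler is expected to hand link events to wm_linkintr().
 * The ICR bit names are the ones used elsewhere in this file; the
 * surrounding logic is illustrative only.
 */
#if 0
	uint32_t icr = CSR_READ(sc, WMREG_ICR);

	if (icr & (ICR_LSC | ICR_RXSEQ | ICR_RXCFG))
		wm_linkintr(sc, icr);	/* works for both MII and TBI */
#endif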
3359 
3360 /*
3361  * wm_tick:
3362  *
3363  *	One second timer, used to check link status, sweep up
3364  *	completed transmit jobs, etc.
3365  */
3366 static void
3367 wm_tick(void *arg)
3368 {
3369 	struct wm_softc *sc = arg;
3370 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3371 	int s;
3372 
3373 	s = splnet();
3374 
3375 	if (sc->sc_type >= WM_T_82542_2_1) {
3376 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3377 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3378 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3379 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3380 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3381 	}
3382 
3383 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3384 	ifp->if_ierrors += 0ULL /* ensure quad_t */
3385 	    + CSR_READ(sc, WMREG_CRCERRS)
3386 	    + CSR_READ(sc, WMREG_ALGNERRC)
3387 	    + CSR_READ(sc, WMREG_SYMERRC)
3388 	    + CSR_READ(sc, WMREG_RXERRC)
3389 	    + CSR_READ(sc, WMREG_SEC)
3390 	    + CSR_READ(sc, WMREG_CEXTERR)
3391 	    + CSR_READ(sc, WMREG_RLEC);
3392 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
3393 
3394 	if (sc->sc_flags & WM_F_HAS_MII)
3395 		mii_tick(&sc->sc_mii);
3396 	else
3397 		wm_tbi_check_link(sc);
3398 
3399 	splx(s);
3400 
3401 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3402 }
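
/*
 * Note that wm_tick() re-arms itself via callout_reset(), so it only needs
 * to be kicked off once.  A minimal sketch of the callout life cycle,
 * assuming the callout was set up at attach time with the names used above:
 */
#if 0
	callout_init(&sc->sc_tick_ch, 0);			/* at attach */
	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);	/* start */
	callout_stop(&sc->sc_tick_ch);				/* at stop */
#endif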
3403 
3404 /*
3405  * wm_reset:
3406  *
3407  *	Reset the i82542 chip.
3408  */
3409 static void
3410 wm_reset(struct wm_softc *sc)
3411 {
3412 	int phy_reset = 0;
3413 	uint32_t reg, mask;
3414 	int i;
3415 
3416 	/*
3417 	 * Allocate on-chip memory according to the MTU size.
3418 	 * The Packet Buffer Allocation register must be written
3419 	 * before the chip is reset.
3420 	 */
3421 	switch (sc->sc_type) {
3422 	case WM_T_82547:
3423 	case WM_T_82547_2:
3424 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3425 		    PBA_22K : PBA_30K;
3426 		sc->sc_txfifo_head = 0;
3427 		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3428 		sc->sc_txfifo_size =
3429 		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3430 		sc->sc_txfifo_stall = 0;
3431 		break;
3432 	case WM_T_82571:
3433 	case WM_T_82572:
3434 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
3435 	case WM_T_80003:
3436 		sc->sc_pba = PBA_32K;
3437 		break;
3438 	case WM_T_82580:
3439 	case WM_T_82580ER:
3440 		sc->sc_pba = PBA_35K;
3441 		break;
3442 	case WM_T_82576:
3443 		sc->sc_pba = PBA_64K;
3444 		break;
3445 	case WM_T_82573:
3446 		sc->sc_pba = PBA_12K;
3447 		break;
3448 	case WM_T_82574:
3449 	case WM_T_82583:
3450 		sc->sc_pba = PBA_20K;
3451 		break;
3452 	case WM_T_ICH8:
3453 		sc->sc_pba = PBA_8K;
3454 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3455 		break;
3456 	case WM_T_ICH9:
3457 	case WM_T_ICH10:
3458 	case WM_T_PCH:
3459 		sc->sc_pba = PBA_10K;
3460 		break;
3461 	default:
3462 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3463 		    PBA_40K : PBA_48K;
3464 		break;
3465 	}
3466 	CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
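
	/*
	 * Worked example of the split above for the 82547: with a normal
	 * MTU, sc_pba = PBA_30K, so the Tx FIFO starts at the 30K mark and
	 * spans (PBA_40K - PBA_30K) = 10K of the 40K packet buffer; with
	 * jumbo frames (MTU > 8192), PBA_22K leaves 18K for transmit.
	 */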
3467 
3468 	/* Prevent the PCI-E bus from sticking */
3469 	if (sc->sc_flags & WM_F_PCIE) {
3470 		int timeout = 800;
3471 
3472 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
3473 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3474 
3475 		while (timeout--) {
3476 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
3477 				break;
3478 			delay(100);
3479 		}
3480 	}
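
	/*
	 * The loop above polls every 100us for up to 800 iterations, i.e.
	 * the GIO master-disable handshake is given roughly 80ms to
	 * complete before we proceed anyway.
	 */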
3481 
3482 	/* Set the completion timeout for interface */
3483 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
3484 		wm_set_pcie_completion_timeout(sc);
3485 
3486 	/* Clear interrupt */
3487 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3488 
3489 	/* Stop the transmit and receive processes. */
3490 	CSR_WRITE(sc, WMREG_RCTL, 0);
3491 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3492 	sc->sc_rctl &= ~RCTL_EN;
3493 
3494 	/* XXX set_tbi_sbp_82543() */
3495 
3496 	delay(10*1000);
3497 
3498 	/* Must acquire the MDIO ownership before MAC reset */
3499 	switch (sc->sc_type) {
3500 	case WM_T_82573:
3501 	case WM_T_82574:
3502 	case WM_T_82583:
3503 		i = 0;
3504 		reg = CSR_READ(sc, WMREG_EXTCNFCTR)
3505 		    | EXTCNFCTR_MDIO_SW_OWNERSHIP;
3506 		do {
3507 			CSR_WRITE(sc, WMREG_EXTCNFCTR,
3508 			    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
3509 			reg = CSR_READ(sc, WMREG_EXTCNFCTR);
3510 			if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
3511 				break;
3512 			reg |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
3513 			delay(2*1000);
3514 			i++;
3515 		} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
3516 		break;
3517 	default:
3518 		break;
3519 	}
3520 
3521 	/*
3522 	 * 82541 Errata 29? & 82547 Errata 28?
3523 	 * See also the description about PHY_RST bit in CTRL register
3524 	 * in 8254x_GBe_SDM.pdf.
3525 	 */
3526 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3527 		CSR_WRITE(sc, WMREG_CTRL,
3528 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3529 		delay(5000);
3530 	}
3531 
3532 	switch (sc->sc_type) {
3533 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3534 	case WM_T_82541:
3535 	case WM_T_82541_2:
3536 	case WM_T_82547:
3537 	case WM_T_82547_2:
3538 		/*
3539 		 * On some chipsets, a reset through a memory-mapped write
3540 		 * cycle can cause the chip to reset before completing the
3541 		 * write cycle.  This causes major headache that can be
3542 		 * avoided by issuing the reset via indirect register writes
3543 		 * through I/O space.
3544 		 *
3545 		 * So, if we successfully mapped the I/O BAR at attach time,
3546 		 * use that.  Otherwise, try our luck with a memory-mapped
3547 		 * reset.
3548 		 */
3549 		if (sc->sc_flags & WM_F_IOH_VALID)
3550 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3551 		else
3552 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3553 		break;
3554 	case WM_T_82545_3:
3555 	case WM_T_82546_3:
3556 		/* Use the shadow control register on these chips. */
3557 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3558 		break;
3559 	case WM_T_80003:
3560 		mask = swfwphysem[sc->sc_funcid];
3561 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3562 		wm_get_swfw_semaphore(sc, mask);
3563 		CSR_WRITE(sc, WMREG_CTRL, reg);
3564 		wm_put_swfw_semaphore(sc, mask);
3565 		break;
3566 	case WM_T_ICH8:
3567 	case WM_T_ICH9:
3568 	case WM_T_ICH10:
3569 	case WM_T_PCH:
3570 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3571 		if (wm_check_reset_block(sc) == 0) {
3572 			if (sc->sc_type >= WM_T_PCH) {
3573 				uint32_t status;
3574 
3575 				status = CSR_READ(sc, WMREG_STATUS);
3576 				CSR_WRITE(sc, WMREG_STATUS,
3577 				    status & ~STATUS_PHYRA);
3578 			}
3579 
3580 			reg |= CTRL_PHY_RESET;
3581 			phy_reset = 1;
3582 		}
3583 		wm_get_swfwhw_semaphore(sc);
3584 		CSR_WRITE(sc, WMREG_CTRL, reg);
3585 		delay(20*1000);
3586 		wm_put_swfwhw_semaphore(sc);
3587 		break;
3588 	case WM_T_82542_2_0:
3589 	case WM_T_82542_2_1:
3590 	case WM_T_82543:
3591 	case WM_T_82540:
3592 	case WM_T_82545:
3593 	case WM_T_82546:
3594 	case WM_T_82571:
3595 	case WM_T_82572:
3596 	case WM_T_82573:
3597 	case WM_T_82574:
3598 	case WM_T_82575:
3599 	case WM_T_82576:
3600 	case WM_T_82583:
3601 	default:
3602 		/* Everything else can safely use the documented method. */
3603 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3604 		break;
3605 	}
3606 
3607 	if (phy_reset != 0)
3608 		wm_get_cfg_done(sc);
3609 
3610 	/* reload EEPROM */
3611 	switch (sc->sc_type) {
3612 	case WM_T_82542_2_0:
3613 	case WM_T_82542_2_1:
3614 	case WM_T_82543:
3615 	case WM_T_82544:
3616 		delay(10);
3617 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3618 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3619 		delay(2000);
3620 		break;
3621 	case WM_T_82540:
3622 	case WM_T_82545:
3623 	case WM_T_82545_3:
3624 	case WM_T_82546:
3625 	case WM_T_82546_3:
3626 		delay(5*1000);
3627 		/* XXX Disable HW ARPs on ASF enabled adapters */
3628 		break;
3629 	case WM_T_82541:
3630 	case WM_T_82541_2:
3631 	case WM_T_82547:
3632 	case WM_T_82547_2:
3633 		delay(20000);
3634 		/* XXX Disable HW ARPs on ASF enabled adapters */
3635 		break;
3636 	case WM_T_82571:
3637 	case WM_T_82572:
3638 	case WM_T_82573:
3639 	case WM_T_82574:
3640 	case WM_T_82583:
3641 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3642 			delay(10);
3643 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3644 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3645 		}
3646 		/* check EECD_EE_AUTORD */
3647 		wm_get_auto_rd_done(sc);
3648 		/*
3649 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
3650 		 * is set.
3651 		 */
3652 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3653 		    || (sc->sc_type == WM_T_82583))
3654 			delay(25*1000);
3655 		break;
3656 	case WM_T_82575:
3657 	case WM_T_82576:
3658 	case WM_T_80003:
3659 	case WM_T_ICH8:
3660 	case WM_T_ICH9:
3661 		/* check EECD_EE_AUTORD */
3662 		wm_get_auto_rd_done(sc);
3663 		break;
3664 	case WM_T_ICH10:
3665 	case WM_T_PCH:
3666 		wm_lan_init_done(sc);
3667 		break;
3668 	default:
3669 		panic("%s: unknown type\n", __func__);
3670 	}
3671 
3672 	/* Check whether EEPROM is present or not */
3673 	switch (sc->sc_type) {
3674 	case WM_T_82575:
3675 	case WM_T_82576:
3676 	case WM_T_82580:
3677 	case WM_T_ICH8:
3678 	case WM_T_ICH9:
3679 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3680 			/* Not found */
3681 			sc->sc_flags |= WM_F_EEPROM_INVALID;
3682 			if (sc->sc_type == WM_T_82575) /* 82575 only */
3683 				wm_reset_init_script_82575(sc);
3684 		}
3685 		break;
3686 	default:
3687 		break;
3688 	}
3689 
3690 	/* Clear any pending interrupt events. */
3691 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3692 	reg = CSR_READ(sc, WMREG_ICR);
3693 
3694 	/* reload sc_ctrl */
3695 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3696 
3697 	/* dummy read from WUC */
3698 	if (sc->sc_type == WM_T_PCH)
3699 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3700 	/*
3701 	 * For PCH, this write will make sure that any noise will be detected
3702 	 * as a CRC error and be dropped rather than show up as a bad packet
3703 	 * to the DMA engine
3704 	 */
3705 	if (sc->sc_type == WM_T_PCH)
3706 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3707 
3708 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3709 		CSR_WRITE(sc, WMREG_WUC, 0);
3710 
3711 	/* XXX need special handling for 82580 */
3712 }
3713 
3714 /*
3715  * wm_init:		[ifnet interface function]
3716  *
3717  *	Initialize the interface.  Must be called at splnet().
3718  */
3719 static int
3720 wm_init(struct ifnet *ifp)
3721 {
3722 	struct wm_softc *sc = ifp->if_softc;
3723 	struct wm_rxsoft *rxs;
3724 	int i, error = 0;
3725 	uint32_t reg;
3726 
3727 	/*
3728 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3729 	 * There is a small but measurable benefit to avoiding the adjustment
3730 	 * of the descriptor so that the headers are aligned, for normal mtu,
3731 	 * on such platforms.  One possibility is that the DMA itself is
3732 	 * slightly more efficient if the front of the entire packet (instead
3733 	 * of the front of the headers) is aligned.
3734 	 *
3735 	 * Note we must always set align_tweak to 0 if we are using
3736 	 * jumbo frames.
3737 	 */
3738 #ifdef __NO_STRICT_ALIGNMENT
3739 	sc->sc_align_tweak = 0;
3740 #else
3741 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3742 		sc->sc_align_tweak = 0;
3743 	else
3744 		sc->sc_align_tweak = 2;
3745 #endif /* __NO_STRICT_ALIGNMENT */
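
	/*
	 * Example of the effect: ETHER_HDR_LEN is 14, so with an
	 * align_tweak of 2 the Ethernet header ends at offset 16 and the
	 * IP header starts 4-byte aligned, as strict-alignment platforms
	 * require.
	 */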
3746 
3747 	/* Cancel any pending I/O. */
3748 	wm_stop(ifp, 0);
3749 
3750 	/* update statistics before reset */
3751 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3752 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3753 
3754 	/* Reset the chip to a known state. */
3755 	wm_reset(sc);
3756 
3757 	switch (sc->sc_type) {
3758 	case WM_T_82571:
3759 	case WM_T_82572:
3760 	case WM_T_82573:
3761 	case WM_T_82574:
3762 	case WM_T_82583:
3763 	case WM_T_80003:
3764 	case WM_T_ICH8:
3765 	case WM_T_ICH9:
3766 	case WM_T_ICH10:
3767 	case WM_T_PCH:
3768 		if (wm_check_mng_mode(sc) != 0)
3769 			wm_get_hw_control(sc);
3770 		break;
3771 	default:
3772 		break;
3773 	}
3774 
3775 	/* Reset the PHY. */
3776 	if (sc->sc_flags & WM_F_HAS_MII)
3777 		wm_gmii_reset(sc);
3778 
3779 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
3780 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
3781 	if (sc->sc_type == WM_T_PCH)
3782 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
3783 
3784 	/* Initialize the transmit descriptor ring. */
3785 	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3786 	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3787 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3788 	sc->sc_txfree = WM_NTXDESC(sc);
3789 	sc->sc_txnext = 0;
3790 
3791 	if (sc->sc_type < WM_T_82543) {
3792 		CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
3793 		CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
3794 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3795 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3796 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3797 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3798 	} else {
3799 		CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
3800 		CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
3801 		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3802 		CSR_WRITE(sc, WMREG_TDH, 0);
3803 		CSR_WRITE(sc, WMREG_TDT, 0);
3804 		CSR_WRITE(sc, WMREG_TIDV, 375);		/* ITR / 4 */
3805 		CSR_WRITE(sc, WMREG_TADV, 375);		/* should be same */
3806 
3807 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3808 			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
3809 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
3810 			    | TXDCTL_WTHRESH(0));
3811 		else {
3812 			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3813 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3814 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3815 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3816 		}
3817 	}
3818 	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3819 	CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3820 
3821 	/* Initialize the transmit job descriptors. */
3822 	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3823 		sc->sc_txsoft[i].txs_mbuf = NULL;
3824 	sc->sc_txsfree = WM_TXQUEUELEN(sc);
3825 	sc->sc_txsnext = 0;
3826 	sc->sc_txsdirty = 0;
3827 
3828 	/*
3829 	 * Initialize the receive descriptor and receive job
3830 	 * descriptor rings.
3831 	 */
3832 	if (sc->sc_type < WM_T_82543) {
3833 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3834 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3835 		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3836 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3837 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3838 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3839 
3840 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3841 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3842 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3843 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3844 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3845 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3846 	} else {
3847 		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3848 		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3849 		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3850 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3851 			CSR_WRITE(sc, WMREG_EITR(0), 450);
3852 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
3853 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
3854 			CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
3855 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
3856 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
3857 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
3858 			    | RXDCTL_WTHRESH(1));
3859 		} else {
3860 			CSR_WRITE(sc, WMREG_RDH, 0);
3861 			CSR_WRITE(sc, WMREG_RDT, 0);
3862 			CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD);	/* ITR/4 */
3863 			CSR_WRITE(sc, WMREG_RADV, 375);		/* MUST be same */
3864 		}
3865 	}
3866 	for (i = 0; i < WM_NRXDESC; i++) {
3867 		rxs = &sc->sc_rxsoft[i];
3868 		if (rxs->rxs_mbuf == NULL) {
3869 			if ((error = wm_add_rxbuf(sc, i)) != 0) {
3870 				log(LOG_ERR, "%s: unable to allocate or map rx "
3871 				    "buffer %d, error = %d\n",
3872 				    device_xname(sc->sc_dev), i, error);
3873 				/*
3874 				 * XXX Should attempt to run with fewer receive
3875 				 * XXX buffers instead of just failing.
3876 				 */
3877 				wm_rxdrain(sc);
3878 				goto out;
3879 			}
3880 		} else {
3881 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3882 				WM_INIT_RXDESC(sc, i);
3883 		}
3884 	}
3885 	sc->sc_rxptr = 0;
3886 	sc->sc_rxdiscard = 0;
3887 	WM_RXCHAIN_RESET(sc);
3888 
3889 	/*
3890 	 * Clear out the VLAN table -- we don't use it (yet).
3891 	 */
3892 	CSR_WRITE(sc, WMREG_VET, 0);
3893 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
3894 		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3895 
3896 	/*
3897 	 * Set up flow-control parameters.
3898 	 *
3899 	 * XXX Values could probably stand some tuning.
3900 	 */
3901 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
3902 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
3903 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3904 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3905 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3906 	}
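
	/*
	 * These constants follow IEEE 802.3x: PAUSE frames are addressed
	 * to the reserved multicast address 01-80-C2-00-00-01 (FCAL/FCAH)
	 * and carry ethertype 0x8808 (ETHERTYPE_FLOWCONTROL).
	 */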
3907 
3908 	sc->sc_fcrtl = FCRTL_DFLT;
3909 	if (sc->sc_type < WM_T_82543) {
3910 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3911 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3912 	} else {
3913 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3914 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3915 	}
3916 
3917 	if (sc->sc_type == WM_T_80003)
3918 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
3919 	else
3920 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3921 
3922 	/* Deal with VLAN enables. */
3923 	if (VLAN_ATTACHED(&sc->sc_ethercom))
3924 		sc->sc_ctrl |= CTRL_VME;
3925 	else
3926 		sc->sc_ctrl &= ~CTRL_VME;
3927 
3928 	/* Write the control registers. */
3929 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3930 
3931 	if (sc->sc_flags & WM_F_HAS_MII) {
3932 		int val;
3933 
3934 		switch (sc->sc_type) {
3935 		case WM_T_80003:
3936 		case WM_T_ICH8:
3937 		case WM_T_ICH9:
3938 		case WM_T_ICH10:
3939 		case WM_T_PCH:
3940 			/*
3941 			 * Set the mac to wait the maximum time between each
3942 			 * iteration and increase the max iterations when
3943 			 * polling the phy; this fixes erroneous timeouts at
3944 			 * 10Mbps.
3945 			 */
3946 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
3947 			    0xFFFF);
3948 			val = wm_kmrn_readreg(sc,
3949 			    KUMCTRLSTA_OFFSET_INB_PARAM);
3950 			val |= 0x3F;
3951 			wm_kmrn_writereg(sc,
3952 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
3953 			break;
3954 		default:
3955 			break;
3956 		}
3957 
3958 		if (sc->sc_type == WM_T_80003) {
3959 			val = CSR_READ(sc, WMREG_CTRL_EXT);
3960 			val &= ~CTRL_EXT_LINK_MODE_MASK;
3961 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3962 
3963 			/* Bypass RX and TX FIFO's */
3964 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3965 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
3966 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3967 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3968 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3969 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3970 		}
3971 	}
3972 #if 0
3973 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3974 #endif
3975 
3976 	/*
3977 	 * Set up checksum offload parameters.
3978 	 */
3979 	reg = CSR_READ(sc, WMREG_RXCSUM);
3980 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3981 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3982 		reg |= RXCSUM_IPOFL;
3983 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3984 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3985 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3986 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3987 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
3988 
3989 	/* Reset TBI's RXCFG count */
3990 	sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
3991 
3992 	/*
3993 	 * Set up the interrupt registers.
3994 	 */
3995 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3996 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3997 	    ICR_RXO | ICR_RXT0;
3998 	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
3999 		sc->sc_icr |= ICR_RXCFG;
4000 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4001 
4002 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4003 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)) {
4004 		reg = CSR_READ(sc, WMREG_KABGTXD);
4005 		reg |= KABGTXD_BGSQLBIAS;
4006 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
4007 	}
4008 
4009 	/* Set up the inter-packet gap. */
4010 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4011 
4012 	if (sc->sc_type >= WM_T_82543) {
4013 		/*
4014 		 * Set up the interrupt throttling register (units of 256ns)
4015 		 * Note that a footnote in Intel's documentation says this
4016 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4017 		 * or 10Mbit mode.  Empirically, this also appears to be
4018 		 * true for the 1024ns units of the other
4019 		 * interrupt-related timer registers -- so, really, we ought
4020 		 * to divide this value by 4 when the link speed is low.
4021 		 *
4022 		 * XXX implement this division at link speed change!
4023 		 */
4024 
4025 		 /*
4026 		  * For N interrupts/sec, set this value to:
4027 		  * 1000000000 / (N * 256).  Note that we set the
4028 		  * absolute and packet timer values to this value
4029 		  * divided by 4 to get "simple timer" behavior.
4030 		  */
4031 
4032 		sc->sc_itr = 1500;		/* 2604 ints/sec */
4033 		CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4034 	}
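
	/*
	 * Worked example of the formula above: with the default sc_itr of
	 * 1500, N = 1000000000 / (1500 * 256) ~= 2604 interrupts/sec,
	 * matching the comment at the assignment.
	 */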
4035 
4036 	/* Set the VLAN ethernetype. */
4037 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4038 
4039 	/*
4040 	 * Set up the transmit control register; we start out with
4041 	 * a collision distance suitable for FDX, but update it when
4042 	 * we resolve the media type.
4043 	 */
4044 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4045 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
4046 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4047 	if (sc->sc_type >= WM_T_82571)
4048 		sc->sc_tctl |= TCTL_MULR;
4049 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4050 
4051 	if (sc->sc_type == WM_T_80003) {
4052 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
4053 		reg &= ~TCTL_EXT_GCEX_MASK;
4054 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4055 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4056 	}
4057 
4058 	/* Set the media. */
4059 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4060 		goto out;
4061 
4062 	/* Configure for OS presence */
4063 	wm_init_manageability(sc);
4064 
4065 	/*
4066 	 * Set up the receive control register; we actually program
4067 	 * the register when we set the receive filter.  Use multicast
4068 	 * address offset type 0.
4069 	 *
4070 	 * Only the i82544 has the ability to strip the incoming
4071 	 * CRC, so we don't enable that feature.
4072 	 */
4073 	sc->sc_mchash_type = 0;
4074 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4075 	    | RCTL_MO(sc->sc_mchash_type);
4076 
4077 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4078 	    && (ifp->if_mtu > ETHERMTU)) {
4079 		sc->sc_rctl |= RCTL_LPE;
4080 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4081 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4082 	}
4083 
4084 	if (MCLBYTES == 2048) {
4085 		sc->sc_rctl |= RCTL_2k;
4086 	} else {
4087 		if (sc->sc_type >= WM_T_82543) {
4088 			switch (MCLBYTES) {
4089 			case 4096:
4090 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4091 				break;
4092 			case 8192:
4093 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4094 				break;
4095 			case 16384:
4096 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4097 				break;
4098 			default:
4099 				panic("wm_init: MCLBYTES %d unsupported",
4100 				    MCLBYTES);
4101 				break;
4102 			}
4103 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
4104 	}
4105 
4106 	/* Set the receive filter. */
4107 	wm_set_filter(sc);
4108 
4109 	/* On 82575 and later, set RDT only if RX is enabled... */
4110 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4111 		for (i = 0; i < WM_NRXDESC; i++)
4112 			WM_INIT_RXDESC(sc, i);
4113 
4114 	/* Start the one second link check clock. */
4115 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4116 
4117 	/* ...all done! */
4118 	ifp->if_flags |= IFF_RUNNING;
4119 	ifp->if_flags &= ~IFF_OACTIVE;
4120 
4121  out:
4122 	if (error)
4123 		log(LOG_ERR, "%s: interface not running\n",
4124 		    device_xname(sc->sc_dev));
4125 	return error;
4126 }
4127 
4128 /*
4129  * wm_rxdrain:
4130  *
4131  *	Drain the receive queue.
4132  */
4133 static void
4134 wm_rxdrain(struct wm_softc *sc)
4135 {
4136 	struct wm_rxsoft *rxs;
4137 	int i;
4138 
4139 	for (i = 0; i < WM_NRXDESC; i++) {
4140 		rxs = &sc->sc_rxsoft[i];
4141 		if (rxs->rxs_mbuf != NULL) {
4142 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4143 			m_freem(rxs->rxs_mbuf);
4144 			rxs->rxs_mbuf = NULL;
4145 		}
4146 	}
4147 }
4148 
4149 /*
4150  * wm_stop:		[ifnet interface function]
4151  *
4152  *	Stop transmission on the interface.
4153  */
4154 static void
4155 wm_stop(struct ifnet *ifp, int disable)
4156 {
4157 	struct wm_softc *sc = ifp->if_softc;
4158 	struct wm_txsoft *txs;
4159 	int i;
4160 
4161 	/* Stop the one second clock. */
4162 	callout_stop(&sc->sc_tick_ch);
4163 
4164 	/* Stop the 82547 Tx FIFO stall check timer. */
4165 	if (sc->sc_type == WM_T_82547)
4166 		callout_stop(&sc->sc_txfifo_ch);
4167 
4168 	if (sc->sc_flags & WM_F_HAS_MII) {
4169 		/* Down the MII. */
4170 		mii_down(&sc->sc_mii);
4171 	} else {
4172 #if 0
4173 		/* Should we clear PHY's status properly? */
4174 		wm_reset(sc);
4175 #endif
4176 	}
4177 
4178 	/* Stop the transmit and receive processes. */
4179 	CSR_WRITE(sc, WMREG_TCTL, 0);
4180 	CSR_WRITE(sc, WMREG_RCTL, 0);
4181 	sc->sc_rctl &= ~RCTL_EN;
4182 
4183 	/*
4184 	 * Clear the interrupt mask to ensure the device cannot assert its
4185 	 * interrupt line.
4186 	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4187 	 * any currently pending or shared interrupt.
4188 	 */
4189 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4190 	sc->sc_icr = 0;
4191 
4192 	/* Release any queued transmit buffers. */
4193 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4194 		txs = &sc->sc_txsoft[i];
4195 		if (txs->txs_mbuf != NULL) {
4196 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4197 			m_freem(txs->txs_mbuf);
4198 			txs->txs_mbuf = NULL;
4199 		}
4200 	}
4201 
4202 	/* Mark the interface as down and cancel the watchdog timer. */
4203 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4204 	ifp->if_timer = 0;
4205 
4206 	if (disable)
4207 		wm_rxdrain(sc);
4208 
4209 #if 0 /* notyet */
4210 	if (sc->sc_type >= WM_T_82544)
4211 		CSR_WRITE(sc, WMREG_WUC, 0);
4212 #endif
4213 }
4214 
4215 void
4216 wm_get_auto_rd_done(struct wm_softc *sc)
4217 {
4218 	int i;
4219 
4220 	/* wait for eeprom to reload */
4221 	switch (sc->sc_type) {
4222 	case WM_T_82571:
4223 	case WM_T_82572:
4224 	case WM_T_82573:
4225 	case WM_T_82574:
4226 	case WM_T_82583:
4227 	case WM_T_82575:
4228 	case WM_T_82576:
4229 	case WM_T_80003:
4230 	case WM_T_ICH8:
4231 	case WM_T_ICH9:
4232 		for (i = 0; i < 10; i++) {
4233 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4234 				break;
4235 			delay(1000);
4236 		}
4237 		if (i == 10) {
4238 			log(LOG_ERR, "%s: auto read from eeprom failed to "
4239 			    "complete\n", device_xname(sc->sc_dev));
4240 		}
4241 		break;
4242 	default:
4243 		break;
4244 	}
4245 }
4246 
4247 void
4248 wm_lan_init_done(struct wm_softc *sc)
4249 {
4250 	uint32_t reg = 0;
4251 	int i;
4252 
4253 	/* Wait for the LAN initialization to complete */
4254 	switch (sc->sc_type) {
4255 	case WM_T_ICH10:
4256 	case WM_T_PCH:
4257 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4258 			reg = CSR_READ(sc, WMREG_STATUS);
4259 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
4260 				break;
4261 			delay(100);
4262 		}
4263 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4264 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
4265 			    "complete\n", device_xname(sc->sc_dev), __func__);
4266 		}
4267 		break;
4268 	default:
4269 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4270 		    __func__);
4271 		break;
4272 	}
4273 
4274 	reg &= ~STATUS_LAN_INIT_DONE;
4275 	CSR_WRITE(sc, WMREG_STATUS, reg);
4276 }
4277 
4278 void
4279 wm_get_cfg_done(struct wm_softc *sc)
4280 {
4281 	int mask;
4282 	uint32_t reg;
4283 	int i;
4284 
4285 	/* Wait for the post-reset configuration to complete */
4286 	switch (sc->sc_type) {
4287 	case WM_T_82542_2_0:
4288 	case WM_T_82542_2_1:
4289 		/* null */
4290 		break;
4291 	case WM_T_82543:
4292 	case WM_T_82544:
4293 	case WM_T_82540:
4294 	case WM_T_82545:
4295 	case WM_T_82545_3:
4296 	case WM_T_82546:
4297 	case WM_T_82546_3:
4298 	case WM_T_82541:
4299 	case WM_T_82541_2:
4300 	case WM_T_82547:
4301 	case WM_T_82547_2:
4302 	case WM_T_82573:
4303 	case WM_T_82574:
4304 	case WM_T_82583:
4305 		/* generic */
4306 		delay(10*1000);
4307 		break;
4308 	case WM_T_80003:
4309 	case WM_T_82571:
4310 	case WM_T_82572:
4311 	case WM_T_82575:
4312 	case WM_T_82576:
4313 	case WM_T_82580:
4314 		mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
4315 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
4316 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
4317 				break;
4318 			delay(1000);
4319 		}
4320 		if (i >= WM_PHY_CFG_TIMEOUT) {
4321 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
4322 				device_xname(sc->sc_dev), __func__));
4323 		}
4324 		break;
4325 	case WM_T_ICH8:
4326 	case WM_T_ICH9:
4327 	case WM_T_ICH10:
4328 	case WM_T_PCH:
4329 		if (sc->sc_type >= WM_T_PCH) {
4330 			reg = CSR_READ(sc, WMREG_STATUS);
4331 			if ((reg & STATUS_PHYRA) != 0)
4332 				CSR_WRITE(sc, WMREG_STATUS,
4333 				    reg & ~STATUS_PHYRA);
4334 		}
4335 		delay(10*1000);
4336 		break;
4337 	default:
4338 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4339 		    __func__);
4340 		break;
4341 	}
4342 }
4343 
4344 /*
4345  * wm_acquire_eeprom:
4346  *
4347  *	Perform the EEPROM handshake required on some chips.
4348  */
4349 static int
4350 wm_acquire_eeprom(struct wm_softc *sc)
4351 {
4352 	uint32_t reg;
4353 	int x;
4354 	int ret = 0;
4355 
4356 	/* Always succeeds: flash needs no handshake */
4357 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
4358 		return 0;
4359 
4360 	if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
4361 		ret = wm_get_swfwhw_semaphore(sc);
4362 	} else if (sc->sc_flags & WM_F_SWFW_SYNC) {
4363 		/* this will also do wm_get_swsm_semaphore() if needed */
4364 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
4365 	} else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
4366 		ret = wm_get_swsm_semaphore(sc);
4367 	}
4368 
4369 	if (ret) {
4370 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4371 			__func__);
4372 		return 1;
4373 	}
4374 
4375 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
4376 		reg = CSR_READ(sc, WMREG_EECD);
4377 
4378 		/* Request EEPROM access. */
4379 		reg |= EECD_EE_REQ;
4380 		CSR_WRITE(sc, WMREG_EECD, reg);
4381 
4382 		/* ...and wait for it to be granted. */
4383 		for (x = 0; x < 1000; x++) {
4384 			reg = CSR_READ(sc, WMREG_EECD);
4385 			if (reg & EECD_EE_GNT)
4386 				break;
4387 			delay(5);
4388 		}
4389 		if ((reg & EECD_EE_GNT) == 0) {
4390 			aprint_error_dev(sc->sc_dev,
4391 			    "could not acquire EEPROM GNT\n");
4392 			reg &= ~EECD_EE_REQ;
4393 			CSR_WRITE(sc, WMREG_EECD, reg);
4394 			if (sc->sc_flags & WM_F_SWFWHW_SYNC)
4395 				wm_put_swfwhw_semaphore(sc);
4396 			if (sc->sc_flags & WM_F_SWFW_SYNC)
4397 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
4398 			else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4399 				wm_put_swsm_semaphore(sc);
4400 			return 1;
4401 		}
4402 	}
4403 
4404 	return 0;
4405 }
4406 
4407 /*
4408  * wm_release_eeprom:
4409  *
4410  *	Release the EEPROM mutex.
4411  */
4412 static void
4413 wm_release_eeprom(struct wm_softc *sc)
4414 {
4415 	uint32_t reg;
4416 
4417 	/* Nothing to release: flash needs no handshake */
4418 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
4419 		return;
4420 
4421 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
4422 		reg = CSR_READ(sc, WMREG_EECD);
4423 		reg &= ~EECD_EE_REQ;
4424 		CSR_WRITE(sc, WMREG_EECD, reg);
4425 	}
4426 
4427 	if (sc->sc_flags & WM_F_SWFWHW_SYNC)
4428 		wm_put_swfwhw_semaphore(sc);
4429 	if (sc->sc_flags & WM_F_SWFW_SYNC)
4430 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
4431 	else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4432 		wm_put_swsm_semaphore(sc);
4433 }
4434 
4435 /*
4436  * wm_eeprom_sendbits:
4437  *
4438  *	Send a series of bits to the EEPROM.
4439  */
4440 static void
4441 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
4442 {
4443 	uint32_t reg;
4444 	int x;
4445 
4446 	reg = CSR_READ(sc, WMREG_EECD);
4447 
4448 	for (x = nbits; x > 0; x--) {
4449 		if (bits & (1U << (x - 1)))
4450 			reg |= EECD_DI;
4451 		else
4452 			reg &= ~EECD_DI;
4453 		CSR_WRITE(sc, WMREG_EECD, reg);
4454 		delay(2);
4455 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4456 		delay(2);
4457 		CSR_WRITE(sc, WMREG_EECD, reg);
4458 		delay(2);
4459 	}
4460 }
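
/*
 * Timing sketch of the loop above, for one bit: DI is set up first, then
 * SK is pulsed high and back low with ~2us around each edge, so the
 * serial EEPROM samples DI on the rising edge of SK.
 */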
4461 
4462 /*
4463  * wm_eeprom_recvbits:
4464  *
4465  *	Receive a series of bits from the EEPROM.
4466  */
4467 static void
4468 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
4469 {
4470 	uint32_t reg, val;
4471 	int x;
4472 
4473 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
4474 
4475 	val = 0;
4476 	for (x = nbits; x > 0; x--) {
4477 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4478 		delay(2);
4479 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
4480 			val |= (1U << (x - 1));
4481 		CSR_WRITE(sc, WMREG_EECD, reg);
4482 		delay(2);
4483 	}
4484 	*valp = val;
4485 }
4486 
4487 /*
4488  * wm_read_eeprom_uwire:
4489  *
4490  *	Read a word from the EEPROM using the MicroWire protocol.
4491  */
4492 static int
4493 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4494 {
4495 	uint32_t reg, val;
4496 	int i;
4497 
4498 	for (i = 0; i < wordcnt; i++) {
4499 		/* Clear SK and DI. */
4500 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
4501 		CSR_WRITE(sc, WMREG_EECD, reg);
4502 
4503 		/* Set CHIP SELECT. */
4504 		reg |= EECD_CS;
4505 		CSR_WRITE(sc, WMREG_EECD, reg);
4506 		delay(2);
4507 
4508 		/* Shift in the READ command. */
4509 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
4510 
4511 		/* Shift in address. */
4512 		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
4513 
4514 		/* Shift out the data. */
4515 		wm_eeprom_recvbits(sc, &val, 16);
4516 		data[i] = val & 0xffff;
4517 
4518 		/* Clear CHIP SELECT. */
4519 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
4520 		CSR_WRITE(sc, WMREG_EECD, reg);
4521 		delay(2);
4522 	}
4523 
4524 	return 0;
4525 }
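
/*
 * Usage sketch (illustrative only; real callers go through
 * wm_read_eeprom(), which handles acquisition and protocol selection):
 * read three 16-bit words starting at word 0.  The offset and count here
 * are arbitrary examples, not a defined NVM layout.
 */
#if 0
	uint16_t ee[3];

	if (wm_read_eeprom_uwire(sc, 0, 3, ee) == 0)
		aprint_debug_dev(sc->sc_dev, "words 0-2: %04x %04x %04x\n",
		    ee[0], ee[1], ee[2]);
#endif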
4526 
4527 /*
4528  * wm_spi_eeprom_ready:
4529  *
4530  *	Wait for a SPI EEPROM to be ready for commands.
4531  */
4532 static int
4533 wm_spi_eeprom_ready(struct wm_softc *sc)
4534 {
4535 	uint32_t val;
4536 	int usec;
4537 
4538 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
4539 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
4540 		wm_eeprom_recvbits(sc, &val, 8);
4541 		if ((val & SPI_SR_RDY) == 0)
4542 			break;
4543 	}
4544 	if (usec >= SPI_MAX_RETRIES) {
4545 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
4546 		return 1;
4547 	}
4548 	return 0;
4549 }
4550 
4551 /*
4552  * wm_read_eeprom_spi:
4553  *
4554  *	Read a word from the EEPROM using the SPI protocol.
4555  */
4556 static int
4557 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4558 {
4559 	uint32_t reg, val;
4560 	int i;
4561 	uint8_t opc;
4562 
4563 	/* Clear SK and CS. */
4564 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
4565 	CSR_WRITE(sc, WMREG_EECD, reg);
4566 	delay(2);
4567 
4568 	if (wm_spi_eeprom_ready(sc))
4569 		return 1;
4570 
4571 	/* Toggle CS to flush commands. */
4572 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
4573 	delay(2);
4574 	CSR_WRITE(sc, WMREG_EECD, reg);
4575 	delay(2);
4576 
4577 	opc = SPI_OPC_READ;
4578 	if (sc->sc_ee_addrbits == 8 && word >= 128)
4579 		opc |= SPI_OPC_A8;
4580 
4581 	wm_eeprom_sendbits(sc, opc, 8);
4582 	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
4583 
4584 	for (i = 0; i < wordcnt; i++) {
4585 		wm_eeprom_recvbits(sc, &val, 16);
4586 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
4587 	}
4588 
4589 	/* Raise CS and clear SK. */
4590 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
4591 	CSR_WRITE(sc, WMREG_EECD, reg);
4592 	delay(2);
4593 
4594 	return 0;
4595 }
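
/*
 * The byte swap above converts each word to host order, since the SPI
 * part shifts the high byte out first: a wire value of 0x34 followed by
 * 0x12 is received as val = 0x3412 and stored as data[i] = 0x1234.
 */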
4596 
4597 #define EEPROM_CHECKSUM		0xBABA
4598 #define EEPROM_SIZE		0x0040
4599 
4600 /*
4601  * wm_validate_eeprom_checksum:
4602  *
4603  * The checksum is defined as the sum of the first 64 (16 bit) words.
4604  */
4605 static int
4606 wm_validate_eeprom_checksum(struct wm_softc *sc)
4607 {
4608 	uint16_t checksum;
4609 	uint16_t eeprom_data;
4610 	int i;
4611 
4612 	checksum = 0;
4613 
4614 	for (i = 0; i < EEPROM_SIZE; i++) {
4615 		if (wm_read_eeprom(sc, i, 1, &eeprom_data))
4616 			return 1;
4617 		checksum += eeprom_data;
4618 	}
4619 
4620 	if (checksum != (uint16_t) EEPROM_CHECKSUM)
4621 		return 1;
4622 
4623 	return 0;
4624 }
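
/*
 * Example: the NVM is programmed so that the final word (offset 0x3F)
 * brings the 16-bit sum of words 0x00-0x3F to 0xBABA.  A minimal sketch
 * of computing that checksum word (error handling omitted):
 */
#if 0
	uint16_t sum = 0, word;
	int off;

	for (off = 0; off < EEPROM_SIZE - 1; off++) {
		wm_read_eeprom(sc, off, 1, &word);
		sum += word;
	}
	word = EEPROM_CHECKSUM - sum;	/* value for offset 0x3F */
#endif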
4625 
4626 /*
4627  * wm_read_eeprom:
4628  *
4629  *	Read data from the serial EEPROM.
4630  */
4631 static int
4632 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4633 {
4634 	int rv;
4635 
4636 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
4637 		return 1;
4638 
4639 	if (wm_acquire_eeprom(sc))
4640 		return 1;
4641 
4642 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4643 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4644 		rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
4645 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
4646 		rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
4647 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
4648 		rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
4649 	else
4650 		rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
4651 
4652 	wm_release_eeprom(sc);
4653 	return rv;
4654 }
4655 
4656 static int
4657 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
4658     uint16_t *data)
4659 {
4660 	int i, eerd = 0;
4661 	int error = 0;
4662 
4663 	for (i = 0; i < wordcnt; i++) {
4664 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
4665 
4666 		CSR_WRITE(sc, WMREG_EERD, eerd);
4667 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
4668 		if (error != 0)
4669 			break;
4670 
4671 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
4672 	}
4673 
4674 	return error;
4675 }
4676 
4677 static int
4678 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
4679 {
4680 	uint32_t attempts = 100000;
4681 	uint32_t i, reg = 0;
4682 	int32_t done = -1;
4683 
4684 	for (i = 0; i < attempts; i++) {
4685 		reg = CSR_READ(sc, rw);
4686 
4687 		if (reg & EERD_DONE) {
4688 			done = 0;
4689 			break;
4690 		}
4691 		delay(5);
4692 	}
4693 
4694 	return done;
4695 }
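
/*
 * The poll above allows up to 100000 iterations with a 5us pause, i.e.
 * roughly half a second for the EERD/EEWR DONE bit to appear before
 * giving up and returning -1.
 */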
4696 
4697 /*
4698  * wm_add_rxbuf:
4699  *
4700  *	Add a receive buffer to the indicated descriptor.
4701  */
4702 static int
4703 wm_add_rxbuf(struct wm_softc *sc, int idx)
4704 {
4705 	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
4706 	struct mbuf *m;
4707 	int error;
4708 
4709 	MGETHDR(m, M_DONTWAIT, MT_DATA);
4710 	if (m == NULL)
4711 		return ENOBUFS;
4712 
4713 	MCLGET(m, M_DONTWAIT);
4714 	if ((m->m_flags & M_EXT) == 0) {
4715 		m_freem(m);
4716 		return ENOBUFS;
4717 	}
4718 
4719 	if (rxs->rxs_mbuf != NULL)
4720 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4721 
4722 	rxs->rxs_mbuf = m;
4723 
4724 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4725 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4726 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
4727 	if (error) {
4728 		/* XXX XXX XXX */
4729 		aprint_error_dev(sc->sc_dev,
4730 		    "unable to load rx DMA map %d, error = %d\n",
4731 		    idx, error);
4732 		panic("wm_add_rxbuf");
4733 	}
4734 
4735 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4736 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4737 
4738 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4739 		if ((sc->sc_rctl & RCTL_EN) != 0)
4740 			WM_INIT_RXDESC(sc, idx);
4741 	} else
4742 		WM_INIT_RXDESC(sc, idx);
4743 
4744 	return 0;
4745 }
4746 
4747 /*
4748  * wm_set_ral:
4749  *
4750  *	Set an entry in the receive address list.
4751  */
4752 static void
4753 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
4754 {
4755 	uint32_t ral_lo, ral_hi;
4756 
4757 	if (enaddr != NULL) {
4758 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
4759 		    (enaddr[3] << 24);
4760 		ral_hi = enaddr[4] | (enaddr[5] << 8);
4761 		ral_hi |= RAL_AV;
4762 	} else {
4763 		ral_lo = 0;
4764 		ral_hi = 0;
4765 	}
4766 
4767 	if (sc->sc_type >= WM_T_82544) {
4768 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
4769 		    ral_lo);
4770 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
4771 		    ral_hi);
4772 	} else {
4773 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
4774 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
4775 	}
4776 }
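
/*
 * Worked example: for the station address 00:11:22:33:44:55 the code
 * above produces ral_lo = 0x33221100 and ral_hi = 0x5544 | RAL_AV, i.e.
 * the address is packed little-endian with the address-valid bit in the
 * high register.
 */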
4777 
4778 /*
4779  * wm_mchash:
4780  *
4781  *	Compute the hash of the multicast address for the 4096-bit
4782  *	multicast filter (1024-bit on ICH and PCH chips).
4783  */
4784 static uint32_t
4785 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
4786 {
4787 	static const int lo_shift[4] = { 4, 3, 2, 0 };
4788 	static const int hi_shift[4] = { 4, 5, 6, 8 };
4789 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
4790 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
4791 	uint32_t hash;
4792 
4793 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4794 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)) {
4795 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
4796 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
4797 		return (hash & 0x3ff);
4798 	}
4799 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
4800 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
4801 
4802 	return (hash & 0xfff);
4803 }
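
/*
 * Worked example for the default sc_mchash_type of 0 on a non-ICH chip:
 * an address ending in ...:5a:bc hashes to
 * (0x5a >> 4) | (0xbc << 4) = 0xbc5.  wm_set_filter() then selects bit 5
 * (0xbc5 & 0x1f) of MTA register 0x5e (0xbc5 >> 5).
 */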
4804 
4805 /*
4806  * wm_set_filter:
4807  *
4808  *	Set up the receive filter.
4809  */
4810 static void
4811 wm_set_filter(struct wm_softc *sc)
4812 {
4813 	struct ethercom *ec = &sc->sc_ethercom;
4814 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4815 	struct ether_multi *enm;
4816 	struct ether_multistep step;
4817 	bus_addr_t mta_reg;
4818 	uint32_t hash, reg, bit;
4819 	int i, size;
4820 
4821 	if (sc->sc_type >= WM_T_82544)
4822 		mta_reg = WMREG_CORDOVA_MTA;
4823 	else
4824 		mta_reg = WMREG_MTA;
4825 
4826 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
4827 
4828 	if (ifp->if_flags & IFF_BROADCAST)
4829 		sc->sc_rctl |= RCTL_BAM;
4830 	if (ifp->if_flags & IFF_PROMISC) {
4831 		sc->sc_rctl |= RCTL_UPE;
4832 		goto allmulti;
4833 	}
4834 
4835 	/*
4836 	 * Set the station address in the first RAL slot, and
4837 	 * clear the remaining slots.
4838 	 */
4839 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4840 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4841 		size = WM_ICH8_RAL_TABSIZE;
4842 	else
4843 		size = WM_RAL_TABSIZE;
4844 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
4845 	for (i = 1; i < size; i++)
4846 		wm_set_ral(sc, NULL, i);
4847 
4848 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4849 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4850 		size = WM_ICH8_MC_TABSIZE;
4851 	else
4852 		size = WM_MC_TABSIZE;
4853 	/* Clear out the multicast table. */
4854 	for (i = 0; i < size; i++)
4855 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
4856 
4857 	ETHER_FIRST_MULTI(step, ec, enm);
4858 	while (enm != NULL) {
4859 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
4860 			/*
4861 			 * We must listen to a range of multicast addresses.
4862 			 * For now, just accept all multicasts, rather than
4863 			 * trying to set only those filter bits needed to match
4864 			 * the range.  (At this time, the only use of address
4865 			 * ranges is for IP multicast routing, for which the
4866 			 * range is big enough to require all bits set.)
4867 			 */
4868 			goto allmulti;
4869 		}
4870 
4871 		hash = wm_mchash(sc, enm->enm_addrlo);
4872 
4873 		reg = (hash >> 5);
4874 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4875 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4876 			reg &= 0x1f;
4877 		else
4878 			reg &= 0x7f;
4879 		bit = hash & 0x1f;
4880 
4881 		hash = CSR_READ(sc, mta_reg + (reg << 2));
4882 		hash |= 1U << bit;
4883 
4884 		/* XXX Hardware bug?? */
4885 		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
4886 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
4887 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4888 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
4889 		} else
4890 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4891 
4892 		ETHER_NEXT_MULTI(step, enm);
4893 	}
4894 
4895 	ifp->if_flags &= ~IFF_ALLMULTI;
4896 	goto setit;
4897 
4898  allmulti:
4899 	ifp->if_flags |= IFF_ALLMULTI;
4900 	sc->sc_rctl |= RCTL_MPE;
4901 
4902  setit:
4903 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
4904 }
4905 
4906 /*
4907  * wm_tbi_mediainit:
4908  *
4909  *	Initialize media for use on 1000BASE-X devices.
4910  */
4911 static void
4912 wm_tbi_mediainit(struct wm_softc *sc)
4913 {
4914 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4915 	const char *sep = "";
4916 
4917 	if (sc->sc_type < WM_T_82543)
4918 		sc->sc_tipg = TIPG_WM_DFLT;
4919 	else
4920 		sc->sc_tipg = TIPG_LG_DFLT;
4921 
4922 	sc->sc_tbi_anegticks = 5;
4923 
4924 	/* Initialize our media structures */
4925 	sc->sc_mii.mii_ifp = ifp;
4926 
4927 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
4928 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
4929 	    wm_tbi_mediastatus);
4930 
4931 	/*
4932 	 * SWD Pins:
4933 	 *
4934 	 *	0 = Link LED (output)
4935 	 *	1 = Loss Of Signal (input)
4936 	 */
4937 	sc->sc_ctrl |= CTRL_SWDPIO(0);
4938 	sc->sc_ctrl &= ~CTRL_SWDPIO(1);
4939 
4940 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4941 
4942 #define	ADD(ss, mm, dd)							\
4943 do {									\
4944 	aprint_normal("%s%s", sep, ss);					\
4945 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
4946 	sep = ", ";							\
4947 } while (/*CONSTCOND*/0)
4948 
4949 	aprint_normal_dev(sc->sc_dev, "");
4950 	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
4951 	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
4952 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
4953 	aprint_normal("\n");
4954 
4955 #undef ADD
4956 
4957 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
4958 }
4959 
4960 /*
4961  * wm_tbi_mediastatus:	[ifmedia interface function]
4962  *
4963  *	Get the current interface media status on a 1000BASE-X device.
4964  */
4965 static void
4966 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4967 {
4968 	struct wm_softc *sc = ifp->if_softc;
4969 	uint32_t ctrl, status;
4970 
4971 	ifmr->ifm_status = IFM_AVALID;
4972 	ifmr->ifm_active = IFM_ETHER;
4973 
4974 	status = CSR_READ(sc, WMREG_STATUS);
4975 	if ((status & STATUS_LU) == 0) {
4976 		ifmr->ifm_active |= IFM_NONE;
4977 		return;
4978 	}
4979 
4980 	ifmr->ifm_status |= IFM_ACTIVE;
4981 	ifmr->ifm_active |= IFM_1000_SX;
4982 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
4983 		ifmr->ifm_active |= IFM_FDX;
4984 	ctrl = CSR_READ(sc, WMREG_CTRL);
4985 	if (ctrl & CTRL_RFCE)
4986 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
4987 	if (ctrl & CTRL_TFCE)
4988 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
4989 }
4990 
4991 /*
4992  * wm_tbi_mediachange:	[ifmedia interface function]
4993  *
4994  *	Set hardware to newly-selected media on a 1000BASE-X device.
4995  */
4996 static int
4997 wm_tbi_mediachange(struct ifnet *ifp)
4998 {
4999 	struct wm_softc *sc = ifp->if_softc;
5000 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5001 	uint32_t status;
5002 	int i;
5003 
5004 	sc->sc_txcw = 0;
5005 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
5006 	    (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
5007 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
5008 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5009 		sc->sc_txcw |= TXCW_ANE;
5010 	} else {
5011 		/*
5012 		 * If autonegotiation is turned off, force link up and turn on
5013 		 * full duplex
5014 		 */
5015 		sc->sc_txcw &= ~TXCW_ANE;
5016 		sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
5017 		sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
5018 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5019 		delay(1000);
5020 	}
5021 
5022 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
5023 		    device_xname(sc->sc_dev),sc->sc_txcw));
5024 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5025 	delay(10000);
5026 
5027 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
5028 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
5029 
5030 	/*
5031 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
5032 	 * optics detect a signal, 0 if they don't.
5033 	 */
5034 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
5035 		/* Have signal; wait for the link to come up. */
5036 
5037 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5038 			/*
5039 			 * Reset the link, and let autonegotiation do its thing
5040 			 */
5041 			sc->sc_ctrl |= CTRL_LRST;
5042 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5043 			delay(1000);
5044 			sc->sc_ctrl &= ~CTRL_LRST;
5045 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5046 			delay(1000);
5047 		}
5048 
5049 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
5050 			delay(10000);
5051 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
5052 				break;
5053 		}
5054 
5055 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
5056 			    device_xname(sc->sc_dev),i));
5057 
5058 		status = CSR_READ(sc, WMREG_STATUS);
5059 		DPRINTF(WM_DEBUG_LINK,
5060 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
5061 			device_xname(sc->sc_dev),status, STATUS_LU));
5062 		if (status & STATUS_LU) {
5063 			/* Link is up. */
5064 			DPRINTF(WM_DEBUG_LINK,
5065 			    ("%s: LINK: set media -> link up %s\n",
5066 			    device_xname(sc->sc_dev),
5067 			    (status & STATUS_FD) ? "FDX" : "HDX"));
5068 
5069 			/*
5070 			 * NOTE: CTRL will update TFCE and RFCE automatically,
5071 			 * so we should update sc->sc_ctrl
5072 			 */
5073 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5074 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5075 			sc->sc_fcrtl &= ~FCRTL_XONE;
5076 			if (status & STATUS_FD)
5077 				sc->sc_tctl |=
5078 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5079 			else
5080 				sc->sc_tctl |=
5081 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5082 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
5083 				sc->sc_fcrtl |= FCRTL_XONE;
5084 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5085 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
5086 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
5087 				      sc->sc_fcrtl);
5088 			sc->sc_tbi_linkup = 1;
5089 		} else {
5090 			if (i == WM_LINKUP_TIMEOUT)
5091 				wm_check_for_link(sc);
5092 			/* Link is down. */
5093 			DPRINTF(WM_DEBUG_LINK,
5094 			    ("%s: LINK: set media -> link down\n",
5095 			    device_xname(sc->sc_dev)));
5096 			sc->sc_tbi_linkup = 0;
5097 		}
5098 	} else {
5099 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
5100 		    device_xname(sc->sc_dev)));
5101 		sc->sc_tbi_linkup = 0;
5102 	}
5103 
5104 	wm_tbi_set_linkled(sc);
5105 
5106 	return 0;
5107 }
5108 
5109 /*
5110  * wm_tbi_set_linkled:
5111  *
5112  *	Update the link LED on 1000BASE-X devices.
5113  */
5114 static void
5115 wm_tbi_set_linkled(struct wm_softc *sc)
5116 {
5117 
5118 	if (sc->sc_tbi_linkup)
5119 		sc->sc_ctrl |= CTRL_SWDPIN(0);
5120 	else
5121 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
5122 
5123 	/* 82540 or newer devices are active low */
5124 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
5125 
5126 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5127 }
5128 
5129 /*
5130  * wm_tbi_check_link:
5131  *
5132  *	Check the link on 1000BASE-X devices.
5133  */
5134 static void
5135 wm_tbi_check_link(struct wm_softc *sc)
5136 {
5137 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5138 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5139 	uint32_t rxcw, ctrl, status;
5140 
5141 	status = CSR_READ(sc, WMREG_STATUS);
5142 
5143 	rxcw = CSR_READ(sc, WMREG_RXCW);
5144 	ctrl = CSR_READ(sc, WMREG_CTRL);
5145 
5146 	/* set link status */
5147 	if ((status & STATUS_LU) == 0) {
5148 		DPRINTF(WM_DEBUG_LINK,
5149 		    ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
5150 		sc->sc_tbi_linkup = 0;
5151 	} else if (sc->sc_tbi_linkup == 0) {
5152 		DPRINTF(WM_DEBUG_LINK,
5153 		    ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
5154 		    (status & STATUS_FD) ? "FDX" : "HDX"));
5155 		sc->sc_tbi_linkup = 1;
5156 	}
5157 
5158 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
5159 	    && ((status & STATUS_LU) == 0)) {
5160 		sc->sc_tbi_linkup = 0;
5161 		if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
5162 			/* RXCFG storm! */
5163 			DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
5164 				sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
5165 			wm_init(ifp);
5166 			wm_start(ifp);
5167 		} else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5168 			/* If the timer expired, retry autonegotiation */
5169 			if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
5170 				DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
5171 				sc->sc_tbi_ticks = 0;
5172 				/*
5173 				 * Reset the link, and let autonegotiation do
5174 				 * its thing
5175 				 */
5176 				sc->sc_ctrl |= CTRL_LRST;
5177 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5178 				delay(1000);
5179 				sc->sc_ctrl &= ~CTRL_LRST;
5180 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5181 				delay(1000);
5182 				CSR_WRITE(sc, WMREG_TXCW,
5183 				    sc->sc_txcw & ~TXCW_ANE);
5184 				CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5185 			}
5186 		}
5187 	}
5188 
5189 	wm_tbi_set_linkled(sc);
5190 }
5191 
5192 /*
5193  * wm_gmii_reset:
5194  *
5195  *	Reset the PHY.
5196  */
5197 static void
5198 wm_gmii_reset(struct wm_softc *sc)
5199 {
5200 	uint32_t reg;
5201 	int rv;
5202 
5203 	/* get phy semaphore */
5204 	switch (sc->sc_type) {
5205 	case WM_T_82571:
5206 	case WM_T_82572:
5207 	case WM_T_82573:
5208 	case WM_T_82574:
5209 	case WM_T_82583:
5210 		 /* XXX should get sw semaphore, too */
5211 		rv = wm_get_swsm_semaphore(sc);
5212 		break;
5213 	case WM_T_82575:
5214 	case WM_T_82576:
5215 	case WM_T_82580:
5216 	case WM_T_82580ER:
5217 	case WM_T_80003:
5218 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5219 		break;
5220 	case WM_T_ICH8:
5221 	case WM_T_ICH9:
5222 	case WM_T_ICH10:
5223 	case WM_T_PCH:
5224 		rv = wm_get_swfwhw_semaphore(sc);
5225 		break;
5226 	default:
5227 		/* nothing to do */
5228 		rv = 0;
5229 		break;
5230 	}
5231 	if (rv != 0) {
5232 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5233 		    __func__);
5234 		return;
5235 	}
5236 
5237 	switch (sc->sc_type) {
5238 	case WM_T_82542_2_0:
5239 	case WM_T_82542_2_1:
5240 		/* null */
5241 		break;
5242 	case WM_T_82543:
5243 		/*
5244 		 * With 82543, we need to force the MAC's speed and duplex
5245 		 * to match the PHY's speed and duplex configuration.
5246 		 * In addition, we need to perform a hardware reset on the PHY
5247 		 * to take it out of reset.
5248 		 */
5249 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5250 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5251 
5252 		/* The PHY reset pin is active-low. */
5253 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
5254 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
5255 		    CTRL_EXT_SWDPIN(4));
5256 		reg |= CTRL_EXT_SWDPIO(4);
5257 
5258 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5259 		delay(10*1000);
5260 
5261 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
5262 		delay(150);
5263 #if 0
5264 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
5265 #endif
5266 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
5267 		break;
5268 	case WM_T_82544:	/* reset 10000us */
5269 	case WM_T_82540:
5270 	case WM_T_82545:
5271 	case WM_T_82545_3:
5272 	case WM_T_82546:
5273 	case WM_T_82546_3:
5274 	case WM_T_82541:
5275 	case WM_T_82541_2:
5276 	case WM_T_82547:
5277 	case WM_T_82547_2:
5278 	case WM_T_82571:	/* reset 100us */
5279 	case WM_T_82572:
5280 	case WM_T_82573:
5281 	case WM_T_82574:
5282 	case WM_T_82575:
5283 	case WM_T_82576:
5284 	case WM_T_82580:
5285 	case WM_T_82580ER:
5286 	case WM_T_82583:
5287 	case WM_T_80003:
5288 		/* generic reset */
5289 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5290 		delay((sc->sc_type >= WM_T_82571) ? 100 : 10*1000);
5291 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5292 		delay(150);
5293 
5294 		if ((sc->sc_type == WM_T_82541)
5295 		    || (sc->sc_type == WM_T_82541_2)
5296 		    || (sc->sc_type == WM_T_82547)
5297 		    || (sc->sc_type == WM_T_82547_2)) {
			/* workarounds for IGP are done in igp_reset() */
5299 			/* XXX add code to set LED after phy reset */
5300 		}
5301 		break;
5302 	case WM_T_ICH8:
5303 	case WM_T_ICH9:
5304 	case WM_T_ICH10:
5305 	case WM_T_PCH:
5306 		/* generic reset */
5307 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5308 		delay(100);
5309 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5310 		delay(150);
5311 		break;
5312 	default:
5313 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
5314 		    __func__);
5315 		break;
5316 	}
5317 
5318 	/* release PHY semaphore */
5319 	switch (sc->sc_type) {
5320 	case WM_T_82571:
5321 	case WM_T_82572:
5322 	case WM_T_82573:
5323 	case WM_T_82574:
5324 	case WM_T_82583:
		 /* XXX should put sw semaphore, too */
5326 		wm_put_swsm_semaphore(sc);
5327 		break;
5328 	case WM_T_82575:
5329 	case WM_T_82576:
5330 	case WM_T_82580:
5331 	case WM_T_82580ER:
5332 	case WM_T_80003:
5333 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5334 		break;
5335 	case WM_T_ICH8:
5336 	case WM_T_ICH9:
5337 	case WM_T_ICH10:
5338 	case WM_T_PCH:
5339 		wm_put_swfwhw_semaphore(sc);
5340 		break;
5341 	default:
		/* nothing to do */
5344 		break;
5345 	}
5346 
5347 	/* get_cfg_done */
5348 	wm_get_cfg_done(sc);
5349 
5350 	/* extra setup */
5351 	switch (sc->sc_type) {
5352 	case WM_T_82542_2_0:
5353 	case WM_T_82542_2_1:
5354 	case WM_T_82543:
5355 	case WM_T_82544:
5356 	case WM_T_82540:
5357 	case WM_T_82545:
5358 	case WM_T_82545_3:
5359 	case WM_T_82546:
5360 	case WM_T_82546_3:
5361 	case WM_T_82541_2:
5362 	case WM_T_82547_2:
5363 	case WM_T_82571:
5364 	case WM_T_82572:
5365 	case WM_T_82573:
5366 	case WM_T_82574:
5367 	case WM_T_82575:
5368 	case WM_T_82576:
5369 	case WM_T_82580:
5370 	case WM_T_82580ER:
5371 	case WM_T_82583:
5372 	case WM_T_80003:
5373 		/* null */
5374 		break;
5375 	case WM_T_82541:
5376 	case WM_T_82547:
		/* XXX Actively configure the LED after PHY reset */
5378 		break;
5379 	case WM_T_ICH8:
5380 	case WM_T_ICH9:
5381 	case WM_T_ICH10:
5382 	case WM_T_PCH:
		/* Allow time for h/w to get to a quiescent state after reset */
5384 		delay(10*1000);
5385 
5386 		if (sc->sc_type == WM_T_PCH) {
5387 			wm_hv_phy_workaround_ich8lan(sc);
5388 
5389 			/*
5390 			 * dummy read to clear the phy wakeup bit after lcd
5391 			 * reset
5392 			 */
5393 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
5394 		}
5395 
5396 		/*
		 * XXX Configure the LCD with the extended configuration region
5398 		 * in NVM
5399 		 */
5400 
5401 		/* Configure the LCD with the OEM bits in NVM */
5402 		if (sc->sc_type == WM_T_PCH) {
5403 			/*
5404 			 * Disable LPLU.
5405 			 * XXX It seems that 82567 has LPLU, too.
5406 			 */
5407 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
5409 			reg |= HV_OEM_BITS_ANEGNOW;
5410 			wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
5411 		}
5412 		break;
5413 	default:
5414 		panic("%s: unknown type\n", __func__);
5415 		break;
5416 	}
5417 }
5418 
5419 /*
5420  * wm_gmii_mediainit:
5421  *
5422  *	Initialize media for use on 1000BASE-T devices.
5423  */
5424 static void
5425 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
5426 {
5427 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5428 
5429 	/* We have MII. */
5430 	sc->sc_flags |= WM_F_HAS_MII;
5431 
5432 	if (sc->sc_type == WM_T_80003)
		sc->sc_tipg = TIPG_1000T_80003_DFLT;
5434 	else
5435 		sc->sc_tipg = TIPG_1000T_DFLT;
5436 
5437 	/*
5438 	 * Let the chip set speed/duplex on its own based on
5439 	 * signals from the PHY.
5440 	 * XXXbouyer - I'm not sure this is right for the 80003,
5441 	 * the em driver only sets CTRL_SLU here - but it seems to work.
5442 	 */
5443 	sc->sc_ctrl |= CTRL_SLU;
5444 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5445 
5446 	/* Initialize our media structures and probe the GMII. */
5447 	sc->sc_mii.mii_ifp = ifp;
5448 
5449 	switch (prodid) {
5450 	case PCI_PRODUCT_INTEL_PCH_M_LM:
5451 	case PCI_PRODUCT_INTEL_PCH_M_LC:
5452 		/* 82577 */
5453 		sc->sc_phytype = WMPHY_82577;
5454 		sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
5455 		sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
5456 		break;
5457 	case PCI_PRODUCT_INTEL_PCH_D_DM:
5458 	case PCI_PRODUCT_INTEL_PCH_D_DC:
5459 		/* 82578 */
5460 		sc->sc_phytype = WMPHY_82578;
5461 		sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
5462 		sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
5463 		break;
5464 	case PCI_PRODUCT_INTEL_82801I_BM:
5465 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
5466 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
5467 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
5468 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
5469 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
5470 		/* 82567 */
5471 		sc->sc_phytype = WMPHY_BM;
5472 		sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
5473 		sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
5474 		break;
5475 	default:
5476 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
5477 			sc->sc_mii.mii_readreg = wm_sgmii_readreg;
5478 			sc->sc_mii.mii_writereg = wm_sgmii_writereg;
5479 		} else if (sc->sc_type >= WM_T_80003) {
5480 			sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
5481 			sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
5482 		} else if (sc->sc_type >= WM_T_82544) {
5483 			sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
5484 			sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
5485 		} else {
5486 			sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
5487 			sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
5488 		}
5489 		break;
5490 	}
5491 	sc->sc_mii.mii_statchg = wm_gmii_statchg;
5492 
5493 	wm_gmii_reset(sc);
5494 
5495 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
5496 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
5497 	    wm_gmii_mediastatus);
5498 
5499 	mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
5500 	    MII_OFFSET_ANY, MIIF_DOPAUSE);
5501 
5502 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
5503 		/* if failed, retry with *_bm_* */
5504 		sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
5505 		sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
5506 
5507 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
5508 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
5509 	}
5510 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
5511 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
5512 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
5513 		sc->sc_phytype = WMPHY_NONE;
5514 	} else {
5515 		/* Check PHY type */
5516 		uint32_t model;
5517 		struct mii_softc *child;
5518 
5519 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
5520 		if (device_is_a(child->mii_dev, "igphy")) {
5521 			struct igphy_softc *isc = (struct igphy_softc *)child;
5522 
5523 			model = isc->sc_mii.mii_mpd_model;
5524 			if (model == MII_MODEL_yyINTEL_I82566)
5525 				sc->sc_phytype = WMPHY_IGP_3;
5526 		}
5527 
5528 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
5529 	}
5530 }
5531 
5532 /*
5533  * wm_gmii_mediastatus:	[ifmedia interface function]
5534  *
5535  *	Get the current interface media status on a 1000BASE-T device.
5536  */
5537 static void
5538 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5539 {
5540 	struct wm_softc *sc = ifp->if_softc;
5541 
5542 	ether_mediastatus(ifp, ifmr);
5543 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
5544 	    | sc->sc_flowflags;
5545 }
5546 
5547 /*
5548  * wm_gmii_mediachange:	[ifmedia interface function]
5549  *
5550  *	Set hardware to newly-selected media on a 1000BASE-T device.
5551  */
5552 static int
5553 wm_gmii_mediachange(struct ifnet *ifp)
5554 {
5555 	struct wm_softc *sc = ifp->if_softc;
5556 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5557 	int rc;
5558 
5559 	if ((ifp->if_flags & IFF_UP) == 0)
5560 		return 0;
5561 
5562 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
5563 	sc->sc_ctrl |= CTRL_SLU;
5564 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
5565 	    || (sc->sc_type > WM_T_82543)) {
5566 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
5567 	} else {
5568 		sc->sc_ctrl &= ~CTRL_ASDE;
5569 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5570 		if (ife->ifm_media & IFM_FDX)
5571 			sc->sc_ctrl |= CTRL_FD;
5572 		switch (IFM_SUBTYPE(ife->ifm_media)) {
5573 		case IFM_10_T:
5574 			sc->sc_ctrl |= CTRL_SPEED_10;
5575 			break;
5576 		case IFM_100_TX:
5577 			sc->sc_ctrl |= CTRL_SPEED_100;
5578 			break;
5579 		case IFM_1000_T:
5580 			sc->sc_ctrl |= CTRL_SPEED_1000;
5581 			break;
5582 		default:
5583 			panic("wm_gmii_mediachange: bad media 0x%x",
5584 			    ife->ifm_media);
5585 		}
5586 	}
5587 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5588 	if (sc->sc_type <= WM_T_82543)
5589 		wm_gmii_reset(sc);
5590 
5591 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
5592 		return 0;
5593 	return rc;
5594 }
5595 
5596 #define	MDI_IO		CTRL_SWDPIN(2)
5597 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
5598 #define	MDI_CLK		CTRL_SWDPIN(3)
5599 
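/*
 * The 82543 has no MDIC register, so MII management frames are bit-banged
 * over software-controlled pins in the CTRL register: MDI_IO carries the
 * data bit, MDI_CLK is the clock, and MDI_DIR drives the pin direction
 * (host -> PHY).  Each bit is placed on MDI_IO and latched by toggling
 * MDI_CLK, with roughly 10us of delay around each edge, presumably to stay
 * within MDIO timing limits.
 */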
5600 static void
5601 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
5602 {
5603 	uint32_t i, v;
5604 
5605 	v = CSR_READ(sc, WMREG_CTRL);
5606 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
5607 	v |= MDI_DIR | CTRL_SWDPIO(3);
5608 
5609 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
5610 		if (data & i)
5611 			v |= MDI_IO;
5612 		else
5613 			v &= ~MDI_IO;
5614 		CSR_WRITE(sc, WMREG_CTRL, v);
5615 		delay(10);
5616 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5617 		delay(10);
5618 		CSR_WRITE(sc, WMREG_CTRL, v);
5619 		delay(10);
5620 	}
5621 }
5622 
5623 static uint32_t
5624 i82543_mii_recvbits(struct wm_softc *sc)
5625 {
5626 	uint32_t v, i, data = 0;
5627 
5628 	v = CSR_READ(sc, WMREG_CTRL);
5629 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
5630 	v |= CTRL_SWDPIO(3);
5631 
5632 	CSR_WRITE(sc, WMREG_CTRL, v);
5633 	delay(10);
5634 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5635 	delay(10);
5636 	CSR_WRITE(sc, WMREG_CTRL, v);
5637 	delay(10);
5638 
5639 	for (i = 0; i < 16; i++) {
5640 		data <<= 1;
5641 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5642 		delay(10);
5643 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
5644 			data |= 1;
5645 		CSR_WRITE(sc, WMREG_CTRL, v);
5646 		delay(10);
5647 	}
5648 
5649 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5650 	delay(10);
5651 	CSR_WRITE(sc, WMREG_CTRL, v);
5652 	delay(10);
5653 
5654 	return data;
5655 }
5656 
5657 #undef MDI_IO
5658 #undef MDI_DIR
5659 #undef MDI_CLK
5660 
5661 /*
5662  * wm_gmii_i82543_readreg:	[mii interface function]
5663  *
5664  *	Read a PHY register on the GMII (i82543 version).
5665  */
5666 static int
5667 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
5668 {
5669 	struct wm_softc *sc = device_private(self);
5670 	int rv;
5671 
5672 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
5673 	i82543_mii_sendbits(sc, reg | (phy << 5) |
5674 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
5675 	rv = i82543_mii_recvbits(sc) & 0xffff;
5676 
5677 	DPRINTF(WM_DEBUG_GMII,
5678 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
5679 	    device_xname(sc->sc_dev), phy, reg, rv));
5680 
5681 	return rv;
5682 }
5683 
5684 /*
5685  * wm_gmii_i82543_writereg:	[mii interface function]
5686  *
5687  *	Write a PHY register on the GMII (i82543 version).
5688  */
5689 static void
5690 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
5691 {
5692 	struct wm_softc *sc = device_private(self);
5693 
5694 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
5695 	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
5696 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
5697 	    (MII_COMMAND_START << 30), 32);
5698 }
5699 
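/*
 * The 82544 and later use the MDIC register instead of bit-banging: the
 * driver writes a command word containing the opcode, PHY address and
 * register address, polls for MDIC_READY (up to WM_GEN_POLL_TIMEOUT * 3
 * iterations at 50us each), and then checks MDIC_E for an access error
 * before extracting the 16-bit data field.
 */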
5700 /*
5701  * wm_gmii_i82544_readreg:	[mii interface function]
5702  *
5703  *	Read a PHY register on the GMII.
5704  */
5705 static int
5706 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
5707 {
5708 	struct wm_softc *sc = device_private(self);
5709 	uint32_t mdic = 0;
5710 	int i, rv;
5711 
5712 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
5713 	    MDIC_REGADD(reg));
5714 
5715 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
5716 		mdic = CSR_READ(sc, WMREG_MDIC);
5717 		if (mdic & MDIC_READY)
5718 			break;
5719 		delay(50);
5720 	}
5721 
5722 	if ((mdic & MDIC_READY) == 0) {
5723 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
5724 		    device_xname(sc->sc_dev), phy, reg);
5725 		rv = 0;
5726 	} else if (mdic & MDIC_E) {
5727 #if 0 /* This is normal if no PHY is present. */
5728 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
5729 		    device_xname(sc->sc_dev), phy, reg);
5730 #endif
5731 		rv = 0;
5732 	} else {
5733 		rv = MDIC_DATA(mdic);
5734 		if (rv == 0xffff)
5735 			rv = 0;
5736 	}
5737 
5738 	return rv;
5739 }
5740 
5741 /*
5742  * wm_gmii_i82544_writereg:	[mii interface function]
5743  *
5744  *	Write a PHY register on the GMII.
5745  */
5746 static void
5747 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
5748 {
5749 	struct wm_softc *sc = device_private(self);
5750 	uint32_t mdic = 0;
5751 	int i;
5752 
5753 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
5754 	    MDIC_REGADD(reg) | MDIC_DATA(val));
5755 
5756 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
5757 		mdic = CSR_READ(sc, WMREG_MDIC);
5758 		if (mdic & MDIC_READY)
5759 			break;
5760 		delay(50);
5761 	}
5762 
5763 	if ((mdic & MDIC_READY) == 0)
5764 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
5765 		    device_xname(sc->sc_dev), phy, reg);
5766 	else if (mdic & MDIC_E)
5767 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
5768 		    device_xname(sc->sc_dev), phy, reg);
5769 }
5770 
5771 /*
5772  * wm_gmii_i80003_readreg:	[mii interface function]
5773  *
 *	Read a PHY register on the Kumeran bus (80003).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
5777  */
5778 static int
5779 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
5780 {
5781 	struct wm_softc *sc = device_private(self);
5782 	int sem;
5783 	int rv;
5784 
5785 	if (phy != 1) /* only one PHY on kumeran bus */
5786 		return 0;
5787 
5788 	sem = swfwphysem[sc->sc_funcid];
5789 	if (wm_get_swfw_semaphore(sc, sem)) {
5790 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5791 		    __func__);
5792 		return 0;
5793 	}
5794 
5795 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
5796 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5797 		    reg >> GG82563_PAGE_SHIFT);
5798 	} else {
5799 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
5800 		    reg >> GG82563_PAGE_SHIFT);
5801 	}
	/* Wait an extra 200us to work around a bug with the ready bit in MDIC */
5803 	delay(200);
5804 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
5805 	delay(200);
5806 
5807 	wm_put_swfw_semaphore(sc, sem);
5808 	return rv;
5809 }
5810 
5811 /*
5812  * wm_gmii_i80003_writereg:	[mii interface function]
5813  *
 *	Write a PHY register on the Kumeran bus (80003).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
5817  */
5818 static void
5819 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
5820 {
5821 	struct wm_softc *sc = device_private(self);
5822 	int sem;
5823 
5824 	if (phy != 1) /* only one PHY on kumeran bus */
5825 		return;
5826 
5827 	sem = swfwphysem[sc->sc_funcid];
5828 	if (wm_get_swfw_semaphore(sc, sem)) {
5829 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5830 		    __func__);
5831 		return;
5832 	}
5833 
5834 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
5835 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5836 		    reg >> GG82563_PAGE_SHIFT);
5837 	} else {
5838 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
5839 		    reg >> GG82563_PAGE_SHIFT);
5840 	}
	/* Wait an extra 200us to work around a bug with the ready bit in MDIC */
5842 	delay(200);
5843 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
5844 	delay(200);
5845 
5846 	wm_put_swfw_semaphore(sc, sem);
5847 }
5848 
5849 /*
5850  * wm_gmii_bm_readreg:	[mii interface function]
5851  *
 *	Read a PHY register on the BM PHY (82567).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
5855  */
5856 static int
5857 wm_gmii_bm_readreg(device_t self, int phy, int reg)
5858 {
5859 	struct wm_softc *sc = device_private(self);
5860 	int sem;
5861 	int rv;
5862 
5863 	sem = swfwphysem[sc->sc_funcid];
5864 	if (wm_get_swfw_semaphore(sc, sem)) {
5865 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5866 		    __func__);
5867 		return 0;
5868 	}
5869 
5870 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
		if (phy == 1)
			wm_gmii_i82544_writereg(self, phy, 0x1f, reg);
		else
			wm_gmii_i82544_writereg(self, phy,
			    GG82563_PHY_PAGE_SELECT,
			    reg >> GG82563_PAGE_SHIFT);
	}
5879 
5880 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
5881 	wm_put_swfw_semaphore(sc, sem);
5882 	return rv;
5883 }
5884 
5885 /*
5886  * wm_gmii_bm_writereg:	[mii interface function]
5887  *
 *	Write a PHY register on the BM PHY (82567).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
5891  */
5892 static void
5893 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
5894 {
5895 	struct wm_softc *sc = device_private(self);
5896 	int sem;
5897 
5898 	sem = swfwphysem[sc->sc_funcid];
5899 	if (wm_get_swfw_semaphore(sc, sem)) {
5900 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5901 		    __func__);
5902 		return;
5903 	}
5904 
5905 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
		if (phy == 1)
			wm_gmii_i82544_writereg(self, phy, 0x1f, reg);
		else
			wm_gmii_i82544_writereg(self, phy,
			    GG82563_PHY_PAGE_SELECT,
			    reg >> GG82563_PAGE_SHIFT);
	}
5914 
5915 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
5916 	wm_put_swfw_semaphore(sc, sem);
5917 }
5918 
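/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read or write a BM PHY wakeup register (page 800).  These registers
 *	are reached indirectly: enable wakeup register access on page 769
 *	(BM_WUC_ENABLE_REG), select page 800, write the register number to
 *	the address opcode register, read or write the data opcode register,
 *	and finally restore the original page 769 enable bits.
 */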
5919 static void
5920 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
5921 {
5922 	struct wm_softc *sc = device_private(self);
5923 	uint16_t regnum = BM_PHY_REG_NUM(offset);
5924 	uint16_t wuce;
5925 
5926 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
5927 	if (sc->sc_type == WM_T_PCH) {
		/* XXX the e1000 driver does nothing here... why? */
5929 	}
5930 
5931 	/* Set page 769 */
5932 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
5933 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
5934 
5935 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
5936 
5937 	wuce &= ~BM_WUC_HOST_WU_BIT;
5938 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
5939 	    wuce | BM_WUC_ENABLE_BIT);
5940 
5941 	/* Select page 800 */
5942 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
5943 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
5944 
5945 	/* Write page 800 */
5946 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
5947 
5948 	if (rd)
5949 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
5950 	else
5951 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
5952 
5953 	/* Set page 769 */
5954 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
5955 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
5956 
5957 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
5958 }
5959 
5960 /*
5961  * wm_gmii_hv_readreg:	[mii interface function]
5962  *
 *	Read a PHY register on the HV PHY (82577/82578).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
5966  */
5967 static int
5968 wm_gmii_hv_readreg(device_t self, int phy, int reg)
5969 {
5970 	struct wm_softc *sc = device_private(self);
5971 	uint16_t page = BM_PHY_REG_PAGE(reg);
5972 	uint16_t regnum = BM_PHY_REG_NUM(reg);
5973 	uint16_t val;
5974 	int rv;
5975 
5976 	if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
5977 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5978 		    __func__);
5979 		return 0;
5980 	}
5981 
5982 	/* XXX Workaround failure in MDIO access while cable is disconnected */
5983 	if (sc->sc_phytype == WMPHY_82577) {
5984 		/* XXX must write */
5985 	}
5986 
	/* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return val;
	}
5992 
	/*
	 * Pages lower than 768 work differently than the rest, so they
	 * would need their own function (not implemented yet).
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_readreg!!!\n");
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return 0;
	}
6001 
6002 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6003 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6004 		    page << BME1000_PAGE_SHIFT);
6005 	}
6006 
6007 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
6008 	wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6009 	return rv;
6010 }
6011 
6012 /*
6013  * wm_gmii_hv_writereg:	[mii interface function]
6014  *
 *	Write a PHY register on the HV PHY (82577/82578).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6018  */
6019 static void
6020 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
6021 {
6022 	struct wm_softc *sc = device_private(self);
6023 	uint16_t page = BM_PHY_REG_PAGE(reg);
6024 	uint16_t regnum = BM_PHY_REG_NUM(reg);
6025 
6026 	if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6027 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6028 		    __func__);
6029 		return;
6030 	}
6031 
6032 	/* XXX Workaround failure in MDIO access while cable is disconnected */
6033 
	/* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		uint16_t tmp;

		tmp = val;
		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return;
	}
6042 
	/*
	 * Pages lower than 768 work differently than the rest, so they
	 * would need their own function (not implemented yet).
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_writereg!!!\n");
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
		return;
	}
6051 
6052 	/*
6053 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
6054 	 * Power Down (whenever bit 11 of the PHY control register is set)
6055 	 */
6056 
6057 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6058 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6059 		    page << BME1000_PAGE_SHIFT);
6060 	}
6061 
6062 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
6063 	wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6064 }
6065 
6066 /*
 * wm_sgmii_readreg:	[mii interface function]
 *
 *	Read a PHY register on the SGMII, using the I2CCMD interface.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6072  */
6073 static int
6074 wm_sgmii_readreg(device_t self, int phy, int reg)
6075 {
6076 	struct wm_softc *sc = device_private(self);
6077 	uint32_t i2ccmd;
6078 	int i, rv;
6079 
6080 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
6081 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6082 		    __func__);
6083 		return 0;
6084 	}
6085 
6086 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
6087 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
6088 	    | I2CCMD_OPCODE_READ;
6089 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
6090 
6091 	/* Poll the ready bit */
6092 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
6093 		delay(50);
6094 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
6095 		if (i2ccmd & I2CCMD_READY)
6096 			break;
6097 	}
6098 	if ((i2ccmd & I2CCMD_READY) == 0)
6099 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
6100 	if ((i2ccmd & I2CCMD_ERROR) != 0)
6101 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
6102 
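	/*
	 * The two data bytes appear to come back in the opposite byte
	 * order, so swap them into host order.
	 */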
6103 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
6104 
6105 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6106 	return rv;
6107 }
6108 
6109 /*
 * wm_sgmii_writereg:	[mii interface function]
 *
 *	Write a PHY register on the SGMII, using the I2CCMD interface.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6115  */
6116 static void
6117 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
6118 {
6119 	struct wm_softc *sc = device_private(self);
6120 	uint32_t i2ccmd;
6121 	int i;
6122 
6123 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
6124 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6125 		    __func__);
6126 		return;
6127 	}
6128 
6129 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
6130 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
6131 	    | I2CCMD_OPCODE_WRITE;
6132 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
6133 
6134 	/* Poll the ready bit */
6135 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
6136 		delay(50);
6137 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
6138 		if (i2ccmd & I2CCMD_READY)
6139 			break;
6140 	}
6141 	if ((i2ccmd & I2CCMD_READY) == 0)
6142 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
6143 	if ((i2ccmd & I2CCMD_ERROR) != 0)
6144 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
6145 
	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6147 }
6148 
6149 /*
6150  * wm_gmii_statchg:	[mii interface function]
6151  *
6152  *	Callback from MII layer when media changes.
6153  */
6154 static void
6155 wm_gmii_statchg(device_t self)
6156 {
6157 	struct wm_softc *sc = device_private(self);
6158 	struct mii_data *mii = &sc->sc_mii;
6159 
6160 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
6161 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6162 	sc->sc_fcrtl &= ~FCRTL_XONE;
6163 
6164 	/*
6165 	 * Get flow control negotiation result.
6166 	 */
6167 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
6168 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
6169 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
6170 		mii->mii_media_active &= ~IFM_ETH_FMASK;
6171 	}
6172 
6173 	if (sc->sc_flowflags & IFM_FLOW) {
6174 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
6175 			sc->sc_ctrl |= CTRL_TFCE;
6176 			sc->sc_fcrtl |= FCRTL_XONE;
6177 		}
6178 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
6179 			sc->sc_ctrl |= CTRL_RFCE;
6180 	}
6181 
6182 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
6183 		DPRINTF(WM_DEBUG_LINK,
6184 		    ("%s: LINK: statchg: FDX\n", device_xname(sc->sc_dev)));
6185 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6186 	} else {
6187 		DPRINTF(WM_DEBUG_LINK,
6188 		    ("%s: LINK: statchg: HDX\n", device_xname(sc->sc_dev)));
6189 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6190 	}
6191 
6192 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6193 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6194 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
6195 						 : WMREG_FCRTL, sc->sc_fcrtl);
6196 	if (sc->sc_type == WM_T_80003) {
6197 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
6198 		case IFM_1000_T:
6199 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
6200 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg = TIPG_1000T_80003_DFLT;
6202 			break;
6203 		default:
6204 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
6205 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg = TIPG_10_100_80003_DFLT;
6207 			break;
6208 		}
6209 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
6210 	}
6211 }
6212 
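/*
 * Kumeran registers are reached through the KUMCTRLSTA window: the register
 * offset goes into the KUMCTRLSTA_OFFSET field, the REN bit requests a
 * read, and after a short delay the data can be read back from the low 16
 * bits of the same register.  Writes place the data in those bits directly.
 */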
6213 /*
6214  * wm_kmrn_readreg:
6215  *
6216  *	Read a kumeran register
6217  */
6218 static int
6219 wm_kmrn_readreg(struct wm_softc *sc, int reg)
6220 {
6221 	int rv;
6222 
	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0) {
6224 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
6225 			aprint_error_dev(sc->sc_dev,
6226 			    "%s: failed to get semaphore\n", __func__);
6227 			return 0;
6228 		}
	} else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0) {
6230 		if (wm_get_swfwhw_semaphore(sc)) {
6231 			aprint_error_dev(sc->sc_dev,
6232 			    "%s: failed to get semaphore\n", __func__);
6233 			return 0;
6234 		}
6235 	}
6236 
6237 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
6238 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
6239 	    KUMCTRLSTA_REN);
6240 	delay(2);
6241 
6242 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
6243 
	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0)
6247 		wm_put_swfwhw_semaphore(sc);
6248 
6249 	return rv;
6250 }
6251 
6252 /*
6253  * wm_kmrn_writereg:
6254  *
6255  *	Write a kumeran register
6256  */
6257 static void
6258 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
6259 {
6260 
	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0) {
6262 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
6263 			aprint_error_dev(sc->sc_dev,
6264 			    "%s: failed to get semaphore\n", __func__);
6265 			return;
6266 		}
	} else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0) {
6268 		if (wm_get_swfwhw_semaphore(sc)) {
6269 			aprint_error_dev(sc->sc_dev,
6270 			    "%s: failed to get semaphore\n", __func__);
6271 			return;
6272 		}
6273 	}
6274 
6275 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
6276 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
6277 	    (val & KUMCTRLSTA_MASK));
6278 
	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0)
6282 		wm_put_swfwhw_semaphore(sc);
6283 }
6284 
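/*
 * wm_is_onboard_nvm_eeprom:
 *
 *	Return 1 if the NVM is an on-board EEPROM, or 0 if it is Flash.
 *	On 82573/82574/82583, bits 15 and 16 of EECD both being set appears
 *	to indicate a Flash-backed NVM.
 */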
6285 static int
6286 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
6287 {
6288 	uint32_t eecd = 0;
6289 
6290 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
6291 	    || sc->sc_type == WM_T_82583) {
6292 		eecd = CSR_READ(sc, WMREG_EECD);
6293 
6294 		/* Isolate bits 15 & 16 */
6295 		eecd = ((eecd >> 15) & 0x03);
6296 
6297 		/* If both bits are set, device is Flash type */
6298 		if (eecd == 0x03)
6299 			return 0;
6300 	}
6301 	return 1;
6302 }
6303 
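/*
 * wm_get_swsm_semaphore:
 *
 *	Try to acquire the software/firmware semaphore in the SWSM register
 *	by setting SWSM_SWESMBI and reading it back: if the bit sticks, we
 *	own the semaphore; if firmware owns it, the write has no effect and
 *	we retry (up to ~1000 times at 50us intervals) before giving up.
 */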
6304 static int
6305 wm_get_swsm_semaphore(struct wm_softc *sc)
6306 {
6307 	int32_t timeout;
6308 	uint32_t swsm;
6309 
6310 	/* Get the FW semaphore. */
6311 	timeout = 1000 + 1; /* XXX */
6312 	while (timeout) {
6313 		swsm = CSR_READ(sc, WMREG_SWSM);
6314 		swsm |= SWSM_SWESMBI;
6315 		CSR_WRITE(sc, WMREG_SWSM, swsm);
6316 		/* if we managed to set the bit we got the semaphore. */
6317 		swsm = CSR_READ(sc, WMREG_SWSM);
6318 		if (swsm & SWSM_SWESMBI)
6319 			break;
6320 
6321 		delay(50);
6322 		timeout--;
6323 	}
6324 
6325 	if (timeout == 0) {
6326 		aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n");
6327 		/* Release semaphores */
6328 		wm_put_swsm_semaphore(sc);
6329 		return 1;
6330 	}
6331 	return 0;
6332 }
6333 
6334 static void
6335 wm_put_swsm_semaphore(struct wm_softc *sc)
6336 {
6337 	uint32_t swsm;
6338 
6339 	swsm = CSR_READ(sc, WMREG_SWSM);
6340 	swsm &= ~(SWSM_SWESMBI);
6341 	CSR_WRITE(sc, WMREG_SWSM, swsm);
6342 }
6343 
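/*
 * wm_get_swfw_semaphore:
 *
 *	Acquire one of the per-resource software/firmware semaphores in the
 *	SW_FW_SYNC register.  Access to SW_FW_SYNC itself is guarded by the
 *	SWSM semaphore where present; a resource is free when neither its
 *	software bit (mask << SWFW_SOFT_SHIFT) nor its firmware bit
 *	(mask << SWFW_FIRM_SHIFT) is set, in which case we claim the
 *	software bit.
 */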
6344 static int
6345 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
6346 {
6347 	uint32_t swfw_sync;
6348 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
6349 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;
6351 
6352 	for (timeout = 0; timeout < 200; timeout++) {
6353 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
6354 			if (wm_get_swsm_semaphore(sc)) {
6355 				aprint_error_dev(sc->sc_dev,
6356 				    "%s: failed to get semaphore\n",
6357 				    __func__);
6358 				return 1;
6359 			}
6360 		}
6361 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
6362 		if ((swfw_sync & (swmask | fwmask)) == 0) {
6363 			swfw_sync |= swmask;
6364 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
6365 			if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6366 				wm_put_swsm_semaphore(sc);
6367 			return 0;
6368 		}
6369 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6370 			wm_put_swsm_semaphore(sc);
6371 		delay(5000);
6372 	}
6373 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
6374 	    device_xname(sc->sc_dev), mask, swfw_sync);
6375 	return 1;
6376 }
6377 
6378 static void
6379 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
6380 {
6381 	uint32_t swfw_sync;
6382 
6383 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
6384 		while (wm_get_swsm_semaphore(sc) != 0)
6385 			continue;
6386 	}
6387 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
6388 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
6389 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
6390 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6391 		wm_put_swsm_semaphore(sc);
6392 }
6393 
6394 static int
6395 wm_get_swfwhw_semaphore(struct wm_softc *sc)
6396 {
6397 	uint32_t ext_ctrl;
	int timeout;
6399 
6400 	for (timeout = 0; timeout < 200; timeout++) {
6401 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6402 		ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
6403 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
6404 
6405 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6406 		if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
6407 			return 0;
6408 		delay(5000);
6409 	}
6410 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
6411 	    device_xname(sc->sc_dev), ext_ctrl);
6412 	return 1;
6413 }
6414 
6415 static void
6416 wm_put_swfwhw_semaphore(struct wm_softc *sc)
6417 {
6418 	uint32_t ext_ctrl;
6419 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6420 	ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
6421 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
6422 }
6423 
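/*
 * wm_valid_nvm_bank_detect_ich8lan:
 *
 *	Determine which of the two ICH8 flash banks holds the valid NVM
 *	image.  Before ICH10, bit 22 of EECD (EECD_SEC1VAL) selects the
 *	bank; on ICH10/PCH the signature byte of each bank is examined
 *	instead, where 0x80 in the top two bits appears to mark the valid
 *	bank.
 */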
6424 static int
6425 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
6426 {
6427 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
6428 	uint8_t bank_high_byte;
6429 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
6430 
6431 	if ((sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
6432 		/* Value of bit 22 corresponds to the flash bank we're on. */
6433 		*bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
6434 	} else {
6435 		wm_read_ich8_byte(sc, act_offset, &bank_high_byte);
6436 		if ((bank_high_byte & 0xc0) == 0x80)
6437 			*bank = 0;
6438 		else {
6439 			wm_read_ich8_byte(sc, act_offset + bank1_offset,
6440 			    &bank_high_byte);
6441 			if ((bank_high_byte & 0xc0) == 0x80)
6442 				*bank = 1;
6443 			else {
6444 				aprint_error_dev(sc->sc_dev,
6445 				    "EEPROM not present\n");
6446 				return -1;
6447 			}
6448 		}
6449 	}
6450 
6451 	return 0;
6452 }
6453 
6454 /******************************************************************************
6455  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
6456  * register.
6457  *
6458  * sc - Struct containing variables accessed by shared code
6459  * offset - offset of word in the EEPROM to read
6460  * data - word read from the EEPROM
6461  * words - number of words to read
6462  *****************************************************************************/
6463 static int
6464 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
6465 {
6466 	int32_t  error = 0;
6467 	uint32_t flash_bank = 0;
6468 	uint32_t act_offset = 0;
6469 	uint32_t bank_offset = 0;
6470 	uint16_t word = 0;
6471 	uint16_t i = 0;
6472 
6473 	/* We need to know which is the valid flash bank.  In the event
6474 	 * that we didn't allocate eeprom_shadow_ram, we may not be
6475 	 * managing flash_bank.  So it cannot be trusted and needs
6476 	 * to be updated with each read.
6477 	 */
6478 	error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
6479 	if (error) {
6480 		aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
6481 		    __func__);
6482 		return error;
6483 	}
6484 
	/*
	 * Adjust offset appropriately if we're on bank 1 - adjust for word
	 * size.
	 */
6486 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
6487 
6488 	error = wm_get_swfwhw_semaphore(sc);
6489 	if (error) {
6490 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6491 		    __func__);
6492 		return error;
6493 	}
6494 
6495 	for (i = 0; i < words; i++) {
6496 		/* The NVM part needs a byte offset, hence * 2 */
6497 		act_offset = bank_offset + ((offset + i) * 2);
6498 		error = wm_read_ich8_word(sc, act_offset, &word);
6499 		if (error) {
6500 			aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
6501 			    __func__);
6502 			break;
6503 		}
6504 		data[i] = word;
6505 	}
6506 
6507 	wm_put_swfwhw_semaphore(sc);
6508 	return error;
6509 }
6510 
6511 /******************************************************************************
6512  * This function does initial flash setup so that a new read/write/erase cycle
6513  * can be started.
6514  *
6515  * sc - The pointer to the hw structure
6516  ****************************************************************************/
6517 static int32_t
6518 wm_ich8_cycle_init(struct wm_softc *sc)
6519 {
6520 	uint16_t hsfsts;
6521 	int32_t error = 1;
6522 	int32_t i     = 0;
6523 
6524 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6525 
	/* Maybe check the Flash Descriptor Valid bit in Hw status */
6527 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
6528 		return error;
6529 	}
6530 
6531 	/* Clear FCERR in Hw status by writing 1 */
6532 	/* Clear DAEL in Hw status by writing a 1 */
6533 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
6534 
6535 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6536 
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to check
	 * against, in order to start a new cycle, or the FDONE bit should be
	 * changed in the hardware so that it is 1 after hardware reset, which
	 * can then be used as an indication of whether a cycle is in progress
	 * or has been completed.  We should also have some software semaphore
	 * mechanism to guard FDONE or the cycle-in-progress bit so that
	 * accesses to those bits by two threads are serialized, or some way
	 * to keep two threads from starting a cycle at the same time.
	 */
6547 
6548 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
6549 		/*
6550 		 * There is no cycle running at present, so we can start a
6551 		 * cycle
6552 		 */
6553 
6554 		/* Begin by setting Flash Cycle Done. */
6555 		hsfsts |= HSFSTS_DONE;
6556 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6557 		error = 0;
6558 	} else {
6559 		/*
6560 		 * otherwise poll for sometime so the current cycle has a
6561 		 * chance to end before giving up.
6562 		 */
6563 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
6564 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6565 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
6566 				error = 0;
6567 				break;
6568 			}
6569 			delay(1);
6570 		}
6571 		if (error == 0) {
6572 			/*
			 * Successfully waited for the previous cycle to
			 * finish; now set the Flash Cycle Done.
6575 			 */
6576 			hsfsts |= HSFSTS_DONE;
6577 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6578 		}
6579 	}
6580 	return error;
6581 }
6582 
6583 /******************************************************************************
6584  * This function starts a flash cycle and waits for its completion
6585  *
6586  * sc - The pointer to the hw structure
6587  ****************************************************************************/
6588 static int32_t
6589 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
6590 {
6591 	uint16_t hsflctl;
6592 	uint16_t hsfsts;
6593 	int32_t error = 1;
6594 	uint32_t i = 0;
6595 
6596 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
6597 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
6598 	hsflctl |= HSFCTL_GO;
6599 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
6600 
6601 	/* wait till FDONE bit is set to 1 */
6602 	do {
6603 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6604 		if (hsfsts & HSFSTS_DONE)
6605 			break;
6606 		delay(1);
6607 		i++;
6608 	} while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
6610 		error = 0;
6611 
6612 	return error;
6613 }
6614 
6615 /******************************************************************************
6616  * Reads a byte or word from the NVM using the ICH8 flash access registers.
6617  *
6618  * sc - The pointer to the hw structure
6619  * index - The index of the byte or word to read.
6620  * size - Size of data to read, 1=byte 2=word
6621  * data - Pointer to the word to store the value read.
6622  *****************************************************************************/
6623 static int32_t
6624 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
6625     uint32_t size, uint16_t* data)
6626 {
6627 	uint16_t hsfsts;
6628 	uint16_t hsflctl;
6629 	uint32_t flash_linear_address;
6630 	uint32_t flash_data = 0;
6631 	int32_t error = 1;
6632 	int32_t count = 0;
6633 
	if (size < 1 || size > 2 || data == NULL ||
6635 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
6636 		return error;
6637 
6638 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
6639 	    sc->sc_ich8_flash_base;
6640 
6641 	do {
6642 		delay(1);
6643 		/* Steps */
6644 		error = wm_ich8_cycle_init(sc);
6645 		if (error)
6646 			break;
6647 
6648 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
6649 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
6650 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
6651 		    & HSFCTL_BCOUNT_MASK;
6652 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
6653 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
6654 
6655 		/*
6656 		 * Write the last 24 bits of index into Flash Linear address
6657 		 * field in Flash Address
6658 		 */
6659 		/* TODO: TBD maybe check the index against the size of flash */
6660 
6661 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
6662 
6663 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
6664 
		/*
		 * Check if FCERR is set to 1.  If it is, clear it and try
		 * the whole sequence a few more times; otherwise read the
		 * data out of the Flash Data0 register, least significant
		 * byte first.
		 */
6671 		if (error == 0) {
6672 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
6673 			if (size == 1)
6674 				*data = (uint8_t)(flash_data & 0x000000FF);
6675 			else if (size == 2)
6676 				*data = (uint16_t)(flash_data & 0x0000FFFF);
6677 			break;
6678 		} else {
6679 			/*
6680 			 * If we've gotten here, then things are probably
6681 			 * completely hosed, but if the error condition is
6682 			 * detected, it won't hurt to give it another try...
6683 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
6684 			 */
6685 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6686 			if (hsfsts & HSFSTS_ERR) {
6687 				/* Repeat for some time before giving up. */
6688 				continue;
6689 			} else if ((hsfsts & HSFSTS_DONE) == 0)
6690 				break;
6691 		}
6692 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
6693 
6694 	return error;
6695 }
6696 
6697 /******************************************************************************
6698  * Reads a single byte from the NVM using the ICH8 flash access registers.
6699  *
6700  * sc - pointer to wm_hw structure
6701  * index - The index of the byte to read.
6702  * data - Pointer to a byte to store the value read.
6703  *****************************************************************************/
6704 static int32_t
6705 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
6706 {
6707 	int32_t status;
6708 	uint16_t word = 0;
6709 
6710 	status = wm_read_ich8_data(sc, index, 1, &word);
6711 	if (status == 0)
6712 		*data = (uint8_t)word;
6713 
6714 	return status;
6715 }
6716 
6717 /******************************************************************************
6718  * Reads a word from the NVM using the ICH8 flash access registers.
6719  *
6720  * sc - pointer to wm_hw structure
6721  * index - The starting byte index of the word to read.
6722  * data - Pointer to a word to store the value read.
6723  *****************************************************************************/
6724 static int32_t
6725 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
6726 {
6727 	int32_t status;
6728 
6729 	status = wm_read_ich8_data(sc, index, 2, data);
6730 	return status;
6731 }
6732 
6733 static int
6734 wm_check_mng_mode(struct wm_softc *sc)
6735 {
6736 	int rv;
6737 
6738 	switch (sc->sc_type) {
6739 	case WM_T_ICH8:
6740 	case WM_T_ICH9:
6741 	case WM_T_ICH10:
6742 	case WM_T_PCH:
6743 		rv = wm_check_mng_mode_ich8lan(sc);
6744 		break;
6745 	case WM_T_82574:
6746 	case WM_T_82583:
6747 		rv = wm_check_mng_mode_82574(sc);
6748 		break;
6749 	case WM_T_82571:
6750 	case WM_T_82572:
6751 	case WM_T_82573:
6752 	case WM_T_80003:
6753 		rv = wm_check_mng_mode_generic(sc);
6754 		break;
6755 	default:
		/* nothing to do */
6757 		rv = 0;
6758 		break;
6759 	}
6760 
6761 	return rv;
6762 }
6763 
6764 static int
6765 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
6766 {
6767 	uint32_t fwsm;
6768 
6769 	fwsm = CSR_READ(sc, WMREG_FWSM);
6770 
6771 	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
6772 		return 1;
6773 
6774 	return 0;
6775 }
6776 
6777 static int
6778 wm_check_mng_mode_82574(struct wm_softc *sc)
6779 {
6780 	uint16_t data;
6781 
6782 	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
6783 
6784 	if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
6785 		return 1;
6786 
6787 	return 0;
6788 }
6789 
6790 static int
6791 wm_check_mng_mode_generic(struct wm_softc *sc)
6792 {
6793 	uint32_t fwsm;
6794 
6795 	fwsm = CSR_READ(sc, WMREG_FWSM);
6796 
6797 	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
6798 		return 1;
6799 
6800 	return 0;
6801 }
6802 
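/*
 * wm_enable_mng_pass_thru:
 *
 *	Return 1 if management pass-through should be enabled: ASF firmware
 *	must be present, MANC must show TCO packet reception with MAC
 *	address filtering, and either the ARC subsystem must report IAMT
 *	mode or (on parts without a valid ARC subsystem) SMBus must be
 *	enabled while ASF is not.
 */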
6803 static int
6804 wm_enable_mng_pass_thru(struct wm_softc *sc)
6805 {
6806 	uint32_t manc, fwsm, factps;
6807 
6808 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
6809 		return 0;
6810 
6811 	manc = CSR_READ(sc, WMREG_MANC);
6812 
6813 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
6814 		device_xname(sc->sc_dev), manc));
6815 	if (((manc & MANC_RECV_TCO_EN) == 0)
6816 	    || ((manc & MANC_EN_MAC_ADDR_FILTER) == 0))
6817 		return 0;
6818 
6819 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
6820 		fwsm = CSR_READ(sc, WMREG_FWSM);
6821 		factps = CSR_READ(sc, WMREG_FACTPS);
6822 		if (((factps & FACTPS_MNGCG) == 0)
6823 		    && ((fwsm & FWSM_MODE_MASK)
6824 			== (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
6825 			return 1;
6826 	} else if (((manc & MANC_SMBUS_EN) != 0)
6827 	    && ((manc & MANC_ASF_EN) == 0))
6828 		return 1;
6829 
6830 	return 0;
6831 }
6832 
6833 static int
6834 wm_check_reset_block(struct wm_softc *sc)
6835 {
6836 	uint32_t reg;
6837 
6838 	switch (sc->sc_type) {
6839 	case WM_T_ICH8:
6840 	case WM_T_ICH9:
6841 	case WM_T_ICH10:
6842 	case WM_T_PCH:
6843 		reg = CSR_READ(sc, WMREG_FWSM);
6844 		if ((reg & FWSM_RSPCIPHY) != 0)
6845 			return 0;
6846 		else
6847 			return -1;
6848 		break;
6849 	case WM_T_82571:
6850 	case WM_T_82572:
6851 	case WM_T_82573:
6852 	case WM_T_82574:
6853 	case WM_T_82583:
6854 	case WM_T_80003:
6855 		reg = CSR_READ(sc, WMREG_MANC);
6856 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
6857 			return -1;
6858 		else
6859 			return 0;
6860 		break;
6861 	default:
6862 		/* no problem */
6863 		break;
6864 	}
6865 
6866 	return 0;
6867 }
6868 
6869 static void
6870 wm_get_hw_control(struct wm_softc *sc)
6871 {
6872 	uint32_t reg;
6873 
6874 	switch (sc->sc_type) {
6875 	case WM_T_82573:
6876 		reg = CSR_READ(sc, WMREG_SWSM);
6877 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
6878 		break;
6879 	case WM_T_82571:
6880 	case WM_T_82572:
6881 	case WM_T_82574:
6882 	case WM_T_82583:
6883 	case WM_T_80003:
6884 	case WM_T_ICH8:
6885 	case WM_T_ICH9:
6886 	case WM_T_ICH10:
6887 	case WM_T_PCH:
6888 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
6889 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
6890 		break;
6891 	default:
6892 		break;
6893 	}
6894 }
6895 
6896 static void
6897 wm_release_hw_control(struct wm_softc *sc)
6898 {
6899 	uint32_t reg;
6900 
6901 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
6902 		return;
6903 
6904 	if (sc->sc_type == WM_T_82573) {
6905 		reg = CSR_READ(sc, WMREG_SWSM);
6906 		reg &= ~SWSM_DRV_LOAD;
6907 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
6908 	} else {
6909 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
6910 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
6911 	}
6912 }
6913 
6914 /* XXX Currently TBI only */
6915 static int
6916 wm_check_for_link(struct wm_softc *sc)
6917 {
6918 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6919 	uint32_t rxcw;
6920 	uint32_t ctrl;
6921 	uint32_t status;
6922 	uint32_t sig;
6923 
6924 	rxcw = CSR_READ(sc, WMREG_RXCW);
6925 	ctrl = CSR_READ(sc, WMREG_CTRL);
6926 	status = CSR_READ(sc, WMREG_STATUS);
6927 
6928 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
6929 
6930 	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
6931 		device_xname(sc->sc_dev), __func__,
6932 		((ctrl & CTRL_SWDPIN(1)) == sig),
6933 		((status & STATUS_LU) != 0),
6934 		((rxcw & RXCW_C) != 0)
6935 		    ));
6936 
6937 	/*
6938 	 * SWDPIN   LU RXCW
6939 	 *      0    0    0
6940 	 *      0    0    1	(should not happen)
6941 	 *      0    1    0	(should not happen)
6942 	 *      0    1    1	(should not happen)
6943 	 *      1    0    0	Disable autonego and force linkup
6944 	 *      1    0    1	got /C/ but not linkup yet
6945 	 *      1    1    0	(linkup)
6946 	 *      1    1    1	If IFM_AUTO, back to autonego
6947 	 *
6948 	 */
6949 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
6950 	    && ((status & STATUS_LU) == 0)
6951 	    && ((rxcw & RXCW_C) == 0)) {
6952 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
6953 			__func__));
6954 		sc->sc_tbi_linkup = 0;
6955 		/* Disable auto-negotiation in the TXCW register */
6956 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
6957 
6958 		/*
6959 		 * Force link-up and also force full-duplex.
6960 		 *
		 * NOTE: the hardware updates CTRL's TFCE and RFCE bits
		 * automatically, so we should update sc->sc_ctrl to match.
6963 		 */
6964 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
6965 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6966 	} else if (((status & STATUS_LU) != 0)
6967 	    && ((rxcw & RXCW_C) != 0)
6968 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
6969 		sc->sc_tbi_linkup = 1;
6970 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
6971 			__func__));
6972 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
6973 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
6974 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
6975 	    && ((rxcw & RXCW_C) != 0)) {
6976 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
6977 	} else {
6978 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
6979 			status));
6980 	}
6981 
6982 	return 0;
6983 }
6984 
6985 /* Work-around for 82566 Kumeran PCS lock loss */
6986 static void
6987 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
6988 {
6989 	int miistatus, active, i;
6990 	int reg;
6991 
6992 	miistatus = sc->sc_mii.mii_media_status;
6993 
	/* If the link is not up, do nothing */
	if ((miistatus & IFM_ACTIVE) == 0)
6996 		return;
6997 
6998 	active = sc->sc_mii.mii_media_active;
6999 
7000 	/* Nothing to do if the link is other than 1Gbps */
7001 	if (IFM_SUBTYPE(active) != IFM_1000_T)
7002 		return;
7003 
7004 	for (i = 0; i < 10; i++) {
7005 		/* read twice */
7006 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
7007 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
			goto out;	/* GOOD! No lock loss. */
7010 
7011 		/* Reset the PHY */
7012 		wm_gmii_reset(sc);
7013 		delay(5*1000);
7014 	}
7015 
7016 	/* Disable GigE link negotiation */
7017 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
7018 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
7019 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7020 
7021 	/*
7022 	 * Call gig speed drop workaround on Gig disable before accessing
7023 	 * any PHY registers.
7024 	 */
7025 	wm_gig_downshift_workaround_ich8lan(sc);
7026 
7027 out:
7028 	return;
7029 }
7030 
/* Workaround for a problem where WOL from S5 stops working */
7032 static void
7033 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
7034 {
7035 	uint16_t kmrn_reg;
7036 
7037 	/* Only for igp3 */
7038 	if (sc->sc_phytype == WMPHY_IGP_3) {
7039 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
7040 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
7041 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
7042 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
7043 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
7044 	}
7045 }
7046 
7047 #ifdef WM_WOL
7048 /* Power down workaround on D3 */
7049 static void
7050 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
7051 {
7052 	uint32_t reg;
7053 	int i;
7054 
7055 	for (i = 0; i < 2; i++) {
7056 		/* Disable link */
7057 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
7058 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
7059 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7060 
7061 		/*
7062 		 * Call gig speed drop workaround on Gig disable before
7063 		 * accessing any PHY registers
7064 		 */
7065 		if (sc->sc_type == WM_T_ICH8)
7066 			wm_gig_downshift_workaround_ich8lan(sc);
7067 
7068 		/* Write VR power-down enable */
7069 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
7070 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
7071 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
7072 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
7073 
7074 		/* Read it back and test */
7075 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
7076 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
7077 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
7078 			break;
7079 
7080 		/* Issue PHY reset and repeat at most one more time */
7081 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7082 	}
7083 }
7084 #endif /* WM_WOL */
7085 
7086 /*
7087  * Workaround for pch's PHYs
7088  * XXX should be moved to new PHY driver?
7089  */
7090 static void
7091 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
7092 {
7093 
7094 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
7095 
7096 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
7097 
7098 	/* 82578 */
7099 	if (sc->sc_phytype == WMPHY_82578) {
7100 		/* PCH rev. < 3 */
7101 		if (sc->sc_rev < 3) {
7102 			/* XXX 6 bit shift? Why? Is it page2? */
7103 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
7104 			    0x66c0);
7105 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
7106 			    0xffff);
7107 		}
7108 
7109 		/* XXX phy rev. < 2 */
7110 	}
7111 
7112 	/* Select page 0 */
7113 
7114 	/* XXX acquire semaphore */
7115 	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
7116 	/* XXX release semaphore */
7117 
7118 	/*
7119 	 * Configure the K1 Si workaround during phy reset assuming there is
7120 	 * link so that it disables K1 if link is in 1Gbps.
7121 	 */
7122 	wm_k1_gig_workaround_hv(sc, 1);
7123 }
7124 
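/*
 * wm_k1_gig_workaround_hv:
 *
 *	Work around a link stall on HV (82577/82578) PHYs.  K1 (apparently
 *	a Kumeran power-saving state) must not be enabled while a 1Gbps
 *	link is up, so the NVM-configured K1 setting is only honored when
 *	the link is down.
 */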
7125 static void
7126 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
7127 {
7128 	int k1_enable = sc->sc_nvm_k1_enabled;
7129 
7130 	/* XXX acquire semaphore */
7131 
7132 	if (link) {
7133 		k1_enable = 0;
7134 
7135 		/* Link stall fix for link up */
7136 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
7137 	} else {
7138 		/* Link stall fix for link down */
7139 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
7140 	}
7141 
7142 	wm_configure_k1_ich8lan(sc, k1_enable);
7143 
7144 	/* XXX release semaphore */
7145 }
7146 
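/*
 * wm_configure_k1_ich8lan:
 *
 *	Enable or disable K1 via the Kumeran K1_CONFIG register.  The MAC
 *	speed setting is briefly forced (speed bits cleared, CTRL_FRCSPD
 *	and CTRL_EXT_SPD_BYPS set) around the change, presumably so that
 *	the new K1 configuration takes effect.
 */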
7147 static void
7148 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
7149 {
7150 	uint32_t ctrl, ctrl_ext, tmp;
7151 	uint16_t kmrn_reg;
7152 
7153 	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
7154 
7155 	if (k1_enable)
7156 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
7157 	else
7158 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
7159 
7160 	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
7161 
7162 	delay(20);
7163 
7164 	ctrl = CSR_READ(sc, WMREG_CTRL);
7165 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
7166 
7167 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
7168 	tmp |= CTRL_FRCSPD;
7169 
7170 	CSR_WRITE(sc, WMREG_CTRL, tmp);
7171 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
7172 	delay(20);
7173 
7174 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
7175 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
7176 	delay(20);
7177 }
7178 
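/*
 * wm_set_pcie_completion_timeout:
 *
 *	If the PCIe completion timeout still has its default value of 0,
 *	set it to 10ms via GCR or, where capability version 2 is present,
 *	to 16ms through the PCIe Device Control 2 register.  Completion
 *	timeout resend is disabled in either case.
 */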
7179 static void
7180 wm_set_pcie_completion_timeout(struct wm_softc *sc)
7181 {
7182 	uint32_t gcr;
7183 	pcireg_t ctrl2;
7184 
7185 	gcr = CSR_READ(sc, WMREG_GCR);
7186 
7187 	/* Only take action if timeout value is defaulted to 0 */
7188 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
7189 		goto out;
7190 
7191 	if ((gcr & GCR_CAP_VER2) == 0) {
7192 		gcr |= GCR_CMPL_TMOUT_10MS;
7193 		goto out;
7194 	}
7195 
7196 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
7197 	    sc->sc_pcixe_capoff + PCI_PCIE_DCSR2);
7198 	ctrl2 |= WM_PCI_PCIE_DCSR2_16MS;
7199 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
7200 	    sc->sc_pcixe_capoff + PCI_PCIE_DCSR2, ctrl2);
7201 
7202 out:
7203 	/* Disable completion timeout resend */
7204 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
7205 
7206 	CSR_WRITE(sc, WMREG_GCR, gcr);
7207 }
7208 
7209 /* Special case - the 82575 needs manual init after reset ... */
7210 static void
7211 wm_reset_init_script_82575(struct wm_softc *sc)
7212 {
7213 	/*
7214 	 * Remark: this is untested code - we have no board without an EEPROM.
7215 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
7216 	 */
7217 
7218 	/* SerDes configuration via SERDESCTRL */
7219 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
7220 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
7221 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
7222 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
7223 
7224 	/* CCM configuration via CCMCTL register */
7225 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
7226 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
7227 
7228 	/* PCIe lanes configuration */
7229 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
7230 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
7231 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
7232 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
7233 
7234 	/* PCIe PLL Configuration */
7235 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
7236 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
7237 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
7238 }
7239 
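/*
 * Set up manageability: stop the hardware from intercepting ARP and,
 * on 82571 and later, let management packets (ports 623/624) through
 * to the host via MANC2H.
 */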
7240 static void
7241 wm_init_manageability(struct wm_softc *sc)
7242 {
7243 
7244 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
7245 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
7246 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
7247 
7248 		/* Disable hardware interception of ARP */
7249 		manc &= ~MANC_ARP_EN;
7250 
7251 		/* enable receiving management packets to the host */
7252 		if (sc->sc_type >= WM_T_82571) {
7253 			manc |= MANC_EN_MNG2HOST;
7254 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
7255 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
7256 
7257 		}
7258 
7259 		CSR_WRITE(sc, WMREG_MANC, manc);
7260 	}
7261 }
7262 
7263 static void
7264 wm_release_manageability(struct wm_softc *sc)
7265 {
7266 
7267 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
7268 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
7269 
7270 		if (sc->sc_type >= WM_T_82571)
7271 			manc &= ~MANC_EN_MNG2HOST;
7272 
7273 		CSR_WRITE(sc, WMREG_MANC, manc);
7274 	}
7275 }
7276 
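/*
 * Work out which management/wakeup capabilities this chip has and
 * record them in sc_flags: AMT, ARC subsystem validity, ASF firmware
 * presence and manageability pass-through.
 */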
7277 static void
7278 wm_get_wakeup(struct wm_softc *sc)
7279 {
7280 
7281 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
7282 	switch (sc->sc_type) {
7283 	case WM_T_82573:
7284 	case WM_T_82583:
7285 		sc->sc_flags |= WM_F_HAS_AMT;
7286 		/* FALLTHROUGH */
7287 	case WM_T_80003:
7288 	case WM_T_82541:
7289 	case WM_T_82547:
7290 	case WM_T_82571:
7291 	case WM_T_82572:
7292 	case WM_T_82574:
7293 	case WM_T_82575:
7294 	case WM_T_82576:
7295 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
7296 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
7297 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
7298 		break;
7299 	case WM_T_ICH8:
7300 	case WM_T_ICH9:
7301 	case WM_T_ICH10:
7302 	case WM_T_PCH:
7303 		sc->sc_flags |= WM_F_HAS_AMT;
7304 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
7305 		break;
7306 	default:
7307 		break;
7308 	}
7309 
7310 	/* 1: HAS_MANAGE */
7311 	if (wm_enable_mng_pass_thru(sc) != 0)
7312 		sc->sc_flags |= WM_F_HAS_MANAGE;
7313 
7314 #ifdef WM_DEBUG
7315 	printf("\n");
7316 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
7317 		printf("HAS_AMT,");
7318 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
7319 		printf("ARC_SUBSYS_VALID,");
7320 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
7321 		printf("ASF_FIRMWARE_PRES,");
7322 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
7323 		printf("HAS_MANAGE,");
7324 	printf("\n");
7325 #endif
7326 	/*
7327 	 * Note that the WOL flags are set later, after the EEPROM-related
7328 	 * reset has been done.
7329 	 */
7330 }
7331 
7332 #ifdef WM_WOL
7333 /* WOL in the newer chipset interfaces (pchlan) */
7334 static void
7335 wm_enable_phy_wakeup(struct wm_softc *sc)
7336 {
7337 #if 0
7338 	uint16_t preg;
7339 
7340 	/* Copy MAC RARs to PHY RARs */
7341 
7342 	/* Copy MAC MTA to PHY MTA */
7343 
7344 	/* Configure PHY Rx Control register */
7345 
7346 	/* Enable PHY wakeup in MAC register */
7347 
7348 	/* Configure and enable PHY wakeup in PHY registers */
7349 
7350 	/* Activate PHY wakeup */
7351 
7352 	/* XXX */
7353 #endif
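
#if 0
	/*
	 * A minimal sketch of the first step above ("Copy MAC RARs to
	 * PHY RARs"), modeled loosely on the Linux e1000e driver.  The
	 * BM_RAR_L/BM_RAR_M/BM_RAR_H/BM_RAR_CTRL PHY register numbers
	 * are hypothetical placeholders; they are not defined in
	 * if_wmreg.h yet.
	 */
	uint32_t lo, hi;

	lo = CSR_READ(sc, WMREG_CORDOVA_RAL_BASE);	/* RAL[0] */
	hi = CSR_READ(sc, WMREG_CORDOVA_RAL_BASE + 4);	/* RAH[0] */
	wm_gmii_hv_writereg(sc->sc_dev, 1, BM_RAR_L, lo & 0xffff);
	wm_gmii_hv_writereg(sc->sc_dev, 1, BM_RAR_M, (lo >> 16) & 0xffff);
	wm_gmii_hv_writereg(sc->sc_dev, 1, BM_RAR_H, hi & 0xffff);
	wm_gmii_hv_writereg(sc->sc_dev, 1, BM_RAR_CTRL, (hi >> 16) & 0xffff);
#endif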
7354 }
7355 
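/*
 * Arm the chip for wake-on-LAN: advertise the wakeup capability, apply
 * the ICH/PCH workarounds, enable the magic packet filter and request
 * PME through the PCI power management registers.
 */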
7356 static void
7357 wm_enable_wakeup(struct wm_softc *sc)
7358 {
7359 	uint32_t reg, pmreg;
7360 	pcireg_t pmode;
7361 
7362 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
7363 		&pmreg, NULL) == 0)
7364 		return;
7365 
7366 	/* Advertise the wakeup capability */
7367 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
7368 	    | CTRL_SWDPIN(3));
7369 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
7370 
7371 	/* ICH workaround */
7372 	switch (sc->sc_type) {
7373 	case WM_T_ICH8:
7374 	case WM_T_ICH9:
7375 	case WM_T_ICH10:
7376 	case WM_T_PCH:
7377 		/* Disable gig during WOL */
7378 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
7379 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
7380 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
7381 		if (sc->sc_type == WM_T_PCH)
7382 			wm_gmii_reset(sc);
7383 
7384 		/* Power down workaround */
7385 		if (sc->sc_phytype == WMPHY_82577) {
7386 			struct mii_softc *child;
7387 
7388 			/* Assume that the PHY is copper */
7389 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
7390 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
7391 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
7392 				    (768 << 5) | 25, 0x0444); /* page 768, reg 25 */
7393 		}
7394 		break;
7395 	default:
7396 		break;
7397 	}
7398 
7399 	/* Keep the laser running on fiber adapters */
7400 	if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
7401 	    || ((sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0)) {
7402 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
7403 		reg |= CTRL_EXT_SWDPIN(3);
7404 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7405 	}
7406 
7407 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
7408 #if 0	/* for multicast packets */
7409 	reg |= WUFC_MC;
7410 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
7411 #endif
7412 
7413 	if (sc->sc_type == WM_T_PCH) {
7414 		wm_enable_phy_wakeup(sc);
7415 	} else {
7416 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
7417 		CSR_WRITE(sc, WMREG_WUFC, reg);
7418 	}
7419 
7420 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
7421 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
7422 		    && (sc->sc_phytype == WMPHY_IGP_3))
7423 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
7424 
7425 	/* Request PME */
7426 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
7427 #if 0
7428 	/* Disable WOL */
7429 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
7430 #else
7431 	/* For WOL */
7432 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
7433 #endif
7434 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
7435 }
7436 #endif /* WM_WOL */
7437 
7438 static bool
7439 wm_suspend(device_t self, const pmf_qual_t *qual)
7440 {
7441 	struct wm_softc *sc = device_private(self);
7442 
7443 	wm_release_manageability(sc);
7444 	wm_release_hw_control(sc);
7445 #ifdef WM_WOL
7446 	wm_enable_wakeup(sc);
7447 #endif
7448 
7449 	return true;
7450 }
7451 
7452 static bool
7453 wm_resume(device_t self, const pmf_qual_t *qual)
7454 {
7455 	struct wm_softc *sc = device_private(self);
7456 
7457 	wm_init_manageability(sc);
7458 
7459 	return true;
7460 }
7461