/*	$NetBSD: if_wm.c,v 1.197 2010/02/04 10:20:54 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.197 2010/02/04 10:20:54 msaitoh Exp $");

#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK|WM_DEBUG_GMII;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
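
/*
 * Illustrative sketch (not part of the driver): because WM_NRXDESC is a
 * power of two, the AND-with-mask in WM_NEXTRX()/WM_PREVRX() (and likewise
 * in WM_NEXTTX()/WM_NEXTTXS() above) wraps a ring index without a modulo
 * operation, e.g.:
 */
#if 0	/* example only */
	int idx;

	idx = WM_NEXTRX(WM_NRXDESC - 1);	/* (255 + 1) & 255 == 0 */
	idx = WM_PREVRX(0);			/* (0 - 1) & 255 == 255 */
#endif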

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;

	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */
	int sc_flowflags;		/* 802.3x flow control flags */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	callout_t sc_tick_ch;		/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int		sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int			sc_txnum;	/* must be a power of two */
	struct wm_txsoft	sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft	sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int			sc_ntxdesc;	/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int	sc_txfree;		/* number of free Tx descriptors */
	int	sc_txnext;		/* next ready Tx descriptor */

	int	sc_txsfree;		/* number of free Tx jobs */
	int	sc_txsnext;		/* next free Tx job */
	int	sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int	sc_txfifo_size;		/* Tx FIFO size */
	int	sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int	sc_txfifo_stall;	/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
	int	sc_rxdiscard;
	int	sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
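
/*
 * A minimal usage sketch (assumed, not verbatim from wm_rxintr()): the Rx
 * interrupt path links each 2k buffer of a multi-buffer (jumbo) packet onto
 * the chain, then resets the chain once the final buffer arrives.  The
 * "lastbuf" flag below is hypothetical, for illustration only.
 */
#if 0	/* example only */
	WM_RXCHAIN_LINK(sc, m);			/* append this buffer */
	if (lastbuf) {
		struct mbuf *m_head = sc->sc_rxhead;

		WM_RXCHAIN_RESET(sc);		/* start a fresh chain */
		/* ... hand m_head up to the network stack ... */
	}
#endif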

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg) \
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE32(sc, reg, data) \
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define ICH8_FLASH_READ16(sc, reg) \
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE16(sc, reg, data) \
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
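
/*
 * Note (illustrative): the _LO/_HI pairs above split a 64-bit control-data
 * address into the two 32-bit halves that 64-bit-capable descriptor base
 * registers expect; on platforms where bus_addr_t is 32 bits the high half
 * is a constant 0 that the compiler can fold away.
 */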

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
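
/*
 * Worked example (illustrative): with WM_NTXDESC(sc) == 4096, the call
 * WM_CDTXSYNC(sc, 4094, 4, ops) wraps, so it is split into two
 * bus_dmamap_sync() calls: one for descriptors 4094..4095, then one for
 * descriptors 0..1.
 */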

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
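
/*
 * Worked example (illustrative): each Rx buffer is MCLBYTES (2048) bytes.
 * With sc_align_tweak == 2, at most 2046 bytes remain for the frame, which
 * still fits a standard 1518-byte Ethernet frame while 4-byte-aligning the
 * payload that follows the 14-byte header.  A jumbo frame can exceed 2046
 * bytes in its first buffer, so on strict-alignment platforms
 * sc_align_tweak must then be 0, as described above.
 */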

static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);

static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);

static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(device_t);

static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

static void	wm_set_spiaddrsize(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		     uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);

CFATTACH_DECL_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel® PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82578LM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82578LC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

static void
wm_set_spiaddrsize(struct wm_softc *sc)
{
	uint32_t reg;

	sc->sc_flags |= WM_F_EEPROM_SPI;
	reg = CSR_READ(sc, WMREG_EECD);
	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
}

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	size_t cdata_size;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, 0);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, sc->sc_rev);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: unable to find I/O BAR\n");
		else {
			/*
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which suggests that it hasn't
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
					0, &sc->sc_iot, &sc->sc_ioh,
					NULL, NULL) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as fast
		 * as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
					wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
		    && (sc->sc_type != WM_T_ICH10)
		    && (sc->sc_type != WM_T_PCH))
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if ((reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
					       PCI_CAP_PCIX,
					       &sc->sc_pcix_offset, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
				 sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose_dev(sc->sc_dev,
					    "resetting PCI-X MMRBC: %d -> %d\n",
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					   (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcix_offset + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
								      : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error_dev(sc->sc_dev,
				    "unknown PCIXSPD %d; assuming 66MHz\n",
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
				break;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	cdata_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE,
		    (bus_size_t) 0x100000000ULL, &seg, 1, &rseg, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size,
		    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size,
		    0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
		    sc->sc_control_data, cdata_size, NULL, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
			    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, 0,
			    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Rx DMA map %d error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* clear interesting stat counters */
	CSR_READ(sc, WMREG_COLC);
	CSR_READ(sc, WMREG_RXERRC);

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		if (wm_check_mng_mode(sc) != 0)
			wm_get_hw_control(sc);
		break;
	default:
		break;
	}

	/*
	 * Get some information about the EEPROM.
	 */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
		/* Microwire */
		sc->sc_ee_addrbits = 6;
		break;
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
		/* Microwire */
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			/* SPI */
			wm_set_spiaddrsize(sc);
		} else
			/* Microwire */
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82571:
	case WM_T_82572:
		/* SPI */
		wm_set_spiaddrsize(sc);
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		if (wm_is_onboard_nvm_eeprom(sc) == 0)
			sc->sc_flags |= WM_F_EEPROM_FLASH;
		else {
			/* SPI */
			wm_set_spiaddrsize(sc);
		}
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
		break;
	case WM_T_80003:
		/* SPI */
		wm_set_spiaddrsize(sc);
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
		/* Check whether EEPROM is present or not */
		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
			/* Not found */
			aprint_error_dev(sc->sc_dev,
			    "EEPROM PRESENT bit isn't set\n");
			sc->sc_flags |= WM_F_EEPROM_INVALID;
		}
		/* FALLTHROUGH */
	case WM_T_ICH10:
	case WM_T_PCH:
		/* FLASH */
		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
			aprint_error_dev(sc->sc_dev,
			    "can't map FLASH registers\n");
			return;
		}
		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
						ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size =
			((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
		sc->sc_ich8_flash_bank_size -=
			(reg & ICH_GFPREG_BASE_MASK);
		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
		break;
	default:
		break;
	}
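
	/*
	 * Worked example for the ICH/PCH flash case above (illustrative,
	 * with assumed register contents): if GFPREG reads 0x00200001, the
	 * base field is 1 (flash base 1 * ICH_FLASH_SECTOR_SIZE == 4096
	 * bytes) and the limit field is 0x20, so the region spans
	 * (0x20 + 1 - 1) == 0x20 sectors == 0x20 * 4096 bytes; dividing by
	 * the two NVM banks and by sizeof(uint16_t) gives 32768 16-bit
	 * words per bank.
	 */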

	/*
	 * Defer printing the EEPROM type until after verifying the checksum.
	 * This allows the EEPROM type to be printed correctly in the case
	 * that no EEPROM is attached.
	 */
	/*
	 * Validate the EEPROM checksum. If the checksum fails, flag
	 * this for later, so we can fail future reads from the EEPROM.
	 */
	if (wm_validate_eeprom_checksum(sc)) {
		/*
		 * Check again, because some PCI-e parts fail the first
		 * check while the link is in a sleep state.
		 */
		if (wm_validate_eeprom_checksum(sc))
			sc->sc_flags |= WM_F_EEPROM_INVALID;
	}

	/* Set device properties (macflags) */
	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);

	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
	} else {
		if (sc->sc_flags & WM_F_EEPROM_SPI)
			eetype = "SPI";
		else
			eetype = "MicroWire";
		aprint_verbose_dev(sc->sc_dev,
		    "%u word (%d address bits) %s EEPROM\n",
		    1U << sc->sc_ee_addrbits,
		    sc->sc_ee_addrbits, eetype);
	}

	/*
	 * Read the Ethernet address from the device properties if present;
	 * otherwise read it from the EEPROM.
1454 	 */
1455 	ea = prop_dictionary_get(dict, "mac-address");
1456 	if (ea != NULL) {
1457 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1458 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1459 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1460 	} else {
1461 		if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
1462 		    sizeof(myea) / sizeof(myea[0]), myea)) {
1463 			aprint_error_dev(sc->sc_dev,
1464 			    "unable to read Ethernet address\n");
1465 			return;
1466 		}
1467 		enaddr[0] = myea[0] & 0xff;
1468 		enaddr[1] = myea[0] >> 8;
1469 		enaddr[2] = myea[1] & 0xff;
1470 		enaddr[3] = myea[1] >> 8;
1471 		enaddr[4] = myea[2] & 0xff;
1472 		enaddr[5] = myea[2] >> 8;
1473 	}
1474 
1475 	/*
1476 	 * Toggle the LSB of the MAC address on the second port
1477 	 * of the dual port controller.
1478 	 */
1479 	if (sc->sc_type == WM_T_82546 || sc->sc_type == WM_T_82546_3
1480 	    || sc->sc_type ==  WM_T_82571 || sc->sc_type == WM_T_80003) {
1481 		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
1482 			enaddr[5] ^= 1;
1483 	}
1484 
1485 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1486 	    ether_sprintf(enaddr));
1487 
1488 	/*
1489 	 * Read the config info from the EEPROM, and set up various
1490 	 * bits in the control registers based on their contents.
1491 	 */
1492 	pn = prop_dictionary_get(dict, "i82543-cfg1");
1493 	if (pn != NULL) {
1494 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1495 		cfg1 = (uint16_t) prop_number_integer_value(pn);
1496 	} else {
1497 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1498 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1499 			return;
1500 		}
1501 	}
1502 
1503 	pn = prop_dictionary_get(dict, "i82543-cfg2");
1504 	if (pn != NULL) {
1505 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1506 		cfg2 = (uint16_t) prop_number_integer_value(pn);
1507 	} else {
1508 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1509 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1510 			return;
1511 		}
1512 	}
1513 
1514 	if (sc->sc_type >= WM_T_82544) {
1515 		pn = prop_dictionary_get(dict, "i82543-swdpin");
1516 		if (pn != NULL) {
1517 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1518 			swdpin = (uint16_t) prop_number_integer_value(pn);
1519 		} else {
1520 			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1521 				aprint_error_dev(sc->sc_dev,
1522 				    "unable to read SWDPIN\n");
1523 				return;
1524 			}
1525 		}
1526 	}
1527 
1528 	if (cfg1 & EEPROM_CFG1_ILOS)
1529 		sc->sc_ctrl |= CTRL_ILOS;
1530 	if (sc->sc_type >= WM_T_82544) {
1531 		sc->sc_ctrl |=
1532 		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1533 		    CTRL_SWDPIO_SHIFT;
1534 		sc->sc_ctrl |=
1535 		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1536 		    CTRL_SWDPINS_SHIFT;
1537 	} else {
1538 		sc->sc_ctrl |=
1539 		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1540 		    CTRL_SWDPIO_SHIFT;
1541 	}
1542 
1543 #if 0
1544 	if (sc->sc_type >= WM_T_82544) {
1545 		if (cfg1 & EEPROM_CFG1_IPS0)
1546 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1547 		if (cfg1 & EEPROM_CFG1_IPS1)
1548 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1549 		sc->sc_ctrl_ext |=
1550 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1551 		    CTRL_EXT_SWDPIO_SHIFT;
1552 		sc->sc_ctrl_ext |=
1553 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1554 		    CTRL_EXT_SWDPINS_SHIFT;
1555 	} else {
1556 		sc->sc_ctrl_ext |=
1557 		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1558 		    CTRL_EXT_SWDPIO_SHIFT;
1559 	}
1560 #endif
1561 
1562 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1563 #if 0
1564 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1565 #endif
1566 
1567 	/*
1568 	 * Set up some register offsets that are different between
1569 	 * the i82542 and the i82543 and later chips.
1570 	 */
1571 	if (sc->sc_type < WM_T_82543) {
1572 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
1573 		sc->sc_tdt_reg = WMREG_OLD_TDT;
1574 	} else {
1575 		sc->sc_rdt_reg = WMREG_RDT;
1576 		sc->sc_tdt_reg = WMREG_TDT;
1577 	}
1578 
1579 	if (sc->sc_type == WM_T_PCH) {
1580 		uint16_t val;
1581 
1582 		/* Save the NVM K1 bit setting */
1583 		wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
1584 
1585 		if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
1586 			sc->sc_nvm_k1_enabled = 1;
1587 		else
1588 			sc->sc_nvm_k1_enabled = 0;
1589 	}
1590 
1591 	/*
1592 	 * Determine whether we're in TBI or GMII mode, and initialize the
1593 	 * media structures accordingly.
1594 	 */
1595 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1596 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
1597 	    || sc->sc_type == WM_T_82573
1598 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
1599 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
1600 		wm_gmii_mediainit(sc, wmp->wmp_product);
1601 	} else if (sc->sc_type < WM_T_82543 ||
1602 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1603 		if (wmp->wmp_flags & WMP_F_1000T)
1604 			aprint_error_dev(sc->sc_dev,
1605 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
1606 		wm_tbi_mediainit(sc);
1607 	} else {
1608 		if (wmp->wmp_flags & WMP_F_1000X)
1609 			aprint_error_dev(sc->sc_dev,
1610 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
1611 		wm_gmii_mediainit(sc, wmp->wmp_product);
1612 	}
1613 
1614 	ifp = &sc->sc_ethercom.ec_if;
1615 	xname = device_xname(sc->sc_dev);
1616 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
1617 	ifp->if_softc = sc;
1618 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1619 	ifp->if_ioctl = wm_ioctl;
1620 	ifp->if_start = wm_start;
1621 	ifp->if_watchdog = wm_watchdog;
1622 	ifp->if_init = wm_init;
1623 	ifp->if_stop = wm_stop;
1624 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1625 	IFQ_SET_READY(&ifp->if_snd);
1626 
1627 	/* Check for jumbo frame */
1628 	switch (sc->sc_type) {
1629 	case WM_T_82573:
1630 		/* XXX limited to 9234 if ASPM is disabled */
1631 		wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
1632 		if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
1633 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1634 		break;
1635 	case WM_T_82571:
1636 	case WM_T_82572:
1637 	case WM_T_82574:
1638 	case WM_T_80003:
1639 	case WM_T_ICH9:
1640 	case WM_T_ICH10:
1641 		/* XXX limited to 9234 */
1642 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1643 		break;
1644 	case WM_T_PCH:
1645 		/* XXX limited to 4096 */
1646 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1647 		break;
1648 	case WM_T_82542_2_0:
1649 	case WM_T_82542_2_1:
1650 	case WM_T_82583:
1651 	case WM_T_ICH8:
1652 		/* No support for jumbo frame */
1653 		break;
1654 	default:
1655 		/* ETHER_MAX_LEN_JUMBO */
1656 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1657 		break;
1658 	}
1659 
1660 	/*
1661 	 * If we're an i82543 or greater, we can support VLANs.
1662 	 */
1663 	if (sc->sc_type >= WM_T_82543)
1664 		sc->sc_ethercom.ec_capabilities |=
1665 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
1666 
1667 	/*
1668 	 * We can offload IPv4/TCPv4/UDPv4 checksums in both directions,
1669 	 * and TCPv6/UDPv6 transmit checksums, on i82543 and later.
1670 	 */
1671 	if (sc->sc_type >= WM_T_82543) {
1672 		ifp->if_capabilities |=
1673 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1674 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1675 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1676 		    IFCAP_CSUM_TCPv6_Tx |
1677 		    IFCAP_CSUM_UDPv6_Tx;
1678 	}
1679 
1680 	/*
1681 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
1682 	 *
1683 	 *	82541GI (8086:1076) ... no
1684 	 *	82572EI (8086:10b9) ... yes
1685 	 */
1686 	if (sc->sc_type >= WM_T_82571) {
1687 		ifp->if_capabilities |=
1688 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1689 	}
1690 
1691 	/*
1692 	 * If we're an i82544 or greater (except i82547), we can do
1693 	 * TCP segmentation offload.
1694 	 */
1695 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
1696 		ifp->if_capabilities |= IFCAP_TSOv4;
1697 	}
1698 
1699 	if (sc->sc_type >= WM_T_82571) {
1700 		ifp->if_capabilities |= IFCAP_TSOv6;
1701 	}
1702 
1703 	/*
1704 	 * Attach the interface.
1705 	 */
1706 	if_attach(ifp);
1707 	ether_ifattach(ifp, enaddr);
1708 #if NRND > 0
1709 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
1710 #endif
1711 
1712 #ifdef WM_EVENT_COUNTERS
1713 	/* Attach event counters. */
1714 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1715 	    NULL, xname, "txsstall");
1716 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1717 	    NULL, xname, "txdstall");
1718 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1719 	    NULL, xname, "txfifo_stall");
1720 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1721 	    NULL, xname, "txdw");
1722 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1723 	    NULL, xname, "txqe");
1724 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1725 	    NULL, xname, "rxintr");
1726 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
1727 	    NULL, xname, "linkintr");
1728 
1729 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1730 	    NULL, xname, "rxipsum");
1731 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
1732 	    NULL, xname, "rxtusum");
1733 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1734 	    NULL, xname, "txipsum");
1735 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
1736 	    NULL, xname, "txtusum");
1737 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
1738 	    NULL, xname, "txtusum6");
1739 
1740 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
1741 	    NULL, xname, "txtso");
1742 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
1743 	    NULL, xname, "txtso6");
1744 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
1745 	    NULL, xname, "txtsopain");
1746 
1747 	for (i = 0; i < WM_NTXSEGS; i++) {
1748 		sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
1749 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
1750 		    NULL, xname, wm_txseg_evcnt_names[i]);
1751 	}
1752 
1753 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
1754 	    NULL, xname, "txdrop");
1755 
1756 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
1757 	    NULL, xname, "tu");
1758 
1759 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
1760 	    NULL, xname, "tx_xoff");
1761 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
1762 	    NULL, xname, "tx_xon");
1763 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
1764 	    NULL, xname, "rx_xoff");
1765 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
1766 	    NULL, xname, "rx_xon");
1767 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
1768 	    NULL, xname, "rx_macctl");
1769 #endif /* WM_EVENT_COUNTERS */
1770 
1771 	if (pmf_device_register(self, NULL, NULL))
1772 		pmf_class_network_register(self, ifp);
1773 	else
1774 		aprint_error_dev(self, "couldn't establish power handler\n");
1775 
1776 	return;
1777 
1778 	/*
1779 	 * Free any resources we've allocated during the failed attach
1780 	 * attempt.  Do this in reverse order and fall through.
1781 	 */
1782  fail_5:
1783 	for (i = 0; i < WM_NRXDESC; i++) {
1784 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
1785 			bus_dmamap_destroy(sc->sc_dmat,
1786 			    sc->sc_rxsoft[i].rxs_dmamap);
1787 	}
1788  fail_4:
1789 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1790 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
1791 			bus_dmamap_destroy(sc->sc_dmat,
1792 			    sc->sc_txsoft[i].txs_dmamap);
1793 	}
1794 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
1795  fail_3:
1796 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
1797  fail_2:
1798 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
1799 	    cdata_size);
1800  fail_1:
1801 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1802  fail_0:
1803 	return;
1804 }
1805 
1806 /*
1807  * wm_tx_offload:
1808  *
1809  *	Set up TCP/IP checksumming parameters for the
1810  *	specified packet.
1811  */
1812 static int
1813 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
1814     uint8_t *fieldsp)
1815 {
1816 	struct mbuf *m0 = txs->txs_mbuf;
1817 	struct livengood_tcpip_ctxdesc *t;
1818 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
1819 	uint32_t ipcse;
1820 	struct ether_header *eh;
1821 	int offset, iphl;
1822 	uint8_t fields;
1823 
1824 	/*
1825 	 * XXX It would be nice if the mbuf pkthdr had offset
1826 	 * fields for the protocol headers.
1827 	 */
1828 
1829 	eh = mtod(m0, struct ether_header *);
1830 	switch (ntohs(eh->ether_type)) {
1831 	case ETHERTYPE_IP:
1832 	case ETHERTYPE_IPV6:
1833 		offset = ETHER_HDR_LEN;
1834 		break;
1835 
1836 	case ETHERTYPE_VLAN:
1837 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1838 		break;
1839 
1840 	default:
1841 		/*
1842 		 * Don't support this protocol or encapsulation.
1843 		 */
1844 		*fieldsp = 0;
1845 		*cmdp = 0;
1846 		return 0;
1847 	}
1848 
1849 	if ((m0->m_pkthdr.csum_flags &
1850 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
1851 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
1852 	} else {
1853 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
1854 	}
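	/* IPCSE is the inclusive offset of the last byte of the IP header. */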
1855 	ipcse = offset + iphl - 1;
1856 
1857 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
1858 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
1859 	seg = 0;
1860 	fields = 0;
1861 
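	/*
	 * For TSO, the chip rewrites the IP length and TCP checksum in
	 * each segment it emits, so we zero ip_len/ip6_plen here and
	 * seed th_sum with a pseudo-header checksum that excludes the
	 * length, which is the form the hardware expects.
	 */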
1862 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
1863 		int hlen = offset + iphl;
1864 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
1865 
1866 		if (__predict_false(m0->m_len <
1867 				    (hlen + sizeof(struct tcphdr)))) {
1868 			/*
1869 			 * TCP/IP headers are not in the first mbuf; we need
1870 			 * to do this the slow and painful way.  Let's just
1871 			 * hope this doesn't happen very often.
1872 			 */
1873 			struct tcphdr th;
1874 
1875 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
1876 
1877 			m_copydata(m0, hlen, sizeof(th), &th);
1878 			if (v4) {
1879 				struct ip ip;
1880 
1881 				m_copydata(m0, offset, sizeof(ip), &ip);
1882 				ip.ip_len = 0;
1883 				m_copyback(m0,
1884 				    offset + offsetof(struct ip, ip_len),
1885 				    sizeof(ip.ip_len), &ip.ip_len);
1886 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
1887 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
1888 			} else {
1889 				struct ip6_hdr ip6;
1890 
1891 				m_copydata(m0, offset, sizeof(ip6), &ip6);
1892 				ip6.ip6_plen = 0;
1893 				m_copyback(m0,
1894 				    offset + offsetof(struct ip6_hdr, ip6_plen),
1895 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
1896 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
1897 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
1898 			}
1899 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
1900 			    sizeof(th.th_sum), &th.th_sum);
1901 
1902 			hlen += th.th_off << 2;
1903 		} else {
1904 			/*
1905 			 * TCP/IP headers are in the first mbuf; we can do
1906 			 * this the easy way.
1907 			 */
1908 			struct tcphdr *th;
1909 
1910 			if (v4) {
1911 				struct ip *ip =
1912 				    (void *)(mtod(m0, char *) + offset);
1913 				th = (void *)(mtod(m0, char *) + hlen);
1914 
1915 				ip->ip_len = 0;
1916 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
1917 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1918 			} else {
1919 				struct ip6_hdr *ip6 =
1920 				    (void *)(mtod(m0, char *) + offset);
1921 				th = (void *)(mtod(m0, char *) + hlen);
1922 
1923 				ip6->ip6_plen = 0;
1924 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
1925 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
1926 			}
1927 			hlen += th->th_off << 2;
1928 		}
1929 
1930 		if (v4) {
1931 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
1932 			cmdlen |= WTX_TCPIP_CMD_IP;
1933 		} else {
1934 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
1935 			ipcse = 0;
1936 		}
1937 		cmd |= WTX_TCPIP_CMD_TSE;
1938 		cmdlen |= WTX_TCPIP_CMD_TSE |
1939 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
1940 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
1941 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
1942 	}
1943 
1944 	/*
1945 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
1946 	 * offload feature, if we load the context descriptor, we
1947 	 * MUST provide valid values for IPCSS and TUCSS fields.
1948 	 */
1949 
1950 	ipcs = WTX_TCPIP_IPCSS(offset) |
1951 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1952 	    WTX_TCPIP_IPCSE(ipcse);
1953 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
1954 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
1955 		fields |= WTX_IXSM;
1956 	}
1957 
1958 	offset += iphl;
1959 
1960 	if (m0->m_pkthdr.csum_flags &
1961 	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
1962 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
1963 		fields |= WTX_TXSM;
1964 		tucs = WTX_TCPIP_TUCSS(offset) |
1965 		    WTX_TCPIP_TUCSO(offset +
1966 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
1967 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
1968 	} else if ((m0->m_pkthdr.csum_flags &
1969 	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
1970 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
1971 		fields |= WTX_TXSM;
1972 		tucs = WTX_TCPIP_TUCSS(offset) |
1973 		    WTX_TCPIP_TUCSO(offset +
1974 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
1975 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
1976 	} else {
1977 		/* Just initialize it to a valid TCP context. */
1978 		tucs = WTX_TCPIP_TUCSS(offset) |
1979 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
1980 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
1981 	}
1982 
1983 	/* Fill in the context descriptor. */
1984 	t = (struct livengood_tcpip_ctxdesc *)
1985 	    &sc->sc_txdescs[sc->sc_txnext];
1986 	t->tcpip_ipcs = htole32(ipcs);
1987 	t->tcpip_tucs = htole32(tucs);
1988 	t->tcpip_cmdlen = htole32(cmdlen);
1989 	t->tcpip_seg = htole32(seg);
1990 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
1991 
1992 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
1993 	txs->txs_ndesc++;
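	/*
	 * The context descriptor consumes a ring slot of its own;
	 * wm_start reserved an extra slot for exactly this case.
	 */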
1994 
1995 	*cmdp = cmd;
1996 	*fieldsp = fields;
1997 
1998 	return 0;
1999 }
2000 
2001 static void
2002 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
2003 {
2004 	struct mbuf *m;
2005 	int i;
2006 
2007 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
2008 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
2009 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
2010 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
2011 		    m->m_data, m->m_len, m->m_flags);
2012 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
2013 	    i, i == 1 ? "" : "s");
2014 }
2015 
2016 /*
2017  * wm_82547_txfifo_stall:
2018  *
2019  *	Callout used to wait for the 82547 Tx FIFO to drain,
2020  *	reset the FIFO pointers, and restart packet transmission.
2021  */
2022 static void
2023 wm_82547_txfifo_stall(void *arg)
2024 {
2025 	struct wm_softc *sc = arg;
2026 	int s;
2027 
2028 	s = splnet();
2029 
2030 	if (sc->sc_txfifo_stall) {
2031 		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
2032 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
2033 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
2034 			/*
2035 			 * Packets have drained.  Stop transmitter, reset
2036 			 * FIFO pointers, restart transmitter, and kick
2037 			 * the packet queue.
2038 			 */
2039 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
2040 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
2041 			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
2042 			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
2043 			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
2044 			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
2045 			CSR_WRITE(sc, WMREG_TCTL, tctl);
2046 			CSR_WRITE_FLUSH(sc);
2047 
2048 			sc->sc_txfifo_head = 0;
2049 			sc->sc_txfifo_stall = 0;
2050 			wm_start(&sc->sc_ethercom.ec_if);
2051 		} else {
2052 			/*
2053 			 * Still waiting for packets to drain; try again in
2054 			 * another tick.
2055 			 */
2056 			callout_schedule(&sc->sc_txfifo_ch, 1);
2057 		}
2058 	}
2059 
2060 	splx(s);
2061 }
2062 
2063 /*
2064  * wm_82547_txfifo_bugchk:
2065  *
2066  *	Check for bug condition in the 82547 Tx FIFO.  We need to
2067  *	prevent enqueueing a packet that would wrap around the end
2068 	 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
2069  *
2070  *	We do this by checking the amount of space before the end
2071  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
2072  *	the Tx FIFO, wait for all remaining packets to drain, reset
2073  *	the internal FIFO pointers to the beginning, and restart
2074  *	transmission on the interface.
2075  */
2076 #define	WM_FIFO_HDR		0x10
2077 #define	WM_82547_PAD_LEN	0x3e0
2078 static int
2079 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2080 {
2081 	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2082 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
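	/*
	 * For example, a 1514-byte frame occupies
	 * roundup(1514 + 16, 16) = 1536 bytes of FIFO space,
	 * including the 16-byte FIFO header.
	 */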
2083 
2084 	/* Just return if already stalled. */
2085 	if (sc->sc_txfifo_stall)
2086 		return 1;
2087 
2088 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
2089 		/* Stall only occurs in half-duplex mode. */
2090 		goto send_packet;
2091 	}
2092 
2093 	if (len >= WM_82547_PAD_LEN + space) {
2094 		sc->sc_txfifo_stall = 1;
2095 		callout_schedule(&sc->sc_txfifo_ch, 1);
2096 		return 1;
2097 	}
2098 
2099  send_packet:
2100 	sc->sc_txfifo_head += len;
2101 	if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2102 		sc->sc_txfifo_head -= sc->sc_txfifo_size;
2103 
2104 	return 0;
2105 }
2106 
2107 /*
2108  * wm_start:		[ifnet interface function]
2109  *
2110  *	Start packet transmission on the interface.
2111  */
2112 static void
2113 wm_start(struct ifnet *ifp)
2114 {
2115 	struct wm_softc *sc = ifp->if_softc;
2116 	struct mbuf *m0;
2117 	struct m_tag *mtag;
2118 	struct wm_txsoft *txs;
2119 	bus_dmamap_t dmamap;
2120 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2121 	bus_addr_t curaddr;
2122 	bus_size_t seglen, curlen;
2123 	uint32_t cksumcmd;
2124 	uint8_t cksumfields;
2125 
2126 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2127 		return;
2128 
2129 	/*
2130 	 * Remember the previous number of free descriptors.
2131 	 */
2132 	ofree = sc->sc_txfree;
2133 
2134 	/*
2135 	 * Loop through the send queue, setting up transmit descriptors
2136 	 * until we drain the queue, or use up all available transmit
2137 	 * descriptors.
2138 	 */
2139 	for (;;) {
2140 		/* Grab a packet off the queue. */
2141 		IFQ_POLL(&ifp->if_snd, m0);
2142 		if (m0 == NULL)
2143 			break;
2144 
2145 		DPRINTF(WM_DEBUG_TX,
2146 		    ("%s: TX: have packet to transmit: %p\n",
2147 		    device_xname(sc->sc_dev), m0));
2148 
2149 		/* Get a work queue entry. */
2150 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2151 			wm_txintr(sc);
2152 			if (sc->sc_txsfree == 0) {
2153 				DPRINTF(WM_DEBUG_TX,
2154 				    ("%s: TX: no free job descriptors\n",
2155 					device_xname(sc->sc_dev)));
2156 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2157 				break;
2158 			}
2159 		}
2160 
2161 		txs = &sc->sc_txsoft[sc->sc_txsnext];
2162 		dmamap = txs->txs_dmamap;
2163 
2164 		use_tso = (m0->m_pkthdr.csum_flags &
2165 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2166 
2167 		/*
2168 		 * So says the Linux driver:
2169 		 * The controller does a simple calculation to make sure
2170 		 * there is enough room in the FIFO before initiating the
2171 		 * DMA for each buffer.  The calc is:
2172 		 *	4 = ceil(buffer len / MSS)
2173 		 * To make sure we don't overrun the FIFO, adjust the max
2174 		 * buffer len if the MSS drops.
2175 		 */
2176 		dmamap->dm_maxsegsz =
2177 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2178 		    ? m0->m_pkthdr.segsz << 2
2179 		    : WTX_MAX_LEN;
2180 
2181 		/*
2182 		 * Load the DMA map.  If this fails, the packet either
2183 		 * didn't fit in the allotted number of segments, or we
2184 		 * were short on resources.  For the too-many-segments
2185 		 * case, we simply report an error and drop the packet,
2186 		 * since we can't sanely copy a jumbo packet to a single
2187 		 * buffer.
2188 		 */
2189 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2190 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2191 		if (error) {
2192 			if (error == EFBIG) {
2193 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2194 				log(LOG_ERR, "%s: Tx packet consumes too many "
2195 				    "DMA segments, dropping...\n",
2196 				    device_xname(sc->sc_dev));
2197 				IFQ_DEQUEUE(&ifp->if_snd, m0);
2198 				wm_dump_mbuf_chain(sc, m0);
2199 				m_freem(m0);
2200 				continue;
2201 			}
2202 			/*
2203 			 * Short on resources, just stop for now.
2204 			 */
2205 			DPRINTF(WM_DEBUG_TX,
2206 			    ("%s: TX: dmamap load failed: %d\n",
2207 			    device_xname(sc->sc_dev), error));
2208 			break;
2209 		}
2210 
2211 		segs_needed = dmamap->dm_nsegs;
2212 		if (use_tso) {
2213 			/* For sentinel descriptor; see below. */
2214 			segs_needed++;
2215 		}
2216 
2217 		/*
2218 		 * Ensure we have enough descriptors free to describe
2219 		 * the packet.  Note, we always reserve one descriptor
2220 		 * at the end of the ring due to the semantics of the
2221 		 * TDT register, plus one more in the event we need
2222 		 * to load offload context.
2223 		 */
2224 		if (segs_needed > sc->sc_txfree - 2) {
2225 			/*
2226 			 * Not enough free descriptors to transmit this
2227 			 * packet.  We haven't committed anything yet,
2228 			 * so just unload the DMA map, put the packet
2229 			 * pack on the queue, and punt.  Notify the upper
2230 			 * back on the queue, and punt.  Notify the upper
2231 			 */
2232 			DPRINTF(WM_DEBUG_TX,
2233 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
2234 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
2235 			    segs_needed, sc->sc_txfree - 1));
2236 			ifp->if_flags |= IFF_OACTIVE;
2237 			bus_dmamap_unload(sc->sc_dmat, dmamap);
2238 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2239 			break;
2240 		}
2241 
2242 		/*
2243 		 * Check for 82547 Tx FIFO bug.  We need to do this
2244 		 * once we know we can transmit the packet, since we
2245 		 * do some internal FIFO space accounting here.
2246 		 */
2247 		if (sc->sc_type == WM_T_82547 &&
2248 		    wm_82547_txfifo_bugchk(sc, m0)) {
2249 			DPRINTF(WM_DEBUG_TX,
2250 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
2251 			    device_xname(sc->sc_dev)));
2252 			ifp->if_flags |= IFF_OACTIVE;
2253 			bus_dmamap_unload(sc->sc_dmat, dmamap);
2254 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2255 			break;
2256 		}
2257 
2258 		IFQ_DEQUEUE(&ifp->if_snd, m0);
2259 
2260 		/*
2261 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2262 		 */
2263 
2264 		DPRINTF(WM_DEBUG_TX,
2265 		    ("%s: TX: packet has %d (%d) DMA segments\n",
2266 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2267 
2268 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2269 
2270 		/*
2271 		 * Store a pointer to the packet so that we can free it
2272 		 * later.
2273 		 *
2274 		 * Initially, we take the number of descriptors the
2275 		 * packet uses to be its number of DMA segments.  This may be
2276 		 * incremented by 1 if we do checksum offload (a descriptor
2277 		 * is used to set the checksum context).
2278 		 */
2279 		txs->txs_mbuf = m0;
2280 		txs->txs_firstdesc = sc->sc_txnext;
2281 		txs->txs_ndesc = segs_needed;
2282 
2283 		/* Set up offload parameters for this packet. */
2284 		if (m0->m_pkthdr.csum_flags &
2285 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
2286 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2287 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2288 			if (wm_tx_offload(sc, txs, &cksumcmd,
2289 					  &cksumfields) != 0) {
2290 				/* Error message already displayed. */
2291 				bus_dmamap_unload(sc->sc_dmat, dmamap);
2292 				continue;
2293 			}
2294 		} else {
2295 			cksumcmd = 0;
2296 			cksumfields = 0;
2297 		}
2298 
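		/*
		 * Every data descriptor requests FCS insertion (IFCS)
		 * and uses the delayed-interrupt mechanism (IDE).
		 */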
2299 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2300 
2301 		/* Sync the DMA map. */
2302 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2303 		    BUS_DMASYNC_PREWRITE);
2304 
2305 		/*
2306 		 * Initialize the transmit descriptor.
2307 		 */
2308 		for (nexttx = sc->sc_txnext, seg = 0;
2309 		     seg < dmamap->dm_nsegs; seg++) {
2310 			for (seglen = dmamap->dm_segs[seg].ds_len,
2311 			     curaddr = dmamap->dm_segs[seg].ds_addr;
2312 			     seglen != 0;
2313 			     curaddr += curlen, seglen -= curlen,
2314 			     nexttx = WM_NEXTTX(sc, nexttx)) {
2315 				curlen = seglen;
2316 
2317 				/*
2318 				 * So says the Linux driver:
2319 				 * Work around for premature descriptor
2320 				 * write-backs in TSO mode.  Append a
2321 				 * 4-byte sentinel descriptor.
2322 				 */
2323 				if (use_tso &&
2324 				    seg == dmamap->dm_nsegs - 1 &&
2325 				    curlen > 8)
2326 					curlen -= 4;
2327 
2328 				wm_set_dma_addr(
2329 				    &sc->sc_txdescs[nexttx].wtx_addr,
2330 				    curaddr);
2331 				sc->sc_txdescs[nexttx].wtx_cmdlen =
2332 				    htole32(cksumcmd | curlen);
2333 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2334 				    0;
2335 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2336 				    cksumfields;
2337 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2338 				lasttx = nexttx;
2339 
2340 				DPRINTF(WM_DEBUG_TX,
2341 				    ("%s: TX: desc %d: low 0x%08lx, "
2342 				     "len 0x%04x\n",
2343 				    device_xname(sc->sc_dev), nexttx,
2344 				    curaddr & 0xffffffffUL, (unsigned)curlen));
2345 			}
2346 		}
2347 
2348 		KASSERT(lasttx != -1);
2349 
2350 		/*
2351 		 * Set up the command byte on the last descriptor of
2352 		 * the packet.  If we're in the interrupt delay window,
2353 		 * delay the interrupt.
2354 		 */
2355 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
2356 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
2357 
2358 		/*
2359 		 * If VLANs are enabled and the packet has a VLAN tag, set
2360 		 * up the descriptor to encapsulate the packet for us.
2361 		 *
2362 		 * This is only valid on the last descriptor of the packet.
2363 		 */
2364 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2365 			sc->sc_txdescs[lasttx].wtx_cmdlen |=
2366 			    htole32(WTX_CMD_VLE);
2367 			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2368 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2369 		}
2370 
2371 		txs->txs_lastdesc = lasttx;
2372 
2373 		DPRINTF(WM_DEBUG_TX,
2374 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
2375 		    device_xname(sc->sc_dev),
2376 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2377 
2378 		/* Sync the descriptors we're using. */
2379 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2380 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2381 
2382 		/* Give the packet to the chip. */
2383 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2384 
2385 		DPRINTF(WM_DEBUG_TX,
2386 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2387 
2388 		DPRINTF(WM_DEBUG_TX,
2389 		    ("%s: TX: finished transmitting packet, job %d\n",
2390 		    device_xname(sc->sc_dev), sc->sc_txsnext));
2391 
2392 		/* Advance the tx pointer. */
2393 		sc->sc_txfree -= txs->txs_ndesc;
2394 		sc->sc_txnext = nexttx;
2395 
2396 		sc->sc_txsfree--;
2397 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2398 
2399 		/* Pass the packet to any BPF listeners. */
2400 		if (ifp->if_bpf)
2401 			bpf_ops->bpf_mtap(ifp->if_bpf, m0);
2402 	}
2403 
2404 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2405 		/* No more slots; notify upper layer. */
2406 		ifp->if_flags |= IFF_OACTIVE;
2407 	}
2408 
2409 	if (sc->sc_txfree != ofree) {
2410 		/* Set a watchdog timer in case the chip flakes out. */
2411 		ifp->if_timer = 5;
2412 	}
2413 }
2414 
2415 /*
2416  * wm_watchdog:		[ifnet interface function]
2417  *
2418  *	Watchdog timer handler.
2419  */
2420 static void
2421 wm_watchdog(struct ifnet *ifp)
2422 {
2423 	struct wm_softc *sc = ifp->if_softc;
2424 
2425 	/*
2426 	 * Since we're using delayed interrupts, sweep up
2427 	 * before we report an error.
2428 	 */
2429 	wm_txintr(sc);
2430 
2431 	if (sc->sc_txfree != WM_NTXDESC(sc)) {
2432 		log(LOG_ERR,
2433 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2434 		    device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2435 		    sc->sc_txnext);
2436 		ifp->if_oerrors++;
2437 
2438 		/* Reset the interface. */
2439 		(void) wm_init(ifp);
2440 	}
2441 
2442 	/* Try to get more packets going. */
2443 	wm_start(ifp);
2444 }
2445 
2446 /*
2447  * wm_ioctl:		[ifnet interface function]
2448  *
2449  *	Handle control requests from the operator.
2450  */
2451 static int
2452 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2453 {
2454 	struct wm_softc *sc = ifp->if_softc;
2455 	struct ifreq *ifr = (struct ifreq *) data;
2456 	struct ifaddr *ifa = (struct ifaddr *)data;
2457 	struct sockaddr_dl *sdl;
2458 	int diff, s, error;
2459 
2460 	s = splnet();
2461 
2462 	switch (cmd) {
2463 	case SIOCSIFFLAGS:
2464 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
2465 			break;
2466 		if (ifp->if_flags & IFF_UP) {
2467 			diff = (ifp->if_flags ^ sc->sc_if_flags)
2468 			    & (IFF_PROMISC | IFF_ALLMULTI);
2469 			if ((diff & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2470 				/*
2471 				 * If the difference between the old and
2472 				 * new flags is only IFF_PROMISC or
2473 				 * IFF_ALLMULTI, just update the multicast
2474 				 * filter (don't reset, to avoid a link down).
2475 				 */
2476 				wm_set_filter(sc);
2477 			} else {
2478 				/*
2479 				 * Reset the interface to pick up changes in
2480 				 * any other flags that affect the hardware
2481 				 * state.
2482 				 */
2483 				wm_init(ifp);
2484 			}
2485 		} else {
2486 			if (ifp->if_flags & IFF_RUNNING)
2487 				wm_stop(ifp, 1);
2488 		}
2489 		sc->sc_if_flags = ifp->if_flags;
2490 		error = 0;
2491 		break;
2492 	case SIOCSIFMEDIA:
2493 	case SIOCGIFMEDIA:
2494 		/* Flow control requires full-duplex mode. */
2495 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2496 		    (ifr->ifr_media & IFM_FDX) == 0)
2497 			ifr->ifr_media &= ~IFM_ETH_FMASK;
2498 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2499 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2500 				/* We can do both TXPAUSE and RXPAUSE. */
2501 				ifr->ifr_media |=
2502 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2503 			}
2504 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2505 		}
2506 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2507 		break;
2508 	case SIOCINITIFADDR:
2509 		if (ifa->ifa_addr->sa_family == AF_LINK) {
2510 			sdl = satosdl(ifp->if_dl->ifa_addr);
2511 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2512 					LLADDR(satosdl(ifa->ifa_addr)),
2513 					ifp->if_addrlen);
2514 			/* unicast address is first multicast entry */
2515 			wm_set_filter(sc);
2516 			error = 0;
2517 			break;
2518 		}
2519 		/* Fall through for rest */
2520 	default:
2521 		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
2522 			break;
2523 
2524 		error = 0;
2525 
2526 		if (cmd == SIOCSIFCAP)
2527 			error = (*ifp->if_init)(ifp);
2528 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2529 			;
2530 		else if (ifp->if_flags & IFF_RUNNING) {
2531 			/*
2532 			 * Multicast list has changed; set the hardware filter
2533 			 * accordingly.
2534 			 */
2535 			wm_set_filter(sc);
2536 		}
2537 		break;
2538 	}
2539 
2540 	/* Try to get more packets going. */
2541 	wm_start(ifp);
2542 
2543 	splx(s);
2544 	return error;
2545 }
2546 
2547 /*
2548  * wm_intr:
2549  *
2550  *	Interrupt service routine.
2551  */
2552 static int
2553 wm_intr(void *arg)
2554 {
2555 	struct wm_softc *sc = arg;
2556 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2557 	uint32_t icr;
2558 	int handled = 0;
2559 
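	/*
	 * Reading ICR acknowledges (clears) the asserted interrupt
	 * cause bits, so keep looping until no cause we care about
	 * remains set.
	 */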
2560 	while (1 /* CONSTCOND */) {
2561 		icr = CSR_READ(sc, WMREG_ICR);
2562 		if ((icr & sc->sc_icr) == 0)
2563 			break;
2564 #if 0 /*NRND > 0*/
2565 		if (RND_ENABLED(&sc->rnd_source))
2566 			rnd_add_uint32(&sc->rnd_source, icr);
2567 #endif
2568 
2569 		handled = 1;
2570 
2571 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2572 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
2573 			DPRINTF(WM_DEBUG_RX,
2574 			    ("%s: RX: got Rx intr 0x%08x\n",
2575 			    device_xname(sc->sc_dev),
2576 			    icr & (ICR_RXDMT0|ICR_RXT0)));
2577 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
2578 		}
2579 #endif
2580 		wm_rxintr(sc);
2581 
2582 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2583 		if (icr & ICR_TXDW) {
2584 			DPRINTF(WM_DEBUG_TX,
2585 			    ("%s: TX: got TXDW interrupt\n",
2586 			    device_xname(sc->sc_dev)));
2587 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
2588 		}
2589 #endif
2590 		wm_txintr(sc);
2591 
2592 		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
2593 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
2594 			wm_linkintr(sc, icr);
2595 		}
2596 
2597 		if (icr & ICR_RXO) {
2598 #if defined(WM_DEBUG)
2599 			log(LOG_WARNING, "%s: Receive overrun\n",
2600 			    device_xname(sc->sc_dev));
2601 #endif /* defined(WM_DEBUG) */
2602 		}
2603 	}
2604 
2605 	if (handled) {
2606 		/* Try to get more packets going. */
2607 		wm_start(ifp);
2608 	}
2609 
2610 	return handled;
2611 }
2612 
2613 /*
2614  * wm_txintr:
2615  *
2616  *	Helper; handle transmit interrupts.
2617  */
2618 static void
2619 wm_txintr(struct wm_softc *sc)
2620 {
2621 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2622 	struct wm_txsoft *txs;
2623 	uint8_t status;
2624 	int i;
2625 
2626 	ifp->if_flags &= ~IFF_OACTIVE;
2627 
2628 	/*
2629 	 * Go through the Tx list and free mbufs for those
2630 	 * frames which have been transmitted.
2631 	 */
2632 	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
2633 	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
2634 		txs = &sc->sc_txsoft[i];
2635 
2636 		DPRINTF(WM_DEBUG_TX,
2637 		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
2638 
2639 		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
2640 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2641 
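		/*
		 * The chip sets WTX_ST_DD in the last descriptor's status
		 * field once the packet has been sent (we requested the
		 * write-back with WTX_CMD_RS); if it isn't set yet,
		 * re-sync the descriptor and stop scanning.
		 */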
2642 		status =
2643 		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
2644 		if ((status & WTX_ST_DD) == 0) {
2645 			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
2646 			    BUS_DMASYNC_PREREAD);
2647 			break;
2648 		}
2649 
2650 		DPRINTF(WM_DEBUG_TX,
2651 		    ("%s: TX: job %d done: descs %d..%d\n",
2652 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
2653 		    txs->txs_lastdesc));
2654 
2655 		/*
2656 		 * XXX We should probably be using the statistics
2657 		 * XXX registers, but I don't know if they exist
2658 		 * XXX on chips before the i82544.
2659 		 */
2660 
2661 #ifdef WM_EVENT_COUNTERS
2662 		if (status & WTX_ST_TU)
2663 			WM_EVCNT_INCR(&sc->sc_ev_tu);
2664 #endif /* WM_EVENT_COUNTERS */
2665 
2666 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
2667 			ifp->if_oerrors++;
2668 			if (status & WTX_ST_LC)
2669 				log(LOG_WARNING, "%s: late collision\n",
2670 				    device_xname(sc->sc_dev));
2671 			else if (status & WTX_ST_EC) {
2672 				ifp->if_collisions += 16;
2673 				log(LOG_WARNING, "%s: excessive collisions\n",
2674 				    device_xname(sc->sc_dev));
2675 			}
2676 		} else
2677 			ifp->if_opackets++;
2678 
2679 		sc->sc_txfree += txs->txs_ndesc;
2680 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
2681 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2682 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2683 		m_freem(txs->txs_mbuf);
2684 		txs->txs_mbuf = NULL;
2685 	}
2686 
2687 	/* Update the dirty transmit buffer pointer. */
2688 	sc->sc_txsdirty = i;
2689 	DPRINTF(WM_DEBUG_TX,
2690 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
2691 
2692 	/*
2693 	 * If there are no more pending transmissions, cancel the watchdog
2694 	 * timer.
2695 	 */
2696 	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
2697 		ifp->if_timer = 0;
2698 }
2699 
2700 /*
2701  * wm_rxintr:
2702  *
2703  *	Helper; handle receive interrupts.
2704  */
2705 static void
2706 wm_rxintr(struct wm_softc *sc)
2707 {
2708 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2709 	struct wm_rxsoft *rxs;
2710 	struct mbuf *m;
2711 	int i, len;
2712 	uint8_t status, errors;
2713 	uint16_t vlantag;
2714 
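	/*
	 * Scan the ring from sc_rxptr until we reach a descriptor
	 * the chip still owns (WRX_ST_DD clear).
	 */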
2715 	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
2716 		rxs = &sc->sc_rxsoft[i];
2717 
2718 		DPRINTF(WM_DEBUG_RX,
2719 		    ("%s: RX: checking descriptor %d\n",
2720 		    device_xname(sc->sc_dev), i));
2721 
2722 		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2723 
2724 		status = sc->sc_rxdescs[i].wrx_status;
2725 		errors = sc->sc_rxdescs[i].wrx_errors;
2726 		len = le16toh(sc->sc_rxdescs[i].wrx_len);
2727 		vlantag = sc->sc_rxdescs[i].wrx_special;
2728 
2729 		if ((status & WRX_ST_DD) == 0) {
2730 			/*
2731 			 * We have processed all of the receive descriptors.
2732 			 */
2733 			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
2734 			break;
2735 		}
2736 
2737 		if (__predict_false(sc->sc_rxdiscard)) {
2738 			DPRINTF(WM_DEBUG_RX,
2739 			    ("%s: RX: discarding contents of descriptor %d\n",
2740 			    device_xname(sc->sc_dev), i));
2741 			WM_INIT_RXDESC(sc, i);
2742 			if (status & WRX_ST_EOP) {
2743 				/* Reset our state. */
2744 				DPRINTF(WM_DEBUG_RX,
2745 				    ("%s: RX: resetting rxdiscard -> 0\n",
2746 				    device_xname(sc->sc_dev)));
2747 				sc->sc_rxdiscard = 0;
2748 			}
2749 			continue;
2750 		}
2751 
2752 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2753 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2754 
2755 		m = rxs->rxs_mbuf;
2756 
2757 		/*
2758 		 * Add a new receive buffer to the ring, unless of
2759 		 * course the length is zero. Treat the latter as a
2760 		 * failed mapping.
2761 		 */
2762 		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
2763 			/*
2764 			 * Failed, throw away what we've done so
2765 			 * far, and discard the rest of the packet.
2766 			 */
2767 			ifp->if_ierrors++;
2768 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2769 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2770 			WM_INIT_RXDESC(sc, i);
2771 			if ((status & WRX_ST_EOP) == 0)
2772 				sc->sc_rxdiscard = 1;
2773 			if (sc->sc_rxhead != NULL)
2774 				m_freem(sc->sc_rxhead);
2775 			WM_RXCHAIN_RESET(sc);
2776 			DPRINTF(WM_DEBUG_RX,
2777 			    ("%s: RX: Rx buffer allocation failed, "
2778 			    "dropping packet%s\n", device_xname(sc->sc_dev),
2779 			    sc->sc_rxdiscard ? " (discard)" : ""));
2780 			continue;
2781 		}
2782 
2783 		m->m_len = len;
2784 		sc->sc_rxlen += len;
2785 		DPRINTF(WM_DEBUG_RX,
2786 		    ("%s: RX: buffer at %p len %d\n",
2787 		    device_xname(sc->sc_dev), m->m_data, len));
2788 
2789 		/*
2790 		 * If this is not the end of the packet, keep
2791 		 * looking.
2792 		 */
2793 		if ((status & WRX_ST_EOP) == 0) {
2794 			WM_RXCHAIN_LINK(sc, m);
2795 			DPRINTF(WM_DEBUG_RX,
2796 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
2797 			    device_xname(sc->sc_dev), sc->sc_rxlen));
2798 			continue;
2799 		}
2800 
2801 		/*
2802 		 * Okay, we have the entire packet now.  The chip is
2803 		 * configured to include the FCS (not all chips can
2804 		 * be configured to strip it), so we need to trim it.
2805 		 * May need to adjust length of previous mbuf in the
2806 		 * chain if the current mbuf is too short.
2807 		 */
2808 		if (m->m_len < ETHER_CRC_LEN) {
2809 			sc->sc_rxtail->m_len -= (ETHER_CRC_LEN - m->m_len);
2810 			m->m_len = 0;
2811 		} else {
2812 			m->m_len -= ETHER_CRC_LEN;
2813 		}
2814 		len = sc->sc_rxlen - ETHER_CRC_LEN;
2815 
2816 		WM_RXCHAIN_LINK(sc, m);
2817 
2818 		*sc->sc_rxtailp = NULL;
2819 		m = sc->sc_rxhead;
2820 
2821 		WM_RXCHAIN_RESET(sc);
2822 
2823 		DPRINTF(WM_DEBUG_RX,
2824 		    ("%s: RX: have entire packet, len -> %d\n",
2825 		    device_xname(sc->sc_dev), len));
2826 
2827 		/*
2828 		 * If an error occurred, update stats and drop the packet.
2829 		 */
2830 		if (errors &
2831 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
2832 			if (errors & WRX_ER_SE)
2833 				log(LOG_WARNING, "%s: symbol error\n",
2834 				    device_xname(sc->sc_dev));
2835 			else if (errors & WRX_ER_SEQ)
2836 				log(LOG_WARNING, "%s: receive sequence error\n",
2837 				    device_xname(sc->sc_dev));
2838 			else if (errors & WRX_ER_CE)
2839 				log(LOG_WARNING, "%s: CRC error\n",
2840 				    device_xname(sc->sc_dev));
2841 			m_freem(m);
2842 			continue;
2843 		}
2844 
2845 		/*
2846 		 * No errors.  Receive the packet.
2847 		 */
2848 		m->m_pkthdr.rcvif = ifp;
2849 		m->m_pkthdr.len = len;
2850 
2851 		/*
2852 		 * If VLANs are enabled, VLAN packets have been unwrapped
2853 		 * for us.  Associate the tag with the packet.
2854 		 */
2855 		if ((status & WRX_ST_VP) != 0) {
2856 			VLAN_INPUT_TAG(ifp, m,
2857 			    le16toh(vlantag),
2858 			    continue);
2859 		}
2860 
2861 		/*
2862 		 * Set up checksum info for this packet.
2863 		 */
2864 		if ((status & WRX_ST_IXSM) == 0) {
2865 			if (status & WRX_ST_IPCS) {
2866 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
2867 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2868 				if (errors & WRX_ER_IPE)
2869 					m->m_pkthdr.csum_flags |=
2870 					    M_CSUM_IPv4_BAD;
2871 			}
2872 			if (status & WRX_ST_TCPCS) {
2873 				/*
2874 				 * Note: we don't know if this was TCP or UDP,
2875 				 * so we just set both bits, and expect the
2876 				 * upper layers to deal.
2877 				 */
2878 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
2879 				m->m_pkthdr.csum_flags |=
2880 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
2881 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
2882 				if (errors & WRX_ER_TCPE)
2883 					m->m_pkthdr.csum_flags |=
2884 					    M_CSUM_TCP_UDP_BAD;
2885 			}
2886 		}
2887 
2888 		ifp->if_ipackets++;
2889 
2890 		/* Pass this up to any BPF listeners. */
2891 		if (ifp->if_bpf)
2892 			bpf_ops->bpf_mtap(ifp->if_bpf, m);
2893 
2894 		/* Pass it on. */
2895 		(*ifp->if_input)(ifp, m);
2896 	}
2897 
2898 	/* Update the receive pointer. */
2899 	sc->sc_rxptr = i;
2900 
2901 	DPRINTF(WM_DEBUG_RX,
2902 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
2903 }
2904 
2905 /*
2906  * wm_linkintr_gmii:
2907  *
2908  *	Helper; handle link interrupts for GMII.
2909  */
2910 static void
2911 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
2912 {
2913 
2914 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
2915 		__func__));
2916 
2917 	if (icr & ICR_LSC) {
2918 		DPRINTF(WM_DEBUG_LINK,
2919 		    ("%s: LINK: LSC -> mii_tick\n",
2920 			device_xname(sc->sc_dev)));
2921 		mii_tick(&sc->sc_mii);
2922 		if (sc->sc_type == WM_T_82543) {
2923 			int miistatus, active;
2924 
2925 			/*
2926 			 * With the 82543, we need to force the MAC's
2927 			 * speed and duplex to match the PHY's current
2928 			 * speed and duplex configuration.
2929 			 */
2930 			miistatus = sc->sc_mii.mii_media_status;
2931 
2932 			if (miistatus & IFM_ACTIVE) {
2933 				active = sc->sc_mii.mii_media_active;
2934 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
2935 				switch (IFM_SUBTYPE(active)) {
2936 				case IFM_10_T:
2937 					sc->sc_ctrl |= CTRL_SPEED_10;
2938 					break;
2939 				case IFM_100_TX:
2940 					sc->sc_ctrl |= CTRL_SPEED_100;
2941 					break;
2942 				case IFM_1000_T:
2943 					sc->sc_ctrl |= CTRL_SPEED_1000;
2944 					break;
2945 				default:
2946 					/*
2947 					 * Fiber?
2948 					 * Should not get here.
2949 					 */
2950 					printf("unknown media (%x)\n",
2951 					    active);
2952 					break;
2953 				}
2954 				if (active & IFM_FDX)
2955 					sc->sc_ctrl |= CTRL_FD;
2956 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2957 			}
2958 		} else if (sc->sc_type == WM_T_PCH) {
2959 			wm_k1_gig_workaround_hv(sc,
2960 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
2961 		}
2962 
2963 		if ((sc->sc_phytype == WMPHY_82578)
2964 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
2965 			== IFM_1000_T)) {
2966 
2967 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
2968 				delay(200*1000); /* XXX too big */
2969 
2970 				/* Link stall fix for link up */
2971 				wm_gmii_hv_writereg(sc->sc_dev, 1,
2972 				    HV_MUX_DATA_CTRL,
2973 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
2974 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
2975 				wm_gmii_hv_writereg(sc->sc_dev, 1,
2976 				    HV_MUX_DATA_CTRL,
2977 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
2978 			}
2979 		}
2980 	} else if (icr & ICR_RXSEQ) {
2981 		DPRINTF(WM_DEBUG_LINK,
2982 		    ("%s: LINK Receive sequence error\n",
2983 			device_xname(sc->sc_dev)));
2984 	}
2985 }
2986 
2987 /*
2988  * wm_linkintr_tbi:
2989  *
2990  *	Helper; handle link interrupts for TBI mode.
2991  */
2992 static void
2993 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
2994 {
2995 	uint32_t status;
2996 
2997 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
2998 		__func__));
2999 
3000 	status = CSR_READ(sc, WMREG_STATUS);
3001 	if (icr & ICR_LSC) {
3002 		if (status & STATUS_LU) {
3003 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
3004 			    device_xname(sc->sc_dev),
3005 			    (status & STATUS_FD) ? "FDX" : "HDX"));
3006 			/*
3007 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
3008 			 * automatically, so refresh sc->sc_ctrl from the chip.
3009 			 */
3010 
3011 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3012 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3013 			sc->sc_fcrtl &= ~FCRTL_XONE;
3014 			if (status & STATUS_FD)
3015 				sc->sc_tctl |=
3016 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3017 			else
3018 				sc->sc_tctl |=
3019 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3020 			if (sc->sc_ctrl & CTRL_TFCE)
3021 				sc->sc_fcrtl |= FCRTL_XONE;
3022 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3023 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3024 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
3025 				      sc->sc_fcrtl);
3026 			sc->sc_tbi_linkup = 1;
3027 		} else {
3028 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
3029 			    device_xname(sc->sc_dev)));
3030 			sc->sc_tbi_linkup = 0;
3031 		}
3032 		wm_tbi_set_linkled(sc);
3033 	} else if (icr & ICR_RXCFG) {
3034 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
3035 		    device_xname(sc->sc_dev)));
3036 		sc->sc_tbi_nrxcfg++;
3037 		wm_check_for_link(sc);
3038 	} else if (icr & ICR_RXSEQ) {
3039 		DPRINTF(WM_DEBUG_LINK,
3040 		    ("%s: LINK: Receive sequence error\n",
3041 		    device_xname(sc->sc_dev)));
3042 	}
3043 }
3044 
3045 /*
3046  * wm_linkintr:
3047  *
3048  *	Helper; handle link interrupts.
3049  */
3050 static void
3051 wm_linkintr(struct wm_softc *sc, uint32_t icr)
3052 {
3053 
3054 	if (sc->sc_flags & WM_F_HAS_MII)
3055 		wm_linkintr_gmii(sc, icr);
3056 	else
3057 		wm_linkintr_tbi(sc, icr);
3058 }
3059 
3060 /*
3061  * wm_tick:
3062  *
3063  *	One second timer, used to check link status, sweep up
3064  *	completed transmit jobs, etc.
3065  */
3066 static void
3067 wm_tick(void *arg)
3068 {
3069 	struct wm_softc *sc = arg;
3070 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3071 	int s;
3072 
3073 	s = splnet();
3074 
3075 	if (sc->sc_type >= WM_T_82542_2_1) {
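	/*
	 * The MAC's statistics registers are clear-on-read, so simply
	 * accumulate them into the event counters and interface stats.
	 */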
3076 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3077 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3078 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3079 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3080 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3081 	}
3082 
3083 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3084 	ifp->if_ierrors += 0ULL /* ensure quad_t */
3085 	    + CSR_READ(sc, WMREG_CRCERRS)
3086 	    + CSR_READ(sc, WMREG_ALGNERRC)
3087 	    + CSR_READ(sc, WMREG_SYMERRC)
3088 	    + CSR_READ(sc, WMREG_RXERRC)
3089 	    + CSR_READ(sc, WMREG_SEC)
3090 	    + CSR_READ(sc, WMREG_CEXTERR)
3091 	    + CSR_READ(sc, WMREG_RLEC);
3092 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
3093 
3094 	if (sc->sc_flags & WM_F_HAS_MII)
3095 		mii_tick(&sc->sc_mii);
3096 	else
3097 		wm_tbi_check_link(sc);
3098 
3099 	splx(s);
3100 
3101 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3102 }
3103 
3104 /*
3105  * wm_reset:
3106  *
3107  *	Reset the i82542 chip.
3108  */
3109 static void
3110 wm_reset(struct wm_softc *sc)
3111 {
3112 	int phy_reset = 0;
3113 	uint32_t reg, func, mask;
3114 	int i;
3115 
3116 	/*
3117 	 * Allocate on-chip memory according to the MTU size.
3118 	 * The Packet Buffer Allocation register must be written
3119 	 * before the chip is reset.
3120 	 */
3121 	switch (sc->sc_type) {
3122 	case WM_T_82547:
3123 	case WM_T_82547_2:
3124 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3125 		    PBA_22K : PBA_30K;
3126 		sc->sc_txfifo_head = 0;
3127 		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3128 		sc->sc_txfifo_size =
3129 		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3130 		sc->sc_txfifo_stall = 0;
3131 		break;
3132 	case WM_T_82571:
3133 	case WM_T_82572:
3134 	case WM_T_80003:
3135 		sc->sc_pba = PBA_32K;
3136 		break;
3137 	case WM_T_82573:
3138 		sc->sc_pba = PBA_12K;
3139 		break;
3140 	case WM_T_82574:
3141 	case WM_T_82583:
3142 		sc->sc_pba = PBA_20K;
3143 		break;
3144 	case WM_T_ICH8:
3145 		sc->sc_pba = PBA_8K;
3146 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3147 		break;
3148 	case WM_T_ICH9:
3149 	case WM_T_ICH10:
3150 	case WM_T_PCH:
3151 		sc->sc_pba = PBA_10K;
3152 		break;
3153 	default:
3154 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3155 		    PBA_40K : PBA_48K;
3156 		break;
3157 	}
3158 	CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3159 
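	/*
	 * On PCIe devices, request that the chip stop mastering the
	 * bus (GIO master disable) and wait for outstanding requests
	 * to drain before resetting.
	 */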
3160 	if (sc->sc_flags & WM_F_PCIE) {
3161 		int timeout = 800;
3162 
3163 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
3164 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3165 
3166 		while (timeout--) {
3167 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
3168 				break;
3169 			delay(100);
3170 		}
3171 	}
3172 
3173 	/* clear interrupt */
3174 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3175 
3176 	/* Stop the transmit and receive processes. */
3177 	CSR_WRITE(sc, WMREG_RCTL, 0);
3178 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3179 
3180 	/* set_tbi_sbp_82543() */
3181 
3182 	delay(10*1000);
3183 
3184 	/* Must acquire the MDIO ownership before MAC reset */
3185 	switch (sc->sc_type) {
3186 	case WM_T_82573:
3187 	case WM_T_82574:
3188 	case WM_T_82583:
3189 		i = 0;
3190 		reg = CSR_READ(sc, WMREG_EXTCNFCTR)
3191 		    | EXTCNFCTR_MDIO_SW_OWNERSHIP;
3192 		do {
3193 			CSR_WRITE(sc, WMREG_EXTCNFCTR,
3194 			    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
3195 			reg = CSR_READ(sc, WMREG_EXTCNFCTR);
3196 			if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
3197 				break;
3198 			reg |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
3199 			delay(2*1000);
3200 			i++;
3201 		} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
3202 		break;
3203 	default:
3204 		break;
3205 	}
3206 
3207 	/*
3208 	 * 82541 Errata 29? & 82547 Errata 28?
3209 	 * See also the description of the PHY_RST bit in the CTRL register
3210 	 * in 8254x_GBe_SDM.pdf.
3211 	 */
3212 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3213 		CSR_WRITE(sc, WMREG_CTRL,
3214 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3215 		delay(5000);
3216 	}
3217 
3218 	switch (sc->sc_type) {
3219 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3220 	case WM_T_82541:
3221 	case WM_T_82541_2:
3222 	case WM_T_82547:
3223 	case WM_T_82547_2:
3224 		/*
3225 		 * On some chipsets, a reset through a memory-mapped write
3226 		 * cycle can cause the chip to reset before completing the
3227 		 * write cycle.  This causes major headaches that can be
3228 		 * avoided by issuing the reset via indirect register writes
3229 		 * through I/O space.
3230 		 *
3231 		 * So, if we successfully mapped the I/O BAR at attach time,
3232 		 * use that.  Otherwise, try our luck with a memory-mapped
3233 		 * reset.
3234 		 */
3235 		if (sc->sc_flags & WM_F_IOH_VALID)
3236 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3237 		else
3238 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3239 		break;
3240 	case WM_T_82545_3:
3241 	case WM_T_82546_3:
3242 		/* Use the shadow control register on these chips. */
3243 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3244 		break;
3245 	case WM_T_80003:
3246 		func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
3247 		mask = func ? SWFW_PHY1_SM : SWFW_PHY0_SM;
3248 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3249 		wm_get_swfw_semaphore(sc, mask);
3250 		CSR_WRITE(sc, WMREG_CTRL, reg);
3251 		wm_put_swfw_semaphore(sc, mask);
3252 		break;
3253 	case WM_T_ICH8:
3254 	case WM_T_ICH9:
3255 	case WM_T_ICH10:
3256 	case WM_T_PCH:
3257 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3258 		if (wm_check_reset_block(sc) == 0) {
3259 			if (sc->sc_type >= WM_T_PCH) {
3260 				uint32_t status;
3261 
3262 				status = CSR_READ(sc, WMREG_STATUS);
3263 				CSR_WRITE(sc, WMREG_STATUS,
3264 				    status & ~STATUS_PHYRA);
3265 			}
3266 
3267 			reg |= CTRL_PHY_RESET;
3268 			phy_reset = 1;
3269 		}
3270 		wm_get_swfwhw_semaphore(sc);
3271 		CSR_WRITE(sc, WMREG_CTRL, reg);
3272 		delay(20*1000);
3273 		wm_put_swfwhw_semaphore(sc);
3274 		break;
3275 	case WM_T_82542_2_0:
3276 	case WM_T_82542_2_1:
3277 	case WM_T_82543:
3278 	case WM_T_82540:
3279 	case WM_T_82545:
3280 	case WM_T_82546:
3281 	case WM_T_82571:
3282 	case WM_T_82572:
3283 	case WM_T_82573:
3284 	case WM_T_82574:
3285 	case WM_T_82583:
3286 	default:
3287 		/* Everything else can safely use the documented method. */
3288 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3289 		break;
3290 	}
3291 
3292 	if (phy_reset != 0)
3293 		wm_get_cfg_done(sc);
3294 
3295 	/* reload EEPROM */
3296 	switch (sc->sc_type) {
3297 	case WM_T_82542_2_0:
3298 	case WM_T_82542_2_1:
3299 	case WM_T_82543:
3300 	case WM_T_82544:
3301 		delay(10);
3302 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3303 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3304 		delay(2000);
3305 		break;
3306 	case WM_T_82540:
3307 	case WM_T_82545:
3308 	case WM_T_82545_3:
3309 	case WM_T_82546:
3310 	case WM_T_82546_3:
3311 		delay(5*1000);
3312 		/* XXX Disable HW ARPs on ASF enabled adapters */
3313 		break;
3314 	case WM_T_82541:
3315 	case WM_T_82541_2:
3316 	case WM_T_82547:
3317 	case WM_T_82547_2:
3318 		delay(20000);
3319 		/* XXX Disable HW ARPs on ASF enabled adapters */
3320 		break;
3321 	case WM_T_82571:
3322 	case WM_T_82572:
3323 	case WM_T_82573:
3324 	case WM_T_82574:
3325 	case WM_T_82583:
3326 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3327 			delay(10);
3328 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3329 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3330 		}
3331 		/* check EECD_EE_AUTORD */
3332 		wm_get_auto_rd_done(sc);
3333 		/*
3334 		 * PHY configuration from the NVM starts only after
3335 		 * EECD_AUTO_RD is set.
3336 		 */
3337 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3338 		    || (sc->sc_type == WM_T_82583))
3339 			delay(25*1000);
3340 		break;
3341 	case WM_T_80003:
3342 	case WM_T_ICH8:
3343 	case WM_T_ICH9:
3344 		/* check EECD_EE_AUTORD */
3345 		wm_get_auto_rd_done(sc);
3346 		break;
3347 	case WM_T_ICH10:
3348 	case WM_T_PCH:
3349 		wm_lan_init_done(sc);
3350 		break;
3351 	default:
3352 		panic("%s: unknown type\n", __func__);
3353 	}
3354 
3355 	/* reload sc_ctrl */
3356 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3357 
3358 	/* dummy read from WUC */
3359 	if (sc->sc_type == WM_T_PCH)
3360 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3361 	/*
3362 	 * For PCH, this write ensures that any line noise is detected as
3363 	 * a CRC error and dropped, rather than handed to the DMA engine
3364 	 * as a bad packet.
3365 	 */
3366 	if (sc->sc_type == WM_T_PCH)
3367 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3368 
3369 #if 0
3370 	for (i = 0; i < 1000; i++) {
3371 		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0) {
3372 			return;
3373 		}
3374 		delay(20);
3375 	}
3376 
3377 	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
3378 		log(LOG_ERR, "%s: reset failed to complete\n",
3379 		    device_xname(sc->sc_dev));
3380 #endif
3381 }
3382 
3383 /*
3384  * wm_init:		[ifnet interface function]
3385  *
3386  *	Initialize the interface.  Must be called at splnet().
3387  */
3388 static int
3389 wm_init(struct ifnet *ifp)
3390 {
3391 	struct wm_softc *sc = ifp->if_softc;
3392 	struct wm_rxsoft *rxs;
3393 	int i, error = 0;
3394 	uint32_t reg;
3395 
3396 	/*
3397 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3398 	 * There is a small but measurable benefit to avoiding the adjustment
3399 	 * of the descriptor so that the headers are aligned, for a normal MTU,
3400 	 * on such platforms.  One possibility is that the DMA itself is
3401 	 * slightly more efficient if the front of the entire packet (instead
3402 	 * of the front of the headers) is aligned.
3403 	 *
3404 	 * Note we must always set align_tweak to 0 if we are using
3405 	 * jumbo frames.
3406 	 */
3407 #ifdef __NO_STRICT_ALIGNMENT
3408 	sc->sc_align_tweak = 0;
3409 #else
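	/*
	 * Use the 2-byte tweak only when a full frame (MTU + header +
	 * CRC) still fits in a single cluster after the shift.
	 */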
3410 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3411 		sc->sc_align_tweak = 0;
3412 	else
3413 		sc->sc_align_tweak = 2;
3414 #endif /* __NO_STRICT_ALIGNMENT */
3415 
3416 	/* Cancel any pending I/O. */
3417 	wm_stop(ifp, 0);
3418 
3419 	/* update statistics before reset */
3420 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3421 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3422 
3423 	/* Reset the chip to a known state. */
3424 	wm_reset(sc);
3425 
3426 	switch (sc->sc_type) {
3427 	case WM_T_82571:
3428 	case WM_T_82572:
3429 	case WM_T_82573:
3430 	case WM_T_82574:
3431 	case WM_T_82583:
3432 	case WM_T_80003:
3433 	case WM_T_ICH8:
3434 	case WM_T_ICH9:
3435 	case WM_T_ICH10:
3436 	case WM_T_PCH:
3437 		if (wm_check_mng_mode(sc) != 0)
3438 			wm_get_hw_control(sc);
3439 		break;
3440 	default:
3441 		break;
3442 	}
3443 
3444 	/* Reset the PHY. */
3445 	if (sc->sc_flags & WM_F_HAS_MII)
3446 		wm_gmii_reset(sc);
3447 
3448 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
3449 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
3450 	if (sc->sc_type == WM_T_PCH)
3451 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
3452 
3453 	/* Initialize the transmit descriptor ring. */
3454 	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3455 	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3456 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3457 	sc->sc_txfree = WM_NTXDESC(sc);
3458 	sc->sc_txnext = 0;
3459 
3460 	if (sc->sc_type < WM_T_82543) {
3461 		CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
3462 		CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
3463 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3464 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3465 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3466 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3467 	} else {
3468 		CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
3469 		CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
3470 		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3471 		CSR_WRITE(sc, WMREG_TDH, 0);
3472 		CSR_WRITE(sc, WMREG_TDT, 0);
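		/*
		 * TIDV/TADV tick in 1024ns units, so 375 * 1024ns equals
		 * the 1500 * 256ns ITR interval programmed later below.
		 */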
3473 		CSR_WRITE(sc, WMREG_TIDV, 375);		/* ITR / 4 */
3474 		CSR_WRITE(sc, WMREG_TADV, 375);		/* should be same */
3475 
3476 		CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3477 		    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3478 		CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3479 		    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3480 	}
3481 	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3482 	CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3483 
3484 	/* Initialize the transmit job descriptors. */
3485 	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3486 		sc->sc_txsoft[i].txs_mbuf = NULL;
3487 	sc->sc_txsfree = WM_TXQUEUELEN(sc);
3488 	sc->sc_txsnext = 0;
3489 	sc->sc_txsdirty = 0;
3490 
3491 	/*
3492 	 * Initialize the receive descriptor and receive job
3493 	 * descriptor rings.
3494 	 */
3495 	if (sc->sc_type < WM_T_82543) {
3496 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3497 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3498 		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3499 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3500 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3501 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3502 
3503 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3504 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3505 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3506 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3507 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3508 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3509 	} else {
3510 		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3511 		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3512 		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3513 		CSR_WRITE(sc, WMREG_RDH, 0);
3514 		CSR_WRITE(sc, WMREG_RDT, 0);
3515 		CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD);	/* ITR/4 */
3516 		CSR_WRITE(sc, WMREG_RADV, 375);		/* MUST be same */
3517 	}
3518 	for (i = 0; i < WM_NRXDESC; i++) {
3519 		rxs = &sc->sc_rxsoft[i];
3520 		if (rxs->rxs_mbuf == NULL) {
3521 			if ((error = wm_add_rxbuf(sc, i)) != 0) {
3522 				log(LOG_ERR, "%s: unable to allocate or map rx "
3523 				    "buffer %d, error = %d\n",
3524 				    device_xname(sc->sc_dev), i, error);
3525 				/*
3526 				 * XXX Should attempt to run with fewer receive
3527 				 * XXX buffers instead of just failing.
3528 				 */
3529 				wm_rxdrain(sc);
3530 				goto out;
3531 			}
3532 		} else
3533 			WM_INIT_RXDESC(sc, i);
3534 	}
3535 	sc->sc_rxptr = 0;
3536 	sc->sc_rxdiscard = 0;
3537 	WM_RXCHAIN_RESET(sc);
3538 
3539 	/*
3540 	 * Clear out the VLAN table -- we don't use it (yet).
3541 	 */
3542 	CSR_WRITE(sc, WMREG_VET, 0);
3543 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
3544 		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3545 
3546 	/*
3547 	 * Set up flow-control parameters.
3548 	 *
3549 	 * XXX Values could probably stand some tuning.
3550 	 */
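	/*
	 * FCAL/FCAH hold the reserved PAUSE multicast address
	 * (01:80:c2:00:00:01) and FCT the flow-control ethertype
	 * (0x8808) used to recognize 802.3x PAUSE frames.
	 */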
3551 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
3552 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
3553 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3554 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3555 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3556 	}
3557 
3558 	sc->sc_fcrtl = FCRTL_DFLT;
3559 	if (sc->sc_type < WM_T_82543) {
3560 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3561 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3562 	} else {
3563 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3564 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3565 	}
3566 
3567 	if (sc->sc_type == WM_T_80003)
3568 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
3569 	else
3570 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3571 
3572 	/* Deal with VLAN enables. */
3573 	if (VLAN_ATTACHED(&sc->sc_ethercom))
3574 		sc->sc_ctrl |= CTRL_VME;
3575 	else
3576 		sc->sc_ctrl &= ~CTRL_VME;
3577 
3578 	/* Write the control registers. */
3579 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3580 
3581 	if (sc->sc_flags & WM_F_HAS_MII) {
3582 		int val;
3583 
3584 		switch (sc->sc_type) {
3585 		case WM_T_80003:
3586 		case WM_T_ICH8:
3587 		case WM_T_ICH9:
3588 		case WM_T_ICH10:
3589 		case WM_T_PCH:
3590 			/*
3591 			 * Set the MAC to wait the maximum time between each
3592 			 * iteration and increase the max iterations when
3593 			 * polling the PHY; this fixes erroneous timeouts at
3594 			 * 10Mbps.
3595 			 */
3596 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
3597 			    0xFFFF);
3598 			val = wm_kmrn_readreg(sc,
3599 			    KUMCTRLSTA_OFFSET_INB_PARAM);
3600 			val |= 0x3F;
3601 			wm_kmrn_writereg(sc,
3602 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
3603 			break;
3604 		default:
3605 			break;
3606 		}
3607 
3608 		if (sc->sc_type == WM_T_80003) {
3609 			val = CSR_READ(sc, WMREG_CTRL_EXT);
3610 			val &= ~CTRL_EXT_LINK_MODE_MASK;
3611 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3612 
3613 			/* Bypass RX and TX FIFO's */
3614 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3615 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
3616 			    KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3617 
3618 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3619 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3620 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3621 		}
3622 	}
3623 #if 0
3624 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3625 #endif
3626 
3627 	/*
3628 	 * Set up checksum offload parameters.
3629 	 */
3630 	reg = CSR_READ(sc, WMREG_RXCSUM);
3631 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3632 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3633 		reg |= RXCSUM_IPOFL;
3634 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3635 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3636 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3637 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3638 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
3639 
3640 	/* Reset TBI's RXCFG count */
3641 	sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
3642 
3643 	/*
3644 	 * Set up the interrupt registers.
3645 	 */
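	/* IMC first masks all sources; IMS then enables only sc_icr. */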
3646 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3647 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3648 	    ICR_RXO | ICR_RXT0;
3649 	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
3650 		sc->sc_icr |= ICR_RXCFG;
3651 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
3652 
3653 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3654 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)) {
3655 		reg = CSR_READ(sc, WMREG_KABGTXD);
3656 		reg |= KABGTXD_BGSQLBIAS;
3657 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
3658 	}
3659 
3660 	/* Set up the inter-packet gap. */
3661 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
3662 
3663 	if (sc->sc_type >= WM_T_82543) {
3664 		/*
3665 		 * Set up the interrupt throttling register (units of 256ns)
3666 		 * Note that a footnote in Intel's documentation says this
3667 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
3668 		 * or 10Mbit mode.  Empirically, it appears to be the case
3669 	 * that this is also true for the 1024ns units of the other
3670 		 * interrupt-related timer registers -- so, really, we ought
3671 		 * to divide this value by 4 when the link speed is low.
3672 		 *
3673 		 * XXX implement this division at link speed change!
3674 		 */
3675 
3676 		/*
3677 		 * For N interrupts/sec, set this value to:
3678 		 * 1000000000 / (N * 256).  Note that we set the
3679 		 * absolute and packet timer values to this value
3680 		 * divided by 4 to get "simple timer" behavior.
3681 		 */
3682 
3683 		sc->sc_itr = 1500;		/* 2604 ints/sec */
3684 		CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
3685 	}
3686 
3687 	/* Set the VLAN ethernetype. */
3688 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
3689 
3690 	/*
3691 	 * Set up the transmit control register; we start out with
3692 	 * a collision distance suitable for FDX, but update it when
3693 	 * we resolve the media type.
3694 	 */
3695 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
3696 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
3697 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3698 	if (sc->sc_type >= WM_T_82571)
3699 		sc->sc_tctl |= TCTL_MULR;
3700 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3701 
3702 	if (sc->sc_type == WM_T_80003) {
3703 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
3704 		reg &= ~TCTL_EXT_GCEX_MASK;
3705 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
3706 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
3707 	}
3708 
3709 	/* Set the media. */
3710 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
3711 		goto out;
3712 
3713 	/*
3714 	 * Set up the receive control register; we actually program
3715 	 * the register when we set the receive filter.  Use multicast
3716 	 * address offset type 0.
3717 	 *
3718 	 * Only the i82544 has the ability to strip the incoming
3719 	 * CRC, so we don't enable that feature.
3720 	 */
3721 	sc->sc_mchash_type = 0;
3722 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
3723 	    | RCTL_MO(sc->sc_mchash_type);
3724 
3725 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
3726 	    && (ifp->if_mtu > ETHERMTU))
3727 		sc->sc_rctl |= RCTL_LPE;
3728 
3729 	if (MCLBYTES == 2048) {
3730 		sc->sc_rctl |= RCTL_2k;
3731 	} else {
3732 		if (sc->sc_type >= WM_T_82543) {
3733 			switch (MCLBYTES) {
3734 			case 4096:
3735 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
3736 				break;
3737 			case 8192:
3738 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
3739 				break;
3740 			case 16384:
3741 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
3742 				break;
3743 			default:
3744 				panic("wm_init: MCLBYTES %d unsupported",
3745 				    MCLBYTES);
3746 				break;
3747 			}
3748 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
3749 	}
3750 
3751 	/* Set the receive filter. */
3752 	wm_set_filter(sc);
3753 
3754 	/* Start the one second link check clock. */
3755 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3756 
3757 	/* ...all done! */
3758 	ifp->if_flags |= IFF_RUNNING;
3759 	ifp->if_flags &= ~IFF_OACTIVE;
3760 
3761  out:
3762 	if (error)
3763 		log(LOG_ERR, "%s: interface not running\n",
3764 		    device_xname(sc->sc_dev));
3765 	return error;
3766 }
3767 
3768 /*
3769  * wm_rxdrain:
3770  *
3771  *	Drain the receive queue.
3772  */
3773 static void
3774 wm_rxdrain(struct wm_softc *sc)
3775 {
3776 	struct wm_rxsoft *rxs;
3777 	int i;
3778 
3779 	for (i = 0; i < WM_NRXDESC; i++) {
3780 		rxs = &sc->sc_rxsoft[i];
3781 		if (rxs->rxs_mbuf != NULL) {
3782 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3783 			m_freem(rxs->rxs_mbuf);
3784 			rxs->rxs_mbuf = NULL;
3785 		}
3786 	}
3787 }
3788 
3789 /*
3790  * wm_stop:		[ifnet interface function]
3791  *
3792  *	Stop transmission on the interface.
3793  */
3794 static void
3795 wm_stop(struct ifnet *ifp, int disable)
3796 {
3797 	struct wm_softc *sc = ifp->if_softc;
3798 	struct wm_txsoft *txs;
3799 	int i;
3800 
3801 	/* Stop the one second clock. */
3802 	callout_stop(&sc->sc_tick_ch);
3803 
3804 	/* Stop the 82547 Tx FIFO stall check timer. */
3805 	if (sc->sc_type == WM_T_82547)
3806 		callout_stop(&sc->sc_txfifo_ch);
3807 
3808 	if (sc->sc_flags & WM_F_HAS_MII) {
3809 		/* Down the MII. */
3810 		mii_down(&sc->sc_mii);
3811 	} else {
3812 #if 0
3813 		/* Should we clear PHY's status properly? */
3814 		wm_reset(sc);
3815 #endif
3816 	}
3817 
3818 	/* Stop the transmit and receive processes. */
3819 	CSR_WRITE(sc, WMREG_TCTL, 0);
3820 	CSR_WRITE(sc, WMREG_RCTL, 0);
3821 
3822 	/*
3823 	 * Clear the interrupt mask to ensure the device cannot assert its
3824 	 * interrupt line.
3825 	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
3826 	 * any currently pending or shared interrupt.
3827 	 */
3828 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3829 	sc->sc_icr = 0;
3830 
3831 	/* Release any queued transmit buffers. */
3832 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
3833 		txs = &sc->sc_txsoft[i];
3834 		if (txs->txs_mbuf != NULL) {
3835 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3836 			m_freem(txs->txs_mbuf);
3837 			txs->txs_mbuf = NULL;
3838 		}
3839 	}
3840 
3841 	/* Mark the interface as down and cancel the watchdog timer. */
3842 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3843 	ifp->if_timer = 0;
3844 
3845 	if (disable)
3846 		wm_rxdrain(sc);
3847 }
3848 
3849 void
3850 wm_get_auto_rd_done(struct wm_softc *sc)
3851 {
3852 	int i;
3853 
3854 	/* wait for eeprom to reload */
3855 	switch (sc->sc_type) {
3856 	case WM_T_82571:
3857 	case WM_T_82572:
3858 	case WM_T_82573:
3859 	case WM_T_82574:
3860 	case WM_T_82583:
3861 	case WM_T_80003:
3862 	case WM_T_ICH8:
3863 	case WM_T_ICH9:
3864 		for (i = 0; i < 10; i++) {
3865 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3866 				break;
3867 			delay(1000);
3868 		}
3869 		if (i == 10) {
3870 			log(LOG_ERR, "%s: auto read from eeprom failed to "
3871 			    "complete\n", device_xname(sc->sc_dev));
3872 		}
3873 		break;
3874 	default:
3875 		break;
3876 	}
3877 }
3878 
3879 void
3880 wm_lan_init_done(struct wm_softc *sc)
3881 {
3882 	uint32_t reg = 0;
3883 	int i;
3884 
3885 	/* wait for eeprom to reload */
3886 	switch (sc->sc_type) {
3887 	case WM_T_ICH10:
3888 	case WM_T_PCH:
3889 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3890 			reg = CSR_READ(sc, WMREG_STATUS);
3891 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
3892 				break;
3893 			delay(100);
3894 		}
3895 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3896 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
3897 			    "complete\n", device_xname(sc->sc_dev), __func__);
3898 		}
3899 		break;
3900 	default:
3901 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3902 		    __func__);
3903 		break;
3904 	}
3905 
3906 	reg &= ~STATUS_LAN_INIT_DONE;
3907 	CSR_WRITE(sc, WMREG_STATUS, reg);
3908 }
3909 
3910 void
3911 wm_get_cfg_done(struct wm_softc *sc)
3912 {
3913 	int func = 0;
3914 	int mask;
3915 	uint32_t reg;
3916 	int i;
3917 
3918 	/* wait for eeprom to reload */
3919 	switch (sc->sc_type) {
3920 	case WM_T_82542_2_0:
3921 	case WM_T_82542_2_1:
3922 		/* null */
3923 		break;
3924 	case WM_T_82543:
3925 	case WM_T_82544:
3926 	case WM_T_82540:
3927 	case WM_T_82545:
3928 	case WM_T_82545_3:
3929 	case WM_T_82546:
3930 	case WM_T_82546_3:
3931 	case WM_T_82541:
3932 	case WM_T_82541_2:
3933 	case WM_T_82547:
3934 	case WM_T_82547_2:
3935 	case WM_T_82573:
3936 	case WM_T_82574:
3937 	case WM_T_82583:
3938 		/* generic */
3939 		delay(10*1000);
3940 		break;
3941 	case WM_T_80003:
3942 	case WM_T_82571:
3943 	case WM_T_82572:
3944 		if (sc->sc_type == WM_T_80003)
3945 			func = (CSR_READ(sc, WMREG_STATUS)
3946 			    >> STATUS_FUNCID_SHIFT) & 1;
3947 		else
3948 			func = 0; /* XXX Is it true for 82571? */
3949 		mask = (func == 1) ? EEMNGCTL_CFGDONE_1 : EEMNGCTL_CFGDONE_0;
3950 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3951 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3952 				break;
3953 			delay(1000);
3954 		}
3955 		if (i >= WM_PHY_CFG_TIMEOUT) {
3956 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3957 				device_xname(sc->sc_dev), __func__));
3958 		}
3959 		break;
3960 	case WM_T_ICH8:
3961 	case WM_T_ICH9:
3962 	case WM_T_ICH10:
3963 	case WM_T_PCH:
3964 		if (sc->sc_type >= WM_T_PCH) {
3965 			reg = CSR_READ(sc, WMREG_STATUS);
3966 			if ((reg & STATUS_PHYRA) != 0)
3967 				CSR_WRITE(sc, WMREG_STATUS,
3968 				    reg & ~STATUS_PHYRA);
3969 		}
3970 		delay(10*1000);
3971 		break;
3972 	default:
3973 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3974 		    __func__);
3975 		break;
3976 	}
3977 }
3978 
3979 /*
3980  * wm_acquire_eeprom:
3981  *
3982  *	Perform the EEPROM handshake required on some chips.
3983  */
3984 static int
3985 wm_acquire_eeprom(struct wm_softc *sc)
3986 {
3987 	uint32_t reg;
3988 	int x;
3989 	int ret = 0;
3990 
3991 	/* Flash NVM needs no handshake; always succeeds */
3992 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3993 		return 0;
3994 
3995 	if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
3996 		ret = wm_get_swfwhw_semaphore(sc);
3997 	} else if (sc->sc_flags & WM_F_SWFW_SYNC) {
3998 		/* this will also do wm_get_swsm_semaphore() if needed */
3999 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
4000 	} else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
4001 		ret = wm_get_swsm_semaphore(sc);
4002 	}
4003 
4004 	if (ret) {
4005 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4006 			__func__);
4007 		return 1;
4008 	}
4009 
4010 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE)  {
4011 		reg = CSR_READ(sc, WMREG_EECD);
4012 
4013 		/* Request EEPROM access. */
4014 		reg |= EECD_EE_REQ;
4015 		CSR_WRITE(sc, WMREG_EECD, reg);
4016 
4017 		/* ... and wait for it to be granted. */
4018 		for (x = 0; x < 1000; x++) {
4019 			reg = CSR_READ(sc, WMREG_EECD);
4020 			if (reg & EECD_EE_GNT)
4021 				break;
4022 			delay(5);
4023 		}
4024 		if ((reg & EECD_EE_GNT) == 0) {
4025 			aprint_error_dev(sc->sc_dev,
4026 			    "could not acquire EEPROM GNT\n");
4027 			reg &= ~EECD_EE_REQ;
4028 			CSR_WRITE(sc, WMREG_EECD, reg);
4029 			if (sc->sc_flags & WM_F_SWFWHW_SYNC)
4030 				wm_put_swfwhw_semaphore(sc);
4031 			if (sc->sc_flags & WM_F_SWFW_SYNC)
4032 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
4033 			else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4034 				wm_put_swsm_semaphore(sc);
4035 			return 1;
4036 		}
4037 	}
4038 
4039 	return 0;
4040 }
4041 
4042 /*
4043  * wm_release_eeprom:
4044  *
4045  *	Release the EEPROM mutex.
4046  */
4047 static void
4048 wm_release_eeprom(struct wm_softc *sc)
4049 {
4050 	uint32_t reg;
4051 
4052 	/* Flash NVM needs no handshake; nothing to release */
4053 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
4054 		return;
4055 
4056 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
4057 		reg = CSR_READ(sc, WMREG_EECD);
4058 		reg &= ~EECD_EE_REQ;
4059 		CSR_WRITE(sc, WMREG_EECD, reg);
4060 	}
4061 
4062 	if (sc->sc_flags & WM_F_SWFWHW_SYNC)
4063 		wm_put_swfwhw_semaphore(sc);
4064 	if (sc->sc_flags & WM_F_SWFW_SYNC)
4065 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
4066 	else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4067 		wm_put_swsm_semaphore(sc);
4068 }
4069 
4070 /*
4071  * wm_eeprom_sendbits:
4072  *
4073  *	Send a series of bits to the EEPROM.
4074  */
4075 static void
4076 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
4077 {
4078 	uint32_t reg;
4079 	int x;
4080 
4081 	reg = CSR_READ(sc, WMREG_EECD);
4082 
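	/* Shift the bits out MSB-first, pulsing SK once per bit. */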
4083 	for (x = nbits; x > 0; x--) {
4084 		if (bits & (1U << (x - 1)))
4085 			reg |= EECD_DI;
4086 		else
4087 			reg &= ~EECD_DI;
4088 		CSR_WRITE(sc, WMREG_EECD, reg);
4089 		delay(2);
4090 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4091 		delay(2);
4092 		CSR_WRITE(sc, WMREG_EECD, reg);
4093 		delay(2);
4094 	}
4095 }
4096 
4097 /*
4098  * wm_eeprom_recvbits:
4099  *
4100  *	Receive a series of bits from the EEPROM.
4101  */
4102 static void
4103 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
4104 {
4105 	uint32_t reg, val;
4106 	int x;
4107 
4108 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
4109 
4110 	val = 0;
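	/* Clock the bits in MSB-first; DO is sampled while SK is high. */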
4111 	for (x = nbits; x > 0; x--) {
4112 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4113 		delay(2);
4114 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
4115 			val |= (1U << (x - 1));
4116 		CSR_WRITE(sc, WMREG_EECD, reg);
4117 		delay(2);
4118 	}
4119 	*valp = val;
4120 }
4121 
4122 /*
4123  * wm_read_eeprom_uwire:
4124  *
4125  *	Read a word from the EEPROM using the MicroWire protocol.
4126  */
4127 static int
4128 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4129 {
4130 	uint32_t reg, val;
4131 	int i;
4132 
4133 	for (i = 0; i < wordcnt; i++) {
4134 		/* Clear SK and DI. */
4135 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
4136 		CSR_WRITE(sc, WMREG_EECD, reg);
4137 
4138 		/* Set CHIP SELECT. */
4139 		reg |= EECD_CS;
4140 		CSR_WRITE(sc, WMREG_EECD, reg);
4141 		delay(2);
4142 
4143 		/* Shift in the READ command. */
4144 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
4145 
4146 		/* Shift in address. */
4147 		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
4148 
4149 		/* Shift out the data. */
4150 		wm_eeprom_recvbits(sc, &val, 16);
4151 		data[i] = val & 0xffff;
4152 
4153 		/* Clear CHIP SELECT. */
4154 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
4155 		CSR_WRITE(sc, WMREG_EECD, reg);
4156 		delay(2);
4157 	}
4158 
4159 	return 0;
4160 }
4161 
4162 /*
4163  * wm_spi_eeprom_ready:
4164  *
4165  *	Wait for a SPI EEPROM to be ready for commands.
4166  */
4167 static int
4168 wm_spi_eeprom_ready(struct wm_softc *sc)
4169 {
4170 	uint32_t val;
4171 	int usec;
4172 
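	/* Issue RDSR until the SPI_SR_RDY (busy) bit clears. */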
4173 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
4174 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
4175 		wm_eeprom_recvbits(sc, &val, 8);
4176 		if ((val & SPI_SR_RDY) == 0)
4177 			break;
4178 	}
4179 	if (usec >= SPI_MAX_RETRIES) {
4180 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
4181 		return 1;
4182 	}
4183 	return 0;
4184 }
4185 
4186 /*
4187  * wm_read_eeprom_spi:
4188  *
4189  *	Read a word from the EEPROM using the SPI protocol.
4190  */
4191 static int
4192 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4193 {
4194 	uint32_t reg, val;
4195 	int i;
4196 	uint8_t opc;
4197 
4198 	/* Clear SK and CS. */
4199 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
4200 	CSR_WRITE(sc, WMREG_EECD, reg);
4201 	delay(2);
4202 
4203 	if (wm_spi_eeprom_ready(sc))
4204 		return 1;
4205 
4206 	/* Toggle CS to flush commands. */
4207 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
4208 	delay(2);
4209 	CSR_WRITE(sc, WMREG_EECD, reg);
4210 	delay(2);
4211 
4212 	opc = SPI_OPC_READ;
4213 	if (sc->sc_ee_addrbits == 8 && word >= 128)
4214 		opc |= SPI_OPC_A8;
4215 
4216 	wm_eeprom_sendbits(sc, opc, 8);
4217 	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
4218 
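	/* Each 16-bit word arrives high byte first; swap the bytes. */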
4219 	for (i = 0; i < wordcnt; i++) {
4220 		wm_eeprom_recvbits(sc, &val, 16);
4221 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
4222 	}
4223 
4224 	/* Raise CS and clear SK. */
4225 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
4226 	CSR_WRITE(sc, WMREG_EECD, reg);
4227 	delay(2);
4228 
4229 	return 0;
4230 }
4231 
4232 #define EEPROM_CHECKSUM		0xBABA
4233 #define EEPROM_SIZE		0x0040
4234 
4235 /*
4236  * wm_validate_eeprom_checksum
4237  *
4238  * The checksum is defined as the sum of the first 64 16-bit words,
 * including the stored checksum word; the sum must equal 0xBABA.
4239  */
4240 static int
4241 wm_validate_eeprom_checksum(struct wm_softc *sc)
4242 {
4243 	uint16_t checksum;
4244 	uint16_t eeprom_data;
4245 	int i;
4246 
4247 	checksum = 0;
4248 
4249 	for (i = 0; i < EEPROM_SIZE; i++) {
4250 		if (wm_read_eeprom(sc, i, 1, &eeprom_data))
4251 			return 1;
4252 		checksum += eeprom_data;
4253 	}
4254 
4255 	if (checksum != (uint16_t) EEPROM_CHECKSUM)
4256 		return 1;
4257 
4258 	return 0;
4259 }
4260 
4261 /*
4262  * wm_read_eeprom:
4263  *
4264  *	Read data from the serial EEPROM.
4265  */
4266 static int
4267 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4268 {
4269 	int rv;
4270 
4271 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
4272 		return 1;
4273 
4274 	if (wm_acquire_eeprom(sc))
4275 		return 1;
4276 
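	/* Dispatch on the NVM access method: ICH flash, EERD, SPI or uwire. */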
4277 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4278 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4279 		rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
4280 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
4281 		rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
4282 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
4283 		rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
4284 	else
4285 		rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
4286 
4287 	wm_release_eeprom(sc);
4288 	return rv;
4289 }
4290 
4291 static int
4292 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
4293     uint16_t *data)
4294 {
4295 	int i, eerd = 0;
4296 	int error = 0;
4297 
4298 	for (i = 0; i < wordcnt; i++) {
4299 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
4300 
4301 		CSR_WRITE(sc, WMREG_EERD, eerd);
4302 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
4303 		if (error != 0)
4304 			break;
4305 
4306 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
4307 	}
4308 
4309 	return error;
4310 }
4311 
4312 static int
4313 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
4314 {
4315 	uint32_t attempts = 100000;
4316 	uint32_t i, reg = 0;
4317 	int32_t done = -1;
4318 
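	/* Poll the DONE bit; 100000 * 5us gives roughly a 500ms timeout. */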
4319 	for (i = 0; i < attempts; i++) {
4320 		reg = CSR_READ(sc, rw);
4321 
4322 		if (reg & EERD_DONE) {
4323 			done = 0;
4324 			break;
4325 		}
4326 		delay(5);
4327 	}
4328 
4329 	return done;
4330 }
4331 
4332 /*
4333  * wm_add_rxbuf:
4334  *
4335  *	Add a receive buffer to the indicated descriptor.
4336  */
4337 static int
4338 wm_add_rxbuf(struct wm_softc *sc, int idx)
4339 {
4340 	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
4341 	struct mbuf *m;
4342 	int error;
4343 
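	/* Allocate a packet header mbuf and attach a cluster to it. */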
4344 	MGETHDR(m, M_DONTWAIT, MT_DATA);
4345 	if (m == NULL)
4346 		return ENOBUFS;
4347 
4348 	MCLGET(m, M_DONTWAIT);
4349 	if ((m->m_flags & M_EXT) == 0) {
4350 		m_freem(m);
4351 		return ENOBUFS;
4352 	}
4353 
4354 	if (rxs->rxs_mbuf != NULL)
4355 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4356 
4357 	rxs->rxs_mbuf = m;
4358 
4359 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4360 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4361 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
4362 	if (error) {
4363 		/* XXX XXX XXX */
4364 		aprint_error_dev(sc->sc_dev,
4365 		    "unable to load rx DMA map %d, error = %d\n",
4366 		    idx, error);
4367 		panic("wm_add_rxbuf");
4368 	}
4369 
4370 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4371 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4372 
4373 	WM_INIT_RXDESC(sc, idx);
4374 
4375 	return 0;
4376 }
4377 
4378 /*
4379  * wm_set_ral:
4380  *
4381  *	Set an entry in the receive address list.
4382  */
4383 static void
4384 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
4385 {
4386 	uint32_t ral_lo, ral_hi;
4387 
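	/*
	 * Pack the station address little-endian into the low/high
	 * halves; RAL_AV in the high half marks the slot valid.  A
	 * NULL enaddr clears the slot.
	 */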
4388 	if (enaddr != NULL) {
4389 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
4390 		    (enaddr[3] << 24);
4391 		ral_hi = enaddr[4] | (enaddr[5] << 8);
4392 		ral_hi |= RAL_AV;
4393 	} else {
4394 		ral_lo = 0;
4395 		ral_hi = 0;
4396 	}
4397 
4398 	if (sc->sc_type >= WM_T_82544) {
4399 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
4400 		    ral_lo);
4401 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
4402 		    ral_hi);
4403 	} else {
4404 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
4405 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
4406 	}
4407 }
4408 
4409 /*
4410  * wm_mchash:
4411  *
4412  *	Compute the hash of the multicast address for the 4096-bit
4413  *	multicast filter.
4414  */
4415 static uint32_t
4416 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
4417 {
4418 	static const int lo_shift[4] = { 4, 3, 2, 0 };
4419 	static const int hi_shift[4] = { 4, 5, 6, 8 };
4420 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
4421 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
4422 	uint32_t hash;
4423 
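	/*
	 * The hash is built from bits of the last two address octets;
	 * sc_mchash_type selects which bit window is used.
	 */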
4424 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4425 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)) {
4426 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
4427 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
4428 		return (hash & 0x3ff);
4429 	}
4430 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
4431 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
4432 
4433 	return (hash & 0xfff);
4434 }
4435 
4436 /*
4437  * wm_set_filter:
4438  *
4439  *	Set up the receive filter.
4440  */
4441 static void
4442 wm_set_filter(struct wm_softc *sc)
4443 {
4444 	struct ethercom *ec = &sc->sc_ethercom;
4445 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4446 	struct ether_multi *enm;
4447 	struct ether_multistep step;
4448 	bus_addr_t mta_reg;
4449 	uint32_t hash, reg, bit;
4450 	int i, size;
4451 
4452 	if (sc->sc_type >= WM_T_82544)
4453 		mta_reg = WMREG_CORDOVA_MTA;
4454 	else
4455 		mta_reg = WMREG_MTA;
4456 
4457 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
4458 
4459 	if (ifp->if_flags & IFF_BROADCAST)
4460 		sc->sc_rctl |= RCTL_BAM;
4461 	if (ifp->if_flags & IFF_PROMISC) {
4462 		sc->sc_rctl |= RCTL_UPE;
4463 		goto allmulti;
4464 	}
4465 
4466 	/*
4467 	 * Set the station address in the first RAL slot, and
4468 	 * clear the remaining slots.
4469 	 */
4470 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4471 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4472 		size = WM_ICH8_RAL_TABSIZE;
4473 	else
4474 		size = WM_RAL_TABSIZE;
4475 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
4476 	for (i = 1; i < size; i++)
4477 		wm_set_ral(sc, NULL, i);
4478 
4479 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4480 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4481 		size = WM_ICH8_MC_TABSIZE;
4482 	else
4483 		size = WM_MC_TABSIZE;
4484 	/* Clear out the multicast table. */
4485 	for (i = 0; i < size; i++)
4486 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
4487 
4488 	ETHER_FIRST_MULTI(step, ec, enm);
4489 	while (enm != NULL) {
4490 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
4491 			/*
4492 			 * We must listen to a range of multicast addresses.
4493 			 * For now, just accept all multicasts, rather than
4494 			 * trying to set only those filter bits needed to match
4495 			 * the range.  (At this time, the only use of address
4496 			 * ranges is for IP multicast routing, for which the
4497 			 * range is big enough to require all bits set.)
4498 			 */
4499 			goto allmulti;
4500 		}
4501 
4502 		hash = wm_mchash(sc, enm->enm_addrlo);
4503 
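		/*
		 * The upper bits of the hash select the 32-bit MTA word,
		 * the low five bits the bit position within that word.
		 */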
4504 		reg = (hash >> 5);
4505 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4506 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4507 			reg &= 0x1f;
4508 		else
4509 			reg &= 0x7f;
4510 		bit = hash & 0x1f;
4511 
4512 		hash = CSR_READ(sc, mta_reg + (reg << 2));
4513 		hash |= 1U << bit;
4514 
4515 		/* XXX Hardware bug? Save and restore the previous MTA entry. */
4516 		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
4517 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
4518 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4519 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
4520 		} else
4521 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4522 
4523 		ETHER_NEXT_MULTI(step, enm);
4524 	}
4525 
4526 	ifp->if_flags &= ~IFF_ALLMULTI;
4527 	goto setit;
4528 
4529  allmulti:
4530 	ifp->if_flags |= IFF_ALLMULTI;
4531 	sc->sc_rctl |= RCTL_MPE;
4532 
4533  setit:
4534 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
4535 }
4536 
4537 /*
4538  * wm_tbi_mediainit:
4539  *
4540  *	Initialize media for use on 1000BASE-X devices.
4541  */
4542 static void
4543 wm_tbi_mediainit(struct wm_softc *sc)
4544 {
4545 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4546 	const char *sep = "";
4547 
4548 	if (sc->sc_type < WM_T_82543)
4549 		sc->sc_tipg = TIPG_WM_DFLT;
4550 	else
4551 		sc->sc_tipg = TIPG_LG_DFLT;
4552 
4553 	sc->sc_tbi_anegticks = 5;
4554 
4555 	/* Initialize our media structures */
4556 	sc->sc_mii.mii_ifp = ifp;
4557 
4558 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
4559 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
4560 	    wm_tbi_mediastatus);
4561 
4562 	/*
4563 	 * SWD Pins:
4564 	 *
4565 	 *	0 = Link LED (output)
4566 	 *	1 = Loss Of Signal (input)
4567 	 */
4568 	sc->sc_ctrl |= CTRL_SWDPIO(0);
4569 	sc->sc_ctrl &= ~CTRL_SWDPIO(1);
4570 
4571 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4572 
4573 #define	ADD(ss, mm, dd)							\
4574 do {									\
4575 	aprint_normal("%s%s", sep, ss);					\
4576 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
4577 	sep = ", ";							\
4578 } while (/*CONSTCOND*/0)
4579 
4580 	aprint_normal_dev(sc->sc_dev, "");
4581 	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
4582 	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
4583 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
4584 	aprint_normal("\n");
4585 
4586 #undef ADD
4587 
4588 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
4589 }
4590 
4591 /*
4592  * wm_tbi_mediastatus:	[ifmedia interface function]
4593  *
4594  *	Get the current interface media status on a 1000BASE-X device.
4595  */
4596 static void
4597 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4598 {
4599 	struct wm_softc *sc = ifp->if_softc;
4600 	uint32_t ctrl, status;
4601 
4602 	ifmr->ifm_status = IFM_AVALID;
4603 	ifmr->ifm_active = IFM_ETHER;
4604 
4605 	status = CSR_READ(sc, WMREG_STATUS);
4606 	if ((status & STATUS_LU) == 0) {
4607 		ifmr->ifm_active |= IFM_NONE;
4608 		return;
4609 	}
4610 
4611 	ifmr->ifm_status |= IFM_ACTIVE;
4612 	ifmr->ifm_active |= IFM_1000_SX;
4613 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
4614 		ifmr->ifm_active |= IFM_FDX;
4615 	ctrl = CSR_READ(sc, WMREG_CTRL);
4616 	if (ctrl & CTRL_RFCE)
4617 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
4618 	if (ctrl & CTRL_TFCE)
4619 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
4620 }
4621 
4622 /*
4623  * wm_tbi_mediachange:	[ifmedia interface function]
4624  *
4625  *	Set hardware to newly-selected media on a 1000BASE-X device.
4626  */
4627 static int
4628 wm_tbi_mediachange(struct ifnet *ifp)
4629 {
4630 	struct wm_softc *sc = ifp->if_softc;
4631 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4632 	uint32_t status;
4633 	int i;
4634 
4635 	sc->sc_txcw = 0;
4636 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
4637 	    (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
4638 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
4639 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4640 		sc->sc_txcw |= TXCW_ANE;
4641 	} else {
4642 		/*
4643 		 * If autonegotiation is turned off, force link up and turn on
4644 		 * full duplex
4645 		 */
4646 		sc->sc_txcw &= ~TXCW_ANE;
4647 		sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
4648 		sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
4649 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4650 		delay(1000);
4651 	}
4652 
4653 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
4654 		    device_xname(sc->sc_dev),sc->sc_txcw));
4655 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
4656 	delay(10000);
4657 
4658 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
4659 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
4660 
4661 	/*
4662 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
4663 	 * optics detect a signal, 0 if they don't.
4664 	 */
4665 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
4666 		/* Have signal; wait for the link to come up. */
4667 
4668 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4669 			/*
4670 			 * Reset the link, and let autonegotiation do its thing
4671 			 */
4672 			sc->sc_ctrl |= CTRL_LRST;
4673 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4674 			delay(1000);
4675 			sc->sc_ctrl &= ~CTRL_LRST;
4676 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4677 			delay(1000);
4678 		}
4679 
4680 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
4681 			delay(10000);
4682 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
4683 				break;
4684 		}
4685 
4686 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
4687 			    device_xname(sc->sc_dev),i));
4688 
4689 		status = CSR_READ(sc, WMREG_STATUS);
4690 		DPRINTF(WM_DEBUG_LINK,
4691 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
4692 			device_xname(sc->sc_dev),status, STATUS_LU));
4693 		if (status & STATUS_LU) {
4694 			/* Link is up. */
4695 			DPRINTF(WM_DEBUG_LINK,
4696 			    ("%s: LINK: set media -> link up %s\n",
4697 			    device_xname(sc->sc_dev),
4698 			    (status & STATUS_FD) ? "FDX" : "HDX"));
4699 
4700 			/*
4701 			 * NOTE: CTRL will update TFCE and RFCE automatically,
4702 			 * so we should update sc->sc_ctrl
4703 			 */
4704 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4705 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4706 			sc->sc_fcrtl &= ~FCRTL_XONE;
4707 			if (status & STATUS_FD)
4708 				sc->sc_tctl |=
4709 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4710 			else
4711 				sc->sc_tctl |=
4712 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4713 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
4714 				sc->sc_fcrtl |= FCRTL_XONE;
4715 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4716 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
4717 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
4718 				      sc->sc_fcrtl);
4719 			sc->sc_tbi_linkup = 1;
4720 		} else {
4721 			if (i == WM_LINKUP_TIMEOUT)
4722 				wm_check_for_link(sc);
4723 			/* Link is down. */
4724 			DPRINTF(WM_DEBUG_LINK,
4725 			    ("%s: LINK: set media -> link down\n",
4726 			    device_xname(sc->sc_dev)));
4727 			sc->sc_tbi_linkup = 0;
4728 		}
4729 	} else {
4730 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
4731 		    device_xname(sc->sc_dev)));
4732 		sc->sc_tbi_linkup = 0;
4733 	}
4734 
4735 	wm_tbi_set_linkled(sc);
4736 
4737 	return 0;
4738 }
4739 
4740 /*
4741  * wm_tbi_set_linkled:
4742  *
4743  *	Update the link LED on 1000BASE-X devices.
4744  */
4745 static void
4746 wm_tbi_set_linkled(struct wm_softc *sc)
4747 {
4748 
4749 	if (sc->sc_tbi_linkup)
4750 		sc->sc_ctrl |= CTRL_SWDPIN(0);
4751 	else
4752 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
4753 
4754 	/* 82540 or newer devices are active low */
4755 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
4756 
4757 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4758 }
4759 
4760 /*
4761  * wm_tbi_check_link:
4762  *
4763  *	Check the link on 1000BASE-X devices.
4764  */
4765 static void
4766 wm_tbi_check_link(struct wm_softc *sc)
4767 {
4768 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4769 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4770 	uint32_t rxcw, ctrl, status;
4771 
4772 	status = CSR_READ(sc, WMREG_STATUS);
4773 
4774 	rxcw = CSR_READ(sc, WMREG_RXCW);
4775 	ctrl = CSR_READ(sc, WMREG_CTRL);
4776 
4777 	/* set link status */
4778 	if ((status & STATUS_LU) == 0) {
4779 		DPRINTF(WM_DEBUG_LINK,
4780 		    ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
4781 		sc->sc_tbi_linkup = 0;
4782 	} else if (sc->sc_tbi_linkup == 0) {
4783 		DPRINTF(WM_DEBUG_LINK,
4784 		    ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
4785 		    (status & STATUS_FD) ? "FDX" : "HDX"));
4786 		sc->sc_tbi_linkup = 1;
4787 	}
4788 
4789 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
4790 	    && ((status & STATUS_LU) == 0)) {
4791 		sc->sc_tbi_linkup = 0;
4792 		if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
4793 			/* RXCFG storm! */
4794 			DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
4795 				sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
4796 			wm_init(ifp);
4797 			wm_start(ifp);
4798 		} else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4799 			/* If the timer expired, retry autonegotiation */
4800 			if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
4801 				DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
4802 				sc->sc_tbi_ticks = 0;
4803 				/*
4804 				 * Reset the link, and let autonegotiation do
4805 				 * its thing
4806 				 */
4807 				sc->sc_ctrl |= CTRL_LRST;
4808 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4809 				delay(1000);
4810 				sc->sc_ctrl &= ~CTRL_LRST;
4811 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4812 				delay(1000);
4813 				CSR_WRITE(sc, WMREG_TXCW,
4814 				    sc->sc_txcw & ~TXCW_ANE);
4815 				CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
4816 			}
4817 		}
4818 	}
4819 
4820 	wm_tbi_set_linkled(sc);
4821 }
4822 
4823 /*
4824  * wm_gmii_reset:
4825  *
4826  *	Reset the PHY.
4827  */
4828 static void
4829 wm_gmii_reset(struct wm_softc *sc)
4830 {
4831 	uint32_t reg;
4832 	int func = 0; /* XXX gcc */
4833 	int rv;
4834 
4835 	/* get phy semaphore */
4836 	switch (sc->sc_type) {
4837 	case WM_T_82571:
4838 	case WM_T_82572:
4839 	case WM_T_82573:
4840 	case WM_T_82574:
4841 	case WM_T_82583:
4842 		 /* XXX should get sw semaphore, too */
4843 		rv = wm_get_swsm_semaphore(sc);
4844 		break;
4845 	case WM_T_80003:
4846 		func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
4847 		rv = wm_get_swfw_semaphore(sc,
4848 		    func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4849 		break;
4850 	case WM_T_ICH8:
4851 	case WM_T_ICH9:
4852 	case WM_T_ICH10:
4853 	case WM_T_PCH:
4854 		rv = wm_get_swfwhw_semaphore(sc);
4855 		break;
4856 	default:
4857 		/* nothing to do */
4858 		rv = 0;
4859 		break;
4860 	}
4861 	if (rv != 0) {
4862 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4863 		    __func__);
4864 		return;
4865 	}
4866 
4867 	switch (sc->sc_type) {
4868 	case WM_T_82542_2_0:
4869 	case WM_T_82542_2_1:
4870 		/* null */
4871 		break;
4872 	case WM_T_82543:
4873 		/*
4874 		 * With 82543, we need to force speed and duplex on the MAC
4875 		 * equal to what the PHY speed and duplex configuration is.
4876 		 * In addition, we need to perform a hardware reset on the PHY
4877 		 * to take it out of reset.
4878 		 */
4879 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
4880 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4881 
4882 		/* The PHY reset pin is active-low. */
4883 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
4884 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
4885 		    CTRL_EXT_SWDPIN(4));
4886 		reg |= CTRL_EXT_SWDPIO(4);
4887 
4888 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4889 		delay(10*1000);
4890 
4891 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
4892 		delay(150);
4893 #if 0
4894 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
4895 #endif
4896 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
4897 		break;
4898 	case WM_T_82544:	/* reset 10000us */
4899 	case WM_T_82540:
4900 	case WM_T_82545:
4901 	case WM_T_82545_3:
4902 	case WM_T_82546:
4903 	case WM_T_82546_3:
4904 	case WM_T_82541:
4905 	case WM_T_82541_2:
4906 	case WM_T_82547:
4907 	case WM_T_82547_2:
4908 	case WM_T_82571:	/* reset 100us */
4909 	case WM_T_82572:
4910 	case WM_T_82573:
4911 	case WM_T_82574:
4912 	case WM_T_82583:
4913 	case WM_T_80003:
4914 		/* generic reset */
4915 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
4916 		delay((sc->sc_type >= WM_T_82571) ? 100 : 10*1000);
4917 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4918 		delay(150);
4919 
4920 		if ((sc->sc_type == WM_T_82541)
4921 		    || (sc->sc_type == WM_T_82541_2)
4922 		    || (sc->sc_type == WM_T_82547)
4923 		    || (sc->sc_type == WM_T_82547_2)) {
4924 			/* Workarounds for IGP PHYs are done in igp_reset() */
4925 			/* XXX add code to set LED after phy reset */
4926 		}
4927 		break;
4928 	case WM_T_ICH8:
4929 	case WM_T_ICH9:
4930 	case WM_T_ICH10:
4931 	case WM_T_PCH:
4932 		/* generic reset */
4933 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
4934 		delay(100);
4935 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4936 		delay(150);
4937 		break;
4938 	default:
4939 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4940 		    __func__);
4941 		break;
4942 	}
4943 
4944 	/* release PHY semaphore */
4945 	switch (sc->sc_type) {
4946 	case WM_T_82571:
4947 	case WM_T_82572:
4948 	case WM_T_82573:
4949 	case WM_T_82574:
4950 	case WM_T_82583:
4951 		 /* XXX should put sw semaphore, too */
4952 		wm_put_swsm_semaphore(sc);
4953 		break;
4954 	case WM_T_80003:
4955 		wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4956 		break;
4957 	case WM_T_ICH8:
4958 	case WM_T_ICH9:
4959 	case WM_T_ICH10:
4960 	case WM_T_PCH:
4961 		wm_put_swfwhw_semaphore(sc);
4962 		break;
4963 	default:
4964 		/* nothing to do */
4965 		rv = 0;
4966 		break;
4967 	}
4968 
4969 	/* get_cfg_done */
4970 	wm_get_cfg_done(sc);
4971 
4972 	/* extra setup */
4973 	switch (sc->sc_type) {
4974 	case WM_T_82542_2_0:
4975 	case WM_T_82542_2_1:
4976 	case WM_T_82543:
4977 	case WM_T_82544:
4978 	case WM_T_82540:
4979 	case WM_T_82545:
4980 	case WM_T_82545_3:
4981 	case WM_T_82546:
4982 	case WM_T_82546_3:
4983 	case WM_T_82541_2:
4984 	case WM_T_82547_2:
4985 	case WM_T_82571:
4986 	case WM_T_82572:
4987 	case WM_T_82573:
4988 	case WM_T_82574:
4989 	case WM_T_82583:
4990 	case WM_T_80003:
4991 		/* null */
4992 		break;
4993 	case WM_T_82541:
4994 	case WM_T_82547:
4995 		/* XXX Configure the activity LED after PHY reset */
4996 		break;
4997 	case WM_T_ICH8:
4998 	case WM_T_ICH9:
4999 	case WM_T_ICH10:
5000 	case WM_T_PCH:
5001 		/* Allow time for h/w to get to a quiescent state after reset */
5002 		delay(10*1000);
5003 
5004 		if (sc->sc_type == WM_T_PCH) {
5005 			wm_hv_phy_workaround_ich8lan(sc);
5006 
5007 			/*
5008 			 * Dummy read to clear the PHY wakeup bit after LCD
5009 			 * reset.
5010 			 */
5011 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
5012 		}
5013 
5014 		/*
5015 		 * XXX Configure the LCD with the extended configuration region
5016 		 * in NVM
5017 		 */
5018 
5019 		/* Configure the LCD with the OEM bits in NVM */
5020 		if (sc->sc_type == WM_T_PCH) {
5021 			/*
5022 			 * Disable LPLU.
5023 			 * XXX It seems that 82567 has LPLU, too.
5024 			 */
5025 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
5026 			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
5027 			reg |= HV_OEM_BITS_ANEGNOW;
5028 			wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
5029 		}
5030 		break;
5031 	default:
5032 		panic("%s: unknown type\n", __func__);
5033 		break;
5034 	}
5035 }
5036 
5037 /*
5038  * wm_gmii_mediainit:
5039  *
5040  *	Initialize media for use on 1000BASE-T devices.
5041  */
5042 static void
5043 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
5044 {
5045 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5046 
5047 	/* We have MII. */
5048 	sc->sc_flags |= WM_F_HAS_MII;
5049 
5050 	if (sc->sc_type == WM_T_80003)
5051 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
5052 	else
5053 		sc->sc_tipg = TIPG_1000T_DFLT;
5054 
5055 	/*
5056 	 * Let the chip set speed/duplex on its own based on
5057 	 * signals from the PHY.
5058 	 * XXXbouyer - I'm not sure this is right for the 80003,
5059 	 * the em driver only sets CTRL_SLU here - but it seems to work.
5060 	 */
5061 	sc->sc_ctrl |= CTRL_SLU;
5062 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5063 
5064 	/* Initialize our media structures and probe the GMII. */
5065 	sc->sc_mii.mii_ifp = ifp;
5066 
5067 	switch (prodid) {
5068 	case PCI_PRODUCT_INTEL_PCH_M_LM:
5069 	case PCI_PRODUCT_INTEL_PCH_M_LC:
5070 		/* 82577 */
5071 		sc->sc_phytype = WMPHY_82577;
5072 		sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
5073 		sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
5074 		break;
5075 	case PCI_PRODUCT_INTEL_PCH_D_DM:
5076 	case PCI_PRODUCT_INTEL_PCH_D_DC:
5077 		/* 82578 */
5078 		sc->sc_phytype = WMPHY_82578;
5079 		sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
5080 		sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
5081 		break;
5082 	case PCI_PRODUCT_INTEL_82801I_BM:
5083 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
5084 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
5085 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
5086 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
5087 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
5088 		/* 82567 */
5089 		sc->sc_phytype = WMPHY_BM;
5090 		sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
5091 		sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
5092 		break;
5093 	default:
5094 		if (sc->sc_type >= WM_T_80003) {
5095 			sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
5096 			sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
5097 		} else if (sc->sc_type >= WM_T_82544) {
5098 			sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
5099 			sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
5100 		} else {
5101 			sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
5102 			sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
5103 		}
5104 		break;
5106 	}
5107 	sc->sc_mii.mii_statchg = wm_gmii_statchg;
5108 
5109 	wm_gmii_reset(sc);
5110 
5111 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
5112 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
5113 	    wm_gmii_mediastatus);
5114 
5115 	mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
5116 	    MII_OFFSET_ANY, MIIF_DOPAUSE);
5117 
5118 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
5119 		/* if failed, retry with *_bm_* */
5120 		sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
5121 		sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
5122 
5123 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
5124 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
5125 	}
5126 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
5127 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
5128 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
5129 		sc->sc_phytype = WMPHY_NONE;
5130 	} else {
5131 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
5132 	}
5133 }
5134 
5135 /*
5136  * wm_gmii_mediastatus:	[ifmedia interface function]
5137  *
5138  *	Get the current interface media status on a 1000BASE-T device.
5139  */
5140 static void
5141 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5142 {
5143 	struct wm_softc *sc = ifp->if_softc;
5144 
5145 	ether_mediastatus(ifp, ifmr);
5146 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) |
5147 			   sc->sc_flowflags;
5148 }
5149 
5150 /*
5151  * wm_gmii_mediachange:	[ifmedia interface function]
5152  *
5153  *	Set hardware to newly-selected media on a 1000BASE-T device.
5154  */
5155 static int
5156 wm_gmii_mediachange(struct ifnet *ifp)
5157 {
5158 	struct wm_softc *sc = ifp->if_softc;
5159 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5160 	int rc;
5161 
5162 	if ((ifp->if_flags & IFF_UP) == 0)
5163 		return 0;
5164 
5165 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
5166 	sc->sc_ctrl |= CTRL_SLU;
5167 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
5168 	    || (sc->sc_type > WM_T_82543)) {
5169 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
5170 	} else {
5171 		sc->sc_ctrl &= ~CTRL_ASDE;
5172 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5173 		if (ife->ifm_media & IFM_FDX)
5174 			sc->sc_ctrl |= CTRL_FD;
5175 		switch (IFM_SUBTYPE(ife->ifm_media)) {
5176 		case IFM_10_T:
5177 			sc->sc_ctrl |= CTRL_SPEED_10;
5178 			break;
5179 		case IFM_100_TX:
5180 			sc->sc_ctrl |= CTRL_SPEED_100;
5181 			break;
5182 		case IFM_1000_T:
5183 			sc->sc_ctrl |= CTRL_SPEED_1000;
5184 			break;
5185 		default:
5186 			panic("wm_gmii_mediachange: bad media 0x%x",
5187 			    ife->ifm_media);
5188 		}
5189 	}
5190 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5191 	if (sc->sc_type <= WM_T_82543)
5192 		wm_gmii_reset(sc);
5193 
5194 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
5195 		return 0;
5196 	return rc;
5197 }
5198 
5199 #define	MDI_IO		CTRL_SWDPIN(2)
5200 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
5201 #define	MDI_CLK		CTRL_SWDPIN(3)
5202 
5203 static void
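/*
 * The i82543 bit-bangs MII management frames through the CTRL
 * software-definable pins: MDI_IO carries the data, MDI_CLK the clock.
 */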
5204 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
5205 {
5206 	uint32_t i, v;
5207 
5208 	v = CSR_READ(sc, WMREG_CTRL);
5209 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
5210 	v |= MDI_DIR | CTRL_SWDPIO(3);
5211 
5212 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
5213 		if (data & i)
5214 			v |= MDI_IO;
5215 		else
5216 			v &= ~MDI_IO;
5217 		CSR_WRITE(sc, WMREG_CTRL, v);
5218 		delay(10);
5219 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5220 		delay(10);
5221 		CSR_WRITE(sc, WMREG_CTRL, v);
5222 		delay(10);
5223 	}
5224 }
5225 
5226 static uint32_t
5227 i82543_mii_recvbits(struct wm_softc *sc)
5228 {
5229 	uint32_t v, i, data = 0;
5230 
5231 	v = CSR_READ(sc, WMREG_CTRL);
5232 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
5233 	v |= CTRL_SWDPIO(3);
5234 
5235 	CSR_WRITE(sc, WMREG_CTRL, v);
5236 	delay(10);
5237 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5238 	delay(10);
5239 	CSR_WRITE(sc, WMREG_CTRL, v);
5240 	delay(10);
5241 
5242 	for (i = 0; i < 16; i++) {
5243 		data <<= 1;
5244 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5245 		delay(10);
5246 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
5247 			data |= 1;
5248 		CSR_WRITE(sc, WMREG_CTRL, v);
5249 		delay(10);
5250 	}
5251 
5252 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5253 	delay(10);
5254 	CSR_WRITE(sc, WMREG_CTRL, v);
5255 	delay(10);
5256 
5257 	return data;
5258 }
5259 
5260 #undef MDI_IO
5261 #undef MDI_DIR
5262 #undef MDI_CLK
5263 
5264 /*
5265  * wm_gmii_i82543_readreg:	[mii interface function]
5266  *
5267  *	Read a PHY register on the GMII (i82543 version).
5268  */
5269 static int
5270 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
5271 {
5272 	struct wm_softc *sc = device_private(self);
5273 	int rv;
5274 
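	/* 32-bit preamble of ones, then the ST, OP, PHY and REG fields. */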
5275 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
5276 	i82543_mii_sendbits(sc, reg | (phy << 5) |
5277 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
5278 	rv = i82543_mii_recvbits(sc) & 0xffff;
5279 
5280 	DPRINTF(WM_DEBUG_GMII,
5281 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
5282 	    device_xname(sc->sc_dev), phy, reg, rv));
5283 
5284 	return rv;
5285 }
5286 
5287 /*
5288  * wm_gmii_i82543_writereg:	[mii interface function]
5289  *
5290  *	Write a PHY register on the GMII (i82543 version).
5291  */
5292 static void
5293 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
5294 {
5295 	struct wm_softc *sc = device_private(self);
5296 
5297 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
5298 	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
5299 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
5300 	    (MII_COMMAND_START << 30), 32);
5301 }
5302 
5303 /*
5304  * wm_gmii_i82544_readreg:	[mii interface function]
5305  *
5306  *	Read a PHY register on the GMII.
5307  */
5308 static int
5309 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
5310 {
5311 	struct wm_softc *sc = device_private(self);
5312 	uint32_t mdic = 0;
5313 	int i, rv;
5314 
5315 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
5316 	    MDIC_REGADD(reg));
5317 
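	/* Poll for completion; 320 * 10us gives roughly a 3.2ms timeout. */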
5318 	for (i = 0; i < 320; i++) {
5319 		mdic = CSR_READ(sc, WMREG_MDIC);
5320 		if (mdic & MDIC_READY)
5321 			break;
5322 		delay(10);
5323 	}
5324 
5325 	if ((mdic & MDIC_READY) == 0) {
5326 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
5327 		    device_xname(sc->sc_dev), phy, reg);
5328 		rv = 0;
5329 	} else if (mdic & MDIC_E) {
5330 #if 0 /* This is normal if no PHY is present. */
5331 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
5332 		    device_xname(sc->sc_dev), phy, reg);
5333 #endif
5334 		rv = 0;
5335 	} else {
5336 		rv = MDIC_DATA(mdic);
5337 		if (rv == 0xffff)
5338 			rv = 0;
5339 	}
5340 
5341 	return rv;
5342 }
5343 
5344 /*
5345  * wm_gmii_i82544_writereg:	[mii interface function]
5346  *
5347  *	Write a PHY register on the GMII.
5348  */
5349 static void
5350 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
5351 {
5352 	struct wm_softc *sc = device_private(self);
5353 	uint32_t mdic = 0;
5354 	int i;
5355 
5356 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
5357 	    MDIC_REGADD(reg) | MDIC_DATA(val));
5358 
5359 	for (i = 0; i < 320; i++) {
5360 		mdic = CSR_READ(sc, WMREG_MDIC);
5361 		if (mdic & MDIC_READY)
5362 			break;
5363 		delay(10);
5364 	}
5365 
5366 	if ((mdic & MDIC_READY) == 0)
5367 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
5368 		    device_xname(sc->sc_dev), phy, reg);
5369 	else if (mdic & MDIC_E)
5370 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
5371 		    device_xname(sc->sc_dev), phy, reg);
5372 }
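
/*
 * Both MDIC accessors above share the same ready-poll: up to 320 iterations
 * of 10us each (about 3.2ms) waiting for MDIC_READY.  A sketch of that
 * common step, factored out (illustrative only; no such helper exists in
 * this driver):
 */
#if 0
static uint32_t
wm_mdic_wait_ready(struct wm_softc *sc)
{
	uint32_t mdic = 0;
	int i;

	for (i = 0; i < 320; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	/* Caller checks MDIC_READY (timeout) and MDIC_E (error). */
	return mdic;
}
#endif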
5373 
5374 /*
5375  * wm_gmii_i80003_readreg:	[mii interface function]
5376  *
5377  *	Read a PHY register on the kumeran.
5378  * This could be handled by the PHY layer if we didn't have to lock the
5379  * resource ...
5380  */
5381 static int
5382 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
5383 {
5384 	struct wm_softc *sc = device_private(self);
5385 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
5386 	int rv;
5387 
5388 	if (phy != 1) /* only one PHY on kumeran bus */
5389 		return 0;
5390 
5391 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
5392 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5393 		    __func__);
5394 		return 0;
5395 	}
5396 
5397 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
5398 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5399 		    reg >> GG82563_PAGE_SHIFT);
5400 	} else {
5401 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
5402 		    reg >> GG82563_PAGE_SHIFT);
5403 	}
5404 	/* Wait 200us more to work around a bug in the MDIC register ready bit */
5405 	delay(200);
5406 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
5407 	delay(200);
5408 
5409 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
5410 	return rv;
5411 }
5412 
5413 /*
5414  * wm_gmii_i80003_writereg:	[mii interface function]
5415  *
5416  *	Write a PHY register on the kumeran.
5417  * This could be handled by the PHY layer if we didn't have to lock the
5418  * resource ...
5419  */
5420 static void
5421 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
5422 {
5423 	struct wm_softc *sc = device_private(self);
5424 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
5425 
5426 	if (phy != 1) /* only one PHY on kumeran bus */
5427 		return;
5428 
5429 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
5430 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5431 		    __func__);
5432 		return;
5433 	}
5434 
5435 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
5436 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5437 		    reg >> GG82563_PAGE_SHIFT);
5438 	} else {
5439 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
5440 		    reg >> GG82563_PAGE_SHIFT);
5441 	}
5442 	/* Wait 200us more to work around a bug in the MDIC register ready bit */
5443 	delay(200);
5444 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
5445 	delay(200);
5446 
5447 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
5448 }
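
/*
 * The GG82563 accessors above take a 'reg' argument that packs a page
 * number (above GG82563_PAGE_SHIFT) and a register offset (within
 * GG82563_MAX_REG_ADDRESS).  A usage sketch with a made-up page and offset
 * (illustrative only):
 */
#if 0
	/* page 2, register 21: the page select is written first, then 21 */
	int reg = (2 << GG82563_PAGE_SHIFT) | 21;
	int val = wm_gmii_i80003_readreg(sc->sc_dev, 1, reg);
#endif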
5449 
5450 /*
5451  * wm_gmii_bm_readreg:	[mii interface function]
5452  *
5453  *	Read a PHY register on the BM PHY.
5454  * This could be handled by the PHY layer if we didn't have to lock the
5455  * resource ...
5456  */
5457 static int
5458 wm_gmii_bm_readreg(device_t self, int phy, int reg)
5459 {
5460 	struct wm_softc *sc = device_private(self);
5461 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
5462 	int rv;
5463 
5464 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
5465 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5466 		    __func__);
5467 		return 0;
5468 	}
5469 
5470 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
5471 		if (phy == 1)
5472 			wm_gmii_i82544_writereg(self, phy, 0x1f,
5473 			    reg);
5474 		else
5475 			wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5476 			    reg >> GG82563_PAGE_SHIFT);
5477 
5478 	}
5479 
5480 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
5481 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
5482 	return rv;
5483 }
5484 
5485 /*
5486  * wm_gmii_bm_writereg:	[mii interface function]
5487  *
5488  *	Write a PHY register on the BM PHY.
5489  * This could be handled by the PHY layer if we didn't have to lock the
5490  * resource ...
5491  */
5492 static void
5493 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
5494 {
5495 	struct wm_softc *sc = device_private(self);
5496 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
5497 
5498 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
5499 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5500 		    __func__);
5501 		return;
5502 	}
5503 
5504 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
5505 		if (phy == 1)
5506 			wm_gmii_i82544_writereg(self, phy, 0x1f,
5507 			    reg);
5508 		else
5509 			wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5510 			    reg >> GG82563_PAGE_SHIFT);
5511 
5512 	}
5513 
5514 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
5515 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
5516 }
5517 
5518 static void
5519 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
5520 {
5521 	struct wm_softc *sc = device_private(self);
5522 	uint16_t regnum = BM_PHY_REG_NUM(offset);
5523 	uint16_t wuce;
5524 
5525 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
5526 	if (sc->sc_type == WM_T_PCH) {
5527 		/* XXX the e1000 driver does nothing here... why? */
5528 	}
5529 
5530 	/* Set page 769 */
5531 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
5532 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
5533 
5534 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
5535 
5536 	wuce &= ~BM_WUC_HOST_WU_BIT;
5537 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
5538 	    wuce | BM_WUC_ENABLE_BIT);
5539 
5540 	/* Select page 800 */
5541 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
5542 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
5543 
5544 	/* Write page 800 */
5545 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
5546 
5547 	if (rd)
5548 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
5549 	else
5550 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
5551 
5552 	/* Set page 769 */
5553 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
5554 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
5555 
5556 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
5557 }
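
/*
 * Callers reach the page-800 wakeup registers only through the helper
 * above: rd != 0 reads into *val, rd == 0 writes *val, and the page-769
 * enable bits are saved and restored around the access.  A minimal
 * read-modify-write sketch (illustrative only; 'offset' stands for any
 * page-800 register offset and the bit value is hypothetical):
 */
#if 0
	int16_t val;

	wm_access_phy_wakeup_reg_bm(self, offset, &val, 1);	/* read */
	val |= 0x0001;					/* hypothetical bit */
	wm_access_phy_wakeup_reg_bm(self, offset, &val, 0);	/* write back */
#endif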
5558 
5559 /*
5560  * wm_gmii_hv_readreg:	[mii interface function]
5561  *
5562  *	Read a PHY register on the HV (PCH) PHY.
5563  * This could be handled by the PHY layer if we didn't have to lock the
5564  * resource ...
5565  */
5566 static int
5567 wm_gmii_hv_readreg(device_t self, int phy, int reg)
5568 {
5569 	struct wm_softc *sc = device_private(self);
5570 	uint16_t page = BM_PHY_REG_PAGE(reg);
5571 	uint16_t regnum = BM_PHY_REG_NUM(reg);
5572 	uint16_t val;
5573 	int rv;
5574 
5575 	if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
5576 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5577 		    __func__);
5578 		return 0;
5579 	}
5580 
5581 	/* XXX Workaround failure in MDIO access while cable is disconnected */
5582 	if (sc->sc_phytype == WMPHY_82577) {
5583 		/* XXX must write */
5584 	}
5585 
5586 	/* Page 800 works differently than the rest so it has its own func */
5587 	if (page == BM_WUC_PAGE) {
5588 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
5589 		return val;
5590 	}
5591 
5592 	/*
5593 	 * Pages lower than 768 work differently than the rest, so they
5594 	 * have their own func
5595 	 */
5596 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
5597 		printf("gmii_hv_readreg!!!\n");
5598 		return 0;
5599 	}
5600 
5601 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
5602 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
5603 		    page << BME1000_PAGE_SHIFT);
5604 	}
5605 
5606 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
5607 	wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
5608 	return rv;
5609 }
5610 
5611 /*
5612  * wm_gmii_hv_writereg:	[mii interface function]
5613  *
5614  *	Write a PHY register on the HV (PCH) PHY.
5615  * This could be handled by the PHY layer if we didn't have to lock the
5616  * resource ...
5617  */
5618 static void
5619 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
5620 {
5621 	struct wm_softc *sc = device_private(self);
5622 	uint16_t page = BM_PHY_REG_PAGE(reg);
5623 	uint16_t regnum = BM_PHY_REG_NUM(reg);
5624 
5625 	if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
5626 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5627 		    __func__);
5628 		return;
5629 	}
5630 
5631 	/* XXX Workaround failure in MDIO access while cable is disconnected */
5632 
5633 	/* Page 800 works differently than the rest so it has its own func */
5634 	if (page == BM_WUC_PAGE) {
5635 		uint16_t tmp;
5636 
5637 		tmp = val;
5638 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
5639 		return;
5640 	}
5641 
5642 	/*
5643 	 * Pages lower than 768 work differently than the rest, so they
5644 	 * have their own func
5645 	 */
5646 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
5647 		printf("gmii_hv_writereg!!!\n");
5648 		return;
5649 	}
5650 
5651 	/*
5652 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
5653 	 * Power Down (whenever bit 11 of the PHY control register is set)
5654 	 */
5655 
5656 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
5657 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
5658 		    page << BME1000_PAGE_SHIFT);
5659 	}
5660 
5661 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
5662 	wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
5663 }
5664 
5665 /*
5666  * wm_gmii_statchg:	[mii interface function]
5667  *
5668  *	Callback from MII layer when media changes.
5669  */
5670 static void
5671 wm_gmii_statchg(device_t self)
5672 {
5673 	struct wm_softc *sc = device_private(self);
5674 	struct mii_data *mii = &sc->sc_mii;
5675 
5676 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
5677 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5678 	sc->sc_fcrtl &= ~FCRTL_XONE;
5679 
5680 	/*
5681 	 * Get flow control negotiation result.
5682 	 */
5683 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
5684 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
5685 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
5686 		mii->mii_media_active &= ~IFM_ETH_FMASK;
5687 	}
5688 
5689 	if (sc->sc_flowflags & IFM_FLOW) {
5690 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
5691 			sc->sc_ctrl |= CTRL_TFCE;
5692 			sc->sc_fcrtl |= FCRTL_XONE;
5693 		}
5694 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
5695 			sc->sc_ctrl |= CTRL_RFCE;
5696 	}
5697 
5698 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
5699 		DPRINTF(WM_DEBUG_LINK,
5700 		    ("%s: LINK: statchg: FDX\n", device_xname(sc->sc_dev)));
5701 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5702 	} else  {
5703 		DPRINTF(WM_DEBUG_LINK,
5704 		    ("%s: LINK: statchg: HDX\n", device_xname(sc->sc_dev)));
5705 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5706 	}
5707 
5708 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5709 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5710 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
5711 						 : WMREG_FCRTL, sc->sc_fcrtl);
5712 	if (sc->sc_type == WM_T_80003) {
5713 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
5714 		case IFM_1000_T:
5715 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
5716 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
5717 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
5718 			break;
5719 		default:
5720 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
5721 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
5722 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
5723 			break;
5724 		}
5725 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
5726 	}
5727 }
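
/*
 * Summary of the flow-control plumbing above: IFM_ETH_TXPAUSE sets
 * CTRL_TFCE and FCRTL_XONE (we transmit pause frames), IFM_ETH_RXPAUSE
 * sets CTRL_RFCE (we honor received pause frames), and the duplex bit
 * selects the TCTL collision distance.  On the 80003 the Kumeran HD_CTRL
 * register and TIPG are additionally re-tuned per link speed.
 */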
5728 
5729 /*
5730  * wm_kmrn_readreg:
5731  *
5732  *	Read a kumeran register
5733  */
5734 static int
5735 wm_kmrn_readreg(struct wm_softc *sc, int reg)
5736 {
5737 	int rv;
5738 
5739 	if (sc->sc_flags & WM_F_SWFW_SYNC) {
5740 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
5741 			aprint_error_dev(sc->sc_dev,
5742 			    "%s: failed to get semaphore\n", __func__);
5743 			return 0;
5744 		}
5745 	} else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
5746 		if (wm_get_swfwhw_semaphore(sc)) {
5747 			aprint_error_dev(sc->sc_dev,
5748 			    "%s: failed to get semaphore\n", __func__);
5749 			return 0;
5750 		}
5751 	}
5752 
5753 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
5754 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
5755 	    KUMCTRLSTA_REN);
5756 	delay(2);
5757 
5758 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
5759 
5760 	if (sc->sc_flags & WM_F_SWFW_SYNC)
5761 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
5762 	else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5763 		wm_put_swfwhw_semaphore(sc);
5764 
5765 	return rv;
5766 }
5767 
5768 /*
5769  * wm_kmrn_writereg:
5770  *
5771  *	Write a kumeran register
5772  */
5773 static void
5774 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
5775 {
5776 
5777 	if (sc->sc_flags & WM_F_SWFW_SYNC) {
5778 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
5779 			aprint_error_dev(sc->sc_dev,
5780 			    "%s: failed to get semaphore\n", __func__);
5781 			return;
5782 		}
5783 	} else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
5784 		if (wm_get_swfwhw_semaphore(sc)) {
5785 			aprint_error_dev(sc->sc_dev,
5786 			    "%s: failed to get semaphore\n", __func__);
5787 			return;
5788 		}
5789 	}
5790 
5791 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
5792 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
5793 	    (val & KUMCTRLSTA_MASK));
5794 
5795 	if (sc->sc_flags & WM_F_SWFW_SYNC)
5796 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
5797 	else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5798 		wm_put_swfwhw_semaphore(sc);
5799 }
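
/*
 * Kumeran registers are reached indirectly through KUMCTRLSTA: the register
 * offset goes into the KUMCTRLSTA_OFFSET field, with KUMCTRLSTA_REN set for
 * reads or the data in the low 16 bits for writes.  A usage sketch
 * mirroring wm_gmii_statchg() above (illustrative only):
 */
#if 0
	int hd_ctrl = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL);

	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
	    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
#endif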
5800 
5801 static int
5802 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
5803 {
5804 	uint32_t eecd = 0;
5805 
5806 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
5807 	    || sc->sc_type == WM_T_82583) {
5808 		eecd = CSR_READ(sc, WMREG_EECD);
5809 
5810 		/* Isolate bits 15 & 16 */
5811 		eecd = ((eecd >> 15) & 0x03);
5812 
5813 		/* If both bits are set, device is Flash type */
5814 		if (eecd == 0x03)
5815 			return 0;
5816 	}
5817 	return 1;
5818 }
5819 
5820 static int
5821 wm_get_swsm_semaphore(struct wm_softc *sc)
5822 {
5823 	int32_t timeout;
5824 	uint32_t swsm;
5825 
5826 	/* Get the FW semaphore. */
5827 	timeout = 1000 + 1; /* XXX */
5828 	while (timeout) {
5829 		swsm = CSR_READ(sc, WMREG_SWSM);
5830 		swsm |= SWSM_SWESMBI;
5831 		CSR_WRITE(sc, WMREG_SWSM, swsm);
5832 		/* if we managed to set the bit we got the semaphore. */
5833 		swsm = CSR_READ(sc, WMREG_SWSM);
5834 		if (swsm & SWSM_SWESMBI)
5835 			break;
5836 
5837 		delay(50);
5838 		timeout--;
5839 	}
5840 
5841 	if (timeout == 0) {
5842 		aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n");
5843 		/* Release semaphores */
5844 		wm_put_swsm_semaphore(sc);
5845 		return 1;
5846 	}
5847 	return 0;
5848 }
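
/*
 * The acquire loop above is a test-and-set: writing SWSM_SWESMBI only
 * sticks when firmware is not holding the semaphore, so reading the bit
 * back as 1 means we own it.  Paired usage sketch (illustrative only):
 */
#if 0
	if (wm_get_swsm_semaphore(sc) == 0) {
		/* ... access resources shared with firmware ... */
		wm_put_swsm_semaphore(sc);
	}
#endif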
5849 
5850 static void
5851 wm_put_swsm_semaphore(struct wm_softc *sc)
5852 {
5853 	uint32_t swsm;
5854 
5855 	swsm = CSR_READ(sc, WMREG_SWSM);
5856 	swsm &= ~(SWSM_SWESMBI);
5857 	CSR_WRITE(sc, WMREG_SWSM, swsm);
5858 }
5859 
5860 static int
5861 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
5862 {
5863 	uint32_t swfw_sync;
5864 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
5865 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
5866 	int timeout;
5867 
5868 	for (timeout = 0; timeout < 200; timeout++) {
5869 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5870 			if (wm_get_swsm_semaphore(sc)) {
5871 				aprint_error_dev(sc->sc_dev,
5872 				    "%s: failed to get semaphore\n",
5873 				    __func__);
5874 				return 1;
5875 			}
5876 		}
5877 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
5878 		if ((swfw_sync & (swmask | fwmask)) == 0) {
5879 			swfw_sync |= swmask;
5880 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
5881 			if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5882 				wm_put_swsm_semaphore(sc);
5883 			return 0;
5884 		}
5885 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5886 			wm_put_swsm_semaphore(sc);
5887 		delay(5000);
5888 	}
5889 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
5890 	    device_xname(sc->sc_dev), mask, swfw_sync);
5891 	return 1;
5892 }
5893 
5894 static void
5895 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
5896 {
5897 	uint32_t swfw_sync;
5898 
5899 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5900 		while (wm_get_swsm_semaphore(sc) != 0)
5901 			continue;
5902 	}
5903 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
5904 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
5905 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
5906 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5907 		wm_put_swsm_semaphore(sc);
5908 }
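
/*
 * SW_FW_SYNC carries one software-owned and one firmware-owned bit per
 * resource: the same mask is shifted by SWFW_SOFT_SHIFT for the driver and
 * SWFW_FIRM_SHIFT for firmware, and the resource is free only when both
 * bits are clear.  Acquire/release usage sketch (illustrative only):
 */
#if 0
	if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM) == 0) {
		/* ... access PHY 0 ... */
		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
	}
#endif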
5909 
5910 static int
5911 wm_get_swfwhw_semaphore(struct wm_softc *sc)
5912 {
5913 	uint32_t ext_ctrl;
5914 	int timeout;
5915 
5916 	for (timeout = 0; timeout < 200; timeout++) {
5917 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5918 		ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
5919 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
5920 
5921 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5922 		if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
5923 			return 0;
5924 		delay(5000);
5925 	}
5926 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
5927 	    device_xname(sc->sc_dev), ext_ctrl);
5928 	return 1;
5929 }
5930 
5931 static void
5932 wm_put_swfwhw_semaphore(struct wm_softc *sc)
5933 {
5934 	uint32_t ext_ctrl;
5935 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5936 	ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
5937 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
5938 }
5939 
5940 static int
5941 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
5942 {
5943 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
5944 	uint8_t bank_high_byte;
5945 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
5946 
5947 	if ((sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
5948 		/* Value of bit 22 corresponds to the flash bank we're on. */
5949 		*bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
5950 	} else {
5951 		wm_read_ich8_byte(sc, act_offset, &bank_high_byte);
5952 		if ((bank_high_byte & 0xc0) == 0x80)
5953 			*bank = 0;
5954 		else {
5955 			wm_read_ich8_byte(sc, act_offset + bank1_offset,
5956 			    &bank_high_byte);
5957 			if ((bank_high_byte & 0xc0) == 0x80)
5958 				*bank = 1;
5959 			else {
5960 				aprint_error_dev(sc->sc_dev,
5961 				    "EEPROM not present\n");
5962 				return -1;
5963 			}
5964 		}
5965 	}
5966 
5967 	return 0;
5968 }
5969 
5970 /******************************************************************************
5971  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
5972  * register.
5973  *
5974  * sc - Struct containing variables accessed by shared code
5975  * offset - offset of word in the EEPROM to read
5976  * data - word read from the EEPROM
5977  * words - number of words to read
5978  *****************************************************************************/
5979 static int
5980 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
5981 {
5982 	int32_t  error = 0;
5983 	uint32_t flash_bank = 0;
5984 	uint32_t act_offset = 0;
5985 	uint32_t bank_offset = 0;
5986 	uint16_t word = 0;
5987 	uint16_t i = 0;
5988 
5989 	/* We need to know which is the valid flash bank.  In the event
5990 	 * that we didn't allocate eeprom_shadow_ram, we may not be
5991 	 * managing flash_bank.  So it cannot be trusted and needs
5992 	 * to be updated with each read.
5993 	 */
5994 	error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
5995 	if (error) {
5996 		aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
5997 		    __func__);
5998 		return error;
5999 	}
6000 
6001 	/* Adjust offset if we're on bank 1, accounting for the word size */
6002 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
6003 
6004 	error = wm_get_swfwhw_semaphore(sc);
6005 	if (error) {
6006 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6007 		    __func__);
6008 		return error;
6009 	}
6010 
6011 	for (i = 0; i < words; i++) {
6012 		/* The NVM part needs a byte offset, hence * 2 */
6013 		act_offset = bank_offset + ((offset + i) * 2);
6014 		error = wm_read_ich8_word(sc, act_offset, &word);
6015 		if (error) {
6016 			aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
6017 			    __func__);
6018 			break;
6019 		}
6020 		data[i] = word;
6021 	}
6022 
6023 	wm_put_swfwhw_semaphore(sc);
6024 	return error;
6025 }
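
/*
 * A read through the function above takes a word offset and count and
 * fills a caller-supplied buffer; the valid-bank and byte-offset handling
 * is internal.  Usage sketch (illustrative only; the offset 0x00 is
 * arbitrary):
 */
#if 0
	uint16_t words[3];

	if (wm_read_eeprom_ich8(sc, 0x00, 3, words) == 0) {
		/* words[0..2] now hold the first three NVM words */
	}
#endif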
6026 
6027 /******************************************************************************
6028  * This function does initial flash setup so that a new read/write/erase cycle
6029  * can be started.
6030  *
6031  * sc - The pointer to the hw structure
6032  ****************************************************************************/
6033 static int32_t
6034 wm_ich8_cycle_init(struct wm_softc *sc)
6035 {
6036 	uint16_t hsfsts;
6037 	int32_t error = 1;
6038 	int32_t i     = 0;
6039 
6040 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6041 
6042 	/* Maybe check the Flash Descriptor Valid bit in Hw status */
6043 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
6044 		return error;
6045 	}
6046 
6047 	/* Clear FCERR in Hw status by writing 1 */
6048 	/* Clear DAEL in Hw status by writing a 1 */
6049 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
6050 
6051 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6052 
6053 	/*
6054 	 * Either we should have a hardware SPI cycle-in-progress bit to check
6055 	 * against in order to start a new cycle, or the FDONE bit should be
6056 	 * changed in the hardware so that it is 1 after hardware reset, which
6057 	 * can then be used as an indication whether a cycle is in progress or
6058 	 * has been completed.  We should also have some software semaphore
6059 	 * mechanism to guard FDONE or the cycle-in-progress bit so that two
6060 	 * threads' accesses to those bits can be serialized, or some way to
6061 	 * keep two threads from starting a cycle at the same time.
6062 	 */
6063 
6064 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
6065 		/*
6066 		 * There is no cycle running at present, so we can start a
6067 		 * cycle
6068 		 */
6069 
6070 		/* Begin by setting Flash Cycle Done. */
6071 		hsfsts |= HSFSTS_DONE;
6072 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6073 		error = 0;
6074 	} else {
6075 		/*
6076 		 * Otherwise poll for some time so the current cycle has a
6077 		 * chance to end before giving up.
6078 		 */
6079 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
6080 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6081 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
6082 				error = 0;
6083 				break;
6084 			}
6085 			delay(1);
6086 		}
6087 		if (error == 0) {
6088 			/*
6089 			 * The previous cycle ended within the timeout,
6090 			 * so now set the Flash Cycle Done.
6091 			 */
6092 			hsfsts |= HSFSTS_DONE;
6093 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6094 		}
6095 	}
6096 	return error;
6097 }
6098 
6099 /******************************************************************************
6100  * This function starts a flash cycle and waits for its completion
6101  *
6102  * sc - The pointer to the hw structure
6103  ****************************************************************************/
6104 static int32_t
6105 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
6106 {
6107 	uint16_t hsflctl;
6108 	uint16_t hsfsts;
6109 	int32_t error = 1;
6110 	uint32_t i = 0;
6111 
6112 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
6113 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
6114 	hsflctl |= HSFCTL_GO;
6115 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
6116 
6117 	/* wait till FDONE bit is set to 1 */
6118 	do {
6119 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6120 		if (hsfsts & HSFSTS_DONE)
6121 			break;
6122 		delay(1);
6123 		i++;
6124 	} while (i < timeout);
6125 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
6126 		error = 0;
6127 
6128 	return error;
6129 }
6130 
6131 /******************************************************************************
6132  * Reads a byte or word from the NVM using the ICH8 flash access registers.
6133  *
6134  * sc - The pointer to the hw structure
6135  * index - The index of the byte or word to read.
6136  * size - Size of data to read, 1=byte 2=word
6137  * data - Pointer to the word to store the value read.
6138  *****************************************************************************/
6139 static int32_t
6140 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
6141     uint32_t size, uint16_t* data)
6142 {
6143 	uint16_t hsfsts;
6144 	uint16_t hsflctl;
6145 	uint32_t flash_linear_address;
6146 	uint32_t flash_data = 0;
6147 	int32_t error = 1;
6148 	int32_t count = 0;
6149 
6150 	if (size < 1 || size > 2 || data == NULL ||
6151 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
6152 		return error;
6153 
6154 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
6155 	    sc->sc_ich8_flash_base;
6156 
6157 	do {
6158 		delay(1);
6159 		/* Steps */
6160 		error = wm_ich8_cycle_init(sc);
6161 		if (error)
6162 			break;
6163 
6164 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
6165 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
6166 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
6167 		    & HSFCTL_BCOUNT_MASK;
6168 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
6169 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
6170 
6171 		/*
6172 		 * Write the last 24 bits of index into Flash Linear address
6173 		 * field in Flash Address
6174 		 */
6175 		/* TODO: TBD maybe check the index against the size of flash */
6176 
6177 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
6178 
6179 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
6180 
6181 		/*
6182 		 * Check if FCERR is set to 1.  If it is, clear it and try
6183 		 * the whole sequence a few more times; otherwise read in
6184 		 * (shift in) the Flash Data0 register, least significant
6185 		 * byte first.
6186 		 */
6187 		if (error == 0) {
6188 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
6189 			if (size == 1)
6190 				*data = (uint8_t)(flash_data & 0x000000FF);
6191 			else if (size == 2)
6192 				*data = (uint16_t)(flash_data & 0x0000FFFF);
6193 			break;
6194 		} else {
6195 			/*
6196 			 * If we've gotten here, then things are probably
6197 			 * completely hosed, but if the error condition is
6198 			 * detected, it won't hurt to give it another try...
6199 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
6200 			 */
6201 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6202 			if (hsfsts & HSFSTS_ERR) {
6203 				/* Repeat for some time before giving up. */
6204 				continue;
6205 			} else if ((hsfsts & HSFSTS_DONE) == 0)
6206 				break;
6207 		}
6208 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
6209 
6210 	return error;
6211 }
6212 
6213 /******************************************************************************
6214  * Reads a single byte from the NVM using the ICH8 flash access registers.
6215  *
6216  * sc - pointer to wm_hw structure
6217  * index - The index of the byte to read.
6218  * data - Pointer to a byte to store the value read.
6219  *****************************************************************************/
6220 static int32_t
6221 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
6222 {
6223 	int32_t status;
6224 	uint16_t word = 0;
6225 
6226 	status = wm_read_ich8_data(sc, index, 1, &word);
6227 	if (status == 0)
6228 		*data = (uint8_t)word;
6229 
6230 	return status;
6231 }
6232 
6233 /******************************************************************************
6234  * Reads a word from the NVM using the ICH8 flash access registers.
6235  *
6236  * sc - pointer to wm_hw structure
6237  * index - The starting byte index of the word to read.
6238  * data - Pointer to a word to store the value read.
6239  *****************************************************************************/
6240 static int32_t
6241 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
6242 {
6243 	int32_t status;
6244 
6245 	status = wm_read_ich8_data(sc, index, 2, data);
6246 	return status;
6247 }
6248 
6249 static int
6250 wm_check_mng_mode(struct wm_softc *sc)
6251 {
6252 	int rv;
6253 
6254 	switch (sc->sc_type) {
6255 	case WM_T_ICH8:
6256 	case WM_T_ICH9:
6257 	case WM_T_ICH10:
6258 	case WM_T_PCH:
6259 		rv = wm_check_mng_mode_ich8lan(sc);
6260 		break;
6261 	case WM_T_82574:
6262 	case WM_T_82583:
6263 		rv = wm_check_mng_mode_82574(sc);
6264 		break;
6265 	case WM_T_82571:
6266 	case WM_T_82572:
6267 	case WM_T_82573:
6268 	case WM_T_80003:
6269 		rv = wm_check_mng_mode_generic(sc);
6270 		break;
6271 	default:
6272 		/* nothing to do */
6273 		rv = 0;
6274 		break;
6275 	}
6276 
6277 	return rv;
6278 }
6279 
6280 static int
6281 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
6282 {
6283 	uint32_t fwsm;
6284 
6285 	fwsm = CSR_READ(sc, WMREG_FWSM);
6286 
6287 	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
6288 		return 1;
6289 
6290 	return 0;
6291 }
6292 
6293 static int
6294 wm_check_mng_mode_82574(struct wm_softc *sc)
6295 {
6296 	uint16_t data;
6297 
6298 	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
6299 
6300 	if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
6301 		return 1;
6302 
6303 	return 0;
6304 }
6305 
6306 static int
6307 wm_check_mng_mode_generic(struct wm_softc *sc)
6308 {
6309 	uint32_t fwsm;
6310 
6311 	fwsm = CSR_READ(sc, WMREG_FWSM);
6312 
6313 	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
6314 		return 1;
6315 
6316 	return 0;
6317 }
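
/*
 * The two FWSM-based checks above differ only in the mode value they
 * expect; the common decode is (illustrative only):
 */
#if 0
	int mode = (CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK)
	    >> FWSM_MODE_SHIFT;	/* compare against the MNG_*_MODE values */
#endif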
6318 
6319 static int
6320 wm_check_reset_block(struct wm_softc *sc)
6321 {
6322 	uint32_t reg;
6323 
6324 	switch (sc->sc_type) {
6325 	case WM_T_ICH8:
6326 	case WM_T_ICH9:
6327 	case WM_T_ICH10:
6328 	case WM_T_PCH:
6329 		reg = CSR_READ(sc, WMREG_FWSM);
6330 		if ((reg & FWSM_RSPCIPHY) != 0)
6331 			return 0;
6332 		else
6333 			return -1;
6334 		break;
6335 	case WM_T_82571:
6336 	case WM_T_82572:
6337 	case WM_T_82573:
6338 	case WM_T_82574:
6339 	case WM_T_82583:
6340 	case WM_T_80003:
6341 		reg = CSR_READ(sc, WMREG_MANC);
6342 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
6343 			return -1;
6344 		else
6345 			return 0;
6346 		break;
6347 	default:
6348 		/* no problem */
6349 		break;
6350 	}
6351 
6352 	return 0;
6353 }
6354 
6355 static void
6356 wm_get_hw_control(struct wm_softc *sc)
6357 {
6358 	uint32_t reg;
6359 
6360 	switch (sc->sc_type) {
6361 	case WM_T_82573:
6362 #if 0
6363 	case WM_T_82574:
6364 	case WM_T_82583:
6365 		/*
6366 		 * FreeBSD's em driver has a function for the 82574 to check
6367 		 * the management mode, but it's not used. Why?
6368 		 */
6369 #endif
6370 		reg = CSR_READ(sc, WMREG_SWSM);
6371 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
6372 		break;
6373 	case WM_T_82571:
6374 	case WM_T_82572:
6375 	case WM_T_80003:
6376 	case WM_T_ICH8:
6377 	case WM_T_ICH9:
6378 	case WM_T_ICH10:
6379 	case WM_T_PCH:
6380 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
6381 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
6382 		break;
6383 	default:
6384 		break;
6385 	}
6386 }
6387 
6388 /* XXX Currently TBI only */
6389 static int
6390 wm_check_for_link(struct wm_softc *sc)
6391 {
6392 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6393 	uint32_t rxcw;
6394 	uint32_t ctrl;
6395 	uint32_t status;
6396 	uint32_t sig;
6397 
6398 	rxcw = CSR_READ(sc, WMREG_RXCW);
6399 	ctrl = CSR_READ(sc, WMREG_CTRL);
6400 	status = CSR_READ(sc, WMREG_STATUS);
6401 
6402 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
6403 
6404 	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
6405 		device_xname(sc->sc_dev), __func__,
6406 		((ctrl & CTRL_SWDPIN(1)) == sig),
6407 		((status & STATUS_LU) != 0),
6408 		((rxcw & RXCW_C) != 0)
6409 		    ));
6410 
6411 	/*
6412 	 * SWDPIN   LU RXCW
6413 	 *      0    0    0
6414 	 *      0    0    1	(should not happen)
6415 	 *      0    1    0	(should not happen)
6416 	 *      0    1    1	(should not happen)
6417 	 *      1    0    0	Disable autonego and force linkup
6418 	 *      1    0    1	got /C/ but not linkup yet
6419 	 *      1    1    0	(linkup)
6420 	 *      1    1    1	If IFM_AUTO, back to autonego
6421 	 *
6422 	 */
6423 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
6424 	    && ((status & STATUS_LU) == 0)
6425 	    && ((rxcw & RXCW_C) == 0)) {
6426 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
6427 			__func__));
6428 		sc->sc_tbi_linkup = 0;
6429 		/* Disable auto-negotiation in the TXCW register */
6430 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
6431 
6432 		/*
6433 		 * Force link-up and also force full-duplex.
6434 		 *
6435 		 * NOTE: CTRL was updated TFCE and RFCE automatically,
6436 		 * so we should update sc->sc_ctrl
6437 		 */
6438 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
6439 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6440 	} else if (((status & STATUS_LU) != 0)
6441 	    && ((rxcw & RXCW_C) != 0)
6442 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
6443 		sc->sc_tbi_linkup = 1;
6444 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
6445 			__func__));
6446 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
6447 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
6448 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
6449 	    && ((rxcw & RXCW_C) != 0)) {
6450 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
6451 	} else {
6452 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
6453 			status));
6454 	}
6455 
6456 	return 0;
6457 }
6458 
6459 /*
6460  * Workaround for pch's PHYs
6461  * XXX should be moved to new PHY driver?
6462  */
6463 static void
6464 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
6465 {
6466 
6467 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
6468 
6469 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
6470 
6471 	/* 82578 */
6472 	if (sc->sc_phytype == WMPHY_82578) {
6473 		/* PCH rev. < 3 */
6474 		if (sc->sc_rev < 3) {
6475 			/* XXX 6 bit shift? Why? Is it page2? */
6476 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
6477 			    0x66c0);
6478 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
6479 			    0xffff);
6480 		}
6481 
6482 		/* XXX phy rev. < 2 */
6483 	}
6484 
6485 	/* Select page 0 */
6486 
6487 	/* XXX acquire semaphore */
6488 	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
6489 	/* XXX release semaphore */
6490 
6491 	/*
6492 	 * Configure the K1 Si workaround during phy reset assuming there is
6493 	 * link so that it disables K1 if link is in 1Gbps.
6494 	 */
6495 	wm_k1_gig_workaround_hv(sc, 1);
6496 }
6497 
6498 static void
6499 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
6500 {
6501 	int k1_enable = sc->sc_nvm_k1_enabled;
6502 
6503 	/* XXX acquire semaphore */
6504 
6505 	if (link) {
6506 		k1_enable = 0;
6507 
6508 		/* Link stall fix for link up */
6509 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
6510 	} else {
6511 		/* Link stall fix for link down */
6512 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
6513 	}
6514 
6515 	wm_configure_k1_ich8lan(sc, k1_enable);
6516 
6517 	/* XXX release semaphore */
6518 }
6519 
6520 static void
6521 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
6522 {
6523 	uint32_t ctrl, ctrl_ext, tmp;
6524 	uint16_t kmrn_reg;
6525 
6526 	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
6527 
6528 	if (k1_enable)
6529 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
6530 	else
6531 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
6532 
6533 	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
6534 
6535 	delay(20);
6536 
6537 	ctrl = CSR_READ(sc, WMREG_CTRL);
6538 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6539 
6540 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
6541 	tmp |= CTRL_FRCSPD;
6542 
6543 	CSR_WRITE(sc, WMREG_CTRL, tmp);
6544 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
6545 	delay(20);
6546 
6547 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
6548 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6549 	delay(20);
6550 }
6551