/*	$NetBSD: if_wm.c,v 1.189 2010/01/07 17:45:58 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.189 2010/01/07 17:45:58 msaitoh Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK|WM_DEBUG_GMII;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */
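
/*
 * Illustrative sketch of the ring arithmetic above (hypothetical,
 * kept under #if 0 and never compiled): because WM_NTXDESC(sc) is a
 * power of two, WM_NEXTTX() advances an index with a mask instead of
 * a modulo, so wrapping from the last slot back to slot 0 is free.
 * wm_nexttx_example() is not part of the driver.
 */
#if 0
static inline void
wm_nexttx_example(struct wm_softc *sc)
{
	int idx = WM_NTXDESC(sc) - 1;	/* last descriptor in the ring */

	/* ((4096 - 1) + 1) & (4096 - 1) == 0 on >= 82544 parts. */
	idx = WM_NEXTTX(sc, idx);
	KASSERT(idx == 0);
}
#endif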

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
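
/*
 * Sanity check of the sizing comment above (hypothetical sketch,
 * not compiled): at 5 MCLBYTES buffers per full-sized jumbo packet,
 * 256 descriptors hold 256 / 5 = 51 of them, i.e. "room for 50
 * jumbo packets" with a descriptor to spare.
 */
#if 0
static inline int
wm_jumbo_capacity_example(void)
{
	return WM_NRXDESC / 5;		/* 256 / 5 == 51 packets */
}
#endif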

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
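
/*
 * Sketch of what the offset macros above evaluate to (hypothetical,
 * not compiled): offsetof() on the control-data clump turns a
 * descriptor index into a byte offset within the single DMA segment,
 * so one base address serves both rings.
 */
#if 0
static inline void
wm_cdoff_example(void)
{
	/* Rx descriptors sit at the start of the clump... */
	KASSERT(WM_CDRXOFF(0) == 0);
	/* ...and the Tx ring begins right after the whole Rx ring. */
	KASSERT(WM_CDTXOFF(0) == WM_NRXDESC * sizeof(wiseman_rxdesc_t));
}
#endif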

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */
	int sc_flowflags;		/* 802.3x flow control flags */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	callout_t sc_tick_ch;		/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int		sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int			sc_txnum;	/* must be a power of two */
	struct wm_txsoft	sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft	sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int			sc_ntxdesc;	/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int	sc_txfree;		/* number of free Tx descriptors */
	int	sc_txnext;		/* next ready Tx descriptor */

	int	sc_txsfree;		/* number of free Tx jobs */
	int	sc_txsnext;		/* next free Tx job */
	int	sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int	sc_txfifo_size;		/* Tx FIFO size */
	int	sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int	sc_txfifo_stall;	/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
	int	sc_rxdiscard;
	int	sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
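
/*
 * Usage sketch for the Rx-chain macros above (hypothetical, not
 * compiled): sc_rxtailp always points at the m_next slot to fill
 * next, so appending each 2k buffer of a multi-buffer packet is a
 * constant-time operation.
 */
#if 0
static void
wm_rxchain_example(struct wm_softc *sc, struct mbuf *m1, struct mbuf *m2)
{
	WM_RXCHAIN_RESET(sc);		/* sc_rxhead == NULL, sc_rxlen == 0 */
	WM_RXCHAIN_LINK(sc, m1);	/* sc_rxhead == m1 */
	WM_RXCHAIN_LINK(sc, m2);	/* m1->m_next == m2, sc_rxtail == m2 */
}
#endif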

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
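
/*
 * Note on CSR_WRITE_FLUSH() (hypothetical sketch, not compiled):
 * reading back a harmless register forces posted PCI writes out to
 * the chip, so a delay that follows really happens after the write
 * has landed.  wm_flush_example() is illustrative only.
 */
#if 0
static void
wm_flush_example(struct wm_softc *sc)
{
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
	CSR_WRITE_FLUSH(sc);	/* push the posted write to the device */
	delay(10000);		/* now timed from the write's arrival */
}
#endif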

#define ICH8_FLASH_READ32(sc, reg) \
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE32(sc, reg, data) \
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define ICH8_FLASH_READ16(sc, reg) \
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE16(sc, reg, data) \
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
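
/*
 * Sketch of how the _LO/_HI macros are meant to be consumed
 * (hypothetical, not compiled): the chip takes descriptor ring base
 * addresses as a 32-bit register pair, and on 32-bit bus_addr_t
 * platforms the high half collapses to 0 at compile time.  The
 * WMREG_TDBAL/WMREG_TDBAH pair is the Tx descriptor base.
 */
#if 0
static void
wm_txring_base_example(struct wm_softc *sc)
{
	CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
	CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
}
#endif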

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
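
/*
 * Usage sketch for WM_CDTXSYNC() (hypothetical, not compiled): a
 * packet's descriptors may straddle the end of the ring, so the
 * caller passes the first index and a count and the macro issues
 * one or two bus_dmamap_sync() calls as needed.
 */
#if 0
static void
wm_cdtxsync_example(struct wm_softc *sc)
{
	/* 4 descriptors starting 2 slots from the end: syncs 2 + 2. */
	WM_CDTXSYNC(sc, WM_NTXDESC(sc) - 2, 4,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
}
#endif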

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
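
/*
 * The align_tweak dance described above, in one place (hypothetical
 * sketch, not compiled; the real driver makes this decision during
 * initialization): with a standard MTU the frame still fits in 2k
 * after a 2-byte shift, so the post-header payload lands 4-byte
 * aligned; for jumbo MTUs the shift is dropped and the upper layers
 * realign by copying the headers.
 */
#if 0
static void
wm_align_tweak_example(struct wm_softc *sc, struct ifnet *ifp)
{
	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
		sc->sc_align_tweak = 0;	/* jumbo: no room to scoot */
	else
		sc->sc_align_tweak = 2;	/* scoot to align the payload */
}
#endif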

static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);

static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);

static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);

static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(device_t);

static void	wm_gmii_mediainit(struct wm_softc *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

static void	wm_set_spiaddrsize(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		     uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);

CFATTACH_DECL_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82567LM_3,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82567LF_3,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "i82801J (LF) LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
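
/*
 * Usage sketch for the indirect I/O pair above (hypothetical, not
 * compiled): offset 0 of the I/O BAR selects a register and offset 4
 * carries the data, mirroring the memory-mapped CSR space for the
 * handful of errata work-arounds that need it.
 */
#if 0
static void
wm_io_write_example(struct wm_softc *sc)
{
	if (sc->sc_flags & WM_F_IOH_VALID)
		wm_io_write(sc, WMREG_CTRL, sc->sc_ctrl);
}
#endif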

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

static void
wm_set_spiaddrsize(struct wm_softc *sc)
{
	uint32_t reg;

	sc->sc_flags |= WM_F_EEPROM_SPI;
	reg = CSR_READ(sc, WMREG_EECD);
	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
}

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	size_t cdata_size;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, 0);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: unable to find I/O BAR\n");
		else {
			/*
			 * Apparently the i8254x doesn't respond when the
			 * I/O BAR is 0, which suggests that it hasn't
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
					0, &sc->sc_iot, &sc->sc_ioh,
					NULL, NULL) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
					wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
			&& (sc->sc_type != WM_T_ICH10))
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if ((reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
					       PCI_CAP_PCIX,
					       &sc->sc_pcix_offset, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
				 sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose_dev(sc->sc_dev,
					    "resetting PCI-X MMRBC: %d -> %d\n",
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					   (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcix_offset + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
								      : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error_dev(sc->sc_dev,
				    "unknown PCIXSPD %d; assuming 66MHz\n",
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
				break;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	cdata_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE,
				      (bus_size_t) 0x100000000ULL,
				      &seg, 1, &rseg, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size,
				    (void **)&sc->sc_control_data,
				    BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size,
				       0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
				     sc->sc_control_data, cdata_size, NULL,
				     0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
					       WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
					  &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
					       MCLBYTES, 0, 0,
					  &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* clear interesting stat counters */
	CSR_READ(sc, WMREG_COLC);
	CSR_READ(sc, WMREG_RXERRC);

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
		if (wm_check_mng_mode(sc) != 0)
			wm_get_hw_control(sc);
		break;
	default:
		break;
	}

	/*
	 * Get some information about the EEPROM.
	 */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
		/* Microwire */
		sc->sc_ee_addrbits = 6;
		break;
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
		/* Microwire */
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			/* SPI */
			wm_set_spiaddrsize(sc);
		} else
			/* Microwire */
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82571:
	case WM_T_82572:
		/* SPI */
		wm_set_spiaddrsize(sc);
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		if (wm_is_onboard_nvm_eeprom(sc) == 0)
			sc->sc_flags |= WM_F_EEPROM_FLASH;
		else {
			/* SPI */
			wm_set_spiaddrsize(sc);
		}
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
		break;
	case WM_T_80003:
		/* SPI */
		wm_set_spiaddrsize(sc);
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
		/* Check whether EEPROM is present or not */
		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
			/* Not found */
			aprint_error_dev(sc->sc_dev,
			    "EEPROM PRESENT bit isn't set\n");
			sc->sc_flags |= WM_F_EEPROM_INVALID;
		}
		/* FALLTHROUGH */
	case WM_T_ICH10:
		/* FLASH */
		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
			aprint_error_dev(sc->sc_dev,
			    "can't map FLASH registers\n");
			return;
		}
		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
						ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size =
			((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
		sc->sc_ich8_flash_bank_size -=
			(reg & ICH_GFPREG_BASE_MASK);
		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
		break;
	default:
		break;
	}

	/*
	 * Defer printing the EEPROM type until after verifying the checksum.
	 * This allows the EEPROM type to be printed correctly in the case
	 * that no EEPROM is attached.
	 */
	/*
	 * Validate the EEPROM checksum. If the checksum fails, flag
	 * this for later, so we can fail future reads from the EEPROM.
	 */
	if (wm_validate_eeprom_checksum(sc)) {
		/*
		 * Read it again, because some PCI-e parts fail the
		 * first check due to the link being in a sleep state.
		 */
		if (wm_validate_eeprom_checksum(sc))
			sc->sc_flags |= WM_F_EEPROM_INVALID;
	}

	/* Set device properties (macflags) */
	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);

	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
	} else {
		if (sc->sc_flags & WM_F_EEPROM_SPI)
			eetype = "SPI";
		else
			eetype = "MicroWire";
		aprint_verbose_dev(sc->sc_dev,
		    "%u word (%d address bits) %s EEPROM\n",
		    1U << sc->sc_ee_addrbits,
		    sc->sc_ee_addrbits, eetype);
	}

	/*
	 * Read the Ethernet address from the EEPROM, if not first found
	 * in device properties.
	 */
	ea = prop_dictionary_get(dict, "mac-addr");
	if (ea != NULL) {
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
		    sizeof(myea) / sizeof(myea[0]), myea)) {
			aprint_error_dev(sc->sc_dev,
			    "unable to read Ethernet address\n");
			return;
		}
		enaddr[0] = myea[0] & 0xff;
		enaddr[1] = myea[0] >> 8;
		enaddr[2] = myea[1] & 0xff;
		enaddr[3] = myea[1] >> 8;
		enaddr[4] = myea[2] & 0xff;
		enaddr[5] = myea[2] >> 8;
	}

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the dual port controller.
	 */
	if (sc->sc_type == WM_T_82546 || sc->sc_type == WM_T_82546_3
	    || sc->sc_type == WM_T_82571 || sc->sc_type == WM_T_80003) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	pn = prop_dictionary_get(dict, "i82543-cfg1");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg1 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
			return;
		}
	}

	pn = prop_dictionary_get(dict, "i82543-cfg2");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg2 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
			return;
		}
	}

	if (sc->sc_type >= WM_T_82544) {
		pn = prop_dictionary_get(dict, "i82543-swdpin");
		if (pn != NULL) {
			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
			swdpin = (uint16_t) prop_number_integer_value(pn);
		} else {
			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
				aprint_error_dev(sc->sc_dev,
				    "unable to read SWDPIN\n");
				return;
			}
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_82573
	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
		/* STATUS_TBIMODE reserved/reused, can't rely on it */
		wm_gmii_mediainit(sc);
	} else if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	xname = device_xname(sc->sc_dev);
	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	/* Check for jumbo frame */
	switch (sc->sc_type) {
	case WM_T_82573:
		/* XXX limited to 9234 if ASPM is disabled */
		wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
		if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
	case WM_T_80003:
	case WM_T_ICH9:
	case WM_T_ICH10:
		/* XXX limited to 9234 */
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
		break;
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82583:
	case WM_T_ICH8:
1605 		/* No support for jumbo frame */
1606 		break;
1607 	default:
1608 		/* ETHER_MAX_LEN_JUMBO */
1609 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1610 		break;
1611 	}
1612 
1613 	/*
1614 	 * If we're an i82543 or later, we can support VLANs.
1615 	 */
1616 	if (sc->sc_type >= WM_T_82543)
1617 		sc->sc_ethercom.ec_capabilities |=
1618 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
1619 
1620 	/*
1621 	 * We can do IPv4/TCPv4/UDPv4 checksums in both directions, and
1622 	 * TCPv6/UDPv6 checksums on transmit.  Only on i82543 and later.
1623 	 */
1624 	if (sc->sc_type >= WM_T_82543) {
1625 		ifp->if_capabilities |=
1626 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1627 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1628 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1629 		    IFCAP_CSUM_TCPv6_Tx |
1630 		    IFCAP_CSUM_UDPv6_Tx;
1631 	}
1632 
1633 	/*
1634 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
1635 	 *
1636 	 *	82541GI (8086:1076) ... no
1637 	 *	82572EI (8086:10b9) ... yes
1638 	 */
1639 	if (sc->sc_type >= WM_T_82571) {
1640 		ifp->if_capabilities |=
1641 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1642 	}
1643 
1644 	/*
1645 	 * If we're an i82544 or later (except the i82547), we can do
1646 	 * TCP segmentation offload.
1647 	 */
1648 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
1649 		ifp->if_capabilities |= IFCAP_TSOv4;
1650 	}
1651 
1652 	if (sc->sc_type >= WM_T_82571) {
1653 		ifp->if_capabilities |= IFCAP_TSOv6;
1654 	}
1655 
1656 	/*
1657 	 * Attach the interface.
1658 	 */
1659 	if_attach(ifp);
1660 	ether_ifattach(ifp, enaddr);
1661 #if NRND > 0
1662 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
1663 #endif
1664 
1665 #ifdef WM_EVENT_COUNTERS
1666 	/* Attach event counters. */
1667 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1668 	    NULL, xname, "txsstall");
1669 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1670 	    NULL, xname, "txdstall");
1671 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1672 	    NULL, xname, "txfifo_stall");
1673 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1674 	    NULL, xname, "txdw");
1675 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1676 	    NULL, xname, "txqe");
1677 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1678 	    NULL, xname, "rxintr");
1679 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
1680 	    NULL, xname, "linkintr");
1681 
1682 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1683 	    NULL, xname, "rxipsum");
1684 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
1685 	    NULL, xname, "rxtusum");
1686 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1687 	    NULL, xname, "txipsum");
1688 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
1689 	    NULL, xname, "txtusum");
1690 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
1691 	    NULL, xname, "txtusum6");
1692 
1693 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
1694 	    NULL, xname, "txtso");
1695 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
1696 	    NULL, xname, "txtso6");
1697 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
1698 	    NULL, xname, "txtsopain");
1699 
1700 	for (i = 0; i < WM_NTXSEGS; i++) {
1701 		sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
1702 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
1703 		    NULL, xname, wm_txseg_evcnt_names[i]);
1704 	}
1705 
1706 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
1707 	    NULL, xname, "txdrop");
1708 
1709 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
1710 	    NULL, xname, "tu");
1711 
1712 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
1713 	    NULL, xname, "tx_xoff");
1714 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
1715 	    NULL, xname, "tx_xon");
1716 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
1717 	    NULL, xname, "rx_xoff");
1718 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
1719 	    NULL, xname, "rx_xon");
1720 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
1721 	    NULL, xname, "rx_macctl");
1722 #endif /* WM_EVENT_COUNTERS */
1723 
1724 	if (pmf_device_register(self, NULL, NULL))
1725 		pmf_class_network_register(self, ifp);
1726 	else
1727 		aprint_error_dev(self, "couldn't establish power handler\n");
1728 
1729 	return;
1730 
1731 	/*
1732 	 * Free any resources we've allocated during the failed attach
1733 	 * attempt.  Do this in reverse order and fall through.
1734 	 */
1735  fail_5:
1736 	for (i = 0; i < WM_NRXDESC; i++) {
1737 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
1738 			bus_dmamap_destroy(sc->sc_dmat,
1739 			    sc->sc_rxsoft[i].rxs_dmamap);
1740 	}
1741  fail_4:
1742 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1743 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
1744 			bus_dmamap_destroy(sc->sc_dmat,
1745 			    sc->sc_txsoft[i].txs_dmamap);
1746 	}
1747 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
1748  fail_3:
1749 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
1750  fail_2:
1751 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
1752 	    cdata_size);
1753  fail_1:
1754 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1755  fail_0:
1756 	return;
1757 }
1758 
1759 /*
1760  * wm_tx_offload:
1761  *
1762  *	Set up TCP/IP checksumming parameters for the
1763  *	specified packet.
1764  */
1765 static int
1766 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
1767     uint8_t *fieldsp)
1768 {
1769 	struct mbuf *m0 = txs->txs_mbuf;
1770 	struct livengood_tcpip_ctxdesc *t;
1771 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
1772 	uint32_t ipcse;
1773 	struct ether_header *eh;
1774 	int offset, iphl;
1775 	uint8_t fields;
1776 
1777 	/*
1778 	 * XXX It would be nice if the mbuf pkthdr had offset
1779 	 * fields for the protocol headers.
1780 	 */
1781 
1782 	eh = mtod(m0, struct ether_header *);
1783 	switch (htons(eh->ether_type)) {
1784 	case ETHERTYPE_IP:
1785 	case ETHERTYPE_IPV6:
1786 		offset = ETHER_HDR_LEN;
1787 		break;
1788 
1789 	case ETHERTYPE_VLAN:
1790 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1791 		break;
1792 
1793 	default:
1794 		/*
1795 		 * Don't support this protocol or encapsulation.
1796 		 */
1797 		*fieldsp = 0;
1798 		*cmdp = 0;
1799 		return (0);
1800 	}
1801 
1802 	if ((m0->m_pkthdr.csum_flags &
1803 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
1804 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
1805 	} else {
1806 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
1807 	}
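	/*
	 * Example: for an untagged IPv4 packet with a 20-byte header,
	 * offset is ETHER_HDR_LEN (14), so ipcse below is
	 * 14 + 20 - 1 = 33, the offset of the last byte of the IP
	 * header.
	 */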
1808 	ipcse = offset + iphl - 1;
1809 
1810 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
1811 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
1812 	seg = 0;
1813 	fields = 0;
1814 
1815 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
1816 		int hlen = offset + iphl;
1817 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
1818 
1819 		if (__predict_false(m0->m_len <
1820 				    (hlen + sizeof(struct tcphdr)))) {
1821 			/*
1822 			 * TCP/IP headers are not in the first mbuf; we need
1823 			 * to do this the slow and painful way.  Let's just
1824 			 * hope this doesn't happen very often.
1825 			 */
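			/*
			 * For TSO, the length fields (ip_len/ip6_plen)
			 * are zeroed and th_sum is seeded with the
			 * pseudo-header checksum excluding the length;
			 * the hardware then fills in per-segment values.
			 */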
1826 			struct tcphdr th;
1827 
1828 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
1829 
1830 			m_copydata(m0, hlen, sizeof(th), &th);
1831 			if (v4) {
1832 				struct ip ip;
1833 
1834 				m_copydata(m0, offset, sizeof(ip), &ip);
1835 				ip.ip_len = 0;
1836 				m_copyback(m0,
1837 				    offset + offsetof(struct ip, ip_len),
1838 				    sizeof(ip.ip_len), &ip.ip_len);
1839 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
1840 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
1841 			} else {
1842 				struct ip6_hdr ip6;
1843 
1844 				m_copydata(m0, offset, sizeof(ip6), &ip6);
1845 				ip6.ip6_plen = 0;
1846 				m_copyback(m0,
1847 				    offset + offsetof(struct ip6_hdr, ip6_plen),
1848 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
1849 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
1850 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
1851 			}
1852 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
1853 			    sizeof(th.th_sum), &th.th_sum);
1854 
1855 			hlen += th.th_off << 2;
1856 		} else {
1857 			/*
1858 			 * TCP/IP headers are in the first mbuf; we can do
1859 			 * this the easy way.
1860 			 */
1861 			struct tcphdr *th;
1862 
1863 			if (v4) {
1864 				struct ip *ip =
1865 				    (void *)(mtod(m0, char *) + offset);
1866 				th = (void *)(mtod(m0, char *) + hlen);
1867 
1868 				ip->ip_len = 0;
1869 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
1870 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1871 			} else {
1872 				struct ip6_hdr *ip6 =
1873 				    (void *)(mtod(m0, char *) + offset);
1874 				th = (void *)(mtod(m0, char *) + hlen);
1875 
1876 				ip6->ip6_plen = 0;
1877 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
1878 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
1879 			}
1880 			hlen += th->th_off << 2;
1881 		}
1882 
1883 		if (v4) {
1884 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
1885 			cmdlen |= WTX_TCPIP_CMD_IP;
1886 		} else {
1887 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
1888 			ipcse = 0;
1889 		}
1890 		cmd |= WTX_TCPIP_CMD_TSE;
1891 		cmdlen |= WTX_TCPIP_CMD_TSE |
1892 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
1893 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
1894 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
1895 	}
1896 
1897 	/*
1898 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
1899 	 * offload feature, if we load the context descriptor, we
1900 	 * MUST provide valid values for IPCSS and TUCSS fields.
1901 	 */
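	/*
	 * Example layout for an untagged IPv4/TCP frame: IPCSS = 14
	 * (start of the IP header), IPCSO = 14 + 10 (the offset of
	 * ip_sum), and TUCSS = 34 (start of the TCP header); a TUCSE
	 * of 0 means "checksum to the end of the packet".
	 */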
1902 
1903 	ipcs = WTX_TCPIP_IPCSS(offset) |
1904 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1905 	    WTX_TCPIP_IPCSE(ipcse);
1906 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
1907 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
1908 		fields |= WTX_IXSM;
1909 	}
1910 
1911 	offset += iphl;
1912 
1913 	if (m0->m_pkthdr.csum_flags &
1914 	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
1915 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
1916 		fields |= WTX_TXSM;
1917 		tucs = WTX_TCPIP_TUCSS(offset) |
1918 		    WTX_TCPIP_TUCSO(offset +
1919 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
1920 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
1921 	} else if ((m0->m_pkthdr.csum_flags &
1922 	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
1923 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
1924 		fields |= WTX_TXSM;
1925 		tucs = WTX_TCPIP_TUCSS(offset) |
1926 		    WTX_TCPIP_TUCSO(offset +
1927 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
1928 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
1929 	} else {
1930 		/* Just initialize it to a valid TCP context. */
1931 		tucs = WTX_TCPIP_TUCSS(offset) |
1932 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
1933 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
1934 	}
1935 
1936 	/* Fill in the context descriptor. */
1937 	t = (struct livengood_tcpip_ctxdesc *)
1938 	    &sc->sc_txdescs[sc->sc_txnext];
1939 	t->tcpip_ipcs = htole32(ipcs);
1940 	t->tcpip_tucs = htole32(tucs);
1941 	t->tcpip_cmdlen = htole32(cmdlen);
1942 	t->tcpip_seg = htole32(seg);
1943 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
1944 
1945 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
1946 	txs->txs_ndesc++;
1947 
1948 	*cmdp = cmd;
1949 	*fieldsp = fields;
1950 
1951 	return (0);
1952 }
1953 
1954 static void
1955 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
1956 {
1957 	struct mbuf *m;
1958 	int i;
1959 
1960 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
1961 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
1962 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
1963 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
1964 		    m->m_data, m->m_len, m->m_flags);
1965 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
1966 	    i, i == 1 ? "" : "s");
1967 }
1968 
1969 /*
1970  * wm_82547_txfifo_stall:
1971  *
1972  *	Callout used to wait for the 82547 Tx FIFO to drain,
1973  *	reset the FIFO pointers, and restart packet transmission.
1974  */
1975 static void
1976 wm_82547_txfifo_stall(void *arg)
1977 {
1978 	struct wm_softc *sc = arg;
1979 	int s;
1980 
1981 	s = splnet();
1982 
1983 	if (sc->sc_txfifo_stall) {
1984 		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
1985 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
1986 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
1987 			/*
1988 			 * Packets have drained.  Stop transmitter, reset
1989 			 * FIFO pointers, restart transmitter, and kick
1990 			 * the packet queue.
1991 			 */
1992 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
1993 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
1994 			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
1995 			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
1996 			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
1997 			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
1998 			CSR_WRITE(sc, WMREG_TCTL, tctl);
1999 			CSR_WRITE_FLUSH(sc);
2000 
2001 			sc->sc_txfifo_head = 0;
2002 			sc->sc_txfifo_stall = 0;
2003 			wm_start(&sc->sc_ethercom.ec_if);
2004 		} else {
2005 			/*
2006 			 * Still waiting for packets to drain; try again in
2007 			 * another tick.
2008 			 */
2009 			callout_schedule(&sc->sc_txfifo_ch, 1);
2010 		}
2011 	}
2012 
2013 	splx(s);
2014 }
2015 
2016 /*
2017  * wm_82547_txfifo_bugchk:
2018  *
2019  *	Check for bug condition in the 82547 Tx FIFO.  We need to
2020  *	prevent enqueueing a packet that would wrap around the end
2021 	 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
2022  *
2023  *	We do this by checking the amount of space before the end
2024  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
2025  *	the Tx FIFO, wait for all remaining packets to drain, reset
2026  *	the internal FIFO pointers to the beginning, and restart
2027  *	transmission on the interface.
2028  */
2029 #define	WM_FIFO_HDR		0x10
2030 #define	WM_82547_PAD_LEN	0x3e0
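/*
 * Example of the accounting below: each packet occupies
 * roundup(len + WM_FIFO_HDR, WM_FIFO_HDR) bytes of FIFO, so a
 * 1514-byte frame accounts for 1536 bytes.  We stall once
 * len >= WM_82547_PAD_LEN + (space left before the end of the FIFO);
 * otherwise we advance the head, wrapping it at sc_txfifo_size.
 */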
2031 static int
2032 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2033 {
2034 	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2035 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2036 
2037 	/* Just return if already stalled. */
2038 	if (sc->sc_txfifo_stall)
2039 		return (1);
2040 
2041 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
2042 		/* Stall only occurs in half-duplex mode. */
2043 		goto send_packet;
2044 	}
2045 
2046 	if (len >= WM_82547_PAD_LEN + space) {
2047 		sc->sc_txfifo_stall = 1;
2048 		callout_schedule(&sc->sc_txfifo_ch, 1);
2049 		return (1);
2050 	}
2051 
2052  send_packet:
2053 	sc->sc_txfifo_head += len;
2054 	if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2055 		sc->sc_txfifo_head -= sc->sc_txfifo_size;
2056 
2057 	return (0);
2058 }
2059 
2060 /*
2061  * wm_start:		[ifnet interface function]
2062  *
2063  *	Start packet transmission on the interface.
2064  */
2065 static void
2066 wm_start(struct ifnet *ifp)
2067 {
2068 	struct wm_softc *sc = ifp->if_softc;
2069 	struct mbuf *m0;
2070 	struct m_tag *mtag;
2071 	struct wm_txsoft *txs;
2072 	bus_dmamap_t dmamap;
2073 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2074 	bus_addr_t curaddr;
2075 	bus_size_t seglen, curlen;
2076 	uint32_t cksumcmd;
2077 	uint8_t cksumfields;
2078 
2079 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2080 		return;
2081 
2082 	/*
2083 	 * Remember the previous number of free descriptors.
2084 	 */
2085 	ofree = sc->sc_txfree;
2086 
2087 	/*
2088 	 * Loop through the send queue, setting up transmit descriptors
2089 	 * until we drain the queue, or use up all available transmit
2090 	 * descriptors.
2091 	 */
2092 	for (;;) {
2093 		/* Grab a packet off the queue. */
2094 		IFQ_POLL(&ifp->if_snd, m0);
2095 		if (m0 == NULL)
2096 			break;
2097 
2098 		DPRINTF(WM_DEBUG_TX,
2099 		    ("%s: TX: have packet to transmit: %p\n",
2100 		    device_xname(sc->sc_dev), m0));
2101 
2102 		/* Get a work queue entry. */
2103 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2104 			wm_txintr(sc);
2105 			if (sc->sc_txsfree == 0) {
2106 				DPRINTF(WM_DEBUG_TX,
2107 				    ("%s: TX: no free job descriptors\n",
2108 					device_xname(sc->sc_dev)));
2109 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2110 				break;
2111 			}
2112 		}
2113 
2114 		txs = &sc->sc_txsoft[sc->sc_txsnext];
2115 		dmamap = txs->txs_dmamap;
2116 
2117 		use_tso = (m0->m_pkthdr.csum_flags &
2118 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2119 
2120 		/*
2121 		 * So says the Linux driver:
2122 		 * The controller does a simple calculation to make sure
2123 		 * there is enough room in the FIFO before initiating the
2124 		 * DMA for each buffer.  The calc is:
2125 		 *	4 = ceil(buffer len / MSS)
2126 		 * To make sure we don't overrun the FIFO, adjust the max
2127 		 * buffer len if the MSS drops.
2128 		 */
2129 		dmamap->dm_maxsegsz =
2130 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2131 		    ? m0->m_pkthdr.segsz << 2
2132 		    : WTX_MAX_LEN;
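		/*
		 * Example: with an MSS of 1460, dm_maxsegsz is clamped
		 * to 4 * 1460 = 5840 bytes (assuming WTX_MAX_LEN is
		 * larger), so no single DMA buffer exceeds four
		 * segments' worth of payload.
		 */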
2133 
2134 		/*
2135 		 * Load the DMA map.  If this fails, the packet either
2136 		 * didn't fit in the allotted number of segments, or we
2137 		 * were short on resources.  For the too-many-segments
2138 		 * case, we simply report an error and drop the packet,
2139 		 * since we can't sanely copy a jumbo packet to a single
2140 		 * buffer.
2141 		 */
2142 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2143 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2144 		if (error) {
2145 			if (error == EFBIG) {
2146 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2147 				log(LOG_ERR, "%s: Tx packet consumes too many "
2148 				    "DMA segments, dropping...\n",
2149 				    device_xname(sc->sc_dev));
2150 				IFQ_DEQUEUE(&ifp->if_snd, m0);
2151 				wm_dump_mbuf_chain(sc, m0);
2152 				m_freem(m0);
2153 				continue;
2154 			}
2155 			/*
2156 			 * Short on resources, just stop for now.
2157 			 */
2158 			DPRINTF(WM_DEBUG_TX,
2159 			    ("%s: TX: dmamap load failed: %d\n",
2160 			    device_xname(sc->sc_dev), error));
2161 			break;
2162 		}
2163 
2164 		segs_needed = dmamap->dm_nsegs;
2165 		if (use_tso) {
2166 			/* For sentinel descriptor; see below. */
2167 			segs_needed++;
2168 		}
2169 
2170 		/*
2171 		 * Ensure we have enough descriptors free to describe
2172 		 * the packet.  Note, we always reserve one descriptor
2173 		 * at the end of the ring due to the semantics of the
2174 		 * TDT register, plus one more in the event we need
2175 		 * to load offload context.
2176 		 */
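		/*
		 * For example, a packet with 8 map segments plus a TSO
		 * sentinel needs segs_needed = 9, so we require
		 * sc_txfree >= 11 before committing.
		 */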
2177 		if (segs_needed > sc->sc_txfree - 2) {
2178 			/*
2179 			 * Not enough free descriptors to transmit this
2180 			 * packet.  We haven't committed anything yet,
2181 			 * so just unload the DMA map, put the packet
2182 			 * back on the queue, and punt.  Notify the upper
2183 			 * layer that there are no more slots left.
2184 			 */
2185 			DPRINTF(WM_DEBUG_TX,
2186 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
2187 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
2188 			    segs_needed, sc->sc_txfree - 1));
2189 			ifp->if_flags |= IFF_OACTIVE;
2190 			bus_dmamap_unload(sc->sc_dmat, dmamap);
2191 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2192 			break;
2193 		}
2194 
2195 		/*
2196 		 * Check for 82547 Tx FIFO bug.  We need to do this
2197 		 * once we know we can transmit the packet, since we
2198 		 * do some internal FIFO space accounting here.
2199 		 */
2200 		if (sc->sc_type == WM_T_82547 &&
2201 		    wm_82547_txfifo_bugchk(sc, m0)) {
2202 			DPRINTF(WM_DEBUG_TX,
2203 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
2204 			    device_xname(sc->sc_dev)));
2205 			ifp->if_flags |= IFF_OACTIVE;
2206 			bus_dmamap_unload(sc->sc_dmat, dmamap);
2207 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2208 			break;
2209 		}
2210 
2211 		IFQ_DEQUEUE(&ifp->if_snd, m0);
2212 
2213 		/*
2214 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2215 		 */
2216 
2217 		DPRINTF(WM_DEBUG_TX,
2218 		    ("%s: TX: packet has %d (%d) DMA segments\n",
2219 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2220 
2221 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2222 
2223 		/*
2224 		 * Store a pointer to the packet so that we can free it
2225 		 * later.
2226 		 *
2227 		 * Initially, we take the number of descriptors the
2228 		 * packet uses to be its number of DMA segments.  This may be
2229 		 * incremented by 1 if we do checksum offload (a descriptor
2230 		 * is used to set the checksum context).
2231 		 */
2232 		txs->txs_mbuf = m0;
2233 		txs->txs_firstdesc = sc->sc_txnext;
2234 		txs->txs_ndesc = segs_needed;
2235 
2236 		/* Set up offload parameters for this packet. */
2237 		if (m0->m_pkthdr.csum_flags &
2238 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
2239 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2240 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2241 			if (wm_tx_offload(sc, txs, &cksumcmd,
2242 					  &cksumfields) != 0) {
2243 				/* Error message already displayed. */
2244 				bus_dmamap_unload(sc->sc_dmat, dmamap);
2245 				continue;
2246 			}
2247 		} else {
2248 			cksumcmd = 0;
2249 			cksumfields = 0;
2250 		}
2251 
2252 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2253 
2254 		/* Sync the DMA map. */
2255 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2256 		    BUS_DMASYNC_PREWRITE);
2257 
2258 		/*
2259 		 * Initialize the transmit descriptor.
2260 		 */
2261 		for (nexttx = sc->sc_txnext, seg = 0;
2262 		     seg < dmamap->dm_nsegs; seg++) {
2263 			for (seglen = dmamap->dm_segs[seg].ds_len,
2264 			     curaddr = dmamap->dm_segs[seg].ds_addr;
2265 			     seglen != 0;
2266 			     curaddr += curlen, seglen -= curlen,
2267 			     nexttx = WM_NEXTTX(sc, nexttx)) {
2268 				curlen = seglen;
2269 
2270 				/*
2271 				 * So says the Linux driver:
2272 				 * Work around for premature descriptor
2273 				 * write-backs in TSO mode.  Append a
2274 				 * 4-byte sentinel descriptor.
2275 				 */
2276 				if (use_tso &&
2277 				    seg == dmamap->dm_nsegs - 1 &&
2278 				    curlen > 8)
2279 					curlen -= 4;
2280 
2281 				wm_set_dma_addr(
2282 				    &sc->sc_txdescs[nexttx].wtx_addr,
2283 				    curaddr);
2284 				sc->sc_txdescs[nexttx].wtx_cmdlen =
2285 				    htole32(cksumcmd | curlen);
2286 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2287 				    0;
2288 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2289 				    cksumfields;
2290 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2291 				lasttx = nexttx;
2292 
2293 				DPRINTF(WM_DEBUG_TX,
2294 				    ("%s: TX: desc %d: low 0x%08lx, "
2295 				     "len 0x%04x\n",
2296 				    device_xname(sc->sc_dev), nexttx,
2297 				    curaddr & 0xffffffffUL, (unsigned)curlen));
2298 			}
2299 		}
2300 
2301 		KASSERT(lasttx != -1);
2302 
2303 		/*
2304 		 * Set up the command byte on the last descriptor of
2305 		 * the packet.  If we're in the interrupt delay window,
2306 		 * delay the interrupt.
2307 		 */
2308 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
2309 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
2310 
2311 		/*
2312 		 * If VLANs are enabled and the packet has a VLAN tag, set
2313 		 * up the descriptor to encapsulate the packet for us.
2314 		 *
2315 		 * This is only valid on the last descriptor of the packet.
2316 		 */
2317 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2318 			sc->sc_txdescs[lasttx].wtx_cmdlen |=
2319 			    htole32(WTX_CMD_VLE);
2320 			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2321 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2322 		}
2323 
2324 		txs->txs_lastdesc = lasttx;
2325 
2326 		DPRINTF(WM_DEBUG_TX,
2327 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
2328 		    device_xname(sc->sc_dev),
2329 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2330 
2331 		/* Sync the descriptors we're using. */
2332 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2333 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2334 
2335 		/* Give the packet to the chip. */
2336 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2337 
2338 		DPRINTF(WM_DEBUG_TX,
2339 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2340 
2341 		DPRINTF(WM_DEBUG_TX,
2342 		    ("%s: TX: finished transmitting packet, job %d\n",
2343 		    device_xname(sc->sc_dev), sc->sc_txsnext));
2344 
2345 		/* Advance the tx pointer. */
2346 		sc->sc_txfree -= txs->txs_ndesc;
2347 		sc->sc_txnext = nexttx;
2348 
2349 		sc->sc_txsfree--;
2350 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2351 
2352 #if NBPFILTER > 0
2353 		/* Pass the packet to any BPF listeners. */
2354 		if (ifp->if_bpf)
2355 			bpf_mtap(ifp->if_bpf, m0);
2356 #endif /* NBPFILTER > 0 */
2357 	}
2358 
2359 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2360 		/* No more slots; notify upper layer. */
2361 		ifp->if_flags |= IFF_OACTIVE;
2362 	}
2363 
2364 	if (sc->sc_txfree != ofree) {
2365 		/* Set a watchdog timer in case the chip flakes out. */
2366 		ifp->if_timer = 5;
2367 	}
2368 }
2369 
2370 /*
2371  * wm_watchdog:		[ifnet interface function]
2372  *
2373  *	Watchdog timer handler.
2374  */
2375 static void
2376 wm_watchdog(struct ifnet *ifp)
2377 {
2378 	struct wm_softc *sc = ifp->if_softc;
2379 
2380 	/*
2381 	 * Since we're using delayed interrupts, sweep up
2382 	 * before we report an error.
2383 	 */
2384 	wm_txintr(sc);
2385 
2386 	if (sc->sc_txfree != WM_NTXDESC(sc)) {
2387 		log(LOG_ERR,
2388 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2389 		    device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2390 		    sc->sc_txnext);
2391 		ifp->if_oerrors++;
2392 
2393 		/* Reset the interface. */
2394 		(void) wm_init(ifp);
2395 	}
2396 
2397 	/* Try to get more packets going. */
2398 	wm_start(ifp);
2399 }
2400 
2401 /*
2402  * wm_ioctl:		[ifnet interface function]
2403  *
2404  *	Handle control requests from the operator.
2405  */
2406 static int
2407 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2408 {
2409 	struct wm_softc *sc = ifp->if_softc;
2410 	struct ifreq *ifr = (struct ifreq *) data;
2411 	struct ifaddr *ifa = (struct ifaddr *)data;
2412 	struct sockaddr_dl *sdl;
2413 	int diff, s, error;
2414 
2415 	s = splnet();
2416 
2417 	switch (cmd) {
2418 	case SIOCSIFFLAGS:
2419 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
2420 			break;
2421 		if (ifp->if_flags & IFF_UP) {
2422 			diff = (ifp->if_flags ^ sc->sc_if_flags)
2423 			    & (IFF_PROMISC | IFF_ALLMULTI);
2424 			if ((diff & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2425 				/*
2426 				 * If the only difference between the old
2427 				 * and new flags is IFF_PROMISC or
2428 				 * IFF_ALLMULTI, just set the multicast
2429 				 * filter (don't reset, to avoid a link down).
2430 				 */
2431 				wm_set_filter(sc);
2432 			} else {
2433 				/*
2434 				 * Reset the interface to pick up changes in
2435 				 * any other flags that affect the hardware
2436 				 * state.
2437 				 */
2438 				wm_init(ifp);
2439 			}
2440 		} else {
2441 			if (ifp->if_flags & IFF_RUNNING)
2442 				wm_stop(ifp, 1);
2443 		}
2444 		sc->sc_if_flags = ifp->if_flags;
2445 		error = 0;
2446 		break;
2447 	case SIOCSIFMEDIA:
2448 	case SIOCGIFMEDIA:
2449 		/* Flow control requires full-duplex mode. */
2450 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2451 		    (ifr->ifr_media & IFM_FDX) == 0)
2452 			ifr->ifr_media &= ~IFM_ETH_FMASK;
2453 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2454 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2455 				/* We can do both TXPAUSE and RXPAUSE. */
2456 				ifr->ifr_media |=
2457 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2458 			}
2459 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2460 		}
2461 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2462 		break;
2463 	case SIOCINITIFADDR:
2464 		if (ifa->ifa_addr->sa_family == AF_LINK) {
2465 			sdl = satosdl(ifp->if_dl->ifa_addr);
2466 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2467 					LLADDR(satosdl(ifa->ifa_addr)),
2468 					ifp->if_addrlen);
2469 			/* unicast address is first multicast entry */
2470 			wm_set_filter(sc);
2471 			error = 0;
2472 			break;
2473 		}
2474 		/* Fall through for rest */
2475 	default:
2476 		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
2477 			break;
2478 
2479 		error = 0;
2480 
2481 		if (cmd == SIOCSIFCAP)
2482 			error = (*ifp->if_init)(ifp);
2483 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2484 			;
2485 		else if (ifp->if_flags & IFF_RUNNING) {
2486 			/*
2487 			 * Multicast list has changed; set the hardware filter
2488 			 * accordingly.
2489 			 */
2490 			wm_set_filter(sc);
2491 		}
2492 		break;
2493 	}
2494 
2495 	/* Try to get more packets going. */
2496 	wm_start(ifp);
2497 
2498 	splx(s);
2499 	return (error);
2500 }
2501 
2502 /*
2503  * wm_intr:
2504  *
2505  *	Interrupt service routine.
2506  */
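/*
 * Note that the ICR is read-to-clear: each CSR_READ() below both
 * fetches and acknowledges the pending interrupt causes, so we loop
 * until no cause we care about remains set.
 */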
2507 static int
2508 wm_intr(void *arg)
2509 {
2510 	struct wm_softc *sc = arg;
2511 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2512 	uint32_t icr;
2513 	int handled = 0;
2514 
2515 	while (1 /* CONSTCOND */) {
2516 		icr = CSR_READ(sc, WMREG_ICR);
2517 		if ((icr & sc->sc_icr) == 0)
2518 			break;
2519 #if 0 /*NRND > 0*/
2520 		if (RND_ENABLED(&sc->rnd_source))
2521 			rnd_add_uint32(&sc->rnd_source, icr);
2522 #endif
2523 
2524 		handled = 1;
2525 
2526 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2527 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
2528 			DPRINTF(WM_DEBUG_RX,
2529 			    ("%s: RX: got Rx intr 0x%08x\n",
2530 			    device_xname(sc->sc_dev),
2531 			    icr & (ICR_RXDMT0|ICR_RXT0)));
2532 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
2533 		}
2534 #endif
2535 		wm_rxintr(sc);
2536 
2537 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2538 		if (icr & ICR_TXDW) {
2539 			DPRINTF(WM_DEBUG_TX,
2540 			    ("%s: TX: got TXDW interrupt\n",
2541 			    device_xname(sc->sc_dev)));
2542 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
2543 		}
2544 #endif
2545 		wm_txintr(sc);
2546 
2547 		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
2548 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
2549 			wm_linkintr(sc, icr);
2550 		}
2551 
2552 		if (icr & ICR_RXO) {
2553 			ifp->if_ierrors++;
2554 #if defined(WM_DEBUG)
2555 			log(LOG_WARNING, "%s: Receive overrun\n",
2556 			    device_xname(sc->sc_dev));
2557 #endif /* defined(WM_DEBUG) */
2558 		}
2559 	}
2560 
2561 	if (handled) {
2562 		/* Try to get more packets going. */
2563 		wm_start(ifp);
2564 	}
2565 
2566 	return (handled);
2567 }
2568 
2569 /*
2570  * wm_txintr:
2571  *
2572  *	Helper; handle transmit interrupts.
2573  */
2574 static void
2575 wm_txintr(struct wm_softc *sc)
2576 {
2577 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2578 	struct wm_txsoft *txs;
2579 	uint8_t status;
2580 	int i;
2581 
2582 	ifp->if_flags &= ~IFF_OACTIVE;
2583 
2584 	/*
2585 	 * Go through the Tx list and free mbufs for those
2586 	 * frames which have been transmitted.
2587 	 */
2588 	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
2589 	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
2590 		txs = &sc->sc_txsoft[i];
2591 
2592 		DPRINTF(WM_DEBUG_TX,
2593 		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
2594 
2595 		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
2596 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2597 
2598 		status =
2599 		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
2600 		if ((status & WTX_ST_DD) == 0) {
2601 			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
2602 			    BUS_DMASYNC_PREREAD);
2603 			break;
2604 		}
2605 
2606 		DPRINTF(WM_DEBUG_TX,
2607 		    ("%s: TX: job %d done: descs %d..%d\n",
2608 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
2609 		    txs->txs_lastdesc));
2610 
2611 		/*
2612 		 * XXX We should probably be using the statistics
2613 		 * XXX registers, but I don't know if they exist
2614 		 * XXX on chips before the i82544.
2615 		 */
2616 
2617 #ifdef WM_EVENT_COUNTERS
2618 		if (status & WTX_ST_TU)
2619 			WM_EVCNT_INCR(&sc->sc_ev_tu);
2620 #endif /* WM_EVENT_COUNTERS */
2621 
2622 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
2623 			ifp->if_oerrors++;
2624 			if (status & WTX_ST_LC)
2625 				log(LOG_WARNING, "%s: late collision\n",
2626 				    device_xname(sc->sc_dev));
2627 			else if (status & WTX_ST_EC) {
2628 				ifp->if_collisions += 16;
2629 				log(LOG_WARNING, "%s: excessive collisions\n",
2630 				    device_xname(sc->sc_dev));
2631 			}
2632 		} else
2633 			ifp->if_opackets++;
2634 
2635 		sc->sc_txfree += txs->txs_ndesc;
2636 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
2637 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2638 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2639 		m_freem(txs->txs_mbuf);
2640 		txs->txs_mbuf = NULL;
2641 	}
2642 
2643 	/* Update the dirty transmit buffer pointer. */
2644 	sc->sc_txsdirty = i;
2645 	DPRINTF(WM_DEBUG_TX,
2646 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
2647 
2648 	/*
2649 	 * If there are no more pending transmissions, cancel the watchdog
2650 	 * timer.
2651 	 */
2652 	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
2653 		ifp->if_timer = 0;
2654 }
2655 
2656 /*
2657  * wm_rxintr:
2658  *
2659  *	Helper; handle receive interrupts.
2660  */
2661 static void
2662 wm_rxintr(struct wm_softc *sc)
2663 {
2664 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2665 	struct wm_rxsoft *rxs;
2666 	struct mbuf *m;
2667 	int i, len;
2668 	uint8_t status, errors;
2669 	uint16_t vlantag;
2670 
2671 	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
2672 		rxs = &sc->sc_rxsoft[i];
2673 
2674 		DPRINTF(WM_DEBUG_RX,
2675 		    ("%s: RX: checking descriptor %d\n",
2676 		    device_xname(sc->sc_dev), i));
2677 
2678 		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2679 
2680 		status = sc->sc_rxdescs[i].wrx_status;
2681 		errors = sc->sc_rxdescs[i].wrx_errors;
2682 		len = le16toh(sc->sc_rxdescs[i].wrx_len);
2683 		vlantag = sc->sc_rxdescs[i].wrx_special;
2684 
2685 		if ((status & WRX_ST_DD) == 0) {
2686 			/*
2687 			 * We have processed all of the receive descriptors.
2688 			 */
2689 			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
2690 			break;
2691 		}
2692 
2693 		if (__predict_false(sc->sc_rxdiscard)) {
2694 			DPRINTF(WM_DEBUG_RX,
2695 			    ("%s: RX: discarding contents of descriptor %d\n",
2696 			    device_xname(sc->sc_dev), i));
2697 			WM_INIT_RXDESC(sc, i);
2698 			if (status & WRX_ST_EOP) {
2699 				/* Reset our state. */
2700 				DPRINTF(WM_DEBUG_RX,
2701 				    ("%s: RX: resetting rxdiscard -> 0\n",
2702 				    device_xname(sc->sc_dev)));
2703 				sc->sc_rxdiscard = 0;
2704 			}
2705 			continue;
2706 		}
2707 
2708 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2709 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2710 
2711 		m = rxs->rxs_mbuf;
2712 
2713 		/*
2714 		 * Add a new receive buffer to the ring, unless the
2715 		 * length is zero; treat a zero length as a failed
2716 		 * mapping.
2717 		 */
2718 		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
2719 			/*
2720 			 * Failed, throw away what we've done so
2721 			 * far, and discard the rest of the packet.
2722 			 */
2723 			ifp->if_ierrors++;
2724 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2725 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2726 			WM_INIT_RXDESC(sc, i);
2727 			if ((status & WRX_ST_EOP) == 0)
2728 				sc->sc_rxdiscard = 1;
2729 			if (sc->sc_rxhead != NULL)
2730 				m_freem(sc->sc_rxhead);
2731 			WM_RXCHAIN_RESET(sc);
2732 			DPRINTF(WM_DEBUG_RX,
2733 			    ("%s: RX: Rx buffer allocation failed, "
2734 			    "dropping packet%s\n", device_xname(sc->sc_dev),
2735 			    sc->sc_rxdiscard ? " (discard)" : ""));
2736 			continue;
2737 		}
2738 
2739 		m->m_len = len;
2740 		sc->sc_rxlen += len;
2741 		DPRINTF(WM_DEBUG_RX,
2742 		    ("%s: RX: buffer at %p len %d\n",
2743 		    device_xname(sc->sc_dev), m->m_data, len));
2744 
2745 		/*
2746 		 * If this is not the end of the packet, keep
2747 		 * looking.
2748 		 */
2749 		if ((status & WRX_ST_EOP) == 0) {
2750 			WM_RXCHAIN_LINK(sc, m);
2751 			DPRINTF(WM_DEBUG_RX,
2752 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
2753 			    device_xname(sc->sc_dev), sc->sc_rxlen));
2754 			continue;
2755 		}
2756 
2757 		/*
2758 		 * Okay, we have the entire packet now.  The chip is
2759 		 * configured to include the FCS (not all chips can
2760 		 * be configured to strip it), so we need to trim it.
2761 		 * May need to adjust length of previous mbuf in the
2762 		 * chain if the current mbuf is too short.
2763 		 */
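		/*
		 * Example: if the final mbuf holds only 2 bytes, the
		 * 4-byte FCS (ETHER_CRC_LEN) straddles mbufs; we trim
		 * the remaining 2 bytes from the previous mbuf and
		 * zero this one.
		 */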
2764 		if (m->m_len < ETHER_CRC_LEN) {
2765 			sc->sc_rxtail->m_len -= (ETHER_CRC_LEN - m->m_len);
2766 			m->m_len = 0;
2767 		} else {
2768 			m->m_len -= ETHER_CRC_LEN;
2769 		}
2770 		len = sc->sc_rxlen - ETHER_CRC_LEN;
2771 
2772 		WM_RXCHAIN_LINK(sc, m);
2773 
2774 		*sc->sc_rxtailp = NULL;
2775 		m = sc->sc_rxhead;
2776 
2777 		WM_RXCHAIN_RESET(sc);
2778 
2779 		DPRINTF(WM_DEBUG_RX,
2780 		    ("%s: RX: have entire packet, len -> %d\n",
2781 		    device_xname(sc->sc_dev), len));
2782 
2783 		/*
2784 		 * If an error occurred, update stats and drop the packet.
2785 		 */
2786 		if (errors &
2787 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
2788 			ifp->if_ierrors++;
2789 			if (errors & WRX_ER_SE)
2790 				log(LOG_WARNING, "%s: symbol error\n",
2791 				    device_xname(sc->sc_dev));
2792 			else if (errors & WRX_ER_SEQ)
2793 				log(LOG_WARNING, "%s: receive sequence error\n",
2794 				    device_xname(sc->sc_dev));
2795 			else if (errors & WRX_ER_CE)
2796 				log(LOG_WARNING, "%s: CRC error\n",
2797 				    device_xname(sc->sc_dev));
2798 			m_freem(m);
2799 			continue;
2800 		}
2801 
2802 		/*
2803 		 * No errors.  Receive the packet.
2804 		 */
2805 		m->m_pkthdr.rcvif = ifp;
2806 		m->m_pkthdr.len = len;
2807 
2808 		/*
2809 		 * If VLANs are enabled, VLAN packets have been unwrapped
2810 		 * for us.  Associate the tag with the packet.
2811 		 */
2812 		if ((status & WRX_ST_VP) != 0) {
2813 			VLAN_INPUT_TAG(ifp, m,
2814 			    le16toh(vlantag),
2815 			    continue);
2816 		}
2817 
2818 		/*
2819 		 * Set up checksum info for this packet.
2820 		 */
2821 		if ((status & WRX_ST_IXSM) == 0) {
2822 			if (status & WRX_ST_IPCS) {
2823 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
2824 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2825 				if (errors & WRX_ER_IPE)
2826 					m->m_pkthdr.csum_flags |=
2827 					    M_CSUM_IPv4_BAD;
2828 			}
2829 			if (status & WRX_ST_TCPCS) {
2830 				/*
2831 				 * Note: we don't know if this was TCP or UDP,
2832 				 * so we just set both bits, and expect the
2833 				 * upper layers to deal.
2834 				 */
2835 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
2836 				m->m_pkthdr.csum_flags |=
2837 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
2838 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
2839 				if (errors & WRX_ER_TCPE)
2840 					m->m_pkthdr.csum_flags |=
2841 					    M_CSUM_TCP_UDP_BAD;
2842 			}
2843 		}
2844 
2845 		ifp->if_ipackets++;
2846 
2847 #if NBPFILTER > 0
2848 		/* Pass this up to any BPF listeners. */
2849 		if (ifp->if_bpf)
2850 			bpf_mtap(ifp->if_bpf, m);
2851 #endif /* NBPFILTER > 0 */
2852 
2853 		/* Pass it on. */
2854 		(*ifp->if_input)(ifp, m);
2855 	}
2856 
2857 	/* Update the receive pointer. */
2858 	sc->sc_rxptr = i;
2859 
2860 	DPRINTF(WM_DEBUG_RX,
2861 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
2862 }
2863 
2864 /*
2865  * wm_linkintr:
2866  *
2867  *	Helper; handle link interrupts.
2868  */
2869 static void
2870 wm_linkintr(struct wm_softc *sc, uint32_t icr)
2871 {
2872 	uint32_t status;
2873 
2874 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
2875 		__func__));
2876 	/*
2877 	 * If we get a link status interrupt on a 1000BASE-T
2878 	 * device, just fall into the normal MII tick path.
2879 	 */
2880 	if (sc->sc_flags & WM_F_HAS_MII) {
2881 		if (icr & ICR_LSC) {
2882 			DPRINTF(WM_DEBUG_LINK,
2883 			    ("%s: LINK: LSC -> mii_tick\n",
2884 			    device_xname(sc->sc_dev)));
2885 			mii_tick(&sc->sc_mii);
2886 			if (sc->sc_type == WM_T_82543) {
2887 				int miistatus, active;
2888 
2889 				/*
2890 				 * With the 82543, we need to force the
2891 				 * MAC's speed and duplex to match the
2892 				 * PHY's current configuration.
2893 				 */
2894 				miistatus = sc->sc_mii.mii_media_status;
2895 
2896 				if (miistatus & IFM_ACTIVE) {
2897 					active = sc->sc_mii.mii_media_active;
2898 					sc->sc_ctrl &= ~(CTRL_SPEED_MASK
2899 					    | CTRL_FD);
2900 					switch (IFM_SUBTYPE(active)) {
2901 					case IFM_10_T:
2902 						sc->sc_ctrl |= CTRL_SPEED_10;
2903 						break;
2904 					case IFM_100_TX:
2905 						sc->sc_ctrl |= CTRL_SPEED_100;
2906 						break;
2907 					case IFM_1000_T:
2908 						sc->sc_ctrl |= CTRL_SPEED_1000;
2909 						break;
2910 					default:
2911 						/*
2912 						 * fiber?
2913 						 * Should not get here.
2914 						 */
2915 						printf("unknown media (%x)\n",
2916 						    active);
2917 						break;
2918 					}
2919 					if (active & IFM_FDX)
2920 						sc->sc_ctrl |= CTRL_FD;
2921 					CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2922 				}
2923 			}
2924 		} else if (icr & ICR_RXSEQ) {
2925 			DPRINTF(WM_DEBUG_LINK,
2926 			    ("%s: LINK Receive sequence error\n",
2927 			    device_xname(sc->sc_dev)));
2928 		}
2929 		return;
2930 	}
2931 
2932 	status = CSR_READ(sc, WMREG_STATUS);
2933 	if (icr & ICR_LSC) {
2934 		if (status & STATUS_LU) {
2935 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
2936 			    device_xname(sc->sc_dev),
2937 			    (status & STATUS_FD) ? "FDX" : "HDX"));
2938 			/*
2939 			 * NOTE: CTRL will update TFCE and RFCE automatically,
2940 			 * so we re-read CTRL to refresh sc->sc_ctrl.
2941 			 */
2942 
2943 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
2944 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2945 			sc->sc_fcrtl &= ~FCRTL_XONE;
2946 			if (status & STATUS_FD)
2947 				sc->sc_tctl |=
2948 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2949 			else
2950 				sc->sc_tctl |=
2951 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2952 			if (sc->sc_ctrl & CTRL_TFCE)
2953 				sc->sc_fcrtl |= FCRTL_XONE;
2954 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2955 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
2956 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
2957 				      sc->sc_fcrtl);
2958 			sc->sc_tbi_linkup = 1;
2959 		} else {
2960 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
2961 			    device_xname(sc->sc_dev)));
2962 			sc->sc_tbi_linkup = 0;
2963 		}
2964 		wm_tbi_set_linkled(sc);
2965 	} else if (icr & ICR_RXCFG) {
2966 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
2967 		    device_xname(sc->sc_dev)));
2968 		sc->sc_tbi_nrxcfg++;
2969 		wm_check_for_link(sc);
2970 	} else if (icr & ICR_RXSEQ) {
2971 		DPRINTF(WM_DEBUG_LINK,
2972 		    ("%s: LINK: Receive sequence error\n",
2973 		    device_xname(sc->sc_dev)));
2974 	}
2975 }
2976 
2977 /*
2978  * wm_tick:
2979  *
2980  *	One second timer, used to check link status, sweep up
2981  *	completed transmit jobs, etc.
2982  */
2983 static void
2984 wm_tick(void *arg)
2985 {
2986 	struct wm_softc *sc = arg;
2987 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2988 	int s;
2989 
2990 	s = splnet();
2991 
2992 	if (sc->sc_type >= WM_T_82542_2_1) {
2993 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2994 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2995 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2996 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2997 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2998 	}
2999 
3000 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3001 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3002 
3003 	if (sc->sc_flags & WM_F_HAS_MII)
3004 		mii_tick(&sc->sc_mii);
3005 	else
3006 		wm_tbi_check_link(sc);
3007 
3008 	splx(s);
3009 
3010 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3011 }
3012 
3013 /*
3014  * wm_reset:
3015  *
3016  *	Reset the i82542 chip.
3017  */
3018 static void
3019 wm_reset(struct wm_softc *sc)
3020 {
3021 	int phy_reset = 0;
3022 	uint32_t reg, func, mask;
3023 	int i;
3024 
3025 	/*
3026 	 * Allocate on-chip memory according to the MTU size.
3027 	 * The Packet Buffer Allocation register must be written
3028 	 * before the chip is reset.
3029 	 */
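	/*
	 * For example (assuming the PBA_* constants count 1 KB units),
	 * an 82547 with a jumbo MTU gets PBA_22K of receive buffer,
	 * leaving 18 KB of the 40 KB packet buffer for the Tx FIFO.
	 */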
3030 	switch (sc->sc_type) {
3031 	case WM_T_82547:
3032 	case WM_T_82547_2:
3033 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3034 		    PBA_22K : PBA_30K;
3035 		sc->sc_txfifo_head = 0;
3036 		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3037 		sc->sc_txfifo_size =
3038 		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3039 		sc->sc_txfifo_stall = 0;
3040 		break;
3041 	case WM_T_82571:
3042 	case WM_T_82572:
3043 	case WM_T_80003:
3044 		sc->sc_pba = PBA_32K;
3045 		break;
3046 	case WM_T_82573:
3047 		sc->sc_pba = PBA_12K;
3048 		break;
3049 	case WM_T_82574:
3050 	case WM_T_82583:
3051 		sc->sc_pba = PBA_20K;
3052 		break;
3053 	case WM_T_ICH8:
3054 		sc->sc_pba = PBA_8K;
3055 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3056 		break;
3057 	case WM_T_ICH9:
3058 	case WM_T_ICH10:
3059 		sc->sc_pba = PBA_10K;
3060 		break;
3061 	default:
3062 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3063 		    PBA_40K : PBA_48K;
3064 		break;
3065 	}
3066 	CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3067 
3068 	if (sc->sc_flags & WM_F_PCIE) {
3069 		int timeout = 800;
3070 
3071 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
3072 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3073 
3074 		while (timeout--) {
3075 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
3076 				break;
3077 			delay(100);
3078 		}
3079 	}
3080 
3081 	/* clear interrupt */
3082 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3083 
3084 	/* Stop the transmit and receive processes. */
3085 	CSR_WRITE(sc, WMREG_RCTL, 0);
3086 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3087 
3088 	/* set_tbi_sbp_82543() */
3089 
3090 	delay(10*1000);
3091 
3092 	/* Must acquire the MDIO ownership before MAC reset */
3093 	switch (sc->sc_type) {
3094 	case WM_T_82573:
3095 	case WM_T_82574:
3096 	case WM_T_82583:
3097 		i = 0;
3098 		reg = CSR_READ(sc, WMREG_EXTCNFCTR)
3099 		    | EXTCNFCTR_MDIO_SW_OWNERSHIP;
3100 		do {
3101 			CSR_WRITE(sc, WMREG_EXTCNFCTR,
3102 			    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
3103 			reg = CSR_READ(sc, WMREG_EXTCNFCTR);
3104 			if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
3105 				break;
3106 			reg |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
3107 			delay(2*1000);
3108 			i++;
3109 		} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
3110 		break;
3111 	default:
3112 		break;
3113 	}
3114 
3115 	/*
3116 	 * 82541 Errata 29? & 82547 Errata 28?
3117 	 * See also the description about PHY_RST bit in CTRL register
3118 	 * in 8254x_GBe_SDM.pdf.
3119 	 */
3120 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3121 		CSR_WRITE(sc, WMREG_CTRL,
3122 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3123 		delay(5000);
3124 	}
3125 
3126 	switch (sc->sc_type) {
3127 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3128 	case WM_T_82541:
3129 	case WM_T_82541_2:
3130 	case WM_T_82547:
3131 	case WM_T_82547_2:
3132 		/*
3133 		 * On some chipsets, a reset through a memory-mapped write
3134 		 * cycle can cause the chip to reset before completing the
3135 		 * write cycle.  This causes major headaches that can be
3136 		 * avoided by issuing the reset via indirect register writes
3137 		 * through I/O space.
3138 		 *
3139 		 * So, if we successfully mapped the I/O BAR at attach time,
3140 		 * use that.  Otherwise, try our luck with a memory-mapped
3141 		 * reset.
3142 		 */
3143 		if (sc->sc_flags & WM_F_IOH_VALID)
3144 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3145 		else
3146 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3147 		break;
3148 	case WM_T_82545_3:
3149 	case WM_T_82546_3:
3150 		/* Use the shadow control register on these chips. */
3151 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3152 		break;
3153 	case WM_T_80003:
3154 		func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
3155 		mask = func ? SWFW_PHY1_SM : SWFW_PHY0_SM;
3156 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3157 		wm_get_swfw_semaphore(sc, mask);
3158 		CSR_WRITE(sc, WMREG_CTRL, reg);
3159 		wm_put_swfw_semaphore(sc, mask);
3160 		break;
3161 	case WM_T_ICH8:
3162 	case WM_T_ICH9:
3163 	case WM_T_ICH10:
3164 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3165 		if (wm_check_reset_block(sc) == 0) {
3166 			reg |= CTRL_PHY_RESET;
3167 			phy_reset = 1;
3168 		}
3169 		wm_get_swfwhw_semaphore(sc);
3170 		CSR_WRITE(sc, WMREG_CTRL, reg);
3171 		delay(20*1000);
3172 		wm_put_swfwhw_semaphore(sc);
3173 		break;
3174 	case WM_T_82542_2_0:
3175 	case WM_T_82542_2_1:
3176 	case WM_T_82543:
3177 	case WM_T_82540:
3178 	case WM_T_82545:
3179 	case WM_T_82546:
3180 	case WM_T_82571:
3181 	case WM_T_82572:
3182 	case WM_T_82573:
3183 	case WM_T_82574:
3184 	case WM_T_82583:
3185 	default:
3186 		/* Everything else can safely use the documented method. */
3187 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3188 		break;
3189 	}
3190 
3191 	if (phy_reset != 0)
3192 		wm_get_cfg_done(sc);
3193 
3194 	/* reload EEPROM */
3195 	switch (sc->sc_type) {
3196 	case WM_T_82542_2_0:
3197 	case WM_T_82542_2_1:
3198 	case WM_T_82543:
3199 	case WM_T_82544:
3200 		delay(10);
3201 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3202 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3203 		delay(2000);
3204 		break;
3205 	case WM_T_82540:
3206 	case WM_T_82545:
3207 	case WM_T_82545_3:
3208 	case WM_T_82546:
3209 	case WM_T_82546_3:
3210 		delay(5*1000);
3211 		/* XXX Disable HW ARPs on ASF enabled adapters */
3212 		break;
3213 	case WM_T_82541:
3214 	case WM_T_82541_2:
3215 	case WM_T_82547:
3216 	case WM_T_82547_2:
3217 		delay(20000);
3218 		/* XXX Disable HW ARPs on ASF enabled adapters */
3219 		break;
3220 	case WM_T_82571:
3221 	case WM_T_82572:
3222 	case WM_T_82573:
3223 	case WM_T_82574:
3224 	case WM_T_82583:
3225 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3226 			delay(10);
3227 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3228 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3229 		}
3230 		/* check EECD_EE_AUTORD */
3231 		wm_get_auto_rd_done(sc);
3232 		/*
3233 		 * PHY configuration from the NVM starts only after EECD_AUTO_RD
3234 		 * is set.
3235 		 */
3236 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3237 		    || (sc->sc_type == WM_T_82583))
3238 			delay(25*1000);
3239 		break;
3240 	case WM_T_80003:
3241 	case WM_T_ICH8:
3242 	case WM_T_ICH9:
3243 		/* check EECD_EE_AUTORD */
3244 		wm_get_auto_rd_done(sc);
3245 		break;
3246 	case WM_T_ICH10: /* & PCH */
3247 		wm_lan_init_done(sc);
3248 		break;
3249 	default:
3250 		panic("%s: unknown type\n", __func__);
3251 	}
3252 
3253 	/* reload sc_ctrl */
3254 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3255 
3256 #if 0
3257 	for (i = 0; i < 1000; i++) {
3258 		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0) {
3259 			return;
3260 		}
3261 		delay(20);
3262 	}
3263 
3264 	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
3265 		log(LOG_ERR, "%s: reset failed to complete\n",
3266 		    device_xname(sc->sc_dev));
3267 #endif
3268 }
3269 
3270 /*
3271  * wm_init:		[ifnet interface function]
3272  *
3273  *	Initialize the interface.  Must be called at splnet().
3274  */
3275 static int
3276 wm_init(struct ifnet *ifp)
3277 {
3278 	struct wm_softc *sc = ifp->if_softc;
3279 	struct wm_rxsoft *rxs;
3280 	int i, error = 0;
3281 	uint32_t reg;
3282 
3283 	/*
3284 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3285 	 * There is a small but measurable benefit to avoiding the adjustment
3286 	 * of the descriptor so that the headers are aligned, for normal mtu,
3287 	 * on such platforms.  One possibility is that the DMA itself is
3288 	 * slightly more efficient if the front of the entire packet (instead
3289 	 * of the front of the headers) is aligned.
3290 	 *
3291 	 * Note we must always set align_tweak to 0 if we are using
3292 	 * jumbo frames.
3293 	 */
3294 #ifdef __NO_STRICT_ALIGNMENT
3295 	sc->sc_align_tweak = 0;
3296 #else
3297 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3298 		sc->sc_align_tweak = 0;
3299 	else
3300 		sc->sc_align_tweak = 2;
3301 #endif /* __NO_STRICT_ALIGNMENT */
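	/*
	 * The 2-byte tweak is the usual Ethernet trick: receiving the
	 * 14-byte Ethernet header at (buffer + 2) leaves the IP header
	 * 4-byte aligned on strict-alignment platforms.
	 */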
3302 
3303 	/* Cancel any pending I/O. */
3304 	wm_stop(ifp, 0);
3305 
3306 	/* update statistics before reset */
3307 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3308 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3309 
3310 	/* Reset the chip to a known state. */
3311 	wm_reset(sc);
3312 
3313 	switch (sc->sc_type) {
3314 	case WM_T_82571:
3315 	case WM_T_82572:
3316 	case WM_T_82573:
3317 	case WM_T_82574:
3318 	case WM_T_82583:
3319 	case WM_T_80003:
3320 	case WM_T_ICH8:
3321 	case WM_T_ICH9:
3322 	case WM_T_ICH10:
3323 		if (wm_check_mng_mode(sc) != 0)
3324 			wm_get_hw_control(sc);
3325 		break;
3326 	default:
3327 		break;
3328 	}
3329 
3330 	/* Initialize the transmit descriptor ring. */
3331 	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3332 	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3333 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3334 	sc->sc_txfree = WM_NTXDESC(sc);
3335 	sc->sc_txnext = 0;
3336 
3337 	if (sc->sc_type < WM_T_82543) {
3338 		CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
3339 		CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
3340 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3341 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3342 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3343 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3344 	} else {
3345 		CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
3346 		CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
3347 		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3348 		CSR_WRITE(sc, WMREG_TDH, 0);
3349 		CSR_WRITE(sc, WMREG_TDT, 0);
3350 		CSR_WRITE(sc, WMREG_TIDV, 375);		/* ITR / 4 */
3351 		CSR_WRITE(sc, WMREG_TADV, 375);		/* should be same */
3352 
3353 		CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3354 		    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3355 		CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3356 		    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3357 	}
3358 	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3359 	CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3360 
3361 	/* Initialize the transmit job descriptors. */
3362 	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3363 		sc->sc_txsoft[i].txs_mbuf = NULL;
3364 	sc->sc_txsfree = WM_TXQUEUELEN(sc);
3365 	sc->sc_txsnext = 0;
3366 	sc->sc_txsdirty = 0;
3367 
3368 	/*
3369 	 * Initialize the receive descriptor and receive job
3370 	 * descriptor rings.
3371 	 */
3372 	if (sc->sc_type < WM_T_82543) {
3373 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3374 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3375 		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3376 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3377 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3378 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3379 
3380 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3381 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3382 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3383 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3384 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3385 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3386 	} else {
3387 		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3388 		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3389 		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3390 		CSR_WRITE(sc, WMREG_RDH, 0);
3391 		CSR_WRITE(sc, WMREG_RDT, 0);
3392 		CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD);	/* ITR/4 */
3393 		CSR_WRITE(sc, WMREG_RADV, 375);		/* MUST be same */
3394 	}
3395 	for (i = 0; i < WM_NRXDESC; i++) {
3396 		rxs = &sc->sc_rxsoft[i];
3397 		if (rxs->rxs_mbuf == NULL) {
3398 			if ((error = wm_add_rxbuf(sc, i)) != 0) {
3399 				log(LOG_ERR, "%s: unable to allocate or map rx "
3400 				    "buffer %d, error = %d\n",
3401 				    device_xname(sc->sc_dev), i, error);
3402 				/*
3403 				 * XXX Should attempt to run with fewer receive
3404 				 * XXX buffers instead of just failing.
3405 				 */
3406 				wm_rxdrain(sc);
3407 				goto out;
3408 			}
3409 		} else
3410 			WM_INIT_RXDESC(sc, i);
3411 	}
3412 	sc->sc_rxptr = 0;
3413 	sc->sc_rxdiscard = 0;
3414 	WM_RXCHAIN_RESET(sc);
3415 
3416 	/*
3417 	 * Clear out the VLAN table -- we don't use it (yet).
3418 	 */
3419 	CSR_WRITE(sc, WMREG_VET, 0);
3420 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
3421 		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3422 
3423 	/*
3424 	 * Set up flow-control parameters.
3425 	 *
3426 	 * XXX Values could probably stand some tuning.
3427 	 */
3428 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
3429 	    && (sc->sc_type != WM_T_ICH10)) {
3430 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3431 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3432 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3433 	}
3434 
3435 	sc->sc_fcrtl = FCRTL_DFLT;
3436 	if (sc->sc_type < WM_T_82543) {
3437 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3438 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3439 	} else {
3440 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3441 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3442 	}
3443 
3444 	if (sc->sc_type == WM_T_80003)
3445 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
3446 	else
3447 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3448 
3449 	/* Deal with VLAN enables. */
3450 	if (VLAN_ATTACHED(&sc->sc_ethercom))
3451 		sc->sc_ctrl |= CTRL_VME;
3452 	else
3453 		sc->sc_ctrl &= ~CTRL_VME;
3454 
3455 	/* Write the control registers. */
3456 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3457 
3458 	if (sc->sc_flags & WM_F_HAS_MII) {
3459 		int val;
3460 
3461 		switch (sc->sc_type) {
3462 		case WM_T_80003:
3463 		case WM_T_ICH8:
3464 		case WM_T_ICH9:
3465 		case WM_T_ICH10:
3466 			/*
3467 			 * Set the MAC to wait the maximum time between each
3468 			 * iteration and increase the maximum number of
3469 			 * iterations when polling the PHY; this fixes
3470 			 * erroneous timeouts at 10Mbps.
3471 			 */
3472 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
3473 			    0xFFFF);
3474 			val = wm_kmrn_readreg(sc,
3475 			    KUMCTRLSTA_OFFSET_INB_PARAM);
3476 			val |= 0x3F;
3477 			wm_kmrn_writereg(sc,
3478 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
3479 			break;
3480 		default:
3481 			break;
3482 		}
3483 
3484 		if (sc->sc_type == WM_T_80003) {
3485 			val = CSR_READ(sc, WMREG_CTRL_EXT);
3486 			val &= ~CTRL_EXT_LINK_MODE_MASK;
3487 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3488 
3489 			/* Bypass RX and TX FIFOs */
3490 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3491 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
3492 			    KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3493 
3494 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3495 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3496 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3497 		}
3498 	}
3499 #if 0
3500 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3501 #endif
3502 
3503 	/*
3504 	 * Set up checksum offload parameters.
3505 	 */
3506 	reg = CSR_READ(sc, WMREG_RXCSUM);
3507 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3508 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3509 		reg |= RXCSUM_IPOFL;
3510 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3511 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3512 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3513 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3514 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
3515 
3516 	/* Reset TBI's RXCFG count */
3517 	sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
3518 
3519 	/*
3520 	 * Set up the interrupt registers.
3521 	 */
3522 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3523 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3524 	    ICR_RXO | ICR_RXT0;
3525 	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
3526 		sc->sc_icr |= ICR_RXCFG;
3527 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
3528 
3529 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3530 	    || (sc->sc_type == WM_T_ICH10)) {
3531 		reg = CSR_READ(sc, WMREG_KABGTXD);
3532 		reg |= KABGTXD_BGSQLBIAS;
3533 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
3534 	}
3535 
3536 	/* Set up the inter-packet gap. */
3537 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
3538 
3539 	if (sc->sc_type >= WM_T_82543) {
3540 		/*
3541 		 * Set up the interrupt throttling register (units of 256ns)
3542 		 * Note that a footnote in Intel's documentation says this
3543 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
3544 		 * or 10Mbit mode.  Empirically, this also appears to be true
3545 		 * for the 1024ns units of the other
3546 		 * interrupt-related timer registers -- so, really, we ought
3547 		 * to divide this value by 4 when the link speed is low.
3548 		 *
3549 		 * XXX implement this division at link speed change!
3550 		 */
3551 
3552 		 /*
3553 		  * For N interrupts/sec, set this value to:
3554 		  * 1000000000 / (N * 256).  Note that we set the
3555 		  * absolute and packet timer values to this value
3556 		  * divided by 4 to get "simple timer" behavior.
3557 		  */
3558 
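		/* For example: 1000000000 / (1500 * 256) ~= 2604 ints/sec. */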
3559 		sc->sc_itr = 1500;		/* 2604 ints/sec */
3560 		CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
3561 	}
3562 
3563 	/* Set the VLAN ethernetype. */
3564 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
3565 
3566 	/*
3567 	 * Set up the transmit control register; we start out with
3568 	 * a collision distance suitable for FDX, but update it when
3569 	 * we resolve the media type.
3570 	 */
3571 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
3572 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
3573 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3574 	if (sc->sc_type >= WM_T_82571)
3575 		sc->sc_tctl |= TCTL_MULR;
3576 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3577 
3578 	if (sc->sc_type == WM_T_80003) {
3579 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
3580 		reg &= ~TCTL_EXT_GCEX_MASK;
3581 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
3582 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
3583 	}
3584 
3585 	/* Set the media. */
3586 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
3587 		goto out;
3588 
3589 	/*
3590 	 * Set up the receive control register; we actually program
3591 	 * the register when we set the receive filter.  Use multicast
3592 	 * address offset type 0.
3593 	 *
3594 	 * Only the i82544 has the ability to strip the incoming
3595 	 * CRC, so we don't enable that feature.
3596 	 */
3597 	sc->sc_mchash_type = 0;
3598 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
3599 	    | RCTL_MO(sc->sc_mchash_type);
3600 
3601 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
3602 	    && (ifp->if_mtu > ETHERMTU))
3603 			sc->sc_rctl |= RCTL_LPE;
3604 
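	/*
	 * The rx buffer size programmed into RCTL has to match the mbuf
	 * cluster size; sizes above 2k use the BSEX (buffer size
	 * extension) encodings.
	 */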
3605 	if (MCLBYTES == 2048) {
3606 		sc->sc_rctl |= RCTL_2k;
3607 	} else {
3608 		if (sc->sc_type >= WM_T_82543) {
3609 			switch (MCLBYTES) {
3610 			case 4096:
3611 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
3612 				break;
3613 			case 8192:
3614 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
3615 				break;
3616 			case 16384:
3617 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
3618 				break;
3619 			default:
3620 				panic("wm_init: MCLBYTES %d unsupported",
3621 				    MCLBYTES);
3622 				break;
3623 			}
3624 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
3625 	}
3626 
3627 	/* Set the receive filter. */
3628 	wm_set_filter(sc);
3629 
3630 	/* Start the one second link check clock. */
3631 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3632 
3633 	/* ...all done! */
3634 	ifp->if_flags |= IFF_RUNNING;
3635 	ifp->if_flags &= ~IFF_OACTIVE;
3636 
3637  out:
3638 	if (error)
3639 		log(LOG_ERR, "%s: interface not running\n",
3640 		    device_xname(sc->sc_dev));
3641 	return (error);
3642 }
3643 
3644 /*
3645  * wm_rxdrain:
3646  *
3647  *	Drain the receive queue.
3648  */
3649 static void
3650 wm_rxdrain(struct wm_softc *sc)
3651 {
3652 	struct wm_rxsoft *rxs;
3653 	int i;
3654 
3655 	for (i = 0; i < WM_NRXDESC; i++) {
3656 		rxs = &sc->sc_rxsoft[i];
3657 		if (rxs->rxs_mbuf != NULL) {
3658 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3659 			m_freem(rxs->rxs_mbuf);
3660 			rxs->rxs_mbuf = NULL;
3661 		}
3662 	}
3663 }
3664 
3665 /*
3666  * wm_stop:		[ifnet interface function]
3667  *
3668  *	Stop transmission on the interface.
3669  */
3670 static void
3671 wm_stop(struct ifnet *ifp, int disable)
3672 {
3673 	struct wm_softc *sc = ifp->if_softc;
3674 	struct wm_txsoft *txs;
3675 	int i;
3676 
3677 	/* Stop the one second clock. */
3678 	callout_stop(&sc->sc_tick_ch);
3679 
3680 	/* Stop the 82547 Tx FIFO stall check timer. */
3681 	if (sc->sc_type == WM_T_82547)
3682 		callout_stop(&sc->sc_txfifo_ch);
3683 
3684 	if (sc->sc_flags & WM_F_HAS_MII) {
3685 		/* Down the MII. */
3686 		mii_down(&sc->sc_mii);
3687 	} else {
3688 #if 0
3689 		/* Should we clear PHY's status properly? */
3690 		wm_reset(sc);
3691 #endif
3692 	}
3693 
3694 	/* Stop the transmit and receive processes. */
3695 	CSR_WRITE(sc, WMREG_TCTL, 0);
3696 	CSR_WRITE(sc, WMREG_RCTL, 0);
3697 
3698 	/*
3699 	 * Clear the interrupt mask to ensure the device cannot assert its
3700 	 * interrupt line.
3701 	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
3702 	 * any currently pending or shared interrupt.
3703 	 */
3704 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3705 	sc->sc_icr = 0;
3706 
3707 	/* Release any queued transmit buffers. */
3708 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
3709 		txs = &sc->sc_txsoft[i];
3710 		if (txs->txs_mbuf != NULL) {
3711 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3712 			m_freem(txs->txs_mbuf);
3713 			txs->txs_mbuf = NULL;
3714 		}
3715 	}
3716 
3717 	/* Mark the interface as down and cancel the watchdog timer. */
3718 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3719 	ifp->if_timer = 0;
3720 
3721 	if (disable)
3722 		wm_rxdrain(sc);
3723 }
3724 
3725 void
3726 wm_get_auto_rd_done(struct wm_softc *sc)
3727 {
3728 	int i;
3729 
3730 	/* wait for eeprom to reload */
3731 	switch (sc->sc_type) {
3732 	case WM_T_82571:
3733 	case WM_T_82572:
3734 	case WM_T_82573:
3735 	case WM_T_82574:
3736 	case WM_T_82583:
3737 	case WM_T_80003:
3738 	case WM_T_ICH8:
3739 	case WM_T_ICH9:
3740 	case WM_T_ICH10:
3741 		for (i = 0; i < 10; i++) {
3742 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3743 				break;
3744 			delay(1000);
3745 		}
3746 		if (i == 10) {
3747 			log(LOG_ERR, "%s: auto read from eeprom failed to "
3748 			    "complete\n", device_xname(sc->sc_dev));
3749 		}
3750 		break;
3751 	default:
3752 		delay(5000);
3753 		break;
3754 	}
3755 }
3756 
3757 void
3758 wm_lan_init_done(struct wm_softc *sc)
3759 {
3760 	uint32_t reg = 0;
3761 	int i;
3762 
3763 	/* wait for eeprom to reload */
3764 	switch (sc->sc_type) {
3765 	case WM_T_ICH10: /* & PCH */
3766 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3767 			reg = CSR_READ(sc, WMREG_STATUS);
3768 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
3769 				break;
3770 			delay(100);
3771 		}
3772 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3773 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
3774 			    "complete\n", device_xname(sc->sc_dev), __func__);
3775 		}
3776 		break;
3777 	default:
3778 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3779 		    __func__);
3780 		break;
3781 	}
3782 
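	/* Clear the LAN_INIT_DONE indication now that we have seen it. */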
3783 	reg &= ~STATUS_LAN_INIT_DONE;
3784 	CSR_WRITE(sc, WMREG_STATUS, reg);
3785 }
3786 
3787 void
3788 wm_get_cfg_done(struct wm_softc *sc)
3789 {
3790 	int func = 0;
3791 	int mask;
3792 	int i;
3793 
3794 	/* wait for eeprom to reload */
3795 	switch (sc->sc_type) {
3796 	case WM_T_82542_2_0:
3797 	case WM_T_82542_2_1:
3798 		/* null */
3799 		break;
3800 	case WM_T_82543:
3801 	case WM_T_82544:
3802 	case WM_T_82540:
3803 	case WM_T_82545:
3804 	case WM_T_82545_3:
3805 	case WM_T_82546:
3806 	case WM_T_82546_3:
3807 	case WM_T_82541:
3808 	case WM_T_82541_2:
3809 	case WM_T_82547:
3810 	case WM_T_82547_2:
3811 	case WM_T_82573:
3812 	case WM_T_82574:
3813 	case WM_T_82583:
3814 		/* generic */
3815 		delay(10*1000);
3816 		break;
3817 	case WM_T_80003:
3818 	case WM_T_82571:
3819 	case WM_T_82572:
3820 	case WM_T_ICH8:
3821 	case WM_T_ICH9:
3822 	case WM_T_ICH10:
3823 		if (sc->sc_type == WM_T_80003)
3824 			func = (CSR_READ(sc, WMREG_STATUS)
3825 			    >> STATUS_FUNCID_SHIFT) & 1;
3826 		else
3827 			func = 0; /* XXX Is it true for 82571? */
3828 		mask = (func == 1) ? EEMNGCTL_CFGDONE_1 : EEMNGCTL_CFGDONE_0;
3829 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3830 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3831 				break;
3832 			delay(1000);
3833 		}
3834 		if (i >= WM_PHY_CFG_TIMEOUT) {
3835 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3836 				device_xname(sc->sc_dev), __func__));
3837 		}
3838 		break;
3839 	default:
3840 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3841 		    __func__);
3842 		break;
3843 	}
3844 }
3845 
3846 /*
3847  * wm_acquire_eeprom:
3848  *
3849  *	Perform the EEPROM handshake required on some chips.
3850  */
3851 static int
3852 wm_acquire_eeprom(struct wm_softc *sc)
3853 {
3854 	uint32_t reg;
3855 	int x;
3856 	int ret = 0;
3857 
3858 	/* always succeeds */
3859 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3860 		return 0;
3861 
3862 	if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
3863 		ret = wm_get_swfwhw_semaphore(sc);
3864 	} else if (sc->sc_flags & WM_F_SWFW_SYNC) {
3865 		/* this will also do wm_get_swsm_semaphore() if needed */
3866 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
3867 	} else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
3868 		ret = wm_get_swsm_semaphore(sc);
3869 	}
3870 
3871 	if (ret) {
3872 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
3873 			__func__);
3874 		return 1;
3875 	}
3876 
3877 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE)  {
3878 		reg = CSR_READ(sc, WMREG_EECD);
3879 
3880 		/* Request EEPROM access. */
3881 		reg |= EECD_EE_REQ;
3882 		CSR_WRITE(sc, WMREG_EECD, reg);
3883 
3884 		/* ...and wait for it to be granted. */
3885 		for (x = 0; x < 1000; x++) {
3886 			reg = CSR_READ(sc, WMREG_EECD);
3887 			if (reg & EECD_EE_GNT)
3888 				break;
3889 			delay(5);
3890 		}
3891 		if ((reg & EECD_EE_GNT) == 0) {
3892 			aprint_error_dev(sc->sc_dev,
3893 			    "could not acquire EEPROM GNT\n");
3894 			reg &= ~EECD_EE_REQ;
3895 			CSR_WRITE(sc, WMREG_EECD, reg);
3896 			if (sc->sc_flags & WM_F_SWFWHW_SYNC)
3897 				wm_put_swfwhw_semaphore(sc);
3898 			if (sc->sc_flags & WM_F_SWFW_SYNC)
3899 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
3900 			else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
3901 				wm_put_swsm_semaphore(sc);
3902 			return (1);
3903 		}
3904 	}
3905 
3906 	return (0);
3907 }
3908 
3909 /*
3910  * wm_release_eeprom:
3911  *
3912  *	Release the EEPROM mutex.
3913  */
3914 static void
3915 wm_release_eeprom(struct wm_softc *sc)
3916 {
3917 	uint32_t reg;
3918 
3919 	/* always succeeds */
3920 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3921 		return;
3922 
3923 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
3924 		reg = CSR_READ(sc, WMREG_EECD);
3925 		reg &= ~EECD_EE_REQ;
3926 		CSR_WRITE(sc, WMREG_EECD, reg);
3927 	}
3928 
3929 	if (sc->sc_flags & WM_F_SWFWHW_SYNC)
3930 		wm_put_swfwhw_semaphore(sc);
3931 	if (sc->sc_flags & WM_F_SWFW_SYNC)
3932 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
3933 	else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
3934 		wm_put_swsm_semaphore(sc);
3935 }
3936 
3937 /*
3938  * wm_eeprom_sendbits:
3939  *
3940  *	Send a series of bits to the EEPROM.
3941  */
3942 static void
3943 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
3944 {
3945 	uint32_t reg;
3946 	int x;
3947 
3948 	reg = CSR_READ(sc, WMREG_EECD);
3949 
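	/* Clock the bits out MSB first: drive DI, then pulse SK high. */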
3950 	for (x = nbits; x > 0; x--) {
3951 		if (bits & (1U << (x - 1)))
3952 			reg |= EECD_DI;
3953 		else
3954 			reg &= ~EECD_DI;
3955 		CSR_WRITE(sc, WMREG_EECD, reg);
3956 		delay(2);
3957 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
3958 		delay(2);
3959 		CSR_WRITE(sc, WMREG_EECD, reg);
3960 		delay(2);
3961 	}
3962 }
3963 
3964 /*
3965  * wm_eeprom_recvbits:
3966  *
3967  *	Receive a series of bits from the EEPROM.
3968  */
3969 static void
3970 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
3971 {
3972 	uint32_t reg, val;
3973 	int x;
3974 
3975 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
3976 
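	/* Clock the bits in MSB first: raise SK, sample DO, lower SK. */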
3977 	val = 0;
3978 	for (x = nbits; x > 0; x--) {
3979 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
3980 		delay(2);
3981 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
3982 			val |= (1U << (x - 1));
3983 		CSR_WRITE(sc, WMREG_EECD, reg);
3984 		delay(2);
3985 	}
3986 	*valp = val;
3987 }
3988 
3989 /*
3990  * wm_read_eeprom_uwire:
3991  *
3992  *	Read a word from the EEPROM using the MicroWire protocol.
3993  */
3994 static int
3995 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3996 {
3997 	uint32_t reg, val;
3998 	int i;
3999 
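	/*
	 * One MicroWire transaction per word: raise CS, shift out the
	 * 3-bit READ opcode and the address, shift in 16 data bits,
	 * then drop CS again.
	 */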
4000 	for (i = 0; i < wordcnt; i++) {
4001 		/* Clear SK and DI. */
4002 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
4003 		CSR_WRITE(sc, WMREG_EECD, reg);
4004 
4005 		/* Set CHIP SELECT. */
4006 		reg |= EECD_CS;
4007 		CSR_WRITE(sc, WMREG_EECD, reg);
4008 		delay(2);
4009 
4010 		/* Shift in the READ command. */
4011 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
4012 
4013 		/* Shift in address. */
4014 		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
4015 
4016 		/* Shift out the data. */
4017 		wm_eeprom_recvbits(sc, &val, 16);
4018 		data[i] = val & 0xffff;
4019 
4020 		/* Clear CHIP SELECT. */
4021 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
4022 		CSR_WRITE(sc, WMREG_EECD, reg);
4023 		delay(2);
4024 	}
4025 
4026 	return (0);
4027 }
4028 
4029 /*
4030  * wm_spi_eeprom_ready:
4031  *
4032  *	Wait for a SPI EEPROM to be ready for commands.
4033  */
4034 static int
4035 wm_spi_eeprom_ready(struct wm_softc *sc)
4036 {
4037 	uint32_t val;
4038 	int usec;
4039 
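
	/*
	 * Poll the part's status register; it is ready once the busy
	 * bit (SPI_SR_RDY here) reads as 0.
	 */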
4040 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
4041 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
4042 		wm_eeprom_recvbits(sc, &val, 8);
4043 		if ((val & SPI_SR_RDY) == 0)
4044 			break;
4045 	}
4046 	if (usec >= SPI_MAX_RETRIES) {
4047 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
4048 		return (1);
4049 	}
4050 	return (0);
4051 }
4052 
4053 /*
4054  * wm_read_eeprom_spi:
4055  *
4056  *	Read a word from the EEPROM using the SPI protocol.
4057  */
4058 static int
4059 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4060 {
4061 	uint32_t reg, val;
4062 	int i;
4063 	uint8_t opc;
4064 
4065 	/* Clear SK and CS. */
4066 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
4067 	CSR_WRITE(sc, WMREG_EECD, reg);
4068 	delay(2);
4069 
4070 	if (wm_spi_eeprom_ready(sc))
4071 		return (1);
4072 
4073 	/* Toggle CS to flush commands. */
4074 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
4075 	delay(2);
4076 	CSR_WRITE(sc, WMREG_EECD, reg);
4077 	delay(2);
4078 
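	/*
	 * Parts with 8 address bits carry a 9th address bit (for words
	 * at 128 and above) in the opcode, as SPI_OPC_A8.
	 */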
4079 	opc = SPI_OPC_READ;
4080 	if (sc->sc_ee_addrbits == 8 && word >= 128)
4081 		opc |= SPI_OPC_A8;
4082 
4083 	wm_eeprom_sendbits(sc, opc, 8);
4084 	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
4085 
4086 	for (i = 0; i < wordcnt; i++) {
4087 		wm_eeprom_recvbits(sc, &val, 16);
4088 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
4089 	}
4090 
4091 	/* Raise CS and clear SK. */
4092 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
4093 	CSR_WRITE(sc, WMREG_EECD, reg);
4094 	delay(2);
4095 
4096 	return (0);
4097 }
4098 
4099 #define EEPROM_CHECKSUM		0xBABA
4100 #define EEPROM_SIZE		0x0040
4101 
4102 /*
4103  * wm_validate_eeprom_checksum
4104  *
4105  * The checksum is defined as the sum of the first 64 (16 bit) words.
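 * The sum must equal EEPROM_CHECKSUM (0xBABA); vendors typically store
 * a compensating value in the last word to bring the sum to that value.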
4106  */
4107 static int
4108 wm_validate_eeprom_checksum(struct wm_softc *sc)
4109 {
4110 	uint16_t checksum;
4111 	uint16_t eeprom_data;
4112 	int i;
4113 
4114 	checksum = 0;
4115 
4116 	for (i = 0; i < EEPROM_SIZE; i++) {
4117 		if (wm_read_eeprom(sc, i, 1, &eeprom_data))
4118 			return 1;
4119 		checksum += eeprom_data;
4120 	}
4121 
4122 	if (checksum != (uint16_t) EEPROM_CHECKSUM)
4123 		return 1;
4124 
4125 	return 0;
4126 }
4127 
4128 /*
4129  * wm_read_eeprom:
4130  *
4131  *	Read data from the serial EEPROM.
4132  */
4133 static int
4134 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4135 {
4136 	int rv;
4137 
4138 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
4139 		return 1;
4140 
4141 	if (wm_acquire_eeprom(sc))
4142 		return 1;
4143 
4144 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4145 	    || (sc->sc_type == WM_T_ICH10))
4146 		rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
4147 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
4148 		rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
4149 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
4150 		rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
4151 	else
4152 		rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
4153 
4154 	wm_release_eeprom(sc);
4155 	return rv;
4156 }
4157 
4158 static int
4159 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
4160     uint16_t *data)
4161 {
4162 	int i, eerd = 0;
4163 	int error = 0;
4164 
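	/*
	 * One EERD transaction per word: write the address with the
	 * START bit, poll for DONE, then pull the data field out of
	 * the same register.
	 */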
4165 	for (i = 0; i < wordcnt; i++) {
4166 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
4167 
4168 		CSR_WRITE(sc, WMREG_EERD, eerd);
4169 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
4170 		if (error != 0)
4171 			break;
4172 
4173 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
4174 	}
4175 
4176 	return error;
4177 }
4178 
4179 static int
4180 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
4181 {
4182 	uint32_t attempts = 100000;
4183 	uint32_t i, reg = 0;
4184 	int32_t done = -1;
4185 
4186 	for (i = 0; i < attempts; i++) {
4187 		reg = CSR_READ(sc, rw);
4188 
4189 		if (reg & EERD_DONE) {
4190 			done = 0;
4191 			break;
4192 		}
4193 		delay(5);
4194 	}
4195 
4196 	return done;
4197 }
4198 
4199 /*
4200  * wm_add_rxbuf:
4201  *
4202  *	Add a receive buffer to the indicated descriptor.
4203  */
4204 static int
4205 wm_add_rxbuf(struct wm_softc *sc, int idx)
4206 {
4207 	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
4208 	struct mbuf *m;
4209 	int error;
4210 
4211 	MGETHDR(m, M_DONTWAIT, MT_DATA);
4212 	if (m == NULL)
4213 		return (ENOBUFS);
4214 
4215 	MCLGET(m, M_DONTWAIT);
4216 	if ((m->m_flags & M_EXT) == 0) {
4217 		m_freem(m);
4218 		return (ENOBUFS);
4219 	}
4220 
4221 	if (rxs->rxs_mbuf != NULL)
4222 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4223 
4224 	rxs->rxs_mbuf = m;
4225 
4226 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4227 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4228 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
4229 	if (error) {
4230 		/* XXX XXX XXX */
4231 		aprint_error_dev(sc->sc_dev,
4232 		    "unable to load rx DMA map %d, error = %d\n",
4233 		    idx, error);
4234 		panic("wm_add_rxbuf");
4235 	}
4236 
4237 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4238 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4239 
4240 	WM_INIT_RXDESC(sc, idx);
4241 
4242 	return (0);
4243 }
4244 
4245 /*
4246  * wm_set_ral:
4247  *
4248  *	Set an entry in the receive address list.
4249  */
4250 static void
4251 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
4252 {
4253 	uint32_t ral_lo, ral_hi;
4254 
4255 	if (enaddr != NULL) {
4256 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
4257 		    (enaddr[3] << 24);
4258 		ral_hi = enaddr[4] | (enaddr[5] << 8);
4259 		ral_hi |= RAL_AV;
4260 	} else {
4261 		ral_lo = 0;
4262 		ral_hi = 0;
4263 	}
4264 
4265 	if (sc->sc_type >= WM_T_82544) {
4266 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
4267 		    ral_lo);
4268 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
4269 		    ral_hi);
4270 	} else {
4271 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
4272 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
4273 	}
4274 }
4275 
4276 /*
4277  * wm_mchash:
4278  *
4279  *	Compute the hash of the multicast address for the 4096-bit
4280  *	multicast filter.
4281  */
4282 static uint32_t
4283 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
4284 {
4285 	static const int lo_shift[4] = { 4, 3, 2, 0 };
4286 	static const int hi_shift[4] = { 4, 5, 6, 8 };
4287 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
4288 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
4289 	uint32_t hash;
4290 
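	/*
	 * The hash is a window of bits taken from the two most
	 * significant address bytes; sc_mchash_type selects which
	 * window.  The ICH chips have a smaller multicast table,
	 * hence the narrower 10-bit hash.
	 */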
4291 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4292 	    || (sc->sc_type == WM_T_ICH10)) {
4293 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
4294 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
4295 		return (hash & 0x3ff);
4296 	}
4297 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
4298 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
4299 
4300 	return (hash & 0xfff);
4301 }
4302 
4303 /*
4304  * wm_set_filter:
4305  *
4306  *	Set up the receive filter.
4307  */
4308 static void
4309 wm_set_filter(struct wm_softc *sc)
4310 {
4311 	struct ethercom *ec = &sc->sc_ethercom;
4312 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4313 	struct ether_multi *enm;
4314 	struct ether_multistep step;
4315 	bus_addr_t mta_reg;
4316 	uint32_t hash, reg, bit;
4317 	int i, size;
4318 
4319 	if (sc->sc_type >= WM_T_82544)
4320 		mta_reg = WMREG_CORDOVA_MTA;
4321 	else
4322 		mta_reg = WMREG_MTA;
4323 
4324 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
4325 
4326 	if (ifp->if_flags & IFF_BROADCAST)
4327 		sc->sc_rctl |= RCTL_BAM;
4328 	if (ifp->if_flags & IFF_PROMISC) {
4329 		sc->sc_rctl |= RCTL_UPE;
4330 		goto allmulti;
4331 	}
4332 
4333 	/*
4334 	 * Set the station address in the first RAL slot, and
4335 	 * clear the remaining slots.
4336 	 */
4337 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4338 		 || (sc->sc_type == WM_T_ICH10))
4339 		size = WM_ICH8_RAL_TABSIZE;
4340 	else
4341 		size = WM_RAL_TABSIZE;
4342 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
4343 	for (i = 1; i < size; i++)
4344 		wm_set_ral(sc, NULL, i);
4345 
4346 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4347 	    || (sc->sc_type == WM_T_ICH10))
4348 		size = WM_ICH8_MC_TABSIZE;
4349 	else
4350 		size = WM_MC_TABSIZE;
4351 	/* Clear out the multicast table. */
4352 	for (i = 0; i < size; i++)
4353 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
4354 
4355 	ETHER_FIRST_MULTI(step, ec, enm);
4356 	while (enm != NULL) {
4357 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
4358 			/*
4359 			 * We must listen to a range of multicast addresses.
4360 			 * For now, just accept all multicasts, rather than
4361 			 * trying to set only those filter bits needed to match
4362 			 * the range.  (At this time, the only use of address
4363 			 * ranges is for IP multicast routing, for which the
4364 			 * range is big enough to require all bits set.)
4365 			 */
4366 			goto allmulti;
4367 		}
4368 
4369 		hash = wm_mchash(sc, enm->enm_addrlo);
4370 
4371 		reg = (hash >> 5);
4372 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4373 		    || (sc->sc_type == WM_T_ICH10))
4374 			reg &= 0x1f;
4375 		else
4376 			reg &= 0x7f;
4377 		bit = hash & 0x1f;
4378 
4379 		hash = CSR_READ(sc, mta_reg + (reg << 2));
4380 		hash |= 1U << bit;
4381 
4382 		/* XXX Hardware bug?? */
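		/*
		 * Writing one MTA slot on the 82544 appears able to
		 * clobber the slot below it, so save the neighbour and
		 * write it back afterwards; other e1000-derived drivers
		 * do the same for odd-numbered slots.
		 */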
4383 		if (sc->sc_type == WM_T_82544 && (reg & 0x1) == 1) {
4384 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
4385 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4386 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
4387 		} else
4388 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4389 
4390 		ETHER_NEXT_MULTI(step, enm);
4391 	}
4392 
4393 	ifp->if_flags &= ~IFF_ALLMULTI;
4394 	goto setit;
4395 
4396  allmulti:
4397 	ifp->if_flags |= IFF_ALLMULTI;
4398 	sc->sc_rctl |= RCTL_MPE;
4399 
4400  setit:
4401 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
4402 }
4403 
4404 /*
4405  * wm_tbi_mediainit:
4406  *
4407  *	Initialize media for use on 1000BASE-X devices.
4408  */
4409 static void
4410 wm_tbi_mediainit(struct wm_softc *sc)
4411 {
4412 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4413 	const char *sep = "";
4414 
4415 	if (sc->sc_type < WM_T_82543)
4416 		sc->sc_tipg = TIPG_WM_DFLT;
4417 	else
4418 		sc->sc_tipg = TIPG_LG_DFLT;
4419 
4420 	sc->sc_tbi_anegticks = 5;
4421 
4422 	/* Initialize our media structures */
4423 	sc->sc_mii.mii_ifp = ifp;
4424 
4425 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
4426 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
4427 	    wm_tbi_mediastatus);
4428 
4429 	/*
4430 	 * SWD Pins:
4431 	 *
4432 	 *	0 = Link LED (output)
4433 	 *	1 = Loss Of Signal (input)
4434 	 */
4435 	sc->sc_ctrl |= CTRL_SWDPIO(0);
4436 	sc->sc_ctrl &= ~CTRL_SWDPIO(1);
4437 
4438 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4439 
4440 #define	ADD(ss, mm, dd)							\
4441 do {									\
4442 	aprint_normal("%s%s", sep, ss);					\
4443 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
4444 	sep = ", ";							\
4445 } while (/*CONSTCOND*/0)
4446 
4447 	aprint_normal_dev(sc->sc_dev, "");
4448 	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
4449 	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
4450 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
4451 	aprint_normal("\n");
4452 
4453 #undef ADD
4454 
4455 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
4456 }
4457 
4458 /*
4459  * wm_tbi_mediastatus:	[ifmedia interface function]
4460  *
4461  *	Get the current interface media status on a 1000BASE-X device.
4462  */
4463 static void
4464 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4465 {
4466 	struct wm_softc *sc = ifp->if_softc;
4467 	uint32_t ctrl, status;
4468 
4469 	ifmr->ifm_status = IFM_AVALID;
4470 	ifmr->ifm_active = IFM_ETHER;
4471 
4472 	status = CSR_READ(sc, WMREG_STATUS);
4473 	if ((status & STATUS_LU) == 0) {
4474 		ifmr->ifm_active |= IFM_NONE;
4475 		return;
4476 	}
4477 
4478 	ifmr->ifm_status |= IFM_ACTIVE;
4479 	ifmr->ifm_active |= IFM_1000_SX;
4480 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
4481 		ifmr->ifm_active |= IFM_FDX;
4482 	ctrl = CSR_READ(sc, WMREG_CTRL);
4483 	if (ctrl & CTRL_RFCE)
4484 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
4485 	if (ctrl & CTRL_TFCE)
4486 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
4487 }
4488 
4489 /*
4490  * wm_tbi_mediachange:	[ifmedia interface function]
4491  *
4492  *	Set hardware to newly-selected media on a 1000BASE-X device.
4493  */
4494 static int
4495 wm_tbi_mediachange(struct ifnet *ifp)
4496 {
4497 	struct wm_softc *sc = ifp->if_softc;
4498 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4499 	uint32_t status;
4500 	int i;
4501 
4502 	sc->sc_txcw = 0;
4503 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
4504 	    (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
4505 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
4506 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4507 		sc->sc_txcw |= TXCW_ANE;
4508 	} else {
4509 		/*
4510 		 * If autonegotiation is turned off, force link up and turn on
4511 		 * full duplex
4512 		 */
4513 		sc->sc_txcw &= ~TXCW_ANE;
4514 		sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
4515 		sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
4516 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4517 		delay(1000);
4518 	}
4519 
4520 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
4521 		    device_xname(sc->sc_dev),sc->sc_txcw));
4522 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
4523 	delay(10000);
4524 
4525 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
4526 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
4527 
4528 	/*
4529 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
4530 	 * optics detect a signal, 0 if they don't.
4531 	 */
4532 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
4533 		/* Have signal; wait for the link to come up. */
4534 
4535 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4536 			/*
4537 			 * Reset the link, and let autonegotiation do its thing
4538 			 */
4539 			sc->sc_ctrl |= CTRL_LRST;
4540 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4541 			delay(1000);
4542 			sc->sc_ctrl &= ~CTRL_LRST;
4543 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4544 			delay(1000);
4545 		}
4546 
4547 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
4548 			delay(10000);
4549 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
4550 				break;
4551 		}
4552 
4553 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
4554 			    device_xname(sc->sc_dev),i));
4555 
4556 		status = CSR_READ(sc, WMREG_STATUS);
4557 		DPRINTF(WM_DEBUG_LINK,
4558 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
4559 			device_xname(sc->sc_dev),status, STATUS_LU));
4560 		if (status & STATUS_LU) {
4561 			/* Link is up. */
4562 			DPRINTF(WM_DEBUG_LINK,
4563 			    ("%s: LINK: set media -> link up %s\n",
4564 			    device_xname(sc->sc_dev),
4565 			    (status & STATUS_FD) ? "FDX" : "HDX"));
4566 
4567 			/*
4568 			 * NOTE: CTRL will update TFCE and RFCE automatically,
4569 			 * so we should update sc->sc_ctrl
4570 			 */
4571 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4572 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4573 			sc->sc_fcrtl &= ~FCRTL_XONE;
4574 			if (status & STATUS_FD)
4575 				sc->sc_tctl |=
4576 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4577 			else
4578 				sc->sc_tctl |=
4579 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4580 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
4581 				sc->sc_fcrtl |= FCRTL_XONE;
4582 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4583 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
4584 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
4585 				      sc->sc_fcrtl);
4586 			sc->sc_tbi_linkup = 1;
4587 		} else {
4588 			if (i == WM_LINKUP_TIMEOUT)
4589 				wm_check_for_link(sc);
4590 			/* Link is down. */
4591 			DPRINTF(WM_DEBUG_LINK,
4592 			    ("%s: LINK: set media -> link down\n",
4593 			    device_xname(sc->sc_dev)));
4594 			sc->sc_tbi_linkup = 0;
4595 		}
4596 	} else {
4597 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
4598 		    device_xname(sc->sc_dev)));
4599 		sc->sc_tbi_linkup = 0;
4600 	}
4601 
4602 	wm_tbi_set_linkled(sc);
4603 
4604 	return (0);
4605 }
4606 
4607 /*
4608  * wm_tbi_set_linkled:
4609  *
4610  *	Update the link LED on 1000BASE-X devices.
4611  */
4612 static void
4613 wm_tbi_set_linkled(struct wm_softc *sc)
4614 {
4615 
4616 	if (sc->sc_tbi_linkup)
4617 		sc->sc_ctrl |= CTRL_SWDPIN(0);
4618 	else
4619 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
4620 
4621 	/* 82540 or newer devices are active low */
4622 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
4623 
4624 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4625 }
4626 
4627 /*
4628  * wm_tbi_check_link:
4629  *
4630  *	Check the link on 1000BASE-X devices.
4631  */
4632 static void
4633 wm_tbi_check_link(struct wm_softc *sc)
4634 {
4635 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4636 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4637 	uint32_t rxcw, ctrl, status;
4638 
4639 	status = CSR_READ(sc, WMREG_STATUS);
4640 
4641 	rxcw = CSR_READ(sc, WMREG_RXCW);
4642 	ctrl = CSR_READ(sc, WMREG_CTRL);
4643 
4644 	/* set link status */
4645 	if ((status & STATUS_LU) == 0) {
4646 		DPRINTF(WM_DEBUG_LINK,
4647 		    ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
4648 		sc->sc_tbi_linkup = 0;
4649 	} else if (sc->sc_tbi_linkup == 0) {
4650 		DPRINTF(WM_DEBUG_LINK,
4651 		    ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
4652 		    (status & STATUS_FD) ? "FDX" : "HDX"));
4653 		sc->sc_tbi_linkup = 1;
4654 	}
4655 
4656 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
4657 	    && ((status & STATUS_LU) == 0)) {
4658 		sc->sc_tbi_linkup = 0;
4659 		if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
4660 			/* RXCFG storm! */
4661 			DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
4662 				sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
4663 			wm_init(ifp);
4664 			wm_start(ifp);
4665 		} else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4666 			/* If the timer expired, retry autonegotiation */
4667 			if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
4668 				DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
4669 				sc->sc_tbi_ticks = 0;
4670 				/*
4671 				 * Reset the link, and let autonegotiation do
4672 				 * its thing
4673 				 */
4674 				sc->sc_ctrl |= CTRL_LRST;
4675 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4676 				delay(1000);
4677 				sc->sc_ctrl &= ~CTRL_LRST;
4678 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4679 				delay(1000);
4680 				CSR_WRITE(sc, WMREG_TXCW,
4681 				    sc->sc_txcw & ~TXCW_ANE);
4682 				CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
4683 			}
4684 		}
4685 	}
4686 
4687 	wm_tbi_set_linkled(sc);
4688 }
4689 
4690 /*
4691  * wm_gmii_reset:
4692  *
4693  *	Reset the PHY.
4694  */
4695 static void
4696 wm_gmii_reset(struct wm_softc *sc)
4697 {
4698 	uint32_t reg;
4699 	int func = 0; /* XXX gcc */
4700 	int rv;
4701 
4702 	/* get phy semaphore */
4703 	switch (sc->sc_type) {
4704 	case WM_T_82571:
4705 	case WM_T_82572:
4706 	case WM_T_82573:
4707 	case WM_T_82574:
4708 	case WM_T_82583:
4709 		 /* XXX should get sw semaphore, too */
4710 		rv = wm_get_swsm_semaphore(sc);
4711 		break;
4712 	case WM_T_80003:
4713 		func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
4714 		rv = wm_get_swfw_semaphore(sc,
4715 		    func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4716 		break;
4717 	case WM_T_ICH8:
4718 	case WM_T_ICH9:
4719 	case WM_T_ICH10:
4720 		rv = wm_get_swfwhw_semaphore(sc);
4721 		break;
4722 	default:
4723 		/* nothing to do */
4724 		rv = 0;
4725 		break;
4726 	}
4727 	if (rv != 0) {
4728 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4729 		    __func__);
4730 		return;
4731 	}
4732 
4733 	switch (sc->sc_type) {
4734 	case WM_T_82542_2_0:
4735 	case WM_T_82542_2_1:
4736 		/* null */
4737 		break;
4738 	case WM_T_82543:
4739 		/*
4740 		 * With the 82543, we need to force the MAC's speed and duplex
4741 		 * to match the PHY's speed and duplex configuration.
4742 		 * In addition, we need to perform a hardware reset on the PHY
4743 		 * to take it out of reset.
4744 		 */
4745 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
4746 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4747 
4748 		/* The PHY reset pin is active-low. */
4749 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
4750 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
4751 		    CTRL_EXT_SWDPIN(4));
4752 		reg |= CTRL_EXT_SWDPIO(4);
4753 
4754 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4755 		delay(10*1000);
4756 
4757 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
4758 		delay(150);
4759 #if 0
4760 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
4761 #endif
4762 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
4763 		break;
4764 	case WM_T_82544:	/* reset 10000us */
4765 	case WM_T_82540:
4766 	case WM_T_82545:
4767 	case WM_T_82545_3:
4768 	case WM_T_82546:
4769 	case WM_T_82546_3:
4770 	case WM_T_82541:
4771 	case WM_T_82541_2:
4772 	case WM_T_82547:
4773 	case WM_T_82547_2:
4774 	case WM_T_82571:	/* reset 100us */
4775 	case WM_T_82572:
4776 	case WM_T_82573:
4777 	case WM_T_82574:
4778 	case WM_T_82583:
4779 	case WM_T_80003:
4780 		/* generic reset */
4781 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
4782 		delay((sc->sc_type >= WM_T_82571) ? 100 : 10*1000);
4783 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4784 		delay(150);
4785 
4786 		if ((sc->sc_type == WM_T_82541)
4787 		    || (sc->sc_type == WM_T_82541_2)
4788 		    || (sc->sc_type == WM_T_82547)
4789 		    || (sc->sc_type == WM_T_82547_2)) {
4790 			/* workarounds for igp are done in igp_reset() */
4791 			/* XXX add code to set LED after phy reset */
4792 		}
4793 		break;
4794 	case WM_T_ICH8:
4795 	case WM_T_ICH9:
4796 	case WM_T_ICH10:
4797 		/* generic reset */
4798 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
4799 		delay(100);
4800 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4801 		delay(150);
4802 
4803 		/* Allow time for h/w to get to a quiescent state after reset */
4804 		delay(10*1000);
4805 
4806 		/* XXX add code to set LED after phy reset */
4807 		break;
4808 	default:
4809 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4810 		    __func__);
4811 		break;
4812 	}
4813 
4814 	/* release PHY semaphore */
4815 	switch (sc->sc_type) {
4816 	case WM_T_82571:
4817 	case WM_T_82572:
4818 	case WM_T_82573:
4819 	case WM_T_82574:
4820 	case WM_T_82583:
4821 		 /* XXX should put sw semaphore, too */
4822 		wm_put_swsm_semaphore(sc);
4823 		break;
4824 	case WM_T_80003:
4825 		wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4826 		break;
4827 	case WM_T_ICH8:
4828 	case WM_T_ICH9:
4829 	case WM_T_ICH10:
4830 		wm_put_swfwhw_semaphore(sc);
4831 		break;
4832 	default:
4833 		/* nothing to do */
4834 		rv = 0;
4835 		break;
4836 	}
4837 
4838 	/* get_cfg_done */
4839 	wm_get_cfg_done(sc);
4840 
4841 	/* extra setup */
4842 	switch (sc->sc_type) {
4843 	case WM_T_82542_2_0:
4844 	case WM_T_82542_2_1:
4845 	case WM_T_82543:
4846 	case WM_T_82544:
4847 	case WM_T_82540:
4848 	case WM_T_82545:
4849 	case WM_T_82545_3:
4850 	case WM_T_82546:
4851 	case WM_T_82546_3:
4852 	case WM_T_82541_2:
4853 	case WM_T_82547_2:
4854 	case WM_T_82571:
4855 	case WM_T_82572:
4856 	case WM_T_82573:
4857 	case WM_T_82574:
4858 	case WM_T_82583:
4859 	case WM_T_80003:
4860 		/* null */
4861 		break;
4862 	case WM_T_82541:
4863 	case WM_T_82547:
4864 		/* XXX Configure the activity LED after PHY reset */
4865 		break;
4866 	case WM_T_ICH8:
4867 	case WM_T_ICH9:
4868 	case WM_T_ICH10:
4869 		delay(10*1000);
4870 		break;
4871 	default:
4872 		panic("%s: unknown type\n", __func__);
4873 		break;
4874 	}
4875 }
4876 
4877 /*
4878  * wm_gmii_mediainit:
4879  *
4880  *	Initialize media for use on 1000BASE-T devices.
4881  */
4882 static void
4883 wm_gmii_mediainit(struct wm_softc *sc)
4884 {
4885 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4886 
4887 	/* We have MII. */
4888 	sc->sc_flags |= WM_F_HAS_MII;
4889 
4890 	if (sc->sc_type == WM_T_80003)
4891 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
4892 	else
4893 		sc->sc_tipg = TIPG_1000T_DFLT;
4894 
4895 	/*
4896 	 * Let the chip set speed/duplex on its own based on
4897 	 * signals from the PHY.
4898 	 * XXXbouyer - I'm not sure this is right for the 80003,
4899 	 * the em driver only sets CTRL_SLU here - but it seems to work.
4900 	 */
4901 	sc->sc_ctrl |= CTRL_SLU;
4902 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4903 
4904 	/* Initialize our media structures and probe the GMII. */
4905 	sc->sc_mii.mii_ifp = ifp;
4906 
4907 	if (sc->sc_type >= WM_T_80003) {
4908 		sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
4909 		sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
4910 	} else if (sc->sc_type >= WM_T_82544) {
4911 		sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
4912 		sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
4913 	} else {
4914 		sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
4915 		sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
4916 	}
4917 	sc->sc_mii.mii_statchg = wm_gmii_statchg;
4918 
4919 	wm_gmii_reset(sc);
4920 
4921 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
4922 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
4923 	    wm_gmii_mediastatus);
4924 
4925 	mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
4926 	    MII_OFFSET_ANY, MIIF_DOPAUSE);
4927 
4928 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
4929 		/* if failed, retry with *_bm_* */
4930 		sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
4931 		sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
4932 
4933 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
4934 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
4935 	}
4936 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
4937 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
4938 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
4939 	} else {
4940 		if (sc->sc_type >= WM_T_82574) {
4941 			struct mii_softc *child;
4942 
4943 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
4944 			/* fix read/write functions as e1000 driver */
4945 			if (device_is_a(child->mii_dev, "igphy")) {
4946 				sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
4947 				sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
4948 			} else {
4949 				sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
4950 				sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
4951 			}
4952 		}
4953 
4954 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
4955 	}
4956 }
4957 
4958 /*
4959  * wm_gmii_mediastatus:	[ifmedia interface function]
4960  *
4961  *	Get the current interface media status on a 1000BASE-T device.
4962  */
4963 static void
4964 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4965 {
4966 	struct wm_softc *sc = ifp->if_softc;
4967 
4968 	ether_mediastatus(ifp, ifmr);
4969 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) |
4970 			   sc->sc_flowflags;
4971 }
4972 
4973 /*
4974  * wm_gmii_mediachange:	[ifmedia interface function]
4975  *
4976  *	Set hardware to newly-selected media on a 1000BASE-T device.
4977  */
4978 static int
4979 wm_gmii_mediachange(struct ifnet *ifp)
4980 {
4981 	struct wm_softc *sc = ifp->if_softc;
4982 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4983 	int rc;
4984 
4985 	if ((ifp->if_flags & IFF_UP) == 0)
4986 		return 0;
4987 
4988 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
4989 	sc->sc_ctrl |= CTRL_SLU;
4990 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
4991 	    || (sc->sc_type > WM_T_82543)) {
4992 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
4993 	} else {
4994 		sc->sc_ctrl &= ~CTRL_ASDE;
4995 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
4996 		if (ife->ifm_media & IFM_FDX)
4997 			sc->sc_ctrl |= CTRL_FD;
4998 		switch (IFM_SUBTYPE(ife->ifm_media)) {
4999 		case IFM_10_T:
5000 			sc->sc_ctrl |= CTRL_SPEED_10;
5001 			break;
5002 		case IFM_100_TX:
5003 			sc->sc_ctrl |= CTRL_SPEED_100;
5004 			break;
5005 		case IFM_1000_T:
5006 			sc->sc_ctrl |= CTRL_SPEED_1000;
5007 			break;
5008 		default:
5009 			panic("wm_gmii_mediachange: bad media 0x%x",
5010 			    ife->ifm_media);
5011 		}
5012 	}
5013 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5014 	if (sc->sc_type <= WM_T_82543)
5015 		wm_gmii_reset(sc);
5016 
5017 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
5018 		return 0;
5019 	return rc;
5020 }
5021 
5022 #define	MDI_IO		CTRL_SWDPIN(2)
5023 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
5024 #define	MDI_CLK		CTRL_SWDPIN(3)
5025 
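/*
 * The i82543 has no MDIC register, so MII management frames are
 * bit-banged through the software-definable pins: MDI_IO carries the
 * data and MDI_CLK the clock.  The framing (32-bit preamble, start,
 * opcode, phy and register fields) follows IEEE 802.3 clause 22.
 */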
5026 static void
5027 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
5028 {
5029 	uint32_t i, v;
5030 
5031 	v = CSR_READ(sc, WMREG_CTRL);
5032 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
5033 	v |= MDI_DIR | CTRL_SWDPIO(3);
5034 
5035 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
5036 		if (data & i)
5037 			v |= MDI_IO;
5038 		else
5039 			v &= ~MDI_IO;
5040 		CSR_WRITE(sc, WMREG_CTRL, v);
5041 		delay(10);
5042 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5043 		delay(10);
5044 		CSR_WRITE(sc, WMREG_CTRL, v);
5045 		delay(10);
5046 	}
5047 }
5048 
5049 static uint32_t
5050 i82543_mii_recvbits(struct wm_softc *sc)
5051 {
5052 	uint32_t v, i, data = 0;
5053 
5054 	v = CSR_READ(sc, WMREG_CTRL);
5055 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
5056 	v |= CTRL_SWDPIO(3);
5057 
5058 	CSR_WRITE(sc, WMREG_CTRL, v);
5059 	delay(10);
5060 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5061 	delay(10);
5062 	CSR_WRITE(sc, WMREG_CTRL, v);
5063 	delay(10);
5064 
5065 	for (i = 0; i < 16; i++) {
5066 		data <<= 1;
5067 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5068 		delay(10);
5069 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
5070 			data |= 1;
5071 		CSR_WRITE(sc, WMREG_CTRL, v);
5072 		delay(10);
5073 	}
5074 
5075 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5076 	delay(10);
5077 	CSR_WRITE(sc, WMREG_CTRL, v);
5078 	delay(10);
5079 
5080 	return (data);
5081 }
5082 
5083 #undef MDI_IO
5084 #undef MDI_DIR
5085 #undef MDI_CLK
5086 
5087 /*
5088  * wm_gmii_i82543_readreg:	[mii interface function]
5089  *
5090  *	Read a PHY register on the GMII (i82543 version).
5091  */
5092 static int
5093 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
5094 {
5095 	struct wm_softc *sc = device_private(self);
5096 	int rv;
5097 
5098 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
5099 	i82543_mii_sendbits(sc, reg | (phy << 5) |
5100 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
5101 	rv = i82543_mii_recvbits(sc) & 0xffff;
5102 
5103 	DPRINTF(WM_DEBUG_GMII,
5104 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
5105 	    device_xname(sc->sc_dev), phy, reg, rv));
5106 
5107 	return (rv);
5108 }
5109 
5110 /*
5111  * wm_gmii_i82543_writereg:	[mii interface function]
5112  *
5113  *	Write a PHY register on the GMII (i82543 version).
5114  */
5115 static void
5116 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
5117 {
5118 	struct wm_softc *sc = device_private(self);
5119 
5120 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
5121 	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
5122 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
5123 	    (MII_COMMAND_START << 30), 32);
5124 }
5125 
5126 /*
5127  * wm_gmii_i82544_readreg:	[mii interface function]
5128  *
5129  *	Read a PHY register on the GMII.
5130  */
5131 static int
5132 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
5133 {
5134 	struct wm_softc *sc = device_private(self);
5135 	uint32_t mdic = 0;
5136 	int i, rv;
5137 
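
	/*
	 * The MDIC register clocks the management frame out in hardware;
	 * start the read, then poll for READY (up to 320 * 10us).
	 */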
5138 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
5139 	    MDIC_REGADD(reg));
5140 
5141 	for (i = 0; i < 320; i++) {
5142 		mdic = CSR_READ(sc, WMREG_MDIC);
5143 		if (mdic & MDIC_READY)
5144 			break;
5145 		delay(10);
5146 	}
5147 
5148 	if ((mdic & MDIC_READY) == 0) {
5149 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
5150 		    device_xname(sc->sc_dev), phy, reg);
5151 		rv = 0;
5152 	} else if (mdic & MDIC_E) {
5153 #if 0 /* This is normal if no PHY is present. */
5154 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
5155 		    device_xname(sc->sc_dev), phy, reg);
5156 #endif
5157 		rv = 0;
5158 	} else {
5159 		rv = MDIC_DATA(mdic);
5160 		if (rv == 0xffff)
5161 			rv = 0;
5162 	}
5163 
5164 	return (rv);
5165 }
5166 
5167 /*
5168  * wm_gmii_i82544_writereg:	[mii interface function]
5169  *
5170  *	Write a PHY register on the GMII.
5171  */
5172 static void
5173 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
5174 {
5175 	struct wm_softc *sc = device_private(self);
5176 	uint32_t mdic = 0;
5177 	int i;
5178 
5179 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
5180 	    MDIC_REGADD(reg) | MDIC_DATA(val));
5181 
5182 	for (i = 0; i < 320; i++) {
5183 		mdic = CSR_READ(sc, WMREG_MDIC);
5184 		if (mdic & MDIC_READY)
5185 			break;
5186 		delay(10);
5187 	}
5188 
5189 	if ((mdic & MDIC_READY) == 0)
5190 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
5191 		    device_xname(sc->sc_dev), phy, reg);
5192 	else if (mdic & MDIC_E)
5193 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
5194 		    device_xname(sc->sc_dev), phy, reg);
5195 }
5196 
5197 /*
5198  * wm_gmii_i80003_readreg:	[mii interface function]
5199  *
5200  *	Read a PHY register on the kumeran
5201  * This could be handled by the PHY layer if we didn't have to lock the
5202  * resource ...
5203  */
5204 static int
5205 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
5206 {
5207 	struct wm_softc *sc = device_private(self);
5208 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
5209 	int rv;
5210 
5211 	if (phy != 1) /* only one PHY on kumeran bus */
5212 		return 0;
5213 
5214 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
5215 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5216 		    __func__);
5217 		return 0;
5218 	}
5219 
5220 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
5221 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5222 		    reg >> GG82563_PAGE_SHIFT);
5223 	} else {
5224 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
5225 		    reg >> GG82563_PAGE_SHIFT);
5226 	}
5227 	/* Wait 200us more to work around a bug in the MDIC ready bit */
5228 	delay(200);
5229 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
5230 	delay(200);
5231 
5232 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
5233 	return (rv);
5234 }
5235 
5236 /*
5237  * wm_gmii_i80003_writereg:	[mii interface function]
5238  *
5239  *	Write a PHY register on the kumeran.
5240  * This could be handled by the PHY layer if we didn't have to lock the
5241  * resource ...
5242  */
5243 static void
5244 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
5245 {
5246 	struct wm_softc *sc = device_private(self);
5247 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
5248 
5249 	if (phy != 1) /* only one PHY on kumeran bus */
5250 		return;
5251 
5252 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
5253 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5254 		    __func__);
5255 		return;
5256 	}
5257 
5258 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
5259 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5260 		    reg >> GG82563_PAGE_SHIFT);
5261 	} else {
5262 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
5263 		    reg >> GG82563_PAGE_SHIFT);
5264 	}
5265 	/* Wait 200us more to work around a bug in the MDIC ready bit */
5266 	delay(200);
5267 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
5268 	delay(200);
5269 
5270 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
5271 }
5272 
5273 /*
5274  * wm_gmii_bm_readreg:	[mii interface function]
5275  *
5276  *	Read a PHY register on the BM PHY
5277  * This could be handled by the PHY layer if we didn't have to lock the
5278  * resource ...
5279  */
5280 static int
5281 wm_gmii_bm_readreg(device_t self, int phy, int reg)
5282 {
5283 	struct wm_softc *sc = device_private(self);
5284 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
5285 	int rv;
5286 
5287 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
5288 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5289 		    __func__);
5290 		return 0;
5291 	}
5292 
5293 	if (reg > GG82563_MAX_REG_ADDRESS) {
5294 		if (phy == 1)
5295 			wm_gmii_i82544_writereg(self, phy, 0x1f,
5296 			    reg);
5297 		else
5298 			wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5299 			    reg >> GG82563_PAGE_SHIFT);
5300 
5301 	}
5302 
5303 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
5304 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
5305 	return (rv);
5306 }
5307 
5308 /*
5309  * wm_gmii_bm_writereg:	[mii interface function]
5310  *
5311  *	Write a PHY register on the BM PHY.
5312  * This could be handled by the PHY layer if we didn't have to lock the
5313  * resource ...
5314  */
5315 static void
5316 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
5317 {
5318 	struct wm_softc *sc = device_private(self);
5319 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
5320 
5321 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
5322 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5323 		    __func__);
5324 		return;
5325 	}
5326 
5327 	if (reg > GG82563_MAX_REG_ADDRESS) {
5328 		if (phy == 1)
5329 			wm_gmii_i82544_writereg(self, phy, 0x1f,
5330 			    reg);
5331 		else
5332 			wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5333 			    reg >> GG82563_PAGE_SHIFT);
5334 
5335 	}
5336 
5337 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
5338 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
5339 }
5340 
5341 /*
5342  * wm_gmii_statchg:	[mii interface function]
5343  *
5344  *	Callback from MII layer when media changes.
5345  */
5346 static void
5347 wm_gmii_statchg(device_t self)
5348 {
5349 	struct wm_softc *sc = device_private(self);
5350 	struct mii_data *mii = &sc->sc_mii;
5351 
5352 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
5353 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5354 	sc->sc_fcrtl &= ~FCRTL_XONE;
5355 
5356 	/*
5357 	 * Get flow control negotiation result.
5358 	 */
5359 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
5360 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
5361 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
5362 		mii->mii_media_active &= ~IFM_ETH_FMASK;
5363 	}
5364 
5365 	if (sc->sc_flowflags & IFM_FLOW) {
5366 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
5367 			sc->sc_ctrl |= CTRL_TFCE;
5368 			sc->sc_fcrtl |= FCRTL_XONE;
5369 		}
5370 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
5371 			sc->sc_ctrl |= CTRL_RFCE;
5372 	}
5373 
5374 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
5375 		DPRINTF(WM_DEBUG_LINK,
5376 		    ("%s: LINK: statchg: FDX\n", device_xname(sc->sc_dev)));
5377 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	} else {
5379 		DPRINTF(WM_DEBUG_LINK,
5380 		    ("%s: LINK: statchg: HDX\n", device_xname(sc->sc_dev)));
5381 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5382 	}
5383 
5384 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5385 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5386 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
5387 						 : WMREG_FCRTL, sc->sc_fcrtl);
5388 	if (sc->sc_type == WM_T_80003) {
		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
		case IFM_1000_T:
			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg = TIPG_1000T_80003_DFLT;
			break;
		default:
			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg = TIPG_10_100_80003_DFLT;
			break;
		}
5401 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
5402 	}
5403 }
5404 
5405 /*
5406  * wm_kmrn_readreg:
5407  *
5408  *	Read a kumeran register
5409  */
5410 static int
5411 wm_kmrn_readreg(struct wm_softc *sc, int reg)
5412 {
5413 	int rv;
5414 
	if (sc->sc_flags & WM_F_SWFW_SYNC) {
5416 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
5417 			aprint_error_dev(sc->sc_dev,
5418 			    "%s: failed to get semaphore\n", __func__);
5419 			return 0;
5420 		}
	} else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
5422 		if (wm_get_swfwhw_semaphore(sc)) {
5423 			aprint_error_dev(sc->sc_dev,
5424 			    "%s: failed to get semaphore\n", __func__);
5425 			return 0;
5426 		}
5427 	}
5428 
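	/*
	 * Shift the register offset into the KUMCTRLSTA offset field and
	 * set the Read ENable bit; the result appears in the low 16 bits
	 * of KUMCTRLSTA.
	 */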
5429 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
5430 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
5431 	    KUMCTRLSTA_REN);
5432 	delay(2);
5433 
5434 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
5435 
	if (sc->sc_flags & WM_F_SWFW_SYNC)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5439 		wm_put_swfwhw_semaphore(sc);
5440 
5441 	return (rv);
5442 }
5443 
5444 /*
5445  * wm_kmrn_writereg:
5446  *
5447  *	Write a kumeran register
5448  */
5449 static void
5450 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
5451 {
5452 
	if (sc->sc_flags & WM_F_SWFW_SYNC) {
5454 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
5455 			aprint_error_dev(sc->sc_dev,
5456 			    "%s: failed to get semaphore\n", __func__);
5457 			return;
5458 		}
	} else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
5460 		if (wm_get_swfwhw_semaphore(sc)) {
5461 			aprint_error_dev(sc->sc_dev,
5462 			    "%s: failed to get semaphore\n", __func__);
5463 			return;
5464 		}
5465 	}
5466 
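	/*
	 * Program the offset and data together; with KUMCTRLSTA_REN
	 * clear, this performs a register write.
	 */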
5467 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
5468 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
5469 	    (val & KUMCTRLSTA_MASK));
5470 
	if (sc->sc_flags & WM_F_SWFW_SYNC)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5474 		wm_put_swfwhw_semaphore(sc);
5475 }
5476 
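/*
 * wm_is_onboard_nvm_eeprom:
 *
 *	Returns 0 if the NVM is flash-based (82573/82574/82583 with both
 *	EECD bits 15 and 16 set), 1 otherwise (on-board EEPROM).
 */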
5477 static int
5478 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
5479 {
5480 	uint32_t eecd = 0;
5481 
5482 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
5483 	    || sc->sc_type == WM_T_82583) {
5484 		eecd = CSR_READ(sc, WMREG_EECD);
5485 
5486 		/* Isolate bits 15 & 16 */
5487 		eecd = ((eecd >> 15) & 0x03);
5488 
5489 		/* If both bits are set, device is Flash type */
5490 		if (eecd == 0x03)
5491 			return 0;
5492 	}
5493 	return 1;
5494 }
5495 
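/*
 * wm_get_swsm_semaphore:
 *
 *	Acquire the software/firmware semaphore: set SWSM_SWESMBI and
 *	poll until the bit reads back as set.  Returns 0 on success,
 *	1 on timeout.
 */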
5496 static int
5497 wm_get_swsm_semaphore(struct wm_softc *sc)
5498 {
5499 	int32_t timeout;
5500 	uint32_t swsm;
5501 
5502 	/* Get the FW semaphore. */
5503 	timeout = 1000 + 1; /* XXX */
5504 	while (timeout) {
5505 		swsm = CSR_READ(sc, WMREG_SWSM);
5506 		swsm |= SWSM_SWESMBI;
5507 		CSR_WRITE(sc, WMREG_SWSM, swsm);
5508 		/* if we managed to set the bit we got the semaphore. */
5509 		swsm = CSR_READ(sc, WMREG_SWSM);
5510 		if (swsm & SWSM_SWESMBI)
5511 			break;
5512 
5513 		delay(50);
5514 		timeout--;
5515 	}
5516 
5517 	if (timeout == 0) {
5518 		aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n");
5519 		/* Release semaphores */
5520 		wm_put_swsm_semaphore(sc);
5521 		return 1;
5522 	}
5523 	return 0;
5524 }
5525 
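/*
 * wm_put_swsm_semaphore:
 *
 *	Release the software/firmware semaphore by clearing SWSM_SWESMBI.
 */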
5526 static void
5527 wm_put_swsm_semaphore(struct wm_softc *sc)
5528 {
5529 	uint32_t swsm;
5530 
5531 	swsm = CSR_READ(sc, WMREG_SWSM);
5532 	swsm &= ~(SWSM_SWESMBI);
5533 	CSR_WRITE(sc, WMREG_SWSM, swsm);
5534 }
5535 
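/*
 * wm_get_swfw_semaphore:
 *
 *	Acquire the resources named by "mask" in the SW_FW_SYNC register,
 *	taking the SWSM semaphore around each access where required.
 *	Returns 0 on success, 1 if software or firmware still holds the
 *	resource when the retry loop expires.  Typical usage:
 *
 *		if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM))
 *			return 0;
 *		... access the PHY ...
 *		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
 */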
5536 static int
5537 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
5538 {
5539 	uint32_t swfw_sync;
5540 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
5541 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;

	for (timeout = 0; timeout < 200; timeout++) {
5545 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5546 			if (wm_get_swsm_semaphore(sc)) {
5547 				aprint_error_dev(sc->sc_dev,
5548 				    "%s: failed to get semaphore\n",
5549 				    __func__);
5550 				return 1;
5551 			}
5552 		}
5553 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
5554 		if ((swfw_sync & (swmask | fwmask)) == 0) {
5555 			swfw_sync |= swmask;
5556 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
5557 			if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5558 				wm_put_swsm_semaphore(sc);
5559 			return 0;
5560 		}
5561 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5562 			wm_put_swsm_semaphore(sc);
5563 		delay(5000);
5564 	}
5565 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
5566 	    device_xname(sc->sc_dev), mask, swfw_sync);
5567 	return 1;
5568 }
5569 
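/*
 * wm_put_swfw_semaphore:
 *
 *	Release the SW_FW_SYNC resources named by "mask".
 */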
5570 static void
5571 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
5572 {
5573 	uint32_t swfw_sync;
5574 
5575 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5576 		while (wm_get_swsm_semaphore(sc) != 0)
5577 			continue;
5578 	}
5579 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
5580 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
5581 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
5582 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5583 		wm_put_swsm_semaphore(sc);
5584 }
5585 
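/*
 * wm_get_swfwhw_semaphore:
 *
 *	Acquire the software flag (E1000_EXTCNF_CTRL_SWFLAG) used on
 *	ICH-family devices: set the flag and poll until it reads back
 *	as set.  Returns 0 on success, 1 on timeout.
 */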
5586 static int
5587 wm_get_swfwhw_semaphore(struct wm_softc *sc)
5588 {
5589 	uint32_t ext_ctrl;
	int timeout;

	for (timeout = 0; timeout < 200; timeout++) {
5593 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5594 		ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
5595 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
5596 
5597 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5598 		if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
5599 			return 0;
5600 		delay(5000);
5601 	}
5602 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
5603 	    device_xname(sc->sc_dev), ext_ctrl);
5604 	return 1;
5605 }
5606 
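/*
 * wm_put_swfwhw_semaphore:
 *
 *	Release the software flag by clearing E1000_EXTCNF_CTRL_SWFLAG.
 */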
5607 static void
5608 wm_put_swfwhw_semaphore(struct wm_softc *sc)
5609 {
	uint32_t ext_ctrl;

	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5612 	ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
5613 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
5614 }
5615 
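/*
 * wm_valid_nvm_bank_detect_ich8lan:
 *
 *	Determine which of the two NVM banks in the ICH flash holds the
 *	valid image.  Pre-ICH10 parts report this directly through the
 *	EECD_SEC1VAL bit; on ICH10 the signature byte of each bank is
 *	examined instead.
 */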
5616 static int
5617 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
5618 {
5619 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
5620 	uint8_t bank_high_byte;
5621 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
5622 
5623 	if (sc->sc_type != WM_T_ICH10) {
5624 		/* Value of bit 22 corresponds to the flash bank we're on. */
5625 		*bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
5626 	} else {
5627 		wm_read_ich8_byte(sc, act_offset, &bank_high_byte);
5628 		if ((bank_high_byte & 0xc0) == 0x80)
5629 			*bank = 0;
5630 		else {
5631 			wm_read_ich8_byte(sc, act_offset + bank1_offset,
5632 			    &bank_high_byte);
5633 			if ((bank_high_byte & 0xc0) == 0x80)
5634 				*bank = 1;
5635 			else {
5636 				aprint_error_dev(sc->sc_dev,
5637 				    "EEPROM not present\n");
5638 				return -1;
5639 			}
5640 		}
5641 	}
5642 
5643 	return 0;
5644 }
5645 
5646 /******************************************************************************
5647  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
5648  * register.
5649  *
5650  * sc - Struct containing variables accessed by shared code
5651  * offset - offset of word in the EEPROM to read
5652  * data - word read from the EEPROM
5653  * words - number of words to read
5654  *****************************************************************************/
5655 static int
5656 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
5657 {
5658     int32_t  error = 0;
5659     uint32_t flash_bank = 0;
5660     uint32_t act_offset = 0;
5661     uint32_t bank_offset = 0;
5662     uint16_t word = 0;
5663     uint16_t i = 0;
5664 
5665     /* We need to know which is the valid flash bank.  In the event
5666      * that we didn't allocate eeprom_shadow_ram, we may not be
5667      * managing flash_bank.  So it cannot be trusted and needs
5668      * to be updated with each read.
5669      */
5670     error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
5671     if (error) {
5672 	    aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
5673 		    __func__);
5674         return error;
5675     }
5676 
5677     /* Adjust offset appropriately if we're on bank 1 - adjust for word size */
5678     bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
5679 
5680     error = wm_get_swfwhw_semaphore(sc);
5681     if (error) {
5682 	    aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5683 		__func__);
5684         return error;
5685     }
5686 
5687     for (i = 0; i < words; i++) {
5688             /* The NVM part needs a byte offset, hence * 2 */
5689             act_offset = bank_offset + ((offset + i) * 2);
5690             error = wm_read_ich8_word(sc, act_offset, &word);
5691             if (error) {
5692 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
5693 		    __func__);
5694                 break;
5695 	    }
5696             data[i] = word;
5697     }
5698 
5699     wm_put_swfwhw_semaphore(sc);
5700     return error;
5701 }
5702 
5703 /******************************************************************************
5704  * This function does initial flash setup so that a new read/write/erase cycle
5705  * can be started.
5706  *
5707  * sc - The pointer to the hw structure
5708  ****************************************************************************/
5709 static int32_t
5710 wm_ich8_cycle_init(struct wm_softc *sc)
5711 {
5712     uint16_t hsfsts;
5713     int32_t error = 1;
5714     int32_t i     = 0;
5715 
5716     hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5717 
    /* Check the Flash Descriptor Valid bit in the HW status register */
5719     if ((hsfsts & HSFSTS_FLDVAL) == 0) {
5720         return error;
5721     }
5722 
    /* Clear FCERR and DAEL in HW status by writing 1s */
5725     hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
5726 
5727     ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
5728 
    /* Either we should have a hardware SPI cycle-in-progress bit to check
     * against, in order to start a new cycle, or the FDONE bit should be
     * changed in the hardware so that it is 1 after hardware reset, which
     * could then be used to indicate whether a cycle is in progress or has
     * been completed.  We should also have some software semaphore
     * mechanism to guard FDONE or the cycle-in-progress bit so that access
     * to those bits by two threads is serialized, or some way to prevent
     * two threads from starting a cycle at the same time. */
5737 
5738     if ((hsfsts & HSFSTS_FLINPRO) == 0) {
5739         /* There is no cycle running at present, so we can start a cycle */
5740         /* Begin by setting Flash Cycle Done. */
5741         hsfsts |= HSFSTS_DONE;
5742         ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
5743         error = 0;
5744     } else {
        /* Otherwise poll for some time so the current cycle has a chance
         * to end before giving up. */
5747         for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
5748             hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5749             if ((hsfsts & HSFSTS_FLINPRO) == 0) {
5750                 error = 0;
5751                 break;
5752             }
5753             delay(1);
5754         }
5755         if (error == 0) {
            /* The previous cycle completed within the timeout,
             * so now set the Flash Cycle Done. */
5758             hsfsts |= HSFSTS_DONE;
5759             ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
5760         }
5761     }
5762     return error;
5763 }
5764 
5765 /******************************************************************************
5766  * This function starts a flash cycle and waits for its completion
5767  *
5768  * sc - The pointer to the hw structure
5769  ****************************************************************************/
5770 static int32_t
5771 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
5772 {
5773     uint16_t hsflctl;
5774     uint16_t hsfsts;
5775     int32_t error = 1;
5776     uint32_t i = 0;
5777 
5778     /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
5779     hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
5780     hsflctl |= HSFCTL_GO;
5781     ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
5782 
5783     /* wait till FDONE bit is set to 1 */
5784     do {
5785         hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5786         if (hsfsts & HSFSTS_DONE)
5787             break;
5788         delay(1);
5789         i++;
5790     } while (i < timeout);
    if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0) {
5792         error = 0;
5793     }
5794     return error;
5795 }
5796 
5797 /******************************************************************************
5798  * Reads a byte or word from the NVM using the ICH8 flash access registers.
5799  *
5800  * sc - The pointer to the hw structure
5801  * index - The index of the byte or word to read.
5802  * size - Size of data to read, 1=byte 2=word
5803  * data - Pointer to the word to store the value read.
5804  *****************************************************************************/
5805 static int32_t
5806 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
5807                      uint32_t size, uint16_t* data)
5808 {
5809     uint16_t hsfsts;
5810     uint16_t hsflctl;
5811     uint32_t flash_linear_address;
5812     uint32_t flash_data = 0;
5813     int32_t error = 1;
5814     int32_t count = 0;
5815 
    if (size < 1 || size > 2 || data == NULL ||
        index > ICH_FLASH_LINEAR_ADDR_MASK)
5818         return error;
5819 
5820     flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
5821                            sc->sc_ich8_flash_base;
5822 
5823     do {
5824         delay(1);
5825         /* Steps */
5826         error = wm_ich8_cycle_init(sc);
5827         if (error)
5828             break;
5829 
5830         hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
        /* A byte count of 0b/1b corresponds to a 1- or 2-byte transfer. */
5832         hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT) & HSFCTL_BCOUNT_MASK;
5833         hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
5834         ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
5835 
        /* Write the last 24 bits of the index into the Flash Linear
         * Address field of the Flash Address register. */
5838         /* TODO: TBD maybe check the index against the size of flash */
5839 
5840         ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
5841 
5842         error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
5843 
        /* If FCERR is set, clear it and retry the whole sequence a few
         * more times; otherwise read in the Flash Data0 register, least
         * significant byte first. */
5847         if (error == 0) {
5848             flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
5849             if (size == 1) {
5850                 *data = (uint8_t)(flash_data & 0x000000FF);
5851             } else if (size == 2) {
5852                 *data = (uint16_t)(flash_data & 0x0000FFFF);
5853             }
5854             break;
5855         } else {
5856             /* If we've gotten here, then things are probably completely hosed,
5857              * but if the error condition is detected, it won't hurt to give
5858              * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
5859              */
5860             hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5861             if (hsfsts & HSFSTS_ERR) {
5862                 /* Repeat for some time before giving up. */
5863                 continue;
5864             } else if ((hsfsts & HSFSTS_DONE) == 0) {
5865                 break;
5866             }
5867         }
5868     } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
5869 
5870     return error;
5871 }
5872 
5873 /******************************************************************************
5874  * Reads a single byte from the NVM using the ICH8 flash access registers.
5875  *
5876  * sc - pointer to wm_hw structure
5877  * index - The index of the byte to read.
5878  * data - Pointer to a byte to store the value read.
5879  *****************************************************************************/
5880 static int32_t
5881 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
5882 {
5883     int32_t status;
5884     uint16_t word = 0;
5885 
5886     status = wm_read_ich8_data(sc, index, 1, &word);
5887     if (status == 0) {
5888         *data = (uint8_t)word;
5889     }
5890 
5891     return status;
5892 }
5893 
5894 /******************************************************************************
5895  * Reads a word from the NVM using the ICH8 flash access registers.
5896  *
5897  * sc - pointer to wm_hw structure
5898  * index - The starting byte index of the word to read.
5899  * data - Pointer to a word to store the value read.
5900  *****************************************************************************/
5901 static int32_t
5902 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
5903 {
5904     int32_t status;
5905 
5906     status = wm_read_ich8_data(sc, index, 2, data);
5907     return status;
5908 }
5909 
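/*
 * wm_check_mng_mode:
 *
 *	Return nonzero if manageability firmware is running, using the
 *	chip-specific check for this adapter type.
 */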
5910 static int
5911 wm_check_mng_mode(struct wm_softc *sc)
5912 {
5913 	int rv;
5914 
5915 	switch (sc->sc_type) {
5916 	case WM_T_ICH8:
5917 	case WM_T_ICH9:
5918 	case WM_T_ICH10:
5919 		rv = wm_check_mng_mode_ich8lan(sc);
5920 		break;
5921 	case WM_T_82574:
5922 	case WM_T_82583:
5923 		rv = wm_check_mng_mode_82574(sc);
5924 		break;
5925 	case WM_T_82571:
5926 	case WM_T_82572:
5927 	case WM_T_82573:
5928 	case WM_T_80003:
5929 		rv = wm_check_mng_mode_generic(sc);
5930 		break;
5931 	default:
		/* nothing to do */
5933 		rv = 0;
5934 		break;
5935 	}
5936 
5937 	return rv;
5938 }
5939 
5940 static int
5941 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
5942 {
5943 	uint32_t fwsm;
5944 
5945 	fwsm = CSR_READ(sc, WMREG_FWSM);
5946 
5947 	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
5948 		return 1;
5949 
5950 	return 0;
5951 }
5952 
5953 static int
5954 wm_check_mng_mode_82574(struct wm_softc *sc)
5955 {
5956 	uint16_t data;
5957 
5958 	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
5959 
5960 	if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
5961 		return 1;
5962 
5963 	return 0;
5964 }
5965 
5966 static int
5967 wm_check_mng_mode_generic(struct wm_softc *sc)
5968 {
5969 	uint32_t fwsm;
5970 
5971 	fwsm = CSR_READ(sc, WMREG_FWSM);
5972 
5973 	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
5974 		return 1;
5975 
5976 	return 0;
5977 }
5978 
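/*
 * wm_check_reset_block:
 *
 *	Return 0 if a PHY reset is currently permitted, or -1 if the
 *	manageability firmware is blocking it.
 */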
5979 static int
5980 wm_check_reset_block(struct wm_softc *sc)
5981 {
5982 	uint32_t reg;
5983 
5984 	switch (sc->sc_type) {
5985 	case WM_T_ICH8:
5986 	case WM_T_ICH9:
5987 	case WM_T_ICH10:
5988 		reg = CSR_READ(sc, WMREG_FWSM);
5989 		if ((reg & FWSM_RSPCIPHY) != 0)
5990 			return 0;
5991 		else
5992 			return -1;
5993 		break;
5994 	case WM_T_82571:
5995 	case WM_T_82572:
5996 	case WM_T_82573:
5997 	case WM_T_82574:
5998 	case WM_T_82583:
5999 	case WM_T_80003:
6000 		reg = CSR_READ(sc, WMREG_MANC);
6001 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
6002 			return -1;
6003 		else
6004 			return 0;
6005 		break;
6006 	default:
6007 		/* no problem */
6008 		break;
6009 	}
6010 
6011 	return 0;
6012 }
6013 
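/*
 * wm_get_hw_control:
 *
 *	Tell the firmware that the driver has taken control of the
 *	device by setting the DRV_LOAD bit in SWSM or CTRL_EXT,
 *	depending on the adapter type.
 */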
6014 static void
6015 wm_get_hw_control(struct wm_softc *sc)
6016 {
6017 	uint32_t reg;
6018 
6019 	switch (sc->sc_type) {
6020 	case WM_T_82573:
6021 #if 0
6022 	case WM_T_82574:
6023 	case WM_T_82583:
6024 		/*
		 * FreeBSD's em driver has a function for the 82574 that
		 * checks the management mode, but it's not used.  Why?
6027 		 */
6028 #endif
6029 		reg = CSR_READ(sc, WMREG_SWSM);
6030 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
6031 		break;
6032 	case WM_T_82571:
6033 	case WM_T_82572:
6034 	case WM_T_80003:
6035 	case WM_T_ICH8:
6036 	case WM_T_ICH9:
6037 	case WM_T_ICH10:
6038 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
6039 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
6040 		break;
6041 	default:
6042 		break;
6043 	}
6044 }
6045 
6046 /* XXX Currently TBI only */
6047 static int
6048 wm_check_for_link(struct wm_softc *sc)
6049 {
6050 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6051 	uint32_t rxcw;
6052 	uint32_t ctrl;
6053 	uint32_t status;
6054 	uint32_t sig;
6055 
6056 	rxcw = CSR_READ(sc, WMREG_RXCW);
6057 	ctrl = CSR_READ(sc, WMREG_CTRL);
6058 	status = CSR_READ(sc, WMREG_STATUS);
6059 
6060 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
6061 
6062 	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
6063 		device_xname(sc->sc_dev), __func__,
6064 		((ctrl & CTRL_SWDPIN(1)) == sig),
6065 		((status & STATUS_LU) != 0),
6066 		((rxcw & RXCW_C) != 0)
6067 		    ));
6068 
6069 	/*
6070 	 * SWDPIN   LU RXCW
6071 	 *      0    0    0
6072 	 *      0    0    1	(should not happen)
6073 	 *      0    1    0	(should not happen)
6074 	 *      0    1    1	(should not happen)
6075 	 *      1    0    0	Disable autonego and force linkup
6076 	 *      1    0    1	got /C/ but not linkup yet
6077 	 *      1    1    0	(linkup)
6078 	 *      1    1    1	If IFM_AUTO, back to autonego
6079 	 *
6080 	 */
6081 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
6082 	    && ((status & STATUS_LU) == 0)
6083 	    && ((rxcw & RXCW_C) == 0)) {
6084 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
6085 			__func__));
6086 		sc->sc_tbi_linkup = 0;
6087 		/* Disable auto-negotiation in the TXCW register */
6088 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
6089 
6090 		/*
6091 		 * Force link-up and also force full-duplex.
6092 		 *
		 * NOTE: the hardware updated TFCE and RFCE in CTRL
		 * automatically, so we should update sc->sc_ctrl.
6095 		 */
6096 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
6097 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	} else if (((status & STATUS_LU) != 0)
6099 	    && ((rxcw & RXCW_C) != 0)
6100 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
6101 		sc->sc_tbi_linkup = 1;
6102 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
6103 			__func__));
6104 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
6105 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
6106 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
6107 	    && ((rxcw & RXCW_C) != 0)) {
6108 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
6109 	} else {
6110 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
6111 			status));
6112 	}
6113 
6114 	return 0;
6115 }
6116