xref: /netbsd-src/sys/dev/pci/if_wm.c (revision da9817918ec7e88db2912a2882967c7570a83f47)
1 /*	$NetBSD: if_wm.c,v 1.175 2009/05/29 04:57:04 darran Exp $	*/
2 
3 /*
4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*******************************************************************************
39 
40   Copyright (c) 2001-2005, Intel Corporation
41   All rights reserved.
42 
43   Redistribution and use in source and binary forms, with or without
44   modification, are permitted provided that the following conditions are met:
45 
46    1. Redistributions of source code must retain the above copyright notice,
47       this list of conditions and the following disclaimer.
48 
49    2. Redistributions in binary form must reproduce the above copyright
50       notice, this list of conditions and the following disclaimer in the
51       documentation and/or other materials provided with the distribution.
52 
53    3. Neither the name of the Intel Corporation nor the names of its
54       contributors may be used to endorse or promote products derived from
55       this software without specific prior written permission.
56 
57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67   POSSIBILITY OF SUCH DAMAGE.
68 
69 *******************************************************************************/
70 /*
71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72  *
73  * TODO (in order of importance):
74  *
75  *	- Rework how parameters are loaded from the EEPROM.
76  */
77 
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.175 2009/05/29 04:57:04 darran Exp $");
80 
81 #include "bpfilter.h"
82 #include "rnd.h"
83 
84 #include <sys/param.h>
85 #include <sys/systm.h>
86 #include <sys/callout.h>
87 #include <sys/mbuf.h>
88 #include <sys/malloc.h>
89 #include <sys/kernel.h>
90 #include <sys/socket.h>
91 #include <sys/ioctl.h>
92 #include <sys/errno.h>
93 #include <sys/device.h>
94 #include <sys/queue.h>
95 #include <sys/syslog.h>
96 
97 #include <uvm/uvm_extern.h>		/* for PAGE_SIZE */
98 
99 #if NRND > 0
100 #include <sys/rnd.h>
101 #endif
102 
103 #include <net/if.h>
104 #include <net/if_dl.h>
105 #include <net/if_media.h>
106 #include <net/if_ether.h>
107 
108 #if NBPFILTER > 0
109 #include <net/bpf.h>
110 #endif
111 
112 #include <netinet/in.h>			/* XXX for struct ip */
113 #include <netinet/in_systm.h>		/* XXX for struct ip */
114 #include <netinet/ip.h>			/* XXX for struct ip */
115 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
116 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
117 
118 #include <sys/bus.h>
119 #include <sys/intr.h>
120 #include <machine/endian.h>
121 
122 #include <dev/mii/mii.h>
123 #include <dev/mii/miivar.h>
124 #include <dev/mii/mii_bitbang.h>
125 #include <dev/mii/ikphyreg.h>
126 
127 #include <dev/pci/pcireg.h>
128 #include <dev/pci/pcivar.h>
129 #include <dev/pci/pcidevs.h>
130 
131 #include <dev/pci/if_wmreg.h>
132 
133 #ifdef WM_DEBUG
134 #define	WM_DEBUG_LINK		0x01
135 #define	WM_DEBUG_TX		0x02
136 #define	WM_DEBUG_RX		0x04
137 #define	WM_DEBUG_GMII		0x08
138 int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK|WM_DEBUG_GMII;
139 
140 #define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
141 #else
142 #define	DPRINTF(x, y)	/* nothing */
143 #endif /* WM_DEBUG */
144 
145 /*
146  * Transmit descriptor list size.  Due to errata, we can only have
147  * 256 hardware descriptors in the ring on < 82544, but we use 4096
148  * on >= 82544.  We tell the upper layers that they can queue a lot
149  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
150  * of them at a time.
151  *
152  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
153  * chains containing many small mbufs have been observed in zero-copy
154  * situations with jumbo frames.
155  */
156 #define	WM_NTXSEGS		256
157 #define	WM_IFQUEUELEN		256
158 #define	WM_TXQUEUELEN_MAX	64
159 #define	WM_TXQUEUELEN_MAX_82547	16
160 #define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
161 #define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
162 #define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
163 #define	WM_NTXDESC_82542	256
164 #define	WM_NTXDESC_82544	4096
165 #define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
166 #define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
167 #define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
168 #define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
169 #define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
170 
171 #define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */
172 
173 /*
174  * Receive descriptor list size.  We have one Rx buffer for normal
175  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
176  * packet.  We allocate 256 receive descriptors, each with a 2k
177  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
178  */
179 #define	WM_NRXDESC		256
180 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
181 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
182 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
183 
184 /*
185  * Control structures are DMA'd to the i82542 chip.  We allocate them in
186  * a single clump that maps to a single DMA segment to make several things
187  * easier.
188  */
189 struct wm_control_data_82544 {
190 	/*
191 	 * The receive descriptors.
192 	 */
193 	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
194 
195 	/*
196 	 * The transmit descriptors.  Put these at the end, because
197 	 * we might use a smaller number of them.
198 	 */
199 	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
200 };
201 
202 struct wm_control_data_82542 {
203 	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
204 	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
205 };
206 
207 #define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
208 #define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
209 #define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
210 
211 /*
212  * Software state for transmit jobs.
213  */
214 struct wm_txsoft {
215 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
216 	bus_dmamap_t txs_dmamap;	/* our DMA map */
217 	int txs_firstdesc;		/* first descriptor in packet */
218 	int txs_lastdesc;		/* last descriptor in packet */
219 	int txs_ndesc;			/* # of descriptors used */
220 };
221 
222 /*
223  * Software state for receive buffers.  Each descriptor gets a
224  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
225  * more than one buffer, we chain them together.
226  */
227 struct wm_rxsoft {
228 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
229 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
230 };
231 
232 typedef enum {
233 	WM_T_unknown		= 0,
234 	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
235 	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
236 	WM_T_82543,			/* i82543 */
237 	WM_T_82544,			/* i82544 */
238 	WM_T_82540,			/* i82540 */
239 	WM_T_82545,			/* i82545 */
240 	WM_T_82545_3,			/* i82545 3.0+ */
241 	WM_T_82546,			/* i82546 */
242 	WM_T_82546_3,			/* i82546 3.0+ */
243 	WM_T_82541,			/* i82541 */
244 	WM_T_82541_2,			/* i82541 2.0+ */
245 	WM_T_82547,			/* i82547 */
246 	WM_T_82547_2,			/* i82547 2.0+ */
247 	WM_T_82571,			/* i82571 */
248 	WM_T_82572,			/* i82572 */
249 	WM_T_82573,			/* i82573 */
250 	WM_T_82574,			/* i82574 */
251 	WM_T_80003,			/* i80003 */
252 	WM_T_ICH8,			/* ICH8 LAN */
253 	WM_T_ICH9,			/* ICH9 LAN */
254 	WM_T_ICH10,			/* ICH10 LAN */
255 } wm_chip_type;
256 
257 #define WM_LINKUP_TIMEOUT	50
258 
259 /*
260  * Software state per device.
261  */
262 struct wm_softc {
263 	device_t sc_dev;		/* generic device information */
264 	bus_space_tag_t sc_st;		/* bus space tag */
265 	bus_space_handle_t sc_sh;	/* bus space handle */
266 	bus_space_tag_t sc_iot;		/* I/O space tag */
267 	bus_space_handle_t sc_ioh;	/* I/O space handle */
268 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
269 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
270 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
271 	struct ethercom sc_ethercom;	/* ethernet common data */
272 	pci_chipset_tag_t sc_pc;
273 	pcitag_t sc_pcitag;
274 
275 	wm_chip_type sc_type;		/* chip type */
276 	int sc_flags;			/* flags; see below */
277 	int sc_bus_speed;		/* PCI/PCIX bus speed */
278 	int sc_pcix_offset;		/* PCIX capability register offset */
279 	int sc_flowflags;		/* 802.3x flow control flags */
280 
281 	void *sc_ih;			/* interrupt cookie */
282 
283 	int sc_ee_addrbits;		/* EEPROM address bits */
284 
285 	struct mii_data sc_mii;		/* MII/media information */
286 
287 	callout_t sc_tick_ch;		/* tick callout */
288 
289 	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
290 #define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
291 
292 	int		sc_align_tweak;
293 
294 	/*
295 	 * Software state for the transmit and receive descriptors.
296 	 */
297 	int			sc_txnum;	/* must be a power of two */
298 	struct wm_txsoft	sc_txsoft[WM_TXQUEUELEN_MAX];
299 	struct wm_rxsoft	sc_rxsoft[WM_NRXDESC];
300 
301 	/*
302 	 * Control data structures.
303 	 */
304 	int			sc_ntxdesc;	/* must be a power of two */
305 	struct wm_control_data_82544 *sc_control_data;
306 #define	sc_txdescs	sc_control_data->wcd_txdescs
307 #define	sc_rxdescs	sc_control_data->wcd_rxdescs
308 
309 #ifdef WM_EVENT_COUNTERS
310 	/* Event counters. */
311 	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
312 	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
313 	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
314 	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
315 	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
316 	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
317 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
318 
319 	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
320 	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
321 	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
322 	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
323 	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
324 	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
325 	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
326 	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */
327 
328 	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
329 	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */
330 
331 	struct evcnt sc_ev_tu;		/* Tx underrun */
332 
333 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
334 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
335 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
336 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
337 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
338 #endif /* WM_EVENT_COUNTERS */
339 
340 	bus_addr_t sc_tdt_reg;		/* offset of TDT register */
341 
342 	int	sc_txfree;		/* number of free Tx descriptors */
343 	int	sc_txnext;		/* next ready Tx descriptor */
344 
345 	int	sc_txsfree;		/* number of free Tx jobs */
346 	int	sc_txsnext;		/* next free Tx job */
347 	int	sc_txsdirty;		/* dirty Tx jobs */
348 
349 	/* These 5 variables are used only on the 82547. */
350 	int	sc_txfifo_size;		/* Tx FIFO size */
351 	int	sc_txfifo_head;		/* current head of FIFO */
352 	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
353 	int	sc_txfifo_stall;	/* Tx FIFO is stalled */
354 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
355 
356 	bus_addr_t sc_rdt_reg;		/* offset of RDT register */
357 
358 	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
359 	int	sc_rxdiscard;
360 	int	sc_rxlen;
361 	struct mbuf *sc_rxhead;
362 	struct mbuf *sc_rxtail;
363 	struct mbuf **sc_rxtailp;
364 
365 	uint32_t sc_ctrl;		/* prototype CTRL register */
366 #if 0
367 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
368 #endif
369 	uint32_t sc_icr;		/* prototype interrupt bits */
370 	uint32_t sc_itr;		/* prototype intr throttling reg */
371 	uint32_t sc_tctl;		/* prototype TCTL register */
372 	uint32_t sc_rctl;		/* prototype RCTL register */
373 	uint32_t sc_txcw;		/* prototype TXCW register */
374 	uint32_t sc_tipg;		/* prototype TIPG register */
375 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
376 	uint32_t sc_pba;		/* prototype PBA register */
377 
378 	int sc_tbi_linkup;		/* TBI link status */
379 	int sc_tbi_anegticks;		/* autonegotiation ticks */
380 	int sc_tbi_ticks;		/* tbi ticks */
381 	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
382 	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */
383 
384 	int sc_mchash_type;		/* multicast filter offset */
385 
386 #if NRND > 0
387 	rndsource_element_t rnd_source;	/* random source */
388 #endif
389 	int sc_ich8_flash_base;
390 	int sc_ich8_flash_bank_size;
391 };
392 
393 #define	WM_RXCHAIN_RESET(sc)						\
394 do {									\
395 	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
396 	*(sc)->sc_rxtailp = NULL;					\
397 	(sc)->sc_rxlen = 0;						\
398 } while (/*CONSTCOND*/0)
399 
400 #define	WM_RXCHAIN_LINK(sc, m)						\
401 do {									\
402 	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
403 	(sc)->sc_rxtailp = &(m)->m_next;				\
404 } while (/*CONSTCOND*/0)
405 
406 /* sc_flags */
407 #define	WM_F_HAS_MII		0x0001	/* has MII */
408 #define	WM_F_EEPROM_HANDSHAKE	0x0002	/* requires EEPROM handshake */
409 #define	WM_F_EEPROM_SEMAPHORE	0x0004	/* EEPROM with semaphore */
410 #define	WM_F_EEPROM_EERDEEWR	0x0008	/* EEPROM access via EERD/EEWR */
411 #define	WM_F_EEPROM_SPI		0x0010	/* EEPROM is SPI */
412 #define	WM_F_EEPROM_FLASH	0x0020	/* EEPROM is FLASH */
413 #define	WM_F_EEPROM_INVALID	0x0040	/* EEPROM not present (bad checksum) */
414 #define	WM_F_IOH_VALID		0x0080	/* I/O handle is valid */
415 #define	WM_F_BUS64		0x0100	/* bus is 64-bit */
416 #define	WM_F_PCIX		0x0200	/* bus is PCI-X */
417 #define	WM_F_CSA		0x0400	/* bus is CSA */
418 #define	WM_F_PCIE		0x0800	/* bus is PCI-Express */
419 #define WM_F_SWFW_SYNC		0x1000  /* Software-Firmware synchronisation */
420 #define WM_F_SWFWHW_SYNC	0x2000  /* Software-Firmware synchronisation */
421 
422 #ifdef WM_EVENT_COUNTERS
423 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
424 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
425 #else
426 #define	WM_EVCNT_INCR(ev)	/* nothing */
427 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
428 #endif
429 
430 #define	CSR_READ(sc, reg)						\
431 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
432 #define	CSR_WRITE(sc, reg, val)						\
433 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
434 #define	CSR_WRITE_FLUSH(sc)						\
435 	(void) CSR_READ((sc), WMREG_STATUS)
436 
437 #define ICH8_FLASH_READ32(sc, reg) \
438 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
439 #define ICH8_FLASH_WRITE32(sc, reg, data) \
440 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
441 
442 #define ICH8_FLASH_READ16(sc, reg) \
443 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
444 #define ICH8_FLASH_WRITE16(sc, reg, data) \
445 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
446 
447 #define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
448 #define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))
449 
450 #define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
451 #define	WM_CDTXADDR_HI(sc, x)						\
452 	(sizeof(bus_addr_t) == 8 ?					\
453 	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)
454 
455 #define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
456 #define	WM_CDRXADDR_HI(sc, x)						\
457 	(sizeof(bus_addr_t) == 8 ?					\
458 	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
459 
460 #define	WM_CDTXSYNC(sc, x, n, ops)					\
461 do {									\
462 	int __x, __n;							\
463 									\
464 	__x = (x);							\
465 	__n = (n);							\
466 									\
467 	/* If it will wrap around, sync to the end of the ring. */	\
468 	if ((__x + __n) > WM_NTXDESC(sc)) {				\
469 		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
470 		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
471 		    (WM_NTXDESC(sc) - __x), (ops));			\
472 		__n -= (WM_NTXDESC(sc) - __x);				\
473 		__x = 0;						\
474 	}								\
475 									\
476 	/* Now sync whatever is left. */				\
477 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
478 	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
479 } while (/*CONSTCOND*/0)
480 
481 #define	WM_CDRXSYNC(sc, x, ops)						\
482 do {									\
483 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
484 	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
485 } while (/*CONSTCOND*/0)
486 
487 #define	WM_INIT_RXDESC(sc, x)						\
488 do {									\
489 	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
490 	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
491 	struct mbuf *__m = __rxs->rxs_mbuf;				\
492 									\
493 	/*								\
494 	 * Note: We scoot the packet forward 2 bytes in the buffer	\
495 	 * so that the payload after the Ethernet header is aligned	\
496 	 * to a 4-byte boundary.					\
497 	 *								\
498 	 * XXX BRAINDAMAGE ALERT!					\
499 	 * The stupid chip uses the same size for every buffer, which	\
500 	 * is set in the Receive Control register.  We are using the 2K	\
501 	 * size option, but what we REALLY want is (2K - 2)!  For this	\
502 	 * reason, we can't "scoot" packets longer than the standard	\
503 	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
504 	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
505 	 * the upper layer copy the headers.				\
506 	 */								\
507 	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
508 									\
509 	wm_set_dma_addr(&__rxd->wrx_addr,				\
510 	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
511 	__rxd->wrx_len = 0;						\
512 	__rxd->wrx_cksum = 0;						\
513 	__rxd->wrx_status = 0;						\
514 	__rxd->wrx_errors = 0;						\
515 	__rxd->wrx_special = 0;						\
516 	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
517 									\
518 	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
519 } while (/*CONSTCOND*/0)
520 
521 static void	wm_start(struct ifnet *);
522 static void	wm_watchdog(struct ifnet *);
523 static int	wm_ioctl(struct ifnet *, u_long, void *);
524 static int	wm_init(struct ifnet *);
525 static void	wm_stop(struct ifnet *, int);
526 
527 static void	wm_reset(struct wm_softc *);
528 static void	wm_rxdrain(struct wm_softc *);
529 static int	wm_add_rxbuf(struct wm_softc *, int);
530 static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
531 static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
532 static int	wm_validate_eeprom_checksum(struct wm_softc *);
533 static void	wm_tick(void *);
534 
535 static void	wm_set_filter(struct wm_softc *);
536 
537 static int	wm_intr(void *);
538 static void	wm_txintr(struct wm_softc *);
539 static void	wm_rxintr(struct wm_softc *);
540 static void	wm_linkintr(struct wm_softc *, uint32_t);
541 
542 static void	wm_tbi_mediainit(struct wm_softc *);
543 static int	wm_tbi_mediachange(struct ifnet *);
544 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
545 
546 static void	wm_tbi_set_linkled(struct wm_softc *);
547 static void	wm_tbi_check_link(struct wm_softc *);
548 
549 static void	wm_gmii_reset(struct wm_softc *);
550 
551 static int	wm_gmii_i82543_readreg(device_t, int, int);
552 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
553 
554 static int	wm_gmii_i82544_readreg(device_t, int, int);
555 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
556 
557 static int	wm_gmii_i80003_readreg(device_t, int, int);
558 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
559 
560 static int	wm_gmii_bm_readreg(device_t, int, int);
561 static void	wm_gmii_bm_writereg(device_t, int, int, int);
562 
563 static void	wm_gmii_statchg(device_t);
564 
565 static void	wm_gmii_mediainit(struct wm_softc *);
566 static int	wm_gmii_mediachange(struct ifnet *);
567 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
568 
569 static int	wm_kmrn_i80003_readreg(struct wm_softc *, int);
570 static void	wm_kmrn_i80003_writereg(struct wm_softc *, int, int);
571 
572 static int	wm_match(device_t, cfdata_t, void *);
573 static void	wm_attach(device_t, device_t, void *);
574 static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
575 static void	wm_get_auto_rd_done(struct wm_softc *);
576 static int	wm_get_swsm_semaphore(struct wm_softc *);
577 static void	wm_put_swsm_semaphore(struct wm_softc *);
578 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
579 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
580 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
581 static int	wm_get_swfwhw_semaphore(struct wm_softc *);
582 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
583 
584 static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
585 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
586 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
587 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
588 		     uint32_t, uint16_t *);
589 static int32_t	wm_read_ich8_byte(struct wm_softc *sc, uint32_t, uint8_t *);
590 static int32_t	wm_read_ich8_word(struct wm_softc *sc, uint32_t, uint16_t *);
591 static void	wm_82547_txfifo_stall(void *);
592 static int	wm_check_mng_mode(struct wm_softc *);
593 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
594 #if 0
595 static int	wm_check_mng_mode_82574(struct wm_softc *);
596 #endif
597 static int	wm_check_mng_mode_generic(struct wm_softc *);
598 static void	wm_get_hw_control(struct wm_softc *);
599 static int	wm_check_for_link(struct wm_softc *);
600 
601 CFATTACH_DECL_NEW(wm, sizeof(struct wm_softc),
602     wm_match, wm_attach, NULL, NULL);
603 
604 
605 /*
606  * Devices supported by this driver.
607  */
608 static const struct wm_product {
609 	pci_vendor_id_t		wmp_vendor;
610 	pci_product_id_t	wmp_product;
611 	const char		*wmp_name;
612 	wm_chip_type		wmp_type;
613 	int			wmp_flags;
614 #define	WMP_F_1000X		0x01
615 #define	WMP_F_1000T		0x02
616 } wm_products[] = {
617 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
618 	  "Intel i82542 1000BASE-X Ethernet",
619 	  WM_T_82542_2_1,	WMP_F_1000X },
620 
621 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
622 	  "Intel i82543GC 1000BASE-X Ethernet",
623 	  WM_T_82543,		WMP_F_1000X },
624 
625 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
626 	  "Intel i82543GC 1000BASE-T Ethernet",
627 	  WM_T_82543,		WMP_F_1000T },
628 
629 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
630 	  "Intel i82544EI 1000BASE-T Ethernet",
631 	  WM_T_82544,		WMP_F_1000T },
632 
633 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
634 	  "Intel i82544EI 1000BASE-X Ethernet",
635 	  WM_T_82544,		WMP_F_1000X },
636 
637 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
638 	  "Intel i82544GC 1000BASE-T Ethernet",
639 	  WM_T_82544,		WMP_F_1000T },
640 
641 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
642 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
643 	  WM_T_82544,		WMP_F_1000T },
644 
645 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
646 	  "Intel i82540EM 1000BASE-T Ethernet",
647 	  WM_T_82540,		WMP_F_1000T },
648 
649 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
650 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
651 	  WM_T_82540,		WMP_F_1000T },
652 
653 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
654 	  "Intel i82540EP 1000BASE-T Ethernet",
655 	  WM_T_82540,		WMP_F_1000T },
656 
657 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
658 	  "Intel i82540EP 1000BASE-T Ethernet",
659 	  WM_T_82540,		WMP_F_1000T },
660 
661 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
662 	  "Intel i82540EP 1000BASE-T Ethernet",
663 	  WM_T_82540,		WMP_F_1000T },
664 
665 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
666 	  "Intel i82545EM 1000BASE-T Ethernet",
667 	  WM_T_82545,		WMP_F_1000T },
668 
669 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
670 	  "Intel i82545GM 1000BASE-T Ethernet",
671 	  WM_T_82545_3,		WMP_F_1000T },
672 
673 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
674 	  "Intel i82545GM 1000BASE-X Ethernet",
675 	  WM_T_82545_3,		WMP_F_1000X },
676 #if 0
677 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
678 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
679 	  WM_T_82545_3,		WMP_F_SERDES },
680 #endif
681 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
682 	  "Intel i82546EB 1000BASE-T Ethernet",
683 	  WM_T_82546,		WMP_F_1000T },
684 
685 	{ PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82546EB_QUAD,
686 	  "Intel i82546EB 1000BASE-T Ethernet",
687 	  WM_T_82546,		WMP_F_1000T },
688 
689 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
690 	  "Intel i82545EM 1000BASE-X Ethernet",
691 	  WM_T_82545,		WMP_F_1000X },
692 
693 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
694 	  "Intel i82546EB 1000BASE-X Ethernet",
695 	  WM_T_82546,		WMP_F_1000X },
696 
697 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
698 	  "Intel i82546GB 1000BASE-T Ethernet",
699 	  WM_T_82546_3,		WMP_F_1000T },
700 
701 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
702 	  "Intel i82546GB 1000BASE-X Ethernet",
703 	  WM_T_82546_3,		WMP_F_1000X },
704 #if 0
705 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
706 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
707 	  WM_T_82546_3,		WMP_F_SERDES },
708 #endif
709 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
710 	  "i82546GB quad-port Gigabit Ethernet",
711 	  WM_T_82546_3,		WMP_F_1000T },
712 
713 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
714 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
715 	  WM_T_82546_3,		WMP_F_1000T },
716 
717 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
718 	  "Intel PRO/1000MT (82546GB)",
719 	  WM_T_82546_3,		WMP_F_1000T },
720 
721 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
722 	  "Intel i82541EI 1000BASE-T Ethernet",
723 	  WM_T_82541,		WMP_F_1000T },
724 
725 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
726 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
727 	  WM_T_82541,		WMP_F_1000T },
728 
729 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
730 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
731 	  WM_T_82541,		WMP_F_1000T },
732 
733 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
734 	  "Intel i82541ER 1000BASE-T Ethernet",
735 	  WM_T_82541_2,		WMP_F_1000T },
736 
737 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
738 	  "Intel i82541GI 1000BASE-T Ethernet",
739 	  WM_T_82541_2,		WMP_F_1000T },
740 
741 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
742 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
743 	  WM_T_82541_2,		WMP_F_1000T },
744 
745 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
746 	  "Intel i82541PI 1000BASE-T Ethernet",
747 	  WM_T_82541_2,		WMP_F_1000T },
748 
749 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
750 	  "Intel i82547EI 1000BASE-T Ethernet",
751 	  WM_T_82547,		WMP_F_1000T },
752 
753 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
754 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
755 	  WM_T_82547,		WMP_F_1000T },
756 
757 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
758 	  "Intel i82547GI 1000BASE-T Ethernet",
759 	  WM_T_82547_2,		WMP_F_1000T },
760 
761 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
762 	  "Intel PRO/1000 PT (82571EB)",
763 	  WM_T_82571,		WMP_F_1000T },
764 
765 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
766 	  "Intel PRO/1000 PF (82571EB)",
767 	  WM_T_82571,		WMP_F_1000X },
768 #if 0
769 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
770 	  "Intel PRO/1000 PB (82571EB)",
771 	  WM_T_82571,		WMP_F_SERDES },
772 #endif
773 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
774 	  "Intel PRO/1000 QT (82571EB)",
775 	  WM_T_82571,		WMP_F_1000T },
776 
777 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
778 	  "Intel i82572EI 1000baseT Ethernet",
779 	  WM_T_82572,		WMP_F_1000T },
780 
781 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
782 	  "Intel� PRO/1000 PT Quad Port Server Adapter",
783 	  WM_T_82571,		WMP_F_1000T, },
784 
785 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
786 	  "Intel i82572EI 1000baseX Ethernet",
787 	  WM_T_82572,		WMP_F_1000X },
788 #if 0
789 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
790 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
791 	  WM_T_82572,		WMP_F_SERDES },
792 #endif
793 
794 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
795 	  "Intel i82572EI 1000baseT Ethernet",
796 	  WM_T_82572,		WMP_F_1000T },
797 
798 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
799 	  "Intel i82573E",
800 	  WM_T_82573,		WMP_F_1000T },
801 
802 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
803 	  "Intel i82573E IAMT",
804 	  WM_T_82573,		WMP_F_1000T },
805 
806 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
807 	  "Intel i82573L Gigabit Ethernet",
808 	  WM_T_82573,		WMP_F_1000T },
809 
810 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
811 	  "Intel i82574L",
812 	  WM_T_82574,		WMP_F_1000T },
813 
814 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
815 	  "i80003 dual 1000baseT Ethernet",
816 	  WM_T_80003,		WMP_F_1000T },
817 
818 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
819 	  "i80003 dual 1000baseX Ethernet",
820 	  WM_T_80003,		WMP_F_1000T },
821 #if 0
822 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
823 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
824 	  WM_T_80003,		WMP_F_SERDES },
825 #endif
826 
827 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
828 	  "Intel i80003 1000baseT Ethernet",
829 	  WM_T_80003,		WMP_F_1000T },
830 #if 0
831 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
832 	  "Intel i80003 Gigabit Ethernet (SERDES)",
833 	  WM_T_80003,		WMP_F_SERDES },
834 #endif
835 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
836 	  "Intel i82801H (M_AMT) LAN Controller",
837 	  WM_T_ICH8,		WMP_F_1000T },
838 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
839 	  "Intel i82801H (AMT) LAN Controller",
840 	  WM_T_ICH8,		WMP_F_1000T },
841 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
842 	  "Intel i82801H LAN Controller",
843 	  WM_T_ICH8,		WMP_F_1000T },
844 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
845 	  "Intel i82801H (IFE) LAN Controller",
846 	  WM_T_ICH8,		WMP_F_1000T },
847 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
848 	  "Intel i82801H (M) LAN Controller",
849 	  WM_T_ICH8,		WMP_F_1000T },
850 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
851 	  "Intel i82801H IFE (GT) LAN Controller",
852 	  WM_T_ICH8,		WMP_F_1000T },
853 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
854 	  "Intel i82801H IFE (G) LAN Controller",
855 	  WM_T_ICH8,		WMP_F_1000T },
856 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
857 	  "82801I (AMT) LAN Controller",
858 	  WM_T_ICH9,		WMP_F_1000T },
859 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
860 	  "82801I LAN Controller",
861 	  WM_T_ICH9,		WMP_F_1000T },
862 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
863 	  "82801I (G) LAN Controller",
864 	  WM_T_ICH9,		WMP_F_1000T },
865 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
866 	  "82801I (GT) LAN Controller",
867 	  WM_T_ICH9,		WMP_F_1000T },
868 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
869 	  "82801I (C) LAN Controller",
870 	  WM_T_ICH9,		WMP_F_1000T },
871 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
872 	  "82801I mobile LAN Controller",
873 	  WM_T_ICH9,		WMP_F_1000T },
874 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
875 	  "82801I mobile (V) LAN Controller",
876 	  WM_T_ICH9,		WMP_F_1000T },
877 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
878 	  "82801I mobile (AMT) LAN Controller",
879 	  WM_T_ICH9,		WMP_F_1000T },
880 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82567LM_3,
881 	  "82567LM-3 LAN Controller",
882 	  WM_T_ICH10,		WMP_F_1000T },
883 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82567LF_3,
884 	  "82567LF-3 LAN Controller",
885 	  WM_T_ICH10,		WMP_F_1000T },
886 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
887 	  "i82801J (LF) LAN Controller",
888 	  WM_T_ICH10,		WMP_F_1000T },
889 	{ 0,			0,
890 	  NULL,
891 	  0,			0 },
892 };
893 
894 #ifdef WM_EVENT_COUNTERS
895 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
896 #endif /* WM_EVENT_COUNTERS */
897 
898 #if 0 /* Not currently used */
899 static inline uint32_t
900 wm_io_read(struct wm_softc *sc, int reg)
901 {
902 
903 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
904 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
905 }
906 #endif
907 
908 static inline void
909 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
910 {
911 
912 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
913 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
914 }
915 
916 static inline void
917 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
918 {
919 	wa->wa_low = htole32(v & 0xffffffffU);
920 	if (sizeof(bus_addr_t) == 8)
921 		wa->wa_high = htole32((uint64_t) v >> 32);
922 	else
923 		wa->wa_high = 0;
924 }
925 
926 static const struct wm_product *
927 wm_lookup(const struct pci_attach_args *pa)
928 {
929 	const struct wm_product *wmp;
930 
931 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
932 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
933 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
934 			return (wmp);
935 	}
936 	return (NULL);
937 }
938 
939 static int
940 wm_match(device_t parent, cfdata_t cf, void *aux)
941 {
942 	struct pci_attach_args *pa = aux;
943 
944 	if (wm_lookup(pa) != NULL)
945 		return (1);
946 
947 	return (0);
948 }
949 
950 static void
951 wm_attach(device_t parent, device_t self, void *aux)
952 {
953 	struct wm_softc *sc = device_private(self);
954 	struct pci_attach_args *pa = aux;
955 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
956 	pci_chipset_tag_t pc = pa->pa_pc;
957 	pci_intr_handle_t ih;
958 	size_t cdata_size;
959 	const char *intrstr = NULL;
960 	const char *eetype, *xname;
961 	bus_space_tag_t memt;
962 	bus_space_handle_t memh;
963 	bus_dma_segment_t seg;
964 	int memh_valid;
965 	int i, rseg, error;
966 	const struct wm_product *wmp;
967 	prop_data_t ea;
968 	prop_number_t pn;
969 	uint8_t enaddr[ETHER_ADDR_LEN];
970 	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
971 	pcireg_t preg, memtype;
972 	uint32_t reg;
973 
974 	sc->sc_dev = self;
975 	callout_init(&sc->sc_tick_ch, 0);
976 
977 	wmp = wm_lookup(pa);
978 	if (wmp == NULL) {
979 		printf("\n");
980 		panic("wm_attach: impossible");
981 	}
982 
983 	sc->sc_pc = pa->pa_pc;
984 	sc->sc_pcitag = pa->pa_tag;
985 
986 	if (pci_dma64_available(pa))
987 		sc->sc_dmat = pa->pa_dmat64;
988 	else
989 		sc->sc_dmat = pa->pa_dmat;
990 
991 	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
992 	aprint_naive(": Ethernet controller\n");
993 	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);
994 
995 	sc->sc_type = wmp->wmp_type;
996 	if (sc->sc_type < WM_T_82543) {
997 		if (preg < 2) {
998 			aprint_error_dev(sc->sc_dev,
999 			    "i82542 must be at least rev. 2\n");
1000 			return;
1001 		}
1002 		if (preg < 3)
1003 			sc->sc_type = WM_T_82542_2_0;
1004 	}
1005 
1006 	/*
1007 	 * Map the device.  All devices support memory-mapped acccess,
1008 	 * and it is really required for normal operation.
1009 	 */
1010 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1011 	switch (memtype) {
1012 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1013 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1014 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1015 		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
1016 		break;
1017 	default:
1018 		memh_valid = 0;
1019 	}
1020 
1021 	if (memh_valid) {
1022 		sc->sc_st = memt;
1023 		sc->sc_sh = memh;
1024 	} else {
1025 		aprint_error_dev(sc->sc_dev,
1026 		    "unable to map device registers\n");
1027 		return;
1028 	}
1029 
1030 	/*
1031 	 * In addition, i82544 and later support I/O mapped indirect
1032 	 * register access.  It is not desirable (nor supported in
1033 	 * this driver) to use it for normal operation, though it is
1034 	 * required to work around bugs in some chip versions.
1035 	 */
1036 	if (sc->sc_type >= WM_T_82544) {
1037 		/* First we have to find the I/O BAR. */
1038 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1039 			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
1040 			    PCI_MAPREG_TYPE_IO)
1041 				break;
1042 		}
1043 		if (i == PCI_MAPREG_END)
1044 			aprint_error_dev(sc->sc_dev,
1045 			    "WARNING: unable to find I/O BAR\n");
1046 		else {
1047 			/*
1048 			 * The i8254x doesn't apparently respond when the
1049 			 * I/O BAR is 0, which looks somewhat like it's not
1050 			 * been configured.
1051 			 */
1052 			preg = pci_conf_read(pc, pa->pa_tag, i);
1053 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1054 				aprint_error_dev(sc->sc_dev,
1055 				    "WARNING: I/O BAR at zero.\n");
1056 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1057 					0, &sc->sc_iot, &sc->sc_ioh,
1058 					NULL, NULL) == 0) {
1059 				sc->sc_flags |= WM_F_IOH_VALID;
1060 			} else {
1061 				aprint_error_dev(sc->sc_dev,
1062 				    "WARNING: unable to map I/O space\n");
1063 			}
1064 		}
1065 
1066 	}
1067 
1068 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
1069 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1070 	preg |= PCI_COMMAND_MASTER_ENABLE;
1071 	if (sc->sc_type < WM_T_82542_2_1)
1072 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1073 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1074 
1075 	/* power up chip */
1076 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1077 	    NULL)) && error != EOPNOTSUPP) {
1078 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1079 		return;
1080 	}
1081 
1082 	/*
1083 	 * Map and establish our interrupt.
1084 	 */
1085 	if (pci_intr_map(pa, &ih)) {
1086 		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
1087 		return;
1088 	}
1089 	intrstr = pci_intr_string(pc, ih);
1090 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
1091 	if (sc->sc_ih == NULL) {
1092 		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
1093 		if (intrstr != NULL)
1094 			aprint_normal(" at %s", intrstr);
1095 		aprint_normal("\n");
1096 		return;
1097 	}
1098 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
1099 
1100 	/*
1101 	 * Determine a few things about the bus we're connected to.
1102 	 */
1103 	if (sc->sc_type < WM_T_82543) {
1104 		/* We don't really know the bus characteristics here. */
1105 		sc->sc_bus_speed = 33;
1106 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1107 		/*
1108 		 * CSA (Communication Streaming Architecture) is about as fast
1109 		 * a 32-bit 66MHz PCI Bus.
1110 		 */
1111 		sc->sc_flags |= WM_F_CSA;
1112 		sc->sc_bus_speed = 66;
1113 		aprint_verbose_dev(sc->sc_dev,
1114 		    "Communication Streaming Architecture\n");
1115 		if (sc->sc_type == WM_T_82547) {
1116 			callout_init(&sc->sc_txfifo_ch, 0);
1117 			callout_setfunc(&sc->sc_txfifo_ch,
1118 					wm_82547_txfifo_stall, sc);
1119 			aprint_verbose_dev(sc->sc_dev,
1120 			    "using 82547 Tx FIFO stall work-around\n");
1121 		}
1122 	} else if (sc->sc_type >= WM_T_82571) {
1123 		sc->sc_flags |= WM_F_PCIE;
1124 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1125 			&& (sc->sc_type != WM_T_ICH10))
1126 			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
1127 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1128 	} else {
1129 		reg = CSR_READ(sc, WMREG_STATUS);
1130 		if (reg & STATUS_BUS64)
1131 			sc->sc_flags |= WM_F_BUS64;
1132 		if (sc->sc_type >= WM_T_82544 &&
1133 		    (reg & STATUS_PCIX_MODE) != 0) {
1134 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1135 
1136 			sc->sc_flags |= WM_F_PCIX;
1137 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1138 					       PCI_CAP_PCIX,
1139 					       &sc->sc_pcix_offset, NULL) == 0)
1140 				aprint_error_dev(sc->sc_dev,
1141 				    "unable to find PCIX capability\n");
1142 			else if (sc->sc_type != WM_T_82545_3 &&
1143 				 sc->sc_type != WM_T_82546_3) {
1144 				/*
1145 				 * Work around a problem caused by the BIOS
1146 				 * setting the max memory read byte count
1147 				 * incorrectly.
1148 				 */
1149 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1150 				    sc->sc_pcix_offset + PCI_PCIX_CMD);
1151 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1152 				    sc->sc_pcix_offset + PCI_PCIX_STATUS);
1153 
1154 				bytecnt =
1155 				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
1156 				    PCI_PCIX_CMD_BYTECNT_SHIFT;
1157 				maxb =
1158 				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
1159 				    PCI_PCIX_STATUS_MAXB_SHIFT;
1160 				if (bytecnt > maxb) {
1161 					aprint_verbose_dev(sc->sc_dev,
1162 					    "resetting PCI-X MMRBC: %d -> %d\n",
1163 					    512 << bytecnt, 512 << maxb);
1164 					pcix_cmd = (pcix_cmd &
1165 					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
1166 					   (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
1167 					pci_conf_write(pa->pa_pc, pa->pa_tag,
1168 					    sc->sc_pcix_offset + PCI_PCIX_CMD,
1169 					    pcix_cmd);
1170 				}
1171 			}
1172 		}
1173 		/*
1174 		 * The quad port adapter is special; it has a PCIX-PCIX
1175 		 * bridge on the board, and can run the secondary bus at
1176 		 * a higher speed.
1177 		 */
1178 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1179 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1180 								      : 66;
1181 		} else if (sc->sc_flags & WM_F_PCIX) {
1182 			switch (reg & STATUS_PCIXSPD_MASK) {
1183 			case STATUS_PCIXSPD_50_66:
1184 				sc->sc_bus_speed = 66;
1185 				break;
1186 			case STATUS_PCIXSPD_66_100:
1187 				sc->sc_bus_speed = 100;
1188 				break;
1189 			case STATUS_PCIXSPD_100_133:
1190 				sc->sc_bus_speed = 133;
1191 				break;
1192 			default:
1193 				aprint_error_dev(sc->sc_dev,
1194 				    "unknown PCIXSPD %d; assuming 66MHz\n",
1195 				    reg & STATUS_PCIXSPD_MASK);
1196 				sc->sc_bus_speed = 66;
1197 			}
1198 		} else
1199 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1200 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1201 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1202 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1203 	}
1204 
1205 	/*
1206 	 * Allocate the control data structures, and create and load the
1207 	 * DMA map for it.
1208 	 *
1209 	 * NOTE: All Tx descriptors must be in the same 4G segment of
1210 	 * memory.  So must Rx descriptors.  We simplify by allocating
1211 	 * both sets within the same 4G segment.
1212 	 */
1213 	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1214 	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
1215 	cdata_size = sc->sc_type < WM_T_82544 ?
1216 	    sizeof(struct wm_control_data_82542) :
1217 	    sizeof(struct wm_control_data_82544);
1218 	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE,
1219 				      (bus_size_t) 0x100000000ULL,
1220 				      &seg, 1, &rseg, 0)) != 0) {
1221 		aprint_error_dev(sc->sc_dev,
1222 		    "unable to allocate control data, error = %d\n",
1223 		    error);
1224 		goto fail_0;
1225 	}
1226 
1227 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size,
1228 				    (void **)&sc->sc_control_data,
1229 				    BUS_DMA_COHERENT)) != 0) {
1230 		aprint_error_dev(sc->sc_dev,
1231 		    "unable to map control data, error = %d\n", error);
1232 		goto fail_1;
1233 	}
1234 
1235 	if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size,
1236 				       0, 0, &sc->sc_cddmamap)) != 0) {
1237 		aprint_error_dev(sc->sc_dev,
1238 		    "unable to create control data DMA map, error = %d\n",
1239 		    error);
1240 		goto fail_2;
1241 	}
1242 
1243 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1244 				     sc->sc_control_data, cdata_size, NULL,
1245 				     0)) != 0) {
1246 		aprint_error_dev(sc->sc_dev,
1247 		    "unable to load control data DMA map, error = %d\n",
1248 		    error);
1249 		goto fail_3;
1250 	}
1251 
1252 
1253 	/*
1254 	 * Create the transmit buffer DMA maps.
1255 	 */
1256 	WM_TXQUEUELEN(sc) =
1257 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1258 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1259 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1260 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1261 					       WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1262 					  &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1263 			aprint_error_dev(sc->sc_dev,
1264 			    "unable to create Tx DMA map %d, error = %d\n",
1265 			    i, error);
1266 			goto fail_4;
1267 		}
1268 	}
1269 
1270 	/*
1271 	 * Create the receive buffer DMA maps.
1272 	 */
1273 	for (i = 0; i < WM_NRXDESC; i++) {
1274 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1275 					       MCLBYTES, 0, 0,
1276 					  &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1277 			aprint_error_dev(sc->sc_dev,
1278 			    "unable to create Rx DMA map %d error = %d\n",
1279 			    i, error);
1280 			goto fail_5;
1281 		}
1282 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
1283 	}
1284 
1285 	/* clear interesting stat counters */
1286 	CSR_READ(sc, WMREG_COLC);
1287 	CSR_READ(sc, WMREG_RXERRC);
1288 
1289 	/*
1290 	 * Reset the chip to a known state.
1291 	 */
1292 	wm_reset(sc);
1293 
1294 	switch (sc->sc_type) {
1295 	case WM_T_82571:
1296 	case WM_T_82572:
1297 	case WM_T_82573:
1298 	case WM_T_82574:
1299 	case WM_T_80003:
1300 	case WM_T_ICH8:
1301 	case WM_T_ICH9:
1302 	case WM_T_ICH10:
1303 		if (wm_check_mng_mode(sc) != 0)
1304 			wm_get_hw_control(sc);
1305 		break;
1306 	default:
1307 		break;
1308 	}
1309 
1310 	/*
1311 	 * Get some information about the EEPROM.
1312 	 */
1313 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
1314 	    || (sc->sc_type == WM_T_ICH10)) {
1315 		uint32_t flash_size;
1316 		sc->sc_flags |= WM_F_SWFWHW_SYNC | WM_F_EEPROM_FLASH;
1317 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1318 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1319 		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1320 			aprint_error_dev(sc->sc_dev,
1321 			    "can't map FLASH registers\n");
1322 			return;
1323 		}
1324 		flash_size = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1325 		sc->sc_ich8_flash_base = (flash_size & ICH_GFPREG_BASE_MASK) *
1326 						ICH_FLASH_SECTOR_SIZE;
1327 		sc->sc_ich8_flash_bank_size =
1328 			((flash_size >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1329 		sc->sc_ich8_flash_bank_size -=
1330 			(flash_size & ICH_GFPREG_BASE_MASK);
1331 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1332 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1333 	} else if (sc->sc_type == WM_T_80003)
1334 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR |  WM_F_SWFW_SYNC;
1335 	else if (sc->sc_type == WM_T_82573)
1336 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1337 	else if (sc->sc_type == WM_T_82574)
1338 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1339 	else if (sc->sc_type > WM_T_82544)
1340 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1341 
1342 	if (sc->sc_type <= WM_T_82544)
1343 		sc->sc_ee_addrbits = 6;
1344 	else if (sc->sc_type <= WM_T_82546_3) {
1345 		reg = CSR_READ(sc, WMREG_EECD);
1346 		if (reg & EECD_EE_SIZE)
1347 			sc->sc_ee_addrbits = 8;
1348 		else
1349 			sc->sc_ee_addrbits = 6;
1350 	} else if (sc->sc_type <= WM_T_82547_2) {
1351 		reg = CSR_READ(sc, WMREG_EECD);
1352 		if (reg & EECD_EE_TYPE) {
1353 			sc->sc_flags |= WM_F_EEPROM_SPI;
1354 			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
1355 		} else
1356 			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
1357 	} else if ((sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574) &&
1358 	    (wm_is_onboard_nvm_eeprom(sc) == 0)) {
1359 		sc->sc_flags |= WM_F_EEPROM_FLASH;
1360 	} else {
1361 		/* Assume everything else is SPI. */
1362 		reg = CSR_READ(sc, WMREG_EECD);
1363 		sc->sc_flags |= WM_F_EEPROM_SPI;
1364 		sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
1365 	}
1366 
1367 	/*
1368 	 * Defer printing the EEPROM type until after verifying the checksum
1369 	 * This allows the EEPROM type to be printed correctly in the case
1370 	 * that no EEPROM is attached.
1371 	 */
1372 
1373 	/*
1374 	 * Validate the EEPROM checksum. If the checksum fails, flag this for
1375 	 * later, so we can fail future reads from the EEPROM.
1376 	 */
1377 	if (wm_validate_eeprom_checksum(sc)) {
1378 		/*
1379 		 * Read twice again because some PCI-e parts fail the first
1380 		 * check due to the link being in sleep state.
1381 		 */
1382 		if (wm_validate_eeprom_checksum(sc))
1383 			sc->sc_flags |= WM_F_EEPROM_INVALID;
1384 	}
1385 
1386 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
1387 		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1388 	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1389 		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1390 	} else {
1391 		if (sc->sc_flags & WM_F_EEPROM_SPI)
1392 			eetype = "SPI";
1393 		else
1394 			eetype = "MicroWire";
1395 		aprint_verbose_dev(sc->sc_dev,
1396 		    "%u word (%d address bits) %s EEPROM\n",
1397 		    1U << sc->sc_ee_addrbits,
1398 		    sc->sc_ee_addrbits, eetype);
1399 	}
1400 
1401 	/*
1402 	 * Read the Ethernet address from the EEPROM, if not first found
1403 	 * in device properties.
1404 	 */
1405 	ea = prop_dictionary_get(device_properties(sc->sc_dev), "mac-addr");
1406 	if (ea != NULL) {
1407 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1408 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1409 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1410 	} else {
1411 		if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
1412 		    sizeof(myea) / sizeof(myea[0]), myea)) {
1413 			aprint_error_dev(sc->sc_dev,
1414 			    "unable to read Ethernet address\n");
1415 			return;
1416 		}
1417 		enaddr[0] = myea[0] & 0xff;
1418 		enaddr[1] = myea[0] >> 8;
1419 		enaddr[2] = myea[1] & 0xff;
1420 		enaddr[3] = myea[1] >> 8;
1421 		enaddr[4] = myea[2] & 0xff;
1422 		enaddr[5] = myea[2] >> 8;
1423 	}
1424 
1425 	/*
1426 	 * Toggle the LSB of the MAC address on the second port
1427 	 * of the dual port controller.
1428 	 */
1429 	if (sc->sc_type == WM_T_82546 || sc->sc_type == WM_T_82546_3
1430 	    || sc->sc_type ==  WM_T_82571 || sc->sc_type == WM_T_80003) {
1431 		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
1432 			enaddr[5] ^= 1;
1433 	}
1434 
1435 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1436 	    ether_sprintf(enaddr));
1437 
1438 	/*
1439 	 * Read the config info from the EEPROM, and set up various
1440 	 * bits in the control registers based on their contents.
1441 	 */
1442 	pn = prop_dictionary_get(device_properties(sc->sc_dev),
1443 				 "i82543-cfg1");
1444 	if (pn != NULL) {
1445 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1446 		cfg1 = (uint16_t) prop_number_integer_value(pn);
1447 	} else {
1448 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1449 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1450 			return;
1451 		}
1452 	}
1453 
1454 	pn = prop_dictionary_get(device_properties(sc->sc_dev),
1455 				 "i82543-cfg2");
1456 	if (pn != NULL) {
1457 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1458 		cfg2 = (uint16_t) prop_number_integer_value(pn);
1459 	} else {
1460 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1461 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1462 			return;
1463 		}
1464 	}
1465 
1466 	if (sc->sc_type >= WM_T_82544) {
1467 		pn = prop_dictionary_get(device_properties(sc->sc_dev),
1468 					 "i82543-swdpin");
1469 		if (pn != NULL) {
1470 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1471 			swdpin = (uint16_t) prop_number_integer_value(pn);
1472 		} else {
1473 			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1474 				aprint_error_dev(sc->sc_dev,
1475 				    "unable to read SWDPIN\n");
1476 				return;
1477 			}
1478 		}
1479 	}
1480 
1481 	if (cfg1 & EEPROM_CFG1_ILOS)
1482 		sc->sc_ctrl |= CTRL_ILOS;
1483 	if (sc->sc_type >= WM_T_82544) {
1484 		sc->sc_ctrl |=
1485 		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1486 		    CTRL_SWDPIO_SHIFT;
1487 		sc->sc_ctrl |=
1488 		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1489 		    CTRL_SWDPINS_SHIFT;
1490 	} else {
1491 		sc->sc_ctrl |=
1492 		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1493 		    CTRL_SWDPIO_SHIFT;
1494 	}
1495 
1496 #if 0
1497 	if (sc->sc_type >= WM_T_82544) {
1498 		if (cfg1 & EEPROM_CFG1_IPS0)
1499 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1500 		if (cfg1 & EEPROM_CFG1_IPS1)
1501 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1502 		sc->sc_ctrl_ext |=
1503 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1504 		    CTRL_EXT_SWDPIO_SHIFT;
1505 		sc->sc_ctrl_ext |=
1506 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1507 		    CTRL_EXT_SWDPINS_SHIFT;
1508 	} else {
1509 		sc->sc_ctrl_ext |=
1510 		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1511 		    CTRL_EXT_SWDPIO_SHIFT;
1512 	}
1513 #endif
1514 
1515 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1516 #if 0
1517 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1518 #endif
1519 
1520 	/*
1521 	 * Set up some register offsets that are different between
1522 	 * the i82542 and the i82543 and later chips.
1523 	 */
1524 	if (sc->sc_type < WM_T_82543) {
1525 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
1526 		sc->sc_tdt_reg = WMREG_OLD_TDT;
1527 	} else {
1528 		sc->sc_rdt_reg = WMREG_RDT;
1529 		sc->sc_tdt_reg = WMREG_TDT;
1530 	}
1531 
1532 	/*
1533 	 * Determine if we're TBI or GMII mode, and initialize the
1534 	 * media structures accordingly.
1535 	 */
1536 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1537 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_82573
1538 	    || sc->sc_type == WM_T_82574) {
1539 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
1540 		wm_gmii_mediainit(sc);
1541 	} else if (sc->sc_type < WM_T_82543 ||
1542 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1543 		if (wmp->wmp_flags & WMP_F_1000T)
1544 			aprint_error_dev(sc->sc_dev,
1545 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
1546 		wm_tbi_mediainit(sc);
1547 	} else {
1548 		if (wmp->wmp_flags & WMP_F_1000X)
1549 			aprint_error_dev(sc->sc_dev,
1550 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
1551 		wm_gmii_mediainit(sc);
1552 	}
1553 
1554 	ifp = &sc->sc_ethercom.ec_if;
1555 	xname = device_xname(sc->sc_dev);
1556 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
1557 	ifp->if_softc = sc;
1558 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1559 	ifp->if_ioctl = wm_ioctl;
1560 	ifp->if_start = wm_start;
1561 	ifp->if_watchdog = wm_watchdog;
1562 	ifp->if_init = wm_init;
1563 	ifp->if_stop = wm_stop;
1564 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1565 	IFQ_SET_READY(&ifp->if_snd);
1566 
1567 	if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_82574 &&
1568 	    sc->sc_type != WM_T_ICH8)
1569 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1570 
1571 	/*
1572 	 * If we're a i82543 or greater, we can support VLANs.
1573 	 */
1574 	if (sc->sc_type >= WM_T_82543)
1575 		sc->sc_ethercom.ec_capabilities |=
1576 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
1577 
1578 	/*
1579 	 * We can perform TCPv4 and UDPv4 checkums in-bound.  Only
1580 	 * on i82543 and later.
1581 	 */
1582 	if (sc->sc_type >= WM_T_82543) {
1583 		ifp->if_capabilities |=
1584 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1585 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1586 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1587 		    IFCAP_CSUM_TCPv6_Tx |
1588 		    IFCAP_CSUM_UDPv6_Tx;
1589 	}
1590 
1591 	/*
1592 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
1593 	 *
1594 	 *	82541GI (8086:1076) ... no
1595 	 *	82572EI (8086:10b9) ... yes
1596 	 */
1597 	if (sc->sc_type >= WM_T_82571) {
1598 		ifp->if_capabilities |=
1599 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1600 	}
1601 
1602 	/*
1603 	 * If we're a i82544 or greater (except i82547), we can do
1604 	 * TCP segmentation offload.
1605 	 */
1606 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
1607 		ifp->if_capabilities |= IFCAP_TSOv4;
1608 	}
1609 
1610 	if (sc->sc_type >= WM_T_82571) {
1611 		ifp->if_capabilities |= IFCAP_TSOv6;
1612 	}
1613 
1614 	/*
1615 	 * Attach the interface.
1616 	 */
1617 	if_attach(ifp);
1618 	ether_ifattach(ifp, enaddr);
1619 #if NRND > 0
1620 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
1621 #endif
1622 
1623 #ifdef WM_EVENT_COUNTERS
1624 	/* Attach event counters. */
1625 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1626 	    NULL, xname, "txsstall");
1627 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1628 	    NULL, xname, "txdstall");
1629 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1630 	    NULL, xname, "txfifo_stall");
1631 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1632 	    NULL, xname, "txdw");
1633 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1634 	    NULL, xname, "txqe");
1635 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1636 	    NULL, xname, "rxintr");
1637 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
1638 	    NULL, xname, "linkintr");
1639 
1640 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1641 	    NULL, xname, "rxipsum");
1642 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
1643 	    NULL, xname, "rxtusum");
1644 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1645 	    NULL, xname, "txipsum");
1646 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
1647 	    NULL, xname, "txtusum");
1648 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
1649 	    NULL, xname, "txtusum6");
1650 
1651 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
1652 	    NULL, xname, "txtso");
1653 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
1654 	    NULL, xname, "txtso6");
1655 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
1656 	    NULL, xname, "txtsopain");
1657 
1658 	for (i = 0; i < WM_NTXSEGS; i++) {
1659 		sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
1660 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
1661 		    NULL, xname, wm_txseg_evcnt_names[i]);
1662 	}
1663 
1664 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
1665 	    NULL, xname, "txdrop");
1666 
1667 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
1668 	    NULL, xname, "tu");
1669 
1670 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
1671 	    NULL, xname, "tx_xoff");
1672 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
1673 	    NULL, xname, "tx_xon");
1674 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
1675 	    NULL, xname, "rx_xoff");
1676 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
1677 	    NULL, xname, "rx_xon");
1678 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
1679 	    NULL, xname, "rx_macctl");
1680 #endif /* WM_EVENT_COUNTERS */
1681 
1682 	if (!pmf_device_register(self, NULL, NULL))
1683 		aprint_error_dev(self, "couldn't establish power handler\n");
1684 	else
1685 		pmf_class_network_register(self, ifp);
1686 
1687 	return;
1688 
1689 	/*
1690 	 * Free any resources we've allocated during the failed attach
1691 	 * attempt.  Do this in reverse order and fall through.
1692 	 */
1693  fail_5:
1694 	for (i = 0; i < WM_NRXDESC; i++) {
1695 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
1696 			bus_dmamap_destroy(sc->sc_dmat,
1697 			    sc->sc_rxsoft[i].rxs_dmamap);
1698 	}
1699  fail_4:
1700 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1701 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
1702 			bus_dmamap_destroy(sc->sc_dmat,
1703 			    sc->sc_txsoft[i].txs_dmamap);
1704 	}
1705 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
1706  fail_3:
1707 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
1708  fail_2:
1709 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
1710 	    cdata_size);
1711  fail_1:
1712 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1713  fail_0:
1714 	return;
1715 }
1716 
1717 /*
1718  * wm_tx_offload:
1719  *
1720  *	Set up TCP/IP checksumming parameters for the
1721  *	specified packet.
1722  */
1723 static int
1724 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
1725     uint8_t *fieldsp)
1726 {
1727 	struct mbuf *m0 = txs->txs_mbuf;
1728 	struct livengood_tcpip_ctxdesc *t;
1729 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
1730 	uint32_t ipcse;
1731 	struct ether_header *eh;
1732 	int offset, iphl;
1733 	uint8_t fields;
1734 
1735 	/*
1736 	 * XXX It would be nice if the mbuf pkthdr had offset
1737 	 * fields for the protocol headers.
1738 	 */
1739 
1740 	eh = mtod(m0, struct ether_header *);
1741 	switch (htons(eh->ether_type)) {
1742 	case ETHERTYPE_IP:
1743 	case ETHERTYPE_IPV6:
1744 		offset = ETHER_HDR_LEN;
1745 		break;
1746 
1747 	case ETHERTYPE_VLAN:
1748 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1749 		break;
1750 
1751 	default:
1752 		/*
1753 		 * Don't support this protocol or encapsulation.
1754 		 */
1755 		*fieldsp = 0;
1756 		*cmdp = 0;
1757 		return (0);
1758 	}
1759 
1760 	if ((m0->m_pkthdr.csum_flags &
1761 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
1762 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
1763 	} else {
1764 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
1765 	}
1766 	ipcse = offset + iphl - 1;
1767 
1768 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
1769 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
1770 	seg = 0;
1771 	fields = 0;
1772 
1773 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
1774 		int hlen = offset + iphl;
1775 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
1776 
1777 		if (__predict_false(m0->m_len <
1778 				    (hlen + sizeof(struct tcphdr)))) {
1779 			/*
1780 			 * TCP/IP headers are not in the first mbuf; we need
1781 			 * to do this the slow and painful way.  Let's just
1782 			 * hope this doesn't happen very often.
1783 			 */
1784 			struct tcphdr th;
1785 
1786 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
1787 
1788 			m_copydata(m0, hlen, sizeof(th), &th);
1789 			if (v4) {
1790 				struct ip ip;
1791 
1792 				m_copydata(m0, offset, sizeof(ip), &ip);
1793 				ip.ip_len = 0;
1794 				m_copyback(m0,
1795 				    offset + offsetof(struct ip, ip_len),
1796 				    sizeof(ip.ip_len), &ip.ip_len);
1797 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
1798 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
1799 			} else {
1800 				struct ip6_hdr ip6;
1801 
1802 				m_copydata(m0, offset, sizeof(ip6), &ip6);
1803 				ip6.ip6_plen = 0;
1804 				m_copyback(m0,
1805 				    offset + offsetof(struct ip6_hdr, ip6_plen),
1806 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
1807 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
1808 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
1809 			}
1810 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
1811 			    sizeof(th.th_sum), &th.th_sum);
1812 
1813 			hlen += th.th_off << 2;
1814 		} else {
1815 			/*
1816 			 * TCP/IP headers are in the first mbuf; we can do
1817 			 * this the easy way.
1818 			 */
1819 			struct tcphdr *th;
1820 
1821 			if (v4) {
1822 				struct ip *ip =
1823 				    (void *)(mtod(m0, char *) + offset);
1824 				th = (void *)(mtod(m0, char *) + hlen);
1825 
1826 				ip->ip_len = 0;
1827 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
1828 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1829 			} else {
1830 				struct ip6_hdr *ip6 =
1831 				    (void *)(mtod(m0, char *) + offset);
1832 				th = (void *)(mtod(m0, char *) + hlen);
1833 
1834 				ip6->ip6_plen = 0;
1835 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
1836 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
1837 			}
1838 			hlen += th->th_off << 2;
1839 		}
1840 
1841 		if (v4) {
1842 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
1843 			cmdlen |= WTX_TCPIP_CMD_IP;
1844 		} else {
1845 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
1846 			ipcse = 0;
1847 		}
1848 		cmd |= WTX_TCPIP_CMD_TSE;
1849 		cmdlen |= WTX_TCPIP_CMD_TSE |
1850 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
1851 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
1852 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
1853 	}
1854 
1855 	/*
1856 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
1857 	 * offload feature, if we load the context descriptor, we
1858 	 * MUST provide valid values for IPCSS and TUCSS fields.
1859 	 */
1860 
1861 	ipcs = WTX_TCPIP_IPCSS(offset) |
1862 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1863 	    WTX_TCPIP_IPCSE(ipcse);
1864 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
1865 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
1866 		fields |= WTX_IXSM;
1867 	}
1868 
1869 	offset += iphl;
1870 
1871 	if (m0->m_pkthdr.csum_flags &
1872 	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
1873 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
1874 		fields |= WTX_TXSM;
1875 		tucs = WTX_TCPIP_TUCSS(offset) |
1876 		    WTX_TCPIP_TUCSO(offset +
1877 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
1878 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
1879 	} else if ((m0->m_pkthdr.csum_flags &
1880 	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
1881 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
1882 		fields |= WTX_TXSM;
1883 		tucs = WTX_TCPIP_TUCSS(offset) |
1884 		    WTX_TCPIP_TUCSO(offset +
1885 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
1886 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
1887 	} else {
1888 		/* Just initialize it to a valid TCP context. */
1889 		tucs = WTX_TCPIP_TUCSS(offset) |
1890 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
1891 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
1892 	}
1893 
1894 	/* Fill in the context descriptor. */
1895 	t = (struct livengood_tcpip_ctxdesc *)
1896 	    &sc->sc_txdescs[sc->sc_txnext];
1897 	t->tcpip_ipcs = htole32(ipcs);
1898 	t->tcpip_tucs = htole32(tucs);
1899 	t->tcpip_cmdlen = htole32(cmdlen);
1900 	t->tcpip_seg = htole32(seg);
1901 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
1902 
1903 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
1904 	txs->txs_ndesc++;
1905 
1906 	*cmdp = cmd;
1907 	*fieldsp = fields;
1908 
1909 	return (0);
1910 }
1911 
1912 static void
1913 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
1914 {
1915 	struct mbuf *m;
1916 	int i;
1917 
1918 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
1919 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
1920 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
1921 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
1922 		    m->m_data, m->m_len, m->m_flags);
1923 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
1924 	    i, i == 1 ? "" : "s");
1925 }
1926 
1927 /*
1928  * wm_82547_txfifo_stall:
1929  *
1930  *	Callout used to wait for the 82547 Tx FIFO to drain,
1931  *	reset the FIFO pointers, and restart packet transmission.
1932  */
1933 static void
1934 wm_82547_txfifo_stall(void *arg)
1935 {
1936 	struct wm_softc *sc = arg;
1937 	int s;
1938 
1939 	s = splnet();
1940 
1941 	if (sc->sc_txfifo_stall) {
1942 		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
1943 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
1944 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
1945 			/*
1946 			 * Packets have drained.  Stop transmitter, reset
1947 			 * FIFO pointers, restart transmitter, and kick
1948 			 * the packet queue.
1949 			 */
1950 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
1951 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
1952 			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
1953 			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
1954 			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
1955 			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
1956 			CSR_WRITE(sc, WMREG_TCTL, tctl);
1957 			CSR_WRITE_FLUSH(sc);
1958 
1959 			sc->sc_txfifo_head = 0;
1960 			sc->sc_txfifo_stall = 0;
1961 			wm_start(&sc->sc_ethercom.ec_if);
1962 		} else {
1963 			/*
1964 			 * Still waiting for packets to drain; try again in
1965 			 * another tick.
1966 			 */
1967 			callout_schedule(&sc->sc_txfifo_ch, 1);
1968 		}
1969 	}
1970 
1971 	splx(s);
1972 }
1973 
1974 /*
1975  * wm_82547_txfifo_bugchk:
1976  *
1977  *	Check for bug condition in the 82547 Tx FIFO.  We need to
1978  *	prevent enqueueing a packet that would wrap around the end
1979  *	if the Tx FIFO ring buffer, otherwise the chip will croak.
1980  *
1981  *	We do this by checking the amount of space before the end
1982  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
1983  *	the Tx FIFO, wait for all remaining packets to drain, reset
1984  *	the internal FIFO pointers to the beginning, and restart
1985  *	transmission on the interface.
1986  */
1987 #define	WM_FIFO_HDR		0x10
1988 #define	WM_82547_PAD_LEN	0x3e0
1989 static int
1990 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
1991 {
1992 	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
1993 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
1994 
1995 	/* Just return if already stalled. */
1996 	if (sc->sc_txfifo_stall)
1997 		return (1);
1998 
1999 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
2000 		/* Stall only occurs in half-duplex mode. */
2001 		goto send_packet;
2002 	}
2003 
2004 	if (len >= WM_82547_PAD_LEN + space) {
2005 		sc->sc_txfifo_stall = 1;
2006 		callout_schedule(&sc->sc_txfifo_ch, 1);
2007 		return (1);
2008 	}
2009 
2010  send_packet:
2011 	sc->sc_txfifo_head += len;
2012 	if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2013 		sc->sc_txfifo_head -= sc->sc_txfifo_size;
2014 
2015 	return (0);
2016 }
2017 
2018 /*
2019  * wm_start:		[ifnet interface function]
2020  *
2021  *	Start packet transmission on the interface.
2022  */
2023 static void
2024 wm_start(struct ifnet *ifp)
2025 {
2026 	struct wm_softc *sc = ifp->if_softc;
2027 	struct mbuf *m0;
2028 	struct m_tag *mtag;
2029 	struct wm_txsoft *txs;
2030 	bus_dmamap_t dmamap;
2031 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2032 	bus_addr_t curaddr;
2033 	bus_size_t seglen, curlen;
2034 	uint32_t cksumcmd;
2035 	uint8_t cksumfields;
2036 
2037 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2038 		return;
2039 
2040 	/*
2041 	 * Remember the previous number of free descriptors.
2042 	 */
2043 	ofree = sc->sc_txfree;
2044 
2045 	/*
2046 	 * Loop through the send queue, setting up transmit descriptors
2047 	 * until we drain the queue, or use up all available transmit
2048 	 * descriptors.
2049 	 */
2050 	for (;;) {
2051 		/* Grab a packet off the queue. */
2052 		IFQ_POLL(&ifp->if_snd, m0);
2053 		if (m0 == NULL)
2054 			break;
2055 
2056 		DPRINTF(WM_DEBUG_TX,
2057 		    ("%s: TX: have packet to transmit: %p\n",
2058 		    device_xname(sc->sc_dev), m0));
2059 
2060 		/* Get a work queue entry. */
2061 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2062 			wm_txintr(sc);
2063 			if (sc->sc_txsfree == 0) {
2064 				DPRINTF(WM_DEBUG_TX,
2065 				    ("%s: TX: no free job descriptors\n",
2066 					device_xname(sc->sc_dev)));
2067 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2068 				break;
2069 			}
2070 		}
2071 
2072 		txs = &sc->sc_txsoft[sc->sc_txsnext];
2073 		dmamap = txs->txs_dmamap;
2074 
2075 		use_tso = (m0->m_pkthdr.csum_flags &
2076 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2077 
2078 		/*
2079 		 * So says the Linux driver:
2080 		 * The controller does a simple calculation to make sure
2081 		 * there is enough room in the FIFO before initiating the
2082 		 * DMA for each buffer.  The calc is:
2083 		 *	4 = ceil(buffer len / MSS)
2084 		 * To make sure we don't overrun the FIFO, adjust the max
2085 		 * buffer len if the MSS drops.
2086 		 */
2087 		dmamap->dm_maxsegsz =
2088 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2089 		    ? m0->m_pkthdr.segsz << 2
2090 		    : WTX_MAX_LEN;
2091 
2092 		/*
2093 		 * Load the DMA map.  If this fails, the packet either
2094 		 * didn't fit in the allotted number of segments, or we
2095 		 * were short on resources.  For the too-many-segments
2096 		 * case, we simply report an error and drop the packet,
2097 		 * since we can't sanely copy a jumbo packet to a single
2098 		 * buffer.
2099 		 */
2100 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2101 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2102 		if (error) {
2103 			if (error == EFBIG) {
2104 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2105 				log(LOG_ERR, "%s: Tx packet consumes too many "
2106 				    "DMA segments, dropping...\n",
2107 				    device_xname(sc->sc_dev));
2108 				IFQ_DEQUEUE(&ifp->if_snd, m0);
2109 				wm_dump_mbuf_chain(sc, m0);
2110 				m_freem(m0);
2111 				continue;
2112 			}
2113 			/*
2114 			 * Short on resources, just stop for now.
2115 			 */
2116 			DPRINTF(WM_DEBUG_TX,
2117 			    ("%s: TX: dmamap load failed: %d\n",
2118 			    device_xname(sc->sc_dev), error));
2119 			break;
2120 		}
2121 
2122 		segs_needed = dmamap->dm_nsegs;
2123 		if (use_tso) {
2124 			/* For sentinel descriptor; see below. */
2125 			segs_needed++;
2126 		}
2127 
2128 		/*
2129 		 * Ensure we have enough descriptors free to describe
2130 		 * the packet.  Note, we always reserve one descriptor
2131 		 * at the end of the ring due to the semantics of the
2132 		 * TDT register, plus one more in the event we need
2133 		 * to load offload context.
2134 		 */
2135 		if (segs_needed > sc->sc_txfree - 2) {
2136 			/*
2137 			 * Not enough free descriptors to transmit this
2138 			 * packet.  We haven't committed anything yet,
2139 			 * so just unload the DMA map, put the packet
2140 			 * pack on the queue, and punt.  Notify the upper
2141 			 * layer that there are no more slots left.
2142 			 */
2143 			DPRINTF(WM_DEBUG_TX,
2144 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
2145 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
2146 			    segs_needed, sc->sc_txfree - 1));
2147 			ifp->if_flags |= IFF_OACTIVE;
2148 			bus_dmamap_unload(sc->sc_dmat, dmamap);
2149 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2150 			break;
2151 		}
2152 
2153 		/*
2154 		 * Check for 82547 Tx FIFO bug.  We need to do this
2155 		 * once we know we can transmit the packet, since we
2156 		 * do some internal FIFO space accounting here.
2157 		 */
2158 		if (sc->sc_type == WM_T_82547 &&
2159 		    wm_82547_txfifo_bugchk(sc, m0)) {
2160 			DPRINTF(WM_DEBUG_TX,
2161 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
2162 			    device_xname(sc->sc_dev)));
2163 			ifp->if_flags |= IFF_OACTIVE;
2164 			bus_dmamap_unload(sc->sc_dmat, dmamap);
2165 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2166 			break;
2167 		}
2168 
2169 		IFQ_DEQUEUE(&ifp->if_snd, m0);
2170 
2171 		/*
2172 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2173 		 */
2174 
2175 		DPRINTF(WM_DEBUG_TX,
2176 		    ("%s: TX: packet has %d (%d) DMA segments\n",
2177 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2178 
2179 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2180 
2181 		/*
2182 		 * Store a pointer to the packet so that we can free it
2183 		 * later.
2184 		 *
2185 		 * Initially, we consider the number of descriptors the
2186 		 * packet uses the number of DMA segments.  This may be
2187 		 * incremented by 1 if we do checksum offload (a descriptor
2188 		 * is used to set the checksum context).
2189 		 */
2190 		txs->txs_mbuf = m0;
2191 		txs->txs_firstdesc = sc->sc_txnext;
2192 		txs->txs_ndesc = segs_needed;
2193 
2194 		/* Set up offload parameters for this packet. */
2195 		if (m0->m_pkthdr.csum_flags &
2196 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
2197 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2198 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2199 			if (wm_tx_offload(sc, txs, &cksumcmd,
2200 					  &cksumfields) != 0) {
2201 				/* Error message already displayed. */
2202 				bus_dmamap_unload(sc->sc_dmat, dmamap);
2203 				continue;
2204 			}
2205 		} else {
2206 			cksumcmd = 0;
2207 			cksumfields = 0;
2208 		}
2209 
2210 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2211 
2212 		/* Sync the DMA map. */
2213 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2214 		    BUS_DMASYNC_PREWRITE);
2215 
2216 		/*
2217 		 * Initialize the transmit descriptor.
2218 		 */
2219 		for (nexttx = sc->sc_txnext, seg = 0;
2220 		     seg < dmamap->dm_nsegs; seg++) {
2221 			for (seglen = dmamap->dm_segs[seg].ds_len,
2222 			     curaddr = dmamap->dm_segs[seg].ds_addr;
2223 			     seglen != 0;
2224 			     curaddr += curlen, seglen -= curlen,
2225 			     nexttx = WM_NEXTTX(sc, nexttx)) {
2226 				curlen = seglen;
2227 
2228 				/*
2229 				 * So says the Linux driver:
2230 				 * Work around for premature descriptor
2231 				 * write-backs in TSO mode.  Append a
2232 				 * 4-byte sentinel descriptor.
2233 				 */
2234 				if (use_tso &&
2235 				    seg == dmamap->dm_nsegs - 1 &&
2236 				    curlen > 8)
2237 					curlen -= 4;
2238 
2239 				wm_set_dma_addr(
2240 				    &sc->sc_txdescs[nexttx].wtx_addr,
2241 				    curaddr);
2242 				sc->sc_txdescs[nexttx].wtx_cmdlen =
2243 				    htole32(cksumcmd | curlen);
2244 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2245 				    0;
2246 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2247 				    cksumfields;
2248 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2249 				lasttx = nexttx;
2250 
2251 				DPRINTF(WM_DEBUG_TX,
2252 				    ("%s: TX: desc %d: low 0x%08lx, "
2253 				     "len 0x%04x\n",
2254 				    device_xname(sc->sc_dev), nexttx,
2255 				    curaddr & 0xffffffffUL, (unsigned)curlen));
2256 			}
2257 		}
2258 
2259 		KASSERT(lasttx != -1);
2260 
2261 		/*
2262 		 * Set up the command byte on the last descriptor of
2263 		 * the packet.  If we're in the interrupt delay window,
2264 		 * delay the interrupt.
2265 		 */
2266 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
2267 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
2268 
2269 		/*
2270 		 * If VLANs are enabled and the packet has a VLAN tag, set
2271 		 * up the descriptor to encapsulate the packet for us.
2272 		 *
2273 		 * This is only valid on the last descriptor of the packet.
2274 		 */
2275 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2276 			sc->sc_txdescs[lasttx].wtx_cmdlen |=
2277 			    htole32(WTX_CMD_VLE);
2278 			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2279 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2280 		}
2281 
2282 		txs->txs_lastdesc = lasttx;
2283 
2284 		DPRINTF(WM_DEBUG_TX,
2285 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
2286 		    device_xname(sc->sc_dev),
2287 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2288 
2289 		/* Sync the descriptors we're using. */
2290 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2291 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2292 
2293 		/* Give the packet to the chip. */
2294 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2295 
2296 		DPRINTF(WM_DEBUG_TX,
2297 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2298 
2299 		DPRINTF(WM_DEBUG_TX,
2300 		    ("%s: TX: finished transmitting packet, job %d\n",
2301 		    device_xname(sc->sc_dev), sc->sc_txsnext));
2302 
2303 		/* Advance the tx pointer. */
2304 		sc->sc_txfree -= txs->txs_ndesc;
2305 		sc->sc_txnext = nexttx;
2306 
2307 		sc->sc_txsfree--;
2308 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2309 
2310 #if NBPFILTER > 0
2311 		/* Pass the packet to any BPF listeners. */
2312 		if (ifp->if_bpf)
2313 			bpf_mtap(ifp->if_bpf, m0);
2314 #endif /* NBPFILTER > 0 */
2315 	}
2316 
2317 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2318 		/* No more slots; notify upper layer. */
2319 		ifp->if_flags |= IFF_OACTIVE;
2320 	}
2321 
2322 	if (sc->sc_txfree != ofree) {
2323 		/* Set a watchdog timer in case the chip flakes out. */
2324 		ifp->if_timer = 5;
2325 	}
2326 }
2327 
2328 /*
2329  * wm_watchdog:		[ifnet interface function]
2330  *
2331  *	Watchdog timer handler.
2332  */
2333 static void
2334 wm_watchdog(struct ifnet *ifp)
2335 {
2336 	struct wm_softc *sc = ifp->if_softc;
2337 
2338 	/*
2339 	 * Since we're using delayed interrupts, sweep up
2340 	 * before we report an error.
2341 	 */
2342 	wm_txintr(sc);
2343 
2344 	if (sc->sc_txfree != WM_NTXDESC(sc)) {
2345 		log(LOG_ERR,
2346 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2347 		    device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2348 		    sc->sc_txnext);
2349 		ifp->if_oerrors++;
2350 
2351 		/* Reset the interface. */
2352 		(void) wm_init(ifp);
2353 	}
2354 
2355 	/* Try to get more packets going. */
2356 	wm_start(ifp);
2357 }
2358 
2359 /*
2360  * wm_ioctl:		[ifnet interface function]
2361  *
2362  *	Handle control requests from the operator.
2363  */
2364 static int
2365 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2366 {
2367 	struct wm_softc *sc = ifp->if_softc;
2368 	struct ifreq *ifr = (struct ifreq *) data;
2369 	struct ifaddr *ifa = (struct ifaddr *)data;
2370 	struct sockaddr_dl *sdl;
2371 	int s, error;
2372 
2373 	s = splnet();
2374 
2375 	switch (cmd) {
2376 	case SIOCSIFMEDIA:
2377 	case SIOCGIFMEDIA:
2378 		/* Flow control requires full-duplex mode. */
2379 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2380 		    (ifr->ifr_media & IFM_FDX) == 0)
2381 			ifr->ifr_media &= ~IFM_ETH_FMASK;
2382 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2383 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2384 				/* We can do both TXPAUSE and RXPAUSE. */
2385 				ifr->ifr_media |=
2386 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2387 			}
2388 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2389 		}
2390 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2391 		break;
2392 	case SIOCINITIFADDR:
2393 		if (ifa->ifa_addr->sa_family == AF_LINK) {
2394 			sdl = satosdl(ifp->if_dl->ifa_addr);
2395 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2396 					LLADDR(satosdl(ifa->ifa_addr)),
2397 					ifp->if_addrlen);
2398 			/* unicast address is first multicast entry */
2399 			wm_set_filter(sc);
2400 			error = 0;
2401 			break;
2402 		}
2403 		/* Fall through for rest */
2404 	default:
2405 		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
2406 			break;
2407 
2408 		error = 0;
2409 
2410 		if (cmd == SIOCSIFCAP)
2411 			error = (*ifp->if_init)(ifp);
2412 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2413 			;
2414 		else if (ifp->if_flags & IFF_RUNNING) {
2415 			/*
2416 			 * Multicast list has changed; set the hardware filter
2417 			 * accordingly.
2418 			 */
2419 			wm_set_filter(sc);
2420 		}
2421 		break;
2422 	}
2423 
2424 	/* Try to get more packets going. */
2425 	wm_start(ifp);
2426 
2427 	splx(s);
2428 	return (error);
2429 }
2430 
2431 /*
2432  * wm_intr:
2433  *
2434  *	Interrupt service routine.
2435  */
2436 static int
2437 wm_intr(void *arg)
2438 {
2439 	struct wm_softc *sc = arg;
2440 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2441 	uint32_t icr;
2442 	int handled = 0;
2443 
2444 	while (1 /* CONSTCOND */) {
2445 		icr = CSR_READ(sc, WMREG_ICR);
2446 		if ((icr & sc->sc_icr) == 0)
2447 			break;
2448 #if 0 /*NRND > 0*/
2449 		if (RND_ENABLED(&sc->rnd_source))
2450 			rnd_add_uint32(&sc->rnd_source, icr);
2451 #endif
2452 
2453 		handled = 1;
2454 
2455 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2456 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
2457 			DPRINTF(WM_DEBUG_RX,
2458 			    ("%s: RX: got Rx intr 0x%08x\n",
2459 			    device_xname(sc->sc_dev),
2460 			    icr & (ICR_RXDMT0|ICR_RXT0)));
2461 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
2462 		}
2463 #endif
2464 		wm_rxintr(sc);
2465 
2466 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2467 		if (icr & ICR_TXDW) {
2468 			DPRINTF(WM_DEBUG_TX,
2469 			    ("%s: TX: got TXDW interrupt\n",
2470 			    device_xname(sc->sc_dev)));
2471 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
2472 		}
2473 #endif
2474 		wm_txintr(sc);
2475 
2476 		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
2477 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
2478 			wm_linkintr(sc, icr);
2479 		}
2480 
2481 		if (icr & ICR_RXO) {
2482 			ifp->if_ierrors++;
2483 #if defined(WM_DEBUG)
2484 			log(LOG_WARNING, "%s: Receive overrun\n",
2485 			    device_xname(sc->sc_dev));
2486 #endif /* defined(WM_DEBUG) */
2487 		}
2488 	}
2489 
2490 	if (handled) {
2491 		/* Try to get more packets going. */
2492 		wm_start(ifp);
2493 	}
2494 
2495 	return (handled);
2496 }
2497 
2498 /*
2499  * wm_txintr:
2500  *
2501  *	Helper; handle transmit interrupts.
2502  */
2503 static void
2504 wm_txintr(struct wm_softc *sc)
2505 {
2506 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2507 	struct wm_txsoft *txs;
2508 	uint8_t status;
2509 	int i;
2510 
2511 	ifp->if_flags &= ~IFF_OACTIVE;
2512 
2513 	/*
2514 	 * Go through the Tx list and free mbufs for those
2515 	 * frames which have been transmitted.
2516 	 */
2517 	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
2518 	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
2519 		txs = &sc->sc_txsoft[i];
2520 
2521 		DPRINTF(WM_DEBUG_TX,
2522 		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
2523 
2524 		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
2525 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2526 
2527 		status =
2528 		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
2529 		if ((status & WTX_ST_DD) == 0) {
2530 			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
2531 			    BUS_DMASYNC_PREREAD);
2532 			break;
2533 		}
2534 
2535 		DPRINTF(WM_DEBUG_TX,
2536 		    ("%s: TX: job %d done: descs %d..%d\n",
2537 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
2538 		    txs->txs_lastdesc));
2539 
2540 		/*
2541 		 * XXX We should probably be using the statistics
2542 		 * XXX registers, but I don't know if they exist
2543 		 * XXX on chips before the i82544.
2544 		 */
2545 
2546 #ifdef WM_EVENT_COUNTERS
2547 		if (status & WTX_ST_TU)
2548 			WM_EVCNT_INCR(&sc->sc_ev_tu);
2549 #endif /* WM_EVENT_COUNTERS */
2550 
2551 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
2552 			ifp->if_oerrors++;
2553 			if (status & WTX_ST_LC)
2554 				log(LOG_WARNING, "%s: late collision\n",
2555 				    device_xname(sc->sc_dev));
2556 			else if (status & WTX_ST_EC) {
2557 				ifp->if_collisions += 16;
2558 				log(LOG_WARNING, "%s: excessive collisions\n",
2559 				    device_xname(sc->sc_dev));
2560 			}
2561 		} else
2562 			ifp->if_opackets++;
2563 
2564 		sc->sc_txfree += txs->txs_ndesc;
2565 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
2566 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2567 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2568 		m_freem(txs->txs_mbuf);
2569 		txs->txs_mbuf = NULL;
2570 	}
2571 
2572 	/* Update the dirty transmit buffer pointer. */
2573 	sc->sc_txsdirty = i;
2574 	DPRINTF(WM_DEBUG_TX,
2575 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
2576 
2577 	/*
2578 	 * If there are no more pending transmissions, cancel the watchdog
2579 	 * timer.
2580 	 */
2581 	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
2582 		ifp->if_timer = 0;
2583 }
2584 
2585 /*
2586  * wm_rxintr:
2587  *
2588  *	Helper; handle receive interrupts.
2589  */
2590 static void
2591 wm_rxintr(struct wm_softc *sc)
2592 {
2593 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2594 	struct wm_rxsoft *rxs;
2595 	struct mbuf *m;
2596 	int i, len;
2597 	uint8_t status, errors;
2598 	uint16_t vlantag;
2599 
2600 	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
2601 		rxs = &sc->sc_rxsoft[i];
2602 
2603 		DPRINTF(WM_DEBUG_RX,
2604 		    ("%s: RX: checking descriptor %d\n",
2605 		    device_xname(sc->sc_dev), i));
2606 
2607 		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2608 
2609 		status = sc->sc_rxdescs[i].wrx_status;
2610 		errors = sc->sc_rxdescs[i].wrx_errors;
2611 		len = le16toh(sc->sc_rxdescs[i].wrx_len);
2612 		vlantag = sc->sc_rxdescs[i].wrx_special;
2613 
2614 		if ((status & WRX_ST_DD) == 0) {
2615 			/*
2616 			 * We have processed all of the receive descriptors.
2617 			 */
2618 			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
2619 			break;
2620 		}
2621 
2622 		if (__predict_false(sc->sc_rxdiscard)) {
2623 			DPRINTF(WM_DEBUG_RX,
2624 			    ("%s: RX: discarding contents of descriptor %d\n",
2625 			    device_xname(sc->sc_dev), i));
2626 			WM_INIT_RXDESC(sc, i);
2627 			if (status & WRX_ST_EOP) {
2628 				/* Reset our state. */
2629 				DPRINTF(WM_DEBUG_RX,
2630 				    ("%s: RX: resetting rxdiscard -> 0\n",
2631 				    device_xname(sc->sc_dev)));
2632 				sc->sc_rxdiscard = 0;
2633 			}
2634 			continue;
2635 		}
2636 
2637 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2638 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2639 
2640 		m = rxs->rxs_mbuf;
2641 
2642 		/*
2643 		 * Add a new receive buffer to the ring, unless of
2644 		 * course the length is zero. Treat the latter as a
2645 		 * failed mapping.
2646 		 */
2647 		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
2648 			/*
2649 			 * Failed, throw away what we've done so
2650 			 * far, and discard the rest of the packet.
2651 			 */
2652 			ifp->if_ierrors++;
2653 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2654 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2655 			WM_INIT_RXDESC(sc, i);
2656 			if ((status & WRX_ST_EOP) == 0)
2657 				sc->sc_rxdiscard = 1;
2658 			if (sc->sc_rxhead != NULL)
2659 				m_freem(sc->sc_rxhead);
2660 			WM_RXCHAIN_RESET(sc);
2661 			DPRINTF(WM_DEBUG_RX,
2662 			    ("%s: RX: Rx buffer allocation failed, "
2663 			    "dropping packet%s\n", device_xname(sc->sc_dev),
2664 			    sc->sc_rxdiscard ? " (discard)" : ""));
2665 			continue;
2666 		}
2667 
2668 		m->m_len = len;
2669 		sc->sc_rxlen += len;
2670 		DPRINTF(WM_DEBUG_RX,
2671 		    ("%s: RX: buffer at %p len %d\n",
2672 		    device_xname(sc->sc_dev), m->m_data, len));
2673 
2674 		/*
2675 		 * If this is not the end of the packet, keep
2676 		 * looking.
2677 		 */
2678 		if ((status & WRX_ST_EOP) == 0) {
2679 			WM_RXCHAIN_LINK(sc, m);
2680 			DPRINTF(WM_DEBUG_RX,
2681 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
2682 			    device_xname(sc->sc_dev), sc->sc_rxlen));
2683 			continue;
2684 		}
2685 
2686 		/*
2687 		 * Okay, we have the entire packet now.  The chip is
2688 		 * configured to include the FCS (not all chips can
2689 		 * be configured to strip it), so we need to trim it.
2690 		 * May need to adjust length of previous mbuf in the
2691 		 * chain if the current mbuf is too short.
2692 		 */
2693 		if (m->m_len < ETHER_CRC_LEN) {
2694 			sc->sc_rxtail->m_len -= (ETHER_CRC_LEN - m->m_len);
2695 			m->m_len = 0;
2696 		} else {
2697 			m->m_len -= ETHER_CRC_LEN;
2698 		}
2699 		len = sc->sc_rxlen - ETHER_CRC_LEN;
2700 
2701 		WM_RXCHAIN_LINK(sc, m);
2702 
2703 		*sc->sc_rxtailp = NULL;
2704 		m = sc->sc_rxhead;
2705 
2706 		WM_RXCHAIN_RESET(sc);
2707 
2708 		DPRINTF(WM_DEBUG_RX,
2709 		    ("%s: RX: have entire packet, len -> %d\n",
2710 		    device_xname(sc->sc_dev), len));
2711 
2712 		/*
2713 		 * If an error occurred, update stats and drop the packet.
2714 		 */
2715 		if (errors &
2716 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
2717 			ifp->if_ierrors++;
2718 			if (errors & WRX_ER_SE)
2719 				log(LOG_WARNING, "%s: symbol error\n",
2720 				    device_xname(sc->sc_dev));
2721 			else if (errors & WRX_ER_SEQ)
2722 				log(LOG_WARNING, "%s: receive sequence error\n",
2723 				    device_xname(sc->sc_dev));
2724 			else if (errors & WRX_ER_CE)
2725 				log(LOG_WARNING, "%s: CRC error\n",
2726 				    device_xname(sc->sc_dev));
2727 			m_freem(m);
2728 			continue;
2729 		}
2730 
2731 		/*
2732 		 * No errors.  Receive the packet.
2733 		 */
2734 		m->m_pkthdr.rcvif = ifp;
2735 		m->m_pkthdr.len = len;
2736 
2737 		/*
2738 		 * If VLANs are enabled, VLAN packets have been unwrapped
2739 		 * for us.  Associate the tag with the packet.
2740 		 */
2741 		if ((status & WRX_ST_VP) != 0) {
2742 			VLAN_INPUT_TAG(ifp, m,
2743 			    le16toh(vlantag),
2744 			    continue);
2745 		}
2746 
2747 		/*
2748 		 * Set up checksum info for this packet.
2749 		 */
2750 		if ((status & WRX_ST_IXSM) == 0) {
2751 			if (status & WRX_ST_IPCS) {
2752 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
2753 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2754 				if (errors & WRX_ER_IPE)
2755 					m->m_pkthdr.csum_flags |=
2756 					    M_CSUM_IPv4_BAD;
2757 			}
2758 			if (status & WRX_ST_TCPCS) {
2759 				/*
2760 				 * Note: we don't know if this was TCP or UDP,
2761 				 * so we just set both bits, and expect the
2762 				 * upper layers to deal.
2763 				 */
2764 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
2765 				m->m_pkthdr.csum_flags |=
2766 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
2767 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
2768 				if (errors & WRX_ER_TCPE)
2769 					m->m_pkthdr.csum_flags |=
2770 					    M_CSUM_TCP_UDP_BAD;
2771 			}
2772 		}
2773 
2774 		ifp->if_ipackets++;
2775 
2776 #if NBPFILTER > 0
2777 		/* Pass this up to any BPF listeners. */
2778 		if (ifp->if_bpf)
2779 			bpf_mtap(ifp->if_bpf, m);
2780 #endif /* NBPFILTER > 0 */
2781 
2782 		/* Pass it on. */
2783 		(*ifp->if_input)(ifp, m);
2784 	}
2785 
2786 	/* Update the receive pointer. */
2787 	sc->sc_rxptr = i;
2788 
2789 	DPRINTF(WM_DEBUG_RX,
2790 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
2791 }
2792 
2793 /*
2794  * wm_linkintr:
2795  *
2796  *	Helper; handle link interrupts.
2797  */
2798 static void
2799 wm_linkintr(struct wm_softc *sc, uint32_t icr)
2800 {
2801 	uint32_t status;
2802 
2803 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
2804 		__func__));
2805 	/*
2806 	 * If we get a link status interrupt on a 1000BASE-T
2807 	 * device, just fall into the normal MII tick path.
2808 	 */
2809 	if (sc->sc_flags & WM_F_HAS_MII) {
2810 		if (icr & ICR_LSC) {
2811 			DPRINTF(WM_DEBUG_LINK,
2812 			    ("%s: LINK: LSC -> mii_tick\n",
2813 			    device_xname(sc->sc_dev)));
2814 			mii_tick(&sc->sc_mii);
2815 			if (sc->sc_type == WM_T_82543) {
2816 				int miistatus, active;
2817 
2818 				/*
2819 				 * With 82543, we need to force speed and
2820 				 * duplex on the MAC equal to what the PHY
2821 				 * speed and duplex configuration is.
2822 				 */
2823 				miistatus = sc->sc_mii.mii_media_status;
2824 
2825 				if (miistatus & IFM_ACTIVE) {
2826 					active = sc->sc_mii.mii_media_active;
2827 					sc->sc_ctrl &= ~(CTRL_SPEED_MASK
2828 					    | CTRL_FD);
2829 					switch (IFM_SUBTYPE(active)) {
2830 					case IFM_10_T:
2831 						sc->sc_ctrl |= CTRL_SPEED_10;
2832 						break;
2833 					case IFM_100_TX:
2834 						sc->sc_ctrl |= CTRL_SPEED_100;
2835 						break;
2836 					case IFM_1000_T:
2837 						sc->sc_ctrl |= CTRL_SPEED_1000;
2838 						break;
2839 					default:
2840 						/*
2841 						 * fiber?
2842 						 * Shoud not enter here.
2843 						 */
2844 						printf("unknown media (%x)\n",
2845 						    active);
2846 						break;
2847 					}
2848 					if (active & IFM_FDX)
2849 						sc->sc_ctrl |= CTRL_FD;
2850 					CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2851 				}
2852 			}
2853 		} else if (icr & ICR_RXSEQ) {
2854 			DPRINTF(WM_DEBUG_LINK,
2855 			    ("%s: LINK Receive sequence error\n",
2856 			    device_xname(sc->sc_dev)));
2857 		}
2858 		return;
2859 	}
2860 
2861 	status = CSR_READ(sc, WMREG_STATUS);
2862 	if (icr & ICR_LSC) {
2863 		if (status & STATUS_LU) {
2864 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
2865 			    device_xname(sc->sc_dev),
2866 			    (status & STATUS_FD) ? "FDX" : "HDX"));
2867 			/*
2868 			 * NOTE: CTRL will update TFCE and RFCE automatically,
2869 			 * so we should update sc->sc_ctrl
2870 			 */
2871 
2872 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
2873 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2874 			sc->sc_fcrtl &= ~FCRTL_XONE;
2875 			if (status & STATUS_FD)
2876 				sc->sc_tctl |=
2877 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2878 			else
2879 				sc->sc_tctl |=
2880 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2881 			if (sc->sc_ctrl & CTRL_TFCE)
2882 				sc->sc_fcrtl |= FCRTL_XONE;
2883 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2884 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
2885 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
2886 				      sc->sc_fcrtl);
2887 			sc->sc_tbi_linkup = 1;
2888 		} else {
2889 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
2890 			    device_xname(sc->sc_dev)));
2891 			sc->sc_tbi_linkup = 0;
2892 		}
2893 		wm_tbi_set_linkled(sc);
2894 	} else if (icr & ICR_RXCFG) {
2895 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
2896 		    device_xname(sc->sc_dev)));
2897 		sc->sc_tbi_nrxcfg++;
2898 		wm_check_for_link(sc);
2899 	} else if (icr & ICR_RXSEQ) {
2900 		DPRINTF(WM_DEBUG_LINK,
2901 		    ("%s: LINK: Receive sequence error\n",
2902 		    device_xname(sc->sc_dev)));
2903 	}
2904 }
2905 
2906 /*
2907  * wm_tick:
2908  *
2909  *	One second timer, used to check link status, sweep up
2910  *	completed transmit jobs, etc.
2911  */
2912 static void
2913 wm_tick(void *arg)
2914 {
2915 	struct wm_softc *sc = arg;
2916 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2917 	int s;
2918 
2919 	s = splnet();
2920 
2921 	if (sc->sc_type >= WM_T_82542_2_1) {
2922 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2923 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2924 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2925 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2926 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2927 	}
2928 
2929 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2930 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
2931 
2932 
2933 	if (sc->sc_flags & WM_F_HAS_MII)
2934 		mii_tick(&sc->sc_mii);
2935 	else
2936 		wm_tbi_check_link(sc);
2937 
2938 	splx(s);
2939 
2940 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2941 }
2942 
2943 /*
2944  * wm_reset:
2945  *
2946  *	Reset the i82542 chip.
2947  */
2948 static void
2949 wm_reset(struct wm_softc *sc)
2950 {
2951 	uint32_t reg;
2952 
2953 	/*
2954 	 * Allocate on-chip memory according to the MTU size.
2955 	 * The Packet Buffer Allocation register must be written
2956 	 * before the chip is reset.
2957 	 */
2958 	switch (sc->sc_type) {
2959 	case WM_T_82547:
2960 	case WM_T_82547_2:
2961 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
2962 		    PBA_22K : PBA_30K;
2963 		sc->sc_txfifo_head = 0;
2964 		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
2965 		sc->sc_txfifo_size =
2966 		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
2967 		sc->sc_txfifo_stall = 0;
2968 		break;
2969 	case WM_T_82571:
2970 	case WM_T_82572:
2971 	case WM_T_80003:
2972 		sc->sc_pba = PBA_32K;
2973 		break;
2974 	case WM_T_82573:
2975 	case WM_T_82574:
2976 		sc->sc_pba = PBA_12K;
2977 		break;
2978 	case WM_T_ICH8:
2979 		sc->sc_pba = PBA_8K;
2980 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
2981 		break;
2982 	case WM_T_ICH9:
2983 	case WM_T_ICH10:
2984 		sc->sc_pba = PBA_10K;
2985 		break;
2986 	default:
2987 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
2988 		    PBA_40K : PBA_48K;
2989 		break;
2990 	}
2991 	CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
2992 
2993 	if (sc->sc_flags & WM_F_PCIE) {
2994 		int timeout = 800;
2995 
2996 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
2997 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2998 
2999 		while (timeout) {
3000 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
3001 				break;
3002 			delay(100);
3003 		}
3004 	}
3005 
3006 	/* clear interrupt */
3007 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3008 
3009 	/*
3010 	 * 82541 Errata 29? & 82547 Errata 28?
3011 	 * See also the description about PHY_RST bit in CTRL register
3012 	 * in 8254x_GBe_SDM.pdf.
3013 	 */
3014 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3015 		CSR_WRITE(sc, WMREG_CTRL,
3016 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3017 		delay(5000);
3018 	}
3019 
3020 	switch (sc->sc_type) {
3021 	case WM_T_82544:
3022 	case WM_T_82540:
3023 	case WM_T_82545:
3024 	case WM_T_82546:
3025 	case WM_T_82541:
3026 	case WM_T_82541_2:
3027 		/*
3028 		 * On some chipsets, a reset through a memory-mapped write
3029 		 * cycle can cause the chip to reset before completing the
3030 		 * write cycle.  This causes major headache that can be
3031 		 * avoided by issuing the reset via indirect register writes
3032 		 * through I/O space.
3033 		 *
3034 		 * So, if we successfully mapped the I/O BAR at attach time,
3035 		 * use that.  Otherwise, try our luck with a memory-mapped
3036 		 * reset.
3037 		 */
3038 		if (sc->sc_flags & WM_F_IOH_VALID)
3039 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3040 		else
3041 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3042 		break;
3043 
3044 	case WM_T_82545_3:
3045 	case WM_T_82546_3:
3046 		/* Use the shadow control register on these chips. */
3047 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3048 		break;
3049 
3050 	case WM_T_ICH8:
3051 	case WM_T_ICH9:
3052 	case WM_T_ICH10:
3053 		wm_get_swfwhw_semaphore(sc);
3054 		CSR_WRITE(sc, WMREG_CTRL, CTRL_RST | CTRL_PHY_RESET);
3055 		delay(10000);
3056 
3057 	default:
3058 		/* Everything else can safely use the documented method. */
3059 		CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3060 		break;
3061 	}
3062 	delay(10000);
3063 
3064 	/* reload EEPROM */
3065 	switch(sc->sc_type) {
3066 	case WM_T_82542_2_0:
3067 	case WM_T_82542_2_1:
3068 	case WM_T_82543:
3069 	case WM_T_82544:
3070 		delay(10);
3071 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3072 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3073 		delay(2000);
3074 		break;
3075 	case WM_T_82541:
3076 	case WM_T_82541_2:
3077 	case WM_T_82547:
3078 	case WM_T_82547_2:
3079 		delay(20000);
3080 		break;
3081 	case WM_T_82573:
3082 	case WM_T_82574:
3083 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3084 			delay(10);
3085 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3086 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3087 		}
3088 		/* FALLTHROUGH */
3089 	default:
3090 		/* check EECD_EE_AUTORD */
3091 		wm_get_auto_rd_done(sc);
3092 	}
3093 
3094 	/* reload sc_ctrl */
3095 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3096 
3097 #if 0
3098 	for (i = 0; i < 1000; i++) {
3099 		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0) {
3100 			return;
3101 		}
3102 		delay(20);
3103 	}
3104 
3105 	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
3106 		log(LOG_ERR, "%s: reset failed to complete\n",
3107 		    device_xname(sc->sc_dev));
3108 #endif
3109 }
3110 
3111 /*
3112  * wm_init:		[ifnet interface function]
3113  *
3114  *	Initialize the interface.  Must be called at splnet().
3115  */
3116 static int
3117 wm_init(struct ifnet *ifp)
3118 {
3119 	struct wm_softc *sc = ifp->if_softc;
3120 	struct wm_rxsoft *rxs;
3121 	int i, error = 0;
3122 	uint32_t reg;
3123 
3124 	/*
3125 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGMENT is set.
3126 	 * There is a small but measurable benefit to avoiding the adjusment
3127 	 * of the descriptor so that the headers are aligned, for normal mtu,
3128 	 * on such platforms.  One possibility is that the DMA itself is
3129 	 * slightly more efficient if the front of the entire packet (instead
3130 	 * of the front of the headers) is aligned.
3131 	 *
3132 	 * Note we must always set align_tweak to 0 if we are using
3133 	 * jumbo frames.
3134 	 */
3135 #ifdef __NO_STRICT_ALIGNMENT
3136 	sc->sc_align_tweak = 0;
3137 #else
3138 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3139 		sc->sc_align_tweak = 0;
3140 	else
3141 		sc->sc_align_tweak = 2;
3142 #endif /* __NO_STRICT_ALIGNMENT */
3143 
3144 	/* Cancel any pending I/O. */
3145 	wm_stop(ifp, 0);
3146 
3147 	/* update statistics before reset */
3148 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3149 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3150 
3151 	/* Reset the chip to a known state. */
3152 	wm_reset(sc);
3153 
3154 	switch (sc->sc_type) {
3155 	case WM_T_82571:
3156 	case WM_T_82572:
3157 	case WM_T_82573:
3158 	case WM_T_82574:
3159 	case WM_T_80003:
3160 	case WM_T_ICH8:
3161 	case WM_T_ICH9:
3162 	case WM_T_ICH10:
3163 		if (wm_check_mng_mode(sc) != 0)
3164 			wm_get_hw_control(sc);
3165 		break;
3166 	default:
3167 		break;
3168 	}
3169 
3170 	/* Initialize the transmit descriptor ring. */
3171 	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3172 	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3173 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3174 	sc->sc_txfree = WM_NTXDESC(sc);
3175 	sc->sc_txnext = 0;
3176 
3177 	if (sc->sc_type < WM_T_82543) {
3178 		CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
3179 		CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
3180 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3181 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3182 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3183 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3184 	} else {
3185 		CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
3186 		CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
3187 		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3188 		CSR_WRITE(sc, WMREG_TDH, 0);
3189 		CSR_WRITE(sc, WMREG_TDT, 0);
3190 		CSR_WRITE(sc, WMREG_TIDV, 375);		/* ITR / 4 */
3191 		CSR_WRITE(sc, WMREG_TADV, 375);		/* should be same */
3192 
3193 		CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3194 		    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3195 		CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3196 		    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3197 	}
3198 	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3199 	CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3200 
3201 	/* Initialize the transmit job descriptors. */
3202 	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3203 		sc->sc_txsoft[i].txs_mbuf = NULL;
3204 	sc->sc_txsfree = WM_TXQUEUELEN(sc);
3205 	sc->sc_txsnext = 0;
3206 	sc->sc_txsdirty = 0;
3207 
3208 	/*
3209 	 * Initialize the receive descriptor and receive job
3210 	 * descriptor rings.
3211 	 */
3212 	if (sc->sc_type < WM_T_82543) {
3213 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3214 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3215 		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3216 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3217 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3218 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3219 
3220 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3221 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3222 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3223 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3224 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3225 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3226 	} else {
3227 		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3228 		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3229 		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3230 		CSR_WRITE(sc, WMREG_RDH, 0);
3231 		CSR_WRITE(sc, WMREG_RDT, 0);
3232 		CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD);	/* ITR/4 */
3233 		CSR_WRITE(sc, WMREG_RADV, 375);		/* MUST be same */
3234 	}
3235 	for (i = 0; i < WM_NRXDESC; i++) {
3236 		rxs = &sc->sc_rxsoft[i];
3237 		if (rxs->rxs_mbuf == NULL) {
3238 			if ((error = wm_add_rxbuf(sc, i)) != 0) {
3239 				log(LOG_ERR, "%s: unable to allocate or map rx "
3240 				    "buffer %d, error = %d\n",
3241 				    device_xname(sc->sc_dev), i, error);
3242 				/*
3243 				 * XXX Should attempt to run with fewer receive
3244 				 * XXX buffers instead of just failing.
3245 				 */
3246 				wm_rxdrain(sc);
3247 				goto out;
3248 			}
3249 		} else
3250 			WM_INIT_RXDESC(sc, i);
3251 	}
3252 	sc->sc_rxptr = 0;
3253 	sc->sc_rxdiscard = 0;
3254 	WM_RXCHAIN_RESET(sc);
3255 
3256 	/*
3257 	 * Clear out the VLAN table -- we don't use it (yet).
3258 	 */
3259 	CSR_WRITE(sc, WMREG_VET, 0);
3260 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
3261 		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3262 
3263 	/*
3264 	 * Set up flow-control parameters.
3265 	 *
3266 	 * XXX Values could probably stand some tuning.
3267 	 */
3268 	if (sc->sc_type != WM_T_ICH8) {
3269 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3270 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3271 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3272 	}
3273 
3274 	sc->sc_fcrtl = FCRTL_DFLT;
3275 	if (sc->sc_type < WM_T_82543) {
3276 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3277 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3278 	} else {
3279 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3280 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3281 	}
3282 	CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3283 
3284 	/* Deal with VLAN enables. */
3285 	if (VLAN_ATTACHED(&sc->sc_ethercom))
3286 		sc->sc_ctrl |= CTRL_VME;
3287 	else
3288 		sc->sc_ctrl &= ~CTRL_VME;
3289 
3290 	/* Write the control registers. */
3291 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3292 	if (sc->sc_type >= WM_T_80003 && (sc->sc_flags & WM_F_HAS_MII)) {
3293 		int val;
3294 		val = CSR_READ(sc, WMREG_CTRL_EXT);
3295 		val &= ~CTRL_EXT_LINK_MODE_MASK;
3296 		CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3297 
3298 		/* Bypass RX and TX FIFO's */
3299 		wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3300 		    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
3301 		    KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3302 
3303 		wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3304 		    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3305 		    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3306 		/*
3307 		 * Set the mac to wait the maximum time between each
3308 		 * iteration and increase the max iterations when
3309 		 * polling the phy; this fixes erroneous timeouts at 10Mbps.
3310 		 */
3311 		wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS, 0xFFFF);
3312 		val = wm_kmrn_i80003_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
3313 		val |= 0x3F;
3314 		wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM, val);
3315 	}
3316 #if 0
3317 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3318 #endif
3319 
3320 	/*
3321 	 * Set up checksum offload parameters.
3322 	 */
3323 	reg = CSR_READ(sc, WMREG_RXCSUM);
3324 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3325 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3326 		reg |= RXCSUM_IPOFL;
3327 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3328 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3329 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3330 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3331 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
3332 
3333 	/* Reset TBI's RXCFG count */
3334 	sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
3335 
3336 	/*
3337 	 * Set up the interrupt registers.
3338 	 */
3339 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3340 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3341 	    ICR_RXO | ICR_RXT0;
3342 	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
3343 		sc->sc_icr |= ICR_RXCFG;
3344 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
3345 
3346 	/* Set up the inter-packet gap. */
3347 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
3348 
3349 	if (sc->sc_type >= WM_T_82543) {
3350 		/*
3351 		 * Set up the interrupt throttling register (units of 256ns)
3352 		 * Note that a footnote in Intel's documentation says this
3353 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
3354 		 * or 10Mbit mode.  Empirically, it appears to be the case
3355 		 * that that is also true for the 1024ns units of the other
3356 		 * interrupt-related timer registers -- so, really, we ought
3357 		 * to divide this value by 4 when the link speed is low.
3358 		 *
3359 		 * XXX implement this division at link speed change!
3360 		 */
3361 
3362 		 /*
3363 		  * For N interrupts/sec, set this value to:
3364 		  * 1000000000 / (N * 256).  Note that we set the
3365 		  * absolute and packet timer values to this value
3366 		  * divided by 4 to get "simple timer" behavior.
3367 		  */
3368 
3369 		sc->sc_itr = 1500;		/* 2604 ints/sec */
3370 		CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
3371 	}
3372 
3373 	/* Set the VLAN ethernetype. */
3374 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
3375 
3376 	/*
3377 	 * Set up the transmit control register; we start out with
3378 	 * a collision distance suitable for FDX, but update it whe
3379 	 * we resolve the media type.
3380 	 */
3381 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
3382 	    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3383 	if (sc->sc_type >= WM_T_82571)
3384 		sc->sc_tctl |= TCTL_MULR;
3385 	if (sc->sc_type >= WM_T_80003)
3386 		sc->sc_tctl |= TCTL_RTLC;
3387 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3388 
3389 	/* Set the media. */
3390 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
3391 		goto out;
3392 
3393 	/*
3394 	 * Set up the receive control register; we actually program
3395 	 * the register when we set the receive filter.  Use multicast
3396 	 * address offset type 0.
3397 	 *
3398 	 * Only the i82544 has the ability to strip the incoming
3399 	 * CRC, so we don't enable that feature.
3400 	 */
3401 	sc->sc_mchash_type = 0;
3402 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
3403 	    | RCTL_MO(sc->sc_mchash_type);
3404 
3405 	/* 82573 doesn't support jumbo frame */
3406 	if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_82574 &&
3407 	    sc->sc_type != WM_T_ICH8)
3408 		sc->sc_rctl |= RCTL_LPE;
3409 
3410 	if (MCLBYTES == 2048) {
3411 		sc->sc_rctl |= RCTL_2k;
3412 	} else {
3413 		if (sc->sc_type >= WM_T_82543) {
3414 			switch(MCLBYTES) {
3415 			case 4096:
3416 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
3417 				break;
3418 			case 8192:
3419 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
3420 				break;
3421 			case 16384:
3422 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
3423 				break;
3424 			default:
3425 				panic("wm_init: MCLBYTES %d unsupported",
3426 				    MCLBYTES);
3427 				break;
3428 			}
3429 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
3430 	}
3431 
3432 	/* Set the receive filter. */
3433 	wm_set_filter(sc);
3434 
3435 	/* Start the one second link check clock. */
3436 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3437 
3438 	/* ...all done! */
3439 	ifp->if_flags |= IFF_RUNNING;
3440 	ifp->if_flags &= ~IFF_OACTIVE;
3441 
3442  out:
3443 	if (error)
3444 		log(LOG_ERR, "%s: interface not running\n",
3445 		    device_xname(sc->sc_dev));
3446 	return (error);
3447 }
3448 
3449 /*
3450  * wm_rxdrain:
3451  *
3452  *	Drain the receive queue.
3453  */
3454 static void
3455 wm_rxdrain(struct wm_softc *sc)
3456 {
3457 	struct wm_rxsoft *rxs;
3458 	int i;
3459 
3460 	for (i = 0; i < WM_NRXDESC; i++) {
3461 		rxs = &sc->sc_rxsoft[i];
3462 		if (rxs->rxs_mbuf != NULL) {
3463 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3464 			m_freem(rxs->rxs_mbuf);
3465 			rxs->rxs_mbuf = NULL;
3466 		}
3467 	}
3468 }
3469 
3470 /*
3471  * wm_stop:		[ifnet interface function]
3472  *
3473  *	Stop transmission on the interface.
3474  */
3475 static void
3476 wm_stop(struct ifnet *ifp, int disable)
3477 {
3478 	struct wm_softc *sc = ifp->if_softc;
3479 	struct wm_txsoft *txs;
3480 	int i;
3481 
3482 	/* Stop the one second clock. */
3483 	callout_stop(&sc->sc_tick_ch);
3484 
3485 	/* Stop the 82547 Tx FIFO stall check timer. */
3486 	if (sc->sc_type == WM_T_82547)
3487 		callout_stop(&sc->sc_txfifo_ch);
3488 
3489 	if (sc->sc_flags & WM_F_HAS_MII) {
3490 		/* Down the MII. */
3491 		mii_down(&sc->sc_mii);
3492 	} else {
3493 #if 0
3494 		/* Should we clear PHY's status properly? */
3495 		wm_reset(sc);
3496 #endif
3497 	}
3498 
3499 	/* Stop the transmit and receive processes. */
3500 	CSR_WRITE(sc, WMREG_TCTL, 0);
3501 	CSR_WRITE(sc, WMREG_RCTL, 0);
3502 
3503 	/*
3504 	 * Clear the interrupt mask to ensure the device cannot assert its
3505 	 * interrupt line.
3506 	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
3507 	 * any currently pending or shared interrupt.
3508 	 */
3509 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3510 	sc->sc_icr = 0;
3511 
3512 	/* Release any queued transmit buffers. */
3513 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
3514 		txs = &sc->sc_txsoft[i];
3515 		if (txs->txs_mbuf != NULL) {
3516 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3517 			m_freem(txs->txs_mbuf);
3518 			txs->txs_mbuf = NULL;
3519 		}
3520 	}
3521 
3522 	/* Mark the interface as down and cancel the watchdog timer. */
3523 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3524 	ifp->if_timer = 0;
3525 
3526 	if (disable)
3527 		wm_rxdrain(sc);
3528 }
3529 
3530 void
3531 wm_get_auto_rd_done(struct wm_softc *sc)
3532 {
3533 	int i;
3534 
3535 	/* wait for eeprom to reload */
3536 	switch (sc->sc_type) {
3537 	case WM_T_82571:
3538 	case WM_T_82572:
3539 	case WM_T_82573:
3540 	case WM_T_82574:
3541 	case WM_T_80003:
3542 	case WM_T_ICH8:
3543 	case WM_T_ICH9:
3544 	case WM_T_ICH10:
3545 		for (i = 10; i > 0; i--) {
3546 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3547 				break;
3548 			delay(1000);
3549 		}
3550 		if (i == 0) {
3551 			log(LOG_ERR, "%s: auto read from eeprom failed to "
3552 			    "complete\n", device_xname(sc->sc_dev));
3553 		}
3554 		break;
3555 	default:
3556 		delay(5000);
3557 		break;
3558 	}
3559 
3560 	/* Phy configuration starts after EECD_AUTO_RD is set */
3561 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574)
3562 		delay(25000);
3563 }
3564 
3565 /*
3566  * wm_acquire_eeprom:
3567  *
3568  *	Perform the EEPROM handshake required on some chips.
3569  */
3570 static int
3571 wm_acquire_eeprom(struct wm_softc *sc)
3572 {
3573 	uint32_t reg;
3574 	int x;
3575 	int ret = 0;
3576 
3577 	/* always success */
3578 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3579 		return 0;
3580 
3581 	if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
3582 		ret = wm_get_swfwhw_semaphore(sc);
3583 	} else if (sc->sc_flags & WM_F_SWFW_SYNC) {
3584 		/* this will also do wm_get_swsm_semaphore() if needed */
3585 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
3586 	} else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
3587 		ret = wm_get_swsm_semaphore(sc);
3588 	}
3589 
3590 	if (ret) {
3591 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
3592 			__func__);
3593 		return 1;
3594 	}
3595 
3596 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE)  {
3597 		reg = CSR_READ(sc, WMREG_EECD);
3598 
3599 		/* Request EEPROM access. */
3600 		reg |= EECD_EE_REQ;
3601 		CSR_WRITE(sc, WMREG_EECD, reg);
3602 
3603 		/* ..and wait for it to be granted. */
3604 		for (x = 0; x < 1000; x++) {
3605 			reg = CSR_READ(sc, WMREG_EECD);
3606 			if (reg & EECD_EE_GNT)
3607 				break;
3608 			delay(5);
3609 		}
3610 		if ((reg & EECD_EE_GNT) == 0) {
3611 			aprint_error_dev(sc->sc_dev,
3612 			    "could not acquire EEPROM GNT\n");
3613 			reg &= ~EECD_EE_REQ;
3614 			CSR_WRITE(sc, WMREG_EECD, reg);
3615 			if (sc->sc_flags & WM_F_SWFWHW_SYNC)
3616 				wm_put_swfwhw_semaphore(sc);
3617 			if (sc->sc_flags & WM_F_SWFW_SYNC)
3618 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
3619 			else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
3620 				wm_put_swsm_semaphore(sc);
3621 			return (1);
3622 		}
3623 	}
3624 
3625 	return (0);
3626 }
3627 
3628 /*
3629  * wm_release_eeprom:
3630  *
3631  *	Release the EEPROM mutex.
3632  */
3633 static void
3634 wm_release_eeprom(struct wm_softc *sc)
3635 {
3636 	uint32_t reg;
3637 
3638 	/* always success */
3639 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3640 		return;
3641 
3642 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
3643 		reg = CSR_READ(sc, WMREG_EECD);
3644 		reg &= ~EECD_EE_REQ;
3645 		CSR_WRITE(sc, WMREG_EECD, reg);
3646 	}
3647 
3648 	if (sc->sc_flags & WM_F_SWFWHW_SYNC)
3649 		wm_put_swfwhw_semaphore(sc);
3650 	if (sc->sc_flags & WM_F_SWFW_SYNC)
3651 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
3652 	else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
3653 		wm_put_swsm_semaphore(sc);
3654 }
3655 
3656 /*
3657  * wm_eeprom_sendbits:
3658  *
3659  *	Send a series of bits to the EEPROM.
3660  */
3661 static void
3662 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
3663 {
3664 	uint32_t reg;
3665 	int x;
3666 
3667 	reg = CSR_READ(sc, WMREG_EECD);
3668 
3669 	for (x = nbits; x > 0; x--) {
3670 		if (bits & (1U << (x - 1)))
3671 			reg |= EECD_DI;
3672 		else
3673 			reg &= ~EECD_DI;
3674 		CSR_WRITE(sc, WMREG_EECD, reg);
3675 		delay(2);
3676 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
3677 		delay(2);
3678 		CSR_WRITE(sc, WMREG_EECD, reg);
3679 		delay(2);
3680 	}
3681 }
3682 
3683 /*
3684  * wm_eeprom_recvbits:
3685  *
3686  *	Receive a series of bits from the EEPROM.
3687  */
3688 static void
3689 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
3690 {
3691 	uint32_t reg, val;
3692 	int x;
3693 
3694 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
3695 
3696 	val = 0;
3697 	for (x = nbits; x > 0; x--) {
3698 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
3699 		delay(2);
3700 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
3701 			val |= (1U << (x - 1));
3702 		CSR_WRITE(sc, WMREG_EECD, reg);
3703 		delay(2);
3704 	}
3705 	*valp = val;
3706 }
3707 
3708 /*
3709  * wm_read_eeprom_uwire:
3710  *
3711  *	Read a word from the EEPROM using the MicroWire protocol.
3712  */
3713 static int
3714 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3715 {
3716 	uint32_t reg, val;
3717 	int i;
3718 
3719 	for (i = 0; i < wordcnt; i++) {
3720 		/* Clear SK and DI. */
3721 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
3722 		CSR_WRITE(sc, WMREG_EECD, reg);
3723 
3724 		/* Set CHIP SELECT. */
3725 		reg |= EECD_CS;
3726 		CSR_WRITE(sc, WMREG_EECD, reg);
3727 		delay(2);
3728 
3729 		/* Shift in the READ command. */
3730 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
3731 
3732 		/* Shift in address. */
3733 		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
3734 
3735 		/* Shift out the data. */
3736 		wm_eeprom_recvbits(sc, &val, 16);
3737 		data[i] = val & 0xffff;
3738 
3739 		/* Clear CHIP SELECT. */
3740 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
3741 		CSR_WRITE(sc, WMREG_EECD, reg);
3742 		delay(2);
3743 	}
3744 
3745 	return (0);
3746 }
3747 
3748 /*
3749  * wm_spi_eeprom_ready:
3750  *
3751  *	Wait for a SPI EEPROM to be ready for commands.
3752  */
3753 static int
3754 wm_spi_eeprom_ready(struct wm_softc *sc)
3755 {
3756 	uint32_t val;
3757 	int usec;
3758 
3759 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
3760 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
3761 		wm_eeprom_recvbits(sc, &val, 8);
3762 		if ((val & SPI_SR_RDY) == 0)
3763 			break;
3764 	}
3765 	if (usec >= SPI_MAX_RETRIES) {
3766 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
3767 		return (1);
3768 	}
3769 	return (0);
3770 }
3771 
3772 /*
3773  * wm_read_eeprom_spi:
3774  *
3775  *	Read a work from the EEPROM using the SPI protocol.
3776  */
3777 static int
3778 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3779 {
3780 	uint32_t reg, val;
3781 	int i;
3782 	uint8_t opc;
3783 
3784 	/* Clear SK and CS. */
3785 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
3786 	CSR_WRITE(sc, WMREG_EECD, reg);
3787 	delay(2);
3788 
3789 	if (wm_spi_eeprom_ready(sc))
3790 		return (1);
3791 
3792 	/* Toggle CS to flush commands. */
3793 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
3794 	delay(2);
3795 	CSR_WRITE(sc, WMREG_EECD, reg);
3796 	delay(2);
3797 
3798 	opc = SPI_OPC_READ;
3799 	if (sc->sc_ee_addrbits == 8 && word >= 128)
3800 		opc |= SPI_OPC_A8;
3801 
3802 	wm_eeprom_sendbits(sc, opc, 8);
3803 	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
3804 
3805 	for (i = 0; i < wordcnt; i++) {
3806 		wm_eeprom_recvbits(sc, &val, 16);
3807 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
3808 	}
3809 
3810 	/* Raise CS and clear SK. */
3811 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
3812 	CSR_WRITE(sc, WMREG_EECD, reg);
3813 	delay(2);
3814 
3815 	return (0);
3816 }
3817 
3818 #define EEPROM_CHECKSUM		0xBABA
3819 #define EEPROM_SIZE		0x0040
3820 
3821 /*
3822  * wm_validate_eeprom_checksum
3823  *
3824  * The checksum is defined as the sum of the first 64 (16 bit) words.
3825  */
3826 static int
3827 wm_validate_eeprom_checksum(struct wm_softc *sc)
3828 {
3829 	uint16_t checksum;
3830 	uint16_t eeprom_data;
3831 	int i;
3832 
3833 	checksum = 0;
3834 
3835 	for (i = 0; i < EEPROM_SIZE; i++) {
3836 		if (wm_read_eeprom(sc, i, 1, &eeprom_data))
3837 			return 1;
3838 		checksum += eeprom_data;
3839 	}
3840 
3841 	if (checksum != (uint16_t) EEPROM_CHECKSUM)
3842 		return 1;
3843 
3844 	return 0;
3845 }
3846 
3847 /*
3848  * wm_read_eeprom:
3849  *
3850  *	Read data from the serial EEPROM.
3851  */
3852 static int
3853 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3854 {
3855 	int rv;
3856 
3857 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
3858 		return 1;
3859 
3860 	if (wm_acquire_eeprom(sc))
3861 		return 1;
3862 
3863 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3864 	    || (sc->sc_type == WM_T_ICH10))
3865 		rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
3866 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
3867 		rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
3868 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
3869 		rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
3870 	else
3871 		rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
3872 
3873 	wm_release_eeprom(sc);
3874 	return rv;
3875 }
3876 
3877 static int
3878 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
3879     uint16_t *data)
3880 {
3881 	int i, eerd = 0;
3882 	int error = 0;
3883 
3884 	for (i = 0; i < wordcnt; i++) {
3885 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
3886 
3887 		CSR_WRITE(sc, WMREG_EERD, eerd);
3888 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
3889 		if (error != 0)
3890 			break;
3891 
3892 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
3893 	}
3894 
3895 	return error;
3896 }
3897 
3898 static int
3899 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
3900 {
3901 	uint32_t attempts = 100000;
3902 	uint32_t i, reg = 0;
3903 	int32_t done = -1;
3904 
3905 	for (i = 0; i < attempts; i++) {
3906 		reg = CSR_READ(sc, rw);
3907 
3908 		if (reg & EERD_DONE) {
3909 			done = 0;
3910 			break;
3911 		}
3912 		delay(5);
3913 	}
3914 
3915 	return done;
3916 }
3917 
3918 /*
3919  * wm_add_rxbuf:
3920  *
3921  *	Add a receive buffer to the indiciated descriptor.
3922  */
3923 static int
3924 wm_add_rxbuf(struct wm_softc *sc, int idx)
3925 {
3926 	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
3927 	struct mbuf *m;
3928 	int error;
3929 
3930 	MGETHDR(m, M_DONTWAIT, MT_DATA);
3931 	if (m == NULL)
3932 		return (ENOBUFS);
3933 
3934 	MCLGET(m, M_DONTWAIT);
3935 	if ((m->m_flags & M_EXT) == 0) {
3936 		m_freem(m);
3937 		return (ENOBUFS);
3938 	}
3939 
3940 	if (rxs->rxs_mbuf != NULL)
3941 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3942 
3943 	rxs->rxs_mbuf = m;
3944 
3945 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3946 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3947 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
3948 	if (error) {
3949 		/* XXX XXX XXX */
3950 		aprint_error_dev(sc->sc_dev,
3951 		    "unable to load rx DMA map %d, error = %d\n",
3952 		    idx, error);
3953 		panic("wm_add_rxbuf");
3954 	}
3955 
3956 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3957 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3958 
3959 	WM_INIT_RXDESC(sc, idx);
3960 
3961 	return (0);
3962 }
3963 
3964 /*
3965  * wm_set_ral:
3966  *
3967  *	Set an entery in the receive address list.
3968  */
3969 static void
3970 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3971 {
3972 	uint32_t ral_lo, ral_hi;
3973 
3974 	if (enaddr != NULL) {
3975 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3976 		    (enaddr[3] << 24);
3977 		ral_hi = enaddr[4] | (enaddr[5] << 8);
3978 		ral_hi |= RAL_AV;
3979 	} else {
3980 		ral_lo = 0;
3981 		ral_hi = 0;
3982 	}
3983 
3984 	if (sc->sc_type >= WM_T_82544) {
3985 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
3986 		    ral_lo);
3987 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
3988 		    ral_hi);
3989 	} else {
3990 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
3991 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
3992 	}
3993 }
3994 
3995 /*
3996  * wm_mchash:
3997  *
3998  *	Compute the hash of the multicast address for the 4096-bit
3999  *	multicast filter.
4000  */
4001 static uint32_t
4002 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
4003 {
4004 	static const int lo_shift[4] = { 4, 3, 2, 0 };
4005 	static const int hi_shift[4] = { 4, 5, 6, 8 };
4006 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
4007 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
4008 	uint32_t hash;
4009 
4010 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4011 	    || (sc->sc_type == WM_T_ICH10)) {
4012 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
4013 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
4014 		return (hash & 0x3ff);
4015 	}
4016 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
4017 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
4018 
4019 	return (hash & 0xfff);
4020 }
4021 
4022 /*
4023  * wm_set_filter:
4024  *
4025  *	Set up the receive filter.
4026  */
4027 static void
4028 wm_set_filter(struct wm_softc *sc)
4029 {
4030 	struct ethercom *ec = &sc->sc_ethercom;
4031 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4032 	struct ether_multi *enm;
4033 	struct ether_multistep step;
4034 	bus_addr_t mta_reg;
4035 	uint32_t hash, reg, bit;
4036 	int i, size;
4037 
4038 	if (sc->sc_type >= WM_T_82544)
4039 		mta_reg = WMREG_CORDOVA_MTA;
4040 	else
4041 		mta_reg = WMREG_MTA;
4042 
4043 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
4044 
4045 	if (ifp->if_flags & IFF_BROADCAST)
4046 		sc->sc_rctl |= RCTL_BAM;
4047 	if (ifp->if_flags & IFF_PROMISC) {
4048 		sc->sc_rctl |= RCTL_UPE;
4049 		goto allmulti;
4050 	}
4051 
4052 	/*
4053 	 * Set the station address in the first RAL slot, and
4054 	 * clear the remaining slots.
4055 	 */
4056 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4057 		 || (sc->sc_type == WM_T_ICH10))
4058 		size = WM_ICH8_RAL_TABSIZE;
4059 	else
4060 		size = WM_RAL_TABSIZE;
4061 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
4062 	for (i = 1; i < size; i++)
4063 		wm_set_ral(sc, NULL, i);
4064 
4065 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4066 	    || (sc->sc_type == WM_T_ICH10))
4067 		size = WM_ICH8_MC_TABSIZE;
4068 	else
4069 		size = WM_MC_TABSIZE;
4070 	/* Clear out the multicast table. */
4071 	for (i = 0; i < size; i++)
4072 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
4073 
4074 	ETHER_FIRST_MULTI(step, ec, enm);
4075 	while (enm != NULL) {
4076 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
4077 			/*
4078 			 * We must listen to a range of multicast addresses.
4079 			 * For now, just accept all multicasts, rather than
4080 			 * trying to set only those filter bits needed to match
4081 			 * the range.  (At this time, the only use of address
4082 			 * ranges is for IP multicast routing, for which the
4083 			 * range is big enough to require all bits set.)
4084 			 */
4085 			goto allmulti;
4086 		}
4087 
4088 		hash = wm_mchash(sc, enm->enm_addrlo);
4089 
4090 		reg = (hash >> 5);
4091 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4092 		    || (sc->sc_type == WM_T_ICH10))
4093 			reg &= 0x1f;
4094 		else
4095 			reg &= 0x7f;
4096 		bit = hash & 0x1f;
4097 
4098 		hash = CSR_READ(sc, mta_reg + (reg << 2));
4099 		hash |= 1U << bit;
4100 
4101 		/* XXX Hardware bug?? */
4102 		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
4103 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
4104 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4105 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
4106 		} else
4107 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4108 
4109 		ETHER_NEXT_MULTI(step, enm);
4110 	}
4111 
4112 	ifp->if_flags &= ~IFF_ALLMULTI;
4113 	goto setit;
4114 
4115  allmulti:
4116 	ifp->if_flags |= IFF_ALLMULTI;
4117 	sc->sc_rctl |= RCTL_MPE;
4118 
4119  setit:
4120 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
4121 }
4122 
4123 /*
4124  * wm_tbi_mediainit:
4125  *
4126  *	Initialize media for use on 1000BASE-X devices.
4127  */
4128 static void
4129 wm_tbi_mediainit(struct wm_softc *sc)
4130 {
4131 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4132 	const char *sep = "";
4133 
4134 	if (sc->sc_type < WM_T_82543)
4135 		sc->sc_tipg = TIPG_WM_DFLT;
4136 	else
4137 		sc->sc_tipg = TIPG_LG_DFLT;
4138 
4139 	sc->sc_tbi_anegticks = 5;
4140 
4141 	/* Initialize our media structures */
4142 	sc->sc_mii.mii_ifp = ifp;
4143 
4144 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
4145 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
4146 	    wm_tbi_mediastatus);
4147 
4148 	/*
4149 	 * SWD Pins:
4150 	 *
4151 	 *	0 = Link LED (output)
4152 	 *	1 = Loss Of Signal (input)
4153 	 */
4154 	sc->sc_ctrl |= CTRL_SWDPIO(0);
4155 	sc->sc_ctrl &= ~CTRL_SWDPIO(1);
4156 
4157 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4158 
4159 #define	ADD(ss, mm, dd)							\
4160 do {									\
4161 	aprint_normal("%s%s", sep, ss);					\
4162 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
4163 	sep = ", ";							\
4164 } while (/*CONSTCOND*/0)
4165 
4166 	aprint_normal_dev(sc->sc_dev, "");
4167 	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
4168 	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
4169 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
4170 	aprint_normal("\n");
4171 
4172 #undef ADD
4173 
4174 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
4175 }
4176 
4177 /*
4178  * wm_tbi_mediastatus:	[ifmedia interface function]
4179  *
4180  *	Get the current interface media status on a 1000BASE-X device.
4181  */
4182 static void
4183 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4184 {
4185 	struct wm_softc *sc = ifp->if_softc;
4186 	uint32_t ctrl, status;
4187 
4188 	ifmr->ifm_status = IFM_AVALID;
4189 	ifmr->ifm_active = IFM_ETHER;
4190 
4191 	status = CSR_READ(sc, WMREG_STATUS);
4192 	if ((status & STATUS_LU) == 0) {
4193 		ifmr->ifm_active |= IFM_NONE;
4194 		return;
4195 	}
4196 
4197 	ifmr->ifm_status |= IFM_ACTIVE;
4198 	ifmr->ifm_active |= IFM_1000_SX;
4199 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
4200 		ifmr->ifm_active |= IFM_FDX;
4201 	ctrl = CSR_READ(sc, WMREG_CTRL);
4202 	if (ctrl & CTRL_RFCE)
4203 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
4204 	if (ctrl & CTRL_TFCE)
4205 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
4206 }
4207 
4208 /*
4209  * wm_tbi_mediachange:	[ifmedia interface function]
4210  *
4211  *	Set hardware to newly-selected media on a 1000BASE-X device.
4212  */
4213 static int
4214 wm_tbi_mediachange(struct ifnet *ifp)
4215 {
4216 	struct wm_softc *sc = ifp->if_softc;
4217 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4218 	uint32_t status;
4219 	int i;
4220 
4221 	sc->sc_txcw = 0;
4222 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
4223 	    (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
4224 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
4225 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4226 		sc->sc_txcw |= TXCW_ANE;
4227 	} else {
4228 		/*
4229 		 * If autonegotiation is turned off, force link up and turn on
4230 		 * full duplex
4231 		 */
4232 		sc->sc_txcw &= ~TXCW_ANE;
4233 		sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
4234 		sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
4235 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4236 		delay(1000);
4237 	}
4238 
4239 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
4240 		    device_xname(sc->sc_dev),sc->sc_txcw));
4241 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
4242 	delay(10000);
4243 
4244 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
4245 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
4246 
4247 	/*
4248 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
4249 	 * optics detect a signal, 0 if they don't.
4250 	 */
4251 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
4252 		/* Have signal; wait for the link to come up. */
4253 
4254 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4255 			/*
4256 			 * Reset the link, and let autonegotiation do its thing
4257 			 */
4258 			sc->sc_ctrl |= CTRL_LRST;
4259 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4260 			delay(1000);
4261 			sc->sc_ctrl &= ~CTRL_LRST;
4262 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4263 			delay(1000);
4264 		}
4265 
4266 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
4267 			delay(10000);
4268 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
4269 				break;
4270 		}
4271 
4272 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
4273 			    device_xname(sc->sc_dev),i));
4274 
4275 		status = CSR_READ(sc, WMREG_STATUS);
4276 		DPRINTF(WM_DEBUG_LINK,
4277 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
4278 			device_xname(sc->sc_dev),status, STATUS_LU));
4279 		if (status & STATUS_LU) {
4280 			/* Link is up. */
4281 			DPRINTF(WM_DEBUG_LINK,
4282 			    ("%s: LINK: set media -> link up %s\n",
4283 			    device_xname(sc->sc_dev),
4284 			    (status & STATUS_FD) ? "FDX" : "HDX"));
4285 
4286 			/*
4287 			 * NOTE: CTRL will update TFCE and RFCE automatically,
4288 			 * so we should update sc->sc_ctrl
4289 			 */
4290 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4291 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4292 			sc->sc_fcrtl &= ~FCRTL_XONE;
4293 			if (status & STATUS_FD)
4294 				sc->sc_tctl |=
4295 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4296 			else
4297 				sc->sc_tctl |=
4298 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4299 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
4300 				sc->sc_fcrtl |= FCRTL_XONE;
4301 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4302 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
4303 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
4304 				      sc->sc_fcrtl);
4305 			sc->sc_tbi_linkup = 1;
4306 		} else {
4307 			if (i == WM_LINKUP_TIMEOUT)
4308 				wm_check_for_link(sc);
4309 			/* Link is down. */
4310 			DPRINTF(WM_DEBUG_LINK,
4311 			    ("%s: LINK: set media -> link down\n",
4312 			    device_xname(sc->sc_dev)));
4313 			sc->sc_tbi_linkup = 0;
4314 		}
4315 	} else {
4316 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
4317 		    device_xname(sc->sc_dev)));
4318 		sc->sc_tbi_linkup = 0;
4319 	}
4320 
4321 	wm_tbi_set_linkled(sc);
4322 
4323 	return (0);
4324 }
4325 
4326 /*
4327  * wm_tbi_set_linkled:
4328  *
4329  *	Update the link LED on 1000BASE-X devices.
4330  */
4331 static void
4332 wm_tbi_set_linkled(struct wm_softc *sc)
4333 {
4334 
4335 	if (sc->sc_tbi_linkup)
4336 		sc->sc_ctrl |= CTRL_SWDPIN(0);
4337 	else
4338 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
4339 
4340 	/* 82540 or newer devices are active low */
4341 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
4342 
4343 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4344 }
4345 
4346 /*
4347  * wm_tbi_check_link:
4348  *
4349  *	Check the link on 1000BASE-X devices.
4350  */
4351 static void
4352 wm_tbi_check_link(struct wm_softc *sc)
4353 {
4354 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4355 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4356 	uint32_t rxcw, ctrl, status;
4357 
4358 	status = CSR_READ(sc, WMREG_STATUS);
4359 
4360 	rxcw = CSR_READ(sc, WMREG_RXCW);
4361 	ctrl = CSR_READ(sc, WMREG_CTRL);
4362 
4363 	/* set link status */
4364 	if ((status & STATUS_LU) == 0) {
4365 		DPRINTF(WM_DEBUG_LINK,
4366 		    ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
4367 		sc->sc_tbi_linkup = 0;
4368 	} else if (sc->sc_tbi_linkup == 0) {
4369 		DPRINTF(WM_DEBUG_LINK,
4370 		    ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
4371 		    (status & STATUS_FD) ? "FDX" : "HDX"));
4372 		sc->sc_tbi_linkup = 1;
4373 	}
4374 
4375 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
4376 	    && ((status & STATUS_LU) == 0)) {
4377 		sc->sc_tbi_linkup = 0;
4378 		if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
4379 			/* RXCFG storm! */
4380 			DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
4381 				sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
4382 			wm_init(ifp);
4383 			wm_start(ifp);
4384 		} else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4385 			/* If the timer expired, retry autonegotiation */
4386 			if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
4387 				DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
4388 				sc->sc_tbi_ticks = 0;
4389 				/*
4390 				 * Reset the link, and let autonegotiation do
4391 				 * its thing
4392 				 */
4393 				sc->sc_ctrl |= CTRL_LRST;
4394 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4395 				delay(1000);
4396 				sc->sc_ctrl &= ~CTRL_LRST;
4397 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4398 				delay(1000);
4399 				CSR_WRITE(sc, WMREG_TXCW,
4400 				    sc->sc_txcw & ~TXCW_ANE);
4401 				CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
4402 			}
4403 		}
4404 	}
4405 
4406 	wm_tbi_set_linkled(sc);
4407 }
4408 
4409 /*
4410  * wm_gmii_reset:
4411  *
4412  *	Reset the PHY.
4413  */
4414 static void
4415 wm_gmii_reset(struct wm_softc *sc)
4416 {
4417 	uint32_t reg;
4418 	int func = 0; /* XXX gcc */
4419 
4420 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4421 	    || (sc->sc_type == WM_T_ICH10)) {
4422 		if (wm_get_swfwhw_semaphore(sc)) {
4423 			aprint_error_dev(sc->sc_dev,
4424 			    "%s: failed to get semaphore\n", __func__);
4425 			return;
4426 		}
4427 	}
4428 	if (sc->sc_type == WM_T_80003) {
4429 		func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
4430 		if (wm_get_swfw_semaphore(sc,
4431 			func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4432 			aprint_error_dev(sc->sc_dev,
4433 			    "%s: failed to get semaphore\n", __func__);
4434 			return;
4435 		}
4436 	}
4437 	if (sc->sc_type >= WM_T_82544) {
4438 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
4439 		delay(20000);
4440 
4441 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4442 		delay(20000);
4443 	} else {
4444 		/*
4445 		 * With 82543, we need to force speed and duplex on the MAC
4446 		 * equal to what the PHY speed and duplex configuration is.
4447 		 * In addition, we need to perform a hardware reset on the PHY
4448 		 * to take it out of reset.
4449 		 */
4450 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
4451 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4452 
4453 		/* The PHY reset pin is active-low. */
4454 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
4455 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
4456 		    CTRL_EXT_SWDPIN(4));
4457 		reg |= CTRL_EXT_SWDPIO(4);
4458 
4459 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
4460 		delay(10);
4461 
4462 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4463 		delay(10000);
4464 
4465 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
4466 		delay(10);
4467 #if 0
4468 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
4469 #endif
4470 	}
4471 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4472 	    || (sc->sc_type == WM_T_ICH10))
4473 		wm_put_swfwhw_semaphore(sc);
4474 	if (sc->sc_type == WM_T_80003)
4475 		wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4476 }
4477 
4478 /*
4479  * wm_gmii_mediainit:
4480  *
4481  *	Initialize media for use on 1000BASE-T devices.
4482  */
4483 static void
4484 wm_gmii_mediainit(struct wm_softc *sc)
4485 {
4486 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4487 
4488 	/* We have MII. */
4489 	sc->sc_flags |= WM_F_HAS_MII;
4490 
4491 	if (sc->sc_type >= WM_T_80003)
4492 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
4493 	else
4494 		sc->sc_tipg = TIPG_1000T_DFLT;
4495 
4496 	/*
4497 	 * Let the chip set speed/duplex on its own based on
4498 	 * signals from the PHY.
4499 	 * XXXbouyer - I'm not sure this is right for the 80003,
4500 	 * the em driver only sets CTRL_SLU here - but it seems to work.
4501 	 */
4502 	sc->sc_ctrl |= CTRL_SLU;
4503 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4504 
4505 	/* Initialize our media structures and probe the GMII. */
4506 	sc->sc_mii.mii_ifp = ifp;
4507 
4508 	if (sc->sc_type == WM_T_ICH10) {
4509 		sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
4510 		sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
4511 	} else if (sc->sc_type >= WM_T_80003) {
4512 		sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
4513 		sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
4514 	} else if (sc->sc_type >= WM_T_82544) {
4515 		sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
4516 		sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
4517 	} else {
4518 		sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
4519 		sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
4520 	}
4521 	sc->sc_mii.mii_statchg = wm_gmii_statchg;
4522 
4523 	wm_gmii_reset(sc);
4524 
4525 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
4526 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
4527 	    wm_gmii_mediastatus);
4528 
4529 	mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
4530 	    MII_OFFSET_ANY, MIIF_DOPAUSE);
4531 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
4532 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
4533 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
4534 	} else
4535 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
4536 }
4537 
4538 /*
4539  * wm_gmii_mediastatus:	[ifmedia interface function]
4540  *
4541  *	Get the current interface media status on a 1000BASE-T device.
4542  */
4543 static void
4544 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4545 {
4546 	struct wm_softc *sc = ifp->if_softc;
4547 
4548 	ether_mediastatus(ifp, ifmr);
4549 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) |
4550 			   sc->sc_flowflags;
4551 }
4552 
4553 /*
4554  * wm_gmii_mediachange:	[ifmedia interface function]
4555  *
4556  *	Set hardware to newly-selected media on a 1000BASE-T device.
4557  */
4558 static int
4559 wm_gmii_mediachange(struct ifnet *ifp)
4560 {
4561 	struct wm_softc *sc = ifp->if_softc;
4562 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4563 	int rc;
4564 
4565 	if ((ifp->if_flags & IFF_UP) == 0)
4566 		return 0;
4567 
4568 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
4569 	sc->sc_ctrl |= CTRL_SLU;
4570 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
4571 	    || (sc->sc_type > WM_T_82543)) {
4572 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
4573 	} else {
4574 		sc->sc_ctrl &= ~CTRL_ASDE;
4575 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
4576 		if (ife->ifm_media & IFM_FDX)
4577 			sc->sc_ctrl |= CTRL_FD;
4578 		switch(IFM_SUBTYPE(ife->ifm_media)) {
4579 		case IFM_10_T:
4580 			sc->sc_ctrl |= CTRL_SPEED_10;
4581 			break;
4582 		case IFM_100_TX:
4583 			sc->sc_ctrl |= CTRL_SPEED_100;
4584 			break;
4585 		case IFM_1000_T:
4586 			sc->sc_ctrl |= CTRL_SPEED_1000;
4587 			break;
4588 		default:
4589 			panic("wm_gmii_mediachange: bad media 0x%x",
4590 			    ife->ifm_media);
4591 		}
4592 	}
4593 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4594 	if (sc->sc_type <= WM_T_82543)
4595 		wm_gmii_reset(sc);
4596 
4597 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
4598 		return 0;
4599 	return rc;
4600 }
4601 
4602 #define	MDI_IO		CTRL_SWDPIN(2)
4603 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
4604 #define	MDI_CLK		CTRL_SWDPIN(3)
4605 
4606 static void
4607 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
4608 {
4609 	uint32_t i, v;
4610 
4611 	v = CSR_READ(sc, WMREG_CTRL);
4612 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
4613 	v |= MDI_DIR | CTRL_SWDPIO(3);
4614 
4615 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
4616 		if (data & i)
4617 			v |= MDI_IO;
4618 		else
4619 			v &= ~MDI_IO;
4620 		CSR_WRITE(sc, WMREG_CTRL, v);
4621 		delay(10);
4622 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4623 		delay(10);
4624 		CSR_WRITE(sc, WMREG_CTRL, v);
4625 		delay(10);
4626 	}
4627 }
4628 
4629 static uint32_t
4630 i82543_mii_recvbits(struct wm_softc *sc)
4631 {
4632 	uint32_t v, i, data = 0;
4633 
4634 	v = CSR_READ(sc, WMREG_CTRL);
4635 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
4636 	v |= CTRL_SWDPIO(3);
4637 
4638 	CSR_WRITE(sc, WMREG_CTRL, v);
4639 	delay(10);
4640 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4641 	delay(10);
4642 	CSR_WRITE(sc, WMREG_CTRL, v);
4643 	delay(10);
4644 
4645 	for (i = 0; i < 16; i++) {
4646 		data <<= 1;
4647 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4648 		delay(10);
4649 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
4650 			data |= 1;
4651 		CSR_WRITE(sc, WMREG_CTRL, v);
4652 		delay(10);
4653 	}
4654 
4655 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4656 	delay(10);
4657 	CSR_WRITE(sc, WMREG_CTRL, v);
4658 	delay(10);
4659 
4660 	return (data);
4661 }
4662 
4663 #undef MDI_IO
4664 #undef MDI_DIR
4665 #undef MDI_CLK
4666 
4667 /*
4668  * wm_gmii_i82543_readreg:	[mii interface function]
4669  *
4670  *	Read a PHY register on the GMII (i82543 version).
4671  */
4672 static int
4673 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
4674 {
4675 	struct wm_softc *sc = device_private(self);
4676 	int rv;
4677 
4678 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
4679 	i82543_mii_sendbits(sc, reg | (phy << 5) |
4680 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
4681 	rv = i82543_mii_recvbits(sc) & 0xffff;
4682 
4683 	DPRINTF(WM_DEBUG_GMII,
4684 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
4685 	    device_xname(sc->sc_dev), phy, reg, rv));
4686 
4687 	return (rv);
4688 }
4689 
4690 /*
4691  * wm_gmii_i82543_writereg:	[mii interface function]
4692  *
4693  *	Write a PHY register on the GMII (i82543 version).
4694  */
4695 static void
4696 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
4697 {
4698 	struct wm_softc *sc = device_private(self);
4699 
4700 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
4701 	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
4702 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
4703 	    (MII_COMMAND_START << 30), 32);
4704 }
4705 
4706 /*
4707  * wm_gmii_i82544_readreg:	[mii interface function]
4708  *
4709  *	Read a PHY register on the GMII.
4710  */
4711 static int
4712 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
4713 {
4714 	struct wm_softc *sc = device_private(self);
4715 	uint32_t mdic = 0;
4716 	int i, rv;
4717 
4718 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
4719 	    MDIC_REGADD(reg));
4720 
4721 	for (i = 0; i < 320; i++) {
4722 		mdic = CSR_READ(sc, WMREG_MDIC);
4723 		if (mdic & MDIC_READY)
4724 			break;
4725 		delay(10);
4726 	}
4727 
4728 	if ((mdic & MDIC_READY) == 0) {
4729 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
4730 		    device_xname(sc->sc_dev), phy, reg);
4731 		rv = 0;
4732 	} else if (mdic & MDIC_E) {
4733 #if 0 /* This is normal if no PHY is present. */
4734 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
4735 		    device_xname(sc->sc_dev), phy, reg);
4736 #endif
4737 		rv = 0;
4738 	} else {
4739 		rv = MDIC_DATA(mdic);
4740 		if (rv == 0xffff)
4741 			rv = 0;
4742 	}
4743 
4744 	return (rv);
4745 }
4746 
4747 /*
4748  * wm_gmii_i82544_writereg:	[mii interface function]
4749  *
4750  *	Write a PHY register on the GMII.
4751  */
4752 static void
4753 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
4754 {
4755 	struct wm_softc *sc = device_private(self);
4756 	uint32_t mdic = 0;
4757 	int i;
4758 
4759 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
4760 	    MDIC_REGADD(reg) | MDIC_DATA(val));
4761 
4762 	for (i = 0; i < 320; i++) {
4763 		mdic = CSR_READ(sc, WMREG_MDIC);
4764 		if (mdic & MDIC_READY)
4765 			break;
4766 		delay(10);
4767 	}
4768 
4769 	if ((mdic & MDIC_READY) == 0)
4770 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
4771 		    device_xname(sc->sc_dev), phy, reg);
4772 	else if (mdic & MDIC_E)
4773 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
4774 		    device_xname(sc->sc_dev), phy, reg);
4775 }
4776 
4777 /*
4778  * wm_gmii_i80003_readreg:	[mii interface function]
4779  *
4780  *	Read a PHY register on the kumeran
4781  * This could be handled by the PHY layer if we didn't have to lock the
4782  * ressource ...
4783  */
4784 static int
4785 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
4786 {
4787 	struct wm_softc *sc = device_private(self);
4788 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4789 	int rv;
4790 
4791 	if (phy != 1) /* only one PHY on kumeran bus */
4792 		return 0;
4793 
4794 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4795 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4796 		    __func__);
4797 		return 0;
4798 	}
4799 
4800 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
4801 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
4802 		    reg >> GG82563_PAGE_SHIFT);
4803 	} else {
4804 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
4805 		    reg >> GG82563_PAGE_SHIFT);
4806 	}
4807 	/* Wait more 200us for a bug of the ready bit in the MDIC register */
4808 	delay(200);
4809 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
4810 	delay(200);
4811 
4812 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4813 	return (rv);
4814 }
4815 
4816 /*
4817  * wm_gmii_i80003_writereg:	[mii interface function]
4818  *
4819  *	Write a PHY register on the kumeran.
4820  * This could be handled by the PHY layer if we didn't have to lock the
4821  * ressource ...
4822  */
4823 static void
4824 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
4825 {
4826 	struct wm_softc *sc = device_private(self);
4827 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4828 
4829 	if (phy != 1) /* only one PHY on kumeran bus */
4830 		return;
4831 
4832 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4833 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4834 		    __func__);
4835 		return;
4836 	}
4837 
4838 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
4839 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
4840 		    reg >> GG82563_PAGE_SHIFT);
4841 	} else {
4842 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
4843 		    reg >> GG82563_PAGE_SHIFT);
4844 	}
4845 	/* Wait more 200us for a bug of the ready bit in the MDIC register */
4846 	delay(200);
4847 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
4848 	delay(200);
4849 
4850 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4851 }
4852 
4853 /*
4854  * wm_gmii_bm_readreg:	[mii interface function]
4855  *
4856  *	Read a PHY register on the kumeran
4857  * This could be handled by the PHY layer if we didn't have to lock the
4858  * ressource ...
4859  */
4860 static int
4861 wm_gmii_bm_readreg(device_t self, int phy, int reg)
4862 {
4863 	struct wm_softc *sc = device_private(self);
4864 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4865 	int rv;
4866 
4867 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4868 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4869 		    __func__);
4870 		return 0;
4871 	}
4872 
4873 	if (reg > GG82563_MAX_REG_ADDRESS) {
4874 		if (phy == 1)
4875 			wm_gmii_i82544_writereg(self, phy, 0x1f,
4876 			    reg);
4877 		else
4878 			wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
4879 			    reg >> GG82563_PAGE_SHIFT);
4880 
4881 	}
4882 
4883 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
4884 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4885 	return (rv);
4886 }
4887 
4888 /*
4889  * wm_gmii_bm_writereg:	[mii interface function]
4890  *
4891  *	Write a PHY register on the kumeran.
4892  * This could be handled by the PHY layer if we didn't have to lock the
4893  * ressource ...
4894  */
4895 static void
4896 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
4897 {
4898 	struct wm_softc *sc = device_private(self);
4899 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4900 
4901 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4902 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4903 		    __func__);
4904 		return;
4905 	}
4906 
4907 	if (reg > GG82563_MAX_REG_ADDRESS) {
4908 		if (phy == 1)
4909 			wm_gmii_i82544_writereg(self, phy, 0x1f,
4910 			    reg);
4911 		else
4912 			wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
4913 			    reg >> GG82563_PAGE_SHIFT);
4914 
4915 	}
4916 
4917 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
4918 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4919 }
4920 
4921 /*
4922  * wm_gmii_statchg:	[mii interface function]
4923  *
4924  *	Callback from MII layer when media changes.
4925  */
4926 static void
4927 wm_gmii_statchg(device_t self)
4928 {
4929 	struct wm_softc *sc = device_private(self);
4930 	struct mii_data *mii = &sc->sc_mii;
4931 
4932 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
4933 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4934 	sc->sc_fcrtl &= ~FCRTL_XONE;
4935 
4936 	/*
4937 	 * Get flow control negotiation result.
4938 	 */
4939 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
4940 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
4941 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
4942 		mii->mii_media_active &= ~IFM_ETH_FMASK;
4943 	}
4944 
4945 	if (sc->sc_flowflags & IFM_FLOW) {
4946 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
4947 			sc->sc_ctrl |= CTRL_TFCE;
4948 			sc->sc_fcrtl |= FCRTL_XONE;
4949 		}
4950 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
4951 			sc->sc_ctrl |= CTRL_RFCE;
4952 	}
4953 
4954 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
4955 		DPRINTF(WM_DEBUG_LINK,
4956 		    ("%s: LINK: statchg: FDX\n", device_xname(sc->sc_dev)));
4957 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4958 	} else  {
4959 		DPRINTF(WM_DEBUG_LINK,
4960 		    ("%s: LINK: statchg: HDX\n", device_xname(sc->sc_dev)));
4961 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4962 	}
4963 
4964 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4965 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4966 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
4967 						 : WMREG_FCRTL, sc->sc_fcrtl);
4968 	if (sc->sc_type >= WM_T_80003) {
4969 		switch(IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
4970 		case IFM_1000_T:
4971 			wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
4972 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
4973 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
4974 			break;
4975 		default:
4976 			wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
4977 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
4978 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
4979 			break;
4980 		}
4981 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4982 	}
4983 }
4984 
4985 /*
4986  * wm_kmrn_i80003_readreg:
4987  *
4988  *	Read a kumeran register
4989  */
4990 static int
4991 wm_kmrn_i80003_readreg(struct wm_softc *sc, int reg)
4992 {
4993 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4994 	int rv;
4995 
4996 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4997 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4998 		    __func__);
4999 		return 0;
5000 	}
5001 
5002 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
5003 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
5004 	    KUMCTRLSTA_REN);
5005 	delay(2);
5006 
5007 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
5008 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
5009 	return (rv);
5010 }
5011 
5012 /*
5013  * wm_kmrn_i80003_writereg:
5014  *
5015  *	Write a kumeran register
5016  */
5017 static void
5018 wm_kmrn_i80003_writereg(struct wm_softc *sc, int reg, int val)
5019 {
5020 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
5021 
5022 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
5023 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5024 		    __func__);
5025 		return;
5026 	}
5027 
5028 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
5029 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
5030 	    (val & KUMCTRLSTA_MASK));
5031 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
5032 }
5033 
5034 static int
5035 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
5036 {
5037 	uint32_t eecd = 0;
5038 
5039 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574) {
5040 		eecd = CSR_READ(sc, WMREG_EECD);
5041 
5042 		/* Isolate bits 15 & 16 */
5043 		eecd = ((eecd >> 15) & 0x03);
5044 
5045 		/* If both bits are set, device is Flash type */
5046 		if (eecd == 0x03) {
5047 			return 0;
5048 		}
5049 	}
5050 	return 1;
5051 }
5052 
5053 static int
5054 wm_get_swsm_semaphore(struct wm_softc *sc)
5055 {
5056 	int32_t timeout;
5057 	uint32_t swsm;
5058 
5059 	/* Get the FW semaphore. */
5060 	timeout = 1000 + 1; /* XXX */
5061 	while (timeout) {
5062 		swsm = CSR_READ(sc, WMREG_SWSM);
5063 		swsm |= SWSM_SWESMBI;
5064 		CSR_WRITE(sc, WMREG_SWSM, swsm);
5065 		/* if we managed to set the bit we got the semaphore. */
5066 		swsm = CSR_READ(sc, WMREG_SWSM);
5067 		if (swsm & SWSM_SWESMBI)
5068 			break;
5069 
5070 		delay(50);
5071 		timeout--;
5072 	}
5073 
5074 	if (timeout == 0) {
5075 		aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n");
5076 		/* Release semaphores */
5077 		wm_put_swsm_semaphore(sc);
5078 		return 1;
5079 	}
5080 	return 0;
5081 }
5082 
5083 static void
5084 wm_put_swsm_semaphore(struct wm_softc *sc)
5085 {
5086 	uint32_t swsm;
5087 
5088 	swsm = CSR_READ(sc, WMREG_SWSM);
5089 	swsm &= ~(SWSM_SWESMBI);
5090 	CSR_WRITE(sc, WMREG_SWSM, swsm);
5091 }
5092 
5093 static int
5094 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
5095 {
5096 	uint32_t swfw_sync;
5097 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
5098 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
5099 	int timeout = 200;
5100 
5101 	for(timeout = 0; timeout < 200; timeout++) {
5102 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5103 			if (wm_get_swsm_semaphore(sc)) {
5104 				aprint_error_dev(sc->sc_dev,
5105 				    "%s: failed to get semaphore\n",
5106 				    __func__);
5107 				return 1;
5108 			}
5109 		}
5110 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
5111 		if ((swfw_sync & (swmask | fwmask)) == 0) {
5112 			swfw_sync |= swmask;
5113 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
5114 			if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5115 				wm_put_swsm_semaphore(sc);
5116 			return 0;
5117 		}
5118 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5119 			wm_put_swsm_semaphore(sc);
5120 		delay(5000);
5121 	}
5122 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
5123 	    device_xname(sc->sc_dev), mask, swfw_sync);
5124 	return 1;
5125 }
5126 
5127 static void
5128 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
5129 {
5130 	uint32_t swfw_sync;
5131 
5132 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5133 		while (wm_get_swsm_semaphore(sc) != 0)
5134 			continue;
5135 	}
5136 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
5137 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
5138 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
5139 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5140 		wm_put_swsm_semaphore(sc);
5141 }
5142 
5143 static int
5144 wm_get_swfwhw_semaphore(struct wm_softc *sc)
5145 {
5146 	uint32_t ext_ctrl;
5147 	int timeout = 200;
5148 
5149 	for(timeout = 0; timeout < 200; timeout++) {
5150 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5151 		ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
5152 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
5153 
5154 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5155 		if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
5156 			return 0;
5157 		delay(5000);
5158 	}
5159 	printf("%s: failed to get swfwgw semaphore ext_ctrl 0x%x\n",
5160 	    device_xname(sc->sc_dev), ext_ctrl);
5161 	return 1;
5162 }
5163 
5164 static void
5165 wm_put_swfwhw_semaphore(struct wm_softc *sc)
5166 {
5167 	uint32_t ext_ctrl;
5168 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5169 	ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
5170 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
5171 }
5172 
5173 static int
5174 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
5175 {
5176 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
5177 	uint8_t bank_high_byte;
5178 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
5179 
5180 	if (sc->sc_type != WM_T_ICH10) {
5181 		/* Value of bit 22 corresponds to the flash bank we're on. */
5182 		*bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
5183 	} else {
5184 		wm_read_ich8_byte(sc, act_offset, &bank_high_byte);
5185 		if ((bank_high_byte & 0xc0) == 0x80)
5186 			*bank = 0;
5187 		else {
5188 			wm_read_ich8_byte(sc, act_offset + bank1_offset,
5189 			    &bank_high_byte);
5190 			if ((bank_high_byte & 0xc0) == 0x80)
5191 				*bank = 1;
5192 			else {
5193 				aprint_error_dev(sc->sc_dev,
5194 				    "EEPROM not present\n");
5195 				return -1;
5196 			}
5197 		}
5198 	}
5199 
5200 	return 0;
5201 }
5202 
5203 /******************************************************************************
5204  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
5205  * register.
5206  *
5207  * sc - Struct containing variables accessed by shared code
5208  * offset - offset of word in the EEPROM to read
5209  * data - word read from the EEPROM
5210  * words - number of words to read
5211  *****************************************************************************/
5212 static int
5213 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
5214 {
5215     int32_t  error = 0;
5216     uint32_t flash_bank = 0;
5217     uint32_t act_offset = 0;
5218     uint32_t bank_offset = 0;
5219     uint16_t word = 0;
5220     uint16_t i = 0;
5221 
5222     /* We need to know which is the valid flash bank.  In the event
5223      * that we didn't allocate eeprom_shadow_ram, we may not be
5224      * managing flash_bank.  So it cannot be trusted and needs
5225      * to be updated with each read.
5226      */
5227     error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
5228     if (error) {
5229 	    aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
5230 		    __func__);
5231         return error;
5232     }
5233 
5234     /* Adjust offset appropriately if we're on bank 1 - adjust for word size */
5235     bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
5236 
5237     error = wm_get_swfwhw_semaphore(sc);
5238     if (error) {
5239 	    aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5240 		__func__);
5241         return error;
5242     }
5243 
5244     for (i = 0; i < words; i++) {
5245             /* The NVM part needs a byte offset, hence * 2 */
5246             act_offset = bank_offset + ((offset + i) * 2);
5247             error = wm_read_ich8_word(sc, act_offset, &word);
5248             if (error) {
5249 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
5250 		    __func__);
5251                 break;
5252 	    }
5253             data[i] = word;
5254     }
5255 
5256     wm_put_swfwhw_semaphore(sc);
5257     return error;
5258 }
5259 
5260 /******************************************************************************
5261  * This function does initial flash setup so that a new read/write/erase cycle
5262  * can be started.
5263  *
5264  * sc - The pointer to the hw structure
5265  ****************************************************************************/
5266 static int32_t
5267 wm_ich8_cycle_init(struct wm_softc *sc)
5268 {
5269     uint16_t hsfsts;
5270     int32_t error = 1;
5271     int32_t i     = 0;
5272 
5273     hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5274 
5275     /* May be check the Flash Des Valid bit in Hw status */
5276     if ((hsfsts & HSFSTS_FLDVAL) == 0) {
5277         return error;
5278     }
5279 
5280     /* Clear FCERR in Hw status by writing 1 */
5281     /* Clear DAEL in Hw status by writing a 1 */
5282     hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
5283 
5284     ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
5285 
5286     /* Either we should have a hardware SPI cycle in progress bit to check
5287      * against, in order to start a new cycle or FDONE bit should be changed
5288      * in the hardware so that it is 1 after harware reset, which can then be
5289      * used as an indication whether a cycle is in progress or has been
5290      * completed .. we should also have some software semaphore mechanism to
5291      * guard FDONE or the cycle in progress bit so that two threads access to
5292      * those bits can be sequentiallized or a way so that 2 threads dont
5293      * start the cycle at the same time */
5294 
5295     if ((hsfsts & HSFSTS_FLINPRO) == 0) {
5296         /* There is no cycle running at present, so we can start a cycle */
5297         /* Begin by setting Flash Cycle Done. */
5298         hsfsts |= HSFSTS_DONE;
5299         ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
5300         error = 0;
5301     } else {
5302         /* otherwise poll for sometime so the current cycle has a chance
5303          * to end before giving up. */
5304         for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
5305             hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5306             if ((hsfsts & HSFSTS_FLINPRO) == 0) {
5307                 error = 0;
5308                 break;
5309             }
5310             delay(1);
5311         }
5312         if (error == 0) {
5313             /* Successful in waiting for previous cycle to timeout,
5314              * now set the Flash Cycle Done. */
5315             hsfsts |= HSFSTS_DONE;
5316             ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
5317         }
5318     }
5319     return error;
5320 }
5321 
5322 /******************************************************************************
5323  * This function starts a flash cycle and waits for its completion
5324  *
5325  * sc - The pointer to the hw structure
5326  ****************************************************************************/
5327 static int32_t
5328 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
5329 {
5330     uint16_t hsflctl;
5331     uint16_t hsfsts;
5332     int32_t error = 1;
5333     uint32_t i = 0;
5334 
5335     /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
5336     hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
5337     hsflctl |= HSFCTL_GO;
5338     ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
5339 
5340     /* wait till FDONE bit is set to 1 */
5341     do {
5342         hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5343         if (hsfsts & HSFSTS_DONE)
5344             break;
5345         delay(1);
5346         i++;
5347     } while (i < timeout);
5348     if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0) {
5349         error = 0;
5350     }
5351     return error;
5352 }
5353 
5354 /******************************************************************************
5355  * Reads a byte or word from the NVM using the ICH8 flash access registers.
5356  *
5357  * sc - The pointer to the hw structure
5358  * index - The index of the byte or word to read.
5359  * size - Size of data to read, 1=byte 2=word
5360  * data - Pointer to the word to store the value read.
5361  *****************************************************************************/
5362 static int32_t
5363 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
5364                      uint32_t size, uint16_t* data)
5365 {
5366     uint16_t hsfsts;
5367     uint16_t hsflctl;
5368     uint32_t flash_linear_address;
5369     uint32_t flash_data = 0;
5370     int32_t error = 1;
5371     int32_t count = 0;
5372 
5373     if (size < 1  || size > 2 || data == 0x0 ||
5374         index > ICH_FLASH_LINEAR_ADDR_MASK)
5375         return error;
5376 
5377     flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
5378                            sc->sc_ich8_flash_base;
5379 
5380     do {
5381         delay(1);
5382         /* Steps */
5383         error = wm_ich8_cycle_init(sc);
5384         if (error)
5385             break;
5386 
5387         hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
5388         /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
5389         hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT) & HSFCTL_BCOUNT_MASK;
5390         hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
5391         ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
5392 
5393         /* Write the last 24 bits of index into Flash Linear address field in
5394          * Flash Address */
5395         /* TODO: TBD maybe check the index against the size of flash */
5396 
5397         ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
5398 
5399         error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
5400 
5401         /* Check if FCERR is set to 1, if set to 1, clear it and try the whole
5402          * sequence a few more times, else read in (shift in) the Flash Data0,
5403          * the order is least significant byte first msb to lsb */
5404         if (error == 0) {
5405             flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
5406             if (size == 1) {
5407                 *data = (uint8_t)(flash_data & 0x000000FF);
5408             } else if (size == 2) {
5409                 *data = (uint16_t)(flash_data & 0x0000FFFF);
5410             }
5411             break;
5412         } else {
5413             /* If we've gotten here, then things are probably completely hosed,
5414              * but if the error condition is detected, it won't hurt to give
5415              * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
5416              */
5417             hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5418             if (hsfsts & HSFSTS_ERR) {
5419                 /* Repeat for some time before giving up. */
5420                 continue;
5421             } else if ((hsfsts & HSFSTS_DONE) == 0) {
5422                 break;
5423             }
5424         }
5425     } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
5426 
5427     return error;
5428 }
5429 
5430 /******************************************************************************
5431  * Reads a single byte from the NVM using the ICH8 flash access registers.
5432  *
5433  * sc - pointer to wm_hw structure
5434  * index - The index of the byte to read.
5435  * data - Pointer to a byte to store the value read.
5436  *****************************************************************************/
5437 static int32_t
5438 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
5439 {
5440     int32_t status;
5441     uint16_t word = 0;
5442 
5443     status = wm_read_ich8_data(sc, index, 1, &word);
5444     if (status == 0) {
5445         *data = (uint8_t)word;
5446     }
5447 
5448     return status;
5449 }
5450 
5451 /******************************************************************************
5452  * Reads a word from the NVM using the ICH8 flash access registers.
5453  *
5454  * sc - pointer to wm_hw structure
5455  * index - The starting byte index of the word to read.
5456  * data - Pointer to a word to store the value read.
5457  *****************************************************************************/
5458 static int32_t
5459 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
5460 {
5461     int32_t status;
5462 
5463     status = wm_read_ich8_data(sc, index, 2, data);
5464     return status;
5465 }
5466 
5467 static int
5468 wm_check_mng_mode(struct wm_softc *sc)
5469 {
5470 	int rv;
5471 
5472 	switch (sc->sc_type) {
5473 	case WM_T_ICH8:
5474 	case WM_T_ICH9:
5475 	case WM_T_ICH10:
5476 		rv = wm_check_mng_mode_ich8lan(sc);
5477 		break;
5478 #if 0
5479 	case WM_T_82574:
5480 		/*
5481 		 * The function is provided in em driver, but it's not
5482 		 * used. Why?
5483 		 */
5484 		rv = wm_check_mng_mode_82574(sc);
5485 		break;
5486 #endif
5487 	case WM_T_82571:
5488 	case WM_T_82572:
5489 	case WM_T_82573:
5490 	case WM_T_80003:
5491 		rv = wm_check_mng_mode_generic(sc);
5492 		break;
5493 	default:
5494 		/* noting to do */
5495 		rv = 0;
5496 		break;
5497 	}
5498 
5499 	return rv;
5500 }
5501 
5502 static int
5503 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
5504 {
5505 	uint32_t fwsm;
5506 
5507 	fwsm = CSR_READ(sc, WMREG_FWSM);
5508 
5509 	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
5510 		return 1;
5511 
5512 	return 0;
5513 }
5514 
5515 #if 0
5516 static int
5517 wm_check_mng_mode_82574(struct wm_softc *sc)
5518 {
5519 	uint16_t data;
5520 
5521 	wm_read_eeprom(sc, NVM_INIT_CONTROL2_REG, 1, &data);
5522 
5523 	if ((data & NVM_INIT_CTRL2_MNGM) != 0)
5524 		return 1;
5525 
5526 	return 0;
5527 }
5528 #endif
5529 
5530 static int
5531 wm_check_mng_mode_generic(struct wm_softc *sc)
5532 {
5533 	uint32_t fwsm;
5534 
5535 	fwsm = CSR_READ(sc, WMREG_FWSM);
5536 
5537 	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
5538 		return 1;
5539 
5540 	return 0;
5541 }
5542 
5543 static void
5544 wm_get_hw_control(struct wm_softc *sc)
5545 {
5546 	uint32_t reg;
5547 
5548 	switch (sc->sc_type) {
5549 	case WM_T_82573:
5550 #if 0
5551 	case WM_T_82574:
5552 		/*
5553 		 * FreeBSD's em driver has the function for 82574 to checks
5554 		 * the management mode, but it's not used. Why?
5555 		 */
5556 #endif
5557 		reg = CSR_READ(sc, WMREG_SWSM);
5558 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
5559 		break;
5560 	case WM_T_82571:
5561 	case WM_T_82572:
5562 	case WM_T_80003:
5563 	case WM_T_ICH8:
5564 	case WM_T_ICH9:
5565 	case WM_T_ICH10:
5566 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
5567 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
5568 		break;
5569 	default:
5570 		break;
5571 	}
5572 }
5573 
5574 /* XXX Currently TBI only */
5575 static int
5576 wm_check_for_link(struct wm_softc *sc)
5577 {
5578 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5579 	uint32_t rxcw;
5580 	uint32_t ctrl;
5581 	uint32_t status;
5582 	uint32_t sig;
5583 
5584 	rxcw = CSR_READ(sc, WMREG_RXCW);
5585 	ctrl = CSR_READ(sc, WMREG_CTRL);
5586 	status = CSR_READ(sc, WMREG_STATUS);
5587 
5588 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
5589 
5590 	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
5591 		device_xname(sc->sc_dev), __func__,
5592 		((ctrl & CTRL_SWDPIN(1)) == sig),
5593 		((status & STATUS_LU) != 0),
5594 		((rxcw & RXCW_C) != 0)
5595 		    ));
5596 
5597 	/*
5598 	 * SWDPIN   LU RXCW
5599 	 *      0    0    0
5600 	 *      0    0    1	(should not happen)
5601 	 *      0    1    0	(should not happen)
5602 	 *      0    1    1	(should not happen)
5603 	 *      1    0    0	Disable autonego and force linkup
5604 	 *      1    0    1	got /C/ but not linkup yet
5605 	 *      1    1    0	(linkup)
5606 	 *      1    1    1	If IFM_AUTO, back to autonego
5607 	 *
5608 	 */
5609 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
5610 	    && ((status & STATUS_LU) == 0)
5611 	    && ((rxcw & RXCW_C) == 0)) {
5612 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
5613 			__func__));
5614 		sc->sc_tbi_linkup = 0;
5615 		/* Disable auto-negotiation in the TXCW register */
5616 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
5617 
5618 		/*
5619 		 * Force link-up and also force full-duplex.
5620 		 *
5621 		 * NOTE: CTRL was updated TFCE and RFCE automatically,
5622 		 * so we should update sc->sc_ctrl
5623 		 */
5624 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
5625 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5626 	} else if(((status & STATUS_LU) != 0)
5627 	    && ((rxcw & RXCW_C) != 0)
5628 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
5629 		sc->sc_tbi_linkup = 1;
5630 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
5631 			__func__));
5632 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5633 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
5634 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
5635 	    && ((rxcw & RXCW_C) != 0)) {
5636 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
5637 	} else {
5638 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
5639 			status));
5640 	}
5641 
5642 	return 0;
5643 }
5644