1 /*	$NetBSD: if_wm.c,v 1.181 2009/11/26 15:17:10 njoly Exp $	*/
2 
3 /*
4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*******************************************************************************
39 
40   Copyright (c) 2001-2005, Intel Corporation
41   All rights reserved.
42 
43   Redistribution and use in source and binary forms, with or without
44   modification, are permitted provided that the following conditions are met:
45 
46    1. Redistributions of source code must retain the above copyright notice,
47       this list of conditions and the following disclaimer.
48 
49    2. Redistributions in binary form must reproduce the above copyright
50       notice, this list of conditions and the following disclaimer in the
51       documentation and/or other materials provided with the distribution.
52 
53    3. Neither the name of the Intel Corporation nor the names of its
54       contributors may be used to endorse or promote products derived from
55       this software without specific prior written permission.
56 
57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67   POSSIBILITY OF SUCH DAMAGE.
68 
69 *******************************************************************************/
70 /*
71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72  *
73  * TODO (in order of importance):
74  *
75  *	- Rework how parameters are loaded from the EEPROM.
76  */
77 
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.181 2009/11/26 15:17:10 njoly Exp $");
80 
81 #include "bpfilter.h"
82 #include "rnd.h"
83 
84 #include <sys/param.h>
85 #include <sys/systm.h>
86 #include <sys/callout.h>
87 #include <sys/mbuf.h>
88 #include <sys/malloc.h>
89 #include <sys/kernel.h>
90 #include <sys/socket.h>
91 #include <sys/ioctl.h>
92 #include <sys/errno.h>
93 #include <sys/device.h>
94 #include <sys/queue.h>
95 #include <sys/syslog.h>
96 
97 #include <uvm/uvm_extern.h>		/* for PAGE_SIZE */
98 
99 #if NRND > 0
100 #include <sys/rnd.h>
101 #endif
102 
103 #include <net/if.h>
104 #include <net/if_dl.h>
105 #include <net/if_media.h>
106 #include <net/if_ether.h>
107 
108 #if NBPFILTER > 0
109 #include <net/bpf.h>
110 #endif
111 
112 #include <netinet/in.h>			/* XXX for struct ip */
113 #include <netinet/in_systm.h>		/* XXX for struct ip */
114 #include <netinet/ip.h>			/* XXX for struct ip */
115 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
116 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
117 
118 #include <sys/bus.h>
119 #include <sys/intr.h>
120 #include <machine/endian.h>
121 
122 #include <dev/mii/mii.h>
123 #include <dev/mii/miivar.h>
124 #include <dev/mii/mii_bitbang.h>
125 #include <dev/mii/ikphyreg.h>
126 
127 #include <dev/pci/pcireg.h>
128 #include <dev/pci/pcivar.h>
129 #include <dev/pci/pcidevs.h>
130 
131 #include <dev/pci/if_wmreg.h>
132 
133 #ifdef WM_DEBUG
134 #define	WM_DEBUG_LINK		0x01
135 #define	WM_DEBUG_TX		0x02
136 #define	WM_DEBUG_RX		0x04
137 #define	WM_DEBUG_GMII		0x08
138 int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK|WM_DEBUG_GMII;
139 
140 #define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
141 #else
142 #define	DPRINTF(x, y)	/* nothing */
143 #endif /* WM_DEBUG */
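
/*
 * DPRINTF() usage sketch (an illustration, not part of the original
 * driver): the extra parentheses let the macro hand a complete
 * argument list to printf:
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link up\n",
 *	    device_xname(sc->sc_dev)));
 */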
144 
145 /*
146  * Transmit descriptor list size.  Due to errata, we can only have
147  * 256 hardware descriptors in the ring on < 82544, but we use 4096
148  * on >= 82544.  We tell the upper layers that they can queue a lot
149  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
150  * of them at a time.
151  *
152  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
153  * chains containing many small mbufs have been observed in zero-copy
154  * situations with jumbo frames.
155  */
156 #define	WM_NTXSEGS		256
157 #define	WM_IFQUEUELEN		256
158 #define	WM_TXQUEUELEN_MAX	64
159 #define	WM_TXQUEUELEN_MAX_82547	16
160 #define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
161 #define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
162 #define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
163 #define	WM_NTXDESC_82542	256
164 #define	WM_NTXDESC_82544	4096
165 #define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
166 #define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
167 #define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
168 #define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
169 #define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
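
/*
 * Worked example of the ring arithmetic above: with the 4096-entry
 * ring, WM_NEXTTX(sc, 4095) == (4095 + 1) & 4095 == 0, so the index
 * wraps without a modulo.  This is why both sc_ntxdesc and sc_txnum
 * must be powers of two.
 */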
170 
171 #define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */
172 
173 /*
174  * Receive descriptor list size.  We have one Rx buffer for normal
175  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
176  * packet.  We allocate 256 receive descriptors, each with a 2k
177  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
178  */
179 #define	WM_NRXDESC		256
180 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
181 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
182 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
183 
184 /*
185  * Control structures are DMA'd to the i82542 chip.  We allocate them in
186  * a single clump that maps to a single DMA segment to make several things
187  * easier.
188  */
189 struct wm_control_data_82544 {
190 	/*
191 	 * The receive descriptors.
192 	 */
193 	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
194 
195 	/*
196 	 * The transmit descriptors.  Put these at the end, because
197 	 * we might use a smaller number of them.
198 	 */
199 	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
200 };
201 
202 struct wm_control_data_82542 {
203 	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
204 	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
205 };
206 
207 #define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
208 #define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
209 #define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
210 
211 /*
212  * Software state for transmit jobs.
213  */
214 struct wm_txsoft {
215 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
216 	bus_dmamap_t txs_dmamap;	/* our DMA map */
217 	int txs_firstdesc;		/* first descriptor in packet */
218 	int txs_lastdesc;		/* last descriptor in packet */
219 	int txs_ndesc;			/* # of descriptors used */
220 };
221 
222 /*
223  * Software state for receive buffers.  Each descriptor gets a
224  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
225  * more than one buffer, we chain them together.
226  */
227 struct wm_rxsoft {
228 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
229 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
230 };
231 
232 typedef enum {
233 	WM_T_unknown		= 0,
234 	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
235 	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
236 	WM_T_82543,			/* i82543 */
237 	WM_T_82544,			/* i82544 */
238 	WM_T_82540,			/* i82540 */
239 	WM_T_82545,			/* i82545 */
240 	WM_T_82545_3,			/* i82545 3.0+ */
241 	WM_T_82546,			/* i82546 */
242 	WM_T_82546_3,			/* i82546 3.0+ */
243 	WM_T_82541,			/* i82541 */
244 	WM_T_82541_2,			/* i82541 2.0+ */
245 	WM_T_82547,			/* i82547 */
246 	WM_T_82547_2,			/* i82547 2.0+ */
247 	WM_T_82571,			/* i82571 */
248 	WM_T_82572,			/* i82572 */
249 	WM_T_82573,			/* i82573 */
250 	WM_T_82574,			/* i82574 */
251 	WM_T_80003,			/* i80003 */
252 	WM_T_ICH8,			/* ICH8 LAN */
253 	WM_T_ICH9,			/* ICH9 LAN */
254 	WM_T_ICH10,			/* ICH10 LAN */
255 } wm_chip_type;
256 
257 #define WM_LINKUP_TIMEOUT	50
258 
259 /*
260  * Software state per device.
261  */
262 struct wm_softc {
263 	device_t sc_dev;		/* generic device information */
264 	bus_space_tag_t sc_st;		/* bus space tag */
265 	bus_space_handle_t sc_sh;	/* bus space handle */
266 	bus_space_tag_t sc_iot;		/* I/O space tag */
267 	bus_space_handle_t sc_ioh;	/* I/O space handle */
268 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
269 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
270 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
271 	struct ethercom sc_ethercom;	/* ethernet common data */
272 	pci_chipset_tag_t sc_pc;
273 	pcitag_t sc_pcitag;
274 
275 	wm_chip_type sc_type;		/* chip type */
276 	int sc_flags;			/* flags; see below */
277 	int sc_if_flags;		/* last if_flags */
278 	int sc_bus_speed;		/* PCI/PCIX bus speed */
279 	int sc_pcix_offset;		/* PCIX capability register offset */
280 	int sc_flowflags;		/* 802.3x flow control flags */
281 
282 	void *sc_ih;			/* interrupt cookie */
283 
284 	int sc_ee_addrbits;		/* EEPROM address bits */
285 
286 	struct mii_data sc_mii;		/* MII/media information */
287 
288 	callout_t sc_tick_ch;		/* tick callout */
289 
290 	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
291 #define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
292 
293 	int		sc_align_tweak;
294 
295 	/*
296 	 * Software state for the transmit and receive descriptors.
297 	 */
298 	int			sc_txnum;	/* must be a power of two */
299 	struct wm_txsoft	sc_txsoft[WM_TXQUEUELEN_MAX];
300 	struct wm_rxsoft	sc_rxsoft[WM_NRXDESC];
301 
302 	/*
303 	 * Control data structures.
304 	 */
305 	int			sc_ntxdesc;	/* must be a power of two */
306 	struct wm_control_data_82544 *sc_control_data;
307 #define	sc_txdescs	sc_control_data->wcd_txdescs
308 #define	sc_rxdescs	sc_control_data->wcd_rxdescs
309 
310 #ifdef WM_EVENT_COUNTERS
311 	/* Event counters. */
312 	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
313 	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
314 	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
315 	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
316 	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
317 	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
318 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
319 
320 	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
321 	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
322 	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
323 	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
324 	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
325 	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
326 	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
327 	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */
328 
329 	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
330 	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */
331 
332 	struct evcnt sc_ev_tu;		/* Tx underrun */
333 
334 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
335 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
336 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
337 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
338 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
339 #endif /* WM_EVENT_COUNTERS */
340 
341 	bus_addr_t sc_tdt_reg;		/* offset of TDT register */
342 
343 	int	sc_txfree;		/* number of free Tx descriptors */
344 	int	sc_txnext;		/* next ready Tx descriptor */
345 
346 	int	sc_txsfree;		/* number of free Tx jobs */
347 	int	sc_txsnext;		/* next free Tx job */
348 	int	sc_txsdirty;		/* dirty Tx jobs */
349 
350 	/* These 5 variables are used only on the 82547. */
351 	int	sc_txfifo_size;		/* Tx FIFO size */
352 	int	sc_txfifo_head;		/* current head of FIFO */
353 	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
354 	int	sc_txfifo_stall;	/* Tx FIFO is stalled */
355 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
356 
357 	bus_addr_t sc_rdt_reg;		/* offset of RDT register */
358 
359 	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
360 	int	sc_rxdiscard;
361 	int	sc_rxlen;
362 	struct mbuf *sc_rxhead;
363 	struct mbuf *sc_rxtail;
364 	struct mbuf **sc_rxtailp;
365 
366 	uint32_t sc_ctrl;		/* prototype CTRL register */
367 #if 0
368 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
369 #endif
370 	uint32_t sc_icr;		/* prototype interrupt bits */
371 	uint32_t sc_itr;		/* prototype intr throttling reg */
372 	uint32_t sc_tctl;		/* prototype TCTL register */
373 	uint32_t sc_rctl;		/* prototype RCTL register */
374 	uint32_t sc_txcw;		/* prototype TXCW register */
375 	uint32_t sc_tipg;		/* prototype TIPG register */
376 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
377 	uint32_t sc_pba;		/* prototype PBA register */
378 
379 	int sc_tbi_linkup;		/* TBI link status */
380 	int sc_tbi_anegticks;		/* autonegotiation ticks */
381 	int sc_tbi_ticks;		/* tbi ticks */
382 	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
383 	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */
384 
385 	int sc_mchash_type;		/* multicast filter offset */
386 
387 #if NRND > 0
388 	rndsource_element_t rnd_source;	/* random source */
389 #endif
390 	int sc_ich8_flash_base;
391 	int sc_ich8_flash_bank_size;
392 };
393 
394 #define	WM_RXCHAIN_RESET(sc)						\
395 do {									\
396 	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
397 	*(sc)->sc_rxtailp = NULL;					\
398 	(sc)->sc_rxlen = 0;						\
399 } while (/*CONSTCOND*/0)
400 
401 #define	WM_RXCHAIN_LINK(sc, m)						\
402 do {									\
403 	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
404 	(sc)->sc_rxtailp = &(m)->m_next;				\
405 } while (/*CONSTCOND*/0)
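
/*
 * Note on the two macros above: sc_rxtailp always points at the
 * m_next field of the last mbuf in the chain (or at sc_rxhead when
 * the chain is empty), so WM_RXCHAIN_LINK() appends in O(1) without
 * walking the chain.
 */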
406 
407 /* sc_flags */
408 #define	WM_F_HAS_MII		0x0001	/* has MII */
409 #define	WM_F_EEPROM_HANDSHAKE	0x0002	/* requires EEPROM handshake */
410 #define	WM_F_EEPROM_SEMAPHORE	0x0004	/* EEPROM with semaphore */
411 #define	WM_F_EEPROM_EERDEEWR	0x0008	/* EEPROM access via EERD/EEWR */
412 #define	WM_F_EEPROM_SPI		0x0010	/* EEPROM is SPI */
413 #define	WM_F_EEPROM_FLASH	0x0020	/* EEPROM is FLASH */
414 #define	WM_F_EEPROM_INVALID	0x0040	/* EEPROM not present (bad checksum) */
415 #define	WM_F_IOH_VALID		0x0080	/* I/O handle is valid */
416 #define	WM_F_BUS64		0x0100	/* bus is 64-bit */
417 #define	WM_F_PCIX		0x0200	/* bus is PCI-X */
418 #define	WM_F_CSA		0x0400	/* bus is CSA */
419 #define	WM_F_PCIE		0x0800	/* bus is PCI-Express */
420 #define WM_F_SWFW_SYNC		0x1000  /* Software-Firmware synchronisation */
421 #define WM_F_SWFWHW_SYNC	0x2000  /* Software-Firmware-Hardware synchronisation */
422 
423 #ifdef WM_EVENT_COUNTERS
424 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
425 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
426 #else
427 #define	WM_EVCNT_INCR(ev)	/* nothing */
428 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
429 #endif
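
/*
 * Usage sketch (illustrative only): WM_EVCNT_INCR(&sc->sc_ev_rxintr)
 * from the Rx interrupt path.  Both macros compile to nothing when
 * WM_EVENT_COUNTERS is not defined.
 */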
430 
431 #define	CSR_READ(sc, reg)						\
432 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
433 #define	CSR_WRITE(sc, reg, val)						\
434 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
435 #define	CSR_WRITE_FLUSH(sc)						\
436 	(void) CSR_READ((sc), WMREG_STATUS)
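
/*
 * CSR_WRITE_FLUSH() reads STATUS purely for its side effect: a read
 * forces any posted writes ahead of it to actually reach the device.
 */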
437 
438 #define ICH8_FLASH_READ32(sc, reg) \
439 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
440 #define ICH8_FLASH_WRITE32(sc, reg, data) \
441 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
442 
443 #define ICH8_FLASH_READ16(sc, reg) \
444 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
445 #define ICH8_FLASH_WRITE16(sc, reg, data) \
446 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
447 
448 #define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
449 #define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))
450 
451 #define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
452 #define	WM_CDTXADDR_HI(sc, x)						\
453 	(sizeof(bus_addr_t) == 8 ?					\
454 	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)
455 
456 #define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
457 #define	WM_CDRXADDR_HI(sc, x)						\
458 	(sizeof(bus_addr_t) == 8 ?					\
459 	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
460 
461 #define	WM_CDTXSYNC(sc, x, n, ops)					\
462 do {									\
463 	int __x, __n;							\
464 									\
465 	__x = (x);							\
466 	__n = (n);							\
467 									\
468 	/* If it will wrap around, sync to the end of the ring. */	\
469 	if ((__x + __n) > WM_NTXDESC(sc)) {				\
470 		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
471 		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
472 		    (WM_NTXDESC(sc) - __x), (ops));			\
473 		__n -= (WM_NTXDESC(sc) - __x);				\
474 		__x = 0;						\
475 	}								\
476 									\
477 	/* Now sync whatever is left. */				\
478 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
479 	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
480 } while (/*CONSTCOND*/0)
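
/*
 * Example, assuming the 4096-entry ring: WM_CDTXSYNC(sc, 4094, 4, ops)
 * syncs descriptors 4094-4095 in the first bus_dmamap_sync() call and
 * descriptors 0-1 in the second, because the range wraps past the end
 * of the ring.
 */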
481 
482 #define	WM_CDRXSYNC(sc, x, ops)						\
483 do {									\
484 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
485 	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
486 } while (/*CONSTCOND*/0)
487 
488 #define	WM_INIT_RXDESC(sc, x)						\
489 do {									\
490 	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
491 	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
492 	struct mbuf *__m = __rxs->rxs_mbuf;				\
493 									\
494 	/*								\
495 	 * Note: We scoot the packet forward 2 bytes in the buffer	\
496 	 * so that the payload after the Ethernet header is aligned	\
497 	 * to a 4-byte boundary.					\
498 	 *								\
499 	 * XXX BRAINDAMAGE ALERT!					\
500 	 * The stupid chip uses the same size for every buffer, which	\
501 	 * is set in the Receive Control register.  We are using the 2K	\
502 	 * size option, but what we REALLY want is (2K - 2)!  For this	\
503 	 * reason, we can't "scoot" packets longer than the standard	\
504 	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
505 	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
506 	 * the upper layer copy the headers.				\
507 	 */								\
508 	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
509 									\
510 	wm_set_dma_addr(&__rxd->wrx_addr,				\
511 	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
512 	__rxd->wrx_len = 0;						\
513 	__rxd->wrx_cksum = 0;						\
514 	__rxd->wrx_status = 0;						\
515 	__rxd->wrx_errors = 0;						\
516 	__rxd->wrx_special = 0;						\
517 	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
518 									\
519 	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
520 } while (/*CONSTCOND*/0)
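
/*
 * Concretely: with sc_align_tweak == 2, a standard-MTU frame starts
 * 2 bytes into the 2K buffer, so the IP header following the 14-byte
 * Ethernet header lands on a 4-byte boundary.  A frame that could
 * fill the whole 2K buffer leaves no room for that offset, which is
 * why sc_align_tweak is set to 0 in that case.
 */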
521 
522 static void	wm_start(struct ifnet *);
523 static void	wm_watchdog(struct ifnet *);
524 static int	wm_ioctl(struct ifnet *, u_long, void *);
525 static int	wm_init(struct ifnet *);
526 static void	wm_stop(struct ifnet *, int);
527 
528 static void	wm_reset(struct wm_softc *);
529 static void	wm_rxdrain(struct wm_softc *);
530 static int	wm_add_rxbuf(struct wm_softc *, int);
531 static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
532 static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
533 static int	wm_validate_eeprom_checksum(struct wm_softc *);
534 static void	wm_tick(void *);
535 
536 static void	wm_set_filter(struct wm_softc *);
537 
538 static int	wm_intr(void *);
539 static void	wm_txintr(struct wm_softc *);
540 static void	wm_rxintr(struct wm_softc *);
541 static void	wm_linkintr(struct wm_softc *, uint32_t);
542 
543 static void	wm_tbi_mediainit(struct wm_softc *);
544 static int	wm_tbi_mediachange(struct ifnet *);
545 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
546 
547 static void	wm_tbi_set_linkled(struct wm_softc *);
548 static void	wm_tbi_check_link(struct wm_softc *);
549 
550 static void	wm_gmii_reset(struct wm_softc *);
551 
552 static int	wm_gmii_i82543_readreg(device_t, int, int);
553 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
554 
555 static int	wm_gmii_i82544_readreg(device_t, int, int);
556 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
557 
558 static int	wm_gmii_i80003_readreg(device_t, int, int);
559 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
560 
561 static int	wm_gmii_bm_readreg(device_t, int, int);
562 static void	wm_gmii_bm_writereg(device_t, int, int, int);
563 
564 static void	wm_gmii_statchg(device_t);
565 
566 static void	wm_gmii_mediainit(struct wm_softc *);
567 static int	wm_gmii_mediachange(struct ifnet *);
568 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
569 
570 static int	wm_kmrn_readreg(struct wm_softc *, int);
571 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
572 
573 static int	wm_match(device_t, cfdata_t, void *);
574 static void	wm_attach(device_t, device_t, void *);
575 static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
576 static void	wm_get_auto_rd_done(struct wm_softc *);
577 static int	wm_get_swsm_semaphore(struct wm_softc *);
578 static void	wm_put_swsm_semaphore(struct wm_softc *);
579 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
580 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
581 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
582 static int	wm_get_swfwhw_semaphore(struct wm_softc *);
583 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
584 
585 static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
586 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
587 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
588 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
589 		     uint32_t, uint16_t *);
590 static int32_t	wm_read_ich8_byte(struct wm_softc *sc, uint32_t, uint8_t *);
591 static int32_t	wm_read_ich8_word(struct wm_softc *sc, uint32_t, uint16_t *);
592 static void	wm_82547_txfifo_stall(void *);
593 static int	wm_check_mng_mode(struct wm_softc *);
594 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
595 #if 0
596 static int	wm_check_mng_mode_82574(struct wm_softc *);
597 #endif
598 static int	wm_check_mng_mode_generic(struct wm_softc *);
599 static void	wm_get_hw_control(struct wm_softc *);
600 static int	wm_check_for_link(struct wm_softc *);
601 
602 CFATTACH_DECL_NEW(wm, sizeof(struct wm_softc),
603     wm_match, wm_attach, NULL, NULL);
604 
605 
606 /*
607  * Devices supported by this driver.
608  */
609 static const struct wm_product {
610 	pci_vendor_id_t		wmp_vendor;
611 	pci_product_id_t	wmp_product;
612 	const char		*wmp_name;
613 	wm_chip_type		wmp_type;
614 	int			wmp_flags;
615 #define	WMP_F_1000X		0x01
616 #define	WMP_F_1000T		0x02
617 } wm_products[] = {
618 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
619 	  "Intel i82542 1000BASE-X Ethernet",
620 	  WM_T_82542_2_1,	WMP_F_1000X },
621 
622 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
623 	  "Intel i82543GC 1000BASE-X Ethernet",
624 	  WM_T_82543,		WMP_F_1000X },
625 
626 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
627 	  "Intel i82543GC 1000BASE-T Ethernet",
628 	  WM_T_82543,		WMP_F_1000T },
629 
630 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
631 	  "Intel i82544EI 1000BASE-T Ethernet",
632 	  WM_T_82544,		WMP_F_1000T },
633 
634 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
635 	  "Intel i82544EI 1000BASE-X Ethernet",
636 	  WM_T_82544,		WMP_F_1000X },
637 
638 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
639 	  "Intel i82544GC 1000BASE-T Ethernet",
640 	  WM_T_82544,		WMP_F_1000T },
641 
642 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
643 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
644 	  WM_T_82544,		WMP_F_1000T },
645 
646 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
647 	  "Intel i82540EM 1000BASE-T Ethernet",
648 	  WM_T_82540,		WMP_F_1000T },
649 
650 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
651 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
652 	  WM_T_82540,		WMP_F_1000T },
653 
654 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
655 	  "Intel i82540EP 1000BASE-T Ethernet",
656 	  WM_T_82540,		WMP_F_1000T },
657 
658 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
659 	  "Intel i82540EP 1000BASE-T Ethernet",
660 	  WM_T_82540,		WMP_F_1000T },
661 
662 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
663 	  "Intel i82540EP 1000BASE-T Ethernet",
664 	  WM_T_82540,		WMP_F_1000T },
665 
666 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
667 	  "Intel i82545EM 1000BASE-T Ethernet",
668 	  WM_T_82545,		WMP_F_1000T },
669 
670 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
671 	  "Intel i82545GM 1000BASE-T Ethernet",
672 	  WM_T_82545_3,		WMP_F_1000T },
673 
674 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
675 	  "Intel i82545GM 1000BASE-X Ethernet",
676 	  WM_T_82545_3,		WMP_F_1000X },
677 #if 0
678 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
679 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
680 	  WM_T_82545_3,		WMP_F_SERDES },
681 #endif
682 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
683 	  "Intel i82546EB 1000BASE-T Ethernet",
684 	  WM_T_82546,		WMP_F_1000T },
685 
686 	{ PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82546EB_QUAD,
687 	  "Intel i82546EB 1000BASE-T Ethernet",
688 	  WM_T_82546,		WMP_F_1000T },
689 
690 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
691 	  "Intel i82545EM 1000BASE-X Ethernet",
692 	  WM_T_82545,		WMP_F_1000X },
693 
694 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
695 	  "Intel i82546EB 1000BASE-X Ethernet",
696 	  WM_T_82546,		WMP_F_1000X },
697 
698 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
699 	  "Intel i82546GB 1000BASE-T Ethernet",
700 	  WM_T_82546_3,		WMP_F_1000T },
701 
702 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
703 	  "Intel i82546GB 1000BASE-X Ethernet",
704 	  WM_T_82546_3,		WMP_F_1000X },
705 #if 0
706 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
707 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
708 	  WM_T_82546_3,		WMP_F_SERDES },
709 #endif
710 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
711 	  "i82546GB quad-port Gigabit Ethernet",
712 	  WM_T_82546_3,		WMP_F_1000T },
713 
714 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
715 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
716 	  WM_T_82546_3,		WMP_F_1000T },
717 
718 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
719 	  "Intel PRO/1000MT (82546GB)",
720 	  WM_T_82546_3,		WMP_F_1000T },
721 
722 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
723 	  "Intel i82541EI 1000BASE-T Ethernet",
724 	  WM_T_82541,		WMP_F_1000T },
725 
726 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
727 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
728 	  WM_T_82541,		WMP_F_1000T },
729 
730 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
731 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
732 	  WM_T_82541,		WMP_F_1000T },
733 
734 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
735 	  "Intel i82541ER 1000BASE-T Ethernet",
736 	  WM_T_82541_2,		WMP_F_1000T },
737 
738 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
739 	  "Intel i82541GI 1000BASE-T Ethernet",
740 	  WM_T_82541_2,		WMP_F_1000T },
741 
742 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
743 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
744 	  WM_T_82541_2,		WMP_F_1000T },
745 
746 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
747 	  "Intel i82541PI 1000BASE-T Ethernet",
748 	  WM_T_82541_2,		WMP_F_1000T },
749 
750 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
751 	  "Intel i82547EI 1000BASE-T Ethernet",
752 	  WM_T_82547,		WMP_F_1000T },
753 
754 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
755 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
756 	  WM_T_82547,		WMP_F_1000T },
757 
758 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
759 	  "Intel i82547GI 1000BASE-T Ethernet",
760 	  WM_T_82547_2,		WMP_F_1000T },
761 
762 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
763 	  "Intel PRO/1000 PT (82571EB)",
764 	  WM_T_82571,		WMP_F_1000T },
765 
766 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
767 	  "Intel PRO/1000 PF (82571EB)",
768 	  WM_T_82571,		WMP_F_1000X },
769 #if 0
770 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
771 	  "Intel PRO/1000 PB (82571EB)",
772 	  WM_T_82571,		WMP_F_SERDES },
773 #endif
774 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
775 	  "Intel PRO/1000 QT (82571EB)",
776 	  WM_T_82571,		WMP_F_1000T },
777 
778 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
779 	  "Intel i82572EI 1000baseT Ethernet",
780 	  WM_T_82572,		WMP_F_1000T },
781 
782 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
783 	  "Intel PRO/1000 PT Quad Port Server Adapter",
784 	  WM_T_82571,		WMP_F_1000T, },
785 
786 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
787 	  "Intel i82572EI 1000baseX Ethernet",
788 	  WM_T_82572,		WMP_F_1000X },
789 #if 0
790 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
791 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
792 	  WM_T_82572,		WMP_F_SERDES },
793 #endif
794 
795 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
796 	  "Intel i82572EI 1000baseT Ethernet",
797 	  WM_T_82572,		WMP_F_1000T },
798 
799 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
800 	  "Intel i82573E",
801 	  WM_T_82573,		WMP_F_1000T },
802 
803 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
804 	  "Intel i82573E IAMT",
805 	  WM_T_82573,		WMP_F_1000T },
806 
807 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
808 	  "Intel i82573L Gigabit Ethernet",
809 	  WM_T_82573,		WMP_F_1000T },
810 
811 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
812 	  "Intel i82574L",
813 	  WM_T_82574,		WMP_F_1000T },
814 
815 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
816 	  "i80003 dual 1000baseT Ethernet",
817 	  WM_T_80003,		WMP_F_1000T },
818 
819 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
820 	  "i80003 dual 1000baseX Ethernet",
821 	  WM_T_80003,		WMP_F_1000T },
822 #if 0
823 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
824 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
825 	  WM_T_80003,		WMP_F_SERDES },
826 #endif
827 
828 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
829 	  "Intel i80003 1000baseT Ethernet",
830 	  WM_T_80003,		WMP_F_1000T },
831 #if 0
832 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
833 	  "Intel i80003 Gigabit Ethernet (SERDES)",
834 	  WM_T_80003,		WMP_F_SERDES },
835 #endif
836 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
837 	  "Intel i82801H (M_AMT) LAN Controller",
838 	  WM_T_ICH8,		WMP_F_1000T },
839 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
840 	  "Intel i82801H (AMT) LAN Controller",
841 	  WM_T_ICH8,		WMP_F_1000T },
842 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
843 	  "Intel i82801H LAN Controller",
844 	  WM_T_ICH8,		WMP_F_1000T },
845 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
846 	  "Intel i82801H (IFE) LAN Controller",
847 	  WM_T_ICH8,		WMP_F_1000T },
848 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
849 	  "Intel i82801H (M) LAN Controller",
850 	  WM_T_ICH8,		WMP_F_1000T },
851 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
852 	  "Intel i82801H IFE (GT) LAN Controller",
853 	  WM_T_ICH8,		WMP_F_1000T },
854 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
855 	  "Intel i82801H IFE (G) LAN Controller",
856 	  WM_T_ICH8,		WMP_F_1000T },
857 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
858 	  "82801I (AMT) LAN Controller",
859 	  WM_T_ICH9,		WMP_F_1000T },
860 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
861 	  "82801I LAN Controller",
862 	  WM_T_ICH9,		WMP_F_1000T },
863 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
864 	  "82801I (G) LAN Controller",
865 	  WM_T_ICH9,		WMP_F_1000T },
866 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
867 	  "82801I (GT) LAN Controller",
868 	  WM_T_ICH9,		WMP_F_1000T },
869 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
870 	  "82801I (C) LAN Controller",
871 	  WM_T_ICH9,		WMP_F_1000T },
872 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
873 	  "82801I mobile LAN Controller",
874 	  WM_T_ICH9,		WMP_F_1000T },
875 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
876 	  "82801I mobile (V) LAN Controller",
877 	  WM_T_ICH9,		WMP_F_1000T },
878 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
879 	  "82801I mobile (AMT) LAN Controller",
880 	  WM_T_ICH9,		WMP_F_1000T },
881 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82567LM_3,
882 	  "82567LM-3 LAN Controller",
883 	  WM_T_ICH10,		WMP_F_1000T },
884 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82567LF_3,
885 	  "82567LF-3 LAN Controller",
886 	  WM_T_ICH10,		WMP_F_1000T },
887 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
888 	  "i82801J (LF) LAN Controller",
889 	  WM_T_ICH10,		WMP_F_1000T },
890 	{ 0,			0,
891 	  NULL,
892 	  0,			0 },
893 };
894 
895 #ifdef WM_EVENT_COUNTERS
896 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
897 #endif /* WM_EVENT_COUNTERS */
898 
899 #if 0 /* Not currently used */
900 static inline uint32_t
901 wm_io_read(struct wm_softc *sc, int reg)
902 {
903 
904 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
905 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
906 }
907 #endif
908 
909 static inline void
910 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
911 {
912 
913 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
914 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
915 }
916 
917 static inline void
918 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
919 {
920 	wa->wa_low = htole32(v & 0xffffffffU);
921 	if (sizeof(bus_addr_t) == 8)
922 		wa->wa_high = htole32((uint64_t) v >> 32);
923 	else
924 		wa->wa_high = 0;
925 }
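
/*
 * Example of the split above: on a platform with a 64-bit bus_addr_t,
 * wm_set_dma_addr(wa, 0x123456780) stores wa_low = 0x23456780 and
 * wa_high = 0x1 (both little-endian); with a 32-bit bus_addr_t,
 * wa_high is always 0.
 */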
926 
927 static const struct wm_product *
928 wm_lookup(const struct pci_attach_args *pa)
929 {
930 	const struct wm_product *wmp;
931 
932 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
933 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
934 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
935 			return (wmp);
936 	}
937 	return (NULL);
938 }
939 
940 static int
941 wm_match(device_t parent, cfdata_t cf, void *aux)
942 {
943 	struct pci_attach_args *pa = aux;
944 
945 	if (wm_lookup(pa) != NULL)
946 		return (1);
947 
948 	return (0);
949 }
950 
951 static void
952 wm_attach(device_t parent, device_t self, void *aux)
953 {
954 	struct wm_softc *sc = device_private(self);
955 	struct pci_attach_args *pa = aux;
956 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
957 	pci_chipset_tag_t pc = pa->pa_pc;
958 	pci_intr_handle_t ih;
959 	size_t cdata_size;
960 	const char *intrstr = NULL;
961 	const char *eetype, *xname;
962 	bus_space_tag_t memt;
963 	bus_space_handle_t memh;
964 	bus_dma_segment_t seg;
965 	int memh_valid;
966 	int i, rseg, error;
967 	const struct wm_product *wmp;
968 	prop_data_t ea;
969 	prop_number_t pn;
970 	uint8_t enaddr[ETHER_ADDR_LEN];
971 	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
972 	pcireg_t preg, memtype;
973 	uint32_t reg;
974 
975 	sc->sc_dev = self;
976 	callout_init(&sc->sc_tick_ch, 0);
977 
978 	wmp = wm_lookup(pa);
979 	if (wmp == NULL) {
980 		printf("\n");
981 		panic("wm_attach: impossible");
982 	}
983 
984 	sc->sc_pc = pa->pa_pc;
985 	sc->sc_pcitag = pa->pa_tag;
986 
987 	if (pci_dma64_available(pa))
988 		sc->sc_dmat = pa->pa_dmat64;
989 	else
990 		sc->sc_dmat = pa->pa_dmat;
991 
992 	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
993 	aprint_naive(": Ethernet controller\n");
994 	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);
995 
996 	sc->sc_type = wmp->wmp_type;
997 	if (sc->sc_type < WM_T_82543) {
998 		if (preg < 2) {
999 			aprint_error_dev(sc->sc_dev,
1000 			    "i82542 must be at least rev. 2\n");
1001 			return;
1002 		}
1003 		if (preg < 3)
1004 			sc->sc_type = WM_T_82542_2_0;
1005 	}
1006 
1007 	/*
1008 	 * Map the device.  All devices support memory-mapped access,
1009 	 * and it is really required for normal operation.
1010 	 */
1011 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1012 	switch (memtype) {
1013 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1014 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1015 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1016 		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
1017 		break;
1018 	default:
1019 		memh_valid = 0;
1020 	}
1021 
1022 	if (memh_valid) {
1023 		sc->sc_st = memt;
1024 		sc->sc_sh = memh;
1025 	} else {
1026 		aprint_error_dev(sc->sc_dev,
1027 		    "unable to map device registers\n");
1028 		return;
1029 	}
1030 
1031 	/*
1032 	 * In addition, i82544 and later support I/O mapped indirect
1033 	 * register access.  It is not desirable (nor supported in
1034 	 * this driver) to use it for normal operation, though it is
1035 	 * required to work around bugs in some chip versions.
1036 	 */
1037 	if (sc->sc_type >= WM_T_82544) {
1038 		/* First we have to find the I/O BAR. */
1039 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1040 			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
1041 			    PCI_MAPREG_TYPE_IO)
1042 				break;
1043 		}
1044 		if (i == PCI_MAPREG_END)
1045 			aprint_error_dev(sc->sc_dev,
1046 			    "WARNING: unable to find I/O BAR\n");
1047 		else {
1048 			/*
1049 			 * Apparently the i8254x doesn't respond when the
1050 			 * I/O BAR is 0, which looks somewhat like it hasn't
1051 			 * been configured.
1052 			 */
1053 			preg = pci_conf_read(pc, pa->pa_tag, i);
1054 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1055 				aprint_error_dev(sc->sc_dev,
1056 				    "WARNING: I/O BAR at zero.\n");
1057 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1058 					0, &sc->sc_iot, &sc->sc_ioh,
1059 					NULL, NULL) == 0) {
1060 				sc->sc_flags |= WM_F_IOH_VALID;
1061 			} else {
1062 				aprint_error_dev(sc->sc_dev,
1063 				    "WARNING: unable to map I/O space\n");
1064 			}
1065 		}
1066 
1067 	}
1068 
1069 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
1070 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1071 	preg |= PCI_COMMAND_MASTER_ENABLE;
1072 	if (sc->sc_type < WM_T_82542_2_1)
1073 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1074 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1075 
1076 	/* power up chip */
1077 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1078 	    NULL)) && error != EOPNOTSUPP) {
1079 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1080 		return;
1081 	}
1082 
1083 	/*
1084 	 * Map and establish our interrupt.
1085 	 */
1086 	if (pci_intr_map(pa, &ih)) {
1087 		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
1088 		return;
1089 	}
1090 	intrstr = pci_intr_string(pc, ih);
1091 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
1092 	if (sc->sc_ih == NULL) {
1093 		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
1094 		if (intrstr != NULL)
1095 			aprint_error(" at %s", intrstr);
1096 		aprint_error("\n");
1097 		return;
1098 	}
1099 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
1100 
1101 	/*
1102 	 * Determine a few things about the bus we're connected to.
1103 	 */
1104 	if (sc->sc_type < WM_T_82543) {
1105 		/* We don't really know the bus characteristics here. */
1106 		sc->sc_bus_speed = 33;
1107 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1108 		/*
1109 		 * CSA (Communication Streaming Architecture) is about as fast
1110 		 * as a 32-bit 66MHz PCI bus.
1111 		 */
1112 		sc->sc_flags |= WM_F_CSA;
1113 		sc->sc_bus_speed = 66;
1114 		aprint_verbose_dev(sc->sc_dev,
1115 		    "Communication Streaming Architecture\n");
1116 		if (sc->sc_type == WM_T_82547) {
1117 			callout_init(&sc->sc_txfifo_ch, 0);
1118 			callout_setfunc(&sc->sc_txfifo_ch,
1119 					wm_82547_txfifo_stall, sc);
1120 			aprint_verbose_dev(sc->sc_dev,
1121 			    "using 82547 Tx FIFO stall work-around\n");
1122 		}
1123 	} else if (sc->sc_type >= WM_T_82571) {
1124 		sc->sc_flags |= WM_F_PCIE;
1125 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1126 			&& (sc->sc_type != WM_T_ICH10))
1127 			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
1128 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1129 	} else {
1130 		reg = CSR_READ(sc, WMREG_STATUS);
1131 		if (reg & STATUS_BUS64)
1132 			sc->sc_flags |= WM_F_BUS64;
1133 		if ((reg & STATUS_PCIX_MODE) != 0) {
1134 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1135 
1136 			sc->sc_flags |= WM_F_PCIX;
1137 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1138 					       PCI_CAP_PCIX,
1139 					       &sc->sc_pcix_offset, NULL) == 0)
1140 				aprint_error_dev(sc->sc_dev,
1141 				    "unable to find PCIX capability\n");
1142 			else if (sc->sc_type != WM_T_82545_3 &&
1143 				 sc->sc_type != WM_T_82546_3) {
1144 				/*
1145 				 * Work around a problem caused by the BIOS
1146 				 * setting the max memory read byte count
1147 				 * incorrectly.
1148 				 */
1149 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1150 				    sc->sc_pcix_offset + PCI_PCIX_CMD);
1151 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1152 				    sc->sc_pcix_offset + PCI_PCIX_STATUS);
1153 
1154 				bytecnt =
1155 				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
1156 				    PCI_PCIX_CMD_BYTECNT_SHIFT;
1157 				maxb =
1158 				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
1159 				    PCI_PCIX_STATUS_MAXB_SHIFT;
1160 				if (bytecnt > maxb) {
1161 					aprint_verbose_dev(sc->sc_dev,
1162 					    "resetting PCI-X MMRBC: %d -> %d\n",
1163 					    512 << bytecnt, 512 << maxb);
1164 					pcix_cmd = (pcix_cmd &
1165 					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
1166 					   (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
1167 					pci_conf_write(pa->pa_pc, pa->pa_tag,
1168 					    sc->sc_pcix_offset + PCI_PCIX_CMD,
1169 					    pcix_cmd);
1170 				}
1171 			}
1172 		}
1173 		/*
1174 		 * The quad port adapter is special; it has a PCIX-PCIX
1175 		 * bridge on the board, and can run the secondary bus at
1176 		 * a higher speed.
1177 		 */
1178 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1179 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1180 								      : 66;
1181 		} else if (sc->sc_flags & WM_F_PCIX) {
1182 			switch (reg & STATUS_PCIXSPD_MASK) {
1183 			case STATUS_PCIXSPD_50_66:
1184 				sc->sc_bus_speed = 66;
1185 				break;
1186 			case STATUS_PCIXSPD_66_100:
1187 				sc->sc_bus_speed = 100;
1188 				break;
1189 			case STATUS_PCIXSPD_100_133:
1190 				sc->sc_bus_speed = 133;
1191 				break;
1192 			default:
1193 				aprint_error_dev(sc->sc_dev,
1194 				    "unknown PCIXSPD %d; assuming 66MHz\n",
1195 				    reg & STATUS_PCIXSPD_MASK);
1196 				sc->sc_bus_speed = 66;
1197 			}
1198 		} else
1199 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1200 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1201 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1202 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1203 	}
1204 
1205 	/*
1206 	 * Allocate the control data structures, and create and load the
1207 	 * DMA map for it.
1208 	 *
1209 	 * NOTE: All Tx descriptors must be in the same 4G segment of
1210 	 * memory.  So must Rx descriptors.  We simplify by allocating
1211 	 * both sets within the same 4G segment.
1212 	 */
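	/*
	 * The (bus_size_t)0x100000000ULL boundary argument passed to
	 * bus_dmamem_alloc() below is what enforces that single-4G-segment
	 * requirement.
	 */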
1213 	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1214 	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
1215 	cdata_size = sc->sc_type < WM_T_82544 ?
1216 	    sizeof(struct wm_control_data_82542) :
1217 	    sizeof(struct wm_control_data_82544);
1218 	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE,
1219 				      (bus_size_t) 0x100000000ULL,
1220 				      &seg, 1, &rseg, 0)) != 0) {
1221 		aprint_error_dev(sc->sc_dev,
1222 		    "unable to allocate control data, error = %d\n",
1223 		    error);
1224 		goto fail_0;
1225 	}
1226 
1227 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size,
1228 				    (void **)&sc->sc_control_data,
1229 				    BUS_DMA_COHERENT)) != 0) {
1230 		aprint_error_dev(sc->sc_dev,
1231 		    "unable to map control data, error = %d\n", error);
1232 		goto fail_1;
1233 	}
1234 
1235 	if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size,
1236 				       0, 0, &sc->sc_cddmamap)) != 0) {
1237 		aprint_error_dev(sc->sc_dev,
1238 		    "unable to create control data DMA map, error = %d\n",
1239 		    error);
1240 		goto fail_2;
1241 	}
1242 
1243 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1244 				     sc->sc_control_data, cdata_size, NULL,
1245 				     0)) != 0) {
1246 		aprint_error_dev(sc->sc_dev,
1247 		    "unable to load control data DMA map, error = %d\n",
1248 		    error);
1249 		goto fail_3;
1250 	}
1251 
1252 
1253 	/*
1254 	 * Create the transmit buffer DMA maps.
1255 	 */
1256 	WM_TXQUEUELEN(sc) =
1257 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1258 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1259 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1260 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1261 					       WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1262 					  &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1263 			aprint_error_dev(sc->sc_dev,
1264 			    "unable to create Tx DMA map %d, error = %d\n",
1265 			    i, error);
1266 			goto fail_4;
1267 		}
1268 	}
1269 
1270 	/*
1271 	 * Create the receive buffer DMA maps.
1272 	 */
1273 	for (i = 0; i < WM_NRXDESC; i++) {
1274 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1275 					       MCLBYTES, 0, 0,
1276 					  &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1277 			aprint_error_dev(sc->sc_dev,
1278 			    "unable to create Rx DMA map %d error = %d\n",
1279 			    i, error);
1280 			goto fail_5;
1281 		}
1282 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
1283 	}
1284 
1285 	/* clear interesting stat counters */
1286 	CSR_READ(sc, WMREG_COLC);
1287 	CSR_READ(sc, WMREG_RXERRC);
1288 
1289 	/*
1290 	 * Reset the chip to a known state.
1291 	 */
1292 	wm_reset(sc);
1293 
1294 	switch (sc->sc_type) {
1295 	case WM_T_82571:
1296 	case WM_T_82572:
1297 	case WM_T_82573:
1298 	case WM_T_82574:
1299 	case WM_T_80003:
1300 	case WM_T_ICH8:
1301 	case WM_T_ICH9:
1302 	case WM_T_ICH10:
1303 		if (wm_check_mng_mode(sc) != 0)
1304 			wm_get_hw_control(sc);
1305 		break;
1306 	default:
1307 		break;
1308 	}
1309 
1310 	/*
1311 	 * Get some information about the EEPROM.
1312 	 */
1313 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
1314 	    || (sc->sc_type == WM_T_ICH10)) {
1315 		uint32_t flash_size;
1316 		sc->sc_flags |= WM_F_SWFWHW_SYNC | WM_F_EEPROM_FLASH;
1317 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1318 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1319 		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1320 			aprint_error_dev(sc->sc_dev,
1321 			    "can't map FLASH registers\n");
1322 			return;
1323 		}
1324 		flash_size = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1325 		sc->sc_ich8_flash_base = (flash_size & ICH_GFPREG_BASE_MASK) *
1326 						ICH_FLASH_SECTOR_SIZE;
1327 		sc->sc_ich8_flash_bank_size =
1328 			((flash_size >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1329 		sc->sc_ich8_flash_bank_size -=
1330 			(flash_size & ICH_GFPREG_BASE_MASK);
1331 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1332 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
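		/*
		 * I.e., GFPREG encodes the flash region base in its low
		 * half and the region limit in its high half, in sector
		 * units; the bank size computed above is half of that
		 * region, expressed in 16-bit words.
		 */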
1333 	} else if (sc->sc_type == WM_T_80003)
1334 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
1335 	else if (sc->sc_type == WM_T_82573)
1336 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1337 	else if (sc->sc_type == WM_T_82574)
1338 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1339 	else if (sc->sc_type > WM_T_82544)
1340 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1341 
1342 	if (sc->sc_type <= WM_T_82544)
1343 		sc->sc_ee_addrbits = 6;
1344 	else if (sc->sc_type <= WM_T_82546_3) {
1345 		reg = CSR_READ(sc, WMREG_EECD);
1346 		if (reg & EECD_EE_SIZE)
1347 			sc->sc_ee_addrbits = 8;
1348 		else
1349 			sc->sc_ee_addrbits = 6;
1350 	} else if (sc->sc_type <= WM_T_82547_2) {
1351 		reg = CSR_READ(sc, WMREG_EECD);
1352 		if (reg & EECD_EE_TYPE) {
1353 			sc->sc_flags |= WM_F_EEPROM_SPI;
1354 			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
1355 		} else
1356 			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
1357 	} else if ((sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574) &&
1358 	    (wm_is_onboard_nvm_eeprom(sc) == 0)) {
1359 		sc->sc_flags |= WM_F_EEPROM_FLASH;
1360 	} else {
1361 		/* Assume everything else is SPI. */
1362 		reg = CSR_READ(sc, WMREG_EECD);
1363 		sc->sc_flags |= WM_F_EEPROM_SPI;
1364 		sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
1365 	}
1366 
1367 	/*
1368 	 * Defer printing the EEPROM type until after verifying the checksum.
1369 	 * This allows the EEPROM type to be printed correctly in the case
1370 	 * that no EEPROM is attached.
1371 	 */
1372 
1373 	/*
1374 	 * Validate the EEPROM checksum. If the checksum fails, flag this for
1375 	 * later, so we can fail future reads from the EEPROM.
1376 	 */
1377 	if (wm_validate_eeprom_checksum(sc)) {
1378 		/*
1379 		 * Read it once more, because some PCI-e parts fail the
1380 		 * first check due to the link being in a sleep state.
1381 		 */
1382 		if (wm_validate_eeprom_checksum(sc))
1383 			sc->sc_flags |= WM_F_EEPROM_INVALID;
1384 	}
1385 
1386 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
1387 		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1388 	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1389 		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1390 	} else {
1391 		if (sc->sc_flags & WM_F_EEPROM_SPI)
1392 			eetype = "SPI";
1393 		else
1394 			eetype = "MicroWire";
1395 		aprint_verbose_dev(sc->sc_dev,
1396 		    "%u word (%d address bits) %s EEPROM\n",
1397 		    1U << sc->sc_ee_addrbits,
1398 		    sc->sc_ee_addrbits, eetype);
1399 	}
1400 
1401 	/*
1402 	 * Read the Ethernet address from the device properties if
1403 	 * present; otherwise fall back to the EEPROM.
1404 	 */
1405 	ea = prop_dictionary_get(device_properties(sc->sc_dev), "mac-addr");
1406 	if (ea != NULL) {
1407 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1408 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1409 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1410 	} else {
1411 		if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
1412 		    sizeof(myea) / sizeof(myea[0]), myea)) {
1413 			aprint_error_dev(sc->sc_dev,
1414 			    "unable to read Ethernet address\n");
1415 			return;
1416 		}
1417 		enaddr[0] = myea[0] & 0xff;
1418 		enaddr[1] = myea[0] >> 8;
1419 		enaddr[2] = myea[1] & 0xff;
1420 		enaddr[3] = myea[1] >> 8;
1421 		enaddr[4] = myea[2] & 0xff;
1422 		enaddr[5] = myea[2] >> 8;
1423 	}
1424 
1425 	/*
1426 	 * Toggle the LSB of the MAC address on the second port
1427 	 * of the dual port controller.
1428 	 */
1429 	if (sc->sc_type == WM_T_82546 || sc->sc_type == WM_T_82546_3
1430 	    || sc->sc_type ==  WM_T_82571 || sc->sc_type == WM_T_80003) {
1431 		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
1432 			enaddr[5] ^= 1;
1433 	}
1434 
1435 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1436 	    ether_sprintf(enaddr));
1437 
1438 	/*
1439 	 * Read the config info from the EEPROM, and set up various
1440 	 * bits in the control registers based on their contents.
1441 	 */
1442 	pn = prop_dictionary_get(device_properties(sc->sc_dev),
1443 				 "i82543-cfg1");
1444 	if (pn != NULL) {
1445 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1446 		cfg1 = (uint16_t) prop_number_integer_value(pn);
1447 	} else {
1448 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1449 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1450 			return;
1451 		}
1452 	}
1453 
1454 	pn = prop_dictionary_get(device_properties(sc->sc_dev),
1455 				 "i82543-cfg2");
1456 	if (pn != NULL) {
1457 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1458 		cfg2 = (uint16_t) prop_number_integer_value(pn);
1459 	} else {
1460 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1461 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1462 			return;
1463 		}
1464 	}
1465 
1466 	if (sc->sc_type >= WM_T_82544) {
1467 		pn = prop_dictionary_get(device_properties(sc->sc_dev),
1468 					 "i82543-swdpin");
1469 		if (pn != NULL) {
1470 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1471 			swdpin = (uint16_t) prop_number_integer_value(pn);
1472 		} else {
1473 			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1474 				aprint_error_dev(sc->sc_dev,
1475 				    "unable to read SWDPIN\n");
1476 				return;
1477 			}
1478 		}
1479 	}
1480 
1481 	if (cfg1 & EEPROM_CFG1_ILOS)
1482 		sc->sc_ctrl |= CTRL_ILOS;
1483 	if (sc->sc_type >= WM_T_82544) {
1484 		sc->sc_ctrl |=
1485 		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1486 		    CTRL_SWDPIO_SHIFT;
1487 		sc->sc_ctrl |=
1488 		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1489 		    CTRL_SWDPINS_SHIFT;
1490 	} else {
1491 		sc->sc_ctrl |=
1492 		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1493 		    CTRL_SWDPIO_SHIFT;
1494 	}
1495 
1496 #if 0
1497 	if (sc->sc_type >= WM_T_82544) {
1498 		if (cfg1 & EEPROM_CFG1_IPS0)
1499 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1500 		if (cfg1 & EEPROM_CFG1_IPS1)
1501 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1502 		sc->sc_ctrl_ext |=
1503 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1504 		    CTRL_EXT_SWDPIO_SHIFT;
1505 		sc->sc_ctrl_ext |=
1506 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1507 		    CTRL_EXT_SWDPINS_SHIFT;
1508 	} else {
1509 		sc->sc_ctrl_ext |=
1510 		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1511 		    CTRL_EXT_SWDPIO_SHIFT;
1512 	}
1513 #endif
1514 
1515 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1516 #if 0
1517 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1518 #endif
1519 
1520 	/*
1521 	 * Set up some register offsets that are different between
1522 	 * the i82542 and the i82543 and later chips.
1523 	 */
1524 	if (sc->sc_type < WM_T_82543) {
1525 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
1526 		sc->sc_tdt_reg = WMREG_OLD_TDT;
1527 	} else {
1528 		sc->sc_rdt_reg = WMREG_RDT;
1529 		sc->sc_tdt_reg = WMREG_TDT;
1530 	}
1531 
1532 	/*
1533 	 * Determine if we're TBI or GMII mode, and initialize the
1534 	 * media structures accordingly.
1535 	 */
1536 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1537 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_82573
1538 	    || sc->sc_type == WM_T_82574) {
1539 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
1540 		wm_gmii_mediainit(sc);
1541 	} else if (sc->sc_type < WM_T_82543 ||
1542 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1543 		if (wmp->wmp_flags & WMP_F_1000T)
1544 			aprint_error_dev(sc->sc_dev,
1545 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
1546 		wm_tbi_mediainit(sc);
1547 	} else {
1548 		if (wmp->wmp_flags & WMP_F_1000X)
1549 			aprint_error_dev(sc->sc_dev,
1550 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
1551 		wm_gmii_mediainit(sc);
1552 	}
1553 
1554 	ifp = &sc->sc_ethercom.ec_if;
1555 	xname = device_xname(sc->sc_dev);
1556 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
1557 	ifp->if_softc = sc;
1558 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1559 	ifp->if_ioctl = wm_ioctl;
1560 	ifp->if_start = wm_start;
1561 	ifp->if_watchdog = wm_watchdog;
1562 	ifp->if_init = wm_init;
1563 	ifp->if_stop = wm_stop;
1564 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1565 	IFQ_SET_READY(&ifp->if_snd);
1566 
1567 	if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_82574 &&
1568 	    sc->sc_type != WM_T_ICH8)
1569 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1570 
1571 	/*
1572 	 * If we're a i82543 or greater, we can support VLANs.
1573 	 * If we're an i82543 or greater, we can support VLANs.
1574 	if (sc->sc_type >= WM_T_82543)
1575 		sc->sc_ethercom.ec_capabilities |=
1576 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
1577 
1578 	/*
1579 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
1580 	 * on i82543 and later.
1581 	 */
1582 	if (sc->sc_type >= WM_T_82543) {
1583 		ifp->if_capabilities |=
1584 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1585 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1586 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1587 		    IFCAP_CSUM_TCPv6_Tx |
1588 		    IFCAP_CSUM_UDPv6_Tx;
1589 	}
1590 
1591 	/*
1592 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
1593 	 *
1594 	 *	82541GI (8086:1076) ... no
1595 	 *	82572EI (8086:10b9) ... yes
1596 	 */
1597 	if (sc->sc_type >= WM_T_82571) {
1598 		ifp->if_capabilities |=
1599 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1600 	}
1601 
1602 	/*
1603 	 * If we're an i82544 or greater (except i82547), we can do
1604 	 * TCP segmentation offload.
1605 	 */
1606 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
1607 		ifp->if_capabilities |= IFCAP_TSOv4;
1608 	}
1609 
1610 	if (sc->sc_type >= WM_T_82571) {
1611 		ifp->if_capabilities |= IFCAP_TSOv6;
1612 	}
1613 
1614 	/*
1615 	 * Attach the interface.
1616 	 */
1617 	if_attach(ifp);
1618 	ether_ifattach(ifp, enaddr);
1619 #if NRND > 0
1620 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
1621 #endif
1622 
1623 #ifdef WM_EVENT_COUNTERS
1624 	/* Attach event counters. */
1625 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1626 	    NULL, xname, "txsstall");
1627 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1628 	    NULL, xname, "txdstall");
1629 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1630 	    NULL, xname, "txfifo_stall");
1631 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1632 	    NULL, xname, "txdw");
1633 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1634 	    NULL, xname, "txqe");
1635 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1636 	    NULL, xname, "rxintr");
1637 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
1638 	    NULL, xname, "linkintr");
1639 
1640 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1641 	    NULL, xname, "rxipsum");
1642 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
1643 	    NULL, xname, "rxtusum");
1644 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1645 	    NULL, xname, "txipsum");
1646 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
1647 	    NULL, xname, "txtusum");
1648 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
1649 	    NULL, xname, "txtusum6");
1650 
1651 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
1652 	    NULL, xname, "txtso");
1653 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
1654 	    NULL, xname, "txtso6");
1655 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
1656 	    NULL, xname, "txtsopain");
1657 
1658 	for (i = 0; i < WM_NTXSEGS; i++) {
1659 		sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
1660 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
1661 		    NULL, xname, wm_txseg_evcnt_names[i]);
1662 	}
1663 
1664 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
1665 	    NULL, xname, "txdrop");
1666 
1667 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
1668 	    NULL, xname, "tu");
1669 
1670 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
1671 	    NULL, xname, "tx_xoff");
1672 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
1673 	    NULL, xname, "tx_xon");
1674 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
1675 	    NULL, xname, "rx_xoff");
1676 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
1677 	    NULL, xname, "rx_xon");
1678 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
1679 	    NULL, xname, "rx_macctl");
1680 #endif /* WM_EVENT_COUNTERS */
1681 
1682 	if (pmf_device_register(self, NULL, NULL))
1683 		pmf_class_network_register(self, ifp);
1684 	else
1685 		aprint_error_dev(self, "couldn't establish power handler\n");
1686 
1687 	return;
1688 
1689 	/*
1690 	 * Free any resources we've allocated during the failed attach
1691 	 * attempt.  Do this in reverse order and fall through.
1692 	 */
1693  fail_5:
1694 	for (i = 0; i < WM_NRXDESC; i++) {
1695 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
1696 			bus_dmamap_destroy(sc->sc_dmat,
1697 			    sc->sc_rxsoft[i].rxs_dmamap);
1698 	}
1699  fail_4:
1700 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1701 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
1702 			bus_dmamap_destroy(sc->sc_dmat,
1703 			    sc->sc_txsoft[i].txs_dmamap);
1704 	}
1705 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
1706  fail_3:
1707 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
1708  fail_2:
1709 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
1710 	    cdata_size);
1711  fail_1:
1712 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1713  fail_0:
1714 	return;
1715 }
1716 
1717 /*
1718  * wm_tx_offload:
1719  *
1720  *	Set up TCP/IP checksumming parameters for the
1721  *	specified packet.
1722  */
1723 static int
1724 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
1725     uint8_t *fieldsp)
1726 {
1727 	struct mbuf *m0 = txs->txs_mbuf;
1728 	struct livengood_tcpip_ctxdesc *t;
1729 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
1730 	uint32_t ipcse;
1731 	struct ether_header *eh;
1732 	int offset, iphl;
1733 	uint8_t fields;
1734 
1735 	/*
1736 	 * XXX It would be nice if the mbuf pkthdr had offset
1737 	 * fields for the protocol headers.
1738 	 */
1739 
1740 	eh = mtod(m0, struct ether_header *);
1741 	switch (ntohs(eh->ether_type)) {
1742 	case ETHERTYPE_IP:
1743 	case ETHERTYPE_IPV6:
1744 		offset = ETHER_HDR_LEN;
1745 		break;
1746 
1747 	case ETHERTYPE_VLAN:
1748 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1749 		break;
1750 
1751 	default:
1752 		/*
1753 		 * Don't support this protocol or encapsulation.
1754 		 */
1755 		*fieldsp = 0;
1756 		*cmdp = 0;
1757 		return (0);
1758 	}
1759 
1760 	if ((m0->m_pkthdr.csum_flags &
1761 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
1762 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
1763 	} else {
1764 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
1765 	}
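	/*
	 * ipcse is the inclusive offset of the IP header's last byte,
	 * i.e. the end of the IPv4 checksum region (it is zeroed again
	 * below for the TSOv6 case).
	 */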
1766 	ipcse = offset + iphl - 1;
1767 
1768 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
1769 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
1770 	seg = 0;
1771 	fields = 0;
1772 
1773 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
1774 		int hlen = offset + iphl;
1775 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
1776 
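		/*
		 * For TSO the chip rewrites the length field of every
		 * segment, so we zero ip_len/ip6_plen here and seed
		 * th_sum with a pseudo-header checksum that omits the
		 * length; the hardware completes each segment's checksum.
		 */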
1777 		if (__predict_false(m0->m_len <
1778 				    (hlen + sizeof(struct tcphdr)))) {
1779 			/*
1780 			 * TCP/IP headers are not in the first mbuf; we need
1781 			 * to do this the slow and painful way.  Let's just
1782 			 * hope this doesn't happen very often.
1783 			 */
1784 			struct tcphdr th;
1785 
1786 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
1787 
1788 			m_copydata(m0, hlen, sizeof(th), &th);
1789 			if (v4) {
1790 				struct ip ip;
1791 
1792 				m_copydata(m0, offset, sizeof(ip), &ip);
1793 				ip.ip_len = 0;
1794 				m_copyback(m0,
1795 				    offset + offsetof(struct ip, ip_len),
1796 				    sizeof(ip.ip_len), &ip.ip_len);
1797 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
1798 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
1799 			} else {
1800 				struct ip6_hdr ip6;
1801 
1802 				m_copydata(m0, offset, sizeof(ip6), &ip6);
1803 				ip6.ip6_plen = 0;
1804 				m_copyback(m0,
1805 				    offset + offsetof(struct ip6_hdr, ip6_plen),
1806 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
1807 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
1808 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
1809 			}
1810 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
1811 			    sizeof(th.th_sum), &th.th_sum);
1812 
1813 			hlen += th.th_off << 2;
1814 		} else {
1815 			/*
1816 			 * TCP/IP headers are in the first mbuf; we can do
1817 			 * this the easy way.
1818 			 */
1819 			struct tcphdr *th;
1820 
1821 			if (v4) {
1822 				struct ip *ip =
1823 				    (void *)(mtod(m0, char *) + offset);
1824 				th = (void *)(mtod(m0, char *) + hlen);
1825 
1826 				ip->ip_len = 0;
1827 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
1828 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1829 			} else {
1830 				struct ip6_hdr *ip6 =
1831 				    (void *)(mtod(m0, char *) + offset);
1832 				th = (void *)(mtod(m0, char *) + hlen);
1833 
1834 				ip6->ip6_plen = 0;
1835 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
1836 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
1837 			}
1838 			hlen += th->th_off << 2;
1839 		}
1840 
1841 		if (v4) {
1842 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
1843 			cmdlen |= WTX_TCPIP_CMD_IP;
1844 		} else {
1845 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
1846 			ipcse = 0;
1847 		}
1848 		cmd |= WTX_TCPIP_CMD_TSE;
1849 		cmdlen |= WTX_TCPIP_CMD_TSE |
1850 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
1851 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
1852 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
1853 	}
1854 
1855 	/*
1856 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
1857 	 * offload feature, if we load the context descriptor, we
1858 	 * MUST provide valid values for IPCSS and TUCSS fields.
1859 	 */
1860 
1861 	ipcs = WTX_TCPIP_IPCSS(offset) |
1862 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1863 	    WTX_TCPIP_IPCSE(ipcse);
1864 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
1865 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
1866 		fields |= WTX_IXSM;
1867 	}
1868 
1869 	offset += iphl;
1870 
1871 	if (m0->m_pkthdr.csum_flags &
1872 	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
1873 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
1874 		fields |= WTX_TXSM;
1875 		tucs = WTX_TCPIP_TUCSS(offset) |
1876 		    WTX_TCPIP_TUCSO(offset +
1877 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
1878 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
1879 	} else if ((m0->m_pkthdr.csum_flags &
1880 	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
1881 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
1882 		fields |= WTX_TXSM;
1883 		tucs = WTX_TCPIP_TUCSS(offset) |
1884 		    WTX_TCPIP_TUCSO(offset +
1885 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
1886 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
1887 	} else {
1888 		/* Just initialize it to a valid TCP context. */
1889 		tucs = WTX_TCPIP_TUCSS(offset) |
1890 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
1891 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
1892 	}
1893 
1894 	/* Fill in the context descriptor. */
1895 	t = (struct livengood_tcpip_ctxdesc *)
1896 	    &sc->sc_txdescs[sc->sc_txnext];
1897 	t->tcpip_ipcs = htole32(ipcs);
1898 	t->tcpip_tucs = htole32(tucs);
1899 	t->tcpip_cmdlen = htole32(cmdlen);
1900 	t->tcpip_seg = htole32(seg);
1901 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
1902 
1903 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
1904 	txs->txs_ndesc++;
1905 
1906 	*cmdp = cmd;
1907 	*fieldsp = fields;
1908 
1909 	return (0);
1910 }
1911 
1912 static void
1913 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
1914 {
1915 	struct mbuf *m;
1916 	int i;
1917 
1918 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
1919 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
1920 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
1921 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
1922 		    m->m_data, m->m_len, m->m_flags);
1923 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
1924 	    i, i == 1 ? "" : "s");
1925 }
1926 
1927 /*
1928  * wm_82547_txfifo_stall:
1929  *
1930  *	Callout used to wait for the 82547 Tx FIFO to drain,
1931  *	reset the FIFO pointers, and restart packet transmission.
1932  */
1933 static void
1934 wm_82547_txfifo_stall(void *arg)
1935 {
1936 	struct wm_softc *sc = arg;
1937 	int s;
1938 
1939 	s = splnet();
1940 
1941 	if (sc->sc_txfifo_stall) {
1942 		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
1943 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
1944 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
1945 			/*
1946 			 * Packets have drained.  Stop transmitter, reset
1947 			 * FIFO pointers, restart transmitter, and kick
1948 			 * the packet queue.
1949 			 */
1950 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
1951 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
1952 			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
1953 			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
1954 			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
1955 			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
1956 			CSR_WRITE(sc, WMREG_TCTL, tctl);
1957 			CSR_WRITE_FLUSH(sc);
1958 
1959 			sc->sc_txfifo_head = 0;
1960 			sc->sc_txfifo_stall = 0;
1961 			wm_start(&sc->sc_ethercom.ec_if);
1962 		} else {
1963 			/*
1964 			 * Still waiting for packets to drain; try again in
1965 			 * another tick.
1966 			 */
1967 			callout_schedule(&sc->sc_txfifo_ch, 1);
1968 		}
1969 	}
1970 
1971 	splx(s);
1972 }
1973 
1974 /*
1975  * wm_82547_txfifo_bugchk:
1976  *
1977  *	Check for bug condition in the 82547 Tx FIFO.  We need to
1978  *	prevent enqueueing a packet that would wrap around the end
1979  *	of the Tx FIFO ring buffer; otherwise the chip will croak.
1980  *
1981  *	We do this by checking the amount of space before the end
1982  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
1983  *	the Tx FIFO, wait for all remaining packets to drain, reset
1984  *	the internal FIFO pointers to the beginning, and restart
1985  *	transmission on the interface.
1986  */
1987 #define	WM_FIFO_HDR		0x10
1988 #define	WM_82547_PAD_LEN	0x3e0
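/*
 * Illustrative arithmetic (hypothetical sizes): with 1 KB (0x400) of
 * space left before the end of the FIFO, a 1500-byte packet rounds up
 * to 0x5f0 bytes including the FIFO header; 0x5f0 < 0x3e0 + 0x400, so
 * it is sent, while a 3000-byte packet (0xbd0) would stall instead.
 */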
1989 static int
1990 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
1991 {
1992 	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
1993 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
1994 
1995 	/* Just return if already stalled. */
1996 	if (sc->sc_txfifo_stall)
1997 		return (1);
1998 
1999 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
2000 		/* Stall only occurs in half-duplex mode. */
2001 		goto send_packet;
2002 	}
2003 
2004 	if (len >= WM_82547_PAD_LEN + space) {
2005 		sc->sc_txfifo_stall = 1;
2006 		callout_schedule(&sc->sc_txfifo_ch, 1);
2007 		return (1);
2008 	}
2009 
2010  send_packet:
2011 	sc->sc_txfifo_head += len;
2012 	if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2013 		sc->sc_txfifo_head -= sc->sc_txfifo_size;
2014 
2015 	return (0);
2016 }
2017 
2018 /*
2019  * wm_start:		[ifnet interface function]
2020  *
2021  *	Start packet transmission on the interface.
2022  */
2023 static void
2024 wm_start(struct ifnet *ifp)
2025 {
2026 	struct wm_softc *sc = ifp->if_softc;
2027 	struct mbuf *m0;
2028 	struct m_tag *mtag;
2029 	struct wm_txsoft *txs;
2030 	bus_dmamap_t dmamap;
2031 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2032 	bus_addr_t curaddr;
2033 	bus_size_t seglen, curlen;
2034 	uint32_t cksumcmd;
2035 	uint8_t cksumfields;
2036 
2037 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2038 		return;
2039 
2040 	/*
2041 	 * Remember the previous number of free descriptors.
2042 	 */
2043 	ofree = sc->sc_txfree;
2044 
2045 	/*
2046 	 * Loop through the send queue, setting up transmit descriptors
2047 	 * until we drain the queue, or use up all available transmit
2048 	 * descriptors.
2049 	 */
2050 	for (;;) {
2051 		/* Grab a packet off the queue. */
2052 		IFQ_POLL(&ifp->if_snd, m0);
2053 		if (m0 == NULL)
2054 			break;
2055 
2056 		DPRINTF(WM_DEBUG_TX,
2057 		    ("%s: TX: have packet to transmit: %p\n",
2058 		    device_xname(sc->sc_dev), m0));
2059 
2060 		/* Get a work queue entry. */
2061 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2062 			wm_txintr(sc);
2063 			if (sc->sc_txsfree == 0) {
2064 				DPRINTF(WM_DEBUG_TX,
2065 				    ("%s: TX: no free job descriptors\n",
2066 					device_xname(sc->sc_dev)));
2067 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2068 				break;
2069 			}
2070 		}
2071 
2072 		txs = &sc->sc_txsoft[sc->sc_txsnext];
2073 		dmamap = txs->txs_dmamap;
2074 
2075 		use_tso = (m0->m_pkthdr.csum_flags &
2076 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2077 
2078 		/*
2079 		 * So says the Linux driver:
2080 		 * The controller does a simple calculation to make sure
2081 		 * there is enough room in the FIFO before initiating the
2082 		 * DMA for each buffer.  The calc is:
2083 		 *	4 = ceil(buffer len / MSS)
2084 		 * To make sure we don't overrun the FIFO, adjust the max
2085 		 * buffer len if the MSS drops.
2086 		 */
2087 		dmamap->dm_maxsegsz =
2088 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2089 		    ? m0->m_pkthdr.segsz << 2
2090 		    : WTX_MAX_LEN;
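		/*
		 * i.e. cap each DMA segment at 4 * MSS whenever that is
		 * smaller than the usual WTX_MAX_LEN limit.
		 */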
2091 
2092 		/*
2093 		 * Load the DMA map.  If this fails, the packet either
2094 		 * didn't fit in the allotted number of segments, or we
2095 		 * were short on resources.  For the too-many-segments
2096 		 * case, we simply report an error and drop the packet,
2097 		 * since we can't sanely copy a jumbo packet to a single
2098 		 * buffer.
2099 		 */
2100 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2101 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2102 		if (error) {
2103 			if (error == EFBIG) {
2104 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2105 				log(LOG_ERR, "%s: Tx packet consumes too many "
2106 				    "DMA segments, dropping...\n",
2107 				    device_xname(sc->sc_dev));
2108 				IFQ_DEQUEUE(&ifp->if_snd, m0);
2109 				wm_dump_mbuf_chain(sc, m0);
2110 				m_freem(m0);
2111 				continue;
2112 			}
2113 			/*
2114 			 * Short on resources, just stop for now.
2115 			 */
2116 			DPRINTF(WM_DEBUG_TX,
2117 			    ("%s: TX: dmamap load failed: %d\n",
2118 			    device_xname(sc->sc_dev), error));
2119 			break;
2120 		}
2121 
2122 		segs_needed = dmamap->dm_nsegs;
2123 		if (use_tso) {
2124 			/* For sentinel descriptor; see below. */
2125 			segs_needed++;
2126 		}
2127 
2128 		/*
2129 		 * Ensure we have enough descriptors free to describe
2130 		 * the packet.  Note, we always reserve one descriptor
2131 		 * at the end of the ring due to the semantics of the
2132 		 * TDT register, plus one more in the event we need
2133 		 * to load offload context.
2134 		 */
2135 		if (segs_needed > sc->sc_txfree - 2) {
2136 			/*
2137 			 * Not enough free descriptors to transmit this
2138 			 * packet.  We haven't committed anything yet,
2139 			 * so just unload the DMA map, put the packet
2140 			 * back on the queue, and punt.  Notify the upper
2141 			 * layer that there are no more slots left.
2142 			 */
2143 			DPRINTF(WM_DEBUG_TX,
2144 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
2145 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
2146 			    segs_needed, sc->sc_txfree - 1));
2147 			ifp->if_flags |= IFF_OACTIVE;
2148 			bus_dmamap_unload(sc->sc_dmat, dmamap);
2149 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2150 			break;
2151 		}
2152 
2153 		/*
2154 		 * Check for 82547 Tx FIFO bug.  We need to do this
2155 		 * once we know we can transmit the packet, since we
2156 		 * do some internal FIFO space accounting here.
2157 		 */
2158 		if (sc->sc_type == WM_T_82547 &&
2159 		    wm_82547_txfifo_bugchk(sc, m0)) {
2160 			DPRINTF(WM_DEBUG_TX,
2161 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
2162 			    device_xname(sc->sc_dev)));
2163 			ifp->if_flags |= IFF_OACTIVE;
2164 			bus_dmamap_unload(sc->sc_dmat, dmamap);
2165 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2166 			break;
2167 		}
2168 
2169 		IFQ_DEQUEUE(&ifp->if_snd, m0);
2170 
2171 		/*
2172 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2173 		 */
2174 
2175 		DPRINTF(WM_DEBUG_TX,
2176 		    ("%s: TX: packet has %d (%d) DMA segments\n",
2177 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2178 
2179 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2180 
2181 		/*
2182 		 * Store a pointer to the packet so that we can free it
2183 		 * later.
2184 		 *
2185 		 * Initially, we take the number of descriptors the
2186 		 * packet uses to be the number of DMA segments.  This may be
2187 		 * incremented by 1 if we do checksum offload (a descriptor
2188 		 * is used to set the checksum context).
2189 		 */
2190 		txs->txs_mbuf = m0;
2191 		txs->txs_firstdesc = sc->sc_txnext;
2192 		txs->txs_ndesc = segs_needed;
2193 
2194 		/* Set up offload parameters for this packet. */
2195 		if (m0->m_pkthdr.csum_flags &
2196 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
2197 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2198 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2199 			if (wm_tx_offload(sc, txs, &cksumcmd,
2200 					  &cksumfields) != 0) {
2201 				/* Error message already displayed. */
2202 				bus_dmamap_unload(sc->sc_dmat, dmamap);
2203 				continue;
2204 			}
2205 		} else {
2206 			cksumcmd = 0;
2207 			cksumfields = 0;
2208 		}
2209 
2210 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2211 
2212 		/* Sync the DMA map. */
2213 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2214 		    BUS_DMASYNC_PREWRITE);
2215 
2216 		/*
2217 		 * Initialize the transmit descriptor.
2218 		 */
2219 		for (nexttx = sc->sc_txnext, seg = 0;
2220 		     seg < dmamap->dm_nsegs; seg++) {
2221 			for (seglen = dmamap->dm_segs[seg].ds_len,
2222 			     curaddr = dmamap->dm_segs[seg].ds_addr;
2223 			     seglen != 0;
2224 			     curaddr += curlen, seglen -= curlen,
2225 			     nexttx = WM_NEXTTX(sc, nexttx)) {
2226 				curlen = seglen;
2227 
2228 				/*
2229 				 * So says the Linux driver:
2230 				 * Work around for premature descriptor
2231 				 * write-backs in TSO mode.  Append a
2232 				 * 4-byte sentinel descriptor.
2233 				 */
2234 				if (use_tso &&
2235 				    seg == dmamap->dm_nsegs - 1 &&
2236 				    curlen > 8)
2237 					curlen -= 4;
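				/*
				 * The 4 bytes shaved off here remain in
				 * seglen, so the loop runs one more time
				 * and emits a 4-byte descriptor; that is
				 * the sentinel counted in segs_needed.
				 */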
2238 
2239 				wm_set_dma_addr(
2240 				    &sc->sc_txdescs[nexttx].wtx_addr,
2241 				    curaddr);
2242 				sc->sc_txdescs[nexttx].wtx_cmdlen =
2243 				    htole32(cksumcmd | curlen);
2244 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2245 				    0;
2246 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2247 				    cksumfields;
2248 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2249 				lasttx = nexttx;
2250 
2251 				DPRINTF(WM_DEBUG_TX,
2252 				    ("%s: TX: desc %d: low 0x%08lx, "
2253 				     "len 0x%04x\n",
2254 				    device_xname(sc->sc_dev), nexttx,
2255 				    curaddr & 0xffffffffUL, (unsigned)curlen));
2256 			}
2257 		}
2258 
2259 		KASSERT(lasttx != -1);
2260 
2261 		/*
2262 		 * Set up the command byte on the last descriptor of
2263 		 * the packet.  If we're in the interrupt delay window,
2264 		 * delay the interrupt.
2265 		 */
2266 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
2267 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
2268 
2269 		/*
2270 		 * If VLANs are enabled and the packet has a VLAN tag, set
2271 		 * up the descriptor to encapsulate the packet for us.
2272 		 *
2273 		 * This is only valid on the last descriptor of the packet.
2274 		 */
2275 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2276 			sc->sc_txdescs[lasttx].wtx_cmdlen |=
2277 			    htole32(WTX_CMD_VLE);
2278 			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2279 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2280 		}
2281 
2282 		txs->txs_lastdesc = lasttx;
2283 
2284 		DPRINTF(WM_DEBUG_TX,
2285 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
2286 		    device_xname(sc->sc_dev),
2287 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2288 
2289 		/* Sync the descriptors we're using. */
2290 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2291 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2292 
2293 		/* Give the packet to the chip. */
2294 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2295 
2296 		DPRINTF(WM_DEBUG_TX,
2297 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2298 
2299 		DPRINTF(WM_DEBUG_TX,
2300 		    ("%s: TX: finished transmitting packet, job %d\n",
2301 		    device_xname(sc->sc_dev), sc->sc_txsnext));
2302 
2303 		/* Advance the tx pointer. */
2304 		sc->sc_txfree -= txs->txs_ndesc;
2305 		sc->sc_txnext = nexttx;
2306 
2307 		sc->sc_txsfree--;
2308 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2309 
2310 #if NBPFILTER > 0
2311 		/* Pass the packet to any BPF listeners. */
2312 		if (ifp->if_bpf)
2313 			bpf_mtap(ifp->if_bpf, m0);
2314 #endif /* NBPFILTER > 0 */
2315 	}
2316 
2317 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2318 		/* No more slots; notify upper layer. */
2319 		ifp->if_flags |= IFF_OACTIVE;
2320 	}
2321 
2322 	if (sc->sc_txfree != ofree) {
2323 		/* Set a watchdog timer in case the chip flakes out. */
2324 		ifp->if_timer = 5;
2325 	}
2326 }
2327 
2328 /*
2329  * wm_watchdog:		[ifnet interface function]
2330  *
2331  *	Watchdog timer handler.
2332  */
2333 static void
2334 wm_watchdog(struct ifnet *ifp)
2335 {
2336 	struct wm_softc *sc = ifp->if_softc;
2337 
2338 	/*
2339 	 * Since we're using delayed interrupts, sweep up
2340 	 * before we report an error.
2341 	 */
2342 	wm_txintr(sc);
2343 
2344 	if (sc->sc_txfree != WM_NTXDESC(sc)) {
2345 		log(LOG_ERR,
2346 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2347 		    device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2348 		    sc->sc_txnext);
2349 		ifp->if_oerrors++;
2350 
2351 		/* Reset the interface. */
2352 		(void) wm_init(ifp);
2353 	}
2354 
2355 	/* Try to get more packets going. */
2356 	wm_start(ifp);
2357 }
2358 
2359 /*
2360  * wm_ioctl:		[ifnet interface function]
2361  *
2362  *	Handle control requests from the operator.
2363  */
2364 static int
2365 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2366 {
2367 	struct wm_softc *sc = ifp->if_softc;
2368 	struct ifreq *ifr = (struct ifreq *) data;
2369 	struct ifaddr *ifa = (struct ifaddr *)data;
2370 	struct sockaddr_dl *sdl;
2371 	int diff, s, error;
2372 
2373 	s = splnet();
2374 
2375 	switch (cmd) {
2376 	case SIOCSIFFLAGS:
2377 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
2378 			break;
2379 		if (ifp->if_flags & IFF_UP) {
2380 			diff = (ifp->if_flags ^ sc->sc_if_flags)
2381 			    & (IFF_PROMISC | IFF_ALLMULTI);
2382 			if ((diff & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2383 				/*
2384 				 * If the only difference between the old and
2385 				 * new flags is IFF_PROMISC or IFF_ALLMULTI,
2386 				 * just update the multicast filter (don't do
2387 				 * a full reset, which would drop the link).
2388 				 */
2389 				wm_set_filter(sc);
2390 			} else {
2391 				/*
2392 				 * Reset the interface to pick up changes in
2393 				 * any other flags that affect the hardware
2394 				 * state.
2395 				 */
2396 				wm_init(ifp);
2397 			}
2398 		} else {
2399 			if (ifp->if_flags & IFF_RUNNING)
2400 				wm_stop(ifp, 1);
2401 		}
2402 		sc->sc_if_flags = ifp->if_flags;
2403 		error = 0;
2404 		break;
2405 	case SIOCSIFMEDIA:
2406 	case SIOCGIFMEDIA:
2407 		/* Flow control requires full-duplex mode. */
2408 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2409 		    (ifr->ifr_media & IFM_FDX) == 0)
2410 			ifr->ifr_media &= ~IFM_ETH_FMASK;
2411 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2412 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2413 				/* We can do both TXPAUSE and RXPAUSE. */
2414 				ifr->ifr_media |=
2415 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2416 			}
2417 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2418 		}
2419 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2420 		break;
2421 	case SIOCINITIFADDR:
2422 		if (ifa->ifa_addr->sa_family == AF_LINK) {
2423 			sdl = satosdl(ifp->if_dl->ifa_addr);
2424 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2425 					LLADDR(satosdl(ifa->ifa_addr)),
2426 					ifp->if_addrlen);
2427 			/* unicast address is first multicast entry */
2428 			wm_set_filter(sc);
2429 			error = 0;
2430 			break;
2431 		}
2432 		/* Fall through for rest */
2433 	default:
2434 		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
2435 			break;
2436 
2437 		error = 0;
2438 
2439 		if (cmd == SIOCSIFCAP)
2440 			error = (*ifp->if_init)(ifp);
2441 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2442 			;
2443 		else if (ifp->if_flags & IFF_RUNNING) {
2444 			/*
2445 			 * Multicast list has changed; set the hardware filter
2446 			 * accordingly.
2447 			 */
2448 			wm_set_filter(sc);
2449 		}
2450 		break;
2451 	}
2452 
2453 	/* Try to get more packets going. */
2454 	wm_start(ifp);
2455 
2456 	splx(s);
2457 	return (error);
2458 }
2459 
2460 /*
2461  * wm_intr:
2462  *
2463  *	Interrupt service routine.
2464  */
2465 static int
2466 wm_intr(void *arg)
2467 {
2468 	struct wm_softc *sc = arg;
2469 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2470 	uint32_t icr;
2471 	int handled = 0;
2472 
2473 	while (1 /* CONSTCOND */) {
2474 		icr = CSR_READ(sc, WMREG_ICR);
2475 		if ((icr & sc->sc_icr) == 0)
2476 			break;
2477 #if 0 /*NRND > 0*/
2478 		if (RND_ENABLED(&sc->rnd_source))
2479 			rnd_add_uint32(&sc->rnd_source, icr);
2480 #endif
2481 
2482 		handled = 1;
2483 
2484 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2485 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
2486 			DPRINTF(WM_DEBUG_RX,
2487 			    ("%s: RX: got Rx intr 0x%08x\n",
2488 			    device_xname(sc->sc_dev),
2489 			    icr & (ICR_RXDMT0|ICR_RXT0)));
2490 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
2491 		}
2492 #endif
2493 		wm_rxintr(sc);
2494 
2495 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2496 		if (icr & ICR_TXDW) {
2497 			DPRINTF(WM_DEBUG_TX,
2498 			    ("%s: TX: got TXDW interrupt\n",
2499 			    device_xname(sc->sc_dev)));
2500 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
2501 		}
2502 #endif
2503 		wm_txintr(sc);
2504 
2505 		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
2506 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
2507 			wm_linkintr(sc, icr);
2508 		}
2509 
2510 		if (icr & ICR_RXO) {
2511 			ifp->if_ierrors++;
2512 #if defined(WM_DEBUG)
2513 			log(LOG_WARNING, "%s: Receive overrun\n",
2514 			    device_xname(sc->sc_dev));
2515 #endif /* defined(WM_DEBUG) */
2516 		}
2517 	}
2518 
2519 	if (handled) {
2520 		/* Try to get more packets going. */
2521 		wm_start(ifp);
2522 	}
2523 
2524 	return (handled);
2525 }
2526 
2527 /*
2528  * wm_txintr:
2529  *
2530  *	Helper; handle transmit interrupts.
2531  */
2532 static void
2533 wm_txintr(struct wm_softc *sc)
2534 {
2535 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2536 	struct wm_txsoft *txs;
2537 	uint8_t status;
2538 	int i;
2539 
2540 	ifp->if_flags &= ~IFF_OACTIVE;
2541 
2542 	/*
2543 	 * Go through the Tx list and free mbufs for those
2544 	 * frames which have been transmitted.
2545 	 */
2546 	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
2547 	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
2548 		txs = &sc->sc_txsoft[i];
2549 
2550 		DPRINTF(WM_DEBUG_TX,
2551 		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
2552 
2553 		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
2554 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2555 
2556 		status =
2557 		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
2558 		if ((status & WTX_ST_DD) == 0) {
2559 			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
2560 			    BUS_DMASYNC_PREREAD);
2561 			break;
2562 		}
2563 
2564 		DPRINTF(WM_DEBUG_TX,
2565 		    ("%s: TX: job %d done: descs %d..%d\n",
2566 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
2567 		    txs->txs_lastdesc));
2568 
2569 		/*
2570 		 * XXX We should probably be using the statistics
2571 		 * XXX registers, but I don't know if they exist
2572 		 * XXX on chips before the i82544.
2573 		 */
2574 
2575 #ifdef WM_EVENT_COUNTERS
2576 		if (status & WTX_ST_TU)
2577 			WM_EVCNT_INCR(&sc->sc_ev_tu);
2578 #endif /* WM_EVENT_COUNTERS */
2579 
2580 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
2581 			ifp->if_oerrors++;
2582 			if (status & WTX_ST_LC)
2583 				log(LOG_WARNING, "%s: late collision\n",
2584 				    device_xname(sc->sc_dev));
2585 			else if (status & WTX_ST_EC) {
2586 				ifp->if_collisions += 16;
2587 				log(LOG_WARNING, "%s: excessive collisions\n",
2588 				    device_xname(sc->sc_dev));
2589 			}
2590 		} else
2591 			ifp->if_opackets++;
2592 
2593 		sc->sc_txfree += txs->txs_ndesc;
2594 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
2595 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2596 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2597 		m_freem(txs->txs_mbuf);
2598 		txs->txs_mbuf = NULL;
2599 	}
2600 
2601 	/* Update the dirty transmit buffer pointer. */
2602 	sc->sc_txsdirty = i;
2603 	DPRINTF(WM_DEBUG_TX,
2604 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
2605 
2606 	/*
2607 	 * If there are no more pending transmissions, cancel the watchdog
2608 	 * timer.
2609 	 */
2610 	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
2611 		ifp->if_timer = 0;
2612 }
2613 
2614 /*
2615  * wm_rxintr:
2616  *
2617  *	Helper; handle receive interrupts.
2618  */
2619 static void
2620 wm_rxintr(struct wm_softc *sc)
2621 {
2622 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2623 	struct wm_rxsoft *rxs;
2624 	struct mbuf *m;
2625 	int i, len;
2626 	uint8_t status, errors;
2627 	uint16_t vlantag;
2628 
2629 	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
2630 		rxs = &sc->sc_rxsoft[i];
2631 
2632 		DPRINTF(WM_DEBUG_RX,
2633 		    ("%s: RX: checking descriptor %d\n",
2634 		    device_xname(sc->sc_dev), i));
2635 
2636 		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2637 
2638 		status = sc->sc_rxdescs[i].wrx_status;
2639 		errors = sc->sc_rxdescs[i].wrx_errors;
2640 		len = le16toh(sc->sc_rxdescs[i].wrx_len);
2641 		vlantag = sc->sc_rxdescs[i].wrx_special;
2642 
2643 		if ((status & WRX_ST_DD) == 0) {
2644 			/*
2645 			 * We have processed all of the receive descriptors.
2646 			 */
2647 			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
2648 			break;
2649 		}
2650 
2651 		if (__predict_false(sc->sc_rxdiscard)) {
2652 			DPRINTF(WM_DEBUG_RX,
2653 			    ("%s: RX: discarding contents of descriptor %d\n",
2654 			    device_xname(sc->sc_dev), i));
2655 			WM_INIT_RXDESC(sc, i);
2656 			if (status & WRX_ST_EOP) {
2657 				/* Reset our state. */
2658 				DPRINTF(WM_DEBUG_RX,
2659 				    ("%s: RX: resetting rxdiscard -> 0\n",
2660 				    device_xname(sc->sc_dev)));
2661 				sc->sc_rxdiscard = 0;
2662 			}
2663 			continue;
2664 		}
2665 
2666 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2667 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2668 
2669 		m = rxs->rxs_mbuf;
2670 
2671 		/*
2672 		 * Add a new receive buffer to the ring, unless of
2673 		 * course the length is zero. Treat the latter as a
2674 		 * failed mapping.
2675 		 */
2676 		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
2677 			/*
2678 			 * Failed, throw away what we've done so
2679 			 * far, and discard the rest of the packet.
2680 			 */
2681 			ifp->if_ierrors++;
2682 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2683 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2684 			WM_INIT_RXDESC(sc, i);
2685 			if ((status & WRX_ST_EOP) == 0)
2686 				sc->sc_rxdiscard = 1;
2687 			if (sc->sc_rxhead != NULL)
2688 				m_freem(sc->sc_rxhead);
2689 			WM_RXCHAIN_RESET(sc);
2690 			DPRINTF(WM_DEBUG_RX,
2691 			    ("%s: RX: Rx buffer allocation failed, "
2692 			    "dropping packet%s\n", device_xname(sc->sc_dev),
2693 			    sc->sc_rxdiscard ? " (discard)" : ""));
2694 			continue;
2695 		}
2696 
2697 		m->m_len = len;
2698 		sc->sc_rxlen += len;
2699 		DPRINTF(WM_DEBUG_RX,
2700 		    ("%s: RX: buffer at %p len %d\n",
2701 		    device_xname(sc->sc_dev), m->m_data, len));
2702 
2703 		/*
2704 		 * If this is not the end of the packet, keep
2705 		 * looking.
2706 		 */
2707 		if ((status & WRX_ST_EOP) == 0) {
2708 			WM_RXCHAIN_LINK(sc, m);
2709 			DPRINTF(WM_DEBUG_RX,
2710 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
2711 			    device_xname(sc->sc_dev), sc->sc_rxlen));
2712 			continue;
2713 		}
2714 
2715 		/*
2716 		 * Okay, we have the entire packet now.  The chip is
2717 		 * configured to include the FCS (not all chips can
2718 		 * be configured to strip it), so we need to trim it.
2719 		 * May need to adjust length of previous mbuf in the
2720 		 * chain if the current mbuf is too short.
2721 		 */
2722 		if (m->m_len < ETHER_CRC_LEN) {
2723 			sc->sc_rxtail->m_len -= (ETHER_CRC_LEN - m->m_len);
2724 			m->m_len = 0;
2725 		} else {
2726 			m->m_len -= ETHER_CRC_LEN;
2727 		}
2728 		len = sc->sc_rxlen - ETHER_CRC_LEN;
2729 
2730 		WM_RXCHAIN_LINK(sc, m);
2731 
2732 		*sc->sc_rxtailp = NULL;
2733 		m = sc->sc_rxhead;
2734 
2735 		WM_RXCHAIN_RESET(sc);
2736 
2737 		DPRINTF(WM_DEBUG_RX,
2738 		    ("%s: RX: have entire packet, len -> %d\n",
2739 		    device_xname(sc->sc_dev), len));
2740 
2741 		/*
2742 		 * If an error occurred, update stats and drop the packet.
2743 		 */
2744 		if (errors &
2745 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
2746 			ifp->if_ierrors++;
2747 			if (errors & WRX_ER_SE)
2748 				log(LOG_WARNING, "%s: symbol error\n",
2749 				    device_xname(sc->sc_dev));
2750 			else if (errors & WRX_ER_SEQ)
2751 				log(LOG_WARNING, "%s: receive sequence error\n",
2752 				    device_xname(sc->sc_dev));
2753 			else if (errors & WRX_ER_CE)
2754 				log(LOG_WARNING, "%s: CRC error\n",
2755 				    device_xname(sc->sc_dev));
2756 			m_freem(m);
2757 			continue;
2758 		}
2759 
2760 		/*
2761 		 * No errors.  Receive the packet.
2762 		 */
2763 		m->m_pkthdr.rcvif = ifp;
2764 		m->m_pkthdr.len = len;
2765 
2766 		/*
2767 		 * If VLANs are enabled, VLAN packets have been unwrapped
2768 		 * for us.  Associate the tag with the packet.
2769 		 */
2770 		if ((status & WRX_ST_VP) != 0) {
2771 			VLAN_INPUT_TAG(ifp, m,
2772 			    le16toh(vlantag),
2773 			    continue);
2774 		}
2775 
2776 		/*
2777 		 * Set up checksum info for this packet.
2778 		 */
2779 		if ((status & WRX_ST_IXSM) == 0) {
2780 			if (status & WRX_ST_IPCS) {
2781 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
2782 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2783 				if (errors & WRX_ER_IPE)
2784 					m->m_pkthdr.csum_flags |=
2785 					    M_CSUM_IPv4_BAD;
2786 			}
2787 			if (status & WRX_ST_TCPCS) {
2788 				/*
2789 				 * Note: we don't know if this was TCP or UDP,
2790 				 * so we just set both bits, and expect the
2791 				 * upper layers to deal.
2792 				 */
2793 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
2794 				m->m_pkthdr.csum_flags |=
2795 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
2796 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
2797 				if (errors & WRX_ER_TCPE)
2798 					m->m_pkthdr.csum_flags |=
2799 					    M_CSUM_TCP_UDP_BAD;
2800 			}
2801 		}
2802 
2803 		ifp->if_ipackets++;
2804 
2805 #if NBPFILTER > 0
2806 		/* Pass this up to any BPF listeners. */
2807 		if (ifp->if_bpf)
2808 			bpf_mtap(ifp->if_bpf, m);
2809 #endif /* NBPFILTER > 0 */
2810 
2811 		/* Pass it on. */
2812 		(*ifp->if_input)(ifp, m);
2813 	}
2814 
2815 	/* Update the receive pointer. */
2816 	sc->sc_rxptr = i;
2817 
2818 	DPRINTF(WM_DEBUG_RX,
2819 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
2820 }
2821 
2822 /*
2823  * wm_linkintr:
2824  *
2825  *	Helper; handle link interrupts.
2826  */
2827 static void
2828 wm_linkintr(struct wm_softc *sc, uint32_t icr)
2829 {
2830 	uint32_t status;
2831 
2832 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
2833 		__func__));
2834 	/*
2835 	 * If we get a link status interrupt on a 1000BASE-T
2836 	 * device, just fall into the normal MII tick path.
2837 	 */
2838 	if (sc->sc_flags & WM_F_HAS_MII) {
2839 		if (icr & ICR_LSC) {
2840 			DPRINTF(WM_DEBUG_LINK,
2841 			    ("%s: LINK: LSC -> mii_tick\n",
2842 			    device_xname(sc->sc_dev)));
2843 			mii_tick(&sc->sc_mii);
2844 			if (sc->sc_type == WM_T_82543) {
2845 				int miistatus, active;
2846 
2847 				/*
2848 				 * With the 82543, we need to force the MAC's
2849 				 * speed and duplex to match the PHY's speed
2850 				 * and duplex configuration.
2851 				 */
2852 				miistatus = sc->sc_mii.mii_media_status;
2853 
2854 				if (miistatus & IFM_ACTIVE) {
2855 					active = sc->sc_mii.mii_media_active;
2856 					sc->sc_ctrl &= ~(CTRL_SPEED_MASK
2857 					    | CTRL_FD);
2858 					switch (IFM_SUBTYPE(active)) {
2859 					case IFM_10_T:
2860 						sc->sc_ctrl |= CTRL_SPEED_10;
2861 						break;
2862 					case IFM_100_TX:
2863 						sc->sc_ctrl |= CTRL_SPEED_100;
2864 						break;
2865 					case IFM_1000_T:
2866 						sc->sc_ctrl |= CTRL_SPEED_1000;
2867 						break;
2868 					default:
2869 						/*
2870 						 * fiber?
2871 						 * Should not enter here.
2872 						 */
2873 						printf("unknown media (%x)\n",
2874 						    active);
2875 						break;
2876 					}
2877 					if (active & IFM_FDX)
2878 						sc->sc_ctrl |= CTRL_FD;
2879 					CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2880 				}
2881 			}
2882 		} else if (icr & ICR_RXSEQ) {
2883 			DPRINTF(WM_DEBUG_LINK,
2884 			    ("%s: LINK Receive sequence error\n",
2885 			    device_xname(sc->sc_dev)));
2886 		}
2887 		return;
2888 	}
2889 
2890 	status = CSR_READ(sc, WMREG_STATUS);
2891 	if (icr & ICR_LSC) {
2892 		if (status & STATUS_LU) {
2893 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
2894 			    device_xname(sc->sc_dev),
2895 			    (status & STATUS_FD) ? "FDX" : "HDX"));
2896 			/*
2897 			 * NOTE: the chip updates the TFCE and RFCE bits in
2898 			 * CTRL automatically, so re-read CTRL into sc->sc_ctrl.
2899 			 */
2900 
2901 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
2902 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2903 			sc->sc_fcrtl &= ~FCRTL_XONE;
2904 			if (status & STATUS_FD)
2905 				sc->sc_tctl |=
2906 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2907 			else
2908 				sc->sc_tctl |=
2909 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2910 			if (sc->sc_ctrl & CTRL_TFCE)
2911 				sc->sc_fcrtl |= FCRTL_XONE;
2912 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2913 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
2914 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
2915 				      sc->sc_fcrtl);
2916 			sc->sc_tbi_linkup = 1;
2917 		} else {
2918 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
2919 			    device_xname(sc->sc_dev)));
2920 			sc->sc_tbi_linkup = 0;
2921 		}
2922 		wm_tbi_set_linkled(sc);
2923 	} else if (icr & ICR_RXCFG) {
2924 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
2925 		    device_xname(sc->sc_dev)));
2926 		sc->sc_tbi_nrxcfg++;
2927 		wm_check_for_link(sc);
2928 	} else if (icr & ICR_RXSEQ) {
2929 		DPRINTF(WM_DEBUG_LINK,
2930 		    ("%s: LINK: Receive sequence error\n",
2931 		    device_xname(sc->sc_dev)));
2932 	}
2933 }
2934 
2935 /*
2936  * wm_tick:
2937  *
2938  *	One second timer, used to check link status, sweep up
2939  *	completed transmit jobs, etc.
2940  */
2941 static void
2942 wm_tick(void *arg)
2943 {
2944 	struct wm_softc *sc = arg;
2945 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2946 	int s;
2947 
2948 	s = splnet();
2949 
2950 	if (sc->sc_type >= WM_T_82542_2_1) {
2951 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2952 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2953 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2954 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2955 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2956 	}
2957 
2958 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2959 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
2960 
2961 
2962 	if (sc->sc_flags & WM_F_HAS_MII)
2963 		mii_tick(&sc->sc_mii);
2964 	else
2965 		wm_tbi_check_link(sc);
2966 
2967 	splx(s);
2968 
2969 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2970 }
2971 
2972 /*
2973  * wm_reset:
2974  *
2975  *	Reset the i82542 chip.
2976  */
2977 static void
2978 wm_reset(struct wm_softc *sc)
2979 {
2980 	uint32_t reg;
2981 
2982 	/*
2983 	 * Allocate on-chip memory according to the MTU size.
2984 	 * The Packet Buffer Allocation register must be written
2985 	 * before the chip is reset.
2986 	 */
2987 	switch (sc->sc_type) {
2988 	case WM_T_82547:
2989 	case WM_T_82547_2:
2990 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
2991 		    PBA_22K : PBA_30K;
2992 		sc->sc_txfifo_head = 0;
2993 		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
2994 		sc->sc_txfifo_size =
2995 		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
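		/*
		 * Assuming the PBA_* constants are in 1 KB units, the
		 * non-jumbo PBA_30K split leaves the top 10 KB of the
		 * 40 KB packet buffer as the Tx FIFO.
		 */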
2996 		sc->sc_txfifo_stall = 0;
2997 		break;
2998 	case WM_T_82571:
2999 	case WM_T_82572:
3000 	case WM_T_80003:
3001 		sc->sc_pba = PBA_32K;
3002 		break;
3003 	case WM_T_82573:
3004 	case WM_T_82574:
3005 		sc->sc_pba = PBA_12K;
3006 		break;
3007 	case WM_T_ICH8:
3008 		sc->sc_pba = PBA_8K;
3009 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3010 		break;
3011 	case WM_T_ICH9:
3012 	case WM_T_ICH10:
3013 		sc->sc_pba = PBA_10K;
3014 		break;
3015 	default:
3016 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3017 		    PBA_40K : PBA_48K;
3018 		break;
3019 	}
3020 	CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3021 
3022 	if (sc->sc_flags & WM_F_PCIE) {
3023 		int timeout = 800;
3024 
3025 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
3026 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3027 
3028 		while (timeout--) {
3029 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
3030 				break;
3031 			delay(100);
3032 		}
3033 	}
3034 
3035 	/* clear interrupt */
3036 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3037 
3038 	/*
3039 	 * 82541 Errata 29? & 82547 Errata 28?
3040 	 * See also the description about PHY_RST bit in CTRL register
3041 	 * in 8254x_GBe_SDM.pdf.
3042 	 */
3043 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3044 		CSR_WRITE(sc, WMREG_CTRL,
3045 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3046 		delay(5000);
3047 	}
3048 
3049 	switch (sc->sc_type) {
3050 	case WM_T_82544:
3051 	case WM_T_82540:
3052 	case WM_T_82545:
3053 	case WM_T_82546:
3054 	case WM_T_82541:
3055 	case WM_T_82541_2:
3056 		/*
3057 		 * On some chipsets, a reset through a memory-mapped write
3058 		 * cycle can cause the chip to reset before completing the
3059 		 * write cycle.  This causes a major headache that can be
3060 		 * avoided by issuing the reset via indirect register writes
3061 		 * through I/O space.
3062 		 *
3063 		 * So, if we successfully mapped the I/O BAR at attach time,
3064 		 * use that.  Otherwise, try our luck with a memory-mapped
3065 		 * reset.
3066 		 */
3067 		if (sc->sc_flags & WM_F_IOH_VALID)
3068 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3069 		else
3070 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3071 		break;
3072 
3073 	case WM_T_82545_3:
3074 	case WM_T_82546_3:
3075 		/* Use the shadow control register on these chips. */
3076 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3077 		break;
3078 
3079 	case WM_T_ICH8:
3080 	case WM_T_ICH9:
3081 	case WM_T_ICH10:
3082 		wm_get_swfwhw_semaphore(sc);
3083 		CSR_WRITE(sc, WMREG_CTRL, CTRL_RST | CTRL_PHY_RESET);
3084 		delay(10000);
3085 
3086 	default:
3087 		/* Everything else can safely use the documented method. */
3088 		CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3089 		break;
3090 	}
3091 	delay(10000);
3092 
3093 	/* reload EEPROM */
3094 	switch(sc->sc_type) {
3095 	case WM_T_82542_2_0:
3096 	case WM_T_82542_2_1:
3097 	case WM_T_82543:
3098 	case WM_T_82544:
3099 		delay(10);
3100 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3101 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3102 		delay(2000);
3103 		break;
3104 	case WM_T_82541:
3105 	case WM_T_82541_2:
3106 	case WM_T_82547:
3107 	case WM_T_82547_2:
3108 		delay(20000);
3109 		break;
3110 	case WM_T_82573:
3111 	case WM_T_82574:
3112 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3113 			delay(10);
3114 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3115 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3116 		}
3117 		/* FALLTHROUGH */
3118 	default:
3119 		/* check EECD_EE_AUTORD */
3120 		wm_get_auto_rd_done(sc);
3121 	}
3122 
3123 	/* reload sc_ctrl */
3124 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3125 
3126 #if 0
3127 	for (i = 0; i < 1000; i++) {
3128 		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0) {
3129 			return;
3130 		}
3131 		delay(20);
3132 	}
3133 
3134 	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
3135 		log(LOG_ERR, "%s: reset failed to complete\n",
3136 		    device_xname(sc->sc_dev));
3137 #endif
3138 }
3139 
3140 /*
3141  * wm_init:		[ifnet interface function]
3142  *
3143  *	Initialize the interface.  Must be called at splnet().
3144  */
3145 static int
3146 wm_init(struct ifnet *ifp)
3147 {
3148 	struct wm_softc *sc = ifp->if_softc;
3149 	struct wm_rxsoft *rxs;
3150 	int i, error = 0;
3151 	uint32_t reg;
3152 
3153 	/*
3154 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3155 	 * There is a small but measurable benefit to avoiding the adjustment
3156 	 * of the descriptor so that the headers are aligned, for normal mtu,
3157 	 * on such platforms.  One possibility is that the DMA itself is
3158 	 * slightly more efficient if the front of the entire packet (instead
3159 	 * of the front of the headers) is aligned.
3160 	 *
3161 	 * Note we must always set align_tweak to 0 if we are using
3162 	 * jumbo frames.
3163 	 */
3164 #ifdef __NO_STRICT_ALIGNMENT
3165 	sc->sc_align_tweak = 0;
3166 #else
3167 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3168 		sc->sc_align_tweak = 0;
3169 	else
3170 		sc->sc_align_tweak = 2;
3171 #endif /* __NO_STRICT_ALIGNMENT */
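	/*
	 * With align_tweak = 2, the 14-byte Ethernet header ends on a
	 * 16-byte boundary, so the IP header that follows is 4-byte
	 * aligned.
	 */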
3172 
3173 	/* Cancel any pending I/O. */
3174 	wm_stop(ifp, 0);
3175 
3176 	/* update statistics before reset */
3177 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3178 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3179 
3180 	/* Reset the chip to a known state. */
3181 	wm_reset(sc);
3182 
3183 	switch (sc->sc_type) {
3184 	case WM_T_82571:
3185 	case WM_T_82572:
3186 	case WM_T_82573:
3187 	case WM_T_82574:
3188 	case WM_T_80003:
3189 	case WM_T_ICH8:
3190 	case WM_T_ICH9:
3191 	case WM_T_ICH10:
3192 		if (wm_check_mng_mode(sc) != 0)
3193 			wm_get_hw_control(sc);
3194 		break;
3195 	default:
3196 		break;
3197 	}
3198 
3199 	/* Initialize the transmit descriptor ring. */
3200 	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3201 	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3202 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3203 	sc->sc_txfree = WM_NTXDESC(sc);
3204 	sc->sc_txnext = 0;
3205 
3206 	if (sc->sc_type < WM_T_82543) {
3207 		CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
3208 		CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
3209 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3210 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3211 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3212 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3213 	} else {
3214 		CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
3215 		CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
3216 		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3217 		CSR_WRITE(sc, WMREG_TDH, 0);
3218 		CSR_WRITE(sc, WMREG_TDT, 0);
3219 		CSR_WRITE(sc, WMREG_TIDV, 375);		/* ITR / 4 */
3220 		CSR_WRITE(sc, WMREG_TADV, 375);		/* should be same */
3221 
3222 		CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3223 		    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3224 		CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3225 		    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3226 	}
3227 	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3228 	CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3229 
3230 	/* Initialize the transmit job descriptors. */
3231 	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3232 		sc->sc_txsoft[i].txs_mbuf = NULL;
3233 	sc->sc_txsfree = WM_TXQUEUELEN(sc);
3234 	sc->sc_txsnext = 0;
3235 	sc->sc_txsdirty = 0;
3236 
3237 	/*
3238 	 * Initialize the receive descriptor and receive job
3239 	 * descriptor rings.
3240 	 */
3241 	if (sc->sc_type < WM_T_82543) {
3242 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3243 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3244 		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3245 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3246 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3247 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3248 
3249 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3250 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3251 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3252 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3253 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3254 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3255 	} else {
3256 		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3257 		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3258 		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3259 		CSR_WRITE(sc, WMREG_RDH, 0);
3260 		CSR_WRITE(sc, WMREG_RDT, 0);
3261 		CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD);	/* ITR/4 */
3262 		CSR_WRITE(sc, WMREG_RADV, 375);		/* MUST be same */
3263 	}
3264 	for (i = 0; i < WM_NRXDESC; i++) {
3265 		rxs = &sc->sc_rxsoft[i];
3266 		if (rxs->rxs_mbuf == NULL) {
3267 			if ((error = wm_add_rxbuf(sc, i)) != 0) {
3268 				log(LOG_ERR, "%s: unable to allocate or map rx "
3269 				    "buffer %d, error = %d\n",
3270 				    device_xname(sc->sc_dev), i, error);
3271 				/*
3272 				 * XXX Should attempt to run with fewer receive
3273 				 * XXX buffers instead of just failing.
3274 				 */
3275 				wm_rxdrain(sc);
3276 				goto out;
3277 			}
3278 		} else
3279 			WM_INIT_RXDESC(sc, i);
3280 	}
3281 	sc->sc_rxptr = 0;
3282 	sc->sc_rxdiscard = 0;
3283 	WM_RXCHAIN_RESET(sc);
3284 
3285 	/*
3286 	 * Clear out the VLAN table -- we don't use it (yet).
3287 	 */
3288 	CSR_WRITE(sc, WMREG_VET, 0);
3289 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
3290 		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3291 
3292 	/*
3293 	 * Set up flow-control parameters.
3294 	 *
3295 	 * XXX Values could probably stand some tuning.
3296 	 */
3297 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
3298 	    && (sc->sc_type != WM_T_ICH10)) {
3299 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3300 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3301 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3302 	}
3303 
3304 	sc->sc_fcrtl = FCRTL_DFLT;
3305 	if (sc->sc_type < WM_T_82543) {
3306 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3307 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3308 	} else {
3309 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3310 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3311 	}
3312 
3313 	if (sc->sc_type == WM_T_80003)
3314 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
3315 	else
3316 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3317 
3318 	/* Deal with VLAN enables. */
3319 	if (VLAN_ATTACHED(&sc->sc_ethercom))
3320 		sc->sc_ctrl |= CTRL_VME;
3321 	else
3322 		sc->sc_ctrl &= ~CTRL_VME;
3323 
3324 	/* Write the control registers. */
3325 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3326 
3327 	if (sc->sc_flags & WM_F_HAS_MII) {
3328 		int val;
3329 
3330 		switch (sc->sc_type) {
3331 		case WM_T_80003:
3332 		case WM_T_ICH8:
3333 		case WM_T_ICH9:
3334 		case WM_T_ICH10:
3335 			/*
3336 			 * Set the mac to wait the maximum time between each
3337 			 * iteration and increase the max iterations when
3338 			 * polling the phy; this fixes erroneous timeouts at
3339 			 * 10Mbps.
3340 			 */
3341 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
3342 			    0xFFFF);
3343 			val = wm_kmrn_readreg(sc,
3344 			    KUMCTRLSTA_OFFSET_INB_PARAM);
3345 			val |= 0x3F;
3346 			wm_kmrn_writereg(sc,
3347 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
3348 			break;
3349 		default:
3350 			break;
3351 		}
3352 
3353 		if (sc->sc_type == WM_T_80003) {
3354 			val = CSR_READ(sc, WMREG_CTRL_EXT);
3355 			val &= ~CTRL_EXT_LINK_MODE_MASK;
3356 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3357 
3358 			/* Bypass the RX and TX FIFOs */
3359 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3360 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
3361 			    KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3362 
3363 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3364 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3365 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3366 		}
3367 	}
3368 #if 0
3369 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3370 #endif
3371 
3372 	/*
3373 	 * Set up checksum offload parameters.
3374 	 */
3375 	reg = CSR_READ(sc, WMREG_RXCSUM);
3376 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3377 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3378 		reg |= RXCSUM_IPOFL;
3379 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3380 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3381 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3382 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3383 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
3384 
3385 	/* Reset TBI's RXCFG count */
3386 	sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
3387 
3388 	/*
3389 	 * Set up the interrupt registers.
3390 	 */
3391 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3392 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3393 	    ICR_RXO | ICR_RXT0;
3394 	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
3395 		sc->sc_icr |= ICR_RXCFG;
3396 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
3397 
3398 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3399 	    || (sc->sc_type == WM_T_ICH10)) {
3400 		reg = CSR_READ(sc, WMREG_KABGTXD);
3401 		reg |= KABGTXD_BGSQLBIAS;
3402 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
3403 	}
3404 
3405 	/* Set up the inter-packet gap. */
3406 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
3407 
3408 	if (sc->sc_type >= WM_T_82543) {
3409 		/*
3410 		 * Set up the interrupt throttling register (units of 256ns)
3411 		 * Note that a footnote in Intel's documentation says this
3412 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
3413 		 * or 10Mbit mode.  Empirically, this also appears to
3414 		 * be true for the 1024ns units of the other
3415 		 * interrupt-related timer registers -- so, really, we ought
3416 		 * to divide this value by 4 when the link speed is low.
3417 		 *
3418 		 * XXX implement this division at link speed change!
3419 		 */
3420 
3421 		 /*
3422 		  * For N interrupts/sec, set this value to:
3423 		  * 1000000000 / (N * 256).  Note that we set the
3424 		  * absolute and packet timer values to this value
3425 		  * divided by 4 to get "simple timer" behavior.
3426 		  */
3427 
3428 		sc->sc_itr = 1500;		/* 2604 ints/sec */
3429 		CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
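		/*
		 * Worked example: with sc_itr = 1500 and 256ns units the
		 * rate is 10^9 / (1500 * 256) ~= 2604 ints/sec, matching
		 * the comment above.  A minimal sketch of the XXX
		 * division (not wired in; assumes a STATUS speed bit
		 * such as STATUS_SPEED_1000 is available):
		 */
#if 0
		uint32_t itr = sc->sc_itr;

		/* The throttling ticker runs at 1/4 rate at 10/100Mbit. */
		if ((CSR_READ(sc, WMREG_STATUS) & STATUS_SPEED_1000) == 0)
			itr /= 4;
		CSR_WRITE(sc, WMREG_ITR, itr);
#endif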
3430 	}
3431 
3432 	/* Set the VLAN ethertype. */
3433 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
3434 
3435 	/*
3436 	 * Set up the transmit control register; we start out with
3437 	 * a collision distance suitable for FDX, but update it when
3438 	 * we resolve the media type.
3439 	 */
3440 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
3441 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
3442 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3443 	if (sc->sc_type >= WM_T_82571)
3444 		sc->sc_tctl |= TCTL_MULR;
3445 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3446 
3447 	if (sc->sc_type == WM_T_80003) {
3448 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
3449 		reg &= ~TCTL_EXT_GCEX_MASK;
3450 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
3451 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
3452 	}
3453 
3454 	/* Set the media. */
3455 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
3456 		goto out;
3457 
3458 	/*
3459 	 * Set up the receive control register; we actually program
3460 	 * the register when we set the receive filter.  Use multicast
3461 	 * address offset type 0.
3462 	 *
3463 	 * Only the i82544 has the ability to strip the incoming
3464 	 * CRC, so we don't enable that feature.
3465 	 */
3466 	sc->sc_mchash_type = 0;
3467 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
3468 	    | RCTL_MO(sc->sc_mchash_type);
3469 
3470 	/* The 82573, 82574 and ICH8 don't support jumbo frames. */
3471 	if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_82574 &&
3472 	    sc->sc_type != WM_T_ICH8)
3473 		sc->sc_rctl |= RCTL_LPE;
3474 
3475 	if (MCLBYTES == 2048) {
3476 		sc->sc_rctl |= RCTL_2k;
3477 	} else {
3478 		if (sc->sc_type >= WM_T_82543) {
3479 			switch (MCLBYTES) {
3480 			case 4096:
3481 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
3482 				break;
3483 			case 8192:
3484 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
3485 				break;
3486 			case 16384:
3487 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
3488 				break;
3489 			default:
3490 				panic("wm_init: MCLBYTES %d unsupported",
3491 				    MCLBYTES);
3492 				break;
3493 			}
3494 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
3495 	}
3496 
3497 	/* Set the receive filter. */
3498 	wm_set_filter(sc);
3499 
3500 	/* Start the one second link check clock. */
3501 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3502 
3503 	/* ...all done! */
3504 	ifp->if_flags |= IFF_RUNNING;
3505 	ifp->if_flags &= ~IFF_OACTIVE;
3506 
3507  out:
3508 	if (error)
3509 		log(LOG_ERR, "%s: interface not running\n",
3510 		    device_xname(sc->sc_dev));
3511 	return (error);
3512 }
3513 
3514 /*
3515  * wm_rxdrain:
3516  *
3517  *	Drain the receive queue.
3518  */
3519 static void
3520 wm_rxdrain(struct wm_softc *sc)
3521 {
3522 	struct wm_rxsoft *rxs;
3523 	int i;
3524 
3525 	for (i = 0; i < WM_NRXDESC; i++) {
3526 		rxs = &sc->sc_rxsoft[i];
3527 		if (rxs->rxs_mbuf != NULL) {
3528 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3529 			m_freem(rxs->rxs_mbuf);
3530 			rxs->rxs_mbuf = NULL;
3531 		}
3532 	}
3533 }
3534 
3535 /*
3536  * wm_stop:		[ifnet interface function]
3537  *
3538  *	Stop transmission on the interface.
3539  */
3540 static void
3541 wm_stop(struct ifnet *ifp, int disable)
3542 {
3543 	struct wm_softc *sc = ifp->if_softc;
3544 	struct wm_txsoft *txs;
3545 	int i;
3546 
3547 	/* Stop the one second clock. */
3548 	callout_stop(&sc->sc_tick_ch);
3549 
3550 	/* Stop the 82547 Tx FIFO stall check timer. */
3551 	if (sc->sc_type == WM_T_82547)
3552 		callout_stop(&sc->sc_txfifo_ch);
3553 
3554 	if (sc->sc_flags & WM_F_HAS_MII) {
3555 		/* Down the MII. */
3556 		mii_down(&sc->sc_mii);
3557 	} else {
3558 #if 0
3559 		/* Should we clear PHY's status properly? */
3560 		wm_reset(sc);
3561 #endif
3562 	}
3563 
3564 	/* Stop the transmit and receive processes. */
3565 	CSR_WRITE(sc, WMREG_TCTL, 0);
3566 	CSR_WRITE(sc, WMREG_RCTL, 0);
3567 
3568 	/*
3569 	 * Clear the interrupt mask to ensure the device cannot assert its
3570 	 * interrupt line.
3571 	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
3572 	 * any currently pending or shared interrupt.
3573 	 */
3574 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3575 	sc->sc_icr = 0;
3576 
3577 	/* Release any queued transmit buffers. */
3578 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
3579 		txs = &sc->sc_txsoft[i];
3580 		if (txs->txs_mbuf != NULL) {
3581 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3582 			m_freem(txs->txs_mbuf);
3583 			txs->txs_mbuf = NULL;
3584 		}
3585 	}
3586 
3587 	/* Mark the interface as down and cancel the watchdog timer. */
3588 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3589 	ifp->if_timer = 0;
3590 
3591 	if (disable)
3592 		wm_rxdrain(sc);
3593 }
3594 
3595 void
3596 wm_get_auto_rd_done(struct wm_softc *sc)
3597 {
3598 	int i;
3599 
3600 	/* wait for eeprom to reload */
3601 	switch (sc->sc_type) {
3602 	case WM_T_82571:
3603 	case WM_T_82572:
3604 	case WM_T_82573:
3605 	case WM_T_82574:
3606 	case WM_T_80003:
3607 	case WM_T_ICH8:
3608 	case WM_T_ICH9:
3609 	case WM_T_ICH10:
3610 		for (i = 10; i > 0; i--) {
3611 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3612 				break;
3613 			delay(1000);
3614 		}
3615 		if (i == 0) {
3616 			log(LOG_ERR, "%s: auto read from eeprom failed to "
3617 			    "complete\n", device_xname(sc->sc_dev));
3618 		}
3619 		break;
3620 	default:
3621 		delay(5000);
3622 		break;
3623 	}
3624 
3625 	/* PHY configuration starts after EECD_EE_AUTORD is set */
3626 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574)
3627 		delay(25000);
3628 }
3629 
3630 /*
3631  * wm_acquire_eeprom:
3632  *
3633  *	Perform the EEPROM handshake required on some chips.
3634  */
3635 static int
3636 wm_acquire_eeprom(struct wm_softc *sc)
3637 {
3638 	uint32_t reg;
3639 	int x;
3640 	int ret = 0;
3641 
3642 	/* always succeeds */
3643 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3644 		return 0;
3645 
3646 	if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
3647 		ret = wm_get_swfwhw_semaphore(sc);
3648 	} else if (sc->sc_flags & WM_F_SWFW_SYNC) {
3649 		/* this will also do wm_get_swsm_semaphore() if needed */
3650 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
3651 	} else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
3652 		ret = wm_get_swsm_semaphore(sc);
3653 	}
3654 
3655 	if (ret) {
3656 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
3657 			__func__);
3658 		return 1;
3659 	}
3660 
3661 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE)  {
3662 		reg = CSR_READ(sc, WMREG_EECD);
3663 
3664 		/* Request EEPROM access. */
3665 		reg |= EECD_EE_REQ;
3666 		CSR_WRITE(sc, WMREG_EECD, reg);
3667 
3668 		/* ..and wait for it to be granted. */
3669 		for (x = 0; x < 1000; x++) {
3670 			reg = CSR_READ(sc, WMREG_EECD);
3671 			if (reg & EECD_EE_GNT)
3672 				break;
3673 			delay(5);
3674 		}
3675 		if ((reg & EECD_EE_GNT) == 0) {
3676 			aprint_error_dev(sc->sc_dev,
3677 			    "could not acquire EEPROM GNT\n");
3678 			reg &= ~EECD_EE_REQ;
3679 			CSR_WRITE(sc, WMREG_EECD, reg);
3680 			if (sc->sc_flags & WM_F_SWFWHW_SYNC)
3681 				wm_put_swfwhw_semaphore(sc);
3682 			if (sc->sc_flags & WM_F_SWFW_SYNC)
3683 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
3684 			else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
3685 				wm_put_swsm_semaphore(sc);
3686 			return (1);
3687 		}
3688 	}
3689 
3690 	return (0);
3691 }
3692 
3693 /*
3694  * wm_release_eeprom:
3695  *
3696  *	Release the EEPROM mutex.
3697  */
3698 static void
3699 wm_release_eeprom(struct wm_softc *sc)
3700 {
3701 	uint32_t reg;
3702 
3703 	/* always succeeds */
3704 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3705 		return;
3706 
3707 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
3708 		reg = CSR_READ(sc, WMREG_EECD);
3709 		reg &= ~EECD_EE_REQ;
3710 		CSR_WRITE(sc, WMREG_EECD, reg);
3711 	}
3712 
3713 	if (sc->sc_flags & WM_F_SWFWHW_SYNC)
3714 		wm_put_swfwhw_semaphore(sc);
3715 	if (sc->sc_flags & WM_F_SWFW_SYNC)
3716 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
3717 	else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
3718 		wm_put_swsm_semaphore(sc);
3719 }
3720 
3721 /*
3722  * wm_eeprom_sendbits:
3723  *
3724  *	Send a series of bits to the EEPROM.
3725  */
3726 static void
3727 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
3728 {
3729 	uint32_t reg;
3730 	int x;
3731 
3732 	reg = CSR_READ(sc, WMREG_EECD);
3733 
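	/*
	 * Clock the bits out MSB-first: present each bit on EECD_DI,
	 * then pulse EECD_SK high and low; the EEPROM samples DI on the
	 * rising edge of SK.
	 */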
3734 	for (x = nbits; x > 0; x--) {
3735 		if (bits & (1U << (x - 1)))
3736 			reg |= EECD_DI;
3737 		else
3738 			reg &= ~EECD_DI;
3739 		CSR_WRITE(sc, WMREG_EECD, reg);
3740 		delay(2);
3741 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
3742 		delay(2);
3743 		CSR_WRITE(sc, WMREG_EECD, reg);
3744 		delay(2);
3745 	}
3746 }
3747 
3748 /*
3749  * wm_eeprom_recvbits:
3750  *
3751  *	Receive a series of bits from the EEPROM.
3752  */
3753 static void
3754 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
3755 {
3756 	uint32_t reg, val;
3757 	int x;
3758 
3759 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
3760 
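	/*
	 * Clock the bits in MSB-first: raise EECD_SK, sample EECD_DO,
	 * then drop SK -- the mirror image of wm_eeprom_sendbits().
	 */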
3761 	val = 0;
3762 	for (x = nbits; x > 0; x--) {
3763 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
3764 		delay(2);
3765 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
3766 			val |= (1U << (x - 1));
3767 		CSR_WRITE(sc, WMREG_EECD, reg);
3768 		delay(2);
3769 	}
3770 	*valp = val;
3771 }
3772 
3773 /*
3774  * wm_read_eeprom_uwire:
3775  *
3776  *	Read a word from the EEPROM using the MicroWire protocol.
3777  */
3778 static int
3779 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3780 {
3781 	uint32_t reg, val;
3782 	int i;
3783 
3784 	for (i = 0; i < wordcnt; i++) {
3785 		/* Clear SK and DI. */
3786 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
3787 		CSR_WRITE(sc, WMREG_EECD, reg);
3788 
3789 		/* Set CHIP SELECT. */
3790 		reg |= EECD_CS;
3791 		CSR_WRITE(sc, WMREG_EECD, reg);
3792 		delay(2);
3793 
3794 		/* Shift in the READ command. */
3795 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
3796 
3797 		/* Shift in address. */
3798 		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
3799 
3800 		/* Shift out the data. */
3801 		wm_eeprom_recvbits(sc, &val, 16);
3802 		data[i] = val & 0xffff;
3803 
3804 		/* Clear CHIP SELECT. */
3805 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
3806 		CSR_WRITE(sc, WMREG_EECD, reg);
3807 		delay(2);
3808 	}
3809 
3810 	return (0);
3811 }
3812 
3813 /*
3814  * wm_spi_eeprom_ready:
3815  *
3816  *	Wait for a SPI EEPROM to be ready for commands.
3817  */
3818 static int
3819 wm_spi_eeprom_ready(struct wm_softc *sc)
3820 {
3821 	uint32_t val;
3822 	int usec;
3823 
3824 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
3825 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
3826 		wm_eeprom_recvbits(sc, &val, 8);
3827 		if ((val & SPI_SR_RDY) == 0)
3828 			break;
3829 	}
3830 	if (usec >= SPI_MAX_RETRIES) {
3831 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
3832 		return (1);
3833 	}
3834 	return (0);
3835 }
3836 
3837 /*
3838  * wm_read_eeprom_spi:
3839  *
3840  *	Read a word from the EEPROM using the SPI protocol.
3841  */
3842 static int
3843 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3844 {
3845 	uint32_t reg, val;
3846 	int i;
3847 	uint8_t opc;
3848 
3849 	/* Clear SK and CS. */
3850 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
3851 	CSR_WRITE(sc, WMREG_EECD, reg);
3852 	delay(2);
3853 
3854 	if (wm_spi_eeprom_ready(sc))
3855 		return (1);
3856 
3857 	/* Toggle CS to flush commands. */
3858 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
3859 	delay(2);
3860 	CSR_WRITE(sc, WMREG_EECD, reg);
3861 	delay(2);
3862 
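	/*
	 * For SPI parts with only 8 address bits, the ninth address bit
	 * (A8) rides in the opcode and selects the upper 128 words --
	 * hence the test below.
	 */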
3863 	opc = SPI_OPC_READ;
3864 	if (sc->sc_ee_addrbits == 8 && word >= 128)
3865 		opc |= SPI_OPC_A8;
3866 
3867 	wm_eeprom_sendbits(sc, opc, 8);
3868 	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
3869 
3870 	for (i = 0; i < wordcnt; i++) {
3871 		wm_eeprom_recvbits(sc, &val, 16);
3872 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
3873 	}
3874 
3875 	/* Raise CS and clear SK. */
3876 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
3877 	CSR_WRITE(sc, WMREG_EECD, reg);
3878 	delay(2);
3879 
3880 	return (0);
3881 }
3882 
3883 #define EEPROM_CHECKSUM		0xBABA
3884 #define EEPROM_SIZE		0x0040
3885 
3886 /*
3887  * wm_validate_eeprom_checksum:
3888  *
3889  * The sum of the first 64 16-bit words must equal EEPROM_CHECKSUM.
3890  */
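/*
 * Worked example (an assumption about how the part is provisioned, not
 * taken from this driver): the checksum word at offset 0x3f is chosen
 * so that the 16-bit sum over words 0x00..0x3f wraps to exactly 0xBABA:
 *
 *	cksum_word = (uint16_t)(EEPROM_CHECKSUM - sum(words 0x00..0x3e))
 */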
3891 static int
3892 wm_validate_eeprom_checksum(struct wm_softc *sc)
3893 {
3894 	uint16_t checksum;
3895 	uint16_t eeprom_data;
3896 	int i;
3897 
3898 	checksum = 0;
3899 
3900 	for (i = 0; i < EEPROM_SIZE; i++) {
3901 		if (wm_read_eeprom(sc, i, 1, &eeprom_data))
3902 			return 1;
3903 		checksum += eeprom_data;
3904 	}
3905 
3906 	if (checksum != (uint16_t) EEPROM_CHECKSUM)
3907 		return 1;
3908 
3909 	return 0;
3910 }
3911 
3912 /*
3913  * wm_read_eeprom:
3914  *
3915  *	Read data from the serial EEPROM.
3916  */
3917 static int
3918 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3919 {
3920 	int rv;
3921 
3922 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
3923 		return 1;
3924 
3925 	if (wm_acquire_eeprom(sc))
3926 		return 1;
3927 
3928 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3929 	    || (sc->sc_type == WM_T_ICH10))
3930 		rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
3931 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
3932 		rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
3933 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
3934 		rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
3935 	else
3936 		rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
3937 
3938 	wm_release_eeprom(sc);
3939 	return rv;
3940 }
3941 
3942 static int
3943 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
3944     uint16_t *data)
3945 {
3946 	int i, eerd = 0;
3947 	int error = 0;
3948 
3949 	for (i = 0; i < wordcnt; i++) {
3950 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
3951 
3952 		CSR_WRITE(sc, WMREG_EERD, eerd);
3953 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
3954 		if (error != 0)
3955 			break;
3956 
3957 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
3958 	}
3959 
3960 	return error;
3961 }
3962 
3963 static int
3964 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
3965 {
3966 	uint32_t attempts = 100000;
3967 	uint32_t i, reg = 0;
3968 	int32_t done = -1;
3969 
3970 	for (i = 0; i < attempts; i++) {
3971 		reg = CSR_READ(sc, rw);
3972 
3973 		if (reg & EERD_DONE) {
3974 			done = 0;
3975 			break;
3976 		}
3977 		delay(5);
3978 	}
3979 
3980 	return done;
3981 }
3982 
3983 /*
3984  * wm_add_rxbuf:
3985  *
3986  *	Add a receive buffer to the indicated descriptor.
3987  */
3988 static int
3989 wm_add_rxbuf(struct wm_softc *sc, int idx)
3990 {
3991 	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
3992 	struct mbuf *m;
3993 	int error;
3994 
3995 	MGETHDR(m, M_DONTWAIT, MT_DATA);
3996 	if (m == NULL)
3997 		return (ENOBUFS);
3998 
3999 	MCLGET(m, M_DONTWAIT);
4000 	if ((m->m_flags & M_EXT) == 0) {
4001 		m_freem(m);
4002 		return (ENOBUFS);
4003 	}
4004 
4005 	if (rxs->rxs_mbuf != NULL)
4006 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4007 
4008 	rxs->rxs_mbuf = m;
4009 
4010 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4011 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4012 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
4013 	if (error) {
4014 		/* XXX XXX XXX */
4015 		aprint_error_dev(sc->sc_dev,
4016 		    "unable to load rx DMA map %d, error = %d\n",
4017 		    idx, error);
4018 		panic("wm_add_rxbuf");
4019 	}
4020 
4021 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4022 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4023 
4024 	WM_INIT_RXDESC(sc, idx);
4025 
4026 	return (0);
4027 }
4028 
4029 /*
4030  * wm_set_ral:
4031  *
4032  *	Set an entry in the receive address list.
4033  */
4034 static void
4035 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
4036 {
4037 	uint32_t ral_lo, ral_hi;
4038 
4039 	if (enaddr != NULL) {
4040 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
4041 		    (enaddr[3] << 24);
4042 		ral_hi = enaddr[4] | (enaddr[5] << 8);
4043 		ral_hi |= RAL_AV;
4044 	} else {
4045 		ral_lo = 0;
4046 		ral_hi = 0;
4047 	}
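	/*
	 * Example: for 00:11:22:33:44:55 the packing above yields
	 * ral_lo = 0x33221100 and ral_hi = 0x00005544 | RAL_AV.
	 */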
4048 
4049 	if (sc->sc_type >= WM_T_82544) {
4050 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
4051 		    ral_lo);
4052 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
4053 		    ral_hi);
4054 	} else {
4055 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
4056 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
4057 	}
4058 }
4059 
4060 /*
4061  * wm_mchash:
4062  *	Compute the hash of the multicast address for the 4096-bit
4063  *	multicast filter (1024-bit on the ICH8/9/10 variants).
4064  *	multicast filter.
4065  */
4066 static uint32_t
4067 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
4068 {
4069 	static const int lo_shift[4] = { 4, 3, 2, 0 };
4070 	static const int hi_shift[4] = { 4, 5, 6, 8 };
4071 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
4072 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
4073 	uint32_t hash;
4074 
4075 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4076 	    || (sc->sc_type == WM_T_ICH10)) {
4077 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
4078 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
4079 		return (hash & 0x3ff);
4080 	}
4081 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
4082 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
4083 
4084 	return (hash & 0xfff);
4085 }
4086 
4087 /*
4088  * wm_set_filter:
4089  *
4090  *	Set up the receive filter.
4091  */
4092 static void
4093 wm_set_filter(struct wm_softc *sc)
4094 {
4095 	struct ethercom *ec = &sc->sc_ethercom;
4096 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4097 	struct ether_multi *enm;
4098 	struct ether_multistep step;
4099 	bus_addr_t mta_reg;
4100 	uint32_t hash, reg, bit;
4101 	int i, size;
4102 
4103 	if (sc->sc_type >= WM_T_82544)
4104 		mta_reg = WMREG_CORDOVA_MTA;
4105 	else
4106 		mta_reg = WMREG_MTA;
4107 
4108 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
4109 
4110 	if (ifp->if_flags & IFF_BROADCAST)
4111 		sc->sc_rctl |= RCTL_BAM;
4112 	if (ifp->if_flags & IFF_PROMISC) {
4113 		sc->sc_rctl |= RCTL_UPE;
4114 		goto allmulti;
4115 	}
4116 
4117 	/*
4118 	 * Set the station address in the first RAL slot, and
4119 	 * clear the remaining slots.
4120 	 */
4121 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4122 		 || (sc->sc_type == WM_T_ICH10))
4123 		size = WM_ICH8_RAL_TABSIZE;
4124 	else
4125 		size = WM_RAL_TABSIZE;
4126 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
4127 	for (i = 1; i < size; i++)
4128 		wm_set_ral(sc, NULL, i);
4129 
4130 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4131 	    || (sc->sc_type == WM_T_ICH10))
4132 		size = WM_ICH8_MC_TABSIZE;
4133 	else
4134 		size = WM_MC_TABSIZE;
4135 	/* Clear out the multicast table. */
4136 	for (i = 0; i < size; i++)
4137 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
4138 
4139 	ETHER_FIRST_MULTI(step, ec, enm);
4140 	while (enm != NULL) {
4141 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
4142 			/*
4143 			 * We must listen to a range of multicast addresses.
4144 			 * For now, just accept all multicasts, rather than
4145 			 * trying to set only those filter bits needed to match
4146 			 * the range.  (At this time, the only use of address
4147 			 * ranges is for IP multicast routing, for which the
4148 			 * range is big enough to require all bits set.)
4149 			 */
4150 			goto allmulti;
4151 		}
4152 
4153 		hash = wm_mchash(sc, enm->enm_addrlo);
4154 
4155 		reg = (hash >> 5);
4156 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4157 		    || (sc->sc_type == WM_T_ICH10))
4158 			reg &= 0x1f;
4159 		else
4160 			reg &= 0x7f;
4161 		bit = hash & 0x1f;
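		/*
		 * Example: hash 0x7e3 selects MTA word 0x3f (0x7e3 >> 5)
		 * and bit 3 (0x7e3 & 0x1f) within that word.
		 */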
4162 
4163 		hash = CSR_READ(sc, mta_reg + (reg << 2));
4164 		hash |= 1U << bit;
4165 
4166 		/* XXX Hardware bug?? */
4167 		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
4168 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
4169 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4170 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
4171 		} else
4172 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4173 
4174 		ETHER_NEXT_MULTI(step, enm);
4175 	}
4176 
4177 	ifp->if_flags &= ~IFF_ALLMULTI;
4178 	goto setit;
4179 
4180  allmulti:
4181 	ifp->if_flags |= IFF_ALLMULTI;
4182 	sc->sc_rctl |= RCTL_MPE;
4183 
4184  setit:
4185 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
4186 }
4187 
4188 /*
4189  * wm_tbi_mediainit:
4190  *
4191  *	Initialize media for use on 1000BASE-X devices.
4192  */
4193 static void
4194 wm_tbi_mediainit(struct wm_softc *sc)
4195 {
4196 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4197 	const char *sep = "";
4198 
4199 	if (sc->sc_type < WM_T_82543)
4200 		sc->sc_tipg = TIPG_WM_DFLT;
4201 	else
4202 		sc->sc_tipg = TIPG_LG_DFLT;
4203 
4204 	sc->sc_tbi_anegticks = 5;
4205 
4206 	/* Initialize our media structures */
4207 	sc->sc_mii.mii_ifp = ifp;
4208 
4209 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
4210 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
4211 	    wm_tbi_mediastatus);
4212 
4213 	/*
4214 	 * SWD Pins:
4215 	 *
4216 	 *	0 = Link LED (output)
4217 	 *	1 = Loss Of Signal (input)
4218 	 */
4219 	sc->sc_ctrl |= CTRL_SWDPIO(0);
4220 	sc->sc_ctrl &= ~CTRL_SWDPIO(1);
4221 
4222 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4223 
4224 #define	ADD(ss, mm, dd)							\
4225 do {									\
4226 	aprint_normal("%s%s", sep, ss);					\
4227 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
4228 	sep = ", ";							\
4229 } while (/*CONSTCOND*/0)
4230 
4231 	aprint_normal_dev(sc->sc_dev, "");
4232 	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
4233 	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
4234 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
4235 	aprint_normal("\n");
4236 
4237 #undef ADD
4238 
4239 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
4240 }
4241 
4242 /*
4243  * wm_tbi_mediastatus:	[ifmedia interface function]
4244  *
4245  *	Get the current interface media status on a 1000BASE-X device.
4246  */
4247 static void
4248 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4249 {
4250 	struct wm_softc *sc = ifp->if_softc;
4251 	uint32_t ctrl, status;
4252 
4253 	ifmr->ifm_status = IFM_AVALID;
4254 	ifmr->ifm_active = IFM_ETHER;
4255 
4256 	status = CSR_READ(sc, WMREG_STATUS);
4257 	if ((status & STATUS_LU) == 0) {
4258 		ifmr->ifm_active |= IFM_NONE;
4259 		return;
4260 	}
4261 
4262 	ifmr->ifm_status |= IFM_ACTIVE;
4263 	ifmr->ifm_active |= IFM_1000_SX;
4264 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
4265 		ifmr->ifm_active |= IFM_FDX;
4266 	ctrl = CSR_READ(sc, WMREG_CTRL);
4267 	if (ctrl & CTRL_RFCE)
4268 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
4269 	if (ctrl & CTRL_TFCE)
4270 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
4271 }
4272 
4273 /*
4274  * wm_tbi_mediachange:	[ifmedia interface function]
4275  *
4276  *	Set hardware to newly-selected media on a 1000BASE-X device.
4277  */
4278 static int
4279 wm_tbi_mediachange(struct ifnet *ifp)
4280 {
4281 	struct wm_softc *sc = ifp->if_softc;
4282 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4283 	uint32_t status;
4284 	int i;
4285 
4286 	sc->sc_txcw = 0;
4287 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
4288 	    (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
4289 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
4290 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4291 		sc->sc_txcw |= TXCW_ANE;
4292 	} else {
4293 		/*
4294 		 * If autonegotiation is turned off, force link up and turn on
4295 		 * full duplex
4296 		 */
4297 		sc->sc_txcw &= ~TXCW_ANE;
4298 		sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
4299 		sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
4300 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4301 		delay(1000);
4302 	}
4303 
4304 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
4305 		    device_xname(sc->sc_dev),sc->sc_txcw));
4306 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
4307 	delay(10000);
4308 
4309 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
4310 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
4311 
4312 	/*
4313 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
4314 	 * optics detect a signal, 0 if they don't.
4315 	 */
4316 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
4317 		/* Have signal; wait for the link to come up. */
4318 
4319 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4320 			/*
4321 			 * Reset the link, and let autonegotiation do its thing
4322 			 */
4323 			sc->sc_ctrl |= CTRL_LRST;
4324 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4325 			delay(1000);
4326 			sc->sc_ctrl &= ~CTRL_LRST;
4327 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4328 			delay(1000);
4329 		}
4330 
4331 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
4332 			delay(10000);
4333 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
4334 				break;
4335 		}
4336 
4337 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
4338 			    device_xname(sc->sc_dev),i));
4339 
4340 		status = CSR_READ(sc, WMREG_STATUS);
4341 		DPRINTF(WM_DEBUG_LINK,
4342 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
4343 			device_xname(sc->sc_dev),status, STATUS_LU));
4344 		if (status & STATUS_LU) {
4345 			/* Link is up. */
4346 			DPRINTF(WM_DEBUG_LINK,
4347 			    ("%s: LINK: set media -> link up %s\n",
4348 			    device_xname(sc->sc_dev),
4349 			    (status & STATUS_FD) ? "FDX" : "HDX"));
4350 
4351 			/*
4352 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
4353 			 * automatically, so re-read CTRL into sc->sc_ctrl.
4354 			 */
4355 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4356 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4357 			sc->sc_fcrtl &= ~FCRTL_XONE;
4358 			if (status & STATUS_FD)
4359 				sc->sc_tctl |=
4360 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4361 			else
4362 				sc->sc_tctl |=
4363 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4364 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
4365 				sc->sc_fcrtl |= FCRTL_XONE;
4366 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4367 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
4368 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
4369 				      sc->sc_fcrtl);
4370 			sc->sc_tbi_linkup = 1;
4371 		} else {
4372 			if (i == WM_LINKUP_TIMEOUT)
4373 				wm_check_for_link(sc);
4374 			/* Link is down. */
4375 			DPRINTF(WM_DEBUG_LINK,
4376 			    ("%s: LINK: set media -> link down\n",
4377 			    device_xname(sc->sc_dev)));
4378 			sc->sc_tbi_linkup = 0;
4379 		}
4380 	} else {
4381 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
4382 		    device_xname(sc->sc_dev)));
4383 		sc->sc_tbi_linkup = 0;
4384 	}
4385 
4386 	wm_tbi_set_linkled(sc);
4387 
4388 	return (0);
4389 }
4390 
4391 /*
4392  * wm_tbi_set_linkled:
4393  *
4394  *	Update the link LED on 1000BASE-X devices.
4395  */
4396 static void
4397 wm_tbi_set_linkled(struct wm_softc *sc)
4398 {
4399 
4400 	if (sc->sc_tbi_linkup)
4401 		sc->sc_ctrl |= CTRL_SWDPIN(0);
4402 	else
4403 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
4404 
4405 	/* On 82540 and newer devices the link LED is active-low. */
4406 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
4407 
4408 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4409 }
4410 
4411 /*
4412  * wm_tbi_check_link:
4413  *
4414  *	Check the link on 1000BASE-X devices.
4415  */
4416 static void
4417 wm_tbi_check_link(struct wm_softc *sc)
4418 {
4419 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4420 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4421 	uint32_t rxcw, ctrl, status;
4422 
4423 	status = CSR_READ(sc, WMREG_STATUS);
4424 
4425 	rxcw = CSR_READ(sc, WMREG_RXCW);
4426 	ctrl = CSR_READ(sc, WMREG_CTRL);
4427 
4428 	/* set link status */
4429 	if ((status & STATUS_LU) == 0) {
4430 		DPRINTF(WM_DEBUG_LINK,
4431 		    ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
4432 		sc->sc_tbi_linkup = 0;
4433 	} else if (sc->sc_tbi_linkup == 0) {
4434 		DPRINTF(WM_DEBUG_LINK,
4435 		    ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
4436 		    (status & STATUS_FD) ? "FDX" : "HDX"));
4437 		sc->sc_tbi_linkup = 1;
4438 	}
4439 
4440 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
4441 	    && ((status & STATUS_LU) == 0)) {
4442 		sc->sc_tbi_linkup = 0;
4443 		if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
4444 			/* RXCFG storm! */
4445 			DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
4446 				sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
4447 			wm_init(ifp);
4448 			wm_start(ifp);
4449 		} else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4450 			/* If the timer expired, retry autonegotiation */
4451 			if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
4452 				DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
4453 				sc->sc_tbi_ticks = 0;
4454 				/*
4455 				 * Reset the link, and let autonegotiation do
4456 				 * its thing
4457 				 */
4458 				sc->sc_ctrl |= CTRL_LRST;
4459 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4460 				delay(1000);
4461 				sc->sc_ctrl &= ~CTRL_LRST;
4462 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4463 				delay(1000);
4464 				CSR_WRITE(sc, WMREG_TXCW,
4465 				    sc->sc_txcw & ~TXCW_ANE);
4466 				CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
4467 			}
4468 		}
4469 	}
4470 
4471 	wm_tbi_set_linkled(sc);
4472 }
4473 
4474 /*
4475  * wm_gmii_reset:
4476  *
4477  *	Reset the PHY.
4478  */
4479 static void
4480 wm_gmii_reset(struct wm_softc *sc)
4481 {
4482 	uint32_t reg;
4483 	int func = 0; /* XXX gcc */
4484 
4485 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4486 	    || (sc->sc_type == WM_T_ICH10)) {
4487 		if (wm_get_swfwhw_semaphore(sc)) {
4488 			aprint_error_dev(sc->sc_dev,
4489 			    "%s: failed to get semaphore\n", __func__);
4490 			return;
4491 		}
4492 	}
4493 	if (sc->sc_type == WM_T_80003) {
4494 		func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
4495 		if (wm_get_swfw_semaphore(sc,
4496 			func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4497 			aprint_error_dev(sc->sc_dev,
4498 			    "%s: failed to get semaphore\n", __func__);
4499 			return;
4500 		}
4501 	}
4502 	if (sc->sc_type >= WM_T_82544) {
4503 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
4504 		delay(20000);
4505 
4506 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4507 		delay(20000);
4508 	} else {
4509 		/*
4510 		 * With the 82543, we need to force the MAC's speed and
4511 		 * duplex settings to match the PHY's configuration.  In
4512 		 * addition, we need to toggle the PHY's hardware reset
4513 		 * pin to take the PHY out of reset.
4514 		 */
4515 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
4516 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4517 
4518 		/* The PHY reset pin is active-low. */
4519 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
4520 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
4521 		    CTRL_EXT_SWDPIN(4));
4522 		reg |= CTRL_EXT_SWDPIO(4);
4523 
4524 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
4525 		delay(10);
4526 
4527 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4528 		delay(10000);
4529 
4530 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
4531 		delay(10);
4532 #if 0
4533 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
4534 #endif
4535 	}
4536 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4537 	    || (sc->sc_type == WM_T_ICH10))
4538 		wm_put_swfwhw_semaphore(sc);
4539 	if (sc->sc_type == WM_T_80003)
4540 		wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4541 }
4542 
4543 /*
4544  * wm_gmii_mediainit:
4545  *
4546  *	Initialize media for use on 1000BASE-T devices.
4547  */
4548 static void
4549 wm_gmii_mediainit(struct wm_softc *sc)
4550 {
4551 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4552 
4553 	/* We have MII. */
4554 	sc->sc_flags |= WM_F_HAS_MII;
4555 
4556 	if (sc->sc_type == WM_T_80003)
4557 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
4558 	else
4559 		sc->sc_tipg = TIPG_1000T_DFLT;
4560 
4561 	/*
4562 	 * Let the chip set speed/duplex on its own based on
4563 	 * signals from the PHY.
4564 	 * XXXbouyer - I'm not sure this is right for the 80003,
4565 	 * the em driver only sets CTRL_SLU here - but it seems to work.
4566 	 */
4567 	sc->sc_ctrl |= CTRL_SLU;
4568 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4569 
4570 	/* Initialize our media structures and probe the GMII. */
4571 	sc->sc_mii.mii_ifp = ifp;
4572 
4573 	if (sc->sc_type == WM_T_ICH10) {
4574 		sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
4575 		sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
4576 	} else if (sc->sc_type >= WM_T_80003) {
4577 		sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
4578 		sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
4579 	} else if (sc->sc_type >= WM_T_82544) {
4580 		sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
4581 		sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
4582 	} else {
4583 		sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
4584 		sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
4585 	}
4586 	sc->sc_mii.mii_statchg = wm_gmii_statchg;
4587 
4588 	wm_gmii_reset(sc);
4589 
4590 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
4591 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
4592 	    wm_gmii_mediastatus);
4593 
4594 	mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
4595 	    MII_OFFSET_ANY, MIIF_DOPAUSE);
4596 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
4597 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
4598 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
4599 	} else
4600 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
4601 }
4602 
4603 /*
4604  * wm_gmii_mediastatus:	[ifmedia interface function]
4605  *
4606  *	Get the current interface media status on a 1000BASE-T device.
4607  */
4608 static void
4609 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4610 {
4611 	struct wm_softc *sc = ifp->if_softc;
4612 
4613 	ether_mediastatus(ifp, ifmr);
4614 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) |
4615 			   sc->sc_flowflags;
4616 }
4617 
4618 /*
4619  * wm_gmii_mediachange:	[ifmedia interface function]
4620  *
4621  *	Set hardware to newly-selected media on a 1000BASE-T device.
4622  */
4623 static int
4624 wm_gmii_mediachange(struct ifnet *ifp)
4625 {
4626 	struct wm_softc *sc = ifp->if_softc;
4627 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4628 	int rc;
4629 
4630 	if ((ifp->if_flags & IFF_UP) == 0)
4631 		return 0;
4632 
4633 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
4634 	sc->sc_ctrl |= CTRL_SLU;
4635 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
4636 	    || (sc->sc_type > WM_T_82543)) {
4637 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
4638 	} else {
4639 		sc->sc_ctrl &= ~CTRL_ASDE;
4640 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
4641 		if (ife->ifm_media & IFM_FDX)
4642 			sc->sc_ctrl |= CTRL_FD;
4643 		switch (IFM_SUBTYPE(ife->ifm_media)) {
4644 		case IFM_10_T:
4645 			sc->sc_ctrl |= CTRL_SPEED_10;
4646 			break;
4647 		case IFM_100_TX:
4648 			sc->sc_ctrl |= CTRL_SPEED_100;
4649 			break;
4650 		case IFM_1000_T:
4651 			sc->sc_ctrl |= CTRL_SPEED_1000;
4652 			break;
4653 		default:
4654 			panic("wm_gmii_mediachange: bad media 0x%x",
4655 			    ife->ifm_media);
4656 		}
4657 	}
4658 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4659 	if (sc->sc_type <= WM_T_82543)
4660 		wm_gmii_reset(sc);
4661 
4662 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
4663 		return 0;
4664 	return rc;
4665 }
4666 
4667 #define	MDI_IO		CTRL_SWDPIN(2)
4668 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
4669 #define	MDI_CLK		CTRL_SWDPIN(3)
4670 
4671 static void
4672 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
4673 {
4674 	uint32_t i, v;
4675 
4676 	v = CSR_READ(sc, WMREG_CTRL);
4677 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
4678 	v |= MDI_DIR | CTRL_SWDPIO(3);
4679 
4680 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
4681 		if (data & i)
4682 			v |= MDI_IO;
4683 		else
4684 			v &= ~MDI_IO;
4685 		CSR_WRITE(sc, WMREG_CTRL, v);
4686 		delay(10);
4687 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4688 		delay(10);
4689 		CSR_WRITE(sc, WMREG_CTRL, v);
4690 		delay(10);
4691 	}
4692 }
4693 
4694 static uint32_t
4695 i82543_mii_recvbits(struct wm_softc *sc)
4696 {
4697 	uint32_t v, i, data = 0;
4698 
4699 	v = CSR_READ(sc, WMREG_CTRL);
4700 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
4701 	v |= CTRL_SWDPIO(3);
4702 
4703 	CSR_WRITE(sc, WMREG_CTRL, v);
4704 	delay(10);
4705 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4706 	delay(10);
4707 	CSR_WRITE(sc, WMREG_CTRL, v);
4708 	delay(10);
4709 
4710 	for (i = 0; i < 16; i++) {
4711 		data <<= 1;
4712 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4713 		delay(10);
4714 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
4715 			data |= 1;
4716 		CSR_WRITE(sc, WMREG_CTRL, v);
4717 		delay(10);
4718 	}
4719 
4720 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4721 	delay(10);
4722 	CSR_WRITE(sc, WMREG_CTRL, v);
4723 	delay(10);
4724 
4725 	return (data);
4726 }
4727 
4728 #undef MDI_IO
4729 #undef MDI_DIR
4730 #undef MDI_CLK
4731 
4732 /*
4733  * wm_gmii_i82543_readreg:	[mii interface function]
4734  *
4735  *	Read a PHY register on the GMII (i82543 version).
4736  */
4737 static int
4738 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
4739 {
4740 	struct wm_softc *sc = device_private(self);
4741 	int rv;
4742 
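	/*
	 * Bit-bang an IEEE 802.3 clause-22 MDIO read frame: a preamble
	 * of 32 ones, then start (01), read opcode (10), a 5-bit PHY
	 * address and a 5-bit register address -- 14 command bits in
	 * all -- followed by 16 data bits clocked back in.
	 */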
4743 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
4744 	i82543_mii_sendbits(sc, reg | (phy << 5) |
4745 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
4746 	rv = i82543_mii_recvbits(sc) & 0xffff;
4747 
4748 	DPRINTF(WM_DEBUG_GMII,
4749 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
4750 	    device_xname(sc->sc_dev), phy, reg, rv));
4751 
4752 	return (rv);
4753 }
4754 
4755 /*
4756  * wm_gmii_i82543_writereg:	[mii interface function]
4757  *
4758  *	Write a PHY register on the GMII (i82543 version).
4759  */
4760 static void
4761 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
4762 {
4763 	struct wm_softc *sc = device_private(self);
4764 
4765 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
4766 	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
4767 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
4768 	    (MII_COMMAND_START << 30), 32);
4769 }
4770 
4771 /*
4772  * wm_gmii_i82544_readreg:	[mii interface function]
4773  *
4774  *	Read a PHY register on the GMII.
4775  */
4776 static int
4777 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
4778 {
4779 	struct wm_softc *sc = device_private(self);
4780 	uint32_t mdic = 0;
4781 	int i, rv;
4782 
4783 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
4784 	    MDIC_REGADD(reg));
4785 
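	/* Poll the ready bit for up to 320 * 10us = 3.2ms. */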
4786 	for (i = 0; i < 320; i++) {
4787 		mdic = CSR_READ(sc, WMREG_MDIC);
4788 		if (mdic & MDIC_READY)
4789 			break;
4790 		delay(10);
4791 	}
4792 
4793 	if ((mdic & MDIC_READY) == 0) {
4794 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
4795 		    device_xname(sc->sc_dev), phy, reg);
4796 		rv = 0;
4797 	} else if (mdic & MDIC_E) {
4798 #if 0 /* This is normal if no PHY is present. */
4799 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
4800 		    device_xname(sc->sc_dev), phy, reg);
4801 #endif
4802 		rv = 0;
4803 	} else {
4804 		rv = MDIC_DATA(mdic);
4805 		if (rv == 0xffff)
4806 			rv = 0;
4807 	}
4808 
4809 	return (rv);
4810 }
4811 
4812 /*
4813  * wm_gmii_i82544_writereg:	[mii interface function]
4814  *
4815  *	Write a PHY register on the GMII.
4816  */
4817 static void
4818 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
4819 {
4820 	struct wm_softc *sc = device_private(self);
4821 	uint32_t mdic = 0;
4822 	int i;
4823 
4824 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
4825 	    MDIC_REGADD(reg) | MDIC_DATA(val));
4826 
4827 	for (i = 0; i < 320; i++) {
4828 		mdic = CSR_READ(sc, WMREG_MDIC);
4829 		if (mdic & MDIC_READY)
4830 			break;
4831 		delay(10);
4832 	}
4833 
4834 	if ((mdic & MDIC_READY) == 0)
4835 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
4836 		    device_xname(sc->sc_dev), phy, reg);
4837 	else if (mdic & MDIC_E)
4838 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
4839 		    device_xname(sc->sc_dev), phy, reg);
4840 }
4841 
4842 /*
4843  * wm_gmii_i80003_readreg:	[mii interface function]
4844  *
4845  *	Read a PHY register on the kumeran bus.
4846  * This could be handled by the PHY layer if we didn't have to lock the
4847  * resource ...
4848  */
4849 static int
4850 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
4851 {
4852 	struct wm_softc *sc = device_private(self);
4853 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4854 	int rv;
4855 
4856 	if (phy != 1) /* only one PHY on kumeran bus */
4857 		return 0;
4858 
4859 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4860 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4861 		    __func__);
4862 		return 0;
4863 	}
4864 
4865 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
4866 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
4867 		    reg >> GG82563_PAGE_SHIFT);
4868 	} else {
4869 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
4870 		    reg >> GG82563_PAGE_SHIFT);
4871 	}
4872 	/* Wait an extra 200us to work around a bug in the MDIC ready bit. */
4873 	delay(200);
4874 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
4875 	delay(200);
4876 
4877 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4878 	return (rv);
4879 }
4880 
4881 /*
4882  * wm_gmii_i80003_writereg:	[mii interface function]
4883  *
4884  *	Write a PHY register on the kumeran.
4885  * This could be handled by the PHY layer if we didn't have to lock the
4886  * resource ...
4887  */
4888 static void
4889 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
4890 {
4891 	struct wm_softc *sc = device_private(self);
4892 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4893 
4894 	if (phy != 1) /* only one PHY on kumeran bus */
4895 		return;
4896 
4897 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4898 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4899 		    __func__);
4900 		return;
4901 	}
4902 
4903 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
4904 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
4905 		    reg >> GG82563_PAGE_SHIFT);
4906 	} else {
4907 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
4908 		    reg >> GG82563_PAGE_SHIFT);
4909 	}
4910 	/* Wait an extra 200us to work around a bug in the MDIC ready bit. */
4911 	delay(200);
4912 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
4913 	delay(200);
4914 
4915 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4916 }
4917 
4918 /*
4919  * wm_gmii_bm_readreg:	[mii interface function]
4920  *
4921  *	Read a PHY register on the BM PHY (used on ICH10).
4922  * This could be handled by the PHY layer if we didn't have to lock the
4923  * resource ...
4924  */
4925 static int
4926 wm_gmii_bm_readreg(device_t self, int phy, int reg)
4927 {
4928 	struct wm_softc *sc = device_private(self);
4929 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4930 	int rv;
4931 
4932 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4933 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4934 		    __func__);
4935 		return 0;
4936 	}
4937 
4938 	if (reg > GG82563_MAX_REG_ADDRESS) {
4939 		if (phy == 1)
4940 			wm_gmii_i82544_writereg(self, phy, 0x1f,
4941 			    reg);
4942 		else
4943 			wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
4944 			    reg >> GG82563_PAGE_SHIFT);
4946 	}
4947 
4948 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
4949 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4950 	return (rv);
4951 }
4952 
4953 /*
4954  * wm_gmii_bm_writereg:	[mii interface function]
4955  *
4956  *	Write a PHY register on the BM PHY (used on ICH10).
4957  * This could be handled by the PHY layer if we didn't have to lock the
4958  * resource ...
4959  */
4960 static void
4961 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
4962 {
4963 	struct wm_softc *sc = device_private(self);
4964 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4965 
4966 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4967 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4968 		    __func__);
4969 		return;
4970 	}
4971 
4972 	if (reg > GG82563_MAX_REG_ADDRESS) {
4973 		if (phy == 1)
4974 			wm_gmii_i82544_writereg(self, phy, 0x1f,
4975 			    reg);
4976 		else
4977 			wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
4978 			    reg >> GG82563_PAGE_SHIFT);
4980 	}
4981 
4982 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
4983 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4984 }
4985 
4986 /*
4987  * wm_gmii_statchg:	[mii interface function]
4988  *
4989  *	Callback from MII layer when media changes.
4990  */
4991 static void
4992 wm_gmii_statchg(device_t self)
4993 {
4994 	struct wm_softc *sc = device_private(self);
4995 	struct mii_data *mii = &sc->sc_mii;
4996 
4997 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
4998 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4999 	sc->sc_fcrtl &= ~FCRTL_XONE;
5000 
5001 	/*
5002 	 * Get flow control negotiation result.
5003 	 */
5004 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
5005 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
5006 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
5007 		mii->mii_media_active &= ~IFM_ETH_FMASK;
5008 	}
5009 
5010 	if (sc->sc_flowflags & IFM_FLOW) {
5011 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
5012 			sc->sc_ctrl |= CTRL_TFCE;
5013 			sc->sc_fcrtl |= FCRTL_XONE;
5014 		}
5015 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
5016 			sc->sc_ctrl |= CTRL_RFCE;
5017 	}
5018 
5019 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
5020 		DPRINTF(WM_DEBUG_LINK,
5021 		    ("%s: LINK: statchg: FDX\n", device_xname(sc->sc_dev)));
5022 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5023 	} else  {
5024 		DPRINTF(WM_DEBUG_LINK,
5025 		    ("%s: LINK: statchg: HDX\n", device_xname(sc->sc_dev)));
5026 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5027 	}
5028 
5029 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5030 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5031 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
5032 						 : WMREG_FCRTL, sc->sc_fcrtl);
5033 	if (sc->sc_type == WM_T_80003) {
5034 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
5035 		case IFM_1000_T:
5036 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
5037 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
5038 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
5039 			break;
5040 		default:
5041 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
5042 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
5043 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
5044 			break;
5045 		}
5046 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
5047 	}
5048 }
5049 
5050 /*
5051  * wm_kmrn_readreg:
5052  *
5053  *	Read a kumeran register
5054  */
5055 static int
5056 wm_kmrn_readreg(struct wm_softc *sc, int reg)
5057 {
5058 	int rv;
5059 
5060 	if (sc->sc_flags & WM_F_SWFW_SYNC) {
5061 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
5062 			aprint_error_dev(sc->sc_dev,
5063 			    "%s: failed to get semaphore\n", __func__);
5064 			return 0;
5065 		}
5066 	} else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
5067 		if (wm_get_swfwhw_semaphore(sc)) {
5068 			aprint_error_dev(sc->sc_dev,
5069 			    "%s: failed to get semaphore\n", __func__);
5070 			return 0;
5071 		}
5072 	}
5073 
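	/*
	 * Kumeran access protocol: write the register offset with the
	 * read-enable bit (KUMCTRLSTA_REN) set, wait 2us, then read the
	 * data back from the low bits of KUMCTRLSTA.
	 */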
5074 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
5075 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
5076 	    KUMCTRLSTA_REN);
5077 	delay(2);
5078 
5079 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
5080 
5081 	if (sc->sc_flags & WM_F_SWFW_SYNC)
5082 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
5083 	else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5084 		wm_put_swfwhw_semaphore(sc);
5085 
5086 	return (rv);
5087 }
5088 
5089 /*
5090  * wm_kmrn_writereg:
5091  *
5092  *	Write a kumeran register
5093  */
5094 static void
5095 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
5096 {
5097 
5098 	if (sc->sc_flags & WM_F_SWFW_SYNC) {
5099 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
5100 			aprint_error_dev(sc->sc_dev,
5101 			    "%s: failed to get semaphore\n", __func__);
5102 			return;
5103 		}
5104 	} else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
5105 		if (wm_get_swfwhw_semaphore(sc)) {
5106 			aprint_error_dev(sc->sc_dev,
5107 			    "%s: failed to get semaphore\n", __func__);
5108 			return;
5109 		}
5110 	}
5111 
5112 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
5113 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
5114 	    (val & KUMCTRLSTA_MASK));
5115 
5116 	if (sc->sc_flags & WM_F_SWFW_SYNC)
5117 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
5118 	else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5119 		wm_put_swfwhw_semaphore(sc);
5120 }
5121 
5122 static int
5123 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
5124 {
5125 	uint32_t eecd = 0;
5126 
5127 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574) {
5128 		eecd = CSR_READ(sc, WMREG_EECD);
5129 
5130 		/* Isolate bits 15 & 16 */
5131 		eecd = ((eecd >> 15) & 0x03);
5132 
5133 		/* If both bits are set, device is Flash type */
5134 		if (eecd == 0x03) {
5135 			return 0;
5136 		}
5137 	}
5138 	return 1;
5139 }
5140 
5141 static int
5142 wm_get_swsm_semaphore(struct wm_softc *sc)
5143 {
5144 	int32_t timeout;
5145 	uint32_t swsm;
5146 
5147 	/* Get the FW semaphore. */
5148 	timeout = 1000 + 1; /* XXX */
5149 	while (timeout) {
5150 		swsm = CSR_READ(sc, WMREG_SWSM);
5151 		swsm |= SWSM_SWESMBI;
5152 		CSR_WRITE(sc, WMREG_SWSM, swsm);
5153 		/* if we managed to set the bit we got the semaphore. */
5154 		swsm = CSR_READ(sc, WMREG_SWSM);
5155 		if (swsm & SWSM_SWESMBI)
5156 			break;
5157 
5158 		delay(50);
5159 		timeout--;
5160 	}
5161 
5162 	if (timeout == 0) {
5163 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM semaphore\n");
5164 		/* Release semaphores */
5165 		wm_put_swsm_semaphore(sc);
5166 		return 1;
5167 	}
5168 	return 0;
5169 }
5170 
5171 static void
5172 wm_put_swsm_semaphore(struct wm_softc *sc)
5173 {
5174 	uint32_t swsm;
5175 
5176 	swsm = CSR_READ(sc, WMREG_SWSM);
5177 	swsm &= ~(SWSM_SWESMBI);
5178 	CSR_WRITE(sc, WMREG_SWSM, swsm);
5179 }
5180 
5181 static int
5182 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
5183 {
5184 	uint32_t swfw_sync;
5185 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
5186 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
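	/*
	 * Software and firmware each own one bit per resource in
	 * SW_FW_SYNC.  We may set our (software) bit only while both
	 * bits for this resource are clear, and we do the
	 * read-modify-write with the SWSM semaphore held.
	 */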
5187 	int timeout;
5188 
5189 	for (timeout = 0; timeout < 200; timeout++) {
5190 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5191 			if (wm_get_swsm_semaphore(sc)) {
5192 				aprint_error_dev(sc->sc_dev,
5193 				    "%s: failed to get semaphore\n",
5194 				    __func__);
5195 				return 1;
5196 			}
5197 		}
5198 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
5199 		if ((swfw_sync & (swmask | fwmask)) == 0) {
5200 			swfw_sync |= swmask;
5201 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
5202 			if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5203 				wm_put_swsm_semaphore(sc);
5204 			return 0;
5205 		}
5206 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5207 			wm_put_swsm_semaphore(sc);
5208 		delay(5000);
5209 	}
5210 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
5211 	    device_xname(sc->sc_dev), mask, swfw_sync);
5212 	return 1;
5213 }
5214 
5215 static void
5216 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
5217 {
5218 	uint32_t swfw_sync;
5219 
5220 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5221 		while (wm_get_swsm_semaphore(sc) != 0)
5222 			continue;
5223 	}
5224 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
5225 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
5226 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
5227 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5228 		wm_put_swsm_semaphore(sc);
5229 }
5230 
5231 static int
5232 wm_get_swfwhw_semaphore(struct wm_softc *sc)
5233 {
5234 	uint32_t ext_ctrl;
5235 	int timeout;
5236 
5237 	for (timeout = 0; timeout < 200; timeout++) {
5238 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5239 		ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
5240 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
5241 
5242 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5243 		if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
5244 			return 0;
5245 		delay(5000);
5246 	}
5247 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
5248 	    device_xname(sc->sc_dev), ext_ctrl);
5249 	return 1;
5250 }
5251 
5252 static void
5253 wm_put_swfwhw_semaphore(struct wm_softc *sc)
5254 {
5255 	uint32_t ext_ctrl;
5256 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5257 	ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
5258 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
5259 }
5260 
5261 static int
5262 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
5263 {
5264 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
5265 	uint8_t bank_high_byte;
5266 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
5267 
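	/*
	 * On ICH10 a bank is judged valid when the top two bits of its
	 * NVM signature byte read binary 10, i.e. 0x80 after masking
	 * with 0xc0 -- hence the checks below.
	 */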
5268 	if (sc->sc_type != WM_T_ICH10) {
5269 		/* Value of bit 22 corresponds to the flash bank we're on. */
5270 		*bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
5271 	} else {
5272 		wm_read_ich8_byte(sc, act_offset, &bank_high_byte);
5273 		if ((bank_high_byte & 0xc0) == 0x80)
5274 			*bank = 0;
5275 		else {
5276 			wm_read_ich8_byte(sc, act_offset + bank1_offset,
5277 			    &bank_high_byte);
5278 			if ((bank_high_byte & 0xc0) == 0x80)
5279 				*bank = 1;
5280 			else {
5281 				aprint_error_dev(sc->sc_dev,
5282 				    "EEPROM not present\n");
5283 				return -1;
5284 			}
5285 		}
5286 	}
5287 
5288 	return 0;
5289 }
5290 
5291 /******************************************************************************
5292  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
5293  * register.
5294  *
5295  * sc - Struct containing variables accessed by shared code
5296  * offset - offset of word in the EEPROM to read
5297  * data - word read from the EEPROM
5298  * words - number of words to read
5299  *****************************************************************************/
5300 static int
5301 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
5302 {
5303     int32_t  error = 0;
5304     uint32_t flash_bank = 0;
5305     uint32_t act_offset = 0;
5306     uint32_t bank_offset = 0;
5307     uint16_t word = 0;
5308     uint16_t i = 0;
5309 
5310     /* We need to know which is the valid flash bank.  In the event
5311      * that we didn't allocate eeprom_shadow_ram, we may not be
5312      * managing flash_bank.  So it cannot be trusted and needs
5313      * to be updated with each read.
5314      */
5315     error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
5316     if (error) {
5317 	    aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
5318 		    __func__);
5319         return error;
5320     }
5321 
5322     /* If we're on bank 1, adjust the byte offset (bank size is in words). */
5323     bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
5324 
5325     error = wm_get_swfwhw_semaphore(sc);
5326     if (error) {
5327 	    aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5328 		__func__);
5329         return error;
5330     }
5331 
5332     for (i = 0; i < words; i++) {
5333             /* The NVM part needs a byte offset, hence * 2 */
5334             act_offset = bank_offset + ((offset + i) * 2);
5335             error = wm_read_ich8_word(sc, act_offset, &word);
5336             if (error) {
5337 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
5338 		    __func__);
5339                 break;
5340 	    }
5341             data[i] = word;
5342     }
5343 
5344     wm_put_swfwhw_semaphore(sc);
5345     return error;
5346 }
5347 
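/*
 * Illustrative sketch (not compiled): fetching the three Ethernet
 * address words through this routine.  EEPROM_OFF_MACADDR is assumed
 * here as the word offset of the MAC address in the NVM.
 */
#if 0
	/* e.g., from within wm_attach(): */
	uint16_t myea[3];

	if (wm_read_eeprom_ich8(sc, EEPROM_OFF_MACADDR,
	    __arraycount(myea), myea) != 0)
		aprint_error_dev(sc->sc_dev,
		    "unable to read Ethernet address\n");
#endif
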
5348 /******************************************************************************
5349  * This function does initial flash setup so that a new read/write/erase cycle
5350  * can be started.
5351  *
5352  * sc - The pointer to the hw structure
5353  ****************************************************************************/
5354 static int32_t
5355 wm_ich8_cycle_init(struct wm_softc *sc)
5356 {
5357     uint16_t hsfsts;
5358     int32_t error = 1;
5359     int32_t i     = 0;
5360 
5361     hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5362 
5363     /* Check that the Flash Descriptor Valid bit is set in HW status. */
5364     if ((hsfsts & HSFSTS_FLDVAL) == 0) {
5365         return error;
5366     }
5367 
5368     /* Clear FCERR and DAEL in HW status by writing 1s. */
5370     hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
5371 
5372     ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
5373 
5374     /* Either the hardware should provide a cycle-in-progress bit to
5375      * check before starting a new cycle, or the FDONE bit should come
5376      * out of hardware reset set to 1, so that it can be used to tell
5377      * whether a cycle is in progress or has completed.  There should
5378      * also be a software semaphore guarding FDONE and the in-progress
5379      * bit, so that two threads' accesses to those bits are serialized
5380      * and two threads cannot start a cycle at the same time. */
5382 
5383     if ((hsfsts & HSFSTS_FLINPRO) == 0) {
5384         /* There is no cycle running at present, so we can start a cycle */
5385         /* Begin by setting Flash Cycle Done. */
5386         hsfsts |= HSFSTS_DONE;
5387         ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
5388         error = 0;
5389     } else {
5390         /* Otherwise poll for a while so the current cycle has a chance
5391          * to end before giving up. */
5392         for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
5393             hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5394             if ((hsfsts & HSFSTS_FLINPRO) == 0) {
5395                 error = 0;
5396                 break;
5397             }
5398             delay(1);
5399         }
5400         if (error == 0) {
5401             /* The previous cycle ended within the timeout; now set
5402              * the Flash Cycle Done bit. */
5403             hsfsts |= HSFSTS_DONE;
5404             ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
5405         }
5406     }
5407     return error;
5408 }
5409 
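/*
 * In short, wm_ich8_cycle_init() returns 0 once the controller is
 * ready for a new flash cycle, and 1 if the flash descriptor is
 * invalid or a previous cycle is still in progress after
 * ICH_FLASH_COMMAND_TIMEOUT polls.
 */
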
5410 /******************************************************************************
5411  * This function starts a flash cycle and waits for its completion
5412  *
5413  * sc - The pointer to the hw structure
5414  ****************************************************************************/
5415 static int32_t
5416 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
5417 {
5418     uint16_t hsflctl;
5419     uint16_t hsfsts;
5420     int32_t error = 1;
5421     uint32_t i = 0;
5422 
5423     /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
5424     hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
5425     hsflctl |= HSFCTL_GO;
5426     ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
5427 
5428     /* Wait until the FDONE bit is set to 1. */
5429     do {
5430         hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5431         if (hsfsts & HSFSTS_DONE)
5432             break;
5433         delay(1);
5434         i++;
5435     } while (i < timeout);
5436     if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0) {
5437         error = 0;
5438     }
5439     return error;
5440 }
5441 
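/*
 * The read path below (wm_read_ich8_data()) pairs each call to
 * wm_ich8_flash_cycle() with a preceding wm_ich8_cycle_init() and
 * retries the pair when FCERR is raised.
 */
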
5442 /******************************************************************************
5443  * Reads a byte or word from the NVM using the ICH8 flash access registers.
5444  *
5445  * sc - The pointer to the hw structure
5446  * index - The index of the byte or word to read.
5447  * size - Size of data to read, 1=byte 2=word
5448  * data - Pointer to the word to store the value read.
5449  *****************************************************************************/
5450 static int32_t
5451 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
5452                      uint32_t size, uint16_t *data)
5453 {
5454     uint16_t hsfsts;
5455     uint16_t hsflctl;
5456     uint32_t flash_linear_address;
5457     uint32_t flash_data = 0;
5458     int32_t error = 1;
5459     int32_t count = 0;
5460 
5461     if (size < 1 || size > 2 || data == NULL ||
5462         index > ICH_FLASH_LINEAR_ADDR_MASK)
5463         return error;
5464 
5465     flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
5466                            sc->sc_ich8_flash_base;
5467 
5468     do {
5469         delay(1);
5470         /* Steps */
5471         error = wm_ich8_cycle_init(sc);
5472         if (error)
5473             break;
5474 
5475         hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
5476         /* A byte count (BCOUNT) of 0b/1b selects a 1- or 2-byte transfer. */
5477         hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT) & HSFCTL_BCOUNT_MASK;
5478         hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
5479         ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
5480 
5481         /* Write the last 24 bits of index into the Flash Linear Address
5482          * field of the Flash Address register. */
5483         /* TODO: check the index against the size of the flash. */
5484 
5485         ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
5486 
5487         error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
5488 
5489         /* If FCERR is set, clear it and retry the whole sequence up to
5490          * ICH_FLASH_CYCLE_REPEAT_COUNT times; otherwise read the result
5491          * out of Flash Data0, least significant byte first. */
5492         if (error == 0) {
5493             flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
5494             if (size == 1) {
5495                 *data = (uint8_t)(flash_data & 0x000000FF);
5496             } else if (size == 2) {
5497                 *data = (uint16_t)(flash_data & 0x0000FFFF);
5498             }
5499             break;
5500         } else {
5501             /* If we've gotten here, then things are probably completely hosed,
5502              * but if the error condition is detected, it won't hurt to give
5503              * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
5504              */
5505             hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5506             if (hsfsts & HSFSTS_ERR) {
5507                 /* Repeat for some time before giving up. */
5508                 continue;
5509             } else if ((hsfsts & HSFSTS_DONE) == 0) {
5510                 break;
5511             }
5512         }
5513     } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
5514 
5515     return error;
5516 }
5517 
5518 /******************************************************************************
5519  * Reads a single byte from the NVM using the ICH8 flash access registers.
5520  *
5521  * sc - pointer to the wm_softc structure
5522  * index - The index of the byte to read.
5523  * data - Pointer to a byte to store the value read.
5524  *****************************************************************************/
5525 static int32_t
5526 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
5527 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t *data)
5528     int32_t status;
5529     uint16_t word = 0;
5530 
5531     status = wm_read_ich8_data(sc, index, 1, &word);
5532     if (status == 0) {
5533         *data = (uint8_t)word;
5534     }
5535 
5536     return status;
5537 }
5538 
5539 /******************************************************************************
5540  * Reads a word from the NVM using the ICH8 flash access registers.
5541  *
5542  * sc - pointer to the wm_softc structure
5543  * index - The starting byte index of the word to read.
5544  * data - Pointer to a word to store the value read.
5545  *****************************************************************************/
5546 static int32_t
5547 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
5548 {
5549     int32_t status;
5550 
5551     status = wm_read_ich8_data(sc, index, 2, data);
5552     return status;
5553 }
5554 
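/*
 * Illustrative sketch (not compiled): with the wrappers above, one
 * flash datum is one call, e.g. fetching the bank signature byte the
 * way wm_valid_nvm_bank_detect_ich8lan() does:
 */
#if 0
	uint8_t sig;

	if (wm_read_ich8_byte(sc, ICH_NVM_SIG_WORD * 2 + 1, &sig) == 0)
		aprint_debug_dev(sc->sc_dev, "bank 0 signature 0x%02x\n", sig);
#endif
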
5555 static int
5556 wm_check_mng_mode(struct wm_softc *sc)
5557 {
5558 	int rv;
5559 
5560 	switch (sc->sc_type) {
5561 	case WM_T_ICH8:
5562 	case WM_T_ICH9:
5563 	case WM_T_ICH10:
5564 		rv = wm_check_mng_mode_ich8lan(sc);
5565 		break;
5566 #if 0
5567 	case WM_T_82574:
5568 		/*
5569 		 * The function is provided in the em driver, but it's
5570 		 * not used. Why?
5571 		 */
5572 		rv = wm_check_mng_mode_82574(sc);
5573 		break;
5574 #endif
5575 	case WM_T_82571:
5576 	case WM_T_82572:
5577 	case WM_T_82573:
5578 	case WM_T_80003:
5579 		rv = wm_check_mng_mode_generic(sc);
5580 		break;
5581 	default:
5582 		/* nothing to do */
5583 		rv = 0;
5584 		break;
5585 	}
5586 
5587 	return rv;
5588 }
5589 
5590 static int
5591 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
5592 {
5593 	uint32_t fwsm;
5594 
5595 	fwsm = CSR_READ(sc, WMREG_FWSM);
5596 
5597 	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
5598 		return 1;
5599 
5600 	return 0;
5601 }
5602 
5603 #if 0
5604 static int
5605 wm_check_mng_mode_82574(struct wm_softc *sc)
5606 {
5607 	uint16_t data;
5608 
5609 	wm_read_eeprom(sc, NVM_INIT_CONTROL2_REG, 1, &data);
5610 
5611 	if ((data & NVM_INIT_CTRL2_MNGM) != 0)
5612 		return 1;
5613 
5614 	return 0;
5615 }
5616 #endif
5617 
5618 static int
5619 wm_check_mng_mode_generic(struct wm_softc *sc)
5620 {
5621 	uint32_t fwsm;
5622 
5623 	fwsm = CSR_READ(sc, WMREG_FWSM);
5624 
5625 	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
5626 		return 1;
5627 
5628 	return 0;
5629 }
5630 
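/*
 * Both checks above compare the mode field of FWSM against the IAMT
 * mode; only the encoding differs between the ICH parts
 * (MNG_ICH_IAMT_MODE) and the generic ones (MNG_IAMT_MODE).
 */
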
5631 static void
5632 wm_get_hw_control(struct wm_softc *sc)
5633 {
5634 	uint32_t reg;
5635 
5636 	switch (sc->sc_type) {
5637 	case WM_T_82573:
5638 #if 0
5639 	case WM_T_82574:
5640 		/*
5641 		 * FreeBSD's em driver has a function for the 82574 to check
5642 		 * the management mode, but it's not used. Why?
5643 		 */
5644 #endif
5645 		reg = CSR_READ(sc, WMREG_SWSM);
5646 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
5647 		break;
5648 	case WM_T_82571:
5649 	case WM_T_82572:
5650 	case WM_T_80003:
5651 	case WM_T_ICH8:
5652 	case WM_T_ICH9:
5653 	case WM_T_ICH10:
5654 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
5655 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
5656 		break;
5657 	default:
5658 		break;
5659 	}
5660 }
5661 
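/*
 * Setting DRV_LOAD tells the firmware that a driver now owns the
 * hardware.  A symmetric release on detach would clear the same bit; a
 * minimal sketch for the CTRL_EXT case (not compiled, function name
 * assumed):
 */
#if 0
static void
wm_release_hw_control(struct wm_softc *sc)
{
	uint32_t reg;

	reg = CSR_READ(sc, WMREG_CTRL_EXT);
	CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
}
#endif
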
5662 /* XXX Currently TBI only */
5663 static int
5664 wm_check_for_link(struct wm_softc *sc)
5665 {
5666 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5667 	uint32_t rxcw;
5668 	uint32_t ctrl;
5669 	uint32_t status;
5670 	uint32_t sig;
5671 
5672 	rxcw = CSR_READ(sc, WMREG_RXCW);
5673 	ctrl = CSR_READ(sc, WMREG_CTRL);
5674 	status = CSR_READ(sc, WMREG_STATUS);
5675 
5676 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
5677 
5678 	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig matched = %d, status_lu = %d, rxcw_c = %d\n",
5679 		device_xname(sc->sc_dev), __func__,
5680 		((ctrl & CTRL_SWDPIN(1)) == sig),
5681 		((status & STATUS_LU) != 0),
5682 		((rxcw & RXCW_C) != 0)
5683 		    ));
5684 
5685 	/*
5686 	 * SWDPIN   LU RXCW
5687 	 *      0    0    0
5688 	 *      0    0    1	(should not happen)
5689 	 *      0    1    0	(should not happen)
5690 	 *      0    1    1	(should not happen)
5691 	 *      1    0    0	Disable autonego and force linkup
5692 	 *      1    0    1	got /C/ but not linkup yet
5693 	 *      1    1    0	(linkup)
5694 	 *      1    1    1	If IFM_AUTO, back to autonego
5695 	 *
5696 	 */
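	/*
	 * The branches below handle rows 100 (force link-up and full
	 * duplex), 111 (return to autonegotiation when IFM_AUTO) and
	 * 101 (/C/ received but no link yet) of the table above; any
	 * other combination is merely logged.
	 */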
5697 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
5698 	    && ((status & STATUS_LU) == 0)
5699 	    && ((rxcw & RXCW_C) == 0)) {
5700 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
5701 			__func__));
5702 		sc->sc_tbi_linkup = 0;
5703 		/* Disable auto-negotiation in the TXCW register */
5704 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
5705 
5706 		/*
5707 		 * Force link-up and also force full-duplex.
5708 		 *
5709 		 * NOTE: CTRL's TFCE and RFCE bits were updated automatically,
5710 		 * so we should update sc->sc_ctrl as well.
5711 		 */
5712 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
5713 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5714 	} else if (((status & STATUS_LU) != 0)
5715 	    && ((rxcw & RXCW_C) != 0)
5716 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
5717 		sc->sc_tbi_linkup = 1;
5718 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
5719 			__func__));
5720 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5721 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
5722 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
5723 	    && ((rxcw & RXCW_C) != 0)) {
5724 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
5725 	} else {
5726 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
5727 			status));
5728 	}
5729 
5730 	return 0;
5731 }
5732