xref: /netbsd-src/sys/dev/pci/if_wm.c (revision b5677b36047b601b9addaaa494a58ceae82c2a6c)
1 /*	$NetBSD: if_wm.c,v 1.174 2009/04/07 18:42:30 msaitoh Exp $	*/
2 
3 /*
4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*******************************************************************************
39 
40   Copyright (c) 2001-2005, Intel Corporation
41   All rights reserved.
42 
43   Redistribution and use in source and binary forms, with or without
44   modification, are permitted provided that the following conditions are met:
45 
46    1. Redistributions of source code must retain the above copyright notice,
47       this list of conditions and the following disclaimer.
48 
49    2. Redistributions in binary form must reproduce the above copyright
50       notice, this list of conditions and the following disclaimer in the
51       documentation and/or other materials provided with the distribution.
52 
53    3. Neither the name of the Intel Corporation nor the names of its
54       contributors may be used to endorse or promote products derived from
55       this software without specific prior written permission.
56 
57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67   POSSIBILITY OF SUCH DAMAGE.
68 
69 *******************************************************************************/
70 /*
71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72  *
73  * TODO (in order of importance):
74  *
75  *	- Rework how parameters are loaded from the EEPROM.
76  */
77 
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.174 2009/04/07 18:42:30 msaitoh Exp $");
80 
81 #include "bpfilter.h"
82 #include "rnd.h"
83 
84 #include <sys/param.h>
85 #include <sys/systm.h>
86 #include <sys/callout.h>
87 #include <sys/mbuf.h>
88 #include <sys/malloc.h>
89 #include <sys/kernel.h>
90 #include <sys/socket.h>
91 #include <sys/ioctl.h>
92 #include <sys/errno.h>
93 #include <sys/device.h>
94 #include <sys/queue.h>
95 #include <sys/syslog.h>
96 
97 #include <uvm/uvm_extern.h>		/* for PAGE_SIZE */
98 
99 #if NRND > 0
100 #include <sys/rnd.h>
101 #endif
102 
103 #include <net/if.h>
104 #include <net/if_dl.h>
105 #include <net/if_media.h>
106 #include <net/if_ether.h>
107 
108 #if NBPFILTER > 0
109 #include <net/bpf.h>
110 #endif
111 
112 #include <netinet/in.h>			/* XXX for struct ip */
113 #include <netinet/in_systm.h>		/* XXX for struct ip */
114 #include <netinet/ip.h>			/* XXX for struct ip */
115 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
116 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
117 
118 #include <sys/bus.h>
119 #include <sys/intr.h>
120 #include <machine/endian.h>
121 
122 #include <dev/mii/mii.h>
123 #include <dev/mii/miivar.h>
124 #include <dev/mii/mii_bitbang.h>
125 #include <dev/mii/ikphyreg.h>
126 
127 #include <dev/pci/pcireg.h>
128 #include <dev/pci/pcivar.h>
129 #include <dev/pci/pcidevs.h>
130 
131 #include <dev/pci/if_wmreg.h>
132 
133 #ifdef WM_DEBUG
134 #define	WM_DEBUG_LINK		0x01
135 #define	WM_DEBUG_TX		0x02
136 #define	WM_DEBUG_RX		0x04
137 #define	WM_DEBUG_GMII		0x08
138 int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK|WM_DEBUG_GMII;
139 
140 #define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
141 #else
142 #define	DPRINTF(x, y)	/* nothing */
143 #endif /* WM_DEBUG */
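
/*
 * DPRINTF() takes the printf() arguments as a single parenthesized
 * group.  A typical call (illustrative sketch only) looks like:
 */
#if 0
	DPRINTF(WM_DEBUG_TX, ("%s: TX: ...\n", device_xname(sc->sc_dev)));
#endif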
144 
145 /*
146  * Transmit descriptor list size.  Due to errata, we can only have
147  * 256 hardware descriptors in the ring on < 82544, but we use 4096
148  * on >= 82544.  We tell the upper layers that they can queue a lot
149  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
150  * of them at a time.
151  *
152  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
153  * chains containing many small mbufs have been observed in zero-copy
154  * situations with jumbo frames.
155  */
156 #define	WM_NTXSEGS		256
157 #define	WM_IFQUEUELEN		256
158 #define	WM_TXQUEUELEN_MAX	64
159 #define	WM_TXQUEUELEN_MAX_82547	16
160 #define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
161 #define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
162 #define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
163 #define	WM_NTXDESC_82542	256
164 #define	WM_NTXDESC_82544	4096
165 #define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
166 #define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
167 #define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
168 #define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
169 #define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
170 
171 #define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */
172 
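/*
 * Ring indices wrap with a bit-mask rather than a modulo operation,
 * which is why the descriptor and job counts above must be powers of
 * two.  A minimal sketch (illustrative only, not compiled):
 */
#if 0
	/* With WM_NTXDESC(sc) == 4096, WM_NTXDESC_MASK(sc) == 0xfff. */
	int idx = WM_NTXDESC(sc) - 1;	/* last descriptor, 4095 */
	idx = WM_NEXTTX(sc, idx);	/* (4095 + 1) & 0xfff == 0 */
#endif
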
173 /*
174  * Receive descriptor list size.  We have one Rx buffer for normal
175  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
176  * packet.  We allocate 256 receive descriptors, each with a 2k
177  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
178  */
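/*
 * (The arithmetic, assuming a ~9KB jumbo frame: such a frame needs
 * howmany(9018, MCLBYTES) == 5 buffers, and 256 / 5 is roughly 50.)
 */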
179 #define	WM_NRXDESC		256
180 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
181 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
182 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
183 
184 /*
185  * Control structures are DMA'd to the i82542 chip.  We allocate them in
186  * a single clump that maps to a single DMA segment to make several things
187  * easier.
188  */
189 struct wm_control_data_82544 {
190 	/*
191 	 * The receive descriptors.
192 	 */
193 	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
194 
195 	/*
196 	 * The transmit descriptors.  Put these at the end, because
197 	 * we might use a smaller number of them.
198 	 */
199 	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
200 };
201 
202 struct wm_control_data_82542 {
203 	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
204 	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
205 };
206 
207 #define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
208 #define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
209 #define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
210 
211 /*
212  * Software state for transmit jobs.
213  */
214 struct wm_txsoft {
215 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
216 	bus_dmamap_t txs_dmamap;	/* our DMA map */
217 	int txs_firstdesc;		/* first descriptor in packet */
218 	int txs_lastdesc;		/* last descriptor in packet */
219 	int txs_ndesc;			/* # of descriptors used */
220 };
221 
222 /*
223  * Software state for receive buffers.  Each descriptor gets a
224  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
225  * more than one buffer, we chain them together.
226  */
227 struct wm_rxsoft {
228 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
229 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
230 };
231 
232 typedef enum {
233 	WM_T_unknown		= 0,
234 	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
235 	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
236 	WM_T_82543,			/* i82543 */
237 	WM_T_82544,			/* i82544 */
238 	WM_T_82540,			/* i82540 */
239 	WM_T_82545,			/* i82545 */
240 	WM_T_82545_3,			/* i82545 3.0+ */
241 	WM_T_82546,			/* i82546 */
242 	WM_T_82546_3,			/* i82546 3.0+ */
243 	WM_T_82541,			/* i82541 */
244 	WM_T_82541_2,			/* i82541 2.0+ */
245 	WM_T_82547,			/* i82547 */
246 	WM_T_82547_2,			/* i82547 2.0+ */
247 	WM_T_82571,			/* i82571 */
248 	WM_T_82572,			/* i82572 */
249 	WM_T_82573,			/* i82573 */
250 	WM_T_82574,			/* i82574 */
251 	WM_T_80003,			/* i80003 */
252 	WM_T_ICH8,			/* ICH8 LAN */
253 	WM_T_ICH9,			/* ICH9 LAN */
254 	WM_T_ICH10,			/* ICH10 LAN */
255 } wm_chip_type;
256 
257 #define WM_LINKUP_TIMEOUT	50
258 
259 /*
260  * Software state per device.
261  */
262 struct wm_softc {
263 	device_t sc_dev;		/* generic device information */
264 	bus_space_tag_t sc_st;		/* bus space tag */
265 	bus_space_handle_t sc_sh;	/* bus space handle */
266 	bus_space_tag_t sc_iot;		/* I/O space tag */
267 	bus_space_handle_t sc_ioh;	/* I/O space handle */
268 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
269 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
270 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
271 	struct ethercom sc_ethercom;	/* ethernet common data */
272 	pci_chipset_tag_t sc_pc;
273 	pcitag_t sc_pcitag;
274 
275 	wm_chip_type sc_type;		/* chip type */
276 	int sc_flags;			/* flags; see below */
277 	int sc_bus_speed;		/* PCI/PCIX bus speed */
278 	int sc_pcix_offset;		/* PCIX capability register offset */
279 	int sc_flowflags;		/* 802.3x flow control flags */
280 
281 	void *sc_ih;			/* interrupt cookie */
282 
283 	int sc_ee_addrbits;		/* EEPROM address bits */
284 
285 	struct mii_data sc_mii;		/* MII/media information */
286 
287 	callout_t sc_tick_ch;		/* tick callout */
288 
289 	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
290 #define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
291 
292 	int		sc_align_tweak;
293 
294 	/*
295 	 * Software state for the transmit and receive descriptors.
296 	 */
297 	int			sc_txnum;	/* must be a power of two */
298 	struct wm_txsoft	sc_txsoft[WM_TXQUEUELEN_MAX];
299 	struct wm_rxsoft	sc_rxsoft[WM_NRXDESC];
300 
301 	/*
302 	 * Control data structures.
303 	 */
304 	int			sc_ntxdesc;	/* must be a power of two */
305 	struct wm_control_data_82544 *sc_control_data;
306 #define	sc_txdescs	sc_control_data->wcd_txdescs
307 #define	sc_rxdescs	sc_control_data->wcd_rxdescs
308 
309 #ifdef WM_EVENT_COUNTERS
310 	/* Event counters. */
311 	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
312 	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
313 	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
314 	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
315 	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
316 	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
317 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
318 
319 	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
320 	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
321 	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
322 	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
323 	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
324 	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
325 	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
326 	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */
327 
328 	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
329 	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */
330 
331 	struct evcnt sc_ev_tu;		/* Tx underrun */
332 
333 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
334 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
335 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
336 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
337 	struct evcnt sc_ev_rx_macctl;	/* Rx unsupported MAC control frames */
338 #endif /* WM_EVENT_COUNTERS */
339 
340 	bus_addr_t sc_tdt_reg;		/* offset of TDT register */
341 
342 	int	sc_txfree;		/* number of free Tx descriptors */
343 	int	sc_txnext;		/* next ready Tx descriptor */
344 
345 	int	sc_txsfree;		/* number of free Tx jobs */
346 	int	sc_txsnext;		/* next free Tx job */
347 	int	sc_txsdirty;		/* dirty Tx jobs */
348 
349 	/* These 5 variables are used only on the 82547. */
350 	int	sc_txfifo_size;		/* Tx FIFO size */
351 	int	sc_txfifo_head;		/* current head of FIFO */
352 	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
353 	int	sc_txfifo_stall;	/* Tx FIFO is stalled */
354 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
355 
356 	bus_addr_t sc_rdt_reg;		/* offset of RDT register */
357 
358 	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
359 	int	sc_rxdiscard;
360 	int	sc_rxlen;
361 	struct mbuf *sc_rxhead;
362 	struct mbuf *sc_rxtail;
363 	struct mbuf **sc_rxtailp;
364 
365 	uint32_t sc_ctrl;		/* prototype CTRL register */
366 #if 0
367 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
368 #endif
369 	uint32_t sc_icr;		/* prototype interrupt bits */
370 	uint32_t sc_itr;		/* prototype intr throttling reg */
371 	uint32_t sc_tctl;		/* prototype TCTL register */
372 	uint32_t sc_rctl;		/* prototype RCTL register */
373 	uint32_t sc_txcw;		/* prototype TXCW register */
374 	uint32_t sc_tipg;		/* prototype TIPG register */
375 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
376 	uint32_t sc_pba;		/* prototype PBA register */
377 
378 	int sc_tbi_linkup;		/* TBI link status */
379 	int sc_tbi_anegticks;		/* autonegotiation ticks */
380 	int sc_tbi_ticks;		/* tbi ticks */
381 	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
382 	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */
383 
384 	int sc_mchash_type;		/* multicast filter offset */
385 
386 #if NRND > 0
387 	rndsource_element_t rnd_source;	/* random source */
388 #endif
389 	int sc_ich8_flash_base;
390 	int sc_ich8_flash_bank_size;
391 };
392 
393 #define	WM_RXCHAIN_RESET(sc)						\
394 do {									\
395 	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
396 	*(sc)->sc_rxtailp = NULL;					\
397 	(sc)->sc_rxlen = 0;						\
398 } while (/*CONSTCOND*/0)
399 
400 #define	WM_RXCHAIN_LINK(sc, m)						\
401 do {									\
402 	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
403 	(sc)->sc_rxtailp = &(m)->m_next;				\
404 } while (/*CONSTCOND*/0)
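
/*
 * A minimal sketch (illustrative only, not compiled) of how the two
 * macros above build an mbuf chain through the tail pointer:
 */
#if 0
	WM_RXCHAIN_RESET(sc);	  /* sc_rxhead = NULL, tailp = &sc_rxhead */
	WM_RXCHAIN_LINK(sc, m0);  /* sc_rxhead = m0, tailp = &m0->m_next */
	WM_RXCHAIN_LINK(sc, m1);  /* m0->m_next = m1, tail advances */
#endif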
405 
406 /* sc_flags */
407 #define	WM_F_HAS_MII		0x0001	/* has MII */
408 #define	WM_F_EEPROM_HANDSHAKE	0x0002	/* requires EEPROM handshake */
409 #define	WM_F_EEPROM_SEMAPHORE	0x0004	/* EEPROM with semaphore */
410 #define	WM_F_EEPROM_EERDEEWR	0x0008	/* EEPROM access via EERD/EEWR */
411 #define	WM_F_EEPROM_SPI		0x0010	/* EEPROM is SPI */
412 #define	WM_F_EEPROM_FLASH	0x0020	/* EEPROM is FLASH */
413 #define	WM_F_EEPROM_INVALID	0x0040	/* EEPROM not present (bad checksum) */
414 #define	WM_F_IOH_VALID		0x0080	/* I/O handle is valid */
415 #define	WM_F_BUS64		0x0100	/* bus is 64-bit */
416 #define	WM_F_PCIX		0x0200	/* bus is PCI-X */
417 #define	WM_F_CSA		0x0400	/* bus is CSA */
418 #define	WM_F_PCIE		0x0800	/* bus is PCI-Express */
419 #define WM_F_SWFW_SYNC		0x1000  /* Software-Firmware synchronisation */
420 #define WM_F_SWFWHW_SYNC	0x2000  /* Software-Firmware synchronisation */
421 
422 #ifdef WM_EVENT_COUNTERS
423 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
424 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
425 #else
426 #define	WM_EVCNT_INCR(ev)	/* nothing */
427 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
428 #endif
429 
430 #define	CSR_READ(sc, reg)						\
431 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
432 #define	CSR_WRITE(sc, reg, val)						\
433 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
434 #define	CSR_WRITE_FLUSH(sc)						\
435 	(void) CSR_READ((sc), WMREG_STATUS)
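
/*
 * (CSR_WRITE_FLUSH() reads a harmless register so that posted PCI
 * writes are forced out to the chip before the driver proceeds.)
 */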
436 
437 #define ICH8_FLASH_READ32(sc, reg) \
438 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
439 #define ICH8_FLASH_WRITE32(sc, reg, data) \
440 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
441 
442 #define ICH8_FLASH_READ16(sc, reg) \
443 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
444 #define ICH8_FLASH_WRITE16(sc, reg, data) \
445 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
446 
447 #define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
448 #define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))
449 
450 #define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
451 #define	WM_CDTXADDR_HI(sc, x)						\
452 	(sizeof(bus_addr_t) == 8 ?					\
453 	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)
454 
455 #define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
456 #define	WM_CDRXADDR_HI(sc, x)						\
457 	(sizeof(bus_addr_t) == 8 ?					\
458 	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
459 
460 #define	WM_CDTXSYNC(sc, x, n, ops)					\
461 do {									\
462 	int __x, __n;							\
463 									\
464 	__x = (x);							\
465 	__n = (n);							\
466 									\
467 	/* If it will wrap around, sync to the end of the ring. */	\
468 	if ((__x + __n) > WM_NTXDESC(sc)) {				\
469 		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
470 		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
471 		    (WM_NTXDESC(sc) - __x), (ops));			\
472 		__n -= (WM_NTXDESC(sc) - __x);				\
473 		__x = 0;						\
474 	}								\
475 									\
476 	/* Now sync whatever is left. */				\
477 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
478 	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
479 } while (/*CONSTCOND*/0)
480 
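/*
 * A typical use (illustrative sketch only): after filling the
 * descriptors for a packet, sync the range they occupy before handing
 * them to the chip; the macro splits the sync at the ring wrap point.
 */
#if 0
	WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
#endif
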
481 #define	WM_CDRXSYNC(sc, x, ops)						\
482 do {									\
483 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
484 	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
485 } while (/*CONSTCOND*/0)
486 
487 #define	WM_INIT_RXDESC(sc, x)						\
488 do {									\
489 	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
490 	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
491 	struct mbuf *__m = __rxs->rxs_mbuf;				\
492 									\
493 	/*								\
494 	 * Note: We scoot the packet forward 2 bytes in the buffer	\
495 	 * so that the payload after the Ethernet header is aligned	\
496 	 * to a 4-byte boundary.					\
497 	 *								\
498 	 * XXX BRAINDAMAGE ALERT!					\
499 	 * The stupid chip uses the same size for every buffer, which	\
500 	 * is set in the Receive Control register.  We are using the 2K	\
501 	 * size option, but what we REALLY want is (2K - 2)!  For this	\
502 	 * reason, we can't "scoot" packets longer than the standard	\
503 	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
504 	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
505 	 * the upper layer copy the headers.				\
506 	 */								\
507 	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
508 									\
509 	wm_set_dma_addr(&__rxd->wrx_addr,				\
510 	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
511 	__rxd->wrx_len = 0;						\
512 	__rxd->wrx_cksum = 0;						\
513 	__rxd->wrx_status = 0;						\
514 	__rxd->wrx_errors = 0;						\
515 	__rxd->wrx_special = 0;						\
516 	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
517 									\
518 	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
519 } while (/*CONSTCOND*/0)
520 
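/*
 * (The 2-byte "scoot": the Ethernet header is 14 bytes, so starting
 * the frame at offset 2 puts the IP header at offset 16, a 4-byte
 * aligned address that strict-alignment CPUs can safely load from.)
 */
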
521 static void	wm_start(struct ifnet *);
522 static void	wm_watchdog(struct ifnet *);
523 static int	wm_ioctl(struct ifnet *, u_long, void *);
524 static int	wm_init(struct ifnet *);
525 static void	wm_stop(struct ifnet *, int);
526 
527 static void	wm_reset(struct wm_softc *);
528 static void	wm_rxdrain(struct wm_softc *);
529 static int	wm_add_rxbuf(struct wm_softc *, int);
530 static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
531 static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
532 static int	wm_validate_eeprom_checksum(struct wm_softc *);
533 static void	wm_tick(void *);
534 
535 static void	wm_set_filter(struct wm_softc *);
536 
537 static int	wm_intr(void *);
538 static void	wm_txintr(struct wm_softc *);
539 static void	wm_rxintr(struct wm_softc *);
540 static void	wm_linkintr(struct wm_softc *, uint32_t);
541 
542 static void	wm_tbi_mediainit(struct wm_softc *);
543 static int	wm_tbi_mediachange(struct ifnet *);
544 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
545 
546 static void	wm_tbi_set_linkled(struct wm_softc *);
547 static void	wm_tbi_check_link(struct wm_softc *);
548 
549 static void	wm_gmii_reset(struct wm_softc *);
550 
551 static int	wm_gmii_i82543_readreg(device_t, int, int);
552 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
553 
554 static int	wm_gmii_i82544_readreg(device_t, int, int);
555 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
556 
557 static int	wm_gmii_i80003_readreg(device_t, int, int);
558 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
559 
560 static int	wm_gmii_bm_readreg(device_t, int, int);
561 static void	wm_gmii_bm_writereg(device_t, int, int, int);
562 
563 static void	wm_gmii_statchg(device_t);
564 
565 static void	wm_gmii_mediainit(struct wm_softc *);
566 static int	wm_gmii_mediachange(struct ifnet *);
567 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
568 
569 static int	wm_kmrn_i80003_readreg(struct wm_softc *, int);
570 static void	wm_kmrn_i80003_writereg(struct wm_softc *, int, int);
571 
572 static int	wm_match(device_t, cfdata_t, void *);
573 static void	wm_attach(device_t, device_t, void *);
574 static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
575 static void	wm_get_auto_rd_done(struct wm_softc *);
576 static int	wm_get_swsm_semaphore(struct wm_softc *);
577 static void	wm_put_swsm_semaphore(struct wm_softc *);
578 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
579 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
580 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
581 static int	wm_get_swfwhw_semaphore(struct wm_softc *);
582 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
583 
584 static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
585 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
586 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
587 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
588 		     uint32_t, uint16_t *);
589 static int32_t	wm_read_ich8_byte(struct wm_softc *sc, uint32_t, uint8_t *);
590 static int32_t	wm_read_ich8_word(struct wm_softc *sc, uint32_t, uint16_t *);
591 static void	wm_82547_txfifo_stall(void *);
592 static int	wm_check_mng_mode(struct wm_softc *);
593 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
594 #if 0
595 static int	wm_check_mng_mode_82574(struct wm_softc *);
596 #endif
597 static int	wm_check_mng_mode_generic(struct wm_softc *);
598 static void	wm_get_hw_control(struct wm_softc *);
599 static int	wm_check_for_link(struct wm_softc *);
600 
601 CFATTACH_DECL_NEW(wm, sizeof(struct wm_softc),
602     wm_match, wm_attach, NULL, NULL);
603 
604 
605 /*
606  * Devices supported by this driver.
607  */
608 static const struct wm_product {
609 	pci_vendor_id_t		wmp_vendor;
610 	pci_product_id_t	wmp_product;
611 	const char		*wmp_name;
612 	wm_chip_type		wmp_type;
613 	int			wmp_flags;
614 #define	WMP_F_1000X		0x01
615 #define	WMP_F_1000T		0x02
616 } wm_products[] = {
617 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
618 	  "Intel i82542 1000BASE-X Ethernet",
619 	  WM_T_82542_2_1,	WMP_F_1000X },
620 
621 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
622 	  "Intel i82543GC 1000BASE-X Ethernet",
623 	  WM_T_82543,		WMP_F_1000X },
624 
625 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
626 	  "Intel i82543GC 1000BASE-T Ethernet",
627 	  WM_T_82543,		WMP_F_1000T },
628 
629 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
630 	  "Intel i82544EI 1000BASE-T Ethernet",
631 	  WM_T_82544,		WMP_F_1000T },
632 
633 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
634 	  "Intel i82544EI 1000BASE-X Ethernet",
635 	  WM_T_82544,		WMP_F_1000X },
636 
637 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
638 	  "Intel i82544GC 1000BASE-T Ethernet",
639 	  WM_T_82544,		WMP_F_1000T },
640 
641 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
642 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
643 	  WM_T_82544,		WMP_F_1000T },
644 
645 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
646 	  "Intel i82540EM 1000BASE-T Ethernet",
647 	  WM_T_82540,		WMP_F_1000T },
648 
649 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
650 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
651 	  WM_T_82540,		WMP_F_1000T },
652 
653 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
654 	  "Intel i82540EP 1000BASE-T Ethernet",
655 	  WM_T_82540,		WMP_F_1000T },
656 
657 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
658 	  "Intel i82540EP 1000BASE-T Ethernet",
659 	  WM_T_82540,		WMP_F_1000T },
660 
661 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
662 	  "Intel i82540EP 1000BASE-T Ethernet",
663 	  WM_T_82540,		WMP_F_1000T },
664 
665 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
666 	  "Intel i82545EM 1000BASE-T Ethernet",
667 	  WM_T_82545,		WMP_F_1000T },
668 
669 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
670 	  "Intel i82545GM 1000BASE-T Ethernet",
671 	  WM_T_82545_3,		WMP_F_1000T },
672 
673 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
674 	  "Intel i82545GM 1000BASE-X Ethernet",
675 	  WM_T_82545_3,		WMP_F_1000X },
676 #if 0
677 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
678 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
679 	  WM_T_82545_3,		WMP_F_SERDES },
680 #endif
681 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
682 	  "Intel i82546EB 1000BASE-T Ethernet",
683 	  WM_T_82546,		WMP_F_1000T },
684 
685 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
686 	  "Intel i82546EB 1000BASE-T Ethernet",
687 	  WM_T_82546,		WMP_F_1000T },
688 
689 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
690 	  "Intel i82545EM 1000BASE-X Ethernet",
691 	  WM_T_82545,		WMP_F_1000X },
692 
693 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
694 	  "Intel i82546EB 1000BASE-X Ethernet",
695 	  WM_T_82546,		WMP_F_1000X },
696 
697 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
698 	  "Intel i82546GB 1000BASE-T Ethernet",
699 	  WM_T_82546_3,		WMP_F_1000T },
700 
701 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
702 	  "Intel i82546GB 1000BASE-X Ethernet",
703 	  WM_T_82546_3,		WMP_F_1000X },
704 #if 0
705 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
706 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
707 	  WM_T_82546_3,		WMP_F_SERDES },
708 #endif
709 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
710 	  "i82546GB quad-port Gigabit Ethernet",
711 	  WM_T_82546_3,		WMP_F_1000T },
712 
713 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
714 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
715 	  WM_T_82546_3,		WMP_F_1000T },
716 
717 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
718 	  "Intel PRO/1000MT (82546GB)",
719 	  WM_T_82546_3,		WMP_F_1000T },
720 
721 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
722 	  "Intel i82541EI 1000BASE-T Ethernet",
723 	  WM_T_82541,		WMP_F_1000T },
724 
725 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
726 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
727 	  WM_T_82541,		WMP_F_1000T },
728 
729 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
730 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
731 	  WM_T_82541,		WMP_F_1000T },
732 
733 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
734 	  "Intel i82541ER 1000BASE-T Ethernet",
735 	  WM_T_82541_2,		WMP_F_1000T },
736 
737 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
738 	  "Intel i82541GI 1000BASE-T Ethernet",
739 	  WM_T_82541_2,		WMP_F_1000T },
740 
741 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
742 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
743 	  WM_T_82541_2,		WMP_F_1000T },
744 
745 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
746 	  "Intel i82541PI 1000BASE-T Ethernet",
747 	  WM_T_82541_2,		WMP_F_1000T },
748 
749 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
750 	  "Intel i82547EI 1000BASE-T Ethernet",
751 	  WM_T_82547,		WMP_F_1000T },
752 
753 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
754 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
755 	  WM_T_82547,		WMP_F_1000T },
756 
757 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
758 	  "Intel i82547GI 1000BASE-T Ethernet",
759 	  WM_T_82547_2,		WMP_F_1000T },
760 
761 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
762 	  "Intel PRO/1000 PT (82571EB)",
763 	  WM_T_82571,		WMP_F_1000T },
764 
765 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
766 	  "Intel PRO/1000 PF (82571EB)",
767 	  WM_T_82571,		WMP_F_1000X },
768 #if 0
769 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
770 	  "Intel PRO/1000 PB (82571EB)",
771 	  WM_T_82571,		WMP_F_SERDES },
772 #endif
773 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
774 	  "Intel PRO/1000 QT (82571EB)",
775 	  WM_T_82571,		WMP_F_1000T },
776 
777 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
778 	  "Intel i82572EI 1000baseT Ethernet",
779 	  WM_T_82572,		WMP_F_1000T },
780 
781 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
782 	  "Intel PRO/1000 PT Quad Port Server Adapter",
783 	  WM_T_82571,		WMP_F_1000T, },
784 
785 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
786 	  "Intel i82572EI 1000baseX Ethernet",
787 	  WM_T_82572,		WMP_F_1000X },
788 #if 0
789 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
790 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
791 	  WM_T_82572,		WMP_F_SERDES },
792 #endif
793 
794 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
795 	  "Intel i82572EI 1000baseT Ethernet",
796 	  WM_T_82572,		WMP_F_1000T },
797 
798 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
799 	  "Intel i82573E",
800 	  WM_T_82573,		WMP_F_1000T },
801 
802 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
803 	  "Intel i82573E IAMT",
804 	  WM_T_82573,		WMP_F_1000T },
805 
806 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
807 	  "Intel i82573L Gigabit Ethernet",
808 	  WM_T_82573,		WMP_F_1000T },
809 
810 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
811 	  "Intel i82574L",
812 	  WM_T_82574,		WMP_F_1000T },
813 
814 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
815 	  "i80003 dual 1000baseT Ethernet",
816 	  WM_T_80003,		WMP_F_1000T },
817 
818 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
819 	  "i80003 dual 1000baseX Ethernet",
820 	  WM_T_80003,		WMP_F_1000T },
821 #if 0
822 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
823 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
824 	  WM_T_80003,		WMP_F_SERDES },
825 #endif
826 
827 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
828 	  "Intel i80003 1000baseT Ethernet",
829 	  WM_T_80003,		WMP_F_1000T },
830 #if 0
831 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
832 	  "Intel i80003 Gigabit Ethernet (SERDES)",
833 	  WM_T_80003,		WMP_F_SERDES },
834 #endif
835 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
836 	  "Intel i82801H (M_AMT) LAN Controller",
837 	  WM_T_ICH8,		WMP_F_1000T },
838 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
839 	  "Intel i82801H (AMT) LAN Controller",
840 	  WM_T_ICH8,		WMP_F_1000T },
841 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
842 	  "Intel i82801H LAN Controller",
843 	  WM_T_ICH8,		WMP_F_1000T },
844 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
845 	  "Intel i82801H (IFE) LAN Controller",
846 	  WM_T_ICH8,		WMP_F_1000T },
847 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
848 	  "Intel i82801H (M) LAN Controller",
849 	  WM_T_ICH8,		WMP_F_1000T },
850 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
851 	  "Intel i82801H IFE (GT) LAN Controller",
852 	  WM_T_ICH8,		WMP_F_1000T },
853 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
854 	  "Intel i82801H IFE (G) LAN Controller",
855 	  WM_T_ICH8,		WMP_F_1000T },
856 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
857 	  "82801I (AMT) LAN Controller",
858 	  WM_T_ICH9,		WMP_F_1000T },
859 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
860 	  "82801I LAN Controller",
861 	  WM_T_ICH9,		WMP_F_1000T },
862 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
863 	  "82801I (G) LAN Controller",
864 	  WM_T_ICH9,		WMP_F_1000T },
865 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
866 	  "82801I (GT) LAN Controller",
867 	  WM_T_ICH9,		WMP_F_1000T },
868 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
869 	  "82801I (C) LAN Controller",
870 	  WM_T_ICH9,		WMP_F_1000T },
871 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
872 	  "82801I mobile LAN Controller",
873 	  WM_T_ICH9,		WMP_F_1000T },
874 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
875 	  "82801I mobile (V) LAN Controller",
876 	  WM_T_ICH9,		WMP_F_1000T },
877 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
878 	  "82801I mobile (AMT) LAN Controller",
879 	  WM_T_ICH9,		WMP_F_1000T },
880 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82567LM_3,
881 	  "82567LM-3 LAN Controller",
882 	  WM_T_ICH10,		WMP_F_1000T },
883 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82567LF_3,
884 	  "82567LF-3 LAN Controller",
885 	  WM_T_ICH10,		WMP_F_1000T },
886 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
887 	  "i82801J (LF) LAN Controller",
888 	  WM_T_ICH10,		WMP_F_1000T },
889 	{ 0,			0,
890 	  NULL,
891 	  0,			0 },
892 };
893 
894 #ifdef WM_EVENT_COUNTERS
895 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
896 #endif /* WM_EVENT_COUNTERS */
897 
898 #if 0 /* Not currently used */
899 static inline uint32_t
900 wm_io_read(struct wm_softc *sc, int reg)
901 {
902 
903 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
904 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
905 }
906 #endif
907 
908 static inline void
909 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
910 {
911 
912 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
913 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
914 }
915 
916 static inline void
917 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
918 {
919 	wa->wa_low = htole32(v & 0xffffffffU);
920 	if (sizeof(bus_addr_t) == 8)
921 		wa->wa_high = htole32((uint64_t) v >> 32);
922 	else
923 		wa->wa_high = 0;
924 }
925 
926 static const struct wm_product *
927 wm_lookup(const struct pci_attach_args *pa)
928 {
929 	const struct wm_product *wmp;
930 
931 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
932 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
933 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
934 			return (wmp);
935 	}
936 	return (NULL);
937 }
938 
939 static int
940 wm_match(device_t parent, cfdata_t cf, void *aux)
941 {
942 	struct pci_attach_args *pa = aux;
943 
944 	if (wm_lookup(pa) != NULL)
945 		return (1);
946 
947 	return (0);
948 }
949 
950 static void
951 wm_attach(device_t parent, device_t self, void *aux)
952 {
953 	struct wm_softc *sc = device_private(self);
954 	struct pci_attach_args *pa = aux;
955 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
956 	pci_chipset_tag_t pc = pa->pa_pc;
957 	pci_intr_handle_t ih;
958 	size_t cdata_size;
959 	const char *intrstr = NULL;
960 	const char *eetype, *xname;
961 	bus_space_tag_t memt;
962 	bus_space_handle_t memh;
963 	bus_dma_segment_t seg;
964 	int memh_valid;
965 	int i, rseg, error;
966 	const struct wm_product *wmp;
967 	prop_data_t ea;
968 	prop_number_t pn;
969 	uint8_t enaddr[ETHER_ADDR_LEN];
970 	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
971 	pcireg_t preg, memtype;
972 	uint32_t reg;
973 
974 	sc->sc_dev = self;
975 	callout_init(&sc->sc_tick_ch, 0);
976 
977 	wmp = wm_lookup(pa);
978 	if (wmp == NULL) {
979 		printf("\n");
980 		panic("wm_attach: impossible");
981 	}
982 
983 	sc->sc_pc = pa->pa_pc;
984 	sc->sc_pcitag = pa->pa_tag;
985 
986 	if (pci_dma64_available(pa))
987 		sc->sc_dmat = pa->pa_dmat64;
988 	else
989 		sc->sc_dmat = pa->pa_dmat;
990 
991 	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
992 	aprint_naive(": Ethernet controller\n");
993 	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);
994 
995 	sc->sc_type = wmp->wmp_type;
996 	if (sc->sc_type < WM_T_82543) {
997 		if (preg < 2) {
998 			aprint_error_dev(sc->sc_dev,
999 			    "i82542 must be at least rev. 2\n");
1000 			return;
1001 		}
1002 		if (preg < 3)
1003 			sc->sc_type = WM_T_82542_2_0;
1004 	}
1005 
1006 	/*
1007 	 * Map the device.  All devices support memory-mapped access,
1008 	 * and it is really required for normal operation.
1009 	 */
1010 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1011 	switch (memtype) {
1012 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1013 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1014 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1015 		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
1016 		break;
1017 	default:
1018 		memh_valid = 0;
1019 	}
1020 
1021 	if (memh_valid) {
1022 		sc->sc_st = memt;
1023 		sc->sc_sh = memh;
1024 	} else {
1025 		aprint_error_dev(sc->sc_dev,
1026 		    "unable to map device registers\n");
1027 		return;
1028 	}
1029 
1030 	/*
1031 	 * In addition, i82544 and later support I/O mapped indirect
1032 	 * register access.  It is not desirable (nor supported in
1033 	 * this driver) to use it for normal operation, though it is
1034 	 * required to work around bugs in some chip versions.
1035 	 */
1036 	if (sc->sc_type >= WM_T_82544) {
1037 		/* First we have to find the I/O BAR. */
1038 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1039 			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
1040 			    PCI_MAPREG_TYPE_IO)
1041 				break;
1042 		}
1043 		if (i == PCI_MAPREG_END)
1044 			aprint_error_dev(sc->sc_dev,
1045 			    "WARNING: unable to find I/O BAR\n");
1046 		else {
1047 			/*
1048 			 * The i8254x apparently doesn't respond when the
1049 			 * I/O BAR is 0, which looks as if it hasn't been
1050 			 * configured.
1051 			 */
1052 			preg = pci_conf_read(pc, pa->pa_tag, i);
1053 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1054 				aprint_error_dev(sc->sc_dev,
1055 				    "WARNING: I/O BAR at zero.\n");
1056 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1057 					0, &sc->sc_iot, &sc->sc_ioh,
1058 					NULL, NULL) == 0) {
1059 				sc->sc_flags |= WM_F_IOH_VALID;
1060 			} else {
1061 				aprint_error_dev(sc->sc_dev,
1062 				    "WARNING: unable to map I/O space\n");
1063 			}
1064 		}
1065 
1066 	}
1067 
1068 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
1069 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1070 	preg |= PCI_COMMAND_MASTER_ENABLE;
1071 	if (sc->sc_type < WM_T_82542_2_1)
1072 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1073 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1074 
1075 	/* power up chip */
1076 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1077 	    NULL)) && error != EOPNOTSUPP) {
1078 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1079 		return;
1080 	}
1081 
1082 	/*
1083 	 * Map and establish our interrupt.
1084 	 */
1085 	if (pci_intr_map(pa, &ih)) {
1086 		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
1087 		return;
1088 	}
1089 	intrstr = pci_intr_string(pc, ih);
1090 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
1091 	if (sc->sc_ih == NULL) {
1092 		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
1093 		if (intrstr != NULL)
1094 			aprint_normal(" at %s", intrstr);
1095 		aprint_normal("\n");
1096 		return;
1097 	}
1098 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
1099 
1100 	/*
1101 	 * Determine a few things about the bus we're connected to.
1102 	 */
1103 	if (sc->sc_type < WM_T_82543) {
1104 		/* We don't really know the bus characteristics here. */
1105 		sc->sc_bus_speed = 33;
1106 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1107 		/*
1108 		 * CSA (Communication Streaming Architecture) is about as fast
1109 		 * as a 32-bit 66MHz PCI bus.
1110 		 */
1111 		sc->sc_flags |= WM_F_CSA;
1112 		sc->sc_bus_speed = 66;
1113 		aprint_verbose_dev(sc->sc_dev,
1114 		    "Communication Streaming Architecture\n");
1115 		if (sc->sc_type == WM_T_82547) {
1116 			callout_init(&sc->sc_txfifo_ch, 0);
1117 			callout_setfunc(&sc->sc_txfifo_ch,
1118 					wm_82547_txfifo_stall, sc);
1119 			aprint_verbose_dev(sc->sc_dev,
1120 			    "using 82547 Tx FIFO stall work-around\n");
1121 		}
1122 	} else if (sc->sc_type >= WM_T_82571) {
1123 		sc->sc_flags |= WM_F_PCIE;
1124 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1125 			&& (sc->sc_type != WM_T_ICH10))
1126 			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
1127 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1128 	} else {
1129 		reg = CSR_READ(sc, WMREG_STATUS);
1130 		if (reg & STATUS_BUS64)
1131 			sc->sc_flags |= WM_F_BUS64;
1132 		if (sc->sc_type >= WM_T_82544 &&
1133 		    (reg & STATUS_PCIX_MODE) != 0) {
1134 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1135 
1136 			sc->sc_flags |= WM_F_PCIX;
1137 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1138 					       PCI_CAP_PCIX,
1139 					       &sc->sc_pcix_offset, NULL) == 0)
1140 				aprint_error_dev(sc->sc_dev,
1141 				    "unable to find PCIX capability\n");
1142 			else if (sc->sc_type != WM_T_82545_3 &&
1143 				 sc->sc_type != WM_T_82546_3) {
1144 				/*
1145 				 * Work around a problem caused by the BIOS
1146 				 * setting the max memory read byte count
1147 				 * incorrectly.
1148 				 */
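				/*
				 * (MMRBC encodes the count as 512 << n
				 * bytes, so n = 0..3 selects 512, 1024,
				 * 2048 or 4096 bytes.)
				 */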
1149 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1150 				    sc->sc_pcix_offset + PCI_PCIX_CMD);
1151 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1152 				    sc->sc_pcix_offset + PCI_PCIX_STATUS);
1153 
1154 				bytecnt =
1155 				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
1156 				    PCI_PCIX_CMD_BYTECNT_SHIFT;
1157 				maxb =
1158 				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
1159 				    PCI_PCIX_STATUS_MAXB_SHIFT;
1160 				if (bytecnt > maxb) {
1161 					aprint_verbose_dev(sc->sc_dev,
1162 					    "resetting PCI-X MMRBC: %d -> %d\n",
1163 					    512 << bytecnt, 512 << maxb);
1164 					pcix_cmd = (pcix_cmd &
1165 					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
1166 					   (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
1167 					pci_conf_write(pa->pa_pc, pa->pa_tag,
1168 					    sc->sc_pcix_offset + PCI_PCIX_CMD,
1169 					    pcix_cmd);
1170 				}
1171 			}
1172 		}
1173 		/*
1174 		 * The quad port adapter is special; it has a PCIX-PCIX
1175 		 * bridge on the board, and can run the secondary bus at
1176 		 * a higher speed.
1177 		 */
1178 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1179 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1180 								      : 66;
1181 		} else if (sc->sc_flags & WM_F_PCIX) {
1182 			switch (reg & STATUS_PCIXSPD_MASK) {
1183 			case STATUS_PCIXSPD_50_66:
1184 				sc->sc_bus_speed = 66;
1185 				break;
1186 			case STATUS_PCIXSPD_66_100:
1187 				sc->sc_bus_speed = 100;
1188 				break;
1189 			case STATUS_PCIXSPD_100_133:
1190 				sc->sc_bus_speed = 133;
1191 				break;
1192 			default:
1193 				aprint_error_dev(sc->sc_dev,
1194 				    "unknown PCIXSPD %d; assuming 66MHz\n",
1195 				    reg & STATUS_PCIXSPD_MASK);
1196 				sc->sc_bus_speed = 66;
1197 			}
1198 		} else
1199 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1200 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1201 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1202 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1203 	}
1204 
1205 	/*
1206 	 * Allocate the control data structures, and create and load the
1207 	 * DMA map for it.
1208 	 *
1209 	 * NOTE: All Tx descriptors must be in the same 4G segment of
1210 	 * memory.  So must Rx descriptors.  We simplify by allocating
1211 	 * both sets within the same 4G segment.
1212 	 */
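	/*
	 * (The 4G requirement is enforced by the (bus_size_t)
	 * 0x100000000ULL boundary argument passed to bus_dmamem_alloc()
	 * below.)
	 */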
1213 	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1214 	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
1215 	cdata_size = sc->sc_type < WM_T_82544 ?
1216 	    sizeof(struct wm_control_data_82542) :
1217 	    sizeof(struct wm_control_data_82544);
1218 	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE,
1219 				      (bus_size_t) 0x100000000ULL,
1220 				      &seg, 1, &rseg, 0)) != 0) {
1221 		aprint_error_dev(sc->sc_dev,
1222 		    "unable to allocate control data, error = %d\n",
1223 		    error);
1224 		goto fail_0;
1225 	}
1226 
1227 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size,
1228 				    (void **)&sc->sc_control_data,
1229 				    BUS_DMA_COHERENT)) != 0) {
1230 		aprint_error_dev(sc->sc_dev,
1231 		    "unable to map control data, error = %d\n", error);
1232 		goto fail_1;
1233 	}
1234 
1235 	if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size,
1236 				       0, 0, &sc->sc_cddmamap)) != 0) {
1237 		aprint_error_dev(sc->sc_dev,
1238 		    "unable to create control data DMA map, error = %d\n",
1239 		    error);
1240 		goto fail_2;
1241 	}
1242 
1243 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1244 				     sc->sc_control_data, cdata_size, NULL,
1245 				     0)) != 0) {
1246 		aprint_error_dev(sc->sc_dev,
1247 		    "unable to load control data DMA map, error = %d\n",
1248 		    error);
1249 		goto fail_3;
1250 	}
1251 
1252 
1253 	/*
1254 	 * Create the transmit buffer DMA maps.
1255 	 */
1256 	WM_TXQUEUELEN(sc) =
1257 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1258 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1259 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1260 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1261 					       WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1262 					  &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1263 			aprint_error_dev(sc->sc_dev,
1264 			    "unable to create Tx DMA map %d, error = %d\n",
1265 			    i, error);
1266 			goto fail_4;
1267 		}
1268 	}
1269 
1270 	/*
1271 	 * Create the receive buffer DMA maps.
1272 	 */
1273 	for (i = 0; i < WM_NRXDESC; i++) {
1274 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1275 					       MCLBYTES, 0, 0,
1276 					  &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1277 			aprint_error_dev(sc->sc_dev,
1278 			    "unable to create Rx DMA map %d error = %d\n",
1279 			    i, error);
1280 			goto fail_5;
1281 		}
1282 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
1283 	}
1284 
1285 	/* Clear interesting stat counters (these registers clear on read). */
1286 	CSR_READ(sc, WMREG_COLC);
1287 	CSR_READ(sc, WMREG_RXERRC);
1288 
1289 	/*
1290 	 * Reset the chip to a known state.
1291 	 */
1292 	wm_reset(sc);
1293 
1294 	switch (sc->sc_type) {
1295 	case WM_T_82571:
1296 	case WM_T_82572:
1297 	case WM_T_82573:
1298 	case WM_T_82574:
1299 	case WM_T_80003:
1300 	case WM_T_ICH8:
1301 	case WM_T_ICH9:
1302 	case WM_T_ICH10:
1303 		if (wm_check_mng_mode(sc) != 0)
1304 			wm_get_hw_control(sc);
1305 		break;
1306 	default:
1307 		break;
1308 	}
1309 
1310 	/*
1311 	 * Get some information about the EEPROM.
1312 	 */
1313 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
1314 	    || (sc->sc_type == WM_T_ICH10)) {
1315 		uint32_t flash_size;
1316 		sc->sc_flags |= WM_F_SWFWHW_SYNC | WM_F_EEPROM_FLASH;
1317 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1318 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1319 		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1320 			aprint_error_dev(sc->sc_dev,
1321 			    "can't map FLASH registers\n");
1322 			return;
1323 		}
1324 		flash_size = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1325 		sc->sc_ich8_flash_base = (flash_size & ICH_GFPREG_BASE_MASK) *
1326 						ICH_FLASH_SECTOR_SIZE;
1327 		sc->sc_ich8_flash_bank_size =
1328 			((flash_size >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1329 		sc->sc_ich8_flash_bank_size -=
1330 			(flash_size & ICH_GFPREG_BASE_MASK);
1331 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
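		/*
		 * (At this point the size is in bytes and covers both
		 * NVM banks; the division below converts it to 16-bit
		 * words per bank, the unit the ICH8 read routines use.)
		 */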
1332 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1333 	} else if (sc->sc_type == WM_T_80003)
1334 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR |  WM_F_SWFW_SYNC;
1335 	else if (sc->sc_type == WM_T_82573)
1336 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1337 	else if (sc->sc_type == WM_T_82574)
1338 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1339 	else if (sc->sc_type > WM_T_82544)
1340 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1341 
1342 	if (sc->sc_type <= WM_T_82544)
1343 		sc->sc_ee_addrbits = 6;
1344 	else if (sc->sc_type <= WM_T_82546_3) {
1345 		reg = CSR_READ(sc, WMREG_EECD);
1346 		if (reg & EECD_EE_SIZE)
1347 			sc->sc_ee_addrbits = 8;
1348 		else
1349 			sc->sc_ee_addrbits = 6;
1350 	} else if (sc->sc_type <= WM_T_82547_2) {
1351 		reg = CSR_READ(sc, WMREG_EECD);
1352 		if (reg & EECD_EE_TYPE) {
1353 			sc->sc_flags |= WM_F_EEPROM_SPI;
1354 			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
1355 		} else
1356 			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
1357 	} else if ((sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574) &&
1358 	    (wm_is_onboard_nvm_eeprom(sc) == 0)) {
1359 		sc->sc_flags |= WM_F_EEPROM_FLASH;
1360 	} else {
1361 		/* Assume everything else is SPI. */
1362 		reg = CSR_READ(sc, WMREG_EECD);
1363 		sc->sc_flags |= WM_F_EEPROM_SPI;
1364 		sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
1365 	}
1366 
1367 	/*
1368 	 * Defer printing the EEPROM type until after verifying the checksum.
1369 	 * This allows the EEPROM type to be printed correctly when no
1370 	 * EEPROM is attached.
1371 	 */
1372 
1373 	/*
1374 	 * Validate the EEPROM checksum. If the checksum fails, flag this for
1375 	 * later, so we can fail future reads from the EEPROM.
1376 	 */
1377 	if (wm_validate_eeprom_checksum(sc)) {
1378 		/*
1379 		 * Check a second time; some PCI-e parts fail the first
1380 		 * check while the link is in a sleep state.
1381 		 */
1382 		if (wm_validate_eeprom_checksum(sc))
1383 			sc->sc_flags |= WM_F_EEPROM_INVALID;
1384 	}
1385 
1386 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
1387 		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1388 	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1389 		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1390 	} else {
1391 		if (sc->sc_flags & WM_F_EEPROM_SPI)
1392 			eetype = "SPI";
1393 		else
1394 			eetype = "MicroWire";
1395 		aprint_verbose_dev(sc->sc_dev,
1396 		    "%u word (%d address bits) %s EEPROM\n",
1397 		    1U << sc->sc_ee_addrbits,
1398 		    sc->sc_ee_addrbits, eetype);
1399 	}
1400 
1401 	/*
1402 	 * Read the Ethernet address from the EEPROM, unless it was
1403 	 * found first in the device properties.
1404 	 */
1405 	ea = prop_dictionary_get(device_properties(sc->sc_dev), "mac-addr");
1406 	if (ea != NULL) {
1407 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1408 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1409 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1410 	} else {
1411 		if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
1412 		    sizeof(myea) / sizeof(myea[0]), myea)) {
1413 			aprint_error_dev(sc->sc_dev,
1414 			    "unable to read Ethernet address\n");
1415 			return;
1416 		}
1417 		enaddr[0] = myea[0] & 0xff;
1418 		enaddr[1] = myea[0] >> 8;
1419 		enaddr[2] = myea[1] & 0xff;
1420 		enaddr[3] = myea[1] >> 8;
1421 		enaddr[4] = myea[2] & 0xff;
1422 		enaddr[5] = myea[2] >> 8;
1423 	}
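
	/*
	 * (The EEPROM stores the address as three little-endian 16-bit
	 * words; for example, myea[0] == 0x2301 yields enaddr[0] == 0x01
	 * and enaddr[1] == 0x23.)
	 */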
1424 
1425 	/*
1426 	 * Toggle the LSB of the MAC address on the second port
1427 	 * of the dual port controller.
1428 	 */
1429 	if (sc->sc_type == WM_T_82546 || sc->sc_type == WM_T_82546_3
1430 	    || sc->sc_type ==  WM_T_82571 || sc->sc_type == WM_T_80003) {
1431 		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
1432 			enaddr[5] ^= 1;
1433 	}
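
	/*
	 * (For example, if the shared EEPROM address ends in ...:40, the
	 * second port's address ends in ...:41.)
	 */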
1434 
1435 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1436 	    ether_sprintf(enaddr));
1437 
1438 	/*
1439 	 * Read the config info from the EEPROM, and set up various
1440 	 * bits in the control registers based on their contents.
1441 	 */
1442 	pn = prop_dictionary_get(device_properties(sc->sc_dev),
1443 				 "i82543-cfg1");
1444 	if (pn != NULL) {
1445 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1446 		cfg1 = (uint16_t) prop_number_integer_value(pn);
1447 	} else {
1448 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1449 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1450 			return;
1451 		}
1452 	}
1453 
1454 	pn = prop_dictionary_get(device_properties(sc->sc_dev),
1455 				 "i82543-cfg2");
1456 	if (pn != NULL) {
1457 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1458 		cfg2 = (uint16_t) prop_number_integer_value(pn);
1459 	} else {
1460 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1461 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1462 			return;
1463 		}
1464 	}
1465 
1466 	if (sc->sc_type >= WM_T_82544) {
1467 		pn = prop_dictionary_get(device_properties(sc->sc_dev),
1468 					 "i82543-swdpin");
1469 		if (pn != NULL) {
1470 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1471 			swdpin = (uint16_t) prop_number_integer_value(pn);
1472 		} else {
1473 			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1474 				aprint_error_dev(sc->sc_dev,
1475 				    "unable to read SWDPIN\n");
1476 				return;
1477 			}
1478 		}
1479 	}
1480 
1481 	if (cfg1 & EEPROM_CFG1_ILOS)
1482 		sc->sc_ctrl |= CTRL_ILOS;
1483 	if (sc->sc_type >= WM_T_82544) {
1484 		sc->sc_ctrl |=
1485 		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1486 		    CTRL_SWDPIO_SHIFT;
1487 		sc->sc_ctrl |=
1488 		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1489 		    CTRL_SWDPINS_SHIFT;
1490 	} else {
1491 		sc->sc_ctrl |=
1492 		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1493 		    CTRL_SWDPIO_SHIFT;
1494 	}
1495 
1496 #if 0
1497 	if (sc->sc_type >= WM_T_82544) {
1498 		if (cfg1 & EEPROM_CFG1_IPS0)
1499 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1500 		if (cfg1 & EEPROM_CFG1_IPS1)
1501 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1502 		sc->sc_ctrl_ext |=
1503 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1504 		    CTRL_EXT_SWDPIO_SHIFT;
1505 		sc->sc_ctrl_ext |=
1506 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1507 		    CTRL_EXT_SWDPINS_SHIFT;
1508 	} else {
1509 		sc->sc_ctrl_ext |=
1510 		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1511 		    CTRL_EXT_SWDPIO_SHIFT;
1512 	}
1513 #endif
1514 
1515 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1516 #if 0
1517 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1518 #endif
1519 
1520 	/*
1521 	 * Set up some register offsets that are different between
1522 	 * the i82542 and the i82543 and later chips.
1523 	 */
1524 	if (sc->sc_type < WM_T_82543) {
1525 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
1526 		sc->sc_tdt_reg = WMREG_OLD_TDT;
1527 	} else {
1528 		sc->sc_rdt_reg = WMREG_RDT;
1529 		sc->sc_tdt_reg = WMREG_TDT;
1530 	}
1531 
1532 	/*
1533 	 * Determine if we're TBI or GMII mode, and initialize the
1534 	 * media structures accordingly.
1535 	 */
1536 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1537 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_82573
1538 	    || sc->sc_type == WM_T_82574) {
1539 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
1540 		wm_gmii_mediainit(sc);
1541 	} else if (sc->sc_type < WM_T_82543 ||
1542 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1543 		if (wmp->wmp_flags & WMP_F_1000T)
1544 			aprint_error_dev(sc->sc_dev,
1545 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
1546 		wm_tbi_mediainit(sc);
1547 	} else {
1548 		if (wmp->wmp_flags & WMP_F_1000X)
1549 			aprint_error_dev(sc->sc_dev,
1550 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
1551 		wm_gmii_mediainit(sc);
1552 	}
1553 
1554 	ifp = &sc->sc_ethercom.ec_if;
1555 	xname = device_xname(sc->sc_dev);
1556 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
1557 	ifp->if_softc = sc;
1558 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1559 	ifp->if_ioctl = wm_ioctl;
1560 	ifp->if_start = wm_start;
1561 	ifp->if_watchdog = wm_watchdog;
1562 	ifp->if_init = wm_init;
1563 	ifp->if_stop = wm_stop;
1564 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1565 	IFQ_SET_READY(&ifp->if_snd);
1566 
1567 	if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_82574 &&
1568 	    sc->sc_type != WM_T_ICH8)
1569 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1570 
1571 	/*
1572 	 * If we're an i82543 or greater, we can support VLANs.
1573 	 */
1574 	if (sc->sc_type >= WM_T_82543)
1575 		sc->sc_ethercom.ec_capabilities |=
1576 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
1577 
1578 	/*
1579 	 * We can perform TCPv4 and UDPv4 checksum offload.  Only
1580 	 * on i82543 and later.
1581 	 */
1582 	if (sc->sc_type >= WM_T_82543) {
1583 		ifp->if_capabilities |=
1584 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1585 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1586 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1587 		    IFCAP_CSUM_TCPv6_Tx |
1588 		    IFCAP_CSUM_UDPv6_Tx;
1589 	}
1590 
1591 	/*
1592 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
1593 	 *
1594 	 *	82541GI (8086:1076) ... no
1595 	 *	82572EI (8086:10b9) ... yes
1596 	 */
1597 	if (sc->sc_type >= WM_T_82571) {
1598 		ifp->if_capabilities |=
1599 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1600 	}
1601 
1602 	/*
1603 	 * If we're an i82544 or greater (except i82547), we can do
1604 	 * TCP segmentation offload.
1605 	 */
1606 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
1607 		ifp->if_capabilities |= IFCAP_TSOv4;
1608 	}
1609 
1610 	if (sc->sc_type >= WM_T_82571) {
1611 		ifp->if_capabilities |= IFCAP_TSOv6;
1612 	}
1613 
1614 	/*
1615 	 * Attach the interface.
1616 	 */
1617 	if_attach(ifp);
1618 	ether_ifattach(ifp, enaddr);
1619 #if NRND > 0
1620 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
1621 #endif
1622 
1623 #ifdef WM_EVENT_COUNTERS
1624 	/* Attach event counters. */
1625 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1626 	    NULL, xname, "txsstall");
1627 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1628 	    NULL, xname, "txdstall");
1629 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1630 	    NULL, xname, "txfifo_stall");
1631 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1632 	    NULL, xname, "txdw");
1633 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1634 	    NULL, xname, "txqe");
1635 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1636 	    NULL, xname, "rxintr");
1637 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
1638 	    NULL, xname, "linkintr");
1639 
1640 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1641 	    NULL, xname, "rxipsum");
1642 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
1643 	    NULL, xname, "rxtusum");
1644 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1645 	    NULL, xname, "txipsum");
1646 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
1647 	    NULL, xname, "txtusum");
1648 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
1649 	    NULL, xname, "txtusum6");
1650 
1651 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
1652 	    NULL, xname, "txtso");
1653 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
1654 	    NULL, xname, "txtso6");
1655 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
1656 	    NULL, xname, "txtsopain");
1657 
1658 	for (i = 0; i < WM_NTXSEGS; i++) {
1659 		sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
1660 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
1661 		    NULL, xname, wm_txseg_evcnt_names[i]);
1662 	}
1663 
1664 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
1665 	    NULL, xname, "txdrop");
1666 
1667 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
1668 	    NULL, xname, "tu");
1669 
1670 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
1671 	    NULL, xname, "tx_xoff");
1672 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
1673 	    NULL, xname, "tx_xon");
1674 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
1675 	    NULL, xname, "rx_xoff");
1676 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
1677 	    NULL, xname, "rx_xon");
1678 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
1679 	    NULL, xname, "rx_macctl");
1680 #endif /* WM_EVENT_COUNTERS */
1681 
1682 	if (!pmf_device_register(self, NULL, NULL))
1683 		aprint_error_dev(self, "couldn't establish power handler\n");
1684 	else
1685 		pmf_class_network_register(self, ifp);
1686 
1687 	return;
1688 
1689 	/*
1690 	 * Free any resources we've allocated during the failed attach
1691 	 * attempt.  Do this in reverse order and fall through.
1692 	 */
1693  fail_5:
1694 	for (i = 0; i < WM_NRXDESC; i++) {
1695 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
1696 			bus_dmamap_destroy(sc->sc_dmat,
1697 			    sc->sc_rxsoft[i].rxs_dmamap);
1698 	}
1699  fail_4:
1700 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1701 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
1702 			bus_dmamap_destroy(sc->sc_dmat,
1703 			    sc->sc_txsoft[i].txs_dmamap);
1704 	}
1705 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
1706  fail_3:
1707 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
1708  fail_2:
1709 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
1710 	    cdata_size);
1711  fail_1:
1712 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1713  fail_0:
1714 	return;
1715 }
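
#if 0
/*
 * Illustrative sketch (not compiled): the device-property-vs-EEPROM
 * fallback used above for "i82543-cfg1", "i82543-cfg2" and
 * "i82543-swdpin", restated as one hypothetical helper.
 * wm_read_cfg_word() is not part of this driver; it only names the
 * pattern.
 */
static int
wm_read_cfg_word(struct wm_softc *sc, const char *prop, int eeprom_off,
    uint16_t *valp)
{
	prop_object_t pn;

	pn = prop_dictionary_get(device_properties(sc->sc_dev), prop);
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		*valp = (uint16_t) prop_number_integer_value(pn);
		return (0);
	}
	return (wm_read_eeprom(sc, eeprom_off, 1, valp));
}
#endif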
1716 
1717 /*
1718  * wm_tx_offload:
1719  *
1720  *	Set up TCP/IP checksumming parameters for the
1721  *	specified packet.
1722  */
1723 static int
1724 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
1725     uint8_t *fieldsp)
1726 {
1727 	struct mbuf *m0 = txs->txs_mbuf;
1728 	struct livengood_tcpip_ctxdesc *t;
1729 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
1730 	uint32_t ipcse;
1731 	struct ether_header *eh;
1732 	int offset, iphl;
1733 	uint8_t fields;
1734 
1735 	/*
1736 	 * XXX It would be nice if the mbuf pkthdr had offset
1737 	 * fields for the protocol headers.
1738 	 */
1739 
1740 	eh = mtod(m0, struct ether_header *);
1741 	switch (ntohs(eh->ether_type)) {
1742 	case ETHERTYPE_IP:
1743 	case ETHERTYPE_IPV6:
1744 		offset = ETHER_HDR_LEN;
1745 		break;
1746 
1747 	case ETHERTYPE_VLAN:
1748 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1749 		break;
1750 
1751 	default:
1752 		/*
1753 		 * Don't support this protocol or encapsulation.
1754 		 */
1755 		*fieldsp = 0;
1756 		*cmdp = 0;
1757 		return (0);
1758 	}
1759 
1760 	if ((m0->m_pkthdr.csum_flags &
1761 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
1762 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
1763 	} else {
1764 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
1765 	}
1766 	ipcse = offset + iphl - 1;
1767 
1768 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
1769 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
1770 	seg = 0;
1771 	fields = 0;
1772 
1773 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
1774 		int hlen = offset + iphl;
1775 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
1776 
1777 		if (__predict_false(m0->m_len <
1778 				    (hlen + sizeof(struct tcphdr)))) {
1779 			/*
1780 			 * TCP/IP headers are not in the first mbuf; we need
1781 			 * to do this the slow and painful way.  Let's just
1782 			 * hope this doesn't happen very often.
1783 			 */
1784 			struct tcphdr th;
1785 
1786 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
1787 
1788 			m_copydata(m0, hlen, sizeof(th), &th);
1789 			if (v4) {
1790 				struct ip ip;
1791 
1792 				m_copydata(m0, offset, sizeof(ip), &ip);
1793 				ip.ip_len = 0;
1794 				m_copyback(m0,
1795 				    offset + offsetof(struct ip, ip_len),
1796 				    sizeof(ip.ip_len), &ip.ip_len);
1797 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
1798 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
1799 			} else {
1800 				struct ip6_hdr ip6;
1801 
1802 				m_copydata(m0, offset, sizeof(ip6), &ip6);
1803 				ip6.ip6_plen = 0;
1804 				m_copyback(m0,
1805 				    offset + offsetof(struct ip6_hdr, ip6_plen),
1806 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
1807 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
1808 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
1809 			}
1810 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
1811 			    sizeof(th.th_sum), &th.th_sum);
1812 
1813 			hlen += th.th_off << 2;
1814 		} else {
1815 			/*
1816 			 * TCP/IP headers are in the first mbuf; we can do
1817 			 * this the easy way.
1818 			 */
1819 			struct tcphdr *th;
1820 
1821 			if (v4) {
1822 				struct ip *ip =
1823 				    (void *)(mtod(m0, char *) + offset);
1824 				th = (void *)(mtod(m0, char *) + hlen);
1825 
1826 				ip->ip_len = 0;
1827 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
1828 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1829 			} else {
1830 				struct ip6_hdr *ip6 =
1831 				    (void *)(mtod(m0, char *) + offset);
1832 				th = (void *)(mtod(m0, char *) + hlen);
1833 
1834 				ip6->ip6_plen = 0;
1835 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
1836 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
1837 			}
1838 			hlen += th->th_off << 2;
1839 		}
1840 
1841 		if (v4) {
1842 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
1843 			cmdlen |= WTX_TCPIP_CMD_IP;
1844 		} else {
1845 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
1846 			ipcse = 0;
1847 		}
1848 		cmd |= WTX_TCPIP_CMD_TSE;
1849 		cmdlen |= WTX_TCPIP_CMD_TSE |
1850 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
1851 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
1852 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
1853 	}
1854 
1855 	/*
1856 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
1857 	 * offload feature, if we load the context descriptor, we
1858 	 * MUST provide valid values for IPCSS and TUCSS fields.
1859 	 */
1860 
1861 	ipcs = WTX_TCPIP_IPCSS(offset) |
1862 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1863 	    WTX_TCPIP_IPCSE(ipcse);
1864 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
1865 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
1866 		fields |= WTX_IXSM;
1867 	}
1868 
1869 	offset += iphl;
1870 
1871 	if (m0->m_pkthdr.csum_flags &
1872 	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
1873 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
1874 		fields |= WTX_TXSM;
1875 		tucs = WTX_TCPIP_TUCSS(offset) |
1876 		    WTX_TCPIP_TUCSO(offset +
1877 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
1878 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
1879 	} else if ((m0->m_pkthdr.csum_flags &
1880 	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
1881 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
1882 		fields |= WTX_TXSM;
1883 		tucs = WTX_TCPIP_TUCSS(offset) |
1884 		    WTX_TCPIP_TUCSO(offset +
1885 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
1886 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
1887 	} else {
1888 		/* Just initialize it to a valid TCP context. */
1889 		tucs = WTX_TCPIP_TUCSS(offset) |
1890 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
1891 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
1892 	}
1893 
1894 	/* Fill in the context descriptor. */
1895 	t = (struct livengood_tcpip_ctxdesc *)
1896 	    &sc->sc_txdescs[sc->sc_txnext];
1897 	t->tcpip_ipcs = htole32(ipcs);
1898 	t->tcpip_tucs = htole32(tucs);
1899 	t->tcpip_cmdlen = htole32(cmdlen);
1900 	t->tcpip_seg = htole32(seg);
1901 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
1902 
1903 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
1904 	txs->txs_ndesc++;
1905 
1906 	*cmdp = cmd;
1907 	*fieldsp = fields;
1908 
1909 	return (0);
1910 }
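
#if 0
/*
 * Illustrative sketch (not compiled): how the context-descriptor
 * offsets above pack for a plain Ethernet + IPv4 + TCP frame with a
 * 20-byte IP header (all byte offsets are examples, not chip
 * requirements):
 *
 *	IPCSS = 14			start of the IP header
 *	IPCSO = 14 + 10 = 24		offset of ip_sum
 *	IPCSE = 14 + 20 - 1 = 33	last byte of the IP header
 *	TUCSS = 34			start of the TCP header
 *	TUCSO = 34 + 16 = 50		offset of th_sum
 *	TUCSE = 0			checksum runs to end of packet
 *
 * The TUCS word packs analogously via WTX_TCPIP_TUCSS/TUCSO/TUCSE.
 */
static uint32_t
wm_example_ipv4_ipcs(void)
{

	return (WTX_TCPIP_IPCSS(ETHER_HDR_LEN) |
	    WTX_TCPIP_IPCSO(ETHER_HDR_LEN + offsetof(struct ip, ip_sum)) |
	    WTX_TCPIP_IPCSE(ETHER_HDR_LEN + sizeof(struct ip) - 1));
}
#endif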
1911 
1912 static void
1913 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
1914 {
1915 	struct mbuf *m;
1916 	int i;
1917 
1918 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
1919 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
1920 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
1921 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
1922 		    m->m_data, m->m_len, m->m_flags);
1923 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
1924 	    i, i == 1 ? "" : "s");
1925 }
1926 
1927 /*
1928  * wm_82547_txfifo_stall:
1929  *
1930  *	Callout used to wait for the 82547 Tx FIFO to drain,
1931  *	reset the FIFO pointers, and restart packet transmission.
1932  */
1933 static void
1934 wm_82547_txfifo_stall(void *arg)
1935 {
1936 	struct wm_softc *sc = arg;
1937 	int s;
1938 
1939 	s = splnet();
1940 
1941 	if (sc->sc_txfifo_stall) {
1942 		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
1943 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
1944 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
1945 			/*
1946 			 * Packets have drained.  Stop transmitter, reset
1947 			 * FIFO pointers, restart transmitter, and kick
1948 			 * the packet queue.
1949 			 */
1950 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
1951 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
1952 			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
1953 			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
1954 			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
1955 			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
1956 			CSR_WRITE(sc, WMREG_TCTL, tctl);
1957 			CSR_WRITE_FLUSH(sc);
1958 
1959 			sc->sc_txfifo_head = 0;
1960 			sc->sc_txfifo_stall = 0;
1961 			wm_start(&sc->sc_ethercom.ec_if);
1962 		} else {
1963 			/*
1964 			 * Still waiting for packets to drain; try again in
1965 			 * another tick.
1966 			 */
1967 			callout_schedule(&sc->sc_txfifo_ch, 1);
1968 		}
1969 	}
1970 
1971 	splx(s);
1972 }
1973 
1974 /*
1975  * wm_82547_txfifo_bugchk:
1976  *
1977  *	Check for bug condition in the 82547 Tx FIFO.  We need to
1978  *	prevent enqueueing a packet that would wrap around the end
1979  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
1980  *
1981  *	We do this by checking the amount of space before the end
1982  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
1983  *	the Tx FIFO, wait for all remaining packets to drain, reset
1984  *	the internal FIFO pointers to the beginning, and restart
1985  *	transmission on the interface.
1986  */
1987 #define	WM_FIFO_HDR		0x10
1988 #define	WM_82547_PAD_LEN	0x3e0
1989 static int
1990 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
1991 {
1992 	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
1993 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
1994 
1995 	/* Just return if already stalled. */
1996 	if (sc->sc_txfifo_stall)
1997 		return (1);
1998 
1999 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
2000 		/* Stall only occurs in half-duplex mode. */
2001 		goto send_packet;
2002 	}
2003 
2004 	if (len >= WM_82547_PAD_LEN + space) {
2005 		sc->sc_txfifo_stall = 1;
2006 		callout_schedule(&sc->sc_txfifo_ch, 1);
2007 		return (1);
2008 	}
2009 
2010  send_packet:
2011 	sc->sc_txfifo_head += len;
2012 	if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2013 		sc->sc_txfifo_head -= sc->sc_txfifo_size;
2014 
2015 	return (0);
2016 }
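
/*
 * A worked example of the check above (illustrative numbers only):
 * with an 8 KB Tx FIFO (sc_txfifo_size = 0x2000), a 1514-byte frame
 * rounds up to len = 0x600.  At sc_txfifo_head = 0x1000 there are
 * 0x1000 bytes before the wrap; 0x600 < 0x3e0 + 0x1000, so the packet
 * is sent.  At sc_txfifo_head = 0x1e00 only 0x200 bytes remain;
 * 0x600 >= 0x3e0 + 0x200, so the FIFO is stalled instead.
 */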
2017 
2018 /*
2019  * wm_start:		[ifnet interface function]
2020  *
2021  *	Start packet transmission on the interface.
2022  */
2023 static void
2024 wm_start(struct ifnet *ifp)
2025 {
2026 	struct wm_softc *sc = ifp->if_softc;
2027 	struct mbuf *m0;
2028 	struct m_tag *mtag;
2029 	struct wm_txsoft *txs;
2030 	bus_dmamap_t dmamap;
2031 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2032 	bus_addr_t curaddr;
2033 	bus_size_t seglen, curlen;
2034 	uint32_t cksumcmd;
2035 	uint8_t cksumfields;
2036 
2037 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2038 		return;
2039 
2040 	/*
2041 	 * Remember the previous number of free descriptors.
2042 	 */
2043 	ofree = sc->sc_txfree;
2044 
2045 	/*
2046 	 * Loop through the send queue, setting up transmit descriptors
2047 	 * until we drain the queue, or use up all available transmit
2048 	 * descriptors.
2049 	 */
2050 	for (;;) {
2051 		/* Grab a packet off the queue. */
2052 		IFQ_POLL(&ifp->if_snd, m0);
2053 		if (m0 == NULL)
2054 			break;
2055 
2056 		DPRINTF(WM_DEBUG_TX,
2057 		    ("%s: TX: have packet to transmit: %p\n",
2058 		    device_xname(sc->sc_dev), m0));
2059 
2060 		/* Get a work queue entry. */
2061 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2062 			wm_txintr(sc);
2063 			if (sc->sc_txsfree == 0) {
2064 				DPRINTF(WM_DEBUG_TX,
2065 				    ("%s: TX: no free job descriptors\n",
2066 					device_xname(sc->sc_dev)));
2067 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2068 				break;
2069 			}
2070 		}
2071 
2072 		txs = &sc->sc_txsoft[sc->sc_txsnext];
2073 		dmamap = txs->txs_dmamap;
2074 
2075 		use_tso = (m0->m_pkthdr.csum_flags &
2076 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2077 
2078 		/*
2079 		 * So says the Linux driver:
2080 		 * The controller does a simple calculation to make sure
2081 		 * there is enough room in the FIFO before initiating the
2082 		 * DMA for each buffer.  The calc is:
2083 		 *	4 = ceil(buffer len / MSS)
2084 		 * To make sure we don't overrun the FIFO, adjust the max
2085 		 * buffer len if the MSS drops.
2086 		 */
2087 		dmamap->dm_maxsegsz =
2088 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2089 		    ? m0->m_pkthdr.segsz << 2
2090 		    : WTX_MAX_LEN;
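		/*
		 * e.g. (illustrative): for a TSO packet with an MSS
		 * (segsz) of 1460, dm_maxsegsz is clamped to
		 * 4 * 1460 = 5840 bytes instead of WTX_MAX_LEN.
		 */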
2091 
2092 		/*
2093 		 * Load the DMA map.  If this fails, the packet either
2094 		 * didn't fit in the allotted number of segments, or we
2095 		 * were short on resources.  For the too-many-segments
2096 		 * case, we simply report an error and drop the packet,
2097 		 * since we can't sanely copy a jumbo packet to a single
2098 		 * buffer.
2099 		 */
2100 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2101 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2102 		if (error) {
2103 			if (error == EFBIG) {
2104 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2105 				log(LOG_ERR, "%s: Tx packet consumes too many "
2106 				    "DMA segments, dropping...\n",
2107 				    device_xname(sc->sc_dev));
2108 				IFQ_DEQUEUE(&ifp->if_snd, m0);
2109 				wm_dump_mbuf_chain(sc, m0);
2110 				m_freem(m0);
2111 				continue;
2112 			}
2113 			/*
2114 			 * Short on resources, just stop for now.
2115 			 */
2116 			DPRINTF(WM_DEBUG_TX,
2117 			    ("%s: TX: dmamap load failed: %d\n",
2118 			    device_xname(sc->sc_dev), error));
2119 			break;
2120 		}
2121 
2122 		segs_needed = dmamap->dm_nsegs;
2123 		if (use_tso) {
2124 			/* For sentinel descriptor; see below. */
2125 			segs_needed++;
2126 		}
2127 
2128 		/*
2129 		 * Ensure we have enough descriptors free to describe
2130 		 * the packet.  Note, we always reserve one descriptor
2131 		 * at the end of the ring due to the semantics of the
2132 		 * TDT register, plus one more in the event we need
2133 		 * to load offload context.
2134 		 */
2135 		if (segs_needed > sc->sc_txfree - 2) {
2136 			/*
2137 			 * Not enough free descriptors to transmit this
2138 			 * packet.  We haven't committed anything yet,
2139 			 * so just unload the DMA map, put the packet
2140 			 * back on the queue, and punt.  Notify the upper
2141 			 * layer that there are no more slots left.
2142 			 */
2143 			DPRINTF(WM_DEBUG_TX,
2144 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
2145 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
2146 			    segs_needed, sc->sc_txfree - 1));
2147 			ifp->if_flags |= IFF_OACTIVE;
2148 			bus_dmamap_unload(sc->sc_dmat, dmamap);
2149 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2150 			break;
2151 		}
2152 
2153 		/*
2154 		 * Check for 82547 Tx FIFO bug.  We need to do this
2155 		 * once we know we can transmit the packet, since we
2156 		 * do some internal FIFO space accounting here.
2157 		 */
2158 		if (sc->sc_type == WM_T_82547 &&
2159 		    wm_82547_txfifo_bugchk(sc, m0)) {
2160 			DPRINTF(WM_DEBUG_TX,
2161 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
2162 			    device_xname(sc->sc_dev)));
2163 			ifp->if_flags |= IFF_OACTIVE;
2164 			bus_dmamap_unload(sc->sc_dmat, dmamap);
2165 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2166 			break;
2167 		}
2168 
2169 		IFQ_DEQUEUE(&ifp->if_snd, m0);
2170 
2171 		/*
2172 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2173 		 */
2174 
2175 		DPRINTF(WM_DEBUG_TX,
2176 		    ("%s: TX: packet has %d (%d) DMA segments\n",
2177 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2178 
2179 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2180 
2181 		/*
2182 		 * Store a pointer to the packet so that we can free it
2183 		 * later.
2184 		 *
2185 		 * Initially, we consider the number of descriptors the
2186 		 * packet uses the number of DMA segments.  This may be
2187 		 * incremented by 1 if we do checksum offload (a descriptor
2188 		 * is used to set the checksum context).
2189 		 */
2190 		txs->txs_mbuf = m0;
2191 		txs->txs_firstdesc = sc->sc_txnext;
2192 		txs->txs_ndesc = segs_needed;
2193 
2194 		/* Set up offload parameters for this packet. */
2195 		if (m0->m_pkthdr.csum_flags &
2196 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
2197 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2198 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2199 			if (wm_tx_offload(sc, txs, &cksumcmd,
2200 					  &cksumfields) != 0) {
2201 				/* Error message already displayed. */
2202 				bus_dmamap_unload(sc->sc_dmat, dmamap);
2203 				continue;
2204 			}
2205 		} else {
2206 			cksumcmd = 0;
2207 			cksumfields = 0;
2208 		}
2209 
2210 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2211 
2212 		/* Sync the DMA map. */
2213 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2214 		    BUS_DMASYNC_PREWRITE);
2215 
2216 		/*
2217 		 * Initialize the transmit descriptor.
2218 		 */
2219 		for (nexttx = sc->sc_txnext, seg = 0;
2220 		     seg < dmamap->dm_nsegs; seg++) {
2221 			for (seglen = dmamap->dm_segs[seg].ds_len,
2222 			     curaddr = dmamap->dm_segs[seg].ds_addr;
2223 			     seglen != 0;
2224 			     curaddr += curlen, seglen -= curlen,
2225 			     nexttx = WM_NEXTTX(sc, nexttx)) {
2226 				curlen = seglen;
2227 
2228 				/*
2229 				 * So says the Linux driver:
2230 				 * Work around for premature descriptor
2231 				 * write-backs in TSO mode.  Append a
2232 				 * 4-byte sentinel descriptor.
2233 				 */
2234 				if (use_tso &&
2235 				    seg == dmamap->dm_nsegs - 1 &&
2236 				    curlen > 8)
2237 					curlen -= 4;
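				/*
				 * e.g. (illustrative): a final 1024-byte
				 * segment is described as 1020 bytes here;
				 * the remaining 4 bytes become the extra
				 * sentinel descriptor counted in
				 * segs_needed above.
				 */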
2238 
2239 				wm_set_dma_addr(
2240 				    &sc->sc_txdescs[nexttx].wtx_addr,
2241 				    curaddr);
2242 				sc->sc_txdescs[nexttx].wtx_cmdlen =
2243 				    htole32(cksumcmd | curlen);
2244 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2245 				    0;
2246 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2247 				    cksumfields;
2248 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2249 				lasttx = nexttx;
2250 
2251 				DPRINTF(WM_DEBUG_TX,
2252 				    ("%s: TX: desc %d: low 0x%08lx, "
2253 				     "len 0x%04x\n",
2254 				    device_xname(sc->sc_dev), nexttx,
2255 				    curaddr & 0xffffffffUL, (unsigned)curlen));
2256 			}
2257 		}
2258 
2259 		KASSERT(lasttx != -1);
2260 
2261 		/*
2262 		 * Set up the command byte on the last descriptor of
2263 		 * the packet.  If we're in the interrupt delay window,
2264 		 * delay the interrupt.
2265 		 */
2266 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
2267 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
2268 
2269 		/*
2270 		 * If VLANs are enabled and the packet has a VLAN tag, set
2271 		 * up the descriptor to encapsulate the packet for us.
2272 		 *
2273 		 * This is only valid on the last descriptor of the packet.
2274 		 */
2275 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2276 			sc->sc_txdescs[lasttx].wtx_cmdlen |=
2277 			    htole32(WTX_CMD_VLE);
2278 			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2279 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2280 		}
2281 
2282 		txs->txs_lastdesc = lasttx;
2283 
2284 		DPRINTF(WM_DEBUG_TX,
2285 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
2286 		    device_xname(sc->sc_dev),
2287 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2288 
2289 		/* Sync the descriptors we're using. */
2290 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2291 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2292 
2293 		/* Give the packet to the chip. */
2294 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2295 
2296 		DPRINTF(WM_DEBUG_TX,
2297 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2298 
2299 		DPRINTF(WM_DEBUG_TX,
2300 		    ("%s: TX: finished transmitting packet, job %d\n",
2301 		    device_xname(sc->sc_dev), sc->sc_txsnext));
2302 
2303 		/* Advance the tx pointer. */
2304 		sc->sc_txfree -= txs->txs_ndesc;
2305 		sc->sc_txnext = nexttx;
2306 
2307 		sc->sc_txsfree--;
2308 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2309 
2310 #if NBPFILTER > 0
2311 		/* Pass the packet to any BPF listeners. */
2312 		if (ifp->if_bpf)
2313 			bpf_mtap(ifp->if_bpf, m0);
2314 #endif /* NBPFILTER > 0 */
2315 	}
2316 
2317 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2318 		/* No more slots; notify upper layer. */
2319 		ifp->if_flags |= IFF_OACTIVE;
2320 	}
2321 
2322 	if (sc->sc_txfree != ofree) {
2323 		/* Set a watchdog timer in case the chip flakes out. */
2324 		ifp->if_timer = 5;
2325 	}
2326 }
2327 
2328 /*
2329  * wm_watchdog:		[ifnet interface function]
2330  *
2331  *	Watchdog timer handler.
2332  */
2333 static void
2334 wm_watchdog(struct ifnet *ifp)
2335 {
2336 	struct wm_softc *sc = ifp->if_softc;
2337 
2338 	/*
2339 	 * Since we're using delayed interrupts, sweep up
2340 	 * before we report an error.
2341 	 */
2342 	wm_txintr(sc);
2343 
2344 	if (sc->sc_txfree != WM_NTXDESC(sc)) {
2345 		log(LOG_ERR,
2346 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2347 		    device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2348 		    sc->sc_txnext);
2349 		ifp->if_oerrors++;
2350 
2351 		/* Reset the interface. */
2352 		(void) wm_init(ifp);
2353 	}
2354 
2355 	/* Try to get more packets going. */
2356 	wm_start(ifp);
2357 }
2358 
2359 /*
2360  * wm_ioctl:		[ifnet interface function]
2361  *
2362  *	Handle control requests from the operator.
2363  */
2364 static int
2365 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2366 {
2367 	struct wm_softc *sc = ifp->if_softc;
2368 	struct ifreq *ifr = (struct ifreq *) data;
2369 	int s, error;
2370 
2371 	s = splnet();
2372 
2373 	switch (cmd) {
2374 	case SIOCSIFMEDIA:
2375 	case SIOCGIFMEDIA:
2376 		/* Flow control requires full-duplex mode. */
2377 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2378 		    (ifr->ifr_media & IFM_FDX) == 0)
2379 			ifr->ifr_media &= ~IFM_ETH_FMASK;
2380 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2381 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2382 				/* We can do both TXPAUSE and RXPAUSE. */
2383 				ifr->ifr_media |=
2384 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2385 			}
2386 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2387 		}
2388 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2389 		break;
2390 	default:
2391 		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
2392 			break;
2393 
2394 		error = 0;
2395 
2396 		if (cmd == SIOCSIFCAP)
2397 			error = (*ifp->if_init)(ifp);
2398 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2399 			;
2400 		else if (ifp->if_flags & IFF_RUNNING) {
2401 			/*
2402 			 * Multicast list has changed; set the hardware filter
2403 			 * accordingly.
2404 			 */
2405 			wm_set_filter(sc);
2406 		}
2407 		break;
2408 	}
2409 
2410 	/* Try to get more packets going. */
2411 	wm_start(ifp);
2412 
2413 	splx(s);
2414 	return (error);
2415 }
2416 
2417 /*
2418  * wm_intr:
2419  *
2420  *	Interrupt service routine.
2421  */
2422 static int
2423 wm_intr(void *arg)
2424 {
2425 	struct wm_softc *sc = arg;
2426 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2427 	uint32_t icr;
2428 	int handled = 0;
2429 
2430 	while (1 /* CONSTCOND */) {
2431 		icr = CSR_READ(sc, WMREG_ICR);
2432 		if ((icr & sc->sc_icr) == 0)
2433 			break;
2434 #if 0 /*NRND > 0*/
2435 		if (RND_ENABLED(&sc->rnd_source))
2436 			rnd_add_uint32(&sc->rnd_source, icr);
2437 #endif
2438 
2439 		handled = 1;
2440 
2441 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2442 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
2443 			DPRINTF(WM_DEBUG_RX,
2444 			    ("%s: RX: got Rx intr 0x%08x\n",
2445 			    device_xname(sc->sc_dev),
2446 			    icr & (ICR_RXDMT0|ICR_RXT0)));
2447 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
2448 		}
2449 #endif
2450 		wm_rxintr(sc);
2451 
2452 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2453 		if (icr & ICR_TXDW) {
2454 			DPRINTF(WM_DEBUG_TX,
2455 			    ("%s: TX: got TXDW interrupt\n",
2456 			    device_xname(sc->sc_dev)));
2457 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
2458 		}
2459 #endif
2460 		wm_txintr(sc);
2461 
2462 		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
2463 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
2464 			wm_linkintr(sc, icr);
2465 		}
2466 
2467 		if (icr & ICR_RXO) {
2468 			ifp->if_ierrors++;
2469 #if defined(WM_DEBUG)
2470 			log(LOG_WARNING, "%s: Receive overrun\n",
2471 			    device_xname(sc->sc_dev));
2472 #endif /* defined(WM_DEBUG) */
2473 		}
2474 	}
2475 
2476 	if (handled) {
2477 		/* Try to get more packets going. */
2478 		wm_start(ifp);
2479 	}
2480 
2481 	return (handled);
2482 }
2483 
2484 /*
2485  * wm_txintr:
2486  *
2487  *	Helper; handle transmit interrupts.
2488  */
2489 static void
2490 wm_txintr(struct wm_softc *sc)
2491 {
2492 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2493 	struct wm_txsoft *txs;
2494 	uint8_t status;
2495 	int i;
2496 
2497 	ifp->if_flags &= ~IFF_OACTIVE;
2498 
2499 	/*
2500 	 * Go through the Tx list and free mbufs for those
2501 	 * frames which have been transmitted.
2502 	 */
2503 	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
2504 	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
2505 		txs = &sc->sc_txsoft[i];
2506 
2507 		DPRINTF(WM_DEBUG_TX,
2508 		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
2509 
2510 		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
2511 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2512 
2513 		status =
2514 		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
2515 		if ((status & WTX_ST_DD) == 0) {
2516 			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
2517 			    BUS_DMASYNC_PREREAD);
2518 			break;
2519 		}
2520 
2521 		DPRINTF(WM_DEBUG_TX,
2522 		    ("%s: TX: job %d done: descs %d..%d\n",
2523 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
2524 		    txs->txs_lastdesc));
2525 
2526 		/*
2527 		 * XXX We should probably be using the statistics
2528 		 * XXX registers, but I don't know if they exist
2529 		 * XXX on chips before the i82544.
2530 		 */
2531 
2532 #ifdef WM_EVENT_COUNTERS
2533 		if (status & WTX_ST_TU)
2534 			WM_EVCNT_INCR(&sc->sc_ev_tu);
2535 #endif /* WM_EVENT_COUNTERS */
2536 
2537 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
2538 			ifp->if_oerrors++;
2539 			if (status & WTX_ST_LC)
2540 				log(LOG_WARNING, "%s: late collision\n",
2541 				    device_xname(sc->sc_dev));
2542 			else if (status & WTX_ST_EC) {
2543 				ifp->if_collisions += 16;
2544 				log(LOG_WARNING, "%s: excessive collisions\n",
2545 				    device_xname(sc->sc_dev));
2546 			}
2547 		} else
2548 			ifp->if_opackets++;
2549 
2550 		sc->sc_txfree += txs->txs_ndesc;
2551 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
2552 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2553 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2554 		m_freem(txs->txs_mbuf);
2555 		txs->txs_mbuf = NULL;
2556 	}
2557 
2558 	/* Update the dirty transmit buffer pointer. */
2559 	sc->sc_txsdirty = i;
2560 	DPRINTF(WM_DEBUG_TX,
2561 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
2562 
2563 	/*
2564 	 * If there are no more pending transmissions, cancel the watchdog
2565 	 * timer.
2566 	 */
2567 	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
2568 		ifp->if_timer = 0;
2569 }
2570 
2571 /*
2572  * wm_rxintr:
2573  *
2574  *	Helper; handle receive interrupts.
2575  */
2576 static void
2577 wm_rxintr(struct wm_softc *sc)
2578 {
2579 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2580 	struct wm_rxsoft *rxs;
2581 	struct mbuf *m;
2582 	int i, len;
2583 	uint8_t status, errors;
2584 	uint16_t vlantag;
2585 
2586 	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
2587 		rxs = &sc->sc_rxsoft[i];
2588 
2589 		DPRINTF(WM_DEBUG_RX,
2590 		    ("%s: RX: checking descriptor %d\n",
2591 		    device_xname(sc->sc_dev), i));
2592 
2593 		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2594 
2595 		status = sc->sc_rxdescs[i].wrx_status;
2596 		errors = sc->sc_rxdescs[i].wrx_errors;
2597 		len = le16toh(sc->sc_rxdescs[i].wrx_len);
2598 		vlantag = sc->sc_rxdescs[i].wrx_special;
2599 
2600 		if ((status & WRX_ST_DD) == 0) {
2601 			/*
2602 			 * We have processed all of the receive descriptors.
2603 			 */
2604 			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
2605 			break;
2606 		}
2607 
2608 		if (__predict_false(sc->sc_rxdiscard)) {
2609 			DPRINTF(WM_DEBUG_RX,
2610 			    ("%s: RX: discarding contents of descriptor %d\n",
2611 			    device_xname(sc->sc_dev), i));
2612 			WM_INIT_RXDESC(sc, i);
2613 			if (status & WRX_ST_EOP) {
2614 				/* Reset our state. */
2615 				DPRINTF(WM_DEBUG_RX,
2616 				    ("%s: RX: resetting rxdiscard -> 0\n",
2617 				    device_xname(sc->sc_dev)));
2618 				sc->sc_rxdiscard = 0;
2619 			}
2620 			continue;
2621 		}
2622 
2623 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2624 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2625 
2626 		m = rxs->rxs_mbuf;
2627 
2628 		/*
2629 		 * Add a new receive buffer to the ring, unless of
2630 		 * course the length is zero. Treat the latter as a
2631 		 * failed mapping.
2632 		 */
2633 		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
2634 			/*
2635 			 * Failed, throw away what we've done so
2636 			 * far, and discard the rest of the packet.
2637 			 */
2638 			ifp->if_ierrors++;
2639 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2640 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2641 			WM_INIT_RXDESC(sc, i);
2642 			if ((status & WRX_ST_EOP) == 0)
2643 				sc->sc_rxdiscard = 1;
2644 			if (sc->sc_rxhead != NULL)
2645 				m_freem(sc->sc_rxhead);
2646 			WM_RXCHAIN_RESET(sc);
2647 			DPRINTF(WM_DEBUG_RX,
2648 			    ("%s: RX: Rx buffer allocation failed, "
2649 			    "dropping packet%s\n", device_xname(sc->sc_dev),
2650 			    sc->sc_rxdiscard ? " (discard)" : ""));
2651 			continue;
2652 		}
2653 
2654 		m->m_len = len;
2655 		sc->sc_rxlen += len;
2656 		DPRINTF(WM_DEBUG_RX,
2657 		    ("%s: RX: buffer at %p len %d\n",
2658 		    device_xname(sc->sc_dev), m->m_data, len));
2659 
2660 		/*
2661 		 * If this is not the end of the packet, keep
2662 		 * looking.
2663 		 */
2664 		if ((status & WRX_ST_EOP) == 0) {
2665 			WM_RXCHAIN_LINK(sc, m);
2666 			DPRINTF(WM_DEBUG_RX,
2667 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
2668 			    device_xname(sc->sc_dev), sc->sc_rxlen));
2669 			continue;
2670 		}
2671 
2672 		/*
2673 		 * Okay, we have the entire packet now.  The chip is
2674 		 * configured to include the FCS (not all chips can
2675 		 * be configured to strip it), so we need to trim it.
2676 		 * May need to adjust length of previous mbuf in the
2677 		 * chain if the current mbuf is too short.
2678 		 */
2679 		if (m->m_len < ETHER_CRC_LEN) {
2680 			sc->sc_rxtail->m_len -= (ETHER_CRC_LEN - m->m_len);
2681 			m->m_len = 0;
2682 		} else {
2683 			m->m_len -= ETHER_CRC_LEN;
2684 		}
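		/*
		 * e.g. (illustrative): if the final mbuf holds only 2
		 * bytes, both of them plus the last 2 bytes of the
		 * previous mbuf are FCS, so the previous mbuf is
		 * trimmed by 2 and this one is emptied.
		 */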
2685 		len = sc->sc_rxlen - ETHER_CRC_LEN;
2686 
2687 		WM_RXCHAIN_LINK(sc, m);
2688 
2689 		*sc->sc_rxtailp = NULL;
2690 		m = sc->sc_rxhead;
2691 
2692 		WM_RXCHAIN_RESET(sc);
2693 
2694 		DPRINTF(WM_DEBUG_RX,
2695 		    ("%s: RX: have entire packet, len -> %d\n",
2696 		    device_xname(sc->sc_dev), len));
2697 
2698 		/*
2699 		 * If an error occurred, update stats and drop the packet.
2700 		 */
2701 		if (errors &
2702 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
2703 			ifp->if_ierrors++;
2704 			if (errors & WRX_ER_SE)
2705 				log(LOG_WARNING, "%s: symbol error\n",
2706 				    device_xname(sc->sc_dev));
2707 			else if (errors & WRX_ER_SEQ)
2708 				log(LOG_WARNING, "%s: receive sequence error\n",
2709 				    device_xname(sc->sc_dev));
2710 			else if (errors & WRX_ER_CE)
2711 				log(LOG_WARNING, "%s: CRC error\n",
2712 				    device_xname(sc->sc_dev));
2713 			m_freem(m);
2714 			continue;
2715 		}
2716 
2717 		/*
2718 		 * No errors.  Receive the packet.
2719 		 */
2720 		m->m_pkthdr.rcvif = ifp;
2721 		m->m_pkthdr.len = len;
2722 
2723 		/*
2724 		 * If VLANs are enabled, VLAN packets have been unwrapped
2725 		 * for us.  Associate the tag with the packet.
2726 		 */
2727 		if ((status & WRX_ST_VP) != 0) {
2728 			VLAN_INPUT_TAG(ifp, m,
2729 			    le16toh(vlantag),
2730 			    continue);
2731 		}
2732 
2733 		/*
2734 		 * Set up checksum info for this packet.
2735 		 */
2736 		if ((status & WRX_ST_IXSM) == 0) {
2737 			if (status & WRX_ST_IPCS) {
2738 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
2739 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2740 				if (errors & WRX_ER_IPE)
2741 					m->m_pkthdr.csum_flags |=
2742 					    M_CSUM_IPv4_BAD;
2743 			}
2744 			if (status & WRX_ST_TCPCS) {
2745 				/*
2746 				 * Note: we don't know if this was TCP or UDP,
2747 				 * so we just set both bits, and expect the
2748 				 * upper layers to deal.
2749 				 */
2750 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
2751 				m->m_pkthdr.csum_flags |=
2752 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
2753 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
2754 				if (errors & WRX_ER_TCPE)
2755 					m->m_pkthdr.csum_flags |=
2756 					    M_CSUM_TCP_UDP_BAD;
2757 			}
2758 		}
2759 
2760 		ifp->if_ipackets++;
2761 
2762 #if NBPFILTER > 0
2763 		/* Pass this up to any BPF listeners. */
2764 		if (ifp->if_bpf)
2765 			bpf_mtap(ifp->if_bpf, m);
2766 #endif /* NBPFILTER > 0 */
2767 
2768 		/* Pass it on. */
2769 		(*ifp->if_input)(ifp, m);
2770 	}
2771 
2772 	/* Update the receive pointer. */
2773 	sc->sc_rxptr = i;
2774 
2775 	DPRINTF(WM_DEBUG_RX,
2776 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
2777 }
2778 
2779 /*
2780  * wm_linkintr:
2781  *
2782  *	Helper; handle link interrupts.
2783  */
2784 static void
2785 wm_linkintr(struct wm_softc *sc, uint32_t icr)
2786 {
2787 	uint32_t status;
2788 
2789 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
2790 		__func__));
2791 	/*
2792 	 * If we get a link status interrupt on a 1000BASE-T
2793 	 * device, just fall into the normal MII tick path.
2794 	 */
2795 	if (sc->sc_flags & WM_F_HAS_MII) {
2796 		if (icr & ICR_LSC) {
2797 			DPRINTF(WM_DEBUG_LINK,
2798 			    ("%s: LINK: LSC -> mii_tick\n",
2799 			    device_xname(sc->sc_dev)));
2800 			mii_tick(&sc->sc_mii);
2801 			if (sc->sc_type == WM_T_82543) {
2802 				int miistatus, active;
2803 
2804 				/*
2805 				 * With 82543, we need to force speed and
2806 				 * duplex on the MAC equal to what the PHY
2807 				 * speed and duplex configuration is.
2808 				 */
2809 				miistatus = sc->sc_mii.mii_media_status;
2810 
2811 				if (miistatus & IFM_ACTIVE) {
2812 					active = sc->sc_mii.mii_media_active;
2813 					sc->sc_ctrl &= ~(CTRL_SPEED_MASK
2814 					    | CTRL_FD);
2815 					switch (IFM_SUBTYPE(active)) {
2816 					case IFM_10_T:
2817 						sc->sc_ctrl |= CTRL_SPEED_10;
2818 						break;
2819 					case IFM_100_TX:
2820 						sc->sc_ctrl |= CTRL_SPEED_100;
2821 						break;
2822 					case IFM_1000_T:
2823 						sc->sc_ctrl |= CTRL_SPEED_1000;
2824 						break;
2825 					default:
2826 						/*
2827 						 * fiber?
2828 						 * Shoud not enter here.
2829 						 * Should not enter here.
2830 						printf("unknown media (%x)\n",
2831 						    active);
2832 						break;
2833 					}
2834 					if (active & IFM_FDX)
2835 						sc->sc_ctrl |= CTRL_FD;
2836 					CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2837 				}
2838 			}
2839 		} else if (icr & ICR_RXSEQ) {
2840 			DPRINTF(WM_DEBUG_LINK,
2841 			    ("%s: LINK Receive sequence error\n",
2842 			    device_xname(sc->sc_dev)));
2843 		}
2844 		return;
2845 	}
2846 
2847 	status = CSR_READ(sc, WMREG_STATUS);
2848 	if (icr & ICR_LSC) {
2849 		if (status & STATUS_LU) {
2850 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
2851 			    device_xname(sc->sc_dev),
2852 			    (status & STATUS_FD) ? "FDX" : "HDX"));
2853 			/*
2854 			 * NOTE: CTRL will update TFCE and RFCE automatically,
2855 			 * so we should update sc->sc_ctrl
2856 			 */
2857 
2858 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
2859 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2860 			sc->sc_fcrtl &= ~FCRTL_XONE;
2861 			if (status & STATUS_FD)
2862 				sc->sc_tctl |=
2863 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2864 			else
2865 				sc->sc_tctl |=
2866 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2867 			if (sc->sc_ctrl & CTRL_TFCE)
2868 				sc->sc_fcrtl |= FCRTL_XONE;
2869 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2870 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
2871 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
2872 				      sc->sc_fcrtl);
2873 			sc->sc_tbi_linkup = 1;
2874 		} else {
2875 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
2876 			    device_xname(sc->sc_dev)));
2877 			sc->sc_tbi_linkup = 0;
2878 		}
2879 		wm_tbi_set_linkled(sc);
2880 	} else if (icr & ICR_RXCFG) {
2881 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
2882 		    device_xname(sc->sc_dev)));
2883 		sc->sc_tbi_nrxcfg++;
2884 		wm_check_for_link(sc);
2885 	} else if (icr & ICR_RXSEQ) {
2886 		DPRINTF(WM_DEBUG_LINK,
2887 		    ("%s: LINK: Receive sequence error\n",
2888 		    device_xname(sc->sc_dev)));
2889 	}
2890 }
2891 
2892 /*
2893  * wm_tick:
2894  *
2895  *	One second timer, used to check link status, sweep up
2896  *	completed transmit jobs, etc.
2897  */
2898 static void
2899 wm_tick(void *arg)
2900 {
2901 	struct wm_softc *sc = arg;
2902 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2903 	int s;
2904 
2905 	s = splnet();
2906 
2907 	if (sc->sc_type >= WM_T_82542_2_1) {
2908 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2909 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2910 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2911 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2912 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2913 	}
2914 
2915 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2916 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
2917 
2918 
2919 	if (sc->sc_flags & WM_F_HAS_MII)
2920 		mii_tick(&sc->sc_mii);
2921 	else
2922 		wm_tbi_check_link(sc);
2923 
2924 	splx(s);
2925 
2926 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2927 }
2928 
2929 /*
2930  * wm_reset:
2931  *
2932  *	Reset the i82542 chip.
2933  */
2934 static void
2935 wm_reset(struct wm_softc *sc)
2936 {
2937 	uint32_t reg;
2938 
2939 	/*
2940 	 * Allocate on-chip memory according to the MTU size.
2941 	 * The Packet Buffer Allocation register must be written
2942 	 * before the chip is reset.
2943 	 */
2944 	switch (sc->sc_type) {
2945 	case WM_T_82547:
2946 	case WM_T_82547_2:
2947 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
2948 		    PBA_22K : PBA_30K;
2949 		sc->sc_txfifo_head = 0;
2950 		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
2951 		sc->sc_txfifo_size =
2952 		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
2953 		sc->sc_txfifo_stall = 0;
2954 		break;
2955 	case WM_T_82571:
2956 	case WM_T_82572:
2957 	case WM_T_80003:
2958 		sc->sc_pba = PBA_32K;
2959 		break;
2960 	case WM_T_82573:
2961 	case WM_T_82574:
2962 		sc->sc_pba = PBA_12K;
2963 		break;
2964 	case WM_T_ICH8:
2965 		sc->sc_pba = PBA_8K;
2966 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
2967 		break;
2968 	case WM_T_ICH9:
2969 	case WM_T_ICH10:
2970 		sc->sc_pba = PBA_10K;
2971 		break;
2972 	default:
2973 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
2974 		    PBA_40K : PBA_48K;
2975 		break;
2976 	}
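	/*
	 * e.g. (illustrative): on an 82547 at standard MTU, sc_pba is
	 * PBA_30K, so the Tx FIFO runs from the 30 KB mark to the
	 * 40 KB packet buffer boundary and sc_txfifo_size works out
	 * to 10 KB.
	 */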
2977 	CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
2978 
2979 	if (sc->sc_flags & WM_F_PCIE) {
2980 		int timeout = 800;
2981 
2982 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
2983 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2984 
2985 		while (timeout--) {
2986 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
2987 				break;
2988 			delay(100);
2989 		}
2990 	}
2991 
2992 	/* clear interrupt */
2993 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
2994 
2995 	/*
2996 	 * 82541 Errata 29? & 82547 Errata 28?
2997 	 * See also the description about PHY_RST bit in CTRL register
2998 	 * in 8254x_GBe_SDM.pdf.
2999 	 */
3000 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3001 		CSR_WRITE(sc, WMREG_CTRL,
3002 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3003 		delay(5000);
3004 	}
3005 
3006 	switch (sc->sc_type) {
3007 	case WM_T_82544:
3008 	case WM_T_82540:
3009 	case WM_T_82545:
3010 	case WM_T_82546:
3011 	case WM_T_82541:
3012 	case WM_T_82541_2:
3013 		/*
3014 		 * On some chipsets, a reset through a memory-mapped write
3015 		 * cycle can cause the chip to reset before completing the
3016 		 * write cycle.  This causes major headache that can be
3017 		 * avoided by issuing the reset via indirect register writes
3018 		 * through I/O space.
3019 		 *
3020 		 * So, if we successfully mapped the I/O BAR at attach time,
3021 		 * use that.  Otherwise, try our luck with a memory-mapped
3022 		 * reset.
3023 		 */
3024 		if (sc->sc_flags & WM_F_IOH_VALID)
3025 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3026 		else
3027 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3028 		break;
3029 
3030 	case WM_T_82545_3:
3031 	case WM_T_82546_3:
3032 		/* Use the shadow control register on these chips. */
3033 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3034 		break;
3035 
3036 	case WM_T_ICH8:
3037 	case WM_T_ICH9:
3038 	case WM_T_ICH10:
3039 		wm_get_swfwhw_semaphore(sc);
3040 		CSR_WRITE(sc, WMREG_CTRL, CTRL_RST | CTRL_PHY_RESET);
3041 		delay(10000);
3042 		break;
3043 	default:
3044 		/* Everything else can safely use the documented method. */
3045 		CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3046 		break;
3047 	}
3048 	delay(10000);
3049 
3050 	/* reload EEPROM */
3051 	switch (sc->sc_type) {
3052 	case WM_T_82542_2_0:
3053 	case WM_T_82542_2_1:
3054 	case WM_T_82543:
3055 	case WM_T_82544:
3056 		delay(10);
3057 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3058 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3059 		delay(2000);
3060 		break;
3061 	case WM_T_82541:
3062 	case WM_T_82541_2:
3063 	case WM_T_82547:
3064 	case WM_T_82547_2:
3065 		delay(20000);
3066 		break;
3067 	case WM_T_82573:
3068 	case WM_T_82574:
3069 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3070 			delay(10);
3071 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3072 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3073 		}
3074 		/* FALLTHROUGH */
3075 	default:
3076 		/* check EECD_EE_AUTORD */
3077 		wm_get_auto_rd_done(sc);
3078 	}
3079 
3080 	/* reload sc_ctrl */
3081 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3082 
3083 #if 0
3084 	for (i = 0; i < 1000; i++) {
3085 		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0) {
3086 			return;
3087 		}
3088 		delay(20);
3089 	}
3090 
3091 	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
3092 		log(LOG_ERR, "%s: reset failed to complete\n",
3093 		    device_xname(sc->sc_dev));
3094 #endif
3095 }
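
#if 0
/*
 * Illustrative sketch (not compiled) of the indirect reset path
 * mentioned above: the chip exposes an IOADDR/IODATA register pair
 * in its I/O BAR, so a CSR can be written without a memory-mapped
 * cycle.  The 0/4 offsets and the sc_iot/sc_ioh names are
 * assumptions here, not taken from this listing.
 */
static void
wm_io_write_example(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);	/* IOADDR */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);	/* IODATA */
}
#endif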
3096 
3097 /*
3098  * wm_init:		[ifnet interface function]
3099  *
3100  *	Initialize the interface.  Must be called at splnet().
3101  */
3102 static int
3103 wm_init(struct ifnet *ifp)
3104 {
3105 	struct wm_softc *sc = ifp->if_softc;
3106 	struct wm_rxsoft *rxs;
3107 	int i, error = 0;
3108 	uint32_t reg;
3109 
3110 	/*
3111 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3112 	 * There is a small but measurable benefit to avoiding the adjustment
3113 	 * of the descriptor so that the headers are aligned, for normal mtu,
3114 	 * on such platforms.  One possibility is that the DMA itself is
3115 	 * slightly more efficient if the front of the entire packet (instead
3116 	 * of the front of the headers) is aligned.
3117 	 *
3118 	 * Note we must always set align_tweak to 0 if we are using
3119 	 * jumbo frames.
3120 	 */
3121 #ifdef __NO_STRICT_ALIGNMENT
3122 	sc->sc_align_tweak = 0;
3123 #else
3124 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3125 		sc->sc_align_tweak = 0;
3126 	else
3127 		sc->sc_align_tweak = 2;
3128 #endif /* __NO_STRICT_ALIGNMENT */
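	/*
	 * e.g. (illustrative): with sc_align_tweak = 2 the 14-byte
	 * Ethernet header starts 2 bytes into the buffer, which puts
	 * the IP header on a 4-byte boundary for strict-alignment
	 * platforms.
	 */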
3129 
3130 	/* Cancel any pending I/O. */
3131 	wm_stop(ifp, 0);
3132 
3133 	/* update statistics before reset */
3134 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3135 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3136 
3137 	/* Reset the chip to a known state. */
3138 	wm_reset(sc);
3139 
3140 	switch (sc->sc_type) {
3141 	case WM_T_82571:
3142 	case WM_T_82572:
3143 	case WM_T_82573:
3144 	case WM_T_82574:
3145 	case WM_T_80003:
3146 	case WM_T_ICH8:
3147 	case WM_T_ICH9:
3148 	case WM_T_ICH10:
3149 		if (wm_check_mng_mode(sc) != 0)
3150 			wm_get_hw_control(sc);
3151 		break;
3152 	default:
3153 		break;
3154 	}
3155 
3156 	/* Initialize the transmit descriptor ring. */
3157 	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3158 	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3159 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3160 	sc->sc_txfree = WM_NTXDESC(sc);
3161 	sc->sc_txnext = 0;
3162 
3163 	if (sc->sc_type < WM_T_82543) {
3164 		CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
3165 		CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
3166 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3167 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3168 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3169 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3170 	} else {
3171 		CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
3172 		CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
3173 		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3174 		CSR_WRITE(sc, WMREG_TDH, 0);
3175 		CSR_WRITE(sc, WMREG_TDT, 0);
3176 		CSR_WRITE(sc, WMREG_TIDV, 375);		/* ITR / 4 */
3177 		CSR_WRITE(sc, WMREG_TADV, 375);		/* should be same */
3178 
3179 		CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3180 		    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3181 		CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3182 		    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3183 	}
3184 	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3185 	CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3186 
3187 	/* Initialize the transmit job descriptors. */
3188 	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3189 		sc->sc_txsoft[i].txs_mbuf = NULL;
3190 	sc->sc_txsfree = WM_TXQUEUELEN(sc);
3191 	sc->sc_txsnext = 0;
3192 	sc->sc_txsdirty = 0;
3193 
3194 	/*
3195 	 * Initialize the receive descriptor and receive job
3196 	 * descriptor rings.
3197 	 */
3198 	if (sc->sc_type < WM_T_82543) {
3199 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3200 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3201 		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3202 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3203 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3204 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3205 
3206 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3207 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3208 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3209 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3210 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3211 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3212 	} else {
3213 		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3214 		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3215 		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3216 		CSR_WRITE(sc, WMREG_RDH, 0);
3217 		CSR_WRITE(sc, WMREG_RDT, 0);
3218 		CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD);	/* ITR/4 */
3219 		CSR_WRITE(sc, WMREG_RADV, 375);		/* MUST be same */
3220 	}
3221 	for (i = 0; i < WM_NRXDESC; i++) {
3222 		rxs = &sc->sc_rxsoft[i];
3223 		if (rxs->rxs_mbuf == NULL) {
3224 			if ((error = wm_add_rxbuf(sc, i)) != 0) {
3225 				log(LOG_ERR, "%s: unable to allocate or map rx "
3226 				    "buffer %d, error = %d\n",
3227 				    device_xname(sc->sc_dev), i, error);
3228 				/*
3229 				 * XXX Should attempt to run with fewer receive
3230 				 * XXX buffers instead of just failing.
3231 				 */
3232 				wm_rxdrain(sc);
3233 				goto out;
3234 			}
3235 		} else
3236 			WM_INIT_RXDESC(sc, i);
3237 	}
3238 	sc->sc_rxptr = 0;
3239 	sc->sc_rxdiscard = 0;
3240 	WM_RXCHAIN_RESET(sc);
3241 
3242 	/*
3243 	 * Clear out the VLAN table -- we don't use it (yet).
3244 	 */
3245 	CSR_WRITE(sc, WMREG_VET, 0);
3246 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
3247 		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3248 
3249 	/*
3250 	 * Set up flow-control parameters.
3251 	 *
3252 	 * XXX Values could probably stand some tuning.
3253 	 */
3254 	if (sc->sc_type != WM_T_ICH8) {
3255 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3256 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3257 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3258 	}
3259 
3260 	sc->sc_fcrtl = FCRTL_DFLT;
3261 	if (sc->sc_type < WM_T_82543) {
3262 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3263 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3264 	} else {
3265 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3266 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3267 	}
3268 	CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3269 
3270 	/* Deal with VLAN enables. */
3271 	if (VLAN_ATTACHED(&sc->sc_ethercom))
3272 		sc->sc_ctrl |= CTRL_VME;
3273 	else
3274 		sc->sc_ctrl &= ~CTRL_VME;
3275 
3276 	/* Write the control registers. */
3277 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3278 	if (sc->sc_type >= WM_T_80003 && (sc->sc_flags & WM_F_HAS_MII)) {
3279 		int val;
3280 		val = CSR_READ(sc, WMREG_CTRL_EXT);
3281 		val &= ~CTRL_EXT_LINK_MODE_MASK;
3282 		CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3283 
3284 		/* Bypass RX and TX FIFO's */
3285 		wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3286 		    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
3287 		    KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3288 
3289 		wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3290 		    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3291 		    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3292 		/*
3293 		 * Set the MAC to wait the maximum time between each
3294 		 * iteration and increase the max iterations when
3295 		 * polling the PHY; this fixes erroneous timeouts at 10Mbps.
3296 		 */
3297 		wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS, 0xFFFF);
3298 		val = wm_kmrn_i80003_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
3299 		val |= 0x3F;
3300 		wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM, val);
3301 	}
3302 #if 0
3303 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3304 #endif
3305 
3306 	/*
3307 	 * Set up checksum offload parameters.
3308 	 */
3309 	reg = CSR_READ(sc, WMREG_RXCSUM);
3310 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3311 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3312 		reg |= RXCSUM_IPOFL;
3313 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3314 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3315 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3316 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3317 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
3318 
3319 	/* Reset TBI's RXCFG count */
3320 	sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
3321 
3322 	/*
3323 	 * Set up the interrupt registers.
3324 	 */
3325 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3326 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3327 	    ICR_RXO | ICR_RXT0;
3328 	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
3329 		sc->sc_icr |= ICR_RXCFG;
3330 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
3331 
3332 	/* Set up the inter-packet gap. */
3333 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
3334 
3335 	if (sc->sc_type >= WM_T_82543) {
3336 		/*
3337 		 * Set up the interrupt throttling register (units of 256ns)
3338 		 * Note that a footnote in Intel's documentation says this
3339 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
3340 		 * or 10Mbit mode.  Empirically, it appears to be the case
3341 		 * that that is also true for the 1024ns units of the other
3342 		 * interrupt-related timer registers -- so, really, we ought
3343 		 * to divide this value by 4 when the link speed is low.
3344 		 *
3345 		 * XXX implement this division at link speed change!
3346 		 */
3347 
3348 		 /*
3349 		  * For N interrupts/sec, set this value to:
3350 		  * 1000000000 / (N * 256).  Note that we set the
3351 		  * absolute and packet timer values to this value
3352 		  * divided by 4 to get "simple timer" behavior.
3353 		  */
3354 
3355 		sc->sc_itr = 1500;		/* 2604 ints/sec */
3356 		CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
3357 	}
3358 
3359 	/* Set the VLAN ethernetype. */
3360 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
3361 
3362 	/*
3363 	 * Set up the transmit control register; we start out with
3364 	 * a collision distance suitable for FDX, but update it when
3365 	 * we resolve the media type.
3366 	 */
3367 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
3368 	    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3369 	if (sc->sc_type >= WM_T_82571)
3370 		sc->sc_tctl |= TCTL_MULR;
3371 	if (sc->sc_type >= WM_T_80003)
3372 		sc->sc_tctl |= TCTL_RTLC;
3373 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3374 
3375 	/* Set the media. */
3376 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
3377 		goto out;
3378 
3379 	/*
3380 	 * Set up the receive control register; we actually program
3381 	 * the register when we set the receive filter.  Use multicast
3382 	 * address offset type 0.
3383 	 *
3384 	 * Only the i82544 has the ability to strip the incoming
3385 	 * CRC, so we don't enable that feature.
3386 	 */
3387 	sc->sc_mchash_type = 0;
3388 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
3389 	    | RCTL_MO(sc->sc_mchash_type);
3390 
3391 	/* The 82573, 82574 and ICH8 don't support jumbo frames */
3392 	if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_82574 &&
3393 	    sc->sc_type != WM_T_ICH8)
3394 		sc->sc_rctl |= RCTL_LPE;
3395 
3396 	if (MCLBYTES == 2048) {
3397 		sc->sc_rctl |= RCTL_2k;
3398 	} else {
3399 		if (sc->sc_type >= WM_T_82543) {
3400 			switch (MCLBYTES) {
3401 			case 4096:
3402 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
3403 				break;
3404 			case 8192:
3405 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
3406 				break;
3407 			case 16384:
3408 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
3409 				break;
3410 			default:
3411 				panic("wm_init: MCLBYTES %d unsupported",
3412 				    MCLBYTES);
3413 				break;
3414 			}
3415 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
3416 	}
3417 
3418 	/* Set the receive filter. */
3419 	wm_set_filter(sc);
3420 
3421 	/* Start the one second link check clock. */
3422 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3423 
3424 	/* ...all done! */
3425 	ifp->if_flags |= IFF_RUNNING;
3426 	ifp->if_flags &= ~IFF_OACTIVE;
3427 
3428  out:
3429 	if (error)
3430 		log(LOG_ERR, "%s: interface not running\n",
3431 		    device_xname(sc->sc_dev));
3432 	return (error);
3433 }
3434 
3435 /*
3436  * wm_rxdrain:
3437  *
3438  *	Drain the receive queue.
3439  */
3440 static void
3441 wm_rxdrain(struct wm_softc *sc)
3442 {
3443 	struct wm_rxsoft *rxs;
3444 	int i;
3445 
3446 	for (i = 0; i < WM_NRXDESC; i++) {
3447 		rxs = &sc->sc_rxsoft[i];
3448 		if (rxs->rxs_mbuf != NULL) {
3449 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3450 			m_freem(rxs->rxs_mbuf);
3451 			rxs->rxs_mbuf = NULL;
3452 		}
3453 	}
3454 }
3455 
3456 /*
3457  * wm_stop:		[ifnet interface function]
3458  *
3459  *	Stop transmission on the interface.
3460  */
3461 static void
3462 wm_stop(struct ifnet *ifp, int disable)
3463 {
3464 	struct wm_softc *sc = ifp->if_softc;
3465 	struct wm_txsoft *txs;
3466 	int i;
3467 
3468 	/* Stop the one second clock. */
3469 	callout_stop(&sc->sc_tick_ch);
3470 
3471 	/* Stop the 82547 Tx FIFO stall check timer. */
3472 	if (sc->sc_type == WM_T_82547)
3473 		callout_stop(&sc->sc_txfifo_ch);
3474 
3475 	if (sc->sc_flags & WM_F_HAS_MII) {
3476 		/* Down the MII. */
3477 		mii_down(&sc->sc_mii);
3478 	} else {
3479 #if 0
3480 		/* Should we clear PHY's status properly? */
3481 		wm_reset(sc);
3482 #endif
3483 	}
3484 
3485 	/* Stop the transmit and receive processes. */
3486 	CSR_WRITE(sc, WMREG_TCTL, 0);
3487 	CSR_WRITE(sc, WMREG_RCTL, 0);
3488 
3489 	/*
3490 	 * Clear the interrupt mask to ensure the device cannot assert its
3491 	 * interrupt line.
3492 	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
3493 	 * any currently pending or shared interrupt.
3494 	 */
3495 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3496 	sc->sc_icr = 0;
3497 
3498 	/* Release any queued transmit buffers. */
3499 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
3500 		txs = &sc->sc_txsoft[i];
3501 		if (txs->txs_mbuf != NULL) {
3502 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3503 			m_freem(txs->txs_mbuf);
3504 			txs->txs_mbuf = NULL;
3505 		}
3506 	}
3507 
3508 	/* Mark the interface as down and cancel the watchdog timer. */
3509 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3510 	ifp->if_timer = 0;
3511 
3512 	if (disable)
3513 		wm_rxdrain(sc);
3514 }
3515 
3516 void
3517 wm_get_auto_rd_done(struct wm_softc *sc)
3518 {
3519 	int i;
3520 
3521 	/* wait for eeprom to reload */
3522 	switch (sc->sc_type) {
3523 	case WM_T_82571:
3524 	case WM_T_82572:
3525 	case WM_T_82573:
3526 	case WM_T_82574:
3527 	case WM_T_80003:
3528 	case WM_T_ICH8:
3529 	case WM_T_ICH9:
3530 	case WM_T_ICH10:
3531 		for (i = 10; i > 0; i--) {
3532 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3533 				break;
3534 			delay(1000);
3535 		}
3536 		if (i == 0) {
3537 			log(LOG_ERR, "%s: auto read from eeprom failed to "
3538 			    "complete\n", device_xname(sc->sc_dev));
3539 		}
3540 		break;
3541 	default:
3542 		delay(5000);
3543 		break;
3544 	}
3545 
3546 	/* PHY configuration starts after EECD_EE_AUTORD is set */
3547 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574)
3548 		delay(25000);
3549 }
3550 
3551 /*
3552  * wm_acquire_eeprom:
3553  *
3554  *	Perform the EEPROM handshake required on some chips.
3555  */
3556 static int
3557 wm_acquire_eeprom(struct wm_softc *sc)
3558 {
3559 	uint32_t reg;
3560 	int x;
3561 	int ret = 0;
3562 
3563 	/* Flash-backed EEPROMs need no handshake; always succeeds. */
3564 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3565 		return 0;
3566 
3567 	if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
3568 		ret = wm_get_swfwhw_semaphore(sc);
3569 	} else if (sc->sc_flags & WM_F_SWFW_SYNC) {
3570 		/* this will also do wm_get_swsm_semaphore() if needed */
3571 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
3572 	} else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
3573 		ret = wm_get_swsm_semaphore(sc);
3574 	}
3575 
3576 	if (ret) {
3577 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
3578 			__func__);
3579 		return 1;
3580 	}
3581 
3582 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE)  {
3583 		reg = CSR_READ(sc, WMREG_EECD);
3584 
3585 		/* Request EEPROM access. */
3586 		reg |= EECD_EE_REQ;
3587 		CSR_WRITE(sc, WMREG_EECD, reg);
3588 
3589 		/* ...and wait for it to be granted. */
3590 		for (x = 0; x < 1000; x++) {
3591 			reg = CSR_READ(sc, WMREG_EECD);
3592 			if (reg & EECD_EE_GNT)
3593 				break;
3594 			delay(5);
3595 		}
3596 		if ((reg & EECD_EE_GNT) == 0) {
3597 			aprint_error_dev(sc->sc_dev,
3598 			    "could not acquire EEPROM GNT\n");
3599 			reg &= ~EECD_EE_REQ;
3600 			CSR_WRITE(sc, WMREG_EECD, reg);
3601 			if (sc->sc_flags & WM_F_SWFWHW_SYNC)
3602 				wm_put_swfwhw_semaphore(sc);
3603 			if (sc->sc_flags & WM_F_SWFW_SYNC)
3604 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
3605 			else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
3606 				wm_put_swsm_semaphore(sc);
3607 			return (1);
3608 		}
3609 	}
3610 
3611 	return (0);
3612 }
3613 
3614 /*
3615  * wm_release_eeprom:
3616  *
3617  *	Release the EEPROM mutex.
3618  */
3619 static void
3620 wm_release_eeprom(struct wm_softc *sc)
3621 {
3622 	uint32_t reg;
3623 
3624 	/* Flash-backed EEPROMs need no handshake; nothing to release. */
3625 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3626 		return;
3627 
3628 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
3629 		reg = CSR_READ(sc, WMREG_EECD);
3630 		reg &= ~EECD_EE_REQ;
3631 		CSR_WRITE(sc, WMREG_EECD, reg);
3632 	}
3633 
3634 	if (sc->sc_flags & WM_F_SWFWHW_SYNC)
3635 		wm_put_swfwhw_semaphore(sc);
3636 	if (sc->sc_flags & WM_F_SWFW_SYNC)
3637 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
3638 	else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
3639 		wm_put_swsm_semaphore(sc);
3640 }
3641 
3642 /*
3643  * wm_eeprom_sendbits:
3644  *
3645  *	Send a series of bits to the EEPROM.
3646  */
3647 static void
3648 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
3649 {
3650 	uint32_t reg;
3651 	int x;
3652 
3653 	reg = CSR_READ(sc, WMREG_EECD);
3654 
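	/*
	 * Clock each bit out MSB-first: present the bit on DI, then
	 * pulse SK high and low again with 2us hold times.
	 */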
3655 	for (x = nbits; x > 0; x--) {
3656 		if (bits & (1U << (x - 1)))
3657 			reg |= EECD_DI;
3658 		else
3659 			reg &= ~EECD_DI;
3660 		CSR_WRITE(sc, WMREG_EECD, reg);
3661 		delay(2);
3662 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
3663 		delay(2);
3664 		CSR_WRITE(sc, WMREG_EECD, reg);
3665 		delay(2);
3666 	}
3667 }
3668 
3669 /*
3670  * wm_eeprom_recvbits:
3671  *
3672  *	Receive a series of bits from the EEPROM.
3673  */
3674 static void
3675 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
3676 {
3677 	uint32_t reg, val;
3678 	int x;
3679 
3680 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
3681 
3682 	val = 0;
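	/* Clock each bit in MSB-first: raise SK, sample DO, drop SK. */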
3683 	for (x = nbits; x > 0; x--) {
3684 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
3685 		delay(2);
3686 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
3687 			val |= (1U << (x - 1));
3688 		CSR_WRITE(sc, WMREG_EECD, reg);
3689 		delay(2);
3690 	}
3691 	*valp = val;
3692 }
3693 
3694 /*
3695  * wm_read_eeprom_uwire:
3696  *
3697  *	Read a word from the EEPROM using the MicroWire protocol.
3698  */
3699 static int
3700 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3701 {
3702 	uint32_t reg, val;
3703 	int i;
3704 
3705 	for (i = 0; i < wordcnt; i++) {
3706 		/* Clear SK and DI. */
3707 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
3708 		CSR_WRITE(sc, WMREG_EECD, reg);
3709 
3710 		/* Set CHIP SELECT. */
3711 		reg |= EECD_CS;
3712 		CSR_WRITE(sc, WMREG_EECD, reg);
3713 		delay(2);
3714 
3715 		/* Shift in the READ command. */
3716 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
3717 
3718 		/* Shift in address. */
3719 		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
3720 
3721 		/* Shift out the data. */
3722 		wm_eeprom_recvbits(sc, &val, 16);
3723 		data[i] = val & 0xffff;
3724 
3725 		/* Clear CHIP SELECT. */
3726 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
3727 		CSR_WRITE(sc, WMREG_EECD, reg);
3728 		delay(2);
3729 	}
3730 
3731 	return (0);
3732 }
3733 
3734 /*
3735  * wm_spi_eeprom_ready:
3736  *
3737  *	Wait for a SPI EEPROM to be ready for commands.
3738  */
3739 static int
3740 wm_spi_eeprom_ready(struct wm_softc *sc)
3741 {
3742 	uint32_t val;
3743 	int usec;
3744 
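	/* Poll the status register; the part is ready once SPI_SR_RDY clears. */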
3745 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
3746 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
3747 		wm_eeprom_recvbits(sc, &val, 8);
3748 		if ((val & SPI_SR_RDY) == 0)
3749 			break;
3750 	}
3751 	if (usec >= SPI_MAX_RETRIES) {
3752 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
3753 		return (1);
3754 	}
3755 	return (0);
3756 }
3757 
3758 /*
3759  * wm_read_eeprom_spi:
3760  *
3761  *	Read a word from the EEPROM using the SPI protocol.
3762  */
3763 static int
3764 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3765 {
3766 	uint32_t reg, val;
3767 	int i;
3768 	uint8_t opc;
3769 
3770 	/* Clear SK and CS. */
3771 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
3772 	CSR_WRITE(sc, WMREG_EECD, reg);
3773 	delay(2);
3774 
3775 	if (wm_spi_eeprom_ready(sc))
3776 		return (1);
3777 
3778 	/* Toggle CS to flush commands. */
3779 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
3780 	delay(2);
3781 	CSR_WRITE(sc, WMREG_EECD, reg);
3782 	delay(2);
3783 
3784 	opc = SPI_OPC_READ;
3785 	if (sc->sc_ee_addrbits == 8 && word >= 128)
3786 		opc |= SPI_OPC_A8;
3787 
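	/*
	 * SPI parts are byte-addressed, so convert the word offset
	 * into a byte address (word << 1).
	 */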
3788 	wm_eeprom_sendbits(sc, opc, 8);
3789 	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
3790 
3791 	for (i = 0; i < wordcnt; i++) {
3792 		wm_eeprom_recvbits(sc, &val, 16);
3793 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
3794 	}
3795 
3796 	/* Raise CS and clear SK. */
3797 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
3798 	CSR_WRITE(sc, WMREG_EECD, reg);
3799 	delay(2);
3800 
3801 	return (0);
3802 }
3803 
3804 #define EEPROM_CHECKSUM		0xBABA
3805 #define EEPROM_SIZE		0x0040
3806 
3807 /*
3808  * wm_validate_eeprom_checksum
3809  *
3810  * The checksum is defined as the sum of the first 64 (16-bit) words and must equal EEPROM_CHECKSUM (0xBABA).
3811  */
3812 static int
3813 wm_validate_eeprom_checksum(struct wm_softc *sc)
3814 {
3815 	uint16_t checksum;
3816 	uint16_t eeprom_data;
3817 	int i;
3818 
3819 	checksum = 0;
3820 
3821 	for (i = 0; i < EEPROM_SIZE; i++) {
3822 		if (wm_read_eeprom(sc, i, 1, &eeprom_data))
3823 			return 1;
3824 		checksum += eeprom_data;
3825 	}
3826 
3827 	if (checksum != (uint16_t) EEPROM_CHECKSUM)
3828 		return 1;
3829 
3830 	return 0;
3831 }
3832 
3833 /*
3834  * wm_read_eeprom:
3835  *
3836  *	Read data from the serial EEPROM.
3837  */
3838 static int
3839 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3840 {
3841 	int rv;
3842 
3843 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
3844 		return 1;
3845 
3846 	if (wm_acquire_eeprom(sc))
3847 		return 1;
3848 
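	/*
	 * Dispatch on the access method: the ICH chips use the flash
	 * interface, EERD-capable chips the EERD register, and older
	 * parts bit-banged SPI or MicroWire.
	 */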
3849 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3850 	    || (sc->sc_type == WM_T_ICH10))
3851 		rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
3852 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
3853 		rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
3854 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
3855 		rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
3856 	else
3857 		rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
3858 
3859 	wm_release_eeprom(sc);
3860 	return rv;
3861 }
3862 
3863 static int
3864 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
3865     uint16_t *data)
3866 {
3867 	int i, eerd = 0;
3868 	int error = 0;
3869 
3870 	for (i = 0; i < wordcnt; i++) {
3871 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
3872 
3873 		CSR_WRITE(sc, WMREG_EERD, eerd);
3874 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
3875 		if (error != 0)
3876 			break;
3877 
3878 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
3879 	}
3880 
3881 	return error;
3882 }
3883 
3884 static int
3885 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
3886 {
3887 	uint32_t attempts = 100000;
3888 	uint32_t i, reg = 0;
3889 	int32_t done = -1;
3890 
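	/* Poll for DONE for up to 100000 * 5us = 500ms. */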
3891 	for (i = 0; i < attempts; i++) {
3892 		reg = CSR_READ(sc, rw);
3893 
3894 		if (reg & EERD_DONE) {
3895 			done = 0;
3896 			break;
3897 		}
3898 		delay(5);
3899 	}
3900 
3901 	return done;
3902 }
3903 
3904 /*
3905  * wm_add_rxbuf:
3906  *
3907  *	Add a receive buffer to the indicated descriptor.
3908  */
3909 static int
3910 wm_add_rxbuf(struct wm_softc *sc, int idx)
3911 {
3912 	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
3913 	struct mbuf *m;
3914 	int error;
3915 
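	/* Allocate a new mbuf and attach a cluster; either failure is ENOBUFS. */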
3916 	MGETHDR(m, M_DONTWAIT, MT_DATA);
3917 	if (m == NULL)
3918 		return (ENOBUFS);
3919 
3920 	MCLGET(m, M_DONTWAIT);
3921 	if ((m->m_flags & M_EXT) == 0) {
3922 		m_freem(m);
3923 		return (ENOBUFS);
3924 	}
3925 
3926 	if (rxs->rxs_mbuf != NULL)
3927 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3928 
3929 	rxs->rxs_mbuf = m;
3930 
3931 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3932 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3933 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
3934 	if (error) {
3935 		/* XXX XXX XXX */
3936 		aprint_error_dev(sc->sc_dev,
3937 		    "unable to load rx DMA map %d, error = %d\n",
3938 		    idx, error);
3939 		panic("wm_add_rxbuf");
3940 	}
3941 
3942 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3943 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3944 
3945 	WM_INIT_RXDESC(sc, idx);
3946 
3947 	return (0);
3948 }
3949 
3950 /*
3951  * wm_set_ral:
3952  *
3953  *	Set an entry in the receive address list.
3954  */
3955 static void
3956 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3957 {
3958 	uint32_t ral_lo, ral_hi;
3959 
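	/*
	 * Pack the 6-byte station address little-endian into the RAL
	 * register pair; RAL_AV marks the slot valid, and a NULL enaddr
	 * clears the slot.
	 */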
3960 	if (enaddr != NULL) {
3961 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3962 		    (enaddr[3] << 24);
3963 		ral_hi = enaddr[4] | (enaddr[5] << 8);
3964 		ral_hi |= RAL_AV;
3965 	} else {
3966 		ral_lo = 0;
3967 		ral_hi = 0;
3968 	}
3969 
3970 	if (sc->sc_type >= WM_T_82544) {
3971 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
3972 		    ral_lo);
3973 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
3974 		    ral_hi);
3975 	} else {
3976 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
3977 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
3978 	}
3979 }
3980 
3981 /*
3982  * wm_mchash:
3983  *
3984  *	Compute the hash of the multicast address for the 4096-bit
3985  *	multicast filter.
3986  */
3987 static uint32_t
3988 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3989 {
3990 	static const int lo_shift[4] = { 4, 3, 2, 0 };
3991 	static const int hi_shift[4] = { 4, 5, 6, 8 };
3992 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3993 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3994 	uint32_t hash;
3995 
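	/*
	 * The hash is built from the last two bytes of the Ethernet
	 * address; the shift pair selected by sc_mchash_type decides
	 * which 12 bits (10 on ICH) survive.
	 */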
3996 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3997 	    || (sc->sc_type == WM_T_ICH10)) {
3998 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3999 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
4000 		return (hash & 0x3ff);
4001 	}
4002 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
4003 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
4004 
4005 	return (hash & 0xfff);
4006 }
4007 
4008 /*
4009  * wm_set_filter:
4010  *
4011  *	Set up the receive filter.
4012  */
4013 static void
4014 wm_set_filter(struct wm_softc *sc)
4015 {
4016 	struct ethercom *ec = &sc->sc_ethercom;
4017 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4018 	struct ether_multi *enm;
4019 	struct ether_multistep step;
4020 	bus_addr_t mta_reg;
4021 	uint32_t hash, reg, bit;
4022 	int i, size;
4023 
4024 	if (sc->sc_type >= WM_T_82544)
4025 		mta_reg = WMREG_CORDOVA_MTA;
4026 	else
4027 		mta_reg = WMREG_MTA;
4028 
4029 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
4030 
4031 	if (ifp->if_flags & IFF_BROADCAST)
4032 		sc->sc_rctl |= RCTL_BAM;
4033 	if (ifp->if_flags & IFF_PROMISC) {
4034 		sc->sc_rctl |= RCTL_UPE;
4035 		goto allmulti;
4036 	}
4037 
4038 	/*
4039 	 * Set the station address in the first RAL slot, and
4040 	 * clear the remaining slots.
4041 	 */
4042 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4043 		 || (sc->sc_type == WM_T_ICH10))
4044 		size = WM_ICH8_RAL_TABSIZE;
4045 	else
4046 		size = WM_RAL_TABSIZE;
4047 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
4048 	for (i = 1; i < size; i++)
4049 		wm_set_ral(sc, NULL, i);
4050 
4051 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4052 	    || (sc->sc_type == WM_T_ICH10))
4053 		size = WM_ICH8_MC_TABSIZE;
4054 	else
4055 		size = WM_MC_TABSIZE;
4056 	/* Clear out the multicast table. */
4057 	for (i = 0; i < size; i++)
4058 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
4059 
4060 	ETHER_FIRST_MULTI(step, ec, enm);
4061 	while (enm != NULL) {
4062 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
4063 			/*
4064 			 * We must listen to a range of multicast addresses.
4065 			 * For now, just accept all multicasts, rather than
4066 			 * trying to set only those filter bits needed to match
4067 			 * the range.  (At this time, the only use of address
4068 			 * ranges is for IP multicast routing, for which the
4069 			 * range is big enough to require all bits set.)
4070 			 */
4071 			goto allmulti;
4072 		}
4073 
4074 		hash = wm_mchash(sc, enm->enm_addrlo);
4075 
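		/*
		 * The upper hash bits select a 32-bit MTA register; the
		 * low 5 bits select a bit within that register.
		 */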
4076 		reg = (hash >> 5);
4077 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4078 		    || (sc->sc_type == WM_T_ICH10))
4079 			reg &= 0x1f;
4080 		else
4081 			reg &= 0x7f;
4082 		bit = hash & 0x1f;
4083 
4084 		hash = CSR_READ(sc, mta_reg + (reg << 2));
4085 		hash |= 1U << bit;
4086 
4087 		/* XXX Hardware bug?? */
4088 		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
4089 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
4090 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4091 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
4092 		} else
4093 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4094 
4095 		ETHER_NEXT_MULTI(step, enm);
4096 	}
4097 
4098 	ifp->if_flags &= ~IFF_ALLMULTI;
4099 	goto setit;
4100 
4101  allmulti:
4102 	ifp->if_flags |= IFF_ALLMULTI;
4103 	sc->sc_rctl |= RCTL_MPE;
4104 
4105  setit:
4106 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
4107 }
4108 
4109 /*
4110  * wm_tbi_mediainit:
4111  *
4112  *	Initialize media for use on 1000BASE-X devices.
4113  */
4114 static void
4115 wm_tbi_mediainit(struct wm_softc *sc)
4116 {
4117 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4118 	const char *sep = "";
4119 
4120 	if (sc->sc_type < WM_T_82543)
4121 		sc->sc_tipg = TIPG_WM_DFLT;
4122 	else
4123 		sc->sc_tipg = TIPG_LG_DFLT;
4124 
4125 	sc->sc_tbi_anegticks = 5;
4126 
4127 	/* Initialize our media structures */
4128 	sc->sc_mii.mii_ifp = ifp;
4129 
4130 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
4131 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
4132 	    wm_tbi_mediastatus);
4133 
4134 	/*
4135 	 * SWD Pins:
4136 	 *
4137 	 *	0 = Link LED (output)
4138 	 *	1 = Loss Of Signal (input)
4139 	 */
4140 	sc->sc_ctrl |= CTRL_SWDPIO(0);
4141 	sc->sc_ctrl &= ~CTRL_SWDPIO(1);
4142 
4143 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4144 
4145 #define	ADD(ss, mm, dd)							\
4146 do {									\
4147 	aprint_normal("%s%s", sep, ss);					\
4148 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
4149 	sep = ", ";							\
4150 } while (/*CONSTCOND*/0)
4151 
4152 	aprint_normal_dev(sc->sc_dev, "");
4153 	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
4154 	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
4155 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
4156 	aprint_normal("\n");
4157 
4158 #undef ADD
4159 
4160 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
4161 }
4162 
4163 /*
4164  * wm_tbi_mediastatus:	[ifmedia interface function]
4165  *
4166  *	Get the current interface media status on a 1000BASE-X device.
4167  */
4168 static void
4169 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4170 {
4171 	struct wm_softc *sc = ifp->if_softc;
4172 	uint32_t ctrl, status;
4173 
4174 	ifmr->ifm_status = IFM_AVALID;
4175 	ifmr->ifm_active = IFM_ETHER;
4176 
4177 	status = CSR_READ(sc, WMREG_STATUS);
4178 	if ((status & STATUS_LU) == 0) {
4179 		ifmr->ifm_active |= IFM_NONE;
4180 		return;
4181 	}
4182 
4183 	ifmr->ifm_status |= IFM_ACTIVE;
4184 	ifmr->ifm_active |= IFM_1000_SX;
4185 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
4186 		ifmr->ifm_active |= IFM_FDX;
4187 	ctrl = CSR_READ(sc, WMREG_CTRL);
4188 	if (ctrl & CTRL_RFCE)
4189 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
4190 	if (ctrl & CTRL_TFCE)
4191 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
4192 }
4193 
4194 /*
4195  * wm_tbi_mediachange:	[ifmedia interface function]
4196  *
4197  *	Set hardware to newly-selected media on a 1000BASE-X device.
4198  */
4199 static int
4200 wm_tbi_mediachange(struct ifnet *ifp)
4201 {
4202 	struct wm_softc *sc = ifp->if_softc;
4203 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4204 	uint32_t status;
4205 	int i;
4206 
4207 	sc->sc_txcw = 0;
4208 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
4209 	    (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
4210 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
4211 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4212 		sc->sc_txcw |= TXCW_ANE;
4213 	} else {
4214 		/*
4215 		 * If autonegotiation is turned off, force link up and turn on
4216 		 * full duplex
4217 		 */
4218 		sc->sc_txcw &= ~TXCW_ANE;
4219 		sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
4220 		sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
4221 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4222 		delay(1000);
4223 	}
4224 
4225 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
4226 		    device_xname(sc->sc_dev),sc->sc_txcw));
4227 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
4228 	delay(10000);
4229 
4230 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
4231 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
4232 
4233 	/*
4234 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
4235 	 * optics detect a signal, 0 if they don't.
4236 	 */
4237 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
4238 		/* Have signal; wait for the link to come up. */
4239 
4240 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4241 			/*
4242 			 * Reset the link, and let autonegotiation do its thing
4243 			 */
4244 			sc->sc_ctrl |= CTRL_LRST;
4245 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4246 			delay(1000);
4247 			sc->sc_ctrl &= ~CTRL_LRST;
4248 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4249 			delay(1000);
4250 		}
4251 
4252 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
4253 			delay(10000);
4254 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
4255 				break;
4256 		}
4257 
4258 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
4259 			    device_xname(sc->sc_dev),i));
4260 
4261 		status = CSR_READ(sc, WMREG_STATUS);
4262 		DPRINTF(WM_DEBUG_LINK,
4263 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
4264 			device_xname(sc->sc_dev),status, STATUS_LU));
4265 		if (status & STATUS_LU) {
4266 			/* Link is up. */
4267 			DPRINTF(WM_DEBUG_LINK,
4268 			    ("%s: LINK: set media -> link up %s\n",
4269 			    device_xname(sc->sc_dev),
4270 			    (status & STATUS_FD) ? "FDX" : "HDX"));
4271 
4272 			/*
4273 			 * NOTE: CTRL will update TFCE and RFCE automatically,
4274 			 * so we should update sc->sc_ctrl
4275 			 */
4276 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4277 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4278 			sc->sc_fcrtl &= ~FCRTL_XONE;
4279 			if (status & STATUS_FD)
4280 				sc->sc_tctl |=
4281 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4282 			else
4283 				sc->sc_tctl |=
4284 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4285 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
4286 				sc->sc_fcrtl |= FCRTL_XONE;
4287 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4288 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
4289 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
4290 				      sc->sc_fcrtl);
4291 			sc->sc_tbi_linkup = 1;
4292 		} else {
4293 			if (i == WM_LINKUP_TIMEOUT)
4294 				wm_check_for_link(sc);
4295 			/* Link is down. */
4296 			DPRINTF(WM_DEBUG_LINK,
4297 			    ("%s: LINK: set media -> link down\n",
4298 			    device_xname(sc->sc_dev)));
4299 			sc->sc_tbi_linkup = 0;
4300 		}
4301 	} else {
4302 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
4303 		    device_xname(sc->sc_dev)));
4304 		sc->sc_tbi_linkup = 0;
4305 	}
4306 
4307 	wm_tbi_set_linkled(sc);
4308 
4309 	return (0);
4310 }
4311 
4312 /*
4313  * wm_tbi_set_linkled:
4314  *
4315  *	Update the link LED on 1000BASE-X devices.
4316  */
4317 static void
4318 wm_tbi_set_linkled(struct wm_softc *sc)
4319 {
4320 
4321 	if (sc->sc_tbi_linkup)
4322 		sc->sc_ctrl |= CTRL_SWDPIN(0);
4323 	else
4324 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
4325 
4326 	/* 82540 or newer devices are active low */
4327 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
4328 
4329 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4330 }
4331 
4332 /*
4333  * wm_tbi_check_link:
4334  *
4335  *	Check the link on 1000BASE-X devices.
4336  */
4337 static void
4338 wm_tbi_check_link(struct wm_softc *sc)
4339 {
4340 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4341 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4342 	uint32_t rxcw, ctrl, status;
4343 
4344 	status = CSR_READ(sc, WMREG_STATUS);
4345 
4346 	rxcw = CSR_READ(sc, WMREG_RXCW);
4347 	ctrl = CSR_READ(sc, WMREG_CTRL);
4348 
4349 	/* set link status */
4350 	if ((status & STATUS_LU) == 0) {
4351 		DPRINTF(WM_DEBUG_LINK,
4352 		    ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
4353 		sc->sc_tbi_linkup = 0;
4354 	} else if (sc->sc_tbi_linkup == 0) {
4355 		DPRINTF(WM_DEBUG_LINK,
4356 		    ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
4357 		    (status & STATUS_FD) ? "FDX" : "HDX"));
4358 		sc->sc_tbi_linkup = 1;
4359 	}
4360 
4361 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
4362 	    && ((status & STATUS_LU) == 0)) {
4363 		sc->sc_tbi_linkup = 0;
4364 		if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
4365 			/* RXCFG storm! */
4366 			DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
4367 				sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
4368 			wm_init(ifp);
4369 			wm_start(ifp);
4370 		} else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4371 			/* If the timer expired, retry autonegotiation */
4372 			if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
4373 				DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
4374 				sc->sc_tbi_ticks = 0;
4375 				/*
4376 				 * Reset the link, and let autonegotiation do
4377 				 * its thing
4378 				 */
4379 				sc->sc_ctrl |= CTRL_LRST;
4380 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4381 				delay(1000);
4382 				sc->sc_ctrl &= ~CTRL_LRST;
4383 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4384 				delay(1000);
4385 				CSR_WRITE(sc, WMREG_TXCW,
4386 				    sc->sc_txcw & ~TXCW_ANE);
4387 				CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
4388 			}
4389 		}
4390 	}
4391 
4392 	wm_tbi_set_linkled(sc);
4393 }
4394 
4395 /*
4396  * wm_gmii_reset:
4397  *
4398  *	Reset the PHY.
4399  */
4400 static void
4401 wm_gmii_reset(struct wm_softc *sc)
4402 {
4403 	uint32_t reg;
4404 	int func = 0; /* XXX gcc */
4405 
4406 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4407 	    || (sc->sc_type == WM_T_ICH10)) {
4408 		if (wm_get_swfwhw_semaphore(sc)) {
4409 			aprint_error_dev(sc->sc_dev,
4410 			    "%s: failed to get semaphore\n", __func__);
4411 			return;
4412 		}
4413 	}
4414 	if (sc->sc_type == WM_T_80003) {
4415 		func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
4416 		if (wm_get_swfw_semaphore(sc,
4417 			func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4418 			aprint_error_dev(sc->sc_dev,
4419 			    "%s: failed to get semaphore\n", __func__);
4420 			return;
4421 		}
4422 	}
4423 	if (sc->sc_type >= WM_T_82544) {
4424 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
4425 		delay(20000);
4426 
4427 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4428 		delay(20000);
4429 	} else {
4430 		/*
4431 		 * With 82543, we need to force speed and duplex on the MAC
4432 		 * equal to what the PHY speed and duplex configuration is.
4433 		 * In addition, we need to perform a hardware reset on the PHY
4434 		 * to take it out of reset.
4435 		 */
4436 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
4437 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4438 
4439 		/* The PHY reset pin is active-low. */
4440 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
4441 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
4442 		    CTRL_EXT_SWDPIN(4));
4443 		reg |= CTRL_EXT_SWDPIO(4);
4444 
4445 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
4446 		delay(10);
4447 
4448 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4449 		delay(10000);
4450 
4451 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
4452 		delay(10);
4453 #if 0
4454 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
4455 #endif
4456 	}
4457 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4458 	    || (sc->sc_type == WM_T_ICH10))
4459 		wm_put_swfwhw_semaphore(sc);
4460 	if (sc->sc_type == WM_T_80003)
4461 		wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4462 }
4463 
4464 /*
4465  * wm_gmii_mediainit:
4466  *
4467  *	Initialize media for use on 1000BASE-T devices.
4468  */
4469 static void
4470 wm_gmii_mediainit(struct wm_softc *sc)
4471 {
4472 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4473 
4474 	/* We have MII. */
4475 	sc->sc_flags |= WM_F_HAS_MII;
4476 
4477 	if (sc->sc_type >= WM_T_80003)
4478 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
4479 	else
4480 		sc->sc_tipg = TIPG_1000T_DFLT;
4481 
4482 	/*
4483 	 * Let the chip set speed/duplex on its own based on
4484 	 * signals from the PHY.
4485 	 * XXXbouyer - I'm not sure this is right for the 80003,
4486 	 * the em driver only sets CTRL_SLU here - but it seems to work.
4487 	 */
4488 	sc->sc_ctrl |= CTRL_SLU;
4489 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4490 
4491 	/* Initialize our media structures and probe the GMII. */
4492 	sc->sc_mii.mii_ifp = ifp;
4493 
4494 	if (sc->sc_type == WM_T_ICH10) {
4495 		sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
4496 		sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
4497 	} else if (sc->sc_type >= WM_T_80003) {
4498 		sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
4499 		sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
4500 	} else if (sc->sc_type >= WM_T_82544) {
4501 		sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
4502 		sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
4503 	} else {
4504 		sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
4505 		sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
4506 	}
4507 	sc->sc_mii.mii_statchg = wm_gmii_statchg;
4508 
4509 	wm_gmii_reset(sc);
4510 
4511 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
4512 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
4513 	    wm_gmii_mediastatus);
4514 
4515 	mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
4516 	    MII_OFFSET_ANY, MIIF_DOPAUSE);
4517 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
4518 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
4519 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
4520 	} else
4521 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
4522 }
4523 
4524 /*
4525  * wm_gmii_mediastatus:	[ifmedia interface function]
4526  *
4527  *	Get the current interface media status on a 1000BASE-T device.
4528  */
4529 static void
4530 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4531 {
4532 	struct wm_softc *sc = ifp->if_softc;
4533 
4534 	ether_mediastatus(ifp, ifmr);
4535 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) |
4536 			   sc->sc_flowflags;
4537 }
4538 
4539 /*
4540  * wm_gmii_mediachange:	[ifmedia interface function]
4541  *
4542  *	Set hardware to newly-selected media on a 1000BASE-T device.
4543  */
4544 static int
4545 wm_gmii_mediachange(struct ifnet *ifp)
4546 {
4547 	struct wm_softc *sc = ifp->if_softc;
4548 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4549 	int rc;
4550 
4551 	if ((ifp->if_flags & IFF_UP) == 0)
4552 		return 0;
4553 
4554 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
4555 	sc->sc_ctrl |= CTRL_SLU;
4556 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
4557 	    || (sc->sc_type > WM_T_82543)) {
4558 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
4559 	} else {
4560 		sc->sc_ctrl &= ~CTRL_ASDE;
4561 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
4562 		if (ife->ifm_media & IFM_FDX)
4563 			sc->sc_ctrl |= CTRL_FD;
4564 		switch (IFM_SUBTYPE(ife->ifm_media)) {
4565 		case IFM_10_T:
4566 			sc->sc_ctrl |= CTRL_SPEED_10;
4567 			break;
4568 		case IFM_100_TX:
4569 			sc->sc_ctrl |= CTRL_SPEED_100;
4570 			break;
4571 		case IFM_1000_T:
4572 			sc->sc_ctrl |= CTRL_SPEED_1000;
4573 			break;
4574 		default:
4575 			panic("wm_gmii_mediachange: bad media 0x%x",
4576 			    ife->ifm_media);
4577 		}
4578 	}
4579 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4580 	if (sc->sc_type <= WM_T_82543)
4581 		wm_gmii_reset(sc);
4582 
4583 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
4584 		return 0;
4585 	return rc;
4586 }
4587 
4588 #define	MDI_IO		CTRL_SWDPIN(2)
4589 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
4590 #define	MDI_CLK		CTRL_SWDPIN(3)
4591 
4592 static void
4593 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
4594 {
4595 	uint32_t i, v;
4596 
4597 	v = CSR_READ(sc, WMREG_CTRL);
4598 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
4599 	v |= MDI_DIR | CTRL_SWDPIO(3);
4600 
4601 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
4602 		if (data & i)
4603 			v |= MDI_IO;
4604 		else
4605 			v &= ~MDI_IO;
4606 		CSR_WRITE(sc, WMREG_CTRL, v);
4607 		delay(10);
4608 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4609 		delay(10);
4610 		CSR_WRITE(sc, WMREG_CTRL, v);
4611 		delay(10);
4612 	}
4613 }
4614 
4615 static uint32_t
4616 i82543_mii_recvbits(struct wm_softc *sc)
4617 {
4618 	uint32_t v, i, data = 0;
4619 
4620 	v = CSR_READ(sc, WMREG_CTRL);
4621 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
4622 	v |= CTRL_SWDPIO(3);
4623 
4624 	CSR_WRITE(sc, WMREG_CTRL, v);
4625 	delay(10);
4626 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4627 	delay(10);
4628 	CSR_WRITE(sc, WMREG_CTRL, v);
4629 	delay(10);
4630 
4631 	for (i = 0; i < 16; i++) {
4632 		data <<= 1;
4633 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4634 		delay(10);
4635 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
4636 			data |= 1;
4637 		CSR_WRITE(sc, WMREG_CTRL, v);
4638 		delay(10);
4639 	}
4640 
4641 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4642 	delay(10);
4643 	CSR_WRITE(sc, WMREG_CTRL, v);
4644 	delay(10);
4645 
4646 	return (data);
4647 }
4648 
4649 #undef MDI_IO
4650 #undef MDI_DIR
4651 #undef MDI_CLK
4652 
4653 /*
4654  * wm_gmii_i82543_readreg:	[mii interface function]
4655  *
4656  *	Read a PHY register on the GMII (i82543 version).
4657  */
4658 static int
4659 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
4660 {
4661 	struct wm_softc *sc = device_private(self);
4662 	int rv;
4663 
4664 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
4665 	i82543_mii_sendbits(sc, reg | (phy << 5) |
4666 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
4667 	rv = i82543_mii_recvbits(sc) & 0xffff;
4668 
4669 	DPRINTF(WM_DEBUG_GMII,
4670 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
4671 	    device_xname(sc->sc_dev), phy, reg, rv));
4672 
4673 	return (rv);
4674 }
4675 
4676 /*
4677  * wm_gmii_i82543_writereg:	[mii interface function]
4678  *
4679  *	Write a PHY register on the GMII (i82543 version).
4680  */
4681 static void
4682 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
4683 {
4684 	struct wm_softc *sc = device_private(self);
4685 
4686 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
4687 	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
4688 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
4689 	    (MII_COMMAND_START << 30), 32);
4690 }
4691 
4692 /*
4693  * wm_gmii_i82544_readreg:	[mii interface function]
4694  *
4695  *	Read a PHY register on the GMII.
4696  */
4697 static int
4698 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
4699 {
4700 	struct wm_softc *sc = device_private(self);
4701 	uint32_t mdic = 0;
4702 	int i, rv;
4703 
4704 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
4705 	    MDIC_REGADD(reg));
4706 
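	/* Poll for MDIC_READY: 320 iterations at 10us is a ~3.2ms timeout. */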
4707 	for (i = 0; i < 320; i++) {
4708 		mdic = CSR_READ(sc, WMREG_MDIC);
4709 		if (mdic & MDIC_READY)
4710 			break;
4711 		delay(10);
4712 	}
4713 
4714 	if ((mdic & MDIC_READY) == 0) {
4715 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
4716 		    device_xname(sc->sc_dev), phy, reg);
4717 		rv = 0;
4718 	} else if (mdic & MDIC_E) {
4719 #if 0 /* This is normal if no PHY is present. */
4720 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
4721 		    device_xname(sc->sc_dev), phy, reg);
4722 #endif
4723 		rv = 0;
4724 	} else {
4725 		rv = MDIC_DATA(mdic);
4726 		if (rv == 0xffff)
4727 			rv = 0;
4728 	}
4729 
4730 	return (rv);
4731 }
4732 
4733 /*
4734  * wm_gmii_i82544_writereg:	[mii interface function]
4735  *
4736  *	Write a PHY register on the GMII.
4737  */
4738 static void
4739 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
4740 {
4741 	struct wm_softc *sc = device_private(self);
4742 	uint32_t mdic = 0;
4743 	int i;
4744 
4745 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
4746 	    MDIC_REGADD(reg) | MDIC_DATA(val));
4747 
4748 	for (i = 0; i < 320; i++) {
4749 		mdic = CSR_READ(sc, WMREG_MDIC);
4750 		if (mdic & MDIC_READY)
4751 			break;
4752 		delay(10);
4753 	}
4754 
4755 	if ((mdic & MDIC_READY) == 0)
4756 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
4757 		    device_xname(sc->sc_dev), phy, reg);
4758 	else if (mdic & MDIC_E)
4759 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
4760 		    device_xname(sc->sc_dev), phy, reg);
4761 }
4762 
4763 /*
4764  * wm_gmii_i80003_readreg:	[mii interface function]
4765  *
4766  *	Read a PHY register over the Kumeran interface.
4767  * This could be handled by the PHY layer if we didn't have to lock the
4768  * resource ...
4769  */
4770 static int
4771 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
4772 {
4773 	struct wm_softc *sc = device_private(self);
4774 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4775 	int rv;
4776 
4777 	if (phy != 1) /* only one PHY on kumeran bus */
4778 		return 0;
4779 
4780 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4781 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4782 		    __func__);
4783 		return 0;
4784 	}
4785 
4786 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
4787 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
4788 		    reg >> GG82563_PAGE_SHIFT);
4789 	} else {
4790 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
4791 		    reg >> GG82563_PAGE_SHIFT);
4792 	}
4793 	/* Wait a further 200us to work around a bug in the ready bit of the MDIC register */
4794 	delay(200);
4795 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
4796 	delay(200);
4797 
4798 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4799 	return (rv);
4800 }
4801 
4802 /*
4803  * wm_gmii_i80003_writereg:	[mii interface function]
4804  *
4805  *	Write a PHY register over the Kumeran interface.
4806  * This could be handled by the PHY layer if we didn't have to lock the
4807  * resource ...
4808  */
4809 static void
4810 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
4811 {
4812 	struct wm_softc *sc = device_private(self);
4813 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4814 
4815 	if (phy != 1) /* only one PHY on kumeran bus */
4816 		return;
4817 
4818 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4819 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4820 		    __func__);
4821 		return;
4822 	}
4823 
4824 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
4825 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
4826 		    reg >> GG82563_PAGE_SHIFT);
4827 	} else {
4828 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
4829 		    reg >> GG82563_PAGE_SHIFT);
4830 	}
4831 	/* Wait a further 200us to work around a bug in the ready bit of the MDIC register */
4832 	delay(200);
4833 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
4834 	delay(200);
4835 
4836 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4837 }
4838 
4839 /*
4840  * wm_gmii_bm_readreg:	[mii interface function]
4841  *
4842  *	Read a PHY register on the BM PHY (used on ICH10).
4843  * This could be handled by the PHY layer if we didn't have to lock the
4844  * resource ...
4845  */
4846 static int
4847 wm_gmii_bm_readreg(device_t self, int phy, int reg)
4848 {
4849 	struct wm_softc *sc = device_private(self);
4850 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4851 	int rv;
4852 
4853 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4854 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4855 		    __func__);
4856 		return 0;
4857 	}
4858 
4859 	if (reg > GG82563_MAX_REG_ADDRESS) {
4860 		if (phy == 1)
4861 			wm_gmii_i82544_writereg(self, phy, 0x1f,
4862 			    reg);
4863 		else
4864 			wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
4865 			    reg >> GG82563_PAGE_SHIFT);
4866 
4867 	}
4868 
4869 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
4870 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4871 	return (rv);
4872 }
4873 
4874 /*
4875  * wm_gmii_bm_writereg:	[mii interface function]
4876  *
4877  *	Write a PHY register on the BM PHY (used on ICH10).
4878  * This could be handled by the PHY layer if we didn't have to lock the
4879  * resource ...
4880  */
4881 static void
4882 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
4883 {
4884 	struct wm_softc *sc = device_private(self);
4885 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4886 
4887 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4888 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4889 		    __func__);
4890 		return;
4891 	}
4892 
4893 	if (reg > GG82563_MAX_REG_ADDRESS) {
4894 		if (phy == 1)
4895 			wm_gmii_i82544_writereg(self, phy, 0x1f,
4896 			    reg);
4897 		else
4898 			wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
4899 			    reg >> GG82563_PAGE_SHIFT);
4900 
4901 	}
4902 
4903 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
4904 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4905 }
4906 
4907 /*
4908  * wm_gmii_statchg:	[mii interface function]
4909  *
4910  *	Callback from MII layer when media changes.
4911  */
4912 static void
4913 wm_gmii_statchg(device_t self)
4914 {
4915 	struct wm_softc *sc = device_private(self);
4916 	struct mii_data *mii = &sc->sc_mii;
4917 
4918 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
4919 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4920 	sc->sc_fcrtl &= ~FCRTL_XONE;
4921 
4922 	/*
4923 	 * Get flow control negotiation result.
4924 	 */
4925 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
4926 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
4927 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
4928 		mii->mii_media_active &= ~IFM_ETH_FMASK;
4929 	}
4930 
4931 	if (sc->sc_flowflags & IFM_FLOW) {
4932 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
4933 			sc->sc_ctrl |= CTRL_TFCE;
4934 			sc->sc_fcrtl |= FCRTL_XONE;
4935 		}
4936 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
4937 			sc->sc_ctrl |= CTRL_RFCE;
4938 	}
4939 
4940 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
4941 		DPRINTF(WM_DEBUG_LINK,
4942 		    ("%s: LINK: statchg: FDX\n", device_xname(sc->sc_dev)));
4943 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4944 	} else  {
4945 		DPRINTF(WM_DEBUG_LINK,
4946 		    ("%s: LINK: statchg: HDX\n", device_xname(sc->sc_dev)));
4947 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4948 	}
4949 
4950 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4951 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4952 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
4953 						 : WMREG_FCRTL, sc->sc_fcrtl);
4954 	if (sc->sc_type >= WM_T_80003) {
4955 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
4956 		case IFM_1000_T:
4957 			wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
4958 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
4959 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
4960 			break;
4961 		default:
4962 			wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
4963 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
4964 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
4965 			break;
4966 		}
4967 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4968 	}
4969 }
4970 
4971 /*
4972  * wm_kmrn_i80003_readreg:
4973  *
4974  *	Read a Kumeran register
4975  */
4976 static int
4977 wm_kmrn_i80003_readreg(struct wm_softc *sc, int reg)
4978 {
4979 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4980 	int rv;
4981 
4982 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4983 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4984 		    __func__);
4985 		return 0;
4986 	}
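	/*
	 * Writing the register offset with KUMCTRLSTA_REN set triggers
	 * a read; the result appears in the low 16 bits shortly after.
	 */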
4987 
4988 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
4989 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
4990 	    KUMCTRLSTA_REN);
4991 	delay(2);
4992 
4993 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
4994 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4995 	return (rv);
4996 }
4997 
4998 /*
4999  * wm_kmrn_i80003_writereg:
5000  *
5001  *	Write a Kumeran register
5002  */
5003 static void
5004 wm_kmrn_i80003_writereg(struct wm_softc *sc, int reg, int val)
5005 {
5006 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
5007 
5008 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
5009 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5010 		    __func__);
5011 		return;
5012 	}
5013 
5014 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
5015 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
5016 	    (val & KUMCTRLSTA_MASK));
5017 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
5018 }
5019 
5020 static int
5021 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
5022 {
5023 	uint32_t eecd = 0;
5024 
5025 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574) {
5026 		eecd = CSR_READ(sc, WMREG_EECD);
5027 
5028 		/* Isolate bits 15 & 16 */
5029 		eecd = ((eecd >> 15) & 0x03);
5030 
5031 		/* If both bits are set, device is Flash type */
5032 		if (eecd == 0x03) {
5033 			return 0;
5034 		}
5035 	}
5036 	return 1;
5037 }
5038 
5039 static int
5040 wm_get_swsm_semaphore(struct wm_softc *sc)
5041 {
5042 	int32_t timeout;
5043 	uint32_t swsm;
5044 
5045 	/* Get the FW semaphore. */
5046 	timeout = 1000 + 1; /* XXX */
5047 	while (timeout) {
5048 		swsm = CSR_READ(sc, WMREG_SWSM);
5049 		swsm |= SWSM_SWESMBI;
5050 		CSR_WRITE(sc, WMREG_SWSM, swsm);
5051 		/* if we managed to set the bit we got the semaphore. */
5052 		swsm = CSR_READ(sc, WMREG_SWSM);
5053 		if (swsm & SWSM_SWESMBI)
5054 			break;
5055 
5056 		delay(50);
5057 		timeout--;
5058 	}
5059 
5060 	if (timeout == 0) {
5061 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM semaphore\n");
5062 		/* Release semaphores */
5063 		wm_put_swsm_semaphore(sc);
5064 		return 1;
5065 	}
5066 	return 0;
5067 }
5068 
5069 static void
5070 wm_put_swsm_semaphore(struct wm_softc *sc)
5071 {
5072 	uint32_t swsm;
5073 
5074 	swsm = CSR_READ(sc, WMREG_SWSM);
5075 	swsm &= ~(SWSM_SWESMBI);
5076 	CSR_WRITE(sc, WMREG_SWSM, swsm);
5077 }
5078 
5079 static int
5080 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
5081 {
5082 	uint32_t swfw_sync;
5083 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
5084 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
5085 	int timeout;
5086 
5087 	for (timeout = 0; timeout < 200; timeout++) {
5088 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5089 			if (wm_get_swsm_semaphore(sc)) {
5090 				aprint_error_dev(sc->sc_dev,
5091 				    "%s: failed to get semaphore\n",
5092 				    __func__);
5093 				return 1;
5094 			}
5095 		}
5096 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
5097 		if ((swfw_sync & (swmask | fwmask)) == 0) {
5098 			swfw_sync |= swmask;
5099 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
5100 			if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5101 				wm_put_swsm_semaphore(sc);
5102 			return 0;
5103 		}
5104 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5105 			wm_put_swsm_semaphore(sc);
5106 		delay(5000);
5107 	}
5108 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
5109 	    device_xname(sc->sc_dev), mask, swfw_sync);
5110 	return 1;
5111 }
5112 
5113 static void
5114 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
5115 {
5116 	uint32_t swfw_sync;
5117 
5118 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5119 		while (wm_get_swsm_semaphore(sc) != 0)
5120 			continue;
5121 	}
5122 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
5123 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
5124 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
5125 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5126 		wm_put_swsm_semaphore(sc);
5127 }
5128 
5129 static int
5130 wm_get_swfwhw_semaphore(struct wm_softc *sc)
5131 {
5132 	uint32_t ext_ctrl;
5133 	int timeout;
5134 
5135 	for (timeout = 0; timeout < 200; timeout++) {
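		/*
		 * Set SWFLAG and read it back; we own the semaphore only
		 * if the bit sticks.
		 */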
5136 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5137 		ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
5138 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
5139 
5140 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5141 		if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
5142 			return 0;
5143 		delay(5000);
5144 	}
5145     printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
5146 	    device_xname(sc->sc_dev), ext_ctrl);
5147 	return 1;
5148 }
5149 
5150 static void
5151 wm_put_swfwhw_semaphore(struct wm_softc *sc)
5152 {
5153 	uint32_t ext_ctrl;
5154 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5155 	ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
5156 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
5157 }
5158 
5159 static int
5160 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
5161 {
5162 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
5163 	uint8_t bank_high_byte;
5164 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
5165 
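	/*
	 * On ICH10 the valid bank is found from the NVM signature byte
	 * of each bank: the bank whose top two bits read 10b is valid.
	 */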
5166 	if (sc->sc_type != WM_T_ICH10) {
5167 		/* Value of bit 22 corresponds to the flash bank we're on. */
5168 		*bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
5169 	} else {
5170 		wm_read_ich8_byte(sc, act_offset, &bank_high_byte);
5171 		if ((bank_high_byte & 0xc0) == 0x80)
5172 			*bank = 0;
5173 		else {
5174 			wm_read_ich8_byte(sc, act_offset + bank1_offset,
5175 			    &bank_high_byte);
5176 			if ((bank_high_byte & 0xc0) == 0x80)
5177 				*bank = 1;
5178 			else {
5179 				aprint_error_dev(sc->sc_dev,
5180 				    "EEPROM not present\n");
5181 				return -1;
5182 			}
5183 		}
5184 	}
5185 
5186 	return 0;
5187 }
5188 
5189 /******************************************************************************
5190  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
5191  * register.
5192  *
5193  * sc - Struct containing variables accessed by shared code
5194  * offset - offset of word in the EEPROM to read
5195  * data - word read from the EEPROM
5196  * words - number of words to read
5197  *****************************************************************************/
5198 static int
5199 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
5200 {
5201     int32_t  error = 0;
5202     uint32_t flash_bank = 0;
5203     uint32_t act_offset = 0;
5204     uint32_t bank_offset = 0;
5205     uint16_t word = 0;
5206     uint16_t i = 0;
5207 
5208     /* We need to know which is the valid flash bank.  In the event
5209      * that we didn't allocate eeprom_shadow_ram, we may not be
5210      * managing flash_bank.  So it cannot be trusted and needs
5211      * to be updated with each read.
5212      */
5213     error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
5214     if (error) {
5215 	    aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
5216 		    __func__);
5217         return error;
5218     }
5219 
5220     /* Adjust offset appropriately if we're on bank 1 - adjust for word size */
5221     bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
5222 
5223     error = wm_get_swfwhw_semaphore(sc);
5224     if (error) {
5225 	    aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5226 		__func__);
5227         return error;
5228     }
5229 
5230     for (i = 0; i < words; i++) {
5231             /* The NVM part needs a byte offset, hence * 2 */
5232             act_offset = bank_offset + ((offset + i) * 2);
5233             error = wm_read_ich8_word(sc, act_offset, &word);
5234             if (error) {
5235 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
5236 		    __func__);
5237                 break;
5238 	    }
5239             data[i] = word;
5240     }
5241 
5242     wm_put_swfwhw_semaphore(sc);
5243     return error;
5244 }
5245 
5246 /******************************************************************************
5247  * This function does initial flash setup so that a new read/write/erase cycle
5248  * can be started.
5249  *
5250  * sc - The pointer to the hw structure
5251  ****************************************************************************/
5252 static int32_t
5253 wm_ich8_cycle_init(struct wm_softc *sc)
5254 {
5255     uint16_t hsfsts;
5256     int32_t error = 1;
5257     int32_t i     = 0;
5258 
5259     hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5260 
5261     /* The Flash Descriptor Valid bit must be set in Hw status before any access. */
5262     if ((hsfsts & HSFSTS_FLDVAL) == 0) {
5263         return error;
5264     }
5265 
5266     /* Clear FCERR in Hw status by writing 1 */
5267     /* Clear DAEL in Hw status by writing a 1 */
5268     hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
5269 
5270     ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
5271 
5272     /* Either we should have a hardware SPI cycle-in-progress bit to check
5273      * against before starting a new cycle, or the FDONE bit should be
5274      * changed in the hardware so that it reads 1 after a hardware reset,
5275      * which can then be used to indicate whether a cycle is in progress
5276      * or has completed.  We should also have some software semaphore
5277      * mechanism to guard FDONE or the cycle-in-progress bit, so that
5278      * accesses by two threads are serialized, or some other way to keep
5279      * two threads from starting a cycle at the same time. */
5280 
5281     if ((hsfsts & HSFSTS_FLINPRO) == 0) {
5282         /* There is no cycle running at present, so we can start a cycle */
5283         /* Begin by setting Flash Cycle Done. */
5284         hsfsts |= HSFSTS_DONE;
5285         ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
5286         error = 0;
5287     } else {
5288         /* Otherwise poll for some time so the current cycle has a
5289          * chance to end before giving up. */
5290         for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
5291             hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5292             if ((hsfsts & HSFSTS_FLINPRO) == 0) {
5293                 error = 0;
5294                 break;
5295             }
5296             delay(1);
5297         }
5298         if (error == 0) {
5299             /* The previous cycle completed before we gave up; now
5300              * set the Flash Cycle Done bit. */
5301             hsfsts |= HSFSTS_DONE;
5302             ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
5303         }
5304     }
5305     return error;
5306 }
5307 
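/*
 * How the flash helpers fit together (a sketch of the read path,
 * mirroring wm_read_ich8_data() below):
 *
 *	wm_ich8_cycle_init(sc);		clear stale FCERR/DAEL and wait
 *					for any in-flight cycle to finish
 *	... program HSFCTL (byte count, cycle type) and FADDR ...
 *	wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
 *	... on success, read the result from FDATA0 ...
 */
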
5308 /******************************************************************************
5309  * This function starts a flash cycle and waits for its completion.
5310  * sc - The pointer to the hw structure
5311  * timeout - maximum number of 1us polls to wait for the cycle to finish
5312  ****************************************************************************/
5313 static int32_t
5314 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
5315 {
5316     uint16_t hsflctl;
5317     uint16_t hsfsts;
5318     int32_t error = 1;
5319     uint32_t i = 0;
5320 
5321     /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
5322     hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
5323     hsflctl |= HSFCTL_GO;
5324     ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
5325 
5326     /* wait till FDONE bit is set to 1 */
5327     do {
5328         hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5329         if (hsfsts & HSFSTS_DONE)
5330             break;
5331         delay(1);
5332         i++;
5333     } while (i < timeout);
5334     if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0) {
5335         error = 0;
5336     }
5337     return error;
5338 }
5339 
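/*
 * Note on the timeout argument above: each poll iteration delays one
 * microsecond, so a caller passing ICH_FLASH_COMMAND_TIMEOUT bounds the
 * wait at roughly that many microseconds (plus register access time).
 */
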
5340 /******************************************************************************
5341  * Reads a byte or word from the NVM using the ICH8 flash access registers.
5342  *
5343  * sc - The pointer to the hw structure
5344  * index - The index of the byte or word to read.
5345  * size - Size of data to read, 1=byte 2=word
5346  * data - Pointer to the word to store the value read.
5347  *****************************************************************************/
5348 static int32_t
5349 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
5350                      uint32_t size, uint16_t *data)
5351 {
5352     uint16_t hsfsts;
5353     uint16_t hsflctl;
5354     uint32_t flash_linear_address;
5355     uint32_t flash_data = 0;
5356     int32_t error = 1;
5357     int32_t count = 0;
5358 
5359     if (size < 1 || size > 2 || data == NULL ||
5360         index > ICH_FLASH_LINEAR_ADDR_MASK)
5361         return error;
5362 
5363     flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
5364                            sc->sc_ich8_flash_base;
5365 
5366     do {
5367         delay(1);
5368         /* Prepare the flash controller for a new cycle */
5369         error = wm_ich8_cycle_init(sc);
5370         if (error)
5371             break;
5372 
5373         hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
5374         /* The BCOUNT field holds size - 1: 0 means 1 byte, 1 means 2 bytes. */
5375         hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT) & HSFCTL_BCOUNT_MASK;
5376         hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
5377         ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
5378 
5379         /* Write the low 24 bits of the index into the Flash Linear
5380          * Address field of the Flash Address register. */
5381         /* TODO: TBD maybe check the index against the size of flash */
5382 
5383         ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
5384 
5385         error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
5386 
5387         /* If FCERR is set, clear it and retry the whole sequence a
5388          * few more times; otherwise read out the Flash Data0 register,
5389          * least significant byte first. */
5390         if (error == 0) {
5391             flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
5392             if (size == 1) {
5393                 *data = (uint8_t)(flash_data & 0x000000FF);
5394             } else if (size == 2) {
5395                 *data = (uint16_t)(flash_data & 0x0000FFFF);
5396             }
5397             break;
5398         } else {
5399             /* If we've gotten here, things are probably completely
5400              * hosed, but if the error condition is detected, it won't
5401              * hurt to retry, up to ICH_FLASH_CYCLE_REPEAT_COUNT times.
5402              */
5403             hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5404             if (hsfsts & HSFSTS_ERR) {
5405                 /* Repeat for some time before giving up. */
5406                 continue;
5407             } else if ((hsfsts & HSFSTS_DONE) == 0) {
5408                 break;
5409             }
5410         }
5411     } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
5412 
5413     return error;
5414 }
5415 
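/*
 * Example of the FDATA0 extraction above (the value is illustrative):
 * if the controller returns flash_data == 0x1234ABCD, a size-1 read
 * stores *data = 0xCD and a size-2 read stores *data = 0xABCD.
 */
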
5416 /******************************************************************************
5417  * Reads a single byte from the NVM using the ICH8 flash access registers.
5418  *
5419  * sc - pointer to the wm_softc structure
5420  * index - The index of the byte to read.
5421  * data - Pointer to a byte to store the value read.
5422  *****************************************************************************/
5423 static int32_t
5424 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t *data)
5425 {
5426     int32_t status;
5427     uint16_t word = 0;
5428 
5429     status = wm_read_ich8_data(sc, index, 1, &word);
5430     if (status == 0) {
5431         *data = (uint8_t)word;
5432     }
5433 
5434     return status;
5435 }
5436 
5437 /******************************************************************************
5438  * Reads a word from the NVM using the ICH8 flash access registers.
5439  *
5440  * sc - pointer to the wm_softc structure
5441  * index - The starting byte index of the word to read.
5442  * data - Pointer to a word to store the value read.
5443  *****************************************************************************/
5444 static int32_t
5445 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
5446 {
5447     int32_t status;
5448 
5449     status = wm_read_ich8_data(sc, index, 2, data);
5450     return status;
5451 }
5452 
5453 static int
5454 wm_check_mng_mode(struct wm_softc *sc)
5455 {
5456 	int rv;
5457 
5458 	switch (sc->sc_type) {
5459 	case WM_T_ICH8:
5460 	case WM_T_ICH9:
5461 	case WM_T_ICH10:
5462 		rv = wm_check_mng_mode_ich8lan(sc);
5463 		break;
5464 #if 0
5465 	case WM_T_82574:
5466 		/*
5467 		 * The function is provided in the em driver, but it's
5468 		 * not used there. Why?
5469 		 */
5470 		rv = wm_check_mng_mode_82574(sc);
5471 		break;
5472 #endif
5473 	case WM_T_82571:
5474 	case WM_T_82572:
5475 	case WM_T_82573:
5476 	case WM_T_80003:
5477 		rv = wm_check_mng_mode_generic(sc);
5478 		break;
5479 	default:
5480 		/* nothing to do */
5481 		rv = 0;
5482 		break;
5483 	}
5484 
5485 	return rv;
5486 }
5487 
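/*
 * Typical use (a sketch; the surrounding logic is hypothetical, not
 * from this driver): callers would gate manageability-sensitive work
 * on the result, e.g.
 *
 *	if (wm_check_mng_mode(sc) != 0) {
 *		... firmware manageability is active; avoid disturbing
 *		    the PHY or the NVM ...
 *	}
 */
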
5488 static int
5489 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
5490 {
5491 	uint32_t fwsm;
5492 
5493 	fwsm = CSR_READ(sc, WMREG_FWSM);
5494 
5495 	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
5496 		return 1;
5497 
5498 	return 0;
5499 }
5500 
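/*
 * The test above compares the whole FWSM mode field in place.  An
 * equivalent formulation that extracts the field first (a sketch,
 * assuming the mode value fits within the masked field):
 *
 *	mode = (fwsm & FWSM_MODE_MASK) >> FWSM_MODE_SHIFT;
 *	return (mode == MNG_ICH_IAMT_MODE);
 */
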
5501 #if 0
5502 static int
5503 wm_check_mng_mode_82574(struct wm_softc *sc)
5504 {
5505 	uint16_t data;
5506 
5507 	wm_read_eeprom(sc, NVM_INIT_CONTROL2_REG, 1, &data);
5508 
5509 	if ((data & NVM_INIT_CTRL2_MNGM) != 0)
5510 		return 1;
5511 
5512 	return 0;
5513 }
5514 #endif
5515 
5516 static int
5517 wm_check_mng_mode_generic(struct wm_softc *sc)
5518 {
5519 	uint32_t fwsm;
5520 
5521 	fwsm = CSR_READ(sc, WMREG_FWSM);
5522 
5523 	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
5524 		return 1;
5525 
5526 	return 0;
5527 }
5528 
5529 static void
5530 wm_get_hw_control(struct wm_softc *sc)
5531 {
5532 	uint32_t reg;
5533 
5534 	switch (sc->sc_type) {
5535 	case WM_T_82573:
5536 #if 0
5537 	case WM_T_82574:
5538 		/*
5539 		 * FreeBSD's em driver has a function for the 82574 to check
5540 		 * the management mode, but it's not used. Why?
5541 		 */
5542 #endif
5543 		reg = CSR_READ(sc, WMREG_SWSM);
5544 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
5545 		break;
5546 	case WM_T_82571:
5547 	case WM_T_82572:
5548 	case WM_T_80003:
5549 	case WM_T_ICH8:
5550 	case WM_T_ICH9:
5551 	case WM_T_ICH10:
5552 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
5553 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
5554 		break;
5555 	default:
5556 		break;
5557 	}
5558 }
5559 
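/*
 * Sketch of the inverse operation (hypothetical; this revision provides
 * no such helper): releasing hardware control would clear the same
 * DRV_LOAD bits that wm_get_hw_control() sets, handing the device back
 * to the firmware.
 */
#if 0
static void
wm_release_hw_control(struct wm_softc *sc)
{
	uint32_t reg;

	switch (sc->sc_type) {
	case WM_T_82573:
		reg = CSR_READ(sc, WMREG_SWSM);
		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
		break;
	default:
		break;
	}
}
#endif
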
5560 /* XXX Currently TBI only */
5561 static int
5562 wm_check_for_link(struct wm_softc *sc)
5563 {
5564 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5565 	uint32_t rxcw;
5566 	uint32_t ctrl;
5567 	uint32_t status;
5568 	uint32_t sig;
5569 
5570 	rxcw = CSR_READ(sc, WMREG_RXCW);
5571 	ctrl = CSR_READ(sc, WMREG_CTRL);
5572 	status = CSR_READ(sc, WMREG_STATUS);
5573 
5574 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
5575 
5576 	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
5577 		device_xname(sc->sc_dev), __func__,
5578 		((ctrl & CTRL_SWDPIN(1)) == sig),
5579 		((status & STATUS_LU) != 0),
5580 		((rxcw & RXCW_C) != 0)
5581 		    ));
5582 
5583 	/*
5584 	 * SWDPIN   LU RXCW
5585 	 *      0    0    0
5586 	 *      0    0    1	(should not happen)
5587 	 *      0    1    0	(should not happen)
5588 	 *      0    1    1	(should not happen)
5589 	 *      1    0    0	Disable autonego and force linkup
5590 	 *      1    0    1	got /C/ but not linkup yet
5591 	 *      1    1    0	(linkup)
5592 	 *      1    1    1	If IFM_AUTO, back to autonego
5593 	 *
5594 	 */
5595 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
5596 	    && ((status & STATUS_LU) == 0)
5597 	    && ((rxcw & RXCW_C) == 0)) {
5598 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
5599 			__func__));
5600 		sc->sc_tbi_linkup = 0;
5601 		/* Disable auto-negotiation in the TXCW register */
5602 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
5603 
5604 		/*
5605 		 * Force link-up and also force full-duplex.
5606 		 *
5607 		 * NOTE: the hardware updates TFCE and RFCE in CTRL
5608 		 * automatically, so we must keep sc->sc_ctrl in sync.
5609 		 */
5610 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
5611 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5612 	} else if (((status & STATUS_LU) != 0)
5613 	    && ((rxcw & RXCW_C) != 0)
5614 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
5615 		sc->sc_tbi_linkup = 1;
5616 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
5617 			__func__));
5618 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5619 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
5620 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
5621 	    && ((rxcw & RXCW_C) != 0)) {
5622 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
5623 	} else {
5624 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
5625 			status));
5626 	}
5627 
5628 	return 0;
5629 }
5630