1 /*	$NetBSD: if_wm.c,v 1.158 2008/04/10 19:13:37 cegger Exp $	*/
2 
3 /*
4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*******************************************************************************
39 
40   Copyright (c) 2001-2005, Intel Corporation
41   All rights reserved.
42 
43   Redistribution and use in source and binary forms, with or without
44   modification, are permitted provided that the following conditions are met:
45 
46    1. Redistributions of source code must retain the above copyright notice,
47       this list of conditions and the following disclaimer.
48 
49    2. Redistributions in binary form must reproduce the above copyright
50       notice, this list of conditions and the following disclaimer in the
51       documentation and/or other materials provided with the distribution.
52 
53    3. Neither the name of the Intel Corporation nor the names of its
54       contributors may be used to endorse or promote products derived from
55       this software without specific prior written permission.
56 
57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67   POSSIBILITY OF SUCH DAMAGE.
68 
69 *******************************************************************************/
70 /*
71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72  *
73  * TODO (in order of importance):
74  *
75  *	- Rework how parameters are loaded from the EEPROM.
76  *	- Figure out what to do with the i82545GM and i82546GB
77  *	  SERDES controllers.
78  *	- Fix hw VLAN assist.
79  */
80 
81 #include <sys/cdefs.h>
82 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.158 2008/04/10 19:13:37 cegger Exp $");
83 
84 #include "bpfilter.h"
85 #include "rnd.h"
86 
87 #include <sys/param.h>
88 #include <sys/systm.h>
89 #include <sys/callout.h>
90 #include <sys/mbuf.h>
91 #include <sys/malloc.h>
92 #include <sys/kernel.h>
93 #include <sys/socket.h>
94 #include <sys/ioctl.h>
95 #include <sys/errno.h>
96 #include <sys/device.h>
97 #include <sys/queue.h>
98 #include <sys/syslog.h>
99 
100 #include <uvm/uvm_extern.h>		/* for PAGE_SIZE */
101 
102 #if NRND > 0
103 #include <sys/rnd.h>
104 #endif
105 
106 #include <net/if.h>
107 #include <net/if_dl.h>
108 #include <net/if_media.h>
109 #include <net/if_ether.h>
110 
111 #if NBPFILTER > 0
112 #include <net/bpf.h>
113 #endif
114 
115 #include <netinet/in.h>			/* XXX for struct ip */
116 #include <netinet/in_systm.h>		/* XXX for struct ip */
117 #include <netinet/ip.h>			/* XXX for struct ip */
118 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
119 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
120 
121 #include <sys/bus.h>
122 #include <sys/intr.h>
123 #include <machine/endian.h>
124 
125 #include <dev/mii/mii.h>
126 #include <dev/mii/miivar.h>
127 #include <dev/mii/mii_bitbang.h>
128 #include <dev/mii/ikphyreg.h>
129 
130 #include <dev/pci/pcireg.h>
131 #include <dev/pci/pcivar.h>
132 #include <dev/pci/pcidevs.h>
133 
134 #include <dev/pci/if_wmreg.h>
135 
136 #ifdef WM_DEBUG
137 #define	WM_DEBUG_LINK		0x01
138 #define	WM_DEBUG_TX		0x02
139 #define	WM_DEBUG_RX		0x04
140 #define	WM_DEBUG_GMII		0x08
141 int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK|WM_DEBUG_GMII;
142 
143 #define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
144 #else
145 #define	DPRINTF(x, y)	/* nothing */
146 #endif /* WM_DEBUG */
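
/*
 * Usage sketch (editor's illustration, not in the original): callers
 * pass a category bit and a parenthesized printf() argument list, so
 * the whole call compiles away when WM_DEBUG is not defined:
 */
#if 0
	DPRINTF(WM_DEBUG_TX, ("%s: TDT -> %d\n",
	    device_xname(&sc->sc_dev), sc->sc_txnext));
#endif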
147 
148 /*
149  * Transmit descriptor list size.  Due to errata, we can only have
150  * 256 hardware descriptors in the ring on < 82544, but we use 4096
151  * on >= 82544.  We tell the upper layers that they can queue a lot
152  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
153  * of them at a time.
154  *
155  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
156  * chains containing many small mbufs have been observed in zero-copy
157  * situations with jumbo frames.
158  */
159 #define	WM_NTXSEGS		256
160 #define	WM_IFQUEUELEN		256
161 #define	WM_TXQUEUELEN_MAX	64
162 #define	WM_TXQUEUELEN_MAX_82547	16
163 #define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
164 #define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
165 #define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
166 #define	WM_NTXDESC_82542	256
167 #define	WM_NTXDESC_82544	4096
168 #define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
169 #define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
170 #define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
171 #define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
172 #define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
173 
174 #define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */
175 
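/*
 * Editor's illustration (not part of the driver): because the ring and
 * queue sizes above are powers of two, WM_NEXTTX()/WM_NEXTTXS() advance
 * and wrap an index with a single AND instead of a modulo.  A minimal
 * sketch, assuming the 4096-entry i82544 ring:
 */
#if 0
	int idx = WM_NTXDESC_82544 - 1;			/* last slot, 4095 */
	idx = (idx + 1) & (WM_NTXDESC_82544 - 1);	/* wraps to slot 0 */
#endif
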
176 /*
177  * Receive descriptor list size.  We have one Rx buffer for normal
178  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
179  * packet.  We allocate 256 receive descriptors, each with a 2k
180  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
181  */
182 #define	WM_NRXDESC		256
183 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
184 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
185 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
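
/*
 * Worked example (editor's note): a 9018-byte jumbo frame in 2048-byte
 * (MCLBYTES) buffers needs howmany(9018, 2048) = 5 descriptors, so the
 * 256-descriptor ring holds about 256 / 5 = 51 in-flight jumbo frames,
 * matching the "room for 50 jumbo packets" figure above.
 */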
186 
187 /*
188  * Control structures are DMA'd to the i82542 chip.  We allocate them in
189  * a single clump that maps to a single DMA segment to make several things
190  * easier.
191  */
192 struct wm_control_data_82544 {
193 	/*
194 	 * The receive descriptors.
195 	 */
196 	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
197 
198 	/*
199 	 * The transmit descriptors.  Put these at the end, because
200 	 * we might use a smaller number of them.
201 	 */
202 	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
203 };
204 
205 struct wm_control_data_82542 {
206 	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
207 	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
208 };
209 
210 #define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
211 #define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
212 #define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
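
/*
 * Editor's sketch (illustrative only): with everything in one clump,
 * a descriptor's bus address is just the clump's base address plus an
 * offsetof()-computed offset -- no per-descriptor DMA maps needed:
 */
#if 0
	bus_addr_t txd_addr = sc->sc_cddma + WM_CDTXOFF(10);
	/* WM_CDTXADDR(sc, 10), defined below, wraps this computation. */
#endif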
213 
214 /*
215  * Software state for transmit jobs.
216  */
217 struct wm_txsoft {
218 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
219 	bus_dmamap_t txs_dmamap;	/* our DMA map */
220 	int txs_firstdesc;		/* first descriptor in packet */
221 	int txs_lastdesc;		/* last descriptor in packet */
222 	int txs_ndesc;			/* # of descriptors used */
223 };
224 
225 /*
226  * Software state for receive buffers.  Each descriptor gets a
227  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
228  * more than one buffer, we chain them together.
229  */
230 struct wm_rxsoft {
231 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
232 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
233 };
234 
235 typedef enum {
236 	WM_T_unknown		= 0,
237 	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
238 	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
239 	WM_T_82543,			/* i82543 */
240 	WM_T_82544,			/* i82544 */
241 	WM_T_82540,			/* i82540 */
242 	WM_T_82545,			/* i82545 */
243 	WM_T_82545_3,			/* i82545 3.0+ */
244 	WM_T_82546,			/* i82546 */
245 	WM_T_82546_3,			/* i82546 3.0+ */
246 	WM_T_82541,			/* i82541 */
247 	WM_T_82541_2,			/* i82541 2.0+ */
248 	WM_T_82547,			/* i82547 */
249 	WM_T_82547_2,			/* i82547 2.0+ */
250 	WM_T_82571,			/* i82571 */
251 	WM_T_82572,			/* i82572 */
252 	WM_T_82573,			/* i82573 */
253 	WM_T_80003,			/* i80003 */
254 	WM_T_ICH8,			/* ICH8 LAN */
255 	WM_T_ICH9,			/* ICH9 LAN */
256 } wm_chip_type;
257 
258 /*
259  * Software state per device.
260  */
261 struct wm_softc {
262 	struct device sc_dev;		/* generic device information */
263 	bus_space_tag_t sc_st;		/* bus space tag */
264 	bus_space_handle_t sc_sh;	/* bus space handle */
265 	bus_space_tag_t sc_iot;		/* I/O space tag */
266 	bus_space_handle_t sc_ioh;	/* I/O space handle */
267 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
268 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
269 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
270 	struct ethercom sc_ethercom;	/* ethernet common data */
271 	pci_chipset_tag_t sc_pc;
272 	pcitag_t sc_pcitag;
273 
274 	wm_chip_type sc_type;		/* chip type */
275 	int sc_flags;			/* flags; see below */
276 	int sc_bus_speed;		/* PCI/PCIX bus speed */
277 	int sc_pcix_offset;		/* PCIX capability register offset */
278 	int sc_flowflags;		/* 802.3x flow control flags */
279 
280 	void *sc_ih;			/* interrupt cookie */
281 
282 	int sc_ee_addrbits;		/* EEPROM address bits */
283 
284 	struct mii_data sc_mii;		/* MII/media information */
285 
286 	callout_t sc_tick_ch;		/* tick callout */
287 
288 	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
289 #define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
290 
291 	int		sc_align_tweak;
292 
293 	/*
294 	 * Software state for the transmit and receive descriptors.
295 	 */
296 	int			sc_txnum;	/* must be a power of two */
297 	struct wm_txsoft	sc_txsoft[WM_TXQUEUELEN_MAX];
298 	struct wm_rxsoft	sc_rxsoft[WM_NRXDESC];
299 
300 	/*
301 	 * Control data structures.
302 	 */
303 	int			sc_ntxdesc;	/* must be a power of two */
304 	struct wm_control_data_82544 *sc_control_data;
305 #define	sc_txdescs	sc_control_data->wcd_txdescs
306 #define	sc_rxdescs	sc_control_data->wcd_rxdescs
307 
308 #ifdef WM_EVENT_COUNTERS
309 	/* Event counters. */
310 	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
311 	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
312 	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
313 	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
314 	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
315 	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
316 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
317 
318 	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
319 	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
320 	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
321 	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
322 	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
323 	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
324 	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
325 	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */
326 
327 	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
328 	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */
329 
330 	struct evcnt sc_ev_tu;		/* Tx underrun */
331 
332 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
333 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
334 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
335 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
336 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
337 #endif /* WM_EVENT_COUNTERS */
338 
339 	bus_addr_t sc_tdt_reg;		/* offset of TDT register */
340 
341 	int	sc_txfree;		/* number of free Tx descriptors */
342 	int	sc_txnext;		/* next ready Tx descriptor */
343 
344 	int	sc_txsfree;		/* number of free Tx jobs */
345 	int	sc_txsnext;		/* next free Tx job */
346 	int	sc_txsdirty;		/* dirty Tx jobs */
347 
348 	/* These 5 variables are used only on the 82547. */
349 	int	sc_txfifo_size;		/* Tx FIFO size */
350 	int	sc_txfifo_head;		/* current head of FIFO */
351 	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
352 	int	sc_txfifo_stall;	/* Tx FIFO is stalled */
353 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
354 
355 	bus_addr_t sc_rdt_reg;		/* offset of RDT register */
356 
357 	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
358 	int	sc_rxdiscard;
359 	int	sc_rxlen;
360 	struct mbuf *sc_rxhead;
361 	struct mbuf *sc_rxtail;
362 	struct mbuf **sc_rxtailp;
363 
364 	uint32_t sc_ctrl;		/* prototype CTRL register */
365 #if 0
366 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
367 #endif
368 	uint32_t sc_icr;		/* prototype interrupt bits */
369 	uint32_t sc_itr;		/* prototype intr throttling reg */
370 	uint32_t sc_tctl;		/* prototype TCTL register */
371 	uint32_t sc_rctl;		/* prototype RCTL register */
372 	uint32_t sc_txcw;		/* prototype TXCW register */
373 	uint32_t sc_tipg;		/* prototype TIPG register */
374 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
375 	uint32_t sc_pba;		/* prototype PBA register */
376 
377 	int sc_tbi_linkup;		/* TBI link status */
378 	int sc_tbi_anstate;		/* autonegotiation state */
379 
380 	int sc_mchash_type;		/* multicast filter offset */
381 
382 #if NRND > 0
383 	rndsource_element_t rnd_source;	/* random source */
384 #endif
385 	int sc_ich8_flash_base;
386 	int sc_ich8_flash_bank_size;
387 };
388 
389 #define	WM_RXCHAIN_RESET(sc)						\
390 do {									\
391 	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
392 	*(sc)->sc_rxtailp = NULL;					\
393 	(sc)->sc_rxlen = 0;						\
394 } while (/*CONSTCOND*/0)
395 
396 #define	WM_RXCHAIN_LINK(sc, m)						\
397 do {									\
398 	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
399 	(sc)->sc_rxtailp = &(m)->m_next;				\
400 } while (/*CONSTCOND*/0)
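
/*
 * Usage sketch (editor's illustration): the tail pointer lets the Rx
 * path append each 2K fragment of a multi-buffer packet in O(1):
 */
#if 0
	WM_RXCHAIN_RESET(sc);		/* sc_rxhead = NULL, length 0 */
	WM_RXCHAIN_LINK(sc, m1);	/* first fragment becomes the head */
	WM_RXCHAIN_LINK(sc, m2);	/* m1->m_next = m2, tail = m2 */
#endif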
401 
402 /* sc_flags */
403 #define	WM_F_HAS_MII		0x0001	/* has MII */
404 #define	WM_F_EEPROM_HANDSHAKE	0x0002	/* requires EEPROM handshake */
405 #define	WM_F_EEPROM_SEMAPHORE	0x0004	/* EEPROM with semaphore */
406 #define	WM_F_EEPROM_EERDEEWR	0x0008	/* EEPROM access via EERD/EEWR */
407 #define	WM_F_EEPROM_SPI		0x0010	/* EEPROM is SPI */
408 #define	WM_F_EEPROM_FLASH	0x0020	/* EEPROM is FLASH */
409 #define	WM_F_EEPROM_INVALID	0x0040	/* EEPROM not present (bad checksum) */
410 #define	WM_F_IOH_VALID		0x0080	/* I/O handle is valid */
411 #define	WM_F_BUS64		0x0100	/* bus is 64-bit */
412 #define	WM_F_PCIX		0x0200	/* bus is PCI-X */
413 #define	WM_F_CSA		0x0400	/* bus is CSA */
414 #define	WM_F_PCIE		0x0800	/* bus is PCI-Express */
415 #define WM_F_SWFW_SYNC		0x1000  /* Software-Firmware synchronisation */
416 #define WM_F_SWFWHW_SYNC	0x2000  /* Software-Firmware-Hardware synchronisation (ICH8/ICH9) */
417 
418 #ifdef WM_EVENT_COUNTERS
419 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
420 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
421 #else
422 #define	WM_EVCNT_INCR(ev)	/* nothing */
423 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
424 #endif
425 
426 #define	CSR_READ(sc, reg)						\
427 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
428 #define	CSR_WRITE(sc, reg, val)						\
429 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
430 #define	CSR_WRITE_FLUSH(sc)						\
431 	(void) CSR_READ((sc), WMREG_STATUS)
432 
433 #define ICH8_FLASH_READ32(sc, reg) \
434 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
435 #define ICH8_FLASH_WRITE32(sc, reg, data) \
436 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
437 
438 #define ICH8_FLASH_READ16(sc, reg) \
439 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
440 #define ICH8_FLASH_WRITE16(sc, reg, data) \
441 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
442 
443 #define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
444 #define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))
445 
446 #define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
447 #define	WM_CDTXADDR_HI(sc, x)						\
448 	(sizeof(bus_addr_t) == 8 ?					\
449 	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)
450 
451 #define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
452 #define	WM_CDRXADDR_HI(sc, x)						\
453 	(sizeof(bus_addr_t) == 8 ?					\
454 	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
455 
456 #define	WM_CDTXSYNC(sc, x, n, ops)					\
457 do {									\
458 	int __x, __n;							\
459 									\
460 	__x = (x);							\
461 	__n = (n);							\
462 									\
463 	/* If it will wrap around, sync to the end of the ring. */	\
464 	if ((__x + __n) > WM_NTXDESC(sc)) {				\
465 		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
466 		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
467 		    (WM_NTXDESC(sc) - __x), (ops));			\
468 		__n -= (WM_NTXDESC(sc) - __x);				\
469 		__x = 0;						\
470 	}								\
471 									\
472 	/* Now sync whatever is left. */				\
473 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
474 	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
475 } while (/*CONSTCOND*/0)
476 
477 #define	WM_CDRXSYNC(sc, x, ops)						\
478 do {									\
479 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
480 	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
481 } while (/*CONSTCOND*/0)
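
/*
 * Usage sketch (editor's illustration): a caller syncs only the
 * descriptors it touched, and the wrap logic above transparently
 * splits the operation when the range crosses the end of the ring:
 */
#if 0
	/*
	 * e.g. 10 descriptors starting at 4090 in a 4096-entry ring:
	 * slots 4090-4095 are synced first, then slots 0-3.
	 */
	WM_CDTXSYNC(sc, 4090, 10, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
#endif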
482 
483 #define	WM_INIT_RXDESC(sc, x)						\
484 do {									\
485 	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
486 	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
487 	struct mbuf *__m = __rxs->rxs_mbuf;				\
488 									\
489 	/*								\
490 	 * Note: We scoot the packet forward 2 bytes in the buffer	\
491 	 * so that the payload after the Ethernet header is aligned	\
492 	 * to a 4-byte boundary.					\
493 	 *								\
494 	 * XXX BRAINDAMAGE ALERT!					\
495 	 * The stupid chip uses the same size for every buffer, which	\
496 	 * is set in the Receive Control register.  We are using the 2K	\
497 	 * size option, but what we REALLY want is (2K - 2)!  For this	\
498 	 * reason, we can't "scoot" packets longer than the standard	\
499 	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
500 	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
501 	 * the upper layer copy the headers.				\
502 	 */								\
503 	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
504 									\
505 	wm_set_dma_addr(&__rxd->wrx_addr,				\
506 	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
507 	__rxd->wrx_len = 0;						\
508 	__rxd->wrx_cksum = 0;						\
509 	__rxd->wrx_status = 0;						\
510 	__rxd->wrx_errors = 0;						\
511 	__rxd->wrx_special = 0;						\
512 	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
513 									\
514 	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
515 } while (/*CONSTCOND*/0)
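
/*
 * Editor's note on the 2-byte "scoot" above: with sc_align_tweak == 2,
 * the 14-byte Ethernet header occupies buffer offsets 2..15, so the IP
 * header that follows starts at offset 16, a 4-byte boundary:
 *
 *	offset  0: 2 bytes of padding
 *	offset  2: 14-byte Ethernet header
 *	offset 16: IP header (4-byte aligned)
 */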
516 
517 static void	wm_start(struct ifnet *);
518 static void	wm_watchdog(struct ifnet *);
519 static int	wm_ioctl(struct ifnet *, u_long, void *);
520 static int	wm_init(struct ifnet *);
521 static void	wm_stop(struct ifnet *, int);
522 
523 static void	wm_reset(struct wm_softc *);
524 static void	wm_rxdrain(struct wm_softc *);
525 static int	wm_add_rxbuf(struct wm_softc *, int);
526 static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
527 static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
528 static int	wm_validate_eeprom_checksum(struct wm_softc *);
529 static void	wm_tick(void *);
530 
531 static void	wm_set_filter(struct wm_softc *);
532 
533 static int	wm_intr(void *);
534 static void	wm_txintr(struct wm_softc *);
535 static void	wm_rxintr(struct wm_softc *);
536 static void	wm_linkintr(struct wm_softc *, uint32_t);
537 
538 static void	wm_tbi_mediainit(struct wm_softc *);
539 static int	wm_tbi_mediachange(struct ifnet *);
540 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
541 
542 static void	wm_tbi_set_linkled(struct wm_softc *);
543 static void	wm_tbi_check_link(struct wm_softc *);
544 
545 static void	wm_gmii_reset(struct wm_softc *);
546 
547 static int	wm_gmii_i82543_readreg(device_t, int, int);
548 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
549 
550 static int	wm_gmii_i82544_readreg(device_t, int, int);
551 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
552 
553 static int	wm_gmii_i80003_readreg(device_t, int, int);
554 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
555 
556 static void	wm_gmii_statchg(device_t);
557 
558 static void	wm_gmii_mediainit(struct wm_softc *);
559 static int	wm_gmii_mediachange(struct ifnet *);
560 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
561 
562 static int	wm_kmrn_i80003_readreg(struct wm_softc *, int);
563 static void	wm_kmrn_i80003_writereg(struct wm_softc *, int, int);
564 
565 static int	wm_match(device_t, struct cfdata *, void *);
566 static void	wm_attach(device_t, device_t, void *);
567 static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
568 static void	wm_get_auto_rd_done(struct wm_softc *);
569 static int	wm_get_swsm_semaphore(struct wm_softc *);
570 static void	wm_put_swsm_semaphore(struct wm_softc *);
571 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
572 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
573 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
574 static int	wm_get_swfwhw_semaphore(struct wm_softc *);
575 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
576 
577 static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
578 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
579 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
580 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
581 		     uint32_t, uint16_t *);
582 static int32_t	wm_read_ich8_word(struct wm_softc *sc, uint32_t, uint16_t *);
583 
584 CFATTACH_DECL(wm, sizeof(struct wm_softc),
585     wm_match, wm_attach, NULL, NULL);
586 
587 static void	wm_82547_txfifo_stall(void *);
588 
589 /*
590  * Devices supported by this driver.
591  */
592 static const struct wm_product {
593 	pci_vendor_id_t		wmp_vendor;
594 	pci_product_id_t	wmp_product;
595 	const char		*wmp_name;
596 	wm_chip_type		wmp_type;
597 	int			wmp_flags;
598 #define	WMP_F_1000X		0x01
599 #define	WMP_F_1000T		0x02
600 } wm_products[] = {
601 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
602 	  "Intel i82542 1000BASE-X Ethernet",
603 	  WM_T_82542_2_1,	WMP_F_1000X },
604 
605 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
606 	  "Intel i82543GC 1000BASE-X Ethernet",
607 	  WM_T_82543,		WMP_F_1000X },
608 
609 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
610 	  "Intel i82543GC 1000BASE-T Ethernet",
611 	  WM_T_82543,		WMP_F_1000T },
612 
613 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
614 	  "Intel i82544EI 1000BASE-T Ethernet",
615 	  WM_T_82544,		WMP_F_1000T },
616 
617 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
618 	  "Intel i82544EI 1000BASE-X Ethernet",
619 	  WM_T_82544,		WMP_F_1000X },
620 
621 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
622 	  "Intel i82544GC 1000BASE-T Ethernet",
623 	  WM_T_82544,		WMP_F_1000T },
624 
625 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
626 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
627 	  WM_T_82544,		WMP_F_1000T },
628 
629 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
630 	  "Intel i82540EM 1000BASE-T Ethernet",
631 	  WM_T_82540,		WMP_F_1000T },
632 
633 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
634 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
635 	  WM_T_82540,		WMP_F_1000T },
636 
637 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
638 	  "Intel i82540EP 1000BASE-T Ethernet",
639 	  WM_T_82540,		WMP_F_1000T },
640 
641 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
642 	  "Intel i82540EP 1000BASE-T Ethernet",
643 	  WM_T_82540,		WMP_F_1000T },
644 
645 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
646 	  "Intel i82540EP 1000BASE-T Ethernet",
647 	  WM_T_82540,		WMP_F_1000T },
648 
649 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
650 	  "Intel i82545EM 1000BASE-T Ethernet",
651 	  WM_T_82545,		WMP_F_1000T },
652 
653 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
654 	  "Intel i82545GM 1000BASE-T Ethernet",
655 	  WM_T_82545_3,		WMP_F_1000T },
656 
657 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
658 	  "Intel i82545GM 1000BASE-X Ethernet",
659 	  WM_T_82545_3,		WMP_F_1000X },
660 #if 0
661 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
662 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
663 	  WM_T_82545_3,		WMP_F_SERDES },
664 #endif
665 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
666 	  "Intel i82546EB 1000BASE-T Ethernet",
667 	  WM_T_82546,		WMP_F_1000T },
668 
669 	{ PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82546EB_QUAD,
670 	  "Intel i82546EB 1000BASE-T Ethernet",
671 	  WM_T_82546,		WMP_F_1000T },
672 
673 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
674 	  "Intel i82545EM 1000BASE-X Ethernet",
675 	  WM_T_82545,		WMP_F_1000X },
676 
677 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
678 	  "Intel i82546EB 1000BASE-X Ethernet",
679 	  WM_T_82546,		WMP_F_1000X },
680 
681 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
682 	  "Intel i82546GB 1000BASE-T Ethernet",
683 	  WM_T_82546_3,		WMP_F_1000T },
684 
685 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
686 	  "Intel i82546GB 1000BASE-X Ethernet",
687 	  WM_T_82546_3,		WMP_F_1000X },
688 #if 0
689 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
690 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
691 	  WM_T_82546_3,		WMP_F_SERDES },
692 #endif
693 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
694 	  "i82546GB quad-port Gigabit Ethernet",
695 	  WM_T_82546_3,		WMP_F_1000T },
696 
697 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
698 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
699 	  WM_T_82546_3,		WMP_F_1000T },
700 
701 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
702 	  "Intel PRO/1000MT (82546GB)",
703 	  WM_T_82546_3,		WMP_F_1000T },
704 
705 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
706 	  "Intel i82541EI 1000BASE-T Ethernet",
707 	  WM_T_82541,		WMP_F_1000T },
708 
709 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
710 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
711 	  WM_T_82541,		WMP_F_1000T },
712 
713 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
714 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
715 	  WM_T_82541,		WMP_F_1000T },
716 
717 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
718 	  "Intel i82541ER 1000BASE-T Ethernet",
719 	  WM_T_82541_2,		WMP_F_1000T },
720 
721 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
722 	  "Intel i82541GI 1000BASE-T Ethernet",
723 	  WM_T_82541_2,		WMP_F_1000T },
724 
725 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
726 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
727 	  WM_T_82541_2,		WMP_F_1000T },
728 
729 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
730 	  "Intel i82541PI 1000BASE-T Ethernet",
731 	  WM_T_82541_2,		WMP_F_1000T },
732 
733 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
734 	  "Intel i82547EI 1000BASE-T Ethernet",
735 	  WM_T_82547,		WMP_F_1000T },
736 
737 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
738 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
739 	  WM_T_82547,		WMP_F_1000T },
740 
741 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
742 	  "Intel i82547GI 1000BASE-T Ethernet",
743 	  WM_T_82547_2,		WMP_F_1000T },
744 
745 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
746 	  "Intel PRO/1000 PT (82571EB)",
747 	  WM_T_82571,		WMP_F_1000T },
748 
749 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
750 	  "Intel PRO/1000 PF (82571EB)",
751 	  WM_T_82571,		WMP_F_1000X },
752 #if 0
753 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
754 	  "Intel PRO/1000 PB (82571EB)",
755 	  WM_T_82571,		WMP_F_SERDES },
756 #endif
757 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
758 	  "Intel PRO/1000 QT (82571EB)",
759 	  WM_T_82571,		WMP_F_1000T },
760 
761 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
762 	  "Intel i82572EI 1000baseT Ethernet",
763 	  WM_T_82572,		WMP_F_1000T },
764 
765 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
766 	  "Intel PRO/1000 PT Quad Port Server Adapter",
767 	  WM_T_82571,		WMP_F_1000T, },
768 
769 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
770 	  "Intel i82572EI 1000baseX Ethernet",
771 	  WM_T_82572,		WMP_F_1000X },
772 #if 0
773 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
774 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
775 	  WM_T_82572,		WMP_F_SERDES },
776 #endif
777 
778 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
779 	  "Intel i82572EI 1000baseT Ethernet",
780 	  WM_T_82572,		WMP_F_1000T },
781 
782 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
783 	  "Intel i82573E",
784 	  WM_T_82573,		WMP_F_1000T },
785 
786 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
787 	  "Intel i82573E IAMT",
788 	  WM_T_82573,		WMP_F_1000T },
789 
790 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
791 	  "Intel i82573L Gigabit Ethernet",
792 	  WM_T_82573,		WMP_F_1000T },
793 
794 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
795 	  "i80003 dual 1000baseT Ethernet",
796 	  WM_T_80003,		WMP_F_1000T },
797 
798 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
799 	  "i80003 dual 1000baseX Ethernet",
800 	  WM_T_80003,		WMP_F_1000T },
801 #if 0
802 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
803 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
804 	  WM_T_80003,		WMP_F_SERDES },
805 #endif
806 
807 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
808 	  "Intel i80003 1000baseT Ethernet",
809 	  WM_T_80003,		WMP_F_1000T },
810 #if 0
811 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
812 	  "Intel i80003 Gigabit Ethernet (SERDES)",
813 	  WM_T_80003,		WMP_F_SERDES },
814 #endif
815 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
816 	  "Intel i82801H (M_AMT) LAN Controller",
817 	  WM_T_ICH8,		WMP_F_1000T },
818 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
819 	  "Intel i82801H (AMT) LAN Controller",
820 	  WM_T_ICH8,		WMP_F_1000T },
821 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
822 	  "Intel i82801H LAN Controller",
823 	  WM_T_ICH8,		WMP_F_1000T },
824 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
825 	  "Intel i82801H (IFE) LAN Controller",
826 	  WM_T_ICH8,		WMP_F_1000T },
827 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
828 	  "Intel i82801H (M) LAN Controller",
829 	  WM_T_ICH8,		WMP_F_1000T },
830 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
831 	  "Intel i82801H IFE (GT) LAN Controller",
832 	  WM_T_ICH8,		WMP_F_1000T },
833 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
834 	  "Intel i82801H IFE (G) LAN Controller",
835 	  WM_T_ICH8,		WMP_F_1000T },
836 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
837 	  "82801I (AMT) LAN Controller",
838 	  WM_T_ICH9,		WMP_F_1000T },
839 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
840 	  "82801I LAN Controller",
841 	  WM_T_ICH9,		WMP_F_1000T },
842 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
843 	  "82801I (G) LAN Controller",
844 	  WM_T_ICH9,		WMP_F_1000T },
845 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
846 	  "82801I (GT) LAN Controller",
847 	  WM_T_ICH9,		WMP_F_1000T },
848 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
849 	  "82801I (C) LAN Controller",
850 	  WM_T_ICH9,		WMP_F_1000T },
851 	{ 0,			0,
852 	  NULL,
853 	  0,			0 },
854 };
855 
856 #ifdef WM_EVENT_COUNTERS
857 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
858 #endif /* WM_EVENT_COUNTERS */
859 
860 #if 0 /* Not currently used */
861 static inline uint32_t
862 wm_io_read(struct wm_softc *sc, int reg)
863 {
864 
865 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
866 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
867 }
868 #endif
869 
870 static inline void
871 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
872 {
873 
874 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
875 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
876 }
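
/*
 * Usage sketch (editor's illustration): the two writes above drive the
 * chip's indirect I/O window -- offset 0 selects a register, offset 4
 * carries its data -- so a CSR can be reached through the I/O BAR when
 * memory-mapped access is unsuitable, e.g. in reset work-arounds:
 */
#if 0
	if (sc->sc_flags & WM_F_IOH_VALID)
		wm_io_write(sc, WMREG_CTRL, CTRL_RST);
#endif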
877 
878 static inline void
879 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
880 {
881 	wa->wa_low = htole32(v & 0xffffffffU);
882 	if (sizeof(bus_addr_t) == 8)
883 		wa->wa_high = htole32((uint64_t) v >> 32);
884 	else
885 		wa->wa_high = 0;
886 }
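
/*
 * Worked example (editor's note): on a 64-bit platform the address
 * 0x123456000 splits into wa_low = 0x23456000 and wa_high = 0x1, each
 * stored little-endian as the descriptor format requires; on 32-bit
 * platforms the size test folds away and wa_high is simply 0.
 */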
887 
888 static const struct wm_product *
889 wm_lookup(const struct pci_attach_args *pa)
890 {
891 	const struct wm_product *wmp;
892 
893 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
894 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
895 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
896 			return (wmp);
897 	}
898 	return (NULL);
899 }
900 
901 static int
902 wm_match(device_t parent, struct cfdata *cf, void *aux)
903 {
904 	struct pci_attach_args *pa = aux;
905 
906 	if (wm_lookup(pa) != NULL)
907 		return (1);
908 
909 	return (0);
910 }
911 
912 static void
913 wm_attach(device_t parent, device_t self, void *aux)
914 {
915 	struct wm_softc *sc = device_private(self);
916 	struct pci_attach_args *pa = aux;
917 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
918 	pci_chipset_tag_t pc = pa->pa_pc;
919 	pci_intr_handle_t ih;
920 	size_t cdata_size;
921 	const char *intrstr = NULL;
922 	const char *eetype;
923 	bus_space_tag_t memt;
924 	bus_space_handle_t memh;
925 	bus_dma_segment_t seg;
926 	int memh_valid;
927 	int i, rseg, error;
928 	const struct wm_product *wmp;
929 	prop_data_t ea;
930 	prop_number_t pn;
931 	uint8_t enaddr[ETHER_ADDR_LEN];
932 	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
933 	pcireg_t preg, memtype;
934 	uint32_t reg;
935 
936 	callout_init(&sc->sc_tick_ch, 0);
937 
938 	wmp = wm_lookup(pa);
939 	if (wmp == NULL) {
940 		printf("\n");
941 		panic("wm_attach: impossible");
942 	}
943 
944 	sc->sc_pc = pa->pa_pc;
945 	sc->sc_pcitag = pa->pa_tag;
946 
947 	if (pci_dma64_available(pa))
948 		sc->sc_dmat = pa->pa_dmat64;
949 	else
950 		sc->sc_dmat = pa->pa_dmat;
951 
952 	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
953 	aprint_naive(": Ethernet controller\n");
954 	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);
955 
956 	sc->sc_type = wmp->wmp_type;
957 	if (sc->sc_type < WM_T_82543) {
958 		if (preg < 2) {
959 			aprint_error_dev(&sc->sc_dev, "i82542 must be at least rev. 2\n");
960 			return;
961 		}
962 		if (preg < 3)
963 			sc->sc_type = WM_T_82542_2_0;
964 	}
965 
966 	/*
967 	 * Map the device.  All devices support memory-mapped access,
968 	 * and it is really required for normal operation.
969 	 */
970 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
971 	switch (memtype) {
972 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
973 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
974 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
975 		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
976 		break;
977 	default:
978 		memh_valid = 0;
979 	}
980 
981 	if (memh_valid) {
982 		sc->sc_st = memt;
983 		sc->sc_sh = memh;
984 	} else {
985 		aprint_error_dev(&sc->sc_dev, "unable to map device registers\n");
986 		return;
987 	}
988 
989 	/*
990 	 * In addition, i82544 and later support I/O mapped indirect
991 	 * register access.  It is not desirable (nor supported in
992 	 * this driver) to use it for normal operation, though it is
993 	 * required to work around bugs in some chip versions.
994 	 */
995 	if (sc->sc_type >= WM_T_82544) {
996 		/* First we have to find the I/O BAR. */
997 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
998 			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
999 			    PCI_MAPREG_TYPE_IO)
1000 				break;
1001 		}
1002 		if (i == PCI_MAPREG_END)
1003 			aprint_error_dev(&sc->sc_dev, "WARNING: unable to find I/O BAR\n");
1004 		else {
1005 			/*
1006 			 * The i8254x apparently doesn't respond when the
1007 			 * I/O BAR is 0, which suggests that it has not
1008 			 * been configured.
1009 			 */
1010 			preg = pci_conf_read(pc, pa->pa_tag, i);
1011 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1012 				aprint_error_dev(&sc->sc_dev, "WARNING: I/O BAR at zero.\n");
1013 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1014 					0, &sc->sc_iot, &sc->sc_ioh,
1015 					NULL, NULL) == 0) {
1016 				sc->sc_flags |= WM_F_IOH_VALID;
1017 			} else {
1018 				aprint_error_dev(&sc->sc_dev, "WARNING: unable to map "
1019 				    "I/O space\n");
1020 			}
1021 		}
1022 
1023 	}
1024 
1025 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
1026 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1027 	preg |= PCI_COMMAND_MASTER_ENABLE;
1028 	if (sc->sc_type < WM_T_82542_2_1)
1029 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1030 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1031 
1032 	/* power up chip */
1033 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1034 	    NULL)) && error != EOPNOTSUPP) {
1035 		aprint_error_dev(&sc->sc_dev, "cannot activate %d\n",
1036 		    error);
1037 		return;
1038 	}
1039 
1040 	/*
1041 	 * Map and establish our interrupt.
1042 	 */
1043 	if (pci_intr_map(pa, &ih)) {
1044 		aprint_error_dev(&sc->sc_dev, "unable to map interrupt\n");
1045 		return;
1046 	}
1047 	intrstr = pci_intr_string(pc, ih);
1048 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
1049 	if (sc->sc_ih == NULL) {
1050 		aprint_error_dev(&sc->sc_dev, "unable to establish interrupt");
1051 		if (intrstr != NULL)
1052 			aprint_normal(" at %s", intrstr);
1053 		aprint_normal("\n");
1054 		return;
1055 	}
1056 	aprint_normal_dev(&sc->sc_dev, "interrupting at %s\n", intrstr);
1057 
1058 	/*
1059 	 * Determine a few things about the bus we're connected to.
1060 	 */
1061 	if (sc->sc_type < WM_T_82543) {
1062 		/* We don't really know the bus characteristics here. */
1063 		sc->sc_bus_speed = 33;
1064 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1065 		/*
1066 		 * CSA (Communication Streaming Architecture) is about as fast
1067 		 * as a 32-bit 66MHz PCI bus.
1068 		 */
1069 		sc->sc_flags |= WM_F_CSA;
1070 		sc->sc_bus_speed = 66;
1071 		aprint_verbose_dev(&sc->sc_dev, "Communication Streaming Architecture\n");
1072 		if (sc->sc_type == WM_T_82547) {
1073 			callout_init(&sc->sc_txfifo_ch, 0);
1074 			callout_setfunc(&sc->sc_txfifo_ch,
1075 					wm_82547_txfifo_stall, sc);
1076 			aprint_verbose_dev(&sc->sc_dev, "using 82547 Tx FIFO stall "
1077 				       "work-around\n");
1078 		}
1079 	} else if (sc->sc_type >= WM_T_82571) {
1080 		sc->sc_flags |= WM_F_PCIE;
1081 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9))
1082 			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
1083 		aprint_verbose_dev(&sc->sc_dev, "PCI-Express bus\n");
1084 	} else {
1085 		reg = CSR_READ(sc, WMREG_STATUS);
1086 		if (reg & STATUS_BUS64)
1087 			sc->sc_flags |= WM_F_BUS64;
1088 		if (sc->sc_type >= WM_T_82544 &&
1089 		    (reg & STATUS_PCIX_MODE) != 0) {
1090 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1091 
1092 			sc->sc_flags |= WM_F_PCIX;
1093 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1094 					       PCI_CAP_PCIX,
1095 					       &sc->sc_pcix_offset, NULL) == 0)
1096 				aprint_error_dev(&sc->sc_dev, "unable to find PCIX "
1097 				    "capability\n");
1098 			else if (sc->sc_type != WM_T_82545_3 &&
1099 				 sc->sc_type != WM_T_82546_3) {
1100 				/*
1101 				 * Work around a problem caused by the BIOS
1102 				 * setting the max memory read byte count
1103 				 * incorrectly.
1104 				 */
1105 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1106 				    sc->sc_pcix_offset + PCI_PCIX_CMD);
1107 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1108 				    sc->sc_pcix_offset + PCI_PCIX_STATUS);
1109 
1110 				bytecnt =
1111 				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
1112 				    PCI_PCIX_CMD_BYTECNT_SHIFT;
1113 				maxb =
1114 				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
1115 				    PCI_PCIX_STATUS_MAXB_SHIFT;
1116 				if (bytecnt > maxb) {
1117 					aprint_verbose_dev(&sc->sc_dev, "resetting PCI-X "
1118 					    "MMRBC: %d -> %d\n",
1119 					    512 << bytecnt, 512 << maxb);
1120 					pcix_cmd = (pcix_cmd &
1121 					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
1122 					   (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
1123 					pci_conf_write(pa->pa_pc, pa->pa_tag,
1124 					    sc->sc_pcix_offset + PCI_PCIX_CMD,
1125 					    pcix_cmd);
1126 				}
1127 			}
1128 		}
1129 		/*
1130 		 * The quad port adapter is special; it has a PCIX-PCIX
1131 		 * bridge on the board, and can run the secondary bus at
1132 		 * a higher speed.
1133 		 */
1134 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1135 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1136 								      : 66;
1137 		} else if (sc->sc_flags & WM_F_PCIX) {
1138 			switch (reg & STATUS_PCIXSPD_MASK) {
1139 			case STATUS_PCIXSPD_50_66:
1140 				sc->sc_bus_speed = 66;
1141 				break;
1142 			case STATUS_PCIXSPD_66_100:
1143 				sc->sc_bus_speed = 100;
1144 				break;
1145 			case STATUS_PCIXSPD_100_133:
1146 				sc->sc_bus_speed = 133;
1147 				break;
1148 			default:
1149 				aprint_error_dev(&sc->sc_dev,
1150 				    "unknown PCIXSPD %d; assuming 66MHz\n",
1151 				    reg & STATUS_PCIXSPD_MASK);
1152 				sc->sc_bus_speed = 66;
1153 			}
1154 		} else
1155 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1156 		aprint_verbose_dev(&sc->sc_dev, "%d-bit %dMHz %s bus\n",
1157 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1158 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1159 	}
1160 
1161 	/*
1162 	 * Allocate the control data structures, and create and load the
1163 	 * DMA map for it.
1164 	 *
1165 	 * NOTE: All Tx descriptors must be in the same 4G segment of
1166 	 * memory.  So must Rx descriptors.  We simplify by allocating
1167 	 * both sets within the same 4G segment.
1168 	 */
1169 	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1170 	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
1171 	cdata_size = sc->sc_type < WM_T_82544 ?
1172 	    sizeof(struct wm_control_data_82542) :
1173 	    sizeof(struct wm_control_data_82544);
1174 	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE,
1175 				      (bus_size_t) 0x100000000ULL,
1176 				      &seg, 1, &rseg, 0)) != 0) {
1177 		aprint_error_dev(&sc->sc_dev,
1178 		    "unable to allocate control data, error = %d\n",
1179 		    error);
1180 		goto fail_0;
1181 	}
1182 
1183 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size,
1184 				    (void **)&sc->sc_control_data,
1185 				    BUS_DMA_COHERENT)) != 0) {
1186 		aprint_error_dev(&sc->sc_dev, "unable to map control data, error = %d\n",
1187 		    error);
1188 		goto fail_1;
1189 	}
1190 
1191 	if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size,
1192 				       0, 0, &sc->sc_cddmamap)) != 0) {
1193 		aprint_error_dev(&sc->sc_dev, "unable to create control data DMA map, "
1194 		    "error = %d\n", error);
1195 		goto fail_2;
1196 	}
1197 
1198 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1199 				     sc->sc_control_data, cdata_size, NULL,
1200 				     0)) != 0) {
1201 		aprint_error_dev(&sc->sc_dev,
1202 		    "unable to load control data DMA map, error = %d\n",
1203 		    error);
1204 		goto fail_3;
1205 	}
1206 
1207 
1208 	/*
1209 	 * Create the transmit buffer DMA maps.
1210 	 */
1211 	WM_TXQUEUELEN(sc) =
1212 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1213 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1214 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1215 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1216 					       WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1217 					  &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1218 			aprint_error_dev(&sc->sc_dev, "unable to create Tx DMA map %d, "
1219 			    "error = %d\n", i, error);
1220 			goto fail_4;
1221 		}
1222 	}
1223 
1224 	/*
1225 	 * Create the receive buffer DMA maps.
1226 	 */
1227 	for (i = 0; i < WM_NRXDESC; i++) {
1228 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1229 					       MCLBYTES, 0, 0,
1230 					  &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1231 			aprint_error_dev(&sc->sc_dev, "unable to create Rx DMA map %d, "
1232 			    "error = %d\n", i, error);
1233 			goto fail_5;
1234 		}
1235 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
1236 	}
1237 
1238 	/* clear interesting stat counters */
1239 	CSR_READ(sc, WMREG_COLC);
1240 	CSR_READ(sc, WMREG_RXERRC);
1241 
1242 	/*
1243 	 * Reset the chip to a known state.
1244 	 */
1245 	wm_reset(sc);
1246 
1247 	/*
1248 	 * Get some information about the EEPROM.
1249 	 */
1250 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)) {
1251 		uint32_t flash_size;
1252 		sc->sc_flags |= WM_F_SWFWHW_SYNC | WM_F_EEPROM_FLASH;
1253 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1254 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1255 		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1256 			aprint_error_dev(&sc->sc_dev, "can't map FLASH registers\n");
1257 			return;
1258 		}
1259 		flash_size = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1260 		sc->sc_ich8_flash_base = (flash_size & ICH_GFPREG_BASE_MASK) *
1261 						ICH_FLASH_SECTOR_SIZE;
1262 		sc->sc_ich8_flash_bank_size =
1263 			((flash_size >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1264 		sc->sc_ich8_flash_bank_size -=
1265 			(flash_size & ICH_GFPREG_BASE_MASK);
1266 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1267 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
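		/*
		 * Editor's note: GFPREG holds the first and last flash
		 * sector numbers of the NVM region (in units of
		 * ICH_FLASH_SECTOR_SIZE); the region contains two banks,
		 * so the math above converts it to a per-bank size in
		 * 16-bit words (bytes / 2 banks / 2 bytes per word).
		 */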
1268 	} else if (sc->sc_type == WM_T_80003)
1269 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
1270 	else if (sc->sc_type == WM_T_82573)
1271 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1272 	else if (sc->sc_type > WM_T_82544)
1273 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1274 
1275 	if (sc->sc_type <= WM_T_82544)
1276 		sc->sc_ee_addrbits = 6;
1277 	else if (sc->sc_type <= WM_T_82546_3) {
1278 		reg = CSR_READ(sc, WMREG_EECD);
1279 		if (reg & EECD_EE_SIZE)
1280 			sc->sc_ee_addrbits = 8;
1281 		else
1282 			sc->sc_ee_addrbits = 6;
1283 	} else if (sc->sc_type <= WM_T_82547_2) {
1284 		reg = CSR_READ(sc, WMREG_EECD);
1285 		if (reg & EECD_EE_TYPE) {
1286 			sc->sc_flags |= WM_F_EEPROM_SPI;
1287 			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
1288 		} else
1289 			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
1290 	} else if ((sc->sc_type == WM_T_82573) &&
1291 	    (wm_is_onboard_nvm_eeprom(sc) == 0)) {
1292 		sc->sc_flags |= WM_F_EEPROM_FLASH;
1293 	} else {
1294 		/* Assume everything else is SPI. */
1295 		reg = CSR_READ(sc, WMREG_EECD);
1296 		sc->sc_flags |= WM_F_EEPROM_SPI;
1297 		sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
1298 	}
1299 
1300 	/*
1301 	 * Defer printing the EEPROM type until after verifying the checksum.
1302 	 * This allows the EEPROM type to be printed correctly in the case
1303 	 * that no EEPROM is attached.
1304 	 */
1305 
1306 
1307 	/*
1308 	 * Validate the EEPROM checksum. If the checksum fails, flag this for
1309 	 * later, so we can fail future reads from the EEPROM.
1310 	 */
1311 	if (wm_validate_eeprom_checksum(sc))
1312 		sc->sc_flags |= WM_F_EEPROM_INVALID;
1313 
1314 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
1315 		aprint_verbose_dev(&sc->sc_dev, "No EEPROM\n");
1316 	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1317 		aprint_verbose_dev(&sc->sc_dev, "FLASH\n");
1318 	} else {
1319 		if (sc->sc_flags & WM_F_EEPROM_SPI)
1320 			eetype = "SPI";
1321 		else
1322 			eetype = "MicroWire";
1323 		aprint_verbose_dev(&sc->sc_dev, "%u word (%d address bits) %s EEPROM\n",
1324 		    1U << sc->sc_ee_addrbits,
1325 		    sc->sc_ee_addrbits, eetype);
1326 	}
1327 
1328 	/*
1329 	 * Read the Ethernet address from the EEPROM, if not first found
1330 	 * in device properties.
1331 	 */
1332 	ea = prop_dictionary_get(device_properties(&sc->sc_dev), "mac-addr");
1333 	if (ea != NULL) {
1334 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1335 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1336 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1337 	} else {
1338 		if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
1339 		    sizeof(myea) / sizeof(myea[0]), myea)) {
1340 			aprint_error_dev(&sc->sc_dev, "unable to read Ethernet address\n");
1341 			return;
1342 		}
1343 		enaddr[0] = myea[0] & 0xff;
1344 		enaddr[1] = myea[0] >> 8;
1345 		enaddr[2] = myea[1] & 0xff;
1346 		enaddr[3] = myea[1] >> 8;
1347 		enaddr[4] = myea[2] & 0xff;
1348 		enaddr[5] = myea[2] >> 8;
1349 	}
1350 
1351 	/*
1352 	 * Toggle the LSB of the MAC address on the second port
1353 	 * of the dual port controller.
1354 	 */
1355 	if (sc->sc_type == WM_T_82546 || sc->sc_type == WM_T_82546_3
1356 	    || sc->sc_type == WM_T_82571 || sc->sc_type == WM_T_80003) {
1357 		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
1358 			enaddr[5] ^= 1;
1359 	}
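
	/*
	 * Worked example (editor's note): if the EEPROM holds
	 * 00:02:b3:aa:bb:cc, the second function of a dual-port chip
	 * reports 00:02:b3:aa:bb:cd after the XOR above.
	 */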
1360 
1361 	aprint_normal_dev(&sc->sc_dev, "Ethernet address %s\n",
1362 	    ether_sprintf(enaddr));
1363 
1364 	/*
1365 	 * Read the config info from the EEPROM, and set up various
1366 	 * bits in the control registers based on their contents.
1367 	 */
1368 	pn = prop_dictionary_get(device_properties(&sc->sc_dev),
1369 				 "i82543-cfg1");
1370 	if (pn != NULL) {
1371 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1372 		cfg1 = (uint16_t) prop_number_integer_value(pn);
1373 	} else {
1374 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1375 			aprint_error_dev(&sc->sc_dev, "unable to read CFG1\n");
1376 			return;
1377 		}
1378 	}
1379 
1380 	pn = prop_dictionary_get(device_properties(&sc->sc_dev),
1381 				 "i82543-cfg2");
1382 	if (pn != NULL) {
1383 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1384 		cfg2 = (uint16_t) prop_number_integer_value(pn);
1385 	} else {
1386 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1387 			aprint_error_dev(&sc->sc_dev, "unable to read CFG2\n");
1388 			return;
1389 		}
1390 	}
1391 
1392 	if (sc->sc_type >= WM_T_82544) {
1393 		pn = prop_dictionary_get(device_properties(&sc->sc_dev),
1394 					 "i82543-swdpin");
1395 		if (pn != NULL) {
1396 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1397 			swdpin = (uint16_t) prop_number_integer_value(pn);
1398 		} else {
1399 			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1400 				aprint_error_dev(&sc->sc_dev, "unable to read SWDPIN\n");
1401 				return;
1402 			}
1403 		}
1404 	}
1405 
1406 	if (cfg1 & EEPROM_CFG1_ILOS)
1407 		sc->sc_ctrl |= CTRL_ILOS;
1408 	if (sc->sc_type >= WM_T_82544) {
1409 		sc->sc_ctrl |=
1410 		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1411 		    CTRL_SWDPIO_SHIFT;
1412 		sc->sc_ctrl |=
1413 		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1414 		    CTRL_SWDPINS_SHIFT;
1415 	} else {
1416 		sc->sc_ctrl |=
1417 		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1418 		    CTRL_SWDPIO_SHIFT;
1419 	}
1420 
1421 #if 0
1422 	if (sc->sc_type >= WM_T_82544) {
1423 		if (cfg1 & EEPROM_CFG1_IPS0)
1424 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1425 		if (cfg1 & EEPROM_CFG1_IPS1)
1426 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1427 		sc->sc_ctrl_ext |=
1428 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1429 		    CTRL_EXT_SWDPIO_SHIFT;
1430 		sc->sc_ctrl_ext |=
1431 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1432 		    CTRL_EXT_SWDPINS_SHIFT;
1433 	} else {
1434 		sc->sc_ctrl_ext |=
1435 		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1436 		    CTRL_EXT_SWDPIO_SHIFT;
1437 	}
1438 #endif
1439 
1440 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1441 #if 0
1442 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1443 #endif
1444 
1445 	/*
1446 	 * Set up some register offsets that are different between
1447 	 * the i82542 and the i82543 and later chips.
1448 	 */
1449 	if (sc->sc_type < WM_T_82543) {
1450 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
1451 		sc->sc_tdt_reg = WMREG_OLD_TDT;
1452 	} else {
1453 		sc->sc_rdt_reg = WMREG_RDT;
1454 		sc->sc_tdt_reg = WMREG_TDT;
1455 	}
1456 
1457 	/*
1458 	 * Determine if we're TBI or GMII mode, and initialize the
1459 	 * media structures accordingly.
1460 	 */
1461 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1462 	    || sc->sc_type == WM_T_82573) {
1463 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
1464 		wm_gmii_mediainit(sc);
1465 	} else if (sc->sc_type < WM_T_82543 ||
1466 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1467 		if (wmp->wmp_flags & WMP_F_1000T)
1468 			aprint_error_dev(&sc->sc_dev, "WARNING: TBIMODE set on 1000BASE-T "
1469 			    "product!\n");
1470 		wm_tbi_mediainit(sc);
1471 	} else {
1472 		if (wmp->wmp_flags & WMP_F_1000X)
1473 			aprint_error_dev(&sc->sc_dev, "WARNING: TBIMODE clear on 1000BASE-X "
1474 			    "product!\n");
1475 		wm_gmii_mediainit(sc);
1476 	}
1477 
1478 	ifp = &sc->sc_ethercom.ec_if;
1479 	strlcpy(ifp->if_xname, device_xname(&sc->sc_dev), IFNAMSIZ);
1480 	ifp->if_softc = sc;
1481 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1482 	ifp->if_ioctl = wm_ioctl;
1483 	ifp->if_start = wm_start;
1484 	ifp->if_watchdog = wm_watchdog;
1485 	ifp->if_init = wm_init;
1486 	ifp->if_stop = wm_stop;
1487 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1488 	IFQ_SET_READY(&ifp->if_snd);
1489 
1490 	if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_ICH8)
1491 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1492 
1493 	/*
1494 	 * If we're a i82543 or greater, we can support VLANs.
1495 	 */
1496 	if (sc->sc_type >= WM_T_82543)
1497 		sc->sc_ethercom.ec_capabilities |=
1498 		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;
1499 
1500 	/*
1501 	 * We can perform IPv4/TCPv4/UDPv4 checksum offload in-bound and
1502 	 * out-bound (TCPv6/UDPv6 out-bound only).  Only on i82543 and later.
1503 	 */
1504 	if (sc->sc_type >= WM_T_82543) {
1505 		ifp->if_capabilities |=
1506 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1507 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1508 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1509 		    IFCAP_CSUM_TCPv6_Tx |
1510 		    IFCAP_CSUM_UDPv6_Tx;
1511 	}
1512 
1513 	/*
1514 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
1515 	 *
1516 	 *	82541GI (8086:1076) ... no
1517 	 *	82572EI (8086:10b9) ... yes
1518 	 */
1519 	if (sc->sc_type >= WM_T_82571) {
1520 		ifp->if_capabilities |=
1521 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1522 	}
1523 
1524 	/*
1525 	 * If we're a i82544 or greater (except i82547), we can do
1526 	 * TCP segmentation offload.
1527 	 */
1528 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
1529 		ifp->if_capabilities |= IFCAP_TSOv4;
1530 	}
1531 
1532 	if (sc->sc_type >= WM_T_82571) {
1533 		ifp->if_capabilities |= IFCAP_TSOv6;
1534 	}
1535 
1536 	/*
1537 	 * Attach the interface.
1538 	 */
1539 	if_attach(ifp);
1540 	ether_ifattach(ifp, enaddr);
1541 #if NRND > 0
1542 	rnd_attach_source(&sc->rnd_source, device_xname(&sc->sc_dev),
1543 	    RND_TYPE_NET, 0);
1544 #endif
1545 
1546 #ifdef WM_EVENT_COUNTERS
1547 	/* Attach event counters. */
1548 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1549 	    NULL, device_xname(&sc->sc_dev), "txsstall");
1550 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1551 	    NULL, device_xname(&sc->sc_dev), "txdstall");
1552 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1553 	    NULL, device_xname(&sc->sc_dev), "txfifo_stall");
1554 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1555 	    NULL, device_xname(&sc->sc_dev), "txdw");
1556 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1557 	    NULL, device_xname(&sc->sc_dev), "txqe");
1558 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1559 	    NULL, device_xname(&sc->sc_dev), "rxintr");
1560 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
1561 	    NULL, device_xname(&sc->sc_dev), "linkintr");
1562 
1563 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1564 	    NULL, device_xname(&sc->sc_dev), "rxipsum");
1565 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
1566 	    NULL, device_xname(&sc->sc_dev), "rxtusum");
1567 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1568 	    NULL, device_xname(&sc->sc_dev), "txipsum");
1569 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
1570 	    NULL, device_xname(&sc->sc_dev), "txtusum");
1571 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
1572 	    NULL, device_xname(&sc->sc_dev), "txtusum6");
1573 
1574 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
1575 	    NULL, device_xname(&sc->sc_dev), "txtso");
1576 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
1577 	    NULL, device_xname(&sc->sc_dev), "txtso6");
1578 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
1579 	    NULL, device_xname(&sc->sc_dev), "txtsopain");
1580 
1581 	for (i = 0; i < WM_NTXSEGS; i++) {
1582 		sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
1583 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
1584 		    NULL, device_xname(&sc->sc_dev), wm_txseg_evcnt_names[i]);
1585 	}
1586 
1587 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
1588 	    NULL, device_xname(&sc->sc_dev), "txdrop");
1589 
1590 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
1591 	    NULL, device_xname(&sc->sc_dev), "tu");
1592 
1593 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
1594 	    NULL, device_xname(&sc->sc_dev), "tx_xoff");
1595 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
1596 	    NULL, device_xname(&sc->sc_dev), "tx_xon");
1597 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
1598 	    NULL, device_xname(&sc->sc_dev), "rx_xoff");
1599 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
1600 	    NULL, device_xname(&sc->sc_dev), "rx_xon");
1601 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
1602 	    NULL, device_xname(&sc->sc_dev), "rx_macctl");
1603 #endif /* WM_EVENT_COUNTERS */
1604 
1605 	if (!pmf_device_register(self, NULL, NULL))
1606 		aprint_error_dev(self, "couldn't establish power handler\n");
1607 	else
1608 		pmf_class_network_register(self, ifp);
1609 
1610 	return;
1611 
1612 	/*
1613 	 * Free any resources we've allocated during the failed attach
1614 	 * attempt.  Do this in reverse order and fall through.
1615 	 */
1616  fail_5:
1617 	for (i = 0; i < WM_NRXDESC; i++) {
1618 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
1619 			bus_dmamap_destroy(sc->sc_dmat,
1620 			    sc->sc_rxsoft[i].rxs_dmamap);
1621 	}
1622  fail_4:
1623 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1624 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
1625 			bus_dmamap_destroy(sc->sc_dmat,
1626 			    sc->sc_txsoft[i].txs_dmamap);
1627 	}
1628 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
1629  fail_3:
1630 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
1631  fail_2:
1632 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
1633 	    cdata_size);
1634  fail_1:
1635 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1636  fail_0:
1637 	return;
1638 }
1639 
1640 /*
1641  * wm_tx_offload:
1642  *
1643  *	Set up TCP/IP checksumming parameters for the
1644  *	specified packet.
1645  */
1646 static int
1647 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
1648     uint8_t *fieldsp)
1649 {
1650 	struct mbuf *m0 = txs->txs_mbuf;
1651 	struct livengood_tcpip_ctxdesc *t;
1652 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
1653 	uint32_t ipcse;
1654 	struct ether_header *eh;
1655 	int offset, iphl;
1656 	uint8_t fields;
1657 
1658 	/*
1659 	 * XXX It would be nice if the mbuf pkthdr had offset
1660 	 * fields for the protocol headers.
1661 	 */
1662 
1663 	eh = mtod(m0, struct ether_header *);
1664 	switch (htons(eh->ether_type)) {
1665 	case ETHERTYPE_IP:
1666 	case ETHERTYPE_IPV6:
1667 		offset = ETHER_HDR_LEN;
1668 		break;
1669 
1670 	case ETHERTYPE_VLAN:
1671 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1672 		break;
1673 
1674 	default:
1675 		/*
1676 		 * Don't support this protocol or encapsulation.
1677 		 */
1678 		*fieldsp = 0;
1679 		*cmdp = 0;
1680 		return (0);
1681 	}
1682 
1683 	if ((m0->m_pkthdr.csum_flags &
1684 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
1685 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
1686 	} else {
1687 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
1688 	}
1689 	ipcse = offset + iphl - 1;
1690 
1691 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
1692 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
1693 	seg = 0;
1694 	fields = 0;
1695 
1696 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
1697 		int hlen = offset + iphl;
1698 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
1699 
1700 		if (__predict_false(m0->m_len <
1701 				    (hlen + sizeof(struct tcphdr)))) {
1702 			/*
1703 			 * TCP/IP headers are not in the first mbuf; we need
1704 			 * to do this the slow and painful way.  Let's just
1705 			 * hope this doesn't happen very often.
1706 			 */
1707 			struct tcphdr th;
1708 
1709 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
1710 
1711 			m_copydata(m0, hlen, sizeof(th), &th);
1712 			if (v4) {
1713 				struct ip ip;
1714 
1715 				m_copydata(m0, offset, sizeof(ip), &ip);
1716 				ip.ip_len = 0;
1717 				m_copyback(m0,
1718 				    offset + offsetof(struct ip, ip_len),
1719 				    sizeof(ip.ip_len), &ip.ip_len);
1720 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
1721 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
1722 			} else {
1723 				struct ip6_hdr ip6;
1724 
1725 				m_copydata(m0, offset, sizeof(ip6), &ip6);
1726 				ip6.ip6_plen = 0;
1727 				m_copyback(m0,
1728 				    offset + offsetof(struct ip6_hdr, ip6_plen),
1729 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
1730 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
1731 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
1732 			}
1733 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
1734 			    sizeof(th.th_sum), &th.th_sum);
1735 
1736 			hlen += th.th_off << 2;
1737 		} else {
1738 			/*
1739 			 * TCP/IP headers are in the first mbuf; we can do
1740 			 * this the easy way.
1741 			 */
1742 			struct tcphdr *th;
1743 
1744 			if (v4) {
1745 				struct ip *ip =
1746 				    (void *)(mtod(m0, char *) + offset);
1747 				th = (void *)(mtod(m0, char *) + hlen);
1748 
1749 				ip->ip_len = 0;
1750 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
1751 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1752 			} else {
1753 				struct ip6_hdr *ip6 =
1754 				    (void *)(mtod(m0, char *) + offset);
1755 				th = (void *)(mtod(m0, char *) + hlen);
1756 
1757 				ip6->ip6_plen = 0;
1758 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
1759 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
1760 			}
1761 			hlen += th->th_off << 2;
1762 		}
1763 
1764 		if (v4) {
1765 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
1766 			cmdlen |= WTX_TCPIP_CMD_IP;
1767 		} else {
1768 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
1769 			ipcse = 0;
1770 		}
1771 		cmd |= WTX_TCPIP_CMD_TSE;
1772 		cmdlen |= WTX_TCPIP_CMD_TSE |
1773 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
1774 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
1775 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
1776 	}
1777 
1778 	/*
1779 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
1780 	 * offload feature, if we load the context descriptor, we
1781 	 * MUST provide valid values for IPCSS and TUCSS fields.
1782 	 */
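	/*
	 * Worked example (illustrative, untagged IPv4/TCP without
	 * options): offset = ETHER_HDR_LEN = 14, so IPCSS = 14 and
	 * IPCSO = 14 + offsetof(struct ip, ip_sum) = 24; with iphl = 20,
	 * IPCSE = 14 + 20 - 1 = 33.  After the "offset += iphl" below,
	 * TUCSS = 34 and TUCSO = 34 + offsetof(struct tcphdr, th_sum) = 50.
	 */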
1783 
1784 	ipcs = WTX_TCPIP_IPCSS(offset) |
1785 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1786 	    WTX_TCPIP_IPCSE(ipcse);
1787 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
1788 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
1789 		fields |= WTX_IXSM;
1790 	}
1791 
1792 	offset += iphl;
1793 
1794 	if (m0->m_pkthdr.csum_flags &
1795 	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
1796 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
1797 		fields |= WTX_TXSM;
1798 		tucs = WTX_TCPIP_TUCSS(offset) |
1799 		    WTX_TCPIP_TUCSO(offset +
1800 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
1801 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
1802 	} else if ((m0->m_pkthdr.csum_flags &
1803 	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
1804 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
1805 		fields |= WTX_TXSM;
1806 		tucs = WTX_TCPIP_TUCSS(offset) |
1807 		    WTX_TCPIP_TUCSO(offset +
1808 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
1809 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
1810 	} else {
1811 		/* Just initialize it to a valid TCP context. */
1812 		tucs = WTX_TCPIP_TUCSS(offset) |
1813 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
1814 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
1815 	}
1816 
1817 	/* Fill in the context descriptor. */
1818 	t = (struct livengood_tcpip_ctxdesc *)
1819 	    &sc->sc_txdescs[sc->sc_txnext];
1820 	t->tcpip_ipcs = htole32(ipcs);
1821 	t->tcpip_tucs = htole32(tucs);
1822 	t->tcpip_cmdlen = htole32(cmdlen);
1823 	t->tcpip_seg = htole32(seg);
1824 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
1825 
1826 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
1827 	txs->txs_ndesc++;
1828 
1829 	*cmdp = cmd;
1830 	*fieldsp = fields;
1831 
1832 	return (0);
1833 }
1834 
1835 static void
1836 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
1837 {
1838 	struct mbuf *m;
1839 	int i;
1840 
1841 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(&sc->sc_dev));
1842 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
1843 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
1844 		    "m_flags = 0x%08x\n", device_xname(&sc->sc_dev),
1845 		    m->m_data, m->m_len, m->m_flags);
1846 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(&sc->sc_dev),
1847 	    i, i == 1 ? "" : "s");
1848 }
1849 
1850 /*
1851  * wm_82547_txfifo_stall:
1852  *
1853  *	Callout used to wait for the 82547 Tx FIFO to drain,
1854  *	reset the FIFO pointers, and restart packet transmission.
1855  */
1856 static void
1857 wm_82547_txfifo_stall(void *arg)
1858 {
1859 	struct wm_softc *sc = arg;
1860 	int s;
1861 
1862 	s = splnet();
1863 
1864 	if (sc->sc_txfifo_stall) {
1865 		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
1866 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
1867 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
1868 			/*
1869 			 * Packets have drained.  Stop transmitter, reset
1870 			 * FIFO pointers, restart transmitter, and kick
1871 			 * the packet queue.
1872 			 */
1873 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
1874 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
1875 			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
1876 			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
1877 			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
1878 			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
1879 			CSR_WRITE(sc, WMREG_TCTL, tctl);
1880 			CSR_WRITE_FLUSH(sc);
1881 
1882 			sc->sc_txfifo_head = 0;
1883 			sc->sc_txfifo_stall = 0;
1884 			wm_start(&sc->sc_ethercom.ec_if);
1885 		} else {
1886 			/*
1887 			 * Still waiting for packets to drain; try again in
1888 			 * another tick.
1889 			 */
1890 			callout_schedule(&sc->sc_txfifo_ch, 1);
1891 		}
1892 	}
1893 
1894 	splx(s);
1895 }
1896 
1897 /*
1898  * wm_82547_txfifo_bugchk:
1899  *
1900  *	Check for bug condition in the 82547 Tx FIFO.  We need to
1901  *	prevent enqueueing a packet that would wrap around the end
1902  *	of the Tx FIFO ring buffer; otherwise the chip will croak.
1903  *
1904  *	We do this by checking the amount of space before the end
1905  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
1906  *	the Tx FIFO, wait for all remaining packets to drain, reset
1907  *	the internal FIFO pointers to the beginning, and restart
1908  *	transmission on the interface.
1909  */
1910 #define	WM_FIFO_HDR		0x10
1911 #define	WM_82547_PAD_LEN	0x3e0
1912 static int
1913 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
1914 {
1915 	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
1916 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
1917 
1918 	/* Just return if already stalled. */
1919 	if (sc->sc_txfifo_stall)
1920 		return (1);
1921 
1922 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
1923 		/* Stall only occurs in half-duplex mode. */
1924 		goto send_packet;
1925 	}
1926 
1927 	if (len >= WM_82547_PAD_LEN + space) {
1928 		sc->sc_txfifo_stall = 1;
1929 		callout_schedule(&sc->sc_txfifo_ch, 1);
1930 		return (1);
1931 	}
1932 
1933  send_packet:
1934 	sc->sc_txfifo_head += len;
1935 	if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
1936 		sc->sc_txfifo_head -= sc->sc_txfifo_size;
1937 
1938 	return (0);
1939 }
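
/*
 * Worked example for the check above (illustrative numbers): with
 * sc_txfifo_size = 0x2800 and sc_txfifo_head = 0x2700, space = 0x100.
 * A 1514-byte frame plus the 0x10 FIFO header rounds up to len = 0x600,
 * and 0x600 >= WM_82547_PAD_LEN + 0x100 (= 0x4e0), so we stall until
 * the FIFO drains rather than let the packet wrap past its end.
 */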
1940 
1941 /*
1942  * wm_start:		[ifnet interface function]
1943  *
1944  *	Start packet transmission on the interface.
1945  */
1946 static void
1947 wm_start(struct ifnet *ifp)
1948 {
1949 	struct wm_softc *sc = ifp->if_softc;
1950 	struct mbuf *m0;
1951 #if 0 /* XXXJRT */
1952 	struct m_tag *mtag;
1953 #endif
1954 	struct wm_txsoft *txs;
1955 	bus_dmamap_t dmamap;
1956 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
1957 	bus_addr_t curaddr;
1958 	bus_size_t seglen, curlen;
1959 	uint32_t cksumcmd;
1960 	uint8_t cksumfields;
1961 
1962 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
1963 		return;
1964 
1965 	/*
1966 	 * Remember the previous number of free descriptors.
1967 	 */
1968 	ofree = sc->sc_txfree;
1969 
1970 	/*
1971 	 * Loop through the send queue, setting up transmit descriptors
1972 	 * until we drain the queue, or use up all available transmit
1973 	 * descriptors.
1974 	 */
1975 	for (;;) {
1976 		/* Grab a packet off the queue. */
1977 		IFQ_POLL(&ifp->if_snd, m0);
1978 		if (m0 == NULL)
1979 			break;
1980 
1981 		DPRINTF(WM_DEBUG_TX,
1982 		    ("%s: TX: have packet to transmit: %p\n",
1983 		    device_xname(&sc->sc_dev), m0));
1984 
1985 		/* Get a work queue entry. */
1986 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
1987 			wm_txintr(sc);
1988 			if (sc->sc_txsfree == 0) {
1989 				DPRINTF(WM_DEBUG_TX,
1990 				    ("%s: TX: no free job descriptors\n",
1991 					device_xname(&sc->sc_dev)));
1992 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
1993 				break;
1994 			}
1995 		}
1996 
1997 		txs = &sc->sc_txsoft[sc->sc_txsnext];
1998 		dmamap = txs->txs_dmamap;
1999 
2000 		use_tso = (m0->m_pkthdr.csum_flags &
2001 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2002 
2003 		/*
2004 		 * So says the Linux driver:
2005 		 * The controller does a simple calculation to make sure
2006 		 * there is enough room in the FIFO before initiating the
2007 		 * DMA for each buffer.  The calc is:
2008 		 *	4 = ceil(buffer len / MSS)
2009 		 * To make sure we don't overrun the FIFO, adjust the max
2010 		 * buffer len if the MSS drops.
2011 		 */
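		/*
		 * E.g. an MSS of 1448 caps each DMA segment at
		 * 4 * 1448 = 5792 bytes (assuming that is below
		 * WTX_MAX_LEN); a smaller MSS shrinks the cap further.
		 */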
2012 		dmamap->dm_maxsegsz =
2013 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2014 		    ? m0->m_pkthdr.segsz << 2
2015 		    : WTX_MAX_LEN;
2016 
2017 		/*
2018 		 * Load the DMA map.  If this fails, the packet either
2019 		 * didn't fit in the allotted number of segments, or we
2020 		 * were short on resources.  For the too-many-segments
2021 		 * case, we simply report an error and drop the packet,
2022 		 * since we can't sanely copy a jumbo packet to a single
2023 		 * buffer.
2024 		 */
2025 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2026 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2027 		if (error) {
2028 			if (error == EFBIG) {
2029 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2030 				log(LOG_ERR, "%s: Tx packet consumes too many "
2031 				    "DMA segments, dropping...\n",
2032 				    device_xname(&sc->sc_dev));
2033 				IFQ_DEQUEUE(&ifp->if_snd, m0);
2034 				wm_dump_mbuf_chain(sc, m0);
2035 				m_freem(m0);
2036 				continue;
2037 			}
2038 			/*
2039 			 * Short on resources, just stop for now.
2040 			 */
2041 			DPRINTF(WM_DEBUG_TX,
2042 			    ("%s: TX: dmamap load failed: %d\n",
2043 			    device_xname(&sc->sc_dev), error));
2044 			break;
2045 		}
2046 
2047 		segs_needed = dmamap->dm_nsegs;
2048 		if (use_tso) {
2049 			/* For sentinel descriptor; see below. */
2050 			segs_needed++;
2051 		}
2052 
2053 		/*
2054 		 * Ensure we have enough descriptors free to describe
2055 		 * the packet.  Note, we always reserve one descriptor
2056 		 * at the end of the ring due to the semantics of the
2057 		 * TDT register, plus one more in the event we need
2058 		 * to load offload context.
2059 		 */
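		/*
		 * E.g. with sc_txfree = 8, a packet needing 7
		 * descriptors is deferred, since 7 > 8 - 2: one
		 * descriptor stays unused for TDT, and one is held
		 * back for a possible context descriptor.
		 */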
2060 		if (segs_needed > sc->sc_txfree - 2) {
2061 			/*
2062 			 * Not enough free descriptors to transmit this
2063 			 * packet.  We haven't committed anything yet,
2064 			 * so just unload the DMA map, put the packet
2065 			 * back on the queue, and punt.  Notify the upper
2066 			 * layer that there are no more slots left.
2067 			 */
2068 			DPRINTF(WM_DEBUG_TX,
2069 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
2070 			    device_xname(&sc->sc_dev), dmamap->dm_nsegs, segs_needed,
2071 			    sc->sc_txfree - 1));
2072 			ifp->if_flags |= IFF_OACTIVE;
2073 			bus_dmamap_unload(sc->sc_dmat, dmamap);
2074 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2075 			break;
2076 		}
2077 
2078 		/*
2079 		 * Check for 82547 Tx FIFO bug.  We need to do this
2080 		 * once we know we can transmit the packet, since we
2081 		 * do some internal FIFO space accounting here.
2082 		 */
2083 		if (sc->sc_type == WM_T_82547 &&
2084 		    wm_82547_txfifo_bugchk(sc, m0)) {
2085 			DPRINTF(WM_DEBUG_TX,
2086 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
2087 			    device_xname(&sc->sc_dev)));
2088 			ifp->if_flags |= IFF_OACTIVE;
2089 			bus_dmamap_unload(sc->sc_dmat, dmamap);
2090 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2091 			break;
2092 		}
2093 
2094 		IFQ_DEQUEUE(&ifp->if_snd, m0);
2095 
2096 		/*
2097 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2098 		 */
2099 
2100 		DPRINTF(WM_DEBUG_TX,
2101 		    ("%s: TX: packet has %d (%d) DMA segments\n",
2102 		    device_xname(&sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2103 
2104 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2105 
2106 		/*
2107 		 * Store a pointer to the packet so that we can free it
2108 		 * later.
2109 		 *
2110 		 * Initially, we consider the number of descriptors the
2111 		 * packet uses the number of DMA segments.  This may be
2112 		 * incremented by 1 if we do checksum offload (a descriptor
2113 		 * is used to set the checksum context).
2114 		 */
2115 		txs->txs_mbuf = m0;
2116 		txs->txs_firstdesc = sc->sc_txnext;
2117 		txs->txs_ndesc = segs_needed;
2118 
2119 		/* Set up offload parameters for this packet. */
2120 		if (m0->m_pkthdr.csum_flags &
2121 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
2122 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2123 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2124 			if (wm_tx_offload(sc, txs, &cksumcmd,
2125 					  &cksumfields) != 0) {
2126 				/* Error message already displayed. */
2127 				bus_dmamap_unload(sc->sc_dmat, dmamap);
2128 				continue;
2129 			}
2130 		} else {
2131 			cksumcmd = 0;
2132 			cksumfields = 0;
2133 		}
2134 
2135 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2136 
2137 		/* Sync the DMA map. */
2138 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2139 		    BUS_DMASYNC_PREWRITE);
2140 
2141 		/*
2142 		 * Initialize the transmit descriptor.
2143 		 */
2144 		for (nexttx = sc->sc_txnext, seg = 0;
2145 		     seg < dmamap->dm_nsegs; seg++) {
2146 			for (seglen = dmamap->dm_segs[seg].ds_len,
2147 			     curaddr = dmamap->dm_segs[seg].ds_addr;
2148 			     seglen != 0;
2149 			     curaddr += curlen, seglen -= curlen,
2150 			     nexttx = WM_NEXTTX(sc, nexttx)) {
2151 				curlen = seglen;
2152 
2153 				/*
2154 				 * So says the Linux driver:
2155 				 * Work around for premature descriptor
2156 				 * write-backs in TSO mode.  Append a
2157 				 * 4-byte sentinel descriptor.
2158 				 */
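				/*
				 * Trimming 4 bytes from the final
				 * chunk leaves seglen = 4 for one more
				 * pass through this loop, which emits
				 * the extra sentinel descriptor counted
				 * in segs_needed above.
				 */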
2159 				if (use_tso &&
2160 				    seg == dmamap->dm_nsegs - 1 &&
2161 				    curlen > 8)
2162 					curlen -= 4;
2163 
2164 				wm_set_dma_addr(
2165 				    &sc->sc_txdescs[nexttx].wtx_addr,
2166 				    curaddr);
2167 				sc->sc_txdescs[nexttx].wtx_cmdlen =
2168 				    htole32(cksumcmd | curlen);
2169 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2170 				    0;
2171 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2172 				    cksumfields;
2173 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2174 				lasttx = nexttx;
2175 
2176 				DPRINTF(WM_DEBUG_TX,
2177 				    ("%s: TX: desc %d: low 0x%08lx, "
2178 				     "len 0x%04x\n",
2179 				    device_xname(&sc->sc_dev), nexttx,
2180 				    curaddr & 0xffffffffUL, (unsigned)curlen));
2181 			}
2182 		}
2183 
2184 		KASSERT(lasttx != -1);
2185 
2186 		/*
2187 		 * Set up the command byte on the last descriptor of
2188 		 * the packet.  If we're in the interrupt delay window,
2189 		 * delay the interrupt.
2190 		 */
2191 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
2192 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
2193 
2194 #if 0 /* XXXJRT */
2195 		/*
2196 		 * If VLANs are enabled and the packet has a VLAN tag, set
2197 		 * up the descriptor to encapsulate the packet for us.
2198 		 *
2199 		 * This is only valid on the last descriptor of the packet.
2200 		 */
2201 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2202 			sc->sc_txdescs[lasttx].wtx_cmdlen |=
2203 			    htole32(WTX_CMD_VLE);
2204 			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2205 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2206 		}
2207 #endif /* XXXJRT */
2208 
2209 		txs->txs_lastdesc = lasttx;
2210 
2211 		DPRINTF(WM_DEBUG_TX,
2212 		    ("%s: TX: desc %d: cmdlen 0x%08x\n", device_xname(&sc->sc_dev),
2213 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2214 
2215 		/* Sync the descriptors we're using. */
2216 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2217 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2218 
2219 		/* Give the packet to the chip. */
2220 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2221 
2222 		DPRINTF(WM_DEBUG_TX,
2223 		    ("%s: TX: TDT -> %d\n", device_xname(&sc->sc_dev), nexttx));
2224 
2225 		DPRINTF(WM_DEBUG_TX,
2226 		    ("%s: TX: finished transmitting packet, job %d\n",
2227 		    device_xname(&sc->sc_dev), sc->sc_txsnext));
2228 
2229 		/* Advance the tx pointer. */
2230 		sc->sc_txfree -= txs->txs_ndesc;
2231 		sc->sc_txnext = nexttx;
2232 
2233 		sc->sc_txsfree--;
2234 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2235 
2236 #if NBPFILTER > 0
2237 		/* Pass the packet to any BPF listeners. */
2238 		if (ifp->if_bpf)
2239 			bpf_mtap(ifp->if_bpf, m0);
2240 #endif /* NBPFILTER > 0 */
2241 	}
2242 
2243 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2244 		/* No more slots; notify upper layer. */
2245 		ifp->if_flags |= IFF_OACTIVE;
2246 	}
2247 
2248 	if (sc->sc_txfree != ofree) {
2249 		/* Set a watchdog timer in case the chip flakes out. */
2250 		ifp->if_timer = 5;
2251 	}
2252 }
2253 
2254 /*
2255  * wm_watchdog:		[ifnet interface function]
2256  *
2257  *	Watchdog timer handler.
2258  */
2259 static void
2260 wm_watchdog(struct ifnet *ifp)
2261 {
2262 	struct wm_softc *sc = ifp->if_softc;
2263 
2264 	/*
2265 	 * Since we're using delayed interrupts, sweep up
2266 	 * before we report an error.
2267 	 */
2268 	wm_txintr(sc);
2269 
2270 	if (sc->sc_txfree != WM_NTXDESC(sc)) {
2271 		log(LOG_ERR,
2272 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2273 		    device_xname(&sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2274 		    sc->sc_txnext);
2275 		ifp->if_oerrors++;
2276 
2277 		/* Reset the interface. */
2278 		(void) wm_init(ifp);
2279 	}
2280 
2281 	/* Try to get more packets going. */
2282 	wm_start(ifp);
2283 }
2284 
2285 /*
2286  * wm_ioctl:		[ifnet interface function]
2287  *
2288  *	Handle control requests from the operator.
2289  */
2290 static int
2291 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2292 {
2293 	struct wm_softc *sc = ifp->if_softc;
2294 	struct ifreq *ifr = (struct ifreq *) data;
2295 	int s, error;
2296 
2297 	s = splnet();
2298 
2299 	switch (cmd) {
2300 	case SIOCSIFMEDIA:
2301 	case SIOCGIFMEDIA:
2302 		/* Flow control requires full-duplex mode. */
2303 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2304 		    (ifr->ifr_media & IFM_FDX) == 0)
2305 			ifr->ifr_media &= ~IFM_ETH_FMASK;
2306 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2307 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2308 				/* We can do both TXPAUSE and RXPAUSE. */
2309 				ifr->ifr_media |=
2310 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2311 			}
2312 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2313 		}
2314 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2315 		break;
2316 	default:
2317 		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
2318 			break;
2319 
2320 		error = 0;
2321 
2322 		if (cmd == SIOCSIFCAP)
2323 			error = (*ifp->if_init)(ifp);
2324 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2325 			;
2326 		else if (ifp->if_flags & IFF_RUNNING) {
2327 			/*
2328 			 * Multicast list has changed; set the hardware filter
2329 			 * accordingly.
2330 			 */
2331 			wm_set_filter(sc);
2332 		}
2333 		break;
2334 	}
2335 
2336 	/* Try to get more packets going. */
2337 	wm_start(ifp);
2338 
2339 	splx(s);
2340 	return (error);
2341 }
2342 
2343 /*
2344  * wm_intr:
2345  *
2346  *	Interrupt service routine.
2347  */
2348 static int
2349 wm_intr(void *arg)
2350 {
2351 	struct wm_softc *sc = arg;
2352 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2353 	uint32_t icr;
2354 	int handled = 0;
2355 
2356 	while (1 /* CONSTCOND */) {
2357 		icr = CSR_READ(sc, WMREG_ICR);
2358 		if ((icr & sc->sc_icr) == 0)
2359 			break;
2360 #if 0 /*NRND > 0*/
2361 		if (RND_ENABLED(&sc->rnd_source))
2362 			rnd_add_uint32(&sc->rnd_source, icr);
2363 #endif
2364 
2365 		handled = 1;
2366 
2367 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2368 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
2369 			DPRINTF(WM_DEBUG_RX,
2370 			    ("%s: RX: got Rx intr 0x%08x\n",
2371 			    device_xname(&sc->sc_dev),
2372 			    icr & (ICR_RXDMT0|ICR_RXT0)));
2373 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
2374 		}
2375 #endif
2376 		wm_rxintr(sc);
2377 
2378 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2379 		if (icr & ICR_TXDW) {
2380 			DPRINTF(WM_DEBUG_TX,
2381 			    ("%s: TX: got TXDW interrupt\n",
2382 			    device_xname(&sc->sc_dev)));
2383 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
2384 		}
2385 #endif
2386 		wm_txintr(sc);
2387 
2388 		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
2389 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
2390 			wm_linkintr(sc, icr);
2391 		}
2392 
2393 		if (icr & ICR_RXO) {
2394 			ifp->if_ierrors++;
2395 #if defined(WM_DEBUG)
2396 			log(LOG_WARNING, "%s: Receive overrun\n",
2397 			    device_xname(&sc->sc_dev));
2398 #endif /* defined(WM_DEBUG) */
2399 		}
2400 	}
2401 
2402 	if (handled) {
2403 		/* Try to get more packets going. */
2404 		wm_start(ifp);
2405 	}
2406 
2407 	return (handled);
2408 }
2409 
2410 /*
2411  * wm_txintr:
2412  *
2413  *	Helper; handle transmit interrupts.
2414  */
2415 static void
2416 wm_txintr(struct wm_softc *sc)
2417 {
2418 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2419 	struct wm_txsoft *txs;
2420 	uint8_t status;
2421 	int i;
2422 
2423 	ifp->if_flags &= ~IFF_OACTIVE;
2424 
2425 	/*
2426 	 * Go through the Tx list and free mbufs for those
2427 	 * frames which have been transmitted.
2428 	 */
2429 	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
2430 	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
2431 		txs = &sc->sc_txsoft[i];
2432 
2433 		DPRINTF(WM_DEBUG_TX,
2434 		    ("%s: TX: checking job %d\n", device_xname(&sc->sc_dev), i));
2435 
2436 		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
2437 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2438 
2439 		status =
2440 		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
2441 		if ((status & WTX_ST_DD) == 0) {
2442 			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
2443 			    BUS_DMASYNC_PREREAD);
2444 			break;
2445 		}
2446 
2447 		DPRINTF(WM_DEBUG_TX,
2448 		    ("%s: TX: job %d done: descs %d..%d\n",
2449 		    device_xname(&sc->sc_dev), i, txs->txs_firstdesc,
2450 		    txs->txs_lastdesc));
2451 
2452 		/*
2453 		 * XXX We should probably be using the statistics
2454 		 * XXX registers, but I don't know if they exist
2455 		 * XXX on chips before the i82544.
2456 		 */
2457 
2458 #ifdef WM_EVENT_COUNTERS
2459 		if (status & WTX_ST_TU)
2460 			WM_EVCNT_INCR(&sc->sc_ev_tu);
2461 #endif /* WM_EVENT_COUNTERS */
2462 
2463 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
2464 			ifp->if_oerrors++;
2465 			if (status & WTX_ST_LC)
2466 				log(LOG_WARNING, "%s: late collision\n",
2467 				    device_xname(&sc->sc_dev));
2468 			else if (status & WTX_ST_EC) {
2469 				ifp->if_collisions += 16;
2470 				log(LOG_WARNING, "%s: excessive collisions\n",
2471 				    device_xname(&sc->sc_dev));
2472 			}
2473 		} else
2474 			ifp->if_opackets++;
2475 
2476 		sc->sc_txfree += txs->txs_ndesc;
2477 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
2478 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2479 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2480 		m_freem(txs->txs_mbuf);
2481 		txs->txs_mbuf = NULL;
2482 	}
2483 
2484 	/* Update the dirty transmit buffer pointer. */
2485 	sc->sc_txsdirty = i;
2486 	DPRINTF(WM_DEBUG_TX,
2487 	    ("%s: TX: txsdirty -> %d\n", device_xname(&sc->sc_dev), i));
2488 
2489 	/*
2490 	 * If there are no more pending transmissions, cancel the watchdog
2491 	 * timer.
2492 	 */
2493 	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
2494 		ifp->if_timer = 0;
2495 }
2496 
2497 /*
2498  * wm_rxintr:
2499  *
2500  *	Helper; handle receive interrupts.
2501  */
2502 static void
2503 wm_rxintr(struct wm_softc *sc)
2504 {
2505 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2506 	struct wm_rxsoft *rxs;
2507 	struct mbuf *m;
2508 	int i, len;
2509 	uint8_t status, errors;
2510 
2511 	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
2512 		rxs = &sc->sc_rxsoft[i];
2513 
2514 		DPRINTF(WM_DEBUG_RX,
2515 		    ("%s: RX: checking descriptor %d\n",
2516 		    device_xname(&sc->sc_dev), i));
2517 
2518 		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2519 
2520 		status = sc->sc_rxdescs[i].wrx_status;
2521 		errors = sc->sc_rxdescs[i].wrx_errors;
2522 		len = le16toh(sc->sc_rxdescs[i].wrx_len);
2523 
2524 		if ((status & WRX_ST_DD) == 0) {
2525 			/*
2526 			 * We have processed all of the receive descriptors.
2527 			 */
2528 			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
2529 			break;
2530 		}
2531 
2532 		if (__predict_false(sc->sc_rxdiscard)) {
2533 			DPRINTF(WM_DEBUG_RX,
2534 			    ("%s: RX: discarding contents of descriptor %d\n",
2535 			    device_xname(&sc->sc_dev), i));
2536 			WM_INIT_RXDESC(sc, i);
2537 			if (status & WRX_ST_EOP) {
2538 				/* Reset our state. */
2539 				DPRINTF(WM_DEBUG_RX,
2540 				    ("%s: RX: resetting rxdiscard -> 0\n",
2541 				    device_xname(&sc->sc_dev)));
2542 				sc->sc_rxdiscard = 0;
2543 			}
2544 			continue;
2545 		}
2546 
2547 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2548 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2549 
2550 		m = rxs->rxs_mbuf;
2551 
2552 		/*
2553 		 * Add a new receive buffer to the ring, unless of
2554 		 * course the length is zero. Treat the latter as a
2555 		 * failed mapping.
2556 		 */
2557 		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
2558 			/*
2559 			 * Failed, throw away what we've done so
2560 			 * far, and discard the rest of the packet.
2561 			 */
2562 			ifp->if_ierrors++;
2563 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2564 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2565 			WM_INIT_RXDESC(sc, i);
2566 			if ((status & WRX_ST_EOP) == 0)
2567 				sc->sc_rxdiscard = 1;
2568 			if (sc->sc_rxhead != NULL)
2569 				m_freem(sc->sc_rxhead);
2570 			WM_RXCHAIN_RESET(sc);
2571 			DPRINTF(WM_DEBUG_RX,
2572 			    ("%s: RX: Rx buffer allocation failed, "
2573 			    "dropping packet%s\n", device_xname(&sc->sc_dev),
2574 			    sc->sc_rxdiscard ? " (discard)" : ""));
2575 			continue;
2576 		}
2577 
2578 		WM_RXCHAIN_LINK(sc, m);
2579 
2580 		m->m_len = len;
2581 
2582 		DPRINTF(WM_DEBUG_RX,
2583 		    ("%s: RX: buffer at %p len %d\n",
2584 		    device_xname(&sc->sc_dev), m->m_data, len));
2585 
2586 		/*
2587 		 * If this is not the end of the packet, keep
2588 		 * looking.
2589 		 */
2590 		if ((status & WRX_ST_EOP) == 0) {
2591 			sc->sc_rxlen += len;
2592 			DPRINTF(WM_DEBUG_RX,
2593 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
2594 			    device_xname(&sc->sc_dev), sc->sc_rxlen));
2595 			continue;
2596 		}
2597 
2598 		/*
2599 		 * Okay, we have the entire packet now.  The chip is
2600 		 * configured to include the FCS (not all chips can
2601 		 * be configured to strip it), so we need to trim it.
2602 		 */
2603 		m->m_len -= ETHER_CRC_LEN;
2604 
2605 		*sc->sc_rxtailp = NULL;
2606 		len = m->m_len + sc->sc_rxlen;
2607 		m = sc->sc_rxhead;
2608 
2609 		WM_RXCHAIN_RESET(sc);
2610 
2611 		DPRINTF(WM_DEBUG_RX,
2612 		    ("%s: RX: have entire packet, len -> %d\n",
2613 		    device_xname(&sc->sc_dev), len));
2614 
2615 		/*
2616 		 * If an error occurred, update stats and drop the packet.
2617 		 */
2618 		if (errors &
2619 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
2620 			ifp->if_ierrors++;
2621 			if (errors & WRX_ER_SE)
2622 				log(LOG_WARNING, "%s: symbol error\n",
2623 				    device_xname(&sc->sc_dev));
2624 			else if (errors & WRX_ER_SEQ)
2625 				log(LOG_WARNING, "%s: receive sequence error\n",
2626 				    device_xname(&sc->sc_dev));
2627 			else if (errors & WRX_ER_CE)
2628 				log(LOG_WARNING, "%s: CRC error\n",
2629 				    device_xname(&sc->sc_dev));
2630 			m_freem(m);
2631 			continue;
2632 		}
2633 
2634 		/*
2635 		 * No errors.  Receive the packet.
2636 		 */
2637 		m->m_pkthdr.rcvif = ifp;
2638 		m->m_pkthdr.len = len;
2639 
2640 #if 0 /* XXXJRT */
2641 		/*
2642 		 * If VLANs are enabled, VLAN packets have been unwrapped
2643 		 * for us.  Associate the tag with the packet.
2644 		 */
2645 		if ((status & WRX_ST_VP) != 0) {
2646 			VLAN_INPUT_TAG(ifp, m,
2647 			    le16toh(sc->sc_rxdescs[i].wrx_special),
2648 			    continue);
2649 		}
2650 #endif /* XXXJRT */
2651 
2652 		/*
2653 		 * Set up checksum info for this packet.
2654 		 */
2655 		if ((status & WRX_ST_IXSM) == 0) {
2656 			if (status & WRX_ST_IPCS) {
2657 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
2658 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2659 				if (errors & WRX_ER_IPE)
2660 					m->m_pkthdr.csum_flags |=
2661 					    M_CSUM_IPv4_BAD;
2662 			}
2663 			if (status & WRX_ST_TCPCS) {
2664 				/*
2665 				 * Note: we don't know if this was TCP or UDP,
2666 				 * so we just set both bits, and expect the
2667 				 * upper layers to deal.
2668 				 */
2669 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
2670 				m->m_pkthdr.csum_flags |=
2671 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
2672 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
2673 				if (errors & WRX_ER_TCPE)
2674 					m->m_pkthdr.csum_flags |=
2675 					    M_CSUM_TCP_UDP_BAD;
2676 			}
2677 		}
2678 
2679 		ifp->if_ipackets++;
2680 
2681 #if NBPFILTER > 0
2682 		/* Pass this up to any BPF listeners. */
2683 		if (ifp->if_bpf)
2684 			bpf_mtap(ifp->if_bpf, m);
2685 #endif /* NBPFILTER > 0 */
2686 
2687 		/* Pass it on. */
2688 		(*ifp->if_input)(ifp, m);
2689 	}
2690 
2691 	/* Update the receive pointer. */
2692 	sc->sc_rxptr = i;
2693 
2694 	DPRINTF(WM_DEBUG_RX,
2695 	    ("%s: RX: rxptr -> %d\n", device_xname(&sc->sc_dev), i));
2696 }
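
/*
 * For reference: WM_RXCHAIN_LINK() and WM_RXCHAIN_RESET() used above
 * implement a simple tail-pointer mbuf chain.  A minimal sketch,
 * consistent with how wm_rxintr() uses sc_rxhead, sc_rxtailp and
 * sc_rxlen (the real definitions live earlier in this file):
 */
#if 0
#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (m);					\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
#endif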
2697 
2698 /*
2699  * wm_linkintr:
2700  *
2701  *	Helper; handle link interrupts.
2702  */
2703 static void
2704 wm_linkintr(struct wm_softc *sc, uint32_t icr)
2705 {
2706 	uint32_t status;
2707 
2708 	/*
2709 	 * If we get a link status interrupt on a 1000BASE-T
2710 	 * device, just fall into the normal MII tick path.
2711 	 */
2712 	if (sc->sc_flags & WM_F_HAS_MII) {
2713 		if (icr & ICR_LSC) {
2714 			DPRINTF(WM_DEBUG_LINK,
2715 			    ("%s: LINK: LSC -> mii_tick\n",
2716 			    device_xname(&sc->sc_dev)));
2717 			mii_tick(&sc->sc_mii);
2718 		} else if (icr & ICR_RXSEQ) {
2719 			DPRINTF(WM_DEBUG_LINK,
2720 			    ("%s: LINK Receive sequence error\n",
2721 			    device_xname(&sc->sc_dev)));
2722 		}
2723 		return;
2724 	}
2725 
2726 	/*
2727 	 * If we are now receiving /C/, check for link again in
2728 	 * a couple of link clock ticks.
2729 	 */
2730 	if (icr & ICR_RXCFG) {
2731 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
2732 		    device_xname(&sc->sc_dev)));
2733 		sc->sc_tbi_anstate = 2;
2734 	}
2735 
2736 	if (icr & ICR_LSC) {
2737 		status = CSR_READ(sc, WMREG_STATUS);
2738 		if (status & STATUS_LU) {
2739 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
2740 			    device_xname(&sc->sc_dev),
2741 			    (status & STATUS_FD) ? "FDX" : "HDX"));
2742 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2743 			sc->sc_fcrtl &= ~FCRTL_XONE;
2744 			if (status & STATUS_FD)
2745 				sc->sc_tctl |=
2746 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2747 			else
2748 				sc->sc_tctl |=
2749 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2750 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
2751 				sc->sc_fcrtl |= FCRTL_XONE;
2752 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2753 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
2754 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
2755 				      sc->sc_fcrtl);
2756 			sc->sc_tbi_linkup = 1;
2757 		} else {
2758 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
2759 			    device_xname(&sc->sc_dev)));
2760 			sc->sc_tbi_linkup = 0;
2761 		}
2762 		sc->sc_tbi_anstate = 2;
2763 		wm_tbi_set_linkled(sc);
2764 	} else if (icr & ICR_RXSEQ) {
2765 		DPRINTF(WM_DEBUG_LINK,
2766 		    ("%s: LINK: Receive sequence error\n",
2767 		    device_xname(&sc->sc_dev)));
2768 	}
2769 }
2770 
2771 /*
2772  * wm_tick:
2773  *
2774  *	One second timer, used to check link status, sweep up
2775  *	completed transmit jobs, etc.
2776  */
2777 static void
2778 wm_tick(void *arg)
2779 {
2780 	struct wm_softc *sc = arg;
2781 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2782 	int s;
2783 
2784 	s = splnet();
2785 
2786 	if (sc->sc_type >= WM_T_82542_2_1) {
2787 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2788 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2789 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2790 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2791 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2792 	}
2793 
2794 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2795 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
2796 
2798 	if (sc->sc_flags & WM_F_HAS_MII)
2799 		mii_tick(&sc->sc_mii);
2800 	else
2801 		wm_tbi_check_link(sc);
2802 
2803 	splx(s);
2804 
2805 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2806 }
2807 
2808 /*
2809  * wm_reset:
2810  *
2811  *	Reset the i82542 chip.
2812  */
2813 static void
2814 wm_reset(struct wm_softc *sc)
2815 {
2816 	uint32_t reg;
2817 
2818 	/*
2819 	 * Allocate on-chip memory according to the MTU size.
2820 	 * The Packet Buffer Allocation register must be written
2821 	 * before the chip is reset.
2822 	 */
2823 	switch (sc->sc_type) {
2824 	case WM_T_82547:
2825 	case WM_T_82547_2:
2826 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
2827 		    PBA_22K : PBA_30K;
2828 		sc->sc_txfifo_head = 0;
2829 		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
2830 		sc->sc_txfifo_size =
2831 		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
2832 		sc->sc_txfifo_stall = 0;
2833 		break;
2834 	case WM_T_82571:
2835 	case WM_T_82572:
2836 	case WM_T_80003:
2837 		sc->sc_pba = PBA_32K;
2838 		break;
2839 	case WM_T_82573:
2840 		sc->sc_pba = PBA_12K;
2841 		break;
2842 	case WM_T_ICH8:
2843 		sc->sc_pba = PBA_8K;
2844 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
2845 		break;
2846 	case WM_T_ICH9:
2847 		sc->sc_pba = PBA_10K;
2848 		break;
2849 	default:
2850 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
2851 		    PBA_40K : PBA_48K;
2852 		break;
2853 	}
2854 	CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
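	/*
	 * E.g. an 82547 at the standard MTU gets sc_pba = PBA_30K
	 * (presumably 30 KB of packet buffer for receive), leaving the
	 * remaining 10 KB of the 40 KB total as the Tx FIFO whose
	 * head/address/size were recorded above for the FIFO bug
	 * workaround.
	 */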
2855 
2856 	if (sc->sc_flags & WM_F_PCIE) {
2857 		int timeout = 800;
2858 
2859 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
2860 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2861 
2862 		while (timeout--) {
2863 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
2864 				break;
2865 			delay(100);
2866 		}
2867 	}
2868 
2869 	/* clear interrupt */
2870 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
2871 
2872 	/*
2873 	 * 82541 Errata 29? & 82547 Errata 28?
2874 	 * See also the description about PHY_RST bit in CTRL register
2875 	 * in 8254x_GBe_SDM.pdf.
2876 	 */
2877 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
2878 		CSR_WRITE(sc, WMREG_CTRL,
2879 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
2880 		delay(5000);
2881 	}
2882 
2883 	switch (sc->sc_type) {
2884 	case WM_T_82544:
2885 	case WM_T_82540:
2886 	case WM_T_82545:
2887 	case WM_T_82546:
2888 	case WM_T_82541:
2889 	case WM_T_82541_2:
2890 		/*
2891 		 * On some chipsets, a reset through a memory-mapped write
2892 		 * cycle can cause the chip to reset before completing the
2893 		 * write cycle.  This causes a major headache that can be
2894 		 * avoided by issuing the reset via indirect register writes
2895 		 * through I/O space.
2896 		 *
2897 		 * So, if we successfully mapped the I/O BAR at attach time,
2898 		 * use that.  Otherwise, try our luck with a memory-mapped
2899 		 * reset.
2900 		 */
2901 		if (sc->sc_flags & WM_F_IOH_VALID)
2902 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
2903 		else
2904 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2905 		break;
2906 
2907 	case WM_T_82545_3:
2908 	case WM_T_82546_3:
2909 		/* Use the shadow control register on these chips. */
2910 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
2911 		break;
2912 
2913 	case WM_T_ICH8:
2914 	case WM_T_ICH9:
2915 		wm_get_swfwhw_semaphore(sc);
2916 		CSR_WRITE(sc, WMREG_CTRL, CTRL_RST | CTRL_PHY_RESET);
2917 		delay(10000);
		break;
2918 
2919 	default:
2920 		/* Everything else can safely use the documented method. */
2921 		CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2922 		break;
2923 	}
2924 	delay(10000);
2925 
2926 	/* reload EEPROM */
2927 	switch(sc->sc_type) {
2928 	case WM_T_82542_2_0:
2929 	case WM_T_82542_2_1:
2930 	case WM_T_82543:
2931 	case WM_T_82544:
2932 		delay(10);
2933 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
2934 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2935 		delay(2000);
2936 		break;
2937 	case WM_T_82541:
2938 	case WM_T_82541_2:
2939 	case WM_T_82547:
2940 	case WM_T_82547_2:
2941 		delay(20000);
2942 		break;
2943 	case WM_T_82573:
2944 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
2945 			delay(10);
2946 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
2947 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2948 		}
2949 		/* FALLTHROUGH */
2950 	default:
2951 		/* check EECD_EE_AUTORD */
2952 		wm_get_auto_rd_done(sc);
2953 	}
2954 
2955 #if 0
2956 	for (i = 0; i < 1000; i++) {
2957 		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0) {
2958 			return;
2959 		}
2960 		delay(20);
2961 	}
2962 
2963 	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
2964 		log(LOG_ERR, "%s: reset failed to complete\n",
2965 		    device_xname(&sc->sc_dev));
2966 #endif
2967 }
2968 
2969 /*
2970  * wm_init:		[ifnet interface function]
2971  *
2972  *	Initialize the interface.  Must be called at splnet().
2973  */
2974 static int
2975 wm_init(struct ifnet *ifp)
2976 {
2977 	struct wm_softc *sc = ifp->if_softc;
2978 	struct wm_rxsoft *rxs;
2979 	int i, error = 0;
2980 	uint32_t reg;
2981 
2982 	/*
2983 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
2984 	 * There is a small but measurable benefit to avoiding the adjustment
2985 	 * of the descriptor so that the headers are aligned, for normal mtu,
2986 	 * on such platforms.  One possibility is that the DMA itself is
2987 	 * slightly more efficient if the front of the entire packet (instead
2988 	 * of the front of the headers) is aligned.
2989 	 *
2990 	 * Note we must always set align_tweak to 0 if we are using
2991 	 * jumbo frames.
2992 	 */
2993 #ifdef __NO_STRICT_ALIGNMENT
2994 	sc->sc_align_tweak = 0;
2995 #else
2996 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
2997 		sc->sc_align_tweak = 0;
2998 	else
2999 		sc->sc_align_tweak = 2;
3000 #endif /* __NO_STRICT_ALIGNMENT */
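	/*
	 * With align_tweak = 2, the 14-byte Ethernet header starts at a
	 * 2-byte offset, so the IP header that follows lands on a 4-byte
	 * boundary; this is the usual trick on strict-alignment platforms.
	 */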
3001 
3002 	/* Cancel any pending I/O. */
3003 	wm_stop(ifp, 0);
3004 
3005 	/* update statistics before reset */
3006 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3007 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3008 
3009 	/* Reset the chip to a known state. */
3010 	wm_reset(sc);
3011 
3012 	/* Initialize the transmit descriptor ring. */
3013 	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3014 	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3015 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3016 	sc->sc_txfree = WM_NTXDESC(sc);
3017 	sc->sc_txnext = 0;
3018 
3019 	if (sc->sc_type < WM_T_82543) {
3020 		CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
3021 		CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
3022 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3023 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3024 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3025 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3026 	} else {
3027 		CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
3028 		CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
3029 		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3030 		CSR_WRITE(sc, WMREG_TDH, 0);
3031 		CSR_WRITE(sc, WMREG_TDT, 0);
3032 		CSR_WRITE(sc, WMREG_TIDV, 375);		/* ITR / 4 */
3033 		CSR_WRITE(sc, WMREG_TADV, 375);		/* should be same */
3034 
3035 		CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3036 		    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3037 		CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3038 		    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3039 	}
3040 	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3041 	CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3042 
3043 	/* Initialize the transmit job descriptors. */
3044 	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3045 		sc->sc_txsoft[i].txs_mbuf = NULL;
3046 	sc->sc_txsfree = WM_TXQUEUELEN(sc);
3047 	sc->sc_txsnext = 0;
3048 	sc->sc_txsdirty = 0;
3049 
3050 	/*
3051 	 * Initialize the receive descriptor and receive job
3052 	 * descriptor rings.
3053 	 */
3054 	if (sc->sc_type < WM_T_82543) {
3055 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3056 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3057 		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3058 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3059 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3060 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3061 
3062 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3063 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3064 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3065 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3066 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3067 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3068 	} else {
3069 		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3070 		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3071 		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3072 		CSR_WRITE(sc, WMREG_RDH, 0);
3073 		CSR_WRITE(sc, WMREG_RDT, 0);
3074 		CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD);	/* ITR/4 */
3075 		CSR_WRITE(sc, WMREG_RADV, 375);		/* MUST be same */
3076 	}
3077 	for (i = 0; i < WM_NRXDESC; i++) {
3078 		rxs = &sc->sc_rxsoft[i];
3079 		if (rxs->rxs_mbuf == NULL) {
3080 			if ((error = wm_add_rxbuf(sc, i)) != 0) {
3081 				log(LOG_ERR, "%s: unable to allocate or map rx "
3082 				    "buffer %d, error = %d\n",
3083 				    device_xname(&sc->sc_dev), i, error);
3084 				/*
3085 				 * XXX Should attempt to run with fewer receive
3086 				 * XXX buffers instead of just failing.
3087 				 */
3088 				wm_rxdrain(sc);
3089 				goto out;
3090 			}
3091 		} else
3092 			WM_INIT_RXDESC(sc, i);
3093 	}
3094 	sc->sc_rxptr = 0;
3095 	sc->sc_rxdiscard = 0;
3096 	WM_RXCHAIN_RESET(sc);
3097 
3098 	/*
3099 	 * Clear out the VLAN table -- we don't use it (yet).
3100 	 */
3101 	CSR_WRITE(sc, WMREG_VET, 0);
3102 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
3103 		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3104 
3105 	/*
3106 	 * Set up flow-control parameters.
3107 	 *
3108 	 * XXX Values could probably stand some tuning.
3109 	 */
3110 	if (sc->sc_type != WM_T_ICH8) {
3111 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3112 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3113 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3114 	}
3115 
3116 	sc->sc_fcrtl = FCRTL_DFLT;
3117 	if (sc->sc_type < WM_T_82543) {
3118 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3119 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3120 	} else {
3121 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3122 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3123 	}
3124 	CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3125 
3126 #if 0 /* XXXJRT */
3127 	/* Deal with VLAN enables. */
3128 	if (VLAN_ATTACHED(&sc->sc_ethercom))
3129 		sc->sc_ctrl |= CTRL_VME;
3130 	else
3131 #endif /* XXXJRT */
3132 		sc->sc_ctrl &= ~CTRL_VME;
3133 
3134 	/* Write the control registers. */
3135 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3136 	if (sc->sc_type >= WM_T_80003 && (sc->sc_flags & WM_F_HAS_MII)) {
3137 		int val;
3138 		val = CSR_READ(sc, WMREG_CTRL_EXT);
3139 		val &= ~CTRL_EXT_LINK_MODE_MASK;
3140 		CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3141 
3142 		/* Bypass RX and TX FIFOs */
3143 		wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3144 		    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
3145 		    KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3146 
3147 		wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3148 		    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3149 		    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3150 		/*
3151 		 * Set the mac to wait the maximum time between each
3152 		 * iteration and increase the max iterations when
3153 		 * polling the phy; this fixes erroneous timeouts at 10Mbps.
3154 		 */
3155 		wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS, 0xFFFF);
3156 		val = wm_kmrn_i80003_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
3157 		val |= 0x3F;
3158 		wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM, val);
3159 	}
3160 #if 0
3161 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3162 #endif
3163 
3164 	/*
3165 	 * Set up checksum offload parameters.
3166 	 */
3167 	reg = CSR_READ(sc, WMREG_RXCSUM);
3168 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3169 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3170 		reg |= RXCSUM_IPOFL;
3171 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3172 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3173 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3174 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3175 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
3176 
3177 	/*
3178 	 * Set up the interrupt registers.
3179 	 */
3180 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3181 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3182 	    ICR_RXO | ICR_RXT0;
3183 	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
3184 		sc->sc_icr |= ICR_RXCFG;
3185 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
3186 
3187 	/* Set up the inter-packet gap. */
3188 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
3189 
3190 	if (sc->sc_type >= WM_T_82543) {
3191 		/*
3192 		 * Set up the interrupt throttling register (units of 256ns)
3193 		 * Note that a footnote in Intel's documentation says this
3194 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
3195 		 * or 10Mbit mode.  Empirically, it appears to be the case
3196 		 * that that is also true for the 1024ns units of the other
3197 		 * interrupt-related timer registers -- so, really, we ought
3198 		 * to divide this value by 4 when the link speed is low.
3199 		 *
3200 		 * XXX implement this division at link speed change!
3201 		 */
3202 
3203 		 /*
3204 		  * For N interrupts/sec, set this value to:
3205 		  * 1000000000 / (N * 256).  Note that we set the
3206 		  * absolute and packet timer values to this value
3207 		  * divided by 4 to get "simple timer" behavior.
3208 		  */
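		 /*
		  * E.g. sc_itr = 1500 gives 1000000000 / (1500 * 256)
		  * =~ 2604 interrupts/sec, which is where the "2604
		  * ints/sec" figure below comes from.
		  */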
3209 
3210 		sc->sc_itr = 1500;		/* 2604 ints/sec */
3211 		CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
3212 	}
3213 
3214 #if 0 /* XXXJRT */
3215 	/* Set the VLAN ethernetype. */
3216 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
3217 #endif
3218 
3219 	/*
3220 	 * Set up the transmit control register; we start out with
3221 	 * a collision distance suitable for FDX, but update it when
3222 	 * we resolve the media type.
3223 	 */
3224 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
3225 	    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3226 	if (sc->sc_type >= WM_T_82571)
3227 		sc->sc_tctl |= TCTL_MULR;
3228 	if (sc->sc_type >= WM_T_80003)
3229 		sc->sc_tctl |= TCTL_RTLC;
3230 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3231 
3232 	/* Set the media. */
3233 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
3234 		goto out;
3235 
3236 	/*
3237 	 * Set up the receive control register; we actually program
3238 	 * the register when we set the receive filter.  Use multicast
3239 	 * address offset type 0.
3240 	 *
3241 	 * Only the i82544 has the ability to strip the incoming
3242 	 * CRC, so we don't enable that feature.
3243 	 */
3244 	sc->sc_mchash_type = 0;
3245 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
3246 	    | RCTL_MO(sc->sc_mchash_type);
3247 
3248 	/* 82573 and ICH8 don't support jumbo frames */
3249 	if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_ICH8)
3250 		sc->sc_rctl |= RCTL_LPE;
3251 
3252 	if (MCLBYTES == 2048) {
3253 		sc->sc_rctl |= RCTL_2k;
3254 	} else {
3255 		if (sc->sc_type >= WM_T_82543) {
3256 			switch(MCLBYTES) {
3257 			case 4096:
3258 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
3259 				break;
3260 			case 8192:
3261 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
3262 				break;
3263 			case 16384:
3264 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
3265 				break;
3266 			default:
3267 				panic("wm_init: MCLBYTES %d unsupported",
3268 				    MCLBYTES);
3269 				break;
3270 			}
3271 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
3272 	}
3273 
3274 	/* Set the receive filter. */
3275 	wm_set_filter(sc);
3276 
3277 	/* Start the one second link check clock. */
3278 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3279 
3280 	/* ...all done! */
3281 	ifp->if_flags |= IFF_RUNNING;
3282 	ifp->if_flags &= ~IFF_OACTIVE;
3283 
3284  out:
3285 	if (error)
3286 		log(LOG_ERR, "%s: interface not running\n",
3287 		    device_xname(&sc->sc_dev));
3288 	return (error);
3289 }
3290 
3291 /*
3292  * wm_rxdrain:
3293  *
3294  *	Drain the receive queue.
3295  */
3296 static void
3297 wm_rxdrain(struct wm_softc *sc)
3298 {
3299 	struct wm_rxsoft *rxs;
3300 	int i;
3301 
3302 	for (i = 0; i < WM_NRXDESC; i++) {
3303 		rxs = &sc->sc_rxsoft[i];
3304 		if (rxs->rxs_mbuf != NULL) {
3305 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3306 			m_freem(rxs->rxs_mbuf);
3307 			rxs->rxs_mbuf = NULL;
3308 		}
3309 	}
3310 }
3311 
3312 /*
3313  * wm_stop:		[ifnet interface function]
3314  *
3315  *	Stop transmission on the interface.
3316  */
3317 static void
3318 wm_stop(struct ifnet *ifp, int disable)
3319 {
3320 	struct wm_softc *sc = ifp->if_softc;
3321 	struct wm_txsoft *txs;
3322 	int i;
3323 
3324 	/* Stop the one second clock. */
3325 	callout_stop(&sc->sc_tick_ch);
3326 
3327 	/* Stop the 82547 Tx FIFO stall check timer. */
3328 	if (sc->sc_type == WM_T_82547)
3329 		callout_stop(&sc->sc_txfifo_ch);
3330 
3331 	if (sc->sc_flags & WM_F_HAS_MII) {
3332 		/* Down the MII. */
3333 		mii_down(&sc->sc_mii);
3334 	}
3335 
3336 	/* Stop the transmit and receive processes. */
3337 	CSR_WRITE(sc, WMREG_TCTL, 0);
3338 	CSR_WRITE(sc, WMREG_RCTL, 0);
3339 
3340 	/*
3341 	 * Clear the interrupt mask to ensure the device cannot assert its
3342 	 * interrupt line.
3343 	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
3344 	 * any currently pending or shared interrupt.
3345 	 */
3346 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3347 	sc->sc_icr = 0;
3348 
3349 	/* Release any queued transmit buffers. */
3350 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
3351 		txs = &sc->sc_txsoft[i];
3352 		if (txs->txs_mbuf != NULL) {
3353 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3354 			m_freem(txs->txs_mbuf);
3355 			txs->txs_mbuf = NULL;
3356 		}
3357 	}
3358 
3359 	/* Mark the interface as down and cancel the watchdog timer. */
3360 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3361 	ifp->if_timer = 0;
3362 
3363 	if (disable)
3364 		wm_rxdrain(sc);
3365 }
3366 
3367 void
3368 wm_get_auto_rd_done(struct wm_softc *sc)
3369 {
3370 	int i;
3371 
3372 	/* Wait for the EEPROM to reload. */
3373 	switch (sc->sc_type) {
3374 	case WM_T_82571:
3375 	case WM_T_82572:
3376 	case WM_T_82573:
3377 	case WM_T_80003:
3378 	case WM_T_ICH8:
3379 	case WM_T_ICH9:
3380 		for (i = 10; i > 0; i--) {
3381 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3382 				break;
3383 			delay(1000);
3384 		}
3385 		if (i == 0) {
3386 			log(LOG_ERR, "%s: auto read from eeprom failed to "
3387 			    "complete\n", device_xname(&sc->sc_dev));
3388 		}
3389 		break;
3390 	default:
3391 		delay(5000);
3392 		break;
3393 	}
3394 
3395 	/* PHY configuration starts after EECD_EE_AUTORD is set. */
3396 	if (sc->sc_type == WM_T_82573)
3397 		delay(25000);
3398 }
3399 
3400 /*
3401  * wm_acquire_eeprom:
3402  *
3403  *	Perform the EEPROM handshake required on some chips.
3404  */
3405 static int
3406 wm_acquire_eeprom(struct wm_softc *sc)
3407 {
3408 	uint32_t reg;
3409 	int x;
3410 	int ret = 0;
3411 
3412 	/* Flash-type EEPROMs need no handshake; always succeed. */
3413 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3414 		return 0;
3415 
3416 	if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
3417 		ret = wm_get_swfwhw_semaphore(sc);
3418 	} else if (sc->sc_flags & WM_F_SWFW_SYNC) {
3419 		/* this will also do wm_get_swsm_semaphore() if needed */
3420 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
3421 	} else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
3422 		ret = wm_get_swsm_semaphore(sc);
3423 	}
3424 
3425 	if (ret)
3426 		return 1;
3427 
3428 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE)  {
3429 		reg = CSR_READ(sc, WMREG_EECD);
3430 
3431 		/* Request EEPROM access. */
3432 		reg |= EECD_EE_REQ;
3433 		CSR_WRITE(sc, WMREG_EECD, reg);
3434 
3435 		/* ..and wait for it to be granted. */
3436 		for (x = 0; x < 1000; x++) {
3437 			reg = CSR_READ(sc, WMREG_EECD);
3438 			if (reg & EECD_EE_GNT)
3439 				break;
3440 			delay(5);
3441 		}
3442 		if ((reg & EECD_EE_GNT) == 0) {
3443 			aprint_error_dev(&sc->sc_dev, "could not acquire EEPROM GNT\n");
3444 			reg &= ~EECD_EE_REQ;
3445 			CSR_WRITE(sc, WMREG_EECD, reg);
3446 			if (sc->sc_flags & WM_F_SWFWHW_SYNC)
3447 				wm_put_swfwhw_semaphore(sc);
3448 			else if (sc->sc_flags & WM_F_SWFW_SYNC)
3449 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
3450 			else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
3451 				wm_put_swsm_semaphore(sc);
3452 			return (1);
3453 		}
3454 	}
3455 
3456 	return (0);
3457 }
3458 
3459 /*
3460  * wm_release_eeprom:
3461  *
3462  *	Release the EEPROM mutex.
3463  */
3464 static void
3465 wm_release_eeprom(struct wm_softc *sc)
3466 {
3467 	uint32_t reg;
3468 
3469 	/* Flash-type EEPROMs need no handshake; nothing to release. */
3470 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3471 		return;
3472 
3473 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
3474 		reg = CSR_READ(sc, WMREG_EECD);
3475 		reg &= ~EECD_EE_REQ;
3476 		CSR_WRITE(sc, WMREG_EECD, reg);
3477 	}
3478 
3479 	if (sc->sc_flags & WM_F_SWFWHW_SYNC)
3480 		wm_put_swfwhw_semaphore(sc);
3481 	else if (sc->sc_flags & WM_F_SWFW_SYNC)
3482 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
3483 	else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
3484 		wm_put_swsm_semaphore(sc);
3485 }
3486 
3487 /*
3488  * wm_eeprom_sendbits:
3489  *
3490  *	Send a series of bits to the EEPROM.
3491  */
3492 static void
3493 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
3494 {
3495 	uint32_t reg;
3496 	int x;
3497 
3498 	reg = CSR_READ(sc, WMREG_EECD);
3499 
3500 	for (x = nbits; x > 0; x--) {
3501 		if (bits & (1U << (x - 1)))
3502 			reg |= EECD_DI;
3503 		else
3504 			reg &= ~EECD_DI;
3505 		CSR_WRITE(sc, WMREG_EECD, reg);
3506 		delay(2);
3507 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
3508 		delay(2);
3509 		CSR_WRITE(sc, WMREG_EECD, reg);
3510 		delay(2);
3511 	}
3512 }
3513 
3514 /*
3515  * wm_eeprom_recvbits:
3516  *
3517  *	Receive a series of bits from the EEPROM.
3518  */
3519 static void
3520 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
3521 {
3522 	uint32_t reg, val;
3523 	int x;
3524 
3525 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
3526 
3527 	val = 0;
3528 	for (x = nbits; x > 0; x--) {
3529 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
3530 		delay(2);
3531 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
3532 			val |= (1U << (x - 1));
3533 		CSR_WRITE(sc, WMREG_EECD, reg);
3534 		delay(2);
3535 	}
3536 	*valp = val;
3537 }
3538 
3539 /*
3540  * wm_read_eeprom_uwire:
3541  *
3542  *	Read a word from the EEPROM using the MicroWire protocol.
3543  */
3544 static int
3545 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3546 {
3547 	uint32_t reg, val;
3548 	int i;
3549 
3550 	for (i = 0; i < wordcnt; i++) {
3551 		/* Clear SK and DI. */
3552 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
3553 		CSR_WRITE(sc, WMREG_EECD, reg);
3554 
3555 		/* Set CHIP SELECT. */
3556 		reg |= EECD_CS;
3557 		CSR_WRITE(sc, WMREG_EECD, reg);
3558 		delay(2);
3559 
3560 		/* Shift in the READ command. */
3561 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
3562 
3563 		/* Shift in address. */
3564 		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
3565 
3566 		/* Shift out the data. */
3567 		wm_eeprom_recvbits(sc, &val, 16);
3568 		data[i] = val & 0xffff;
3569 
3570 		/* Clear CHIP SELECT. */
3571 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
3572 		CSR_WRITE(sc, WMREG_EECD, reg);
3573 		delay(2);
3574 	}
3575 
3576 	return (0);
3577 }
3578 
3579 /*
3580  * wm_spi_eeprom_ready:
3581  *
3582  *	Wait for a SPI EEPROM to be ready for commands.
3583  */
3584 static int
3585 wm_spi_eeprom_ready(struct wm_softc *sc)
3586 {
3587 	uint32_t val;
3588 	int usec;
3589 
3590 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
3591 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
3592 		wm_eeprom_recvbits(sc, &val, 8);
3593 		if ((val & SPI_SR_RDY) == 0)
3594 			break;
3595 	}
3596 	if (usec >= SPI_MAX_RETRIES) {
3597 		aprint_error_dev(&sc->sc_dev, "EEPROM failed to become ready\n");
3598 		return (1);
3599 	}
3600 	return (0);
3601 }
3602 
3603 /*
3604  * wm_read_eeprom_spi:
3605  *
3606  *	Read a word from the EEPROM using the SPI protocol.
3607  */
3608 static int
3609 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3610 {
3611 	uint32_t reg, val;
3612 	int i;
3613 	uint8_t opc;
3614 
3615 	/* Clear SK and CS. */
3616 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
3617 	CSR_WRITE(sc, WMREG_EECD, reg);
3618 	delay(2);
3619 
3620 	if (wm_spi_eeprom_ready(sc))
3621 		return (1);
3622 
3623 	/* Toggle CS to flush commands. */
3624 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
3625 	delay(2);
3626 	CSR_WRITE(sc, WMREG_EECD, reg);
3627 	delay(2);
3628 
3629 	opc = SPI_OPC_READ;
3630 	if (sc->sc_ee_addrbits == 8 && word >= 128)
3631 		opc |= SPI_OPC_A8;
3632 
3633 	wm_eeprom_sendbits(sc, opc, 8);
3634 	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
3635 
3636 	for (i = 0; i < wordcnt; i++) {
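		/* Data arrives high byte first; swap into the expected word order. */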
3637 		wm_eeprom_recvbits(sc, &val, 16);
3638 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
3639 	}
3640 
3641 	/* Raise CS and clear SK. */
3642 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
3643 	CSR_WRITE(sc, WMREG_EECD, reg);
3644 	delay(2);
3645 
3646 	return (0);
3647 }
3648 
3649 #define EEPROM_CHECKSUM		0xBABA
3650 #define EEPROM_SIZE		0x0040
3651 
3652 /*
3653  * wm_validate_eeprom_checksum
3654  *
3655  * The sum of the first 64 16-bit words must equal EEPROM_CHECKSUM (0xBABA).
3656  */
3657 static int
3658 wm_validate_eeprom_checksum(struct wm_softc *sc)
3659 {
3660 	uint16_t checksum;
3661 	uint16_t eeprom_data;
3662 	int i;
3663 
3664 	checksum = 0;
3665 
3666 	for (i = 0; i < EEPROM_SIZE; i++) {
3667 		if (wm_read_eeprom(sc, i, 1, &eeprom_data))
3668 			return 1;
3669 		checksum += eeprom_data;
3670 	}
3671 
3672 	if (checksum != (uint16_t) EEPROM_CHECKSUM)
3673 		return 1;
3674 
3675 	return 0;
3676 }
3677 
3678 /*
3679  * wm_read_eeprom:
3680  *
3681  *	Read data from the serial EEPROM.
3682  */
3683 static int
3684 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3685 {
3686 	int rv;
3687 
3688 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
3689 		return 1;
3690 
3691 	if (wm_acquire_eeprom(sc))
3692 		return 1;
3693 
3694 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9))
3695 		rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
3696 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
3697 		rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
3698 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
3699 		rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
3700 	else
3701 		rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
3702 
3703 	wm_release_eeprom(sc);
3704 	return rv;
3705 }
3706 
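/*
 * wm_read_eeprom_eerd:
 *
 *	Read data from the EEPROM using the EERD register interface.
 */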
3707 static int
3708 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
3709     uint16_t *data)
3710 {
3711 	int i, eerd = 0;
3712 	int error = 0;
3713 
3714 	for (i = 0; i < wordcnt; i++) {
3715 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
3716 
3717 		CSR_WRITE(sc, WMREG_EERD, eerd);
3718 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
3719 		if (error != 0)
3720 			break;
3721 
3722 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
3723 	}
3724 
3725 	return error;
3726 }
3727 
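/*
 * wm_poll_eerd_eewr_done:
 *
 *	Poll the EERD or EEWR register until the DONE bit is set.
 *	Returns 0 on success, -1 on timeout.
 */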
3728 static int
3729 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
3730 {
3731 	uint32_t attempts = 100000;
3732 	uint32_t i, reg = 0;
3733 	int32_t done = -1;
3734 
3735 	for (i = 0; i < attempts; i++) {
3736 		reg = CSR_READ(sc, rw);
3737 
3738 		if (reg & EERD_DONE) {
3739 			done = 0;
3740 			break;
3741 		}
3742 		delay(5);
3743 	}
3744 
3745 	return done;
3746 }
3747 
3748 /*
3749  * wm_add_rxbuf:
3750  *
3751  *	Add a receive buffer to the indicated descriptor.
3752  */
3753 static int
3754 wm_add_rxbuf(struct wm_softc *sc, int idx)
3755 {
3756 	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
3757 	struct mbuf *m;
3758 	int error;
3759 
3760 	MGETHDR(m, M_DONTWAIT, MT_DATA);
3761 	if (m == NULL)
3762 		return (ENOBUFS);
3763 
3764 	MCLGET(m, M_DONTWAIT);
3765 	if ((m->m_flags & M_EXT) == 0) {
3766 		m_freem(m);
3767 		return (ENOBUFS);
3768 	}
3769 
3770 	if (rxs->rxs_mbuf != NULL)
3771 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3772 
3773 	rxs->rxs_mbuf = m;
3774 
3775 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3776 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3777 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
3778 	if (error) {
3779 		/* XXX XXX XXX */
3780 		aprint_error_dev(&sc->sc_dev, "unable to load rx DMA map %d, error = %d\n",
3781 		    idx, error);
3782 		panic("wm_add_rxbuf");
3783 	}
3784 
3785 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3786 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3787 
3788 	WM_INIT_RXDESC(sc, idx);
3789 
3790 	return (0);
3791 }
3792 
3793 /*
3794  * wm_set_ral:
3795  *
3796  *	Set an entry in the receive address list.
3797  */
3798 static void
3799 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3800 {
3801 	uint32_t ral_lo, ral_hi;
3802 
3803 	if (enaddr != NULL) {
3804 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3805 		    (enaddr[3] << 24);
3806 		ral_hi = enaddr[4] | (enaddr[5] << 8);
3807 		ral_hi |= RAL_AV;
3808 	} else {
3809 		ral_lo = 0;
3810 		ral_hi = 0;
3811 	}
3812 
3813 	if (sc->sc_type >= WM_T_82544) {
3814 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
3815 		    ral_lo);
3816 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
3817 		    ral_hi);
3818 	} else {
3819 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
3820 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
3821 	}
3822 }
3823 
3824 /*
3825  * wm_mchash:
3826  *
3827  *	Compute the hash of the multicast address for the 4096-bit
3828  *	multicast filter.
3829  */
3830 static uint32_t
3831 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3832 {
3833 	static const int lo_shift[4] = { 4, 3, 2, 0 };
3834 	static const int hi_shift[4] = { 4, 5, 6, 8 };
3835 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3836 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3837 	uint32_t hash;
3838 
3839 	if (sc->sc_type == WM_T_ICH8) {
3840 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3841 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3842 		return (hash & 0x3ff);
3843 	}
3844 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3845 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3846 
3847 	return (hash & 0xfff);
3848 }
3849 
3850 /*
3851  * wm_set_filter:
3852  *
3853  *	Set up the receive filter.
3854  */
3855 static void
3856 wm_set_filter(struct wm_softc *sc)
3857 {
3858 	struct ethercom *ec = &sc->sc_ethercom;
3859 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3860 	struct ether_multi *enm;
3861 	struct ether_multistep step;
3862 	bus_addr_t mta_reg;
3863 	uint32_t hash, reg, bit;
3864 	int i, size;
3865 
3866 	if (sc->sc_type >= WM_T_82544)
3867 		mta_reg = WMREG_CORDOVA_MTA;
3868 	else
3869 		mta_reg = WMREG_MTA;
3870 
3871 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3872 
3873 	if (ifp->if_flags & IFF_BROADCAST)
3874 		sc->sc_rctl |= RCTL_BAM;
3875 	if (ifp->if_flags & IFF_PROMISC) {
3876 		sc->sc_rctl |= RCTL_UPE;
3877 		goto allmulti;
3878 	}
3879 
3880 	/*
3881 	 * Set the station address in the first RAL slot, and
3882 	 * clear the remaining slots.
3883 	 */
3884 	if (sc->sc_type == WM_T_ICH8)
3885 		size = WM_ICH8_RAL_TABSIZE;
3886 	else
3887 		size = WM_RAL_TABSIZE;
3888 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3889 	for (i = 1; i < size; i++)
3890 		wm_set_ral(sc, NULL, i);
3891 
3892 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9))
3893 		size = WM_ICH8_MC_TABSIZE;
3894 	else
3895 		size = WM_MC_TABSIZE;
3896 	/* Clear out the multicast table. */
3897 	for (i = 0; i < size; i++)
3898 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
3899 
3900 	ETHER_FIRST_MULTI(step, ec, enm);
3901 	while (enm != NULL) {
3902 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3903 			/*
3904 			 * We must listen to a range of multicast addresses.
3905 			 * For now, just accept all multicasts, rather than
3906 			 * trying to set only those filter bits needed to match
3907 			 * the range.  (At this time, the only use of address
3908 			 * ranges is for IP multicast routing, for which the
3909 			 * range is big enough to require all bits set.)
3910 			 */
3911 			goto allmulti;
3912 		}
3913 
3914 		hash = wm_mchash(sc, enm->enm_addrlo);
3915 
3916 		reg = (hash >> 5);
3917 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9))
3918 			reg &= 0x1f;
3919 		else
3920 			reg &= 0x7f;
3921 		bit = hash & 0x1f;
3922 
3923 		hash = CSR_READ(sc, mta_reg + (reg << 2));
3924 		hash |= 1U << bit;
3925 
3926 		/* XXX Hardware bug?? */
3927 		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
3928 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3929 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3930 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3931 		} else
3932 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3933 
3934 		ETHER_NEXT_MULTI(step, enm);
3935 	}
3936 
3937 	ifp->if_flags &= ~IFF_ALLMULTI;
3938 	goto setit;
3939 
3940  allmulti:
3941 	ifp->if_flags |= IFF_ALLMULTI;
3942 	sc->sc_rctl |= RCTL_MPE;
3943 
3944  setit:
3945 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3946 }
3947 
3948 /*
3949  * wm_tbi_mediainit:
3950  *
3951  *	Initialize media for use on 1000BASE-X devices.
3952  */
3953 static void
3954 wm_tbi_mediainit(struct wm_softc *sc)
3955 {
3956 	const char *sep = "";
3957 
3958 	if (sc->sc_type < WM_T_82543)
3959 		sc->sc_tipg = TIPG_WM_DFLT;
3960 	else
3961 		sc->sc_tipg = TIPG_LG_DFLT;
3962 
3963 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
3964 	    wm_tbi_mediastatus);
3965 
3966 	/*
3967 	 * SWD Pins:
3968 	 *
3969 	 *	0 = Link LED (output)
3970 	 *	1 = Loss Of Signal (input)
3971 	 */
3972 	sc->sc_ctrl |= CTRL_SWDPIO(0);
3973 	sc->sc_ctrl &= ~CTRL_SWDPIO(1);
3974 
3975 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3976 
3977 #define	ADD(ss, mm, dd)							\
3978 do {									\
3979 	aprint_normal("%s%s", sep, ss);					\
3980 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
3981 	sep = ", ";							\
3982 } while (/*CONSTCOND*/0)
3983 
3984 	aprint_normal_dev(&sc->sc_dev, "");
3985 	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
3986 	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
3987 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
3988 	aprint_normal("\n");
3989 
3990 #undef ADD
3991 
3992 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
3993 }
3994 
3995 /*
3996  * wm_tbi_mediastatus:	[ifmedia interface function]
3997  *
3998  *	Get the current interface media status on a 1000BASE-X device.
3999  */
4000 static void
4001 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4002 {
4003 	struct wm_softc *sc = ifp->if_softc;
4004 	uint32_t ctrl;
4005 
4006 	ifmr->ifm_status = IFM_AVALID;
4007 	ifmr->ifm_active = IFM_ETHER;
4008 
4009 	if (sc->sc_tbi_linkup == 0) {
4010 		ifmr->ifm_active |= IFM_NONE;
4011 		return;
4012 	}
4013 
4014 	ifmr->ifm_status |= IFM_ACTIVE;
4015 	ifmr->ifm_active |= IFM_1000_SX;
4016 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
4017 		ifmr->ifm_active |= IFM_FDX;
4018 	ctrl = CSR_READ(sc, WMREG_CTRL);
4019 	if (ctrl & CTRL_RFCE)
4020 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
4021 	if (ctrl & CTRL_TFCE)
4022 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
4023 }
4024 
4025 /*
4026  * wm_tbi_mediachange:	[ifmedia interface function]
4027  *
4028  *	Set hardware to newly-selected media on a 1000BASE-X device.
4029  */
4030 static int
4031 wm_tbi_mediachange(struct ifnet *ifp)
4032 {
4033 	struct wm_softc *sc = ifp->if_softc;
4034 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4035 	uint32_t status;
4036 	int i;
4037 
4038 	sc->sc_txcw = ife->ifm_data;
4039 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x on entry\n",
4040 		    device_xname(&sc->sc_dev),sc->sc_txcw));
4041 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
4042 	    (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
4043 		sc->sc_txcw |= ANAR_X_PAUSE_SYM | ANAR_X_PAUSE_ASYM;
4044 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4045 		sc->sc_txcw |= TXCW_ANE;
4046 	} else {
4047 		/* If autonegotiation is turned off, force link up and full duplex. */
4048 		sc->sc_txcw &= ~TXCW_ANE;
4049 		sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
4050 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4051 		delay(1000);
4052 	}
4053 
4054 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
4055 		    device_xname(&sc->sc_dev),sc->sc_txcw));
4056 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
4057 	delay(10000);
4058 
4059 	/* NOTE: CTRL will update TFCE and RFCE automatically. */
4060 
4061 	sc->sc_tbi_anstate = 0;
4062 
4063 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
4064 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(&sc->sc_dev),i));
4065 
4066 	/*
4067 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
4068 	 * optics detect a signal, 0 if they don't.
4069 	 */
4070 	if (((i != 0) && (sc->sc_type >= WM_T_82544)) || (i == 0)) {
4071 		/* Have signal; wait for the link to come up. */
4072 
4073 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4074 			/*
4075 			 * Reset the link, and let autonegotiation do its thing
4076 			 */
4077 			sc->sc_ctrl |= CTRL_LRST;
4078 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4079 			delay(1000);
4080 			sc->sc_ctrl &= ~CTRL_LRST;
4081 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4082 			delay(1000);
4083 		}
4084 
4085 		for (i = 0; i < 50; i++) {
4086 			delay(10000);
4087 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
4088 				break;
4089 		}
4090 
4091 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
4092 			    device_xname(&sc->sc_dev),i));
4093 
4094 		status = CSR_READ(sc, WMREG_STATUS);
4095 		DPRINTF(WM_DEBUG_LINK,
4096 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
4097 			device_xname(&sc->sc_dev),status, STATUS_LU));
4098 		if (status & STATUS_LU) {
4099 			/* Link is up. */
4100 			DPRINTF(WM_DEBUG_LINK,
4101 			    ("%s: LINK: set media -> link up %s\n",
4102 			    device_xname(&sc->sc_dev),
4103 			    (status & STATUS_FD) ? "FDX" : "HDX"));
4104 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4105 			sc->sc_fcrtl &= ~FCRTL_XONE;
4106 			if (status & STATUS_FD)
4107 				sc->sc_tctl |=
4108 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4109 			else
4110 				sc->sc_tctl |=
4111 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4112 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
4113 				sc->sc_fcrtl |= FCRTL_XONE;
4114 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4115 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
4116 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
4117 				      sc->sc_fcrtl);
4118 			sc->sc_tbi_linkup = 1;
4119 		} else {
4120 			/* Link is down. */
4121 			DPRINTF(WM_DEBUG_LINK,
4122 			    ("%s: LINK: set media -> link down\n",
4123 			    device_xname(&sc->sc_dev)));
4124 			sc->sc_tbi_linkup = 0;
4125 		}
4126 	} else {
4127 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
4128 		    device_xname(&sc->sc_dev)));
4129 		sc->sc_tbi_linkup = 0;
4130 	}
4131 
4132 	wm_tbi_set_linkled(sc);
4133 
4134 	return (0);
4135 }
4136 
4137 /*
4138  * wm_tbi_set_linkled:
4139  *
4140  *	Update the link LED on 1000BASE-X devices.
4141  */
4142 static void
4143 wm_tbi_set_linkled(struct wm_softc *sc)
4144 {
4145 
4146 	if (sc->sc_tbi_linkup)
4147 		sc->sc_ctrl |= CTRL_SWDPIN(0);
4148 	else
4149 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
4150 
4151 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4152 }
4153 
4154 /*
4155  * wm_tbi_check_link:
4156  *
4157  *	Check the link on 1000BASE-X devices.
4158  */
4159 static void
4160 wm_tbi_check_link(struct wm_softc *sc)
4161 {
4162 	uint32_t rxcw, ctrl, status;
4163 
4164 	if (sc->sc_tbi_anstate == 0)
4165 		return;
4166 	else if (sc->sc_tbi_anstate > 1) {
4167 		DPRINTF(WM_DEBUG_LINK,
4168 		    ("%s: LINK: anstate %d\n", device_xname(&sc->sc_dev),
4169 		    sc->sc_tbi_anstate));
4170 		sc->sc_tbi_anstate--;
4171 		return;
4172 	}
4173 
4174 	sc->sc_tbi_anstate = 0;
4175 
4176 	rxcw = CSR_READ(sc, WMREG_RXCW);
4177 	ctrl = CSR_READ(sc, WMREG_CTRL);
4178 	status = CSR_READ(sc, WMREG_STATUS);
4179 
4180 	if ((status & STATUS_LU) == 0) {
4181 		DPRINTF(WM_DEBUG_LINK,
4182 		    ("%s: LINK: checklink -> down\n", device_xname(&sc->sc_dev)));
4183 		sc->sc_tbi_linkup = 0;
4184 	} else {
4185 		DPRINTF(WM_DEBUG_LINK,
4186 		    ("%s: LINK: checklink -> up %s\n", device_xname(&sc->sc_dev),
4187 		    (status & STATUS_FD) ? "FDX" : "HDX"));
4188 		sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4189 		sc->sc_fcrtl &= ~FCRTL_XONE;
4190 		if (status & STATUS_FD)
4191 			sc->sc_tctl |=
4192 			    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4193 		else
4194 			sc->sc_tctl |=
4195 			    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4196 		if (ctrl & CTRL_TFCE)
4197 			sc->sc_fcrtl |= FCRTL_XONE;
4198 		CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4199 		CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
4200 			      WMREG_OLD_FCRTL : WMREG_FCRTL,
4201 			      sc->sc_fcrtl);
4202 		sc->sc_tbi_linkup = 1;
4203 	}
4204 
4205 	wm_tbi_set_linkled(sc);
4206 }
4207 
4208 /*
4209  * wm_gmii_reset:
4210  *
4211  *	Reset the PHY.
4212  */
4213 static void
4214 wm_gmii_reset(struct wm_softc *sc)
4215 {
4216 	uint32_t reg;
4217 	int func = 0; /* XXX gcc */
4218 
4219 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)) {
4220 		if (wm_get_swfwhw_semaphore(sc))
4221 			return;
4222 	}
4223 	if (sc->sc_type == WM_T_80003) {
4224 		func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
4225 		if (wm_get_swfw_semaphore(sc,
4226 		    func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
4227 			return;
4228 	}
4229 	if (sc->sc_type >= WM_T_82544) {
4230 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
4231 		delay(20000);
4232 
4233 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4234 		delay(20000);
4235 	} else {
4236 		/*
4237 		 * With 82543, we need to force speed and duplex on the MAC
4238 		 * equal to what the PHY speed and duplex configuration is.
4239 		 * In addition, we need to perform a hardware reset on the PHY
4240 		 * to take it out of reset.
4241 		 */
4242 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
4243 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4244 
4245 		/* The PHY reset pin is active-low. */
4246 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
4247 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
4248 		    CTRL_EXT_SWDPIN(4));
4249 		reg |= CTRL_EXT_SWDPIO(4);
4250 
4251 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
4252 		delay(10);
4253 
4254 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4255 		delay(10000);
4256 
4257 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
4258 		delay(10);
4259 #if 0
4260 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
4261 #endif
4262 	}
4263 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9))
4264 		wm_put_swfwhw_semaphore(sc);
4265 	if (sc->sc_type == WM_T_80003)
4266 		wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4267 }
4268 
4269 /*
4270  * wm_gmii_mediainit:
4271  *
4272  *	Initialize media for use on 1000BASE-T devices.
4273  */
4274 static void
4275 wm_gmii_mediainit(struct wm_softc *sc)
4276 {
4277 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4278 
4279 	/* We have MII. */
4280 	sc->sc_flags |= WM_F_HAS_MII;
4281 
4282 	if (sc->sc_type >= WM_T_80003)
4283 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
4284 	else
4285 		sc->sc_tipg = TIPG_1000T_DFLT;
4286 
4287 	/*
4288 	 * Let the chip set speed/duplex on its own based on
4289 	 * signals from the PHY.
4290 	 * XXXbouyer - I'm not sure this is right for the 80003,
4291 	 * the em driver only sets CTRL_SLU here - but it seems to work.
4292 	 */
4293 	sc->sc_ctrl |= CTRL_SLU;
4294 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4295 
4296 	/* Initialize our media structures and probe the GMII. */
4297 	sc->sc_mii.mii_ifp = ifp;
4298 
4299 	if (sc->sc_type >= WM_T_80003) {
4300 		sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
4301 		sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
4302 	} else if (sc->sc_type >= WM_T_82544) {
4303 		sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
4304 		sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
4305 	} else {
4306 		sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
4307 		sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
4308 	}
4309 	sc->sc_mii.mii_statchg = wm_gmii_statchg;
4310 
4311 	wm_gmii_reset(sc);
4312 
4313 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
4314 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
4315 	    wm_gmii_mediastatus);
4316 
4317 	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
4318 	    MII_OFFSET_ANY, MIIF_DOPAUSE);
4319 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
4320 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
4321 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
4322 	} else
4323 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
4324 }
4325 
4326 /*
4327  * wm_gmii_mediastatus:	[ifmedia interface function]
4328  *
4329  *	Get the current interface media status on a 1000BASE-T device.
4330  */
4331 static void
4332 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4333 {
4334 	struct wm_softc *sc = ifp->if_softc;
4335 
4336 	ether_mediastatus(ifp, ifmr);
4337 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) |
4338 			   sc->sc_flowflags;
4339 }
4340 
4341 /*
4342  * wm_gmii_mediachange:	[ifmedia interface function]
4343  *
4344  *	Set hardware to newly-selected media on a 1000BASE-T device.
4345  */
4346 static int
4347 wm_gmii_mediachange(struct ifnet *ifp)
4348 {
4349 	struct wm_softc *sc = ifp->if_softc;
4350 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4351 	int rc;
4352 
4353 	if ((ifp->if_flags & IFF_UP) == 0)
4354 		return 0;
4355 
4356 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
4357 	sc->sc_ctrl |= CTRL_SLU;
4358 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
4359 	    || (sc->sc_type > WM_T_82543)) {
4360 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
4361 	} else {
4362 		sc->sc_ctrl &= ~CTRL_ASDE;
4363 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
4364 		if (ife->ifm_media & IFM_FDX)
4365 			sc->sc_ctrl |= CTRL_FD;
4366 		switch (IFM_SUBTYPE(ife->ifm_media)) {
4367 		case IFM_10_T:
4368 			sc->sc_ctrl |= CTRL_SPEED_10;
4369 			break;
4370 		case IFM_100_TX:
4371 			sc->sc_ctrl |= CTRL_SPEED_100;
4372 			break;
4373 		case IFM_1000_T:
4374 			sc->sc_ctrl |= CTRL_SPEED_1000;
4375 			break;
4376 		default:
4377 			panic("wm_gmii_mediachange: bad media 0x%x",
4378 			    ife->ifm_media);
4379 		}
4380 	}
4381 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4382 	if (sc->sc_type <= WM_T_82543)
4383 		wm_gmii_reset(sc);
4384 
4385 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
4386 		return 0;
4387 	return rc;
4388 }
4389 
4390 #define	MDI_IO		CTRL_SWDPIN(2)
4391 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
4392 #define	MDI_CLK		CTRL_SWDPIN(3)
4393 
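/*
 * i82543_mii_sendbits:
 *
 *	Bit-bang a series of bits out to the PHY over the i82543's
 *	software-controlled MDIO pins.
 */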
4394 static void
4395 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
4396 {
4397 	uint32_t i, v;
4398 
4399 	v = CSR_READ(sc, WMREG_CTRL);
4400 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
4401 	v |= MDI_DIR | CTRL_SWDPIO(3);
4402 
4403 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
4404 		if (data & i)
4405 			v |= MDI_IO;
4406 		else
4407 			v &= ~MDI_IO;
4408 		CSR_WRITE(sc, WMREG_CTRL, v);
4409 		delay(10);
4410 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4411 		delay(10);
4412 		CSR_WRITE(sc, WMREG_CTRL, v);
4413 		delay(10);
4414 	}
4415 }
4416 
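/*
 * i82543_mii_recvbits:
 *
 *	Bit-bang 16 bits of data in from the PHY over the i82543's
 *	software-controlled MDIO pins.
 */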
4417 static uint32_t
4418 i82543_mii_recvbits(struct wm_softc *sc)
4419 {
4420 	uint32_t v, i, data = 0;
4421 
4422 	v = CSR_READ(sc, WMREG_CTRL);
4423 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
4424 	v |= CTRL_SWDPIO(3);
4425 
4426 	CSR_WRITE(sc, WMREG_CTRL, v);
4427 	delay(10);
4428 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4429 	delay(10);
4430 	CSR_WRITE(sc, WMREG_CTRL, v);
4431 	delay(10);
4432 
4433 	for (i = 0; i < 16; i++) {
4434 		data <<= 1;
4435 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4436 		delay(10);
4437 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
4438 			data |= 1;
4439 		CSR_WRITE(sc, WMREG_CTRL, v);
4440 		delay(10);
4441 	}
4442 
4443 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4444 	delay(10);
4445 	CSR_WRITE(sc, WMREG_CTRL, v);
4446 	delay(10);
4447 
4448 	return (data);
4449 }
4450 
4451 #undef MDI_IO
4452 #undef MDI_DIR
4453 #undef MDI_CLK
4454 
4455 /*
4456  * wm_gmii_i82543_readreg:	[mii interface function]
4457  *
4458  *	Read a PHY register on the GMII (i82543 version).
4459  */
4460 static int
4461 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
4462 {
4463 	struct wm_softc *sc = device_private(self);
4464 	int rv;
4465 
4466 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
4467 	i82543_mii_sendbits(sc, reg | (phy << 5) |
4468 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
4469 	rv = i82543_mii_recvbits(sc) & 0xffff;
4470 
4471 	DPRINTF(WM_DEBUG_GMII,
4472 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
4473 	    device_xname(&sc->sc_dev), phy, reg, rv));
4474 
4475 	return (rv);
4476 }
4477 
4478 /*
4479  * wm_gmii_i82543_writereg:	[mii interface function]
4480  *
4481  *	Write a PHY register on the GMII (i82543 version).
4482  */
4483 static void
4484 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
4485 {
4486 	struct wm_softc *sc = device_private(self);
4487 
4488 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
4489 	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
4490 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
4491 	    (MII_COMMAND_START << 30), 32);
4492 }
4493 
4494 /*
4495  * wm_gmii_i82544_readreg:	[mii interface function]
4496  *
4497  *	Read a PHY register on the GMII.
4498  */
4499 static int
4500 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
4501 {
4502 	struct wm_softc *sc = device_private(self);
4503 	uint32_t mdic = 0;
4504 	int i, rv;
4505 
4506 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
4507 	    MDIC_REGADD(reg));
4508 
4509 	for (i = 0; i < 320; i++) {
4510 		mdic = CSR_READ(sc, WMREG_MDIC);
4511 		if (mdic & MDIC_READY)
4512 			break;
4513 		delay(10);
4514 	}
4515 
4516 	if ((mdic & MDIC_READY) == 0) {
4517 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
4518 		    device_xname(&sc->sc_dev), phy, reg);
4519 		rv = 0;
4520 	} else if (mdic & MDIC_E) {
4521 #if 0 /* This is normal if no PHY is present. */
4522 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
4523 		    device_xname(&sc->sc_dev), phy, reg);
4524 #endif
4525 		rv = 0;
4526 	} else {
4527 		rv = MDIC_DATA(mdic);
4528 		if (rv == 0xffff)
4529 			rv = 0;
4530 	}
4531 
4532 	return (rv);
4533 }
4534 
4535 /*
4536  * wm_gmii_i82544_writereg:	[mii interface function]
4537  *
4538  *	Write a PHY register on the GMII.
4539  */
4540 static void
4541 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
4542 {
4543 	struct wm_softc *sc = device_private(self);
4544 	uint32_t mdic = 0;
4545 	int i;
4546 
4547 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
4548 	    MDIC_REGADD(reg) | MDIC_DATA(val));
4549 
4550 	for (i = 0; i < 320; i++) {
4551 		mdic = CSR_READ(sc, WMREG_MDIC);
4552 		if (mdic & MDIC_READY)
4553 			break;
4554 		delay(10);
4555 	}
4556 
4557 	if ((mdic & MDIC_READY) == 0)
4558 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
4559 		    device_xname(&sc->sc_dev), phy, reg);
4560 	else if (mdic & MDIC_E)
4561 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
4562 		    device_xname(&sc->sc_dev), phy, reg);
4563 }
4564 
4565 /*
4566  * wm_gmii_i80003_readreg:	[mii interface function]
4567  *
4568  *	Read a PHY register on the Kumeran bus.
4569  * This could be handled by the PHY layer if we didn't have to lock the
4570  * resource ...
4571  */
4572 static int
4573 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
4574 {
4575 	struct wm_softc *sc = device_private(self);
4576 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4577 	int rv;
4578 
4579 	if (phy != 1) /* only one PHY on kumeran bus */
4580 		return 0;
4581 
4582 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
4583 		return 0;
4584 
4585 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
4586 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
4587 		    reg >> GG82563_PAGE_SHIFT);
4588 	} else {
4589 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
4590 		    reg >> GG82563_PAGE_SHIFT);
4591 	}
4592 
4593 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
4594 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4595 	return (rv);
4596 }
4597 
4598 /*
4599  * wm_gmii_i80003_writereg:	[mii interface function]
4600  *
4601  *	Write a PHY register on the Kumeran bus.
4602  * This could be handled by the PHY layer if we didn't have to lock the
4603  * resource ...
4604  */
4605 static void
4606 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
4607 {
4608 	struct wm_softc *sc = device_private(self);
4609 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4610 
4611 	if (phy != 1) /* only one PHY on kumeran bus */
4612 		return;
4613 
4614 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
4615 		return;
4616 
4617 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
4618 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
4619 		    reg >> GG82563_PAGE_SHIFT);
4620 	} else {
4621 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
4622 		    reg >> GG82563_PAGE_SHIFT);
4623 	}
4624 
4625 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
4626 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4627 }
4628 
4629 /*
4630  * wm_gmii_statchg:	[mii interface function]
4631  *
4632  *	Callback from MII layer when media changes.
4633  */
4634 static void
4635 wm_gmii_statchg(device_t self)
4636 {
4637 	struct wm_softc *sc = device_private(self);
4638 	struct mii_data *mii = &sc->sc_mii;
4639 
4640 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
4641 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4642 	sc->sc_fcrtl &= ~FCRTL_XONE;
4643 
4644 	/*
4645 	 * Get flow control negotiation result.
4646 	 */
4647 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
4648 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
4649 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
4650 		mii->mii_media_active &= ~IFM_ETH_FMASK;
4651 	}
4652 
4653 	if (sc->sc_flowflags & IFM_FLOW) {
4654 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
4655 			sc->sc_ctrl |= CTRL_TFCE;
4656 			sc->sc_fcrtl |= FCRTL_XONE;
4657 		}
4658 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
4659 			sc->sc_ctrl |= CTRL_RFCE;
4660 	}
4661 
4662 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
4663 		DPRINTF(WM_DEBUG_LINK,
4664 		    ("%s: LINK: statchg: FDX\n", device_xname(&sc->sc_dev)));
4665 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4666 	} else  {
4667 		DPRINTF(WM_DEBUG_LINK,
4668 		    ("%s: LINK: statchg: HDX\n", device_xname(&sc->sc_dev)));
4669 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4670 	}
4671 
4672 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4673 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4674 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
4675 						 : WMREG_FCRTL, sc->sc_fcrtl);
4676 	if (sc->sc_type >= WM_T_80003) {
4677 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
4678 		case IFM_1000_T:
4679 			wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
4680 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
4681 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
4682 			break;
4683 		default:
4684 			wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
4685 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
4686 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
4687 			break;
4688 		}
4689 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4690 	}
4691 }
4692 
4693 /*
4694  * wm_kmrn_i80003_readreg:
4695  *
4696  *	Read a kumeran register
4697  */
4698 static int
4699 wm_kmrn_i80003_readreg(struct wm_softc *sc, int reg)
4700 {
4701 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4702 	int rv;
4703 
4704 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
4705 		return 0;
4706 
4707 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
4708 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
4709 	    KUMCTRLSTA_REN);
4710 	delay(2);
4711 
4712 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
4713 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4714 	return (rv);
4715 }
4716 
4717 /*
4718  * wm_kmrn_i80003_writereg:
4719  *
4720  *	Write a kumeran register
4721  */
4722 static void
4723 wm_kmrn_i80003_writereg(struct wm_softc *sc, int reg, int val)
4724 {
4725 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4726 
4727 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
4728 		return;
4729 
4730 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
4731 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
4732 	    (val & KUMCTRLSTA_MASK));
4733 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4734 }
4735 
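/*
 * wm_is_onboard_nvm_eeprom:
 *
 *	Return 1 if the NVM is an EEPROM, 0 if the 82573's on-board
 *	NVM is Flash (EECD bits 15 and 16 both set).
 */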
4736 static int
4737 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
4738 {
4739 	uint32_t eecd = 0;
4740 
4741 	if (sc->sc_type == WM_T_82573) {
4742 		eecd = CSR_READ(sc, WMREG_EECD);
4743 
4744 		/* Isolate bits 15 & 16 */
4745 		eecd = ((eecd >> 15) & 0x03);
4746 
4747 		/* If both bits are set, device is Flash type */
4748 		if (eecd == 0x03) {
4749 			return 0;
4750 		}
4751 	}
4752 	return 1;
4753 }
4754 
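/*
 * wm_get_swsm_semaphore:
 *
 *	Acquire the software semaphore (SWSM_SWESMBI).
 *	Returns 0 on success, 1 on timeout.
 */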
4755 static int
4756 wm_get_swsm_semaphore(struct wm_softc *sc)
4757 {
4758 	int32_t timeout;
4759 	uint32_t swsm;
4760 
4761 	/* Get the FW semaphore. */
4762 	timeout = 1000 + 1; /* XXX */
4763 	while (timeout) {
4764 		swsm = CSR_READ(sc, WMREG_SWSM);
4765 		swsm |= SWSM_SWESMBI;
4766 		CSR_WRITE(sc, WMREG_SWSM, swsm);
4767 		/* If we managed to set the bit, we got the semaphore. */
4768 		swsm = CSR_READ(sc, WMREG_SWSM);
4769 		if (swsm & SWSM_SWESMBI)
4770 			break;
4771 
4772 		delay(50);
4773 		timeout--;
4774 	}
4775 
4776 	if (timeout == 0) {
4777 		aprint_error_dev(&sc->sc_dev, "could not acquire SWSM semaphore\n");
4778 		/* Release semaphores */
4779 		wm_put_swsm_semaphore(sc);
4780 		return 1;
4781 	}
4782 	return 0;
4783 }
4784 
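/*
 * wm_put_swsm_semaphore:
 *
 *	Release the software semaphore (SWSM_SWESMBI).
 */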
4785 static void
4786 wm_put_swsm_semaphore(struct wm_softc *sc)
4787 {
4788 	uint32_t swsm;
4789 
4790 	swsm = CSR_READ(sc, WMREG_SWSM);
4791 	swsm &= ~(SWSM_SWESMBI);
4792 	CSR_WRITE(sc, WMREG_SWSM, swsm);
4793 }
4794 
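/*
 * wm_get_swfw_semaphore:
 *
 *	Acquire the software/firmware semaphore for the resources
 *	named by mask.  Returns 0 on success, 1 on timeout.
 */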
4795 static int
4796 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
4797 {
4798 	uint32_t swfw_sync;
4799 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
4800 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
4801 	int timeout;
4802 
4803 	for (timeout = 0; timeout < 200; timeout++) {
4804 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
4805 			if (wm_get_swsm_semaphore(sc))
4806 				return 1;
4807 		}
4808 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
4809 		if ((swfw_sync & (swmask | fwmask)) == 0) {
4810 			swfw_sync |= swmask;
4811 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
4812 			if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4813 				wm_put_swsm_semaphore(sc);
4814 			return 0;
4815 		}
4816 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4817 			wm_put_swsm_semaphore(sc);
4818 		delay(5000);
4819 	}
4820 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
4821 	    device_xname(&sc->sc_dev), mask, swfw_sync);
4822 	return 1;
4823 }
4824 
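/*
 * wm_put_swfw_semaphore:
 *
 *	Release the software/firmware semaphore for the resources
 *	named by mask.
 */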
4825 static void
4826 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
4827 {
4828 	uint32_t swfw_sync;
4829 
4830 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
4831 		while (wm_get_swsm_semaphore(sc) != 0)
4832 			continue;
4833 	}
4834 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
4835 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
4836 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
4837 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4838 		wm_put_swsm_semaphore(sc);
4839 }
4840 
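/*
 * wm_get_swfwhw_semaphore:
 *
 *	Acquire the software flag (EXTCNFCTR SWFLAG) used on ICH8/ICH9.
 *	Returns 0 on success, 1 on timeout.
 */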
4841 static int
4842 wm_get_swfwhw_semaphore(struct wm_softc *sc)
4843 {
4844 	uint32_t ext_ctrl;
4845 	int timeout;
4846 
4847 	for (timeout = 0; timeout < 200; timeout++) {
4848 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
4849 		ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
4850 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
4851 
4852 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
4853 		if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
4854 			return 0;
4855 		delay(5000);
4856 	}
4857 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
4858 	    device_xname(&sc->sc_dev), ext_ctrl);
4859 	return 1;
4860 }
4861 
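/*
 * wm_put_swfwhw_semaphore:
 *
 *	Release the software flag (EXTCNFCTR SWFLAG).
 */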
4862 static void
4863 wm_put_swfwhw_semaphore(struct wm_softc *sc)
4864 {
4865 	uint32_t ext_ctrl;
4866 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
4867 	ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
4868 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
4869 }
4870 
4871 /******************************************************************************
4872  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
4873  * register.
4874  *
4875  * sc - Struct containing variables accessed by shared code
4876  * offset - offset of word in the EEPROM to read
4877  * data - word read from the EEPROM
4878  * words - number of words to read
4879  *****************************************************************************/
4880 static int
4881 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
4882 {
4883     int32_t  error = 0;
4884     uint32_t flash_bank = 0;
4885     uint32_t act_offset = 0;
4886     uint32_t bank_offset = 0;
4887     uint16_t word = 0;
4888     uint16_t i = 0;
4889 
4890     /* We need to know which is the valid flash bank.  In the event
4891      * that we didn't allocate eeprom_shadow_ram, we may not be
4892      * managing flash_bank.  So it cannot be trusted and needs
4893      * to be updated with each read.
4894      */
4895     /* Value of bit 22 corresponds to the flash bank we're on. */
4896     flash_bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
4897 
4898     /* Adjust offset appropriately if we're on bank 1 - adjust for word size */
4899     bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
4900 
4901     error = wm_get_swfwhw_semaphore(sc);
4902     if (error)
4903         return error;
4904 
4905     for (i = 0; i < words; i++) {
4906         /* The NVM part needs a byte offset, hence * 2 */
4907         act_offset = bank_offset + ((offset + i) * 2);
4908         error = wm_read_ich8_word(sc, act_offset, &word);
4909         if (error)
4910             break;
4911         data[i] = word;
4912     }
4913 
4914     wm_put_swfwhw_semaphore(sc);
4915     return error;
4916 }
4917 
4918 /******************************************************************************
4919  * This function does initial flash setup so that a new read/write/erase cycle
4920  * can be started.
4921  *
4922  * sc - The pointer to the hw structure
4923  ****************************************************************************/
4924 static int32_t
4925 wm_ich8_cycle_init(struct wm_softc *sc)
4926 {
4927     uint16_t hsfsts;
4928     int32_t error = 1;
4929     int32_t i     = 0;
4930 
4931     hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
4932 
4933     /* May be check the Flash Des Valid bit in Hw status */
4934     if ((hsfsts & HSFSTS_FLDVAL) == 0) {
4935         return error;
4936     }
4937 
4938     /* Clear FCERR in HW status by writing a 1. */
4939     /* Clear DAEL in HW status by writing a 1. */
4940     hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
4941 
4942     ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
4943 
4944     /* Either we should have a hardware SPI cycle in progress bit to check
4945      * against, in order to start a new cycle or FDONE bit should be changed
4946      * in the hardware so that it is 1 after harware reset, which can then be
4947      * used as an indication whether a cycle is in progress or has been
4948      * completed .. we should also have some software semaphore mechanism to
4949      * guard FDONE or the cycle in progress bit so that two threads access to
4950      * those bits can be sequentiallized or a way so that 2 threads dont
4951      * start the cycle at the same time */
4952 
4953     if ((hsfsts & HSFSTS_FLINPRO) == 0) {
4954         /* There is no cycle running at present, so we can start a cycle */
4955         /* Begin by setting Flash Cycle Done. */
4956         hsfsts |= HSFSTS_DONE;
4957         ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
4958         error = 0;
4959     } else {
4960         /* Otherwise poll for some time so the current cycle has a chance
4961          * to end before giving up. */
4962         for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
4963             hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
4964             if ((hsfsts & HSFSTS_FLINPRO) == 0) {
4965                 error = 0;
4966                 break;
4967             }
4968             delay(1);
4969         }
4970         if (error == 0) {
4971             /* The previous cycle finished in time; now set the
4972              * Flash Cycle Done bit. */
4973             hsfsts |= HSFSTS_DONE;
4974             ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
4975         }
4976     }
4977     return error;
4978 }
4979 
4980 /******************************************************************************
4981  * This function starts a flash cycle and waits for its completion
4982  *
4983  * sc - The pointer to the hw structure
4984  ****************************************************************************/
4985 static int32_t
4986 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
4987 {
4988     uint16_t hsflctl;
4989     uint16_t hsfsts;
4990     int32_t error = 1;
4991     uint32_t i = 0;
4992 
4993     /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
4994     hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
4995     hsflctl |= HSFCTL_GO;
4996     ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
4997 
4998     /* wait till FDONE bit is set to 1 */
4999     do {
5000         hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5001         if (hsfsts & HSFSTS_DONE)
5002             break;
5003         delay(1);
5004         i++;
5005     } while (i < timeout);
5006     if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0) {
5007         error = 0;
5008     }
5009     return error;
5010 }
5011 
5012 /******************************************************************************
5013  * Reads a byte or word from the NVM using the ICH8 flash access registers.
5014  *
5015  * sc - The pointer to the hw structure
5016  * index - The index of the byte or word to read.
5017  * size - Size of data to read, 1=byte 2=word
5018  * data - Pointer to the word to store the value read.
5019  *****************************************************************************/
5020 static int32_t
5021 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
5022                      uint32_t size, uint16_t* data)
5023 {
5024     uint16_t hsfsts;
5025     uint16_t hsflctl;
5026     uint32_t flash_linear_address;
5027     uint32_t flash_data = 0;
5028     int32_t error = 1;
5029     int32_t count = 0;
5030 
5031     if (size < 1 || size > 2 || data == NULL ||
5032         index > ICH_FLASH_LINEAR_ADDR_MASK)
5033         return error;
5034 
5035     flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
5036                            sc->sc_ich8_flash_base;
5037 
5038     do {
5039         delay(1);
5040         /* Steps */
5041         error = wm_ich8_cycle_init(sc);
5042         if (error)
5043             break;
5044 
5045         hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
5046         /* A BCOUNT of 0/1 corresponds to a 1- or 2-byte transfer, respectively. */
5047         hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT) & HSFCTL_BCOUNT_MASK;
5048         hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
5049         ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
5050 
5051         /* Write the last 24 bits of index into Flash Linear address field in
5052          * Flash Address */
5053         /* TODO: maybe check the index against the size of the flash. */
5054 
5055         ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
5056 
5057         error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
5058 
5059         /* If FCERR is set to 1, clear it and retry the whole sequence a
5060          * few more times; otherwise read in (shift in) the Flash Data0,
5061          * least significant byte first. */
5062         if (error == 0) {
5063             flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
5064             if (size == 1) {
5065                 *data = (uint8_t)(flash_data & 0x000000FF);
5066             } else if (size == 2) {
5067                 *data = (uint16_t)(flash_data & 0x0000FFFF);
5068             }
5069             break;
5070         } else {
5071             /* If we've gotten here, then things are probably completely hosed,
5072              * but if the error condition is detected, it won't hurt to give
5073              * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
5074              */
5075             hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5076             if (hsfsts & HSFSTS_ERR) {
5077                 /* Repeat for some time before giving up. */
5078                 continue;
5079             } else if ((hsfsts & HSFSTS_DONE) == 0) {
5080                 break;
5081             }
5082         }
5083     } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
5084 
5085     return error;
5086 }
5087 
5088 #if 0
5089 /******************************************************************************
5090  * Reads a single byte from the NVM using the ICH8 flash access registers.
5091  *
5092  * sc - pointer to wm_hw structure
5093  * index - The index of the byte to read.
5094  * data - Pointer to a byte to store the value read.
5095  *****************************************************************************/
5096 static int32_t
5097 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
5098 {
5099     int32_t status;
5100     uint16_t word = 0;
5101 
5102     status = wm_read_ich8_data(sc, index, 1, &word);
5103     if (status == 0) {
5104         *data = (uint8_t)word;
5105     }
5106 
5107     return status;
5108 }
5109 #endif
5110 
5111 /******************************************************************************
5112  * Reads a word from the NVM using the ICH8 flash access registers.
5113  *
5114  * sc - pointer to wm_hw structure
5115  * index - The starting byte index of the word to read.
5116  * data - Pointer to a word to store the value read.
5117  *****************************************************************************/
5118 static int32_t
5119 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
5120 {
5121     int32_t status;
5122 
5123     status = wm_read_ich8_data(sc, index, 2, data);
5124     return status;
5125 }
5126