1 /*	$NetBSD: if_wm.c,v 1.65 2004/01/14 14:29:48 tsutsui Exp $	*/
2 
3 /*
4  * Copyright (c) 2001, 2002, 2003 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*
39  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
40  *
41  * TODO (in order of importance):
42  *
43  *	- Rework how parameters are loaded from the EEPROM.
44  *	- Figure out performance stability issue on i82547 (fvdl).
45  *	- Figure out what to do with the i82545GM and i82546GB
46  *	  SERDES controllers.
47  *	- Fix hw VLAN assist.
48  */
49 
50 #include <sys/cdefs.h>
51 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.65 2004/01/14 14:29:48 tsutsui Exp $");
52 
53 #include "bpfilter.h"
54 #include "rnd.h"
55 
56 #include <sys/param.h>
57 #include <sys/systm.h>
58 #include <sys/callout.h>
59 #include <sys/mbuf.h>
60 #include <sys/malloc.h>
61 #include <sys/kernel.h>
62 #include <sys/socket.h>
63 #include <sys/ioctl.h>
64 #include <sys/errno.h>
65 #include <sys/device.h>
66 #include <sys/queue.h>
67 
68 #include <uvm/uvm_extern.h>		/* for PAGE_SIZE */
69 
70 #if NRND > 0
71 #include <sys/rnd.h>
72 #endif
73 
74 #include <net/if.h>
75 #include <net/if_dl.h>
76 #include <net/if_media.h>
77 #include <net/if_ether.h>
78 
79 #if NBPFILTER > 0
80 #include <net/bpf.h>
81 #endif
82 
83 #include <netinet/in.h>			/* XXX for struct ip */
84 #include <netinet/in_systm.h>		/* XXX for struct ip */
85 #include <netinet/ip.h>			/* XXX for struct ip */
86 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
87 
88 #include <machine/bus.h>
89 #include <machine/intr.h>
90 #include <machine/endian.h>
91 
92 #include <dev/mii/mii.h>
93 #include <dev/mii/miivar.h>
94 #include <dev/mii/mii_bitbang.h>
95 
96 #include <dev/pci/pcireg.h>
97 #include <dev/pci/pcivar.h>
98 #include <dev/pci/pcidevs.h>
99 
100 #include <dev/pci/if_wmreg.h>
101 
102 #ifdef WM_DEBUG
103 #define	WM_DEBUG_LINK		0x01
104 #define	WM_DEBUG_TX		0x02
105 #define	WM_DEBUG_RX		0x04
106 #define	WM_DEBUG_GMII		0x08
107 int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;
108 
109 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
110 #else
111 #define	DPRINTF(x, y)	/* nothing */
112 #endif /* WM_DEBUG */
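
/*
 * Illustrative usage note (not from the original source): DPRINTF()
 * hands its second argument straight to printf, so call sites wrap
 * the format string and arguments in an extra set of parentheses:
 *
 *	DPRINTF(WM_DEBUG_TX, ("%s: TX: ...\n", sc->sc_dev.dv_xname));
 *
 * When WM_DEBUG is not defined, the whole call compiles away.
 */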
113 
114 /*
115  * Transmit descriptor list size.  Due to errata, we can only have
116  * 256 hardware descriptors in the ring.  We tell the upper layers
117  * that they can queue a lot of packets, and we go ahead and manage
118  * up to 64 of them at a time.  We allow up to 40 DMA segments per
119  * packet (there have been reports of jumbo frame packets with as
120  * many as 30 DMA segments!).
121  */
122 #define	WM_NTXSEGS		40
123 #define	WM_IFQUEUELEN		256
124 #define	WM_TXQUEUELEN		64
125 #define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
126 #define	WM_TXQUEUE_GC		(WM_TXQUEUELEN / 8)
127 #define	WM_NTXDESC		256
128 #define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
129 #define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
130 #define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK)
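
/*
 * Example (illustrative): WM_NTXDESC and WM_TXQUEUELEN are both
 * powers of two, so the ring indices wrap with a cheap mask instead
 * of a modulo, e.g. WM_NEXTTX(254) == 255, WM_NEXTTX(255) == 0, and
 * WM_NEXTTXS(63) == 0.
 */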
131 
132 /*
133  * Receive descriptor list size.  We have one Rx buffer for normal
134  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
135  * packet.  We allocate 256 receive descriptors, each with a 2k
136  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
137  */
138 #define	WM_NRXDESC		256
139 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
140 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
141 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
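
/*
 * Worked example (illustrative): a maximum-sized jumbo frame of
 * ETHER_MAX_LEN_JUMBO (9018) bytes spans howmany(9018, MCLBYTES) ==
 * howmany(9018, 2048) == 5 of these 2k buffers, which is where the
 * "5 Rx buffers" figure above comes from.
 */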
142 
143 /*
144  * Control structures are DMA'd to the i82542 chip.  We allocate them in
145  * a single clump that maps to a single DMA segment to make several things
146  * easier.
147  */
148 struct wm_control_data {
149 	/*
150 	 * The transmit descriptors.
151 	 */
152 	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];
153 
154 	/*
155 	 * The receive descriptors.
156 	 */
157 	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
158 };
159 
160 #define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
161 #define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
162 #define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
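
/*
 * For example (illustrative), WM_CDTXOFF(4) is the byte offset of the
 * fifth Tx descriptor within the clump, i.e. 4 * sizeof(wiseman_txdesc_t),
 * and WM_CDRXOFF(0) falls immediately after the WM_NTXDESC Tx
 * descriptors (assuming no padding between the two arrays).
 */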
163 
164 /*
165  * Software state for transmit jobs.
166  */
167 struct wm_txsoft {
168 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
169 	bus_dmamap_t txs_dmamap;	/* our DMA map */
170 	int txs_firstdesc;		/* first descriptor in packet */
171 	int txs_lastdesc;		/* last descriptor in packet */
172 	int txs_ndesc;			/* # of descriptors used */
173 };
174 
175 /*
176  * Software state for receive buffers.  Each descriptor gets a
177  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
178  * more than one buffer, we chain them together.
179  */
180 struct wm_rxsoft {
181 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
182 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
183 };
184 
185 typedef enum {
186 	WM_T_unknown		= 0,
187 	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
188 	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
189 	WM_T_82543,			/* i82543 */
190 	WM_T_82544,			/* i82544 */
191 	WM_T_82540,			/* i82540 */
192 	WM_T_82545,			/* i82545 */
193 	WM_T_82545_3,			/* i82545 3.0+ */
194 	WM_T_82546,			/* i82546 */
195 	WM_T_82546_3,			/* i82546 3.0+ */
196 	WM_T_82541,			/* i82541 */
197 	WM_T_82541_2,			/* i82541 2.0+ */
198 	WM_T_82547,			/* i82547 */
199 	WM_T_82547_2,			/* i82547 2.0+ */
200 } wm_chip_type;
201 
202 /*
203  * Software state per device.
204  */
205 struct wm_softc {
206 	struct device sc_dev;		/* generic device information */
207 	bus_space_tag_t sc_st;		/* bus space tag */
208 	bus_space_handle_t sc_sh;	/* bus space handle */
209 	bus_space_tag_t sc_iot;		/* I/O space tag */
210 	bus_space_handle_t sc_ioh;	/* I/O space handle */
211 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
212 	struct ethercom sc_ethercom;	/* ethernet common data */
213 	void *sc_sdhook;		/* shutdown hook */
214 
215 	wm_chip_type sc_type;		/* chip type */
216 	int sc_flags;			/* flags; see below */
217 	int sc_bus_speed;		/* PCI/PCIX bus speed */
218 	int sc_pcix_offset;		/* PCIX capability register offset */
219 
220 	void *sc_ih;			/* interrupt cookie */
221 
222 	int sc_ee_addrbits;		/* EEPROM address bits */
223 
224 	struct mii_data sc_mii;		/* MII/media information */
225 
226 	struct callout sc_tick_ch;	/* tick callout */
227 
228 	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
229 #define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
230 
231 	int		sc_align_tweak;
232 
233 	/*
234 	 * Software state for the transmit and receive descriptors.
235 	 */
236 	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
237 	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];
238 
239 	/*
240 	 * Control data structures.
241 	 */
242 	struct wm_control_data *sc_control_data;
243 #define	sc_txdescs	sc_control_data->wcd_txdescs
244 #define	sc_rxdescs	sc_control_data->wcd_rxdescs
245 
246 #ifdef WM_EVENT_COUNTERS
247 	/* Event counters. */
248 	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
249 	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
250 	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
251 	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
252 	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
253 	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
254 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
255 
256 	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
257 	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
258 	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
259 	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
260 
261 	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
262 	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
263 	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */
264 
265 	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
266 	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */
267 
268 	struct evcnt sc_ev_tu;		/* Tx underrun */
269 #endif /* WM_EVENT_COUNTERS */
270 
271 	bus_addr_t sc_tdt_reg;		/* offset of TDT register */
272 
273 	int	sc_txfree;		/* number of free Tx descriptors */
274 	int	sc_txnext;		/* next ready Tx descriptor */
275 
276 	int	sc_txsfree;		/* number of free Tx jobs */
277 	int	sc_txsnext;		/* next free Tx job */
278 	int	sc_txsdirty;		/* dirty Tx jobs */
279 
280 	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
281 	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */
282 
283 	bus_addr_t sc_rdt_reg;		/* offset of RDT register */
284 
285 	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
286 	int	sc_rxdiscard;
287 	int	sc_rxlen;
288 	struct mbuf *sc_rxhead;
289 	struct mbuf *sc_rxtail;
290 	struct mbuf **sc_rxtailp;
291 
292 	uint32_t sc_ctrl;		/* prototype CTRL register */
293 #if 0
294 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
295 #endif
296 	uint32_t sc_icr;		/* prototype interrupt bits */
297 	uint32_t sc_tctl;		/* prototype TCTL register */
298 	uint32_t sc_rctl;		/* prototype RCTL register */
299 	uint32_t sc_txcw;		/* prototype TXCW register */
300 	uint32_t sc_tipg;		/* prototype TIPG register */
301 
302 	int sc_tbi_linkup;		/* TBI link status */
303 	int sc_tbi_anstate;		/* autonegotiation state */
304 
305 	int sc_mchash_type;		/* multicast filter offset */
306 
307 #if NRND > 0
308 	rndsource_element_t rnd_source;	/* random source */
309 #endif
310 };
311 
312 #define	WM_RXCHAIN_RESET(sc)						\
313 do {									\
314 	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
315 	*(sc)->sc_rxtailp = NULL;					\
316 	(sc)->sc_rxlen = 0;						\
317 } while (/*CONSTCOND*/0)
318 
319 #define	WM_RXCHAIN_LINK(sc, m)						\
320 do {									\
321 	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
322 	(sc)->sc_rxtailp = &(m)->m_next;				\
323 } while (/*CONSTCOND*/0)
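
/*
 * Illustrative note: sc_rxtailp always points at the m_next field of
 * the last mbuf in the chain (or at sc_rxhead when the chain is
 * empty), so WM_RXCHAIN_LINK() appends in constant time without
 * walking the chain.  After a RESET and two LINKs, for example:
 *
 *	sc_rxhead -> m1 -> m2
 *	sc_rxtail == m2, sc_rxtailp == &m2->m_next
 */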
324 
325 /* sc_flags */
326 #define	WM_F_HAS_MII		0x01	/* has MII */
327 #define	WM_F_EEPROM_HANDSHAKE	0x02	/* requires EEPROM handshake */
328 #define	WM_F_EEPROM_SPI		0x04	/* EEPROM is SPI */
329 #define	WM_F_IOH_VALID		0x10	/* I/O handle is valid */
330 #define	WM_F_BUS64		0x20	/* bus is 64-bit */
331 #define	WM_F_PCIX		0x40	/* bus is PCI-X */
332 
333 #ifdef WM_EVENT_COUNTERS
334 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
335 #else
336 #define	WM_EVCNT_INCR(ev)	/* nothing */
337 #endif
338 
339 #define	CSR_READ(sc, reg)						\
340 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
341 #define	CSR_WRITE(sc, reg, val)						\
342 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
343 
344 #define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
345 #define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))
346 
347 #define	WM_CDTXSYNC(sc, x, n, ops)					\
348 do {									\
349 	int __x, __n;							\
350 									\
351 	__x = (x);							\
352 	__n = (n);							\
353 									\
354 	/* If it will wrap around, sync to the end of the ring. */	\
355 	if ((__x + __n) > WM_NTXDESC) {					\
356 		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
357 		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
358 		    (WM_NTXDESC - __x), (ops));				\
359 		__n -= (WM_NTXDESC - __x);				\
360 		__x = 0;						\
361 	}								\
362 									\
363 	/* Now sync whatever is left. */				\
364 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
365 	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
366 } while (/*CONSTCOND*/0)
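
/*
 * Worked example (illustrative): WM_CDTXSYNC(sc, 254, 4, ops) syncs
 * descriptors 254-255 in one bus_dmamap_sync() call, then wraps and
 * syncs descriptors 0-1 in a second call, since 254 + 4 > WM_NTXDESC.
 */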
367 
368 #define	WM_CDRXSYNC(sc, x, ops)						\
369 do {									\
370 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
371 	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
372 } while (/*CONSTCOND*/0)
373 
374 #define	WM_INIT_RXDESC(sc, x)						\
375 do {									\
376 	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
377 	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
378 	struct mbuf *__m = __rxs->rxs_mbuf;				\
379 									\
380 	/*								\
381 	 * Note: We scoot the packet forward 2 bytes in the buffer	\
382 	 * so that the payload after the Ethernet header is aligned	\
383 	 * to a 4-byte boundary.					\
384 	 *								\
385 	 * XXX BRAINDAMAGE ALERT!					\
386 	 * The stupid chip uses the same size for every buffer, which	\
387 	 * is set in the Receive Control register.  We are using the 2K	\
388 	 * size option, but what we REALLY want is (2K - 2)!  For this	\
389 	 * reason, we can't "scoot" packets longer than the standard	\
390 	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
391 	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
392 	 * the upper layer copy the headers.				\
393 	 */								\
394 	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
395 									\
396 	__rxd->wrx_addr.wa_low =					\
397 	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 		\
398 		(sc)->sc_align_tweak);					\
399 	__rxd->wrx_addr.wa_high = 0;					\
400 	__rxd->wrx_len = 0;						\
401 	__rxd->wrx_cksum = 0;						\
402 	__rxd->wrx_status = 0;						\
403 	__rxd->wrx_errors = 0;						\
404 	__rxd->wrx_special = 0;						\
405 	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
406 									\
407 	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
408 } while (/*CONSTCOND*/0)
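
/*
 * Alignment example (illustrative): with sc_align_tweak == 2, the
 * 14-byte Ethernet header lands at buffer offset 2, so the IP header
 * that follows begins at offset 16, a 4-byte boundary, as required
 * on strict-alignment platforms.
 */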
409 
410 static void	wm_start(struct ifnet *);
411 static void	wm_watchdog(struct ifnet *);
412 static int	wm_ioctl(struct ifnet *, u_long, caddr_t);
413 static int	wm_init(struct ifnet *);
414 static void	wm_stop(struct ifnet *, int);
415 
416 static void	wm_shutdown(void *);
417 
418 static void	wm_reset(struct wm_softc *);
419 static void	wm_rxdrain(struct wm_softc *);
420 static int	wm_add_rxbuf(struct wm_softc *, int);
421 static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
422 static void	wm_tick(void *);
423 
424 static void	wm_set_filter(struct wm_softc *);
425 
426 static int	wm_intr(void *);
427 static void	wm_txintr(struct wm_softc *);
428 static void	wm_rxintr(struct wm_softc *);
429 static void	wm_linkintr(struct wm_softc *, uint32_t);
430 
431 static void	wm_tbi_mediainit(struct wm_softc *);
432 static int	wm_tbi_mediachange(struct ifnet *);
433 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
434 
435 static void	wm_tbi_set_linkled(struct wm_softc *);
436 static void	wm_tbi_check_link(struct wm_softc *);
437 
438 static void	wm_gmii_reset(struct wm_softc *);
439 
440 static int	wm_gmii_i82543_readreg(struct device *, int, int);
441 static void	wm_gmii_i82543_writereg(struct device *, int, int, int);
442 
443 static int	wm_gmii_i82544_readreg(struct device *, int, int);
444 static void	wm_gmii_i82544_writereg(struct device *, int, int, int);
445 
446 static void	wm_gmii_statchg(struct device *);
447 
448 static void	wm_gmii_mediainit(struct wm_softc *);
449 static int	wm_gmii_mediachange(struct ifnet *);
450 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
451 
452 static int	wm_match(struct device *, struct cfdata *, void *);
453 static void	wm_attach(struct device *, struct device *, void *);
454 
455 CFATTACH_DECL(wm, sizeof(struct wm_softc),
456     wm_match, wm_attach, NULL, NULL);
457 
458 /*
459  * Devices supported by this driver.
460  */
461 const struct wm_product {
462 	pci_vendor_id_t		wmp_vendor;
463 	pci_product_id_t	wmp_product;
464 	const char		*wmp_name;
465 	wm_chip_type		wmp_type;
466 	int			wmp_flags;
467 #define	WMP_F_1000X		0x01
468 #define	WMP_F_1000T		0x02
469 } wm_products[] = {
470 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
471 	  "Intel i82542 1000BASE-X Ethernet",
472 	  WM_T_82542_2_1,	WMP_F_1000X },
473 
474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
475 	  "Intel i82543GC 1000BASE-X Ethernet",
476 	  WM_T_82543,		WMP_F_1000X },
477 
478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
479 	  "Intel i82543GC 1000BASE-T Ethernet",
480 	  WM_T_82543,		WMP_F_1000T },
481 
482 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
483 	  "Intel i82544EI 1000BASE-T Ethernet",
484 	  WM_T_82544,		WMP_F_1000T },
485 
486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
487 	  "Intel i82544EI 1000BASE-X Ethernet",
488 	  WM_T_82544,		WMP_F_1000X },
489 
490 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
491 	  "Intel i82544GC 1000BASE-T Ethernet",
492 	  WM_T_82544,		WMP_F_1000T },
493 
494 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
495 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
496 	  WM_T_82544,		WMP_F_1000T },
497 
498 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
499 	  "Intel i82540EM 1000BASE-T Ethernet",
500 	  WM_T_82540,		WMP_F_1000T },
501 
502 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
503 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
504 	  WM_T_82540,		WMP_F_1000T },
505 
506 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
507 	  "Intel i82540EP 1000BASE-T Ethernet",
508 	  WM_T_82540,		WMP_F_1000T },
509 
510 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
511 	  "Intel i82540EP 1000BASE-T Ethernet",
512 	  WM_T_82540,		WMP_F_1000T },
513 
514 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
515 	  "Intel i82540EP 1000BASE-T Ethernet",
516 	  WM_T_82540,		WMP_F_1000T },
517 
518 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
519 	  "Intel i82545EM 1000BASE-T Ethernet",
520 	  WM_T_82545,		WMP_F_1000T },
521 
522 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
523 	  "Intel i82545GM 1000BASE-T Ethernet",
524 	  WM_T_82545_3,		WMP_F_1000T },
525 
526 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
527 	  "Intel i82545GM 1000BASE-X Ethernet",
528 	  WM_T_82545_3,		WMP_F_1000X },
529 #if 0
530 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
531 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
532 	  WM_T_82545_3,		WMP_F_SERDES },
533 #endif
534 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
535 	  "Intel i82546EB 1000BASE-T Ethernet",
536 	  WM_T_82546,		WMP_F_1000T },
537 
538 	{ PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82546EB_QUAD,
539 	  "Intel i82546EB 1000BASE-T Ethernet",
540 	  WM_T_82546,		WMP_F_1000T },
541 
542 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
543 	  "Intel i82545EM 1000BASE-X Ethernet",
544 	  WM_T_82545,		WMP_F_1000X },
545 
546 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
547 	  "Intel i82546EB 1000BASE-X Ethernet",
548 	  WM_T_82546,		WMP_F_1000X },
549 
550 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
551 	  "Intel i82546GB 1000BASE-T Ethernet",
552 	  WM_T_82546_3,		WMP_F_1000T },
553 
554 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
555 	  "Intel i82546GB 1000BASE-X Ethernet",
556 	  WM_T_82546_3,		WMP_F_1000X },
557 #if 0
558 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
559 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
560 	  WM_T_82546_3,		WMP_F_SERDES },
561 #endif
562 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
563 	  "Intel i82541EI 1000BASE-T Ethernet",
564 	  WM_T_82541,		WMP_F_1000T },
565 
566 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
567 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
568 	  WM_T_82541,		WMP_F_1000T },
569 
570 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
571 	  "Intel i82541ER 1000BASE-T Ethernet",
572 	  WM_T_82541_2,		WMP_F_1000T },
573 
574 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
575 	  "Intel i82541GI 1000BASE-T Ethernet",
576 	  WM_T_82541_2,		WMP_F_1000T },
577 
578 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
579 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
580 	  WM_T_82541_2,		WMP_F_1000T },
581 
582 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
583 	  "Intel i82547EI 1000BASE-T Ethernet",
584 	  WM_T_82547,		WMP_F_1000T },
585 
586 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
587 	  "Intel i82547GI 1000BASE-T Ethernet",
588 	  WM_T_82547_2,		WMP_F_1000T },
589 	{ 0,			0,
590 	  NULL,
591 	  0,			0 },
592 };
593 
594 #ifdef WM_EVENT_COUNTERS
595 #if WM_NTXSEGS != 40
596 #error Update wm_txseg_evcnt_names
597 #endif
598 static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
599 	"txseg1",
600 	"txseg2",
601 	"txseg3",
602 	"txseg4",
603 	"txseg5",
604 	"txseg6",
605 	"txseg7",
606 	"txseg8",
607 	"txseg9",
608 	"txseg10",
609 	"txseg11",
610 	"txseg12",
611 	"txseg13",
612 	"txseg14",
613 	"txseg15",
614 	"txseg16",
615 	"txseg17",
616 	"txseg18",
617 	"txseg19",
	"txseg20",
618 	"txseg21",
619 	"txseg22",
620 	"txseg23",
621 	"txseg24",
622 	"txseg25",
623 	"txseg26",
624 	"txseg27",
625 	"txseg28",
626 	"txseg29",
627 	"txseg30",
628 	"txseg31",
629 	"txseg32",
630 	"txseg33",
631 	"txseg34",
632 	"txseg35",
633 	"txseg36",
634 	"txseg37",
635 	"txseg38",
636 	"txseg39",
637 	"txseg40",
638 };
639 #endif /* WM_EVENT_COUNTERS */
640 
641 #if 0 /* Not currently used */
642 static __inline uint32_t
643 wm_io_read(struct wm_softc *sc, int reg)
644 {
645 
646 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
647 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
648 }
649 #endif
650 
651 static __inline void
652 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
653 {
654 
655 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
656 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
657 }
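
/*
 * Usage sketch (illustrative, assuming WM_F_IOH_VALID is set): the
 * I/O BAR is a two-register window -- an address port at offset 0 and
 * a data port at offset 4 -- so a CSR can be reached without the
 * memory mapping, e.g.:
 *
 *	wm_io_write(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
 */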
658 
659 static const struct wm_product *
660 wm_lookup(const struct pci_attach_args *pa)
661 {
662 	const struct wm_product *wmp;
663 
664 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
665 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
666 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
667 			return (wmp);
668 	}
669 	return (NULL);
670 }
671 
672 static int
673 wm_match(struct device *parent, struct cfdata *cf, void *aux)
674 {
675 	struct pci_attach_args *pa = aux;
676 
677 	if (wm_lookup(pa) != NULL)
678 		return (1);
679 
680 	return (0);
681 }
682 
683 static void
684 wm_attach(struct device *parent, struct device *self, void *aux)
685 {
686 	struct wm_softc *sc = (void *) self;
687 	struct pci_attach_args *pa = aux;
688 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
689 	pci_chipset_tag_t pc = pa->pa_pc;
690 	pci_intr_handle_t ih;
691 	const char *intrstr = NULL;
692 	const char *eetype;
693 	bus_space_tag_t memt;
694 	bus_space_handle_t memh;
695 	bus_dma_segment_t seg;
696 	int memh_valid;
697 	int i, rseg, error;
698 	const struct wm_product *wmp;
699 	uint8_t enaddr[ETHER_ADDR_LEN];
700 	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
701 	pcireg_t preg, memtype;
702 	uint32_t reg;
703 	int pmreg;
704 
705 	callout_init(&sc->sc_tick_ch);
706 
707 	wmp = wm_lookup(pa);
708 	if (wmp == NULL) {
709 		printf("\n");
710 		panic("wm_attach: impossible");
711 	}
712 
713 	sc->sc_dmat = pa->pa_dmat;
714 
715 	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
716 	aprint_naive(": Ethernet controller\n");
717 	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);
718 
719 	sc->sc_type = wmp->wmp_type;
720 	if (sc->sc_type < WM_T_82543) {
721 		if (preg < 2) {
722 			aprint_error("%s: i82542 must be at least rev. 2\n",
723 			    sc->sc_dev.dv_xname);
724 			return;
725 		}
726 		if (preg < 3)
727 			sc->sc_type = WM_T_82542_2_0;
728 	}
729 
730 	/*
731 	 * Map the device.  All devices support memory-mapped access,
732 	 * and it is really required for normal operation.
733 	 */
734 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
735 	switch (memtype) {
736 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
737 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
738 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
739 		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
740 		break;
741 	default:
742 		memh_valid = 0;
743 	}
744 
745 	if (memh_valid) {
746 		sc->sc_st = memt;
747 		sc->sc_sh = memh;
748 	} else {
749 		aprint_error("%s: unable to map device registers\n",
750 		    sc->sc_dev.dv_xname);
751 		return;
752 	}
753 
754 	/*
755 	 * In addition, i82544 and later support I/O mapped indirect
756 	 * register access.  It is not desirable (nor supported in
757 	 * this driver) to use it for normal operation, though it is
758 	 * required to work around bugs in some chip versions.
759 	 */
760 	if (sc->sc_type >= WM_T_82544) {
761 		/* First we have to find the I/O BAR. */
762 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
763 			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
764 			    PCI_MAPREG_TYPE_IO)
765 				break;
766 		}
767 		if (i == PCI_MAPREG_END)
768 			aprint_error("%s: WARNING: unable to find I/O BAR\n",
769 			    sc->sc_dev.dv_xname);
770 		else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
771 					0, &sc->sc_iot, &sc->sc_ioh,
772 					NULL, NULL) == 0)
773 			sc->sc_flags |= WM_F_IOH_VALID;
774 		else
775 			aprint_error("%s: WARNING: unable to map I/O space\n",
776 			    sc->sc_dev.dv_xname);
777 	}
778 
779 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
780 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
781 	preg |= PCI_COMMAND_MASTER_ENABLE;
782 	if (sc->sc_type < WM_T_82542_2_1)
783 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
784 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
785 
786 	/* Get it out of power save mode, if needed. */
787 	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
788 		preg = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
789 		    PCI_PMCSR_STATE_MASK;
790 		if (preg == PCI_PMCSR_STATE_D3) {
791 			/*
792 			 * The card has lost all configuration data in
793 			 * this state, so punt.
794 			 */
795 			aprint_error("%s: unable to wake from power state D3\n",
796 			    sc->sc_dev.dv_xname);
797 			return;
798 		}
799 		if (preg != PCI_PMCSR_STATE_D0) {
800 			aprint_normal("%s: waking up from power state D%d\n",
801 			    sc->sc_dev.dv_xname, preg);
802 			pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
803 			    PCI_PMCSR_STATE_D0);
804 		}
805 	}
806 
807 	/*
808 	 * Map and establish our interrupt.
809 	 */
810 	if (pci_intr_map(pa, &ih)) {
811 		aprint_error("%s: unable to map interrupt\n",
812 		    sc->sc_dev.dv_xname);
813 		return;
814 	}
815 	intrstr = pci_intr_string(pc, ih);
816 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
817 	if (sc->sc_ih == NULL) {
818 		aprint_error("%s: unable to establish interrupt",
819 		    sc->sc_dev.dv_xname);
820 		if (intrstr != NULL)
821 			aprint_normal(" at %s", intrstr);
822 		aprint_normal("\n");
823 		return;
824 	}
825 	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
826 
827 	/*
828 	 * Determine a few things about the bus we're connected to.
829 	 */
830 	if (sc->sc_type < WM_T_82543) {
831 		/* We don't really know the bus characteristics here. */
832 		sc->sc_bus_speed = 33;
833 	} else  {
834 		reg = CSR_READ(sc, WMREG_STATUS);
835 		if (reg & STATUS_BUS64)
836 			sc->sc_flags |= WM_F_BUS64;
837 		if (sc->sc_type >= WM_T_82544 &&
838 		    (reg & STATUS_PCIX_MODE) != 0) {
839 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
840 
841 			sc->sc_flags |= WM_F_PCIX;
842 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
843 					       PCI_CAP_PCIX,
844 					       &sc->sc_pcix_offset, NULL) == 0)
845 				aprint_error("%s: unable to find PCIX "
846 				    "capability\n", sc->sc_dev.dv_xname);
847 			else if (sc->sc_type != WM_T_82545_3 &&
848 				 sc->sc_type != WM_T_82546_3) {
849 				/*
850 				 * Work around a problem caused by the BIOS
851 				 * setting the max memory read byte count
852 				 * incorrectly.
853 				 */
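				/*
				 * Illustrative: both fields encode
				 * 512 << n bytes, so e.g. a BIOS-set
				 * MMRBC of 4096 (n == 3) is clamped
				 * below to a device maximum of 2048
				 * (n == 2).
				 */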
854 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
855 				    sc->sc_pcix_offset + PCI_PCIX_CMD);
856 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
857 				    sc->sc_pcix_offset + PCI_PCIX_STATUS);
858 
859 				bytecnt =
860 				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
861 				    PCI_PCIX_CMD_BYTECNT_SHIFT;
862 				maxb =
863 				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
864 				    PCI_PCIX_STATUS_MAXB_SHIFT;
865 				if (bytecnt > maxb) {
866 					aprint_verbose("%s: resetting PCI-X "
867 					    "MMRBC: %d -> %d\n",
868 					    sc->sc_dev.dv_xname,
869 					    512 << bytecnt, 512 << maxb);
870 					pcix_cmd = (pcix_cmd &
871 					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
872 					   (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
873 					pci_conf_write(pa->pa_pc, pa->pa_tag,
874 					    sc->sc_pcix_offset + PCI_PCIX_CMD,
875 					    pcix_cmd);
876 				}
877 			}
878 		}
879 		/*
880 		 * The quad port adapter is special; it has a PCIX-PCIX
881 		 * bridge on the board, and can run the secondary bus at
882 		 * a higher speed.
883 		 */
884 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
885 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
886 								      : 66;
887 		} else if (sc->sc_flags & WM_F_PCIX) {
888 			switch (reg & STATUS_PCIXSPD_MASK) {
889 			case STATUS_PCIXSPD_50_66:
890 				sc->sc_bus_speed = 66;
891 				break;
892 			case STATUS_PCIXSPD_66_100:
893 				sc->sc_bus_speed = 100;
894 				break;
895 			case STATUS_PCIXSPD_100_133:
896 				sc->sc_bus_speed = 133;
897 				break;
898 			default:
899 				aprint_error(
900 				    "%s: unknown PCIXSPD %d; assuming 66MHz\n",
901 				    sc->sc_dev.dv_xname,
902 				    reg & STATUS_PCIXSPD_MASK);
903 				sc->sc_bus_speed = 66;
904 			}
905 		} else
906 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
907 		aprint_verbose("%s: %d-bit %dMHz %s bus\n", sc->sc_dev.dv_xname,
908 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
909 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
910 	}
911 
912 	/*
913 	 * Allocate the control data structures, and create and load the
914 	 * DMA map for it.
915 	 */
916 	if ((error = bus_dmamem_alloc(sc->sc_dmat,
917 	    sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
918 	    0)) != 0) {
919 		aprint_error(
920 		    "%s: unable to allocate control data, error = %d\n",
921 		    sc->sc_dev.dv_xname, error);
922 		goto fail_0;
923 	}
924 
925 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
926 	    sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
927 	    0)) != 0) {
928 		aprint_error("%s: unable to map control data, error = %d\n",
929 		    sc->sc_dev.dv_xname, error);
930 		goto fail_1;
931 	}
932 
933 	if ((error = bus_dmamap_create(sc->sc_dmat,
934 	    sizeof(struct wm_control_data), 1,
935 	    sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
936 		aprint_error("%s: unable to create control data DMA map, "
937 		    "error = %d\n", sc->sc_dev.dv_xname, error);
938 		goto fail_2;
939 	}
940 
941 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
942 	    sc->sc_control_data, sizeof(struct wm_control_data), NULL,
943 	    0)) != 0) {
944 		aprint_error(
945 		    "%s: unable to load control data DMA map, error = %d\n",
946 		    sc->sc_dev.dv_xname, error);
947 		goto fail_3;
948 	}
949 
950 	/*
951 	 * Create the transmit buffer DMA maps.
952 	 */
953 	for (i = 0; i < WM_TXQUEUELEN; i++) {
954 		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
955 		    WM_NTXSEGS, MCLBYTES, 0, 0,
956 		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
957 			aprint_error("%s: unable to create Tx DMA map %d, "
958 			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
959 			goto fail_4;
960 		}
961 	}
962 
963 	/*
964 	 * Create the receive buffer DMA maps.
965 	 */
966 	for (i = 0; i < WM_NRXDESC; i++) {
967 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
968 		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
969 			aprint_error("%s: unable to create Rx DMA map %d, "
970 			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
971 			goto fail_5;
972 		}
973 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
974 	}
975 
976 	/*
977 	 * Reset the chip to a known state.
978 	 */
979 	wm_reset(sc);
980 
981 	/*
982 	 * Get some information about the EEPROM.
983 	 */
984 	if (sc->sc_type >= WM_T_82540)
985 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
986 	if (sc->sc_type <= WM_T_82544)
987 		sc->sc_ee_addrbits = 6;
988 	else if (sc->sc_type <= WM_T_82546_3) {
989 		reg = CSR_READ(sc, WMREG_EECD);
990 		if (reg & EECD_EE_SIZE)
991 			sc->sc_ee_addrbits = 8;
992 		else
993 			sc->sc_ee_addrbits = 6;
994 	} else if (sc->sc_type <= WM_T_82547_2) {
995 		reg = CSR_READ(sc, WMREG_EECD);
996 		if (reg & EECD_EE_TYPE) {
997 			sc->sc_flags |= WM_F_EEPROM_SPI;
998 			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
999 		} else
1000 			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
1001 	} else {
1002 		/* Assume everything else is SPI. */
1003 		reg = CSR_READ(sc, WMREG_EECD);
1004 		sc->sc_flags |= WM_F_EEPROM_SPI;
1005 		sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
1006 	}
1007 	if (sc->sc_flags & WM_F_EEPROM_SPI)
1008 		eetype = "SPI";
1009 	else
1010 		eetype = "MicroWire";
1011 	aprint_verbose("%s: %u word (%d address bits) %s EEPROM\n",
1012 	    sc->sc_dev.dv_xname, 1U << sc->sc_ee_addrbits,
1013 	    sc->sc_ee_addrbits, eetype);
1014 
1015 	/*
1016 	 * Read the Ethernet address from the EEPROM.
1017 	 */
1018 	if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
1019 	    sizeof(myea) / sizeof(myea[0]), myea)) {
1020 		aprint_error("%s: unable to read Ethernet address\n",
1021 		    sc->sc_dev.dv_xname);
1022 		return;
1023 	}
1024 	enaddr[0] = myea[0] & 0xff;
1025 	enaddr[1] = myea[0] >> 8;
1026 	enaddr[2] = myea[1] & 0xff;
1027 	enaddr[3] = myea[1] >> 8;
1028 	enaddr[4] = myea[2] & 0xff;
1029 	enaddr[5] = myea[2] >> 8;
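
	/*
	 * Worked example (illustrative): the EEPROM stores the MAC
	 * address as three little-endian 16-bit words, so
	 * myea[] = { 0x1100, 0x3322, 0x5544 } unpacks to the Ethernet
	 * address 00:11:22:33:44:55.
	 */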
1030 
1031 	/*
1032 	 * Toggle the LSB of the MAC address on the second port
1033 	 * of the i82546.
1034 	 */
1035 	if (sc->sc_type == WM_T_82546) {
1036 		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
1037 			enaddr[5] ^= 1;
1038 	}
1039 
1040 	aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
1041 	    ether_sprintf(enaddr));
1042 
1043 	/*
1044 	 * Read the config info from the EEPROM, and set up various
1045 	 * bits in the control registers based on their contents.
1046 	 */
1047 	if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1048 		aprint_error("%s: unable to read CFG1 from EEPROM\n",
1049 		    sc->sc_dev.dv_xname);
1050 		return;
1051 	}
1052 	if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1053 		aprint_error("%s: unable to read CFG2 from EEPROM\n",
1054 		    sc->sc_dev.dv_xname);
1055 		return;
1056 	}
1057 	if (sc->sc_type >= WM_T_82544) {
1058 		if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1059 			aprint_error("%s: unable to read SWDPIN from EEPROM\n",
1060 			    sc->sc_dev.dv_xname);
1061 			return;
1062 		}
1063 	}
1064 
1065 	if (cfg1 & EEPROM_CFG1_ILOS)
1066 		sc->sc_ctrl |= CTRL_ILOS;
1067 	if (sc->sc_type >= WM_T_82544) {
1068 		sc->sc_ctrl |=
1069 		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1070 		    CTRL_SWDPIO_SHIFT;
1071 		sc->sc_ctrl |=
1072 		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1073 		    CTRL_SWDPINS_SHIFT;
1074 	} else {
1075 		sc->sc_ctrl |=
1076 		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1077 		    CTRL_SWDPIO_SHIFT;
1078 	}
1079 
1080 #if 0
1081 	if (sc->sc_type >= WM_T_82544) {
1082 		if (cfg1 & EEPROM_CFG1_IPS0)
1083 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1084 		if (cfg1 & EEPROM_CFG1_IPS1)
1085 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1086 		sc->sc_ctrl_ext |=
1087 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1088 		    CTRL_EXT_SWDPIO_SHIFT;
1089 		sc->sc_ctrl_ext |=
1090 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1091 		    CTRL_EXT_SWDPINS_SHIFT;
1092 	} else {
1093 		sc->sc_ctrl_ext |=
1094 		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1095 		    CTRL_EXT_SWDPIO_SHIFT;
1096 	}
1097 #endif
1098 
1099 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1100 #if 0
1101 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1102 #endif
1103 
1104 	/*
1105 	 * Set up some register offsets that are different between
1106 	 * the i82542 and the i82543 and later chips.
1107 	 */
1108 	if (sc->sc_type < WM_T_82543) {
1109 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
1110 		sc->sc_tdt_reg = WMREG_OLD_TDT;
1111 	} else {
1112 		sc->sc_rdt_reg = WMREG_RDT;
1113 		sc->sc_tdt_reg = WMREG_TDT;
1114 	}
1115 
1116 	/*
1117 	 * Determine if we should use flow control.  We should
1118 	 * always use it, unless we're on an i82542 < 2.1.
1119 	 */
1120 	if (sc->sc_type >= WM_T_82542_2_1)
1121 		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;
1122 
1123 	/*
1124 	 * Determine if we're TBI or GMII mode, and initialize the
1125 	 * media structures accordingly.
1126 	 */
1127 	if (sc->sc_type < WM_T_82543 ||
1128 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1129 		if (wmp->wmp_flags & WMP_F_1000T)
1130 			aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
1131 			    "product!\n", sc->sc_dev.dv_xname);
1132 		wm_tbi_mediainit(sc);
1133 	} else {
1134 		if (wmp->wmp_flags & WMP_F_1000X)
1135 			aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
1136 			    "product!\n", sc->sc_dev.dv_xname);
1137 		wm_gmii_mediainit(sc);
1138 	}
1139 
1140 	ifp = &sc->sc_ethercom.ec_if;
1141 	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
1142 	ifp->if_softc = sc;
1143 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1144 	ifp->if_ioctl = wm_ioctl;
1145 	ifp->if_start = wm_start;
1146 	ifp->if_watchdog = wm_watchdog;
1147 	ifp->if_init = wm_init;
1148 	ifp->if_stop = wm_stop;
1149 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1150 	IFQ_SET_READY(&ifp->if_snd);
1151 
1152 	sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1153 
1154 	/*
1155 	 * If we're an i82543 or greater, we can support VLANs.
1156 	 */
1157 	if (sc->sc_type >= WM_T_82543)
1158 		sc->sc_ethercom.ec_capabilities |=
1159 		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;
1160 
1161 	/*
1162 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
1163 	 * on i82543 and later.
1164 	 */
1165 	if (sc->sc_type >= WM_T_82543)
1166 		ifp->if_capabilities |=
1167 		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
1168 
1169 	/*
1170 	 * Attach the interface.
1171 	 */
1172 	if_attach(ifp);
1173 	ether_ifattach(ifp, enaddr);
1174 #if NRND > 0
1175 	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
1176 	    RND_TYPE_NET, 0);
1177 #endif
1178 
1179 #ifdef WM_EVENT_COUNTERS
1180 	/* Attach event counters. */
1181 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1182 	    NULL, sc->sc_dev.dv_xname, "txsstall");
1183 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1184 	    NULL, sc->sc_dev.dv_xname, "txdstall");
1185 	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
1186 	    NULL, sc->sc_dev.dv_xname, "txforceintr");
1187 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1188 	    NULL, sc->sc_dev.dv_xname, "txdw");
1189 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1190 	    NULL, sc->sc_dev.dv_xname, "txqe");
1191 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1192 	    NULL, sc->sc_dev.dv_xname, "rxintr");
1193 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
1194 	    NULL, sc->sc_dev.dv_xname, "linkintr");
1195 
1196 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1197 	    NULL, sc->sc_dev.dv_xname, "rxipsum");
1198 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
1199 	    NULL, sc->sc_dev.dv_xname, "rxtusum");
1200 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1201 	    NULL, sc->sc_dev.dv_xname, "txipsum");
1202 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
1203 	    NULL, sc->sc_dev.dv_xname, "txtusum");
1204 
1205 	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
1206 	    NULL, sc->sc_dev.dv_xname, "txctx init");
1207 	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
1208 	    NULL, sc->sc_dev.dv_xname, "txctx hit");
1209 	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
1210 	    NULL, sc->sc_dev.dv_xname, "txctx miss");
1211 
1212 	for (i = 0; i < WM_NTXSEGS; i++)
1213 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
1214 		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);
1215 
1216 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
1217 	    NULL, sc->sc_dev.dv_xname, "txdrop");
1218 
1219 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
1220 	    NULL, sc->sc_dev.dv_xname, "tu");
1221 #endif /* WM_EVENT_COUNTERS */
1222 
1223 	/*
1224 	 * Make sure the interface is shutdown during reboot.
1225 	 */
1226 	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
1227 	if (sc->sc_sdhook == NULL)
1228 		aprint_error("%s: WARNING: unable to establish shutdown hook\n",
1229 		    sc->sc_dev.dv_xname);
1230 	return;
1231 
1232 	/*
1233 	 * Free any resources we've allocated during the failed attach
1234 	 * attempt.  Do this in reverse order and fall through.
1235 	 */
1236  fail_5:
1237 	for (i = 0; i < WM_NRXDESC; i++) {
1238 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
1239 			bus_dmamap_destroy(sc->sc_dmat,
1240 			    sc->sc_rxsoft[i].rxs_dmamap);
1241 	}
1242  fail_4:
1243 	for (i = 0; i < WM_TXQUEUELEN; i++) {
1244 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
1245 			bus_dmamap_destroy(sc->sc_dmat,
1246 			    sc->sc_txsoft[i].txs_dmamap);
1247 	}
1248 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
1249  fail_3:
1250 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
1251  fail_2:
1252 	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
1253 	    sizeof(struct wm_control_data));
1254  fail_1:
1255 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1256  fail_0:
1257 	return;
1258 }
1259 
1260 /*
1261  * wm_shutdown:
1262  *
1263  *	Make sure the interface is stopped at reboot time.
1264  */
1265 static void
1266 wm_shutdown(void *arg)
1267 {
1268 	struct wm_softc *sc = arg;
1269 
1270 	wm_stop(&sc->sc_ethercom.ec_if, 1);
1271 }
1272 
1273 /*
1274  * wm_tx_cksum:
1275  *
1276  *	Set up TCP/IP checksumming parameters for the
1277  *	specified packet.
1278  */
1279 static int
1280 wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
1281     uint8_t *fieldsp)
1282 {
1283 	struct mbuf *m0 = txs->txs_mbuf;
1284 	struct livengood_tcpip_ctxdesc *t;
1285 	uint32_t ipcs, tucs;
1286 	struct ip *ip;
1287 	struct ether_header *eh;
1288 	int offset, iphl;
1289 	uint8_t fields = 0;
1290 
1291 	/*
1292 	 * XXX It would be nice if the mbuf pkthdr had offset
1293 	 * fields for the protocol headers.
1294 	 */
1295 
1296 	eh = mtod(m0, struct ether_header *);
1297 	switch (htons(eh->ether_type)) {
1298 	case ETHERTYPE_IP:
1299 		iphl = sizeof(struct ip);
1300 		offset = ETHER_HDR_LEN;
1301 		break;
1302 
1303 	case ETHERTYPE_VLAN:
1304 		iphl = sizeof(struct ip);
1305 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1306 		break;
1307 
1308 	default:
1309 		/*
1310 		 * Don't support this protocol or encapsulation.
1311 		 */
1312 		*fieldsp = 0;
1313 		*cmdp = 0;
1314 		return (0);
1315 	}
1316 
1317 	if (m0->m_len < (offset + iphl)) {
1318 		if ((txs->txs_mbuf = m_pullup(m0, offset + iphl)) == NULL) {
1319 			printf("%s: wm_tx_cksum: mbuf allocation failed, "
1320 			    "packet dropped\n", sc->sc_dev.dv_xname);
1321 			return (ENOMEM);
1322 		}
1323 		m0 = txs->txs_mbuf;
1324 	}
1325 
1326 	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
1327 	iphl = ip->ip_hl << 2;
1328 
1329 	/*
1330 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
1331 	 * offload feature, if we load the context descriptor, we
1332 	 * MUST provide valid values for IPCSS and TUCSS fields.
1333 	 */
1334 
1335 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
1336 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
1337 		fields |= WTX_IXSM;
1338 		ipcs = WTX_TCPIP_IPCSS(offset) |
1339 		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1340 		    WTX_TCPIP_IPCSE(offset + iphl - 1);
1341 	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
1342 		/* Use the cached value. */
1343 		ipcs = sc->sc_txctx_ipcs;
1344 	} else {
1345 		/* Just initialize it to the likely value anyway. */
1346 		ipcs = WTX_TCPIP_IPCSS(offset) |
1347 		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1348 		    WTX_TCPIP_IPCSE(offset + iphl - 1);
1349 	}
1350 
1351 	offset += iphl;
1352 
1353 	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
1354 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
1355 		fields |= WTX_TXSM;
1356 		tucs = WTX_TCPIP_TUCSS(offset) |
1357 		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
1358 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
1359 	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
1360 		/* Use the cached value. */
1361 		tucs = sc->sc_txctx_tucs;
1362 	} else {
1363 		/* Just initialize it to a valid TCP context. */
1364 		tucs = WTX_TCPIP_TUCSS(offset) |
1365 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
1366 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
1367 	}
1368 
1369 	if (sc->sc_txctx_ipcs == ipcs &&
1370 	    sc->sc_txctx_tucs == tucs) {
1371 		/* Cached context is fine. */
1372 		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
1373 	} else {
1374 		/* Fill in the context descriptor. */
1375 #ifdef WM_EVENT_COUNTERS
1376 		if (sc->sc_txctx_ipcs == 0xffffffff &&
1377 		    sc->sc_txctx_tucs == 0xffffffff)
1378 			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
1379 		else
1380 			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
1381 #endif
1382 		t = (struct livengood_tcpip_ctxdesc *)
1383 		    &sc->sc_txdescs[sc->sc_txnext];
1384 		t->tcpip_ipcs = htole32(ipcs);
1385 		t->tcpip_tucs = htole32(tucs);
1386 		t->tcpip_cmdlen = htole32(WTX_CMD_DEXT | WTX_DTYP_C);
1387 		t->tcpip_seg = 0;
1388 		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
1389 
1390 		sc->sc_txctx_ipcs = ipcs;
1391 		sc->sc_txctx_tucs = tucs;
1392 
1393 		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
1394 		txs->txs_ndesc++;
1395 	}
1396 
1397 	*cmdp = WTX_CMD_DEXT | WTC_DTYP_D;
1398 	*fieldsp = fields;
1399 
1400 	return (0);
1401 }
1402 
1403 /*
1404  * wm_start:		[ifnet interface function]
1405  *
1406  *	Start packet transmission on the interface.
1407  */
1408 static void
1409 wm_start(struct ifnet *ifp)
1410 {
1411 	struct wm_softc *sc = ifp->if_softc;
1412 	struct mbuf *m0;
1413 #if 0 /* XXXJRT */
1414 	struct m_tag *mtag;
1415 #endif
1416 	struct wm_txsoft *txs;
1417 	bus_dmamap_t dmamap;
1418 	int error, nexttx, lasttx = -1, ofree, seg;
1419 	uint32_t cksumcmd;
1420 	uint8_t cksumfields;
1421 
1422 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
1423 		return;
1424 
1425 	/*
1426 	 * Remember the previous number of free descriptors.
1427 	 */
1428 	ofree = sc->sc_txfree;
1429 
1430 	/*
1431 	 * Loop through the send queue, setting up transmit descriptors
1432 	 * until we drain the queue, or use up all available transmit
1433 	 * descriptors.
1434 	 */
1435 	for (;;) {
1436 		/* Grab a packet off the queue. */
1437 		IFQ_POLL(&ifp->if_snd, m0);
1438 		if (m0 == NULL)
1439 			break;
1440 
1441 		DPRINTF(WM_DEBUG_TX,
1442 		    ("%s: TX: have packet to transmit: %p\n",
1443 		    sc->sc_dev.dv_xname, m0));
1444 
1445 		/* Get a work queue entry. */
1446 		if (sc->sc_txsfree < WM_TXQUEUE_GC) {
1447 			wm_txintr(sc);
1448 			if (sc->sc_txsfree == 0) {
1449 				DPRINTF(WM_DEBUG_TX,
1450 				    ("%s: TX: no free job descriptors\n",
1451 					sc->sc_dev.dv_xname));
1452 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
1453 				break;
1454 			}
1455 		}
1456 
1457 		txs = &sc->sc_txsoft[sc->sc_txsnext];
1458 		dmamap = txs->txs_dmamap;
1459 
1460 		/*
1461 		 * Load the DMA map.  If this fails, the packet either
1462 		 * didn't fit in the allotted number of segments, or we
1463 		 * were short on resources.  For the too-many-segments
1464 		 * case, we simply report an error and drop the packet,
1465 		 * since we can't sanely copy a jumbo packet to a single
1466 		 * buffer.
1467 		 */
1468 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
1469 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1470 		if (error) {
1471 			if (error == EFBIG) {
1472 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
1473 				printf("%s: Tx packet consumes too many "
1474 				    "DMA segments, dropping...\n",
1475 				    sc->sc_dev.dv_xname);
1476 				IFQ_DEQUEUE(&ifp->if_snd, m0);
1477 				m_freem(m0);
1478 				continue;
1479 			}
1480 			/*
1481 			 * Short on resources, just stop for now.
1482 			 */
1483 			DPRINTF(WM_DEBUG_TX,
1484 			    ("%s: TX: dmamap load failed: %d\n",
1485 			    sc->sc_dev.dv_xname, error));
1486 			break;
1487 		}
1488 
1489 		/*
1490 		 * Ensure we have enough descriptors free to describe
1491 		 * the packet.  Note, we always reserve one descriptor
1492 		 * at the end of the ring due to the semantics of the
1493 		 * TDT register, plus one more in the event we need
1494 		 * to re-load checksum offload context.
1495 		 */
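		/*
		 * For example (illustrative): a packet that loads into
		 * WM_NTXSEGS (40) DMA segments needs sc_txfree >= 42
		 * before we commit to it.
		 */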
1496 		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
1497 			/*
1498 			 * Not enough free descriptors to transmit this
1499 			 * packet.  We haven't committed anything yet,
1500 			 * so just unload the DMA map, put the packet
1501 			 * back on the queue, and punt.  Notify the upper
1502 			 * layer that there are no more slots left.
1503 			 */
1504 			DPRINTF(WM_DEBUG_TX,
1505 			    ("%s: TX: need %d descriptors, have %d\n",
1506 			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
1507 			    sc->sc_txfree - 1));
1508 			ifp->if_flags |= IFF_OACTIVE;
1509 			bus_dmamap_unload(sc->sc_dmat, dmamap);
1510 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
1511 			break;
1512 		}
1513 
1514 		IFQ_DEQUEUE(&ifp->if_snd, m0);
1515 
1516 		/*
1517 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
1518 		 */
1519 
1520 		/* Sync the DMA map. */
1521 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
1522 		    BUS_DMASYNC_PREWRITE);
1523 
1524 		DPRINTF(WM_DEBUG_TX,
1525 		    ("%s: TX: packet has %d DMA segments\n",
1526 		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));
1527 
1528 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
1529 
1530 		/*
1531 		 * Store a pointer to the packet so that we can free it
1532 		 * later.
1533 		 *
1534 		 * Initially, we consider the number of descriptors the
1535 		 * packet uses to be the number of DMA segments.  This may be
1536 		 * incremented by 1 if we do checksum offload (a descriptor
1537 		 * is used to set the checksum context).
1538 		 */
1539 		txs->txs_mbuf = m0;
1540 		txs->txs_firstdesc = sc->sc_txnext;
1541 		txs->txs_ndesc = dmamap->dm_nsegs;
1542 
1543 		/*
1544 		 * Set up checksum offload parameters for
1545 		 * this packet.
1546 		 */
1547 		if (m0->m_pkthdr.csum_flags &
1548 		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
1549 			if (wm_tx_cksum(sc, txs, &cksumcmd,
1550 					&cksumfields) != 0) {
1551 				/* Error message already displayed. */
1552 				bus_dmamap_unload(sc->sc_dmat, dmamap);
1553 				continue;
1554 			}
1555 		} else {
1556 			cksumcmd = 0;
1557 			cksumfields = 0;
1558 		}
1559 
1560 		cksumcmd |= WTX_CMD_IDE;
1561 
1562 		/*
1563 		 * Initialize the transmit descriptor.
1564 		 */
1565 		for (nexttx = sc->sc_txnext, seg = 0;
1566 		     seg < dmamap->dm_nsegs;
1567 		     seg++, nexttx = WM_NEXTTX(nexttx)) {
1568 			/*
1569 			 * Note: we currently only use 32-bit DMA
1570 			 * addresses.
1571 			 */
1572 			sc->sc_txdescs[nexttx].wtx_addr.wa_high = 0;
1573 			sc->sc_txdescs[nexttx].wtx_addr.wa_low =
1574 			    htole32(dmamap->dm_segs[seg].ds_addr);
1575 			sc->sc_txdescs[nexttx].wtx_cmdlen =
1576 			    htole32(cksumcmd | dmamap->dm_segs[seg].ds_len);
1577 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
1578 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
1579 			    cksumfields;
1580 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
1581 			lasttx = nexttx;
1582 
1583 			DPRINTF(WM_DEBUG_TX,
1584 			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
1585 			    sc->sc_dev.dv_xname, nexttx,
1586 			    le32toh(dmamap->dm_segs[seg].ds_addr),
1587 			    le32toh(dmamap->dm_segs[seg].ds_len)));
1588 		}
1589 
1590 		KASSERT(lasttx != -1);
1591 
1592 		/*
1593 		 * Set up the command byte on the last descriptor of
1594 		 * the packet.  If we're in the interrupt delay window,
1595 		 * delay the interrupt.
1596 		 */
1597 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
1598 		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);
1599 
1600 #if 0 /* XXXJRT */
1601 		/*
1602 		 * If VLANs are enabled and the packet has a VLAN tag, set
1603 		 * up the descriptor to encapsulate the packet for us.
1604 		 *
1605 		 * This is only valid on the last descriptor of the packet.
1606 		 */
1607 		if (sc->sc_ethercom.ec_nvlans != 0 &&
1608 		    (mtag = m_tag_find(m0, PACKET_TAG_VLAN, NULL)) != NULL) {
1609 			sc->sc_txdescs[lasttx].wtx_cmdlen |=
1610 			    htole32(WTX_CMD_VLE);
1611 			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
1612 			    = htole16(*(u_int *)(mtag + 1) & 0xffff);
1613 		}
1614 #endif /* XXXJRT */
1615 
1616 		txs->txs_lastdesc = lasttx;
1617 
1618 		DPRINTF(WM_DEBUG_TX,
1619 		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
1620 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
1621 
1622 		/* Sync the descriptors we're using. */
1623 		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
1624 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1625 
1626 		/* Give the packet to the chip. */
1627 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
1628 
1629 		DPRINTF(WM_DEBUG_TX,
1630 		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));
1631 
1632 		DPRINTF(WM_DEBUG_TX,
1633 		    ("%s: TX: finished transmitting packet, job %d\n",
1634 		    sc->sc_dev.dv_xname, sc->sc_txsnext));
1635 
1636 		/* Advance the tx pointer. */
1637 		sc->sc_txfree -= txs->txs_ndesc;
1638 		sc->sc_txnext = nexttx;
1639 
1640 		sc->sc_txsfree--;
1641 		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);
1642 
1643 #if NBPFILTER > 0
1644 		/* Pass the packet to any BPF listeners. */
1645 		if (ifp->if_bpf)
1646 			bpf_mtap(ifp->if_bpf, m0);
1647 #endif /* NBPFILTER > 0 */
1648 	}
1649 
1650 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
1651 		/* No more slots; notify upper layer. */
1652 		ifp->if_flags |= IFF_OACTIVE;
1653 	}
1654 
1655 	if (sc->sc_txfree != ofree) {
1656 		/* Set a watchdog timer in case the chip flakes out. */
1657 		ifp->if_timer = 5;
1658 	}
1659 }
1660 
1661 /*
1662  * wm_watchdog:		[ifnet interface function]
1663  *
1664  *	Watchdog timer handler.
1665  */
1666 static void
1667 wm_watchdog(struct ifnet *ifp)
1668 {
1669 	struct wm_softc *sc = ifp->if_softc;
1670 
1671 	/*
1672 	 * Since we're using delayed interrupts, sweep up
1673 	 * before we report an error.
1674 	 */
1675 	wm_txintr(sc);
1676 
1677 	if (sc->sc_txfree != WM_NTXDESC) {
1678 		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
1679 		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
1680 		    sc->sc_txnext);
1681 		ifp->if_oerrors++;
1682 
1683 		/* Reset the interface. */
1684 		(void) wm_init(ifp);
1685 	}
1686 
1687 	/* Try to get more packets going. */
1688 	wm_start(ifp);
1689 }
1690 
1691 /*
1692  * wm_ioctl:		[ifnet interface function]
1693  *
1694  *	Handle control requests from the operator.
1695  */
1696 static int
1697 wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1698 {
1699 	struct wm_softc *sc = ifp->if_softc;
1700 	struct ifreq *ifr = (struct ifreq *) data;
1701 	int s, error;
1702 
1703 	s = splnet();
1704 
1705 	switch (cmd) {
1706 	case SIOCSIFMEDIA:
1707 	case SIOCGIFMEDIA:
1708 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1709 		break;
1710 	default:
1711 		error = ether_ioctl(ifp, cmd, data);
1712 		if (error == ENETRESET) {
1713 			/*
1714 			 * Multicast list has changed; set the hardware filter
1715 			 * accordingly.
1716 			 */
1717 			wm_set_filter(sc);
1718 			error = 0;
1719 		}
1720 		break;
1721 	}
1722 
1723 	/* Try to get more packets going. */
1724 	wm_start(ifp);
1725 
1726 	splx(s);
1727 	return (error);
1728 }
1729 
1730 /*
1731  * wm_intr:
1732  *
1733  *	Interrupt service routine.
1734  */
1735 static int
1736 wm_intr(void *arg)
1737 {
1738 	struct wm_softc *sc = arg;
1739 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1740 	uint32_t icr;
1741 	int wantinit, handled = 0;
1742 
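	/*
	 * Keep servicing the ICR (which is clear-on-read) until no
	 * interrupt cause we care about remains; a receive overrun
	 * sets wantinit so we drop out and reinitialize below.
	 */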
1743 	for (wantinit = 0; wantinit == 0;) {
1744 		icr = CSR_READ(sc, WMREG_ICR);
1745 		if ((icr & sc->sc_icr) == 0)
1746 			break;
1747 
1748 #if 0 /*NRND > 0*/
1749 		if (RND_ENABLED(&sc->rnd_source))
1750 			rnd_add_uint32(&sc->rnd_source, icr);
1751 #endif
1752 
1753 		handled = 1;
1754 
1755 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
1756 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
1757 			DPRINTF(WM_DEBUG_RX,
1758 			    ("%s: RX: got Rx intr 0x%08x\n",
1759 			    sc->sc_dev.dv_xname,
1760 			    icr & (ICR_RXDMT0|ICR_RXT0)));
1761 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
1762 		}
1763 #endif
1764 		wm_rxintr(sc);
1765 
1766 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
1767 		if (icr & ICR_TXDW) {
1768 			DPRINTF(WM_DEBUG_TX,
1769 			    ("%s: TX: got TDXW interrupt\n",
1770 			    sc->sc_dev.dv_xname));
1771 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
1772 		}
1773 #endif
1774 		wm_txintr(sc);
1775 
1776 		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
1777 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
1778 			wm_linkintr(sc, icr);
1779 		}
1780 
1781 		if (icr & ICR_RXO) {
1782 			printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
1783 			wantinit = 1;
1784 		}
1785 	}
1786 
1787 	if (handled) {
1788 		if (wantinit)
1789 			wm_init(ifp);
1790 
1791 		/* Try to get more packets going. */
1792 		wm_start(ifp);
1793 	}
1794 
1795 	return (handled);
1796 }
1797 
1798 /*
1799  * wm_txintr:
1800  *
1801  *	Helper; handle transmit interrupts.
1802  */
1803 static void
1804 wm_txintr(struct wm_softc *sc)
1805 {
1806 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1807 	struct wm_txsoft *txs;
1808 	uint8_t status;
1809 	int i;
1810 
1811 	ifp->if_flags &= ~IFF_OACTIVE;
1812 
1813 	/*
1814 	 * Go through the Tx list and free mbufs for those
1815 	 * frames which have been transmitted.
1816 	 */
1817 	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
1818 	     i = WM_NEXTTXS(i), sc->sc_txsfree++) {
1819 		txs = &sc->sc_txsoft[i];
1820 
1821 		DPRINTF(WM_DEBUG_TX,
1822 		    ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));
1823 
1824 		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
1825 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1826 
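		/*
		 * The chip sets WTX_ST_DD (descriptor done) in the last
		 * descriptor of a job once the frame has been sent, since
		 * we requested status reporting (WTX_CMD_RS); stop at the
		 * first job still in flight.
		 */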
1827 		status =
1828 		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
1829 		if ((status & WTX_ST_DD) == 0) {
1830 			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
1831 			    BUS_DMASYNC_PREREAD);
1832 			break;
1833 		}
1834 
1835 		DPRINTF(WM_DEBUG_TX,
1836 		    ("%s: TX: job %d done: descs %d..%d\n",
1837 		    sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
1838 		    txs->txs_lastdesc));
1839 
1840 		/*
1841 		 * XXX We should probably be using the statistics
1842 		 * XXX registers, but I don't know if they exist
1843 		 * XXX on chips before the i82544.
1844 		 */
1845 
1846 #ifdef WM_EVENT_COUNTERS
1847 		if (status & WTX_ST_TU)
1848 			WM_EVCNT_INCR(&sc->sc_ev_tu);
1849 #endif /* WM_EVENT_COUNTERS */
1850 
1851 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
1852 			ifp->if_oerrors++;
1853 			if (status & WTX_ST_LC)
1854 				printf("%s: late collision\n",
1855 				    sc->sc_dev.dv_xname);
1856 			else if (status & WTX_ST_EC) {
1857 				ifp->if_collisions += 16;
1858 				printf("%s: excessive collisions\n",
1859 				    sc->sc_dev.dv_xname);
1860 			}
1861 		} else
1862 			ifp->if_opackets++;
1863 
1864 		sc->sc_txfree += txs->txs_ndesc;
1865 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1866 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1867 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1868 		m_freem(txs->txs_mbuf);
1869 		txs->txs_mbuf = NULL;
1870 	}
1871 
1872 	/* Update the dirty transmit buffer pointer. */
1873 	sc->sc_txsdirty = i;
1874 	DPRINTF(WM_DEBUG_TX,
1875 	    ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));
1876 
1877 	/*
1878 	 * If there are no more pending transmissions, cancel the watchdog
1879 	 * timer.
1880 	 */
1881 	if (sc->sc_txsfree == WM_TXQUEUELEN)
1882 		ifp->if_timer = 0;
1883 }
1884 
1885 /*
1886  * wm_rxintr:
1887  *
1888  *	Helper; handle receive interrupts.
1889  */
1890 static void
1891 wm_rxintr(struct wm_softc *sc)
1892 {
1893 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1894 	struct wm_rxsoft *rxs;
1895 	struct mbuf *m;
1896 	int i, len;
1897 	uint8_t status, errors;
1898 
1899 	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
1900 		rxs = &sc->sc_rxsoft[i];
1901 
1902 		DPRINTF(WM_DEBUG_RX,
1903 		    ("%s: RX: checking descriptor %d\n",
1904 		    sc->sc_dev.dv_xname, i));
1905 
1906 		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1907 
1908 		status = sc->sc_rxdescs[i].wrx_status;
1909 		errors = sc->sc_rxdescs[i].wrx_errors;
1910 		len = le16toh(sc->sc_rxdescs[i].wrx_len);
1911 
1912 		if ((status & WRX_ST_DD) == 0) {
1913 			/*
1914 			 * We have processed all of the receive descriptors.
1915 			 */
1916 			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
1917 			break;
1918 		}
1919 
1920 		if (__predict_false(sc->sc_rxdiscard)) {
1921 			DPRINTF(WM_DEBUG_RX,
1922 			    ("%s: RX: discarding contents of descriptor %d\n",
1923 			    sc->sc_dev.dv_xname, i));
1924 			WM_INIT_RXDESC(sc, i);
1925 			if (status & WRX_ST_EOP) {
1926 				/* Reset our state. */
1927 				DPRINTF(WM_DEBUG_RX,
1928 				    ("%s: RX: resetting rxdiscard -> 0\n",
1929 				    sc->sc_dev.dv_xname));
1930 				sc->sc_rxdiscard = 0;
1931 			}
1932 			continue;
1933 		}
1934 
1935 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1936 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1937 
1938 		m = rxs->rxs_mbuf;
1939 
1940 		/*
1941 		 * Add a new receive buffer to the ring.
1942 		 */
1943 		if (wm_add_rxbuf(sc, i) != 0) {
1944 			/*
1945 			 * Failed, throw away what we've done so
1946 			 * far, and discard the rest of the packet.
1947 			 */
1948 			ifp->if_ierrors++;
1949 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1950 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1951 			WM_INIT_RXDESC(sc, i);
1952 			if ((status & WRX_ST_EOP) == 0)
1953 				sc->sc_rxdiscard = 1;
1954 			if (sc->sc_rxhead != NULL)
1955 				m_freem(sc->sc_rxhead);
1956 			WM_RXCHAIN_RESET(sc);
1957 			DPRINTF(WM_DEBUG_RX,
1958 			    ("%s: RX: Rx buffer allocation failed, "
1959 			    "dropping packet%s\n", sc->sc_dev.dv_xname,
1960 			    sc->sc_rxdiscard ? " (discard)" : ""));
1961 			continue;
1962 		}
1963 
1964 		WM_RXCHAIN_LINK(sc, m);
1965 
1966 		m->m_len = len;
1967 
1968 		DPRINTF(WM_DEBUG_RX,
1969 		    ("%s: RX: buffer at %p len %d\n",
1970 		    sc->sc_dev.dv_xname, m->m_data, len));
1971 
1972 		/*
1973 		 * If this is not the end of the packet, keep
1974 		 * looking.
1975 		 */
1976 		if ((status & WRX_ST_EOP) == 0) {
1977 			sc->sc_rxlen += len;
1978 			DPRINTF(WM_DEBUG_RX,
1979 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
1980 			    sc->sc_dev.dv_xname, sc->sc_rxlen));
1981 			continue;
1982 		}
1983 
1984 		/*
1985 		 * Okay, we have the entire packet now...
1986 		 */
1987 		*sc->sc_rxtailp = NULL;
1988 		m = sc->sc_rxhead;
1989 		len += sc->sc_rxlen;
1990 
1991 		WM_RXCHAIN_RESET(sc);
1992 
1993 		DPRINTF(WM_DEBUG_RX,
1994 		    ("%s: RX: have entire packet, len -> %d\n",
1995 		    sc->sc_dev.dv_xname, len));
1996 
1997 		/*
1998 		 * If an error occurred, update stats and drop the packet.
1999 		 */
2000 		if (errors &
2001 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
2002 			ifp->if_ierrors++;
2003 			if (errors & WRX_ER_SE)
2004 				printf("%s: symbol error\n",
2005 				    sc->sc_dev.dv_xname);
2006 			else if (errors & WRX_ER_SEQ)
2007 				printf("%s: receive sequence error\n",
2008 				    sc->sc_dev.dv_xname);
2009 			else if (errors & WRX_ER_CE)
2010 				printf("%s: CRC error\n",
2011 				    sc->sc_dev.dv_xname);
2012 			m_freem(m);
2013 			continue;
2014 		}
2015 
2016 		/*
2017 		 * No errors.  Receive the packet.
2018 		 *
2019 		 * Note, we have configured the chip to include the
2020 		 * CRC with every packet.
2021 		 */
2022 		m->m_flags |= M_HASFCS;
2023 		m->m_pkthdr.rcvif = ifp;
2024 		m->m_pkthdr.len = len;
2025 
2026 #if 0 /* XXXJRT */
2027 		/*
2028 		 * If VLANs are enabled, VLAN packets have been unwrapped
2029 		 * for us.  Associate the tag with the packet.
2030 		 */
2031 		if (sc->sc_ethercom.ec_nvlans != 0 &&
2032 		    (status & WRX_ST_VP) != 0) {
2033 			struct m_tag *vtag;
2034 
2035 			vtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
2036 			    M_NOWAIT);
2037 			if (vtag == NULL) {
2038 				ifp->if_ierrors++;
2039 				printf("%s: unable to allocate VLAN tag\n",
2040 				    sc->sc_dev.dv_xname);
2041 				m_freem(m);
2042 				continue;
2043 			}
2044 
2045 			*(u_int *)(vtag + 1) =
2046 			    le16toh(sc->sc_rxdescs[i].wrx_special);
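			/*
			 * XXX Note: if this code is ever enabled, the tag
			 * XXX must also be attached to the mbuf with
			 * XXX m_tag_prepend(), or it is simply leaked here.
			 */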
2047 		}
2048 #endif /* XXXJRT */
2049 
2050 		/*
2051 		 * Set up checksum info for this packet.
2052 		 */
2053 		if (status & WRX_ST_IPCS) {
2054 			WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
2055 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2056 			if (errors & WRX_ER_IPE)
2057 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
2058 		}
2059 		if (status & WRX_ST_TCPCS) {
2060 			/*
2061 			 * Note: we don't know if this was TCP or UDP,
2062 			 * so we just set both bits, and expect the
2063 			 * upper layers to deal.
2064 			 */
2065 			WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
2066 			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
2067 			if (errors & WRX_ER_TCPE)
2068 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
2069 		}
2070 
2071 		ifp->if_ipackets++;
2072 
2073 #if NBPFILTER > 0
2074 		/* Pass this up to any BPF listeners. */
2075 		if (ifp->if_bpf)
2076 			bpf_mtap(ifp->if_bpf, m);
2077 #endif /* NBPFILTER > 0 */
2078 
2079 		/* Pass it on. */
2080 		(*ifp->if_input)(ifp, m);
2081 	}
2082 
2083 	/* Update the receive pointer. */
2084 	sc->sc_rxptr = i;
2085 
2086 	DPRINTF(WM_DEBUG_RX,
2087 	    ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
2088 }
2089 
2090 /*
2091  * wm_linkintr:
2092  *
2093  *	Helper; handle link interrupts.
2094  */
2095 static void
2096 wm_linkintr(struct wm_softc *sc, uint32_t icr)
2097 {
2098 	uint32_t status;
2099 
2100 	/*
2101 	 * If we get a link status interrupt on a 1000BASE-T
2102 	 * device, just fall into the normal MII tick path.
2103 	 */
2104 	if (sc->sc_flags & WM_F_HAS_MII) {
2105 		if (icr & ICR_LSC) {
2106 			DPRINTF(WM_DEBUG_LINK,
2107 			    ("%s: LINK: LSC -> mii_tick\n",
2108 			    sc->sc_dev.dv_xname));
2109 			mii_tick(&sc->sc_mii);
2110 		} else if (icr & ICR_RXSEQ) {
2111 			DPRINTF(WM_DEBUG_LINK,
2112 			    ("%s: LINK Receive sequence error\n",
2113 			    sc->sc_dev.dv_xname));
2114 		}
2115 		return;
2116 	}
2117 
2118 	/*
2119 	 * If we are now receiving /C/, check for link again in
2120 	 * a couple of link clock ticks.
2121 	 */
2122 	if (icr & ICR_RXCFG) {
2123 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
2124 		    sc->sc_dev.dv_xname));
2125 		sc->sc_tbi_anstate = 2;
2126 	}
2127 
2128 	if (icr & ICR_LSC) {
2129 		status = CSR_READ(sc, WMREG_STATUS);
2130 		if (status & STATUS_LU) {
2131 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
2132 			    sc->sc_dev.dv_xname,
2133 			    (status & STATUS_FD) ? "FDX" : "HDX"));
2134 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2135 			if (status & STATUS_FD)
2136 				sc->sc_tctl |=
2137 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2138 			else
2139 				sc->sc_tctl |=
2140 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2141 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2142 			sc->sc_tbi_linkup = 1;
2143 		} else {
2144 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
2145 			    sc->sc_dev.dv_xname));
2146 			sc->sc_tbi_linkup = 0;
2147 		}
2148 		sc->sc_tbi_anstate = 2;
2149 		wm_tbi_set_linkled(sc);
2150 	} else if (icr & ICR_RXSEQ) {
2151 		DPRINTF(WM_DEBUG_LINK,
2152 		    ("%s: LINK: Receive sequence error\n",
2153 		    sc->sc_dev.dv_xname));
2154 	}
2155 }
2156 
2157 /*
2158  * wm_tick:
2159  *
2160  *	One second timer, used to check link status, sweep up
2161  *	completed transmit jobs, etc.
2162  */
2163 static void
2164 wm_tick(void *arg)
2165 {
2166 	struct wm_softc *sc = arg;
2167 	int s;
2168 
2169 	s = splnet();
2170 
2171 	if (sc->sc_flags & WM_F_HAS_MII)
2172 		mii_tick(&sc->sc_mii);
2173 	else
2174 		wm_tbi_check_link(sc);
2175 
2176 	splx(s);
2177 
2178 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2179 }
2180 
2181 /*
2182  * wm_reset:
2183  *
2184  *	Reset the i82542 chip.
2185  */
2186 static void
2187 wm_reset(struct wm_softc *sc)
2188 {
2189 	int i;
2190 
2191 	switch (sc->sc_type) {
2192 	case WM_T_82544:
2193 	case WM_T_82540:
2194 	case WM_T_82545:
2195 	case WM_T_82546:
2196 	case WM_T_82541:
2197 	case WM_T_82541_2:
2198 		/*
2199 		 * These chips have a problem with the memory-mapped
2200 		 * write cycle when issuing the reset, so use I/O-mapped
2201 		 * access, if possible.
2202 		 */
2203 		if (sc->sc_flags & WM_F_IOH_VALID)
2204 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
2205 		else
2206 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2207 		break;
2208 
2209 	case WM_T_82545_3:
2210 	case WM_T_82546_3:
2211 		/* Use the shadow control register on these chips. */
2212 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
2213 		break;
2214 
2215 	default:
2216 		/* Everything else can safely use the documented method. */
2217 		CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2218 		break;
2219 	}
2220 	delay(10000);
2221 
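	/* Wait up to 1000 * 20us = 20ms for the reset bit to self-clear. */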
2222 	for (i = 0; i < 1000; i++) {
2223 		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
2224 			return;
2225 		delay(20);
2226 	}
2227 
2228 	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
2229 		printf("%s: WARNING: reset failed to complete\n",
2230 		    sc->sc_dev.dv_xname);
2231 }
2232 
2233 /*
2234  * wm_init:		[ifnet interface function]
2235  *
2236  *	Initialize the interface.  Must be called at splnet().
2237  */
2238 static int
2239 wm_init(struct ifnet *ifp)
2240 {
2241 	struct wm_softc *sc = ifp->if_softc;
2242 	struct wm_rxsoft *rxs;
2243 	int i, error = 0;
2244 	uint32_t reg;
2245 
2246 	/*
2247 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
2248 	 * There is a small but measurable benefit to avoiding the adjustment
2249 	 * of the descriptor so that the headers are aligned, for normal mtu,
2250 	 * on such platforms.  One possibility is that the DMA itself is
2251 	 * slightly more efficient if the front of the entire packet (instead
2252 	 * of the front of the headers) is aligned.
2253 	 *
2254 	 * Note we must always set align_tweak to 0 if we are using
2255 	 * jumbo frames.
2256 	 */
2257 #ifdef __NO_STRICT_ALIGNMENT
2258 	sc->sc_align_tweak = 0;
2259 #else
2260 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
2261 		sc->sc_align_tweak = 0;
2262 	else
2263 		sc->sc_align_tweak = 2;
2264 #endif /* __NO_STRICT_ALIGNMENT */
2265 
2266 	/* Cancel any pending I/O. */
2267 	wm_stop(ifp, 0);
2268 
2269 	/* Reset the chip to a known state. */
2270 	wm_reset(sc);
2271 
2272 	/* Initialize the transmit descriptor ring. */
2273 	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
2274 	WM_CDTXSYNC(sc, 0, WM_NTXDESC,
2275 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2276 	sc->sc_txfree = WM_NTXDESC;
2277 	sc->sc_txnext = 0;
2278 
2279 	sc->sc_txctx_ipcs = 0xffffffff;
2280 	sc->sc_txctx_tucs = 0xffffffff;
2281 
2282 	if (sc->sc_type < WM_T_82543) {
2283 		CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
2284 		CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
2285 		CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
2286 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
2287 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
2288 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
2289 	} else {
2290 		CSR_WRITE(sc, WMREG_TBDAH, 0);
2291 		CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
2292 		CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
2293 		CSR_WRITE(sc, WMREG_TDH, 0);
2294 		CSR_WRITE(sc, WMREG_TDT, 0);
2295 		CSR_WRITE(sc, WMREG_TIDV, 128);
2296 
2297 		CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
2298 		    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
2299 		CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
2300 		    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
2301 	}
2302 	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
2303 	CSR_WRITE(sc, WMREG_TQSA_HI, 0);
2304 
2305 	/* Initialize the transmit job descriptors. */
2306 	for (i = 0; i < WM_TXQUEUELEN; i++)
2307 		sc->sc_txsoft[i].txs_mbuf = NULL;
2308 	sc->sc_txsfree = WM_TXQUEUELEN;
2309 	sc->sc_txsnext = 0;
2310 	sc->sc_txsdirty = 0;
2311 
2312 	/*
2313 	 * Initialize the receive descriptor and receive job
2314 	 * descriptor rings.
2315 	 */
2316 	if (sc->sc_type < WM_T_82543) {
2317 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
2318 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
2319 		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
2320 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
2321 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
2322 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
2323 
2324 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
2325 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
2326 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
2327 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
2328 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
2329 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
2330 	} else {
2331 		CSR_WRITE(sc, WMREG_RDBAH, 0);
2332 		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
2333 		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
2334 		CSR_WRITE(sc, WMREG_RDH, 0);
2335 		CSR_WRITE(sc, WMREG_RDT, 0);
2336 		CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
2337 	}
2338 	for (i = 0; i < WM_NRXDESC; i++) {
2339 		rxs = &sc->sc_rxsoft[i];
2340 		if (rxs->rxs_mbuf == NULL) {
2341 			if ((error = wm_add_rxbuf(sc, i)) != 0) {
2342 				printf("%s: unable to allocate or map rx "
2343 				    "buffer %d, error = %d\n",
2344 				    sc->sc_dev.dv_xname, i, error);
2345 				/*
2346 				 * XXX Should attempt to run with fewer receive
2347 				 * XXX buffers instead of just failing.
2348 				 */
2349 				wm_rxdrain(sc);
2350 				goto out;
2351 			}
2352 		} else
2353 			WM_INIT_RXDESC(sc, i);
2354 	}
2355 	sc->sc_rxptr = 0;
2356 	sc->sc_rxdiscard = 0;
2357 	WM_RXCHAIN_RESET(sc);
2358 
2359 	/*
2360 	 * Clear out the VLAN table -- we don't use it (yet).
2361 	 */
2362 	CSR_WRITE(sc, WMREG_VET, 0);
2363 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
2364 		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
2365 
2366 	/*
2367 	 * Set up flow-control parameters.
2368 	 *
2369 	 * XXX Values could probably stand some tuning.
2370 	 */
2371 	if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
2372 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
2373 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
2374 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
2375 
2376 		if (sc->sc_type < WM_T_82543) {
2377 			CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
2378 			CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
2379 		} else {
2380 			CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
2381 			CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
2382 		}
2383 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
2384 	}
2385 
2386 #if 0 /* XXXJRT */
2387 	/* Deal with VLAN enables. */
2388 	if (sc->sc_ethercom.ec_nvlans != 0)
2389 		sc->sc_ctrl |= CTRL_VME;
2390 	else
2391 #endif /* XXXJRT */
2392 		sc->sc_ctrl &= ~CTRL_VME;
2393 
2394 	/* Write the control registers. */
2395 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2396 #if 0
2397 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2398 #endif
2399 
2400 	/*
2401 	 * Set up checksum offload parameters.
2402 	 */
2403 	reg = CSR_READ(sc, WMREG_RXCSUM);
2404 	if (ifp->if_capenable & IFCAP_CSUM_IPv4)
2405 		reg |= RXCSUM_IPOFL;
2406 	else
2407 		reg &= ~RXCSUM_IPOFL;
2408 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
2409 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
2410 	else {
2411 		reg &= ~RXCSUM_TUOFL;
2412 		if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
2413 			reg &= ~RXCSUM_IPOFL;
2414 	}
2415 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
2416 
2417 	/*
2418 	 * Set up the interrupt registers.
2419 	 */
2420 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
2421 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
2422 	    ICR_RXO | ICR_RXT0;
2423 	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
2424 		sc->sc_icr |= ICR_RXCFG;
2425 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
2426 
2427 	/* Set up the inter-packet gap. */
2428 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
2429 
2430 #if 0 /* XXXJRT */
2431 	/* Set the VLAN ethernetype. */
2432 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
2433 #endif
2434 
2435 	/*
2436 	 * Set up the transmit control register; we start out with
2437 	 * a collision distance suitable for FDX, but update it when
2438 	 * we resolve the media type.
2439 	 */
2440 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
2441 	    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2442 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2443 
2444 	/* Set the media. */
2445 	(void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
2446 
2447 	/*
2448 	 * Set up the receive control register; we actually program
2449 	 * the register when we set the receive filter.  Use multicast
2450 	 * address offset type 0.
2451 	 *
2452 	 * Only the i82544 has the ability to strip the incoming
2453 	 * CRC, so we don't enable that feature.
2454 	 */
2455 	sc->sc_mchash_type = 0;
2456 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_LPE |
2457 	    RCTL_DPF | RCTL_MO(sc->sc_mchash_type);
2458 
2459 	if (MCLBYTES == 2048) {
2460 		sc->sc_rctl |= RCTL_2k;
2461 	} else {
2462 		/*
2463 		 * XXX MCLBYTES > 2048 causes "Tx packet consumes too many DMA
2464 		 * XXX segments, dropping" -- why?
2465 		 */
2466 #if 0
2467 		if (sc->sc_type >= WM_T_82543) {
2468 			switch (MCLBYTES) {
2469 			case 4096:
2470 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
2471 				break;
2472 			case 8192:
2473 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
2474 				break;
2475 			case 16384:
2476 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
2477 				break;
2478 			default:
2479 				panic("wm_init: MCLBYTES %d unsupported",
2480 				    MCLBYTES);
2481 				break;
2482 			}
2483 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
2484 #else
2485 		panic("wm_init: MCLBYTES > 2048 not supported.");
2486 #endif
2487 	}
2488 
2489 	/* Set the receive filter. */
2490 	wm_set_filter(sc);
2491 
2492 	/* Start the one second link check clock. */
2493 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2494 
2495 	/* ...all done! */
2496 	ifp->if_flags |= IFF_RUNNING;
2497 	ifp->if_flags &= ~IFF_OACTIVE;
2498 
2499  out:
2500 	if (error)
2501 		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2502 	return (error);
2503 }
2504 
2505 /*
2506  * wm_rxdrain:
2507  *
2508  *	Drain the receive queue.
2509  */
2510 static void
2511 wm_rxdrain(struct wm_softc *sc)
2512 {
2513 	struct wm_rxsoft *rxs;
2514 	int i;
2515 
2516 	for (i = 0; i < WM_NRXDESC; i++) {
2517 		rxs = &sc->sc_rxsoft[i];
2518 		if (rxs->rxs_mbuf != NULL) {
2519 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2520 			m_freem(rxs->rxs_mbuf);
2521 			rxs->rxs_mbuf = NULL;
2522 		}
2523 	}
2524 }
2525 
2526 /*
2527  * wm_stop:		[ifnet interface function]
2528  *
2529  *	Stop transmission on the interface.
2530  */
2531 static void
2532 wm_stop(struct ifnet *ifp, int disable)
2533 {
2534 	struct wm_softc *sc = ifp->if_softc;
2535 	struct wm_txsoft *txs;
2536 	int i;
2537 
2538 	/* Stop the one second clock. */
2539 	callout_stop(&sc->sc_tick_ch);
2540 
2541 	if (sc->sc_flags & WM_F_HAS_MII) {
2542 		/* Down the MII. */
2543 		mii_down(&sc->sc_mii);
2544 	}
2545 
2546 	/* Stop the transmit and receive processes. */
2547 	CSR_WRITE(sc, WMREG_TCTL, 0);
2548 	CSR_WRITE(sc, WMREG_RCTL, 0);
2549 
2550 	/* Release any queued transmit buffers. */
2551 	for (i = 0; i < WM_TXQUEUELEN; i++) {
2552 		txs = &sc->sc_txsoft[i];
2553 		if (txs->txs_mbuf != NULL) {
2554 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2555 			m_freem(txs->txs_mbuf);
2556 			txs->txs_mbuf = NULL;
2557 		}
2558 	}
2559 
2560 	if (disable)
2561 		wm_rxdrain(sc);
2562 
2563 	/* Mark the interface as down and cancel the watchdog timer. */
2564 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2565 	ifp->if_timer = 0;
2566 }
2567 
2568 /*
2569  * wm_acquire_eeprom:
2570  *
2571  *	Perform the EEPROM handshake required on some chips.
2572  */
2573 static int
2574 wm_acquire_eeprom(struct wm_softc *sc)
2575 {
2576 	uint32_t reg;
2577 	int x;
2578 
2579 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE)  {
2580 		reg = CSR_READ(sc, WMREG_EECD);
2581 
2582 		/* Request EEPROM access. */
2583 		reg |= EECD_EE_REQ;
2584 		CSR_WRITE(sc, WMREG_EECD, reg);
2585 
2586 		/* ...and wait for it to be granted. */
2587 		for (x = 0; x < 100; x++) {
2588 			reg = CSR_READ(sc, WMREG_EECD);
2589 			if (reg & EECD_EE_GNT)
2590 				break;
2591 			delay(5);
2592 		}
2593 		if ((reg & EECD_EE_GNT) == 0) {
2594 			aprint_error("%s: could not acquire EEPROM GNT\n",
2595 			    sc->sc_dev.dv_xname);
2596 			reg &= ~EECD_EE_REQ;
2597 			CSR_WRITE(sc, WMREG_EECD, reg);
2598 			return (1);
2599 		}
2600 	}
2601 
2602 	return (0);
2603 }
2604 
2605 /*
2606  * wm_release_eeprom:
2607  *
2608  *	Release the EEPROM mutex.
2609  */
2610 static void
2611 wm_release_eeprom(struct wm_softc *sc)
2612 {
2613 	uint32_t reg;
2614 
2615 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2616 		reg = CSR_READ(sc, WMREG_EECD);
2617 		reg &= ~EECD_EE_REQ;
2618 		CSR_WRITE(sc, WMREG_EECD, reg);
2619 	}
2620 }
2621 
2622 /*
2623  * wm_eeprom_sendbits:
2624  *
2625  *	Send a series of bits to the EEPROM.
2626  */
2627 static void
2628 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
2629 {
2630 	uint32_t reg;
2631 	int x;
2632 
2633 	reg = CSR_READ(sc, WMREG_EECD);
2634 
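	/*
	 * Shift the bits out MSB-first: present each bit on EECD_DI,
	 * then pulse EECD_SK high and low again to clock it into the
	 * EEPROM.  The 2us delays should comfortably satisfy the clock
	 * timing of the serial EEPROMs these chips use.
	 */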
2635 	for (x = nbits; x > 0; x--) {
2636 		if (bits & (1U << (x - 1)))
2637 			reg |= EECD_DI;
2638 		else
2639 			reg &= ~EECD_DI;
2640 		CSR_WRITE(sc, WMREG_EECD, reg);
2641 		delay(2);
2642 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2643 		delay(2);
2644 		CSR_WRITE(sc, WMREG_EECD, reg);
2645 		delay(2);
2646 	}
2647 }
2648 
2649 /*
2650  * wm_eeprom_recvbits:
2651  *
2652  *	Receive a series of bits from the EEPROM.
2653  */
2654 static void
2655 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
2656 {
2657 	uint32_t reg, val;
2658 	int x;
2659 
2660 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
2661 
2662 	val = 0;
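	/*
	 * Clock the bits in MSB-first: raise EECD_SK, sample EECD_DO
	 * while the clock is high, then drop the clock again.
	 */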
2663 	for (x = nbits; x > 0; x--) {
2664 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2665 		delay(2);
2666 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
2667 			val |= (1U << (x - 1));
2668 		CSR_WRITE(sc, WMREG_EECD, reg);
2669 		delay(2);
2670 	}
2671 	*valp = val;
2672 }
2673 
2674 /*
2675  * wm_read_eeprom_uwire:
2676  *
2677  *	Read a word from the EEPROM using the MicroWire protocol.
2678  */
2679 static int
2680 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2681 {
2682 	uint32_t reg, val;
2683 	int i;
2684 
2685 	for (i = 0; i < wordcnt; i++) {
2686 		/* Clear SK and DI. */
2687 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
2688 		CSR_WRITE(sc, WMREG_EECD, reg);
2689 
2690 		/* Set CHIP SELECT. */
2691 		reg |= EECD_CS;
2692 		CSR_WRITE(sc, WMREG_EECD, reg);
2693 		delay(2);
2694 
2695 		/* Shift in the READ command. */
2696 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
2697 
2698 		/* Shift in address. */
2699 		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
2700 
2701 		/* Shift out the data. */
2702 		wm_eeprom_recvbits(sc, &val, 16);
2703 		data[i] = val & 0xffff;
2704 
2705 		/* Clear CHIP SELECT. */
2706 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
2707 		CSR_WRITE(sc, WMREG_EECD, reg);
2708 		delay(2);
2709 	}
2710 
2711 	return (0);
2712 }
2713 
2714 /*
2715  * wm_spi_eeprom_ready:
2716  *
2717  *	Wait for a SPI EEPROM to be ready for commands.
2718  */
2719 static int
2720 wm_spi_eeprom_ready(struct wm_softc *sc)
2721 {
2722 	uint32_t val;
2723 	int usec;
2724 
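	/*
	 * Poll the EEPROM status register (RDSR); SPI_SR_RDY set means
	 * the part is still busy with a previous cycle, so wait for it
	 * to clear before issuing a new command.
	 */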
2725 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
2726 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
2727 		wm_eeprom_recvbits(sc, &val, 8);
2728 		if ((val & SPI_SR_RDY) == 0)
2729 			break;
2730 	}
2731 	if (usec >= SPI_MAX_RETRIES) {
2732 		aprint_error("%s: EEPROM failed to become ready\n",
2733 		    sc->sc_dev.dv_xname);
2734 		return (1);
2735 	}
2736 	return (0);
2737 }
2738 
2739 /*
2740  * wm_read_eeprom_spi:
2741  *
2742  *	Read a word from the EEPROM using the SPI protocol.
2743  */
2744 static int
2745 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2746 {
2747 	uint32_t reg, val;
2748 	int i;
2749 	uint8_t opc;
2750 
2751 	/* Clear SK and CS. */
2752 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
2753 	CSR_WRITE(sc, WMREG_EECD, reg);
2754 	delay(2);
2755 
2756 	if (wm_spi_eeprom_ready(sc))
2757 		return (1);
2758 
2759 	/* Toggle CS to flush commands. */
2760 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
2761 	delay(2);
2762 	CSR_WRITE(sc, WMREG_EECD, reg);
2763 	delay(2);
2764 
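	/*
	 * Small SPI parts with 8 address bits encode the ninth address
	 * bit (A8) in the opcode itself.
	 */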
2765 	opc = SPI_OPC_READ;
2766 	if (sc->sc_ee_addrbits == 8 && word >= 128)
2767 		opc |= SPI_OPC_A8;
2768 
2769 	wm_eeprom_sendbits(sc, opc, 8);
2770 	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
2771 
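	/*
	 * SPI EEPROMs are byte-addressed, hence the "word << 1" above.
	 * Each 16-bit word comes back as two bytes that end up swapped
	 * in "val", so put them back into host word order as we go.
	 */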
2772 	for (i = 0; i < wordcnt; i++) {
2773 		wm_eeprom_recvbits(sc, &val, 16);
2774 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
2775 	}
2776 
2777 	/* Raise CS and clear SK. */
2778 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
2779 	CSR_WRITE(sc, WMREG_EECD, reg);
2780 	delay(2);
2781 
2782 	return (0);
2783 }
2784 
2785 /*
2786  * wm_read_eeprom:
2787  *
2788  *	Read data from the serial EEPROM.
2789  */
2790 static int
2791 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2792 {
2793 	int rv;
2794 
2795 	if (wm_acquire_eeprom(sc))
2796 		return (1);
2797 
2798 	if (sc->sc_flags & WM_F_EEPROM_SPI)
2799 		rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
2800 	else
2801 		rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
2802 
2803 	wm_release_eeprom(sc);
2804 	return (rv);
2805 }
2806 
2807 /*
2808  * wm_add_rxbuf:
2809  *
2810  *	Add a receive buffer to the indicated descriptor.
2811  */
2812 static int
2813 wm_add_rxbuf(struct wm_softc *sc, int idx)
2814 {
2815 	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
2816 	struct mbuf *m;
2817 	int error;
2818 
2819 	MGETHDR(m, M_DONTWAIT, MT_DATA);
2820 	if (m == NULL)
2821 		return (ENOBUFS);
2822 
2823 	MCLGET(m, M_DONTWAIT);
2824 	if ((m->m_flags & M_EXT) == 0) {
2825 		m_freem(m);
2826 		return (ENOBUFS);
2827 	}
2828 
2829 	if (rxs->rxs_mbuf != NULL)
2830 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2831 
2832 	rxs->rxs_mbuf = m;
2833 
2834 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2835 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
2836 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
2837 	if (error) {
2838 		printf("%s: unable to load rx DMA map %d, error = %d\n",
2839 		    sc->sc_dev.dv_xname, idx, error);
2840 		panic("wm_add_rxbuf");	/* XXX XXX XXX */
2841 	}
2842 
2843 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2844 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2845 
2846 	WM_INIT_RXDESC(sc, idx);
2847 
2848 	return (0);
2849 }
2850 
2851 /*
2852  * wm_set_ral:
2853  *
2854  *	Set an entry in the receive address list.
2855  */
2856 static void
2857 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2858 {
2859 	uint32_t ral_lo, ral_hi;
2860 
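	/*
	 * The receive address is split across two registers: the low
	 * 32 bits of the MAC address go in RAL_LO, the remaining 16
	 * bits plus the Address Valid flag (RAL_AV) in RAL_HI.
	 */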
2861 	if (enaddr != NULL) {
2862 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2863 		    (enaddr[3] << 24);
2864 		ral_hi = enaddr[4] | (enaddr[5] << 8);
2865 		ral_hi |= RAL_AV;
2866 	} else {
2867 		ral_lo = 0;
2868 		ral_hi = 0;
2869 	}
2870 
2871 	if (sc->sc_type >= WM_T_82544) {
2872 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2873 		    ral_lo);
2874 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2875 		    ral_hi);
2876 	} else {
2877 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2878 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2879 	}
2880 }
2881 
2882 /*
2883  * wm_mchash:
2884  *
2885  *	Compute the hash of the multicast address for the 4096-bit
2886  *	multicast filter.
2887  */
2888 static uint32_t
2889 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2890 {
2891 	static const int lo_shift[4] = { 4, 3, 2, 0 };
2892 	static const int hi_shift[4] = { 4, 5, 6, 8 };
2893 	uint32_t hash;
2894 
2895 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2896 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2897 
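	/*
	 * Worked example for filter type 0 (lo_shift = hi_shift = 4):
	 * with enaddr[4] = 0xa4 and enaddr[5] = 0x3b,
	 * hash = (0xa4 >> 4) | (0x3b << 4) = 0x0a | 0x3b0 = 0x3ba,
	 * i.e. bit 26 of multicast table register 29.
	 */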
2898 	return (hash & 0xfff);
2899 }
2900 
2901 /*
2902  * wm_set_filter:
2903  *
2904  *	Set up the receive filter.
2905  */
2906 static void
2907 wm_set_filter(struct wm_softc *sc)
2908 {
2909 	struct ethercom *ec = &sc->sc_ethercom;
2910 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2911 	struct ether_multi *enm;
2912 	struct ether_multistep step;
2913 	bus_addr_t mta_reg;
2914 	uint32_t hash, reg, bit;
2915 	int i;
2916 
2917 	if (sc->sc_type >= WM_T_82544)
2918 		mta_reg = WMREG_CORDOVA_MTA;
2919 	else
2920 		mta_reg = WMREG_MTA;
2921 
2922 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2923 
2924 	if (ifp->if_flags & IFF_BROADCAST)
2925 		sc->sc_rctl |= RCTL_BAM;
2926 	if (ifp->if_flags & IFF_PROMISC) {
2927 		sc->sc_rctl |= RCTL_UPE;
2928 		goto allmulti;
2929 	}
2930 
2931 	/*
2932 	 * Set the station address in the first RAL slot, and
2933 	 * clear the remaining slots.
2934 	 */
2935 	wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
2936 	for (i = 1; i < WM_RAL_TABSIZE; i++)
2937 		wm_set_ral(sc, NULL, i);
2938 
2939 	/* Clear out the multicast table. */
2940 	for (i = 0; i < WM_MC_TABSIZE; i++)
2941 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
2942 
2943 	ETHER_FIRST_MULTI(step, ec, enm);
2944 	while (enm != NULL) {
2945 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2946 			/*
2947 			 * We must listen to a range of multicast addresses.
2948 			 * For now, just accept all multicasts, rather than
2949 			 * trying to set only those filter bits needed to match
2950 			 * the range.  (At this time, the only use of address
2951 			 * ranges is for IP multicast routing, for which the
2952 			 * range is big enough to require all bits set.)
2953 			 */
2954 			goto allmulti;
2955 		}
2956 
2957 		hash = wm_mchash(sc, enm->enm_addrlo);
2958 
2959 		reg = (hash >> 5) & 0x7f;
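		/*
		 * The 4096-bit table is laid out as 128 32-bit
		 * registers: the upper 7 bits of the hash select the
		 * register, the low 5 bits the bit within it.
		 */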
2960 		bit = hash & 0x1f;
2961 
2962 		hash = CSR_READ(sc, mta_reg + (reg << 2));
2963 		hash |= 1U << bit;
2964 
2965 		/* XXX Apparent 82544 erratum: save/rewrite the previous MTA word. */
2966 		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
2967 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2968 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2969 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2970 		} else
2971 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2972 
2973 		ETHER_NEXT_MULTI(step, enm);
2974 	}
2975 
2976 	ifp->if_flags &= ~IFF_ALLMULTI;
2977 	goto setit;
2978 
2979  allmulti:
2980 	ifp->if_flags |= IFF_ALLMULTI;
2981 	sc->sc_rctl |= RCTL_MPE;
2982 
2983  setit:
2984 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
2985 }
2986 
2987 /*
2988  * wm_tbi_mediainit:
2989  *
2990  *	Initialize media for use on 1000BASE-X devices.
2991  */
2992 static void
2993 wm_tbi_mediainit(struct wm_softc *sc)
2994 {
2995 	const char *sep = "";
2996 
2997 	if (sc->sc_type < WM_T_82543)
2998 		sc->sc_tipg = TIPG_WM_DFLT;
2999 	else
3000 		sc->sc_tipg = TIPG_LG_DFLT;
3001 
3002 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
3003 	    wm_tbi_mediastatus);
3004 
3005 	/*
3006 	 * SWD Pins:
3007 	 *
3008 	 *	0 = Link LED (output)
3009 	 *	1 = Loss Of Signal (input)
3010 	 */
3011 	sc->sc_ctrl |= CTRL_SWDPIO(0);
3012 	sc->sc_ctrl &= ~CTRL_SWDPIO(1);
3013 
3014 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3015 
3016 #define	ADD(ss, mm, dd)							\
3017 do {									\
3018 	printf("%s%s", sep, ss);					\
3019 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
3020 	sep = ", ";							\
3021 } while (/*CONSTCOND*/0)
3022 
3023 	printf("%s: ", sc->sc_dev.dv_xname);
3024 	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
3025 	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
3026 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
3027 	printf("\n");
3028 
3029 #undef ADD
3030 
3031 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
3032 }
3033 
3034 /*
3035  * wm_tbi_mediastatus:	[ifmedia interface function]
3036  *
3037  *	Get the current interface media status on a 1000BASE-X device.
3038  */
3039 static void
3040 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
3041 {
3042 	struct wm_softc *sc = ifp->if_softc;
3043 
3044 	ifmr->ifm_status = IFM_AVALID;
3045 	ifmr->ifm_active = IFM_ETHER;
3046 
3047 	if (sc->sc_tbi_linkup == 0) {
3048 		ifmr->ifm_active |= IFM_NONE;
3049 		return;
3050 	}
3051 
3052 	ifmr->ifm_status |= IFM_ACTIVE;
3053 	ifmr->ifm_active |= IFM_1000_SX;
3054 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
3055 		ifmr->ifm_active |= IFM_FDX;
3056 }
3057 
3058 /*
3059  * wm_tbi_mediachange:	[ifmedia interface function]
3060  *
3061  *	Set hardware to newly-selected media on a 1000BASE-X device.
3062  */
3063 static int
3064 wm_tbi_mediachange(struct ifnet *ifp)
3065 {
3066 	struct wm_softc *sc = ifp->if_softc;
3067 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
3068 	uint32_t status;
3069 	int i;
3070 
3071 	sc->sc_txcw = ife->ifm_data;
3072 	if (sc->sc_ctrl & CTRL_RFCE)
3073 		sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
3074 	if (sc->sc_ctrl & CTRL_TFCE)
3075 		sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
3076 	sc->sc_txcw |= TXCW_ANE;
3077 
3078 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
3079 	delay(10000);
3080 
3081 	sc->sc_tbi_anstate = 0;
3082 
3083 	if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
3084 		/* Have signal; wait for the link to come up. */
3085 		for (i = 0; i < 50; i++) {
3086 			delay(10000);
3087 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
3088 				break;
3089 		}
3090 
3091 		status = CSR_READ(sc, WMREG_STATUS);
3092 		if (status & STATUS_LU) {
3093 			/* Link is up. */
3094 			DPRINTF(WM_DEBUG_LINK,
3095 			    ("%s: LINK: set media -> link up %s\n",
3096 			    sc->sc_dev.dv_xname,
3097 			    (status & STATUS_FD) ? "FDX" : "HDX"));
3098 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3099 			if (status & STATUS_FD)
3100 				sc->sc_tctl |=
3101 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3102 			else
3103 				sc->sc_tctl |=
3104 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3105 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3106 			sc->sc_tbi_linkup = 1;
3107 		} else {
3108 			/* Link is down. */
3109 			DPRINTF(WM_DEBUG_LINK,
3110 			    ("%s: LINK: set media -> link down\n",
3111 			    sc->sc_dev.dv_xname));
3112 			sc->sc_tbi_linkup = 0;
3113 		}
3114 	} else {
3115 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
3116 		    sc->sc_dev.dv_xname));
3117 		sc->sc_tbi_linkup = 0;
3118 	}
3119 
3120 	wm_tbi_set_linkled(sc);
3121 
3122 	return (0);
3123 }
3124 
3125 /*
3126  * wm_tbi_set_linkled:
3127  *
3128  *	Update the link LED on 1000BASE-X devices.
3129  */
3130 static void
3131 wm_tbi_set_linkled(struct wm_softc *sc)
3132 {
3133 
3134 	if (sc->sc_tbi_linkup)
3135 		sc->sc_ctrl |= CTRL_SWDPIN(0);
3136 	else
3137 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
3138 
3139 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3140 }
3141 
3142 /*
3143  * wm_tbi_check_link:
3144  *
3145  *	Check the link on 1000BASE-X devices.
3146  */
3147 static void
3148 wm_tbi_check_link(struct wm_softc *sc)
3149 {
3150 	uint32_t rxcw, ctrl, status;
3151 
3152 	if (sc->sc_tbi_anstate == 0)
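	/*
	 * wm_linkintr() arms sc_tbi_anstate (to 2) after a link event;
	 * count it down across one-second ticks so autonegotiation has
	 * a chance to settle before the link is sampled below.
	 */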
3153 		return;
3154 	else if (sc->sc_tbi_anstate > 1) {
3155 		DPRINTF(WM_DEBUG_LINK,
3156 		    ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
3157 		    sc->sc_tbi_anstate));
3158 		sc->sc_tbi_anstate--;
3159 		return;
3160 	}
3161 
3162 	sc->sc_tbi_anstate = 0;
3163 
3164 	rxcw = CSR_READ(sc, WMREG_RXCW);
3165 	ctrl = CSR_READ(sc, WMREG_CTRL);
3166 	status = CSR_READ(sc, WMREG_STATUS);
3167 
3168 	if ((status & STATUS_LU) == 0) {
3169 		DPRINTF(WM_DEBUG_LINK,
3170 		    ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
3171 		sc->sc_tbi_linkup = 0;
3172 	} else {
3173 		DPRINTF(WM_DEBUG_LINK,
3174 		    ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
3175 		    (status & STATUS_FD) ? "FDX" : "HDX"));
3176 		sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3177 		if (status & STATUS_FD)
3178 			sc->sc_tctl |=
3179 			    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3180 		else
3181 			sc->sc_tctl |=
3182 			    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3183 		CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3184 		sc->sc_tbi_linkup = 1;
3185 	}
3186 
3187 	wm_tbi_set_linkled(sc);
3188 }
3189 
3190 /*
3191  * wm_gmii_reset:
3192  *
3193  *	Reset the PHY.
3194  */
3195 static void
3196 wm_gmii_reset(struct wm_softc *sc)
3197 {
3198 	uint32_t reg;
3199 
3200 	if (sc->sc_type >= WM_T_82544) {
3201 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
3202 		delay(20000);
3203 
3204 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3205 		delay(20000);
3206 	} else {
3207 		/* The PHY reset pin is active-low. */
3208 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
3209 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
3210 		    CTRL_EXT_SWDPIN(4));
3211 		reg |= CTRL_EXT_SWDPIO(4);
3212 
3213 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
3214 		delay(10);
3215 
3216 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3217 		delay(10);
3218 
3219 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
3220 		delay(10);
3221 #if 0
3222 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
3223 #endif
3224 	}
3225 }
3226 
3227 /*
3228  * wm_gmii_mediainit:
3229  *
3230  *	Initialize media for use on 1000BASE-T devices.
3231  */
3232 static void
3233 wm_gmii_mediainit(struct wm_softc *sc)
3234 {
3235 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3236 
3237 	/* We have MII. */
3238 	sc->sc_flags |= WM_F_HAS_MII;
3239 
3240 	sc->sc_tipg = TIPG_1000T_DFLT;
3241 
3242 	/*
3243 	 * Let the chip set speed/duplex on its own based on
3244 	 * signals from the PHY.
3245 	 */
3246 	sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
3247 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3248 
3249 	/* Initialize our media structures and probe the GMII. */
3250 	sc->sc_mii.mii_ifp = ifp;
3251 
3252 	if (sc->sc_type >= WM_T_82544) {
3253 		sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
3254 		sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
3255 	} else {
3256 		sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
3257 		sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
3258 	}
3259 	sc->sc_mii.mii_statchg = wm_gmii_statchg;
3260 
3261 	wm_gmii_reset(sc);
3262 
3263 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
3264 	    wm_gmii_mediastatus);
3265 
3266 	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
3267 	    MII_OFFSET_ANY, 0);
3268 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
3269 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
3270 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
3271 	} else
3272 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
3273 }
3274 
3275 /*
3276  * wm_gmii_mediastatus:	[ifmedia interface function]
3277  *
3278  *	Get the current interface media status on a 1000BASE-T device.
3279  */
3280 static void
3281 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
3282 {
3283 	struct wm_softc *sc = ifp->if_softc;
3284 
3285 	mii_pollstat(&sc->sc_mii);
3286 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
3287 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
3288 }
3289 
3290 /*
3291  * wm_gmii_mediachange:	[ifmedia interface function]
3292  *
3293  *	Set hardware to newly-selected media on a 1000BASE-T device.
3294  */
3295 static int
3296 wm_gmii_mediachange(struct ifnet *ifp)
3297 {
3298 	struct wm_softc *sc = ifp->if_softc;
3299 
3300 	if (ifp->if_flags & IFF_UP)
3301 		mii_mediachg(&sc->sc_mii);
3302 	return (0);
3303 }
3304 
3305 #define	MDI_IO		CTRL_SWDPIN(2)
3306 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
3307 #define	MDI_CLK		CTRL_SWDPIN(3)
3308 
3309 static void
3310 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
3311 {
3312 	uint32_t i, v;
3313 
3314 	v = CSR_READ(sc, WMREG_CTRL);
3315 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
3316 	v |= MDI_DIR | CTRL_SWDPIO(3);
3317 
3318 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
3319 		if (data & i)
3320 			v |= MDI_IO;
3321 		else
3322 			v &= ~MDI_IO;
3323 		CSR_WRITE(sc, WMREG_CTRL, v);
3324 		delay(10);
3325 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3326 		delay(10);
3327 		CSR_WRITE(sc, WMREG_CTRL, v);
3328 		delay(10);
3329 	}
3330 }
3331 
3332 static uint32_t
3333 i82543_mii_recvbits(struct wm_softc *sc)
3334 {
3335 	uint32_t v, i, data = 0;
3336 
3337 	v = CSR_READ(sc, WMREG_CTRL);
3338 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
3339 	v |= CTRL_SWDPIO(3);
3340 
3341 	CSR_WRITE(sc, WMREG_CTRL, v);
3342 	delay(10);
3343 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3344 	delay(10);
3345 	CSR_WRITE(sc, WMREG_CTRL, v);
3346 	delay(10);
3347 
3348 	for (i = 0; i < 16; i++) {
3349 		data <<= 1;
3350 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3351 		delay(10);
3352 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
3353 			data |= 1;
3354 		CSR_WRITE(sc, WMREG_CTRL, v);
3355 		delay(10);
3356 	}
3357 
3358 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3359 	delay(10);
3360 	CSR_WRITE(sc, WMREG_CTRL, v);
3361 	delay(10);
3362 
3363 	return (data);
3364 }
3365 
3366 #undef MDI_IO
3367 #undef MDI_DIR
3368 #undef MDI_CLK
3369 
3370 /*
3371  * wm_gmii_i82543_readreg:	[mii interface function]
3372  *
3373  *	Read a PHY register on the GMII (i82543 version).
3374  */
3375 static int
3376 wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
3377 {
3378 	struct wm_softc *sc = (void *) self;
3379 	int rv;
3380 
3381 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
3382 	i82543_mii_sendbits(sc, reg | (phy << 5) |
3383 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
3384 	rv = i82543_mii_recvbits(sc) & 0xffff;
3385 
3386 	DPRINTF(WM_DEBUG_GMII,
3387 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
3388 	    sc->sc_dev.dv_xname, phy, reg, rv));
3389 
3390 	return (rv);
3391 }
3392 
3393 /*
3394  * wm_gmii_i82543_writereg:	[mii interface function]
3395  *
3396  *	Write a PHY register on the GMII (i82543 version).
3397  */
3398 static void
3399 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
3400 {
3401 	struct wm_softc *sc = (void *) self;
3402 
3403 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
3404 	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
3405 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
3406 	    (MII_COMMAND_START << 30), 32);
3407 }
3408 
3409 /*
3410  * wm_gmii_i82544_readreg:	[mii interface function]
3411  *
3412  *	Read a PHY register on the GMII.
3413  */
3414 static int
3415 wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
3416 {
3417 	struct wm_softc *sc = (void *) self;
3418 	uint32_t mdic = 0;
3419 	int i, rv;
3420 
3421 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
3422 	    MDIC_REGADD(reg));
3423 
3424 	for (i = 0; i < 100; i++) {
3425 		mdic = CSR_READ(sc, WMREG_MDIC);
3426 		if (mdic & MDIC_READY)
3427 			break;
3428 		delay(10);
3429 	}
3430 
3431 	if ((mdic & MDIC_READY) == 0) {
3432 		printf("%s: MDIC read timed out: phy %d reg %d\n",
3433 		    sc->sc_dev.dv_xname, phy, reg);
3434 		rv = 0;
3435 	} else if (mdic & MDIC_E) {
3436 #if 0 /* This is normal if no PHY is present. */
3437 		printf("%s: MDIC read error: phy %d reg %d\n",
3438 		    sc->sc_dev.dv_xname, phy, reg);
3439 #endif
3440 		rv = 0;
3441 	} else {
3442 		rv = MDIC_DATA(mdic);
3443 		if (rv == 0xffff)
3444 			rv = 0;
3445 	}
3446 
3447 	return (rv);
3448 }
3449 
3450 /*
3451  * wm_gmii_i82544_writereg:	[mii interface function]
3452  *
3453  *	Write a PHY register on the GMII.
3454  */
3455 static void
3456 wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
3457 {
3458 	struct wm_softc *sc = (void *) self;
3459 	uint32_t mdic = 0;
3460 	int i;
3461 
3462 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
3463 	    MDIC_REGADD(reg) | MDIC_DATA(val));
3464 
3465 	for (i = 0; i < 100; i++) {
3466 		mdic = CSR_READ(sc, WMREG_MDIC);
3467 		if (mdic & MDIC_READY)
3468 			break;
3469 		delay(10);
3470 	}
3471 
3472 	if ((mdic & MDIC_READY) == 0)
3473 		printf("%s: MDIC write timed out: phy %d reg %d\n",
3474 		    sc->sc_dev.dv_xname, phy, reg);
3475 	else if (mdic & MDIC_E)
3476 		printf("%s: MDIC write error: phy %d reg %d\n",
3477 		    sc->sc_dev.dv_xname, phy, reg);
3478 }
3479 
3480 /*
3481  * wm_gmii_statchg:	[mii interface function]
3482  *
3483  *	Callback from MII layer when media changes.
3484  */
3485 static void
3486 wm_gmii_statchg(struct device *self)
3487 {
3488 	struct wm_softc *sc = (void *) self;
3489 
3490 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3491 
3492 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
3493 		DPRINTF(WM_DEBUG_LINK,
3494 		    ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
3495 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3496 	} else  {
3497 		DPRINTF(WM_DEBUG_LINK,
3498 		    ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
3499 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3500 	}
3501 
3502 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3503 }
3504