/*	$NetBSD: if_wm.c,v 1.71 2004/05/16 02:34:47 thorpej Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Figure out performance stability issue on i82547 (fvdl).
 *	- Figure out what to do with the i82545GM and i82546GB
 *	  SERDES controllers.
 *	- Fix hw VLAN assist.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.71 2004/05/16 02:34:47 thorpej Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring.  We tell the upper layers
 * that they can queue a lot of packets, and we go ahead and manage
 * up to 64 of them at a time.  We allow up to 40 DMA segments per
 * packet (there have been reports of jumbo frame packets with as
 * many as 30 DMA segments!).
 */
#define	WM_NTXSEGS		40
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN		64
#define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
#define	WM_TXQUEUE_GC		(WM_TXQUEUELEN / 8)
#define	WM_NTXDESC		256
#define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
#define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
#define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK)
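
/*
 * Illustrative sketch (not compiled): because WM_NTXDESC and
 * WM_TXQUEUELEN are powers of two, the wrap macros above reduce to a
 * cheap AND instead of a modulo.  Advancing past the last hardware
 * descriptor wraps back to slot 0:
 */
#if 0
	int nexttx = WM_NTXDESC - 1;	/* 255, last slot in the ring */
	nexttx = WM_NEXTTX(nexttx);	/* (255 + 1) & 0xff == 0 */
#endif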

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

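/*
 * Worked example of the sizing claim above: a maximal jumbo frame (on
 * the order of 9KB) spread across 2KB (MCLBYTES) buffers consumes
 * about 5 descriptors, so 256 descriptors / 5 per packet ~= 50
 * outstanding jumbo packets.
 */
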
/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data {
	/*
	 * The transmit descriptors.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
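
/*
 * Illustrative sketch (not compiled; "sc" and "n" are hypothetical):
 * the macros above yield plain byte offsets into the control-data
 * clump, so a single descriptor can be synced with one
 * bus_dmamap_sync() call, exactly as the sync macros further down do:
 */
#if 0
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cddmamap,
	    WM_CDTXOFF(n), sizeof(wiseman_txdesc_t),
	    BUS_DMASYNC_PREWRITE);
#endif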

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

typedef enum {
	WM_T_unknown		= 0,
	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
	WM_T_82543,			/* i82543 */
	WM_T_82544,			/* i82544 */
	WM_T_82540,			/* i82540 */
	WM_T_82545,			/* i82545 */
	WM_T_82545_3,			/* i82545 3.0+ */
	WM_T_82546,			/* i82546 */
	WM_T_82546_3,			/* i82546 3.0+ */
	WM_T_82541,			/* i82541 */
	WM_T_82541_2,			/* i82541 2.0+ */
	WM_T_82547,			/* i82547 */
	WM_T_82547_2,			/* i82547 2.0+ */
} wm_chip_type;

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */
	int sc_flowflags;		/* 802.3x flow control flags */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int		sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int	sc_txfree;		/* number of free Tx descriptors */
	int	sc_txnext;		/* next ready Tx descriptor */

	int	sc_txsfree;		/* number of free Tx jobs */
	int	sc_txsnext;		/* next free Tx job */
	int	sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
	int	sc_rxdiscard;
	int	sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
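
/*
 * Illustrative sketch (not compiled; "m1" and "m2" are hypothetical):
 * sc_rxtailp always points at the m_next field of the last mbuf in the
 * chain (or at sc_rxhead when the chain is empty), so appending a
 * buffer is O(1) with no list walk:
 */
#if 0
	WM_RXCHAIN_RESET(sc);		/* sc_rxhead == NULL */
	WM_RXCHAIN_LINK(sc, m1);	/* sc_rxhead == m1 */
	WM_RXCHAIN_LINK(sc, m2);	/* m1->m_next == m2 */
#endif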

/* sc_flags */
#define	WM_F_HAS_MII		0x01	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x02	/* requires EEPROM handshake */
#define	WM_F_EEPROM_SPI		0x04	/* EEPROM is SPI */
#define	WM_F_IOH_VALID		0x10	/* I/O handle is valid */
#define	WM_F_BUS64		0x20	/* bus is 64-bit */
#define	WM_F_PCIX		0x40	/* bus is PCI-X */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC) {					\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC - __x), (ops));				\
		__n -= (WM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
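
/*
 * Worked example of the wrap handling above: syncing 4 descriptors
 * starting at index 254 in the 256-entry ring is split into two
 * bus_dmamap_sync() calls, one covering descriptors 254-255 and one
 * covering descriptors 0-1.
 */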

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
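
/*
 * A minimal sketch (not compiled) of the align_tweak policy the
 * comment above describes; the actual assignment happens elsewhere in
 * the driver, presumably when the interface is (re)initialized:
 */
#if 0
	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
		sc->sc_align_tweak = 0;	/* can't scoot; upper layers copy */
	else
		sc->sc_align_tweak = 2;	/* align payload after Ethernet hdr */
#endif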

static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, caddr_t);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_shutdown(void *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(struct device *, int, int);
static void	wm_gmii_i82543_writereg(struct device *, int, int, int);

static int	wm_gmii_i82544_readreg(struct device *, int, int);
static void	wm_gmii_i82544_writereg(struct device *, int, int, int);

static void	wm_gmii_statchg(struct device *);

static void	wm_gmii_mediainit(struct wm_softc *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_match(struct device *, struct cfdata *, void *);
static void	wm_attach(struct device *, struct device *, void *);

CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

/*
 * Devices supported by this driver.
 */
const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
#if WM_NTXSEGS != 40
#error Update wm_txseg_evcnt_names
#endif
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
	"txseg17",
	"txseg18",
	"txseg19",
	"txseg20",
	"txseg21",
	"txseg22",
	"txseg23",
	"txseg24",
	"txseg25",
	"txseg26",
	"txseg27",
	"txseg28",
	"txseg29",
	"txseg30",
	"txseg31",
	"txseg32",
	"txseg33",
	"txseg34",
	"txseg35",
	"txseg36",
	"txseg37",
	"txseg38",
	"txseg39",
	"txseg40",
};
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static __inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static __inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static __inline void
wm_set_dma_addr(__volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}
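
/*
 * Worked example: with a 64-bit bus_addr_t, v == 0x123456789 is split
 * into wa_low == 0x23456789 and wa_high == 0x1, both stored
 * little-endian (htole32) as the descriptor format expects.
 */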

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

static int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

static void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	uint32_t reg;
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error("%s: WARNING: unable to find I/O BAR\n",
			    sc->sc_dev.dv_xname);
		else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
					0, &sc->sc_iot, &sc->sc_ioh,
					NULL, NULL) == 0)
			sc->sc_flags |= WM_F_IOH_VALID;
		else
			aprint_error("%s: WARNING: unable to map I/O space\n",
			    sc->sc_dev.dv_xname);
	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		preg = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
		    PCI_PMCSR_STATE_MASK;
		if (preg == PCI_PMCSR_STATE_D3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			aprint_error("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg != PCI_PMCSR_STATE_D0) {
			aprint_normal("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, preg);
			pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
			    PCI_PMCSR_STATE_D0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: unable to map interrupt\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if (sc->sc_type >= WM_T_82544 &&
		    (reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
					       PCI_CAP_PCIX,
					       &sc->sc_pcix_offset, NULL) == 0)
				aprint_error("%s: unable to find PCIX "
				    "capability\n", sc->sc_dev.dv_xname);
			else if (sc->sc_type != WM_T_82545_3 &&
				 sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose("%s: resetting PCI-X "
					    "MMRBC: %d -> %d\n",
					    sc->sc_dev.dv_xname,
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					   (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcix_offset + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
								      : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error(
				    "%s: unknown PCIXSPD %d; assuming 66MHz\n",
				    sc->sc_dev.dv_xname,
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose("%s: %d-bit %dMHz %s bus\n", sc->sc_dev.dv_xname,
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
				      sizeof(struct wm_control_data),
				      PAGE_SIZE, (bus_size_t) 0x100000000ULL,
				      &seg, 1, &rseg, 0)) != 0) {
		aprint_error(
		    "%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
				    sizeof(struct wm_control_data),
				    (caddr_t *)&sc->sc_control_data, 0)) != 0) {
		aprint_error("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
				       sizeof(struct wm_control_data), 1,
				       sizeof(struct wm_control_data), 0, 0,
				       &sc->sc_cddmamap)) != 0) {
		aprint_error("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
				     sc->sc_control_data,
				     sizeof(struct wm_control_data), NULL,
				     0)) != 0) {
		aprint_error(
		    "%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
					       WM_NTXSEGS, MCLBYTES, 0, 0,
					  &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
					       MCLBYTES, 0, 0,
					  &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Get some information about the EEPROM.
	 */
	if (sc->sc_type >= WM_T_82540)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
	if (sc->sc_type <= WM_T_82544)
		sc->sc_ee_addrbits = 6;
	else if (sc->sc_type <= WM_T_82546_3) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
	} else if (sc->sc_type <= WM_T_82547_2) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			sc->sc_flags |= WM_F_EEPROM_SPI;
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
		} else
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
	} else {
		/* Assume everything else is SPI. */
		reg = CSR_READ(sc, WMREG_EECD);
		sc->sc_flags |= WM_F_EEPROM_SPI;
		sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
	}
	if (sc->sc_flags & WM_F_EEPROM_SPI)
		eetype = "SPI";
	else
		eetype = "MicroWire";
	aprint_verbose("%s: %u word (%d address bits) %s EEPROM\n",
	    sc->sc_dev.dv_xname, 1U << sc->sc_ee_addrbits,
	    sc->sc_ee_addrbits, eetype);
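
	/*
	 * For example, a 6-address-bit MicroWire part holds
	 * 1U << 6 == 64 16-bit words, while an 8-bit SPI part holds
	 * 256 words; the aprint above derives the word count the
	 * same way.
	 */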

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea)) {
		aprint_error("%s: unable to read Ethernet address\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;
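
	/*
	 * Each 16-bit EEPROM word holds two address octets, with the
	 * lower-numbered octet in the low byte; hence the byte
	 * swizzling above.
	 */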

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the i82546.
	 */
	if (sc->sc_type == WM_T_82546) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
		aprint_error("%s: unable to read CFG1 from EEPROM\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
		aprint_error("%s: unable to read CFG2 from EEPROM\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	if (sc->sc_type >= WM_T_82544) {
		if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
			aprint_error("%s: unable to read SWDPIN from EEPROM\n",
			    sc->sc_dev.dv_xname);
			return;
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	/*
	 * If we're an i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543)
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txforceintr");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx miss");

	for (i = 0; i < WM_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");

	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_macctl");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct wm_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static void
wm_shutdown(void *arg)
{
	struct wm_softc *sc = arg;

	wm_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * wm_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint8_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t ipcs, tucs;
	struct ip *ip;
	struct ether_header *eh;
	int offset, iphl;
	uint8_t fields = 0;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	if (m0->m_len < (offset + iphl)) {
		if ((txs->txs_mbuf = m_pullup(m0, offset + iphl)) == NULL) {
			printf("%s: wm_tx_cksum: mbuf allocation failed, "
			    "packet dropped\n", sc->sc_dev.dv_xname);
			return (ENOMEM);
		}
		m0 = txs->txs_mbuf;
	}

	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
	iphl = ip->ip_hl << 2;

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= WTX_IXSM;
		ipcs = WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1);
	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
		/* Use the cached value. */
		ipcs = sc->sc_txctx_ipcs;
	} else {
		/* Just initialize it to the likely value anyway. */
		ipcs = WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1);
	}

	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= WTX_TXSM;
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
		/* Use the cached value. */
		tucs = sc->sc_txctx_tucs;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	}

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs) {
		/* Cached context is fine. */
		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef WM_EVENT_COUNTERS
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff)
			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		t = (struct livengood_tcpip_ctxdesc *)
		    &sc->sc_txdescs[sc->sc_txnext];
		t->tcpip_ipcs = htole32(ipcs);
		t->tcpip_tucs = htole32(tucs);
		t->tcpip_cmdlen = htole32(WTX_CMD_DEXT | WTX_DTYP_C);
		t->tcpip_seg = 0;
		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;

		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
		txs->txs_ndesc++;
	}

	*cmdp = WTX_CMD_DEXT | WTX_DTYP_D;
	*fieldsp = fields;

	return (0);
}
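
/*
 * Design note on the caching in wm_tx_cksum() above: loading a new
 * checksum context consumes a full descriptor in the Tx ring, so the
 * freshly computed IPCS/TUCS words are compared against the cached
 * copies in the softc and a context descriptor is only emitted (and a
 * ring slot burned) on a miss.  The txctx init/hit/miss event counters
 * measure how well this works for a given workload.
 */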

/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
#if 0 /* XXXJRT */
	struct m_tag *mtag;
#endif
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx = -1, ofree, seg;
	uint32_t cksumcmd;
	uint8_t cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
					sc->sc_dev.dv_xname));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses the number of DMA segments.  This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Set up checksum offload parameters for
		 * this packet.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			if (wm_tx_cksum(sc, txs, &cksumcmd,
					&cksumfields) != 0) {
				/* Error message already displayed. */
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		cksumcmd |= WTX_CMD_IDE;

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = WM_NEXTTX(nexttx)) {
			wm_set_dma_addr(&sc->sc_txdescs[nexttx].wtx_addr,
			    dmamap->dm_segs[seg].ds_addr);
			sc->sc_txdescs[nexttx].wtx_cmdlen =
			    htole32(cksumcmd | dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
			    cksumfields;
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
			lasttx = nexttx;

			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
			    sc->sc_dev.dv_xname, nexttx,
			    le32toh(dmamap->dm_segs[seg].ds_addr),
			    le32toh(dmamap->dm_segs[seg].ds_len)));
		}

		KASSERT(lasttx != -1);

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (mtag = m_tag_find(m0, PACKET_TAG_VLAN, NULL)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
			    = htole16(*(u_int *)(mtag + 1) & 0xffff);
		}
#endif /* XXXJRT */

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    sc->sc_dev.dv_xname, sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
1699 
1700 /*
1701  * wm_watchdog:		[ifnet interface function]
1702  *
1703  *	Watchdog timer handler.
1704  */
1705 static void
1706 wm_watchdog(struct ifnet *ifp)
1707 {
1708 	struct wm_softc *sc = ifp->if_softc;
1709 
1710 	/*
1711 	 * Since we're using delayed interrupts, sweep up
1712 	 * before we report an error.
1713 	 */
1714 	wm_txintr(sc);
1715 
1716 	if (sc->sc_txfree != WM_NTXDESC) {
1717 		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
1718 		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
1719 		    sc->sc_txnext);
1720 		ifp->if_oerrors++;
1721 
1722 		/* Reset the interface. */
1723 		(void) wm_init(ifp);
1724 	}
1725 
1726 	/* Try to get more packets going. */
1727 	wm_start(ifp);
1728 }
1729 
1730 /*
1731  * wm_ioctl:		[ifnet interface function]
1732  *
1733  *	Handle control requests from the operator.
1734  */
1735 static int
1736 wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1737 {
1738 	struct wm_softc *sc = ifp->if_softc;
1739 	struct ifreq *ifr = (struct ifreq *) data;
1740 	int s, error;
1741 
1742 	s = splnet();
1743 
1744 	switch (cmd) {
1745 	case SIOCSIFMEDIA:
1746 	case SIOCGIFMEDIA:
1747 		/* Flow control requires full-duplex mode. */
1748 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
1749 		    (ifr->ifr_media & IFM_FDX) == 0)
1750 			ifr->ifr_media &= ~IFM_ETH_FMASK;
1751 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
1752 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
1753 				/* We can do both TXPAUSE and RXPAUSE. */
1754 				ifr->ifr_media |=
1755 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
1756 			}
1757 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
1758 		}
1759 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1760 		break;
1761 	default:
1762 		error = ether_ioctl(ifp, cmd, data);
1763 		if (error == ENETRESET) {
1764 			/*
1765 			 * Multicast list has changed; set the hardware filter
1766 			 * accordingly.
1767 			 */
1768 			wm_set_filter(sc);
1769 			error = 0;
1770 		}
1771 		break;
1772 	}
1773 
1774 	/* Try to get more packets going. */
1775 	wm_start(ifp);
1776 
1777 	splx(s);
1778 	return (error);
1779 }
1780 
1781 /*
1782  * wm_intr:
1783  *
1784  *	Interrupt service routine.
1785  */
1786 static int
1787 wm_intr(void *arg)
1788 {
1789 	struct wm_softc *sc = arg;
1790 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1791 	uint32_t icr;
1792 	int wantinit, handled = 0;
1793 
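	/*
	 * Reading ICR acknowledges (clears) the asserted interrupt
	 * causes, so we simply loop until no cause we care about
	 * remains set.
	 */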
1794 	for (wantinit = 0; wantinit == 0;) {
1795 		icr = CSR_READ(sc, WMREG_ICR);
1796 		if ((icr & sc->sc_icr) == 0)
1797 			break;
1798 
1799 #if 0 /*NRND > 0*/
1800 		if (RND_ENABLED(&sc->rnd_source))
1801 			rnd_add_uint32(&sc->rnd_source, icr);
1802 #endif
1803 
1804 		handled = 1;
1805 
1806 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
1807 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
1808 			DPRINTF(WM_DEBUG_RX,
1809 			    ("%s: RX: got Rx intr 0x%08x\n",
1810 			    sc->sc_dev.dv_xname,
1811 			    icr & (ICR_RXDMT0|ICR_RXT0)));
1812 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
1813 		}
1814 #endif
1815 		wm_rxintr(sc);
1816 
1817 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
1818 		if (icr & ICR_TXDW) {
1819 			DPRINTF(WM_DEBUG_TX,
1820 			    ("%s: TX: got TXDW interrupt\n",
1821 			    sc->sc_dev.dv_xname));
1822 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
1823 		}
1824 #endif
1825 		wm_txintr(sc);
1826 
1827 		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
1828 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
1829 			wm_linkintr(sc, icr);
1830 		}
1831 
1832 		if (icr & ICR_RXO) {
1833 			printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
1834 			wantinit = 1;
1835 		}
1836 	}
1837 
1838 	if (handled) {
1839 		if (wantinit)
1840 			wm_init(ifp);
1841 
1842 		/* Try to get more packets going. */
1843 		wm_start(ifp);
1844 	}
1845 
1846 	return (handled);
1847 }
1848 
1849 /*
1850  * wm_txintr:
1851  *
1852  *	Helper; handle transmit interrupts.
1853  */
1854 static void
1855 wm_txintr(struct wm_softc *sc)
1856 {
1857 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1858 	struct wm_txsoft *txs;
1859 	uint8_t status;
1860 	int i;
1861 
1862 	ifp->if_flags &= ~IFF_OACTIVE;
1863 
1864 	/*
1865 	 * Go through the Tx list and free mbufs for those
1866 	 * frames which have been transmitted.
1867 	 */
1868 	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
1869 	     i = WM_NEXTTXS(i), sc->sc_txsfree++) {
1870 		txs = &sc->sc_txsoft[i];
1871 
1872 		DPRINTF(WM_DEBUG_TX,
1873 		    ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));
1874 
1875 		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
1876 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1877 
1878 		status =
1879 		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
1880 		if ((status & WTX_ST_DD) == 0) {
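			/*
			 * This job isn't done yet; resync the descriptor
			 * so we can look at its status again later, and
			 * stop scanning.
			 */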
1881 			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
1882 			    BUS_DMASYNC_PREREAD);
1883 			break;
1884 		}
1885 
1886 		DPRINTF(WM_DEBUG_TX,
1887 		    ("%s: TX: job %d done: descs %d..%d\n",
1888 		    sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
1889 		    txs->txs_lastdesc));
1890 
1891 		/*
1892 		 * XXX We should probably be using the statistics
1893 		 * XXX registers, but I don't know if they exist
1894 		 * XXX on chips before the i82544.
1895 		 */
1896 
1897 #ifdef WM_EVENT_COUNTERS
1898 		if (status & WTX_ST_TU)
1899 			WM_EVCNT_INCR(&sc->sc_ev_tu);
1900 #endif /* WM_EVENT_COUNTERS */
1901 
1902 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
1903 			ifp->if_oerrors++;
1904 			if (status & WTX_ST_LC)
1905 				printf("%s: late collision\n",
1906 				    sc->sc_dev.dv_xname);
1907 			else if (status & WTX_ST_EC) {
1908 				ifp->if_collisions += 16;
1909 				printf("%s: excessive collisions\n",
1910 				    sc->sc_dev.dv_xname);
1911 			}
1912 		} else
1913 			ifp->if_opackets++;
1914 
1915 		sc->sc_txfree += txs->txs_ndesc;
1916 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1917 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1918 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1919 		m_freem(txs->txs_mbuf);
1920 		txs->txs_mbuf = NULL;
1921 	}
1922 
1923 	/* Update the dirty transmit buffer pointer. */
1924 	sc->sc_txsdirty = i;
1925 	DPRINTF(WM_DEBUG_TX,
1926 	    ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));
1927 
1928 	/*
1929 	 * If there are no more pending transmissions, cancel the watchdog
1930 	 * timer.
1931 	 */
1932 	if (sc->sc_txsfree == WM_TXQUEUELEN)
1933 		ifp->if_timer = 0;
1934 }
1935 
1936 /*
1937  * wm_rxintr:
1938  *
1939  *	Helper; handle receive interrupts.
1940  */
1941 static void
1942 wm_rxintr(struct wm_softc *sc)
1943 {
1944 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1945 	struct wm_rxsoft *rxs;
1946 	struct mbuf *m;
1947 	int i, len;
1948 	uint8_t status, errors;
1949 
1950 	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
1951 		rxs = &sc->sc_rxsoft[i];
1952 
1953 		DPRINTF(WM_DEBUG_RX,
1954 		    ("%s: RX: checking descriptor %d\n",
1955 		    sc->sc_dev.dv_xname, i));
1956 
1957 		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1958 
1959 		status = sc->sc_rxdescs[i].wrx_status;
1960 		errors = sc->sc_rxdescs[i].wrx_errors;
1961 		len = le16toh(sc->sc_rxdescs[i].wrx_len);
1962 
1963 		if ((status & WRX_ST_DD) == 0) {
1964 			/*
1965 			 * We have processed all of the receive descriptors.
1966 			 */
1967 			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
1968 			break;
1969 		}
1970 
1971 		if (__predict_false(sc->sc_rxdiscard)) {
1972 			DPRINTF(WM_DEBUG_RX,
1973 			    ("%s: RX: discarding contents of descriptor %d\n",
1974 			    sc->sc_dev.dv_xname, i));
1975 			WM_INIT_RXDESC(sc, i);
1976 			if (status & WRX_ST_EOP) {
1977 				/* Reset our state. */
1978 				DPRINTF(WM_DEBUG_RX,
1979 				    ("%s: RX: resetting rxdiscard -> 0\n",
1980 				    sc->sc_dev.dv_xname));
1981 				sc->sc_rxdiscard = 0;
1982 			}
1983 			continue;
1984 		}
1985 
1986 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1987 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1988 
1989 		m = rxs->rxs_mbuf;
1990 
1991 		/*
1992 		 * Add a new receive buffer to the ring.
1993 		 */
1994 		if (wm_add_rxbuf(sc, i) != 0) {
1995 			/*
1996 			 * Failed, throw away what we've done so
1997 			 * far, and discard the rest of the packet.
1998 			 */
1999 			ifp->if_ierrors++;
2000 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2001 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2002 			WM_INIT_RXDESC(sc, i);
2003 			if ((status & WRX_ST_EOP) == 0)
2004 				sc->sc_rxdiscard = 1;
2005 			if (sc->sc_rxhead != NULL)
2006 				m_freem(sc->sc_rxhead);
2007 			WM_RXCHAIN_RESET(sc);
2008 			DPRINTF(WM_DEBUG_RX,
2009 			    ("%s: RX: Rx buffer allocation failed, "
2010 			    "dropping packet%s\n", sc->sc_dev.dv_xname,
2011 			    sc->sc_rxdiscard ? " (discard)" : ""));
2012 			continue;
2013 		}
2014 
2015 		WM_RXCHAIN_LINK(sc, m);
2016 
2017 		m->m_len = len;
2018 
2019 		DPRINTF(WM_DEBUG_RX,
2020 		    ("%s: RX: buffer at %p len %d\n",
2021 		    sc->sc_dev.dv_xname, m->m_data, len));
2022 
2023 		/*
2024 		 * If this is not the end of the packet, keep
2025 		 * looking.
2026 		 */
2027 		if ((status & WRX_ST_EOP) == 0) {
2028 			sc->sc_rxlen += len;
2029 			DPRINTF(WM_DEBUG_RX,
2030 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
2031 			    sc->sc_dev.dv_xname, sc->sc_rxlen));
2032 			continue;
2033 		}
2034 
2035 		/*
2036 		 * Okay, we have the entire packet now...
2037 		 */
2038 		*sc->sc_rxtailp = NULL;
2039 		m = sc->sc_rxhead;
2040 		len += sc->sc_rxlen;
2041 
2042 		WM_RXCHAIN_RESET(sc);
2043 
2044 		DPRINTF(WM_DEBUG_RX,
2045 		    ("%s: RX: have entire packet, len -> %d\n",
2046 		    sc->sc_dev.dv_xname, len));
2047 
2048 		/*
2049 		 * If an error occurred, update stats and drop the packet.
2050 		 */
2051 		if (errors &
2052 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
2053 			ifp->if_ierrors++;
2054 			if (errors & WRX_ER_SE)
2055 				printf("%s: symbol error\n",
2056 				    sc->sc_dev.dv_xname);
2057 			else if (errors & WRX_ER_SEQ)
2058 				printf("%s: receive sequence error\n",
2059 				    sc->sc_dev.dv_xname);
2060 			else if (errors & WRX_ER_CE)
2061 				printf("%s: CRC error\n",
2062 				    sc->sc_dev.dv_xname);
2063 			m_freem(m);
2064 			continue;
2065 		}
2066 
2067 		/*
2068 		 * No errors.  Receive the packet.
2069 		 *
2070 		 * Note, we have configured the chip to include the
2071 		 * CRC with every packet.
2072 		 */
2073 		m->m_flags |= M_HASFCS;
2074 		m->m_pkthdr.rcvif = ifp;
2075 		m->m_pkthdr.len = len;
2076 
2077 #if 0 /* XXXJRT */
2078 		/*
2079 		 * If VLANs are enabled, VLAN packets have been unwrapped
2080 		 * for us.  Associate the tag with the packet.
2081 		 */
2082 		if (sc->sc_ethercom.ec_nvlans != 0 &&
2083 		    (status & WRX_ST_VP) != 0) {
2084 			struct m_tag *vtag;
2085 
2086 			vtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
2087 			    M_NOWAIT);
2088 			if (vtag == NULL) {
2089 				ifp->if_ierrors++;
2090 				printf("%s: unable to allocate VLAN tag\n",
2091 				    sc->sc_dev.dv_xname);
2092 				m_freem(m);
2093 				continue;
2094 			}
2095 
2096 			*(u_int *)(vtag + 1) =
2097 			    le16toh(sc->sc_rxdescs[i].wrx_special);
2098 		}
2099 #endif /* XXXJRT */
2100 
2101 		/*
2102 		 * Set up checksum info for this packet.
2103 		 */
2104 		if (status & WRX_ST_IPCS) {
2105 			WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
2106 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2107 			if (errors & WRX_ER_IPE)
2108 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
2109 		}
2110 		if (status & WRX_ST_TCPCS) {
2111 			/*
2112 			 * Note: we don't know if this was TCP or UDP,
2113 			 * so we just set both bits, and expect the
2114 			 * upper layers to deal.
2115 			 */
2116 			WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
2117 			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
2118 			if (errors & WRX_ER_TCPE)
2119 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
2120 		}
2121 
2122 		ifp->if_ipackets++;
2123 
2124 #if NBPFILTER > 0
2125 		/* Pass this up to any BPF listeners. */
2126 		if (ifp->if_bpf)
2127 			bpf_mtap(ifp->if_bpf, m);
2128 #endif /* NBPFILTER > 0 */
2129 
2130 		/* Pass it on. */
2131 		(*ifp->if_input)(ifp, m);
2132 	}
2133 
2134 	/* Update the receive pointer. */
2135 	sc->sc_rxptr = i;
2136 
2137 	DPRINTF(WM_DEBUG_RX,
2138 	    ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
2139 }
2140 
2141 /*
2142  * wm_linkintr:
2143  *
2144  *	Helper; handle link interrupts.
2145  */
2146 static void
2147 wm_linkintr(struct wm_softc *sc, uint32_t icr)
2148 {
2149 	uint32_t status;
2150 
2151 	/*
2152 	 * If we get a link status interrupt on a 1000BASE-T
2153 	 * device, just fall into the normal MII tick path.
2154 	 */
2155 	if (sc->sc_flags & WM_F_HAS_MII) {
2156 		if (icr & ICR_LSC) {
2157 			DPRINTF(WM_DEBUG_LINK,
2158 			    ("%s: LINK: LSC -> mii_tick\n",
2159 			    sc->sc_dev.dv_xname));
2160 			mii_tick(&sc->sc_mii);
2161 		} else if (icr & ICR_RXSEQ) {
2162 			DPRINTF(WM_DEBUG_LINK,
2163 			    ("%s: LINK: Receive sequence error\n",
2164 			    sc->sc_dev.dv_xname));
2165 		}
2166 		return;
2167 	}
2168 
2169 	/*
2170 	 * If we are now receiving /C/, check for link again in
2171 	 * a couple of link clock ticks.
2172 	 */
2173 	if (icr & ICR_RXCFG) {
2174 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
2175 		    sc->sc_dev.dv_xname));
2176 		sc->sc_tbi_anstate = 2;
2177 	}
2178 
2179 	if (icr & ICR_LSC) {
2180 		status = CSR_READ(sc, WMREG_STATUS);
2181 		if (status & STATUS_LU) {
2182 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
2183 			    sc->sc_dev.dv_xname,
2184 			    (status & STATUS_FD) ? "FDX" : "HDX"));
2185 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2186 			sc->sc_fcrtl &= ~FCRTL_XONE;
2187 			if (status & STATUS_FD)
2188 				sc->sc_tctl |=
2189 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2190 			else
2191 				sc->sc_tctl |=
2192 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2193 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
2194 				sc->sc_fcrtl |= FCRTL_XONE;
2195 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2196 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
2197 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
2198 				      sc->sc_fcrtl);
2199 			sc->sc_tbi_linkup = 1;
2200 		} else {
2201 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
2202 			    sc->sc_dev.dv_xname));
2203 			sc->sc_tbi_linkup = 0;
2204 		}
2205 		sc->sc_tbi_anstate = 2;
2206 		wm_tbi_set_linkled(sc);
2207 	} else if (icr & ICR_RXSEQ) {
2208 		DPRINTF(WM_DEBUG_LINK,
2209 		    ("%s: LINK: Receive sequence error\n",
2210 		    sc->sc_dev.dv_xname));
2211 	}
2212 }
2213 
2214 /*
2215  * wm_tick:
2216  *
2217  *	One second timer, used to check link status, sweep up
2218  *	completed transmit jobs, etc.
2219  */
2220 static void
2221 wm_tick(void *arg)
2222 {
2223 	struct wm_softc *sc = arg;
2224 	int s;
2225 
2226 	s = splnet();
2227 
2228 	if (sc->sc_type >= WM_T_82542_2_1) {
2229 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2230 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2231 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2232 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2233 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2234 	}
2235 
2236 	if (sc->sc_flags & WM_F_HAS_MII)
2237 		mii_tick(&sc->sc_mii);
2238 	else
2239 		wm_tbi_check_link(sc);
2240 
2241 	splx(s);
2242 
2243 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2244 }
2245 
2246 /*
2247  * wm_reset:
2248  *
2249  *	Reset the i82542 chip.
2250  *	Reset the chip, using the method appropriate for its type.
2251 static void
2252 wm_reset(struct wm_softc *sc)
2253 {
2254 	int i;
2255 
2256 	switch (sc->sc_type) {
2257 	case WM_T_82544:
2258 	case WM_T_82540:
2259 	case WM_T_82545:
2260 	case WM_T_82546:
2261 	case WM_T_82541:
2262 	case WM_T_82541_2:
2263 		/*
2264 		 * These chips have a problem with the memory-mapped
2265 		 * write cycle when issuing the reset, so use I/O-mapped
2266 		 * access, if possible.
2267 		 */
2268 		if (sc->sc_flags & WM_F_IOH_VALID)
2269 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
2270 		else
2271 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2272 		break;
2273 
2274 	case WM_T_82545_3:
2275 	case WM_T_82546_3:
2276 		/* Use the shadow control register on these chips. */
2277 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
2278 		break;
2279 
2280 	default:
2281 		/* Everything else can safely use the documented method. */
2282 		CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2283 		break;
2284 	}
2285 	delay(10000);
2286 
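	/* Wait (up to ~20ms) for the RST bit to self-clear. */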
2287 	for (i = 0; i < 1000; i++) {
2288 		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
2289 			return;
2290 		delay(20);
2291 	}
2292 
2293 	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
2294 		printf("%s: WARNING: reset failed to complete\n",
2295 		    sc->sc_dev.dv_xname);
2296 }
2297 
2298 /*
2299  * wm_init:		[ifnet interface function]
2300  *
2301  *	Initialize the interface.  Must be called at splnet().
2302  */
2303 static int
2304 wm_init(struct ifnet *ifp)
2305 {
2306 	struct wm_softc *sc = ifp->if_softc;
2307 	struct wm_rxsoft *rxs;
2308 	int i, error = 0;
2309 	uint32_t reg;
2310 
2311 	/*
2312 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
2313 	 * There is a small but measurable benefit to avoiding the adjustment
2314 	 * of the descriptor so that the headers are aligned, for normal mtu,
2315 	 * on such platforms.  One possibility is that the DMA itself is
2316 	 * slightly more efficient if the front of the entire packet (instead
2317 	 * of the front of the headers) is aligned.
2318 	 *
2319 	 * Note we must always set align_tweak to 0 if we are using
2320 	 * jumbo frames.
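	 *
	 * (The 2 byte tweak is the usual Ethernet alignment trick: it
	 * offsets the 14 byte Ethernet header so that the IP header
	 * lands on a 4 byte boundary.)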
2321 	 */
2322 #ifdef __NO_STRICT_ALIGNMENT
2323 	sc->sc_align_tweak = 0;
2324 #else
2325 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
2326 		sc->sc_align_tweak = 0;
2327 	else
2328 		sc->sc_align_tweak = 2;
2329 #endif /* __NO_STRICT_ALIGNMENT */
2330 
2331 	/* Cancel any pending I/O. */
2332 	wm_stop(ifp, 0);
2333 
2334 	/* Reset the chip to a known state. */
2335 	wm_reset(sc);
2336 
2337 	/* Initialize the transmit descriptor ring. */
2338 	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
2339 	WM_CDTXSYNC(sc, 0, WM_NTXDESC,
2340 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2341 	sc->sc_txfree = WM_NTXDESC;
2342 	sc->sc_txnext = 0;
2343 
2344 	sc->sc_txctx_ipcs = 0xffffffff;
2345 	sc->sc_txctx_tucs = 0xffffffff;
2346 
2347 	if (sc->sc_type < WM_T_82543) {
2348 		CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
2349 		CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
2350 		CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
2351 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
2352 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
2353 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
2354 	} else {
2355 		CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
2356 		CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
2357 		CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
2358 		CSR_WRITE(sc, WMREG_TDH, 0);
2359 		CSR_WRITE(sc, WMREG_TDT, 0);
2360 		CSR_WRITE(sc, WMREG_TIDV, 128);
2361 
2362 		CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
2363 		    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
2364 		CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
2365 		    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
2366 	}
2367 	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
2368 	CSR_WRITE(sc, WMREG_TQSA_HI, 0);
2369 
2370 	/* Initialize the transmit job descriptors. */
2371 	for (i = 0; i < WM_TXQUEUELEN; i++)
2372 		sc->sc_txsoft[i].txs_mbuf = NULL;
2373 	sc->sc_txsfree = WM_TXQUEUELEN;
2374 	sc->sc_txsnext = 0;
2375 	sc->sc_txsdirty = 0;
2376 
2377 	/*
2378 	 * Initialize the receive descriptor and receive job
2379 	 * descriptor rings.
2380 	 */
2381 	if (sc->sc_type < WM_T_82543) {
2382 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
2383 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
2384 		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
2385 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
2386 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
2387 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
2388 
2389 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
2390 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
2391 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
2392 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
2393 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
2394 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
2395 	} else {
2396 		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
2397 		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
2398 		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
2399 		CSR_WRITE(sc, WMREG_RDH, 0);
2400 		CSR_WRITE(sc, WMREG_RDT, 0);
2401 		CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
2402 	}
2403 	for (i = 0; i < WM_NRXDESC; i++) {
2404 		rxs = &sc->sc_rxsoft[i];
2405 		if (rxs->rxs_mbuf == NULL) {
2406 			if ((error = wm_add_rxbuf(sc, i)) != 0) {
2407 				printf("%s: unable to allocate or map rx "
2408 				    "buffer %d, error = %d\n",
2409 				    sc->sc_dev.dv_xname, i, error);
2410 				/*
2411 				 * XXX Should attempt to run with fewer receive
2412 				 * XXX buffers instead of just failing.
2413 				 */
2414 				wm_rxdrain(sc);
2415 				goto out;
2416 			}
2417 		} else
2418 			WM_INIT_RXDESC(sc, i);
2419 	}
2420 	sc->sc_rxptr = 0;
2421 	sc->sc_rxdiscard = 0;
2422 	WM_RXCHAIN_RESET(sc);
2423 
2424 	/*
2425 	 * Clear out the VLAN table -- we don't use it (yet).
2426 	 */
2427 	CSR_WRITE(sc, WMREG_VET, 0);
2428 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
2429 		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
2430 
2431 	/*
2432 	 * Set up flow-control parameters.
2433 	 *
2434 	 * XXX Values could probably stand some tuning.
2435 	 */
2436 	CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
2437 	CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
2438 	CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
2439 
2440 	sc->sc_fcrtl = FCRTL_DFLT;
2441 	if (sc->sc_type < WM_T_82543) {
2442 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
2443 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
2444 	} else {
2445 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
2446 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
2447 	}
2448 	CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
2449 
2450 #if 0 /* XXXJRT */
2451 	/* Deal with VLAN enables. */
2452 	if (sc->sc_ethercom.ec_nvlans != 0)
2453 		sc->sc_ctrl |= CTRL_VME;
2454 	else
2455 #endif /* XXXJRT */
2456 		sc->sc_ctrl &= ~CTRL_VME;
2457 
2458 	/* Write the control registers. */
2459 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2460 #if 0
2461 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2462 #endif
2463 
2464 	/*
2465 	 * Set up checksum offload parameters.
2466 	 */
2467 	reg = CSR_READ(sc, WMREG_RXCSUM);
2468 	if (ifp->if_capenable & IFCAP_CSUM_IPv4)
2469 		reg |= RXCSUM_IPOFL;
2470 	else
2471 		reg &= ~RXCSUM_IPOFL;
2472 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
2473 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
2474 	else {
2475 		reg &= ~RXCSUM_TUOFL;
2476 		if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
2477 			reg &= ~RXCSUM_IPOFL;
2478 	}
2479 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
2480 
2481 	/*
2482 	 * Set up the interrupt registers.
2483 	 */
2484 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
2485 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
2486 	    ICR_RXO | ICR_RXT0;
2487 	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
2488 		sc->sc_icr |= ICR_RXCFG;
2489 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
2490 
2491 	/* Set up the inter-packet gap. */
2492 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
2493 
2494 #if 0 /* XXXJRT */
2495 	/* Set the VLAN ethernetype. */
2496 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
2497 #endif
2498 
2499 	/*
2500 	 * Set up the transmit control register; we start out with
2501 	 * a collision distance suitable for FDX, but update it whe
2502  *	a collision distance suitable for FDX, but update it when
2503 	 */
2504 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
2505 	    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2506 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2507 
2508 	/* Set the media. */
2509 	(void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
2510 
2511 	/*
2512 	 * Set up the receive control register; we actually program
2513 	 * the register when we set the receive filter.  Use multicast
2514 	 * address offset type 0.
2515 	 *
2516 	 * Only the i82544 has the ability to strip the incoming
2517 	 * CRC, so we don't enable that feature.
2518 	 */
2519 	sc->sc_mchash_type = 0;
2520 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_LPE |
2521 	    RCTL_DPF | RCTL_MO(sc->sc_mchash_type);
2522 
2523 	if (MCLBYTES == 2048) {
2524 		sc->sc_rctl |= RCTL_2k;
2525 	} else {
2526 		if (sc->sc_type >= WM_T_82543) {
2527 			switch (MCLBYTES) {
2528 			case 4096:
2529 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
2530 				break;
2531 			case 8192:
2532 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
2533 				break;
2534 			case 16384:
2535 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
2536 				break;
2537 			default:
2538 				panic("wm_init: MCLBYTES %d unsupported",
2539 				    MCLBYTES);
2540 				break;
2541 			}
2542 		} else
			panic("wm_init: i82542 requires MCLBYTES = 2048");
2543 	}
2544 
2545 	/* Set the receive filter. */
2546 	wm_set_filter(sc);
2547 
2548 	/* Start the one second link check clock. */
2549 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2550 
2551 	/* ...all done! */
2552 	ifp->if_flags |= IFF_RUNNING;
2553 	ifp->if_flags &= ~IFF_OACTIVE;
2554 
2555  out:
2556 	if (error)
2557 		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2558 	return (error);
2559 }
2560 
2561 /*
2562  * wm_rxdrain:
2563  *
2564  *	Drain the receive queue.
2565  */
2566 static void
2567 wm_rxdrain(struct wm_softc *sc)
2568 {
2569 	struct wm_rxsoft *rxs;
2570 	int i;
2571 
2572 	for (i = 0; i < WM_NRXDESC; i++) {
2573 		rxs = &sc->sc_rxsoft[i];
2574 		if (rxs->rxs_mbuf != NULL) {
2575 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2576 			m_freem(rxs->rxs_mbuf);
2577 			rxs->rxs_mbuf = NULL;
2578 		}
2579 	}
2580 }
2581 
2582 /*
2583  * wm_stop:		[ifnet interface function]
2584  *
2585  *	Stop transmission on the interface.
2586  */
2587 static void
2588 wm_stop(struct ifnet *ifp, int disable)
2589 {
2590 	struct wm_softc *sc = ifp->if_softc;
2591 	struct wm_txsoft *txs;
2592 	int i;
2593 
2594 	/* Stop the one second clock. */
2595 	callout_stop(&sc->sc_tick_ch);
2596 
2597 	if (sc->sc_flags & WM_F_HAS_MII) {
2598 		/* Down the MII. */
2599 		mii_down(&sc->sc_mii);
2600 	}
2601 
2602 	/* Stop the transmit and receive processes. */
2603 	CSR_WRITE(sc, WMREG_TCTL, 0);
2604 	CSR_WRITE(sc, WMREG_RCTL, 0);
2605 
2606 	/* Release any queued transmit buffers. */
2607 	for (i = 0; i < WM_TXQUEUELEN; i++) {
2608 		txs = &sc->sc_txsoft[i];
2609 		if (txs->txs_mbuf != NULL) {
2610 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2611 			m_freem(txs->txs_mbuf);
2612 			txs->txs_mbuf = NULL;
2613 		}
2614 	}
2615 
2616 	if (disable)
2617 		wm_rxdrain(sc);
2618 
2619 	/* Mark the interface as down and cancel the watchdog timer. */
2620 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2621 	ifp->if_timer = 0;
2622 }
2623 
2624 /*
2625  * wm_acquire_eeprom:
2626  *
2627  *	Perform the EEPROM handshake required on some chips.
2628  */
2629 static int
2630 wm_acquire_eeprom(struct wm_softc *sc)
2631 {
2632 	uint32_t reg;
2633 	int x;
2634 
2635 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE)  {
2636 		reg = CSR_READ(sc, WMREG_EECD);
2637 
2638 		/* Request EEPROM access. */
2639 		reg |= EECD_EE_REQ;
2640 		CSR_WRITE(sc, WMREG_EECD, reg);
2641 
2642 		/* ...and wait for it to be granted. */
2643 		for (x = 0; x < 100; x++) {
2644 			reg = CSR_READ(sc, WMREG_EECD);
2645 			if (reg & EECD_EE_GNT)
2646 				break;
2647 			delay(5);
2648 		}
2649 		if ((reg & EECD_EE_GNT) == 0) {
2650 			aprint_error("%s: could not acquire EEPROM GNT\n",
2651 			    sc->sc_dev.dv_xname);
2652 			reg &= ~EECD_EE_REQ;
2653 			CSR_WRITE(sc, WMREG_EECD, reg);
2654 			return (1);
2655 		}
2656 	}
2657 
2658 	return (0);
2659 }
2660 
2661 /*
2662  * wm_release_eeprom:
2663  *
2664  *	Release the EEPROM mutex.
2665  */
2666 static void
2667 wm_release_eeprom(struct wm_softc *sc)
2668 {
2669 	uint32_t reg;
2670 
2671 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2672 		reg = CSR_READ(sc, WMREG_EECD);
2673 		reg &= ~EECD_EE_REQ;
2674 		CSR_WRITE(sc, WMREG_EECD, reg);
2675 	}
2676 }
2677 
2678 /*
2679  * wm_eeprom_sendbits:
2680  *
2681  *	Send a series of bits to the EEPROM.
2682  */
2683 static void
2684 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
2685 {
2686 	uint32_t reg;
2687 	int x;
2688 
2689 	reg = CSR_READ(sc, WMREG_EECD);
2690 
2691 	for (x = nbits; x > 0; x--) {
2692 		if (bits & (1U << (x - 1)))
2693 			reg |= EECD_DI;
2694 		else
2695 			reg &= ~EECD_DI;
2696 		CSR_WRITE(sc, WMREG_EECD, reg);
2697 		delay(2);
2698 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2699 		delay(2);
2700 		CSR_WRITE(sc, WMREG_EECD, reg);
2701 		delay(2);
2702 	}
2703 }
2704 
2705 /*
2706  * wm_eeprom_recvbits:
2707  *
2708  *	Receive a series of bits from the EEPROM.
2709  */
2710 static void
2711 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
2712 {
2713 	uint32_t reg, val;
2714 	int x;
2715 
2716 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
2717 
2718 	val = 0;
2719 	for (x = nbits; x > 0; x--) {
2720 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2721 		delay(2);
2722 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
2723 			val |= (1U << (x - 1));
2724 		CSR_WRITE(sc, WMREG_EECD, reg);
2725 		delay(2);
2726 	}
2727 	*valp = val;
2728 }
2729 
2730 /*
2731  * wm_read_eeprom_uwire:
2732  *
2733  *	Read a word from the EEPROM using the MicroWire protocol.
2734  */
2735 static int
2736 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2737 {
2738 	uint32_t reg, val;
2739 	int i;
2740 
2741 	for (i = 0; i < wordcnt; i++) {
2742 		/* Clear SK and DI. */
2743 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
2744 		CSR_WRITE(sc, WMREG_EECD, reg);
2745 
2746 		/* Set CHIP SELECT. */
2747 		reg |= EECD_CS;
2748 		CSR_WRITE(sc, WMREG_EECD, reg);
2749 		delay(2);
2750 
2751 		/* Shift in the READ command. */
2752 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
2753 
2754 		/* Shift in address. */
2755 		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
2756 
2757 		/* Shift out the data. */
2758 		wm_eeprom_recvbits(sc, &val, 16);
2759 		data[i] = val & 0xffff;
2760 
2761 		/* Clear CHIP SELECT. */
2762 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
2763 		CSR_WRITE(sc, WMREG_EECD, reg);
2764 		delay(2);
2765 	}
2766 
2767 	return (0);
2768 }
2769 
2770 /*
2771  * wm_spi_eeprom_ready:
2772  *
2773  *	Wait for a SPI EEPROM to be ready for commands.
2774  */
2775 static int
2776 wm_spi_eeprom_ready(struct wm_softc *sc)
2777 {
2778 	uint32_t val;
2779 	int usec;
2780 
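	/*
	 * Poll the status register (RDSR opcode) until the busy bit
	 * deasserts.
	 */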
2781 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
2782 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
2783 		wm_eeprom_recvbits(sc, &val, 8);
2784 		if ((val & SPI_SR_RDY) == 0)
2785 			break;
2786 	}
2787 	if (usec >= SPI_MAX_RETRIES) {
2788 		aprint_error("%s: EEPROM failed to become ready\n",
2789 		    sc->sc_dev.dv_xname);
2790 		return (1);
2791 	}
2792 	return (0);
2793 }
2794 
2795 /*
2796  * wm_read_eeprom_spi:
2797  *
2798  *	Read a word from the EEPROM using the SPI protocol.
2799  */
2800 static int
2801 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2802 {
2803 	uint32_t reg, val;
2804 	int i;
2805 	uint8_t opc;
2806 
2807 	/* Clear SK and CS. */
2808 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
2809 	CSR_WRITE(sc, WMREG_EECD, reg);
2810 	delay(2);
2811 
2812 	if (wm_spi_eeprom_ready(sc))
2813 		return (1);
2814 
2815 	/* Toggle CS to flush commands. */
2816 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
2817 	delay(2);
2818 	CSR_WRITE(sc, WMREG_EECD, reg);
2819 	delay(2);
2820 
2821 	opc = SPI_OPC_READ;
2822 	if (sc->sc_ee_addrbits == 8 && word >= 128)
2823 		opc |= SPI_OPC_A8;
2824 
2825 	wm_eeprom_sendbits(sc, opc, 8);
2826 	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
2827 
2828 	for (i = 0; i < wordcnt; i++) {
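		/*
		 * The EEPROM shifts each 16-bit word out MSB first;
		 * swap the bytes so the first byte received lands in
		 * the low half of the word.
		 */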
2829 		wm_eeprom_recvbits(sc, &val, 16);
2830 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
2831 	}
2832 
2833 	/* Raise CS and clear SK. */
2834 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
2835 	CSR_WRITE(sc, WMREG_EECD, reg);
2836 	delay(2);
2837 
2838 	return (0);
2839 }
2840 
2841 /*
2842  * wm_read_eeprom:
2843  *
2844  *	Read data from the serial EEPROM.
2845  */
2846 static int
2847 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2848 {
2849 	int rv;
2850 
2851 	if (wm_acquire_eeprom(sc))
2852 		return (1);
2853 
2854 	if (sc->sc_flags & WM_F_EEPROM_SPI)
2855 		rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
2856 	else
2857 		rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
2858 
2859 	wm_release_eeprom(sc);
2860 	return (rv);
2861 }
2862 
2863 /*
2864  * wm_add_rxbuf:
2865  *
2866  *	Add a receive buffer to the indicated descriptor.
2867  */
2868 static int
2869 wm_add_rxbuf(struct wm_softc *sc, int idx)
2870 {
2871 	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
2872 	struct mbuf *m;
2873 	int error;
2874 
2875 	MGETHDR(m, M_DONTWAIT, MT_DATA);
2876 	if (m == NULL)
2877 		return (ENOBUFS);
2878 
2879 	MCLGET(m, M_DONTWAIT);
2880 	if ((m->m_flags & M_EXT) == 0) {
2881 		m_freem(m);
2882 		return (ENOBUFS);
2883 	}
2884 
2885 	if (rxs->rxs_mbuf != NULL)
2886 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2887 
2888 	rxs->rxs_mbuf = m;
2889 
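	/* Make the entire cluster available to the device. */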
2890 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2891 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
2892 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
2893 	if (error) {
2894 		printf("%s: unable to load rx DMA map %d, error = %d\n",
2895 		    sc->sc_dev.dv_xname, idx, error);
2896 		panic("wm_add_rxbuf");	/* XXX XXX XXX */
2897 	}
2898 
2899 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2900 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2901 
2902 	WM_INIT_RXDESC(sc, idx);
2903 
2904 	return (0);
2905 }
2906 
2907 /*
2908  * wm_set_ral:
2909  *
2910  *	Set an entry in the receive address list.
2911  */
2912 static void
2913 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2914 {
2915 	uint32_t ral_lo, ral_hi;
2916 
2917 	if (enaddr != NULL) {
2918 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2919 		    (enaddr[3] << 24);
2920 		ral_hi = enaddr[4] | (enaddr[5] << 8);
2921 		ral_hi |= RAL_AV;
2922 	} else {
2923 		ral_lo = 0;
2924 		ral_hi = 0;
2925 	}
2926 
2927 	if (sc->sc_type >= WM_T_82544) {
2928 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2929 		    ral_lo);
2930 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2931 		    ral_hi);
2932 	} else {
2933 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2934 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2935 	}
2936 }
2937 
2938 /*
2939  * wm_mchash:
2940  *
2941  *	Compute the hash of the multicast address for the 4096-bit
2942  *	multicast filter.
2943  */
2944 static uint32_t
2945 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2946 {
2947 	static const int lo_shift[4] = { 4, 3, 2, 0 };
2948 	static const int hi_shift[4] = { 4, 5, 6, 8 };
2949 	uint32_t hash;
2950 
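	/*
	 * Illustrative example (assuming the default mchash_type 0):
	 * for 01:00:5e:00:00:01 this computes
	 * (0x00 >> 4) | (0x01 << 4) = 0x010, i.e. MTA word 0, bit 16
	 * once wm_set_filter() splits the hash into a register index
	 * and a bit index.
	 */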
2951 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2952 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2953 
2954 	return (hash & 0xfff);
2955 }
2956 
2957 /*
2958  * wm_set_filter:
2959  *
2960  *	Set up the receive filter.
2961  */
2962 static void
2963 wm_set_filter(struct wm_softc *sc)
2964 {
2965 	struct ethercom *ec = &sc->sc_ethercom;
2966 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2967 	struct ether_multi *enm;
2968 	struct ether_multistep step;
2969 	bus_addr_t mta_reg;
2970 	uint32_t hash, reg, bit;
2971 	int i;
2972 
2973 	if (sc->sc_type >= WM_T_82544)
2974 		mta_reg = WMREG_CORDOVA_MTA;
2975 	else
2976 		mta_reg = WMREG_MTA;
2977 
2978 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2979 
2980 	if (ifp->if_flags & IFF_BROADCAST)
2981 		sc->sc_rctl |= RCTL_BAM;
2982 	if (ifp->if_flags & IFF_PROMISC) {
2983 		sc->sc_rctl |= RCTL_UPE;
2984 		goto allmulti;
2985 	}
2986 
2987 	/*
2988 	 * Set the station address in the first RAL slot, and
2989 	 * clear the remaining slots.
2990 	 */
2991 	wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
2992 	for (i = 1; i < WM_RAL_TABSIZE; i++)
2993 		wm_set_ral(sc, NULL, i);
2994 
2995 	/* Clear out the multicast table. */
2996 	for (i = 0; i < WM_MC_TABSIZE; i++)
2997 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
2998 
2999 	ETHER_FIRST_MULTI(step, ec, enm);
3000 	while (enm != NULL) {
3001 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3002 			/*
3003 			 * We must listen to a range of multicast addresses.
3004 			 * For now, just accept all multicasts, rather than
3005 			 * trying to set only those filter bits needed to match
3006 			 * the range.  (At this time, the only use of address
3007 			 * ranges is for IP multicast routing, for which the
3008 			 * range is big enough to require all bits set.)
3009 			 */
3010 			goto allmulti;
3011 		}
3012 
3013 		hash = wm_mchash(sc, enm->enm_addrlo);
3014 
3015 		reg = (hash >> 5) & 0x7f;
3016 		bit = hash & 0x1f;
3017 
3018 		hash = CSR_READ(sc, mta_reg + (reg << 2));
3019 		hash |= 1U << bit;
3020 
3021 		/* XXX Hardware bug?? */
3022 		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
3023 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3024 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3025 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3026 		} else
3027 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3028 
3029 		ETHER_NEXT_MULTI(step, enm);
3030 	}
3031 
3032 	ifp->if_flags &= ~IFF_ALLMULTI;
3033 	goto setit;
3034 
3035  allmulti:
3036 	ifp->if_flags |= IFF_ALLMULTI;
3037 	sc->sc_rctl |= RCTL_MPE;
3038 
3039  setit:
3040 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3041 }
3042 
3043 /*
3044  * wm_tbi_mediainit:
3045  *
3046  *	Initialize media for use on 1000BASE-X devices.
3047  */
3048 static void
3049 wm_tbi_mediainit(struct wm_softc *sc)
3050 {
3051 	const char *sep = "";
3052 
3053 	if (sc->sc_type < WM_T_82543)
3054 		sc->sc_tipg = TIPG_WM_DFLT;
3055 	else
3056 		sc->sc_tipg = TIPG_LG_DFLT;
3057 
3058 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
3059 	    wm_tbi_mediastatus);
3060 
3061 	/*
3062 	 * SWD Pins:
3063 	 *
3064 	 *	0 = Link LED (output)
3065 	 *	1 = Loss Of Signal (input)
3066 	 */
3067 	sc->sc_ctrl |= CTRL_SWDPIO(0);
3068 	sc->sc_ctrl &= ~CTRL_SWDPIO(1);
3069 
3070 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3071 
3072 #define	ADD(ss, mm, dd)							\
3073 do {									\
3074 	printf("%s%s", sep, ss);					\
3075 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
3076 	sep = ", ";							\
3077 } while (/*CONSTCOND*/0)
3078 
3079 	printf("%s: ", sc->sc_dev.dv_xname);
3080 	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
3081 	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
3082 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
3083 	printf("\n");
3084 
3085 #undef ADD
3086 
3087 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
3088 }
3089 
3090 /*
3091  * wm_tbi_mediastatus:	[ifmedia interface function]
3092  *
3093  *	Get the current interface media status on a 1000BASE-X device.
3094  */
3095 static void
3096 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
3097 {
3098 	struct wm_softc *sc = ifp->if_softc;
3099 	uint32_t ctrl;
3100 
3101 	ifmr->ifm_status = IFM_AVALID;
3102 	ifmr->ifm_active = IFM_ETHER;
3103 
3104 	if (sc->sc_tbi_linkup == 0) {
3105 		ifmr->ifm_active |= IFM_NONE;
3106 		return;
3107 	}
3108 
3109 	ifmr->ifm_status |= IFM_ACTIVE;
3110 	ifmr->ifm_active |= IFM_1000_SX;
3111 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
3112 		ifmr->ifm_active |= IFM_FDX;
3113 	ctrl = CSR_READ(sc, WMREG_CTRL);
3114 	if (ctrl & CTRL_RFCE)
3115 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
3116 	if (ctrl & CTRL_TFCE)
3117 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
3118 }
3119 
3120 /*
3121  * wm_tbi_mediachange:	[ifmedia interface function]
3122  *
3123  *	Set hardware to newly-selected media on a 1000BASE-X device.
3124  */
3125 static int
3126 wm_tbi_mediachange(struct ifnet *ifp)
3127 {
3128 	struct wm_softc *sc = ifp->if_softc;
3129 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
3130 	uint32_t status;
3131 	int i;
3132 
3133 	sc->sc_txcw = ife->ifm_data;
3134 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
3135 	    (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
3136 		sc->sc_txcw |= ANAR_X_PAUSE_SYM | ANAR_X_PAUSE_ASYM;
3137 	sc->sc_txcw |= TXCW_ANE;
3138 
3139 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
3140 	delay(10000);
3141 
3142 	/* NOTE: CTRL will update TFCE and RFCE automatically. */
3143 
3144 	sc->sc_tbi_anstate = 0;
3145 
3146 	if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
3147 		/* Have signal; wait for the link to come up. */
3148 		for (i = 0; i < 50; i++) {
3149 			delay(10000);
3150 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
3151 				break;
3152 		}
3153 
3154 		status = CSR_READ(sc, WMREG_STATUS);
3155 		if (status & STATUS_LU) {
3156 			/* Link is up. */
3157 			DPRINTF(WM_DEBUG_LINK,
3158 			    ("%s: LINK: set media -> link up %s\n",
3159 			    sc->sc_dev.dv_xname,
3160 			    (status & STATUS_FD) ? "FDX" : "HDX"));
3161 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3162 			sc->sc_fcrtl &= ~FCRTL_XONE;
3163 			if (status & STATUS_FD)
3164 				sc->sc_tctl |=
3165 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3166 			else
3167 				sc->sc_tctl |=
3168 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3169 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
3170 				sc->sc_fcrtl |= FCRTL_XONE;
3171 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3172 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3173 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
3174 				      sc->sc_fcrtl);
3175 			sc->sc_tbi_linkup = 1;
3176 		} else {
3177 			/* Link is down. */
3178 			DPRINTF(WM_DEBUG_LINK,
3179 			    ("%s: LINK: set media -> link down\n",
3180 			    sc->sc_dev.dv_xname));
3181 			sc->sc_tbi_linkup = 0;
3182 		}
3183 	} else {
3184 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
3185 		    sc->sc_dev.dv_xname));
3186 		sc->sc_tbi_linkup = 0;
3187 	}
3188 
3189 	wm_tbi_set_linkled(sc);
3190 
3191 	return (0);
3192 }
3193 
3194 /*
3195  * wm_tbi_set_linkled:
3196  *
3197  *	Update the link LED on 1000BASE-X devices.
3198  */
3199 static void
3200 wm_tbi_set_linkled(struct wm_softc *sc)
3201 {
3202 
3203 	if (sc->sc_tbi_linkup)
3204 		sc->sc_ctrl |= CTRL_SWDPIN(0);
3205 	else
3206 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
3207 
3208 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3209 }
3210 
3211 /*
3212  * wm_tbi_check_link:
3213  *
3214  *	Check the link on 1000BASE-X devices.
3215  */
3216 static void
3217 wm_tbi_check_link(struct wm_softc *sc)
3218 {
3219 	uint32_t rxcw, ctrl, status;
3220 
3221 	if (sc->sc_tbi_anstate == 0)
3222 		return;
3223 	else if (sc->sc_tbi_anstate > 1) {
3224 		DPRINTF(WM_DEBUG_LINK,
3225 		    ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
3226 		    sc->sc_tbi_anstate));
3227 		sc->sc_tbi_anstate--;
3228 		return;
3229 	}
3230 
3231 	sc->sc_tbi_anstate = 0;
3232 
3233 	rxcw = CSR_READ(sc, WMREG_RXCW);
3234 	ctrl = CSR_READ(sc, WMREG_CTRL);
3235 	status = CSR_READ(sc, WMREG_STATUS);
3236 
3237 	if ((status & STATUS_LU) == 0) {
3238 		DPRINTF(WM_DEBUG_LINK,
3239 		    ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
3240 		sc->sc_tbi_linkup = 0;
3241 	} else {
3242 		DPRINTF(WM_DEBUG_LINK,
3243 		    ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
3244 		    (status & STATUS_FD) ? "FDX" : "HDX"));
3245 		sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3246 		sc->sc_fcrtl &= ~FCRTL_XONE;
3247 		if (status & STATUS_FD)
3248 			sc->sc_tctl |=
3249 			    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3250 		else
3251 			sc->sc_tctl |=
3252 			    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3253 		if (ctrl & CTRL_TFCE)
3254 			sc->sc_fcrtl |= FCRTL_XONE;
3255 		CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3256 		CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3257 			      WMREG_OLD_FCRTL : WMREG_FCRTL,
3258 			      sc->sc_fcrtl);
3259 		sc->sc_tbi_linkup = 1;
3260 	}
3261 
3262 	wm_tbi_set_linkled(sc);
3263 }
3264 
3265 /*
3266  * wm_gmii_reset:
3267  *
3268  *	Reset the PHY.
3269  */
3270 static void
3271 wm_gmii_reset(struct wm_softc *sc)
3272 {
3273 	uint32_t reg;
3274 
3275 	if (sc->sc_type >= WM_T_82544) {
3276 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
3277 		delay(20000);
3278 
3279 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3280 		delay(20000);
3281 	} else {
3282 		/* The PHY reset pin is active-low. */
3283 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
3284 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
3285 		    CTRL_EXT_SWDPIN(4));
3286 		reg |= CTRL_EXT_SWDPIO(4);
3287 
3288 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
3289 		delay(10);
3290 
3291 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3292 		delay(10);
3293 
3294 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
3295 		delay(10);
3296 #if 0
3297 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
3298 #endif
3299 	}
3300 }
3301 
3302 /*
3303  * wm_gmii_mediainit:
3304  *
3305  *	Initialize media for use on 1000BASE-T devices.
3306  */
3307 static void
3308 wm_gmii_mediainit(struct wm_softc *sc)
3309 {
3310 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3311 
3312 	/* We have MII. */
3313 	sc->sc_flags |= WM_F_HAS_MII;
3314 
3315 	sc->sc_tipg = TIPG_1000T_DFLT;
3316 
3317 	/*
3318 	 * Let the chip set speed/duplex on its own based on
3319 	 * signals from the PHY.
3320 	 */
3321 	sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
3322 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3323 
3324 	/* Initialize our media structures and probe the GMII. */
3325 	sc->sc_mii.mii_ifp = ifp;
3326 
3327 	if (sc->sc_type >= WM_T_82544) {
3328 		sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
3329 		sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
3330 	} else {
3331 		sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
3332 		sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
3333 	}
3334 	sc->sc_mii.mii_statchg = wm_gmii_statchg;
3335 
3336 	wm_gmii_reset(sc);
3337 
3338 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
3339 	    wm_gmii_mediastatus);
3340 
3341 	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
3342 	    MII_OFFSET_ANY, MIIF_DOPAUSE);
3343 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
3344 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
3345 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
3346 	} else
3347 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
3348 }
3349 
3350 /*
3351  * wm_gmii_mediastatus:	[ifmedia interface function]
3352  *
3353  *	Get the current interface media status on a 1000BASE-T device.
3354  */
3355 static void
3356 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
3357 {
3358 	struct wm_softc *sc = ifp->if_softc;
3359 
3360 	mii_pollstat(&sc->sc_mii);
3361 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
3362 	ifmr->ifm_active = (sc->sc_mii.mii_media_active & ~IFM_ETH_FMASK) |
3363 			   sc->sc_flowflags;
3364 }
3365 
3366 /*
3367  * wm_gmii_mediachange:	[ifmedia interface function]
3368  *
3369  *	Set hardware to newly-selected media on a 1000BASE-T device.
3370  */
3371 static int
3372 wm_gmii_mediachange(struct ifnet *ifp)
3373 {
3374 	struct wm_softc *sc = ifp->if_softc;
3375 
3376 	if (ifp->if_flags & IFF_UP)
3377 		mii_mediachg(&sc->sc_mii);
3378 	return (0);
3379 }
3380 
3381 #define	MDI_IO		CTRL_SWDPIN(2)
3382 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
3383 #define	MDI_CLK		CTRL_SWDPIN(3)
3384 
3385 static void
3386 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
3387 {
3388 	uint32_t i, v;
3389 
3390 	v = CSR_READ(sc, WMREG_CTRL);
3391 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
3392 	v |= MDI_DIR | CTRL_SWDPIO(3);
3393 
3394 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
3395 		if (data & i)
3396 			v |= MDI_IO;
3397 		else
3398 			v &= ~MDI_IO;
3399 		CSR_WRITE(sc, WMREG_CTRL, v);
3400 		delay(10);
3401 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3402 		delay(10);
3403 		CSR_WRITE(sc, WMREG_CTRL, v);
3404 		delay(10);
3405 	}
3406 }
3407 
3408 static uint32_t
3409 i82543_mii_recvbits(struct wm_softc *sc)
3410 {
3411 	uint32_t v, i, data = 0;
3412 
3413 	v = CSR_READ(sc, WMREG_CTRL);
3414 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
3415 	v |= CTRL_SWDPIO(3);
3416 
3417 	CSR_WRITE(sc, WMREG_CTRL, v);
3418 	delay(10);
3419 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3420 	delay(10);
3421 	CSR_WRITE(sc, WMREG_CTRL, v);
3422 	delay(10);
3423 
3424 	for (i = 0; i < 16; i++) {
3425 		data <<= 1;
3426 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3427 		delay(10);
3428 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
3429 			data |= 1;
3430 		CSR_WRITE(sc, WMREG_CTRL, v);
3431 		delay(10);
3432 	}
3433 
3434 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3435 	delay(10);
3436 	CSR_WRITE(sc, WMREG_CTRL, v);
3437 	delay(10);
3438 
3439 	return (data);
3440 }
3441 
3442 #undef MDI_IO
3443 #undef MDI_DIR
3444 #undef MDI_CLK
3445 
3446 /*
3447  * wm_gmii_i82543_readreg:	[mii interface function]
3448  *
3449  *	Read a PHY register on the GMII (i82543 version).
3450  */
3451 static int
3452 wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
3453 {
3454 	struct wm_softc *sc = (void *) self;
3455 	int rv;
3456 
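	/*
	 * Clock out 32 bits of preamble, then the 14 header bits of the
	 * IEEE 802.3 clause 22 management frame: start bits, read
	 * opcode, PHY address, and register address.
	 */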
3457 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
3458 	i82543_mii_sendbits(sc, reg | (phy << 5) |
3459 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
3460 	rv = i82543_mii_recvbits(sc) & 0xffff;
3461 
3462 	DPRINTF(WM_DEBUG_GMII,
3463 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
3464 	    sc->sc_dev.dv_xname, phy, reg, rv));
3465 
3466 	return (rv);
3467 }
3468 
3469 /*
3470  * wm_gmii_i82543_writereg:	[mii interface function]
3471  *
3472  *	Write a PHY register on the GMII (i82543 version).
3473  */
3474 static void
3475 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
3476 {
3477 	struct wm_softc *sc = (void *) self;
3478 
3479 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
3480 	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
3481 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
3482 	    (MII_COMMAND_START << 30), 32);
3483 }
3484 
3485 /*
3486  * wm_gmii_i82544_readreg:	[mii interface function]
3487  *
3488  *	Read a PHY register on the GMII.
3489  */
3490 static int
3491 wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
3492 {
3493 	struct wm_softc *sc = (void *) self;
3494 	uint32_t mdic = 0;
3495 	int i, rv;
3496 
3497 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
3498 	    MDIC_REGADD(reg));
3499 
3500 	for (i = 0; i < 100; i++) {
3501 		mdic = CSR_READ(sc, WMREG_MDIC);
3502 		if (mdic & MDIC_READY)
3503 			break;
3504 		delay(10);
3505 	}
3506 
3507 	if ((mdic & MDIC_READY) == 0) {
3508 		printf("%s: MDIC read timed out: phy %d reg %d\n",
3509 		    sc->sc_dev.dv_xname, phy, reg);
3510 		rv = 0;
3511 	} else if (mdic & MDIC_E) {
3512 #if 0 /* This is normal if no PHY is present. */
3513 		printf("%s: MDIC read error: phy %d reg %d\n",
3514 		    sc->sc_dev.dv_xname, phy, reg);
3515 #endif
3516 		rv = 0;
3517 	} else {
3518 		rv = MDIC_DATA(mdic);
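		/* A read of all ones usually means no PHY answered. */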
3519 		if (rv == 0xffff)
3520 			rv = 0;
3521 	}
3522 
3523 	return (rv);
3524 }
3525 
3526 /*
3527  * wm_gmii_i82544_writereg:	[mii interface function]
3528  *
3529  *	Write a PHY register on the GMII.
3530  */
3531 static void
3532 wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
3533 {
3534 	struct wm_softc *sc = (void *) self;
3535 	uint32_t mdic = 0;
3536 	int i;
3537 
3538 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
3539 	    MDIC_REGADD(reg) | MDIC_DATA(val));
3540 
3541 	for (i = 0; i < 100; i++) {
3542 		mdic = CSR_READ(sc, WMREG_MDIC);
3543 		if (mdic & MDIC_READY)
3544 			break;
3545 		delay(10);
3546 	}
3547 
3548 	if ((mdic & MDIC_READY) == 0)
3549 		printf("%s: MDIC write timed out: phy %d reg %d\n",
3550 		    sc->sc_dev.dv_xname, phy, reg);
3551 	else if (mdic & MDIC_E)
3552 		printf("%s: MDIC write error: phy %d reg %d\n",
3553 		    sc->sc_dev.dv_xname, phy, reg);
3554 }
3555 
3556 /*
3557  * wm_gmii_statchg:	[mii interface function]
3558  *
3559  *	Callback from MII layer when media changes.
3560  */
3561 static void
3562 wm_gmii_statchg(struct device *self)
3563 {
3564 	struct wm_softc *sc = (void *) self;
3565 	struct mii_data *mii = &sc->sc_mii;
3566 
3567 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
3568 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3569 	sc->sc_fcrtl &= ~FCRTL_XONE;
3570 
3571 	/*
3572 	 * Get flow control negotiation result.
3573 	 */
3574 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
3575 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
3576 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
3577 		mii->mii_media_active &= ~IFM_ETH_FMASK;
3578 	}
3579 
3580 	if (sc->sc_flowflags & IFM_FLOW) {
3581 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
3582 			sc->sc_ctrl |= CTRL_TFCE;
3583 			sc->sc_fcrtl |= FCRTL_XONE;
3584 		}
3585 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
3586 			sc->sc_ctrl |= CTRL_RFCE;
3587 	}
3588 
3589 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
3590 		DPRINTF(WM_DEBUG_LINK,
3591 		    ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
3592 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3593 	} else  {
3594 		DPRINTF(WM_DEBUG_LINK,
3595 		    ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
3596 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3597 	}
3598 
3599 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3600 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3601 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
3602 						 : WMREG_FCRTL, sc->sc_fcrtl);
3603 }
3604