1 /*	$NetBSD: if_wm.c,v 1.163 2008/12/02 15:30:04 sketch Exp $	*/
2 
3 /*
4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*******************************************************************************
39 
40   Copyright (c) 2001-2005, Intel Corporation
41   All rights reserved.
42 
43   Redistribution and use in source and binary forms, with or without
44   modification, are permitted provided that the following conditions are met:
45 
46    1. Redistributions of source code must retain the above copyright notice,
47       this list of conditions and the following disclaimer.
48 
49    2. Redistributions in binary form must reproduce the above copyright
50       notice, this list of conditions and the following disclaimer in the
51       documentation and/or other materials provided with the distribution.
52 
53    3. Neither the name of the Intel Corporation nor the names of its
54       contributors may be used to endorse or promote products derived from
55       this software without specific prior written permission.
56 
57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67   POSSIBILITY OF SUCH DAMAGE.
68 
69 *******************************************************************************/
70 /*
71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72  *
73  * TODO (in order of importance):
74  *
75  *	- Rework how parameters are loaded from the EEPROM.
76  *	- Figure out what to do with the i82545GM and i82546GB
77  *	  SERDES controllers.
78  *	- Fix hw VLAN assist.
79  */
80 
81 #include <sys/cdefs.h>
82 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.163 2008/12/02 15:30:04 sketch Exp $");
83 
84 #include "bpfilter.h"
85 #include "rnd.h"
86 
87 #include <sys/param.h>
88 #include <sys/systm.h>
89 #include <sys/callout.h>
90 #include <sys/mbuf.h>
91 #include <sys/malloc.h>
92 #include <sys/kernel.h>
93 #include <sys/socket.h>
94 #include <sys/ioctl.h>
95 #include <sys/errno.h>
96 #include <sys/device.h>
97 #include <sys/queue.h>
98 #include <sys/syslog.h>
99 
100 #include <uvm/uvm_extern.h>		/* for PAGE_SIZE */
101 
102 #if NRND > 0
103 #include <sys/rnd.h>
104 #endif
105 
106 #include <net/if.h>
107 #include <net/if_dl.h>
108 #include <net/if_media.h>
109 #include <net/if_ether.h>
110 
111 #if NBPFILTER > 0
112 #include <net/bpf.h>
113 #endif
114 
115 #include <netinet/in.h>			/* XXX for struct ip */
116 #include <netinet/in_systm.h>		/* XXX for struct ip */
117 #include <netinet/ip.h>			/* XXX for struct ip */
118 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
119 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
120 
121 #include <sys/bus.h>
122 #include <sys/intr.h>
123 #include <machine/endian.h>
124 
125 #include <dev/mii/mii.h>
126 #include <dev/mii/miivar.h>
127 #include <dev/mii/mii_bitbang.h>
128 #include <dev/mii/ikphyreg.h>
129 
130 #include <dev/pci/pcireg.h>
131 #include <dev/pci/pcivar.h>
132 #include <dev/pci/pcidevs.h>
133 
134 #include <dev/pci/if_wmreg.h>
135 
136 #ifdef WM_DEBUG
137 #define	WM_DEBUG_LINK		0x01
138 #define	WM_DEBUG_TX		0x02
139 #define	WM_DEBUG_RX		0x04
140 #define	WM_DEBUG_GMII		0x08
141 int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK|WM_DEBUG_GMII;
142 
143 #define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
144 #else
145 #define	DPRINTF(x, y)	/* nothing */
146 #endif /* WM_DEBUG */
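
/*
 * Usage sketch for DPRINTF() (illustrative only, kept disabled with
 * #if 0 like the other dormant blocks in this file): note the doubled
 * parentheses around the printf-style argument list.
 */
#if 0
	DPRINTF(WM_DEBUG_TX, ("%s: TX: example message\n",
	    device_xname(sc->sc_dev)));
#endif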
147 
148 /*
149  * Transmit descriptor list size.  Due to errata, we can only have
150  * 256 hardware descriptors in the ring on < 82544, but we use 4096
151  * on >= 82544.  We tell the upper layers that they can queue a lot
152  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
153  * of them at a time.
154  *
155  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
156  * chains containing many small mbufs have been observed in zero-copy
157  * situations with jumbo frames.
158  */
159 #define	WM_NTXSEGS		256
160 #define	WM_IFQUEUELEN		256
161 #define	WM_TXQUEUELEN_MAX	64
162 #define	WM_TXQUEUELEN_MAX_82547	16
163 #define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
164 #define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
165 #define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
166 #define	WM_NTXDESC_82542	256
167 #define	WM_NTXDESC_82544	4096
168 #define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
169 #define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
170 #define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
171 #define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
172 #define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
173 
174 #define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */
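
/*
 * Minimal sketch (not compiled; hypothetical index values): because
 * both ring sizes are powers of two, WM_NEXTTX()/WM_NEXTTXS() wrap
 * with a cheap AND of the mask instead of a modulo.
 */
#if 0
static void
wm_ring_wrap_sketch(struct wm_softc *sc)
{
	int idx;

	/* With sc->sc_ntxdesc == 4096, WM_NTXDESC_MASK(sc) == 0xfff. */
	idx = WM_NTXDESC(sc) - 1;	/* last descriptor, e.g. 4095 */
	idx = WM_NEXTTX(sc, idx);	/* (4095 + 1) & 0xfff == 0 */

	/* The job queue wraps the same way. */
	idx = WM_TXQUEUELEN(sc) - 1;
	idx = WM_NEXTTXS(sc, idx);	/* back to job 0 */
}
#endif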
175 
176 /*
177  * Receive descriptor list size.  We have one Rx buffer for normal
178  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
179  * packet.  We allocate 256 receive descriptors, each with a 2k
180  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
181  */
182 #define	WM_NRXDESC		256
183 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
184 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
185 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
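
/*
 * Worked check of the sizing note above (assumes the usual 2k
 * MCLBYTES): a hypothetical 9018-byte jumbo frame spans 5 buffers,
 * so 256 descriptors hold 51 such packets (the comment above rounds
 * down to 50).
 */
#if 0
static void
wm_jumbo_sizing_sketch(void)
{

	KASSERT(howmany(9018, MCLBYTES) == 5);
	KASSERT(WM_NRXDESC / howmany(9018, MCLBYTES) == 51);
}
#endif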
186 
187 /*
188  * Control structures are DMA'd to the i82542 chip.  We allocate them in
189  * a single clump that maps to a single DMA segment to make several things
190  * easier.
191  */
192 struct wm_control_data_82544 {
193 	/*
194 	 * The receive descriptors.
195 	 */
196 	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
197 
198 	/*
199 	 * The transmit descriptors.  Put these at the end, because
200 	 * we might use a smaller number of them.
201 	 */
202 	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
203 };
204 
205 struct wm_control_data_82542 {
206 	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
207 	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
208 };
209 
210 #define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
211 #define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
212 #define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
213 
214 /*
215  * Software state for transmit jobs.
216  */
217 struct wm_txsoft {
218 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
219 	bus_dmamap_t txs_dmamap;	/* our DMA map */
220 	int txs_firstdesc;		/* first descriptor in packet */
221 	int txs_lastdesc;		/* last descriptor in packet */
222 	int txs_ndesc;			/* # of descriptors used */
223 };
224 
225 /*
226  * Software state for receive buffers.  Each descriptor gets a
227  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
228  * more than one buffer, we chain them together.
229  */
230 struct wm_rxsoft {
231 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
232 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
233 };
234 
235 typedef enum {
236 	WM_T_unknown		= 0,
237 	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
238 	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
239 	WM_T_82543,			/* i82543 */
240 	WM_T_82544,			/* i82544 */
241 	WM_T_82540,			/* i82540 */
242 	WM_T_82545,			/* i82545 */
243 	WM_T_82545_3,			/* i82545 3.0+ */
244 	WM_T_82546,			/* i82546 */
245 	WM_T_82546_3,			/* i82546 3.0+ */
246 	WM_T_82541,			/* i82541 */
247 	WM_T_82541_2,			/* i82541 2.0+ */
248 	WM_T_82547,			/* i82547 */
249 	WM_T_82547_2,			/* i82547 2.0+ */
250 	WM_T_82571,			/* i82571 */
251 	WM_T_82572,			/* i82572 */
252 	WM_T_82573,			/* i82573 */
253 	WM_T_80003,			/* i80003 */
254 	WM_T_ICH8,			/* ICH8 LAN */
255 	WM_T_ICH9,			/* ICH9 LAN */
256 } wm_chip_type;
257 
258 /*
259  * Software state per device.
260  */
261 struct wm_softc {
262 	device_t sc_dev;		/* generic device information */
263 	bus_space_tag_t sc_st;		/* bus space tag */
264 	bus_space_handle_t sc_sh;	/* bus space handle */
265 	bus_space_tag_t sc_iot;		/* I/O space tag */
266 	bus_space_handle_t sc_ioh;	/* I/O space handle */
267 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
268 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
269 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
270 	struct ethercom sc_ethercom;	/* ethernet common data */
271 	pci_chipset_tag_t sc_pc;
272 	pcitag_t sc_pcitag;
273 
274 	wm_chip_type sc_type;		/* chip type */
275 	int sc_flags;			/* flags; see below */
276 	int sc_bus_speed;		/* PCI/PCIX bus speed */
277 	int sc_pcix_offset;		/* PCIX capability register offset */
278 	int sc_flowflags;		/* 802.3x flow control flags */
279 
280 	void *sc_ih;			/* interrupt cookie */
281 
282 	int sc_ee_addrbits;		/* EEPROM address bits */
283 
284 	struct mii_data sc_mii;		/* MII/media information */
285 
286 	callout_t sc_tick_ch;		/* tick callout */
287 
288 	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
289 #define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
290 
291 	int		sc_align_tweak;
292 
293 	/*
294 	 * Software state for the transmit and receive descriptors.
295 	 */
296 	int			sc_txnum;	/* must be a power of two */
297 	struct wm_txsoft	sc_txsoft[WM_TXQUEUELEN_MAX];
298 	struct wm_rxsoft	sc_rxsoft[WM_NRXDESC];
299 
300 	/*
301 	 * Control data structures.
302 	 */
303 	int			sc_ntxdesc;	/* must be a power of two */
304 	struct wm_control_data_82544 *sc_control_data;
305 #define	sc_txdescs	sc_control_data->wcd_txdescs
306 #define	sc_rxdescs	sc_control_data->wcd_rxdescs
307 
308 #ifdef WM_EVENT_COUNTERS
309 	/* Event counters. */
310 	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
311 	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
312 	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
313 	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
314 	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
315 	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
316 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
317 
318 	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
319 	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
320 	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
321 	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
322 	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
323 	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
324 	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
325 	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */
326 
327 	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
328 	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */
329 
330 	struct evcnt sc_ev_tu;		/* Tx underrun */
331 
332 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
333 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
334 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
335 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
336 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
337 #endif /* WM_EVENT_COUNTERS */
338 
339 	bus_addr_t sc_tdt_reg;		/* offset of TDT register */
340 
341 	int	sc_txfree;		/* number of free Tx descriptors */
342 	int	sc_txnext;		/* next ready Tx descriptor */
343 
344 	int	sc_txsfree;		/* number of free Tx jobs */
345 	int	sc_txsnext;		/* next free Tx job */
346 	int	sc_txsdirty;		/* dirty Tx jobs */
347 
348 	/* These 5 variables are used only on the 82547. */
349 	int	sc_txfifo_size;		/* Tx FIFO size */
350 	int	sc_txfifo_head;		/* current head of FIFO */
351 	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
352 	int	sc_txfifo_stall;	/* Tx FIFO is stalled */
353 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
354 
355 	bus_addr_t sc_rdt_reg;		/* offset of RDT register */
356 
357 	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
358 	int	sc_rxdiscard;
359 	int	sc_rxlen;
360 	struct mbuf *sc_rxhead;
361 	struct mbuf *sc_rxtail;
362 	struct mbuf **sc_rxtailp;
363 
364 	uint32_t sc_ctrl;		/* prototype CTRL register */
365 #if 0
366 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
367 #endif
368 	uint32_t sc_icr;		/* prototype interrupt bits */
369 	uint32_t sc_itr;		/* prototype intr throttling reg */
370 	uint32_t sc_tctl;		/* prototype TCTL register */
371 	uint32_t sc_rctl;		/* prototype RCTL register */
372 	uint32_t sc_txcw;		/* prototype TXCW register */
373 	uint32_t sc_tipg;		/* prototype TIPG register */
374 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
375 	uint32_t sc_pba;		/* prototype PBA register */
376 
377 	int sc_tbi_linkup;		/* TBI link status */
378 	int sc_tbi_anstate;		/* autonegotiation state */
379 
380 	int sc_mchash_type;		/* multicast filter offset */
381 
382 #if NRND > 0
383 	rndsource_element_t rnd_source;	/* random source */
384 #endif
385 	int sc_ich8_flash_base;
386 	int sc_ich8_flash_bank_size;
387 };
388 
389 #define	WM_RXCHAIN_RESET(sc)						\
390 do {									\
391 	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
392 	*(sc)->sc_rxtailp = NULL;					\
393 	(sc)->sc_rxlen = 0;						\
394 } while (/*CONSTCOND*/0)
395 
396 #define	WM_RXCHAIN_LINK(sc, m)						\
397 do {									\
398 	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
399 	(sc)->sc_rxtailp = &(m)->m_next;				\
400 } while (/*CONSTCOND*/0)
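
/*
 * Sketch of the intended calling sequence for the two macros above
 * (mirrors wm_rxintr; m1/m2 are hypothetical): reset once per packet,
 * then link each buffer in arrival order.
 */
#if 0
static void
wm_rxchain_sketch(struct wm_softc *sc, struct mbuf *m1, struct mbuf *m2)
{

	WM_RXCHAIN_RESET(sc);		/* sc_rxhead == NULL, sc_rxlen == 0 */
	WM_RXCHAIN_LINK(sc, m1);	/* sc_rxhead == m1 */
	WM_RXCHAIN_LINK(sc, m2);	/* m1->m_next == m2 */
}
#endif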
401 
402 /* sc_flags */
403 #define	WM_F_HAS_MII		0x0001	/* has MII */
404 #define	WM_F_EEPROM_HANDSHAKE	0x0002	/* requires EEPROM handshake */
405 #define	WM_F_EEPROM_SEMAPHORE	0x0004	/* EEPROM with semaphore */
406 #define	WM_F_EEPROM_EERDEEWR	0x0008	/* EEPROM access via EERD/EEWR */
407 #define	WM_F_EEPROM_SPI		0x0010	/* EEPROM is SPI */
408 #define	WM_F_EEPROM_FLASH	0x0020	/* EEPROM is FLASH */
409 #define	WM_F_EEPROM_INVALID	0x0040	/* EEPROM not present (bad checksum) */
410 #define	WM_F_IOH_VALID		0x0080	/* I/O handle is valid */
411 #define	WM_F_BUS64		0x0100	/* bus is 64-bit */
412 #define	WM_F_PCIX		0x0200	/* bus is PCI-X */
413 #define	WM_F_CSA		0x0400	/* bus is CSA */
414 #define	WM_F_PCIE		0x0800	/* bus is PCI-Express */
415 #define WM_F_SWFW_SYNC		0x1000  /* Software-Firmware synchronisation */
416 #define WM_F_SWFWHW_SYNC	0x2000  /* Software-Firmware-Hardware synchronisation */
417 
418 #ifdef WM_EVENT_COUNTERS
419 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
420 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
421 #else
422 #define	WM_EVCNT_INCR(ev)	/* nothing */
423 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
424 #endif
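
/*
 * Usage sketch: because the macros compile to nothing without
 * WM_EVENT_COUNTERS, call sites never need their own #ifdefs.
 */
#if 0
static void
wm_evcnt_sketch(struct wm_softc *sc)
{

	WM_EVCNT_INCR(&sc->sc_ev_txdw);		/* one Tx descriptor intr */
	WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, 4);	/* four XOFF frames */
}
#endif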
425 
426 #define	CSR_READ(sc, reg)						\
427 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
428 #define	CSR_WRITE(sc, reg, val)						\
429 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
430 #define	CSR_WRITE_FLUSH(sc)						\
431 	(void) CSR_READ((sc), WMREG_STATUS)
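
/*
 * Sketch of the read-to-flush idiom behind CSR_WRITE_FLUSH() (loosely
 * modeled on the reset path; not compiled): PCI writes may be posted,
 * so a dummy read pushes them out to the chip before a timed delay.
 */
#if 0
static void
wm_flush_sketch(struct wm_softc *sc)
{

	CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
	CSR_WRITE_FLUSH(sc);		/* force the posted write out... */
	delay(10000);			/* ...before timing the recovery */
}
#endif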
432 
433 #define ICH8_FLASH_READ32(sc, reg) \
434 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
435 #define ICH8_FLASH_WRITE32(sc, reg, data) \
436 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
437 
438 #define ICH8_FLASH_READ16(sc, reg) \
439 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
440 #define ICH8_FLASH_WRITE16(sc, reg, data) \
441 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
442 
443 #define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
444 #define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))
445 
446 #define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
447 #define	WM_CDTXADDR_HI(sc, x)						\
448 	(sizeof(bus_addr_t) == 8 ?					\
449 	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)
450 
451 #define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
452 #define	WM_CDRXADDR_HI(sc, x)						\
453 	(sizeof(bus_addr_t) == 8 ?					\
454 	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
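
/*
 * Hedged example of the LO/HI split above (hypothetical base address
 * 0x123456000): descriptor bases are programmed as two 32-bit halves.
 */
#if 0
static void
wm_dma_hilo_sketch(struct wm_softc *sc)
{

	CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0)); /* 0x23456000 */
	CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0)); /* 0x00000001 */
}
#endif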
455 
456 #define	WM_CDTXSYNC(sc, x, n, ops)					\
457 do {									\
458 	int __x, __n;							\
459 									\
460 	__x = (x);							\
461 	__n = (n);							\
462 									\
463 	/* If it will wrap around, sync to the end of the ring. */	\
464 	if ((__x + __n) > WM_NTXDESC(sc)) {				\
465 		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
466 		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
467 		    (WM_NTXDESC(sc) - __x), (ops));			\
468 		__n -= (WM_NTXDESC(sc) - __x);				\
469 		__x = 0;						\
470 	}								\
471 									\
472 	/* Now sync whatever is left. */				\
473 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
474 	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
475 } while (/*CONSTCOND*/0)
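
/*
 * Worked example of the wrap-around handling above (hypothetical
 * indices on a 4096-descriptor ring): syncing 8 descriptors starting
 * at 4092 becomes two bus_dmamap_sync() calls.
 */
#if 0
static void
wm_txsync_wrap_sketch(struct wm_softc *sc)
{

	/* Syncs [4092..4095] first, then [0..3]. */
	WM_CDTXSYNC(sc, 4092, 8, BUS_DMASYNC_PREWRITE);
}
#endif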
476 
477 #define	WM_CDRXSYNC(sc, x, ops)						\
478 do {									\
479 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
480 	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
481 } while (/*CONSTCOND*/0)
482 
483 #define	WM_INIT_RXDESC(sc, x)						\
484 do {									\
485 	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
486 	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
487 	struct mbuf *__m = __rxs->rxs_mbuf;				\
488 									\
489 	/*								\
490 	 * Note: We scoot the packet forward 2 bytes in the buffer	\
491 	 * so that the payload after the Ethernet header is aligned	\
492 	 * to a 4-byte boundary.					\
493 	 *								\
494 	 * XXX BRAINDAMAGE ALERT!					\
495 	 * The stupid chip uses the same size for every buffer, which	\
496 	 * is set in the Receive Control register.  We are using the 2K	\
497 	 * size option, but what we REALLY want is (2K - 2)!  For this	\
498 	 * reason, we can't "scoot" packets longer than the standard	\
499 	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
500 	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
501 	 * the upper layer copy the headers.				\
502 	 */								\
503 	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
504 									\
505 	wm_set_dma_addr(&__rxd->wrx_addr,				\
506 	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
507 	__rxd->wrx_len = 0;						\
508 	__rxd->wrx_cksum = 0;						\
509 	__rxd->wrx_status = 0;						\
510 	__rxd->wrx_errors = 0;						\
511 	__rxd->wrx_special = 0;						\
512 	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
513 									\
514 	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
515 } while (/*CONSTCOND*/0)
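
/*
 * Sketch of the align_tweak arithmetic described above: with a 2-byte
 * tweak the 14-byte Ethernet header ends on a 4-byte boundary, so the
 * IP header that follows lands aligned.
 */
#if 0
static void
wm_align_tweak_sketch(struct wm_softc *sc)
{
	int tweak = sc->sc_align_tweak;		/* 0 or 2 */

	KASSERT(((tweak + ETHER_HDR_LEN) & 3) == (tweak ? 0 : 2));
}
#endif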
516 
517 static void	wm_start(struct ifnet *);
518 static void	wm_watchdog(struct ifnet *);
519 static int	wm_ioctl(struct ifnet *, u_long, void *);
520 static int	wm_init(struct ifnet *);
521 static void	wm_stop(struct ifnet *, int);
522 
523 static void	wm_reset(struct wm_softc *);
524 static void	wm_rxdrain(struct wm_softc *);
525 static int	wm_add_rxbuf(struct wm_softc *, int);
526 static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
527 static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
528 static int	wm_validate_eeprom_checksum(struct wm_softc *);
529 static void	wm_tick(void *);
530 
531 static void	wm_set_filter(struct wm_softc *);
532 
533 static int	wm_intr(void *);
534 static void	wm_txintr(struct wm_softc *);
535 static void	wm_rxintr(struct wm_softc *);
536 static void	wm_linkintr(struct wm_softc *, uint32_t);
537 
538 static void	wm_tbi_mediainit(struct wm_softc *);
539 static int	wm_tbi_mediachange(struct ifnet *);
540 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
541 
542 static void	wm_tbi_set_linkled(struct wm_softc *);
543 static void	wm_tbi_check_link(struct wm_softc *);
544 
545 static void	wm_gmii_reset(struct wm_softc *);
546 
547 static int	wm_gmii_i82543_readreg(device_t, int, int);
548 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
549 
550 static int	wm_gmii_i82544_readreg(device_t, int, int);
551 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
552 
553 static int	wm_gmii_i80003_readreg(device_t, int, int);
554 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
555 
556 static void	wm_gmii_statchg(device_t);
557 
558 static void	wm_gmii_mediainit(struct wm_softc *);
559 static int	wm_gmii_mediachange(struct ifnet *);
560 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
561 
562 static int	wm_kmrn_i80003_readreg(struct wm_softc *, int);
563 static void	wm_kmrn_i80003_writereg(struct wm_softc *, int, int);
564 
565 static int	wm_match(device_t, cfdata_t, void *);
566 static void	wm_attach(device_t, device_t, void *);
567 static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
568 static void	wm_get_auto_rd_done(struct wm_softc *);
569 static int	wm_get_swsm_semaphore(struct wm_softc *);
570 static void	wm_put_swsm_semaphore(struct wm_softc *);
571 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
572 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
573 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
574 static int	wm_get_swfwhw_semaphore(struct wm_softc *);
575 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
576 
577 static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
578 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
579 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
580 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
581 		     uint32_t, uint16_t *);
582 static int32_t	wm_read_ich8_word(struct wm_softc *sc, uint32_t, uint16_t *);
583 
584 CFATTACH_DECL_NEW(wm, sizeof(struct wm_softc),
585     wm_match, wm_attach, NULL, NULL);
586 
587 static void	wm_82547_txfifo_stall(void *);
588 
589 /*
590  * Devices supported by this driver.
591  */
592 static const struct wm_product {
593 	pci_vendor_id_t		wmp_vendor;
594 	pci_product_id_t	wmp_product;
595 	const char		*wmp_name;
596 	wm_chip_type		wmp_type;
597 	int			wmp_flags;
598 #define	WMP_F_1000X		0x01
599 #define	WMP_F_1000T		0x02
600 } wm_products[] = {
601 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
602 	  "Intel i82542 1000BASE-X Ethernet",
603 	  WM_T_82542_2_1,	WMP_F_1000X },
604 
605 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
606 	  "Intel i82543GC 1000BASE-X Ethernet",
607 	  WM_T_82543,		WMP_F_1000X },
608 
609 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
610 	  "Intel i82543GC 1000BASE-T Ethernet",
611 	  WM_T_82543,		WMP_F_1000T },
612 
613 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
614 	  "Intel i82544EI 1000BASE-T Ethernet",
615 	  WM_T_82544,		WMP_F_1000T },
616 
617 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
618 	  "Intel i82544EI 1000BASE-X Ethernet",
619 	  WM_T_82544,		WMP_F_1000X },
620 
621 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
622 	  "Intel i82544GC 1000BASE-T Ethernet",
623 	  WM_T_82544,		WMP_F_1000T },
624 
625 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
626 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
627 	  WM_T_82544,		WMP_F_1000T },
628 
629 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
630 	  "Intel i82540EM 1000BASE-T Ethernet",
631 	  WM_T_82540,		WMP_F_1000T },
632 
633 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
634 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
635 	  WM_T_82540,		WMP_F_1000T },
636 
637 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
638 	  "Intel i82540EP 1000BASE-T Ethernet",
639 	  WM_T_82540,		WMP_F_1000T },
640 
641 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
642 	  "Intel i82540EP 1000BASE-T Ethernet",
643 	  WM_T_82540,		WMP_F_1000T },
644 
645 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
646 	  "Intel i82540EP 1000BASE-T Ethernet",
647 	  WM_T_82540,		WMP_F_1000T },
648 
649 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
650 	  "Intel i82545EM 1000BASE-T Ethernet",
651 	  WM_T_82545,		WMP_F_1000T },
652 
653 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
654 	  "Intel i82545GM 1000BASE-T Ethernet",
655 	  WM_T_82545_3,		WMP_F_1000T },
656 
657 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
658 	  "Intel i82545GM 1000BASE-X Ethernet",
659 	  WM_T_82545_3,		WMP_F_1000X },
660 #if 0
661 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
662 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
663 	  WM_T_82545_3,		WMP_F_SERDES },
664 #endif
665 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
666 	  "Intel i82546EB 1000BASE-T Ethernet",
667 	  WM_T_82546,		WMP_F_1000T },
668 
669 	{ PCI_VENDOR_INTEL,     PCI_PRODUCT_INTEL_82546EB_QUAD,
670 	  "Intel i82546EB 1000BASE-T Ethernet",
671 	  WM_T_82546,		WMP_F_1000T },
672 
673 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
674 	  "Intel i82545EM 1000BASE-X Ethernet",
675 	  WM_T_82545,		WMP_F_1000X },
676 
677 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
678 	  "Intel i82546EB 1000BASE-X Ethernet",
679 	  WM_T_82546,		WMP_F_1000X },
680 
681 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
682 	  "Intel i82546GB 1000BASE-T Ethernet",
683 	  WM_T_82546_3,		WMP_F_1000T },
684 
685 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
686 	  "Intel i82546GB 1000BASE-X Ethernet",
687 	  WM_T_82546_3,		WMP_F_1000X },
688 #if 0
689 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
690 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
691 	  WM_T_82546_3,		WMP_F_SERDES },
692 #endif
693 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
694 	  "i82546GB quad-port Gigabit Ethernet",
695 	  WM_T_82546_3,		WMP_F_1000T },
696 
697 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
698 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
699 	  WM_T_82546_3,		WMP_F_1000T },
700 
701 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
702 	  "Intel PRO/1000MT (82546GB)",
703 	  WM_T_82546_3,		WMP_F_1000T },
704 
705 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
706 	  "Intel i82541EI 1000BASE-T Ethernet",
707 	  WM_T_82541,		WMP_F_1000T },
708 
709 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
710 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
711 	  WM_T_82541,		WMP_F_1000T },
712 
713 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
714 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
715 	  WM_T_82541,		WMP_F_1000T },
716 
717 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
718 	  "Intel i82541ER 1000BASE-T Ethernet",
719 	  WM_T_82541_2,		WMP_F_1000T },
720 
721 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
722 	  "Intel i82541GI 1000BASE-T Ethernet",
723 	  WM_T_82541_2,		WMP_F_1000T },
724 
725 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
726 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
727 	  WM_T_82541_2,		WMP_F_1000T },
728 
729 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
730 	  "Intel i82541PI 1000BASE-T Ethernet",
731 	  WM_T_82541_2,		WMP_F_1000T },
732 
733 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
734 	  "Intel i82547EI 1000BASE-T Ethernet",
735 	  WM_T_82547,		WMP_F_1000T },
736 
737 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
738 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
739 	  WM_T_82547,		WMP_F_1000T },
740 
741 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
742 	  "Intel i82547GI 1000BASE-T Ethernet",
743 	  WM_T_82547_2,		WMP_F_1000T },
744 
745 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
746 	  "Intel PRO/1000 PT (82571EB)",
747 	  WM_T_82571,		WMP_F_1000T },
748 
749 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
750 	  "Intel PRO/1000 PF (82571EB)",
751 	  WM_T_82571,		WMP_F_1000X },
752 #if 0
753 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
754 	  "Intel PRO/1000 PB (82571EB)",
755 	  WM_T_82571,		WMP_F_SERDES },
756 #endif
757 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
758 	  "Intel PRO/1000 QT (82571EB)",
759 	  WM_T_82571,		WMP_F_1000T },
760 
761 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
762 	  "Intel i82572EI 1000baseT Ethernet",
763 	  WM_T_82572,		WMP_F_1000T },
764 
765 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
766 	  "Intel PRO/1000 PT Quad Port Server Adapter",
767 	  WM_T_82571,		WMP_F_1000T },
768 
769 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
770 	  "Intel i82572EI 1000baseX Ethernet",
771 	  WM_T_82572,		WMP_F_1000X },
772 #if 0
773 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
774 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
775 	  WM_T_82572,		WMP_F_SERDES },
776 #endif
777 
778 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
779 	  "Intel i82572EI 1000baseT Ethernet",
780 	  WM_T_82572,		WMP_F_1000T },
781 
782 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
783 	  "Intel i82573E",
784 	  WM_T_82573,		WMP_F_1000T },
785 
786 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
787 	  "Intel i82573E IAMT",
788 	  WM_T_82573,		WMP_F_1000T },
789 
790 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
791 	  "Intel i82573L Gigabit Ethernet",
792 	  WM_T_82573,		WMP_F_1000T },
793 
794 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
795 	  "i80003 dual 1000baseT Ethernet",
796 	  WM_T_80003,		WMP_F_1000T },
797 
798 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
799 	  "i80003 dual 1000baseX Ethernet",
800 	  WM_T_80003,		WMP_F_1000T },
801 #if 0
802 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
803 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
804 	  WM_T_80003,		WMP_F_SERDES },
805 #endif
806 
807 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
808 	  "Intel i80003 1000baseT Ethernet",
809 	  WM_T_80003,		WMP_F_1000T },
810 #if 0
811 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
812 	  "Intel i80003 Gigabit Ethernet (SERDES)",
813 	  WM_T_80003,		WMP_F_SERDES },
814 #endif
815 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
816 	  "Intel i82801H (M_AMT) LAN Controller",
817 	  WM_T_ICH8,		WMP_F_1000T },
818 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
819 	  "Intel i82801H (AMT) LAN Controller",
820 	  WM_T_ICH8,		WMP_F_1000T },
821 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
822 	  "Intel i82801H LAN Controller",
823 	  WM_T_ICH8,		WMP_F_1000T },
824 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
825 	  "Intel i82801H (IFE) LAN Controller",
826 	  WM_T_ICH8,		WMP_F_1000T },
827 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
828 	  "Intel i82801H (M) LAN Controller",
829 	  WM_T_ICH8,		WMP_F_1000T },
830 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
831 	  "Intel i82801H IFE (GT) LAN Controller",
832 	  WM_T_ICH8,		WMP_F_1000T },
833 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
834 	  "Intel i82801H IFE (G) LAN Controller",
835 	  WM_T_ICH8,		WMP_F_1000T },
836 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
837 	  "82801I (AMT) LAN Controller",
838 	  WM_T_ICH9,		WMP_F_1000T },
839 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
840 	  "82801I LAN Controller",
841 	  WM_T_ICH9,		WMP_F_1000T },
842 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
843 	  "82801I (G) LAN Controller",
844 	  WM_T_ICH9,		WMP_F_1000T },
845 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
846 	  "82801I (GT) LAN Controller",
847 	  WM_T_ICH9,		WMP_F_1000T },
848 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
849 	  "82801I (C) LAN Controller",
850 	  WM_T_ICH9,		WMP_F_1000T },
851 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
852 	  "82801I mobile LAN Controller",
853 	  WM_T_ICH9,		WMP_F_1000T },
854 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
855 	  "82801I mobile (V) LAN Controller",
856 	  WM_T_ICH9,		WMP_F_1000T },
857 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
858 	  "82801I mobile (AMT) LAN Controller",
859 	  WM_T_ICH9,		WMP_F_1000T },
860 	{ 0,			0,
861 	  NULL,
862 	  0,			0 },
863 };
864 
865 #ifdef WM_EVENT_COUNTERS
866 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
867 #endif /* WM_EVENT_COUNTERS */
868 
869 #if 0 /* Not currently used */
870 static inline uint32_t
871 wm_io_read(struct wm_softc *sc, int reg)
872 {
873 
874 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
875 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
876 }
877 #endif
878 
879 static inline void
880 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
881 {
882 
883 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
884 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
885 }
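
/*
 * Usage sketch (assumed workaround context): offset 0 of the I/O BAR
 * selects a CSR, offset 4 moves the data, so one call writes any
 * register indirectly.
 */
#if 0
static void
wm_io_write_sketch(struct wm_softc *sc)
{

	wm_io_write(sc, WMREG_CTRL, sc->sc_ctrl);
}
#endif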
886 
887 static inline void
888 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
889 {
890 	wa->wa_low = htole32(v & 0xffffffffU);
891 	if (sizeof(bus_addr_t) == 8)
892 		wa->wa_high = htole32((uint64_t) v >> 32);
893 	else
894 		wa->wa_high = 0;
895 }
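
/*
 * Sketch of wm_set_dma_addr() on a hypothetical 32-bit address:
 * descriptor fields are little-endian on the wire, so htole32()
 * byte-swaps only on big-endian hosts.
 */
#if 0
static void
wm_set_dma_addr_sketch(void)
{
	wiseman_addr_t wa;

	wm_set_dma_addr(&wa, (bus_addr_t)0x12345678);
	/* wa.wa_low == htole32(0x12345678), wa.wa_high == 0 */
}
#endif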
896 
897 static const struct wm_product *
898 wm_lookup(const struct pci_attach_args *pa)
899 {
900 	const struct wm_product *wmp;
901 
902 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
903 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
904 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
905 			return (wmp);
906 	}
907 	return (NULL);
908 }
909 
910 static int
911 wm_match(device_t parent, cfdata_t cf, void *aux)
912 {
913 	struct pci_attach_args *pa = aux;
914 
915 	if (wm_lookup(pa) != NULL)
916 		return (1);
917 
918 	return (0);
919 }
920 
921 static void
922 wm_attach(device_t parent, device_t self, void *aux)
923 {
924 	struct wm_softc *sc = device_private(self);
925 	struct pci_attach_args *pa = aux;
926 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
927 	pci_chipset_tag_t pc = pa->pa_pc;
928 	pci_intr_handle_t ih;
929 	size_t cdata_size;
930 	const char *intrstr = NULL;
931 	const char *eetype, *xname;
932 	bus_space_tag_t memt;
933 	bus_space_handle_t memh;
934 	bus_dma_segment_t seg;
935 	int memh_valid;
936 	int i, rseg, error;
937 	const struct wm_product *wmp;
938 	prop_data_t ea;
939 	prop_number_t pn;
940 	uint8_t enaddr[ETHER_ADDR_LEN];
941 	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
942 	pcireg_t preg, memtype;
943 	uint32_t reg;
944 
945 	sc->sc_dev = self;
946 	callout_init(&sc->sc_tick_ch, 0);
947 
948 	wmp = wm_lookup(pa);
949 	if (wmp == NULL) {
950 		printf("\n");
951 		panic("wm_attach: impossible");
952 	}
953 
954 	sc->sc_pc = pa->pa_pc;
955 	sc->sc_pcitag = pa->pa_tag;
956 
957 	if (pci_dma64_available(pa))
958 		sc->sc_dmat = pa->pa_dmat64;
959 	else
960 		sc->sc_dmat = pa->pa_dmat;
961 
962 	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
963 	aprint_naive(": Ethernet controller\n");
964 	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);
965 
966 	sc->sc_type = wmp->wmp_type;
967 	if (sc->sc_type < WM_T_82543) {
968 		if (preg < 2) {
969 			aprint_error_dev(sc->sc_dev,
970 			    "i82542 must be at least rev. 2\n");
971 			return;
972 		}
973 		if (preg < 3)
974 			sc->sc_type = WM_T_82542_2_0;
975 	}
976 
977 	/*
978 	 * Map the device.  All devices support memory-mapped access,
979 	 * and it is really required for normal operation.
980 	 */
981 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
982 	switch (memtype) {
983 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
984 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
985 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
986 		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
987 		break;
988 	default:
989 		memh_valid = 0;
990 	}
991 
992 	if (memh_valid) {
993 		sc->sc_st = memt;
994 		sc->sc_sh = memh;
995 	} else {
996 		aprint_error_dev(sc->sc_dev,
997 		    "unable to map device registers\n");
998 		return;
999 	}
1000 
1001 	/*
1002 	 * In addition, i82544 and later support I/O mapped indirect
1003 	 * register access.  It is not desirable (nor supported in
1004 	 * this driver) to use it for normal operation, though it is
1005 	 * required to work around bugs in some chip versions.
1006 	 */
1007 	if (sc->sc_type >= WM_T_82544) {
1008 		/* First we have to find the I/O BAR. */
1009 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1010 			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
1011 			    PCI_MAPREG_TYPE_IO)
1012 				break;
1013 		}
1014 		if (i == PCI_MAPREG_END)
1015 			aprint_error_dev(sc->sc_dev,
1016 			    "WARNING: unable to find I/O BAR\n");
1017 		else {
1018 			/*
1019 			 * The i8254x apparently doesn't respond when the
1020 			 * I/O BAR is 0, which looks somewhat like it hasn't
1021 			 * been configured.
1022 			 */
1023 			preg = pci_conf_read(pc, pa->pa_tag, i);
1024 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1025 				aprint_error_dev(sc->sc_dev,
1026 				    "WARNING: I/O BAR at zero.\n");
1027 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1028 					0, &sc->sc_iot, &sc->sc_ioh,
1029 					NULL, NULL) == 0) {
1030 				sc->sc_flags |= WM_F_IOH_VALID;
1031 			} else {
1032 				aprint_error_dev(sc->sc_dev,
1033 				    "WARNING: unable to map I/O space\n");
1034 			}
1035 		}
1036 
1037 	}
1038 
1039 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
1040 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1041 	preg |= PCI_COMMAND_MASTER_ENABLE;
1042 	if (sc->sc_type < WM_T_82542_2_1)
1043 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1044 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1045 
1046 	/* power up chip */
1047 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1048 	    NULL)) && error != EOPNOTSUPP) {
1049 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1050 		return;
1051 	}
1052 
1053 	/*
1054 	 * Map and establish our interrupt.
1055 	 */
1056 	if (pci_intr_map(pa, &ih)) {
1057 		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
1058 		return;
1059 	}
1060 	intrstr = pci_intr_string(pc, ih);
1061 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
1062 	if (sc->sc_ih == NULL) {
1063 		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
1064 		if (intrstr != NULL)
1065 			aprint_normal(" at %s", intrstr);
1066 		aprint_normal("\n");
1067 		return;
1068 	}
1069 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
1070 
1071 	/*
1072 	 * Determine a few things about the bus we're connected to.
1073 	 */
1074 	if (sc->sc_type < WM_T_82543) {
1075 		/* We don't really know the bus characteristics here. */
1076 		sc->sc_bus_speed = 33;
1077 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1078 		/*
1079 		 * CSA (Communication Streaming Architecture) is about as
1080 		 * fast as a 32-bit 66MHz PCI bus.
1081 		 */
1082 		sc->sc_flags |= WM_F_CSA;
1083 		sc->sc_bus_speed = 66;
1084 		aprint_verbose_dev(sc->sc_dev,
1085 		    "Communication Streaming Architecture\n");
1086 		if (sc->sc_type == WM_T_82547) {
1087 			callout_init(&sc->sc_txfifo_ch, 0);
1088 			callout_setfunc(&sc->sc_txfifo_ch,
1089 					wm_82547_txfifo_stall, sc);
1090 			aprint_verbose_dev(sc->sc_dev,
1091 			    "using 82547 Tx FIFO stall work-around\n");
1092 		}
1093 	} else if (sc->sc_type >= WM_T_82571) {
1094 		sc->sc_flags |= WM_F_PCIE;
1095 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9))
1096 			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
1097 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1098 	} else {
1099 		reg = CSR_READ(sc, WMREG_STATUS);
1100 		if (reg & STATUS_BUS64)
1101 			sc->sc_flags |= WM_F_BUS64;
1102 		if (sc->sc_type >= WM_T_82544 &&
1103 		    (reg & STATUS_PCIX_MODE) != 0) {
1104 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1105 
1106 			sc->sc_flags |= WM_F_PCIX;
1107 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1108 					       PCI_CAP_PCIX,
1109 					       &sc->sc_pcix_offset, NULL) == 0)
1110 				aprint_error_dev(sc->sc_dev,
1111 				    "unable to find PCIX capability\n");
1112 			else if (sc->sc_type != WM_T_82545_3 &&
1113 				 sc->sc_type != WM_T_82546_3) {
1114 				/*
1115 				 * Work around a problem caused by the BIOS
1116 				 * setting the max memory read byte count
1117 				 * incorrectly.
1118 				 */
1119 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1120 				    sc->sc_pcix_offset + PCI_PCIX_CMD);
1121 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1122 				    sc->sc_pcix_offset + PCI_PCIX_STATUS);
1123 
1124 				bytecnt =
1125 				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
1126 				    PCI_PCIX_CMD_BYTECNT_SHIFT;
1127 				maxb =
1128 				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
1129 				    PCI_PCIX_STATUS_MAXB_SHIFT;
1130 				if (bytecnt > maxb) {
1131 					aprint_verbose_dev(sc->sc_dev,
1132 					    "resetting PCI-X MMRBC: %d -> %d\n",
1133 					    512 << bytecnt, 512 << maxb);
1134 					pcix_cmd = (pcix_cmd &
1135 					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
1136 					   (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
1137 					pci_conf_write(pa->pa_pc, pa->pa_tag,
1138 					    sc->sc_pcix_offset + PCI_PCIX_CMD,
1139 					    pcix_cmd);
1140 				}
1141 			}
1142 		}
1143 		/*
1144 		 * The quad port adapter is special; it has a PCIX-PCIX
1145 		 * bridge on the board, and can run the secondary bus at
1146 		 * a higher speed.
1147 		 */
1148 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1149 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1150 								      : 66;
1151 		} else if (sc->sc_flags & WM_F_PCIX) {
1152 			switch (reg & STATUS_PCIXSPD_MASK) {
1153 			case STATUS_PCIXSPD_50_66:
1154 				sc->sc_bus_speed = 66;
1155 				break;
1156 			case STATUS_PCIXSPD_66_100:
1157 				sc->sc_bus_speed = 100;
1158 				break;
1159 			case STATUS_PCIXSPD_100_133:
1160 				sc->sc_bus_speed = 133;
1161 				break;
1162 			default:
1163 				aprint_error_dev(sc->sc_dev,
1164 				    "unknown PCIXSPD %d; assuming 66MHz\n",
1165 				    reg & STATUS_PCIXSPD_MASK);
1166 				sc->sc_bus_speed = 66;
1167 			}
1168 		} else
1169 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1170 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1171 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1172 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1173 	}
1174 
1175 	/*
1176 	 * Allocate the control data structures, and create and load the
1177 	 * DMA map for it.
1178 	 *
1179 	 * NOTE: All Tx descriptors must be in the same 4G segment of
1180 	 * memory.  So must Rx descriptors.  We simplify by allocating
1181 	 * both sets within the same 4G segment.
1182 	 */
1183 	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1184 	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
1185 	cdata_size = sc->sc_type < WM_T_82544 ?
1186 	    sizeof(struct wm_control_data_82542) :
1187 	    sizeof(struct wm_control_data_82544);
1188 	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE,
1189 				      (bus_size_t) 0x100000000ULL,
1190 				      &seg, 1, &rseg, 0)) != 0) {
1191 		aprint_error_dev(sc->sc_dev,
1192 		    "unable to allocate control data, error = %d\n",
1193 		    error);
1194 		goto fail_0;
1195 	}
1196 
1197 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size,
1198 				    (void **)&sc->sc_control_data,
1199 				    BUS_DMA_COHERENT)) != 0) {
1200 		aprint_error_dev(sc->sc_dev,
1201 		    "unable to map control data, error = %d\n", error);
1202 		goto fail_1;
1203 	}
1204 
1205 	if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size,
1206 				       0, 0, &sc->sc_cddmamap)) != 0) {
1207 		aprint_error_dev(sc->sc_dev,
1208 		    "unable to create control data DMA map, error = %d\n",
1209 		    error);
1210 		goto fail_2;
1211 	}
1212 
1213 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1214 				     sc->sc_control_data, cdata_size, NULL,
1215 				     0)) != 0) {
1216 		aprint_error_dev(sc->sc_dev,
1217 		    "unable to load control data DMA map, error = %d\n",
1218 		    error);
1219 		goto fail_3;
1220 	}
1221 
1222 
1223 	/*
1224 	 * Create the transmit buffer DMA maps.
1225 	 */
1226 	WM_TXQUEUELEN(sc) =
1227 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1228 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1229 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1230 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1231 					       WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1232 					  &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1233 			aprint_error_dev(sc->sc_dev,
1234 			    "unable to create Tx DMA map %d, error = %d\n",
1235 			    i, error);
1236 			goto fail_4;
1237 		}
1238 	}
1239 
1240 	/*
1241 	 * Create the receive buffer DMA maps.
1242 	 */
1243 	for (i = 0; i < WM_NRXDESC; i++) {
1244 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1245 					       MCLBYTES, 0, 0,
1246 					  &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1247 			aprint_error_dev(sc->sc_dev,
1248 			    "unable to create Rx DMA map %d error = %d\n",
1249 			    i, error);
1250 			goto fail_5;
1251 		}
1252 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
1253 	}
1254 
1255 	/* clear interesting stat counters */
1256 	CSR_READ(sc, WMREG_COLC);
1257 	CSR_READ(sc, WMREG_RXERRC);
1258 
1259 	/*
1260 	 * Reset the chip to a known state.
1261 	 */
1262 	wm_reset(sc);
1263 
1264 	/*
1265 	 * Get some information about the EEPROM.
1266 	 */
1267 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)) {
1268 		uint32_t flash_size;
1269 		sc->sc_flags |= WM_F_SWFWHW_SYNC | WM_F_EEPROM_FLASH;
1270 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1271 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1272 		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1273 			aprint_error_dev(sc->sc_dev,
1274 			    "can't map FLASH registers\n");
1275 			return;
1276 		}
1277 		flash_size = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1278 		sc->sc_ich8_flash_base = (flash_size & ICH_GFPREG_BASE_MASK) *
1279 						ICH_FLASH_SECTOR_SIZE;
1280 		sc->sc_ich8_flash_bank_size =
1281 			((flash_size >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1282 		sc->sc_ich8_flash_bank_size -=
1283 			(flash_size & ICH_GFPREG_BASE_MASK);
1284 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1285 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
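
		/*
		 * Worked decode of the arithmetic above, assuming a
		 * hypothetical GFPREG of 0x001f0001: base field 0x0001
		 * puts flash_base at 0x1000 bytes; the region spans
		 * 0x001f + 1 - 0x0001 == 31 sectors, and 31 * 4096
		 * bytes split over 2 banks of 2-byte words leaves
		 * 31744 words per bank.
		 */
#if 0
		KASSERT(((0x001f + 1 - 0x0001) * ICH_FLASH_SECTOR_SIZE)
		    / (2 * sizeof(uint16_t)) == 31744);
#endif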
1286 	} else if (sc->sc_type == WM_T_80003)
1287 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR |  WM_F_SWFW_SYNC;
1288 	else if (sc->sc_type == WM_T_82573)
1289 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1290 	else if (sc->sc_type > WM_T_82544)
1291 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1292 
1293 	if (sc->sc_type <= WM_T_82544)
1294 		sc->sc_ee_addrbits = 6;
1295 	else if (sc->sc_type <= WM_T_82546_3) {
1296 		reg = CSR_READ(sc, WMREG_EECD);
1297 		if (reg & EECD_EE_SIZE)
1298 			sc->sc_ee_addrbits = 8;
1299 		else
1300 			sc->sc_ee_addrbits = 6;
1301 	} else if (sc->sc_type <= WM_T_82547_2) {
1302 		reg = CSR_READ(sc, WMREG_EECD);
1303 		if (reg & EECD_EE_TYPE) {
1304 			sc->sc_flags |= WM_F_EEPROM_SPI;
1305 			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
1306 		} else
1307 			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
1308 	} else if ((sc->sc_type == WM_T_82573) &&
1309 	    (wm_is_onboard_nvm_eeprom(sc) == 0)) {
1310 		sc->sc_flags |= WM_F_EEPROM_FLASH;
1311 	} else {
1312 		/* Assume everything else is SPI. */
1313 		reg = CSR_READ(sc, WMREG_EECD);
1314 		sc->sc_flags |= WM_F_EEPROM_SPI;
1315 		sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
1316 	}
1317 
1318 	/*
1319 	 * Defer printing the EEPROM type until after verifying the checksum.
1320 	 * This allows the EEPROM type to be printed correctly in the case
1321 	 * that no EEPROM is attached.
1322 	 */
1323 
1324 
1325 	/*
1326 	 * Validate the EEPROM checksum. If the checksum fails, flag this for
1327 	 * later, so we can fail future reads from the EEPROM.
1328 	 */
1329 	if (wm_validate_eeprom_checksum(sc))
1330 		sc->sc_flags |= WM_F_EEPROM_INVALID;
1331 
1332 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
1333 		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1334 	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1335 		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1336 	} else {
1337 		if (sc->sc_flags & WM_F_EEPROM_SPI)
1338 			eetype = "SPI";
1339 		else
1340 			eetype = "MicroWire";
1341 		aprint_verbose_dev(sc->sc_dev,
1342 		    "%u word (%d address bits) %s EEPROM\n",
1343 		    1U << sc->sc_ee_addrbits,
1344 		    sc->sc_ee_addrbits, eetype);
1345 	}
1346 
1347 	/*
1348 	 * Read the Ethernet address from the EEPROM, if not first found
1349 	 * in device properties.
1350 	 */
1351 	ea = prop_dictionary_get(device_properties(sc->sc_dev), "mac-addr");
1352 	if (ea != NULL) {
1353 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1354 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1355 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1356 	} else {
1357 		if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
1358 		    sizeof(myea) / sizeof(myea[0]), myea)) {
1359 			aprint_error_dev(sc->sc_dev,
1360 			    "unable to read Ethernet address\n");
1361 			return;
1362 		}
1363 		enaddr[0] = myea[0] & 0xff;
1364 		enaddr[1] = myea[0] >> 8;
1365 		enaddr[2] = myea[1] & 0xff;
1366 		enaddr[3] = myea[1] >> 8;
1367 		enaddr[4] = myea[2] & 0xff;
1368 		enaddr[5] = myea[2] >> 8;
1369 	}
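
	/*
	 * Hedged example of the unpacking above: EEPROM words are
	 * little-endian, so a hypothetical myea[] of { 0x1100, 0x3322,
	 * 0x5544 } yields the address 00:11:22:33:44:55.
	 */
#if 0
	KASSERT((0x1100 & 0xff) == 0x00 && (0x1100 >> 8) == 0x11);
#endif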
1370 
1371 	/*
1372 	 * Toggle the LSB of the MAC address on the second port
1373 	 * of the dual port controller.
1374 	 */
1375 	if (sc->sc_type == WM_T_82546 || sc->sc_type == WM_T_82546_3
1376 	    || sc->sc_type == WM_T_82571 || sc->sc_type == WM_T_80003) {
1377 		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
1378 			enaddr[5] ^= 1;
1379 	}
1380 
1381 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1382 	    ether_sprintf(enaddr));
1383 
1384 	/*
1385 	 * Read the config info from the EEPROM, and set up various
1386 	 * bits in the control registers based on their contents.
1387 	 */
1388 	pn = prop_dictionary_get(device_properties(sc->sc_dev),
1389 				 "i82543-cfg1");
1390 	if (pn != NULL) {
1391 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1392 		cfg1 = (uint16_t) prop_number_integer_value(pn);
1393 	} else {
1394 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1395 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1396 			return;
1397 		}
1398 	}
1399 
1400 	pn = prop_dictionary_get(device_properties(sc->sc_dev),
1401 				 "i82543-cfg2");
1402 	if (pn != NULL) {
1403 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1404 		cfg2 = (uint16_t) prop_number_integer_value(pn);
1405 	} else {
1406 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1407 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1408 			return;
1409 		}
1410 	}
1411 
1412 	if (sc->sc_type >= WM_T_82544) {
1413 		pn = prop_dictionary_get(device_properties(sc->sc_dev),
1414 					 "i82543-swdpin");
1415 		if (pn != NULL) {
1416 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1417 			swdpin = (uint16_t) prop_number_integer_value(pn);
1418 		} else {
1419 			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1420 				aprint_error_dev(sc->sc_dev,
1421 				    "unable to read SWDPIN\n");
1422 				return;
1423 			}
1424 		}
1425 	}
1426 
1427 	if (cfg1 & EEPROM_CFG1_ILOS)
1428 		sc->sc_ctrl |= CTRL_ILOS;
1429 	if (sc->sc_type >= WM_T_82544) {
1430 		sc->sc_ctrl |=
1431 		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1432 		    CTRL_SWDPIO_SHIFT;
1433 		sc->sc_ctrl |=
1434 		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1435 		    CTRL_SWDPINS_SHIFT;
1436 	} else {
1437 		sc->sc_ctrl |=
1438 		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1439 		    CTRL_SWDPIO_SHIFT;
1440 	}
1441 
1442 #if 0
1443 	if (sc->sc_type >= WM_T_82544) {
1444 		if (cfg1 & EEPROM_CFG1_IPS0)
1445 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1446 		if (cfg1 & EEPROM_CFG1_IPS1)
1447 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1448 		sc->sc_ctrl_ext |=
1449 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1450 		    CTRL_EXT_SWDPIO_SHIFT;
1451 		sc->sc_ctrl_ext |=
1452 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1453 		    CTRL_EXT_SWDPINS_SHIFT;
1454 	} else {
1455 		sc->sc_ctrl_ext |=
1456 		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1457 		    CTRL_EXT_SWDPIO_SHIFT;
1458 	}
1459 #endif
1460 
1461 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1462 #if 0
1463 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1464 #endif
1465 
1466 	/*
1467 	 * Set up some register offsets that are different between
1468 	 * the i82542 and the i82543 and later chips.
1469 	 */
1470 	if (sc->sc_type < WM_T_82543) {
1471 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
1472 		sc->sc_tdt_reg = WMREG_OLD_TDT;
1473 	} else {
1474 		sc->sc_rdt_reg = WMREG_RDT;
1475 		sc->sc_tdt_reg = WMREG_TDT;
1476 	}
1477 
1478 	/*
1479 	 * Determine if we're TBI or GMII mode, and initialize the
1480 	 * media structures accordingly.
1481 	 */
1482 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1483 	    || sc->sc_type == WM_T_82573) {
1484 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
1485 		wm_gmii_mediainit(sc);
1486 	} else if (sc->sc_type < WM_T_82543 ||
1487 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1488 		if (wmp->wmp_flags & WMP_F_1000T)
1489 			aprint_error_dev(sc->sc_dev,
1490 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
1491 		wm_tbi_mediainit(sc);
1492 	} else {
1493 		if (wmp->wmp_flags & WMP_F_1000X)
1494 			aprint_error_dev(sc->sc_dev,
1495 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
1496 		wm_gmii_mediainit(sc);
1497 	}
1498 
1499 	ifp = &sc->sc_ethercom.ec_if;
1500 	xname = device_xname(sc->sc_dev);
1501 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
1502 	ifp->if_softc = sc;
1503 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1504 	ifp->if_ioctl = wm_ioctl;
1505 	ifp->if_start = wm_start;
1506 	ifp->if_watchdog = wm_watchdog;
1507 	ifp->if_init = wm_init;
1508 	ifp->if_stop = wm_stop;
1509 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1510 	IFQ_SET_READY(&ifp->if_snd);
1511 
1512 	if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_ICH8)
1513 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1514 
1515 	/*
1516 	 * If we're an i82543 or greater, we can support VLANs.
1517 	 */
1518 	if (sc->sc_type >= WM_T_82543)
1519 		sc->sc_ethercom.ec_capabilities |=
1520 		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;
1521 
1522 	/*
1523 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
1524 	 * on i82543 and later.
1525 	 */
1526 	if (sc->sc_type >= WM_T_82543) {
1527 		ifp->if_capabilities |=
1528 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1529 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1530 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1531 		    IFCAP_CSUM_TCPv6_Tx |
1532 		    IFCAP_CSUM_UDPv6_Tx;
1533 	}
1534 
1535 	/*
1536 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
1537 	 *
1538 	 *	82541GI (8086:1076) ... no
1539 	 *	82572EI (8086:10b9) ... yes
1540 	 */
1541 	if (sc->sc_type >= WM_T_82571) {
1542 		ifp->if_capabilities |=
1543 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1544 	}
1545 
1546 	/*
1547 	 * If we're an i82544 or greater (except i82547), we can do
1548 	 * TCP segmentation offload.
1549 	 */
1550 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
1551 		ifp->if_capabilities |= IFCAP_TSOv4;
1552 	}
1553 
1554 	if (sc->sc_type >= WM_T_82571) {
1555 		ifp->if_capabilities |= IFCAP_TSOv6;
1556 	}
1557 
1558 	/*
1559 	 * Attach the interface.
1560 	 */
1561 	if_attach(ifp);
1562 	ether_ifattach(ifp, enaddr);
1563 #if NRND > 0
1564 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
1565 #endif
1566 
1567 #ifdef WM_EVENT_COUNTERS
1568 	/* Attach event counters. */
1569 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1570 	    NULL, xname, "txsstall");
1571 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1572 	    NULL, xname, "txdstall");
1573 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1574 	    NULL, xname, "txfifo_stall");
1575 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1576 	    NULL, xname, "txdw");
1577 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1578 	    NULL, xname, "txqe");
1579 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1580 	    NULL, xname, "rxintr");
1581 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
1582 	    NULL, xname, "linkintr");
1583 
1584 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1585 	    NULL, xname, "rxipsum");
1586 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
1587 	    NULL, xname, "rxtusum");
1588 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1589 	    NULL, xname, "txipsum");
1590 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
1591 	    NULL, xname, "txtusum");
1592 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
1593 	    NULL, xname, "txtusum6");
1594 
1595 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
1596 	    NULL, xname, "txtso");
1597 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
1598 	    NULL, xname, "txtso6");
1599 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
1600 	    NULL, xname, "txtsopain");
1601 
1602 	for (i = 0; i < WM_NTXSEGS; i++) {
1603 		sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
1604 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
1605 		    NULL, xname, wm_txseg_evcnt_names[i]);
1606 	}
1607 
1608 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
1609 	    NULL, xname, "txdrop");
1610 
1611 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
1612 	    NULL, xname, "tu");
1613 
1614 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
1615 	    NULL, xname, "tx_xoff");
1616 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
1617 	    NULL, xname, "tx_xon");
1618 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
1619 	    NULL, xname, "rx_xoff");
1620 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
1621 	    NULL, xname, "rx_xon");
1622 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
1623 	    NULL, xname, "rx_macctl");
1624 #endif /* WM_EVENT_COUNTERS */
1625 
1626 	if (!pmf_device_register(self, NULL, NULL))
1627 		aprint_error_dev(self, "couldn't establish power handler\n");
1628 	else
1629 		pmf_class_network_register(self, ifp);
1630 
1631 	return;
1632 
1633 	/*
1634 	 * Free any resources we've allocated during the failed attach
1635 	 * attempt.  Do this in reverse order and fall through.
1636 	 */
1637  fail_5:
1638 	for (i = 0; i < WM_NRXDESC; i++) {
1639 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
1640 			bus_dmamap_destroy(sc->sc_dmat,
1641 			    sc->sc_rxsoft[i].rxs_dmamap);
1642 	}
1643  fail_4:
1644 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1645 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
1646 			bus_dmamap_destroy(sc->sc_dmat,
1647 			    sc->sc_txsoft[i].txs_dmamap);
1648 	}
1649 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
1650  fail_3:
1651 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
1652  fail_2:
1653 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
1654 	    cdata_size);
1655  fail_1:
1656 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1657  fail_0:
1658 	return;
1659 }
1660 
1661 /*
1662  * wm_tx_offload:
1663  *
1664  *	Set up TCP/IP checksumming parameters for the
1665  *	specified packet.
1666  */
1667 static int
1668 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
1669     uint8_t *fieldsp)
1670 {
1671 	struct mbuf *m0 = txs->txs_mbuf;
1672 	struct livengood_tcpip_ctxdesc *t;
1673 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
1674 	uint32_t ipcse;
1675 	struct ether_header *eh;
1676 	int offset, iphl;
1677 	uint8_t fields;
1678 
1679 	/*
1680 	 * XXX It would be nice if the mbuf pkthdr had offset
1681 	 * fields for the protocol headers.
1682 	 */
1683 
1684 	eh = mtod(m0, struct ether_header *);
1685 	switch (htons(eh->ether_type)) {
1686 	case ETHERTYPE_IP:
1687 	case ETHERTYPE_IPV6:
1688 		offset = ETHER_HDR_LEN;
1689 		break;
1690 
1691 	case ETHERTYPE_VLAN:
1692 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1693 		break;
1694 
1695 	default:
1696 		/*
1697 		 * Don't support this protocol or encapsulation.
1698 		 */
1699 		*fieldsp = 0;
1700 		*cmdp = 0;
1701 		return (0);
1702 	}
1703 
1704 	if ((m0->m_pkthdr.csum_flags &
1705 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
1706 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
1707 	} else {
1708 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
1709 	}
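	/*
	 * The hardware's IPCSE field names the last byte of the IP
	 * header, inclusive, hence the "- 1" below.
	 */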
1710 	ipcse = offset + iphl - 1;
1711 
1712 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
1713 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
1714 	seg = 0;
1715 	fields = 0;
1716 
1717 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
1718 		int hlen = offset + iphl;
1719 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
1720 
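		/*
		 * For TSO the controller replays the IP and TCP headers
		 * for every segment, so below we zero the IP length field
		 * and seed th_sum with just the pseudo-header checksum;
		 * the chip fills in the per-segment length and payload
		 * checksum.
		 */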
1721 		if (__predict_false(m0->m_len <
1722 				    (hlen + sizeof(struct tcphdr)))) {
1723 			/*
1724 			 * TCP/IP headers are not in the first mbuf; we need
1725 			 * to do this the slow and painful way.  Let's just
1726 			 * hope this doesn't happen very often.
1727 			 */
1728 			struct tcphdr th;
1729 
1730 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
1731 
1732 			m_copydata(m0, hlen, sizeof(th), &th);
1733 			if (v4) {
1734 				struct ip ip;
1735 
1736 				m_copydata(m0, offset, sizeof(ip), &ip);
1737 				ip.ip_len = 0;
1738 				m_copyback(m0,
1739 				    offset + offsetof(struct ip, ip_len),
1740 				    sizeof(ip.ip_len), &ip.ip_len);
1741 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
1742 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
1743 			} else {
1744 				struct ip6_hdr ip6;
1745 
1746 				m_copydata(m0, offset, sizeof(ip6), &ip6);
1747 				ip6.ip6_plen = 0;
1748 				m_copyback(m0,
1749 				    offset + offsetof(struct ip6_hdr, ip6_plen),
1750 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
1751 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
1752 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
1753 			}
1754 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
1755 			    sizeof(th.th_sum), &th.th_sum);
1756 
1757 			hlen += th.th_off << 2;
1758 		} else {
1759 			/*
1760 			 * TCP/IP headers are in the first mbuf; we can do
1761 			 * this the easy way.
1762 			 */
1763 			struct tcphdr *th;
1764 
1765 			if (v4) {
1766 				struct ip *ip =
1767 				    (void *)(mtod(m0, char *) + offset);
1768 				th = (void *)(mtod(m0, char *) + hlen);
1769 
1770 				ip->ip_len = 0;
1771 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
1772 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1773 			} else {
1774 				struct ip6_hdr *ip6 =
1775 				    (void *)(mtod(m0, char *) + offset);
1776 				th = (void *)(mtod(m0, char *) + hlen);
1777 
1778 				ip6->ip6_plen = 0;
1779 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
1780 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
1781 			}
1782 			hlen += th->th_off << 2;
1783 		}
1784 
1785 		if (v4) {
1786 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
1787 			cmdlen |= WTX_TCPIP_CMD_IP;
1788 		} else {
1789 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
1790 			ipcse = 0;
1791 		}
1792 		cmd |= WTX_TCPIP_CMD_TSE;
1793 		cmdlen |= WTX_TCPIP_CMD_TSE |
1794 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
1795 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
1796 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
1797 	}
1798 
1799 	/*
1800 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
1801 	 * offload feature, if we load the context descriptor, we
1802 	 * MUST provide valid values for IPCSS and TUCSS fields.
1803 	 */
1804 
1805 	ipcs = WTX_TCPIP_IPCSS(offset) |
1806 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1807 	    WTX_TCPIP_IPCSE(ipcse);
1808 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
1809 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
1810 		fields |= WTX_IXSM;
1811 	}
1812 
1813 	offset += iphl;
1814 
1815 	if (m0->m_pkthdr.csum_flags &
1816 	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
1817 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
1818 		fields |= WTX_TXSM;
1819 		tucs = WTX_TCPIP_TUCSS(offset) |
1820 		    WTX_TCPIP_TUCSO(offset +
1821 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
1822 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
1823 	} else if ((m0->m_pkthdr.csum_flags &
1824 	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
1825 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
1826 		fields |= WTX_TXSM;
1827 		tucs = WTX_TCPIP_TUCSS(offset) |
1828 		    WTX_TCPIP_TUCSO(offset +
1829 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
1830 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
1831 	} else {
1832 		/* Just initialize it to a valid TCP context. */
1833 		tucs = WTX_TCPIP_TUCSS(offset) |
1834 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
1835 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
1836 	}
1837 
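	/*
	 * The context descriptor is written into the ordinary Tx ring
	 * and consumes one slot; it stays in effect for the data
	 * descriptors that follow it until another context is loaded.
	 */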
1838 	/* Fill in the context descriptor. */
1839 	t = (struct livengood_tcpip_ctxdesc *)
1840 	    &sc->sc_txdescs[sc->sc_txnext];
1841 	t->tcpip_ipcs = htole32(ipcs);
1842 	t->tcpip_tucs = htole32(tucs);
1843 	t->tcpip_cmdlen = htole32(cmdlen);
1844 	t->tcpip_seg = htole32(seg);
1845 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
1846 
1847 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
1848 	txs->txs_ndesc++;
1849 
1850 	*cmdp = cmd;
1851 	*fieldsp = fields;
1852 
1853 	return (0);
1854 }
1855 
1856 static void
1857 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
1858 {
1859 	struct mbuf *m;
1860 	int i;
1861 
1862 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
1863 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
1864 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
1865 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
1866 		    m->m_data, m->m_len, m->m_flags);
1867 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
1868 	    i, i == 1 ? "" : "s");
1869 }
1870 
1871 /*
1872  * wm_82547_txfifo_stall:
1873  *
1874  *	Callout used to wait for the 82547 Tx FIFO to drain,
1875  *	reset the FIFO pointers, and restart packet transmission.
1876  */
1877 static void
1878 wm_82547_txfifo_stall(void *arg)
1879 {
1880 	struct wm_softc *sc = arg;
1881 	int s;
1882 
1883 	s = splnet();
1884 
1885 	if (sc->sc_txfifo_stall) {
1886 		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
1887 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
1888 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
1889 			/*
1890 			 * Packets have drained.  Stop transmitter, reset
1891 			 * FIFO pointers, restart transmitter, and kick
1892 			 * the packet queue.
1893 			 */
1894 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
1895 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
1896 			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
1897 			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
1898 			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
1899 			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
1900 			CSR_WRITE(sc, WMREG_TCTL, tctl);
1901 			CSR_WRITE_FLUSH(sc);
1902 
1903 			sc->sc_txfifo_head = 0;
1904 			sc->sc_txfifo_stall = 0;
1905 			wm_start(&sc->sc_ethercom.ec_if);
1906 		} else {
1907 			/*
1908 			 * Still waiting for packets to drain; try again in
1909 			 * another tick.
1910 			 */
1911 			callout_schedule(&sc->sc_txfifo_ch, 1);
1912 		}
1913 	}
1914 
1915 	splx(s);
1916 }
1917 
1918 /*
1919  * wm_82547_txfifo_bugchk:
1920  *
1921  *	Check for bug condition in the 82547 Tx FIFO.  We need to
1922  *	prevent enqueueing a packet that would wrap around the end
1923  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
1924  *
1925  *	We do this by checking the amount of space before the end
1926  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
1927  *	the Tx FIFO, wait for all remaining packets to drain, reset
1928  *	the internal FIFO pointers to the beginning, and restart
1929  *	transmission on the interface.
1930  */
1931 #define	WM_FIFO_HDR		0x10
1932 #define	WM_82547_PAD_LEN	0x3e0
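/*
 * WM_FIFO_HDR is the per-packet header the chip stores in the Tx FIFO
 * (packet lengths are also rounded up to this granularity);
 * WM_82547_PAD_LEN is the threshold used below to decide whether a
 * packet wrapping the end of the FIFO requires a stall.
 */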
1933 static int
1934 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
1935 {
1936 	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
1937 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
1938 
1939 	/* Just return if already stalled. */
1940 	if (sc->sc_txfifo_stall)
1941 		return (1);
1942 
1943 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
1944 		/* Stall only occurs in half-duplex mode. */
1945 		goto send_packet;
1946 	}
1947 
1948 	if (len >= WM_82547_PAD_LEN + space) {
1949 		sc->sc_txfifo_stall = 1;
1950 		callout_schedule(&sc->sc_txfifo_ch, 1);
1951 		return (1);
1952 	}
1953 
1954  send_packet:
1955 	sc->sc_txfifo_head += len;
1956 	if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
1957 		sc->sc_txfifo_head -= sc->sc_txfifo_size;
1958 
1959 	return (0);
1960 }
1961 
1962 /*
1963  * wm_start:		[ifnet interface function]
1964  *
1965  *	Start packet transmission on the interface.
1966  */
1967 static void
1968 wm_start(struct ifnet *ifp)
1969 {
1970 	struct wm_softc *sc = ifp->if_softc;
1971 	struct mbuf *m0;
1972 #if 0 /* XXXJRT */
1973 	struct m_tag *mtag;
1974 #endif
1975 	struct wm_txsoft *txs;
1976 	bus_dmamap_t dmamap;
1977 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
1978 	bus_addr_t curaddr;
1979 	bus_size_t seglen, curlen;
1980 	uint32_t cksumcmd;
1981 	uint8_t cksumfields;
1982 
1983 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
1984 		return;
1985 
1986 	/*
1987 	 * Remember the previous number of free descriptors.
1988 	 */
1989 	ofree = sc->sc_txfree;
1990 
1991 	/*
1992 	 * Loop through the send queue, setting up transmit descriptors
1993 	 * until we drain the queue, or use up all available transmit
1994 	 * descriptors.
1995 	 */
1996 	for (;;) {
1997 		/* Grab a packet off the queue. */
1998 		IFQ_POLL(&ifp->if_snd, m0);
1999 		if (m0 == NULL)
2000 			break;
2001 
2002 		DPRINTF(WM_DEBUG_TX,
2003 		    ("%s: TX: have packet to transmit: %p\n",
2004 		    device_xname(sc->sc_dev), m0));
2005 
2006 		/* Get a work queue entry. */
2007 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2008 			wm_txintr(sc);
2009 			if (sc->sc_txsfree == 0) {
2010 				DPRINTF(WM_DEBUG_TX,
2011 				    ("%s: TX: no free job descriptors\n",
2012 					device_xname(sc->sc_dev)));
2013 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2014 				break;
2015 			}
2016 		}
2017 
2018 		txs = &sc->sc_txsoft[sc->sc_txsnext];
2019 		dmamap = txs->txs_dmamap;
2020 
2021 		use_tso = (m0->m_pkthdr.csum_flags &
2022 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2023 
2024 		/*
2025 		 * So says the Linux driver:
2026 		 * The controller does a simple calculation to make sure
2027 		 * there is enough room in the FIFO before initiating the
2028 		 * DMA for each buffer.  The calc is:
2029 		 *	4 = ceil(buffer len / MSS)
2030 		 * To make sure we don't overrun the FIFO, adjust the max
2031 		 * buffer len if the MSS drops.
2032 		 */
2033 		dmamap->dm_maxsegsz =
2034 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2035 		    ? m0->m_pkthdr.segsz << 2
2036 		    : WTX_MAX_LEN;
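		/*
		 * For example, with an MSS of 536 each DMA segment is
		 * capped at 4 * 536 = 2144 bytes.
		 */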
2037 
2038 		/*
2039 		 * Load the DMA map.  If this fails, the packet either
2040 		 * didn't fit in the allotted number of segments, or we
2041 		 * were short on resources.  For the too-many-segments
2042 		 * case, we simply report an error and drop the packet,
2043 		 * since we can't sanely copy a jumbo packet to a single
2044 		 * buffer.
2045 		 */
2046 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2047 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2048 		if (error) {
2049 			if (error == EFBIG) {
2050 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2051 				log(LOG_ERR, "%s: Tx packet consumes too many "
2052 				    "DMA segments, dropping...\n",
2053 				    device_xname(sc->sc_dev));
2054 				IFQ_DEQUEUE(&ifp->if_snd, m0);
2055 				wm_dump_mbuf_chain(sc, m0);
2056 				m_freem(m0);
2057 				continue;
2058 			}
2059 			/*
2060 			 * Short on resources, just stop for now.
2061 			 */
2062 			DPRINTF(WM_DEBUG_TX,
2063 			    ("%s: TX: dmamap load failed: %d\n",
2064 			    device_xname(sc->sc_dev), error));
2065 			break;
2066 		}
2067 
2068 		segs_needed = dmamap->dm_nsegs;
2069 		if (use_tso) {
2070 			/* For sentinel descriptor; see below. */
2071 			segs_needed++;
2072 		}
2073 
2074 		/*
2075 		 * Ensure we have enough descriptors free to describe
2076 		 * the packet.  Note, we always reserve one descriptor
2077 		 * at the end of the ring due to the semantics of the
2078 		 * TDT register, plus one more in the event we need
2079 		 * to load offload context.
2080 		 */
2081 		if (segs_needed > sc->sc_txfree - 2) {
2082 			/*
2083 			 * Not enough free descriptors to transmit this
2084 			 * packet.  We haven't committed anything yet,
2085 			 * so just unload the DMA map, put the packet
2086 			 * pack on the queue, and punt.  Notify the upper
2087 		 * back on the queue, and punt.  Notify the upper
2088 			 */
2089 			DPRINTF(WM_DEBUG_TX,
2090 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
2091 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
2092 			    segs_needed, sc->sc_txfree - 1));
2093 			ifp->if_flags |= IFF_OACTIVE;
2094 			bus_dmamap_unload(sc->sc_dmat, dmamap);
2095 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2096 			break;
2097 		}
2098 
2099 		/*
2100 		 * Check for 82547 Tx FIFO bug.  We need to do this
2101 		 * once we know we can transmit the packet, since we
2102 		 * do some internal FIFO space accounting here.
2103 		 */
2104 		if (sc->sc_type == WM_T_82547 &&
2105 		    wm_82547_txfifo_bugchk(sc, m0)) {
2106 			DPRINTF(WM_DEBUG_TX,
2107 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
2108 			    device_xname(sc->sc_dev)));
2109 			ifp->if_flags |= IFF_OACTIVE;
2110 			bus_dmamap_unload(sc->sc_dmat, dmamap);
2111 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2112 			break;
2113 		}
2114 
2115 		IFQ_DEQUEUE(&ifp->if_snd, m0);
2116 
2117 		/*
2118 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2119 		 */
2120 
2121 		DPRINTF(WM_DEBUG_TX,
2122 		    ("%s: TX: packet has %d (%d) DMA segments\n",
2123 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2124 
2125 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2126 
2127 		/*
2128 		 * Store a pointer to the packet so that we can free it
2129 		 * later.
2130 		 *
2131 		 * Initially, we consider the number of descriptors the
2132 		 * packet uses to be the number of DMA segments.  This may be
2133 		 * incremented by 1 if we do checksum offload (a descriptor
2134 		 * is used to set the checksum context).
2135 		 */
2136 		txs->txs_mbuf = m0;
2137 		txs->txs_firstdesc = sc->sc_txnext;
2138 		txs->txs_ndesc = segs_needed;
2139 
2140 		/* Set up offload parameters for this packet. */
2141 		if (m0->m_pkthdr.csum_flags &
2142 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
2143 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2144 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2145 			if (wm_tx_offload(sc, txs, &cksumcmd,
2146 					  &cksumfields) != 0) {
2147 				/* Error message already displayed. */
2148 				bus_dmamap_unload(sc->sc_dmat, dmamap);
2149 				continue;
2150 			}
2151 		} else {
2152 			cksumcmd = 0;
2153 			cksumfields = 0;
2154 		}
2155 
2156 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2157 
2158 		/* Sync the DMA map. */
2159 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2160 		    BUS_DMASYNC_PREWRITE);
2161 
2162 		/*
2163 		 * Initialize the transmit descriptor.
2164 		 */
2165 		for (nexttx = sc->sc_txnext, seg = 0;
2166 		     seg < dmamap->dm_nsegs; seg++) {
2167 			for (seglen = dmamap->dm_segs[seg].ds_len,
2168 			     curaddr = dmamap->dm_segs[seg].ds_addr;
2169 			     seglen != 0;
2170 			     curaddr += curlen, seglen -= curlen,
2171 			     nexttx = WM_NEXTTX(sc, nexttx)) {
2172 				curlen = seglen;
2173 
2174 				/*
2175 				 * So says the Linux driver:
2176 				 * Work around for premature descriptor
2177 				 * write-backs in TSO mode.  Append a
2178 				 * 4-byte sentinel descriptor.
2179 				 */
2180 				if (use_tso &&
2181 				    seg == dmamap->dm_nsegs - 1 &&
2182 				    curlen > 8)
2183 					curlen -= 4;
2184 
2185 				wm_set_dma_addr(
2186 				    &sc->sc_txdescs[nexttx].wtx_addr,
2187 				    curaddr);
2188 				sc->sc_txdescs[nexttx].wtx_cmdlen =
2189 				    htole32(cksumcmd | curlen);
2190 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2191 				    0;
2192 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2193 				    cksumfields;
2194 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2195 				lasttx = nexttx;
2196 
2197 				DPRINTF(WM_DEBUG_TX,
2198 				    ("%s: TX: desc %d: low 0x%08lx, "
2199 				     "len 0x%04x\n",
2200 				    device_xname(sc->sc_dev), nexttx,
2201 				    curaddr & 0xffffffffUL, (unsigned)curlen));
2202 			}
2203 		}
2204 
2205 		KASSERT(lasttx != -1);
2206 
2207 		/*
2208 		 * Set up the command byte on the last descriptor of
2209 		 * the packet.  If we're in the interrupt delay window,
2210 		 * delay the interrupt.
2211 		 */
2212 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
2213 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
2214 
2215 #if 0 /* XXXJRT */
2216 		/*
2217 		 * If VLANs are enabled and the packet has a VLAN tag, set
2218 		 * up the descriptor to encapsulate the packet for us.
2219 		 *
2220 		 * This is only valid on the last descriptor of the packet.
2221 		 */
2222 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2223 			sc->sc_txdescs[lasttx].wtx_cmdlen |=
2224 			    htole32(WTX_CMD_VLE);
2225 			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2226 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2227 		}
2228 #endif /* XXXJRT */
2229 
2230 		txs->txs_lastdesc = lasttx;
2231 
2232 		DPRINTF(WM_DEBUG_TX,
2233 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
2234 		    device_xname(sc->sc_dev),
2235 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2236 
2237 		/* Sync the descriptors we're using. */
2238 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2239 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2240 
2241 		/* Give the packet to the chip. */
2242 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2243 
2244 		DPRINTF(WM_DEBUG_TX,
2245 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2246 
2247 		DPRINTF(WM_DEBUG_TX,
2248 		    ("%s: TX: finished transmitting packet, job %d\n",
2249 		    device_xname(sc->sc_dev), sc->sc_txsnext));
2250 
2251 		/* Advance the tx pointer. */
2252 		sc->sc_txfree -= txs->txs_ndesc;
2253 		sc->sc_txnext = nexttx;
2254 
2255 		sc->sc_txsfree--;
2256 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2257 
2258 #if NBPFILTER > 0
2259 		/* Pass the packet to any BPF listeners. */
2260 		if (ifp->if_bpf)
2261 			bpf_mtap(ifp->if_bpf, m0);
2262 #endif /* NBPFILTER > 0 */
2263 	}
2264 
2265 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2266 		/* No more slots; notify upper layer. */
2267 		ifp->if_flags |= IFF_OACTIVE;
2268 	}
2269 
2270 	if (sc->sc_txfree != ofree) {
2271 		/* Set a watchdog timer in case the chip flakes out. */
2272 		ifp->if_timer = 5;
2273 	}
2274 }
2275 
2276 /*
2277  * wm_watchdog:		[ifnet interface function]
2278  *
2279  *	Watchdog timer handler.
2280  */
2281 static void
2282 wm_watchdog(struct ifnet *ifp)
2283 {
2284 	struct wm_softc *sc = ifp->if_softc;
2285 
2286 	/*
2287 	 * Since we're using delayed interrupts, sweep up
2288 	 * before we report an error.
2289 	 */
2290 	wm_txintr(sc);
2291 
2292 	if (sc->sc_txfree != WM_NTXDESC(sc)) {
2293 		log(LOG_ERR,
2294 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2295 		    device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2296 		    sc->sc_txnext);
2297 		ifp->if_oerrors++;
2298 
2299 		/* Reset the interface. */
2300 		(void) wm_init(ifp);
2301 	}
2302 
2303 	/* Try to get more packets going. */
2304 	wm_start(ifp);
2305 }
2306 
2307 /*
2308  * wm_ioctl:		[ifnet interface function]
2309  *
2310  *	Handle control requests from the operator.
2311  */
2312 static int
2313 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2314 {
2315 	struct wm_softc *sc = ifp->if_softc;
2316 	struct ifreq *ifr = (struct ifreq *) data;
2317 	int s, error;
2318 
2319 	s = splnet();
2320 
2321 	switch (cmd) {
2322 	case SIOCSIFMEDIA:
2323 	case SIOCGIFMEDIA:
2324 		/* Flow control requires full-duplex mode. */
2325 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2326 		    (ifr->ifr_media & IFM_FDX) == 0)
2327 			ifr->ifr_media &= ~IFM_ETH_FMASK;
2328 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2329 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2330 				/* We can do both TXPAUSE and RXPAUSE. */
2331 				ifr->ifr_media |=
2332 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2333 			}
2334 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2335 		}
2336 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2337 		break;
2338 	default:
2339 		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
2340 			break;
2341 
2342 		error = 0;
2343 
2344 		if (cmd == SIOCSIFCAP)
2345 			error = (*ifp->if_init)(ifp);
2346 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2347 			;
2348 		else if (ifp->if_flags & IFF_RUNNING) {
2349 			/*
2350 			 * Multicast list has changed; set the hardware filter
2351 			 * accordingly.
2352 			 */
2353 			wm_set_filter(sc);
2354 		}
2355 		break;
2356 	}
2357 
2358 	/* Try to get more packets going. */
2359 	wm_start(ifp);
2360 
2361 	splx(s);
2362 	return (error);
2363 }
2364 
2365 /*
2366  * wm_intr:
2367  *
2368  *	Interrupt service routine.
2369  */
2370 static int
2371 wm_intr(void *arg)
2372 {
2373 	struct wm_softc *sc = arg;
2374 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2375 	uint32_t icr;
2376 	int handled = 0;
2377 
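	/*
	 * Reading ICR acknowledges (clears) the causes it returns, so
	 * keep looping until no interrupt cause we care about remains
	 * asserted.
	 */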
2378 	while (1 /* CONSTCOND */) {
2379 		icr = CSR_READ(sc, WMREG_ICR);
2380 		if ((icr & sc->sc_icr) == 0)
2381 			break;
2382 #if 0 /*NRND > 0*/
2383 		if (RND_ENABLED(&sc->rnd_source))
2384 			rnd_add_uint32(&sc->rnd_source, icr);
2385 #endif
2386 
2387 		handled = 1;
2388 
2389 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2390 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
2391 			DPRINTF(WM_DEBUG_RX,
2392 			    ("%s: RX: got Rx intr 0x%08x\n",
2393 			    device_xname(sc->sc_dev),
2394 			    icr & (ICR_RXDMT0|ICR_RXT0)));
2395 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
2396 		}
2397 #endif
2398 		wm_rxintr(sc);
2399 
2400 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2401 		if (icr & ICR_TXDW) {
2402 			DPRINTF(WM_DEBUG_TX,
2403 			    ("%s: TX: got TXDW interrupt\n",
2404 			    device_xname(sc->sc_dev)));
2405 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
2406 		}
2407 #endif
2408 		wm_txintr(sc);
2409 
2410 		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
2411 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
2412 			wm_linkintr(sc, icr);
2413 		}
2414 
2415 		if (icr & ICR_RXO) {
2416 			ifp->if_ierrors++;
2417 #if defined(WM_DEBUG)
2418 			log(LOG_WARNING, "%s: Receive overrun\n",
2419 			    device_xname(sc->sc_dev));
2420 #endif /* defined(WM_DEBUG) */
2421 		}
2422 	}
2423 
2424 	if (handled) {
2425 		/* Try to get more packets going. */
2426 		wm_start(ifp);
2427 	}
2428 
2429 	return (handled);
2430 }
2431 
2432 /*
2433  * wm_txintr:
2434  *
2435  *	Helper; handle transmit interrupts.
2436  */
2437 static void
2438 wm_txintr(struct wm_softc *sc)
2439 {
2440 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2441 	struct wm_txsoft *txs;
2442 	uint8_t status;
2443 	int i;
2444 
2445 	ifp->if_flags &= ~IFF_OACTIVE;
2446 
2447 	/*
2448 	 * Go through the Tx list and free mbufs for those
2449 	 * frames which have been transmitted.
2450 	 */
2451 	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
2452 	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
2453 		txs = &sc->sc_txsoft[i];
2454 
2455 		DPRINTF(WM_DEBUG_TX,
2456 		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
2457 
2458 		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
2459 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2460 
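		/*
		 * The descriptor-done (DD) bit is meaningful here because
		 * wm_start() sets WTX_CMD_RS on the last descriptor of
		 * every packet; if DD isn't set yet, the packet is still
		 * in flight.
		 */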
2461 		status =
2462 		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
2463 		if ((status & WTX_ST_DD) == 0) {
2464 			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
2465 			    BUS_DMASYNC_PREREAD);
2466 			break;
2467 		}
2468 
2469 		DPRINTF(WM_DEBUG_TX,
2470 		    ("%s: TX: job %d done: descs %d..%d\n",
2471 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
2472 		    txs->txs_lastdesc));
2473 
2474 		/*
2475 		 * XXX We should probably be using the statistics
2476 		 * XXX registers, but I don't know if they exist
2477 		 * XXX on chips before the i82544.
2478 		 */
2479 
2480 #ifdef WM_EVENT_COUNTERS
2481 		if (status & WTX_ST_TU)
2482 			WM_EVCNT_INCR(&sc->sc_ev_tu);
2483 #endif /* WM_EVENT_COUNTERS */
2484 
2485 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
2486 			ifp->if_oerrors++;
2487 			if (status & WTX_ST_LC)
2488 				log(LOG_WARNING, "%s: late collision\n",
2489 				    device_xname(sc->sc_dev));
2490 			else if (status & WTX_ST_EC) {
2491 				ifp->if_collisions += 16;
2492 				log(LOG_WARNING, "%s: excessive collisions\n",
2493 				    device_xname(sc->sc_dev));
2494 			}
2495 		} else
2496 			ifp->if_opackets++;
2497 
2498 		sc->sc_txfree += txs->txs_ndesc;
2499 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
2500 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2501 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2502 		m_freem(txs->txs_mbuf);
2503 		txs->txs_mbuf = NULL;
2504 	}
2505 
2506 	/* Update the dirty transmit buffer pointer. */
2507 	sc->sc_txsdirty = i;
2508 	DPRINTF(WM_DEBUG_TX,
2509 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
2510 
2511 	/*
2512 	 * If there are no more pending transmissions, cancel the watchdog
2513 	 * timer.
2514 	 */
2515 	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
2516 		ifp->if_timer = 0;
2517 }
2518 
2519 /*
2520  * wm_rxintr:
2521  *
2522  *	Helper; handle receive interrupts.
2523  */
2524 static void
2525 wm_rxintr(struct wm_softc *sc)
2526 {
2527 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2528 	struct wm_rxsoft *rxs;
2529 	struct mbuf *m;
2530 	int i, len;
2531 	uint8_t status, errors;
2532 
2533 	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
2534 		rxs = &sc->sc_rxsoft[i];
2535 
2536 		DPRINTF(WM_DEBUG_RX,
2537 		    ("%s: RX: checking descriptor %d\n",
2538 		    device_xname(sc->sc_dev), i));
2539 
2540 		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2541 
2542 		status = sc->sc_rxdescs[i].wrx_status;
2543 		errors = sc->sc_rxdescs[i].wrx_errors;
2544 		len = le16toh(sc->sc_rxdescs[i].wrx_len);
2545 
2546 		if ((status & WRX_ST_DD) == 0) {
2547 			/*
2548 			 * We have processed all of the receive descriptors.
2549 			 */
2550 			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
2551 			break;
2552 		}
2553 
2554 		if (__predict_false(sc->sc_rxdiscard)) {
2555 			DPRINTF(WM_DEBUG_RX,
2556 			    ("%s: RX: discarding contents of descriptor %d\n",
2557 			    device_xname(sc->sc_dev), i));
2558 			WM_INIT_RXDESC(sc, i);
2559 			if (status & WRX_ST_EOP) {
2560 				/* Reset our state. */
2561 				DPRINTF(WM_DEBUG_RX,
2562 				    ("%s: RX: resetting rxdiscard -> 0\n",
2563 				    device_xname(sc->sc_dev)));
2564 				sc->sc_rxdiscard = 0;
2565 			}
2566 			continue;
2567 		}
2568 
2569 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2570 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2571 
2572 		m = rxs->rxs_mbuf;
2573 
2574 		/*
2575 		 * Add a new receive buffer to the ring, unless of
2576 		 * course the length is zero. Treat the latter as a
2577 		 * failed mapping.
2578 		 */
2579 		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
2580 			/*
2581 			 * Failed, throw away what we've done so
2582 			 * far, and discard the rest of the packet.
2583 			 */
2584 			ifp->if_ierrors++;
2585 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2586 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2587 			WM_INIT_RXDESC(sc, i);
2588 			if ((status & WRX_ST_EOP) == 0)
2589 				sc->sc_rxdiscard = 1;
2590 			if (sc->sc_rxhead != NULL)
2591 				m_freem(sc->sc_rxhead);
2592 			WM_RXCHAIN_RESET(sc);
2593 			DPRINTF(WM_DEBUG_RX,
2594 			    ("%s: RX: Rx buffer allocation failed, "
2595 			    "dropping packet%s\n", device_xname(sc->sc_dev),
2596 			    sc->sc_rxdiscard ? " (discard)" : ""));
2597 			continue;
2598 		}
2599 
2600 		m->m_len = len;
2601 		sc->sc_rxlen += len;
2602 		DPRINTF(WM_DEBUG_RX,
2603 		    ("%s: RX: buffer at %p len %d\n",
2604 		    device_xname(sc->sc_dev), m->m_data, len));
2605 
2606 		/*
2607 		 * If this is not the end of the packet, keep
2608 		 * looking.
2609 		 */
2610 		if ((status & WRX_ST_EOP) == 0) {
2611 			WM_RXCHAIN_LINK(sc, m);
2612 			DPRINTF(WM_DEBUG_RX,
2613 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
2614 			    device_xname(sc->sc_dev), sc->sc_rxlen));
2615 			continue;
2616 		}
2617 
2618 		/*
2619 		 * Okay, we have the entire packet now.  The chip is
2620 		 * configured to include the FCS (not all chips can
2621 		 * be configured to strip it), so we need to trim it.
2622 		 * May need to adjust length of previous mbuf in the
2623 		 * chain if the current mbuf is too short.
2624 		 */
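		/*
		 * For example, if the last mbuf holds only 2 bytes, those
		 * 2 bytes plus the trailing 2 bytes of the previous mbuf
		 * are all CRC.
		 */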
2625 		if (m->m_len < ETHER_CRC_LEN) {
2626 			sc->sc_rxtail->m_len -= (ETHER_CRC_LEN - m->m_len);
2627 			m->m_len = 0;
2628 		} else {
2629 			m->m_len -= ETHER_CRC_LEN;
2630 		}
2631 		len = sc->sc_rxlen - ETHER_CRC_LEN;
2632 
2633 		WM_RXCHAIN_LINK(sc, m);
2634 
2635 		*sc->sc_rxtailp = NULL;
2636 		m = sc->sc_rxhead;
2637 
2638 		WM_RXCHAIN_RESET(sc);
2639 
2640 		DPRINTF(WM_DEBUG_RX,
2641 		    ("%s: RX: have entire packet, len -> %d\n",
2642 		    device_xname(sc->sc_dev), len));
2643 
2644 		/*
2645 		 * If an error occurred, update stats and drop the packet.
2646 		 */
2647 		if (errors &
2648 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
2649 			ifp->if_ierrors++;
2650 			if (errors & WRX_ER_SE)
2651 				log(LOG_WARNING, "%s: symbol error\n",
2652 				    device_xname(sc->sc_dev));
2653 			else if (errors & WRX_ER_SEQ)
2654 				log(LOG_WARNING, "%s: receive sequence error\n",
2655 				    device_xname(sc->sc_dev));
2656 			else if (errors & WRX_ER_CE)
2657 				log(LOG_WARNING, "%s: CRC error\n",
2658 				    device_xname(sc->sc_dev));
2659 			m_freem(m);
2660 			continue;
2661 		}
2662 
2663 		/*
2664 		 * No errors.  Receive the packet.
2665 		 */
2666 		m->m_pkthdr.rcvif = ifp;
2667 		m->m_pkthdr.len = len;
2668 
2669 #if 0 /* XXXJRT */
2670 		/*
2671 		 * If VLANs are enabled, VLAN packets have been unwrapped
2672 		 * for us.  Associate the tag with the packet.
2673 		 */
2674 		if ((status & WRX_ST_VP) != 0) {
2675 			VLAN_INPUT_TAG(ifp, m,
2676 			    le16toh(sc->sc_rxdescs[i].wrx_special,
2677 			    continue);
2678 		}
2679 #endif /* XXXJRT */
2680 
2681 		/*
2682 		 * Set up checksum info for this packet.
2683 		 */
2684 		if ((status & WRX_ST_IXSM) == 0) {
2685 			if (status & WRX_ST_IPCS) {
2686 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
2687 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2688 				if (errors & WRX_ER_IPE)
2689 					m->m_pkthdr.csum_flags |=
2690 					    M_CSUM_IPv4_BAD;
2691 			}
2692 			if (status & WRX_ST_TCPCS) {
2693 				/*
2694 				 * Note: we don't know if this was TCP or UDP,
2695 				 * so we just set both bits, and expect the
2696 				 * upper layers to deal.
2697 				 */
2698 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
2699 				m->m_pkthdr.csum_flags |=
2700 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
2701 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
2702 				if (errors & WRX_ER_TCPE)
2703 					m->m_pkthdr.csum_flags |=
2704 					    M_CSUM_TCP_UDP_BAD;
2705 			}
2706 		}
2707 
2708 		ifp->if_ipackets++;
2709 
2710 #if NBPFILTER > 0
2711 		/* Pass this up to any BPF listeners. */
2712 		if (ifp->if_bpf)
2713 			bpf_mtap(ifp->if_bpf, m);
2714 #endif /* NBPFILTER > 0 */
2715 
2716 		/* Pass it on. */
2717 		(*ifp->if_input)(ifp, m);
2718 	}
2719 
2720 	/* Update the receive pointer. */
2721 	sc->sc_rxptr = i;
2722 
2723 	DPRINTF(WM_DEBUG_RX,
2724 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
2725 }
2726 
2727 /*
2728  * wm_linkintr:
2729  *
2730  *	Helper; handle link interrupts.
2731  */
2732 static void
2733 wm_linkintr(struct wm_softc *sc, uint32_t icr)
2734 {
2735 	uint32_t status;
2736 
2737 	/*
2738 	 * If we get a link status interrupt on a 1000BASE-T
2739 	 * device, just fall into the normal MII tick path.
2740 	 */
2741 	if (sc->sc_flags & WM_F_HAS_MII) {
2742 		if (icr & ICR_LSC) {
2743 			DPRINTF(WM_DEBUG_LINK,
2744 			    ("%s: LINK: LSC -> mii_tick\n",
2745 			    device_xname(sc->sc_dev)));
2746 			mii_tick(&sc->sc_mii);
2747 		} else if (icr & ICR_RXSEQ) {
2748 			DPRINTF(WM_DEBUG_LINK,
2749 			    ("%s: LINK: Receive sequence error\n",
2750 			    device_xname(sc->sc_dev)));
2751 		}
2752 		return;
2753 	}
2754 
2755 	/*
2756 	 * If we are now receiving /C/, check for link again in
2757 	 * a couple of link clock ticks.
2758 	 */
2759 	if (icr & ICR_RXCFG) {
2760 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
2761 		    device_xname(sc->sc_dev)));
2762 		sc->sc_tbi_anstate = 2;
2763 	}
2764 
2765 	if (icr & ICR_LSC) {
2766 		status = CSR_READ(sc, WMREG_STATUS);
2767 		if (status & STATUS_LU) {
2768 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
2769 			    device_xname(sc->sc_dev),
2770 			    (status & STATUS_FD) ? "FDX" : "HDX"));
2771 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2772 			sc->sc_fcrtl &= ~FCRTL_XONE;
2773 			if (status & STATUS_FD)
2774 				sc->sc_tctl |=
2775 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2776 			else
2777 				sc->sc_tctl |=
2778 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2779 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
2780 				sc->sc_fcrtl |= FCRTL_XONE;
2781 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2782 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
2783 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
2784 				      sc->sc_fcrtl);
2785 			sc->sc_tbi_linkup = 1;
2786 		} else {
2787 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
2788 			    device_xname(sc->sc_dev)));
2789 			sc->sc_tbi_linkup = 0;
2790 		}
2791 		sc->sc_tbi_anstate = 2;
2792 		wm_tbi_set_linkled(sc);
2793 	} else if (icr & ICR_RXSEQ) {
2794 		DPRINTF(WM_DEBUG_LINK,
2795 		    ("%s: LINK: Receive sequence error\n",
2796 		    device_xname(sc->sc_dev)));
2797 	}
2798 }
2799 
2800 /*
2801  * wm_tick:
2802  *
2803  *	One second timer, used to check link status, sweep up
2804  *	completed transmit jobs, etc.
2805  */
2806 static void
2807 wm_tick(void *arg)
2808 {
2809 	struct wm_softc *sc = arg;
2810 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2811 	int s;
2812 
2813 	s = splnet();
2814 
2815 	if (sc->sc_type >= WM_T_82542_2_1) {
2816 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2817 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2818 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2819 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2820 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2821 	}
2822 
2823 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2824 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
2825 
2826 
2827 	if (sc->sc_flags & WM_F_HAS_MII)
2828 		mii_tick(&sc->sc_mii);
2829 	else
2830 		wm_tbi_check_link(sc);
2831 
2832 	splx(s);
2833 
2834 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2835 }
2836 
2837 /*
2838  * wm_reset:
2839  *
2840  *	Reset the i82542 chip.
2841  */
2842 static void
2843 wm_reset(struct wm_softc *sc)
2844 {
2845 	uint32_t reg;
2846 
2847 	/*
2848 	 * Allocate on-chip memory according to the MTU size.
2849 	 * The Packet Buffer Allocation register must be written
2850 	 * before the chip is reset.
2851 	 */
2852 	switch (sc->sc_type) {
2853 	case WM_T_82547:
2854 	case WM_T_82547_2:
2855 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
2856 		    PBA_22K : PBA_30K;
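		/*
		 * Judging from PBA_40K below, the packet buffer on these
		 * parts is 40KB total; whatever is not assigned to Rx
		 * becomes the Tx FIFO, whose head we track in software
		 * for the FIFO-stall workaround.
		 */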
2857 		sc->sc_txfifo_head = 0;
2858 		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
2859 		sc->sc_txfifo_size =
2860 		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
2861 		sc->sc_txfifo_stall = 0;
2862 		break;
2863 	case WM_T_82571:
2864 	case WM_T_82572:
2865 	case WM_T_80003:
2866 		sc->sc_pba = PBA_32K;
2867 		break;
2868 	case WM_T_82573:
2869 		sc->sc_pba = PBA_12K;
2870 		break;
2871 	case WM_T_ICH8:
2872 		sc->sc_pba = PBA_8K;
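		/*
		 * On ICH8 the total packet buffer size is itself
		 * programmable; use 16K, with 8K of it for Rx.
		 */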
2873 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
2874 		break;
2875 	case WM_T_ICH9:
2876 		sc->sc_pba = PBA_10K;
2877 		break;
2878 	default:
2879 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
2880 		    PBA_40K : PBA_48K;
2881 		break;
2882 	}
2883 	CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
2884 
2885 	if (sc->sc_flags & WM_F_PCIE) {
2886 		int timeout = 800;
2887 
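		/*
		 * Disable PCIe (GIO) master access and wait for any
		 * in-flight transactions to finish: up to
		 * 800 * 100us = 80ms.
		 */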
2888 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
2889 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2890 
2891 		while (timeout--) {
2892 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
2893 				break;
2894 			delay(100);
2895 		}
2896 	}
2897 
2898 	/* clear interrupt */
2899 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
2900 
2901 	/*
2902 	 * 82541 Errata 29? & 82547 Errata 28?
2903 	 * See also the description about PHY_RST bit in CTRL register
2904 	 * in 8254x_GBe_SDM.pdf.
2905 	 */
2906 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
2907 		CSR_WRITE(sc, WMREG_CTRL,
2908 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
2909 		delay(5000);
2910 	}
2911 
2912 	switch (sc->sc_type) {
2913 	case WM_T_82544:
2914 	case WM_T_82540:
2915 	case WM_T_82545:
2916 	case WM_T_82546:
2917 	case WM_T_82541:
2918 	case WM_T_82541_2:
2919 		/*
2920 		 * On some chipsets, a reset through a memory-mapped write
2921 		 * cycle can cause the chip to reset before completing the
2922 		 * write cycle.  This causes major headaches that can be
2923 		 * avoided by issuing the reset via indirect register writes
2924 		 * through I/O space.
2925 		 *
2926 		 * So, if we successfully mapped the I/O BAR at attach time,
2927 		 * use that.  Otherwise, try our luck with a memory-mapped
2928 		 * reset.
2929 		 */
2930 		if (sc->sc_flags & WM_F_IOH_VALID)
2931 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
2932 		else
2933 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2934 		break;
2935 
2936 	case WM_T_82545_3:
2937 	case WM_T_82546_3:
2938 		/* Use the shadow control register on these chips. */
2939 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
2940 		break;
2941 
2942 	case WM_T_ICH8:
2943 	case WM_T_ICH9:
2944 		wm_get_swfwhw_semaphore(sc);
2945 		CSR_WRITE(sc, WMREG_CTRL, CTRL_RST | CTRL_PHY_RESET);
2946 		delay(10000);
2947 		break;
2948 	default:
2949 		/* Everything else can safely use the documented method. */
2950 		CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2951 		break;
2952 	}
2953 	delay(10000);
2954 
2955 	/* reload EEPROM */
2956 	switch (sc->sc_type) {
2957 	case WM_T_82542_2_0:
2958 	case WM_T_82542_2_1:
2959 	case WM_T_82543:
2960 	case WM_T_82544:
2961 		delay(10);
2962 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
2963 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2964 		delay(2000);
2965 		break;
2966 	case WM_T_82541:
2967 	case WM_T_82541_2:
2968 	case WM_T_82547:
2969 	case WM_T_82547_2:
2970 		delay(20000);
2971 		break;
2972 	case WM_T_82573:
2973 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
2974 			delay(10);
2975 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
2976 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2977 		}
2978 		/* FALLTHROUGH */
2979 	default:
2980 		/* check EECD_EE_AUTORD */
2981 		wm_get_auto_rd_done(sc);
2982 	}
2983 
2984 #if 0
2985 	for (i = 0; i < 1000; i++) {
2986 		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0) {
2987 			return;
2988 		}
2989 		delay(20);
2990 	}
2991 
2992 	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
2993 		log(LOG_ERR, "%s: reset failed to complete\n",
2994 		    device_xname(sc->sc_dev));
2995 #endif
2996 }
2997 
2998 /*
2999  * wm_init:		[ifnet interface function]
3000  *
3001  *	Initialize the interface.  Must be called at splnet().
3002  */
3003 static int
3004 wm_init(struct ifnet *ifp)
3005 {
3006 	struct wm_softc *sc = ifp->if_softc;
3007 	struct wm_rxsoft *rxs;
3008 	int i, error = 0;
3009 	uint32_t reg;
3010 
3011 	/*
3012 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3013 	 * There is a small but measurable benefit to avoiding the adjustment
3014 	 * of the descriptor so that the headers are aligned, for normal mtu,
3015 	 * on such platforms.  One possibility is that the DMA itself is
3016 	 * slightly more efficient if the front of the entire packet (instead
3017 	 * of the front of the headers) is aligned.
3018 	 *
3019 	 * Note we must always set align_tweak to 0 if we are using
3020 	 * jumbo frames.
3021 	 */
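	/*
	 * (The 2-byte tweak offsets the 14-byte Ethernet header so that
	 * the IP header following it starts on a 4-byte boundary.)
	 */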
3022 #ifdef __NO_STRICT_ALIGNMENT
3023 	sc->sc_align_tweak = 0;
3024 #else
3025 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3026 		sc->sc_align_tweak = 0;
3027 	else
3028 		sc->sc_align_tweak = 2;
3029 #endif /* __NO_STRICT_ALIGNMENT */
3030 
3031 	/* Cancel any pending I/O. */
3032 	wm_stop(ifp, 0);
3033 
3034 	/* update statistics before reset */
3035 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3036 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3037 
3038 	/* Reset the chip to a known state. */
3039 	wm_reset(sc);
3040 
3041 	/* Initialize the transmit descriptor ring. */
3042 	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3043 	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3044 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3045 	sc->sc_txfree = WM_NTXDESC(sc);
3046 	sc->sc_txnext = 0;
3047 
3048 	if (sc->sc_type < WM_T_82543) {
3049 		CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
3050 		CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
3051 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3052 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3053 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3054 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3055 	} else {
3056 		CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
3057 		CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
3058 		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3059 		CSR_WRITE(sc, WMREG_TDH, 0);
3060 		CSR_WRITE(sc, WMREG_TDT, 0);
3061 		CSR_WRITE(sc, WMREG_TIDV, 375);		/* ITR / 4 */
3062 		CSR_WRITE(sc, WMREG_TADV, 375);		/* should be same */
3063 
3064 		CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3065 		    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3066 		CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3067 		    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3068 	}
3069 	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3070 	CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3071 
3072 	/* Initialize the transmit job descriptors. */
3073 	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3074 		sc->sc_txsoft[i].txs_mbuf = NULL;
3075 	sc->sc_txsfree = WM_TXQUEUELEN(sc);
3076 	sc->sc_txsnext = 0;
3077 	sc->sc_txsdirty = 0;
3078 
3079 	/*
3080 	 * Initialize the receive descriptor and receive job
3081 	 * descriptor rings.
3082 	 */
3083 	if (sc->sc_type < WM_T_82543) {
3084 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3085 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3086 		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3087 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3088 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3089 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3090 
3091 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3092 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3093 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3094 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3095 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3096 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3097 	} else {
3098 		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3099 		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3100 		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3101 		CSR_WRITE(sc, WMREG_RDH, 0);
3102 		CSR_WRITE(sc, WMREG_RDT, 0);
3103 		CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD);	/* ITR/4 */
3104 		CSR_WRITE(sc, WMREG_RADV, 375);		/* MUST be same */
3105 	}
3106 	for (i = 0; i < WM_NRXDESC; i++) {
3107 		rxs = &sc->sc_rxsoft[i];
3108 		if (rxs->rxs_mbuf == NULL) {
3109 			if ((error = wm_add_rxbuf(sc, i)) != 0) {
3110 				log(LOG_ERR, "%s: unable to allocate or map rx "
3111 				    "buffer %d, error = %d\n",
3112 				    device_xname(sc->sc_dev), i, error);
3113 				/*
3114 				 * XXX Should attempt to run with fewer receive
3115 				 * XXX buffers instead of just failing.
3116 				 */
3117 				wm_rxdrain(sc);
3118 				goto out;
3119 			}
3120 		} else
3121 			WM_INIT_RXDESC(sc, i);
3122 	}
3123 	sc->sc_rxptr = 0;
3124 	sc->sc_rxdiscard = 0;
3125 	WM_RXCHAIN_RESET(sc);
3126 
3127 	/*
3128 	 * Clear out the VLAN table -- we don't use it (yet).
3129 	 */
3130 	CSR_WRITE(sc, WMREG_VET, 0);
3131 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
3132 		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3133 
3134 	/*
3135 	 * Set up flow-control parameters.
3136 	 *
3137 	 * XXX Values could probably stand some tuning.
3138 	 */
3139 	if (sc->sc_type != WM_T_ICH8) {
3140 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3141 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3142 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3143 	}
3144 
3145 	sc->sc_fcrtl = FCRTL_DFLT;
3146 	if (sc->sc_type < WM_T_82543) {
3147 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3148 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3149 	} else {
3150 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3151 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3152 	}
3153 	CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3154 
3155 #if 0 /* XXXJRT */
3156 	/* Deal with VLAN enables. */
3157 	if (VLAN_ATTACHED(&sc->sc_ethercom))
3158 		sc->sc_ctrl |= CTRL_VME;
3159 	else
3160 #endif /* XXXJRT */
3161 		sc->sc_ctrl &= ~CTRL_VME;
3162 
3163 	/* Write the control registers. */
3164 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3165 	if (sc->sc_type >= WM_T_80003 && (sc->sc_flags & WM_F_HAS_MII)) {
3166 		int val;
3167 		val = CSR_READ(sc, WMREG_CTRL_EXT);
3168 		val &= ~CTRL_EXT_LINK_MODE_MASK;
3169 		CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3170 
3171 		/* Bypass the RX and TX FIFOs */
3172 		wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3173 		    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
3174 		    KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3175 
3176 		wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3177 		    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3178 		    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3179 		/*
3180 		 * Set the mac to wait the maximum time between each
3181 		 * iteration and increase the max iterations when
3182 		 * polling the phy; this fixes erroneous timeouts at 10Mbps.
3183 		 */
3184 		wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS, 0xFFFF);
3185 		val = wm_kmrn_i80003_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
3186 		val |= 0x3F;
3187 		wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM, val);
3188 	}
3189 #if 0
3190 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3191 #endif
3192 
3193 	/*
3194 	 * Set up checksum offload parameters.
3195 	 */
3196 	reg = CSR_READ(sc, WMREG_RXCSUM);
3197 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3198 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3199 		reg |= RXCSUM_IPOFL;
3200 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3201 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3202 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3203 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3204 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
3205 
3206 	/*
3207 	 * Set up the interrupt registers.
3208 	 */
3209 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3210 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3211 	    ICR_RXO | ICR_RXT0;
3212 	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
3213 		sc->sc_icr |= ICR_RXCFG;
3214 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
3215 
3216 	/* Set up the inter-packet gap. */
3217 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
3218 
3219 	if (sc->sc_type >= WM_T_82543) {
3220 		/*
3221 		 * Set up the interrupt throttling register (units of 256ns)
3222 		 * Note that a footnote in Intel's documentation says this
3223 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
3224 		 * or 10Mbit mode.  Empirically, it appears to be the case
3225 		 * that that is also true for the 1024ns units of the other
3226 		 * interrupt-related timer registers -- so, really, we ought
3227 		 * to divide this value by 4 when the link speed is low.
3228 		 *
3229 		 * XXX implement this division at link speed change!
3230 		 */
3231 
3232 		 /*
3233 		  * For N interrupts/sec, set this value to:
3234 		  * 1000000000 / (N * 256).  Note that we set the
3235 		  * absolute and packet timer values to this value
3236 		  * divided by 4 to get "simple timer" behavior.
3237 		  */
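		 /*
		  * e.g. 1000000000 / (2604 * 256) ~= 1500, which is the
		  * value programmed below.
		  */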
3238 
3239 		sc->sc_itr = 1500;		/* 2604 ints/sec */
3240 		CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
3241 	}
3242 
3243 #if 0 /* XXXJRT */
3244 	/* Set the VLAN ethernetype. */
3245 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
3246 #endif
3247 
3248 	/*
3249 	 * Set up the transmit control register; we start out with
3250 	 * a collision distance suitable for FDX, but update it when
3251 	 * we resolve the media type.
3252 	 */
3253 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
3254 	    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3255 	if (sc->sc_type >= WM_T_82571)
3256 		sc->sc_tctl |= TCTL_MULR;
3257 	if (sc->sc_type >= WM_T_80003)
3258 		sc->sc_tctl |= TCTL_RTLC;
3259 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3260 
3261 	/* Set the media. */
3262 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
3263 		goto out;
3264 
3265 	/*
3266 	 * Set up the receive control register; we actually program
3267 	 * the register when we set the receive filter.  Use multicast
3268 	 * address offset type 0.
3269 	 *
3270 	 * Only the i82544 has the ability to strip the incoming
3271 	 * CRC, so we don't enable that feature.
3272 	 */
3273 	sc->sc_mchash_type = 0;
3274 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
3275 	    | RCTL_MO(sc->sc_mchash_type);
3276 
3277 	/* The 82573 and ICH8 don't support jumbo frames */
3278 	if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_ICH8)
3279 		sc->sc_rctl |= RCTL_LPE;
3280 
3281 	if (MCLBYTES == 2048) {
3282 		sc->sc_rctl |= RCTL_2k;
3283 	} else {
3284 		if (sc->sc_type >= WM_T_82543) {
3285 			switch (MCLBYTES) {
3286 			case 4096:
3287 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
3288 				break;
3289 			case 8192:
3290 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
3291 				break;
3292 			case 16384:
3293 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
3294 				break;
3295 			default:
3296 				panic("wm_init: MCLBYTES %d unsupported",
3297 				    MCLBYTES);
3298 				break;
3299 			}
3300 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
3301 	}
3302 
3303 	/* Set the receive filter. */
3304 	wm_set_filter(sc);
3305 
3306 	/* Start the one second link check clock. */
3307 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3308 
3309 	/* ...all done! */
3310 	ifp->if_flags |= IFF_RUNNING;
3311 	ifp->if_flags &= ~IFF_OACTIVE;
3312 
3313  out:
3314 	if (error)
3315 		log(LOG_ERR, "%s: interface not running\n",
3316 		    device_xname(sc->sc_dev));
3317 	return (error);
3318 }
3319 
3320 /*
3321  * wm_rxdrain:
3322  *
3323  *	Drain the receive queue.
3324  */
3325 static void
3326 wm_rxdrain(struct wm_softc *sc)
3327 {
3328 	struct wm_rxsoft *rxs;
3329 	int i;
3330 
3331 	for (i = 0; i < WM_NRXDESC; i++) {
3332 		rxs = &sc->sc_rxsoft[i];
3333 		if (rxs->rxs_mbuf != NULL) {
3334 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3335 			m_freem(rxs->rxs_mbuf);
3336 			rxs->rxs_mbuf = NULL;
3337 		}
3338 	}
3339 }
3340 
3341 /*
3342  * wm_stop:		[ifnet interface function]
3343  *
3344  *	Stop transmission on the interface.
3345  */
3346 static void
3347 wm_stop(struct ifnet *ifp, int disable)
3348 {
3349 	struct wm_softc *sc = ifp->if_softc;
3350 	struct wm_txsoft *txs;
3351 	int i;
3352 
3353 	/* Stop the one second clock. */
3354 	callout_stop(&sc->sc_tick_ch);
3355 
3356 	/* Stop the 82547 Tx FIFO stall check timer. */
3357 	if (sc->sc_type == WM_T_82547)
3358 		callout_stop(&sc->sc_txfifo_ch);
3359 
3360 	if (sc->sc_flags & WM_F_HAS_MII) {
3361 		/* Down the MII. */
3362 		mii_down(&sc->sc_mii);
3363 	}
3364 
3365 	/* Stop the transmit and receive processes. */
3366 	CSR_WRITE(sc, WMREG_TCTL, 0);
3367 	CSR_WRITE(sc, WMREG_RCTL, 0);
3368 
3369 	/*
3370 	 * Clear the interrupt mask to ensure the device cannot assert its
3371 	 * interrupt line.
3372 	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
3373 	 * any currently pending or shared interrupt.
3374 	 */
3375 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3376 	sc->sc_icr = 0;
3377 
3378 	/* Release any queued transmit buffers. */
3379 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
3380 		txs = &sc->sc_txsoft[i];
3381 		if (txs->txs_mbuf != NULL) {
3382 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3383 			m_freem(txs->txs_mbuf);
3384 			txs->txs_mbuf = NULL;
3385 		}
3386 	}
3387 
3388 	/* Mark the interface as down and cancel the watchdog timer. */
3389 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3390 	ifp->if_timer = 0;
3391 
3392 	if (disable)
3393 		wm_rxdrain(sc);
3394 }
3395 
3396 void
3397 wm_get_auto_rd_done(struct wm_softc *sc)
3398 {
3399 	int i;
3400 
3401 	/* wait for eeprom to reload */
3402 	switch (sc->sc_type) {
3403 	case WM_T_82571:
3404 	case WM_T_82572:
3405 	case WM_T_82573:
3406 	case WM_T_80003:
3407 	case WM_T_ICH8:
3408 	case WM_T_ICH9:
3409 		for (i = 10; i > 0; i--) {
3410 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3411 				break;
3412 			delay(1000);
3413 		}
3414 		if (i == 0) {
3415 			log(LOG_ERR, "%s: auto read from eeprom failed to "
3416 			    "complete\n", device_xname(sc->sc_dev));
3417 		}
3418 		break;
3419 	default:
3420 		delay(5000);
3421 		break;
3422 	}
3423 
3424 	/* Phy configuration starts after EECD_AUTO_RD is set */
3425 	if (sc->sc_type == WM_T_82573)
3426 		delay(25000);
3427 }
3428 
3429 /*
3430  * wm_acquire_eeprom:
3431  *
3432  *	Perform the EEPROM handshake required on some chips.
3433  */
3434 static int
3435 wm_acquire_eeprom(struct wm_softc *sc)
3436 {
3437 	uint32_t reg;
3438 	int x;
3439 	int ret = 0;
3440 
3441 	/* Always succeeds; flash-backed NVM needs no handshake. */
3442 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3443 		return 0;
3444 
3445 	if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
3446 		ret = wm_get_swfwhw_semaphore(sc);
3447 	} else if (sc->sc_flags & WM_F_SWFW_SYNC) {
3448 		/* this will also do wm_get_swsm_semaphore() if needed */
3449 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
3450 	} else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
3451 		ret = wm_get_swsm_semaphore(sc);
3452 	}
3453 
3454 	if (ret)
3455 		return 1;
3456 
3457 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE)  {
3458 		reg = CSR_READ(sc, WMREG_EECD);
3459 
3460 		/* Request EEPROM access. */
3461 		reg |= EECD_EE_REQ;
3462 		CSR_WRITE(sc, WMREG_EECD, reg);
3463 
3464 		/* ...and wait for it to be granted. */
3465 		for (x = 0; x < 1000; x++) {
3466 			reg = CSR_READ(sc, WMREG_EECD);
3467 			if (reg & EECD_EE_GNT)
3468 				break;
3469 			delay(5);
3470 		}
3471 		if ((reg & EECD_EE_GNT) == 0) {
3472 			aprint_error_dev(sc->sc_dev,
3473 			    "could not acquire EEPROM GNT\n");
3474 			reg &= ~EECD_EE_REQ;
3475 			CSR_WRITE(sc, WMREG_EECD, reg);
3476 			if (sc->sc_flags & WM_F_SWFWHW_SYNC)
3477 				wm_put_swfwhw_semaphore(sc);
3478 			if (sc->sc_flags & WM_F_SWFW_SYNC)
3479 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
3480 			else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
3481 				wm_put_swsm_semaphore(sc);
3482 			return (1);
3483 		}
3484 	}
3485 
3486 	return (0);
3487 }
3488 
3489 /*
3490  * wm_release_eeprom:
3491  *
3492  *	Release the EEPROM mutex.
3493  */
3494 static void
3495 wm_release_eeprom(struct wm_softc *sc)
3496 {
3497 	uint32_t reg;
3498 
3499 	/* Nothing to release for flash-backed NVM. */
3500 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3501 		return;
3502 
3503 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
3504 		reg = CSR_READ(sc, WMREG_EECD);
3505 		reg &= ~EECD_EE_REQ;
3506 		CSR_WRITE(sc, WMREG_EECD, reg);
3507 	}
3508 
3509 	if (sc->sc_flags & WM_F_SWFWHW_SYNC)
3510 		wm_put_swfwhw_semaphore(sc);
3511 	if (sc->sc_flags & WM_F_SWFW_SYNC)
3512 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
3513 	else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
3514 		wm_put_swsm_semaphore(sc);
3515 }
3516 
3517 /*
3518  * wm_eeprom_sendbits:
3519  *
3520  *	Send a series of bits to the EEPROM.
3521  */
3522 static void
3523 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
3524 {
3525 	uint32_t reg;
3526 	int x;
3527 
3528 	reg = CSR_READ(sc, WMREG_EECD);
3529 
3530 	for (x = nbits; x > 0; x--) {
3531 		if (bits & (1U << (x - 1)))
3532 			reg |= EECD_DI;
3533 		else
3534 			reg &= ~EECD_DI;
3535 		CSR_WRITE(sc, WMREG_EECD, reg);
3536 		delay(2);
3537 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
3538 		delay(2);
3539 		CSR_WRITE(sc, WMREG_EECD, reg);
3540 		delay(2);
3541 	}
3542 }
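
/*
 * Illustrative note: wm_eeprom_sendbits() clocks each bit with the
 * sequence "drive DI, raise SK, drop SK", with 2us between edges, so
 * the EEPROM latches the data bit on the rising edge of SK.
 */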
3543 
3544 /*
3545  * wm_eeprom_recvbits:
3546  *
3547  *	Receive a series of bits from the EEPROM.
3548  */
3549 static void
3550 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
3551 {
3552 	uint32_t reg, val;
3553 	int x;
3554 
3555 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
3556 
3557 	val = 0;
3558 	for (x = nbits; x > 0; x--) {
3559 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
3560 		delay(2);
3561 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
3562 			val |= (1U << (x - 1));
3563 		CSR_WRITE(sc, WMREG_EECD, reg);
3564 		delay(2);
3565 	}
3566 	*valp = val;
3567 }
3568 
3569 /*
3570  * wm_read_eeprom_uwire:
3571  *
3572  *	Read a word from the EEPROM using the MicroWire protocol.
3573  */
3574 static int
3575 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3576 {
3577 	uint32_t reg, val;
3578 	int i;
3579 
3580 	for (i = 0; i < wordcnt; i++) {
3581 		/* Clear SK and DI. */
3582 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
3583 		CSR_WRITE(sc, WMREG_EECD, reg);
3584 
3585 		/* Set CHIP SELECT. */
3586 		reg |= EECD_CS;
3587 		CSR_WRITE(sc, WMREG_EECD, reg);
3588 		delay(2);
3589 
3590 		/* Shift in the READ command. */
3591 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
3592 
3593 		/* Shift in address. */
3594 		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
3595 
3596 		/* Shift out the data. */
3597 		wm_eeprom_recvbits(sc, &val, 16);
3598 		data[i] = val & 0xffff;
3599 
3600 		/* Clear CHIP SELECT. */
3601 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
3602 		CSR_WRITE(sc, WMREG_EECD, reg);
3603 		delay(2);
3604 	}
3605 
3606 	return (0);
3607 }
3608 
3609 /*
3610  * wm_spi_eeprom_ready:
3611  *
3612  *	Wait for a SPI EEPROM to be ready for commands.
3613  */
3614 static int
3615 wm_spi_eeprom_ready(struct wm_softc *sc)
3616 {
3617 	uint32_t val;
3618 	int usec;
3619 
3620 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
3621 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
3622 		wm_eeprom_recvbits(sc, &val, 8);
3623 		if ((val & SPI_SR_RDY) == 0)
3624 			break;
3625 	}
3626 	if (usec >= SPI_MAX_RETRIES) {
3627 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
3628 		return (1);
3629 	}
3630 	return (0);
3631 }
3632 
3633 /*
3634  * wm_read_eeprom_spi:
3635  *
3636  *	Read a word from the EEPROM using the SPI protocol.
3637  */
3638 static int
3639 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3640 {
3641 	uint32_t reg, val;
3642 	int i;
3643 	uint8_t opc;
3644 
3645 	/* Clear SK and CS. */
3646 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
3647 	CSR_WRITE(sc, WMREG_EECD, reg);
3648 	delay(2);
3649 
3650 	if (wm_spi_eeprom_ready(sc))
3651 		return (1);
3652 
3653 	/* Toggle CS to flush commands. */
3654 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
3655 	delay(2);
3656 	CSR_WRITE(sc, WMREG_EECD, reg);
3657 	delay(2);
3658 
3659 	opc = SPI_OPC_READ;
3660 	if (sc->sc_ee_addrbits == 8 && word >= 128)
3661 		opc |= SPI_OPC_A8;
3662 
3663 	wm_eeprom_sendbits(sc, opc, 8);
3664 	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
3665 
3666 	for (i = 0; i < wordcnt; i++) {
3667 		wm_eeprom_recvbits(sc, &val, 16);
3668 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
3669 	}
3670 
3671 	/* Raise CS and clear SK. */
3672 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
3673 	CSR_WRITE(sc, WMREG_EECD, reg);
3674 	delay(2);
3675 
3676 	return (0);
3677 }
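
/*
 * Worked example (illustrative only): SPI parts shift data out
 * most-significant byte first, while EEPROM words are stored
 * little-endian; if the device clocks out 0xBA12, the byte swap above
 * yields 0x12BA, the word as it appears in the EEPROM image.
 */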
3678 
3679 #define EEPROM_CHECKSUM		0xBABA
3680 #define EEPROM_SIZE		0x0040
3681 
3682 /*
3683  * wm_validate_eeprom_checksum
3684  *
3685  * The checksum is defined as the sum of the first 64 16-bit words,
 * which must equal EEPROM_CHECKSUM (0xBABA).
3686  */
3687 static int
3688 wm_validate_eeprom_checksum(struct wm_softc *sc)
3689 {
3690 	uint16_t checksum;
3691 	uint16_t eeprom_data;
3692 	int i;
3693 
3694 	checksum = 0;
3695 
3696 	for (i = 0; i < EEPROM_SIZE; i++) {
3697 		if (wm_read_eeprom(sc, i, 1, &eeprom_data))
3698 			return 1;
3699 		checksum += eeprom_data;
3700 	}
3701 
3702 	if (checksum != (uint16_t) EEPROM_CHECKSUM)
3703 		return 1;
3704 
3705 	return 0;
3706 }
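
/*
 * Sketch (illustrative only, not driver code): vendors typically pick
 * the last word of the 64-word region so that the words sum to
 * EEPROM_CHECKSUM; deriving that word from the first 63 would look
 * roughly like this.  The helper name is hypothetical.
 */
#if 0
static void
wm_eeprom_example_checksum(uint16_t *words)	/* hypothetical helper */
{
	uint16_t sum = 0;
	int i;

	/* Sum the first 63 words of the image to be programmed. */
	for (i = 0; i < EEPROM_SIZE - 1; i++)
		sum += words[i];
	/* Choose the final word so the 64-word total is 0xBABA. */
	words[EEPROM_SIZE - 1] = EEPROM_CHECKSUM - sum;
}
#endif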
3707 
3708 /*
3709  * wm_read_eeprom:
3710  *
3711  *	Read data from the serial EEPROM.
3712  */
3713 static int
3714 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3715 {
3716 	int rv;
3717 
3718 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
3719 		return 1;
3720 
3721 	if (wm_acquire_eeprom(sc))
3722 		return 1;
3723 
3724 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9))
3725 		rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
3726 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
3727 		rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
3728 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
3729 		rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
3730 	else
3731 		rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
3732 
3733 	wm_release_eeprom(sc);
3734 	return rv;
3735 }
3736 
3737 static int
3738 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
3739     uint16_t *data)
3740 {
3741 	int i, eerd = 0;
3742 	int error = 0;
3743 
3744 	for (i = 0; i < wordcnt; i++) {
3745 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
3746 
3747 		CSR_WRITE(sc, WMREG_EERD, eerd);
3748 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
3749 		if (error != 0)
3750 			break;
3751 
3752 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
3753 	}
3754 
3755 	return error;
3756 }
3757 
3758 static int
3759 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
3760 {
3761 	uint32_t attempts = 100000;
3762 	uint32_t i, reg = 0;
3763 	int32_t done = -1;
3764 
3765 	for (i = 0; i < attempts; i++) {
3766 		reg = CSR_READ(sc, rw);
3767 
3768 		if (reg & EERD_DONE) {
3769 			done = 0;
3770 			break;
3771 		}
3772 		delay(5);
3773 	}
3774 
3775 	return done;
3776 }
3777 
3778 /*
3779  * wm_add_rxbuf:
3780  *
3781  *	Add a receive buffer to the indicated descriptor.
3782  */
3783 static int
3784 wm_add_rxbuf(struct wm_softc *sc, int idx)
3785 {
3786 	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
3787 	struct mbuf *m;
3788 	int error;
3789 
3790 	MGETHDR(m, M_DONTWAIT, MT_DATA);
3791 	if (m == NULL)
3792 		return (ENOBUFS);
3793 
3794 	MCLGET(m, M_DONTWAIT);
3795 	if ((m->m_flags & M_EXT) == 0) {
3796 		m_freem(m);
3797 		return (ENOBUFS);
3798 	}
3799 
3800 	if (rxs->rxs_mbuf != NULL)
3801 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3802 
3803 	rxs->rxs_mbuf = m;
3804 
3805 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3806 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3807 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
3808 	if (error) {
3809 		/* XXX XXX XXX */
3810 		aprint_error_dev(sc->sc_dev,
3811 		    "unable to load rx DMA map %d, error = %d\n",
3812 		    idx, error);
3813 		panic("wm_add_rxbuf");
3814 	}
3815 
3816 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3817 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3818 
3819 	WM_INIT_RXDESC(sc, idx);
3820 
3821 	return (0);
3822 }
3823 
3824 /*
3825  * wm_set_ral:
3826  *
3827  *	Set an entry in the receive address list.
3828  */
3829 static void
3830 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3831 {
3832 	uint32_t ral_lo, ral_hi;
3833 
3834 	if (enaddr != NULL) {
3835 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3836 		    (enaddr[3] << 24);
3837 		ral_hi = enaddr[4] | (enaddr[5] << 8);
3838 		ral_hi |= RAL_AV;
3839 	} else {
3840 		ral_lo = 0;
3841 		ral_hi = 0;
3842 	}
3843 
3844 	if (sc->sc_type >= WM_T_82544) {
3845 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
3846 		    ral_lo);
3847 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
3848 		    ral_hi);
3849 	} else {
3850 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
3851 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
3852 	}
3853 }
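
/*
 * Worked example (illustrative only): for the station address
 * 00:11:22:33:44:55 the bytes are packed little-endian, so the
 * function above programs
 *
 *	RAL_LO = 0x33221100
 *	RAL_HI = RAL_AV | 0x00005544
 *
 * with RAL_AV marking the entry valid.
 */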
3854 
3855 /*
3856  * wm_mchash:
3857  *
3858  *	Compute the hash of the multicast address for the 4096-bit
3859  *	multicast filter.
3860  */
3861 static uint32_t
3862 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3863 {
3864 	static const int lo_shift[4] = { 4, 3, 2, 0 };
3865 	static const int hi_shift[4] = { 4, 5, 6, 8 };
3866 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3867 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3868 	uint32_t hash;
3869 
3870 	if (sc->sc_type == WM_T_ICH8) {
3871 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3872 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3873 		return (hash & 0x3ff);
3874 	}
3875 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3876 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3877 
3878 	return (hash & 0xfff);
3879 }
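
/*
 * Sketch (illustrative only, not driver code): wm_set_filter() below
 * consumes the hash by splitting it into an MTA register index and a
 * bit position within that register; on non-ICH parts that looks
 * roughly like this.  The helper name and "enaddr" are hypothetical.
 */
#if 0
static void
wm_mta_example(struct wm_softc *sc, const uint8_t *enaddr)
{
	uint32_t hash = wm_mchash(sc, enaddr);
	uint32_t reg = (hash >> 5) & 0x7f;	/* MTA register index */
	uint32_t bit = hash & 0x1f;		/* bit within that register */

	/* OR the filter bit into the selected 32-bit MTA register. */
	CSR_WRITE(sc, WMREG_CORDOVA_MTA + (reg << 2),
	    CSR_READ(sc, WMREG_CORDOVA_MTA + (reg << 2)) | (1U << bit));
}
#endif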
3880 
3881 /*
3882  * wm_set_filter:
3883  *
3884  *	Set up the receive filter.
3885  */
3886 static void
3887 wm_set_filter(struct wm_softc *sc)
3888 {
3889 	struct ethercom *ec = &sc->sc_ethercom;
3890 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3891 	struct ether_multi *enm;
3892 	struct ether_multistep step;
3893 	bus_addr_t mta_reg;
3894 	uint32_t hash, reg, bit;
3895 	int i, size;
3896 
3897 	if (sc->sc_type >= WM_T_82544)
3898 		mta_reg = WMREG_CORDOVA_MTA;
3899 	else
3900 		mta_reg = WMREG_MTA;
3901 
3902 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3903 
3904 	if (ifp->if_flags & IFF_BROADCAST)
3905 		sc->sc_rctl |= RCTL_BAM;
3906 	if (ifp->if_flags & IFF_PROMISC) {
3907 		sc->sc_rctl |= RCTL_UPE;
3908 		goto allmulti;
3909 	}
3910 
3911 	/*
3912 	 * Set the station address in the first RAL slot, and
3913 	 * clear the remaining slots.
3914 	 */
3915 	if (sc->sc_type == WM_T_ICH8)
3916 		size = WM_ICH8_RAL_TABSIZE;
3917 	else
3918 		size = WM_RAL_TABSIZE;
3919 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3920 	for (i = 1; i < size; i++)
3921 		wm_set_ral(sc, NULL, i);
3922 
3923 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9))
3924 		size = WM_ICH8_MC_TABSIZE;
3925 	else
3926 		size = WM_MC_TABSIZE;
3927 	/* Clear out the multicast table. */
3928 	for (i = 0; i < size; i++)
3929 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
3930 
3931 	ETHER_FIRST_MULTI(step, ec, enm);
3932 	while (enm != NULL) {
3933 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3934 			/*
3935 			 * We must listen to a range of multicast addresses.
3936 			 * For now, just accept all multicasts, rather than
3937 			 * trying to set only those filter bits needed to match
3938 			 * the range.  (At this time, the only use of address
3939 			 * ranges is for IP multicast routing, for which the
3940 			 * range is big enough to require all bits set.)
3941 			 */
3942 			goto allmulti;
3943 		}
3944 
3945 		hash = wm_mchash(sc, enm->enm_addrlo);
3946 
3947 		reg = (hash >> 5);
3948 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9))
3949 			reg &= 0x1f;
3950 		else
3951 			reg &= 0x7f;
3952 		bit = hash & 0x1f;
3953 
3954 		hash = CSR_READ(sc, mta_reg + (reg << 2));
3955 		hash |= 1U << bit;
3956 
3957 		/* XXX Hardware bug?? */
3958 		if (sc->sc_type == WM_T_82544 && (reg & 0x1) == 1) {
3959 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3960 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3961 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3962 		} else
3963 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3964 
3965 		ETHER_NEXT_MULTI(step, enm);
3966 	}
3967 
3968 	ifp->if_flags &= ~IFF_ALLMULTI;
3969 	goto setit;
3970 
3971  allmulti:
3972 	ifp->if_flags |= IFF_ALLMULTI;
3973 	sc->sc_rctl |= RCTL_MPE;
3974 
3975  setit:
3976 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3977 }
3978 
3979 /*
3980  * wm_tbi_mediainit:
3981  *
3982  *	Initialize media for use on 1000BASE-X devices.
3983  */
3984 static void
3985 wm_tbi_mediainit(struct wm_softc *sc)
3986 {
3987 	const char *sep = "";
3988 
3989 	if (sc->sc_type < WM_T_82543)
3990 		sc->sc_tipg = TIPG_WM_DFLT;
3991 	else
3992 		sc->sc_tipg = TIPG_LG_DFLT;
3993 
3994 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
3995 	    wm_tbi_mediastatus);
3996 
3997 	/*
3998 	 * SWD Pins:
3999 	 *
4000 	 *	0 = Link LED (output)
4001 	 *	1 = Loss Of Signal (input)
4002 	 */
4003 	sc->sc_ctrl |= CTRL_SWDPIO(0);
4004 	sc->sc_ctrl &= ~CTRL_SWDPIO(1);
4005 
4006 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4007 
4008 #define	ADD(ss, mm, dd)							\
4009 do {									\
4010 	aprint_normal("%s%s", sep, ss);					\
4011 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
4012 	sep = ", ";							\
4013 } while (/*CONSTCOND*/0)
4014 
4015 	aprint_normal_dev(sc->sc_dev, "");
4016 	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
4017 	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
4018 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
4019 	aprint_normal("\n");
4020 
4021 #undef ADD
4022 
4023 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
4024 }
4025 
4026 /*
4027  * wm_tbi_mediastatus:	[ifmedia interface function]
4028  *
4029  *	Get the current interface media status on a 1000BASE-X device.
4030  */
4031 static void
4032 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4033 {
4034 	struct wm_softc *sc = ifp->if_softc;
4035 	uint32_t ctrl;
4036 
4037 	ifmr->ifm_status = IFM_AVALID;
4038 	ifmr->ifm_active = IFM_ETHER;
4039 
4040 	if (sc->sc_tbi_linkup == 0) {
4041 		ifmr->ifm_active |= IFM_NONE;
4042 		return;
4043 	}
4044 
4045 	ifmr->ifm_status |= IFM_ACTIVE;
4046 	ifmr->ifm_active |= IFM_1000_SX;
4047 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
4048 		ifmr->ifm_active |= IFM_FDX;
4049 	ctrl = CSR_READ(sc, WMREG_CTRL);
4050 	if (ctrl & CTRL_RFCE)
4051 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
4052 	if (ctrl & CTRL_TFCE)
4053 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
4054 }
4055 
4056 /*
4057  * wm_tbi_mediachange:	[ifmedia interface function]
4058  *
4059  *	Set hardware to newly-selected media on a 1000BASE-X device.
4060  */
4061 static int
4062 wm_tbi_mediachange(struct ifnet *ifp)
4063 {
4064 	struct wm_softc *sc = ifp->if_softc;
4065 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4066 	uint32_t status;
4067 	int i;
4068 
4069 	sc->sc_txcw = ife->ifm_data;
4070 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x on entry\n",
4071 		    device_xname(sc->sc_dev),sc->sc_txcw));
4072 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
4073 	    (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
4074 		sc->sc_txcw |= ANAR_X_PAUSE_SYM | ANAR_X_PAUSE_ASYM;
4075 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4076 		sc->sc_txcw |= TXCW_ANE;
4077 	} else {
4078 		/* If autonegotiation is turned off, force link up and turn on full duplex. */
4079 		sc->sc_txcw &= ~TXCW_ANE;
4080 		sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
4081 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4082 		delay(1000);
4083 	}
4084 
4085 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
4086 		    device_xname(sc->sc_dev),sc->sc_txcw));
4087 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
4088 	delay(10000);
4089 
4090 	/* NOTE: CTRL will update TFCE and RFCE automatically. */
4091 
4092 	sc->sc_tbi_anstate = 0;
4093 
4094 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
4095 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
4096 
4097 	/*
4098 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
4099 	 * optics detect a signal, 0 if they don't.
4100 	 */
4101 	if (((i != 0) && (sc->sc_type >= WM_T_82544)) || (i == 0)) {
4102 		/* Have signal; wait for the link to come up. */
4103 
4104 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4105 			/*
4106 			 * Reset the link, and let autonegotiation do its thing
4107 			 */
4108 			sc->sc_ctrl |= CTRL_LRST;
4109 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4110 			delay(1000);
4111 			sc->sc_ctrl &= ~CTRL_LRST;
4112 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4113 			delay(1000);
4114 		}
4115 
4116 		for (i = 0; i < 50; i++) {
4117 			delay(10000);
4118 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
4119 				break;
4120 		}
4121 
4122 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
4123 			    device_xname(sc->sc_dev),i));
4124 
4125 		status = CSR_READ(sc, WMREG_STATUS);
4126 		DPRINTF(WM_DEBUG_LINK,
4127 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
4128 			device_xname(sc->sc_dev),status, STATUS_LU));
4129 		if (status & STATUS_LU) {
4130 			/* Link is up. */
4131 			DPRINTF(WM_DEBUG_LINK,
4132 			    ("%s: LINK: set media -> link up %s\n",
4133 			    device_xname(sc->sc_dev),
4134 			    (status & STATUS_FD) ? "FDX" : "HDX"));
4135 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4136 			sc->sc_fcrtl &= ~FCRTL_XONE;
4137 			if (status & STATUS_FD)
4138 				sc->sc_tctl |=
4139 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4140 			else
4141 				sc->sc_tctl |=
4142 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4143 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
4144 				sc->sc_fcrtl |= FCRTL_XONE;
4145 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4146 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
4147 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
4148 				      sc->sc_fcrtl);
4149 			sc->sc_tbi_linkup = 1;
4150 		} else {
4151 			/* Link is down. */
4152 			DPRINTF(WM_DEBUG_LINK,
4153 			    ("%s: LINK: set media -> link down\n",
4154 			    device_xname(sc->sc_dev)));
4155 			sc->sc_tbi_linkup = 0;
4156 		}
4157 	} else {
4158 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
4159 		    device_xname(sc->sc_dev)));
4160 		sc->sc_tbi_linkup = 0;
4161 	}
4162 
4163 	wm_tbi_set_linkled(sc);
4164 
4165 	return (0);
4166 }
4167 
4168 /*
4169  * wm_tbi_set_linkled:
4170  *
4171  *	Update the link LED on 1000BASE-X devices.
4172  */
4173 static void
4174 wm_tbi_set_linkled(struct wm_softc *sc)
4175 {
4176 
4177 	if (sc->sc_tbi_linkup)
4178 		sc->sc_ctrl |= CTRL_SWDPIN(0);
4179 	else
4180 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
4181 
4182 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4183 }
4184 
4185 /*
4186  * wm_tbi_check_link:
4187  *
4188  *	Check the link on 1000BASE-X devices.
4189  */
4190 static void
4191 wm_tbi_check_link(struct wm_softc *sc)
4192 {
4193 	uint32_t rxcw, ctrl, status;
4194 
4195 	if (sc->sc_tbi_anstate == 0)
4196 		return;
4197 	else if (sc->sc_tbi_anstate > 1) {
4198 		DPRINTF(WM_DEBUG_LINK,
4199 		    ("%s: LINK: anstate %d\n", device_xname(sc->sc_dev),
4200 		    sc->sc_tbi_anstate));
4201 		sc->sc_tbi_anstate--;
4202 		return;
4203 	}
4204 
4205 	sc->sc_tbi_anstate = 0;
4206 
4207 	rxcw = CSR_READ(sc, WMREG_RXCW);
4208 	ctrl = CSR_READ(sc, WMREG_CTRL);
4209 	status = CSR_READ(sc, WMREG_STATUS);
4210 
4211 	if ((status & STATUS_LU) == 0) {
4212 		DPRINTF(WM_DEBUG_LINK,
4213 		    ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
4214 		sc->sc_tbi_linkup = 0;
4215 	} else {
4216 		DPRINTF(WM_DEBUG_LINK,
4217 		    ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
4218 		    (status & STATUS_FD) ? "FDX" : "HDX"));
4219 		sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4220 		sc->sc_fcrtl &= ~FCRTL_XONE;
4221 		if (status & STATUS_FD)
4222 			sc->sc_tctl |=
4223 			    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4224 		else
4225 			sc->sc_tctl |=
4226 			    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4227 		if (ctrl & CTRL_TFCE)
4228 			sc->sc_fcrtl |= FCRTL_XONE;
4229 		CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4230 		CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
4231 			      WMREG_OLD_FCRTL : WMREG_FCRTL,
4232 			      sc->sc_fcrtl);
4233 		sc->sc_tbi_linkup = 1;
4234 	}
4235 
4236 	wm_tbi_set_linkled(sc);
4237 }
4238 
4239 /*
4240  * wm_gmii_reset:
4241  *
4242  *	Reset the PHY.
4243  */
4244 static void
4245 wm_gmii_reset(struct wm_softc *sc)
4246 {
4247 	uint32_t reg;
4248 	int func = 0; /* XXX gcc */
4249 
4250 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)) {
4251 		if (wm_get_swfwhw_semaphore(sc))
4252 			return;
4253 	}
4254 	if (sc->sc_type == WM_T_80003) {
4255 		func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
4256 		if (wm_get_swfw_semaphore(sc,
4257 		    func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
4258 			return;
4259 	}
4260 	if (sc->sc_type >= WM_T_82544) {
4261 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
4262 		delay(20000);
4263 
4264 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4265 		delay(20000);
4266 	} else {
4267 		/*
4268 		 * With 82543, we need to force speed and duplex on the MAC
4269 		 * equal to what the PHY speed and duplex configuration is.
4270 		 * In addition, we need to perform a hardware reset on the PHY
4271 		 * to take it out of reset.
4272 		 */
4273 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
4274 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4275 
4276 		/* The PHY reset pin is active-low. */
4277 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
4278 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
4279 		    CTRL_EXT_SWDPIN(4));
4280 		reg |= CTRL_EXT_SWDPIO(4);
4281 
4282 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
4283 		delay(10);
4284 
4285 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4286 		delay(10000);
4287 
4288 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
4289 		delay(10);
4290 #if 0
4291 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
4292 #endif
4293 	}
4294 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9))
4295 		wm_put_swfwhw_semaphore(sc);
4296 	if (sc->sc_type == WM_T_80003)
4297 		wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4298 }
4299 
4300 /*
4301  * wm_gmii_mediainit:
4302  *
4303  *	Initialize media for use on 1000BASE-T devices.
4304  */
4305 static void
4306 wm_gmii_mediainit(struct wm_softc *sc)
4307 {
4308 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4309 
4310 	/* We have MII. */
4311 	sc->sc_flags |= WM_F_HAS_MII;
4312 
4313 	if (sc->sc_type >= WM_T_80003)
4314 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
4315 	else
4316 		sc->sc_tipg = TIPG_1000T_DFLT;
4317 
4318 	/*
4319 	 * Let the chip set speed/duplex on its own based on
4320 	 * signals from the PHY.
4321 	 * XXXbouyer - I'm not sure this is right for the 80003,
4322 	 * the em driver only sets CTRL_SLU here - but it seems to work.
4323 	 */
4324 	sc->sc_ctrl |= CTRL_SLU;
4325 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4326 
4327 	/* Initialize our media structures and probe the GMII. */
4328 	sc->sc_mii.mii_ifp = ifp;
4329 
4330 	if (sc->sc_type >= WM_T_80003) {
4331 		sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
4332 		sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
4333 	} else if (sc->sc_type >= WM_T_82544) {
4334 		sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
4335 		sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
4336 	} else {
4337 		sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
4338 		sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
4339 	}
4340 	sc->sc_mii.mii_statchg = wm_gmii_statchg;
4341 
4342 	wm_gmii_reset(sc);
4343 
4344 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
4345 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
4346 	    wm_gmii_mediastatus);
4347 
4348 	mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
4349 	    MII_OFFSET_ANY, MIIF_DOPAUSE);
4350 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
4351 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
4352 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
4353 	} else
4354 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
4355 }
4356 
4357 /*
4358  * wm_gmii_mediastatus:	[ifmedia interface function]
4359  *
4360  *	Get the current interface media status on a 1000BASE-T device.
4361  */
4362 static void
4363 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4364 {
4365 	struct wm_softc *sc = ifp->if_softc;
4366 
4367 	ether_mediastatus(ifp, ifmr);
4368 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) |
4369 			   sc->sc_flowflags;
4370 }
4371 
4372 /*
4373  * wm_gmii_mediachange:	[ifmedia interface function]
4374  *
4375  *	Set hardware to newly-selected media on a 1000BASE-T device.
4376  */
4377 static int
4378 wm_gmii_mediachange(struct ifnet *ifp)
4379 {
4380 	struct wm_softc *sc = ifp->if_softc;
4381 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4382 	int rc;
4383 
4384 	if ((ifp->if_flags & IFF_UP) == 0)
4385 		return 0;
4386 
4387 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
4388 	sc->sc_ctrl |= CTRL_SLU;
4389 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
4390 	    || (sc->sc_type > WM_T_82543)) {
4391 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
4392 	} else {
4393 		sc->sc_ctrl &= ~CTRL_ASDE;
4394 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
4395 		if (ife->ifm_media & IFM_FDX)
4396 			sc->sc_ctrl |= CTRL_FD;
4397 		switch (IFM_SUBTYPE(ife->ifm_media)) {
4398 		case IFM_10_T:
4399 			sc->sc_ctrl |= CTRL_SPEED_10;
4400 			break;
4401 		case IFM_100_TX:
4402 			sc->sc_ctrl |= CTRL_SPEED_100;
4403 			break;
4404 		case IFM_1000_T:
4405 			sc->sc_ctrl |= CTRL_SPEED_1000;
4406 			break;
4407 		default:
4408 			panic("wm_gmii_mediachange: bad media 0x%x",
4409 			    ife->ifm_media);
4410 		}
4411 	}
4412 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4413 	if (sc->sc_type <= WM_T_82543)
4414 		wm_gmii_reset(sc);
4415 
4416 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
4417 		return 0;
4418 	return rc;
4419 }
4420 
4421 #define	MDI_IO		CTRL_SWDPIN(2)
4422 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
4423 #define	MDI_CLK		CTRL_SWDPIN(3)
4424 
4425 static void
4426 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
4427 {
4428 	uint32_t i, v;
4429 
4430 	v = CSR_READ(sc, WMREG_CTRL);
4431 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
4432 	v |= MDI_DIR | CTRL_SWDPIO(3);
4433 
4434 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
4435 		if (data & i)
4436 			v |= MDI_IO;
4437 		else
4438 			v &= ~MDI_IO;
4439 		CSR_WRITE(sc, WMREG_CTRL, v);
4440 		delay(10);
4441 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4442 		delay(10);
4443 		CSR_WRITE(sc, WMREG_CTRL, v);
4444 		delay(10);
4445 	}
4446 }
4447 
4448 static uint32_t
4449 i82543_mii_recvbits(struct wm_softc *sc)
4450 {
4451 	uint32_t v, i, data = 0;
4452 
4453 	v = CSR_READ(sc, WMREG_CTRL);
4454 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
4455 	v |= CTRL_SWDPIO(3);
4456 
4457 	CSR_WRITE(sc, WMREG_CTRL, v);
4458 	delay(10);
4459 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4460 	delay(10);
4461 	CSR_WRITE(sc, WMREG_CTRL, v);
4462 	delay(10);
4463 
4464 	for (i = 0; i < 16; i++) {
4465 		data <<= 1;
4466 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4467 		delay(10);
4468 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
4469 			data |= 1;
4470 		CSR_WRITE(sc, WMREG_CTRL, v);
4471 		delay(10);
4472 	}
4473 
4474 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4475 	delay(10);
4476 	CSR_WRITE(sc, WMREG_CTRL, v);
4477 	delay(10);
4478 
4479 	return (data);
4480 }
4481 
4482 #undef MDI_IO
4483 #undef MDI_DIR
4484 #undef MDI_CLK
4485 
4486 /*
4487  * wm_gmii_i82543_readreg:	[mii interface function]
4488  *
4489  *	Read a PHY register on the GMII (i82543 version).
4490  */
4491 static int
4492 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
4493 {
4494 	struct wm_softc *sc = device_private(self);
4495 	int rv;
4496 
4497 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
4498 	i82543_mii_sendbits(sc, reg | (phy << 5) |
4499 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
4500 	rv = i82543_mii_recvbits(sc) & 0xffff;
4501 
4502 	DPRINTF(WM_DEBUG_GMII,
4503 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
4504 	    device_xname(sc->sc_dev), phy, reg, rv));
4505 
4506 	return (rv);
4507 }
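
/*
 * Illustrative note: the 14 bits shifted out above form the MII
 * management frame header, from MSB to LSB:
 *
 *	start (2 bits) | read opcode (2 bits) | PHY addr (5) | reg addr (5)
 *
 * preceded by a 32-bit preamble of all ones.
 */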
4508 
4509 /*
4510  * wm_gmii_i82543_writereg:	[mii interface function]
4511  *
4512  *	Write a PHY register on the GMII (i82543 version).
4513  */
4514 static void
4515 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
4516 {
4517 	struct wm_softc *sc = device_private(self);
4518 
4519 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
4520 	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
4521 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
4522 	    (MII_COMMAND_START << 30), 32);
4523 }
4524 
4525 /*
4526  * wm_gmii_i82544_readreg:	[mii interface function]
4527  *
4528  *	Read a PHY register on the GMII.
4529  */
4530 static int
4531 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
4532 {
4533 	struct wm_softc *sc = device_private(self);
4534 	uint32_t mdic = 0;
4535 	int i, rv;
4536 
4537 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
4538 	    MDIC_REGADD(reg));
4539 
4540 	for (i = 0; i < 320; i++) {
4541 		mdic = CSR_READ(sc, WMREG_MDIC);
4542 		if (mdic & MDIC_READY)
4543 			break;
4544 		delay(10);
4545 	}
4546 
4547 	if ((mdic & MDIC_READY) == 0) {
4548 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
4549 		    device_xname(sc->sc_dev), phy, reg);
4550 		rv = 0;
4551 	} else if (mdic & MDIC_E) {
4552 #if 0 /* This is normal if no PHY is present. */
4553 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
4554 		    device_xname(sc->sc_dev), phy, reg);
4555 #endif
4556 		rv = 0;
4557 	} else {
4558 		rv = MDIC_DATA(mdic);
4559 		if (rv == 0xffff)
4560 			rv = 0;
4561 	}
4562 
4563 	return (rv);
4564 }
4565 
4566 /*
4567  * wm_gmii_i82544_writereg:	[mii interface function]
4568  *
4569  *	Write a PHY register on the GMII.
4570  */
4571 static void
4572 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
4573 {
4574 	struct wm_softc *sc = device_private(self);
4575 	uint32_t mdic = 0;
4576 	int i;
4577 
4578 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
4579 	    MDIC_REGADD(reg) | MDIC_DATA(val));
4580 
4581 	for (i = 0; i < 320; i++) {
4582 		mdic = CSR_READ(sc, WMREG_MDIC);
4583 		if (mdic & MDIC_READY)
4584 			break;
4585 		delay(10);
4586 	}
4587 
4588 	if ((mdic & MDIC_READY) == 0)
4589 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
4590 		    device_xname(sc->sc_dev), phy, reg);
4591 	else if (mdic & MDIC_E)
4592 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
4593 		    device_xname(sc->sc_dev), phy, reg);
4594 }
4595 
4596 /*
4597  * wm_gmii_i80003_readreg:	[mii interface function]
4598  *
4599  *	Read a PHY register on the Kumeran interface.
4600  * This could be handled by the PHY layer if we didn't have to lock the
4601  * resource ...
4602  */
4603 static int
4604 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
4605 {
4606 	struct wm_softc *sc = device_private(self);
4607 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4608 	int rv;
4609 
4610 	if (phy != 1) /* only one PHY on kumeran bus */
4611 		return 0;
4612 
4613 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
4614 		return 0;
4615 
4616 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
4617 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
4618 		    reg >> GG82563_PAGE_SHIFT);
4619 	} else {
4620 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
4621 		    reg >> GG82563_PAGE_SHIFT);
4622 	}
4623 
4624 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
4625 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4626 	return (rv);
4627 }
4628 
4629 /*
4630  * wm_gmii_i80003_writereg:	[mii interface function]
4631  *
4632  *	Write a PHY register on the Kumeran interface.
4633  * This could be handled by the PHY layer if we didn't have to lock the
4634  * resource ...
4635  */
4636 static void
4637 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
4638 {
4639 	struct wm_softc *sc = device_private(self);
4640 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4641 
4642 	if (phy != 1) /* only one PHY on kumeran bus */
4643 		return;
4644 
4645 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
4646 		return;
4647 
4648 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
4649 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
4650 		    reg >> GG82563_PAGE_SHIFT);
4651 	} else {
4652 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
4653 		    reg >> GG82563_PAGE_SHIFT);
4654 	}
4655 
4656 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
4657 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4658 }
4659 
4660 /*
4661  * wm_gmii_statchg:	[mii interface function]
4662  *
4663  *	Callback from MII layer when media changes.
4664  */
4665 static void
4666 wm_gmii_statchg(device_t self)
4667 {
4668 	struct wm_softc *sc = device_private(self);
4669 	struct mii_data *mii = &sc->sc_mii;
4670 
4671 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
4672 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4673 	sc->sc_fcrtl &= ~FCRTL_XONE;
4674 
4675 	/*
4676 	 * Get flow control negotiation result.
4677 	 */
4678 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
4679 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
4680 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
4681 		mii->mii_media_active &= ~IFM_ETH_FMASK;
4682 	}
4683 
4684 	if (sc->sc_flowflags & IFM_FLOW) {
4685 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
4686 			sc->sc_ctrl |= CTRL_TFCE;
4687 			sc->sc_fcrtl |= FCRTL_XONE;
4688 		}
4689 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
4690 			sc->sc_ctrl |= CTRL_RFCE;
4691 	}
4692 
4693 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
4694 		DPRINTF(WM_DEBUG_LINK,
4695 		    ("%s: LINK: statchg: FDX\n", device_xname(sc->sc_dev)));
4696 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4697 	} else  {
4698 		DPRINTF(WM_DEBUG_LINK,
4699 		    ("%s: LINK: statchg: HDX\n", device_xname(sc->sc_dev)));
4700 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4701 	}
4702 
4703 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4704 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4705 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
4706 						 : WMREG_FCRTL, sc->sc_fcrtl);
4707 	if (sc->sc_type >= WM_T_80003) {
4708 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
4709 		case IFM_1000_T:
4710 			wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
4711 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
4712 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
4713 			break;
4714 		default:
4715 			wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
4716 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
4717 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
4718 			break;
4719 		}
4720 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4721 	}
4722 }
4723 
4724 /*
4725  * wm_kmrn_i80003_readreg:
4726  *
4727  *	Read a Kumeran register.
4728  */
4729 static int
4730 wm_kmrn_i80003_readreg(struct wm_softc *sc, int reg)
4731 {
4732 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4733 	int rv;
4734 
4735 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
4736 		return 0;
4737 
4738 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
4739 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
4740 	    KUMCTRLSTA_REN);
4741 	delay(2);
4742 
4743 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
4744 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4745 	return (rv);
4746 }
4747 
4748 /*
4749  * wm_kmrn_i80003_writereg:
4750  *	Write a Kumeran register.
4751  *	Write a kumeran register
4752  */
4753 static void
4754 wm_kmrn_i80003_writereg(struct wm_softc *sc, int reg, int val)
4755 {
4756 	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4757 
4758 	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
4759 		return;
4760 
4761 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
4762 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
4763 	    (val & KUMCTRLSTA_MASK));
4764 	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4765 }
4766 
4767 static int
4768 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
4769 {
4770 	uint32_t eecd = 0;
4771 
4772 	if (sc->sc_type == WM_T_82573) {
4773 		eecd = CSR_READ(sc, WMREG_EECD);
4774 
4775 		/* Isolate bits 15 & 16 */
4776 		eecd = ((eecd >> 15) & 0x03);
4777 
4778 		/* If both bits are set, device is Flash type */
4779 		if (eecd == 0x03) {
4780 			return 0;
4781 		}
4782 	}
4783 	return 1;
4784 }
4785 
4786 static int
4787 wm_get_swsm_semaphore(struct wm_softc *sc)
4788 {
4789 	int32_t timeout;
4790 	uint32_t swsm;
4791 
4792 	/* Get the FW semaphore. */
4793 	timeout = 1000 + 1; /* XXX */
4794 	while (timeout) {
4795 		swsm = CSR_READ(sc, WMREG_SWSM);
4796 		swsm |= SWSM_SWESMBI;
4797 		CSR_WRITE(sc, WMREG_SWSM, swsm);
4798 		/* If we managed to set the bit, we got the semaphore. */
4799 		swsm = CSR_READ(sc, WMREG_SWSM);
4800 		if (swsm & SWSM_SWESMBI)
4801 			break;
4802 
4803 		delay(50);
4804 		timeout--;
4805 	}
4806 
4807 	if (timeout == 0) {
4808 		aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n");
4809 		/* Release semaphores */
4810 		wm_put_swsm_semaphore(sc);
4811 		return 1;
4812 	}
4813 	return 0;
4814 }
4815 
4816 static void
4817 wm_put_swsm_semaphore(struct wm_softc *sc)
4818 {
4819 	uint32_t swsm;
4820 
4821 	swsm = CSR_READ(sc, WMREG_SWSM);
4822 	swsm &= ~(SWSM_SWESMBI);
4823 	CSR_WRITE(sc, WMREG_SWSM, swsm);
4824 }
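
/*
 * Sketch (illustrative only, not driver code): the SWSM semaphore is a
 * test-and-set bit -- a caller that gets 0 back owns it and must
 * release it when done.  The helper name is hypothetical.
 */
#if 0
static void
wm_swsm_example(struct wm_softc *sc)	/* hypothetical helper */
{

	if (wm_get_swsm_semaphore(sc) == 0) {
		/* ... touch the shared EEPROM/firmware state ... */
		wm_put_swsm_semaphore(sc);
	}
}
#endif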
4825 
4826 static int
4827 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
4828 {
4829 	uint32_t swfw_sync;
4830 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
4831 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
4832 	int timeout;
4833 
4834 	for (timeout = 0; timeout < 200; timeout++) {
4835 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
4836 			if (wm_get_swsm_semaphore(sc))
4837 				return 1;
4838 		}
4839 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
4840 		if ((swfw_sync & (swmask | fwmask)) == 0) {
4841 			swfw_sync |= swmask;
4842 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
4843 			if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4844 				wm_put_swsm_semaphore(sc);
4845 			return 0;
4846 		}
4847 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4848 			wm_put_swsm_semaphore(sc);
4849 		delay(5000);
4850 	}
4851 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
4852 	    device_xname(sc->sc_dev), mask, swfw_sync);
4853 	return 1;
4854 }
4855 
4856 static void
4857 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
4858 {
4859 	uint32_t swfw_sync;
4860 
4861 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
4862 		while (wm_get_swsm_semaphore(sc) != 0)
4863 			continue;
4864 	}
4865 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
4866 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
4867 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
4868 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4869 		wm_put_swsm_semaphore(sc);
4870 }
4871 
4872 static int
4873 wm_get_swfwhw_semaphore(struct wm_softc *sc)
4874 {
4875 	uint32_t ext_ctrl;
4876 	int timeout;
4877 
4878 	for (timeout = 0; timeout < 200; timeout++) {
4879 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
4880 		ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
4881 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
4882 
4883 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
4884 		if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
4885 			return 0;
4886 		delay(5000);
4887 	}
4888 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
4889 	    device_xname(sc->sc_dev), ext_ctrl);
4890 	return 1;
4891 }
4892 
4893 static void
4894 wm_put_swfwhw_semaphore(struct wm_softc *sc)
4895 {
4896 	uint32_t ext_ctrl;
4897 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
4898 	ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
4899 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
4900 }
4901 
4902 /******************************************************************************
4903  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
4904  * register.
4905  *
4906  * sc - Struct containing variables accessed by shared code
4907  * offset - offset of word in the EEPROM to read
4908  * words - number of words to read
4909  * data - array in which the words read from the EEPROM are returned
4910  *****************************************************************************/
4911 static int
4912 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
4913 {
4914     int32_t  error = 0;
4915     uint32_t flash_bank = 0;
4916     uint32_t act_offset = 0;
4917     uint32_t bank_offset = 0;
4918     uint16_t word = 0;
4919     uint16_t i = 0;
4920 
4921     /* We need to know which is the valid flash bank.  In the event
4922      * that we didn't allocate eeprom_shadow_ram, we may not be
4923      * managing flash_bank.  So it cannot be trusted and needs
4924      * to be updated with each read.
4925      */
4926     /* Value of bit 22 corresponds to the flash bank we're on. */
4927     flash_bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
4928 
4929     /* Adjust offset appropriately if we're on bank 1 - adjust for word size */
4930     bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
4931 
4932     error = wm_get_swfwhw_semaphore(sc);
4933     if (error)
4934         return error;
4935 
4936     for (i = 0; i < words; i++) {
4937             /* The NVM part needs a byte offset, hence * 2 */
4938             act_offset = bank_offset + ((offset + i) * 2);
4939             error = wm_read_ich8_word(sc, act_offset, &word);
4940             if (error)
4941                 break;
4942             data[i] = word;
4943     }
4944 
4945     wm_put_swfwhw_semaphore(sc);
4946     return error;
4947 }
4948 
4949 /******************************************************************************
4950  * This function does initial flash setup so that a new read/write/erase cycle
4951  * can be started.
4952  *
4953  * sc - The pointer to the hw structure
4954  ****************************************************************************/
4955 static int32_t
4956 wm_ich8_cycle_init(struct wm_softc *sc)
4957 {
4958     uint16_t hsfsts;
4959     int32_t error = 1;
4960     int32_t i     = 0;
4961 
4962     hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
4963 
4964     /* Check the Flash Descriptor Valid bit in HW status. */
4965     if ((hsfsts & HSFSTS_FLDVAL) == 0) {
4966         return error;
4967     }
4968 
4969     /* Clear FCERR and DAEL in HW status by writing a 1 to each. */
4971     hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
4972 
4973     ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
4974 
4975     /* Either we should have a hardware SPI cycle-in-progress bit to check
4976      * against in order to start a new cycle, or the FDONE bit should be
4977      * changed in the hardware so that it is 1 after hardware reset, which
4978      * can then be used as an indication of whether a cycle is in progress
4979      * or has been completed.  We should also have some software semaphore
4980      * mechanism to guard FDONE or the cycle-in-progress bit so that two
4981      * threads' accesses to those bits can be serialized, or some way to
4982      * keep two threads from starting a cycle at the same time. */
4983 
4984     if ((hsfsts & HSFSTS_FLINPRO) == 0) {
4985         /* There is no cycle running at present, so we can start a cycle */
4986         /* Begin by setting Flash Cycle Done. */
4987         hsfsts |= HSFSTS_DONE;
4988         ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
4989         error = 0;
4990     } else {
4991         /* Otherwise poll for some time so the current cycle has a chance
4992          * to end before giving up. */
4993         for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
4994             hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
4995             if ((hsfsts & HSFSTS_FLINPRO) == 0) {
4996                 error = 0;
4997                 break;
4998             }
4999             delay(1);
5000         }
5001         if (error == 0) {
5002             /* The previous cycle finished in time; now set the
5003              * Flash Cycle Done bit. */
5004             hsfsts |= HSFSTS_DONE;
5005             ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
5006         }
5007     }
5008     return error;
5009 }
5010 
5011 /******************************************************************************
5012  * This function starts a flash cycle and waits for its completion
5013  *
5014  * sc - The pointer to the hw structure
5015  ****************************************************************************/
5016 static int32_t
5017 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
5018 {
5019     uint16_t hsflctl;
5020     uint16_t hsfsts;
5021     int32_t error = 1;
5022     uint32_t i = 0;
5023 
5024     /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
5025     hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
5026     hsflctl |= HSFCTL_GO;
5027     ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
5028 
5029     /* Wait until the FDONE bit is set to 1. */
5030     do {
5031         hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5032         if (hsfsts & HSFSTS_DONE)
5033             break;
5034         delay(1);
5035         i++;
5036     } while (i < timeout);
5037     if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0) {
5038         error = 0;
5039     }
5040     return error;
5041 }
5042 
5043 /******************************************************************************
5044  * Reads a byte or word from the NVM using the ICH8 flash access registers.
5045  *
5046  * sc - The pointer to the hw structure
5047  * index - The index of the byte or word to read.
5048  * size - Size of data to read, 1=byte 2=word
5049  * data - Pointer to the word to store the value read.
5050  *****************************************************************************/
5051 static int32_t
5052 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
5053                      uint32_t size, uint16_t* data)
5054 {
5055     uint16_t hsfsts;
5056     uint16_t hsflctl;
5057     uint32_t flash_linear_address;
5058     uint32_t flash_data = 0;
5059     int32_t error = 1;
5060     int32_t count = 0;
5061 
5062     if (size < 1 || size > 2 || data == NULL ||
5063         index > ICH_FLASH_LINEAR_ADDR_MASK)
5064         return error;
5065 
5066     flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
5067                            sc->sc_ich8_flash_base;
5068 
5069     do {
5070         delay(1);
5071         /* Steps */
5072         error = wm_ich8_cycle_init(sc);
5073         if (error)
5074             break;
5075 
5076         hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
5077         /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
5078         hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT) & HSFCTL_BCOUNT_MASK;
5079         hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
5080         ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
5081 
5082         /* Write the last 24 bits of index into Flash Linear address field in
5083          * Flash Address */
5084         /* TODO: TBD maybe check the index against the size of flash */
5085 
5086         ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
5087 
5088         error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
5089 
5090         /* If FCERR is set, clear it and retry the whole sequence a few
5091          * more times; otherwise read the data out of Flash Data0,
5092          * least-significant byte first. */
5093         if (error == 0) {
5094             flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
5095             if (size == 1) {
5096                 *data = (uint8_t)(flash_data & 0x000000FF);
5097             } else if (size == 2) {
5098                 *data = (uint16_t)(flash_data & 0x0000FFFF);
5099             }
5100             break;
5101         } else {
5102             /* If we've gotten here, then things are probably completely hosed,
5103              * but if the error condition is detected, it won't hurt to give
5104              * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
5105              */
5106             hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5107             if (hsfsts & HSFSTS_ERR) {
5108                 /* Repeat for some time before giving up. */
5109                 continue;
5110             } else if ((hsfsts & HSFSTS_DONE) == 0) {
5111                 break;
5112             }
5113         }
5114     } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
5115 
5116     return error;
5117 }
5118 
5119 #if 0
5120 /******************************************************************************
5121  * Reads a single byte from the NVM using the ICH8 flash access registers.
5122  *
5123  * sc - pointer to wm_hw structure
5124  * index - The index of the byte to read.
5125  * data - Pointer to a byte to store the value read.
5126  *****************************************************************************/
5127 static int32_t
5128 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
5129 {
5130     int32_t status;
5131     uint16_t word = 0;
5132 
5133     status = wm_read_ich8_data(sc, index, 1, &word);
5134     if (status == 0) {
5135         *data = (uint8_t)word;
5136     }
5137 
5138     return status;
5139 }
5140 #endif
5141 
5142 /******************************************************************************
5143  * Reads a word from the NVM using the ICH8 flash access registers.
5144  *
5145  * sc - pointer to wm_hw structure
5146  * index - The starting byte index of the word to read.
5147  * data - Pointer to a word to store the value read.
5148  *****************************************************************************/
5149 static int32_t
5150 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
5151 {
5152     int32_t status;
5153 
5154     status = wm_read_ich8_data(sc, index, 2, data);
5155     return status;
5156 }
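
/*
 * Sketch (illustrative only, not driver code): the ICH8 NVM read path
 * layers as wm_read_eeprom() -> wm_read_eeprom_ich8() ->
 * wm_read_ich8_word() -> wm_read_ich8_data(), so callers deal only in
 * word offsets.  The helper name and the word offset are hypothetical.
 */
#if 0
static void
wm_nvm_read_example(struct wm_softc *sc)	/* hypothetical helper */
{
    uint16_t myword;

    /* Read one 16-bit word; 0 return means success. */
    if (wm_read_eeprom(sc, 0x03 /* hypothetical offset */, 1, &myword) == 0) {
        /* myword now holds EEPROM word 0x03. */
    }
}
#endif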
5157