/*	$NetBSD: if_wm.c,v 1.269 2014/05/27 02:21:29 tls Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.269 2014/05/27 02:21:29 tls Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rnd.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
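
/*
 * Usage sketch for DPRINTF() above (illustrative only, not from the
 * original source).  The doubled parentheses pass a complete printf
 * argument list through a single macro parameter:
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link is up\n",
 *	    device_xname(sc->sc_dev)));
 *
 * This compiles to nothing unless WM_DEBUG is defined and the
 * WM_DEBUG_LINK bit is set in wm_debug.
 */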

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
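
/*
 * Worked example of the ring arithmetic above (illustrative only):
 * with WM_NTXDESC(sc) == 4096 the mask is 0xfff, so
 *
 *	WM_NEXTTX(sc, 4095) == (4095 + 1) & 0xfff == 0
 *
 * i.e. the index wraps back to the start of the ring with a single
 * AND and no division or branch, which is why sc_ntxdesc and
 * sc_txnum must both be powers of two.
 */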

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t      wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
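
/*
 * Example (illustrative only): WM_CDTXOFF(5) expands to
 *
 *	offsetof(struct wm_control_data_82544, wdc_u.wcdu_txdescs[(5)])
 *
 * i.e. the byte offset of Tx descriptor 5 within the control-data
 * clump.  WM_CDTXADDR()/WM_CDRXADDR() below add such offsets to the
 * clump's DMA base address, so the driver and the hardware always
 * agree on where each descriptor lives in one shared DMA segment.
 */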

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability register offset */

	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */

	int sc_ee_addrbits;		/* EEPROM address bits */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int	sc_txfree;		/* number of free Tx descriptors */
	int	sc_txnext;		/* next ready Tx descriptor */

	int	sc_txsfree;		/* number of free Tx jobs */
	int	sc_txsnext;		/* next free Tx job */
	int	sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int	sc_txfifo_size;		/* Tx FIFO size */
	int	sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int	sc_txfifo_stall;	/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
	int	sc_rxdiscard;
	int	sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
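
/*
 * A minimal sketch of the tail-pointer idiom used above (illustrative
 * only): sc_rxtailp always points at the m_next field (or at
 * sc_rxhead itself) through which the next fragment should be stored,
 * so appending is O(1) with no special case for an empty chain:
 *
 *	WM_RXCHAIN_RESET(sc);		// sc_rxhead = NULL, tailp = &head
 *	WM_RXCHAIN_LINK(sc, m1);	// sc_rxhead = m1, tailp = &m1->m_next
 *	WM_RXCHAIN_LINK(sc, m2);	// m1->m_next = m2, tailp = &m2->m_next
 */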

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
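
/*
 * Note (informational): CSR_WRITE_FLUSH() forces posted register
 * writes to complete by performing a harmless read of the STATUS
 * register; the read cannot return until the preceding writes have
 * reached the device.  A typical sequence might look like (sketch,
 * not taken verbatim from this file):
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
 *	CSR_WRITE_FLUSH(sc);
 *	delay(5000);
 */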

#define ICH8_FLASH_READ32(sc, reg) \
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE32(sc, reg, data) \
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define ICH8_FLASH_READ16(sc, reg) \
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE16(sc, reg, data) \
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
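
/*
 * Worked example for WM_CDTXSYNC() above (illustrative only): with a
 * 4096-entry ring, WM_CDTXSYNC(sc, 4092, 8, ops) first syncs the 4
 * descriptors 4092-4095 at the end of the descriptor array, then
 * wraps and syncs descriptors 0-3 in a second bus_dmamap_sync()
 * call.  Two calls are needed because the range is contiguous in
 * ring order but not as a single byte range in memory across the
 * wrap point.
 */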

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
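
/*
 * Example of the align_tweak logic above (illustrative only): with
 * sc_align_tweak == 2, a received frame starts 2 bytes into the 2KB
 * cluster, so the 14-byte Ethernet header ends at offset 16 and the
 * IP header that follows begins on a 4-byte boundary.  With
 * sc_align_tweak == 0 the payload is misaligned, but a full
 * 2048-byte frame still fits in the buffer.
 */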

static void	wm_start(struct ifnet *);
static void	wm_nq_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static int	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);
static void	wm_set_vlan(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(struct ifnet *);

static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

static void	wm_set_spiaddrbits(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		     uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
#ifdef WM_WOL
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
#endif
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
static void	wm_init_manageability(struct wm_softc *);
static void	wm_set_eee_i350(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
#define	WMP_F_SERDES		0x04
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
#if 0
	/*
	 * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so
	 * disabled for now ...
	 */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_1000T },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Connection",
	  WM_T_I354,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}
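
/*
 * Usage sketch (illustrative only): wm_set_dma_addr() stores a DMA
 * address into a descriptor as two little-endian 32-bit halves, e.g.
 *
 *	wm_set_dma_addr(&rxd->wrx_addr,
 *	    rxs->rxs_dmamap->dm_segs[0].ds_addr);
 *
 * On platforms with a 32-bit bus_addr_t the high word is simply zero.
 */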

static void
wm_set_spiaddrbits(struct wm_softc *sc)
{
	uint32_t reg;

	sc->sc_flags |= WM_F_EEPROM_SPI;
	reg = CSR_READ(sc, WMREG_EECD);
	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
}

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	uint32_t reg;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, 0);

	sc->sc_wmp = wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
			if (memtype == PCI_MAPREG_TYPE_IO)
				break;
			if (PCI_MAPREG_MEM_TYPE(memtype) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				i += 4;	/* skip high bits, too */
		}
		if (i < PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
			 * That's not a problem, because those newer chips
			 * don't have the bugs that require I/O access.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat as if it hasn't
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
					0, &sc->sc_iot, &sc->sc_ioh,
					NULL, &sc->sc_ios) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
					wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
		    && (sc->sc_type != WM_T_ICH10)
		    && (sc->sc_type != WM_T_PCH)
		    && (sc->sc_type != WM_T_PCH2)
		    && (sc->sc_type != WM_T_PCH_LPT)) {
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
			/* ICH* and PCH* have no PCIe capability registers */
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
				NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIe capability\n");
		}
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if ((reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
				 sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
				    PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
				    PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose_dev(sc->sc_dev,
					    "resetting PCI-X MMRBC: %d -> %d\n",
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCIX_CMD_BYTECNT_MASK) |
					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcixe_capoff + PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
								      : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error_dev(sc->sc_dev,
				    "unknown PCIXSPD %d; assuming 66MHz\n",
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
				break;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

1455 	/*
1456 	 * Allocate the control data structures, and create and load the
1457 	 * DMA map for it.
1458 	 *
1459 	 * NOTE: All Tx descriptors must be in the same 4G segment of
1460 	 * memory.  So must Rx descriptors.  We simplify by allocating
1461 	 * both sets within the same 4G segment.
1462 	 */
1463 	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1464 	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
1465 	sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1466 	    sizeof(struct wm_control_data_82542) :
1467 	    sizeof(struct wm_control_data_82544);
1468 	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1469 		    (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1470 		    &sc->sc_cd_rseg, 0)) != 0) {
1471 		aprint_error_dev(sc->sc_dev,
1472 		    "unable to allocate control data, error = %d\n",
1473 		    error);
1474 		goto fail_0;
1475 	}
1476 
1477 	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1478 		    sc->sc_cd_rseg, sc->sc_cd_size,
1479 		    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1480 		aprint_error_dev(sc->sc_dev,
1481 		    "unable to map control data, error = %d\n", error);
1482 		goto fail_1;
1483 	}
1484 
1485 	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1486 		    sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1487 		aprint_error_dev(sc->sc_dev,
1488 		    "unable to create control data DMA map, error = %d\n",
1489 		    error);
1490 		goto fail_2;
1491 	}
1492 
1493 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1494 		    sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1495 		aprint_error_dev(sc->sc_dev,
1496 		    "unable to load control data DMA map, error = %d\n",
1497 		    error);
1498 		goto fail_3;
1499 	}
1500 
1501 	/*
1502 	 * Create the transmit buffer DMA maps.
1503 	 */
1504 	WM_TXQUEUELEN(sc) =
1505 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1506 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1507 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1508 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1509 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1510 			    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1511 			aprint_error_dev(sc->sc_dev,
1512 			    "unable to create Tx DMA map %d, error = %d\n",
1513 			    i, error);
1514 			goto fail_4;
1515 		}
1516 	}
1517 
1518 	/*
1519 	 * Create the receive buffer DMA maps.
1520 	 */
1521 	for (i = 0; i < WM_NRXDESC; i++) {
1522 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1523 			    MCLBYTES, 0, 0,
1524 			    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1525 			aprint_error_dev(sc->sc_dev,
1526 			    "unable to create Rx DMA map %d error = %d\n",
1527 			    i, error);
1528 			goto fail_5;
1529 		}
1530 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
1531 	}
1532 
1533 	/* Clear interesting stat counters (these registers clear on read) */
1534 	CSR_READ(sc, WMREG_COLC);
1535 	CSR_READ(sc, WMREG_RXERRC);
1536 
1537 	/* get PHY control from SMBus to PCIe */
1538 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1539 	    || (sc->sc_type == WM_T_PCH_LPT))
1540 		wm_smbustopci(sc);
1541 
1542 	/*
1543 	 * Reset the chip to a known state.
1544 	 */
1545 	wm_reset(sc);
1546 
1547 	/*
1548 	 * Get some information about the EEPROM.
1549 	 */
1550 	switch (sc->sc_type) {
1551 	case WM_T_82542_2_0:
1552 	case WM_T_82542_2_1:
1553 	case WM_T_82543:
1554 	case WM_T_82544:
1555 		/* Microwire */
1556 		sc->sc_ee_addrbits = 6;
1557 		break;
1558 	case WM_T_82540:
1559 	case WM_T_82545:
1560 	case WM_T_82545_3:
1561 	case WM_T_82546:
1562 	case WM_T_82546_3:
1563 		/* Microwire */
1564 		reg = CSR_READ(sc, WMREG_EECD);
1565 		if (reg & EECD_EE_SIZE)
1566 			sc->sc_ee_addrbits = 8;
1567 		else
1568 			sc->sc_ee_addrbits = 6;
1569 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1570 		break;
1571 	case WM_T_82541:
1572 	case WM_T_82541_2:
1573 	case WM_T_82547:
1574 	case WM_T_82547_2:
1575 		reg = CSR_READ(sc, WMREG_EECD);
1576 		if (reg & EECD_EE_TYPE) {
1577 			/* SPI */
1578 			wm_set_spiaddrbits(sc);
1579 		} else
1580 			/* Microwire */
1581 			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
1582 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1583 		break;
1584 	case WM_T_82571:
1585 	case WM_T_82572:
1586 		/* SPI */
1587 		wm_set_spiaddrbits(sc);
1588 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1589 		break;
1590 	case WM_T_82573:
1591 	case WM_T_82574:
1592 	case WM_T_82583:
1593 		if (wm_is_onboard_nvm_eeprom(sc) == 0)
1594 			sc->sc_flags |= WM_F_EEPROM_FLASH;
1595 		else {
1596 			/* SPI */
1597 			wm_set_spiaddrbits(sc);
1598 		}
1599 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1600 		break;
1601 	case WM_T_82575:
1602 	case WM_T_82576:
1603 	case WM_T_82580:
1604 	case WM_T_82580ER:
1605 	case WM_T_I350:
1606 	case WM_T_I354: /* XXXX ok? */
1607 	case WM_T_80003:
1608 		/* SPI */
1609 		wm_set_spiaddrbits(sc);
1610 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
1611 		break;
1612 	case WM_T_ICH8:
1613 	case WM_T_ICH9:
1614 	case WM_T_ICH10:
1615 	case WM_T_PCH:
1616 	case WM_T_PCH2:
1617 	case WM_T_PCH_LPT:
1618 		/* FLASH */
1619 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
1620 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1621 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1622 		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1623 			aprint_error_dev(sc->sc_dev,
1624 			    "can't map FLASH registers\n");
1625 			return;
1626 		}
1627 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1628 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1629 						ICH_FLASH_SECTOR_SIZE;
1630 		sc->sc_ich8_flash_bank_size =
1631 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1632 		sc->sc_ich8_flash_bank_size -=
1633 		    (reg & ICH_GFPREG_BASE_MASK);
1634 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1635 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
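		/*
		 * The computation above: GFPREG's low field holds the
		 * first usable flash sector and the field at bit 16 the
		 * last, so (last + 1 - first) sectors make up the region;
		 * converting to bytes, then dividing by the two NVM banks
		 * and by sizeof(uint16_t), leaves the bank size in 16-bit
		 * words.
		 */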
1636 		break;
1637 	case WM_T_I210:
1638 	case WM_T_I211:
1639 		sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1640 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
1641 		break;
1642 	default:
1643 		break;
1644 	}
1645 
1646 	/*
1647 	 * Defer printing the EEPROM type until after verifying the checksum.
1648 	 * This allows the EEPROM type to be printed correctly in the case
1649 	 * that no EEPROM is attached.
1650 	 */
1651 	/*
1652 	 * Validate the EEPROM checksum. If the checksum fails, flag
1653 	 * this for later, so we can fail future reads from the EEPROM.
1654 	 */
1655 	if (wm_validate_eeprom_checksum(sc)) {
1656 		/*
1657 		 * Check a second time; some PCI-e parts fail the
1658 		 * first check due to the link being in a sleep state.
1659 		 */
1660 		if (wm_validate_eeprom_checksum(sc))
1661 			sc->sc_flags |= WM_F_EEPROM_INVALID;
1662 	}
1663 
1664 	/* Set device properties (macflags) */
1665 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1666 
1667 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
1668 		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1669 	else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) {
1670 		aprint_verbose_dev(sc->sc_dev, "FLASH(HW)\n");
1671 	} else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1672 		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1673 	} else {
1674 		if (sc->sc_flags & WM_F_EEPROM_SPI)
1675 			eetype = "SPI";
1676 		else
1677 			eetype = "MicroWire";
1678 		aprint_verbose_dev(sc->sc_dev,
1679 		    "%u word (%d address bits) %s EEPROM\n",
1680 		    1U << sc->sc_ee_addrbits,
1681 		    sc->sc_ee_addrbits, eetype);
1682 	}
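	/*
	 * For example (hypothetical part): a Microwire device with 6
	 * address bits is reported as a
	 * "64 word (6 address bits) MicroWire EEPROM".
	 */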
1683 
1684 	switch (sc->sc_type) {
1685 	case WM_T_82571:
1686 	case WM_T_82572:
1687 	case WM_T_82573:
1688 	case WM_T_82574:
1689 	case WM_T_82583:
1690 	case WM_T_80003:
1691 	case WM_T_ICH8:
1692 	case WM_T_ICH9:
1693 	case WM_T_ICH10:
1694 	case WM_T_PCH:
1695 	case WM_T_PCH2:
1696 	case WM_T_PCH_LPT:
1697 		if (wm_check_mng_mode(sc) != 0)
1698 			wm_get_hw_control(sc);
1699 		break;
1700 	default:
1701 		break;
1702 	}
1703 	wm_get_wakeup(sc);
1704 	/*
1705 	 * Read the Ethernet address from the EEPROM, unless it was
1706 	 * already found in the device properties.
1707 	 */
1708 	ea = prop_dictionary_get(dict, "mac-address");
1709 	if (ea != NULL) {
1710 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1711 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1712 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1713 	} else {
1714 		if (wm_read_mac_addr(sc, enaddr) != 0) {
1715 			aprint_error_dev(sc->sc_dev,
1716 			    "unable to read Ethernet address\n");
1717 			return;
1718 		}
1719 	}
1720 
1721 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1722 	    ether_sprintf(enaddr));
1723 
1724 	/*
1725 	 * Read the config info from the EEPROM, and set up various
1726 	 * bits in the control registers based on their contents.
1727 	 */
1728 	pn = prop_dictionary_get(dict, "i82543-cfg1");
1729 	if (pn != NULL) {
1730 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1731 		cfg1 = (uint16_t) prop_number_integer_value(pn);
1732 	} else {
1733 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1734 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1735 			return;
1736 		}
1737 	}
1738 
1739 	pn = prop_dictionary_get(dict, "i82543-cfg2");
1740 	if (pn != NULL) {
1741 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1742 		cfg2 = (uint16_t) prop_number_integer_value(pn);
1743 	} else {
1744 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1745 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1746 			return;
1747 		}
1748 	}
1749 
1750 	/* check for WM_F_WOL */
1751 	switch (sc->sc_type) {
1752 	case WM_T_82542_2_0:
1753 	case WM_T_82542_2_1:
1754 	case WM_T_82543:
1755 		/* dummy? */
1756 		eeprom_data = 0;
1757 		apme_mask = EEPROM_CFG3_APME;
1758 		break;
1759 	case WM_T_82544:
1760 		apme_mask = EEPROM_CFG2_82544_APM_EN;
1761 		eeprom_data = cfg2;
1762 		break;
1763 	case WM_T_82546:
1764 	case WM_T_82546_3:
1765 	case WM_T_82571:
1766 	case WM_T_82572:
1767 	case WM_T_82573:
1768 	case WM_T_82574:
1769 	case WM_T_82583:
1770 	case WM_T_80003:
1771 	default:
1772 		apme_mask = EEPROM_CFG3_APME;
1773 		wm_read_eeprom(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
1774 		    : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
1775 		break;
1776 	case WM_T_82575:
1777 	case WM_T_82576:
1778 	case WM_T_82580:
1779 	case WM_T_82580ER:
1780 	case WM_T_I350:
1781 	case WM_T_I354: /* XXX ok? */
1782 	case WM_T_ICH8:
1783 	case WM_T_ICH9:
1784 	case WM_T_ICH10:
1785 	case WM_T_PCH:
1786 	case WM_T_PCH2:
1787 	case WM_T_PCH_LPT:
1788 		/* XXX The funcid should be checked on some devices */
1789 		apme_mask = WUC_APME;
1790 		eeprom_data = CSR_READ(sc, WMREG_WUC);
1791 		break;
1792 	}
1793 
1794 	/* Check for the WM_F_WOL flag after reading the EEPROM settings */
1795 	if ((eeprom_data & apme_mask) != 0)
1796 		sc->sc_flags |= WM_F_WOL;
1797 #ifdef WM_DEBUG
1798 	if ((sc->sc_flags & WM_F_WOL) != 0)
1799 		printf("WOL\n");
1800 #endif
1801 
1802 	/*
1803 	 * XXX need special handling for some multiple port cards
1804 	 * to disable a paticular port.
1805 	 * to disable a particular port.
1806 
1807 	if (sc->sc_type >= WM_T_82544) {
1808 		pn = prop_dictionary_get(dict, "i82543-swdpin");
1809 		if (pn != NULL) {
1810 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1811 			swdpin = (uint16_t) prop_number_integer_value(pn);
1812 		} else {
1813 			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1814 				aprint_error_dev(sc->sc_dev,
1815 				    "unable to read SWDPIN\n");
1816 				return;
1817 			}
1818 		}
1819 	}
1820 
1821 	if (cfg1 & EEPROM_CFG1_ILOS)
1822 		sc->sc_ctrl |= CTRL_ILOS;
1823 	if (sc->sc_type >= WM_T_82544) {
1824 		sc->sc_ctrl |=
1825 		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1826 		    CTRL_SWDPIO_SHIFT;
1827 		sc->sc_ctrl |=
1828 		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1829 		    CTRL_SWDPINS_SHIFT;
1830 	} else {
1831 		sc->sc_ctrl |=
1832 		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1833 		    CTRL_SWDPIO_SHIFT;
1834 	}
1835 
1836 #if 0
1837 	if (sc->sc_type >= WM_T_82544) {
1838 		if (cfg1 & EEPROM_CFG1_IPS0)
1839 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1840 		if (cfg1 & EEPROM_CFG1_IPS1)
1841 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1842 		sc->sc_ctrl_ext |=
1843 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1844 		    CTRL_EXT_SWDPIO_SHIFT;
1845 		sc->sc_ctrl_ext |=
1846 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1847 		    CTRL_EXT_SWDPINS_SHIFT;
1848 	} else {
1849 		sc->sc_ctrl_ext |=
1850 		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1851 		    CTRL_EXT_SWDPIO_SHIFT;
1852 	}
1853 #endif
1854 
1855 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1856 #if 0
1857 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1858 #endif
1859 
1860 	/*
1861 	 * Set up some register offsets that are different between
1862 	 * the i82542 and the i82543 and later chips.
1863 	 */
1864 	if (sc->sc_type < WM_T_82543) {
1865 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
1866 		sc->sc_tdt_reg = WMREG_OLD_TDT;
1867 	} else {
1868 		sc->sc_rdt_reg = WMREG_RDT;
1869 		sc->sc_tdt_reg = WMREG_TDT;
1870 	}
1871 
1872 	if (sc->sc_type == WM_T_PCH) {
1873 		uint16_t val;
1874 
1875 		/* Save the NVM K1 bit setting */
1876 		wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
1877 
1878 		if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
1879 			sc->sc_nvm_k1_enabled = 1;
1880 		else
1881 			sc->sc_nvm_k1_enabled = 0;
1882 	}
1883 
1884 	/*
1885 	 * Determine whether we're in TBI, GMII or SGMII mode, and
1886 	 * initialize the media structures accordingly.
1887 	 */
1888 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1889 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
1890 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
1891 	    || sc->sc_type == WM_T_82573
1892 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
1893 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
1894 		wm_gmii_mediainit(sc, wmp->wmp_product);
1895 	} else if (sc->sc_type < WM_T_82543 ||
1896 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1897 		if (wmp->wmp_flags & WMP_F_1000T)
1898 			aprint_error_dev(sc->sc_dev,
1899 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
1900 		wm_tbi_mediainit(sc);
1901 	} else {
1902 		switch (sc->sc_type) {
1903 		case WM_T_82575:
1904 		case WM_T_82576:
1905 		case WM_T_82580:
1906 		case WM_T_82580ER:
1907 		case WM_T_I350:
1908 		case WM_T_I354:
1909 		case WM_T_I210:
1910 		case WM_T_I211:
1911 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
1912 			switch (reg & CTRL_EXT_LINK_MODE_MASK) {
1913 			case CTRL_EXT_LINK_MODE_1000KX:
1914 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
1915 				CSR_WRITE(sc, WMREG_CTRL_EXT,
1916 				    reg | CTRL_EXT_I2C_ENA);
1917 				panic("not supported yet\n");
1918 				break;
1919 			case CTRL_EXT_LINK_MODE_SGMII:
1920 				if (wm_sgmii_uses_mdio(sc)) {
1921 					aprint_verbose_dev(sc->sc_dev,
1922 					    "SGMII(MDIO)\n");
1923 					sc->sc_flags |= WM_F_SGMII;
1924 					wm_gmii_mediainit(sc,
1925 					    wmp->wmp_product);
1926 					break;
1927 				}
1928 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
1929 				/*FALLTHROUGH*/
1930 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
1931 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
1932 				CSR_WRITE(sc, WMREG_CTRL_EXT,
1933 				    reg | CTRL_EXT_I2C_ENA);
1934 				panic("not supported yet\n");
1935 				break;
1936 			case CTRL_EXT_LINK_MODE_GMII:
1937 			default:
1938 				CSR_WRITE(sc, WMREG_CTRL_EXT,
1939 				    reg & ~CTRL_EXT_I2C_ENA);
1940 				wm_gmii_mediainit(sc, wmp->wmp_product);
1941 				break;
1942 			}
1943 			break;
1944 		default:
1945 			if (wmp->wmp_flags & WMP_F_1000X)
1946 				aprint_error_dev(sc->sc_dev,
1947 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
1948 			wm_gmii_mediainit(sc, wmp->wmp_product);
1949 		}
1950 	}
1951 
1952 	ifp = &sc->sc_ethercom.ec_if;
1953 	xname = device_xname(sc->sc_dev);
1954 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
1955 	ifp->if_softc = sc;
1956 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1957 	ifp->if_ioctl = wm_ioctl;
1958 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
1959 		ifp->if_start = wm_nq_start;
1960 	else
1961 		ifp->if_start = wm_start;
1962 	ifp->if_watchdog = wm_watchdog;
1963 	ifp->if_init = wm_init;
1964 	ifp->if_stop = wm_stop;
1965 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1966 	IFQ_SET_READY(&ifp->if_snd);
1967 
1968 	/* Check for jumbo frame */
1969 	switch (sc->sc_type) {
1970 	case WM_T_82573:
1971 		/* XXX limited to 9234 if ASPM is disabled */
1972 		wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
1973 		if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
1974 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1975 		break;
1976 	case WM_T_82571:
1977 	case WM_T_82572:
1978 	case WM_T_82574:
1979 	case WM_T_82575:
1980 	case WM_T_82576:
1981 	case WM_T_82580:
1982 	case WM_T_82580ER:
1983 	case WM_T_I350:
1984 	case WM_T_I354: /* XXXX ok? */
1985 	case WM_T_I210:
1986 	case WM_T_I211:
1987 	case WM_T_80003:
1988 	case WM_T_ICH9:
1989 	case WM_T_ICH10:
1990 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
1991 	case WM_T_PCH_LPT:
1992 		/* XXX limited to 9234 */
1993 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1994 		break;
1995 	case WM_T_PCH:
1996 		/* XXX limited to 4096 */
1997 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1998 		break;
1999 	case WM_T_82542_2_0:
2000 	case WM_T_82542_2_1:
2001 	case WM_T_82583:
2002 	case WM_T_ICH8:
2003 		/* No support for jumbo frame */
2004 		break;
2005 	default:
2006 		/* ETHER_MAX_LEN_JUMBO */
2007 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2008 		break;
2009 	}
2010 
2011 	/*
2012 	 * If we're an i82543 or greater, we can support VLANs.
2013 	 */
2014 	if (sc->sc_type >= WM_T_82543)
2015 		sc->sc_ethercom.ec_capabilities |=
2016 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2017 
2018 	/*
2019 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
2020 	 * on i82543 and later.
2021 	 */
2022 	if (sc->sc_type >= WM_T_82543) {
2023 		ifp->if_capabilities |=
2024 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2025 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2026 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2027 		    IFCAP_CSUM_TCPv6_Tx |
2028 		    IFCAP_CSUM_UDPv6_Tx;
2029 	}
2030 
2031 	/*
2032 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2033 	 *
2034 	 *	82541GI (8086:1076) ... no
2035 	 *	82572EI (8086:10b9) ... yes
2036 	 */
2037 	if (sc->sc_type >= WM_T_82571) {
2038 		ifp->if_capabilities |=
2039 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2040 	}
2041 
2042 	/*
2043 	 * If we're an i82544 or greater (except i82547), we can do
2044 	 * TCP segmentation offload.
2045 	 */
2046 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2047 		ifp->if_capabilities |= IFCAP_TSOv4;
2048 	}
2049 
2050 	if (sc->sc_type >= WM_T_82571) {
2051 		ifp->if_capabilities |= IFCAP_TSOv6;
2052 	}
2053 
2054 	/*
2055 	 * Attach the interface.
2056 	 */
2057 	if_attach(ifp);
2058 	ether_ifattach(ifp, enaddr);
2059 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2060 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
2061 
2062 #ifdef WM_EVENT_COUNTERS
2063 	/* Attach event counters. */
2064 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2065 	    NULL, xname, "txsstall");
2066 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2067 	    NULL, xname, "txdstall");
2068 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2069 	    NULL, xname, "txfifo_stall");
2070 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2071 	    NULL, xname, "txdw");
2072 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2073 	    NULL, xname, "txqe");
2074 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2075 	    NULL, xname, "rxintr");
2076 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2077 	    NULL, xname, "linkintr");
2078 
2079 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2080 	    NULL, xname, "rxipsum");
2081 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2082 	    NULL, xname, "rxtusum");
2083 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2084 	    NULL, xname, "txipsum");
2085 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2086 	    NULL, xname, "txtusum");
2087 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2088 	    NULL, xname, "txtusum6");
2089 
2090 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2091 	    NULL, xname, "txtso");
2092 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2093 	    NULL, xname, "txtso6");
2094 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2095 	    NULL, xname, "txtsopain");
2096 
2097 	for (i = 0; i < WM_NTXSEGS; i++) {
2098 		snprintf(wm_txseg_evcnt_names[i],
2099 		    sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2100 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2101 		    NULL, xname, wm_txseg_evcnt_names[i]);
2102 	}
2103 
2104 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2105 	    NULL, xname, "txdrop");
2106 
2107 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2108 	    NULL, xname, "tu");
2109 
2110 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2111 	    NULL, xname, "tx_xoff");
2112 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2113 	    NULL, xname, "tx_xon");
2114 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2115 	    NULL, xname, "rx_xoff");
2116 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2117 	    NULL, xname, "rx_xon");
2118 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2119 	    NULL, xname, "rx_macctl");
2120 #endif /* WM_EVENT_COUNTERS */
2121 
2122 	if (pmf_device_register(self, wm_suspend, wm_resume))
2123 		pmf_class_network_register(self, ifp);
2124 	else
2125 		aprint_error_dev(self, "couldn't establish power handler\n");
2126 
2127 	return;
2128 
2129 	/*
2130 	 * Free any resources we've allocated during the failed attach
2131 	 * attempt.  Do this in reverse order and fall through.
2132 	 */
2133  fail_5:
2134 	for (i = 0; i < WM_NRXDESC; i++) {
2135 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2136 			bus_dmamap_destroy(sc->sc_dmat,
2137 			    sc->sc_rxsoft[i].rxs_dmamap);
2138 	}
2139  fail_4:
2140 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2141 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
2142 			bus_dmamap_destroy(sc->sc_dmat,
2143 			    sc->sc_txsoft[i].txs_dmamap);
2144 	}
2145 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2146  fail_3:
2147 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2148  fail_2:
2149 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2150 	    sc->sc_cd_size);
2151  fail_1:
2152 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2153  fail_0:
2154 	return;
2155 }
2156 
2157 static int
2158 wm_detach(device_t self, int flags __unused)
2159 {
2160 	struct wm_softc *sc = device_private(self);
2161 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2162 	int i, s;
2163 
2164 	s = splnet();
2165 	/* Stop the interface; callouts are stopped inside wm_stop(). */
2166 	wm_stop(ifp, 1);
2167 	splx(s);
2168 
2169 	pmf_device_deregister(self);
2170 
2171 	/* Tell the firmware about the release */
2172 	wm_release_manageability(sc);
2173 	wm_release_hw_control(sc);
2174 
2175 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2176 
2177 	/* Delete all remaining media. */
2178 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2179 
2180 	ether_ifdetach(ifp);
2181 	if_detach(ifp);
2182 
2184 	/* Unload RX dmamaps and free mbufs */
2185 	wm_rxdrain(sc);
2186 
2187 	/* Free the DMA maps, mirroring the cleanup at the end of wm_attach() */
2188 	for (i = 0; i < WM_NRXDESC; i++) {
2189 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2190 			bus_dmamap_destroy(sc->sc_dmat,
2191 			    sc->sc_rxsoft[i].rxs_dmamap);
2192 	}
2193 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2194 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
2195 			bus_dmamap_destroy(sc->sc_dmat,
2196 			    sc->sc_txsoft[i].txs_dmamap);
2197 	}
2198 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2199 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2200 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2201 	    sc->sc_cd_size);
2202 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2203 
2204 	/* Disestablish the interrupt handler */
2205 	if (sc->sc_ih != NULL) {
2206 		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2207 		sc->sc_ih = NULL;
2208 	}
2209 
2210 	/* Unmap the registers */
2211 	if (sc->sc_ss) {
2212 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2213 		sc->sc_ss = 0;
2214 	}
2215 
2216 	if (sc->sc_ios) {
2217 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2218 		sc->sc_ios = 0;
2219 	}
2220 
2221 	return 0;
2222 }
2223 
2224 /*
2225  * wm_tx_offload:
2226  *
2227  *	Set up TCP/IP checksumming parameters for the
2228  *	specified packet.
2229  */
2230 static int
2231 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
2232     uint8_t *fieldsp)
2233 {
2234 	struct mbuf *m0 = txs->txs_mbuf;
2235 	struct livengood_tcpip_ctxdesc *t;
2236 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
2237 	uint32_t ipcse;
2238 	struct ether_header *eh;
2239 	int offset, iphl;
2240 	uint8_t fields;
2241 
2242 	/*
2243 	 * XXX It would be nice if the mbuf pkthdr had offset
2244 	 * fields for the protocol headers.
2245 	 */
2246 
2247 	eh = mtod(m0, struct ether_header *);
2248 	switch (htons(eh->ether_type)) {
2249 	case ETHERTYPE_IP:
2250 	case ETHERTYPE_IPV6:
2251 		offset = ETHER_HDR_LEN;
2252 		break;
2253 
2254 	case ETHERTYPE_VLAN:
2255 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2256 		break;
2257 
2258 	default:
2259 		/*
2260 		 * Don't support this protocol or encapsulation.
2261 		 */
2262 		*fieldsp = 0;
2263 		*cmdp = 0;
2264 		return 0;
2265 	}
2266 
2267 	if ((m0->m_pkthdr.csum_flags &
2268 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
2269 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2270 	} else {
2271 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2272 	}
2273 	ipcse = offset + iphl - 1;
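	/*
	 * e.g. for an untagged IPv4 frame (hypothetical values):
	 * offset = 14 and iphl = 20 give ipcse = 33, the offset of the
	 * last byte the hardware includes in the IP header checksum.
	 */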
2274 
2275 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
2276 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
2277 	seg = 0;
2278 	fields = 0;
2279 
2280 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2281 		int hlen = offset + iphl;
2282 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2283 
2284 		if (__predict_false(m0->m_len <
2285 				    (hlen + sizeof(struct tcphdr)))) {
2286 			/*
2287 			 * TCP/IP headers are not in the first mbuf; we need
2288 			 * to do this the slow and painful way.  Let's just
2289 			 * hope this doesn't happen very often.
2290 			 */
2291 			struct tcphdr th;
2292 
2293 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2294 
2295 			m_copydata(m0, hlen, sizeof(th), &th);
2296 			if (v4) {
2297 				struct ip ip;
2298 
2299 				m_copydata(m0, offset, sizeof(ip), &ip);
2300 				ip.ip_len = 0;
2301 				m_copyback(m0,
2302 				    offset + offsetof(struct ip, ip_len),
2303 				    sizeof(ip.ip_len), &ip.ip_len);
2304 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2305 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2306 			} else {
2307 				struct ip6_hdr ip6;
2308 
2309 				m_copydata(m0, offset, sizeof(ip6), &ip6);
2310 				ip6.ip6_plen = 0;
2311 				m_copyback(m0,
2312 				    offset + offsetof(struct ip6_hdr, ip6_plen),
2313 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2314 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2315 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2316 			}
2317 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2318 			    sizeof(th.th_sum), &th.th_sum);
2319 
2320 			hlen += th.th_off << 2;
2321 		} else {
2322 			/*
2323 			 * TCP/IP headers are in the first mbuf; we can do
2324 			 * this the easy way.
2325 			 */
2326 			struct tcphdr *th;
2327 
2328 			if (v4) {
2329 				struct ip *ip =
2330 				    (void *)(mtod(m0, char *) + offset);
2331 				th = (void *)(mtod(m0, char *) + hlen);
2332 
2333 				ip->ip_len = 0;
2334 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2335 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2336 			} else {
2337 				struct ip6_hdr *ip6 =
2338 				    (void *)(mtod(m0, char *) + offset);
2339 				th = (void *)(mtod(m0, char *) + hlen);
2340 
2341 				ip6->ip6_plen = 0;
2342 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2343 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2344 			}
2345 			hlen += th->th_off << 2;
2346 		}
2347 
2348 		if (v4) {
2349 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
2350 			cmdlen |= WTX_TCPIP_CMD_IP;
2351 		} else {
2352 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2353 			ipcse = 0;
2354 		}
2355 		cmd |= WTX_TCPIP_CMD_TSE;
2356 		cmdlen |= WTX_TCPIP_CMD_TSE |
2357 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
2358 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
2359 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
2360 	}
2361 
2362 	/*
2363 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
2364 	 * offload feature, if we load the context descriptor, we
2365 	 * MUST provide valid values for IPCSS and TUCSS fields.
2366 	 */
2367 
2368 	ipcs = WTX_TCPIP_IPCSS(offset) |
2369 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
2370 	    WTX_TCPIP_IPCSE(ipcse);
2371 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
2372 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
2373 		fields |= WTX_IXSM;
2374 	}
2375 
2376 	offset += iphl;
2377 
2378 	if (m0->m_pkthdr.csum_flags &
2379 	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
2380 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2381 		fields |= WTX_TXSM;
2382 		tucs = WTX_TCPIP_TUCSS(offset) |
2383 		    WTX_TCPIP_TUCSO(offset +
2384 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
2385 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
2386 	} else if ((m0->m_pkthdr.csum_flags &
2387 	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
2388 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
2389 		fields |= WTX_TXSM;
2390 		tucs = WTX_TCPIP_TUCSS(offset) |
2391 		    WTX_TCPIP_TUCSO(offset +
2392 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
2393 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
2394 	} else {
2395 		/* Just initialize it to a valid TCP context. */
2396 		tucs = WTX_TCPIP_TUCSS(offset) |
2397 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
2398 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
2399 	}
2400 
2401 	/* Fill in the context descriptor. */
2402 	t = (struct livengood_tcpip_ctxdesc *)
2403 	    &sc->sc_txdescs[sc->sc_txnext];
2404 	t->tcpip_ipcs = htole32(ipcs);
2405 	t->tcpip_tucs = htole32(tucs);
2406 	t->tcpip_cmdlen = htole32(cmdlen);
2407 	t->tcpip_seg = htole32(seg);
2408 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
2409 
2410 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
2411 	txs->txs_ndesc++;
2412 
2413 	*cmdp = cmd;
2414 	*fieldsp = fields;
2415 
2416 	return 0;
2417 }
2418 
2419 static void
2420 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
2421 {
2422 	struct mbuf *m;
2423 	int i;
2424 
2425 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
2426 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
2427 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
2428 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
2429 		    m->m_data, m->m_len, m->m_flags);
2430 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
2431 	    i, i == 1 ? "" : "s");
2432 }
2433 
2434 /*
2435  * wm_82547_txfifo_stall:
2436  *
2437  *	Callout used to wait for the 82547 Tx FIFO to drain,
2438  *	reset the FIFO pointers, and restart packet transmission.
2439  */
2440 static void
2441 wm_82547_txfifo_stall(void *arg)
2442 {
2443 	struct wm_softc *sc = arg;
2444 	int s;
2445 
2446 	s = splnet();
2447 
2448 	if (sc->sc_txfifo_stall) {
2449 		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
2450 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
2451 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
2452 			/*
2453 			 * Packets have drained.  Stop transmitter, reset
2454 			 * FIFO pointers, restart transmitter, and kick
2455 			 * the packet queue.
2456 			 */
2457 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
2458 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
2459 			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
2460 			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
2461 			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
2462 			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
2463 			CSR_WRITE(sc, WMREG_TCTL, tctl);
2464 			CSR_WRITE_FLUSH(sc);
2465 
2466 			sc->sc_txfifo_head = 0;
2467 			sc->sc_txfifo_stall = 0;
2468 			wm_start(&sc->sc_ethercom.ec_if);
2469 		} else {
2470 			/*
2471 			 * Still waiting for packets to drain; try again in
2472 			 * another tick.
2473 			 */
2474 			callout_schedule(&sc->sc_txfifo_ch, 1);
2475 		}
2476 	}
2477 
2478 	splx(s);
2479 }
2480 
2481 static void
2482 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
2483 {
2484 	uint32_t reg;
2485 
2486 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
2487 
2488 	if (on != 0)
2489 		reg |= EXTCNFCTR_GATE_PHY_CFG;
2490 	else
2491 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
2492 
2493 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
2494 }
2495 
2496 /*
2497  * wm_82547_txfifo_bugchk:
2498  *
2499  *	Check for bug condition in the 82547 Tx FIFO.  We need to
2500  *	prevent enqueueing a packet that would wrap around the end
2501  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
2502  *
2503  *	We do this by checking the amount of space before the end
2504  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
2505  *	the Tx FIFO, wait for all remaining packets to drain, reset
2506  *	the internal FIFO pointers to the beginning, and restart
2507  *	transmission on the interface.
2508  */
2509 #define	WM_FIFO_HDR		0x10
2510 #define	WM_82547_PAD_LEN	0x3e0
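/*
 * Worked example (hypothetical numbers): a 1514-byte frame gives
 * len = roundup(1514 + 0x10, 0x10) = 0x600.  If only space = 0x200
 * bytes remain before the end of the FIFO, 0x600 >= 0x3e0 + 0x200
 * holds, so the packet would wrap and we stall instead of sending it.
 */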
2511 static int
2512 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2513 {
2514 	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2515 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2516 
2517 	/* Just return if already stalled. */
2518 	if (sc->sc_txfifo_stall)
2519 		return 1;
2520 
2521 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
2522 		/* Stall only occurs in half-duplex mode. */
2523 		goto send_packet;
2524 	}
2525 
2526 	if (len >= WM_82547_PAD_LEN + space) {
2527 		sc->sc_txfifo_stall = 1;
2528 		callout_schedule(&sc->sc_txfifo_ch, 1);
2529 		return 1;
2530 	}
2531 
2532  send_packet:
2533 	sc->sc_txfifo_head += len;
2534 	if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2535 		sc->sc_txfifo_head -= sc->sc_txfifo_size;
2536 
2537 	return 0;
2538 }
2539 
2540 /*
2541  * wm_start:		[ifnet interface function]
2542  *
2543  *	Start packet transmission on the interface.
2544  */
2545 static void
2546 wm_start(struct ifnet *ifp)
2547 {
2548 	struct wm_softc *sc = ifp->if_softc;
2549 	struct mbuf *m0;
2550 	struct m_tag *mtag;
2551 	struct wm_txsoft *txs;
2552 	bus_dmamap_t dmamap;
2553 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2554 	bus_addr_t curaddr;
2555 	bus_size_t seglen, curlen;
2556 	uint32_t cksumcmd;
2557 	uint8_t cksumfields;
2558 
2559 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2560 		return;
2561 
2562 	/*
2563 	 * Remember the previous number of free descriptors.
2564 	 */
2565 	ofree = sc->sc_txfree;
2566 
2567 	/*
2568 	 * Loop through the send queue, setting up transmit descriptors
2569 	 * until we drain the queue, or use up all available transmit
2570 	 * descriptors.
2571 	 */
2572 	for (;;) {
2573 		/* Grab a packet off the queue. */
2574 		IFQ_POLL(&ifp->if_snd, m0);
2575 		if (m0 == NULL)
2576 			break;
2577 
2578 		DPRINTF(WM_DEBUG_TX,
2579 		    ("%s: TX: have packet to transmit: %p\n",
2580 		    device_xname(sc->sc_dev), m0));
2581 
2582 		/* Get a work queue entry. */
2583 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2584 			wm_txintr(sc);
2585 			if (sc->sc_txsfree == 0) {
2586 				DPRINTF(WM_DEBUG_TX,
2587 				    ("%s: TX: no free job descriptors\n",
2588 					device_xname(sc->sc_dev)));
2589 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2590 				break;
2591 			}
2592 		}
2593 
2594 		txs = &sc->sc_txsoft[sc->sc_txsnext];
2595 		dmamap = txs->txs_dmamap;
2596 
2597 		use_tso = (m0->m_pkthdr.csum_flags &
2598 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2599 
2600 		/*
2601 		 * So says the Linux driver:
2602 		 * The controller does a simple calculation to make sure
2603 		 * there is enough room in the FIFO before initiating the
2604 		 * DMA for each buffer.  The calc is:
2605 		 *	4 = ceil(buffer len / MSS)
2606 		 * To make sure we don't overrun the FIFO, adjust the max
2607 		 * buffer len if the MSS drops.
2608 		 */
2609 		dmamap->dm_maxsegsz =
2610 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2611 		    ? m0->m_pkthdr.segsz << 2
2612 		    : WTX_MAX_LEN;
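		/*
		 * e.g. (hypothetical): a TSO packet with segsz = 1460
		 * caps dm_maxsegsz at 1460 << 2 = 5840 bytes, so no
		 * single DMA segment outruns the controller's
		 * 4 * MSS FIFO headroom estimate.
		 */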
2613 
2614 		/*
2615 		 * Load the DMA map.  If this fails, the packet either
2616 		 * didn't fit in the allotted number of segments, or we
2617 		 * were short on resources.  For the too-many-segments
2618 		 * case, we simply report an error and drop the packet,
2619 		 * since we can't sanely copy a jumbo packet to a single
2620 		 * buffer.
2621 		 */
2622 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2623 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2624 		if (error) {
2625 			if (error == EFBIG) {
2626 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2627 				log(LOG_ERR, "%s: Tx packet consumes too many "
2628 				    "DMA segments, dropping...\n",
2629 				    device_xname(sc->sc_dev));
2630 				IFQ_DEQUEUE(&ifp->if_snd, m0);
2631 				wm_dump_mbuf_chain(sc, m0);
2632 				m_freem(m0);
2633 				continue;
2634 			}
2635 			/*
2636 			 * Short on resources, just stop for now.
2637 			 */
2638 			DPRINTF(WM_DEBUG_TX,
2639 			    ("%s: TX: dmamap load failed: %d\n",
2640 			    device_xname(sc->sc_dev), error));
2641 			break;
2642 		}
2643 
2644 		segs_needed = dmamap->dm_nsegs;
2645 		if (use_tso) {
2646 			/* For sentinel descriptor; see below. */
2647 			segs_needed++;
2648 		}
2649 
2650 		/*
2651 		 * Ensure we have enough descriptors free to describe
2652 		 * the packet.  Note, we always reserve one descriptor
2653 		 * at the end of the ring due to the semantics of the
2654 		 * TDT register, plus one more in the event we need
2655 		 * to load offload context.
2656 		 */
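		/*
		 * (Hence the "- 2" below: one slot for the reserved TDT
		 * ring entry, one for a possible offload context
		 * descriptor.)
		 */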
2657 		if (segs_needed > sc->sc_txfree - 2) {
2658 			/*
2659 			 * Not enough free descriptors to transmit this
2660 			 * packet.  We haven't committed anything yet,
2661 			 * so just unload the DMA map, put the packet
2662 			 * back on the queue, and punt.  Notify the upper
2663 			 * layer that there are no more slots left.
2664 			 */
2665 			DPRINTF(WM_DEBUG_TX,
2666 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
2667 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
2668 			    segs_needed, sc->sc_txfree - 1));
2669 			ifp->if_flags |= IFF_OACTIVE;
2670 			bus_dmamap_unload(sc->sc_dmat, dmamap);
2671 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2672 			break;
2673 		}
2674 
2675 		/*
2676 		 * Check for 82547 Tx FIFO bug.  We need to do this
2677 		 * once we know we can transmit the packet, since we
2678 		 * do some internal FIFO space accounting here.
2679 		 */
2680 		if (sc->sc_type == WM_T_82547 &&
2681 		    wm_82547_txfifo_bugchk(sc, m0)) {
2682 			DPRINTF(WM_DEBUG_TX,
2683 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
2684 			    device_xname(sc->sc_dev)));
2685 			ifp->if_flags |= IFF_OACTIVE;
2686 			bus_dmamap_unload(sc->sc_dmat, dmamap);
2687 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2688 			break;
2689 		}
2690 
2691 		IFQ_DEQUEUE(&ifp->if_snd, m0);
2692 
2693 		/*
2694 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2695 		 */
2696 
2697 		DPRINTF(WM_DEBUG_TX,
2698 		    ("%s: TX: packet has %d (%d) DMA segments\n",
2699 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2700 
2701 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2702 
2703 		/*
2704 		 * Store a pointer to the packet so that we can free it
2705 		 * later.
2706 		 *
2707 		 * Initially, we consider the number of descriptors the
2708 		 * packet uses to be the number of DMA segments.  This may be
2709 		 * incremented by 1 if we do checksum offload (a descriptor
2710 		 * is used to set the checksum context).
2711 		 */
2712 		txs->txs_mbuf = m0;
2713 		txs->txs_firstdesc = sc->sc_txnext;
2714 		txs->txs_ndesc = segs_needed;
2715 
2716 		/* Set up offload parameters for this packet. */
2717 		if (m0->m_pkthdr.csum_flags &
2718 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
2719 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2720 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2721 			if (wm_tx_offload(sc, txs, &cksumcmd,
2722 					  &cksumfields) != 0) {
2723 				/* Error message already displayed. */
2724 				bus_dmamap_unload(sc->sc_dmat, dmamap);
2725 				continue;
2726 			}
2727 		} else {
2728 			cksumcmd = 0;
2729 			cksumfields = 0;
2730 		}
2731 
2732 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2733 
2734 		/* Sync the DMA map. */
2735 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2736 		    BUS_DMASYNC_PREWRITE);
2737 
2738 		/*
2739 		 * Initialize the transmit descriptor.
2740 		 */
2741 		for (nexttx = sc->sc_txnext, seg = 0;
2742 		     seg < dmamap->dm_nsegs; seg++) {
2743 			for (seglen = dmamap->dm_segs[seg].ds_len,
2744 			     curaddr = dmamap->dm_segs[seg].ds_addr;
2745 			     seglen != 0;
2746 			     curaddr += curlen, seglen -= curlen,
2747 			     nexttx = WM_NEXTTX(sc, nexttx)) {
2748 				curlen = seglen;
2749 
2750 				/*
2751 				 * So says the Linux driver:
2752 				 * Work around for premature descriptor
2753 				 * write-backs in TSO mode.  Append a
2754 				 * 4-byte sentinel descriptor.
2755 				 */
2756 				if (use_tso &&
2757 				    seg == dmamap->dm_nsegs - 1 &&
2758 				    curlen > 8)
2759 					curlen -= 4;
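				/*
				 * (The ring slot for this 4-byte sentinel
				 * is the segs_needed++ accounted for
				 * earlier when use_tso was set.)
				 */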
2760 
2761 				wm_set_dma_addr(
2762 				    &sc->sc_txdescs[nexttx].wtx_addr,
2763 				    curaddr);
2764 				sc->sc_txdescs[nexttx].wtx_cmdlen =
2765 				    htole32(cksumcmd | curlen);
2766 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2767 				    0;
2768 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2769 				    cksumfields;
2770 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2771 				lasttx = nexttx;
2772 
2773 				DPRINTF(WM_DEBUG_TX,
2774 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
2775 				     "len %#04zx\n",
2776 				    device_xname(sc->sc_dev), nexttx,
2777 				    (uint64_t)curaddr, curlen));
2778 			}
2779 		}
2780 
2781 		KASSERT(lasttx != -1);
2782 
2783 		/*
2784 		 * Set up the command byte on the last descriptor of
2785 		 * the packet.  If we're in the interrupt delay window,
2786 		 * delay the interrupt.
2787 		 */
2788 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
2789 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
2790 
2791 		/*
2792 		 * If VLANs are enabled and the packet has a VLAN tag, set
2793 		 * up the descriptor to encapsulate the packet for us.
2794 		 *
2795 		 * This is only valid on the last descriptor of the packet.
2796 		 */
2797 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2798 			sc->sc_txdescs[lasttx].wtx_cmdlen |=
2799 			    htole32(WTX_CMD_VLE);
2800 			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2801 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2802 		}
2803 
2804 		txs->txs_lastdesc = lasttx;
2805 
2806 		DPRINTF(WM_DEBUG_TX,
2807 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
2808 		    device_xname(sc->sc_dev),
2809 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2810 
2811 		/* Sync the descriptors we're using. */
2812 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2813 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2814 
2815 		/* Give the packet to the chip. */
2816 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2817 
2818 		DPRINTF(WM_DEBUG_TX,
2819 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2820 
2821 		DPRINTF(WM_DEBUG_TX,
2822 		    ("%s: TX: finished transmitting packet, job %d\n",
2823 		    device_xname(sc->sc_dev), sc->sc_txsnext));
2824 
2825 		/* Advance the tx pointer. */
2826 		sc->sc_txfree -= txs->txs_ndesc;
2827 		sc->sc_txnext = nexttx;
2828 
2829 		sc->sc_txsfree--;
2830 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2831 
2832 		/* Pass the packet to any BPF listeners. */
2833 		bpf_mtap(ifp, m0);
2834 	}
2835 
2836 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2837 		/* No more slots; notify upper layer. */
2838 		ifp->if_flags |= IFF_OACTIVE;
2839 	}
2840 
2841 	if (sc->sc_txfree != ofree) {
2842 		/* Set a watchdog timer in case the chip flakes out. */
2843 		ifp->if_timer = 5;
2844 	}
2845 }
2846 
2847 /*
2848  * wm_nq_tx_offload:
2849  *
2850  *	Set up TCP/IP checksumming parameters for the
2851  *	specified packet, for NEWQUEUE devices.
2852  */
2853 static int
2854 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
2855     uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
2856 {
2857 	struct mbuf *m0 = txs->txs_mbuf;
2858 	struct m_tag *mtag;
2859 	uint32_t vl_len, mssidx, cmdc;
2860 	struct ether_header *eh;
2861 	int offset, iphl;
2862 
2863 	/*
2864 	 * XXX It would be nice if the mbuf pkthdr had offset
2865 	 * fields for the protocol headers.
2866 	 */
2867 	*cmdlenp = 0;
2868 	*fieldsp = 0;
2869 
2870 	eh = mtod(m0, struct ether_header *);
2871 	switch (htons(eh->ether_type)) {
2872 	case ETHERTYPE_IP:
2873 	case ETHERTYPE_IPV6:
2874 		offset = ETHER_HDR_LEN;
2875 		break;
2876 
2877 	case ETHERTYPE_VLAN:
2878 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2879 		break;
2880 
2881 	default:
2882 		/*
2883 		 * Don't support this protocol or encapsulation.
2884 		 */
2885 		*do_csum = false;
2886 		return 0;
2887 	}
2888 	*do_csum = true;
2889 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
2890 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
2891 
2892 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
2893 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
2894 
2895 	if ((m0->m_pkthdr.csum_flags &
2896 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
2897 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2898 	} else {
2899 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2900 	}
2901 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
2902 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
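	/*
	 * At this point, for a hypothetical untagged IPv4 packet,
	 * vl_len packs MACLEN = 14 and IPLEN = 20 into their bit
	 * fields; the VLAN tag, if any, is OR'd in just below.
	 */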
2903 
2904 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2905 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
2906 		     << NQTXC_VLLEN_VLAN_SHIFT);
2907 		*cmdlenp |= NQTX_CMD_VLE;
2908 	}
2909 
2910 	mssidx = 0;
2911 
2912 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2913 		int hlen = offset + iphl;
2914 		int tcp_hlen;
2915 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2916 
2917 		if (__predict_false(m0->m_len <
2918 				    (hlen + sizeof(struct tcphdr)))) {
2919 			/*
2920 			 * TCP/IP headers are not in the first mbuf; we need
2921 			 * to do this the slow and painful way.  Let's just
2922 			 * hope this doesn't happen very often.
2923 			 */
2924 			struct tcphdr th;
2925 
2926 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2927 
2928 			m_copydata(m0, hlen, sizeof(th), &th);
2929 			if (v4) {
2930 				struct ip ip;
2931 
2932 				m_copydata(m0, offset, sizeof(ip), &ip);
2933 				ip.ip_len = 0;
2934 				m_copyback(m0,
2935 				    offset + offsetof(struct ip, ip_len),
2936 				    sizeof(ip.ip_len), &ip.ip_len);
2937 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2938 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2939 			} else {
2940 				struct ip6_hdr ip6;
2941 
2942 				m_copydata(m0, offset, sizeof(ip6), &ip6);
2943 				ip6.ip6_plen = 0;
2944 				m_copyback(m0,
2945 				    offset + offsetof(struct ip6_hdr, ip6_plen),
2946 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2947 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2948 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2949 			}
2950 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2951 			    sizeof(th.th_sum), &th.th_sum);
2952 
2953 			tcp_hlen = th.th_off << 2;
2954 		} else {
2955 			/*
2956 			 * TCP/IP headers are in the first mbuf; we can do
2957 			 * this the easy way.
2958 			 */
2959 			struct tcphdr *th;
2960 
2961 			if (v4) {
2962 				struct ip *ip =
2963 				    (void *)(mtod(m0, char *) + offset);
2964 				th = (void *)(mtod(m0, char *) + hlen);
2965 
2966 				ip->ip_len = 0;
2967 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2968 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2969 			} else {
2970 				struct ip6_hdr *ip6 =
2971 				    (void *)(mtod(m0, char *) + offset);
2972 				th = (void *)(mtod(m0, char *) + hlen);
2973 
2974 				ip6->ip6_plen = 0;
2975 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2976 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2977 			}
2978 			tcp_hlen = th->th_off << 2;
2979 		}
2980 		hlen += tcp_hlen;
2981 		*cmdlenp |= NQTX_CMD_TSE;
2982 
2983 		if (v4) {
2984 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
2985 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
2986 		} else {
2987 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2988 			*fieldsp |= NQTXD_FIELDS_TUXSM;
2989 		}
2990 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
2991 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
2992 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
2993 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
2994 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
2995 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
2996 	} else {
2997 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
2998 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
2999 	}
3000 
3001 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
3002 		*fieldsp |= NQTXD_FIELDS_IXSM;
3003 		cmdc |= NQTXC_CMD_IP4;
3004 	}
3005 
3006 	if (m0->m_pkthdr.csum_flags &
3007 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
3008 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
3009 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
3010 			cmdc |= NQTXC_CMD_TCP;
3011 		} else {
3012 			cmdc |= NQTXC_CMD_UDP;
3013 		}
3014 		cmdc |= NQTXC_CMD_IP4;
3015 		*fieldsp |= NQTXD_FIELDS_TUXSM;
3016 	}
3017 	if (m0->m_pkthdr.csum_flags &
3018 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
3019 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
3020 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
3021 			cmdc |= NQTXC_CMD_TCP;
3022 		} else {
3023 			cmdc |= NQTXC_CMD_UDP;
3024 		}
3025 		cmdc |= NQTXC_CMD_IP6;
3026 		*fieldsp |= NQTXD_FIELDS_TUXSM;
3027 	}
3028 
3029 	/* Fill in the context descriptor. */
3030 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
3031 	    htole32(vl_len);
3032 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
3033 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
3034 	    htole32(cmdc);
3035 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
3036 	    htole32(mssidx);
3037 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
3038 	DPRINTF(WM_DEBUG_TX,
3039 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
3040 	    sc->sc_txnext, 0, vl_len));
3041 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
3042 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
3043 	txs->txs_ndesc++;
3044 	return 0;
3045 }
3046 
3047 /*
3048  * wm_nq_start:		[ifnet interface function]
3049  *
3050  *	Start packet transmission on the interface for NEWQUEUE devices.
3051  */
3052 static void
3053 wm_nq_start(struct ifnet *ifp)
3054 {
3055 	struct wm_softc *sc = ifp->if_softc;
3056 	struct mbuf *m0;
3057 	struct m_tag *mtag;
3058 	struct wm_txsoft *txs;
3059 	bus_dmamap_t dmamap;
3060 	int error, nexttx, lasttx = -1, seg, segs_needed;
3061 	bool do_csum, sent;
3062 
3063 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
3064 		return;
3065 
3066 	sent = false;
3067 
3068 	/*
3069 	 * Loop through the send queue, setting up transmit descriptors
3070 	 * until we drain the queue, or use up all available transmit
3071 	 * descriptors.
3072 	 */
3073 	for (;;) {
3074 		/* Grab a packet off the queue. */
3075 		IFQ_POLL(&ifp->if_snd, m0);
3076 		if (m0 == NULL)
3077 			break;
3078 
3079 		DPRINTF(WM_DEBUG_TX,
3080 		    ("%s: TX: have packet to transmit: %p\n",
3081 		    device_xname(sc->sc_dev), m0));
3082 
3083 		/* Get a work queue entry. */
3084 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
3085 			wm_txintr(sc);
3086 			if (sc->sc_txsfree == 0) {
3087 				DPRINTF(WM_DEBUG_TX,
3088 				    ("%s: TX: no free job descriptors\n",
3089 					device_xname(sc->sc_dev)));
3090 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
3091 				break;
3092 			}
3093 		}
3094 
3095 		txs = &sc->sc_txsoft[sc->sc_txsnext];
3096 		dmamap = txs->txs_dmamap;
3097 
3098 		/*
3099 		 * Load the DMA map.  If this fails, the packet either
3100 		 * didn't fit in the allotted number of segments, or we
3101 		 * were short on resources.  For the too-many-segments
3102 		 * case, we simply report an error and drop the packet,
3103 		 * since we can't sanely copy a jumbo packet to a single
3104 		 * buffer.
3105 		 */
3106 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
3107 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
3108 		if (error) {
3109 			if (error == EFBIG) {
3110 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
3111 				log(LOG_ERR, "%s: Tx packet consumes too many "
3112 				    "DMA segments, dropping...\n",
3113 				    device_xname(sc->sc_dev));
3114 				IFQ_DEQUEUE(&ifp->if_snd, m0);
3115 				wm_dump_mbuf_chain(sc, m0);
3116 				m_freem(m0);
3117 				continue;
3118 			}
3119 			/*
3120 			 * Short on resources, just stop for now.
3121 			 */
3122 			DPRINTF(WM_DEBUG_TX,
3123 			    ("%s: TX: dmamap load failed: %d\n",
3124 			    device_xname(sc->sc_dev), error));
3125 			break;
3126 		}
3127 
3128 		segs_needed = dmamap->dm_nsegs;
3129 
3130 		/*
3131 		 * Ensure we have enough descriptors free to describe
3132 		 * the packet.  Note, we always reserve one descriptor
3133 		 * at the end of the ring due to the semantics of the
3134 		 * TDT register, plus one more in the event we need
3135 		 * to load offload context.
3136 		 */
3137 		if (segs_needed > sc->sc_txfree - 2) {
3138 			/*
3139 			 * Not enough free descriptors to transmit this
3140 			 * packet.  We haven't committed anything yet,
3141 			 * so just unload the DMA map, put the packet
3142 			 * back on the queue, and punt.  Notify the upper
3143 			 * layer that there are no more slots left.
3144 			 */
3145 			DPRINTF(WM_DEBUG_TX,
3146 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
3147 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
3148 			    segs_needed, sc->sc_txfree - 1));
3149 			ifp->if_flags |= IFF_OACTIVE;
3150 			bus_dmamap_unload(sc->sc_dmat, dmamap);
3151 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
3152 			break;
3153 		}
3154 
3155 		IFQ_DEQUEUE(&ifp->if_snd, m0);
3156 
3157 		/*
3158 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
3159 		 */
3160 
3161 		DPRINTF(WM_DEBUG_TX,
3162 		    ("%s: TX: packet has %d (%d) DMA segments\n",
3163 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
3164 
3165 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
3166 
3167 		/*
3168 		 * Store a pointer to the packet so that we can free it
3169 		 * later.
3170 		 *
3171 		 * Initially, we consider the number of descriptors the
3172 		 * packet uses to be the number of DMA segments.  This may be
3173 		 * incremented by 1 if we do checksum offload (a descriptor
3174 		 * is used to set the checksum context).
3175 		 */
3176 		txs->txs_mbuf = m0;
3177 		txs->txs_firstdesc = sc->sc_txnext;
3178 		txs->txs_ndesc = segs_needed;
3179 
3180 		/* Set up offload parameters for this packet. */
3181 		uint32_t cmdlen, fields, dcmdlen;
3182 		if (m0->m_pkthdr.csum_flags &
3183 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
3184 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
3185 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
3186 			if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
3187 			    &do_csum) != 0) {
3188 				/* Error message already displayed. */
3189 				bus_dmamap_unload(sc->sc_dmat, dmamap);
3190 				continue;
3191 			}
3192 		} else {
3193 			do_csum = false;
3194 			cmdlen = 0;
3195 			fields = 0;
3196 		}
3197 
3198 		/* Sync the DMA map. */
3199 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
3200 		    BUS_DMASYNC_PREWRITE);
3201 
3202 		/*
3203 		 * Initialize the first transmit descriptor.
3204 		 */
3205 		nexttx = sc->sc_txnext;
3206 		if (!do_csum) {
3207 			/* Set up a legacy descriptor */
3208 			wm_set_dma_addr(
3209 			    &sc->sc_txdescs[nexttx].wtx_addr,
3210 			    dmamap->dm_segs[0].ds_addr);
3211 			sc->sc_txdescs[nexttx].wtx_cmdlen =
3212 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
3213 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
3214 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
3215 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
3216 			    NULL) {
3217 				sc->sc_txdescs[nexttx].wtx_cmdlen |=
3218 				    htole32(WTX_CMD_VLE);
3219 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
3220 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
3221 			} else {
3222 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
3223 			}
3224 			dcmdlen = 0;
3225 		} else {
3226 			/* Set up an advanced data descriptor */
3227 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3228 			    htole64(dmamap->dm_segs[0].ds_addr);
3229 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
3230 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3231 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
3232 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
3233 			    htole32(fields);
3234 			DPRINTF(WM_DEBUG_TX,
3235 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
3236 			    device_xname(sc->sc_dev), nexttx,
3237 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
3238 			DPRINTF(WM_DEBUG_TX,
3239 			    ("\t 0x%08x%08x\n", fields,
3240 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
3241 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
3242 		}
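		/*
		 * dcmdlen chosen above is OR'd into each remaining segment
		 * descriptor below: 0 leaves those descriptors in legacy
		 * format, while NQTX_DTYP_D | NQTX_CMD_DEXT marks them as
		 * advanced data descriptors, matching the format picked for
		 * the first descriptor of the packet.
		 */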
3243 
3244 		lasttx = nexttx;
3245 		nexttx = WM_NEXTTX(sc, nexttx);
3246 		/*
3247 		 * Fill in the next descriptors.  The legacy and advanced
3248 		 * formats are the same here.
3249 		 */
3250 		for (seg = 1; seg < dmamap->dm_nsegs;
3251 		    seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
3252 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3253 			    htole64(dmamap->dm_segs[seg].ds_addr);
3254 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3255 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
3256 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
3257 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
3258 			lasttx = nexttx;
3259 
3260 			DPRINTF(WM_DEBUG_TX,
3261 			    ("%s: TX: desc %d: %#" PRIx64 ", "
3262 			     "len %#04zx\n",
3263 			    device_xname(sc->sc_dev), nexttx,
3264 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
3265 			    dmamap->dm_segs[seg].ds_len));
3266 		}
3267 
3268 		KASSERT(lasttx != -1);
3269 
3270 		/*
3271 		 * Set up the command byte on the last descriptor of
3272 		 * the packet.  If we're in the interrupt delay window,
3273 		 * delay the interrupt.
3274 		 */
3275 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
3276 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
3277 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
3278 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
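		/*
		 * The RS (report status) bit set here is what makes the chip
		 * write WTX_ST_DD back into the descriptor when transmission
		 * completes; wm_txintr() polls that bit to reclaim finished
		 * jobs.
		 */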
3279 
3280 		txs->txs_lastdesc = lasttx;
3281 
3282 		DPRINTF(WM_DEBUG_TX,
3283 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
3284 		    device_xname(sc->sc_dev),
3285 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
3286 
3287 		/* Sync the descriptors we're using. */
3288 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
3289 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3290 
3291 		/* Give the packet to the chip. */
3292 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
3293 		sent = true;
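		/*
		 * The TDT (transmit descriptor tail) write above hands
		 * ownership of descriptors [sc_txnext, nexttx) to the
		 * hardware: the chip fetches from its head pointer up to,
		 * but not including, the tail, so this write is what
		 * actually starts transmission.
		 */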
3294 
3295 		DPRINTF(WM_DEBUG_TX,
3296 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
3297 
3298 		DPRINTF(WM_DEBUG_TX,
3299 		    ("%s: TX: finished transmitting packet, job %d\n",
3300 		    device_xname(sc->sc_dev), sc->sc_txsnext));
3301 
3302 		/* Advance the tx pointer. */
3303 		sc->sc_txfree -= txs->txs_ndesc;
3304 		sc->sc_txnext = nexttx;
3305 
3306 		sc->sc_txsfree--;
3307 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
3308 
3309 		/* Pass the packet to any BPF listeners. */
3310 		bpf_mtap(ifp, m0);
3311 	}
3312 
3313 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
3314 		/* No more slots; notify upper layer. */
3315 		ifp->if_flags |= IFF_OACTIVE;
3316 	}
3317 
3318 	if (sent) {
3319 		/* Set a watchdog timer in case the chip flakes out. */
3320 		ifp->if_timer = 5;
3321 	}
3322 }
3323 
3324 /*
3325  * wm_watchdog:		[ifnet interface function]
3326  *
3327  *	Watchdog timer handler.
3328  */
3329 static void
3330 wm_watchdog(struct ifnet *ifp)
3331 {
3332 	struct wm_softc *sc = ifp->if_softc;
3333 
3334 	/*
3335 	 * Since we're using delayed interrupts, sweep up
3336 	 * before we report an error.
3337 	 */
3338 	wm_txintr(sc);
3339 
3340 	if (sc->sc_txfree != WM_NTXDESC(sc)) {
3341 #ifdef WM_DEBUG
3342 		int i, j;
3343 		struct wm_txsoft *txs;
3344 #endif
3345 		log(LOG_ERR,
3346 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3347 		    device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
3348 		    sc->sc_txnext);
3349 		ifp->if_oerrors++;
3350 #ifdef WM_DEBUG
3351 		for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
3352 		    i = WM_NEXTTXS(sc, i)) {
3353 		    txs = &sc->sc_txsoft[i];
3354 		    printf("txs %d tx %d -> %d\n",
3355 			i, txs->txs_firstdesc, txs->txs_lastdesc);
3356 		    for (j = txs->txs_firstdesc; ;
3357 			j = WM_NEXTTX(sc, j)) {
3358 			printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3359 			    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
3360 			printf("\t %#08x%08x\n",
3361 			    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
3362 			    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
3363 			if (j == txs->txs_lastdesc)
3364 				break;
3365 			}
3366 		}
3367 #endif
3368 		/* Reset the interface. */
3369 		(void) wm_init(ifp);
3370 	}
3371 
3372 	/* Try to get more packets going. */
3373 	ifp->if_start(ifp);
3374 }
3375 
3376 static int
3377 wm_ifflags_cb(struct ethercom *ec)
3378 {
3379 	struct ifnet *ifp = &ec->ec_if;
3380 	struct wm_softc *sc = ifp->if_softc;
3381 	int change = ifp->if_flags ^ sc->sc_if_flags;
3382 
3383 	if (change != 0)
3384 		sc->sc_if_flags = ifp->if_flags;
3385 
3386 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
3387 		return ENETRESET;
3388 
3389 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
3390 		wm_set_filter(sc);
3391 
3392 	wm_set_vlan(sc);
3393 
3394 	return 0;
3395 }
3396 
3397 /*
3398  * wm_ioctl:		[ifnet interface function]
3399  *
3400  *	Handle control requests from the operator.
3401  */
3402 static int
3403 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3404 {
3405 	struct wm_softc *sc = ifp->if_softc;
3406 	struct ifreq *ifr = (struct ifreq *) data;
3407 	struct ifaddr *ifa = (struct ifaddr *)data;
3408 	struct sockaddr_dl *sdl;
3409 	int s, error;
3410 
3411 	s = splnet();
3412 
3413 	switch (cmd) {
3414 	case SIOCSIFMEDIA:
3415 	case SIOCGIFMEDIA:
3416 		/* Flow control requires full-duplex mode. */
3417 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3418 		    (ifr->ifr_media & IFM_FDX) == 0)
3419 			ifr->ifr_media &= ~IFM_ETH_FMASK;
3420 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3421 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3422 				/* We can do both TXPAUSE and RXPAUSE. */
3423 				ifr->ifr_media |=
3424 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3425 			}
3426 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3427 		}
3428 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
3429 		break;
3430 	case SIOCINITIFADDR:
3431 		if (ifa->ifa_addr->sa_family == AF_LINK) {
3432 			sdl = satosdl(ifp->if_dl->ifa_addr);
3433 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3434 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3435 			/* unicast address is first multicast entry */
3436 			wm_set_filter(sc);
3437 			error = 0;
3438 			break;
3439 		}
3440 		/*FALLTHROUGH*/
3441 	default:
3442 		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
3443 			break;
3444 
3445 		error = 0;
3446 
3447 		if (cmd == SIOCSIFCAP)
3448 			error = (*ifp->if_init)(ifp);
3449 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3450 			;
3451 		else if (ifp->if_flags & IFF_RUNNING) {
3452 			/*
3453 			 * Multicast list has changed; set the hardware filter
3454 			 * accordingly.
3455 			 */
3456 			wm_set_filter(sc);
3457 		}
3458 		break;
3459 	}
3460 
3461 	/* Try to get more packets going. */
3462 	ifp->if_start(ifp);
3463 
3464 	splx(s);
3465 	return error;
3466 }
3467 
3468 /*
3469  * wm_intr:
3470  *
3471  *	Interrupt service routine.
3472  */
3473 static int
3474 wm_intr(void *arg)
3475 {
3476 	struct wm_softc *sc = arg;
3477 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3478 	uint32_t icr;
3479 	int handled = 0;
3480 
3481 	while (1 /* CONSTCOND */) {
3482 		icr = CSR_READ(sc, WMREG_ICR);
3483 		if ((icr & sc->sc_icr) == 0)
3484 			break;
3485 		rnd_add_uint32(&sc->rnd_source, icr);
3486 
3487 		handled = 1;
3488 
3489 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
3490 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
3491 			DPRINTF(WM_DEBUG_RX,
3492 			    ("%s: RX: got Rx intr 0x%08x\n",
3493 			    device_xname(sc->sc_dev),
3494 			    icr & (ICR_RXDMT0|ICR_RXT0)));
3495 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
3496 		}
3497 #endif
3498 		wm_rxintr(sc);
3499 
3500 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
3501 		if (icr & ICR_TXDW) {
3502 			DPRINTF(WM_DEBUG_TX,
3503 			    ("%s: TX: got TXDW interrupt\n",
3504 			    device_xname(sc->sc_dev)));
3505 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
3506 		}
3507 #endif
3508 		wm_txintr(sc);
3509 
3510 		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
3511 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
3512 			wm_linkintr(sc, icr);
3513 		}
3514 
3515 		if (icr & ICR_RXO) {
3516 #if defined(WM_DEBUG)
3517 			log(LOG_WARNING, "%s: Receive overrun\n",
3518 			    device_xname(sc->sc_dev));
3519 #endif /* defined(WM_DEBUG) */
3520 		}
3521 	}
3522 
3523 	if (handled) {
3524 		/* Try to get more packets going. */
3525 		ifp->if_start(ifp);
3526 	}
3527 
3528 	return handled;
3529 }
3530 
3531 /*
3532  * wm_txintr:
3533  *
3534  *	Helper; handle transmit interrupts.
3535  */
3536 static void
3537 wm_txintr(struct wm_softc *sc)
3538 {
3539 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3540 	struct wm_txsoft *txs;
3541 	uint8_t status;
3542 	int i;
3543 
3544 	ifp->if_flags &= ~IFF_OACTIVE;
3545 
3546 	/*
3547 	 * Go through the Tx list and free mbufs for those
3548 	 * frames which have been transmitted.
3549 	 */
3550 	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
3551 	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
3552 		txs = &sc->sc_txsoft[i];
3553 
3554 		DPRINTF(WM_DEBUG_TX,
3555 		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
3556 
3557 		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
3558 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3559 
3560 		status =
3561 		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
3562 		if ((status & WTX_ST_DD) == 0) {
3563 			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
3564 			    BUS_DMASYNC_PREREAD);
3565 			break;
3566 		}
3567 
3568 		DPRINTF(WM_DEBUG_TX,
3569 		    ("%s: TX: job %d done: descs %d..%d\n",
3570 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
3571 		    txs->txs_lastdesc));
3572 
3573 		/*
3574 		 * XXX We should probably be using the statistics
3575 		 * XXX registers, but I don't know if they exist
3576 		 * XXX on chips before the i82544.
3577 		 */
3578 
3579 #ifdef WM_EVENT_COUNTERS
3580 		if (status & WTX_ST_TU)
3581 			WM_EVCNT_INCR(&sc->sc_ev_tu);
3582 #endif /* WM_EVENT_COUNTERS */
3583 
3584 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
3585 			ifp->if_oerrors++;
3586 			if (status & WTX_ST_LC)
3587 				log(LOG_WARNING, "%s: late collision\n",
3588 				    device_xname(sc->sc_dev));
3589 			else if (status & WTX_ST_EC) {
3590 				ifp->if_collisions += 16;
3591 				log(LOG_WARNING, "%s: excessive collisions\n",
3592 				    device_xname(sc->sc_dev));
3593 			}
3594 		} else
3595 			ifp->if_opackets++;
3596 
3597 		sc->sc_txfree += txs->txs_ndesc;
3598 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
3599 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3600 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3601 		m_freem(txs->txs_mbuf);
3602 		txs->txs_mbuf = NULL;
3603 	}
3604 
3605 	/* Update the dirty transmit buffer pointer. */
3606 	sc->sc_txsdirty = i;
3607 	DPRINTF(WM_DEBUG_TX,
3608 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
3609 
3610 	/*
3611 	 * If there are no more pending transmissions, cancel the watchdog
3612 	 * timer.
3613 	 */
3614 	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
3615 		ifp->if_timer = 0;
3616 }
3617 
3618 /*
3619  * wm_rxintr:
3620  *
3621  *	Helper; handle receive interrupts.
3622  */
3623 static void
3624 wm_rxintr(struct wm_softc *sc)
3625 {
3626 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3627 	struct wm_rxsoft *rxs;
3628 	struct mbuf *m;
3629 	int i, len;
3630 	uint8_t status, errors;
3631 	uint16_t vlantag;
3632 
3633 	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
3634 		rxs = &sc->sc_rxsoft[i];
3635 
3636 		DPRINTF(WM_DEBUG_RX,
3637 		    ("%s: RX: checking descriptor %d\n",
3638 		    device_xname(sc->sc_dev), i));
3639 
3640 		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3641 
3642 		status = sc->sc_rxdescs[i].wrx_status;
3643 		errors = sc->sc_rxdescs[i].wrx_errors;
3644 		len = le16toh(sc->sc_rxdescs[i].wrx_len);
3645 		vlantag = sc->sc_rxdescs[i].wrx_special;
3646 
3647 		if ((status & WRX_ST_DD) == 0) {
3648 			/*
3649 			 * We have processed all of the receive descriptors.
3650 			 */
3651 			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
3652 			break;
3653 		}
3654 
3655 		if (__predict_false(sc->sc_rxdiscard)) {
3656 			DPRINTF(WM_DEBUG_RX,
3657 			    ("%s: RX: discarding contents of descriptor %d\n",
3658 			    device_xname(sc->sc_dev), i));
3659 			WM_INIT_RXDESC(sc, i);
3660 			if (status & WRX_ST_EOP) {
3661 				/* Reset our state. */
3662 				DPRINTF(WM_DEBUG_RX,
3663 				    ("%s: RX: resetting rxdiscard -> 0\n",
3664 				    device_xname(sc->sc_dev)));
3665 				sc->sc_rxdiscard = 0;
3666 			}
3667 			continue;
3668 		}
3669 
3670 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3671 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3672 
3673 		m = rxs->rxs_mbuf;
3674 
3675 		/*
3676 		 * Add a new receive buffer to the ring, unless of
3677 		 * course the length is zero. Treat the latter as a
3678 		 * failed mapping.
3679 		 */
3680 		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
3681 			/*
3682 			 * Failed, throw away what we've done so
3683 			 * far, and discard the rest of the packet.
3684 			 */
3685 			ifp->if_ierrors++;
3686 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3687 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3688 			WM_INIT_RXDESC(sc, i);
3689 			if ((status & WRX_ST_EOP) == 0)
3690 				sc->sc_rxdiscard = 1;
3691 			if (sc->sc_rxhead != NULL)
3692 				m_freem(sc->sc_rxhead);
3693 			WM_RXCHAIN_RESET(sc);
3694 			DPRINTF(WM_DEBUG_RX,
3695 			    ("%s: RX: Rx buffer allocation failed, "
3696 			    "dropping packet%s\n", device_xname(sc->sc_dev),
3697 			    sc->sc_rxdiscard ? " (discard)" : ""));
3698 			continue;
3699 		}
3700 
3701 		m->m_len = len;
3702 		sc->sc_rxlen += len;
3703 		DPRINTF(WM_DEBUG_RX,
3704 		    ("%s: RX: buffer at %p len %d\n",
3705 		    device_xname(sc->sc_dev), m->m_data, len));
3706 
3707 		/*
3708 		 * If this is not the end of the packet, keep
3709 		 * looking.
3710 		 */
3711 		if ((status & WRX_ST_EOP) == 0) {
3712 			WM_RXCHAIN_LINK(sc, m);
3713 			DPRINTF(WM_DEBUG_RX,
3714 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
3715 			    device_xname(sc->sc_dev), sc->sc_rxlen));
3716 			continue;
3717 		}
3718 
3719 		/*
3720 		 * Okay, we have the entire packet now.  The chip is
3721 		 * configured to include the FCS except on the I350, I354,
3722 		 * and I21[01] (not all chips can be configured to strip
3723 		 * it), so we need to trim it.  We may also need to adjust
3724 		 * the length of the previous mbuf in the chain if the
3725 		 * current mbuf is too short.  Due to an erratum, the
3726 		 * RCTL_SECRC bit in the RCTL register is always set on
3727 		 * the I350, so we don't trim the FCS there.
3728 		 */
3729 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
3730 		    && (sc->sc_type != WM_T_I210)
3731 		    && (sc->sc_type != WM_T_I211)) {
3732 			if (m->m_len < ETHER_CRC_LEN) {
3733 				sc->sc_rxtail->m_len
3734 				    -= (ETHER_CRC_LEN - m->m_len);
3735 				m->m_len = 0;
3736 			} else
3737 				m->m_len -= ETHER_CRC_LEN;
3738 			len = sc->sc_rxlen - ETHER_CRC_LEN;
3739 		} else
3740 			len = sc->sc_rxlen;
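		/*
		 * Example of the short-mbuf case above: with a 4-byte FCS,
		 * if the final mbuf holds only 2 bytes, those 2 bytes plus
		 * the last 2 bytes of the previous mbuf are the FCS, so we
		 * zero the final mbuf's length and shorten the previous one
		 * by the remaining 2 bytes.
		 */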
3741 
3742 		WM_RXCHAIN_LINK(sc, m);
3743 
3744 		*sc->sc_rxtailp = NULL;
3745 		m = sc->sc_rxhead;
3746 
3747 		WM_RXCHAIN_RESET(sc);
3748 
3749 		DPRINTF(WM_DEBUG_RX,
3750 		    ("%s: RX: have entire packet, len -> %d\n",
3751 		    device_xname(sc->sc_dev), len));
3752 
3753 		/*
3754 		 * If an error occurred, update stats and drop the packet.
3755 		 */
3756 		if (errors &
3757 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
3758 			if (errors & WRX_ER_SE)
3759 				log(LOG_WARNING, "%s: symbol error\n",
3760 				    device_xname(sc->sc_dev));
3761 			else if (errors & WRX_ER_SEQ)
3762 				log(LOG_WARNING, "%s: receive sequence error\n",
3763 				    device_xname(sc->sc_dev));
3764 			else if (errors & WRX_ER_CE)
3765 				log(LOG_WARNING, "%s: CRC error\n",
3766 				    device_xname(sc->sc_dev));
3767 			m_freem(m);
3768 			continue;
3769 		}
3770 
3771 		/*
3772 		 * No errors.  Receive the packet.
3773 		 */
3774 		m->m_pkthdr.rcvif = ifp;
3775 		m->m_pkthdr.len = len;
3776 
3777 		/*
3778 		 * If VLANs are enabled, VLAN packets have been unwrapped
3779 		 * for us.  Associate the tag with the packet.
3780 		 */
3781 		/* XXX should check for i350 and i354 */
3782 		if ((status & WRX_ST_VP) != 0) {
3783 			VLAN_INPUT_TAG(ifp, m,
3784 			    le16toh(vlantag),
3785 			    continue);
3786 		}
3787 
3788 		/*
3789 		 * Set up checksum info for this packet.
3790 		 */
3791 		if ((status & WRX_ST_IXSM) == 0) {
3792 			if (status & WRX_ST_IPCS) {
3793 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
3794 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
3795 				if (errors & WRX_ER_IPE)
3796 					m->m_pkthdr.csum_flags |=
3797 					    M_CSUM_IPv4_BAD;
3798 			}
3799 			if (status & WRX_ST_TCPCS) {
3800 				/*
3801 				 * Note: we don't know if this was TCP or UDP,
3802 				 * so we just set both bits, and expect the
3803 				 * upper layers to deal.
3804 				 */
3805 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
3806 				m->m_pkthdr.csum_flags |=
3807 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
3808 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
3809 				if (errors & WRX_ER_TCPE)
3810 					m->m_pkthdr.csum_flags |=
3811 					    M_CSUM_TCP_UDP_BAD;
3812 			}
3813 		}
3814 
3815 		ifp->if_ipackets++;
3816 
3817 		/* Pass this up to any BPF listeners. */
3818 		bpf_mtap(ifp, m);
3819 
3820 		/* Pass it on. */
3821 		(*ifp->if_input)(ifp, m);
3822 	}
3823 
3824 	/* Update the receive pointer. */
3825 	sc->sc_rxptr = i;
3826 
3827 	DPRINTF(WM_DEBUG_RX,
3828 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
3829 }
3830 
3831 /*
3832  * wm_linkintr_gmii:
3833  *
3834  *	Helper; handle link interrupts for GMII.
3835  */
3836 static void
3837 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
3838 {
3839 
3840 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3841 		__func__));
3842 
3843 	if (icr & ICR_LSC) {
3844 		DPRINTF(WM_DEBUG_LINK,
3845 		    ("%s: LINK: LSC -> mii_pollstat\n",
3846 			device_xname(sc->sc_dev)));
3847 		mii_pollstat(&sc->sc_mii);
3848 		if (sc->sc_type == WM_T_82543) {
3849 			int miistatus, active;
3850 
3851 			/*
3852 			 * With 82543, we need to force speed and
3853 			 * duplex on the MAC equal to what the PHY
3854 			 * speed and duplex configuration is.
3855 			 */
3856 			miistatus = sc->sc_mii.mii_media_status;
3857 
3858 			if (miistatus & IFM_ACTIVE) {
3859 				active = sc->sc_mii.mii_media_active;
3860 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
3861 				switch (IFM_SUBTYPE(active)) {
3862 				case IFM_10_T:
3863 					sc->sc_ctrl |= CTRL_SPEED_10;
3864 					break;
3865 				case IFM_100_TX:
3866 					sc->sc_ctrl |= CTRL_SPEED_100;
3867 					break;
3868 				case IFM_1000_T:
3869 					sc->sc_ctrl |= CTRL_SPEED_1000;
3870 					break;
3871 				default:
3872 					/*
3873 					 * fiber?
3874 					 * Should not enter here.
3875 					 */
3876 					printf("unknown media (%x)\n",
3877 					    active);
3878 					break;
3879 				}
3880 				if (active & IFM_FDX)
3881 					sc->sc_ctrl |= CTRL_FD;
3882 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3883 			}
3884 		} else if ((sc->sc_type == WM_T_ICH8)
3885 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
3886 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
3887 		} else if (sc->sc_type == WM_T_PCH) {
3888 			wm_k1_gig_workaround_hv(sc,
3889 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
3890 		}
3891 
3892 		if ((sc->sc_phytype == WMPHY_82578)
3893 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
3894 			== IFM_1000_T)) {
3895 
3896 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
3897 				delay(200*1000); /* XXX too big */
3898 
3899 				/* Link stall fix for link up */
3900 				wm_gmii_hv_writereg(sc->sc_dev, 1,
3901 				    HV_MUX_DATA_CTRL,
3902 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
3903 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
3904 				wm_gmii_hv_writereg(sc->sc_dev, 1,
3905 				    HV_MUX_DATA_CTRL,
3906 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
3907 			}
3908 		}
3909 	} else if (icr & ICR_RXSEQ) {
3910 		DPRINTF(WM_DEBUG_LINK,
3911 		    ("%s: LINK Receive sequence error\n",
3912 			device_xname(sc->sc_dev)));
3913 	}
3914 }
3915 
3916 /*
3917  * wm_linkintr_tbi:
3918  *
3919  *	Helper; handle link interrupts for TBI mode.
3920  */
3921 static void
3922 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
3923 {
3924 	uint32_t status;
3925 
3926 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3927 		__func__));
3928 
3929 	status = CSR_READ(sc, WMREG_STATUS);
3930 	if (icr & ICR_LSC) {
3931 		if (status & STATUS_LU) {
3932 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
3933 			    device_xname(sc->sc_dev),
3934 			    (status & STATUS_FD) ? "FDX" : "HDX"));
3935 			/*
3936 			 * NOTE: CTRL will update TFCE and RFCE automatically,
3937 			 * so we should update sc->sc_ctrl
3938 			 */
3939 
3940 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3941 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3942 			sc->sc_fcrtl &= ~FCRTL_XONE;
3943 			if (status & STATUS_FD)
3944 				sc->sc_tctl |=
3945 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3946 			else
3947 				sc->sc_tctl |=
3948 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3949 			if (sc->sc_ctrl & CTRL_TFCE)
3950 				sc->sc_fcrtl |= FCRTL_XONE;
3951 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3952 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3953 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
3954 				      sc->sc_fcrtl);
3955 			sc->sc_tbi_linkup = 1;
3956 		} else {
3957 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
3958 			    device_xname(sc->sc_dev)));
3959 			sc->sc_tbi_linkup = 0;
3960 		}
3961 		wm_tbi_set_linkled(sc);
3962 	} else if (icr & ICR_RXCFG) {
3963 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
3964 		    device_xname(sc->sc_dev)));
3965 		sc->sc_tbi_nrxcfg++;
3966 		wm_check_for_link(sc);
3967 	} else if (icr & ICR_RXSEQ) {
3968 		DPRINTF(WM_DEBUG_LINK,
3969 		    ("%s: LINK: Receive sequence error\n",
3970 		    device_xname(sc->sc_dev)));
3971 	}
3972 }
3973 
3974 /*
3975  * wm_linkintr:
3976  *
3977  *	Helper; handle link interrupts.
3978  */
3979 static void
3980 wm_linkintr(struct wm_softc *sc, uint32_t icr)
3981 {
3982 
3983 	if (sc->sc_flags & WM_F_HAS_MII)
3984 		wm_linkintr_gmii(sc, icr);
3985 	else
3986 		wm_linkintr_tbi(sc, icr);
3987 }
3988 
3989 /*
3990  * wm_tick:
3991  *
3992  *	One second timer, used to check link status, sweep up
3993  *	completed transmit jobs, etc.
3994  */
3995 static void
3996 wm_tick(void *arg)
3997 {
3998 	struct wm_softc *sc = arg;
3999 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4000 	int s;
4001 
4002 	s = splnet();
4003 
4004 	if (sc->sc_type >= WM_T_82542_2_1) {
4005 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
4006 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
4007 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
4008 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
4009 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
4010 	}
4011 
4012 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4013 	ifp->if_ierrors += 0ULL /* ensure quad_t */
4014 	    + CSR_READ(sc, WMREG_CRCERRS)
4015 	    + CSR_READ(sc, WMREG_ALGNERRC)
4016 	    + CSR_READ(sc, WMREG_SYMERRC)
4017 	    + CSR_READ(sc, WMREG_RXERRC)
4018 	    + CSR_READ(sc, WMREG_SEC)
4019 	    + CSR_READ(sc, WMREG_CEXTERR)
4020 	    + CSR_READ(sc, WMREG_RLEC);
4021 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
4022 
4023 	if (sc->sc_flags & WM_F_HAS_MII)
4024 		mii_tick(&sc->sc_mii);
4025 	else
4026 		wm_tbi_check_link(sc);
4027 
4028 	splx(s);
4029 
4030 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4031 }
4032 
4033 /*
4034  * wm_reset:
4035  *
4036  *	Reset the i82542 chip.
4037  */
4038 static void
4039 wm_reset(struct wm_softc *sc)
4040 {
4041 	int phy_reset = 0;
4042 	uint32_t reg, mask;
4043 
4044 	/*
4045 	 * Allocate on-chip memory according to the MTU size.
4046 	 * The Packet Buffer Allocation register must be written
4047 	 * before the chip is reset.
4048 	 */
4049 	switch (sc->sc_type) {
4050 	case WM_T_82547:
4051 	case WM_T_82547_2:
4052 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4053 		    PBA_22K : PBA_30K;
4054 		sc->sc_txfifo_head = 0;
4055 		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
4056 		sc->sc_txfifo_size =
4057 		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
4058 		sc->sc_txfifo_stall = 0;
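		/*
		 * A worked example of the split above (assuming the PBA_*
		 * constants count 1 KB blocks): with a standard MTU we pick
		 * PBA_30K, so the receive packet buffer gets 30 KB and the
		 * Tx FIFO gets the remaining 40 KB - 30 KB = 10 KB; the
		 * shifts only convert that block count into the address and
		 * byte units the Tx FIFO stall workaround expects.
		 */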
4059 		break;
4060 	case WM_T_82571:
4061 	case WM_T_82572:
4062 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
4063 	case WM_T_I350:
4064 	case WM_T_I354:
4065 	case WM_T_80003:
4066 		sc->sc_pba = PBA_32K;
4067 		break;
4068 	case WM_T_82580:
4069 	case WM_T_82580ER:
4070 		sc->sc_pba = PBA_35K;
4071 		break;
4072 	case WM_T_I210:
4073 	case WM_T_I211:
4074 		sc->sc_pba = PBA_34K;
4075 		break;
4076 	case WM_T_82576:
4077 		sc->sc_pba = PBA_64K;
4078 		break;
4079 	case WM_T_82573:
4080 		sc->sc_pba = PBA_12K;
4081 		break;
4082 	case WM_T_82574:
4083 	case WM_T_82583:
4084 		sc->sc_pba = PBA_20K;
4085 		break;
4086 	case WM_T_ICH8:
4087 		sc->sc_pba = PBA_8K;
4088 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
4089 		break;
4090 	case WM_T_ICH9:
4091 	case WM_T_ICH10:
4092 		sc->sc_pba = PBA_10K;
4093 		break;
4094 	case WM_T_PCH:
4095 	case WM_T_PCH2:
4096 	case WM_T_PCH_LPT:
4097 		sc->sc_pba = PBA_26K;
4098 		break;
4099 	default:
4100 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4101 		    PBA_40K : PBA_48K;
4102 		break;
4103 	}
4104 	CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
4105 
4106 	/* Prevent the PCI-E bus from sticking */
4107 	if (sc->sc_flags & WM_F_PCIE) {
4108 		int timeout = 800;
4109 
4110 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
4111 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4112 
4113 		while (timeout--) {
4114 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
4115 			    == 0)
4116 				break;
4117 			delay(100);
4118 		}
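		/*
		 * With up to 800 iterations of delay(100) this waits at most
		 * roughly 80 ms for the chip to finish any outstanding PCI-E
		 * master transactions before the reset below.
		 */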
4119 	}
4120 
4121 	/* Set the completion timeout for interface */
4122 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
4123 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4124 		wm_set_pcie_completion_timeout(sc);
4125 
4126 	/* Clear interrupt */
4127 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4128 
4129 	/* Stop the transmit and receive processes. */
4130 	CSR_WRITE(sc, WMREG_RCTL, 0);
4131 	sc->sc_rctl &= ~RCTL_EN;
4132 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
4133 	CSR_WRITE_FLUSH(sc);
4134 
4135 	/* XXX set_tbi_sbp_82543() */
4136 
4137 	delay(10*1000);
4138 
4139 	/* Must acquire the MDIO ownership before MAC reset */
4140 	switch (sc->sc_type) {
4141 	case WM_T_82573:
4142 	case WM_T_82574:
4143 	case WM_T_82583:
4144 		wm_get_hw_semaphore_82573(sc);
4145 		break;
4146 	default:
4147 		break;
4148 	}
4149 
4150 	/*
4151 	 * 82541 Errata 29? & 82547 Errata 28?
4152 	 * See also the description about PHY_RST bit in CTRL register
4153 	 * in 8254x_GBe_SDM.pdf.
4154 	 */
4155 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
4156 		CSR_WRITE(sc, WMREG_CTRL,
4157 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
4158 		CSR_WRITE_FLUSH(sc);
4159 		delay(5000);
4160 	}
4161 
4162 	switch (sc->sc_type) {
4163 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
4164 	case WM_T_82541:
4165 	case WM_T_82541_2:
4166 	case WM_T_82547:
4167 	case WM_T_82547_2:
4168 		/*
4169 		 * On some chipsets, a reset through a memory-mapped write
4170 		 * cycle can cause the chip to reset before completing the
4171 		 * write cycle.  This causes major headache that can be
4172 		 * avoided by issuing the reset via indirect register writes
4173 		 * through I/O space.
4174 		 *
4175 		 * So, if we successfully mapped the I/O BAR at attach time,
4176 		 * use that.  Otherwise, try our luck with a memory-mapped
4177 		 * reset.
4178 		 */
4179 		if (sc->sc_flags & WM_F_IOH_VALID)
4180 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
4181 		else
4182 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
4183 		break;
4184 	case WM_T_82545_3:
4185 	case WM_T_82546_3:
4186 		/* Use the shadow control register on these chips. */
4187 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
4188 		break;
4189 	case WM_T_80003:
4190 		mask = swfwphysem[sc->sc_funcid];
4191 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4192 		wm_get_swfw_semaphore(sc, mask);
4193 		CSR_WRITE(sc, WMREG_CTRL, reg);
4194 		wm_put_swfw_semaphore(sc, mask);
4195 		break;
4196 	case WM_T_ICH8:
4197 	case WM_T_ICH9:
4198 	case WM_T_ICH10:
4199 	case WM_T_PCH:
4200 	case WM_T_PCH2:
4201 	case WM_T_PCH_LPT:
4202 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4203 		if (wm_check_reset_block(sc) == 0) {
4204 			/*
4205 			 * Gate automatic PHY configuration by hardware on
4206 			 * non-managed 82579
4207 			 */
4208 			if ((sc->sc_type == WM_T_PCH2)
4209 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
4210 				!= 0))
4211 				wm_gate_hw_phy_config_ich8lan(sc, 1);
4212 
4213 
4214 			reg |= CTRL_PHY_RESET;
4215 			phy_reset = 1;
4216 		}
4217 		wm_get_swfwhw_semaphore(sc);
4218 		CSR_WRITE(sc, WMREG_CTRL, reg);
4219 		/* Don't insert a completion barrier when resetting */
4220 		delay(20*1000);
4221 		wm_put_swfwhw_semaphore(sc);
4222 		break;
4223 	case WM_T_82542_2_0:
4224 	case WM_T_82542_2_1:
4225 	case WM_T_82543:
4226 	case WM_T_82540:
4227 	case WM_T_82545:
4228 	case WM_T_82546:
4229 	case WM_T_82571:
4230 	case WM_T_82572:
4231 	case WM_T_82573:
4232 	case WM_T_82574:
4233 	case WM_T_82575:
4234 	case WM_T_82576:
4235 	case WM_T_82580:
4236 	case WM_T_82580ER:
4237 	case WM_T_82583:
4238 	case WM_T_I350:
4239 	case WM_T_I354:
4240 	case WM_T_I210:
4241 	case WM_T_I211:
4242 	default:
4243 		/* Everything else can safely use the documented method. */
4244 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4245 		break;
4246 	}
4247 
4248 	/* Must release the MDIO ownership after MAC reset */
4249 	switch (sc->sc_type) {
4250 	case WM_T_82574:
4251 	case WM_T_82583:
4252 		wm_put_hw_semaphore_82573(sc);
4253 		break;
4254 	default:
4255 		break;
4256 	}
4257 
4258 	if (phy_reset != 0)
4259 		wm_get_cfg_done(sc);
4260 
4261 	/* reload EEPROM */
4262 	switch (sc->sc_type) {
4263 	case WM_T_82542_2_0:
4264 	case WM_T_82542_2_1:
4265 	case WM_T_82543:
4266 	case WM_T_82544:
4267 		delay(10);
4268 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4269 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4270 		CSR_WRITE_FLUSH(sc);
4271 		delay(2000);
4272 		break;
4273 	case WM_T_82540:
4274 	case WM_T_82545:
4275 	case WM_T_82545_3:
4276 	case WM_T_82546:
4277 	case WM_T_82546_3:
4278 		delay(5*1000);
4279 		/* XXX Disable HW ARPs on ASF enabled adapters */
4280 		break;
4281 	case WM_T_82541:
4282 	case WM_T_82541_2:
4283 	case WM_T_82547:
4284 	case WM_T_82547_2:
4285 		delay(20000);
4286 		/* XXX Disable HW ARPs on ASF enabled adapters */
4287 		break;
4288 	case WM_T_82571:
4289 	case WM_T_82572:
4290 	case WM_T_82573:
4291 	case WM_T_82574:
4292 	case WM_T_82583:
4293 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
4294 			delay(10);
4295 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4296 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4297 			CSR_WRITE_FLUSH(sc);
4298 		}
4299 		/* check EECD_EE_AUTORD */
4300 		wm_get_auto_rd_done(sc);
4301 		/*
4302 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
4303 		 * is set.
4304 		 */
4305 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4306 		    || (sc->sc_type == WM_T_82583))
4307 			delay(25*1000);
4308 		break;
4309 	case WM_T_82575:
4310 	case WM_T_82576:
4311 	case WM_T_82580:
4312 	case WM_T_82580ER:
4313 	case WM_T_I350:
4314 	case WM_T_I354:
4315 	case WM_T_I210:
4316 	case WM_T_I211:
4317 	case WM_T_80003:
4318 		/* check EECD_EE_AUTORD */
4319 		wm_get_auto_rd_done(sc);
4320 		break;
4321 	case WM_T_ICH8:
4322 	case WM_T_ICH9:
4323 	case WM_T_ICH10:
4324 	case WM_T_PCH:
4325 	case WM_T_PCH2:
4326 	case WM_T_PCH_LPT:
4327 		break;
4328 	default:
4329 		panic("%s: unknown type\n", __func__);
4330 	}
4331 
4332 	/* Check whether EEPROM is present or not */
4333 	switch (sc->sc_type) {
4334 	case WM_T_82575:
4335 	case WM_T_82576:
4336 #if 0 /* XXX */
4337 	case WM_T_82580:
4338 	case WM_T_82580ER:
4339 #endif
4340 	case WM_T_I350:
4341 	case WM_T_I354:
4342 	case WM_T_ICH8:
4343 	case WM_T_ICH9:
4344 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4345 			/* Not found */
4346 			sc->sc_flags |= WM_F_EEPROM_INVALID;
4347 			if ((sc->sc_type == WM_T_82575)
4348 			    || (sc->sc_type == WM_T_82576)
4349 			    || (sc->sc_type == WM_T_82580)
4350 			    || (sc->sc_type == WM_T_82580ER)
4351 			    || (sc->sc_type == WM_T_I350)
4352 			    || (sc->sc_type == WM_T_I354))
4353 				wm_reset_init_script_82575(sc);
4354 		}
4355 		break;
4356 	default:
4357 		break;
4358 	}
4359 
4360 	if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
4361 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
4362 		/* clear global device reset status bit */
4363 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4364 	}
4365 
4366 	/* Clear any pending interrupt events. */
4367 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4368 	reg = CSR_READ(sc, WMREG_ICR);
4369 
4370 	/* reload sc_ctrl */
4371 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4372 
4373 	if (sc->sc_type == WM_T_I350)
4374 		wm_set_eee_i350(sc);
4375 
4376 	/* dummy read from WUC */
4377 	if (sc->sc_type == WM_T_PCH)
4378 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
4379 	/*
4380 	 * For PCH, this write will make sure that any noise will be detected
4381 	 * as a CRC error and be dropped rather than show up as a bad packet
4382 	 * to the DMA engine
4383 	 */
4384 	if (sc->sc_type == WM_T_PCH)
4385 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4386 
4387 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4388 		CSR_WRITE(sc, WMREG_WUC, 0);
4389 
4390 	/* XXX need special handling for 82580 */
4391 }
4392 
4393 static void
4394 wm_set_vlan(struct wm_softc *sc)
4395 {
4396 	/* Deal with VLAN enables. */
4397 	if (VLAN_ATTACHED(&sc->sc_ethercom))
4398 		sc->sc_ctrl |= CTRL_VME;
4399 	else
4400 		sc->sc_ctrl &= ~CTRL_VME;
4401 
4402 	/* Write the control registers. */
4403 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4404 }
4405 
4406 /*
4407  * wm_init:		[ifnet interface function]
4408  *
4409  *	Initialize the interface.  Must be called at splnet().
4410  */
4411 static int
4412 wm_init(struct ifnet *ifp)
4413 {
4414 	struct wm_softc *sc = ifp->if_softc;
4415 	struct wm_rxsoft *rxs;
4416 	int i, j, trynum, error = 0;
4417 	uint32_t reg;
4418 
4419 	/*
4420 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4421 	 * There is a small but measurable benefit to avoiding the adjustment
4422 	 * of the descriptor so that the headers are aligned, for normal mtu,
4423 	 * on such platforms.  One possibility is that the DMA itself is
4424 	 * slightly more efficient if the front of the entire packet (instead
4425 	 * of the front of the headers) is aligned.
4426 	 *
4427 	 * Note we must always set align_tweak to 0 if we are using
4428 	 * jumbo frames.
4429 	 */
4430 #ifdef __NO_STRICT_ALIGNMENT
4431 	sc->sc_align_tweak = 0;
4432 #else
4433 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4434 		sc->sc_align_tweak = 0;
4435 	else
4436 		sc->sc_align_tweak = 2;
4437 #endif /* __NO_STRICT_ALIGNMENT */
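	/*
	 * Example: with sc_align_tweak = 2 the received frame starts 2
	 * bytes into the buffer, so the 14-byte Ethernet header ends at
	 * offset 16 and the IP header that follows it is 4-byte aligned,
	 * which matters on strict-alignment platforms.
	 */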
4438 
4439 	/* Cancel any pending I/O. */
4440 	wm_stop(ifp, 0);
4441 
4442 	/* update statistics before reset */
4443 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4444 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4445 
4446 	/* Reset the chip to a known state. */
4447 	wm_reset(sc);
4448 
4449 	switch (sc->sc_type) {
4450 	case WM_T_82571:
4451 	case WM_T_82572:
4452 	case WM_T_82573:
4453 	case WM_T_82574:
4454 	case WM_T_82583:
4455 	case WM_T_80003:
4456 	case WM_T_ICH8:
4457 	case WM_T_ICH9:
4458 	case WM_T_ICH10:
4459 	case WM_T_PCH:
4460 	case WM_T_PCH2:
4461 	case WM_T_PCH_LPT:
4462 		if (wm_check_mng_mode(sc) != 0)
4463 			wm_get_hw_control(sc);
4464 		break;
4465 	default:
4466 		break;
4467 	}
4468 
4469 	/* Reset the PHY. */
4470 	if (sc->sc_flags & WM_F_HAS_MII)
4471 		wm_gmii_reset(sc);
4472 
4473 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
4474 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
4475 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
4476 	    || (sc->sc_type == WM_T_PCH_LPT))
4477 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
4478 
4479 	/* Initialize the transmit descriptor ring. */
4480 	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
4481 	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
4482 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4483 	sc->sc_txfree = WM_NTXDESC(sc);
4484 	sc->sc_txnext = 0;
4485 
4486 	if (sc->sc_type < WM_T_82543) {
4487 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
4488 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
4489 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
4490 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
4491 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
4492 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
4493 	} else {
4494 		CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
4495 		CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
4496 		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
4497 		CSR_WRITE(sc, WMREG_TDH, 0);
4498 		CSR_WRITE(sc, WMREG_TIDV, 375);		/* ITR / 4 */
4499 		CSR_WRITE(sc, WMREG_TADV, 375);		/* should be same */
4500 
4501 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4502 			/*
4503 			 * Don't write TDT before TCTL.EN is set.
4504 			 * See the documentation.
4505 			 */
4506 			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
4507 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
4508 			    | TXDCTL_WTHRESH(0));
4509 		else {
4510 			CSR_WRITE(sc, WMREG_TDT, 0);
4511 			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
4512 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
4513 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
4514 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
4515 		}
4516 	}
4517 	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
4518 	CSR_WRITE(sc, WMREG_TQSA_HI, 0);
4519 
4520 	/* Initialize the transmit job descriptors. */
4521 	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
4522 		sc->sc_txsoft[i].txs_mbuf = NULL;
4523 	sc->sc_txsfree = WM_TXQUEUELEN(sc);
4524 	sc->sc_txsnext = 0;
4525 	sc->sc_txsdirty = 0;
4526 
4527 	/*
4528 	 * Initialize the receive descriptor and receive job
4529 	 * descriptor rings.
4530 	 */
4531 	if (sc->sc_type < WM_T_82543) {
4532 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
4533 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
4534 		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
4535 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
4536 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
4537 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
4538 
4539 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
4540 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
4541 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
4542 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
4543 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
4544 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
4545 	} else {
4546 		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
4547 		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
4548 		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
4549 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4550 			CSR_WRITE(sc, WMREG_EITR(0), 450);
4551 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
4552 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
4553 			CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
4554 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
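			/*
			 * The BSIZEPKT field counts the buffer size in
			 * units of 1 << SRRCTL_BSIZEPKT_SHIFT bytes (1 KB
			 * if the shift is 10, as on other e1000-family
			 * parts), so the usual MCLBYTES of 2048 would
			 * program a 2 KB receive buffer; the panic above
			 * catches cluster sizes that are not a whole
			 * number of units.
			 */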
4555 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
4556 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
4557 			    | RXDCTL_WTHRESH(1));
4558 		} else {
4559 			CSR_WRITE(sc, WMREG_RDH, 0);
4560 			CSR_WRITE(sc, WMREG_RDT, 0);
4561 			CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
4562 			CSR_WRITE(sc, WMREG_RADV, 375);	/* MUST be same */
4563 		}
4564 	}
4565 	for (i = 0; i < WM_NRXDESC; i++) {
4566 		rxs = &sc->sc_rxsoft[i];
4567 		if (rxs->rxs_mbuf == NULL) {
4568 			if ((error = wm_add_rxbuf(sc, i)) != 0) {
4569 				log(LOG_ERR, "%s: unable to allocate or map "
4570 				    "rx buffer %d, error = %d\n",
4571 				    device_xname(sc->sc_dev), i, error);
4572 				/*
4573 				 * XXX Should attempt to run with fewer receive
4574 				 * XXX buffers instead of just failing.
4575 				 */
4576 				wm_rxdrain(sc);
4577 				goto out;
4578 			}
4579 		} else {
4580 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4581 				WM_INIT_RXDESC(sc, i);
4582 			/*
4583 			 * On 82575 and newer devices, the RX descriptors
4584 			 * must be initialized after RCTL.EN is set in
4585 			 * wm_set_filter().
4586 			 */
4587 		}
4588 	}
4589 	sc->sc_rxptr = 0;
4590 	sc->sc_rxdiscard = 0;
4591 	WM_RXCHAIN_RESET(sc);
4592 
4593 	/*
4594 	 * Clear out the VLAN table -- we don't use it (yet).
4595 	 */
4596 	CSR_WRITE(sc, WMREG_VET, 0);
4597 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4598 		trynum = 10; /* Due to hw errata */
4599 	else
4600 		trynum = 1;
4601 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
4602 		for (j = 0; j < trynum; j++)
4603 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4604 
4605 	/*
4606 	 * Set up flow-control parameters.
4607 	 *
4608 	 * XXX Values could probably stand some tuning.
4609 	 */
4610 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4611 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4612 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
4613 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4614 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4615 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4616 	}
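	/*
	 * FCAL/FCAH/FCT identify 802.3x PAUSE frames: the *_CONST defaults
	 * are expected to encode the reserved multicast address
	 * 01:80:c2:00:00:01, and ETHERTYPE_FLOWCONTROL is the MAC control
	 * ethertype 0x8808.
	 */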
4617 
4618 	sc->sc_fcrtl = FCRTL_DFLT;
4619 	if (sc->sc_type < WM_T_82543) {
4620 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4621 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4622 	} else {
4623 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4624 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4625 	}
4626 
4627 	if (sc->sc_type == WM_T_80003)
4628 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4629 	else
4630 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4631 
4632 	/* Writes the control register. */
4633 	wm_set_vlan(sc);
4634 
4635 	if (sc->sc_flags & WM_F_HAS_MII) {
4636 		int val;
4637 
4638 		switch (sc->sc_type) {
4639 		case WM_T_80003:
4640 		case WM_T_ICH8:
4641 		case WM_T_ICH9:
4642 		case WM_T_ICH10:
4643 		case WM_T_PCH:
4644 		case WM_T_PCH2:
4645 		case WM_T_PCH_LPT:
4646 			/*
4647 			 * Set the mac to wait the maximum time between each
4648 			 * iteration and increase the max iterations when
4649 			 * polling the phy; this fixes erroneous timeouts at
4650 			 * 10Mbps.
4651 			 */
4652 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4653 			    0xFFFF);
4654 			val = wm_kmrn_readreg(sc,
4655 			    KUMCTRLSTA_OFFSET_INB_PARAM);
4656 			val |= 0x3F;
4657 			wm_kmrn_writereg(sc,
4658 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
4659 			break;
4660 		default:
4661 			break;
4662 		}
4663 
4664 		if (sc->sc_type == WM_T_80003) {
4665 			val = CSR_READ(sc, WMREG_CTRL_EXT);
4666 			val &= ~CTRL_EXT_LINK_MODE_MASK;
4667 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4668 
4669 			/* Bypass RX and TX FIFO's */
4670 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4671 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4672 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4673 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4674 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4675 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4676 		}
4677 	}
4678 #if 0
4679 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4680 #endif
4681 
4682 	/*
4683 	 * Set up checksum offload parameters.
4684 	 */
4685 	reg = CSR_READ(sc, WMREG_RXCSUM);
4686 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4687 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4688 		reg |= RXCSUM_IPOFL;
4689 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4690 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4691 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4692 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4693 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
4694 
4695 	/* Reset TBI's RXCFG count */
4696 	sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
4697 
4698 	/*
4699 	 * Set up the interrupt registers.
4700 	 */
4701 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4702 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4703 	    ICR_RXO | ICR_RXT0;
4704 	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
4705 		sc->sc_icr |= ICR_RXCFG;
4706 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4707 
4708 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4709 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4710 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
4711 		reg = CSR_READ(sc, WMREG_KABGTXD);
4712 		reg |= KABGTXD_BGSQLBIAS;
4713 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
4714 	}
4715 
4716 	/* Set up the inter-packet gap. */
4717 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4718 
4719 	if (sc->sc_type >= WM_T_82543) {
4720 		/*
4721 		 * Set up the interrupt throttling register (units of 256ns)
4722 		 * Note that a footnote in Intel's documentation says this
4723 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4724 		 * or 10Mbit mode.  Empirically, it appears to be the case
4725 		 * that this is also true for the 1024ns units of the other
4726 		 * interrupt-related timer registers -- so, really, we ought
4727 		 * to divide this value by 4 when the link speed is low.
4728 		 *
4729 		 * XXX implement this division at link speed change!
4730 		 */
4731 
4732 		 /*
4733 		  * For N interrupts/sec, set this value to:
4734 		  * 1000000000 / (N * 256).  Note that we set the
4735 		  * absolute and packet timer values to this value
4736 		  * divided by 4 to get "simple timer" behavior.
4737 		  */
4738 
4739 		sc->sc_itr = 1500;		/* 2604 ints/sec */
4740 		CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
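		/*
		 * Checking the arithmetic against the formula above:
		 * 1000000000 / (2604 * 256) is about 1500, i.e. the chip
		 * enforces a minimum of 1500 * 256 ns = 384 us between
		 * interrupts, which caps the rate at roughly 2604/sec.
		 */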
4741 	}
4742 
4743 	/* Set the VLAN ethernetype. */
4744 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4745 
4746 	/*
4747 	 * Set up the transmit control register; we start out with
4748 	 * a collision distance suitable for FDX, but update it when
4749 	 * we resolve the media type.
4750 	 */
4751 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4752 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
4753 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4754 	if (sc->sc_type >= WM_T_82571)
4755 		sc->sc_tctl |= TCTL_MULR;
4756 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4757 
4758 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4759 		/*
4760 		 * Write TDT after TCTL.EN is set.
4761 		 * See the documentation.
4762 		 */
4763 		CSR_WRITE(sc, WMREG_TDT, 0);
4764 	}
4765 
4766 	if (sc->sc_type == WM_T_80003) {
4767 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
4768 		reg &= ~TCTL_EXT_GCEX_MASK;
4769 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4770 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4771 	}
4772 
4773 	/* Set the media. */
4774 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4775 		goto out;
4776 
4777 	/* Configure for OS presence */
4778 	wm_init_manageability(sc);
4779 
4780 	/*
4781 	 * Set up the receive control register; we actually program
4782 	 * the register when we set the receive filter.  Use multicast
4783 	 * address offset type 0.
4784 	 *
4785 	 * Only the i82544 has the ability to strip the incoming
4786 	 * CRC, so we don't enable that feature.
4787 	 */
4788 	sc->sc_mchash_type = 0;
4789 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4790 	    | RCTL_MO(sc->sc_mchash_type);
4791 
4792 	/*
4793 	 * The I350 has a bug where it always strips the CRC whether
4794 	 * asked to or not. So ask for stripped CRC here and cope in wm_rxintr().
4795 	 */
4796 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4797 	    || (sc->sc_type == WM_T_I210))
4798 		sc->sc_rctl |= RCTL_SECRC;
4799 
4800 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4801 	    && (ifp->if_mtu > ETHERMTU)) {
4802 		sc->sc_rctl |= RCTL_LPE;
4803 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4804 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4805 	}
4806 
4807 	if (MCLBYTES == 2048) {
4808 		sc->sc_rctl |= RCTL_2k;
4809 	} else {
4810 		if (sc->sc_type >= WM_T_82543) {
4811 			switch (MCLBYTES) {
4812 			case 4096:
4813 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4814 				break;
4815 			case 8192:
4816 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4817 				break;
4818 			case 16384:
4819 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4820 				break;
4821 			default:
4822 				panic("wm_init: MCLBYTES %d unsupported",
4823 				    MCLBYTES);
4824 				break;
4825 			}
4826 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
4827 	}
4828 
4829 	/* Set the receive filter. */
4830 	wm_set_filter(sc);
4831 
4832 	/* Enable ECC */
4833 	switch (sc->sc_type) {
4834 	case WM_T_82571:
4835 		reg = CSR_READ(sc, WMREG_PBA_ECC);
4836 		reg |= PBA_ECC_CORR_EN;
4837 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4838 		break;
4839 	case WM_T_PCH_LPT:
4840 		reg = CSR_READ(sc, WMREG_PBECCSTS);
4841 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4842 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4843 
4844 		reg = CSR_READ(sc, WMREG_CTRL);
4845 		reg |= CTRL_MEHE;
4846 		CSR_WRITE(sc, WMREG_CTRL, reg);
4847 		break;
4848 	default:
4849 		break;
4850 	}
4851 
4852 	/* On 575 and later set RDT only if RX enabled */
4853 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4854 		for (i = 0; i < WM_NRXDESC; i++)
4855 			WM_INIT_RXDESC(sc, i);
4856 
4857 	/* Start the one second link check clock. */
4858 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4859 
4860 	/* ...all done! */
4861 	ifp->if_flags |= IFF_RUNNING;
4862 	ifp->if_flags &= ~IFF_OACTIVE;
4863 
4864  out:
4865 	sc->sc_if_flags = ifp->if_flags;
4866 	if (error)
4867 		log(LOG_ERR, "%s: interface not running\n",
4868 		    device_xname(sc->sc_dev));
4869 	return error;
4870 }
4871 
4872 /*
4873  * wm_rxdrain:
4874  *
4875  *	Drain the receive queue.
4876  */
4877 static void
4878 wm_rxdrain(struct wm_softc *sc)
4879 {
4880 	struct wm_rxsoft *rxs;
4881 	int i;
4882 
4883 	for (i = 0; i < WM_NRXDESC; i++) {
4884 		rxs = &sc->sc_rxsoft[i];
4885 		if (rxs->rxs_mbuf != NULL) {
4886 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4887 			m_freem(rxs->rxs_mbuf);
4888 			rxs->rxs_mbuf = NULL;
4889 		}
4890 	}
4891 }
4892 
4893 /*
4894  * wm_stop:		[ifnet interface function]
4895  *
4896  *	Stop transmission on the interface.
4897  */
4898 static void
4899 wm_stop(struct ifnet *ifp, int disable)
4900 {
4901 	struct wm_softc *sc = ifp->if_softc;
4902 	struct wm_txsoft *txs;
4903 	int i;
4904 
4905 	/* Stop the one second clock. */
4906 	callout_stop(&sc->sc_tick_ch);
4907 
4908 	/* Stop the 82547 Tx FIFO stall check timer. */
4909 	if (sc->sc_type == WM_T_82547)
4910 		callout_stop(&sc->sc_txfifo_ch);
4911 
4912 	if (sc->sc_flags & WM_F_HAS_MII) {
4913 		/* Down the MII. */
4914 		mii_down(&sc->sc_mii);
4915 	} else {
4916 #if 0
4917 		/* Should we clear PHY's status properly? */
4918 		wm_reset(sc);
4919 #endif
4920 	}
4921 
4922 	/* Stop the transmit and receive processes. */
4923 	CSR_WRITE(sc, WMREG_TCTL, 0);
4924 	CSR_WRITE(sc, WMREG_RCTL, 0);
4925 	sc->sc_rctl &= ~RCTL_EN;
4926 
4927 	/*
4928 	 * Clear the interrupt mask to ensure the device cannot assert its
4929 	 * interrupt line.
4930 	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4931 	 * any currently pending or shared interrupt.
4932 	 */
4933 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4934 	sc->sc_icr = 0;
4935 
4936 	/* Release any queued transmit buffers. */
4937 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4938 		txs = &sc->sc_txsoft[i];
4939 		if (txs->txs_mbuf != NULL) {
4940 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4941 			m_freem(txs->txs_mbuf);
4942 			txs->txs_mbuf = NULL;
4943 		}
4944 	}
4945 
4946 	/* Mark the interface as down and cancel the watchdog timer. */
4947 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4948 	ifp->if_timer = 0;
4949 
4950 	if (disable)
4951 		wm_rxdrain(sc);
4952 
4953 #if 0 /* notyet */
4954 	if (sc->sc_type >= WM_T_82544)
4955 		CSR_WRITE(sc, WMREG_WUC, 0);
4956 #endif
4957 }
4958 
4959 void
4960 wm_get_auto_rd_done(struct wm_softc *sc)
4961 {
4962 	int i;
4963 
4964 	/* wait for eeprom to reload */
4965 	switch (sc->sc_type) {
4966 	case WM_T_82571:
4967 	case WM_T_82572:
4968 	case WM_T_82573:
4969 	case WM_T_82574:
4970 	case WM_T_82583:
4971 	case WM_T_82575:
4972 	case WM_T_82576:
4973 	case WM_T_82580:
4974 	case WM_T_82580ER:
4975 	case WM_T_I350:
4976 	case WM_T_I354:
4977 	case WM_T_I210:
4978 	case WM_T_I211:
4979 	case WM_T_80003:
4980 	case WM_T_ICH8:
4981 	case WM_T_ICH9:
4982 		for (i = 0; i < 10; i++) {
4983 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4984 				break;
4985 			delay(1000);
4986 		}
4987 		if (i == 10) {
4988 			log(LOG_ERR, "%s: auto read from eeprom failed to "
4989 			    "complete\n", device_xname(sc->sc_dev));
4990 		}
4991 		break;
4992 	default:
4993 		break;
4994 	}
4995 }
4996 
4997 void
4998 wm_lan_init_done(struct wm_softc *sc)
4999 {
5000 	uint32_t reg = 0;
5001 	int i;
5002 
5003 	/* wait for eeprom to reload */
5004 	switch (sc->sc_type) {
5005 	case WM_T_ICH10:
5006 	case WM_T_PCH:
5007 	case WM_T_PCH2:
5008 	case WM_T_PCH_LPT:
5009 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
5010 			reg = CSR_READ(sc, WMREG_STATUS);
5011 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
5012 				break;
5013 			delay(100);
5014 		}
5015 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
5016 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
5017 			    "complete\n", device_xname(sc->sc_dev), __func__);
5018 		}
5019 		break;
5020 	default:
5021 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
5022 		    __func__);
5023 		break;
5024 	}
5025 
5026 	reg &= ~STATUS_LAN_INIT_DONE;
5027 	CSR_WRITE(sc, WMREG_STATUS, reg);
5028 }
5029 
5030 void
5031 wm_get_cfg_done(struct wm_softc *sc)
5032 {
5033 	int mask;
5034 	uint32_t reg;
5035 	int i;
5036 
5037 	/* wait for eeprom to reload */
5038 	switch (sc->sc_type) {
5039 	case WM_T_82542_2_0:
5040 	case WM_T_82542_2_1:
5041 		/* null */
5042 		break;
5043 	case WM_T_82543:
5044 	case WM_T_82544:
5045 	case WM_T_82540:
5046 	case WM_T_82545:
5047 	case WM_T_82545_3:
5048 	case WM_T_82546:
5049 	case WM_T_82546_3:
5050 	case WM_T_82541:
5051 	case WM_T_82541_2:
5052 	case WM_T_82547:
5053 	case WM_T_82547_2:
5054 	case WM_T_82573:
5055 	case WM_T_82574:
5056 	case WM_T_82583:
5057 		/* generic */
5058 		delay(10*1000);
5059 		break;
5060 	case WM_T_80003:
5061 	case WM_T_82571:
5062 	case WM_T_82572:
5063 	case WM_T_82575:
5064 	case WM_T_82576:
5065 	case WM_T_82580:
5066 	case WM_T_82580ER:
5067 	case WM_T_I350:
5068 	case WM_T_I354:
5069 	case WM_T_I210:
5070 	case WM_T_I211:
5071 		if (sc->sc_type == WM_T_82571) {
5072 			/* On 82571, all functions share port 0's CFGDONE bit */
5073 			mask = EEMNGCTL_CFGDONE_0;
5074 		} else
5075 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
5076 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
5077 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
5078 				break;
5079 			delay(1000);
5080 		}
5081 		if (i >= WM_PHY_CFG_TIMEOUT) {
5082 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
5083 				device_xname(sc->sc_dev), __func__));
5084 		}
5085 		break;
5086 	case WM_T_ICH8:
5087 	case WM_T_ICH9:
5088 	case WM_T_ICH10:
5089 	case WM_T_PCH:
5090 	case WM_T_PCH2:
5091 	case WM_T_PCH_LPT:
5092 		delay(10*1000);
5093 		if (sc->sc_type >= WM_T_ICH10)
5094 			wm_lan_init_done(sc);
5095 		else
5096 			wm_get_auto_rd_done(sc);
5097 
5098 		reg = CSR_READ(sc, WMREG_STATUS);
5099 		if ((reg & STATUS_PHYRA) != 0)
5100 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
5101 		break;
5102 	default:
5103 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
5104 		    __func__);
5105 		break;
5106 	}
5107 }
5108 
5109 /*
5110  * wm_acquire_eeprom:
5111  *
5112  *	Perform the EEPROM handshake required on some chips.
5113  */
5114 static int
5115 wm_acquire_eeprom(struct wm_softc *sc)
5116 {
5117 	uint32_t reg;
5118 	int x;
5119 	int ret = 0;
5120 
5121 	/* Always succeeds: flash-backed EEPROMs need no handshake. */
5122 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
5123 		return 0;
5124 
5125 	if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
5126 		ret = wm_get_swfwhw_semaphore(sc);
5127 	} else if (sc->sc_flags & WM_F_SWFW_SYNC) {
5128 		/* this will also do wm_get_swsm_semaphore() if needed */
5129 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
5130 	} else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5131 		ret = wm_get_swsm_semaphore(sc);
5132 	}
5133 
5134 	if (ret) {
5135 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5136 			__func__);
5137 		return 1;
5138 	}
5139 
5140 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
5141 		reg = CSR_READ(sc, WMREG_EECD);
5142 
5143 		/* Request EEPROM access. */
5144 		reg |= EECD_EE_REQ;
5145 		CSR_WRITE(sc, WMREG_EECD, reg);
5146 
5147 		/* ..and wait for it to be granted. */
5148 		for (x = 0; x < 1000; x++) {
5149 			reg = CSR_READ(sc, WMREG_EECD);
5150 			if (reg & EECD_EE_GNT)
5151 				break;
5152 			delay(5);
5153 		}
5154 		if ((reg & EECD_EE_GNT) == 0) {
5155 			aprint_error_dev(sc->sc_dev,
5156 			    "could not acquire EEPROM GNT\n");
5157 			reg &= ~EECD_EE_REQ;
5158 			CSR_WRITE(sc, WMREG_EECD, reg);
5159 			if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5160 				wm_put_swfwhw_semaphore(sc);
5161 			if (sc->sc_flags & WM_F_SWFW_SYNC)
5162 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5163 			else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5164 				wm_put_swsm_semaphore(sc);
5165 			return 1;
5166 		}
5167 	}
5168 
5169 	return 0;
5170 }
5171 
5172 /*
5173  * wm_release_eeprom:
5174  *
5175  *	Release the EEPROM mutex.
5176  */
5177 static void
5178 wm_release_eeprom(struct wm_softc *sc)
5179 {
5180 	uint32_t reg;
5181 
5182 	/* Nothing to do; flash does not use the handshake */
5183 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
5184 		return;
5185 
5186 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
5187 		reg = CSR_READ(sc, WMREG_EECD);
5188 		reg &= ~EECD_EE_REQ;
5189 		CSR_WRITE(sc, WMREG_EECD, reg);
5190 	}
5191 
5192 	if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5193 		wm_put_swfwhw_semaphore(sc);
5194 	if (sc->sc_flags & WM_F_SWFW_SYNC)
5195 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5196 	else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5197 		wm_put_swsm_semaphore(sc);
5198 }
5199 
5200 /*
5201  * wm_eeprom_sendbits:
5202  *
5203  *	Send a series of bits to the EEPROM.
5204  */
5205 static void
5206 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
5207 {
5208 	uint32_t reg;
5209 	int x;
5210 
5211 	reg = CSR_READ(sc, WMREG_EECD);
5212 
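	/*
	 * Clock the bits out MSB first: present each bit on EECD.DI, then
	 * pulse SK high and low, with a 2us delay around every edge.
	 */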
5213 	for (x = nbits; x > 0; x--) {
5214 		if (bits & (1U << (x - 1)))
5215 			reg |= EECD_DI;
5216 		else
5217 			reg &= ~EECD_DI;
5218 		CSR_WRITE(sc, WMREG_EECD, reg);
5219 		CSR_WRITE_FLUSH(sc);
5220 		delay(2);
5221 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
5222 		CSR_WRITE_FLUSH(sc);
5223 		delay(2);
5224 		CSR_WRITE(sc, WMREG_EECD, reg);
5225 		CSR_WRITE_FLUSH(sc);
5226 		delay(2);
5227 	}
5228 }
5229 
5230 /*
5231  * wm_eeprom_recvbits:
5232  *
5233  *	Receive a series of bits from the EEPROM.
5234  */
5235 static void
5236 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
5237 {
5238 	uint32_t reg, val;
5239 	int x;
5240 
5241 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
5242 
5243 	val = 0;
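	/*
	 * Clock the bits in MSB first: raise SK, sample EECD.DO while the
	 * clock is high, then drop SK again.
	 */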
5244 	for (x = nbits; x > 0; x--) {
5245 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
5246 		CSR_WRITE_FLUSH(sc);
5247 		delay(2);
5248 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
5249 			val |= (1U << (x - 1));
5250 		CSR_WRITE(sc, WMREG_EECD, reg);
5251 		CSR_WRITE_FLUSH(sc);
5252 		delay(2);
5253 	}
5254 	*valp = val;
5255 }
5256 
5257 /*
5258  * wm_read_eeprom_uwire:
5259  *
5260  *	Read a word from the EEPROM using the MicroWire protocol.
5261  */
5262 static int
5263 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5264 {
5265 	uint32_t reg, val;
5266 	int i;
5267 
5268 	for (i = 0; i < wordcnt; i++) {
5269 		/* Clear SK and DI. */
5270 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
5271 		CSR_WRITE(sc, WMREG_EECD, reg);
5272 
5273 		/*
5274 		 * XXX: workaround for a bug in qemu-0.12.x and prior
5275 		 * and Xen.
5276 		 *
5277 		 * We use this workaround only for the 82540 because qemu's
5278 		 * e1000 emulation acts as an 82540.
5279 		 */
5280 		if (sc->sc_type == WM_T_82540) {
5281 			reg |= EECD_SK;
5282 			CSR_WRITE(sc, WMREG_EECD, reg);
5283 			reg &= ~EECD_SK;
5284 			CSR_WRITE(sc, WMREG_EECD, reg);
5285 			CSR_WRITE_FLUSH(sc);
5286 			delay(2);
5287 		}
5288 		/* XXX: end of workaround */
5289 
5290 		/* Set CHIP SELECT. */
5291 		reg |= EECD_CS;
5292 		CSR_WRITE(sc, WMREG_EECD, reg);
5293 		CSR_WRITE_FLUSH(sc);
5294 		delay(2);
5295 
5296 		/* Shift in the READ command. */
5297 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
5298 
5299 		/* Shift in address. */
5300 		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
5301 
5302 		/* Shift out the data. */
5303 		wm_eeprom_recvbits(sc, &val, 16);
5304 		data[i] = val & 0xffff;
5305 
5306 		/* Clear CHIP SELECT. */
5307 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
5308 		CSR_WRITE(sc, WMREG_EECD, reg);
5309 		CSR_WRITE_FLUSH(sc);
5310 		delay(2);
5311 	}
5312 
5313 	return 0;
5314 }
5315 
5316 /*
5317  * wm_spi_eeprom_ready:
5318  *
5319  *	Wait for a SPI EEPROM to be ready for commands.
5320  */
5321 static int
5322 wm_spi_eeprom_ready(struct wm_softc *sc)
5323 {
5324 	uint32_t val;
5325 	int usec;
5326 
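	/* Poll the SPI status register until the busy (RDY) bit clears. */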
5327 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
5328 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
5329 		wm_eeprom_recvbits(sc, &val, 8);
5330 		if ((val & SPI_SR_RDY) == 0)
5331 			break;
5332 	}
5333 	if (usec >= SPI_MAX_RETRIES) {
5334 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
5335 		return 1;
5336 	}
5337 	return 0;
5338 }
5339 
5340 /*
5341  * wm_read_eeprom_spi:
5342  *
5343  *	Read a word from the EEPROM using the SPI protocol.
5344  */
5345 static int
5346 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5347 {
5348 	uint32_t reg, val;
5349 	int i;
5350 	uint8_t opc;
5351 
5352 	/* Clear SK and CS. */
5353 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
5354 	CSR_WRITE(sc, WMREG_EECD, reg);
5355 	CSR_WRITE_FLUSH(sc);
5356 	delay(2);
5357 
5358 	if (wm_spi_eeprom_ready(sc))
5359 		return 1;
5360 
5361 	/* Toggle CS to flush commands. */
5362 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
5363 	CSR_WRITE_FLUSH(sc);
5364 	delay(2);
5365 	CSR_WRITE(sc, WMREG_EECD, reg);
5366 	CSR_WRITE_FLUSH(sc);
5367 	delay(2);
5368 
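	/*
	 * Small SPI parts use only 8 address bits; the ninth address bit
	 * (A8) is carried in the READ opcode for words in the upper half.
	 */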
5369 	opc = SPI_OPC_READ;
5370 	if (sc->sc_ee_addrbits == 8 && word >= 128)
5371 		opc |= SPI_OPC_A8;
5372 
5373 	wm_eeprom_sendbits(sc, opc, 8);
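	/* The EEPROM is byte addressed, so convert the word index to a byte address. */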
5374 	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
5375 
5376 	for (i = 0; i < wordcnt; i++) {
5377 		wm_eeprom_recvbits(sc, &val, 16);
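		/* The SPI device returns the high byte first; swap into host order. */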
5378 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
5379 	}
5380 
5381 	/* Raise CS and clear SK. */
5382 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
5383 	CSR_WRITE(sc, WMREG_EECD, reg);
5384 	CSR_WRITE_FLUSH(sc);
5385 	delay(2);
5386 
5387 	return 0;
5388 }
5389 
5390 #define NVM_CHECKSUM			0xBABA
5391 #define EEPROM_SIZE			0x0040
5392 #define NVM_COMPAT			0x0003
5393 #define NVM_COMPAT_VALID_CHECKSUM	0x0001
5394 #define NVM_FUTURE_INIT_WORD1			0x0019
5395 #define NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM	0x0040
5396 
5397 /*
5398  * wm_validate_eeprom_checksum
5399  *
5400  * The checksum is the sum of the first 64 (16-bit) words; it must equal NVM_CHECKSUM.
5401  */
5402 static int
5403 wm_validate_eeprom_checksum(struct wm_softc *sc)
5404 {
5405 	uint16_t checksum;
5406 	uint16_t eeprom_data;
5407 #ifdef WM_DEBUG
5408 	uint16_t csum_wordaddr, valid_checksum;
5409 #endif
5410 	int i;
5411 
5412 	checksum = 0;
5413 
5414 	/* Don't check for I211 */
5415 	if (sc->sc_type == WM_T_I211)
5416 		return 0;
5417 
5418 #ifdef WM_DEBUG
5419 	if (sc->sc_type == WM_T_PCH_LPT) {
5420 		csum_wordaddr = NVM_COMPAT;
5421 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
5422 	} else {
5423 		csum_wordaddr = NVM_FUTURE_INIT_WORD1;
5424 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
5425 	}
5426 
5427 	/* Dump EEPROM image for debug */
5428 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5429 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5430 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
5431 		wm_read_eeprom(sc, csum_wordaddr, 1, &eeprom_data);
5432 		if ((eeprom_data & valid_checksum) == 0) {
5433 			DPRINTF(WM_DEBUG_NVM,
5434 			    ("%s: NVM need to be updated (%04x != %04x)\n",
5435 				device_xname(sc->sc_dev), eeprom_data,
5436 				    valid_checksum));
5437 		}
5438 	}
5439 
5440 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
5441 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
5442 		for (i = 0; i < EEPROM_SIZE; i++) {
5443 			if (wm_read_eeprom(sc, i, 1, &eeprom_data))
5444 				printf("XX ");
5445 			else
5446 				printf("%04x ", eeprom_data);
5447 			if (i % 8 == 7)
5448 				printf("\n");
5449 		}
5450 	}
5451 
5452 #endif /* WM_DEBUG */
5453 
5454 	for (i = 0; i < EEPROM_SIZE; i++) {
5455 		if (wm_read_eeprom(sc, i, 1, &eeprom_data))
5456 			return 1;
5457 		checksum += eeprom_data;
5458 	}
5459 
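	/* A mismatch is only logged under WM_DEBUG; the NVM is still accepted. */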
5460 	if (checksum != (uint16_t) NVM_CHECKSUM) {
5461 #ifdef WM_DEBUG
5462 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
5463 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
5464 #endif
5465 	}
5466 
5467 	return 0;
5468 }
5469 
5470 /*
5471  * wm_read_eeprom:
5472  *
5473  *	Read data from the serial EEPROM.
5474  */
5475 static int
5476 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5477 {
5478 	int rv;
5479 
5480 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
5481 		return 1;
5482 
5483 	if (wm_acquire_eeprom(sc))
5484 		return 1;
5485 
5486 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5487 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5488 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
5489 		rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
5490 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
5491 		rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
5492 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
5493 		rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
5494 	else
5495 		rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
5496 
5497 	wm_release_eeprom(sc);
5498 	return rv;
5499 }
5500 
5501 static int
5502 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
5503     uint16_t *data)
5504 {
5505 	int i, eerd = 0;
5506 	int error = 0;
5507 
5508 	for (i = 0; i < wordcnt; i++) {
5509 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
5510 
5511 		CSR_WRITE(sc, WMREG_EERD, eerd);
5512 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
5513 		if (error != 0)
5514 			break;
5515 
5516 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
5517 	}
5518 
5519 	return error;
5520 }
5521 
5522 static int
5523 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
5524 {
5525 	uint32_t attempts = 100000;
5526 	uint32_t i, reg = 0;
5527 	int32_t done = -1;
5528 
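	/* Poll the DONE bit for up to ~500ms (100000 iterations * 5us). */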
5529 	for (i = 0; i < attempts; i++) {
5530 		reg = CSR_READ(sc, rw);
5531 
5532 		if (reg & EERD_DONE) {
5533 			done = 0;
5534 			break;
5535 		}
5536 		delay(5);
5537 	}
5538 
5539 	return done;
5540 }
5541 
5542 static int
5543 wm_check_alt_mac_addr(struct wm_softc *sc)
5544 {
5545 	uint16_t myea[ETHER_ADDR_LEN / 2];
5546 	uint16_t offset = EEPROM_OFF_MACADDR;
5547 
5548 	/* Try to read alternative MAC address pointer */
5549 	if (wm_read_eeprom(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
5550 		return -1;
5551 
5552 	/* Check pointer */
5553 	if (offset == 0xffff)
5554 		return -1;
5555 
5556 	/*
5557 	 * Check whether the alternative MAC address is valid or not.
5558 	 * Some cards have a non-0xffff pointer but don't actually use an
5559 	 * alternative MAC address.
5560 	 *
5561 	 * Check that the multicast (I/G) bit of the first octet is clear.
5562 	 */
5563 	if (wm_read_eeprom(sc, offset, 1, myea) == 0)
5564 		if (((myea[0] & 0xff) & 0x01) == 0)
5565 			return 0; /* found! */
5566 
5567 	/* not found */
5568 	return -1;
5569 }
5570 
5571 static int
5572 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
5573 {
5574 	uint16_t myea[ETHER_ADDR_LEN / 2];
5575 	uint16_t offset = EEPROM_OFF_MACADDR;
5576 	int do_invert = 0;
5577 
5578 	switch (sc->sc_type) {
5579 	case WM_T_82580:
5580 	case WM_T_82580ER:
5581 	case WM_T_I350:
5582 	case WM_T_I354:
5583 		switch (sc->sc_funcid) {
5584 		case 0:
5585 			/* default value (== EEPROM_OFF_MACADDR) */
5586 			break;
5587 		case 1:
5588 			offset = EEPROM_OFF_LAN1;
5589 			break;
5590 		case 2:
5591 			offset = EEPROM_OFF_LAN2;
5592 			break;
5593 		case 3:
5594 			offset = EEPROM_OFF_LAN3;
5595 			break;
5596 		default:
5597 			goto bad;
5598 			/* NOTREACHED */
5599 			break;
5600 		}
5601 		break;
5602 	case WM_T_82571:
5603 	case WM_T_82575:
5604 	case WM_T_82576:
5605 	case WM_T_80003:
5606 	case WM_T_I210:
5607 	case WM_T_I211:
5608 		if (wm_check_alt_mac_addr(sc) != 0) {
5609 			/* reset the offset to LAN0 */
5610 			offset = EEPROM_OFF_MACADDR;
5611 			if ((sc->sc_funcid & 0x01) == 1)
5612 				do_invert = 1;
5613 			goto do_read;
5614 		}
5615 		switch (sc->sc_funcid) {
5616 		case 0:
5617 			/*
5618 			 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR
5619 			 * itself.
5620 			 */
5621 			break;
5622 		case 1:
5623 			offset += EEPROM_OFF_MACADDR_LAN1;
5624 			break;
5625 		case 2:
5626 			offset += EEPROM_OFF_MACADDR_LAN2;
5627 			break;
5628 		case 3:
5629 			offset += EEPROM_OFF_MACADDR_LAN3;
5630 			break;
5631 		default:
5632 			goto bad;
5633 			/* NOTREACHED */
5634 			break;
5635 		}
5636 		break;
5637 	default:
5638 		if ((sc->sc_funcid & 0x01) == 1)
5639 			do_invert = 1;
5640 		break;
5641 	}
5642 
5643  do_read:
5644 	if (wm_read_eeprom(sc, offset, sizeof(myea) / sizeof(myea[0]),
5645 		myea) != 0) {
5646 		goto bad;
5647 	}
5648 
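	/* The EEPROM stores the address as three little-endian 16-bit words. */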
5649 	enaddr[0] = myea[0] & 0xff;
5650 	enaddr[1] = myea[0] >> 8;
5651 	enaddr[2] = myea[1] & 0xff;
5652 	enaddr[3] = myea[1] >> 8;
5653 	enaddr[4] = myea[2] & 0xff;
5654 	enaddr[5] = myea[2] >> 8;
5655 
5656 	/*
5657 	 * Toggle the LSB of the MAC address on the second port
5658 	 * of some dual port cards.
5659 	 */
5660 	if (do_invert != 0)
5661 		enaddr[5] ^= 1;
5662 
5663 	return 0;
5664 
5665  bad:
5666 	aprint_error_dev(sc->sc_dev, "unable to read Ethernet address\n");
5667 
5668 	return -1;
5669 }
5670 
5671 /*
5672  * wm_add_rxbuf:
5673  *
5674  *	Add a receive buffer to the indicated descriptor.
5675  */
5676 static int
5677 wm_add_rxbuf(struct wm_softc *sc, int idx)
5678 {
5679 	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
5680 	struct mbuf *m;
5681 	int error;
5682 
5683 	MGETHDR(m, M_DONTWAIT, MT_DATA);
5684 	if (m == NULL)
5685 		return ENOBUFS;
5686 
5687 	MCLGET(m, M_DONTWAIT);
5688 	if ((m->m_flags & M_EXT) == 0) {
5689 		m_freem(m);
5690 		return ENOBUFS;
5691 	}
5692 
5693 	if (rxs->rxs_mbuf != NULL)
5694 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5695 
5696 	rxs->rxs_mbuf = m;
5697 
5698 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5699 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
5700 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
5701 	if (error) {
5702 		/* XXX XXX XXX */
5703 		aprint_error_dev(sc->sc_dev,
5704 		    "unable to load rx DMA map %d, error = %d\n",
5705 		    idx, error);
5706 		panic("wm_add_rxbuf");
5707 	}
5708 
5709 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5710 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5711 
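	/*
	 * On devices with the newer queue layout, only write the descriptor
	 * once the receiver has been enabled.
	 */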
5712 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5713 		if ((sc->sc_rctl & RCTL_EN) != 0)
5714 			WM_INIT_RXDESC(sc, idx);
5715 	} else
5716 		WM_INIT_RXDESC(sc, idx);
5717 
5718 	return 0;
5719 }
5720 
5721 /*
5722  * wm_set_ral:
5723  *
5724  *	Set an entry in the receive address list.
5725  */
5726 static void
5727 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
5728 {
5729 	uint32_t ral_lo, ral_hi;
5730 
5731 	if (enaddr != NULL) {
5732 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
5733 		    (enaddr[3] << 24);
5734 		ral_hi = enaddr[4] | (enaddr[5] << 8);
5735 		ral_hi |= RAL_AV;
5736 	} else {
5737 		ral_lo = 0;
5738 		ral_hi = 0;
5739 	}
5740 
5741 	if (sc->sc_type >= WM_T_82544) {
5742 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
5743 		    ral_lo);
5744 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
5745 		    ral_hi);
5746 	} else {
5747 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
5748 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
5749 	}
5750 }
5751 
5752 /*
5753  * wm_mchash:
5754  *
5755  *	Compute the hash of the multicast address for the 4096-bit
5756  *	multicast filter.
5757  */
5758 static uint32_t
5759 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
5760 {
5761 	static const int lo_shift[4] = { 4, 3, 2, 0 };
5762 	static const int hi_shift[4] = { 4, 5, 6, 8 };
5763 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
5764 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
5765 	uint32_t hash;
5766 
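	/*
	 * The filter hash is built from the last two bytes of the Ethernet
	 * address; sc_mchash_type selects which bit window the hardware
	 * has been configured to use.
	 */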
5767 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5768 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5769 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
5770 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
5771 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
5772 		return (hash & 0x3ff);
5773 	}
5774 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
5775 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
5776 
5777 	return (hash & 0xfff);
5778 }
5779 
5780 /*
5781  * wm_set_filter:
5782  *
5783  *	Set up the receive filter.
5784  */
5785 static void
5786 wm_set_filter(struct wm_softc *sc)
5787 {
5788 	struct ethercom *ec = &sc->sc_ethercom;
5789 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5790 	struct ether_multi *enm;
5791 	struct ether_multistep step;
5792 	bus_addr_t mta_reg;
5793 	uint32_t hash, reg, bit;
5794 	int i, size;
5795 
5796 	if (sc->sc_type >= WM_T_82544)
5797 		mta_reg = WMREG_CORDOVA_MTA;
5798 	else
5799 		mta_reg = WMREG_MTA;
5800 
5801 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
5802 
5803 	if (ifp->if_flags & IFF_BROADCAST)
5804 		sc->sc_rctl |= RCTL_BAM;
5805 	if (ifp->if_flags & IFF_PROMISC) {
5806 		sc->sc_rctl |= RCTL_UPE;
5807 		goto allmulti;
5808 	}
5809 
5810 	/*
5811 	 * Set the station address in the first RAL slot, and
5812 	 * clear the remaining slots.
5813 	 */
5814 	if (sc->sc_type == WM_T_ICH8)
5815 		size = WM_RAL_TABSIZE_ICH8 - 1;
5816 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
5817 	    || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
5818 	    || (sc->sc_type == WM_T_PCH_LPT))
5819 		size = WM_RAL_TABSIZE_ICH8;
5820 	else if (sc->sc_type == WM_T_82575)
5821 		size = WM_RAL_TABSIZE_82575;
5822 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
5823 		size = WM_RAL_TABSIZE_82576;
5824 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
5825 		size = WM_RAL_TABSIZE_I350;
5826 	else
5827 		size = WM_RAL_TABSIZE;
5828 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
5829 	for (i = 1; i < size; i++)
5830 		wm_set_ral(sc, NULL, i);
5831 
5832 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5833 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5834 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
5835 		size = WM_ICH8_MC_TABSIZE;
5836 	else
5837 		size = WM_MC_TABSIZE;
5838 	/* Clear out the multicast table. */
5839 	for (i = 0; i < size; i++)
5840 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
5841 
5842 	ETHER_FIRST_MULTI(step, ec, enm);
5843 	while (enm != NULL) {
5844 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
5845 			/*
5846 			 * We must listen to a range of multicast addresses.
5847 			 * For now, just accept all multicasts, rather than
5848 			 * trying to set only those filter bits needed to match
5849 			 * the range.  (At this time, the only use of address
5850 			 * ranges is for IP multicast routing, for which the
5851 			 * range is big enough to require all bits set.)
5852 			 */
5853 			goto allmulti;
5854 		}
5855 
5856 		hash = wm_mchash(sc, enm->enm_addrlo);
5857 
5858 		reg = (hash >> 5);
5859 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5860 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5861 		    || (sc->sc_type == WM_T_PCH2)
5862 		    || (sc->sc_type == WM_T_PCH_LPT))
5863 			reg &= 0x1f;
5864 		else
5865 			reg &= 0x7f;
5866 		bit = hash & 0x1f;
5867 
5868 		hash = CSR_READ(sc, mta_reg + (reg << 2));
5869 		hash |= 1U << bit;
5870 
5871 		/* XXX 82544 bug: writes to an odd MTA register must also rewrite the preceding one */
5872 		if (sc->sc_type == WM_T_82544 && (reg & 0x1) == 1) {
5873 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
5874 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5875 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
5876 		} else
5877 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5878 
5879 		ETHER_NEXT_MULTI(step, enm);
5880 	}
5881 
5882 	ifp->if_flags &= ~IFF_ALLMULTI;
5883 	goto setit;
5884 
5885  allmulti:
5886 	ifp->if_flags |= IFF_ALLMULTI;
5887 	sc->sc_rctl |= RCTL_MPE;
5888 
5889  setit:
5890 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
5891 }
5892 
5893 /*
5894  * wm_tbi_mediainit:
5895  *
5896  *	Initialize media for use on 1000BASE-X devices.
5897  */
5898 static void
5899 wm_tbi_mediainit(struct wm_softc *sc)
5900 {
5901 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5902 	const char *sep = "";
5903 
5904 	if (sc->sc_type < WM_T_82543)
5905 		sc->sc_tipg = TIPG_WM_DFLT;
5906 	else
5907 		sc->sc_tipg = TIPG_LG_DFLT;
5908 
5909 	sc->sc_tbi_anegticks = 5;
5910 
5911 	/* Initialize our media structures */
5912 	sc->sc_mii.mii_ifp = ifp;
5913 
5914 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
5915 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
5916 	    wm_tbi_mediastatus);
5917 
5918 	/*
5919 	 * SWD Pins:
5920 	 *
5921 	 *	0 = Link LED (output)
5922 	 *	1 = Loss Of Signal (input)
5923 	 */
5924 	sc->sc_ctrl |= CTRL_SWDPIO(0);
5925 	sc->sc_ctrl &= ~CTRL_SWDPIO(1);
5926 
5927 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5928 
5929 #define	ADD(ss, mm, dd)							\
5930 do {									\
5931 	aprint_normal("%s%s", sep, ss);					\
5932 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
5933 	sep = ", ";							\
5934 } while (/*CONSTCOND*/0)
5935 
5936 	aprint_normal_dev(sc->sc_dev, "");
5937 	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
5938 	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
5939 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
5940 	aprint_normal("\n");
5941 
5942 #undef ADD
5943 
5944 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
5945 }
5946 
5947 /*
5948  * wm_tbi_mediastatus:	[ifmedia interface function]
5949  *
5950  *	Get the current interface media status on a 1000BASE-X device.
5951  */
5952 static void
5953 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5954 {
5955 	struct wm_softc *sc = ifp->if_softc;
5956 	uint32_t ctrl, status;
5957 
5958 	ifmr->ifm_status = IFM_AVALID;
5959 	ifmr->ifm_active = IFM_ETHER;
5960 
5961 	status = CSR_READ(sc, WMREG_STATUS);
5962 	if ((status & STATUS_LU) == 0) {
5963 		ifmr->ifm_active |= IFM_NONE;
5964 		return;
5965 	}
5966 
5967 	ifmr->ifm_status |= IFM_ACTIVE;
5968 	ifmr->ifm_active |= IFM_1000_SX;
5969 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
5970 		ifmr->ifm_active |= IFM_FDX;
5971 	ctrl = CSR_READ(sc, WMREG_CTRL);
5972 	if (ctrl & CTRL_RFCE)
5973 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
5974 	if (ctrl & CTRL_TFCE)
5975 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
5976 }
5977 
5978 /*
5979  * wm_tbi_mediachange:	[ifmedia interface function]
5980  *
5981  *	Set hardware to newly-selected media on a 1000BASE-X device.
5982  */
5983 static int
5984 wm_tbi_mediachange(struct ifnet *ifp)
5985 {
5986 	struct wm_softc *sc = ifp->if_softc;
5987 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5988 	uint32_t status;
5989 	int i;
5990 
5991 	sc->sc_txcw = 0;
5992 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
5993 	    (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
5994 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
5995 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5996 		sc->sc_txcw |= TXCW_ANE;
5997 	} else {
5998 		/*
5999 		 * If autonegotiation is turned off, force link up and turn on
6000 		 * full duplex
6001 		 */
6002 		sc->sc_txcw &= ~TXCW_ANE;
6003 		sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
6004 		sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
6005 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6006 		CSR_WRITE_FLUSH(sc);
6007 		delay(1000);
6008 	}
6009 
6010 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
6011 		    device_xname(sc->sc_dev),sc->sc_txcw));
6012 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
6013 	CSR_WRITE_FLUSH(sc);
6014 	delay(10000);
6015 
6016 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
6017 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
6018 
6019 	/*
6020 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
6021 	 * optics detect a signal, 0 if they don't.
6022 	 */
6023 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
6024 		/* Have signal; wait for the link to come up. */
6025 
6026 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
6027 			/*
6028 			 * Reset the link, and let autonegotiation do its thing
6029 			 */
6030 			sc->sc_ctrl |= CTRL_LRST;
6031 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6032 			CSR_WRITE_FLUSH(sc);
6033 			delay(1000);
6034 			sc->sc_ctrl &= ~CTRL_LRST;
6035 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6036 			CSR_WRITE_FLUSH(sc);
6037 			delay(1000);
6038 		}
6039 
6040 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
6041 			delay(10000);
6042 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
6043 				break;
6044 		}
6045 
6046 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
6047 			    device_xname(sc->sc_dev),i));
6048 
6049 		status = CSR_READ(sc, WMREG_STATUS);
6050 		DPRINTF(WM_DEBUG_LINK,
6051 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
6052 			device_xname(sc->sc_dev),status, STATUS_LU));
6053 		if (status & STATUS_LU) {
6054 			/* Link is up. */
6055 			DPRINTF(WM_DEBUG_LINK,
6056 			    ("%s: LINK: set media -> link up %s\n",
6057 			    device_xname(sc->sc_dev),
6058 			    (status & STATUS_FD) ? "FDX" : "HDX"));
6059 
6060 			/*
6061 			 * NOTE: CTRL will update TFCE and RFCE automatically,
6062 			 * so we should update sc->sc_ctrl
6063 			 */
6064 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
6065 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6066 			sc->sc_fcrtl &= ~FCRTL_XONE;
6067 			if (status & STATUS_FD)
6068 				sc->sc_tctl |=
6069 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6070 			else
6071 				sc->sc_tctl |=
6072 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6073 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
6074 				sc->sc_fcrtl |= FCRTL_XONE;
6075 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6076 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
6077 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
6078 				      sc->sc_fcrtl);
6079 			sc->sc_tbi_linkup = 1;
6080 		} else {
6081 			if (i == WM_LINKUP_TIMEOUT)
6082 				wm_check_for_link(sc);
6083 			/* Link is down. */
6084 			DPRINTF(WM_DEBUG_LINK,
6085 			    ("%s: LINK: set media -> link down\n",
6086 			    device_xname(sc->sc_dev)));
6087 			sc->sc_tbi_linkup = 0;
6088 		}
6089 	} else {
6090 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
6091 		    device_xname(sc->sc_dev)));
6092 		sc->sc_tbi_linkup = 0;
6093 	}
6094 
6095 	wm_tbi_set_linkled(sc);
6096 
6097 	return 0;
6098 }
6099 
6100 /*
6101  * wm_tbi_set_linkled:
6102  *
6103  *	Update the link LED on 1000BASE-X devices.
6104  */
6105 static void
6106 wm_tbi_set_linkled(struct wm_softc *sc)
6107 {
6108 
6109 	if (sc->sc_tbi_linkup)
6110 		sc->sc_ctrl |= CTRL_SWDPIN(0);
6111 	else
6112 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
6113 
6114 	/* The LED is active-low on 82540 and newer devices */
6115 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
6116 
6117 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6118 }
6119 
6120 /*
6121  * wm_tbi_check_link:
6122  *
6123  *	Check the link on 1000BASE-X devices.
6124  */
6125 static void
6126 wm_tbi_check_link(struct wm_softc *sc)
6127 {
6128 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6129 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6130 	uint32_t status;
6131 
6132 	status = CSR_READ(sc, WMREG_STATUS);
6133 
6134 	/* XXX is this needed? */
6135 	(void)CSR_READ(sc, WMREG_RXCW);
6136 	(void)CSR_READ(sc, WMREG_CTRL);
6137 
6138 	/* set link status */
6139 	if ((status & STATUS_LU) == 0) {
6140 		DPRINTF(WM_DEBUG_LINK,
6141 		    ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
6142 		sc->sc_tbi_linkup = 0;
6143 	} else if (sc->sc_tbi_linkup == 0) {
6144 		DPRINTF(WM_DEBUG_LINK,
6145 		    ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
6146 		    (status & STATUS_FD) ? "FDX" : "HDX"));
6147 		sc->sc_tbi_linkup = 1;
6148 	}
6149 
6150 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
6151 	    && ((status & STATUS_LU) == 0)) {
6152 		sc->sc_tbi_linkup = 0;
6153 		if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
6154 			/* RXCFG storm! */
6155 			DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
6156 				sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
6157 			wm_init(ifp);
6158 			ifp->if_start(ifp);
6159 		} else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
6160 			/* If the timer expired, retry autonegotiation */
6161 			if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
6162 				DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
6163 				sc->sc_tbi_ticks = 0;
6164 				/*
6165 				 * Reset the link, and let autonegotiation do
6166 				 * its thing
6167 				 */
6168 				sc->sc_ctrl |= CTRL_LRST;
6169 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6170 				CSR_WRITE_FLUSH(sc);
6171 				delay(1000);
6172 				sc->sc_ctrl &= ~CTRL_LRST;
6173 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6174 				CSR_WRITE_FLUSH(sc);
6175 				delay(1000);
6176 				CSR_WRITE(sc, WMREG_TXCW,
6177 				    sc->sc_txcw & ~TXCW_ANE);
6178 				CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
6179 			}
6180 		}
6181 	}
6182 
6183 	wm_tbi_set_linkled(sc);
6184 }
6185 
6186 /*
6187  * wm_gmii_reset:
6188  *
6189  *	Reset the PHY.
6190  */
6191 static void
6192 wm_gmii_reset(struct wm_softc *sc)
6193 {
6194 	uint32_t reg;
6195 	int rv;
6196 
6197 	/* get phy semaphore */
6198 	switch (sc->sc_type) {
6199 	case WM_T_82571:
6200 	case WM_T_82572:
6201 	case WM_T_82573:
6202 	case WM_T_82574:
6203 	case WM_T_82583:
6204 		 /* XXX should get sw semaphore, too */
6205 		rv = wm_get_swsm_semaphore(sc);
6206 		break;
6207 	case WM_T_82575:
6208 	case WM_T_82576:
6209 	case WM_T_82580:
6210 	case WM_T_82580ER:
6211 	case WM_T_I350:
6212 	case WM_T_I354:
6213 	case WM_T_I210:
6214 	case WM_T_I211:
6215 	case WM_T_80003:
6216 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6217 		break;
6218 	case WM_T_ICH8:
6219 	case WM_T_ICH9:
6220 	case WM_T_ICH10:
6221 	case WM_T_PCH:
6222 	case WM_T_PCH2:
6223 	case WM_T_PCH_LPT:
6224 		rv = wm_get_swfwhw_semaphore(sc);
6225 		break;
6226 	default:
6227 		/* nothing to do */
6228 		rv = 0;
6229 		break;
6230 	}
6231 	if (rv != 0) {
6232 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6233 		    __func__);
6234 		return;
6235 	}
6236 
6237 	switch (sc->sc_type) {
6238 	case WM_T_82542_2_0:
6239 	case WM_T_82542_2_1:
6240 		/* null */
6241 		break;
6242 	case WM_T_82543:
6243 		/*
6244 		 * With 82543, we need to force speed and duplex on the MAC
6245 		 * equal to what the PHY speed and duplex configuration is.
6246 		 * In addition, we need to perform a hardware reset on the PHY
6247 		 * to take it out of reset.
6248 		 */
6249 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6250 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6251 
6252 		/* The PHY reset pin is active-low. */
6253 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
6254 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
6255 		    CTRL_EXT_SWDPIN(4));
6256 		reg |= CTRL_EXT_SWDPIO(4);
6257 
6258 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6259 		CSR_WRITE_FLUSH(sc);
6260 		delay(10*1000);
6261 
6262 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
6263 		CSR_WRITE_FLUSH(sc);
6264 		delay(150);
6265 #if 0
6266 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
6267 #endif
6268 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
6269 		break;
6270 	case WM_T_82544:	/* reset 10000us */
6271 	case WM_T_82540:
6272 	case WM_T_82545:
6273 	case WM_T_82545_3:
6274 	case WM_T_82546:
6275 	case WM_T_82546_3:
6276 	case WM_T_82541:
6277 	case WM_T_82541_2:
6278 	case WM_T_82547:
6279 	case WM_T_82547_2:
6280 	case WM_T_82571:	/* reset 100us */
6281 	case WM_T_82572:
6282 	case WM_T_82573:
6283 	case WM_T_82574:
6284 	case WM_T_82575:
6285 	case WM_T_82576:
6286 	case WM_T_82580:
6287 	case WM_T_82580ER:
6288 	case WM_T_I350:
6289 	case WM_T_I354:
6290 	case WM_T_I210:
6291 	case WM_T_I211:
6292 	case WM_T_82583:
6293 	case WM_T_80003:
6294 		/* generic reset */
6295 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6296 		CSR_WRITE_FLUSH(sc);
6297 		delay(20000);
6298 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6299 		CSR_WRITE_FLUSH(sc);
6300 		delay(20000);
6301 
6302 		if ((sc->sc_type == WM_T_82541)
6303 		    || (sc->sc_type == WM_T_82541_2)
6304 		    || (sc->sc_type == WM_T_82547)
6305 		    || (sc->sc_type == WM_T_82547_2)) {
6306 			/* workarounds for igp are done in igp_reset() */
6307 			/* XXX add code to set LED after phy reset */
6308 		}
6309 		break;
6310 	case WM_T_ICH8:
6311 	case WM_T_ICH9:
6312 	case WM_T_ICH10:
6313 	case WM_T_PCH:
6314 	case WM_T_PCH2:
6315 	case WM_T_PCH_LPT:
6316 		/* generic reset */
6317 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6318 		CSR_WRITE_FLUSH(sc);
6319 		delay(100);
6320 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6321 		CSR_WRITE_FLUSH(sc);
6322 		delay(150);
6323 		break;
6324 	default:
6325 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6326 		    __func__);
6327 		break;
6328 	}
6329 
6330 	/* release PHY semaphore */
6331 	switch (sc->sc_type) {
6332 	case WM_T_82571:
6333 	case WM_T_82572:
6334 	case WM_T_82573:
6335 	case WM_T_82574:
6336 	case WM_T_82583:
6337 		 /* XXX should put sw semaphore, too */
6338 		wm_put_swsm_semaphore(sc);
6339 		break;
6340 	case WM_T_82575:
6341 	case WM_T_82576:
6342 	case WM_T_82580:
6343 	case WM_T_82580ER:
6344 	case WM_T_I350:
6345 	case WM_T_I354:
6346 	case WM_T_I210:
6347 	case WM_T_I211:
6348 	case WM_T_80003:
6349 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6350 		break;
6351 	case WM_T_ICH8:
6352 	case WM_T_ICH9:
6353 	case WM_T_ICH10:
6354 	case WM_T_PCH:
6355 	case WM_T_PCH2:
6356 	case WM_T_PCH_LPT:
6357 		wm_put_swfwhw_semaphore(sc);
6358 		break;
6359 	default:
6360 		/* nothing to do */
6361 		rv = 0;
6362 		break;
6363 	}
6364 
6365 	/* get_cfg_done */
6366 	wm_get_cfg_done(sc);
6367 
6368 	/* extra setup */
6369 	switch (sc->sc_type) {
6370 	case WM_T_82542_2_0:
6371 	case WM_T_82542_2_1:
6372 	case WM_T_82543:
6373 	case WM_T_82544:
6374 	case WM_T_82540:
6375 	case WM_T_82545:
6376 	case WM_T_82545_3:
6377 	case WM_T_82546:
6378 	case WM_T_82546_3:
6379 	case WM_T_82541_2:
6380 	case WM_T_82547_2:
6381 	case WM_T_82571:
6382 	case WM_T_82572:
6383 	case WM_T_82573:
6384 	case WM_T_82574:
6385 	case WM_T_82575:
6386 	case WM_T_82576:
6387 	case WM_T_82580:
6388 	case WM_T_82580ER:
6389 	case WM_T_I350:
6390 	case WM_T_I354:
6391 	case WM_T_I210:
6392 	case WM_T_I211:
6393 	case WM_T_82583:
6394 	case WM_T_80003:
6395 		/* null */
6396 		break;
6397 	case WM_T_82541:
6398 	case WM_T_82547:
6399 		/* XXX Configure activity LED after PHY reset */
6400 		break;
6401 	case WM_T_ICH8:
6402 	case WM_T_ICH9:
6403 	case WM_T_ICH10:
6404 	case WM_T_PCH:
6405 	case WM_T_PCH2:
6406 	case WM_T_PCH_LPT:
6407 		/* Allow time for h/w to get to a quiescent state after reset */
6408 		delay(10*1000);
6409 
6410 		if (sc->sc_type == WM_T_PCH)
6411 			wm_hv_phy_workaround_ich8lan(sc);
6412 
6413 		if (sc->sc_type == WM_T_PCH2)
6414 			wm_lv_phy_workaround_ich8lan(sc);
6415 
6416 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6417 			/*
6418 			 * dummy read to clear the phy wakeup bit after lcd
6419 			 * reset
6420 			 */
6421 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6422 		}
6423 
6424 		/*
6425 		 * XXX Configure the LCD with the extended configuration region
6426 		 * in NVM
6427 		 */
6428 
6429 		/* Configure the LCD with the OEM bits in NVM */
6430 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
6431 		    || (sc->sc_type == WM_T_PCH_LPT)) {
6432 			/*
6433 			 * Disable LPLU.
6434 			 * XXX It seems that 82567 has LPLU, too.
6435 			 */
6436 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6437 			reg &= ~(HV_OEM_BITS_A1KDIS| HV_OEM_BITS_LPLU);
6438 			reg |= HV_OEM_BITS_ANEGNOW;
6439 			wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6440 		}
6441 		break;
6442 	default:
6443 		panic("%s: unknown type\n", __func__);
6444 		break;
6445 	}
6446 }
6447 
6448 /*
6449  * wm_get_phy_id_82575:
6450  *
6451  * Return PHY ID. Return -1 if it failed.
6452  */
6453 static int
6454 wm_get_phy_id_82575(struct wm_softc *sc)
6455 {
6456 	uint32_t reg;
6457 	int phyid = -1;
6458 
6459 	/* XXX */
6460 	if ((sc->sc_flags & WM_F_SGMII) == 0)
6461 		return -1;
6462 
6463 	if (wm_sgmii_uses_mdio(sc)) {
6464 		switch (sc->sc_type) {
6465 		case WM_T_82575:
6466 		case WM_T_82576:
6467 			reg = CSR_READ(sc, WMREG_MDIC);
6468 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
6469 			break;
6470 		case WM_T_82580:
6471 		case WM_T_I350:
6472 		case WM_T_I354:
6473 		case WM_T_I210:
6474 		case WM_T_I211:
6475 			reg = CSR_READ(sc, WMREG_MDICNFG);
6476 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
6477 			break;
6478 		default:
6479 			return -1;
6480 		}
6481 	}
6482 
6483 	return phyid;
6484 }
6485 
6486 
6487 /*
6488  * wm_gmii_mediainit:
6489  *
6490  *	Initialize media for use on 1000BASE-T devices.
6491  */
6492 static void
6493 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6494 {
6495 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6496 	struct mii_data *mii = &sc->sc_mii;
6497 
6498 	/* We have MII. */
6499 	sc->sc_flags |= WM_F_HAS_MII;
6500 
6501 	if (sc->sc_type == WM_T_80003)
6502 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
6503 	else
6504 		sc->sc_tipg = TIPG_1000T_DFLT;
6505 
6506 	/*
6507 	 * Let the chip set speed/duplex on its own based on
6508 	 * signals from the PHY.
6509 	 * XXXbouyer - I'm not sure this is right for the 80003,
6510 	 * the em driver only sets CTRL_SLU here - but it seems to work.
6511 	 */
6512 	sc->sc_ctrl |= CTRL_SLU;
6513 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6514 
6515 	/* Initialize our media structures and probe the GMII. */
6516 	mii->mii_ifp = ifp;
6517 
6518 	/*
6519 	 * Determine the PHY access method.
6520 	 *
6521 	 *  For SGMII, use the SGMII-specific method.
6522 	 *
6523 	 *  For some devices, we can determine the PHY access method
6524 	 * from sc_type.
6525 	 *
6526 	 *  For ICH8 variants, it's difficult to determine the PHY access
6527 	 * method from sc_type, so use the PCI product ID for some devices.
6528 	 * For other ICH8 variants, try igp's method first; if the PHY
6529 	 * can't be detected that way, fall back to bm's method.
6530 	 */
6531 	switch (prodid) {
6532 	case PCI_PRODUCT_INTEL_PCH_M_LM:
6533 	case PCI_PRODUCT_INTEL_PCH_M_LC:
6534 		/* 82577 */
6535 		sc->sc_phytype = WMPHY_82577;
6536 		mii->mii_readreg = wm_gmii_hv_readreg;
6537 		mii->mii_writereg = wm_gmii_hv_writereg;
6538 		break;
6539 	case PCI_PRODUCT_INTEL_PCH_D_DM:
6540 	case PCI_PRODUCT_INTEL_PCH_D_DC:
6541 		/* 82578 */
6542 		sc->sc_phytype = WMPHY_82578;
6543 		mii->mii_readreg = wm_gmii_hv_readreg;
6544 		mii->mii_writereg = wm_gmii_hv_writereg;
6545 		break;
6546 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6547 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
6548 		/* 82579 */
6549 		sc->sc_phytype = WMPHY_82579;
6550 		mii->mii_readreg = wm_gmii_hv_readreg;
6551 		mii->mii_writereg = wm_gmii_hv_writereg;
6552 		break;
6553 	case PCI_PRODUCT_INTEL_I217_LM:
6554 	case PCI_PRODUCT_INTEL_I217_V:
6555 	case PCI_PRODUCT_INTEL_I218_LM:
6556 	case PCI_PRODUCT_INTEL_I218_V:
6557 		/* I21[78] */
6558 		mii->mii_readreg = wm_gmii_hv_readreg;
6559 		mii->mii_writereg = wm_gmii_hv_writereg;
6560 		break;
6561 	case PCI_PRODUCT_INTEL_82801I_BM:
6562 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6563 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6564 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6565 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6566 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6567 		/* 82567 */
6568 		sc->sc_phytype = WMPHY_BM;
6569 		mii->mii_readreg = wm_gmii_bm_readreg;
6570 		mii->mii_writereg = wm_gmii_bm_writereg;
6571 		break;
6572 	default:
6573 		if (((sc->sc_flags & WM_F_SGMII) != 0)
6574 		    && !wm_sgmii_uses_mdio(sc)){
6575 			mii->mii_readreg = wm_sgmii_readreg;
6576 			mii->mii_writereg = wm_sgmii_writereg;
6577 		} else if (sc->sc_type >= WM_T_80003) {
6578 			mii->mii_readreg = wm_gmii_i80003_readreg;
6579 			mii->mii_writereg = wm_gmii_i80003_writereg;
6580 		} else if (sc->sc_type >= WM_T_I210) {
6581 			mii->mii_readreg = wm_gmii_i82544_readreg;
6582 			mii->mii_writereg = wm_gmii_i82544_writereg;
6583 		} else if (sc->sc_type >= WM_T_82580) {
6584 			sc->sc_phytype = WMPHY_82580;
6585 			mii->mii_readreg = wm_gmii_82580_readreg;
6586 			mii->mii_writereg = wm_gmii_82580_writereg;
6587 		} else if (sc->sc_type >= WM_T_82544) {
6588 			mii->mii_readreg = wm_gmii_i82544_readreg;
6589 			mii->mii_writereg = wm_gmii_i82544_writereg;
6590 		} else {
6591 			mii->mii_readreg = wm_gmii_i82543_readreg;
6592 			mii->mii_writereg = wm_gmii_i82543_writereg;
6593 		}
6594 		break;
6595 	}
6596 	mii->mii_statchg = wm_gmii_statchg;
6597 
6598 	wm_gmii_reset(sc);
6599 
6600 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
6601 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
6602 	    wm_gmii_mediastatus);
6603 
6604 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6605 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
6606 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
6607 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
6608 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
6609 			/* Attach only one port */
6610 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6611 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
6612 		} else {
6613 			int i, id;
6614 			uint32_t ctrl_ext;
6615 
6616 			id = wm_get_phy_id_82575(sc);
6617 			if (id != -1) {
6618 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6619 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
6620 			}
6621 			if ((id == -1)
6622 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
6623 				/* Power on sgmii phy if it is disabled */
6624 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6625 				CSR_WRITE(sc, WMREG_CTRL_EXT,
6626 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
6627 				CSR_WRITE_FLUSH(sc);
6628 				delay(300*1000); /* XXX too long */
6629 
6630 				/* try PHY addresses 1 through 7 */
6631 				for (i = 1; i < 8; i++)
6632 					mii_attach(sc->sc_dev, &sc->sc_mii,
6633 					    0xffffffff, i, MII_OFFSET_ANY,
6634 					    MIIF_DOPAUSE);
6635 
6636 				/* restore previous sfp cage power state */
6637 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6638 			}
6639 		}
6640 	} else {
6641 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6642 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
6643 	}
6644 
6645 	/*
6646 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
6647 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
6648 	 */
6649 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
6650 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
6651 		wm_set_mdio_slow_mode_hv(sc);
6652 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6653 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
6654 	}
6655 
6656 	/*
6657 	 * (For ICH8 variants)
6658 	 * If PHY detection failed, use BM's r/w function and retry.
6659 	 */
6660 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
6661 		/* if failed, retry with *_bm_* */
6662 		mii->mii_readreg = wm_gmii_bm_readreg;
6663 		mii->mii_writereg = wm_gmii_bm_writereg;
6664 
6665 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6666 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
6667 	}
6668 
6669 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
6670 		/* No PHY was found */
6671 		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6672 		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
6673 		sc->sc_phytype = WMPHY_NONE;
6674 	} else {
6675 		/*
6676 		 * PHY Found!
6677 		 * Check PHY type.
6678 		 */
6679 		uint32_t model;
6680 		struct mii_softc *child;
6681 
6682 		child = LIST_FIRST(&mii->mii_phys);
6683 		if (device_is_a(child->mii_dev, "igphy")) {
6684 			struct igphy_softc *isc = (struct igphy_softc *)child;
6685 
6686 			model = isc->sc_mii.mii_mpd_model;
6687 			if (model == MII_MODEL_yyINTEL_I82566)
6688 				sc->sc_phytype = WMPHY_IGP_3;
6689 		}
6690 
6691 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
6692 	}
6693 }
6694 
6695 /*
6696  * wm_gmii_mediastatus:	[ifmedia interface function]
6697  *
6698  *	Get the current interface media status on a 1000BASE-T device.
6699  */
6700 static void
6701 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6702 {
6703 	struct wm_softc *sc = ifp->if_softc;
6704 
6705 	ether_mediastatus(ifp, ifmr);
6706 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6707 	    | sc->sc_flowflags;
6708 }
6709 
6710 /*
6711  * wm_gmii_mediachange:	[ifmedia interface function]
6712  *
6713  *	Set hardware to newly-selected media on a 1000BASE-T device.
6714  */
6715 static int
6716 wm_gmii_mediachange(struct ifnet *ifp)
6717 {
6718 	struct wm_softc *sc = ifp->if_softc;
6719 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6720 	int rc;
6721 
6722 	if ((ifp->if_flags & IFF_UP) == 0)
6723 		return 0;
6724 
6725 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6726 	sc->sc_ctrl |= CTRL_SLU;
6727 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6728 	    || (sc->sc_type > WM_T_82543)) {
6729 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6730 	} else {
6731 		sc->sc_ctrl &= ~CTRL_ASDE;
6732 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6733 		if (ife->ifm_media & IFM_FDX)
6734 			sc->sc_ctrl |= CTRL_FD;
6735 		switch (IFM_SUBTYPE(ife->ifm_media)) {
6736 		case IFM_10_T:
6737 			sc->sc_ctrl |= CTRL_SPEED_10;
6738 			break;
6739 		case IFM_100_TX:
6740 			sc->sc_ctrl |= CTRL_SPEED_100;
6741 			break;
6742 		case IFM_1000_T:
6743 			sc->sc_ctrl |= CTRL_SPEED_1000;
6744 			break;
6745 		default:
6746 			panic("wm_gmii_mediachange: bad media 0x%x",
6747 			    ife->ifm_media);
6748 		}
6749 	}
6750 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6751 	if (sc->sc_type <= WM_T_82543)
6752 		wm_gmii_reset(sc);
6753 
6754 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6755 		return 0;
6756 	return rc;
6757 }
6758 
6759 #define	MDI_IO		CTRL_SWDPIN(2)
6760 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
6761 #define	MDI_CLK		CTRL_SWDPIN(3)
6762 
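/*
 * The 82543 has no MDIC register; MII management frames are bit-banged
 * through the software-definable pins instead.
 */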
6763 static void
6764 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6765 {
6766 	uint32_t i, v;
6767 
6768 	v = CSR_READ(sc, WMREG_CTRL);
6769 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6770 	v |= MDI_DIR | CTRL_SWDPIO(3);
6771 
6772 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6773 		if (data & i)
6774 			v |= MDI_IO;
6775 		else
6776 			v &= ~MDI_IO;
6777 		CSR_WRITE(sc, WMREG_CTRL, v);
6778 		CSR_WRITE_FLUSH(sc);
6779 		delay(10);
6780 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6781 		CSR_WRITE_FLUSH(sc);
6782 		delay(10);
6783 		CSR_WRITE(sc, WMREG_CTRL, v);
6784 		CSR_WRITE_FLUSH(sc);
6785 		delay(10);
6786 	}
6787 }
6788 
6789 static uint32_t
6790 i82543_mii_recvbits(struct wm_softc *sc)
6791 {
6792 	uint32_t v, i, data = 0;
6793 
6794 	v = CSR_READ(sc, WMREG_CTRL);
6795 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6796 	v |= CTRL_SWDPIO(3);
6797 
6798 	CSR_WRITE(sc, WMREG_CTRL, v);
6799 	CSR_WRITE_FLUSH(sc);
6800 	delay(10);
6801 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6802 	CSR_WRITE_FLUSH(sc);
6803 	delay(10);
6804 	CSR_WRITE(sc, WMREG_CTRL, v);
6805 	CSR_WRITE_FLUSH(sc);
6806 	delay(10);
6807 
6808 	for (i = 0; i < 16; i++) {
6809 		data <<= 1;
6810 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6811 		CSR_WRITE_FLUSH(sc);
6812 		delay(10);
6813 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
6814 			data |= 1;
6815 		CSR_WRITE(sc, WMREG_CTRL, v);
6816 		CSR_WRITE_FLUSH(sc);
6817 		delay(10);
6818 	}
6819 
6820 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6821 	CSR_WRITE_FLUSH(sc);
6822 	delay(10);
6823 	CSR_WRITE(sc, WMREG_CTRL, v);
6824 	CSR_WRITE_FLUSH(sc);
6825 	delay(10);
6826 
6827 	return data;
6828 }
6829 
6830 #undef MDI_IO
6831 #undef MDI_DIR
6832 #undef MDI_CLK
6833 
6834 /*
6835  * wm_gmii_i82543_readreg:	[mii interface function]
6836  *
6837  *	Read a PHY register on the GMII (i82543 version).
6838  */
6839 static int
6840 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
6841 {
6842 	struct wm_softc *sc = device_private(self);
6843 	int rv;
6844 
6845 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
6846 	i82543_mii_sendbits(sc, reg | (phy << 5) |
6847 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6848 	rv = i82543_mii_recvbits(sc) & 0xffff;
6849 
6850 	DPRINTF(WM_DEBUG_GMII,
6851 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6852 	    device_xname(sc->sc_dev), phy, reg, rv));
6853 
6854 	return rv;
6855 }
6856 
6857 /*
6858  * wm_gmii_i82543_writereg:	[mii interface function]
6859  *
6860  *	Write a PHY register on the GMII (i82543 version).
6861  */
6862 static void
6863 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6864 {
6865 	struct wm_softc *sc = device_private(self);
6866 
6867 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
6868 	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6869 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6870 	    (MII_COMMAND_START << 30), 32);
6871 }
6872 
6873 /*
6874  * wm_gmii_i82544_readreg:	[mii interface function]
6875  *
6876  *	Read a PHY register on the GMII.
6877  */
6878 static int
6879 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6880 {
6881 	struct wm_softc *sc = device_private(self);
6882 	uint32_t mdic = 0;
6883 	int i, rv;
6884 
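	/* Start the read, then poll MDIC until the hardware sets READY. */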
6885 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6886 	    MDIC_REGADD(reg));
6887 
6888 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6889 		mdic = CSR_READ(sc, WMREG_MDIC);
6890 		if (mdic & MDIC_READY)
6891 			break;
6892 		delay(50);
6893 	}
6894 
6895 	if ((mdic & MDIC_READY) == 0) {
6896 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6897 		    device_xname(sc->sc_dev), phy, reg);
6898 		rv = 0;
6899 	} else if (mdic & MDIC_E) {
6900 #if 0 /* This is normal if no PHY is present. */
6901 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6902 		    device_xname(sc->sc_dev), phy, reg);
6903 #endif
6904 		rv = 0;
6905 	} else {
6906 		rv = MDIC_DATA(mdic);
6907 		if (rv == 0xffff)
6908 			rv = 0;
6909 	}
6910 
6911 	return rv;
6912 }
6913 
6914 /*
6915  * wm_gmii_i82544_writereg:	[mii interface function]
6916  *
6917  *	Write a PHY register on the GMII.
6918  */
6919 static void
6920 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6921 {
6922 	struct wm_softc *sc = device_private(self);
6923 	uint32_t mdic = 0;
6924 	int i;
6925 
6926 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6927 	    MDIC_REGADD(reg) | MDIC_DATA(val));
6928 
6929 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6930 		mdic = CSR_READ(sc, WMREG_MDIC);
6931 		if (mdic & MDIC_READY)
6932 			break;
6933 		delay(50);
6934 	}
6935 
6936 	if ((mdic & MDIC_READY) == 0)
6937 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6938 		    device_xname(sc->sc_dev), phy, reg);
6939 	else if (mdic & MDIC_E)
6940 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6941 		    device_xname(sc->sc_dev), phy, reg);
6942 }
6943 
6944 /*
6945  * wm_gmii_i80003_readreg:	[mii interface function]
6946  *
6947  *	Read a PHY register on the kumeran bus (GG82563).
6948  * This could be handled by the PHY layer if we didn't have to lock the
6949  * resource ...
6950  */
6951 static int
6952 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6953 {
6954 	struct wm_softc *sc = device_private(self);
6955 	int sem;
6956 	int rv;
6957 
6958 	if (phy != 1) /* only one PHY on kumeran bus */
6959 		return 0;
6960 
6961 	sem = swfwphysem[sc->sc_funcid];
6962 	if (wm_get_swfw_semaphore(sc, sem)) {
6963 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6964 		    __func__);
6965 		return 0;
6966 	}
6967 
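	/*
	 * The GG82563 reaches registers at or above GG82563_MIN_ALT_REG
	 * through an alternate page select register.
	 */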
6968 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6969 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6970 		    reg >> GG82563_PAGE_SHIFT);
6971 	} else {
6972 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6973 		    reg >> GG82563_PAGE_SHIFT);
6974 	}
6975 	/* Wait another 200us to work around a bug in the MDIC ready bit */
6976 	delay(200);
6977 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6978 	delay(200);
6979 
6980 	wm_put_swfw_semaphore(sc, sem);
6981 	return rv;
6982 }
6983 
6984 /*
6985  * wm_gmii_i80003_writereg:	[mii interface function]
6986  *
6987  *	Write a PHY register on the kumeran bus (GG82563).
6988  * This could be handled by the PHY layer if we didn't have to lock the
6989  * resource ...
6990  */
6991 static void
6992 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6993 {
6994 	struct wm_softc *sc = device_private(self);
6995 	int sem;
6996 
6997 	if (phy != 1) /* only one PHY on kumeran bus */
6998 		return;
6999 
7000 	sem = swfwphysem[sc->sc_funcid];
7001 	if (wm_get_swfw_semaphore(sc, sem)) {
7002 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7003 		    __func__);
7004 		return;
7005 	}
7006 
7007 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
7008 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
7009 		    reg >> GG82563_PAGE_SHIFT);
7010 	} else {
7011 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
7012 		    reg >> GG82563_PAGE_SHIFT);
7013 	}
7014 	/* Wait another 200us to work around a bug in the MDIC ready bit */
7015 	delay(200);
7016 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
7017 	delay(200);
7018 
7019 	wm_put_swfw_semaphore(sc, sem);
7020 }
7021 
7022 /*
7023  * wm_gmii_bm_readreg:	[mii interface function]
7024  *
7025  *	Read a PHY register on the BM PHY (82567).
7026  * This could be handled by the PHY layer if we didn't have to lock the
7027  * resource ...
7028  */
7029 static int
7030 wm_gmii_bm_readreg(device_t self, int phy, int reg)
7031 {
7032 	struct wm_softc *sc = device_private(self);
7033 	int sem;
7034 	int rv;
7035 
7036 	sem = swfwphysem[sc->sc_funcid];
7037 	if (wm_get_swfw_semaphore(sc, sem)) {
7038 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7039 		    __func__);
7040 		return 0;
7041 	}
7042 
7043 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
7044 		if (phy == 1)
7045 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
7046 			    reg);
7047 		else
7048 			wm_gmii_i82544_writereg(self, phy,
7049 			    GG82563_PHY_PAGE_SELECT,
7050 			    reg >> GG82563_PAGE_SHIFT);
7051 	}
7052 
7053 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
7054 	wm_put_swfw_semaphore(sc, sem);
7055 	return rv;
7056 }
7057 
7058 /*
7059  * wm_gmii_bm_writereg:	[mii interface function]
7060  *
7061  *	Write a PHY register on the BM PHY (82567).
7062  * This could be handled by the PHY layer if we didn't have to lock the
7063  * resource ...
7064  */
7065 static void
7066 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
7067 {
7068 	struct wm_softc *sc = device_private(self);
7069 	int sem;
7070 
7071 	sem = swfwphysem[sc->sc_funcid];
7072 	if (wm_get_swfw_semaphore(sc, sem)) {
7073 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7074 		    __func__);
7075 		return;
7076 	}
7077 
7078 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
7079 		if (phy == 1)
7080 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
7081 			    reg);
7082 		else
7083 			wm_gmii_i82544_writereg(self, phy,
7084 			    GG82563_PHY_PAGE_SELECT,
7085 			    reg >> GG82563_PAGE_SHIFT);
7086 	}
7087 
7088 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
7089 	wm_put_swfw_semaphore(sc, sem);
7090 }
7091 
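/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read or write a BM PHY wakeup register (page 800).  Access requires
 *	first enabling wakeup-register access via page 769, then selecting
 *	page 800 and addressing the register indirectly.
 */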
7092 static void
7093 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
7094 {
7095 	struct wm_softc *sc = device_private(self);
7096 	uint16_t regnum = BM_PHY_REG_NUM(offset);
7097 	uint16_t wuce;
7098 
7099 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
7100 	if (sc->sc_type == WM_T_PCH) {
7101 		/* XXX the e1000 driver does nothing here... why? */
7102 	}
7103 
7104 	/* Set page 769 */
7105 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7106 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7107 
7108 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
7109 
7110 	wuce &= ~BM_WUC_HOST_WU_BIT;
7111 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
7112 	    wuce | BM_WUC_ENABLE_BIT);
7113 
7114 	/* Select page 800 */
7115 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7116 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
7117 
7118 	/* Write page 800 */
7119 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
7120 
7121 	if (rd)
7122 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
7123 	else
7124 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
7125 
7126 	/* Set page 769 */
7127 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7128 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7129 
7130 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
7131 }
7132 
7133 /*
7134  * wm_gmii_hv_readreg:	[mii interface function]
7135  *
7136  *	Read a PHY register on the kumeran bus.
7137  * This could be handled by the PHY layer if we didn't have to lock the
7138  * resource ...
7139  */
7140 static int
7141 wm_gmii_hv_readreg(device_t self, int phy, int reg)
7142 {
7143 	struct wm_softc *sc = device_private(self);
7144 	uint16_t page = BM_PHY_REG_PAGE(reg);
7145 	uint16_t regnum = BM_PHY_REG_NUM(reg);
7146 	uint16_t val;
7147 	int rv;
7148 
7149 	if (wm_get_swfwhw_semaphore(sc)) {
7150 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7151 		    __func__);
7152 		return 0;
7153 	}
7154 
7155 	/* XXX Workaround failure in MDIO access while cable is disconnected */
7156 	if (sc->sc_phytype == WMPHY_82577) {
7157 		/* XXX must write */
7158 	}
7159 
7160 	/* Page 800 works differently than the rest so it has its own func */
7161 	if (page == BM_WUC_PAGE) {
7162 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
7163 		wm_put_swfwhw_semaphore(sc);
		return val;
7164 	}
7165 
7166 	/*
7167 	 * Pages lower than 768 work differently from the rest, so they
7168 	 * would need their own function (not implemented here)
7169 	 */
7170 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7171 		printf("gmii_hv_readreg!!!\n");
7172 		wm_put_swfwhw_semaphore(sc);
		return 0;
7173 	}
7174 
7175 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7176 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7177 		    page << BME1000_PAGE_SHIFT);
7178 	}
7179 
7180 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
7181 	wm_put_swfwhw_semaphore(sc);
7182 	return rv;
7183 }
7184 
7185 /*
7186  * wm_gmii_hv_writereg:	[mii interface function]
7187  *
7188  *	Write a PHY register on the kumeran bus.
7189  * This could be handled by the PHY layer if we didn't have to lock the
7190  * resource ...
7191  */
7192 static void
7193 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
7194 {
7195 	struct wm_softc *sc = device_private(self);
7196 	uint16_t page = BM_PHY_REG_PAGE(reg);
7197 	uint16_t regnum = BM_PHY_REG_NUM(reg);
7198 
7199 	if (wm_get_swfwhw_semaphore(sc)) {
7200 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7201 		    __func__);
7202 		return;
7203 	}
7204 
7205 	/* XXX Workaround failure in MDIO access while cable is disconnected */
7206 
7207 	/* Page 800 works differently than the rest so it has its own func */
7208 	if (page == BM_WUC_PAGE) {
7209 		uint16_t tmp;
7210 
7211 		tmp = val;
7212 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
7213 		wm_put_swfwhw_semaphore(sc);
		return;
7214 	}
7215 
7216 	/*
7217 	 * Pages lower than 768 work differently from the rest, so they
7218 	 * would need their own function (not implemented here)
7219 	 */
7220 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7221 		printf("gmii_hv_writereg!!!\n");
7222 		wm_put_swfwhw_semaphore(sc);
		return;
7223 	}
7224 
7225 	/*
7226 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
7227 	 * Power Down (whenever bit 11 of the PHY control register is set)
7228 	 */
7229 
7230 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7231 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7232 		    page << BME1000_PAGE_SHIFT);
7233 	}
7234 
7235 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
7236 	wm_put_swfwhw_semaphore(sc);
7237 }
7238 
7239 /*
7240  * wm_sgmii_uses_mdio
7241  *
7242  * Check whether the transaction is to the internal PHY or the external
7243  * MDIO interface. Return true if it's MDIO.
7244  */
7245 static bool
7246 wm_sgmii_uses_mdio(struct wm_softc *sc)
7247 {
7248 	uint32_t reg;
7249 	bool ismdio = false;
7250 
7251 	switch (sc->sc_type) {
7252 	case WM_T_82575:
7253 	case WM_T_82576:
7254 		reg = CSR_READ(sc, WMREG_MDIC);
7255 		ismdio = ((reg & MDIC_DEST) != 0);
7256 		break;
7257 	case WM_T_82580:
7258 	case WM_T_82580ER:
7259 	case WM_T_I350:
7260 	case WM_T_I354:
7261 	case WM_T_I210:
7262 	case WM_T_I211:
7263 		reg = CSR_READ(sc, WMREG_MDICNFG);
7264 		ismdio = ((reg & MDICNFG_DEST) != 0);
7265 		break;
7266 	default:
7267 		break;
7268 	}
7269 
7270 	return ismdio;
7271 }
7272 
7273 /*
7274  * wm_sgmii_readreg:	[mii interface function]
7275  *
7276  *	Read a PHY register on the SGMII
7277  * This could be handled by the PHY layer if we didn't have to lock the
7278  * resource ...
7279  */
7280 static int
7281 wm_sgmii_readreg(device_t self, int phy, int reg)
7282 {
7283 	struct wm_softc *sc = device_private(self);
7284 	uint32_t i2ccmd;
7285 	int i, rv;
7286 
7287 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7288 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7289 		    __func__);
7290 		return 0;
7291 	}
7292 
7293 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7294 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
7295 	    | I2CCMD_OPCODE_READ;
7296 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7297 
7298 	/* Poll the ready bit */
7299 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7300 		delay(50);
7301 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7302 		if (i2ccmd & I2CCMD_READY)
7303 			break;
7304 	}
7305 	if ((i2ccmd & I2CCMD_READY) == 0)
7306 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
7307 	if ((i2ccmd & I2CCMD_ERROR) != 0)
7308 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7309 
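	/*
	 * The 16-bit data comes back in the low half of I2CCMD with its
	 * two bytes swapped with respect to host order, so swap them.
	 */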
7310 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
7311 
7312 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7313 	return rv;
7314 }
7315 
7316 /*
7317  * wm_sgmii_writereg:	[mii interface function]
7318  *
7319  *	Write a PHY register on the SGMII.
7320  * This could be handled by the PHY layer if we didn't have to lock the
7321  * resource ...
7322  */
7323 static void
7324 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
7325 {
7326 	struct wm_softc *sc = device_private(self);
7327 	uint32_t i2ccmd;
7328 	int i;
7329 
7330 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7331 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7332 		    __func__);
7333 		return;
7334 	}
7335 
7336 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7337 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
7338 	    | I2CCMD_OPCODE_WRITE;
7339 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7340 
7341 	/* Poll the ready bit */
7342 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7343 		delay(50);
7344 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7345 		if (i2ccmd & I2CCMD_READY)
7346 			break;
7347 	}
7348 	if ((i2ccmd & I2CCMD_READY) == 0)
7349 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
7350 	if ((i2ccmd & I2CCMD_ERROR) != 0)
7351 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7352 
7353 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7354 }
7355 
7356 /*
7357  * wm_gmii_82580_readreg:	[mii interface function]
7358  *
7359  *	Read a PHY register on the 82580 and I350.
7360  * This could be handled by the PHY layer if we didn't have to lock the
7361  * resource ...
7362  */
7363 static int
7364 wm_gmii_82580_readreg(device_t self, int phy, int reg)
7365 {
7366 	struct wm_softc *sc = device_private(self);
7367 	int sem;
7368 	int rv;
7369 
7370 	sem = swfwphysem[sc->sc_funcid];
7371 	if (wm_get_swfw_semaphore(sc, sem)) {
7372 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7373 		    __func__);
7374 		return 0;
7375 	}
7376 
7377 	rv = wm_gmii_i82544_readreg(self, phy, reg);
7378 
7379 	wm_put_swfw_semaphore(sc, sem);
7380 	return rv;
7381 }
7382 
7383 /*
7384  * wm_gmii_82580_writereg:	[mii interface function]
7385  *
7386  *	Write a PHY register on the 82580 and I350.
7387  * This could be handled by the PHY layer if we didn't have to lock the
7388  * resource ...
7389  */
7390 static void
7391 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
7392 {
7393 	struct wm_softc *sc = device_private(self);
7394 	int sem;
7395 
7396 	sem = swfwphysem[sc->sc_funcid];
7397 	if (wm_get_swfw_semaphore(sc, sem)) {
7398 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7399 		    __func__);
7400 		return;
7401 	}
7402 
7403 	wm_gmii_i82544_writereg(self, phy, reg, val);
7404 
7405 	wm_put_swfw_semaphore(sc, sem);
7406 }
7407 
7408 /*
7409  * wm_gmii_statchg:	[mii interface function]
7410  *
7411  *	Callback from MII layer when media changes.
7412  */
7413 static void
7414 wm_gmii_statchg(struct ifnet *ifp)
7415 {
7416 	struct wm_softc *sc = ifp->if_softc;
7417 	struct mii_data *mii = &sc->sc_mii;
7418 
7419 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
7420 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7421 	sc->sc_fcrtl &= ~FCRTL_XONE;
7422 
7423 	/*
7424 	 * Get flow control negotiation result.
7425 	 */
7426 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7427 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7428 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7429 		mii->mii_media_active &= ~IFM_ETH_FMASK;
7430 	}
7431 
7432 	if (sc->sc_flowflags & IFM_FLOW) {
7433 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7434 			sc->sc_ctrl |= CTRL_TFCE;
7435 			sc->sc_fcrtl |= FCRTL_XONE;
7436 		}
7437 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7438 			sc->sc_ctrl |= CTRL_RFCE;
7439 	}
7440 
7441 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
7442 		DPRINTF(WM_DEBUG_LINK,
7443 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7444 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7445 	} else {
7446 		DPRINTF(WM_DEBUG_LINK,
7447 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7448 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7449 	}
7450 
7451 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7452 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7453 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7454 						 : WMREG_FCRTL, sc->sc_fcrtl);
7455 	if (sc->sc_type == WM_T_80003) {
7456 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7457 		case IFM_1000_T:
7458 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7459 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7460 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
7461 			break;
7462 		default:
7463 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7464 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7465 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
7466 			break;
7467 		}
7468 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7469 	}
7470 }
7471 
7472 /*
7473  * wm_kmrn_readreg:
7474  *
7475  *	Read a kumeran register
7476  */
7477 static int
7478 wm_kmrn_readreg(struct wm_softc *sc, int reg)
7479 {
7480 	int rv;
7481 
7482 	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0) {
7483 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7484 			aprint_error_dev(sc->sc_dev,
7485 			    "%s: failed to get semaphore\n", __func__);
7486 			return 0;
7487 		}
7488 	} else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0) {
7489 		if (wm_get_swfwhw_semaphore(sc)) {
7490 			aprint_error_dev(sc->sc_dev,
7491 			    "%s: failed to get semaphore\n", __func__);
7492 			return 0;
7493 		}
7494 	}
7495 
7496 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7497 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7498 	    KUMCTRLSTA_REN);
7499 	CSR_WRITE_FLUSH(sc);
7500 	delay(2);
7501 
7502 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7503 
7504 	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0)
7505 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7506 	else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0)
7507 		wm_put_swfwhw_semaphore(sc);
7508 
7509 	return rv;
7510 }
7511 
7512 /*
7513  * wm_kmrn_writereg:
7514  *
7515  *	Write a kumeran register
7516  */
7517 static void
7518 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7519 {
7520 
7521 	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0) {
7522 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7523 			aprint_error_dev(sc->sc_dev,
7524 			    "%s: failed to get semaphore\n", __func__);
7525 			return;
7526 		}
7527 	} else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0) {
7528 		if (wm_get_swfwhw_semaphore(sc)) {
7529 			aprint_error_dev(sc->sc_dev,
7530 			    "%s: failed to get semaphore\n", __func__);
7531 			return;
7532 		}
7533 	}
7534 
7535 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7536 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7537 	    (val & KUMCTRLSTA_MASK));
7538 
7539 	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0)
7540 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7541 	else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0)
7542 		wm_put_swfwhw_semaphore(sc);
7543 }
7544 
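/*
 * wm_is_onboard_nvm_eeprom:
 *
 *	Return 1 if the NVM is an on-board EEPROM, or 0 if it is Flash.
 *	On 82573/82574/82583 the NVM type is reported in EECD bits 15
 *	and 16; both bits set means Flash.
 */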
7545 static int
7546 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
7547 {
7548 	uint32_t eecd = 0;
7549 
7550 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
7551 	    || sc->sc_type == WM_T_82583) {
7552 		eecd = CSR_READ(sc, WMREG_EECD);
7553 
7554 		/* Isolate bits 15 & 16 */
7555 		eecd = ((eecd >> 15) & 0x03);
7556 
7557 		/* If both bits are set, device is Flash type */
7558 		if (eecd == 0x03)
7559 			return 0;
7560 	}
7561 	return 1;
7562 }
7563 
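/*
 * wm_get_swsm_semaphore:
 *
 *	Try to take the software/firmware semaphore: set SWSM_SWESMBI
 *	and read it back; if the bit sticks, we own the semaphore.
 *	Return 0 on success, 1 on timeout.
 */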
7564 static int
7565 wm_get_swsm_semaphore(struct wm_softc *sc)
7566 {
7567 	int32_t timeout;
7568 	uint32_t swsm;
7569 
7570 	/* Get the FW semaphore. */
7571 	timeout = 1000 + 1; /* XXX */
7572 	while (timeout) {
7573 		swsm = CSR_READ(sc, WMREG_SWSM);
7574 		swsm |= SWSM_SWESMBI;
7575 		CSR_WRITE(sc, WMREG_SWSM, swsm);
7576 		/* if we managed to set the bit we got the semaphore. */
7577 		swsm = CSR_READ(sc, WMREG_SWSM);
7578 		if (swsm & SWSM_SWESMBI)
7579 			break;
7580 
7581 		delay(50);
7582 		timeout--;
7583 	}
7584 
7585 	if (timeout == 0) {
7586 		aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n");
7587 		/* Release semaphores */
7588 		wm_put_swsm_semaphore(sc);
7589 		return 1;
7590 	}
7591 	return 0;
7592 }
7593 
7594 static void
7595 wm_put_swsm_semaphore(struct wm_softc *sc)
7596 {
7597 	uint32_t swsm;
7598 
7599 	swsm = CSR_READ(sc, WMREG_SWSM);
7600 	swsm &= ~(SWSM_SWESMBI);
7601 	CSR_WRITE(sc, WMREG_SWSM, swsm);
7602 }
7603 
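/*
 * wm_get_swfw_semaphore:
 *
 *	Take a software/firmware synchronization resource: optionally
 *	guard SW_FW_SYNC with the SWSM semaphore, then claim the software
 *	bit for "mask" once neither software nor firmware holds the
 *	resource.  Return 0 on success, 1 on timeout.
 */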
7604 static int
7605 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
7606 {
7607 	uint32_t swfw_sync;
7608 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
7609 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
7610 	int timeout;
7611 
7612 	for (timeout = 0; timeout < 200; timeout++) {
7613 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
7614 			if (wm_get_swsm_semaphore(sc)) {
7615 				aprint_error_dev(sc->sc_dev,
7616 				    "%s: failed to get semaphore\n",
7617 				    __func__);
7618 				return 1;
7619 			}
7620 		}
7621 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
7622 		if ((swfw_sync & (swmask | fwmask)) == 0) {
7623 			swfw_sync |= swmask;
7624 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
7625 			if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7626 				wm_put_swsm_semaphore(sc);
7627 			return 0;
7628 		}
7629 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7630 			wm_put_swsm_semaphore(sc);
7631 		delay(5000);
7632 	}
7633 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
7634 	    device_xname(sc->sc_dev), mask, swfw_sync);
7635 	return 1;
7636 }
7637 
7638 static void
7639 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
7640 {
7641 	uint32_t swfw_sync;
7642 
7643 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
7644 		while (wm_get_swsm_semaphore(sc) != 0)
7645 			continue;
7646 	}
7647 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
7648 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
7649 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
7650 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7651 		wm_put_swsm_semaphore(sc);
7652 }
7653 
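/*
 * wm_get_swfwhw_semaphore:
 *
 *	Take the software flag in EXTCNFCTR by setting it and reading it
 *	back until it sticks.  Return 0 on success, 1 on timeout.
 */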
7654 static int
7655 wm_get_swfwhw_semaphore(struct wm_softc *sc)
7656 {
7657 	uint32_t ext_ctrl;
7658 	int timeout;
7659 
7660 	for (timeout = 0; timeout < 200; timeout++) {
7661 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7662 		ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
7663 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7664 
7665 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7666 		if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
7667 			return 0;
7668 		delay(5000);
7669 	}
7670 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
7671 	    device_xname(sc->sc_dev), ext_ctrl);
7672 	return 1;
7673 }
7674 
7675 static void
7676 wm_put_swfwhw_semaphore(struct wm_softc *sc)
7677 {
7678 	uint32_t ext_ctrl;
7679 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7680 	ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
7681 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7682 }
7683 
7684 static int
7685 wm_get_hw_semaphore_82573(struct wm_softc *sc)
7686 {
7687 	int i = 0;
7688 	uint32_t reg;
7689 
7690 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
7691 	do {
7692 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
7693 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
7694 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
7695 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
7696 			break;
7697 		delay(2*1000);
7698 		i++;
7699 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
7700 
7701 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
7702 		wm_put_hw_semaphore_82573(sc);
7703 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
7704 		    device_xname(sc->sc_dev));
7705 		return -1;
7706 	}
7707 
7708 	return 0;
7709 }
7710 
7711 static void
7712 wm_put_hw_semaphore_82573(struct wm_softc *sc)
7713 {
7714 	uint32_t reg;
7715 
7716 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
7717 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
7718 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
7719 }
7720 
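/*
 * wm_valid_nvm_bank_detect_ich8lan:
 *
 *	Work out which NVM bank is valid.  On ICH8/ICH9 the EECD SEC1VAL
 *	bits may tell us directly; otherwise read the signature byte of
 *	each bank and pick the one with a valid signature.  Return 0 on
 *	success with *bank set, or -1 if no bank is valid.
 */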
7721 static int
7722 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
7723 {
7724 	uint32_t eecd;
7725 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
7726 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
7727 	uint8_t sig_byte = 0;
7728 
7729 	switch (sc->sc_type) {
7730 	case WM_T_ICH8:
7731 	case WM_T_ICH9:
7732 		eecd = CSR_READ(sc, WMREG_EECD);
7733 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
7734 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
7735 			return 0;
7736 		}
7737 		/* FALLTHROUGH */
7738 	default:
7739 		/* Default to 0 */
7740 		*bank = 0;
7741 
7742 		/* Check bank 0 */
7743 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
7744 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
7745 			*bank = 0;
7746 			return 0;
7747 		}
7748 
7749 		/* Check bank 1 */
7750 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
7751 		    &sig_byte);
7752 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
7753 			*bank = 1;
7754 			return 0;
7755 		}
7756 	}
7757 
7758 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
7759 		device_xname(sc->sc_dev)));
7760 	return -1;
7761 }
7762 
7763 /******************************************************************************
7764  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
7765  * register.
7766  *
7767  * sc - Struct containing variables accessed by shared code
7768  * offset - offset of word in the EEPROM to read
7769  * data - word read from the EEPROM
7770  * words - number of words to read
7771  *****************************************************************************/
7772 static int
7773 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
7774 {
7775 	int32_t  error = 0;
7776 	uint32_t flash_bank = 0;
7777 	uint32_t act_offset = 0;
7778 	uint32_t bank_offset = 0;
7779 	uint16_t word = 0;
7780 	uint16_t i = 0;
7781 
7782 	/* We need to know which is the valid flash bank.  In the event
7783 	 * that we didn't allocate eeprom_shadow_ram, we may not be
7784 	 * managing flash_bank.  So it cannot be trusted and needs
7785 	 * to be updated with each read.
7786 	 */
7787 	error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
7788 	if (error) {
7789 		aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
7790 		    __func__);
7791 		flash_bank = 0;
7792 	}
7793 
7794 	/*
7795 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
7796 	 * size
7797 	 */
7798 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
7799 
7800 	error = wm_get_swfwhw_semaphore(sc);
7801 	if (error) {
7802 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7803 		    __func__);
7804 		return error;
7805 	}
7806 
7807 	for (i = 0; i < words; i++) {
7808 		/* The NVM part needs a byte offset, hence * 2 */
7809 		act_offset = bank_offset + ((offset + i) * 2);
7810 		error = wm_read_ich8_word(sc, act_offset, &word);
7811 		if (error) {
7812 			aprint_error_dev(sc->sc_dev,
7813 			    "%s: failed to read NVM\n", __func__);
7814 			break;
7815 		}
7816 		data[i] = word;
7817 	}
7818 
7819 	wm_put_swfwhw_semaphore(sc);
7820 	return error;
7821 }
7822 
7823 /******************************************************************************
7824  * This function does initial flash setup so that a new read/write/erase cycle
7825  * can be started.
7826  *
7827  * sc - The pointer to the hw structure
7828  ****************************************************************************/
7829 static int32_t
7830 wm_ich8_cycle_init(struct wm_softc *sc)
7831 {
7832 	uint16_t hsfsts;
7833 	int32_t error = 1;
7834 	int32_t i     = 0;
7835 
7836 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7837 
7838 	/* Maybe check the Flash Descriptor Valid bit in HW status */
7839 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
7840 		return error;
7841 	}
7842 
7843 	/* Clear FCERR in HW status by writing a 1 */
7844 	/* Clear DAEL in HW status by writing a 1 */
7845 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
7846 
7847 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7848 
7849 	/*
7850 	 * Either we should have a hardware SPI cycle-in-progress bit to
7851 	 * check against before starting a new cycle, or the FDONE bit
7852 	 * should be changed in the hardware so that it is 1 after hardware
7853 	 * reset, which can then be used to tell whether a cycle is in
7854 	 * progress or has been completed.  We should also have some
7855 	 * software semaphore mechanism to guard FDONE or the cycle-in-
7856 	 * progress bit so that accesses by two threads are serialized, or
7857 	 * some way to keep two threads from starting a cycle at once.
7858 	 */
7859 
7860 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7861 		/*
7862 		 * There is no cycle running at present, so we can start a
7863 		 * cycle
7864 		 */
7865 
7866 		/* Begin by setting Flash Cycle Done. */
7867 		hsfsts |= HSFSTS_DONE;
7868 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7869 		error = 0;
7870 	} else {
7871 		/*
7872 		 * otherwise poll for some time so the current cycle has a
7873 		 * chance to end before giving up.
7874 		 */
7875 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
7876 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7877 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7878 				error = 0;
7879 				break;
7880 			}
7881 			delay(1);
7882 		}
7883 		if (error == 0) {
7884 			/*
7885 			 * The previous cycle ended within the timeout;
7886 			 * now set the Flash Cycle Done.
7887 			 */
7888 			hsfsts |= HSFSTS_DONE;
7889 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7890 		}
7891 	}
7892 	return error;
7893 }
7894 
7895 /******************************************************************************
7896  * This function starts a flash cycle and waits for its completion
7897  *
7898  * sc - The pointer to the hw structure
7899  ****************************************************************************/
7900 static int32_t
7901 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
7902 {
7903 	uint16_t hsflctl;
7904 	uint16_t hsfsts;
7905 	int32_t error = 1;
7906 	uint32_t i = 0;
7907 
7908 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
7909 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7910 	hsflctl |= HSFCTL_GO;
7911 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7912 
7913 	/* wait till FDONE bit is set to 1 */
7914 	do {
7915 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7916 		if (hsfsts & HSFSTS_DONE)
7917 			break;
7918 		delay(1);
7919 		i++;
7920 	} while (i < timeout);
7921 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
7922 		error = 0;
7923 
7924 	return error;
7925 }
7926 
7927 /******************************************************************************
7928  * Reads a byte or word from the NVM using the ICH8 flash access registers.
7929  *
7930  * sc - The pointer to the hw structure
7931  * index - The index of the byte or word to read.
7932  * size - Size of data to read, 1=byte 2=word
7933  * data - Pointer to the word to store the value read.
7934  *****************************************************************************/
7935 static int32_t
7936 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
7937     uint32_t size, uint16_t* data)
7938 {
7939 	uint16_t hsfsts;
7940 	uint16_t hsflctl;
7941 	uint32_t flash_linear_address;
7942 	uint32_t flash_data = 0;
7943 	int32_t error = 1;
7944 	int32_t count = 0;
7945 
7946 	if (size < 1 || size > 2 || data == NULL ||
7947 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
7948 		return error;
7949 
7950 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
7951 	    sc->sc_ich8_flash_base;
7952 
7953 	do {
7954 		delay(1);
7955 		/* Steps */
7956 		error = wm_ich8_cycle_init(sc);
7957 		if (error)
7958 			break;
7959 
7960 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7961 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
7962 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
7963 		    & HSFCTL_BCOUNT_MASK;
7964 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
7965 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7966 
7967 		/*
7968 		 * Write the last 24 bits of index into Flash Linear address
7969 		 * field in Flash Address
7970 		 */
7971 		/* TODO: TBD maybe check the index against the size of flash */
7972 
7973 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
7974 
7975 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
7976 
7977 		/*
7978 		 * Check if FCERR is set to 1.  If set to 1, clear it and
7979 		 * try the whole sequence a few more times; else read in
7980 		 * (shift in) the Flash Data0, the order is least
7981 		 * significant byte first.
7982 		 */
7983 		if (error == 0) {
7984 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
7985 			if (size == 1)
7986 				*data = (uint8_t)(flash_data & 0x000000FF);
7987 			else if (size == 2)
7988 				*data = (uint16_t)(flash_data & 0x0000FFFF);
7989 			break;
7990 		} else {
7991 			/*
7992 			 * If we've gotten here, then things are probably
7993 			 * completely hosed, but if the error condition is
7994 			 * detected, it won't hurt to give it another try...
7995 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
7996 			 */
7997 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7998 			if (hsfsts & HSFSTS_ERR) {
7999 				/* Repeat for some time before giving up. */
8000 				continue;
8001 			} else if ((hsfsts & HSFSTS_DONE) == 0)
8002 				break;
8003 		}
8004 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
8005 
8006 	return error;
8007 }
8008 
8009 /******************************************************************************
8010  * Reads a single byte from the NVM using the ICH8 flash access registers.
8011  *
8012  * sc - pointer to wm_hw structure
8013  * index - The index of the byte to read.
8014  * data - Pointer to a byte to store the value read.
8015  *****************************************************************************/
8016 static int32_t
8017 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
8018 {
8019 	int32_t status;
8020 	uint16_t word = 0;
8021 
8022 	status = wm_read_ich8_data(sc, index, 1, &word);
8023 	if (status == 0)
8024 		*data = (uint8_t)word;
8025 	else
8026 		*data = 0;
8027 
8028 	return status;
8029 }
8030 
8031 /******************************************************************************
8032  * Reads a word from the NVM using the ICH8 flash access registers.
8033  *
8034  * sc - pointer to wm_hw structure
8035  * index - The starting byte index of the word to read.
8036  * data - Pointer to a word to store the value read.
8037  *****************************************************************************/
8038 static int32_t
8039 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
8040 {
8041 	int32_t status;
8042 
8043 	status = wm_read_ich8_data(sc, index, 2, data);
8044 	return status;
8045 }
8046 
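/*
 * wm_check_mng_mode:
 *
 *	Dispatch to the chip-specific check of whether management
 *	firmware mode is enabled.  Return nonzero if it is.
 */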
8047 static int
8048 wm_check_mng_mode(struct wm_softc *sc)
8049 {
8050 	int rv;
8051 
8052 	switch (sc->sc_type) {
8053 	case WM_T_ICH8:
8054 	case WM_T_ICH9:
8055 	case WM_T_ICH10:
8056 	case WM_T_PCH:
8057 	case WM_T_PCH2:
8058 	case WM_T_PCH_LPT:
8059 		rv = wm_check_mng_mode_ich8lan(sc);
8060 		break;
8061 	case WM_T_82574:
8062 	case WM_T_82583:
8063 		rv = wm_check_mng_mode_82574(sc);
8064 		break;
8065 	case WM_T_82571:
8066 	case WM_T_82572:
8067 	case WM_T_82573:
8068 	case WM_T_80003:
8069 		rv = wm_check_mng_mode_generic(sc);
8070 		break;
8071 	default:
8072 		/* nothing to do */
8073 		rv = 0;
8074 		break;
8075 	}
8076 
8077 	return rv;
8078 }
8079 
8080 static int
8081 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
8082 {
8083 	uint32_t fwsm;
8084 
8085 	fwsm = CSR_READ(sc, WMREG_FWSM);
8086 
8087 	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
8088 		return 1;
8089 
8090 	return 0;
8091 }
8092 
8093 static int
8094 wm_check_mng_mode_82574(struct wm_softc *sc)
8095 {
8096 	uint16_t data;
8097 
8098 	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
8099 
8100 	if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
8101 		return 1;
8102 
8103 	return 0;
8104 }
8105 
8106 static int
8107 wm_check_mng_mode_generic(struct wm_softc *sc)
8108 {
8109 	uint32_t fwsm;
8110 
8111 	fwsm = CSR_READ(sc, WMREG_FWSM);
8112 
8113 	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
8114 		return 1;
8115 
8116 	return 0;
8117 }
8118 
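/*
 * wm_enable_mng_pass_thru:
 *
 *	Return 1 if management packets should be passed through to the
 *	host: ASF firmware must be present and TCO reception enabled,
 *	plus a chip-family-specific manageability mode check.
 */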
8119 static int
8120 wm_enable_mng_pass_thru(struct wm_softc *sc)
8121 {
8122 	uint32_t manc, fwsm, factps;
8123 
8124 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
8125 		return 0;
8126 
8127 	manc = CSR_READ(sc, WMREG_MANC);
8128 
8129 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
8130 		device_xname(sc->sc_dev), manc));
8131 	if ((manc & MANC_RECV_TCO_EN) == 0)
8132 		return 0;
8133 
8134 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
8135 		fwsm = CSR_READ(sc, WMREG_FWSM);
8136 		factps = CSR_READ(sc, WMREG_FACTPS);
8137 		if (((factps & FACTPS_MNGCG) == 0)
8138 		    && ((fwsm & FWSM_MODE_MASK)
8139 			== (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
8140 			return 1;
8141 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
8142 		uint16_t data;
8143 
8144 		factps = CSR_READ(sc, WMREG_FACTPS);
8145 		wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
8146 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
8147 			device_xname(sc->sc_dev), factps, data));
8148 		if (((factps & FACTPS_MNGCG) == 0)
8149 		    && ((data & EEPROM_CFG2_MNGM_MASK)
8150 			== (EEPROM_CFG2_MNGM_PT << EEPROM_CFG2_MNGM_SHIFT)))
8151 			return 1;
8152 	} else if (((manc & MANC_SMBUS_EN) != 0)
8153 	    && ((manc & MANC_ASF_EN) == 0))
8154 		return 1;
8155 
8156 	return 0;
8157 }
8158 
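/*
 * wm_check_reset_block:
 *
 *	Check whether firmware is blocking PHY resets.  Return 0 if a
 *	reset is allowed, -1 if it is blocked.
 */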
8159 static int
8160 wm_check_reset_block(struct wm_softc *sc)
8161 {
8162 	uint32_t reg;
8163 
8164 	switch (sc->sc_type) {
8165 	case WM_T_ICH8:
8166 	case WM_T_ICH9:
8167 	case WM_T_ICH10:
8168 	case WM_T_PCH:
8169 	case WM_T_PCH2:
8170 	case WM_T_PCH_LPT:
8171 		reg = CSR_READ(sc, WMREG_FWSM);
8172 		if ((reg & FWSM_RSPCIPHY) != 0)
8173 			return 0;
8174 		else
8175 			return -1;
8176 		break;
8177 	case WM_T_82571:
8178 	case WM_T_82572:
8179 	case WM_T_82573:
8180 	case WM_T_82574:
8181 	case WM_T_82583:
8182 	case WM_T_80003:
8183 		reg = CSR_READ(sc, WMREG_MANC);
8184 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
8185 			return -1;
8186 		else
8187 			return 0;
8188 		break;
8189 	default:
8190 		/* no problem */
8191 		break;
8192 	}
8193 
8194 	return 0;
8195 }
8196 
8197 static void
8198 wm_get_hw_control(struct wm_softc *sc)
8199 {
8200 	uint32_t reg;
8201 
8202 	switch (sc->sc_type) {
8203 	case WM_T_82573:
8204 		reg = CSR_READ(sc, WMREG_SWSM);
8205 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
8206 		break;
8207 	case WM_T_82571:
8208 	case WM_T_82572:
8209 	case WM_T_82574:
8210 	case WM_T_82583:
8211 	case WM_T_80003:
8212 	case WM_T_ICH8:
8213 	case WM_T_ICH9:
8214 	case WM_T_ICH10:
8215 	case WM_T_PCH:
8216 	case WM_T_PCH2:
8217 	case WM_T_PCH_LPT:
8218 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
8219 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
8220 		break;
8221 	default:
8222 		break;
8223 	}
8224 }
8225 
8226 static void
8227 wm_release_hw_control(struct wm_softc *sc)
8228 {
8229 	uint32_t reg;
8230 
8231 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
8232 		return;
8233 
8234 	if (sc->sc_type == WM_T_82573) {
8235 		reg = CSR_READ(sc, WMREG_SWSM);
8236 		reg &= ~SWSM_DRV_LOAD;
8237 		CSR_WRITE(sc, WMREG_SWSM, reg);
8238 	} else {
8239 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
8240 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
8241 	}
8242 }
8243 
8244 /* XXX Currently TBI only */
8245 static int
8246 wm_check_for_link(struct wm_softc *sc)
8247 {
8248 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8249 	uint32_t rxcw;
8250 	uint32_t ctrl;
8251 	uint32_t status;
8252 	uint32_t sig;
8253 
8254 	rxcw = CSR_READ(sc, WMREG_RXCW);
8255 	ctrl = CSR_READ(sc, WMREG_CTRL);
8256 	status = CSR_READ(sc, WMREG_STATUS);
8257 
8258 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
8259 
8260 	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
8261 		device_xname(sc->sc_dev), __func__,
8262 		((ctrl & CTRL_SWDPIN(1)) == sig),
8263 		((status & STATUS_LU) != 0),
8264 		((rxcw & RXCW_C) != 0)
8265 		    ));
8266 
8267 	/*
8268 	 * SWDPIN   LU RXCW
8269 	 *      0    0    0
8270 	 *      0    0    1	(should not happen)
8271 	 *      0    1    0	(should not happen)
8272 	 *      0    1    1	(should not happen)
8273 	 *      1    0    0	Disable autonego and force linkup
8274 	 *      1    0    1	got /C/ but not linkup yet
8275 	 *      1    1    0	(linkup)
8276 	 *      1    1    1	If IFM_AUTO, back to autonego
8277 	 *
8278 	 */
8279 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
8280 	    && ((status & STATUS_LU) == 0)
8281 	    && ((rxcw & RXCW_C) == 0)) {
8282 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
8283 			__func__));
8284 		sc->sc_tbi_linkup = 0;
8285 		/* Disable auto-negotiation in the TXCW register */
8286 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
8287 
8288 		/*
8289 		 * Force link-up and also force full-duplex.
8290 		 *
8291 		 * NOTE: the TFCE and RFCE bits in CTRL were updated
8292 		 * automatically, so we should update sc->sc_ctrl
8293 		 */
8294 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
8295 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8296 	} else if (((status & STATUS_LU) != 0)
8297 	    && ((rxcw & RXCW_C) != 0)
8298 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
8299 		sc->sc_tbi_linkup = 1;
8300 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
8301 			__func__));
8302 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
8303 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
8304 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
8305 	    && ((rxcw & RXCW_C) != 0)) {
8306 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
8307 	} else {
8308 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
8309 			status));
8310 	}
8311 
8312 	return 0;
8313 }
8314 
8315 /* Work-around for 82566 Kumeran PCS lock loss */
8316 static void
8317 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
8318 {
8319 	int miistatus, active, i;
8320 	int reg;
8321 
8322 	miistatus = sc->sc_mii.mii_media_status;
8323 
8324 	/* If the link is not up, do nothing */
8325 	if ((miistatus & IFM_ACTIVE) == 0)
8326 		return;
8327 
8328 	active = sc->sc_mii.mii_media_active;
8329 
8330 	/* Nothing to do if the link is other than 1Gbps */
8331 	if (IFM_SUBTYPE(active) != IFM_1000_T)
8332 		return;
8333 
8334 	for (i = 0; i < 10; i++) {
8335 		/* read twice */
8336 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
8337 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
8338 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
8339 			goto out;	/* GOOD! */
8340 
8341 		/* Reset the PHY */
8342 		wm_gmii_reset(sc);
8343 		delay(5*1000);
8344 	}
8345 
8346 	/* Disable GigE link negotiation */
8347 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
8348 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
8349 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8350 
8351 	/*
8352 	 * Call gig speed drop workaround on Gig disable before accessing
8353 	 * any PHY registers.
8354 	 */
8355 	wm_gig_downshift_workaround_ich8lan(sc);
8356 
8357 out:
8358 	return;
8359 }
8360 
8361 /* Workaround for a problem where WOL from S5 stops working */
8362 static void
8363 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
8364 {
8365 	uint16_t kmrn_reg;
8366 
8367 	/* Only for igp3 */
8368 	if (sc->sc_phytype == WMPHY_IGP_3) {
8369 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
8370 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
8371 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
8372 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
8373 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
8374 	}
8375 }
8376 
8377 #ifdef WM_WOL
8378 /* Power down workaround on D3 */
8379 static void
8380 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
8381 {
8382 	uint32_t reg;
8383 	int i;
8384 
8385 	for (i = 0; i < 2; i++) {
8386 		/* Disable link */
8387 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
8388 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
8389 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8390 
8391 		/*
8392 		 * Call gig speed drop workaround on Gig disable before
8393 		 * accessing any PHY registers
8394 		 */
8395 		if (sc->sc_type == WM_T_ICH8)
8396 			wm_gig_downshift_workaround_ich8lan(sc);
8397 
8398 		/* Write VR power-down enable */
8399 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
8400 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
8401 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
8402 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
8403 
8404 		/* Read it back and test */
8405 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
8406 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
8407 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
8408 			break;
8409 
8410 		/* Issue PHY reset and repeat at most one more time */
8411 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
8412 	}
8413 }
8414 #endif /* WM_WOL */
8415 
8416 /*
8417  * Workaround for pch's PHYs
8418  * XXX should be moved to new PHY driver?
8419  */
8420 static void
8421 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
8422 {
8423 	if (sc->sc_phytype == WMPHY_82577)
8424 		wm_set_mdio_slow_mode_hv(sc);
8425 
8426 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
8427 
8428 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
8429 
8430 	/* 82578 */
8431 	if (sc->sc_phytype == WMPHY_82578) {
8432 		/* PCH rev. < 3 */
8433 		if (sc->sc_rev < 3) {
8434 			/* XXX 6 bit shift? Why? Is it page2? */
8435 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
8436 			    0x66c0);
8437 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
8438 			    0xffff);
8439 		}
8440 
8441 		/* XXX phy rev. < 2 */
8442 	}
8443 
8444 	/* Select page 0 */
8445 
8446 	/* XXX acquire semaphore */
8447 	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
8448 	/* XXX release semaphore */
8449 
8450 	/*
8451 	 * Configure the K1 Si workaround during phy reset assuming there is
8452 	 * link so that it disables K1 if link is in 1Gbps.
8453 	 */
8454 	wm_k1_gig_workaround_hv(sc, 1);
8455 }
8456 
8457 static void
8458 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
8459 {
8460 
8461 	wm_set_mdio_slow_mode_hv(sc);
8462 }
8463 
8464 static void
8465 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
8466 {
8467 	int k1_enable = sc->sc_nvm_k1_enabled;
8468 
8469 	/* XXX acquire semaphore */
8470 
8471 	if (link) {
8472 		k1_enable = 0;
8473 
8474 		/* Link stall fix for link up */
8475 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
8476 	} else {
8477 		/* Link stall fix for link down */
8478 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
8479 	}
8480 
8481 	wm_configure_k1_ich8lan(sc, k1_enable);
8482 
8483 	/* XXX release semaphore */
8484 }
8485 
8486 static void
8487 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
8488 {
8489 	uint32_t reg;
8490 
8491 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
8492 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
8493 	    reg | HV_KMRN_MDIO_SLOW);
8494 }
8495 
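/*
 * wm_configure_k1_ich8lan:
 *
 *	Enable or disable K1 in the Kumeran K1_CONFIG register, then
 *	briefly force the MAC speed (with SPD_BYPS set) so that the
 *	change takes effect.
 */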
8496 static void
8497 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
8498 {
8499 	uint32_t ctrl, ctrl_ext, tmp;
8500 	uint16_t kmrn_reg;
8501 
8502 	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
8503 
8504 	if (k1_enable)
8505 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
8506 	else
8507 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
8508 
8509 	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
8510 
8511 	delay(20);
8512 
8513 	ctrl = CSR_READ(sc, WMREG_CTRL);
8514 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8515 
8516 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
8517 	tmp |= CTRL_FRCSPD;
8518 
8519 	CSR_WRITE(sc, WMREG_CTRL, tmp);
8520 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
8521 	CSR_WRITE_FLUSH(sc);
8522 	delay(20);
8523 
8524 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
8525 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8526 	CSR_WRITE_FLUSH(sc);
8527 	delay(20);
8528 }
8529 
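/*
 * wm_smbustopci:
 *
 *	If no firmware is managing the PHY and resets are not blocked,
 *	toggle the LANPHYPC value/override bits, which (as the function
 *	name suggests) should switch the PHY connection from SMBus to
 *	PCIe.
 */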
8530 static void
8531 wm_smbustopci(struct wm_softc *sc)
8532 {
8533 	uint32_t fwsm;
8534 
8535 	fwsm = CSR_READ(sc, WMREG_FWSM);
8536 	if (((fwsm & FWSM_FW_VALID) == 0)
8537 	    && ((wm_check_reset_block(sc) == 0))) {
8538 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
8539 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
8540 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8541 		CSR_WRITE_FLUSH(sc);
8542 		delay(10);
8543 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
8544 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8545 		CSR_WRITE_FLUSH(sc);
8546 		delay(50*1000);
8547 
8548 		/*
8549 		 * Gate automatic PHY configuration by hardware on non-managed
8550 		 * 82579
8551 		 */
8552 		if (sc->sc_type == WM_T_PCH2)
8553 			wm_gate_hw_phy_config_ich8lan(sc, 1);
8554 	}
8555 }
8556 
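/*
 * wm_set_pcie_completion_timeout:
 *
 *	If the PCIe completion timeout is still at its default of 0, set
 *	it to 10ms via GCR on older devices, or to 16ms via DCSR2 on
 *	capability version 2 devices; either way, disable completion
 *	timeout resend.
 */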
8557 static void
8558 wm_set_pcie_completion_timeout(struct wm_softc *sc)
8559 {
8560 	uint32_t gcr;
8561 	pcireg_t ctrl2;
8562 
8563 	gcr = CSR_READ(sc, WMREG_GCR);
8564 
8565 	/* Only take action if timeout value is defaulted to 0 */
8566 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
8567 		goto out;
8568 
8569 	if ((gcr & GCR_CAP_VER2) == 0) {
8570 		gcr |= GCR_CMPL_TMOUT_10MS;
8571 		goto out;
8572 	}
8573 
8574 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
8575 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
8576 	ctrl2 |= WM_PCIE_DCSR2_16MS;
8577 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
8578 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
8579 
8580 out:
8581 	/* Disable completion timeout resend */
8582 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
8583 
8584 	CSR_WRITE(sc, WMREG_GCR, gcr);
8585 }
8586 
8587 /* special case - for 82575 - need to do manual init ... */
8588 static void
8589 wm_reset_init_script_82575(struct wm_softc *sc)
8590 {
8591 	/*
8592 	 * Remark: this is untested code - we have no board without EEPROM.
8593 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
8594 	 */
8595 
8596 	/* SerDes configuration via SERDESCTRL */
8597 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
8598 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
8599 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
8600 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
8601 
8602 	/* CCM configuration via CCMCTL register */
8603 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
8604 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
8605 
8606 	/* PCIe lanes configuration */
8607 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
8608 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
8609 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
8610 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
8611 
8612 	/* PCIe PLL Configuration */
8613 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
8614 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
8615 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
8616 }
8617 
8618 static void
8619 wm_init_manageability(struct wm_softc *sc)
8620 {
8621 
8622 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
8623 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
8624 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
8625 
8626 		/* disable hardware interception of ARP */
8627 		manc &= ~MANC_ARP_EN;
8628 
8629 		/* enable receiving management packets to the host */
8630 		if (sc->sc_type >= WM_T_82571) {
8631 			manc |= MANC_EN_MNG2HOST;
8632 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
8633 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
8634 
8635 		}
8636 
8637 		CSR_WRITE(sc, WMREG_MANC, manc);
8638 	}
8639 }
8640 
8641 static void
8642 wm_release_manageability(struct wm_softc *sc)
8643 {
8644 
8645 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
8646 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
8647 
8648 		manc |= MANC_ARP_EN;
8649 		if (sc->sc_type >= WM_T_82571)
8650 			manc &= ~MANC_EN_MNG2HOST;
8651 
8652 		CSR_WRITE(sc, WMREG_MANC, manc);
8653 	}
8654 }
8655 
8656 static void
8657 wm_get_wakeup(struct wm_softc *sc)
8658 {
8659 
8660 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
8661 	switch (sc->sc_type) {
8662 	case WM_T_82573:
8663 	case WM_T_82583:
8664 		sc->sc_flags |= WM_F_HAS_AMT;
8665 		/* FALLTHROUGH */
8666 	case WM_T_80003:
8667 	case WM_T_82541:
8668 	case WM_T_82547:
8669 	case WM_T_82571:
8670 	case WM_T_82572:
8671 	case WM_T_82574:
8672 	case WM_T_82575:
8673 	case WM_T_82576:
8674 	case WM_T_82580:
8675 	case WM_T_82580ER:
8676 	case WM_T_I350:
8677 	case WM_T_I354:
8678 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
8679 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
8680 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8681 		break;
8682 	case WM_T_ICH8:
8683 	case WM_T_ICH9:
8684 	case WM_T_ICH10:
8685 	case WM_T_PCH:
8686 	case WM_T_PCH2:
8687 	case WM_T_PCH_LPT:
8688 		sc->sc_flags |= WM_F_HAS_AMT;
8689 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8690 		break;
8691 	default:
8692 		break;
8693 	}
8694 
8695 	/* 1: HAS_MANAGE */
8696 	if (wm_enable_mng_pass_thru(sc) != 0)
8697 		sc->sc_flags |= WM_F_HAS_MANAGE;
8698 
8699 #ifdef WM_DEBUG
8700 	printf("\n");
8701 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
8702 		printf("HAS_AMT,");
8703 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
8704 		printf("ARC_SUBSYS_VALID,");
8705 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
8706 		printf("ASF_FIRMWARE_PRES,");
8707 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
8708 		printf("HAS_MANAGE,");
8709 	printf("\n");
8710 #endif
8711 	/*
8712 	 * Note that the WOL flags are set after the EEPROM reset code
8713 	 * has run.
8714 	 */
8715 }
8716 
8717 #ifdef WM_WOL
8718 /* WOL in the newer chipset interfaces (pchlan) */
8719 static void
8720 wm_enable_phy_wakeup(struct wm_softc *sc)
8721 {
8722 #if 0
8723 	uint16_t preg;
8724 
8725 	/* Copy MAC RARs to PHY RARs */
8726 
8727 	/* Copy MAC MTA to PHY MTA */
8728 
8729 	/* Configure PHY Rx Control register */
8730 
8731 	/* Enable PHY wakeup in MAC register */
8732 
8733 	/* Configure and enable PHY wakeup in PHY registers */
8734 
8735 	/* Activate PHY wakeup */
8736 
8737 	/* XXX */
8738 #endif
8739 }
8740 
8741 static void
8742 wm_enable_wakeup(struct wm_softc *sc)
8743 {
8744 	uint32_t reg, pmreg;
8745 	pcireg_t pmode;
8746 
8747 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
8748 		&pmreg, NULL) == 0)
8749 		return;
8750 
8751 	/* Advertise the wakeup capability */
8752 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
8753 	    | CTRL_SWDPIN(3));
8754 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
8755 
8756 	/* ICH workaround */
8757 	switch (sc->sc_type) {
8758 	case WM_T_ICH8:
8759 	case WM_T_ICH9:
8760 	case WM_T_ICH10:
8761 	case WM_T_PCH:
8762 	case WM_T_PCH2:
8763 	case WM_T_PCH_LPT:
8764 		/* Disable gig during WOL */
8765 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
8766 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
8767 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8768 		if (sc->sc_type == WM_T_PCH)
8769 			wm_gmii_reset(sc);
8770 
8771 		/* Power down workaround */
8772 		if (sc->sc_phytype == WMPHY_82577) {
8773 			struct mii_softc *child;
8774 
8775 			/* Assume that the PHY is copper */
8776 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
8777 			if (child->mii_mpd_rev <= 2)
8778 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
8779 				    (768 << 5) | 25, 0x0444); /* magic num */
8780 		}
8781 		break;
8782 	default:
8783 		break;
8784 	}
8785 
8786 	/* Keep the laser running on fiber adapters */
8787 	if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
8788 	    || (sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0) {
8789 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
8790 		reg |= CTRL_EXT_SWDPIN(3);
8791 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
8792 	}
8793 
8794 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
8795 #if 0	/* for the multicast packet */
8796 	reg |= WUFC_MC;
8797 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
8798 #endif
8799 
8800 	if (sc->sc_type == WM_T_PCH) {
8801 		wm_enable_phy_wakeup(sc);
8802 	} else {
8803 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
8804 		CSR_WRITE(sc, WMREG_WUFC, reg);
8805 	}
8806 
8807 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8808 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8809 		|| (sc->sc_type == WM_T_PCH2))
8810 		    && (sc->sc_phytype == WMPHY_IGP_3))
8811 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
8812 
8813 	/* Request PME */
8814 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
8815 #if 0
8816 	/* Disable WOL */
8817 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
8818 #else
8819 	/* For WOL */
8820 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
8821 #endif
8822 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
8823 }
8824 #endif /* WM_WOL */
8825 
8826 static bool
8827 wm_suspend(device_t self, const pmf_qual_t *qual)
8828 {
8829 	struct wm_softc *sc = device_private(self);
8830 
8831 	wm_release_manageability(sc);
8832 	wm_release_hw_control(sc);
8833 #ifdef WM_WOL
8834 	wm_enable_wakeup(sc);
8835 #endif
8836 
8837 	return true;
8838 }
8839 
8840 static bool
8841 wm_resume(device_t self, const pmf_qual_t *qual)
8842 {
8843 	struct wm_softc *sc = device_private(self);
8844 
8845 	wm_init_manageability(sc);
8846 
8847 	return true;
8848 }
8849 
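/*
 * wm_set_eee_i350:
 *
 *	Enable or disable Energy Efficient Ethernet advertisement and
 *	LPI handling, depending on the WM_F_EEE flag.
 */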
8850 static void
8851 wm_set_eee_i350(struct wm_softc * sc)
8852 wm_set_eee_i350(struct wm_softc *sc)
8853 	uint32_t ipcnfg, eeer;
8854 
8855 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
8856 	eeer = CSR_READ(sc, WMREG_EEER);
8857 
8858 	if ((sc->sc_flags & WM_F_EEE) != 0) {
8859 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
8860 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
8861 		    | EEER_LPI_FC);
8862 	} else {
8863 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
8864 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
8865 		    | EEER_LPI_FC);
8866 	}
8867 
8868 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
8869 	CSR_WRITE(sc, WMREG_EEER, eeer);
8870 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
8871 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
8872 }
8873